From bd87079080e49c8afb2598b28e5166073d310c98 Mon Sep 17 00:00:00 2001 From: Israel Fruchter Date: Thu, 14 Dec 2023 11:06:26 +0200 Subject: [PATCH] feature(pre-commit): replace pylint with ruff #5799 Ruff is a Rust-based linter for Python that works incredibly fast. Since pylint speed is causing us issues on SCT CI we should start using ruff; for now it uses the pylint set of rules it has implemented and we can slowly expand its configuration to use more of the available rule sets it has Ref: https://github.com/charliermarsh/ruff Ref: https://www.youtube.com/watch?v=jeoL4qsSLbE --- .pre-commit-config.yaml | 8 +- add_new_dc_test.py | 2 +- artifacts_test.py | 4 +- cdc_replication_test.py | 2 +- docker/alternator-dns/dns_server.py | 2 +- docker/env/version | 2 +- functional_tests/scylla_operator/conftest.py | 2 +- .../scylla_operator/libs/auxiliary.py | 2 +- .../scylla_operator/libs/helpers.py | 4 +- .../scylla_operator/test_functional.py | 12 +- grow_cluster_test.py | 2 +- hinted_handoff_test.py | 2 +- longevity_test.py | 4 +- mgmt_cli_test.py | 9 +- mgmt_upgrade_test.py | 9 +- performance_regression_alternator_test.py | 2 +- performance_regression_cdc_test.py | 2 +- ...ance_regression_gradual_grow_throughput.py | 2 +- ...rmance_regression_row_level_repair_test.py | 2 +- performance_regression_test.py | 2 +- performance_search_max_throughput_test.py | 2 +- pyproject.toml | 6 + requirements.in | 2 +- requirements.txt | 407 ++++++++---------- sct.py | 20 +- sct_ssh.py | 10 +- sdcm/audit.py | 2 +- sdcm/cdclog_reader_thread.py | 2 +- sdcm/cluster.py | 72 ++-- sdcm/cluster_aws.py | 40 +- sdcm/cluster_azure.py | 12 +- sdcm/cluster_baremetal.py | 6 +- sdcm/cluster_docker.py | 14 +- sdcm/cluster_gce.py | 18 +- sdcm/cluster_k8s/__init__.py | 38 +- sdcm/cluster_k8s/eks.py | 8 +- sdcm/cluster_k8s/gke.py | 8 +- sdcm/cluster_k8s/mini_k8s.py | 11 +- sdcm/coredump.py | 16 +- sdcm/db_log_reader.py | 8 +- sdcm/db_stats.py | 29 +- sdcm/ec2_client.py | 8 +- sdcm/fill_db_data.py | 8 +- 
sdcm/gemini_thread.py | 4 +- sdcm/loader.py | 8 +- sdcm/logcollector.py | 2 +- sdcm/mgmt/cli.py | 12 +- sdcm/mgmt/operator.py | 12 +- sdcm/microbenchmarking.py | 6 +- sdcm/monitorstack/__init__.py | 6 +- sdcm/nemesis.py | 85 ++-- sdcm/nemesis_publisher.py | 2 +- sdcm/prometheus.py | 8 +- sdcm/provision/aws/instance_parameters.py | 2 +- sdcm/provision/aws/provisioner.py | 2 +- sdcm/provision/aws/utils.py | 4 +- sdcm/provision/azure/ip_provider.py | 2 +- .../azure/network_interface_provider.py | 2 +- sdcm/provision/azure/provisioner.py | 2 +- .../azure/virtual_machine_provider.py | 6 +- .../azure/virtual_network_provider.py | 4 +- sdcm/provision/common/builders.py | 2 +- sdcm/provision/common/provisioner.py | 2 +- sdcm/provision/scylla_yaml/scylla_yaml.py | 4 +- sdcm/remote/base.py | 6 +- sdcm/remote/kubernetes_cmd_runner.py | 16 +- sdcm/remote/libssh2_client/__init__.py | 12 +- sdcm/remote/libssh2_client/exceptions.py | 7 +- sdcm/remote/local_cmd_runner.py | 6 +- sdcm/remote/remote_base.py | 14 +- sdcm/remote/remote_cmd_runner.py | 2 +- sdcm/remote/remote_file.py | 2 +- sdcm/rest/remote_curl_client.py | 2 +- sdcm/results_analyze/__init__.py | 30 +- sdcm/results_analyze/base.py | 2 +- sdcm/results_analyze/test.py | 12 +- sdcm/sct_config.py | 22 +- sdcm/sct_events/base.py | 6 +- sdcm/sct_events/database.py | 2 +- sdcm/sct_events/events_processes.py | 2 +- sdcm/sct_events/loaders.py | 2 +- sdcm/sct_events/nodetool.py | 2 +- sdcm/sct_events/operator.py | 2 +- sdcm/sct_events/stress_events.py | 4 +- sdcm/sct_events/system.py | 4 +- .../region_definition_builder.py | 2 +- sdcm/sct_runner.py | 19 +- sdcm/scylla_bench_thread.py | 4 +- sdcm/send_email.py | 8 +- sdcm/sla/libs/sla_utils.py | 14 +- sdcm/sla/sla_tests.py | 12 +- sdcm/stress/base.py | 4 +- sdcm/stress/latte_thread.py | 6 +- sdcm/stress_thread.py | 10 +- sdcm/tester.py | 63 ++- sdcm/tombstone_gc_verification_thread.py | 2 +- .../adaptive_timeouts/load_info_store.py | 4 +- sdcm/utils/alternator/api.py | 4 +- 
sdcm/utils/argus.py | 2 +- sdcm/utils/azure_region.py | 2 +- sdcm/utils/cdc/options.py | 9 +- .../utils/cloud_monitor/resources/__init__.py | 2 +- .../cloud_monitor/resources/static_ips.py | 2 +- sdcm/utils/common.py | 23 +- sdcm/utils/csrangehistogram.py | 4 +- sdcm/utils/data_validator.py | 50 ++- sdcm/utils/database_query_utils.py | 4 +- sdcm/utils/decorators.py | 6 +- sdcm/utils/docker_remote.py | 2 +- sdcm/utils/docker_utils.py | 6 +- sdcm/utils/file.py | 2 +- sdcm/utils/gce_region.py | 2 +- sdcm/utils/gce_utils.py | 4 +- sdcm/utils/get_username.py | 2 +- sdcm/utils/k8s/__init__.py | 34 +- sdcm/utils/k8s/chaos_mesh.py | 10 +- sdcm/utils/latency.py | 12 +- sdcm/utils/ldap.py | 2 +- sdcm/utils/log.py | 8 +- .../perf_simple_query_reporter.py | 7 +- sdcm/utils/operator/multitenant_common.py | 2 +- sdcm/utils/properties.py | 8 +- sdcm/utils/remote_logger.py | 2 +- sdcm/utils/s3_remote_uploader.py | 2 +- sdcm/utils/sstable/load_utils.py | 6 +- sdcm/utils/sstable/sstable_utils.py | 2 +- sdcm/utils/toppartition_util.py | 4 +- sdcm/utils/version_utils.py | 22 +- sdcm/wait.py | 2 +- sdcm/ycsb_thread.py | 8 +- sla_per_user_system_test.py | 11 +- test_add_remove_ldap_role_permission.py | 2 +- test_lib/compaction.py | 2 +- test_lib/cql_types.py | 4 +- test_lib/sla.py | 8 +- test_lib/utils.py | 2 +- unit_tests/dummy_remote.py | 2 +- unit_tests/lib/fake_provisioner.py | 2 +- unit_tests/lib/fake_remoter.py | 2 +- unit_tests/lib/mock_remoter.py | 2 +- unit_tests/lib/remoter_recorder.py | 2 +- unit_tests/provisioner/fake_azure_service.py | 40 +- .../test_azure_get_scylla_images.py | 2 +- .../test_azure_region_definition_builder.py | 2 +- .../test_provision_sct_resources.py | 8 +- .../provisioner/test_user_data_builder.py | 2 +- unit_tests/test_adaptive_timeouts.py | 12 +- unit_tests/test_audit.py | 4 +- unit_tests/test_cluster.py | 16 +- unit_tests/test_config.py | 6 +- unit_tests/test_events.py | 32 +- unit_tests/test_ndbench_thread.py | 2 +- 
unit_tests/test_nemesis_sisyphus.py | 2 +- unit_tests/test_profiler.py | 2 +- unit_tests/test_remoter.py | 8 +- unit_tests/test_scan_operation_thread.py | 4 +- unit_tests/test_sct_events_base.py | 8 +- ...t_sct_events_continuous_events_registry.py | 2 +- unit_tests/test_scylla_yaml_builders.py | 2 +- unit_tests/test_seed_selector.py | 4 +- unit_tests/test_utils_docker.py | 2 +- unit_tests/test_utils_k8s.py | 4 +- unit_tests/test_version_utils.py | 13 +- unit_tests/test_wait.py | 8 +- unit_tests/test_ycsb_thread.py | 2 +- update_java.sh | 6 + upgrade_schema_test.py | 4 +- upgrade_test.py | 10 +- .../build_system/create_test_release_jobs.py | 2 +- utils/get_supported_scylla_base_versions.py | 2 +- 170 files changed, 873 insertions(+), 939 deletions(-) create mode 100644 pyproject.toml create mode 100755 update_java.sh diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 1be24edc0b2..4b78063af98 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -39,12 +39,12 @@ repos: types: [python] exclude: '\.sh$' - - id: pylint - name: pylint - entry: pylint -j 2 -d consider-using-f-string + - id: ruff + name: ruff + entry: ruff --force-exclude --fix language: system exclude: ^docker/alternator-dns/.*$ - types: [python] + 'types_or': [python, pyi] - repo: https://github.com/alessandrojcm/commitlint-pre-commit-hook rev: v5.0.0 diff --git a/add_new_dc_test.py b/add_new_dc_test.py index e22cce470e1..ed5ec15145d 100644 --- a/add_new_dc_test.py +++ b/add_new_dc_test.py @@ -90,7 +90,7 @@ def add_node_in_new_dc(self) -> BaseNode: self.monitors.reconfigure_scylla_monitoring() status = self.db_cluster.get_nodetool_status() - assert len(status.keys()) == 2, f"new datacenter was not registered. Cluster status: {status}" + assert len(status.keys()) == 2, f"new datacenter was not registered. 
Cluster status: {status}" # noqa: PLR2004 self.log.info("New DC to cluster has been added") return new_node diff --git a/artifacts_test.py b/artifacts_test.py index 6a7a353ab52..d242f56898b 100644 --- a/artifacts_test.py +++ b/artifacts_test.py @@ -178,7 +178,7 @@ def verify_users(self): out = self.node.remoter.run(cmd="ls -ltr --full-time /home", verbose=True).stdout.strip() for line in out.splitlines(): splitted_line = line.split() - if len(splitted_line) <= 2: + if len(splitted_line) <= 2: # noqa: PLR2004 continue user = splitted_line[-1] if user == "centos": @@ -299,7 +299,7 @@ def run_pre_create_schema(self, replication_factor=1): compaction=compaction_strategy, sstable_size=sstable_size) # pylint: disable=too-many-statements,too-many-branches - def test_scylla_service(self): + def test_scylla_service(self): # noqa: PLR0915 self.run_pre_create_schema() diff --git a/cdc_replication_test.py b/cdc_replication_test.py index cad6beb124e..cd2bd61e2a7 100644 --- a/cdc_replication_test.py +++ b/cdc_replication_test.py @@ -227,7 +227,7 @@ def test_replication_longevity(self) -> None: # pylint: disable=too-many-statements,too-many-branches,too-many-locals - def test_replication(self, is_gemini_test: bool, mode: Mode) -> None: + def test_replication(self, is_gemini_test: bool, mode: Mode) -> None: # noqa: PLR0915 assert is_gemini_test or (mode == Mode.DELTA), "cassandra-stress doesn't work with preimage/postimage modes" self.consistency_ok = False diff --git a/docker/alternator-dns/dns_server.py b/docker/alternator-dns/dns_server.py index e1537738f4a..eee83082338 100644 --- a/docker/alternator-dns/dns_server.py +++ b/docker/alternator-dns/dns_server.py @@ -20,7 +20,7 @@ def livenodes_update(): - global alternator_port + global alternator_port # noqa: PLW0602 global livenodes while True: # Contact one of the already known nodes by random, to fetch a new diff --git a/docker/env/version b/docker/env/version index 755896eff76..e5c163382ef 100644 --- a/docker/env/version +++ 
b/docker/env/version @@ -1 +1 @@ -1.54-update-k8s-components-1 +1.55-introduce-ruff diff --git a/functional_tests/scylla_operator/conftest.py b/functional_tests/scylla_operator/conftest.py index a340c4efb3e..9f9256e4998 100644 --- a/functional_tests/scylla_operator/conftest.py +++ b/functional_tests/scylla_operator/conftest.py @@ -70,7 +70,7 @@ def publish_test_result(): @pytest.fixture(autouse=True, scope='package', name="tester") def fixture_tester() -> ScyllaOperatorFunctionalClusterTester: - global TESTER # pylint: disable=global-statement + global TESTER # pylint: disable=global-statement # noqa: PLW0603 os.chdir(sct_abs_path()) tester_inst = ScyllaOperatorFunctionalClusterTester() TESTER = tester_inst # putting tester global, so we can report skipped test (one with mark.skip) diff --git a/functional_tests/scylla_operator/libs/auxiliary.py b/functional_tests/scylla_operator/libs/auxiliary.py index 8f299ddae8b..c15fb1a4152 100644 --- a/functional_tests/scylla_operator/libs/auxiliary.py +++ b/functional_tests/scylla_operator/libs/auxiliary.py @@ -42,7 +42,7 @@ def update_test_status(self, test_name, status, error=None): def get_test_failures(self): for test_name, test_data in self.test_data.items(): - status, message = (test_data[0], test_data[1]) if len(test_data) == 2 else ('UNKNOWN', '') + status, message = (test_data[0], test_data[1]) if len(test_data) == 2 else ('UNKNOWN', '') # noqa: PLR2004 if status != 'SUCCESS': TestFrameworkEvent( source=self.__class__.__name__, diff --git a/functional_tests/scylla_operator/libs/helpers.py b/functional_tests/scylla_operator/libs/helpers.py index 6f3f397067a..ab1b0ce7cda 100644 --- a/functional_tests/scylla_operator/libs/helpers.py +++ b/functional_tests/scylla_operator/libs/helpers.py @@ -100,7 +100,7 @@ def scylla_services_names(db_cluster: ScyllaPodCluster) -> list: if name not in ('NAME', f"{scylla_cluster_name}-client")] -def wait_for_resource_absence(db_cluster: ScyllaPodCluster, # pylint: 
disable=too-many-arguments +def wait_for_resource_absence(db_cluster: ScyllaPodCluster, # pylint: disable=too-many-arguments # noqa: PLR0913 resource_type: str, resource_name: str, namespace: str = SCYLLA_NAMESPACE, step: int = 2, timeout: int = 60) -> None: @@ -216,7 +216,7 @@ def verify_resharding_on_k8s(db_cluster: ScyllaPodCluster, cpus: Union[str, int, # Calculate the time spent for resharding. We need to have it be bigger than 2minutes # because it is the timeout of the liveness probe for Scylla pods. resharding_time = time.time() - resharding_started - if resharding_time < 120: + if resharding_time < 120: # noqa: PLR2004 log.warning( "Resharding was too fast - '%s's (<120s) on the '%s' node", resharding_time, node.name) diff --git a/functional_tests/scylla_operator/test_functional.py b/functional_tests/scylla_operator/test_functional.py index cd0af7a488e..de95f0faba5 100644 --- a/functional_tests/scylla_operator/test_functional.py +++ b/functional_tests/scylla_operator/test_functional.py @@ -94,7 +94,7 @@ def test_single_operator_image_tag_is_everywhere(db_cluster): @pytest.mark.required_operator("v1.11.0") -def test_deploy_quasi_multidc_db_cluster(db_cluster: ScyllaPodCluster): # pylint: disable=too-many-locals,too-many-statements,too-many-branches +def test_deploy_quasi_multidc_db_cluster(db_cluster: ScyllaPodCluster): # pylint: disable=too-many-locals,too-many-statements,too-many-branches # noqa: PLR0915 """ Deploy 2 'ScyllaCluster' K8S objects in 2 different namespaces in the single K8S cluster and combine them into a single DB cluster. 
@@ -144,7 +144,7 @@ def get_pod_names_and_ips(cluster_name: str, namespace: str): namespace=namespace).stdout.split("\n") pod_names_and_ips = [row.strip() for row in pod_names_and_ips if row.strip()] assert pod_names_and_ips - assert len(pod_names_and_ips) == 3 + assert len(pod_names_and_ips) == 3 # noqa: PLR2004 pod_data = {namespace: {}} for pod_name_and_ip in pod_names_and_ips: pod_name, pod_ip = pod_name_and_ip.split() @@ -165,7 +165,7 @@ def get_pod_names_and_ips(cluster_name: str, namespace: str): f" -l scylla/cluster={cluster_name} -l scylla-operator.scylladb.com/scylla-service-type=member", namespace=namespace).stdout.split() assert svc_ips - assert len(svc_ips) == 3 + assert len(svc_ips) == 3 # noqa: PLR2004 assert all(svc_ip in ('', 'None') for svc_ip in svc_ips), "SVC IPs were expected to be absent" # NOTE: read Scylla pods IPs @@ -196,7 +196,7 @@ def get_pod_names_and_ips(cluster_name: str, namespace: str): f"exec {pod_name} -- /bin/cqlsh -e \"{cqlsh_cmd}\"", namespace=current_namespace).stdout.split("---\n")[-1].split("\n") table_rows = [yaml.safe_load(row) for row in cqlsh_results if "{" in row] - assert len(table_rows) == 5, "Expected 5 peers" + assert len(table_rows) == 5, "Expected 5 peers" # noqa: PLR2004 for row in table_rows: assert row["peer"] == row["rpc_address"] assert row["peer"] != pod_ip @@ -705,7 +705,7 @@ def test_scylla_operator_pods(db_cluster: ScyllaPodCluster): scylla_operator_pods = get_pods_and_statuses(db_cluster=db_cluster, namespace=SCYLLA_OPERATOR_NAMESPACE, label='app.kubernetes.io/instance=scylla-operator') - assert len(scylla_operator_pods) == 2, f'Expected 2 scylla-operator pods, but exists {len(scylla_operator_pods)}' + assert len(scylla_operator_pods) == 2, f'Expected 2 scylla-operator pods, but exists {len(scylla_operator_pods)}' # noqa: PLR2004 not_running_pods = ','.join( [pods_info['name'] for pods_info in scylla_operator_pods if pods_info['status'] != 'Running']) @@ -780,7 +780,7 @@ def 
test_deploy_helm_with_default_values(db_cluster: ScyllaPodCluster): pods_name_and_status = get_pods_and_statuses(db_cluster, namespace=namespace) - assert len(pods_name_and_status) == 3, ( + assert len(pods_name_and_status) == 3, ( # noqa: PLR2004 f"Expected 3 pods to be created in {namespace} namespace " f"but actually {len(pods_name_and_status)}: {pods_name_and_status}") diff --git a/grow_cluster_test.py b/grow_cluster_test.py index 28649bef4c6..9fc570ea9a0 100644 --- a/grow_cluster_test.py +++ b/grow_cluster_test.py @@ -158,7 +158,7 @@ def test_add_remove_nodes(self): for _ in range(add_cnt): self.add_nodes(1) time.sleep(wait_interval) - rm_cnt = random.randint(1, max_random_cnt) if len(self.db_cluster.nodes) >= 10 else 0 + rm_cnt = random.randint(1, max_random_cnt) if len(self.db_cluster.nodes) >= 10 else 0 # noqa: PLR2004 if rm_cnt > 0: self.log.info('Remove %s nodes from cluster', rm_cnt) for _ in range(rm_cnt): diff --git a/hinted_handoff_test.py b/hinted_handoff_test.py index 16d64a61b47..53e9e01e165 100644 --- a/hinted_handoff_test.py +++ b/hinted_handoff_test.py @@ -33,7 +33,7 @@ def test_stop_nodes_under_stress(self): Stop node3. Read all data n=X with CL=ONE. """ - assert len(self.db_cluster.nodes) == 3, "The test requires 3 DB nodes!" + assert len(self.db_cluster.nodes) == 3, "The test requires 3 DB nodes!" 
# noqa: PLR2004 node1 = self.db_cluster.nodes[0] node2 = self.db_cluster.nodes[1] node3 = self.db_cluster.nodes[2] diff --git a/longevity_test.py b/longevity_test.py index 06b7887ec5e..3cb5bc54261 100644 --- a/longevity_test.py +++ b/longevity_test.py @@ -113,7 +113,7 @@ def _run_validate_large_collections_warning_in_logs(self, node): if not res: InfoEvent("Did not find expected log message warning: {}".format(msg), severity=Severity.ERROR) - def test_custom_time(self): + def test_custom_time(self): # noqa: PLR0912, PLR0915 """ Run cassandra-stress with params defined in data_dir/scylla.yaml """ @@ -327,7 +327,7 @@ def chunks(_list, chunk_size): self._pre_create_templated_user_schema(batch_start=extra_tables_idx, batch_end=extra_tables_idx+num_of_newly_created_tables) for i in range(num_of_newly_created_tables): - batch += self.create_templated_user_stress_params(extra_tables_idx + i, cs_profile=cs_profile) + batch.append(self.create_templated_user_stress_params(extra_tables_idx + i, cs_profile=cs_profile)) nodes_ips = self.all_node_ips_for_stress_command for params in batch: diff --git a/mgmt_cli_test.py b/mgmt_cli_test.py index 29adba625c2..49de9d53659 100644 --- a/mgmt_cli_test.py +++ b/mgmt_cli_test.py @@ -172,7 +172,7 @@ def restore_backup_from_backup_task(self, mgr_cluster, backup_task, keyspace_and keyspace_and_table_list=keyspace_and_table_list) # pylint: disable=too-many-arguments - def verify_backup_success(self, mgr_cluster, backup_task, keyspace_name='keyspace1', tables_names=None, + def verify_backup_success(self, mgr_cluster, backup_task, keyspace_name='keyspace1', tables_names=None, # noqa: PLR0913 truncate=True, restore_data_with_task=False, timeout=None): if tables_names is None: tables_names = ['standard1'] @@ -188,7 +188,7 @@ def verify_backup_success(self, mgr_cluster, backup_task, keyspace_name='keyspac self.restore_backup_from_backup_task(mgr_cluster=mgr_cluster, backup_task=backup_task, keyspace_and_table_list=per_keyspace_tables_dict) - def 
restore_backup_with_task(self, mgr_cluster, snapshot_tag, timeout, restore_schema=False, restore_data=False, + def restore_backup_with_task(self, mgr_cluster, snapshot_tag, timeout, restore_schema=False, restore_data=False, # noqa: PLR0913 location_list=None): location_list = location_list if location_list else self.locations restore_task = mgr_cluster.create_restore_task(restore_schema=restore_schema, restore_data=restore_data, @@ -251,7 +251,7 @@ def generate_background_read_load(self): number_of_loaders = self.params.get("n_loaders") scylla_version = self.db_cluster.nodes[0].scylla_version - if parse_version(scylla_version).release[0] == 2019: + if parse_version(scylla_version).release[0] == 2019: # noqa: PLR2004 # Making sure scylla version is 2019.1.x throttle_per_node = 10666 else: @@ -866,8 +866,9 @@ def test_repair_multiple_keyspace_types(self): # pylint: disable=invalid-name keyspace_repair_percentage = per_keyspace_progress.get(keyspace_name, None) assert keyspace_repair_percentage is not None, \ "The keyspace {} was not included in the repair!".format(keyspace_name) + # noqa: PLR2004 assert keyspace_repair_percentage == 100, \ - "The repair of the keyspace {} stopped at {}%".format( + "The repair of the keyspace {} stopped at {}%".format( # noqa: PLR2004 keyspace_name, keyspace_repair_percentage) localstrategy_keyspace_percentage = per_keyspace_progress.get(self.LOCALSTRATEGY_KEYSPACE_NAME, None) diff --git a/mgmt_upgrade_test.py b/mgmt_upgrade_test.py index a456d55e88e..6f622d76667 100644 --- a/mgmt_upgrade_test.py +++ b/mgmt_upgrade_test.py @@ -52,7 +52,7 @@ def _create_and_add_cluster(self): auth_token=self.monitors.mgmt_auth_token) return mgr_cluster, current_manager_version - def test_upgrade(self): # pylint: disable=too-many-locals,too-many-statements + def test_upgrade(self): # pylint: disable=too-many-locals,too-many-statements # noqa: PLR0915 manager_node = self.monitors.nodes[0] target_upgrade_server_version = 
self.params.get('target_scylla_mgmt_server_address') @@ -275,11 +275,10 @@ def validate_previous_task_details(task, previous_task_details): delta = current_value - previous_task_details[detail_name] # I check that the time delta is smaller than 60 seconds since we calculate the next run time on our own, # and as a result it could be a BIT imprecise - if abs(delta.total_seconds()) > 60: - mismatched_details_name_list.append(detail_name) - else: - if current_value != previous_task_details[detail_name]: + if abs(delta.total_seconds()) > 60: # noqa: PLR2004 mismatched_details_name_list.append(detail_name) + elif current_value != previous_task_details[detail_name]: + mismatched_details_name_list.append(detail_name) complete_error_description = _create_mismatched_details_error_message(previous_task_details, current_task_details, mismatched_details_name_list) diff --git a/performance_regression_alternator_test.py b/performance_regression_alternator_test.py index b56aff30d74..54b57b6d2e0 100644 --- a/performance_regression_alternator_test.py +++ b/performance_regression_alternator_test.py @@ -27,7 +27,7 @@ def __init__(self, *args): self.stack.enter_context(ignore_alternator_client_errors()) self.stack.enter_context(ignore_operation_errors()) - def _workload(self, stress_cmd, stress_num, test_name=None, sub_type=None, keyspace_num=1, prefix='', debug_message='', # pylint: disable=too-many-arguments,arguments-differ + def _workload(self, stress_cmd, stress_num, test_name=None, sub_type=None, keyspace_num=1, prefix='', debug_message='', # pylint: disable=too-many-arguments,arguments-differ # noqa: PLR0913 save_stats=True, is_alternator=True): if not is_alternator: stress_cmd = stress_cmd.replace('dynamodb', 'cassandra-cql') diff --git a/performance_regression_cdc_test.py b/performance_regression_cdc_test.py index 6bdfefdaf7e..b4da1cbebd5 100644 --- a/performance_regression_cdc_test.py +++ b/performance_regression_cdc_test.py @@ -152,7 +152,7 @@ def cdc_workflow(self, 
use_cdclog_reader=False): # pylint: disable=unused-varia self.check_regression_with_baseline(subtest_baseline="cdc_disabled") - def _workload_cdc(self, stress_cmd, stress_num, test_name, sub_type=None, # pylint: disable=too-many-arguments + def _workload_cdc(self, stress_cmd, stress_num, test_name, sub_type=None, # pylint: disable=too-many-arguments # noqa: PLR0913 save_stats=True, read_cdclog_cmd=None, update_cdclog_stats=False, enable_batching=True): cdc_stress_queue = None diff --git a/performance_regression_gradual_grow_throughput.py b/performance_regression_gradual_grow_throughput.py index ee5a3110960..0e7cd9d7cb8 100644 --- a/performance_regression_gradual_grow_throughput.py +++ b/performance_regression_gradual_grow_throughput.py @@ -129,7 +129,7 @@ def preload_data(self, compaction_strategy=None): self.log.info("Dataset has been populated") # pylint: disable=too-many-arguments,too-many-locals - def run_gradual_increase_load(self, stress_cmd_templ, + def run_gradual_increase_load(self, stress_cmd_templ, # noqa: PLR0913 start_ops, max_ops, throttle_step, stress_num, num_loaders, compaction_strategy, test_name): self.warmup_cache(compaction_strategy) diff --git a/performance_regression_row_level_repair_test.py b/performance_regression_row_level_repair_test.py index 3b5bfc7ee71..00be4fcf354 100644 --- a/performance_regression_row_level_repair_test.py +++ b/performance_regression_row_level_repair_test.py @@ -80,7 +80,7 @@ def preload_data(self, consistency_level=None): for stress_cmd in prepare_write_cmd: if consistency_level: - stress_cmd = self._update_cl_in_stress_cmd( + stress_cmd = self._update_cl_in_stress_cmd( # noqa: PLW2901 str_stress_cmd=stress_cmd, consistency_level=consistency_level) params.update({'stress_cmd': stress_cmd}) diff --git a/performance_regression_test.py b/performance_regression_test.py index 5c2803533f0..a460a6af187 100644 --- a/performance_regression_test.py +++ b/performance_regression_test.py @@ -179,7 +179,7 @@ def 
display_results(self, results, test_name=''): self.log.debug('Failed to display results: {0}'.format(results)) self.log.debug('Exception: {0}'.format(ex)) - def _workload(self, stress_cmd, stress_num, test_name, sub_type=None, keyspace_num=1, prefix='', debug_message='', # pylint: disable=too-many-arguments + def _workload(self, stress_cmd, stress_num, test_name, sub_type=None, keyspace_num=1, prefix='', debug_message='', # pylint: disable=too-many-arguments # noqa: PLR0913 save_stats=True): if debug_message: self.log.debug(debug_message) diff --git a/performance_search_max_throughput_test.py b/performance_search_max_throughput_test.py index ee3e67b8a63..ddd3488c35e 100644 --- a/performance_search_max_throughput_test.py +++ b/performance_search_max_throughput_test.py @@ -47,7 +47,7 @@ def test_search_best_mixed_throughput(self): # pylint: disable=too-many-locals self.run_search_best_performance(**stress_params) - def run_search_best_performance(self, stress_cmd_tmpl: str, # pylint: disable=too-many-arguments,too-many-locals,too-many-statements + def run_search_best_performance(self, stress_cmd_tmpl: str, # pylint: disable=too-many-arguments,too-many-locals,too-many-statements # noqa: PLR0913, PLR0915 stress_num: int, stress_num_step: int, stress_step_duration: str, diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000000..b7fe88f9eeb --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,6 @@ +[tool.ruff] +select = ["PL"] + +ignore = ["E501"] + +target-version = "py311" diff --git a/requirements.in b/requirements.in index 51f6c810e74..ad056d2fda4 100644 --- a/requirements.in +++ b/requirements.in @@ -31,7 +31,7 @@ python-jenkins==1.7.0 ssh2-python==1.0.0 argus-alm==0.11.7 parameterized==0.8.1 -pylint==2.11.1 # Needed for pre-commit hooks +ruff==0.1.8 # Needed for pre-commit hooks autopep8==1.5.7 # Needed for pre-commit hooks kubernetes==24.2.0 packaging==21.3 diff --git a/requirements.txt b/requirements.txt index 31c8328f6fa..ef06d4852e5 100644 
--- a/requirements.txt +++ b/requirements.txt @@ -12,14 +12,14 @@ anyconfig==0.12.0 \ --hash=sha256:2119065ad835d0e4cbcb26a58fab0705952c5657c05fa90ef7a11cdecbbafb1f \ --hash=sha256:3091d7675740686fade85755537f2a7c6ccefa8659c5bb56174e02ded74b96d5 # via -r requirements.in +anyio==4.2.0 \ + --hash=sha256:745843b39e829e108e518c489b31dc757de7d2131d53fac32bd8df268227bfee \ + --hash=sha256:e1875bb4b4e2de1669f4bc7869b6d3f54231cdced71605e6e64c9be77e3be50f + # via azure-core argus-alm==0.11.7 \ --hash=sha256:1003065e608989f1cbc6e57ad4db8fcf217db4cb8a1162fe7acbd1bb236a2e0f \ --hash=sha256:1c43e4db74a4201f12ffdb6609493a148070e65867b697344b1d9bc3a1b11d64 # via -r requirements.in -astroid==2.8.6 \ - --hash=sha256:5f6f75e45f15290e73b56f9dfde95b4bf96382284cde406ef4203e928335a495 \ - --hash=sha256:cd8326b424c971e7d87678609cf6275d22028afd37d6ac59c16d47f1245882f6 - # via pylint attrs==23.1.0 \ --hash=sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04 \ --hash=sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015 @@ -41,9 +41,9 @@ azure-common==1.1.28 \ # azure-mgmt-resource # azure-mgmt-resourcegraph # azure-mgmt-subscription -azure-core==1.29.5 \ - --hash=sha256:0fa04b7b1f7d44a4fb8468c4093deb2ea01fdf4faddbf802ed9205615f99d68c \ - --hash=sha256:52983c89d394c6f881a121e5101c5fa67278ca3b1f339c8fb2ef39230c70e9ac +azure-core==1.29.6 \ + --hash=sha256:13b485252ecd9384ae624894fe51cfa6220966207264c360beada239f88b738a \ + --hash=sha256:604a005bce6a49ba661bb7b2be84a9b169047e52fcfcd0a4e4770affab4178f7 # via # azure-identity # azure-mgmt-core @@ -86,24 +86,34 @@ azure-storage-blob==12.16.0 \ --hash=sha256:43b45f19a518a5c6895632f263b3825ebc23574f25cc84b66e1630a6160e466f \ --hash=sha256:91bb192b2a97939c4259c72373bac0f41e30810bbc853d5184f0f45904eacafd # via -r requirements.in -bcrypt==4.1.1 \ - --hash=sha256:12611c4b0a8b1c461646228344784a1089bc0c49975680a2f54f516e71e9b79e \ - --hash=sha256:12f40f78dcba4aa7d1354d35acf45fae9488862a4fb695c7eeda5ace6aae273f \ - 
--hash=sha256:14d41933510717f98aac63378b7956bbe548986e435df173c841d7f2bd0b2de7 \ - --hash=sha256:196008d91201bbb1aa4e666fee5e610face25d532e433a560cabb33bfdff958b \ - --hash=sha256:24c2ebd287b5b11016f31d506ca1052d068c3f9dc817160628504690376ff050 \ - --hash=sha256:2ade10e8613a3b8446214846d3ddbd56cfe9205a7d64742f0b75458c868f7492 \ - --hash=sha256:2e197534c884336f9020c1f3a8efbaab0aa96fc798068cb2da9c671818b7fbb0 \ - --hash=sha256:3d6c4e0d6963c52f8142cdea428e875042e7ce8c84812d8e5507bd1e42534e07 \ - --hash=sha256:476aa8e8aca554260159d4c7a97d6be529c8e177dbc1d443cb6b471e24e82c74 \ - --hash=sha256:755b9d27abcab678e0b8fb4d0abdebeea1f68dd1183b3f518bad8d31fa77d8be \ - --hash=sha256:a7a7b8a87e51e5e8ca85b9fdaf3a5dc7aaf123365a09be7a27883d54b9a0c403 \ - --hash=sha256:bab33473f973e8058d1b2df8d6e095d237c49fbf7a02b527541a86a5d1dc4444 \ - --hash=sha256:c6450538a0fc32fb7ce4c6d511448c54c4ff7640b2ed81badf9898dcb9e5b737 \ - --hash=sha256:d573885b637815a7f3a3cd5f87724d7d0822da64b0ab0aa7f7c78bae534e86dc \ - --hash=sha256:df37f5418d4f1cdcff845f60e747a015389fa4e63703c918330865e06ad80007 \ - --hash=sha256:f33b385c3e80b5a26b3a5e148e6165f873c1c202423570fdf45fe34e00e5f3e5 \ - --hash=sha256:fb931cd004a7ad36a89789caf18a54c20287ec1cd62161265344b9c4554fdb2e +bcrypt==4.1.2 \ + --hash=sha256:02d9ef8915f72dd6daaef40e0baeef8a017ce624369f09754baf32bb32dba25f \ + --hash=sha256:1c28973decf4e0e69cee78c68e30a523be441972c826703bb93099868a8ff5b5 \ + --hash=sha256:2a298db2a8ab20056120b45e86c00a0a5eb50ec4075b6142db35f593b97cb3fb \ + --hash=sha256:33313a1200a3ae90b75587ceac502b048b840fc69e7f7a0905b5f87fac7a1258 \ + --hash=sha256:3566a88234e8de2ccae31968127b0ecccbb4cddb629da744165db72b58d88ca4 \ + --hash=sha256:387e7e1af9a4dd636b9505a465032f2f5cb8e61ba1120e79a0e1cd0b512f3dfc \ + --hash=sha256:44290ccc827d3a24604f2c8bcd00d0da349e336e6503656cb8192133e27335e2 \ + --hash=sha256:57fa9442758da926ed33a91644649d3e340a71e2d0a5a8de064fb621fd5a3326 \ + 
--hash=sha256:68e3c6642077b0c8092580c819c1684161262b2e30c4f45deb000c38947bf483 \ + --hash=sha256:69057b9fc5093ea1ab00dd24ede891f3e5e65bee040395fb1e66ee196f9c9b4a \ + --hash=sha256:6cad43d8c63f34b26aef462b6f5e44fdcf9860b723d2453b5d391258c4c8e966 \ + --hash=sha256:71b8be82bc46cedd61a9f4ccb6c1a493211d031415a34adde3669ee1b0afbb63 \ + --hash=sha256:732b3920a08eacf12f93e6b04ea276c489f1c8fb49344f564cca2adb663b3e4c \ + --hash=sha256:9800ae5bd5077b13725e2e3934aa3c9c37e49d3ea3d06318010aa40f54c63551 \ + --hash=sha256:a97e07e83e3262599434816f631cc4c7ca2aa8e9c072c1b1a7fec2ae809a1d2d \ + --hash=sha256:ac621c093edb28200728a9cca214d7e838529e557027ef0581685909acd28b5e \ + --hash=sha256:b8df79979c5bae07f1db22dcc49cc5bccf08a0380ca5c6f391cbb5790355c0b0 \ + --hash=sha256:b90e216dc36864ae7132cb151ffe95155a37a14e0de3a8f64b49655dd959ff9c \ + --hash=sha256:ba4e4cc26610581a6329b3937e02d319f5ad4b85b074846bf4fef8a8cf51e7bb \ + --hash=sha256:ba55e40de38a24e2d78d34c2d36d6e864f93e0d79d0b6ce915e4335aa81d01b1 \ + --hash=sha256:be3ab1071662f6065899fe08428e45c16aa36e28bc42921c4901a191fda6ee42 \ + --hash=sha256:d75fc8cd0ba23f97bae88a6ec04e9e5351ff3c6ad06f38fe32ba50cbd0d11946 \ + --hash=sha256:e51c42750b7585cee7892c2614be0d14107fad9581d1738d954a262556dd1aab \ + --hash=sha256:ea505c97a5c465ab8c3ba75c0805a102ce526695cd6818c6de3b1a38f6f60da1 \ + --hash=sha256:eb3bd3321517916696233b5e0c67fd7d6281f0ef48e66812db35fc963a422a1c \ + --hash=sha256:f70d9c61f9c4ca7d57f3bfe88a5ccf62546ffbadf3681bb1e268d9d2e41c91a7 \ + --hash=sha256:fbe188b878313d01b7718390f31528be4010fed1faa798c5a1d0469c9c48c369 # via paramiko boto3==1.28.4 \ --hash=sha256:1f4b9c23dfcad910b6f8e74aac9fe507c1e75fcdd832e25ed2ff1e6d7a99cddf \ @@ -122,9 +132,9 @@ botocore==1.31.4 \ # awscli # boto3 # s3transfer -botocore-stubs==1.33.7.post1 \ - --hash=sha256:88f43d21dd52d9dea506591e14ed6a4df67dbb653b8576ed84119f763b8a3c9b \ - --hash=sha256:e83cb045fdeef4b3848680e8d342bbe6951f939bebe05263792080a1aadf6a7a +botocore-stubs==1.34.2 \ + 
--hash=sha256:7e6299577e56c9d4ef899f21e5ebda0ff98beaea524305d02b9b7c16767d9879 \ + --hash=sha256:fb60ca7561b2a9d1ddabdc5eeb96112b2f978ddfa6a294bbe31d2bff5a4e658a # via boto3-stubs build==1.0.3 \ --hash=sha256:538aab1b64f9828977f84bc63ae570b060a8ed1be419e7870b8b4fc5e6ea553b \ @@ -350,9 +360,9 @@ deepdiff==6.2.3 \ --hash=sha256:a02aaa8171351eba675cff5f795ec7a90987f86ad5449553308d4e18df57dc3d \ --hash=sha256:d83b06e043447d6770860a635abecb46e849b0494c43ced2ecafda7628c7ce72 # via -r requirements.in -distlib==0.3.7 \ - --hash=sha256:2e24928bc811348f0feb63014e97aaae3037f2cf48712d51ae61df7fd6075057 \ - --hash=sha256:9dafe54b34a028eafd95039d5e5d4851a13734540f1331060d31c9916e7147a8 +distlib==0.3.8 \ + --hash=sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784 \ + --hash=sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64 # via virtualenv docker==4.4.4 \ --hash=sha256:d3393c878f575d3a9ca3b94471a3c89a6d960b35feb92f033c0de36cc9d934db \ @@ -371,7 +381,9 @@ elasticsearch==7.15.0 \ exceptiongroup==1.2.0 \ --hash=sha256:4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14 \ --hash=sha256:91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68 - # via pytest + # via + # anyio + # pytest fabric==2.6.0 \ --hash=sha256:47f184b070272796fd2f9f0436799e18f2ccba4ee8ee587796fca192acd46cd2 \ --hash=sha256:7a71714b8b8f28cf828eceb155196f43ebac1bd4c849b7161ed5993d1cbcaa40 @@ -387,9 +399,9 @@ geomet==0.2.1.post1 \ --hash=sha256:91d754f7c298cbfcabd3befdb69c641c27fe75e808b27aa55028605761d17e95 \ --hash=sha256:a41a1e336b381416d6cbed7f1745c848e91defaa4d4c1bdc1312732e46ffad2b # via scylla-driver -google-api-core[grpc]==2.14.0 \ - --hash=sha256:5368a4502b793d9bbf812a5912e13e4e69f9bd87f6efb508460c43f5bbd1ce41 \ - --hash=sha256:de2fb50ed34d47ddbb2bd2dcf680ee8fead46279f4ed6b16de362aca23a18952 +google-api-core[grpc]==2.15.0 \ + --hash=sha256:2aa56d2be495551e66bbff7f729b790546f87d5c90e74781aa77233bcb395a8a \ + 
--hash=sha256:abc978a72658f14a2df1e5e12532effe40f94f868f6e23d95133bd6abcca35ca # via # google-api-python-client # google-cloud-compute @@ -399,9 +411,9 @@ google-api-python-client==2.93.0 \ --hash=sha256:62ee28e96031a10a1c341f226a75ac6a4f16bdb1d888dc8222b2cdca133d0031 \ --hash=sha256:f34abb671afd488bd19d30721ea20fb30d3796ddd825d6f91f26d8c718a9f07d # via -r requirements.in -google-auth==2.24.0 \ - --hash=sha256:2ec7b2a506989d7dbfdbe81cb8d0ead8876caaed14f86d29d34483cbe99c57af \ - --hash=sha256:9b82d5c8d3479a5391ea0a46d81cca698d328459da31d4a459d4e901a5d927e0 +google-auth==2.25.2 \ + --hash=sha256:42f707937feb4f5e5a39e6c4f343a17300a459aaf03141457ba505812841cc40 \ + --hash=sha256:473a8dfd0135f75bb79d878436e568f2695dce456764bf3a02b6f8c540b1d256 # via # google-api-core # google-api-python-client @@ -409,17 +421,17 @@ google-auth==2.24.0 \ # google-cloud-core # google-cloud-storage # kubernetes -google-auth-httplib2==0.1.1 \ - --hash=sha256:42c50900b8e4dcdf8222364d1f0efe32b8421fb6ed72f2613f12f75cc933478c \ - --hash=sha256:c64bc555fdc6dd788ea62ecf7bccffcf497bf77244887a3f3d7a5a02f8e3fc29 +google-auth-httplib2==0.2.0 \ + --hash=sha256:38aa7badf48f974f1eb9861794e9c0cb2a0511a4ec0679b1f886d108f5640e05 \ + --hash=sha256:b65a0a2123300dd71281a7bf6e64d65a0759287df52729bdd1ae2e47dc311a3d # via google-api-python-client google-cloud-compute==1.13.0 \ --hash=sha256:0f75d6c09cc504e43f4eceb2a466de9e9eb6fca819ca8ffdcf098ca5b21c7dc1 \ --hash=sha256:93b72129c6443c898da5a060d2021bc2d11c2a57ef2fbb9306afbb5126a376b9 # via -r requirements.in -google-cloud-core==2.3.3 \ - --hash=sha256:37b80273c8d7eee1ae816b3a20ae43585ea50506cb0e60f3cf5be5f87f1373cb \ - --hash=sha256:fbd11cad3e98a7e5b0343dc07cb1039a5ffd7a5bb96e1f1e27cee4bda4a90863 +google-cloud-core==2.4.1 \ + --hash=sha256:9b7749272a812bde58fff28868d0c5e2f585b82f37e09a1f6ed2d4d10f134073 \ + --hash=sha256:a9e6a4422b9ac5c29f79a0ede9485473338e2ce78d91f2370c01e730eab22e61 # via google-cloud-storage google-cloud-storage==2.10.0 \ 
--hash=sha256:934b31ead5f3994e5360f9ff5750982c5b6b11604dc072bc452c25965e076dc7 \ @@ -495,77 +507,77 @@ google-crc32c==1.5.0 \ --hash=sha256:fd8536e902db7e365f49e7d9029283403974ccf29b13fc7028b97e2295b33556 \ --hash=sha256:fe70e325aa68fa4b5edf7d1a4b6f691eb04bbccac0ace68e34820d283b5f80d4 # via google-resumable-media -google-resumable-media==2.6.0 \ - --hash=sha256:972852f6c65f933e15a4a210c2b96930763b47197cdf4aa5f5bea435efb626e7 \ - --hash=sha256:fc03d344381970f79eebb632a3c18bb1828593a2dc5572b5f90115ef7d11e81b +google-resumable-media==2.7.0 \ + --hash=sha256:5f18f5fa9836f4b083162064a1c2c98c17239bfda9ca50ad970ccf905f3e625b \ + --hash=sha256:79543cfe433b63fd81c0844b7803aba1bb8950b47bedf7d980c38fa123937e08 # via google-cloud-storage -googleapis-common-protos==1.61.0 \ - --hash=sha256:22f1915393bb3245343f6efe87f6fe868532efc12aa26b391b15132e1279f1c0 \ - --hash=sha256:8a64866a97f6304a7179873a465d6eee97b7a24ec6cfd78e0f575e96b821240b +googleapis-common-protos==1.62.0 \ + --hash=sha256:4750113612205514f9f6aa4cb00d523a94f3e8c06c5ad2fee466387dc4875f07 \ + --hash=sha256:83f0ece9f94e5672cced82f592d2a5edf527a96ed1794f0bab36d5735c996277 # via # google-api-core # grpcio-status -grpcio==1.59.3 \ - --hash=sha256:00912ce19914d038851be5cd380d94a03f9d195643c28e3ad03d355cc02ce7e8 \ - --hash=sha256:0511af8653fbda489ff11d542a08505d56023e63cafbda60e6e00d4e0bae86ea \ - --hash=sha256:0814942ba1bba269db4e760a34388640c601dece525c6a01f3b4ff030cc0db69 \ - --hash=sha256:0d42048b8a3286ea4134faddf1f9a59cf98192b94aaa10d910a25613c5eb5bfb \ - --hash=sha256:0e735ed002f50d4f3cb9ecfe8ac82403f5d842d274c92d99db64cfc998515e07 \ - --hash=sha256:16da0e40573962dab6cba16bec31f25a4f468e6d05b658e589090fe103b03e3d \ - --hash=sha256:1736496d74682e53dd0907fd515f2694d8e6a96c9a359b4080b2504bf2b2d91b \ - --hash=sha256:19ad26a7967f7999c8960d2b9fe382dae74c55b0c508c613a6c2ba21cddf2354 \ - --hash=sha256:33b8fd65d4e97efa62baec6171ce51f9cf68f3a8ba9f866f4abc9d62b5c97b79 \ - 
--hash=sha256:36636babfda14f9e9687f28d5b66d349cf88c1301154dc71c6513de2b6c88c59 \ - --hash=sha256:3996aaa21231451161dc29df6a43fcaa8b332042b6150482c119a678d007dd86 \ - --hash=sha256:45dddc5cb5227d30fa43652d8872dc87f086d81ab4b500be99413bad0ae198d7 \ - --hash=sha256:4619fea15c64bcdd9d447cdbdde40e3d5f1da3a2e8ae84103d94a9c1df210d7e \ - --hash=sha256:52cc38a7241b5f7b4a91aaf9000fdd38e26bb00d5e8a71665ce40cfcee716281 \ - --hash=sha256:575d61de1950b0b0699917b686b1ca108690702fcc2df127b8c9c9320f93e069 \ - --hash=sha256:5f9b2e591da751ac7fdd316cc25afafb7a626dededa9b414f90faad7f3ccebdb \ - --hash=sha256:60cddafb70f9a2c81ba251b53b4007e07cca7389e704f86266e22c4bffd8bf1d \ - --hash=sha256:6a5c3a96405966c023e139c3bcccb2c7c776a6f256ac6d70f8558c9041bdccc3 \ - --hash=sha256:6c75a1fa0e677c1d2b6d4196ad395a5c381dfb8385f07ed034ef667cdcdbcc25 \ - --hash=sha256:72b71dad2a3d1650e69ad42a5c4edbc59ee017f08c32c95694172bc501def23c \ - --hash=sha256:73afbac602b8f1212a50088193601f869b5073efa9855b3e51aaaec97848fc8a \ - --hash=sha256:7800f99568a74a06ebdccd419dd1b6e639b477dcaf6da77ea702f8fb14ce5f80 \ - --hash=sha256:8022ca303d6c694a0d7acfb2b472add920217618d3a99eb4b14edc7c6a7e8fcf \ - --hash=sha256:8239b853226e4824e769517e1b5232e7c4dda3815b200534500338960fcc6118 \ - --hash=sha256:83113bcc393477b6f7342b9f48e8a054330c895205517edc66789ceea0796b53 \ - --hash=sha256:8cd76057b5c9a4d68814610ef9226925f94c1231bbe533fdf96f6181f7d2ff9e \ - --hash=sha256:8d993399cc65e3a34f8fd48dd9ad7a376734564b822e0160dd18b3d00c1a33f9 \ - --hash=sha256:95b5506e70284ac03b2005dd9ffcb6708c9ae660669376f0192a710687a22556 \ - --hash=sha256:95d6fd804c81efe4879e38bfd84d2b26e339a0a9b797e7615e884ef4686eb47b \ - --hash=sha256:9e17660947660ccfce56c7869032910c179a5328a77b73b37305cd1ee9301c2e \ - --hash=sha256:a93a82876a4926bf451db82ceb725bd87f42292bacc94586045261f501a86994 \ - --hash=sha256:aca028a6c7806e5b61e5f9f4232432c52856f7fcb98e330b20b6bc95d657bdcc \ - --hash=sha256:b1f00a3e6e0c3dccccffb5579fc76ebfe4eb40405ba308505b41ef92f747746a \ - 
--hash=sha256:b36683fad5664283755a7f4e2e804e243633634e93cd798a46247b8e54e3cb0d \ - --hash=sha256:b491e5bbcad3020a96842040421e508780cade35baba30f402df9d321d1c423e \ - --hash=sha256:c0bd141f4f41907eb90bda74d969c3cb21c1c62779419782a5b3f5e4b5835718 \ - --hash=sha256:c0f0a11d82d0253656cc42e04b6a149521e02e755fe2e4edd21123de610fd1d4 \ - --hash=sha256:c4b0076f0bf29ee62335b055a9599f52000b7941f577daa001c7ef961a1fbeab \ - --hash=sha256:c82ca1e4be24a98a253d6dbaa216542e4163f33f38163fc77964b0f0d255b552 \ - --hash=sha256:cb4e9cbd9b7388fcb06412da9f188c7803742d06d6f626304eb838d1707ec7e3 \ - --hash=sha256:cdbc6b32fadab9bebc6f49d3e7ec4c70983c71e965497adab7f87de218e84391 \ - --hash=sha256:ce31fa0bfdd1f2bb15b657c16105c8652186eab304eb512e6ae3b99b2fdd7d13 \ - --hash=sha256:d1d1a17372fd425addd5812049fa7374008ffe689585f27f802d0935522cf4b7 \ - --hash=sha256:d787ecadea865bdf78f6679f6f5bf4b984f18f659257ba612979df97a298b3c3 \ - --hash=sha256:ddbd1a16138e52e66229047624de364f88a948a4d92ba20e4e25ad7d22eef025 \ - --hash=sha256:e1d8e01438d5964a11167eec1edb5f85ed8e475648f36c834ed5db4ffba24ac8 \ - --hash=sha256:e58b3cadaa3c90f1efca26ba33e0d408b35b497307027d3d707e4bcd8de862a6 \ - --hash=sha256:e78dc982bda74cef2ddfce1c91d29b96864c4c680c634e279ed204d51e227473 \ - --hash=sha256:ea40ce4404e7cca0724c91a7404da410f0144148fdd58402a5942971e3469b94 \ - --hash=sha256:eb8ba504c726befe40a356ecbe63c6c3c64c9a439b3164f5a718ec53c9874da0 \ - --hash=sha256:ed26826ee423b11477297b187371cdf4fa1eca874eb1156422ef3c9a60590dd9 \ - --hash=sha256:f2eb8f0c7c0c62f7a547ad7a91ba627a5aa32a5ae8d930783f7ee61680d7eb8d \ - --hash=sha256:fb111aa99d3180c361a35b5ae1e2c63750220c584a1344229abc139d5c891881 \ - --hash=sha256:fcfa56f8d031ffda902c258c84c4b88707f3a4be4827b4e3ab8ec7c24676320d +grpcio==1.60.0 \ + --hash=sha256:073f959c6f570797272f4ee9464a9997eaf1e98c27cb680225b82b53390d61e6 \ + --hash=sha256:0fd3b3968ffe7643144580f260f04d39d869fcc2cddb745deef078b09fd2b328 \ + 
--hash=sha256:1434ca77d6fed4ea312901122dc8da6c4389738bf5788f43efb19a838ac03ead \ + --hash=sha256:1c30bb23a41df95109db130a6cc1b974844300ae2e5d68dd4947aacba5985aa5 \ + --hash=sha256:20e7a4f7ded59097c84059d28230907cd97130fa74f4a8bfd1d8e5ba18c81491 \ + --hash=sha256:2199165a1affb666aa24adf0c97436686d0a61bc5fc113c037701fb7c7fceb96 \ + --hash=sha256:297eef542156d6b15174a1231c2493ea9ea54af8d016b8ca7d5d9cc65cfcc444 \ + --hash=sha256:2aef56e85901c2397bd557c5ba514f84de1f0ae5dd132f5d5fed042858115951 \ + --hash=sha256:30943b9530fe3620e3b195c03130396cd0ee3a0d10a66c1bee715d1819001eaf \ + --hash=sha256:3b36a2c6d4920ba88fa98075fdd58ff94ebeb8acc1215ae07d01a418af4c0253 \ + --hash=sha256:428d699c8553c27e98f4d29fdc0f0edc50e9a8a7590bfd294d2edb0da7be3629 \ + --hash=sha256:43e636dc2ce9ece583b3e2ca41df5c983f4302eabc6d5f9cd04f0562ee8ec1ae \ + --hash=sha256:452ca5b4afed30e7274445dd9b441a35ece656ec1600b77fff8c216fdf07df43 \ + --hash=sha256:467a7d31554892eed2aa6c2d47ded1079fc40ea0b9601d9f79204afa8902274b \ + --hash=sha256:4b44d7e39964e808b071714666a812049765b26b3ea48c4434a3b317bac82f14 \ + --hash=sha256:4c86343cf9ff7b2514dd229bdd88ebba760bd8973dac192ae687ff75e39ebfab \ + --hash=sha256:5208a57eae445ae84a219dfd8b56e04313445d146873117b5fa75f3245bc1390 \ + --hash=sha256:5ff21e000ff2f658430bde5288cb1ac440ff15c0d7d18b5fb222f941b46cb0d2 \ + --hash=sha256:675997222f2e2f22928fbba640824aebd43791116034f62006e19730715166c0 \ + --hash=sha256:676e4a44e740deaba0f4d95ba1d8c5c89a2fcc43d02c39f69450b1fa19d39590 \ + --hash=sha256:6e306b97966369b889985a562ede9d99180def39ad42c8014628dd3cc343f508 \ + --hash=sha256:6fd9584bf1bccdfff1512719316efa77be235469e1e3295dce64538c4773840b \ + --hash=sha256:705a68a973c4c76db5d369ed573fec3367d7d196673fa86614b33d8c8e9ebb08 \ + --hash=sha256:74d7d9fa97809c5b892449b28a65ec2bfa458a4735ddad46074f9f7d9550ad13 \ + --hash=sha256:77c8a317f0fd5a0a2be8ed5cbe5341537d5c00bb79b3bb27ba7c5378ba77dbca \ + --hash=sha256:79a050889eb8d57a93ed21d9585bb63fca881666fc709f5d9f7f9372f5e7fd03 \ + 
--hash=sha256:7db16dd4ea1b05ada504f08d0dca1cd9b926bed3770f50e715d087c6f00ad748 \ + --hash=sha256:83f2292ae292ed5a47cdcb9821039ca8e88902923198f2193f13959360c01860 \ + --hash=sha256:87c9224acba0ad8bacddf427a1c2772e17ce50b3042a789547af27099c5f751d \ + --hash=sha256:8a97a681e82bc11a42d4372fe57898d270a2707f36c45c6676e49ce0d5c41353 \ + --hash=sha256:9073513ec380434eb8d21970e1ab3161041de121f4018bbed3146839451a6d8e \ + --hash=sha256:90bdd76b3f04bdb21de5398b8a7c629676c81dfac290f5f19883857e9371d28c \ + --hash=sha256:91229d7203f1ef0ab420c9b53fe2ca5c1fbeb34f69b3bc1b5089466237a4a134 \ + --hash=sha256:92f88ca1b956eb8427a11bb8b4a0c0b2b03377235fc5102cb05e533b8693a415 \ + --hash=sha256:95ae3e8e2c1b9bf671817f86f155c5da7d49a2289c5cf27a319458c3e025c320 \ + --hash=sha256:9e30be89a75ee66aec7f9e60086fadb37ff8c0ba49a022887c28c134341f7179 \ + --hash=sha256:a48edde788b99214613e440fce495bbe2b1e142a7f214cce9e0832146c41e324 \ + --hash=sha256:a7152fa6e597c20cb97923407cf0934e14224af42c2b8d915f48bc3ad2d9ac18 \ + --hash=sha256:a9c7b71211f066908e518a2ef7a5e211670761651039f0d6a80d8d40054047df \ + --hash=sha256:b0571a5aef36ba9177e262dc88a9240c866d903a62799e44fd4aae3f9a2ec17e \ + --hash=sha256:b0fb2d4801546598ac5cd18e3ec79c1a9af8b8f2a86283c55a5337c5aeca4b1b \ + --hash=sha256:b10241250cb77657ab315270b064a6c7f1add58af94befa20687e7c8d8603ae6 \ + --hash=sha256:b87efe4a380887425bb15f220079aa8336276398dc33fce38c64d278164f963d \ + --hash=sha256:b98f43fcdb16172dec5f4b49f2fece4b16a99fd284d81c6bbac1b3b69fcbe0ff \ + --hash=sha256:c193109ca4070cdcaa6eff00fdb5a56233dc7610216d58fb81638f89f02e4968 \ + --hash=sha256:c826f93050c73e7769806f92e601e0efdb83ec8d7c76ddf45d514fee54e8e619 \ + --hash=sha256:d020cfa595d1f8f5c6b343530cd3ca16ae5aefdd1e832b777f9f0eb105f5b139 \ + --hash=sha256:d6a478581b1a1a8fdf3318ecb5f4d0cda41cacdffe2b527c23707c9c1b8fdb55 \ + --hash=sha256:de2ad69c9a094bf37c1102b5744c9aec6cf74d2b635558b779085d0263166454 \ + --hash=sha256:e278eafb406f7e1b1b637c2cf51d3ad45883bb5bd1ca56bc05e4fc135dfdaa65 \ + 
--hash=sha256:e381fe0c2aa6c03b056ad8f52f8efca7be29fb4d9ae2f8873520843b6039612a \ + --hash=sha256:e61e76020e0c332a98290323ecfec721c9544f5b739fab925b6e8cbe1944cf19 \ + --hash=sha256:f897c3b127532e6befdcf961c415c97f320d45614daf84deba0a54e64ea2457b \ + --hash=sha256:fb464479934778d7cc5baf463d959d361954d6533ad34c3a4f1d267e86ee25fd # via # google-api-core # grpcio-status -grpcio-status==1.59.3 \ - --hash=sha256:2fd2eb39ca4e9afb3c874c0878ff75b258db0b7dcc25570fc521f16ae0ab942a \ - --hash=sha256:65c394ba43380d6bdf8c04c61efc493104b5535552aed35817a1b4dc66598a1f +grpcio-status==1.60.0 \ + --hash=sha256:7d383fa36e59c1e61d380d91350badd4d12ac56e4de2c2b831b050362c3c572e \ + --hash=sha256:f10e0b6db3adc0fdc244b71962814ee982996ef06186446b5695b9fa635aa1ab # via google-api-core hdrhistogram==0.9.2 \ --hash=sha256:81a6f86fa500b3eddff43467332c58716a0224a668bf68c2be0f5dbff7d3e783 \ @@ -581,14 +593,16 @@ humanreadable==0.4.0 \ --hash=sha256:2879a146f0602512addfcfba227956a3f1d23b99e9f938ff91b2085a170519ba \ --hash=sha256:5b70257a8e88856f9b64a1f0a6fb7535c9002818465e298ca27d745b25e5675d # via tcconfig -identify==2.5.32 \ - --hash=sha256:0b7656ef6cba81664b783352c73f8c24b39cf82f926f78f4550eda928e5e0545 \ - --hash=sha256:5d9979348ec1a21c768ae07e0a652924538e8bce67313a73cb0f681cf08ba407 +identify==2.5.33 \ + --hash=sha256:161558f9fe4559e1557e1bff323e8631f6a0e4837f7497767c1782832f16b62d \ + --hash=sha256:d40ce5fcd762817627670da8a7d8d8e65f24342d14539c59488dc603bf662e34 # via pre-commit idna==3.6 \ --hash=sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca \ --hash=sha256:c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f - # via requests + # via + # anyio + # requests iniconfig==2.0.0 \ --hash=sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3 \ --hash=sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374 @@ -606,10 +620,6 @@ isodate==0.6.1 \ # via # azure-storage-blob # msrest -isort==5.12.0 \ - 
--hash=sha256:8bef7dde241278824a6d83f44a544709b065191b95b6e50894bdc722fcba0504 \ - --hash=sha256:f84c2818376e66cf843d497486ea8fed8700b340f308f076c6fb1229dff318b6 - # via pylint jinja2==3.0.2 \ --hash=sha256:827a0e32839ab1600d4eb1c4c33ec5a8edfbc5cb42dafa13b81f182f97784b45 \ --hash=sha256:8569982d3f0889eed11dd620c706d39b60c36d6d25843961f33f77fb6bc6b20c @@ -624,44 +634,6 @@ kubernetes==24.2.0 \ --hash=sha256:9900f12ae92007533247167d14cdee949cd8c7721f88b4a7da5f5351da3834cd \ --hash=sha256:da19d58865cf903a8c7b9c3691a2e6315d583a98f0659964656dfdf645bf7e49 # via -r requirements.in -lazy-object-proxy==1.9.0 \ - --hash=sha256:09763491ce220c0299688940f8dc2c5d05fd1f45af1e42e636b2e8b2303e4382 \ - --hash=sha256:0a891e4e41b54fd5b8313b96399f8b0e173bbbfc03c7631f01efbe29bb0bcf82 \ - --hash=sha256:189bbd5d41ae7a498397287c408617fe5c48633e7755287b21d741f7db2706a9 \ - --hash=sha256:18b78ec83edbbeb69efdc0e9c1cb41a3b1b1ed11ddd8ded602464c3fc6020494 \ - --hash=sha256:1aa3de4088c89a1b69f8ec0dcc169aa725b0ff017899ac568fe44ddc1396df46 \ - --hash=sha256:212774e4dfa851e74d393a2370871e174d7ff0ebc980907723bb67d25c8a7c30 \ - --hash=sha256:2d0daa332786cf3bb49e10dc6a17a52f6a8f9601b4cf5c295a4f85854d61de63 \ - --hash=sha256:5f83ac4d83ef0ab017683d715ed356e30dd48a93746309c8f3517e1287523ef4 \ - --hash=sha256:659fb5809fa4629b8a1ac5106f669cfc7bef26fbb389dda53b3e010d1ac4ebae \ - --hash=sha256:660c94ea760b3ce47d1855a30984c78327500493d396eac4dfd8bd82041b22be \ - --hash=sha256:66a3de4a3ec06cd8af3f61b8e1ec67614fbb7c995d02fa224813cb7afefee701 \ - --hash=sha256:721532711daa7db0d8b779b0bb0318fa87af1c10d7fe5e52ef30f8eff254d0cd \ - --hash=sha256:7322c3d6f1766d4ef1e51a465f47955f1e8123caee67dd641e67d539a534d006 \ - --hash=sha256:79a31b086e7e68b24b99b23d57723ef7e2c6d81ed21007b6281ebcd1688acb0a \ - --hash=sha256:81fc4d08b062b535d95c9ea70dbe8a335c45c04029878e62d744bdced5141586 \ - --hash=sha256:8fa02eaab317b1e9e03f69aab1f91e120e7899b392c4fc19807a8278a07a97e8 \ - 
--hash=sha256:9090d8e53235aa280fc9239a86ae3ea8ac58eff66a705fa6aa2ec4968b95c821 \ - --hash=sha256:946d27deaff6cf8452ed0dba83ba38839a87f4f7a9732e8f9fd4107b21e6ff07 \ - --hash=sha256:9990d8e71b9f6488e91ad25f322898c136b008d87bf852ff65391b004da5e17b \ - --hash=sha256:9cd077f3d04a58e83d04b20e334f678c2b0ff9879b9375ed107d5d07ff160171 \ - --hash=sha256:9e7551208b2aded9c1447453ee366f1c4070602b3d932ace044715d89666899b \ - --hash=sha256:9f5fa4a61ce2438267163891961cfd5e32ec97a2c444e5b842d574251ade27d2 \ - --hash=sha256:b40387277b0ed2d0602b8293b94d7257e17d1479e257b4de114ea11a8cb7f2d7 \ - --hash=sha256:bfb38f9ffb53b942f2b5954e0f610f1e721ccebe9cce9025a38c8ccf4a5183a4 \ - --hash=sha256:cbf9b082426036e19c6924a9ce90c740a9861e2bdc27a4834fd0a910742ac1e8 \ - --hash=sha256:d9e25ef10a39e8afe59a5c348a4dbf29b4868ab76269f81ce1674494e2565a6e \ - --hash=sha256:db1c1722726f47e10e0b5fdbf15ac3b8adb58c091d12b3ab713965795036985f \ - --hash=sha256:e7c21c95cae3c05c14aafffe2865bbd5e377cfc1348c4f7751d9dc9a48ca4bda \ - --hash=sha256:e8c6cfb338b133fbdbc5cfaa10fe3c6aeea827db80c978dbd13bc9dd8526b7d4 \ - --hash=sha256:ea806fd4c37bf7e7ad82537b0757999264d5f70c45468447bb2b91afdbe73a6e \ - --hash=sha256:edd20c5a55acb67c7ed471fa2b5fb66cb17f61430b7a6b9c3b4a1e40293b1671 \ - --hash=sha256:f0117049dd1d5635bbff65444496c90e0baa48ea405125c088e93d9cf4525b11 \ - --hash=sha256:f0705c376533ed2a9e5e97aacdbfe04cecd71e0aa84c7c0595d02ef93b6e4455 \ - --hash=sha256:f12ad7126ae0c98d601a7ee504c1122bcef553d1d5e0c3bfa77b16b3968d2734 \ - --hash=sha256:f2457189d8257dd41ae9b434ba33298aec198e30adf2dcdaaa3a28b9994f6adb \ - --hash=sha256:f699ac1c768270c9e384e4cbd268d6e67aebcfae6cd623b4d7c3bfde5a35db59 - # via astroid ldap3==2.9.1 \ --hash=sha256:5869596fc4948797020d3f03b7939da938778a0f9e2009f7a072ccf92b8e8d70 \ --hash=sha256:f3e7fc4718e3f09dda568b57100095e0ce58633bcabbed8667ce3f8fbaa4229f @@ -741,10 +713,6 @@ mbstrdecoder==1.1.3 \ # sqliteschema # subprocrunner # typepy -mccabe==0.6.1 \ - 
--hash=sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42 \ - --hash=sha256:dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f - # via pylint msal==1.26.0 \ --hash=sha256:224756079fe338be838737682b49f8ebc20a87c1c5eeaf590daae4532b83de15 \ --hash=sha256:be77ba6a8f49c9ff598bbcdc5dfcf1c9842f3044300109af738e8c3e371065b5 @@ -919,9 +887,7 @@ pip-tools==6.13.0 \ platformdirs==4.1.0 \ --hash=sha256:11c8f37bcca40db96d8144522d925583bdb7a31f7b0e37e3ed4318400a8e2380 \ --hash=sha256:906d548203468492d432bcb294d4bc2fff751bf84971fbb2c10918cc206ee420 - # via - # pylint - # virtualenv + # via virtualenv pluggy==1.3.0 \ --hash=sha256:cf61ae8f126ac6f7c451172cf30e3e43d3ca77615509771b3a984a0730651e12 \ --hash=sha256:d89c696a773f8bd377d18e5ecda92b7a3793cbe66c87060a6fb58c7b6e1061f7 @@ -938,13 +904,13 @@ prometheus-client==0.11.0 \ --hash=sha256:3a8baade6cb80bcfe43297e33e7623f3118d660d41387593758e2fb1ea173a86 \ --hash=sha256:b014bc76815eb1399da8ce5fc84b7717a3e63652b0c0f8804092c9363acab1b2 # via -r requirements.in -prompt-toolkit==3.0.41 \ - --hash=sha256:941367d97fc815548822aa26c2a269fdc4eb21e9ec05fc5d447cf09bad5d75f0 \ - --hash=sha256:f36fe301fafb7470e86aaf90f036eef600a3210be4decf461a5b1ca8403d3cb2 +prompt-toolkit==3.0.43 \ + --hash=sha256:3527b7af26106cbc65a040bcc84839a3566ec1b051bb0bfe953631e704b0ff7d \ + --hash=sha256:a11a29cb3bf0a28a387fe5122cdb649816a957cd9261dcedf8c9f1fef33eacf6 # via questionary -proto-plus==1.22.3 \ - --hash=sha256:a49cd903bc0b6ab41f76bf65510439d56ca76f868adf0274e738bfdd096894df \ - --hash=sha256:fdcd09713cbd42480740d2fe29c990f7fbd885a67efc328aa8be6ee3e9f76a6b +proto-plus==1.23.0 \ + --hash=sha256:89075171ef11988b3fa157f5dbd8b9cf09d65fffee97e29ce403cd8defba19d2 \ + --hash=sha256:a829c79e619e1cf632de091013a4173deed13a55f326ef84f05af6f50ff4c82c # via google-cloud-compute protobuf==4.25.1 \ --hash=sha256:0bf384e75b92c42830c0a679b0cd4d6e2b36ae0cf3dbb1e1dfdda48a244f4bcd \ @@ -1063,10 +1029,6 @@ pyjwt[crypto]==2.8.0 \ # via # msal # 
pyjwt -pylint==2.11.1 \ - --hash=sha256:0f358e221c45cbd4dad2a1e4b883e75d28acdcccd29d40c76eb72b307269b126 \ - --hash=sha256:2c9843fff1a88ca0ad98a256806c82c5a8f86086e7ccbdb93297d86c3f90c436 - # via -r requirements.in pynacl==1.5.0 \ --hash=sha256:06b8f6fa7f5de8d5d2f7573fe8c863c051225a27b61e6860fd047b1775807858 \ --hash=sha256:0c84947a22519e013607c9be43706dd42513f9e6ae5d39d3613ca1e142fba44d \ @@ -1259,6 +1221,25 @@ rsa==4.7.2 \ # via # awscli # google-auth +ruff==0.1.8 \ + --hash=sha256:05ffe9dbd278965271252704eddb97b4384bf58b971054d517decfbf8c523f05 \ + --hash=sha256:5daaeaf00ae3c1efec9742ff294b06c3a2a9db8d3db51ee4851c12ad385cda30 \ + --hash=sha256:7d076717c67b34c162da7c1a5bda16ffc205e0e0072c03745275e7eab888719f \ + --hash=sha256:7de792582f6e490ae6aef36a58d85df9f7a0cfd1b0d4fe6b4fb51803a3ac96fa \ + --hash=sha256:a05b0ddd7ea25495e4115a43125e8a7ebed0aa043c3d432de7e7d6e8e8cd6448 \ + --hash=sha256:aa8ee4f8440023b0a6c3707f76cadce8657553655dcbb5fc9b2f9bb9bee389f6 \ + --hash=sha256:b6a21ab023124eafb7cef6d038f835cb1155cd5ea798edd8d9eb2f8b84be07d9 \ + --hash=sha256:bd8ee69b02e7bdefe1e5da2d5b6eaaddcf4f90859f00281b2333c0e3a0cc9cd6 \ + --hash=sha256:c8e3255afd186c142eef4ec400d7826134f028a85da2146102a1172ecc7c3696 \ + --hash=sha256:ce697c463458555027dfb194cb96d26608abab920fa85213deb5edf26e026664 \ + --hash=sha256:db6cedd9ffed55548ab313ad718bc34582d394e27a7875b4b952c2d29c001b26 \ + --hash=sha256:e49fbdfe257fa41e5c9e13c79b9e79a23a79bd0e40b9314bc53840f520c2c0b3 \ + --hash=sha256:e6f08ca730f4dc1b76b473bdf30b1b37d42da379202a059eae54ec7fc1fbcfed \ + --hash=sha256:f35960b02df6b827c1b903091bb14f4b003f6cf102705efc4ce78132a0aa5af3 \ + --hash=sha256:f41f692f1691ad87f51708b823af4bb2c5c87c9248ddd3191c8f088e66ce590a \ + --hash=sha256:f7ee467677467526cfe135eab86a40a0e8db43117936ac4f9b469ce9cdb3fb62 \ + --hash=sha256:ff78a7583020da124dd0deb835ece1d87bb91762d40c514ee9b67a087940528b + # via -r requirements.in s3transfer==0.6.2 \ 
--hash=sha256:b014be3a8a2aab98cfe1abc7229cc5a9a0cf05eb9c1f2b86b230fd8df3f78084 \ --hash=sha256:cab66d3380cca3e70939ef2255d01cd8aece6a4907a9528740f668c4b0611861 @@ -1331,6 +1312,10 @@ six==1.16.0 \ # python-jenkins # repodataparser # scylla-driver +sniffio==1.3.0 \ + --hash=sha256:e60305c5e5d314f5389259b7f22aaa33d8f7dee49763119234af3755c55b9101 \ + --hash=sha256:eecefdce1e5bbfb7ad2eeaabf7c1eeb404d7757c379bd1f7e5cce9d8bf425384 + # via anyio sortedcontainers==2.4.0 \ --hash=sha256:25caa5a06cc30b6b83d11423433f65d1f9d76c4c6a0c90e3379eaa43b9bfdb88 \ --hash=sha256:a163dcaede0f1c021485e957a39245190e74249897e2ae4b2aa38595db237ee0 @@ -1398,7 +1383,6 @@ toml==0.10.2 \ # via # autopep8 # pre-commit - # pylint tomli==2.0.1 \ --hash=sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc \ --hash=sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f @@ -1416,18 +1400,19 @@ typepy[datetime]==1.3.2 \ # sqliteschema # tabledata # tcconfig -types-awscrt==0.19.19 \ - --hash=sha256:850d5ad95d8f337b15fb154790f39af077faf5c08d43758fd750f379a87d5f73 \ - --hash=sha256:a577c4d60a7fb7e21b436a73207a66f6ba50329d578b347934c5d99d4d612901 +types-awscrt==0.20.0 \ + --hash=sha256:99778c952e1eae10cc7a53468413001177026c9434345bf00120bb2ea5b79109 \ + --hash=sha256:e872b65d041687ec7fb49fb4dcb871ff10ade5efeca02722e037a03bff81db7e # via botocore-stubs -types-s3transfer==0.8.2 \ - --hash=sha256:2e41756fcf94775a9949afa856489ac4570308609b0493dfbd7b4d333eb423e6 \ - --hash=sha256:5e084ebcf2704281c71b19d5da6e1544b50859367d034b50080d5316a76a9418 +types-s3transfer==0.9.0 \ + --hash=sha256:0f78c95c2ee390faad71735df35b6b81fca5bce4b864ac6a7707da2a845a5e86 \ + --hash=sha256:241e8b7b209c4064a451897bace1525ba64098a3ae955bdd0fb4b970cc69db73 # via boto3-stubs -typing-extensions==4.8.0 \ - --hash=sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0 \ - --hash=sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef +typing-extensions==4.9.0 \ + 
--hash=sha256:23478f88c37f27d76ac8aee6c905017a143b0b1b886c3c9f66bc2fd94f9f5783 \ + --hash=sha256:af72aea155e91adfc61c3ae9e0e342dbc0cba726d6cba4b6c72c1f34e47291cd # via + # anyio # azure-core # azure-storage-blob # mypy-boto3-dynamodb @@ -1470,71 +1455,17 @@ wheel==0.42.0 \ --hash=sha256:177f9c9b0d45c47873b619f5b650346d632cdc35fb5e4d25058e09c9e581433d \ --hash=sha256:c45be39f7882c9d34243236f2d63cbd58039e360f85d0913425fbd7ceea617a8 # via pip-tools -wrapt==1.13.3 \ - --hash=sha256:086218a72ec7d986a3eddb7707c8c4526d677c7b35e355875a0fe2918b059179 \ - --hash=sha256:0877fe981fd76b183711d767500e6b3111378ed2043c145e21816ee589d91096 \ - --hash=sha256:0a017a667d1f7411816e4bf214646d0ad5b1da2c1ea13dec6c162736ff25a374 \ - --hash=sha256:0cb23d36ed03bf46b894cfec777eec754146d68429c30431c99ef28482b5c1df \ - --hash=sha256:1fea9cd438686e6682271d36f3481a9f3636195578bab9ca3382e2f5f01fc185 \ - --hash=sha256:220a869982ea9023e163ba915077816ca439489de6d2c09089b219f4e11b6785 \ - --hash=sha256:25b1b1d5df495d82be1c9d2fad408f7ce5ca8a38085e2da41bb63c914baadff7 \ - --hash=sha256:2dded5496e8f1592ec27079b28b6ad2a1ef0b9296d270f77b8e4a3a796cf6909 \ - --hash=sha256:2ebdde19cd3c8cdf8df3fc165bc7827334bc4e353465048b36f7deeae8ee0918 \ - --hash=sha256:43e69ffe47e3609a6aec0fe723001c60c65305784d964f5007d5b4fb1bc6bf33 \ - --hash=sha256:46f7f3af321a573fc0c3586612db4decb7eb37172af1bc6173d81f5b66c2e068 \ - --hash=sha256:47f0a183743e7f71f29e4e21574ad3fa95676136f45b91afcf83f6a050914829 \ - --hash=sha256:498e6217523111d07cd67e87a791f5e9ee769f9241fcf8a379696e25806965af \ - --hash=sha256:4b9c458732450ec42578b5642ac53e312092acf8c0bfce140ada5ca1ac556f79 \ - --hash=sha256:51799ca950cfee9396a87f4a1240622ac38973b6df5ef7a41e7f0b98797099ce \ - --hash=sha256:5601f44a0f38fed36cc07db004f0eedeaadbdcec90e4e90509480e7e6060a5bc \ - --hash=sha256:5f223101f21cfd41deec8ce3889dc59f88a59b409db028c469c9b20cfeefbe36 \ - --hash=sha256:610f5f83dd1e0ad40254c306f4764fcdc846641f120c3cf424ff57a19d5f7ade \ - 
--hash=sha256:6a03d9917aee887690aa3f1747ce634e610f6db6f6b332b35c2dd89412912bca \ - --hash=sha256:705e2af1f7be4707e49ced9153f8d72131090e52be9278b5dbb1498c749a1e32 \ - --hash=sha256:766b32c762e07e26f50d8a3468e3b4228b3736c805018e4b0ec8cc01ecd88125 \ - --hash=sha256:77416e6b17926d953b5c666a3cb718d5945df63ecf922af0ee576206d7033b5e \ - --hash=sha256:778fd096ee96890c10ce96187c76b3e99b2da44e08c9e24d5652f356873f6709 \ - --hash=sha256:78dea98c81915bbf510eb6a3c9c24915e4660302937b9ae05a0947164248020f \ - --hash=sha256:7dd215e4e8514004c8d810a73e342c536547038fb130205ec4bba9f5de35d45b \ - --hash=sha256:7dde79d007cd6dfa65afe404766057c2409316135cb892be4b1c768e3f3a11cb \ - --hash=sha256:81bd7c90d28a4b2e1df135bfbd7c23aee3050078ca6441bead44c42483f9ebfb \ - --hash=sha256:85148f4225287b6a0665eef08a178c15097366d46b210574a658c1ff5b377489 \ - --hash=sha256:865c0b50003616f05858b22174c40ffc27a38e67359fa1495605f96125f76640 \ - --hash=sha256:87883690cae293541e08ba2da22cacaae0a092e0ed56bbba8d018cc486fbafbb \ - --hash=sha256:8aab36778fa9bba1a8f06a4919556f9f8c7b33102bd71b3ab307bb3fecb21851 \ - --hash=sha256:8c73c1a2ec7c98d7eaded149f6d225a692caa1bd7b2401a14125446e9e90410d \ - --hash=sha256:936503cb0a6ed28dbfa87e8fcd0a56458822144e9d11a49ccee6d9a8adb2ac44 \ - --hash=sha256:944b180f61f5e36c0634d3202ba8509b986b5fbaf57db3e94df11abee244ba13 \ - --hash=sha256:96b81ae75591a795d8c90edc0bfaab44d3d41ffc1aae4d994c5aa21d9b8e19a2 \ - --hash=sha256:981da26722bebb9247a0601e2922cedf8bb7a600e89c852d063313102de6f2cb \ - --hash=sha256:ae9de71eb60940e58207f8e71fe113c639da42adb02fb2bcbcaccc1ccecd092b \ - --hash=sha256:b73d4b78807bd299b38e4598b8e7bd34ed55d480160d2e7fdaabd9931afa65f9 \ - --hash=sha256:d4a5f6146cfa5c7ba0134249665acd322a70d1ea61732723c7d3e8cc0fa80755 \ - --hash=sha256:dd91006848eb55af2159375134d724032a2d1d13bcc6f81cd8d3ed9f2b8e846c \ - --hash=sha256:e05e60ff3b2b0342153be4d1b597bbcfd8330890056b9619f4ad6b8d5c96a81a \ - --hash=sha256:e6906d6f48437dfd80464f7d7af1740eadc572b9f7a4301e7dd3d65db285cacf \ - 
--hash=sha256:e92d0d4fa68ea0c02d39f1e2f9cb5bc4b4a71e8c442207433d8db47ee79d7aa3 \ - --hash=sha256:e94b7d9deaa4cc7bac9198a58a7240aaf87fe56c6277ee25fa5b3aa1edebd229 \ - --hash=sha256:ea3e746e29d4000cd98d572f3ee2a6050a4f784bb536f4ac1f035987fc1ed83e \ - --hash=sha256:ec7e20258ecc5174029a0f391e1b948bf2906cd64c198a9b8b281b811cbc04de \ - --hash=sha256:ec9465dd69d5657b5d2fa6133b3e1e989ae27d29471a672416fd729b429eb554 \ - --hash=sha256:f122ccd12fdc69628786d0c947bdd9cb2733be8f800d88b5a37c57f1f1d73c10 \ - --hash=sha256:f99c0489258086308aad4ae57da9e8ecf9e1f3f30fa35d5e170b4d4896554d80 \ - --hash=sha256:f9c51d9af9abb899bd34ace878fbec8bf357b3194a10c4e8e0a25512826ef056 \ - --hash=sha256:fd76c47f20984b43d93de9a82011bb6e5f8325df6c9ed4d8310029a55fa361ea - # via astroid # The following packages are considered to be unsafe in a requirements file: -pip==23.3.1 \ - --hash=sha256:1fcaa041308d01f14575f6d0d2ea4b75a3e2871fe4f9c694976f908768e14174 \ - --hash=sha256:55eb67bb6171d37447e82213be585b75fe2b12b359e993773aca4de9247a052b +pip==23.3.2 \ + --hash=sha256:5052d7889c1f9d05224cd41741acb7c5d6fa735ab34e339624a614eaaa7e7d76 \ + --hash=sha256:7fd9972f96db22c8077a1ee2691b172c8089b17a5652a44494a9ecb0d78f9149 # via pip-tools setuptools==69.0.2 \ --hash=sha256:1e8fdff6797d3865f37397be788a4e3cba233608e9b509382a2777d25ebde7f2 \ --hash=sha256:735896e78a4742605974de002ac60562d286fa8051a7e2299445e8e8fbb01aa6 # via # anyconfig - # astroid # kubernetes # nodeenv # pip-tools diff --git a/sct.py b/sct.py index 70d5c0d3efd..036e000e41f 100755 --- a/sct.py +++ b/sct.py @@ -163,7 +163,7 @@ def getTestCaseNames(self, testCaseClass): test_cases = super().getTestCaseNames(testCaseClass) num_of_cases = len(test_cases) assert num_of_cases < 2, f"SCT expect only one test case to be selected, found {num_of_cases}:" \ - f"\n{pprint.pformat(test_cases)}" + f"\n{pprint.pformat(test_cases)}" # noqa: PLR2004 return test_cases @@ -231,7 +231,7 @@ def provision_resources(backend, test_name: str, config: str): 
@click.option('--dry-run', is_flag=True, default=False, help='dry run') @click.option('-b', '--backend', type=click.Choice(SCTConfiguration.available_backends), help="Backend to use") @click.pass_context -def clean_resources(ctx, post_behavior, user, test_id, logdir, dry_run, backend): # pylint: disable=too-many-arguments,too-many-branches +def clean_resources(ctx, post_behavior, user, test_id, logdir, dry_run, backend): # pylint: disable=too-many-arguments,too-many-branches # noqa: PLR0912, PLR0913 """Clean cloud resources. There are different options how to run clean up: @@ -309,7 +309,7 @@ def clean_resources(ctx, post_behavior, user, test_id, logdir, dry_run, backend) @sct_option('--test-id', 'test_id', help='test id to filter by') @click.option('--verbose', is_flag=True, default=False, help='if enable, will log progress') @click.pass_context -def list_resources(ctx, user, test_id, get_all, get_all_running, verbose): +def list_resources(ctx, user, test_id, get_all, get_all_running, verbose): # noqa: PLR0912, PLR0913, PLR0915 # pylint: disable=too-many-locals,too-many-arguments,too-many-branches,too-many-statements add_file_logger() @@ -795,7 +795,7 @@ def _run_yaml_test(backend, full_path, env): @click.option('-b', '--backend', type=click.Choice(SCTConfiguration.available_backends), default='aws') @click.option('-i', '--include', type=str, default='') @click.option('-e', '--exclude', type=str, default='') -def lint_yamls(backend, exclude: str, include: str): # pylint: disable=too-many-locals,too-many-branches +def lint_yamls(backend, exclude: str, include: str): # pylint: disable=too-many-locals,too-many-branches # noqa: PLR0912 if not include: raise ValueError('You did not provide include filters') @@ -1253,7 +1253,7 @@ def get_test_results_for_failed_test(test_status, start_time): @click.option('--runner-ip', type=str, required=False, help="Sct runner ip for the running test") @click.option('--email-recipients', help="Send email to next recipients") 
@click.option('--logdir', help='Directory where to find testrun folder') -def send_email(test_id=None, test_status=None, start_time=None, started_by=None, runner_ip=None, +def send_email(test_id=None, test_status=None, start_time=None, started_by=None, runner_ip=None, # noqa: PLR0912, PLR0913, PLR0915 email_recipients=None, logdir=None): if started_by is None: started_by = get_username() @@ -1392,7 +1392,7 @@ def create_nemesis_pipelines(base_job: str, backend: str): @click.argument('password', envvar='JENKINS_PASSWORD', type=str) @click.option('--sct_branch', default='master', type=str) @click.option('--sct_repo', default='git@github.com:scylladb/scylla-cluster-tests.git', type=str) -def create_test_release_jobs(branch, username, password, sct_branch, sct_repo): +def create_test_release_jobs(branch, username, password, sct_branch, sct_repo): # noqa: PLR0912 add_file_logger() base_job_dir = f'{branch}' @@ -1507,11 +1507,11 @@ def prepare_regions(cloud_provider, regions): for region in regions: if cloud_provider == "aws": - region = AwsRegion(region_name=region) + region = AwsRegion(region_name=region) # noqa: PLW2901 elif cloud_provider == "azure": - region = AzureRegion(region_name=region) + region = AzureRegion(region_name=region) # noqa: PLW2901 elif cloud_provider == "gce": - region = GceRegion(region_name=region) + region = GceRegion(region_name=region) # noqa: PLW2901 else: raise Exception(f'Unsupported Cloud provider: `{cloud_provider}') region.configure() @@ -1557,7 +1557,7 @@ def create_runner_image(cloud_provider, region, availability_zone): @click.option("-rt", "--restored-test-id", required=False, type=str, help="Test ID of the test that the runner is created for restore monitor") @click.option("-p", "--address-pool", required=False, type=str, help="ElasticIP pool to use") -def create_runner_instance(cloud_provider, region, availability_zone, instance_type, root_disk_size_gb, +def create_runner_instance(cloud_provider, region, availability_zone, 
instance_type, root_disk_size_gb, # noqa: PLR0913 test_id, test_name, duration, restore_monitor=False, restored_test_id="", address_pool=None): # pylint: disable=too-many-locals diff --git a/sct_ssh.py b/sct_ssh.py index 3fc2297efdf..9dc708421c8 100644 --- a/sct_ssh.py +++ b/sct_ssh.py @@ -285,7 +285,7 @@ def ssh(user, test_id, region, force_use_public_ip, node_name): help="Force usage of public address") @click.argument("node_name", required=False) @click.argument("command", required=True) -def ssh_cmd(user, test_id, region, force_use_public_ip, node_name, command): +def ssh_cmd(user, test_id, region, force_use_public_ip, node_name, command): # noqa: PLR0913 output = ssh_run_cmd(node_name, command, user, test_id, region, force_use_public_ip) if output.stderr: click.echo(click.style(output.stderr, fg='red')) @@ -296,7 +296,7 @@ def ssh_cmd(user, test_id, region, force_use_public_ip, node_name, command): return output -def ssh_run_cmd(node_name: str, command: str, user: str = None, +def ssh_run_cmd(node_name: str, command: str, user: str = None, # noqa: PLR0913 test_id: str = None, region: str = None, force_use_public_ip: bool = None) -> subprocess.CompletedProcess | None: assert user or test_id or (node_name and command) @@ -347,9 +347,9 @@ def tunnel(user, test_id, region, port, node_name): cmd = f'ssh -i ~/.ssh/scylla-qa-ec2 -N -L {local_port}:{target_ip}:{port} -o "UserKnownHostsFile=/dev/null" ' \ f'-o "StrictHostKeyChecking=no" -o ServerAliveInterval=10 {bastion_username}@{bastion_ip}' click.echo(cmd) - if port == 3000: + if port == 3000: # noqa: PLR2004 click.echo(click.style(f"connect to: http://127.0.0.1:{local_port}", fg='yellow')) - if port == 22: + if port == 22: # noqa: PLR2004 target_username = guess_username(connect_vm) click.echo(click.style( f"connect to:\nssh -i ~/.ssh/scylla-qa-ec2 -p {local_port} {target_username}@127.0.0.1", fg='yellow')) @@ -365,7 +365,7 @@ def tunnel(user, test_id, region, port, node_name): help="Force usage of public 
address") @click.argument("src") @click.argument("dest") -def copy_cmd(user, test_id, region, force_use_public_ip, src, dest): +def copy_cmd(user, test_id, region, force_use_public_ip, src, dest): # noqa: PLR0913 assert user or test_id connect_vm = select_instance(region=region, test_id=test_id, user=user) diff --git a/sdcm/audit.py b/sdcm/audit.py index f493cb9aee9..6f1c696aca3 100644 --- a/sdcm/audit.py +++ b/sdcm/audit.py @@ -120,7 +120,7 @@ def get_audit_log_rows(node, # pylint: disable=too-many-locals if '!NOTICE' in line[:120] and 'scylla-audit' in line[:120]: while line[-2] != '"': # read multiline audit log (must end with ") - line += log_file.readline() + line += log_file.readline() # noqa: PLW2901 audit_data = line.split(': "', maxsplit=1)[-1] try: node, cat, consistency, table, keyspace_name, opr, source, username, error = audit_data.split( diff --git a/sdcm/cdclog_reader_thread.py b/sdcm/cdclog_reader_thread.py index 1a309b8da66..e7b001a8b3f 100644 --- a/sdcm/cdclog_reader_thread.py +++ b/sdcm/cdclog_reader_thread.py @@ -134,7 +134,7 @@ def _parse_cdcreaderstressor_results(lines: List[str]) -> Dict: if not parse_enable: continue res = line.split(":") - if len(res) < 2: + if len(res) < 2: # noqa: PLR2004 continue name = res[0].strip() value = res[1].strip() diff --git a/sdcm/cluster.py b/sdcm/cluster.py index 9e2b9966182..288f541f922 100644 --- a/sdcm/cluster.py +++ b/sdcm/cluster.py @@ -243,7 +243,7 @@ class BaseNode(AutoSshContainerMixin, WebDriverContainerMixin): # pylint: disab SYSTEM_EVENTS_PATTERNS = SYSTEM_ERROR_EVENTS_PATTERNS + INSTANCE_STATUS_EVENTS_PATTERNS - def __init__(self, name, parent_cluster, ssh_login_info=None, base_logdir=None, node_prefix=None, dc_idx=0, rack=0): # pylint: disable=too-many-arguments,unused-argument + def __init__(self, name, parent_cluster, ssh_login_info=None, base_logdir=None, node_prefix=None, dc_idx=0, rack=0): # pylint: disable=too-many-arguments,unused-argument # noqa: PLR0913 self.name = name self.rack = rack 
self.parent_cluster = parent_cluster # reference to the Cluster object that the node belongs to @@ -889,7 +889,7 @@ def start_journal_thread(self): if self._journal_thread: self.log.debug("Use %s as logging daemon", type(self._journal_thread).__name__) self._journal_thread.start() - else: + else: # noqa: PLR5501 if logs_transport == 'rsyslog': self.log.debug("Use no logging daemon since log transport is rsyslog") elif logs_transport == 'syslog-ng': @@ -980,7 +980,7 @@ def uptime_changed(): # time is larger than 5 seconds. # # The added a time gap check will ignore short uptime change before real reboot. - return pre_uptime != post_uptime and (post_uptime - pre_uptime).seconds > 5 + return pre_uptime != post_uptime and (post_uptime - pre_uptime).seconds > 5 # noqa: PLR2004 except SSHException as ex: self.log.debug("Network isn't available, reboot might already start, %s" % ex) return False @@ -1272,7 +1272,7 @@ def is_manager_agent_up(self, port=None): port = port if port else self.MANAGER_AGENT_PORT # When the agent is IP, it should answer an https request of https://NODE_IP:10001/ping with status code 204 response = requests.get(f"https://{normalize_ipv6_url(self.external_address)}:{port}/ping", verify=False) - return response.status_code == 204 + return response.status_code == 204 # noqa: PLR2004 def wait_manager_agent_up(self, verbose=True, timeout=180): text = None @@ -1289,7 +1289,7 @@ def is_manager_server_up(self, port=None): f'''curl --write-out "%{{http_code}}\n" --silent --output /dev/null "http://127.0.0.1:{port}/ping"''', verbose=True, ignore_status=True) http_status_code = int(curl_output.stdout.strip()) - return http_status_code == 204 + return http_status_code == 204 # noqa: PLR2004 def wait_manager_server_up(self, verbose=True, timeout=300, port=None): text = None @@ -1603,7 +1603,7 @@ def config_setup(self, def fix_scylla_server_systemd_config(self): systemd_version = get_systemd_version(self.remoter.run("systemctl --version", 
ignore_status=True).stdout) - if systemd_version >= 240: + if systemd_version >= 240: # noqa: PLR2004 self.log.debug("systemd version %d >= 240: we can change FinalKillSignal", systemd_version) self.remoter.sudo(shell_script_cmd("""\ mkdir -p /etc/systemd/system/scylla-server.service.d @@ -1697,7 +1697,7 @@ def download_scylla_manager_repo(self, scylla_repo: str) -> None: self.remoter.sudo("apt-get update", ignore_status=True) @retrying(n=30, sleep_time=15, allowed_exceptions=(UnexpectedExit, Libssh2_UnexpectedExit,)) - def install_package(self, + def install_package(self, # noqa: PLR0913 package_name: str, package_version: str = None, wait_step: int = 30, @@ -1874,7 +1874,7 @@ def upgrade_system(self): # update repo cache after upgrade self.update_repo_cache() - def install_scylla(self, scylla_repo, scylla_version=None): + def install_scylla(self, scylla_repo, scylla_version=None): # noqa: PLR0915 """ Download and install scylla on node :param scylla_repo: scylla repo file URL @@ -1904,7 +1904,7 @@ def install_scylla(self, scylla_repo, scylla_version=None): self.install_package(package_name="apt-transport-https gnupg1-curl dirmngr openjdk-11-jre") self.install_package(self.scylla_pkg(), package_version=scylla_version) - def offline_install_scylla(self, unified_package, nonroot): + def offline_install_scylla(self, unified_package, nonroot): # noqa: PLR0912 """ Offline install scylla by unified package. 
""" @@ -2165,7 +2165,7 @@ def upgrade_mgmt(self, scylla_mgmt_address, start_manager_after_upgrade=True): time.sleep(5) # pylint: disable=too-many-branches - def install_mgmt(self, package_url: Optional[str] = None) -> None: + def install_mgmt(self, package_url: Optional[str] = None) -> None: # noqa: PLR0912 self.log.debug("Install scylla-manager") if self.is_docker(): @@ -2480,7 +2480,7 @@ def _gen_nodetool_cmd(self, sub_cmd, args, options): return f"{self.add_install_prefix('/usr/bin/nodetool')} {options} {sub_cmd} {args}" # pylint: disable=inconsistent-return-statements - def run_nodetool(self, sub_cmd, args="", options="", timeout=None, + def run_nodetool(self, sub_cmd, args="", options="", timeout=None, # noqa: PLR0913 ignore_status=False, verbose=True, coredump_on_timeout=False, warning_event_on_exception=None, error_message="", publish_event=True, retry=1): """ @@ -2599,7 +2599,7 @@ def get_nodes_status(self): for node_ip, node_properties in dc_status.items(): if node := node_ip_map.get(node_ip): nodes_status[node] = {'status': node_properties['state'], 'dc': dc} - else: + else: # noqa: PLR5501 if node_ip: LOGGER.error("Get nodes statuses. 
Failed to find a node in cluster by IP: %s", node_ip) @@ -2700,7 +2700,7 @@ def is_replacement_by_host_id_supported(self): return ComparableScyllaVersion(self.scylla_version) > '2022.3.0~dev' return ComparableScyllaVersion(self.scylla_version) > '5.2.0~dev' - def _gen_cqlsh_cmd(self, command, keyspace, timeout, host, port, connect_timeout): + def _gen_cqlsh_cmd(self, command, keyspace, timeout, host, port, connect_timeout): # noqa: PLR0913 """cqlsh [options] [host [port]]""" credentials = self.parent_cluster.get_db_auth() auth_params = "-u {} -p '{}'".format(*credentials) if credentials else '' @@ -2723,7 +2723,7 @@ def _gen_cqlsh_cmd(self, command, keyspace, timeout, host, port, connect_timeout return f'{cqlsh_cmd} {options} -e {command} --cloudconf {target_connection_bundle_file}' return f'{cqlsh_cmd} {options} -e {command} {host} {port}' - def run_cqlsh(self, cmd, keyspace=None, port=None, timeout=120, verbose=True, split=False, target_db_node=None, + def run_cqlsh(self, cmd, keyspace=None, port=None, timeout=120, verbose=True, split=False, target_db_node=None, # noqa: PLR0913 connect_timeout=60, num_retry_on_failure=1): """Runs CQL command using cqlsh utility""" cmd = self._gen_cqlsh_cmd(command=cmd, keyspace=keyspace, timeout=timeout, @@ -3032,23 +3032,23 @@ class FlakyRetryPolicy(RetryPolicy): """ def _retry_message(self, msg, retry_num): - if retry_num < 5: + if retry_num < 5: # noqa: PLR2004 LOGGER.debug("%s. 
Attempt #%d", msg, retry_num) return self.RETRY, None return self.RETHROW, None # pylint: disable=too-many-arguments - def on_read_timeout(self, query, consistency, required_responses, + def on_read_timeout(self, query, consistency, required_responses, # noqa: PLR0913 received_responses, data_retrieved, retry_num): return self._retry_message(msg="Retrying read after timeout", retry_num=retry_num) # pylint: disable=too-many-arguments - def on_write_timeout(self, query, consistency, write_type, + def on_write_timeout(self, query, consistency, write_type, # noqa: PLR0913 required_responses, received_responses, retry_num): return self._retry_message(msg="Retrying write after timeout", retry_num=retry_num) # pylint: disable=too-many-arguments - def on_unavailable(self, query, consistency, required_replicas, alive_replicas, retry_num): + def on_unavailable(self, query, consistency, required_replicas, alive_replicas, retry_num): # noqa: PLR0913 return self._retry_message(msg="Retrying request after UE", retry_num=retry_num) @@ -3076,7 +3076,7 @@ class BaseCluster: # pylint: disable=too-many-instance-attributes,too-many-publ """ # pylint: disable=too-many-arguments,too-many-locals,too-many-branches - def __init__(self, cluster_uuid=None, cluster_prefix='cluster', node_prefix='node', n_nodes=3, params=None, + def __init__(self, cluster_uuid=None, cluster_prefix='cluster', node_prefix='node', n_nodes=3, params=None, # noqa: PLR0913, PLR0912 region_names=None, node_type=None, extra_network_interface=False, add_nodes=True): self.extra_network_interface = extra_network_interface if params is None: @@ -3237,7 +3237,7 @@ def get_node_ips_param(self, public_ip=True): def wait_for_init(self): raise NotImplementedError("Derived class must implement 'wait_for_init' method!") - def add_nodes(self, count, ec2_user_data='', dc_idx=0, rack=0, enable_auto_bootstrap=False, instance_type=None): + def add_nodes(self, count, ec2_user_data='', dc_idx=0, rack=0, enable_auto_bootstrap=False, 
instance_type=None): # noqa: PLR0913 """ :param count: number of nodes to add :param ec2_user_data: @@ -3341,7 +3341,7 @@ def get_node_by_ip(self, node_ip, datacenter=None): return node return None - def _create_session(self, node, keyspace, user, password, compression, + def _create_session(self, node, keyspace, user, password, compression, # noqa: PLR0913 # pylint: disable=too-many-arguments, too-many-locals protocol_version, load_balancing_policy=None, port=None, ssl_opts=None, node_ips=None, connect_timeout=None, @@ -3388,7 +3388,7 @@ def _create_session(self, node, keyspace, user, password, compression, return ScyllaCQLSession(session, cluster_driver, verbose) - def cql_connection(self, node, keyspace=None, user=None, # pylint: disable=too-many-arguments + def cql_connection(self, node, keyspace=None, user=None, # pylint: disable=too-many-arguments # noqa: PLR0913 password=None, compression=True, protocol_version=None, port=None, ssl_opts=None, connect_timeout=100, verbose=True): if connection_bundle_file := node.parent_cluster.connection_bundle_file: @@ -3403,7 +3403,7 @@ def cql_connection(self, node, keyspace=None, user=None, # pylint: disable=too- connect_timeout=connect_timeout, verbose=verbose, connection_bundle_file=connection_bundle_file) - def cql_connection_exclusive(self, node, keyspace=None, user=None, # pylint: disable=too-many-arguments,too-many-locals + def cql_connection_exclusive(self, node, keyspace=None, user=None, # pylint: disable=too-many-arguments,too-many-locals # noqa: PLR0913 password=None, compression=True, protocol_version=None, port=None, ssl_opts=None, connect_timeout=100, verbose=True): @@ -3429,7 +3429,7 @@ def host_filter(host): connection_bundle_file=connection_bundle_file) @retrying(n=8, sleep_time=15, allowed_exceptions=(NoHostAvailable,)) - def cql_connection_patient(self, node, keyspace=None, + def cql_connection_patient(self, node, keyspace=None, # noqa: PLR0913 # pylint: disable=too-many-arguments,unused-argument 
user=None, password=None, compression=True, protocol_version=None, @@ -3444,7 +3444,7 @@ def cql_connection_patient(self, node, keyspace=None, return self.cql_connection(**kwargs) @retrying(n=8, sleep_time=15, allowed_exceptions=(NoHostAvailable,)) - def cql_connection_patient_exclusive(self, node, keyspace=None, + def cql_connection_patient_exclusive(self, node, keyspace=None, # noqa: PLR0913 # pylint: disable=invalid-name,too-many-arguments,unused-argument user=None, password=None, compression=True, @@ -3460,14 +3460,14 @@ def cql_connection_patient_exclusive(self, node, keyspace=None, del kwargs["self"] return self.cql_connection_exclusive(**kwargs) - def get_non_system_ks_cf_list(self, db_node, # pylint: disable=too-many-arguments + def get_non_system_ks_cf_list(self, db_node, # pylint: disable=too-many-arguments # noqa: PLR0913 filter_out_table_with_counter=False, filter_out_mv=False, filter_empty_tables=True, filter_by_keyspace: list = None) -> List[str]: return self.get_any_ks_cf_list(db_node, filter_out_table_with_counter=filter_out_table_with_counter, filter_out_mv=filter_out_mv, filter_empty_tables=filter_empty_tables, filter_out_system=True, filter_out_cdc_log_tables=True, filter_by_keyspace=filter_by_keyspace) - def get_any_ks_cf_list(self, db_node, # pylint: disable=too-many-arguments + def get_any_ks_cf_list(self, db_node, # pylint: disable=too-many-arguments # noqa: PLR0913 filter_out_table_with_counter=False, filter_out_mv=False, filter_empty_tables=True, filter_out_system=False, filter_out_cdc_log_tables=False, filter_by_keyspace: list = None) -> List[str]: @@ -3640,14 +3640,14 @@ class NodeSetupTimeout(Exception): pass -def wait_for_init_wrap(method): # pylint: disable=too-many-statements +def wait_for_init_wrap(method): # pylint: disable=too-many-statements # noqa: PLR0915 """ Wraps wait_for_init class method. Run setup of nodes simultaneously and wait for all the setups finished. Raise exception if setup failed or timeout expired. 
""" @wraps(method) - def wrapper(*args, **kwargs): # pylint: disable=too-many-statements,too-many-locals + def wrapper(*args, **kwargs): # pylint: disable=too-many-statements,too-many-locals # noqa: PLR0915 cl_inst = args[0] LOGGER.debug('Class instance: %s', cl_inst) LOGGER.debug('Method kwargs: %s', kwargs) @@ -3948,7 +3948,7 @@ def start_scylla(node, _queue): time_elapsed = time.time() - start_time self.log.debug('Update DB binary duration -> %s s', int(time_elapsed)) - def _update_db_packages(self, new_scylla_bin, node_list, start_service=True): + def _update_db_packages(self, new_scylla_bin, node_list, start_service=True): # noqa: PLR0915 self.log.debug('User requested to update DB packages...') def check_package_suites_distro(node, extension): @@ -4214,7 +4214,7 @@ def get_node_status_dictionary(self, ip_address=None, verification_node=None): break return node_status - def wait_for_nodes_up_and_normal(self, nodes=None, verification_node=None, iterations=60, sleep_time=3, timeout=0): # pylint: disable=too-many-arguments + def wait_for_nodes_up_and_normal(self, nodes=None, verification_node=None, iterations=60, sleep_time=3, timeout=0): # pylint: disable=too-many-arguments # noqa: PLR0913 @retrying(n=iterations, sleep_time=sleep_time, allowed_exceptions=NETWORK_EXCEPTIONS + (ClusterNodesNotReady,), message="Waiting for nodes to join the cluster", timeout=timeout) def _wait_for_nodes_up_and_normal(): @@ -4381,7 +4381,7 @@ def scylla_configure_non_root_installation(self, node, devname, verbose, timeout node.wait_db_up(verbose=verbose, timeout=timeout) node.wait_jmx_up(verbose=verbose, timeout=200) - def node_setup(self, node: BaseNode, verbose: bool = False, timeout: int = 3600): # pylint: disable=too-many-branches,too-many-statements,too-many-locals + def node_setup(self, node: BaseNode, verbose: bool = False, timeout: int = 3600): # pylint: disable=too-many-branches,too-many-statements,too-many-locals # noqa: PLR0912, PLR0915 node.wait_ssh_up(verbose=verbose, 
timeout=timeout) if node.distro.is_centos8 or node.distro.is_rhel8 or node.distro.is_oel8 or node.distro.is_rocky8 or node.distro.is_rocky9: node.remoter.sudo('systemctl stop iptables', ignore_status=True) @@ -4811,7 +4811,7 @@ def gemini_version(self): self.log.error("Error get gemini version: %s", details) return self._gemini_version - def node_setup(self, node, verbose=False, db_node_address=None, **kwargs): # pylint: disable=unused-argument + def node_setup(self, node, verbose=False, db_node_address=None, **kwargs): # pylint: disable=unused-argument # noqa: PLR0912, PLR0915 # pylint: disable=too-many-statements,too-many-branches self.log.info('Setup in BaseLoaderSet') @@ -4907,7 +4907,7 @@ def kill_stress_thread(self): if self.nodes and self.nodes[0].is_kubernetes(): for node in self.nodes: node.remoter.stop() - else: + else: # noqa: PLR5501 if self.params.get("use_prepared_loaders"): self.kill_cassandra_stress_thread() else: @@ -4959,7 +4959,7 @@ def _parse_cs_summary(lines): enable_parse = False for line in lines: - line = line.strip() + line = line.strip() # noqa: PLW2901 if not line: continue # Parse loader & cpu info @@ -5609,7 +5609,7 @@ def get_grafana_screenshot_and_snapshot(self, test_start_time=None): # pylint: class LocalNode(BaseNode): - def __init__(self, name, parent_cluster, # pylint: disable=too-many-arguments,unused-argument + def __init__(self, name, parent_cluster, # pylint: disable=too-many-arguments,unused-argument # noqa: PLR0913 ssh_login_info=None, base_logdir=None, node_prefix=None, dc_idx=0, rack=0): super().__init__(name=name, parent_cluster=parent_cluster, ssh_login_info=ssh_login_info, diff --git a/sdcm/cluster_aws.py b/sdcm/cluster_aws.py index ccb67e4b7b5..04ebb5ae0c4 100644 --- a/sdcm/cluster_aws.py +++ b/sdcm/cluster_aws.py @@ -75,7 +75,7 @@ class AWSCluster(cluster.BaseCluster): # pylint: disable=too-many-instance-attr Cluster of Node objects, started on Amazon EC2. 
""" - def __init__(self, ec2_ami_id, ec2_subnet_id, ec2_security_group_ids, # pylint: disable=too-many-arguments + def __init__(self, ec2_ami_id, ec2_subnet_id, ec2_security_group_ids, # pylint: disable=too-many-arguments # noqa: PLR0913 services, credentials, cluster_uuid=None, ec2_instance_type='c5.xlarge', ec2_ami_username='root', ec2_user_data='', ec2_block_device_mappings=None, @@ -128,7 +128,7 @@ def instance_profile_name(self) -> str | None: return self.params.get('aws_instance_profile_name_loader') return None - def _create_on_demand_instances(self, count, interfaces, ec2_user_data, dc_idx=0, instance_type=None): # pylint: disable=too-many-arguments + def _create_on_demand_instances(self, count, interfaces, ec2_user_data, dc_idx=0, instance_type=None): # pylint: disable=too-many-arguments # noqa: PLR0913 ami_id = self._ec2_ami_id[dc_idx] self.log.debug(f"Creating {count} on-demand instances using AMI id '{ami_id}'... ") params = dict(ImageId=ami_id, @@ -158,7 +158,7 @@ def _create_on_demand_instances(self, count, interfaces, ec2_user_data, dc_idx=0 self.log.debug("Created instances: %s." 
% instances) return instances - def _create_spot_instances(self, count, interfaces, ec2_user_data='', dc_idx=0, instance_type=None): # pylint: disable=too-many-arguments + def _create_spot_instances(self, count, interfaces, ec2_user_data='', dc_idx=0, instance_type=None): # pylint: disable=too-many-arguments # noqa: PLR0913 # pylint: disable=too-many-locals ec2 = ec2_client.EC2ClientWrapper(region_name=self.region_names[dc_idx], spot_max_price_percentage=self.params.get('spot_max_price')) @@ -196,7 +196,7 @@ def _create_spot_instances(self, count, interfaces, ec2_user_data='', dc_idx=0, return instances - def _create_instances(self, count, ec2_user_data='', dc_idx=0, az_idx=0, instance_type=None): # pylint: disable=too-many-arguments + def _create_instances(self, count, ec2_user_data='', dc_idx=0, az_idx=0, instance_type=None): # pylint: disable=too-many-arguments # noqa: PLR0913 if not count: # EC2 API fails if we request zero instances. return [] @@ -231,7 +231,7 @@ def _create_instances(self, count, ec2_user_data='', dc_idx=0, az_idx=0, instanc return instances - def fallback_provision_type(self, count, interfaces, ec2_user_data, dc_idx, instance_type=None): # pylint: disable=too-many-arguments + def fallback_provision_type(self, count, interfaces, ec2_user_data, dc_idx, instance_type=None): # pylint: disable=too-many-arguments # noqa: PLR0913 instances = None if self.instance_provision.lower() == 'spot' or (self.instance_provision == INSTANCE_PROVISION_SPOT_FLEET and count == 1): @@ -273,13 +273,13 @@ def check_spot_error(self, cl_ex, instance_provision): return True return False - def _create_mixed_instances(self, count, interfaces, ec2_user_data, dc_idx, instance_type=None): # pylint: disable=too-many-arguments + def _create_mixed_instances(self, count, interfaces, ec2_user_data, dc_idx, instance_type=None): # pylint: disable=too-many-arguments # noqa: PLR0913 instances = [] max_num_on_demand = 2 if isinstance(self, (ScyllaAWSCluster, CassandraAWSCluster)): - 
if count > 2: + if count > 2: # noqa: PLR2004 count_on_demand = max_num_on_demand - elif count == 2: + elif count == 2: # noqa: PLR2004 count_on_demand = 1 else: count_on_demand = 0 @@ -347,14 +347,14 @@ def update_bootstrap(ec2_user_data, enable_auto_bootstrap): ec2_user_data.replace('--bootstrap false', '--bootstrap true') else: ec2_user_data += ' --bootstrap true ' - else: + else: # noqa: PLR5501 if '--bootstrap ' in ec2_user_data: ec2_user_data.replace('--bootstrap true', '--bootstrap false') else: ec2_user_data += ' --bootstrap false ' return ec2_user_data - def _create_or_find_instances(self, count, ec2_user_data, dc_idx, az_idx=0, instance_type=None): # pylint: disable=too-many-arguments + def _create_or_find_instances(self, count, ec2_user_data, dc_idx, az_idx=0, instance_type=None): # pylint: disable=too-many-arguments # noqa: PLR0913 nodes = [node for node in self.nodes if node.dc_idx == dc_idx and node.rack == az_idx] if nodes: return self._create_instances(count, ec2_user_data, dc_idx, az_idx, instance_type=instance_type) @@ -370,7 +370,7 @@ def _create_or_find_instances(self, count, ec2_user_data, dc_idx, az_idx=0, inst return self._create_instances(count, ec2_user_data, dc_idx, az_idx, instance_type=instance_type) # pylint: disable=too-many-arguments - def add_nodes(self, count, ec2_user_data='', dc_idx=0, rack=0, enable_auto_bootstrap=False, instance_type=None): + def add_nodes(self, count, ec2_user_data='', dc_idx=0, rack=0, enable_auto_bootstrap=False, instance_type=None): # noqa: PLR0913 if not count: return [] ec2_user_data = self.prepare_user_data(enable_auto_bootstrap=enable_auto_bootstrap) @@ -393,7 +393,7 @@ def add_nodes(self, count, ec2_user_data='', dc_idx=0, rack=0, enable_auto_boots self.write_node_private_ip_file() return self.nodes[-count:] - def _create_node(self, instance, ami_username, node_prefix, node_index, # pylint: disable=too-many-arguments + def _create_node(self, instance, ami_username, node_prefix, node_index, # pylint: 
disable=too-many-arguments # noqa: PLR0913 base_logdir, dc_idx, rack): node = AWSNode(ec2_instance=instance, ec2_service=self._ec2_services[dc_idx], credentials=self._credentials[dc_idx], parent_cluster=self, ami_username=ami_username, @@ -410,7 +410,7 @@ class AWSNode(cluster.BaseNode): log = LOGGER - def __init__(self, ec2_instance, ec2_service, credentials, parent_cluster, # pylint: disable=too-many-arguments + def __init__(self, ec2_instance, ec2_service, credentials, parent_cluster, # pylint: disable=too-many-arguments # noqa: PLR0913 node_prefix='node', node_index=1, ami_username='root', base_logdir=None, dc_idx=0, rack=0): self.node_index = node_index @@ -434,7 +434,7 @@ def init(self): if not self.test_config.REUSE_CLUSTER: resources_to_tag = [self._instance.id, ] - if len(self._instance.network_interfaces) == 2: + if len(self._instance.network_interfaces) == 2: # noqa: PLR2004 # first we need to configure the both networks so we'll have public ip self.allocate_and_attach_elastic_ip(self.parent_cluster, self.dc_idx) resources_to_tag.append(self.eip_allocation_id) @@ -772,7 +772,7 @@ def ena_support(self) -> bool: class ScyllaAWSCluster(cluster.BaseScyllaCluster, AWSCluster): - def __init__(self, ec2_ami_id, ec2_subnet_id, ec2_security_group_ids, # pylint: disable=too-many-arguments + def __init__(self, ec2_ami_id, ec2_subnet_id, ec2_security_group_ids, # pylint: disable=too-many-arguments # noqa: PLR0913 services, credentials, ec2_instance_type='c5.xlarge', ec2_ami_username='centos', ec2_block_device_mappings=None, @@ -806,7 +806,7 @@ def __init__(self, ec2_ami_id, ec2_subnet_id, ec2_security_group_ids, # pylint: self.version = '2.1' # pylint: disable=too-many-arguments - def add_nodes(self, count, ec2_user_data='', dc_idx=0, rack=0, enable_auto_bootstrap=False, instance_type=None): + def add_nodes(self, count, ec2_user_data='', dc_idx=0, rack=0, enable_auto_bootstrap=False, instance_type=None): # noqa: PLR0913 if not ec2_user_data: if self._ec2_user_data 
and isinstance(self._ec2_user_data, str): ec2_user_data = re.sub(r'(--totalnodes\s)(\d*)(\s)', @@ -854,7 +854,7 @@ def destroy(self): class CassandraAWSCluster(ScyllaAWSCluster): - def __init__(self, ec2_ami_id, ec2_subnet_id, ec2_security_group_ids, # pylint: disable=too-many-arguments + def __init__(self, ec2_ami_id, ec2_subnet_id, ec2_security_group_ids, # pylint: disable=too-many-arguments # noqa: PLR0913 services, credentials, ec2_instance_type='c5.xlarge', ec2_ami_username='ubuntu', ec2_block_device_mappings=None, @@ -906,7 +906,7 @@ def get_seed_nodes(self): raise ValueError('Unexpected cassandra.yaml. Contents:\n%s' % yaml_stream.read()) from exc # pylint: disable=too-many-arguments - def add_nodes(self, count, ec2_user_data='', dc_idx=0, rack=0, enable_auto_bootstrap=False, instance_type=None): + def add_nodes(self, count, ec2_user_data='', dc_idx=0, rack=0, enable_auto_bootstrap=False, instance_type=None): # noqa: PLR0913 if not ec2_user_data: if self.nodes: seeds = ",".join(self.get_seed_nodes()) @@ -946,7 +946,7 @@ def wait_for_init(self, node_list=None, verbose=False, timeout=None, check_node_ class LoaderSetAWS(cluster.BaseLoaderSet, AWSCluster): - def __init__(self, ec2_ami_id, ec2_subnet_id, ec2_security_group_ids, # pylint: disable=too-many-arguments + def __init__(self, ec2_ami_id, ec2_subnet_id, ec2_security_group_ids, # pylint: disable=too-many-arguments # noqa: PLR0913 services, credentials, ec2_instance_type='c5.xlarge', ec2_block_device_mappings=None, ec2_ami_username='centos', @@ -979,7 +979,7 @@ def __init__(self, ec2_ami_id, ec2_subnet_id, ec2_security_group_ids, # pylint: class MonitorSetAWS(cluster.BaseMonitorSet, AWSCluster): - def __init__(self, ec2_ami_id, ec2_subnet_id, ec2_security_group_ids, # pylint: disable=too-many-arguments + def __init__(self, ec2_ami_id, ec2_subnet_id, ec2_security_group_ids, # pylint: disable=too-many-arguments # noqa: PLR0913 services, credentials, ec2_instance_type='c5.xlarge', 
ec2_block_device_mappings=None, ec2_ami_username='centos', diff --git a/sdcm/cluster_azure.py b/sdcm/cluster_azure.py index 33aae3ad29b..0a2fdb66eae 100644 --- a/sdcm/cluster_azure.py +++ b/sdcm/cluster_azure.py @@ -39,7 +39,7 @@ class AzureNode(cluster.BaseNode): log = LOGGER - def __init__(self, azure_instance: VmInstance, # pylint: disable=too-many-arguments + def __init__(self, azure_instance: VmInstance, # pylint: disable=too-many-arguments # noqa: PLR0913 credentials, parent_cluster, node_prefix='node', node_index=1, base_logdir=None, dc_idx=0): @@ -162,7 +162,7 @@ def configure_remote_logging(self) -> None: class AzureCluster(cluster.BaseCluster): # pylint: disable=too-many-instance-attributes - def __init__(self, image_id, root_disk_size, # pylint: disable=too-many-arguments, too-many-locals + def __init__(self, image_id, root_disk_size, # pylint: disable=too-many-arguments, too-many-locals # noqa: PLR0913 provisioners: List[AzureProvisioner], credentials, cluster_uuid=None, instance_type='Standard_L8s_v3', region_names=None, user_name='root', cluster_prefix='cluster', @@ -185,7 +185,7 @@ def __init__(self, image_id, root_disk_size, # pylint: disable=too-many-argumen node_type=node_type) self.log.debug("AzureCluster constructor") - def add_nodes(self, count, ec2_user_data='', dc_idx=0, rack=0, enable_auto_bootstrap=False, instance_type=None): # pylint: disable=too-many-arguments + def add_nodes(self, count, ec2_user_data='', dc_idx=0, rack=0, enable_auto_bootstrap=False, instance_type=None): # pylint: disable=too-many-arguments # noqa: PLR0913 self.log.info("Adding nodes to cluster") nodes = [] @@ -245,7 +245,7 @@ def wait_for_init(self): class ScyllaAzureCluster(cluster.BaseScyllaCluster, AzureCluster): - def __init__(self, image_id, root_disk_size, # pylint: disable=too-many-arguments + def __init__(self, image_id, root_disk_size, # pylint: disable=too-many-arguments # noqa: PLR0913 provisioners: List[AzureProvisioner], credentials, 
instance_type='Standard_L8s_v3', user_name='ubuntu', @@ -276,7 +276,7 @@ def _wait_for_preinstalled_scylla(node): class LoaderSetAzure(cluster.BaseLoaderSet, AzureCluster): - def __init__(self, image_id, root_disk_size, provisioners, credentials, # pylint: disable=too-many-arguments + def __init__(self, image_id, root_disk_size, provisioners, credentials, # pylint: disable=too-many-arguments # noqa: PLR0913 instance_type='Standard_D2_v4', user_name='centos', user_prefix=None, n_nodes=1, params=None, region_names=None): @@ -302,7 +302,7 @@ def __init__(self, image_id, root_disk_size, provisioners, credentials, # pylin class MonitorSetAzure(cluster.BaseMonitorSet, AzureCluster): - def __init__(self, image_id, root_disk_size, provisioners, credentials, # pylint: disable=too-many-arguments + def __init__(self, image_id, root_disk_size, provisioners, credentials, # pylint: disable=too-many-arguments # noqa: PLR0913 instance_type='Standard_D2_v4', user_name='centos', user_prefix=None, n_nodes=1, targets=None, params=None, region_names=None): diff --git a/sdcm/cluster_baremetal.py b/sdcm/cluster_baremetal.py index 7f7e758c6f6..1a4d15dac6a 100644 --- a/sdcm/cluster_baremetal.py +++ b/sdcm/cluster_baremetal.py @@ -36,8 +36,8 @@ class PhysicalMachineNode(cluster.BaseNode): log = LOGGER # pylint: disable=too-many-arguments - def __init__(self, name, parent_cluster: 'PhysicalMachineCluster', - public_ip, private_ip, credentials, base_logdir=None, node_prefix=None): + def __init__(self, name, parent_cluster: 'PhysicalMachineCluster', # noqa: PLR0913 + public_ip, private_ip, credentials, base_logdir=None, node_prefix=None): # noqa: PLR0913 ssh_login_info = {'hostname': None, 'user': getattr(parent_cluster, "ssh_username", credentials.name), 'key_file': credentials.key_file} @@ -122,7 +122,7 @@ def _create_node(self, name, public_ip, private_ip): return node # pylint: disable=unused-argument,too-many-arguments - def add_nodes(self, count, ec2_user_data='', dc_idx=0, rack=0, 
enable_auto_bootstrap=False, instance_type=None): + def add_nodes(self, count, ec2_user_data='', dc_idx=0, rack=0, enable_auto_bootstrap=False, instance_type=None): # noqa: PLR0913 assert instance_type is None, "baremetal can provision diffrent types" for node_index in range(count): node_name = '%s-%s' % (self.node_prefix, node_index) diff --git a/sdcm/cluster_docker.py b/sdcm/cluster_docker.py index 4085f85cd94..d16a65bd12a 100644 --- a/sdcm/cluster_docker.py +++ b/sdcm/cluster_docker.py @@ -61,7 +61,7 @@ def node_container_run_args(self, seed_ip): class DockerNode(cluster.BaseNode, NodeContainerMixin): # pylint: disable=abstract-method - def __init__(self, # pylint: disable=too-many-arguments + def __init__(self, # pylint: disable=too-many-arguments # noqa: PLR0913 parent_cluster: "DockerCluster", container: Optional[Container] = None, node_prefix: str = "node", @@ -190,7 +190,7 @@ def region(self): class DockerCluster(cluster.BaseCluster): # pylint: disable=abstract-method node_container_user = "scylla-test" - def __init__(self, + def __init__(self, # noqa: PLR0913 docker_image: str = DEFAULT_SCYLLA_DB_IMAGE, docker_image_tag: str = DEFAULT_SCYLLA_DB_IMAGE_TAG, node_key_file: Optional[str] = None, @@ -259,13 +259,13 @@ def _get_nodes(self): self.nodes.append(node) return self.nodes - def add_nodes(self, count, ec2_user_data="", dc_idx=0, rack=0, enable_auto_bootstrap=False, instance_type=None): + def add_nodes(self, count, ec2_user_data="", dc_idx=0, rack=0, enable_auto_bootstrap=False, instance_type=None): # noqa: PLR0913 assert instance_type is None, "docker can't provision different instance types" return self._get_nodes() if self.test_config.REUSE_CLUSTER else self._create_nodes(count, enable_auto_bootstrap) class ScyllaDockerCluster(cluster.BaseScyllaCluster, DockerCluster): # pylint: disable=abstract-method - def __init__(self, + def __init__(self, # noqa: PLR0913 docker_image: str = DEFAULT_SCYLLA_DB_IMAGE, docker_image_tag: str = 
DEFAULT_SCYLLA_DB_IMAGE_TAG, node_key_file: Optional[str] = None, @@ -331,7 +331,7 @@ def get_scylla_args(self): class LoaderSetDocker(cluster.BaseLoaderSet, DockerCluster): - def __init__(self, + def __init__(self, # noqa: PLR0913 docker_image: str = DEFAULT_SCYLLA_DB_IMAGE, docker_image_tag: str = DEFAULT_SCYLLA_DB_IMAGE_TAG, node_key_file: Optional[str] = None, @@ -367,7 +367,7 @@ def node_setup(self, node: DockerNode, verbose=False, db_node_address=None, **kw class DockerMonitoringNode(cluster.BaseNode): # pylint: disable=abstract-method,too-many-instance-attributes log = LOGGER - def __init__(self, + def __init__(self, # noqa: PLR0913 parent_cluster: "MonitorSetDocker", node_prefix: str = "monitor-node", base_logdir: Optional[str] = None, @@ -449,7 +449,7 @@ def _create_node(self, node_index, container=None): node.init() return node - def add_nodes(self, count, ec2_user_data="", dc_idx=0, rack=0, enable_auto_bootstrap=False, instance_type=None): + def add_nodes(self, count, ec2_user_data="", dc_idx=0, rack=0, enable_auto_bootstrap=False, instance_type=None): # noqa: PLR0913 assert instance_type is None, "docker can provision different instance types" return self._create_nodes(count, enable_auto_bootstrap) diff --git a/sdcm/cluster_gce.py b/sdcm/cluster_gce.py index 4eaf92eeee0..31897968ac7 100644 --- a/sdcm/cluster_gce.py +++ b/sdcm/cluster_gce.py @@ -65,7 +65,7 @@ class GCENode(cluster.BaseNode): log = LOGGER - def __init__(self, gce_instance: compute_v1.Instance, # pylint: disable=too-many-arguments + def __init__(self, gce_instance: compute_v1.Instance, # pylint: disable=too-many-arguments # noqa: PLR0913 gce_service: compute_v1.InstancesClient, credentials, parent_cluster, @@ -233,7 +233,7 @@ class GCECluster(cluster.BaseCluster): # pylint: disable=too-many-instance-attr """ _gce_service: compute_v1.InstancesClient - def __init__(self, gce_image, gce_image_type, gce_image_size, gce_network, gce_service, credentials, # pylint: disable=too-many-arguments + 
def __init__(self, gce_image, gce_image_type, gce_image_size, gce_network, gce_service, credentials, # pylint: disable=too-many-arguments # noqa: PLR0913 cluster_uuid=None, gce_instance_type='n1-standard-1', gce_region_names=None, gce_n_local_ssd=1, gce_image_username='root', cluster_prefix='cluster', node_prefix='node', n_nodes=3, add_disks=None, params=None, node_type=None, @@ -321,14 +321,14 @@ def _get_disks_struct(self, name, dc_idx): gce_disk_struct.append(self._get_local_ssd_disk_struct(name=name, index=i, dc_idx=dc_idx)) if self._add_disks: for disk_type, disk_size in self._add_disks.items(): - disk_size = int(disk_size) + disk_size = int(disk_size) # noqa: PLW2901 if disk_size: gce_disk_struct.append(self._get_persistent_disk_struct(name=name, disk_size=disk_size, disk_type=disk_type, dc_idx=dc_idx)) self.log.debug(gce_disk_struct) return gce_disk_struct - def _create_instance(self, node_index, dc_idx, spot=False, enable_auto_bootstrap=False, instance_type=None): # pylint: disable=too-many-arguments + def _create_instance(self, node_index, dc_idx, spot=False, enable_auto_bootstrap=False, instance_type=None): # pylint: disable=too-many-arguments # noqa: PLR0913 # pylint: disable=too-many-locals def set_tags_as_labels(_instance: compute_v1.Instance): @@ -352,7 +352,7 @@ def to_short_name(value): gce_disk_struct = self._get_disks_struct(name=name, dc_idx=dc_idx) # Name must start with a lowercase letter followed by up to 63 # lowercase letters, numbers, or hyphens, and cannot end with a hyphen - assert len(name) <= 63, "Max length of instance name is 63" + assert len(name) <= 63, "Max length of instance name is 63" # noqa: PLR2004 startup_script = "" if self.params.get("scylla_linux_distro") in ("ubuntu-bionic", "ubuntu-xenial", "ubuntu-focal",): @@ -493,7 +493,7 @@ def _create_node(self, instance, node_index, dc_idx): raise CreateGCENodeError('Failed to create node: %s' % ex) from ex # pylint: disable=too-many-arguments - def add_nodes(self, count, 
ec2_user_data='', dc_idx=0, rack=0, enable_auto_bootstrap=False, instance_type=None): + def add_nodes(self, count, ec2_user_data='', dc_idx=0, rack=0, enable_auto_bootstrap=False, instance_type=None): # noqa: PLR0913 if count <= 0: return [] self.log.info("Adding nodes to cluster") @@ -522,7 +522,7 @@ def add_nodes(self, count, ec2_user_data='', dc_idx=0, rack=0, enable_auto_boots class ScyllaGCECluster(cluster.BaseScyllaCluster, GCECluster): - def __init__(self, gce_image, gce_image_type, gce_image_size, gce_network, gce_service, credentials, # pylint: disable=too-many-arguments + def __init__(self, gce_image, gce_image_type, gce_image_size, gce_network, gce_service, credentials, # pylint: disable=too-many-arguments # noqa: PLR0913 gce_instance_type='n1-standard-1', gce_n_local_ssd=1, gce_image_username='centos', user_prefix=None, n_nodes=3, add_disks=None, params=None, gce_datacenter=None, service_accounts=None): @@ -558,7 +558,7 @@ def _wait_for_preinstalled_scylla(node): class LoaderSetGCE(cluster.BaseLoaderSet, GCECluster): - def __init__(self, gce_image, gce_image_type, gce_image_size, gce_network, gce_service, credentials, # pylint: disable=too-many-arguments + def __init__(self, gce_image, gce_image_type, gce_image_size, gce_network, gce_service, credentials, # pylint: disable=too-many-arguments # noqa: PLR0913 gce_instance_type='n1-standard-1', gce_n_local_ssd=1, gce_image_username='centos', user_prefix=None, n_nodes=10, add_disks=None, params=None, gce_datacenter=None): @@ -589,7 +589,7 @@ def __init__(self, gce_image, gce_image_type, gce_image_size, gce_network, gce_s class MonitorSetGCE(cluster.BaseMonitorSet, GCECluster): - def __init__(self, gce_image, gce_image_type, gce_image_size, gce_network, gce_service, credentials, # pylint: disable=too-many-arguments + def __init__(self, gce_image, gce_image_type, gce_image_size, gce_network, gce_service, credentials, # pylint: disable=too-many-arguments # noqa: PLR0913 gce_instance_type='n1-standard-1', 
gce_n_local_ssd=1, gce_image_username='centos', user_prefix=None, n_nodes=1, targets=None, add_disks=None, params=None, gce_datacenter=None, diff --git a/sdcm/cluster_k8s/__init__.py b/sdcm/cluster_k8s/__init__.py index 0aaf389e0ea..65a55f931ec 100644 --- a/sdcm/cluster_k8s/__init__.py +++ b/sdcm/cluster_k8s/__init__.py @@ -165,7 +165,7 @@ class CloudK8sNodePool(metaclass=abc.ABCMeta): # pylint: disable=too-many-instance-attributes - def __init__( + def __init__( # noqa: PLR0913 self, k8s_cluster: 'KubernetesCluster', name: str, @@ -366,12 +366,12 @@ def short_cluster_name(self): kubectl_cmd = partialmethod(KubernetesOps.kubectl_cmd) apply_file = partialmethod(KubernetesOps.apply_file) - def kubectl_no_wait(self, *command, namespace=None, timeout=KUBECTL_TIMEOUT, remoter=None, ignore_status=False, + def kubectl_no_wait(self, *command, namespace=None, timeout=KUBECTL_TIMEOUT, remoter=None, ignore_status=False, # noqa: PLR0913 verbose=True): return KubernetesOps.kubectl(self, *command, namespace=namespace, timeout=timeout, remoter=remoter, ignore_status=ignore_status, verbose=verbose) - def kubectl(self, *command, namespace=None, timeout=KUBECTL_TIMEOUT, remoter=None, ignore_status=False, + def kubectl(self, *command, namespace=None, timeout=KUBECTL_TIMEOUT, remoter=None, ignore_status=False, # noqa: PLR0913 verbose=True): if self.api_call_rate_limiter: self.api_call_rate_limiter.wait() @@ -405,7 +405,7 @@ def wait_body(): return result return wait_body() - def kubectl_multi_cmd(self, *command, namespace=None, timeout=KUBECTL_TIMEOUT, remoter=None, ignore_status=False, + def kubectl_multi_cmd(self, *command, namespace=None, timeout=KUBECTL_TIMEOUT, remoter=None, ignore_status=False, # noqa: PLR0913 verbose=True): if self.api_call_rate_limiter: self.api_call_rate_limiter.wait() @@ -907,7 +907,7 @@ def get_scylla_cluster_helm_values(self, cpu_limit, memory_limit, pool_name: str {"key": "app.kubernetes.io/name", "operator": "In", "values": ["scylla"]}, ]} } - if 
self.tenants_number < 2: + if self.tenants_number < 2: # noqa: PLR2004 placement["podAntiAffinity"] = {"preferredDuringSchedulingIgnoredDuringExecution": [{ "weight": 100, "podAffinityTerm": pod_affinity_term, @@ -1572,7 +1572,7 @@ def update_secret_from_data(self, secret_name: str, namespace: str, data: dict, existing.data[key] = base64.b64encode(json.dumps(value).encode('utf-8')).decode('utf-8') self.k8s_core_v1_api.patch_namespaced_secret(secret_name, namespace, existing) - def create_secret_from_directory(self, secret_name: str, path: str, namespace: str, secret_type: str = 'generic', + def create_secret_from_directory(self, secret_name: str, path: str, namespace: str, secret_type: str = 'generic', # noqa: PLR0913 only_files: List[str] = None): files = [fname for fname in os.listdir(path) if os.path.isfile(os.path.join(path, fname)) and (not only_files or fname in only_files)] @@ -1638,7 +1638,7 @@ def upgrade_kubernetes_platform(self, pod_objects: list[cluster.BaseNode], use_additional_scylla_nodepool: bool) -> (str, CloudK8sNodePool): raise NotImplementedError("Kubernetes upgrade is not implemented on this backend") - def move_pods_to_new_node_pool(self, pod_objects: list[cluster.BaseNode], + def move_pods_to_new_node_pool(self, pod_objects: list[cluster.BaseNode], # noqa: PLR0913 node_pool_name: str, cluster_name: str = "", cluster_namespace: str = SCYLLA_NAMESPACE, pod_readiness_timeout_minutes: int = 20): @@ -1770,7 +1770,7 @@ class BasePodContainer(cluster.BaseNode): # pylint: disable=too-many-public-met pod_readiness_timeout = 10 # minutes pod_terminate_timeout = 5 # minutes - def __init__(self, name: str, parent_cluster: PodCluster, node_prefix: str = "node", node_index: int = 1, + def __init__(self, name: str, parent_cluster: PodCluster, node_prefix: str = "node", node_index: int = 1, # noqa: PLR0913 base_logdir: Optional[str] = None, dc_idx: int = 0, rack=0): self.node_index = node_index cluster.BaseNode.__init__( @@ -2236,7 +2236,7 @@ class 
LoaderPodContainer(BasePodContainer): def node_type(self) -> str: return 'loader' - def __init__(self, name: str, parent_cluster: PodCluster, + def __init__(self, name: str, parent_cluster: PodCluster, # noqa: PLR0913 node_prefix: str = "node", node_index: int = 1, base_logdir: Optional[str] = None, dc_idx: int = 0, rack=0): self.loader_cluster_name = parent_cluster.loader_cluster_name @@ -2292,7 +2292,7 @@ def restart(self): class PodCluster(cluster.BaseCluster): PodContainerClass: Type[BasePodContainer] = BasePodContainer - def __init__(self, + def __init__(self, # noqa: PLR0913 k8s_clusters: List[KubernetesCluster], namespace: str = "default", container: Optional[str] = None, @@ -2339,7 +2339,7 @@ def _create_node(self, node_index: int, pod_name: str, dc_idx: int, rack: int) - node.init() return node - def add_nodes(self, + def add_nodes(self, # noqa: PLR0913 count: int, ec2_user_data: str = "", dc_idx: int = 0, @@ -2451,7 +2451,7 @@ def generate_namespace(self, namespace_template: str) -> str: return candidate_namespace # TODO: make it work correctly for case with reusage of multi-tenant cluster k8s_cluster = self.k8s_clusters[0] - if k8s_cluster.params.get('reuse_cluster') and k8s_cluster.tenants_number < 2: + if k8s_cluster.params.get('reuse_cluster') and k8s_cluster.tenants_number < 2: # noqa: PLR2004 return namespace_template raise RuntimeError("No available namespace was found") @@ -2459,7 +2459,7 @@ def generate_namespace(self, namespace_template: str) -> str: class ScyllaPodCluster(cluster.BaseScyllaCluster, PodCluster): # pylint: disable=too-many-public-methods node_setup_requires_scylla_restart = False - def __init__(self, + def __init__(self, # noqa: PLR0913 k8s_clusters: List[KubernetesCluster], scylla_cluster_name: Optional[str] = None, user_prefix: Optional[str] = None, @@ -2526,7 +2526,7 @@ def get_scylla_args(self) -> str: def pod_selector(self): return 'app=scylla' - def wait_for_nodes_up_and_normal(self, nodes=None, verification_node=None, 
iterations=None, + def wait_for_nodes_up_and_normal(self, nodes=None, verification_node=None, iterations=None, # noqa: PLR0913 sleep_time=None, timeout=None): # pylint: disable=too-many-arguments dc_node_mapping, nodes = {}, (nodes or self.nodes) for node in nodes: @@ -2750,7 +2750,7 @@ def _get_rack_nodes(self, rack: int, dc_idx: int) -> list: return sorted( [node for node in self.nodes if node.rack == rack and node.dc_idx == dc_idx], key=lambda n: n.name) - def add_nodes(self, # pylint: disable=too-many-locals,too-many-branches + def add_nodes(self, # pylint: disable=too-many-locals,too-many-branches # noqa: PLR0913, PLR0912 count: int, ec2_user_data: str = "", # NOTE: 'dc_idx=None' means 'create %count% nodes on each K8S cluster' @@ -2773,7 +2773,7 @@ def add_nodes(self, # pylint: disable=too-many-locals,too-many-branches new_nodes = [] self.log.debug( "'%s' configuration was taken for the 'dc_idx': %s", - "Single-DC" if len(dc_idx) < 2 else "Multi-DC", dc_idx) + "Single-DC" if len(dc_idx) < 2 else "Multi-DC", dc_idx) # noqa: PLR2004 for current_dc_idx in dc_idx: node_count_in_dc = count[current_dc_idx] if current_dc_idx < len(count) else count[0] self.log.debug( @@ -3010,7 +3010,7 @@ def pod_selector(self): class LoaderPodCluster(cluster.BaseLoaderSet, PodCluster): - def __init__(self, + def __init__(self, # noqa: PLR0913 k8s_clusters: List[KubernetesCluster], loader_cluster_name: Optional[str] = None, user_prefix: Optional[str] = None, @@ -3068,7 +3068,7 @@ def _get_docker_image(self): scylla_version = self.params.get('scylla_version') return f"{docker_image}:{scylla_version}" - def add_nodes(self, + def add_nodes(self, # noqa: PLR0913 count: int, ec2_user_data: str = "", # NOTE: 'dc_idx=None' means 'create %count% nodes on each K8S cluster' @@ -3151,7 +3151,7 @@ def get_tags_from_params(params: dict) -> Dict[str, str]: # - if TestRun passes then SCT will cleanup resources by itself if needed # - if TestRun fails then cluster will be kept as
expected tags["keep"] = "alive" - elif int(params.get("test_duration")) > 660: + elif int(params.get("test_duration")) > 660: # noqa: PLR2004 if params.get("cluster_backend") == "k8s-eks": # NOTE: set keep:X where X is hours equal to 'duration + 1' tags["keep"] = str(params.get("test_duration") // 60 + 1) diff --git a/sdcm/cluster_k8s/eks.py b/sdcm/cluster_k8s/eks.py index 5fbdd1aa307..a5174f19639 100644 --- a/sdcm/cluster_k8s/eks.py +++ b/sdcm/cluster_k8s/eks.py @@ -157,7 +157,7 @@ class EksNodePool(CloudK8sNodePool): disk_type: Literal["standard", "io1", "io2", "gp2", "sc1", "st1"] # pylint: disable=too-many-arguments,too-many-locals - def __init__( + def __init__( # noqa: PLR0913 self, k8s_cluster: 'EksCluster', name: str, @@ -329,7 +329,7 @@ class EksCluster(KubernetesCluster, EksClusterCleanupMixin): # pylint: disable= short_cluster_name: str # pylint: disable=too-many-arguments - def __init__(self, + def __init__(self, # noqa: PLR0913 eks_cluster_version, ec2_security_group_ids, ec2_subnet_ids, @@ -754,7 +754,7 @@ class EksScyllaPodCluster(ScyllaPodCluster): nodes: List[EksScyllaPodContainer] # pylint: disable=too-many-arguments - def add_nodes(self, + def add_nodes(self, # noqa: PLR0913 count: int, ec2_user_data: str = "", dc_idx: int = None, @@ -809,7 +809,7 @@ def sort_by_index(item): instances = sorted(instances, key=sort_by_index) return [ec2.get_instance(instance['InstanceId']) for instance in instances] - def _create_instances(self, count, ec2_user_data='', dc_idx=0, az_idx=0, instance_type=None): # pylint: disable=too-many-arguments + def _create_instances(self, count, ec2_user_data='', dc_idx=0, az_idx=0, instance_type=None): # pylint: disable=too-many-arguments # noqa: PLR0913 instances = super()._create_instances(count=count, ec2_user_data=ec2_user_data, dc_idx=dc_idx, az_idx=az_idx, instance_type=instance_type) for instance in instances: diff --git a/sdcm/cluster_k8s/gke.py b/sdcm/cluster_k8s/gke.py index 931faf6b249..fc188db6cd6 100644 --- 
a/sdcm/cluster_k8s/gke.py +++ b/sdcm/cluster_k8s/gke.py @@ -142,7 +142,7 @@ class GkeNodePool(CloudK8sNodePool): k8s_cluster: 'GkeCluster' # pylint: disable=too-many-arguments - def __init__( + def __init__( # noqa: PLR0913 self, k8s_cluster: 'KubernetesCluster', name: str, @@ -267,7 +267,7 @@ class GkeCluster(KubernetesCluster): pools: Dict[str, GkeNodePool] # pylint: disable=too-many-arguments,too-many-locals - def __init__(self, + def __init__(self, # noqa: PLR0913 gke_cluster_version, gke_k8s_release_channel, gce_disk_size, @@ -295,7 +295,7 @@ def __init__(self, dc_parts = gce_datacenter[0].split("-")[:3] self.gce_region = "-".join(dc_parts[:2]) self.gce_zone = f"{self.gce_region}-" - self.gce_zone += availability_zone or (dc_parts[2] if len(dc_parts) == 3 else 'b') + self.gce_zone += availability_zone or (dc_parts[2] if len(dc_parts) == 3 else 'b') # noqa: PLR2004 super().__init__( params=params, cluster_uuid=cluster_uuid, @@ -589,7 +589,7 @@ class GkeScyllaPodCluster(ScyllaPodCluster): PodContainerClass = GkeScyllaPodContainer # pylint: disable=too-many-arguments - def add_nodes(self, + def add_nodes(self, # noqa: PLR0913 count: int, ec2_user_data: str = "", dc_idx: int = None, diff --git a/sdcm/cluster_k8s/mini_k8s.py b/sdcm/cluster_k8s/mini_k8s.py index 270b4866ec6..7f183f17708 100644 --- a/sdcm/cluster_k8s/mini_k8s.py +++ b/sdcm/cluster_k8s/mini_k8s.py @@ -183,7 +183,7 @@ class MinimalClusterBase(KubernetesCluster, metaclass=abc.ABCMeta): # pylint: d POOL_LABEL_NAME = POOL_LABEL_NAME # pylint: disable=too-many-arguments - def __init__(self, mini_k8s_version, params: dict, user_prefix: str = '', region_name: str = None, + def __init__(self, mini_k8s_version, params: dict, user_prefix: str = '', region_name: str = None, # noqa: PLR0913 cluster_uuid: str = None, **_): self.software_version = mini_k8s_version super().__init__(params=params, user_prefix=user_prefix, region_name=region_name, cluster_uuid=cluster_uuid) @@ -590,7 +590,7 @@ def load_images(self, 
images_list: [str]): self.host_node.remoter.run( f"/var/tmp/kind load docker-image {image}", ignore_status=True) - def on_deploy_completed(self): # pylint: disable=too-many-branches + def on_deploy_completed(self): # pylint: disable=too-many-branches # noqa: PLR0912 images_to_cache, images_to_retag, new_scylla_image_tag = [], {}, "" # first setup CNI plugin, otherwise everything else might get broken @@ -608,9 +608,8 @@ def on_deploy_completed(self): # pylint: disable=too-many-branches images_to_cache.extend(self.cert_manager_images) if self.params.get("k8s_local_volume_provisioner_type") != 'static': images_to_cache.append(self.dynamic_local_volume_provisioner_image) - else: - if provisioner_image := self.static_local_volume_provisioner_image: - images_to_cache.append(provisioner_image) + elif provisioner_image := self.static_local_volume_provisioner_image: + images_to_cache.append(provisioner_image) if self.params.get("k8s_use_chaos_mesh"): chaos_mesh_version = ChaosMesh.VERSION if not chaos_mesh_version.startswith("v"): @@ -730,7 +729,7 @@ class LocalMinimalScyllaPodCluster(ScyllaPodCluster): """Represents scylla cluster hosted on locally running minimal k8s clusters such as k3d, minikube or kind""" PodContainerClass = LocalMinimalScyllaPodContainer - def wait_for_nodes_up_and_normal(self, nodes=None, verification_node=None, iterations=20, sleep_time=60, timeout=0): # pylint: disable=too-many-arguments + def wait_for_nodes_up_and_normal(self, nodes=None, verification_node=None, iterations=20, sleep_time=60, timeout=0): # pylint: disable=too-many-arguments # noqa: PLR0913 @retrying(n=iterations, sleep_time=sleep_time, allowed_exceptions=(cluster.ClusterNodesNotReady, UnexpectedExit), message="Waiting for nodes to join the cluster", timeout=timeout) diff --git a/sdcm/coredump.py b/sdcm/coredump.py index e5c67052b70..8cbde05e3d2 100644 --- a/sdcm/coredump.py +++ b/sdcm/coredump.py @@ -59,7 +59,7 @@ def __str__(self): return f'CoreDump[{self.pid}]' # pylint: 
disable=too-many-arguments - def update(self, + def update(self, # noqa: PLR0913 node: 'BaseNode' = None, corefile: str = None, source_timestamp: Optional[float] = None, @@ -323,7 +323,7 @@ def get_list_of_cores_json(self) -> Optional[List[CoreDumpInfo]]: return pid_list def get_list_of_cores(self) -> Optional[List[CoreDumpInfo]]: - if self.systemd_version >= 248: + if self.systemd_version >= 248: # noqa: PLR2004 # since systemd/systemd@0689cfd we have option to get # the coredump information in json format return self.get_list_of_cores_json() @@ -349,7 +349,7 @@ def get_list_of_cores(self) -> Optional[List[CoreDumpInfo]]: continue columns = re.split(r'[ ]{2,}', line) - if len(columns) < 2: + if len(columns) < 2: # noqa: PLR2004 continue pid = columns[1] if re.findall(r'[^0-9]', pid): @@ -358,7 +358,7 @@ def get_list_of_cores(self) -> Optional[List[CoreDumpInfo]]: pids_list.append(CoreDumpInfo(pid=pid, node=self.node)) return pids_list - def update_coredump_info_with_more_information(self, core_info: CoreDumpInfo): + def update_coredump_info_with_more_information(self, core_info: CoreDumpInfo): # noqa: PLR0912 # pylint: disable=too-many-branches coredump_info = self._get_coredumpctl_info(core_info) corefile = '' @@ -397,7 +397,7 @@ def update_coredump_info_with_more_information(self, core_info: CoreDumpInfo): # # Coredump could be absent when file was removed for line in coredump_info.splitlines(): - line = line.strip() + line = line.strip() # noqa: PLW2901 if line.startswith('Executable:'): executable = line[12:].strip() elif line.startswith('Command Line:'): @@ -407,7 +407,7 @@ def update_coredump_info_with_more_information(self, core_info: CoreDumpInfo): # Storage: /var/lib/systemd/coredump/core.vi.1000.6c4de4c206a0476e88444e5ebaaac482.18554.1578994298000000.lz4 (inaccessible) if "inaccessible" in line: continue - line = line.replace('(present)', '') + line = line.replace('(present)', '') # noqa: PLW2901 corefile = line[line.find(':') + 1:].strip() elif 
line.startswith('Timestamp:'): timestring = None @@ -415,9 +415,9 @@ def update_coredump_info_with_more_information(self, core_info: CoreDumpInfo): # Converting time string "Tue 2020-01-14 10:40:25 UTC (6min ago)" to timestamp timestring = re.search(r'Timestamp: ([^\(]+)(\([^\)]+\)|)', line).group(1).strip() time_spat = timestring.split() - if len(time_spat) == 3: + if len(time_spat) == 3: # noqa: PLR2004 fmt = "%a %Y-%m-%d %H:%M:%S" - elif len(time_spat) == 4: + elif len(time_spat) == 4: # noqa: PLR2004 timezone = time_spat[3].strip() if re.search(r'^[+-][0-9]{2}$', timezone): # On some systems two digit timezone is not recognized as correct timezone diff --git a/sdcm/db_log_reader.py b/sdcm/db_log_reader.py index 3a1e0f6eebe..7042360f501 100644 --- a/sdcm/db_log_reader.py +++ b/sdcm/db_log_reader.py @@ -50,7 +50,7 @@ class DbLogReader(Process): ] # pylint: disable=too-many-arguments - def __init__(self, + def __init__(self, # noqa: PLR0913 system_log: str, remoter: CommandRunner, node_name: str, @@ -76,7 +76,7 @@ def __init__(self, def _continuous_event_patterns(self): return get_pattern_to_event_to_func_mapping(node=self._node_name) - def _read_and_publish_events(self) -> None: + def _read_and_publish_events(self) -> None: # noqa: PLR0915 """Search for all known patterns listed in `sdcm.sct_events.database.SYSTEM_ERROR_EVENTS'.""" # pylint: disable=too-many-branches,too-many-locals,too-many-statements @@ -92,7 +92,7 @@ def _read_and_publish_events(self) -> None: db_file.seek(self._last_log_position) for index, line in enumerate(db_file, start=self._last_line_no + 1): # Postpone processing line with no ending in case if half of line is written to the disc - if line[-1] == '\n' or self._skipped_end_line > 20: + if line[-1] == '\n' or self._skipped_end_line > 20: # noqa: PLR2004 self._skipped_end_line = 0 else: self._skipped_end_line += 1 @@ -106,7 +106,7 @@ def _read_and_publish_events(self) -> None: pass if self._log_lines: - line = line.strip() + line = 
line.strip() # noqa: PLW2901 for pattern in self.EXCLUDE_FROM_LOGGING: if pattern in line: break diff --git a/sdcm/db_stats.py b/sdcm/db_stats.py index 1151b7bfd5e..6c22f40a06a 100644 --- a/sdcm/db_stats.py +++ b/sdcm/db_stats.py @@ -69,7 +69,7 @@ def stddev(lst): return sqrt(sum((x - mean)**2 for x in lst) / len(lst)) -def get_stress_cmd_params(cmd): +def get_stress_cmd_params(cmd): # noqa: PLR0912 """ Parsing cassandra stress command :param cmd: stress cmd @@ -270,7 +270,7 @@ def query(self, query, start, end, scrap_metrics_step=None): @staticmethod def _check_start_end_time(start_time, end_time): - if end_time - start_time < 120: + if end_time - start_time < 120: # noqa: PLR2004 LOGGER.warning("Time difference too low to make a query [start_time: %s, end_time: %s", start_time, end_time) return False @@ -607,17 +607,16 @@ def get_setup_details(self): for key, value in test_params: if key in exclude_details or (isinstance(key, str) and key.startswith('stress_cmd')): # pylint: disable=no-else-continue continue + elif is_gce and key in \ + ['instance_type_loader', # pylint: disable=no-else-continue + 'instance_type_monitor', + 'instance_type_db']: + # exclude these params from gce run + continue + elif key == 'n_db_nodes' and isinstance(value, str) and re.search(r'\s', value): # multidc + setup_details['n_db_nodes'] = sum([int(i) for i in value.split()]) else: - if is_gce and key in \ - ['instance_type_loader', # pylint: disable=no-else-continue - 'instance_type_monitor', - 'instance_type_db']: - # exclude these params from gce run - continue - elif key == 'n_db_nodes' and isinstance(value, str) and re.search(r'\s', value): # multidc - setup_details['n_db_nodes'] = sum([int(i) for i in value.split()]) - else: - setup_details[key] = value + setup_details[key] = value if self.params.get('cluster_backend') == 'aws': setup_details["ami_tags_db_scylla"] = [] @@ -657,7 +656,7 @@ def get_test_details(self): test_details['log_files'] = {} return test_details - def 
create_test_stats(self, sub_type=None, specific_tested_stats=None, # pylint: disable=too-many-arguments + def create_test_stats(self, sub_type=None, specific_tested_stats=None, # pylint: disable=too-many-arguments # noqa: PLR0913 doc_id_with_timestamp=False, append_sub_test_to_name=True, test_name=None, test_index=None): if not self.create_stats: @@ -715,7 +714,7 @@ def update_stress_cmd_details(self, cmd, prefix='', stresser="cassandra-stress", def _calc_stats(self, ps_results): try: - if not ps_results or len(ps_results) <= 3: + if not ps_results or len(ps_results) <= 3: # noqa: PLR2004 self.log.error("Not enough data from Prometheus: %s" % ps_results) return {} stat = {} @@ -808,7 +807,7 @@ def calculate_stats_total(self): self._stats['results']['stats_total'] = total_stats # pylint: disable=too-many-arguments,too-many-locals - def update_test_details(self, errors=None, coredumps=None, scylla_conf=False, extra_stats=None, alternator=False, + def update_test_details(self, errors=None, coredumps=None, scylla_conf=False, extra_stats=None, alternator=False, # noqa: PLR0913 scrap_metrics_step=None): if not self.create_stats: return diff --git a/sdcm/ec2_client.py b/sdcm/ec2_client.py index bd23062ab60..dd625470065 100644 --- a/sdcm/ec2_client.py +++ b/sdcm/ec2_client.py @@ -68,7 +68,7 @@ def _get_ec2_client(self, region_name=None) -> EC2Client: boto3.setup_default_session(region_name=region_name) return self._get_ec2_client() - def _request_spot_instance(self, instance_type, image_id, region_name, network_if, spot_price, key_pair='', # pylint: disable=too-many-arguments + def _request_spot_instance(self, instance_type, image_id, region_name, network_if, spot_price, key_pair='', # pylint: disable=too-many-arguments # noqa: PLR0913 user_data='', count=1, duration=0, request_type='one-time', block_device_mappings=None, aws_instance_profile=None, placement_group_name=None): """ @@ -107,7 +107,7 @@ def _request_spot_instance(self, instance_type, image_id, region_name, 
network_i LOGGER.debug('Spot requests: %s', request_ids) return request_ids - def _request_spot_fleet(self, instance_type, image_id, region_name, network_if, key_pair='', user_data='', count=3, # pylint: disable=too-many-arguments + def _request_spot_fleet(self, instance_type, image_id, region_name, network_if, key_pair='', user_data='', count=3, # pylint: disable=too-many-arguments # noqa: PLR0913 block_device_mappings=None, aws_instance_profile=None, placement_group_name=None): spot_price = self._get_spot_price(instance_type) @@ -258,7 +258,7 @@ def add_tags(self, instance_ids: list[str] | list[Instance] | str | Instance, ta tags += tags_as_ec2_tags(TestConfig().common_tags()) self._client.create_tags(Resources=instance_ids, Tags=tags) - def create_spot_instances(self, instance_type, image_id, region_name, network_if, key_pair='', user_data='', # pylint: disable=too-many-arguments + def create_spot_instances(self, instance_type, image_id, region_name, network_if, key_pair='', user_data='', # pylint: disable=too-many-arguments # noqa: PLR0913 count=1, duration=0, block_device_mappings=None, aws_instance_profile=None, placement_group_name=None): """ Create spot instances @@ -300,7 +300,7 @@ def create_spot_instances(self, instance_type, image_id, region_name, network_if instances = [self.get_instance(instance_id) for instance_id in instance_ids] return instances - def create_spot_fleet(self, instance_type, image_id, region_name, network_if, key_pair='', user_data='', count=3, # pylint: disable=too-many-arguments + def create_spot_fleet(self, instance_type, image_id, region_name, network_if, key_pair='', user_data='', count=3, # pylint: disable=too-many-arguments # noqa: PLR0913 block_device_mappings=None, aws_instance_profile=None, placement_group_name=None): """ Create spot fleet diff --git a/sdcm/fill_db_data.py b/sdcm/fill_db_data.py index 22d7d387850..9b36a92a163 100644 --- a/sdcm/fill_db_data.py +++ b/sdcm/fill_db_data.py @@ -1055,7 +1055,7 @@ class 
FillDatabaseData(ClusterTester): ], 'results': [[[k] for k in range(0, 5)], [], - [[k] for k in range(0, 5) if not k == 2], + [[k] for k in range(0, 5) if not k == 2], # noqa: PLR2004 [], [], [], @@ -3118,7 +3118,7 @@ def cql_create_tables(self, session): with self._execute_and_log(f'Created tables for test "{test_name}" in {{}} seconds'): for create_table in item['create_tables']: if self.version_cdc_support(): - create_table = self._enable_cdc(item, create_table) + create_table = self._enable_cdc(item, create_table) # noqa: PLW2901 # wait a while before creating index, there is a delay of create table for # waiting the schema agreement if 'CREATE INDEX' in create_table.upper(): @@ -3383,11 +3383,11 @@ def fill_table(): self.log.info('running now session.execute') full_query_res = self.rows_to_list(session.execute(statement)) if not full_query_res: - assert f'Query "{statement}" returned no entries' + assert f'Query "{statement}" returned no entries' # noqa: PLW0129 self.log.info('running now fetch_all_rows') full_res = self.rows_to_list( fetch_all_rows(session=session, default_fetch_size=100, statement=statement)) if not full_res: - assert f'Paged query "{statement}" returned no value' + assert f'Paged query "{statement}" returned no value' # noqa: PLW0129 self.log.info('will now compare results from session.execute and fetch_all_rows') self.assertEqual(sorted(full_query_res), sorted(full_res), "Results should be identical") diff --git a/sdcm/gemini_thread.py b/sdcm/gemini_thread.py index fd0eca6da30..d60ff8ec180 100644 --- a/sdcm/gemini_thread.py +++ b/sdcm/gemini_thread.py @@ -62,7 +62,7 @@ class GeminiStressThread(DockerBasedStressThread): # pylint: disable=too-many-i DOCKER_IMAGE_PARAM_NAME = "stress_image.gemini" - def __init__(self, test_cluster, oracle_cluster, loaders, stress_cmd, timeout=None, params=None): # pylint: disable=too-many-arguments + def __init__(self, test_cluster, oracle_cluster, loaders, stress_cmd, timeout=None, params=None): # pylint: 
disable=too-many-arguments # noqa: PLR0913 super().__init__(loader_set=loaders, stress_cmd=stress_cmd, timeout=timeout, params=params) self.test_cluster = test_cluster self.oracle_cluster = oracle_cluster @@ -137,7 +137,7 @@ def _run_stress(self, loader, loader_idx, cpu_idx): if result.exited: gemini_stress_event.add_result(result=result) gemini_stress_event.severity = Severity.ERROR - else: + else: # noqa: PLR5501 if result.stderr: gemini_stress_event.add_result(result=result) gemini_stress_event.severity = Severity.WARNING diff --git a/sdcm/loader.py b/sdcm/loader.py index 9d9e5ada6b6..21cf5bed72b 100644 --- a/sdcm/loader.py +++ b/sdcm/loader.py @@ -50,7 +50,7 @@ class StressExporter(FileFollowerThread, metaclass=ABCMeta): METRIC_NAMES = ['lat_mean', 'lat_med', 'lat_perc_95', 'lat_perc_99', 'lat_perc_999', 'lat_max'] # pylint: disable=too-many-arguments - def __init__(self, instance_name: str, metrics: NemesisMetrics, stress_operation: str, stress_log_filename: str, + def __init__(self, instance_name: str, metrics: NemesisMetrics, stress_operation: str, stress_log_filename: str, # noqa: PLR0913 loader_idx: int, cpu_idx: int = 1): super().__init__() self.metrics = metrics @@ -131,7 +131,7 @@ def run(self): class CassandraStressExporter(StressExporter): # pylint: disable=too-many-arguments - def __init__(self, instance_name: str, metrics: NemesisMetrics, stress_operation: str, stress_log_filename: str, + def __init__(self, instance_name: str, metrics: NemesisMetrics, stress_operation: str, stress_log_filename: str, # noqa: PLR0913 loader_idx: int, cpu_idx: int = 1): self.keyspace_regex = re.compile(r'.*Keyspace:\s(.*?)$') @@ -167,7 +167,7 @@ class CassandraStressHDRExporter(StressExporter): # pylint: disable=too-many-arguments METRIC_NAMES = ['lat_perc_50', 'lat_perc_90', 'lat_perc_99', 'lat_perc_999', "lat_perc_9999"] - def __init__(self, instance_name: str, metrics: NemesisMetrics, stress_operation: str, + def __init__(self, instance_name: str, metrics: 
NemesisMetrics, stress_operation: str, # noqa: PLR0913 stress_log_filename: str, loader_idx: int, cpu_idx: int = 1): super().__init__(instance_name, metrics, stress_operation, stress_log_filename, loader_idx, cpu_idx) self.log_start_time = 0 @@ -247,7 +247,7 @@ def split_line(line: str) -> list: class CassandraHarryStressExporter(StressExporter): # pylint: disable=too-many-arguments,useless-super-delegation - def __init__(self, instance_name: str, metrics: NemesisMetrics, stress_operation: str, stress_log_filename: str, + def __init__(self, instance_name: str, metrics: NemesisMetrics, stress_operation: str, stress_log_filename: str, # noqa: PLR0913 loader_idx: int, cpu_idx: int = 1): super().__init__(instance_name, metrics, stress_operation, stress_log_filename, diff --git a/sdcm/logcollector.py b/sdcm/logcollector.py index 559c1a7ac48..510dc98f7c6 100644 --- a/sdcm/logcollector.py +++ b/sdcm/logcollector.py @@ -70,7 +70,7 @@ class CollectingNode(AutoSshContainerMixin, WebDriverContainerMixin): # pylint: disable=too-few-public-methods,too-many-instance-attributes logdir = None - def __init__(self, name, ssh_login_info=None, instance=None, global_ip=None, grafana_ip=None, tags=None, logdir=None): # pylint: disable=too-many-arguments + def __init__(self, name, ssh_login_info=None, instance=None, global_ip=None, grafana_ip=None, tags=None, logdir=None): # pylint: disable=too-many-arguments # noqa: PLR0913 if logdir: self.logdir = logdir self._containers = {} diff --git a/sdcm/mgmt/cli.py b/sdcm/mgmt/cli.py index 143056121e8..62a53cd77f7 100644 --- a/sdcm/mgmt/cli.py +++ b/sdcm/mgmt/cli.py @@ -546,7 +546,7 @@ def create_restore_task(self, restore_schema=False, restore_data=False, location LOGGER.debug("Created task id is: {}".format(task_id)) return RestoreTask(task_id=task_id, cluster_id=self.id, manager_node=self.manager_node) - def create_backup_task(self, dc_list=None, # pylint: disable=too-many-arguments,too-many-locals,too-many-branches + def 
create_backup_task(self, dc_list=None, # pylint: disable=too-many-arguments,too-many-locals,too-many-branches # noqa: PLR0913, PLR0912 dry_run=None, interval=None, keyspace_list=None, cron=None, location_list=None, num_retries=None, rate_limit_list=None, retention=None, show_tables=None, snapshot_parallel_list=None, start_date=None, upload_parallel_list=None, legacy_args=None): @@ -595,7 +595,7 @@ def create_backup_task(self, dc_list=None, # pylint: disable=too-many-arguments LOGGER.debug("Created task id is: {}".format(task_id)) return BackupTask(task_id=task_id, cluster_id=self.id, manager_node=self.manager_node) - def create_repair_task(self, dc_list=None, # pylint: disable=too-many-arguments + def create_repair_task(self, dc_list=None, # pylint: disable=too-many-arguments # noqa: PLR0913 keyspace=None, interval=None, num_retries=None, fail_fast=None, intensity=None, parallel=None, cron=None, start_date=None): # the interval string: @@ -818,7 +818,7 @@ def get_hosts_health(self): dict_hosts_health = {} for dc_name, hosts_table in dict_status_tables.items(): - if len(hosts_table) < 2: + if len(hosts_table) < 2: # noqa: PLR2004 LOGGER.debug("Cluster: {} - {} has no hosts health report".format(self.id, dc_name)) else: list_titles_row = hosts_table[0] @@ -853,7 +853,7 @@ def get_hosts_health(self): return dict_hosts_health class _HostHealth(): # pylint: disable=too-few-public-methods - def __init__(self, status, rtt, ssl, rest_status, rest_rtt, rest_http_status_code=None): # pylint: disable=too-many-arguments + def __init__(self, status, rtt, ssl, rest_status, rest_rtt, rest_http_status_code=None): # pylint: disable=too-many-arguments # noqa: PLR0913 self.status = status self.rtt = rtt self.rest_status = rest_status @@ -956,7 +956,7 @@ def get_cluster_hosts_ip(db_cluster): def get_cluster_hosts_with_ips(db_cluster): return [[n, n.ip_address] for n in db_cluster.nodes] - def add_cluster(self, name, host=None, db_cluster=None, client_encrypt=None, 
disable_automatic_repair=True, # pylint: disable=too-many-arguments + def add_cluster(self, name, host=None, db_cluster=None, client_encrypt=None, disable_automatic_repair=True, # pylint: disable=too-many-arguments # noqa: PLR0913 auth_token=None, credentials=None): """ :param name: cluster name @@ -1114,7 +1114,7 @@ class SCTool: def __init__(self, manager_node): self.manager_node = manager_node - def run(self, # pylint: disable=too-many-arguments + def run(self, # pylint: disable=too-many-arguments # noqa: PLR0913 cmd, is_verify_errorless_result=False, parse_table_res=True, diff --git a/sdcm/mgmt/operator.py b/sdcm/mgmt/operator.py index 06c395273c2..1daefd11640 100644 --- a/sdcm/mgmt/operator.py +++ b/sdcm/mgmt/operator.py @@ -168,7 +168,7 @@ class OperatorManagerCluster(ManagerCluster): scylla_cluster = None _id = None - def __init__(self, manager_node, cluster_id=None, client_encrypt=False, + def __init__(self, manager_node, cluster_id=None, client_encrypt=False, # noqa: PLR0913 cluster_name: str = None, scylla_cluster=None): self.cluster_name = cluster_name self.scylla_cluster = scylla_cluster @@ -223,7 +223,7 @@ def wait_for_healthchecks(self): throw_exc=True, ) - def _create_operator_backup_task(self, dc_list=None, interval=None, keyspace_list=None, location_list=None, + def _create_operator_backup_task(self, dc_list=None, interval=None, keyspace_list=None, location_list=None, # noqa: PLR0913 num_retries=None, rate_limit_list=None, retention=None, cron=None, snapshot_parallel_list=None, start_date=None, upload_parallel_list=None, name=None) -> ScyllaOperatorBackupTask: @@ -254,7 +254,7 @@ def _create_operator_backup_task(self, dc_list=None, interval=None, keyspace_lis return so_backup_task # pylint: disable=too-many-locals - def create_backup_task( + def create_backup_task( # noqa: PLR0913 self, dc_list=None, dry_run=None, @@ -289,7 +289,7 @@ def create_backup_task( ) return wait_for(lambda: self.get_mgr_backup_task(so_task), step=2, timeout=300) - def 
_create_scylla_operator_repair_task(self, dc_list=None, keyspace=None, interval=None, num_retries=None, + def _create_scylla_operator_repair_task(self, dc_list=None, keyspace=None, interval=None, num_retries=None, # noqa: PLR0913 fail_fast=None, intensity=None, parallel=None, name=None) -> ScyllaOperatorRepairTask: if name is None: @@ -312,7 +312,7 @@ def _create_scylla_operator_repair_task(self, dc_list=None, keyspace=None, inter raise return so_repair_task - def create_repair_task(self, dc_list=None, # pylint: disable=too-many-arguments,arguments-differ + def create_repair_task(self, dc_list=None, # pylint: disable=too-many-arguments,arguments-differ # noqa: PLR0913 keyspace=None, interval=None, num_retries=None, fail_fast=None, intensity=None, parallel=None, name=None) -> RepairTask: # NOTE: wait for the 'healthcheck' tasks be 'DONE' before starting the repair one. @@ -420,7 +420,7 @@ def __init__(self, manager_node, scylla_cluster): def rollback_upgrade(self, scylla_mgmt_address): raise NotImplementedError() - def add_cluster(self, name, host=None, db_cluster=None, client_encrypt=None, disable_automatic_repair=True, + def add_cluster(self, name, host=None, db_cluster=None, client_encrypt=None, disable_automatic_repair=True, # noqa: PLR0913 auth_token=None, credentials=None): raise NotImplementedError() diff --git a/sdcm/microbenchmarking.py b/sdcm/microbenchmarking.py index 0e0db0bdf25..ca6430dcd2e 100755 --- a/sdcm/microbenchmarking.py +++ b/sdcm/microbenchmarking.py @@ -106,7 +106,7 @@ def _get_prior_tests(self, filter_path, additional_filter=''): ) return output - def check_regression(self, current_results): # pylint: disable=arguments-differ + def check_regression(self, current_results): # pylint: disable=arguments-differ # noqa: PLR0915 # pylint: disable=too-many-locals, too-many-statements if not current_results: @@ -262,11 +262,11 @@ def get_diffs(cur_val, best_result_val, last_val): } - if ((diff_last and diff_last < -5) or (diff_best and diff_best < 
-5)): + if ((diff_last and diff_last < -5) or (diff_best and diff_best < -5)): # noqa: PLR2004 report_results[test_type]["has_diff"] = True stats["has_regression"] = True - if ((diff_last and diff_last > 50) or (diff_best and diff_best > 50)): + if ((diff_last and diff_last > 50) or (diff_best and diff_best > 50)): # noqa: PLR2004 report_results[test_type]['has_improve'] = True stats['has_improvement'] = True diff --git a/sdcm/monitorstack/__init__.py b/sdcm/monitorstack/__init__.py index 115d4951855..af00cbbd09c 100644 --- a/sdcm/monitorstack/__init__.py +++ b/sdcm/monitorstack/__init__.py @@ -37,7 +37,7 @@ class ErrorUploadAnnotations(Exception): pass -def restore_monitoring_stack(test_id, date_time=None): # pylint: disable=too-many-return-statements,too-many-locals +def restore_monitoring_stack(test_id, date_time=None): # pylint: disable=too-many-return-statements,too-many-locals # noqa: PLR0911 if not is_docker_available(): return False @@ -352,7 +352,7 @@ def restore_sct_dashboards(grafana_docker_port, sct_dashboard_file): data=json.dumps(dashboard_config), headers={'Content-Type': 'application/json'}) - if res.status_code != 200: + if res.status_code != 200: # noqa: PLR2004 LOGGER.info('Error uploading dashboard %s. Error message %s', sct_dashboard_file, res.text) raise ErrorUploadSCTDashboard('Error uploading dashboard {}. Error message {}'.format( sct_dashboard_file, @@ -380,7 +380,7 @@ def restore_annotations_data(monitoring_stack_dir, grafana_docker_port): annotations_url = f"http://localhost:{grafana_docker_port}/api/annotations" for an in annotations: # pylint: disable=invalid-name res = requests.post(annotations_url, data=json.dumps(an), headers={'Content-Type': 'application/json'}) - if res.status_code != 200: + if res.status_code != 200: # noqa: PLR2004 LOGGER.info('Error during uploading annotation %s. Error message %s', an, res.text) raise ErrorUploadAnnotations('Error during uploading annotation {}. 
Error message {}'.format(an, res.text)) diff --git a/sdcm/nemesis.py b/sdcm/nemesis.py index 17158c6876a..d8373549aae 100644 --- a/sdcm/nemesis.py +++ b/sdcm/nemesis.py @@ -250,10 +250,10 @@ def _init_num_deletions_factor(self): if not isinstance(stress_cmds, list): stress_cmds = [stress_cmds] for stress_cmd in stress_cmds: - stress_cmd = stress_cmd.split() + stress_cmd_splitted = stress_cmd.split() # In case background load has writes, we can delete all available partitions, # since they are rewritten. Otherwise, we can only delete some of them. - if 'scylla-bench' in stress_cmd and '-mode=write' in stress_cmd: + if 'scylla-bench' in stress_cmd_splitted and '-mode=write' in stress_cmd_splitted: self.num_deletions_factor = 1 break @@ -415,7 +415,7 @@ def report(self): self.log.info(operation) # pylint: disable=too-many-arguments,unused-argument - def get_list_of_methods_compatible_with_backend( + def get_list_of_methods_compatible_with_backend( # noqa: PLR0913 self, disruptive: Optional[bool] = None, run_with_gemini: Optional[bool] = None, @@ -444,7 +444,7 @@ def _is_it_on_kubernetes(self) -> bool: return isinstance(getattr(self.tester, "db_cluster", None), PodCluster) # pylint: disable=too-many-arguments,unused-argument - def get_list_of_methods_by_flags( # pylint: disable=too-many-locals + def get_list_of_methods_by_flags( # pylint: disable=too-many-locals # noqa: PLR0913 self, disruptive: Optional[bool] = None, run_with_gemini: Optional[bool] = None, @@ -1268,7 +1268,7 @@ def disrupt_nodetool_decommission(self, add_node=True): return self._nodetool_decommission(add_node=add_node) def disrupt_nodetool_seed_decommission(self, add_node=True): - if len(self.cluster.seed_nodes) < 2: + if len(self.cluster.seed_nodes) < 2: # noqa: PLR2004 raise UnsupportedNemesis("To running seed decommission the cluster must contains at least 2 seed nodes") if not self.target_node.is_seed: @@ -1332,7 +1332,7 @@ def _verify_resharding_on_k8s(self, cpus, dc_idx): # Calculate the time 
spent for resharding. We need to have it be bigger than 2minutes # because it is the timeout of the liveness probe for Scylla pods. resharding_time = time.time() - resharding_started - if resharding_time < 120: + if resharding_time < 120: # noqa: PLR2004 self.log.warning( "Resharding was too fast - '%s's (<120s) on the '%s' node. " "So, nemesis didn't cover the case.", @@ -1344,9 +1344,9 @@ def _verify_resharding_on_k8s(self, cpus, dc_idx): # Check that liveness probe didn't report any errors # https://github.com/scylladb/scylla-operator/issues/894 - liveness_probe_failures = list(liveness_probe_failures) - assert not liveness_probe_failures, ( - f"There are liveness probe failures: {liveness_probe_failures}") + liveness_probe_failures_return = list(liveness_probe_failures) + assert not liveness_probe_failures_return, ( + f"There are liveness probe failures: {liveness_probe_failures_return}") self.log.info("Resharding has successfully ended on whole Scylla cluster.") @@ -1358,7 +1358,7 @@ def disrupt_nodetool_flush_and_reshard_on_kubernetes(self): raise UnsupportedNemesis('It is supported only on kubernetes') dc_idx = 0 for node in self.cluster.nodes: - if hasattr(node.k8s_cluster, 'eks_cluster_version') and node.scylla_shards >= 7: + if hasattr(node.k8s_cluster, 'eks_cluster_version') and node.scylla_shards >= 7: # noqa: PLR2004 dc_idx = node.dc_idx # Calculate new value for the CPU cores dedicated for Scylla pods @@ -1471,7 +1471,7 @@ def _disrupt_kubernetes_then_decommission_and_add_scylla_node(self, disruption_m pod_readiness_timeout_minutes=30) def _get_neighbour_scylla_pods(self, scylla_pod): - if self.tester.params.get('k8s_tenants_num') < 2: + if self.tester.params.get('k8s_tenants_num') < 2: # noqa: PLR2004 return [] matched_pods = KubernetesOps.list_pods( scylla_pod.k8s_cluster, namespace=None, @@ -1580,7 +1580,7 @@ def disrupt_load_and_stream(self): column_num = SstableLoadUtils.calculate_columns_count_in_table(self.target_node) # Run load-and-stream 
test on regular standard1 table of cassandra-stress. - if column_num < 5: + if column_num < 5: # noqa: PLR2004 raise UnsupportedNemesis("Schema doesn't match the snapshot, not uploading") test_data = SstableLoadUtils.get_load_test_data_inventory(column_num, big_sstable=False, load_and_stream=True) @@ -1606,7 +1606,7 @@ def disrupt_nodetool_refresh(self, big_sstable: bool = False): # Note: when issue #6617 is fixed, we can try to load snapshot (cols=5) to a table (1 < cols < 5), # expect that refresh will fail (no serious db error). - if 1 < column_num < 5: + if 1 < column_num < 5: # noqa: PLR2004 raise UnsupportedNemesis("Schema doesn't match the snapshot, not uploading") test_data = SstableLoadUtils.get_load_test_data_inventory(column_num, big_sstable=big_sstable, @@ -1789,9 +1789,9 @@ def call_random_disrupt_method(self, disrupt_methods=None, predefined_sequence=F # methods and don't wait to long time to meet the balance if the test # duration is short. test_duration = self.cluster.params.get('test_duration') - if test_duration < 600: # less than 10 hours + if test_duration < 600: # less than 10 hours # noqa: PLR2004 rate = 1 - elif test_duration < 4320: # less than 3 days + elif test_duration < 4320: # less than 3 days # noqa: PLR2004 rate = 2 else: rate = 3 @@ -2394,7 +2394,7 @@ def disrupt_delete_by_rows_range(self): ks_cf = 'scylla_bench.test' # Step-1: delete_half_partition or delete_by_range_using_timestamp - if random.random() > 0.5: + if random.random() > 0.5: # noqa: PLR2004 partitions_for_exclude = self.delete_half_partition(ks_cf) else: partitions_for_exclude = self.delete_by_range_using_timestamp(ks_cf) @@ -2661,7 +2661,7 @@ def modify_table_speculative_retry(self): "'%sms'" % random.randint(1, 1000)) self._modify_table_property(name="speculative_retry", val=random.choice(options)) - def modify_table_twcs_window_size(self): + def modify_table_twcs_window_size(self): # noqa: PLR0915 """ Change window size for tables with TWCS After window size of TWCS 
changed, tables should be @@ -2697,17 +2697,16 @@ def set_new_twcs_settings(settings: Dict[str, Any]) -> Dict[str, Any]: if current_unit == "DAYS": current_size = current_size + 1 elif current_unit == "HOURS": - if (current_size // 24) > 2: + if (current_size // 24) > 2: # noqa: PLR2004 current_unit = "DAYS" current_size = 3 else: current_size += 10 + elif (current_size // 60) > 10: # noqa: PLR2004 + current_unit = "HOURS" + current_size = 11 else: - if (current_size // 60) > 10: - current_unit = "HOURS" - current_size = 11 - else: - current_size += 35 + current_size += 35 settings["gc"] = current_size * multiplier * expected_sstable_number // 2 settings["dttl"] = current_size * multiplier * expected_sstable_number @@ -2792,7 +2791,7 @@ def disrupt_mgmt_backup_specific_keyspaces(self): def disrupt_mgmt_backup(self): self._mgmt_backup(backup_specific_tables=False) - def disrupt_mgmt_restore(self): + def disrupt_mgmt_restore(self): # noqa: PLR0915 def get_total_scylla_partition_size(): result = self.cluster.nodes[0].remoter.run("df -k | grep /var/lib/scylla") # Size in KB free_space_size = int(result.stdout.split()[1]) / 1024 ** 2 # Converting to GB @@ -2802,11 +2801,11 @@ def choose_snapshot(snapshots_dict): snapshot_groups_by_size = snapshots_dict["snapshots_sizes"] total_partition_size = get_total_scylla_partition_size() all_snapshot_sizes = sorted(list(snapshot_groups_by_size.keys()), reverse=True) - fitting_snapshot_sizes = [size for size in all_snapshot_sizes if total_partition_size / size >= 20] - if self.tester.test_duration < 1000: + fitting_snapshot_sizes = [size for size in all_snapshot_sizes if total_partition_size / size >= 20] # noqa: PLR2004 + if self.tester.test_duration < 1000: # noqa: PLR2004 # Since verifying the restored data takes a long time, the nemesis limits the size of the restored # backup based on the test duration - fitting_snapshot_sizes = [size for size in fitting_snapshot_sizes if size < 50] + fitting_snapshot_sizes = [size for size in 
fitting_snapshot_sizes if size < 50] # noqa: PLR2004 # The restore should not take more than 5% of the space total space in /var/lib/scylla assert fitting_snapshot_sizes, "There's not enough space for any snapshot restoration" @@ -3103,7 +3102,7 @@ def _validate_snapshot(self, nodetool_cmd: str, snapshot_content: namedtuple): f"Expected content: {sorted(keyspace_table)} \n " f"Actual snapshot content: {sorted(snapshot_content_list)}") - def disrupt_snapshot_operations(self): # pylint: disable=too-many-statements + def disrupt_snapshot_operations(self): # pylint: disable=too-many-statements # noqa: PLR0915 """ Extend this nemesis to run 'nodetool snapshot' more options including multiple tables. Random choose between: @@ -3257,7 +3256,7 @@ def get_rate_limit_for_network_disruption(self) -> Optional[str]: avg_bitrate_per_node = (received_bytes_over_time[-1] - received_bytes_over_time[0]) / 600 avg_mpbs_per_node = avg_bitrate_per_node / 1024 / 1024 - if avg_mpbs_per_node > 10: + if avg_mpbs_per_node > 10: # noqa: PLR2004 min_limit = int(round(avg_mpbs_per_node * 0.30)) max_limit = int(round(avg_mpbs_per_node * 0.70)) rate_limit_suffix = "mbps" @@ -3394,7 +3393,7 @@ def _wait_all_nodes_un(self): for node in self.cluster.nodes: self.cluster.check_nodes_up_and_normal(verification_node=node) - def disrupt_remove_node_then_add_node(self): # pylint: disable=too-many-branches + def disrupt_remove_node_then_add_node(self): # pylint: disable=too-many-branches # noqa: PLR0915 """ https://docs.scylladb.com/operating-scylla/procedures/cluster-management/remove_node/ @@ -3419,7 +3418,7 @@ def disrupt_remove_node_then_add_node(self): # pylint: disable=too-many-branche # node_to_remove is single/last seed in cluster, before # it will be terminated, choose new seed node num_of_seed_nodes = len(self.cluster.seed_nodes) - if node_to_remove.is_seed and num_of_seed_nodes < 2: + if node_to_remove.is_seed and num_of_seed_nodes < 2: # noqa: PLR2004 new_seed_node = random.choice([n for n in 
self.cluster.nodes if n is not node_to_remove]) new_seed_node.set_seed_flag(True) self.cluster.update_seed_provider() @@ -3657,7 +3656,7 @@ def _iptables_randomly_get_disrupting_target(): f'{target_type} --reject-with {reject_with}' return 'dropped', f'{target_type}' - def _run_commands_wait_and_cleanup( # pylint: disable=too-many-arguments + def _run_commands_wait_and_cleanup( # pylint: disable=too-many-arguments # noqa: PLR0913 self, node, name: str, start_commands: List[str], cleanup_commands: List[str] = None, wait_time: int = 0): """ @@ -3716,7 +3715,7 @@ def disrupt_network_start_stop_interface(self): # pylint: disable=invalid-name self.target_node.start_network_interface() self._wait_all_nodes_un() - def _call_disrupt_func_after_expression_logged(self, + def _call_disrupt_func_after_expression_logged(self, # noqa: PLR0913 log_follower: Iterable[str], disrupt_func: Callable, disrupt_func_kwargs: dict = None, @@ -4043,7 +4042,7 @@ def disrupt_enable_disable_table_encryption_aws_kms_provider_with_rotation(self) additional_scylla_encryption_options={'key_provider': 'KmsKeyProviderFactory'}) @scylla_versions(("2023.1.1-dev", None)) - def _enable_disable_table_encryption(self, enable_kms_key_rotation, additional_scylla_encryption_options=None): + def _enable_disable_table_encryption(self, enable_kms_key_rotation, additional_scylla_encryption_options=None): # noqa: PLR0915 if self.cluster.params.get("cluster_backend") != "aws": raise UnsupportedNemesis("This nemesis is supported only on the AWS cluster backend") @@ -4783,7 +4782,7 @@ def disrupt_add_remove_mv(self): def disrupt_toggle_audit_syslog(self): self._disrupt_toggle_audit(store="syslog") - def _disrupt_toggle_audit(self, store: AuditStore): + def _disrupt_toggle_audit(self, store: AuditStore): # noqa: PLR0915 """ Enable audit log with all categories and user keyspaces (if audit already enabled, disable it and finish the Nemesis), verify audit log content, @@ -4836,12 +4835,12 @@ def 
_disrupt_toggle_audit(self, store: AuditStore): rows = audit.get_audit_log(from_datetime=audit_start, category="DML", limit_rows=1500) # filter out USE keyspace rows due to https://github.com/scylladb/scylla-enterprise/issues/3169 rows = [row for row in rows if not row.operation.startswith("USE")] - if len(rows) != 1000: + if len(rows) != 1000: # noqa: PLR2004 errors.append(f"Audit log for DML contains {len(rows)} rows while should contain 1000 rows") for row in rows: LOGGER.error("DML audit log row: %s", row) rows = audit.get_audit_log(from_datetime=audit_start, category="QUERY", limit_rows=1500) - if len(rows) != 1000: + if len(rows) != 1000: # noqa: PLR2004 errors.append(f"Audit log for QUERY contains {len(rows)} rows while should contain 1000 rows") for row in rows: LOGGER.error("QUERY audit log row: %s", row) @@ -4949,7 +4948,7 @@ def are_gate_closed_messages_raised(log_reader): "After re-enabling binary and gossip, 'gate closed' messages continue to appear" -def disrupt_method_wrapper(method, is_exclusive=False): # pylint: disable=too-many-statements +def disrupt_method_wrapper(method, is_exclusive=False): # pylint: disable=too-many-statements # noqa: PLR0915 """ Log time elapsed for method to run @@ -5007,7 +5006,7 @@ def data_validation_prints(args): args[0].log.debug(f'Data validator error: {err}') @wraps(method) - def wrapper(*args, **kwargs): # pylint: disable=too-many-statements + def wrapper(*args, **kwargs): # pylint: disable=too-many-statements # noqa: PLR0915 # pylint: disable=too-many-locals # pylint: disable=too-many-branches method_name = method.__name__ @@ -5501,17 +5500,17 @@ def prefixed(pref: str, val: str) -> str: weights: List[float] = [] listed_methods: Set[str] = set() - for name, weight in dist.items(): - name = str(name) + for _name, _weight in dist.items(): + name = str(_name) prefixed_name = prefixed('disrupt_', name) if prefixed_name not in all_methods: raise ValueError(f"'{name}' is not a valid disruption. 
All methods: {all_methods.keys()}") - if not is_nonnegative_number(weight): + if not is_nonnegative_number(_weight): raise ValueError("Each disruption weight must be a non-negative number." " '{weight}' is not a valid weight.") - weight = float(weight) + weight = float(_weight) if weight > 0: population.append(all_methods[prefixed_name]) weights.append(weight) diff --git a/sdcm/nemesis_publisher.py b/sdcm/nemesis_publisher.py index 1417962e2f4..49ad51e07db 100644 --- a/sdcm/nemesis_publisher.py +++ b/sdcm/nemesis_publisher.py @@ -90,7 +90,7 @@ def publish(self, disrupt_name, status=True, data=None): else: error_message_size_mb = sys.getsizeof(data["error"]) / 1024**2 diff = error_message_size_mb / self.error_message_size_limit_mb - if diff > 1.0: + if diff > 1.0: # noqa: PLR2004 # NOTE: useful for cases when loader nodes fail to connect to # terminated K8S host machines. It may provide very huge output up to 1Gb. LOGGER.warning("Got too big error message running '%s' nemesis: %sMb.\n" diff --git a/sdcm/prometheus.py b/sdcm/prometheus.py index 53d1c77c1cf..b80a252f76d 100644 --- a/sdcm/prometheus.py +++ b/sdcm/prometheus.py @@ -68,7 +68,7 @@ def start_metrics_server(): def nemesis_metrics_obj(metric_name_suffix=''): - global NM_OBJ # pylint: disable=global-statement,global-variable-not-assigned + global NM_OBJ # pylint: disable=global-statement,global-variable-not-assigned # noqa: PLW0602 if not NM_OBJ.get(metric_name_suffix): NM_OBJ[metric_name_suffix] = NemesisMetrics(metric_name_suffix) return NM_OBJ[metric_name_suffix] @@ -164,7 +164,7 @@ def _get_alerts(self, active=False): response = requests.get(f"{self._alert_manager_url}/alerts?active={int(active)}", timeout=3) else: response = requests.get(f"{self._alert_manager_url}/alerts", timeout=3) - if response.status_code == 200: + if response.status_code == 200: # noqa: PLR2004 return response.json() return None @@ -184,7 +184,7 @@ def _publish_end_of_alerts(self, alerts: dict): for alert in alerts.values(): if not 
alert.get('endsAt', None): alert['endsAt'] = time.strftime("%Y-%m-%dT%H:%M:%S.0Z", time.gmtime()) - alert = updated_dict.get(alert['fingerprint'], alert) + alert = updated_dict.get(alert['fingerprint'], alert) # noqa: PLW2901 labels = alert.get("labels") or {} alert_name = labels.get("alertname", "") node = labels.get("instance", "N/A") @@ -285,7 +285,7 @@ def delete_silence(self, silence_id: str) -> None: class AlertSilencer: # pylint: disable=too-many-arguments - def __init__(self, + def __init__(self, # noqa: PLR0913 alert_manager: PrometheusAlertManagerListener, alert_name: str, duration: Optional[int] = None, diff --git a/sdcm/provision/aws/instance_parameters.py b/sdcm/provision/aws/instance_parameters.py index ce8fc0a9d02..ea3f1c3022e 100644 --- a/sdcm/provision/aws/instance_parameters.py +++ b/sdcm/provision/aws/instance_parameters.py @@ -70,7 +70,7 @@ class AWSInstanceParams(InstanceParamsBase): EbsOptimized: bool = None # pylint: disable=arguments-differ - def dict( + def dict( # noqa: PLR0913 self, *, include: Union['AbstractSetIntStr', 'MappingIntStrAny'] = None, diff --git a/sdcm/provision/aws/provisioner.py b/sdcm/provision/aws/provisioner.py index 681bee5dcc9..def386c671e 100644 --- a/sdcm/provision/aws/provisioner.py +++ b/sdcm/provision/aws/provisioner.py @@ -36,7 +36,7 @@ class AWSInstanceProvisioner(InstanceProvisionerBase): # pylint: disable=too-fe _wait_interval = 5 _iam_fleet_role = 'arn:aws:iam::797456418907:role/aws-ec2-spot-fleet-role' - def provision( # pylint: disable=too-many-arguments + def provision( # pylint: disable=too-many-arguments # noqa: PLR0913 self, provision_parameters: ProvisionParameters, instance_parameters: AWSInstanceParams, diff --git a/sdcm/provision/aws/utils.py b/sdcm/provision/aws/utils.py index 3adc80c3177..44570165cff 100644 --- a/sdcm/provision/aws/utils.py +++ b/sdcm/provision/aws/utils.py @@ -197,7 +197,7 @@ def get_provisioned_spot_instance_ids(region_name: str, request_ids: List[str]) # pylint: 
disable=too-many-arguments -def create_spot_fleet_instance_request( +def create_spot_fleet_instance_request( # noqa: PLR0913 region_name: str, count: int, price: float, @@ -217,7 +217,7 @@ def create_spot_fleet_instance_request( # pylint: disable=too-many-arguments -def create_spot_instance_request( +def create_spot_instance_request( # noqa: PLR0913 region_name: str, count: int, price: Optional[float], diff --git a/sdcm/provision/azure/ip_provider.py b/sdcm/provision/azure/ip_provider.py index 98e7b90051a..d996b37f88b 100644 --- a/sdcm/provision/azure/ip_provider.py +++ b/sdcm/provision/azure/ip_provider.py @@ -37,7 +37,7 @@ def __post_init__(self): try: ips = self._azure_service.network.public_ip_addresses.list(self._resource_group_name) for ip in ips: - ip = self._azure_service.network.public_ip_addresses.get(self._resource_group_name, ip.name) + ip = self._azure_service.network.public_ip_addresses.get(self._resource_group_name, ip.name) # noqa: PLW2901 self._cache[ip.name] = ip except ResourceNotFoundError: pass diff --git a/sdcm/provision/azure/network_interface_provider.py b/sdcm/provision/azure/network_interface_provider.py index 78eef78907e..eb257e97e9f 100644 --- a/sdcm/provision/azure/network_interface_provider.py +++ b/sdcm/provision/azure/network_interface_provider.py @@ -36,7 +36,7 @@ def __post_init__(self): try: nics = self._azure_service.network.network_interfaces.list(self._resource_group_name) for nic in nics: - nic = self._azure_service.network.network_interfaces.get(self._resource_group_name, nic.name) + nic = self._azure_service.network.network_interfaces.get(self._resource_group_name, nic.name) # noqa: PLW2901 self._cache[nic.name] = nic except ResourceNotFoundError: pass diff --git a/sdcm/provision/azure/provisioner.py b/sdcm/provision/azure/provisioner.py index 413df8d56f3..ac634bab1b1 100644 --- a/sdcm/provision/azure/provisioner.py +++ b/sdcm/provision/azure/provisioner.py @@ -101,7 +101,7 @@ def discover_regions(cls, test_id: str = "", 
regions: list = None, else: # extract test_id from rg names where rg.name format is: SCT--- provisioner_params = [(test_id, rg.location, cls._get_az_from_name(rg), azure_service) for rg in all_resource_groups - if (test_id := rg.name.split("SCT-")[-1][:36]) and len(test_id) == 36] + if (test_id := rg.name.split("SCT-")[-1][:36]) and len(test_id) == 36] # noqa: PLR2004 return [cls(*params) for params in provisioner_params] def get_or_create_instance(self, definition: InstanceDefinition, diff --git a/sdcm/provision/azure/virtual_machine_provider.py b/sdcm/provision/azure/virtual_machine_provider.py index d8d0418a91e..24bcb6d3433 100644 --- a/sdcm/provision/azure/virtual_machine_provider.py +++ b/sdcm/provision/azure/virtual_machine_provider.py @@ -43,8 +43,8 @@ def __post_init__(self): """Discover existing virtual machines for resource group.""" try: v_ms = self._azure_service.compute.virtual_machines.list(self._resource_group_name) - for v_m in v_ms: - v_m = self._azure_service.compute.virtual_machines.get(self._resource_group_name, v_m.name) + for _v_m in v_ms: + v_m = self._azure_service.compute.virtual_machines.get(self._resource_group_name, _v_m.name) if v_m.provisioning_state != "Deleting": self._cache[v_m.name] = v_m except ResourceNotFoundError: @@ -149,7 +149,7 @@ def reboot(self, name: str, wait: bool = True, hard: bool = False) -> None: flags = "-ff" if hard else "-f" self.run_command(name, f"reboot {flags}") start_time = time.time() - while wait and time.time() - start_time < 600: # 10 minutes + while wait and time.time() - start_time < 600: # 10 minutes # noqa: PLR2004 time.sleep(10) instance_view = self._azure_service.compute.virtual_machines.instance_view( self._resource_group_name, vm_name=name) diff --git a/sdcm/provision/azure/virtual_network_provider.py b/sdcm/provision/azure/virtual_network_provider.py index a3c959fa443..cb510cd56af 100644 --- a/sdcm/provision/azure/virtual_network_provider.py +++ b/sdcm/provision/azure/virtual_network_provider.py 
@@ -36,8 +36,8 @@ def __post_init__(self): """Discover existing virtual networks for resource group.""" try: vnets = self._azure_service.network.virtual_networks.list(self._resource_group_name) - for vnet in vnets: - vnet = self._azure_service.network.virtual_networks.get(self._resource_group_name, vnet.name) + for _vnet in vnets: + vnet = self._azure_service.network.virtual_networks.get(self._resource_group_name, _vnet.name) self._cache[vnet.name] = vnet except ResourceNotFoundError: pass diff --git a/sdcm/provision/common/builders.py b/sdcm/provision/common/builders.py index 7523a7a3373..f53739182c0 100644 --- a/sdcm/provision/common/builders.py +++ b/sdcm/provision/common/builders.py @@ -32,7 +32,7 @@ def _exclude_by_default(self): exclude_fields.append(field_name) return set(exclude_fields) - def dict( + def dict( # noqa: PLR0913 self, *, include: Union['MappingIntStrAny', 'AbstractSetIntStr'] = None, diff --git a/sdcm/provision/common/provisioner.py b/sdcm/provision/common/provisioner.py index 7a49550ec69..309e8c8ef42 100644 --- a/sdcm/provision/common/provisioner.py +++ b/sdcm/provision/common/provisioner.py @@ -28,7 +28,7 @@ class InstanceProvisionerBase(BaseModel, metaclass=abc.ABCMeta): # pylint: disa Base class for provisioner - a class that provide API to provision instances """ @abc.abstractmethod - def provision( # pylint: disable=too-many-arguments + def provision( # pylint: disable=too-many-arguments # noqa: PLR0913 self, provision_parameters: ProvisionParameters, instance_parameters: InstanceParamsBase | List[InstanceParamsBase], diff --git a/sdcm/provision/scylla_yaml/scylla_yaml.py b/sdcm/provision/scylla_yaml/scylla_yaml.py index 378910ec618..006b8e9ff68 100644 --- a/sdcm/provision/scylla_yaml/scylla_yaml.py +++ b/sdcm/provision/scylla_yaml/scylla_yaml.py @@ -341,7 +341,7 @@ def set_authorizer(cls, authorizer: str): compaction_collection_items_count_warning_threshold: int = None # None - def dict( # pylint: disable=arguments-differ + def dict( # 
pylint: disable=arguments-differ # noqa: PLR0913 self, *, include: Union['MappingIntStrAny', 'AbstractSetIntStr'] = None, @@ -371,7 +371,7 @@ def _update_dict(self, obj: dict, fields_data: dict): if not isinstance(attr_value, dict): raise ValueError("Unexpected data `%s` in attribute `%s`" % ( type(attr_value), attr_name)) - attr_value = attr_info.type(**attr_value) + attr_value = attr_info.type(**attr_value) # noqa: PLW2901 setattr(self, attr_name, attr_value) def update(self, *objects: Union['ScyllaYaml', dict]): diff --git a/sdcm/remote/base.py b/sdcm/remote/base.py index 55cbc9220ee..33db37b2d03 100644 --- a/sdcm/remote/base.py +++ b/sdcm/remote/base.py @@ -91,7 +91,7 @@ def _setup_watchers(self, verbose: bool, log_file: str, additional_watchers: lis # pylint: disable=too-many-arguments @abstractmethod - def run(self, + def run(self, # noqa: PLR0913 cmd: str, timeout: Optional[float] = None, ignore_status: bool = False, @@ -105,7 +105,7 @@ def run(self, pass # pylint: disable=too-many-arguments - def sudo(self, + def sudo(self, # noqa: PLR0913 cmd: str, timeout: Optional[float] = None, ignore_status: bool = False, @@ -189,7 +189,7 @@ def _scp_remote_escape(filename: str) -> str: return shlex.quote("".join(new_name)) @staticmethod - def _make_ssh_command(user: str = "root", # pylint: disable=too-many-arguments + def _make_ssh_command(user: str = "root", # pylint: disable=too-many-arguments # noqa: PLR0913 port: int = 22, opts: str = '', hosts_file: str = '/dev/null', key_file: str = None, connect_timeout: float = 300, alive_interval: float = 300, extra_ssh_options: str = '') -> str: diff --git a/sdcm/remote/kubernetes_cmd_runner.py b/sdcm/remote/kubernetes_cmd_runner.py index 85aef40a53b..85423f12612 100644 --- a/sdcm/remote/kubernetes_cmd_runner.py +++ b/sdcm/remote/kubernetes_cmd_runner.py @@ -127,7 +127,7 @@ class KubernetesCmdRunner(RemoteCmdRunnerBase): exception_retryable = (ConnectionError, MaxRetryError, ThreadException) default_run_retry = 8 - def 
__init__(self, kluster, pod_image: str, # pylint: disable=too-many-arguments + def __init__(self, kluster, pod_image: str, # pylint: disable=too-many-arguments # noqa: PLR0913 pod_name: str, container: Optional[str] = None, namespace: str = "default") -> None: self.kluster = kluster @@ -187,7 +187,7 @@ def _create_connection(self): "k8s_namespace": self.namespace, }))) # pylint: disable=too-many-arguments - def _run_execute(self, cmd: str, timeout: Optional[float] = None, # pylint: disable=too-many-arguments + def _run_execute(self, cmd: str, timeout: Optional[float] = None, # pylint: disable=too-many-arguments # noqa: PLR0913 ignore_status: bool = False, verbose: bool = True, new_session: bool = False, watchers: Optional[List[StreamWatcher]] = None): # TODO: This should be removed than sudo calls will be done in more organized way. @@ -207,14 +207,14 @@ def _run_execute(self, cmd: str, timeout: Optional[float] = None, # pylint: dis # pylint: disable=too-many-arguments,unused-argument @retrying(n=3, sleep_time=5, allowed_exceptions=(RetryableNetworkException, )) - def receive_files(self, src, dst, delete_dst=False, preserve_perm=True, preserve_symlinks=False, timeout=300): + def receive_files(self, src, dst, delete_dst=False, preserve_perm=True, preserve_symlinks=False, timeout=300): # noqa: PLR0913 KubernetesOps.copy_file(self.kluster, f"{self.namespace}/{self.pod_name}:{src}", dst, container=self.container, timeout=timeout) return True # pylint: disable=too-many-arguments,unused-argument @retrying(n=3, sleep_time=5, allowed_exceptions=(RetryableNetworkException, )) - def send_files(self, src, dst, delete_dst=False, preserve_symlinks=False, verbose=False): + def send_files(self, src, dst, delete_dst=False, preserve_symlinks=False, verbose=False): # noqa: PLR0913 with KEY_BASED_LOCKS.get_lock(f"k8s--{self.kluster.name}--{self.namespace}--{self.pod_name}"): KubernetesOps.copy_file(self.kluster, src, f"{self.namespace}/{self.pod_name}:{dst}", 
container=self.container, timeout=300) @@ -412,7 +412,7 @@ def _is_pod_ready_or_completed(self) -> bool: def _is_pod_failed_or_completed(self, _cache={}) -> bool: # pylint: disable=dangerous-default-value last_call_at = _cache.get('last_call_at') - if last_call_at and time.time() - last_call_at < 3: + if last_call_at and time.time() - last_call_at < 3: # noqa: PLR2004 time.sleep(3) status = self._get_pod_status() _cache['last_call_at'] = time.time() @@ -535,7 +535,7 @@ def run(self, command, **kwargs): # pylint: disable=too-many-instance-attributes class KubernetesPodRunner(KubernetesCmdRunner): - def __init__(self, kluster, # pylint: disable=too-many-arguments,super-init-not-called + def __init__(self, kluster, # pylint: disable=too-many-arguments,super-init-not-called # noqa: PLR0913 template_path: str, template_modifiers: list, pod_name_template: str, @@ -597,7 +597,7 @@ def _create_connection(self): return connection # pylint: disable=too-many-arguments,unused-argument - def receive_files(self, src, dst, delete_dst=False, + def receive_files(self, src, dst, delete_dst=False, # noqa: PLR0913 preserve_perm=True, preserve_symlinks=False, timeout=300): # TODO: may be implemented if we want to copy files which exist in the image # because this runner doesn't imply execution of commands on the already @@ -605,7 +605,7 @@ def receive_files(self, src, dst, delete_dst=False, raise NotImplementedError() # pylint: disable=too-many-arguments,unused-argument - def send_files(self, src, dst, delete_dst=False, preserve_symlinks=False, verbose=False): + def send_files(self, src, dst, delete_dst=False, preserve_symlinks=False, verbose=False): # noqa: PLR0913 """Mount single files to a 'dynamic'aly created pods. 'src' and 'dst' params must contain filename. 
diff --git a/sdcm/remote/libssh2_client/__init__.py b/sdcm/remote/libssh2_client/__init__.py index 8bc7e8ed5ec..a899f47b169 100644 --- a/sdcm/remote/libssh2_client/__init__.py +++ b/sdcm/remote/libssh2_client/__init__.py @@ -79,7 +79,7 @@ def run(self): except Exception as exc: # pylint: disable=broad-except self.raised = exc - def _read_output( # pylint: disable=too-many-arguments,too-many-branches + def _read_output( # pylint: disable=too-many-arguments,too-many-branches # noqa: PLR0913, PLR0912 self, session: Session, channel: Channel, timeout: NullableTiming, timeout_read_data: NullableTiming, stdout_stream: Queue, stderr_stream: Queue): """Reads data from ssh session, split it into lines and forward lines into stderr ad stdout pipes @@ -284,7 +284,7 @@ class Client: # pylint: disable=too-many-instance-attributes timings: Timings = Timings() flood_preventing: FloodPreventingFacility = DEFAULT_FLOOD_PREVENTING - def __init__(self, host: str, user: str, password: str = None, # pylint: disable=too-many-arguments + def __init__(self, host: str, user: str, password: str = None, # pylint: disable=too-many-arguments # noqa: PLR0913 port: int = None, pkey: str = None, allow_agent: bool = None, forward_ssh_agent: bool = None, proxy_host: str = None, keepalive_seconds: int = None, timings: Timings = None, flood_preventing: FloodPreventingFacility = None): @@ -422,7 +422,7 @@ def _init_socket(self, host: str, port: int): raise ConnectError("Error connecting to host '%s:%s' - %s" % (host, port, str(error_type))) from ex @staticmethod - def _process_output( # pylint: disable=too-many-arguments, too-many-branches + def _process_output( # pylint: disable=too-many-arguments, too-many-branches # noqa: PLR0913, PLR0912 watchers: List[StreamWatcher], encoding: str, stdout_stream: StringIO, stderr_stream: StringIO, reader: SSHReaderThread, timeout: NullableTiming, timeout_read_data_chunk: NullableTiming): """Separate different approach for the case when watchers are present, since 
watchers are slow, @@ -462,7 +462,7 @@ def _process_output( # pylint: disable=too-many-arguments, too-many-branches return True @staticmethod - def _process_output_no_watchers( # pylint: disable=too-many-arguments + def _process_output_no_watchers( # pylint: disable=too-many-arguments # noqa: PLR0913 session: Session, channel: Channel, encoding: str, stdout_stream: StringIO, stderr_stream: StringIO, timeout: NullableTiming, timeout_read_data_chunk: NullableTiming) -> bool: eof_result = stdout_size = stderr_size = LIBSSH2_ERROR_EAGAIN @@ -551,7 +551,7 @@ def disconnect(self): pass self.sock = None - def run( # pylint: disable=unused-argument,too-many-arguments,too-many-locals + def run( # pylint: disable=unused-argument,too-many-arguments,too-many-locals # noqa: PLR0913, PLR0912 self, command: str, warn: bool = False, encoding: str = 'utf-8', # pylint: disable=redefined-outer-name hide=True, watchers=None, env=None, replace_env=False, in_stream=False, timeout=None) -> Result: """Run command, wait till it ends and return result in Result class. @@ -625,7 +625,7 @@ def _apply_env(channel: Channel, env: Dict[str, str]): for var, val in env.items(): channel.setenv(str(var), str(val)) - def _complete_run(self, channel: Channel, exception: Exception, # pylint: disable=too-many-arguments + def _complete_run(self, channel: Channel, exception: Exception, # pylint: disable=too-many-arguments # noqa: PLR0913 timeout_reached: NullableTiming, timeout: NullableTiming, result: Result, warn, # pylint: disable=redefined-outer-name stdout: StringIO, stderr: StringIO) -> Result: """Complete executing command and return result, no matter what had happened. 
diff --git a/sdcm/remote/libssh2_client/exceptions.py b/sdcm/remote/libssh2_client/exceptions.py index 5e98a37c844..b6f843cfcd6 100644 --- a/sdcm/remote/libssh2_client/exceptions.py +++ b/sdcm/remote/libssh2_client/exceptions.py @@ -102,11 +102,10 @@ def streams_for_display(self) -> tuple: stdout = self.result.tail("stdout") if self.result.pty: stderr = " n/a (PTYs have no stderr)" + elif "stderr" not in self.result.hide: + stderr = already_printed else: - if "stderr" not in self.result.hide: - stderr = already_printed - else: - stderr = self.result.tail("stderr") + stderr = self.result.tail("stderr") return stdout, stderr def __repr__(self) -> str: diff --git a/sdcm/remote/local_cmd_runner.py b/sdcm/remote/local_cmd_runner.py index 266c661e9d2..9653160fc22 100644 --- a/sdcm/remote/local_cmd_runner.py +++ b/sdcm/remote/local_cmd_runner.py @@ -44,7 +44,7 @@ def _create_connection(self) -> Connection: def is_up(self, timeout: float = None) -> bool: # pylint: disable=no-self-use return True - def run(self, cmd: str, timeout: Optional[float] = None, ignore_status: bool = False, # pylint: disable=too-many-arguments + def run(self, cmd: str, timeout: Optional[float] = None, ignore_status: bool = False, # pylint: disable=too-many-arguments # noqa: PLR0913 verbose: bool = True, new_session: bool = False, log_file: Optional[str] = None, retry: int = 1, watchers: Optional[List[StreamWatcher]] = None, change_context: bool = False) -> Result: @@ -89,7 +89,7 @@ def _run(): return result @retrying(n=3, sleep_time=5, allowed_exceptions=(RetryableNetworkException,)) - def receive_files( # pylint: disable=too-many-arguments,unused-argument + def receive_files( # pylint: disable=too-many-arguments,unused-argument # noqa: PLR0913 self, src: str, dst: str, delete_dst: bool = False, preserve_perm: bool = True, preserve_symlinks: bool = False, timeout: float = 300) -> bool: # pylint: disable=too-many-arguments,unused-argument if src == dst: @@ -97,7 +97,7 @@ def receive_files( # pylint: 
disable=too-many-arguments,unused-argument return self.run(f'cp {src} {dst}', timeout=timeout).ok @retrying(n=3, sleep_time=5, allowed_exceptions=(RetryableNetworkException,)) - def send_files( # pylint: disable=too-many-arguments,unused-argument + def send_files( # pylint: disable=too-many-arguments,unused-argument # noqa: PLR0913 self, src: str, dst: str, delete_dst: bool = False, preserve_symlinks: bool = False, verbose: bool = False, timeout: float = 300) -> bool: # pylint: disable=unused-argument if src == dst: diff --git a/sdcm/remote/remote_base.py b/sdcm/remote/remote_base.py index 6dbfb8afb3f..5daa32f24e8 100644 --- a/sdcm/remote/remote_base.py +++ b/sdcm/remote/remote_base.py @@ -46,7 +46,7 @@ class RemoteCmdRunnerBase(CommandRunner): # pylint: disable=too-many-instance-a connection_thread_map = threading.local() default_run_retry = 3 - def __init__(self, hostname: str, user: str = 'root', # pylint: disable=too-many-arguments + def __init__(self, hostname: str, user: str = 'root', # pylint: disable=too-many-arguments # noqa: PLR0913 password: str = None, port: int = None, connect_timeout: int = None, key_file: str = None, extra_ssh_options: str = None, auth_sleep_time: float = None): if port is not None: @@ -164,7 +164,7 @@ def is_up(self, timeout: float = 30): pass @retrying(n=3, sleep_time=5, allowed_exceptions=(RetryableNetworkException,)) - def receive_files(self, src: str, dst: str, delete_dst: bool = False, # pylint: disable=too-many-arguments + def receive_files(self, src: str, dst: str, delete_dst: bool = False, # pylint: disable=too-many-arguments # noqa: PLR0913, PLR0912 preserve_perm: bool = True, preserve_symlinks: bool = False, timeout: float = 300): """ Copy files from the remote host to a local path. 
@@ -253,7 +253,7 @@ def receive_files(self, src: str, dst: str, delete_dst: bool = False, # pylint: return files_received @retrying(n=3, sleep_time=5, allowed_exceptions=(RetryableNetworkException,)) - def send_files(self, src: str, # pylint: disable=too-many-arguments,too-many-statements + def send_files(self, src: str, # pylint: disable=too-many-arguments,too-many-statements # noqa: PLR0913, PLR0912, PLR0915 dst: str, delete_dst: bool = False, preserve_symlinks: bool = False, verbose: bool = False) -> bool: """ Copy files from a local path to the remote host. @@ -491,7 +491,7 @@ def set_file_privs(filename): else: set_file_privs(dest) - def _make_rsync_cmd( # pylint: disable=too-many-arguments + def _make_rsync_cmd( # pylint: disable=too-many-arguments # noqa: PLR0913 self, src: list, dst: str, delete_dst: bool, preserve_symlinks: bool, timeout: int = 300) -> str: """ Given a list of source paths and a destination path, produces the @@ -514,7 +514,7 @@ def _make_rsync_cmd( # pylint: disable=too-many-arguments return command % (symlink_flag, delete_flag, timeout, ssh_cmd, " ".join(src), dst) - def _run_execute(self, cmd: str, timeout: Optional[float] = None, # pylint: disable=too-many-arguments + def _run_execute(self, cmd: str, timeout: Optional[float] = None, # pylint: disable=too-many-arguments # noqa: PLR0913 ignore_status: bool = False, verbose: bool = True, new_session: bool = False, watchers: Optional[List[StreamWatcher]] = None): if verbose: @@ -540,7 +540,7 @@ def _run_execute(self, cmd: str, timeout: Optional[float] = None, # pylint: dis result.exit_status = result.exited return result - def _run_pre_run(self, cmd: str, timeout: Optional[float] = None, # pylint: disable=too-many-arguments + def _run_pre_run(self, cmd: str, timeout: Optional[float] = None, # pylint: disable=too-many-arguments # noqa: PLR0913 ignore_status: bool = False, verbose: bool = True, new_session: bool = False, log_file: Optional[str] = None, retry: int = 1, watchers: 
Optional[List[StreamWatcher]] = None): pass @@ -569,7 +569,7 @@ def _get_retry_params(self, retry: int = 1) -> dict: return {'n': retry, 'sleep_time': 5, 'allowed_exceptions': allowed_exceptions} # pylint: disable=too-many-arguments - def run(self, + def run(self, # noqa: PLR0913 cmd: str, timeout: float | None = None, ignore_status: bool = False, diff --git a/sdcm/remote/remote_cmd_runner.py b/sdcm/remote/remote_cmd_runner.py index 1e114b90127..0d61e940492 100644 --- a/sdcm/remote/remote_cmd_runner.py +++ b/sdcm/remote/remote_cmd_runner.py @@ -97,7 +97,7 @@ def stop(self): self.stop_ssh_up_thread() super().stop() - def _run_pre_run(self, cmd: str, timeout: Optional[float] = None, # pylint: disable=too-many-arguments + def _run_pre_run(self, cmd: str, timeout: Optional[float] = None, # pylint: disable=too-many-arguments # noqa: PLR0913 ignore_status: bool = False, verbose: bool = True, new_session: bool = False, log_file: Optional[str] = None, retry: int = 1, watchers: Optional[List[StreamWatcher]] = None): if not self.is_up(timeout=self.connect_timeout): diff --git a/sdcm/remote/remote_file.py b/sdcm/remote/remote_file.py index 37d124b2df4..588ad04fdbd 100644 --- a/sdcm/remote/remote_file.py +++ b/sdcm/remote/remote_file.py @@ -32,7 +32,7 @@ def read_to_stringio(fobj): # pylint: disable=too-many-locals,too-many-arguments @contextlib.contextmanager -def remote_file(remoter, remote_path, serializer=StringIO.getvalue, deserializer=read_to_stringio, sudo=False, +def remote_file(remoter, remote_path, serializer=StringIO.getvalue, deserializer=read_to_stringio, sudo=False, # noqa: PLR0913 preserve_ownership=True, preserve_permissions=True, log_change=True): filename = os.path.basename(remote_path) local_tempfile = os.path.join(tempfile.mkdtemp(prefix='sct'), filename) diff --git a/sdcm/rest/remote_curl_client.py b/sdcm/rest/remote_curl_client.py index c6ba1f2d8dc..70521dc4e25 100644 --- a/sdcm/rest/remote_curl_client.py +++ b/sdcm/rest/remote_curl_client.py @@ -27,7 
+27,7 @@ def __init__(self, host: str, endpoint: str, node: BaseNode): self._node = node self._remoter = self._node.remoter - def run_remoter_curl(self, method: Literal["GET", "POST"], # pylint: disable=too-many-arguments + def run_remoter_curl(self, method: Literal["GET", "POST"], # pylint: disable=too-many-arguments # noqa: PLR0913 path: str, params: dict[str, str], timeout: int = 120, diff --git a/sdcm/results_analyze/__init__.py b/sdcm/results_analyze/__init__.py index 6bd25121115..aeae0f75f64 100644 --- a/sdcm/results_analyze/__init__.py +++ b/sdcm/results_analyze/__init__.py @@ -44,7 +44,7 @@ class BaseResultsAnalyzer: # pylint: disable=too-many-instance-attributes # pylint: disable=too-many-arguments - def __init__(self, es_index, es_doc_type, email_recipients=(), email_template_fp="", query_limit=1000, logger=None, + def __init__(self, es_index, es_doc_type, email_recipients=(), email_template_fp="", query_limit=1000, logger=None, # noqa: PLR0913 events=None): self._es = ES() self._conf = self._es._conf # pylint: disable=protected-access @@ -222,7 +222,7 @@ class LatencyDuringOperationsPerformanceAnalyzer(BaseResultsAnalyzer): Get latency during operations performance analyzer """ - def __init__(self, es_index, es_doc_type, email_recipients=(), logger=None, events=None): # pylint: disable=too-many-arguments + def __init__(self, es_index, es_doc_type, email_recipients=(), logger=None, events=None): # pylint: disable=too-many-arguments # noqa: PLR0913 super().__init__(es_index=es_index, es_doc_type=es_doc_type, email_recipients=email_recipients, email_template_fp="results_latency_during_ops_short.html", logger=logger, events=events) self.percentiles = ['percentile_90', 'percentile_99'] @@ -406,7 +406,7 @@ def _calculate_relative_change_magnitude(current_value, best_value): except Exception as exc: # pylint: disable=broad-except LOGGER.error("Compare results failed: %s", exc) - def check_regression(self, test_id, data, is_gce=False, node_benchmarks=None, 
email_subject_postfix=None): # pylint: disable=too-many-locals, too-many-branches, too-many-statements, too-many-arguments + def check_regression(self, test_id, data, is_gce=False, node_benchmarks=None, email_subject_postfix=None): # pylint: disable=too-many-locals, too-many-branches, too-many-statements, too-many-arguments # noqa: PLR0913 doc = self.get_test_by_id(test_id) full_test_name = doc["_source"]["test_details"]["test_name"] test_name = full_test_name.split('.')[-1] # Example: longevity_test.LongevityTest.test_custom_time @@ -499,7 +499,7 @@ class SpecifiedStatsPerformanceAnalyzer(BaseResultsAnalyzer): Get specified performance test results from elasticsearch DB and analyze it to find a regression """ - def __init__(self, es_index, es_doc_type, email_recipients=(), logger=None, events=None): # pylint: disable=too-many-arguments + def __init__(self, es_index, es_doc_type, email_recipients=(), logger=None, events=None): # pylint: disable=too-many-arguments # noqa: PLR0913 super().__init__(es_index=es_index, es_doc_type=es_doc_type, email_recipients=email_recipients, email_template_fp="", logger=logger, events=events) @@ -510,7 +510,7 @@ def _test_stats(self, test_doc): return None return test_doc['_source']['results'] - def check_regression(self, test_id, stats): # pylint: disable=too-many-locals, too-many-branches, too-many-statements + def check_regression(self, test_id, stats): # pylint: disable=too-many-locals, too-many-branches, too-many-statements # noqa: PLR0912, PLR0915 """ Get test results by id, filter similar results and calculate DB values for each version, then compare with max-allowed in the tested version (and report all the found versions). 
@@ -632,7 +632,7 @@ class PerformanceResultsAnalyzer(BaseResultsAnalyzer): PARAMS = TestStatsMixin.STRESS_STATS - def __init__(self, es_index, es_doc_type, email_recipients=(), logger=None, events=None): # pylint: disable=too-many-arguments + def __init__(self, es_index, es_doc_type, email_recipients=(), logger=None, events=None): # pylint: disable=too-many-arguments # noqa: PLR0913 super().__init__(es_index=es_index, es_doc_type=es_doc_type, email_recipients=email_recipients, email_template_fp="results_performance.html", logger=logger, events=events) @@ -709,7 +709,7 @@ def cmp(self, src, dst, version_dst, best_test_id): return cmp_res # pylint: disable=too-many-arguments - def check_regression(self, test_id, is_gce=False, email_subject_postfix=None, + def check_regression(self, test_id, is_gce=False, email_subject_postfix=None, # noqa: PLR0913, PLR0912, PLR0915 use_wide_query=False, lastyear=False, node_benchmarks=None): """ @@ -872,7 +872,7 @@ def check_regression(self, test_id, is_gce=False, email_subject_postfix=None, if ycsb: if ycsb_engine := ycsb.get('raw_cmd', "").split(): - if len(ycsb_engine) > 3: + if len(ycsb_engine) > 3: # noqa: PLR2004 index = (ycsb_engine.index("run") if "run" in ycsb_engine else None) or \ (ycsb_engine.index("load") if "load" in ycsb_engine else None) ycsb_engine = ycsb_engine[index+1] @@ -889,7 +889,7 @@ def check_regression(self, test_id, is_gce=False, email_subject_postfix=None, return True - def check_regression_with_subtest_baseline(self, test_id, base_test_id, subtest_baseline, is_gce=False): + def check_regression_with_subtest_baseline(self, test_id, base_test_id, subtest_baseline, is_gce=False): # noqa: PLR0912, PLR0915 """ Get test results by id, filter similar results and calculate max values for each version, then compare with max in the test version and all the found versions. 
@@ -1136,9 +1136,9 @@ def _add_best_for_info(test, subtest, metric_path, tests_info): def _mark_best_tests(self, prior_subtests, metrics, tests_info, main_test_id): main_tests_by_id = MagicList(tests_info.keys()).group_by('test_id') - for _, prior_tests in prior_subtests.items(): + for _, _prior_tests in prior_subtests.items(): prior_tests = MagicList( - [prior_test for prior_test in prior_tests if prior_test.main_test_id != main_test_id]) + [prior_test for prior_test in _prior_tests if prior_test.main_test_id != main_test_id]) if not prior_tests: continue for metric_path in metrics: @@ -1206,7 +1206,7 @@ def _get_prior_tests_for_subtests(subtests: list): return output @staticmethod - def _cleanup_not_complete_main_tests(prior_main_tests: list, prior_subtests: dict, expected_subtests_count): # pylint: disable=too-many-branches + def _cleanup_not_complete_main_tests(prior_main_tests: list, prior_subtests: dict, expected_subtests_count): # pylint: disable=too-many-branches # noqa: PLR0912 is_test_complete = {} for subtest, prior_tests in prior_subtests.items(): for prior_test_id, _ in prior_tests.group_by('main_test_id').items(): @@ -1233,7 +1233,7 @@ def _cleanup_not_complete_main_tests(prior_main_tests: list, prior_subtests: dic for num in sorted(to_delete, reverse=True): prior_tests.pop(num) - def check_regression_multi_baseline( + def check_regression_multi_baseline( # noqa: PLR0912, PLR0915 self, test_id, subtests_info: list = None, @@ -1474,7 +1474,7 @@ class ThroughputLatencyGradualGrowPayloadPerformanceAnalyzer(BaseResultsAnalyzer Performance Analyzer for results with throughput and latency of gradual payload increase """ - def __init__(self, es_index, es_doc_type, email_recipients=(), logger=None, events=None): # pylint: disable=too-many-arguments + def __init__(self, es_index, es_doc_type, email_recipients=(), logger=None, events=None): # pylint: disable=too-many-arguments # noqa: PLR0913 super().__init__(es_index=es_index, es_doc_type=es_doc_type, 
email_recipients=email_recipients, email_template_fp="results_incremental_throughput_increase.html", logger=logger, events=events) @@ -1498,7 +1498,7 @@ class SearchBestThroughputConfigPerformanceAnalyzer(BaseResultsAnalyzer): Get latency during operations performance analyzer """ - def __init__(self, es_index, es_doc_type, email_recipients=(), logger=None, events=None): # pylint: disable=too-many-arguments + def __init__(self, es_index, es_doc_type, email_recipients=(), logger=None, events=None): # pylint: disable=too-many-arguments # noqa: PLR0913 super().__init__(es_index=es_index, es_doc_type=es_doc_type, email_recipients=email_recipients, email_template_fp="results_search_best_throughput_config.html", logger=logger, events=events) diff --git a/sdcm/results_analyze/base.py b/sdcm/results_analyze/base.py index a0723791968..a3c806119ee 100644 --- a/sdcm/results_analyze/base.py +++ b/sdcm/results_analyze/base.py @@ -158,7 +158,7 @@ def _get_es_data_path_and_values_from_patterns(self, data_patterns: list, flatte output = {} - def data_cb(data_instance, current_instance, data_path, es_data_path, is_edge): # pylint: disable=too-many-branches, too-many-locals + def data_cb(data_instance, current_instance, data_path, es_data_path, is_edge): # pylint: disable=too-many-branches, too-many-locals # noqa: PLR0912 final_return = False for data_pattern_split in data_patterns_split: to_add = len(data_pattern_split) == len(data_path) diff --git a/sdcm/results_analyze/test.py b/sdcm/results_analyze/test.py index 99dd408ff2d..9dc574b63f8 100644 --- a/sdcm/results_analyze/test.py +++ b/sdcm/results_analyze/test.py @@ -148,7 +148,7 @@ def major_as_int(self): version_parts = self.as_string.split('.') idx = 100**5 output = 0 - if len(version_parts) >= 2: + if len(version_parts) >= 2: # noqa: PLR2004 for version_part in version_parts[:-1]: if version_part.isdecimal(): output += idx * int(version_part) @@ -437,7 +437,7 @@ def is_gce(self): def _get_es_filters(cls, depth=2): tmp = [] 
for es_filter in cls._get_all_es_data_mapping().values(): - es_filter = '.'.join(es_filter.split('.')[:depth]) + es_filter = '.'.join(es_filter.split('.')[:depth]) # noqa: PLW2901 if es_filter not in tmp: tmp.append(es_filter) return ['hits.hits.' + es_filter for es_filter in tmp] @@ -459,11 +459,11 @@ def _get_es_query_from_instance_data(cls, instance_data: dict): def _get_es_query_from_es_data(cls, es_data: dict): filters = [] for es_data_path, data_value in es_data.items(): - es_data_path = es_data_path.split('.') + es_data_path = es_data_path.split('.') # noqa: PLW2901 if es_data_path[0] == '_source': - es_data_path = es_data_path[1:] - es_data_path = '.'.join(es_data_path) - es_data_path = cls._escape_filter_key(es_data_path) + es_data_path = es_data_path[1:] # noqa: PLW2901 + es_data_path = '.'.join(es_data_path) # noqa: PLW2901 + es_data_path = cls._escape_filter_key(es_data_path) # noqa: PLW2901 if isinstance(data_value, str) and es_data_path not in cls._es_field_indexes and data_value != '*': filters.append(f'{es_data_path}.keyword: \"{data_value}\"') elif isinstance(data_value, bool): diff --git a/sdcm/sct_config.py b/sdcm/sct_config.py index c11f5ee43e4..e052ea59c4a 100644 --- a/sdcm/sct_config.py +++ b/sdcm/sct_config.py @@ -1626,7 +1626,7 @@ class SCTConfiguration(dict): ami_id_params = ['ami_id_db_scylla', 'ami_id_loader', 'ami_id_monitor', 'ami_id_db_cassandra', 'ami_id_db_oracle'] aws_supported_regions = ['eu-west-1', 'eu-west-2', 'us-west-2', 'us-east-1', 'eu-north-1', 'eu-central-1'] - def __init__(self): + def __init__(self): # noqa: PLR0912, PLR0915 # pylint: disable=too-many-locals,too-many-branches,too-many-statements super().__init__() self.scylla_version = None @@ -2020,14 +2020,14 @@ def list_of_stress_tools(self) -> Set[str]: if not stress_cmd: continue if not isinstance(stress_cmd, list): - stress_cmd = [stress_cmd] + stress_cmd = [stress_cmd] # noqa: PLW2901 for cmd in stress_cmd: if stress_tool := cmd.split(maxsplit=2)[0]: 
stress_tools.add(stress_tool) return stress_tools - def check_required_files(self): + def check_required_files(self): # noqa: PLR0912 # pylint: disable=too-many-nested-blocks # pylint: disable=too-many-branches for param_name in self.stress_cmd_params: @@ -2040,9 +2040,9 @@ def check_required_files(self): if not stress_cmd: continue if not isinstance(stress_cmd, list): - stress_cmd = [stress_cmd] + stress_cmd = [stress_cmd] # noqa: PLW2901 for cmd in stress_cmd: - cmd = cmd.strip(' ') + cmd = cmd.strip(' ') # noqa: PLW2901 if cmd.startswith('latte'): script_name_regx = re.compile(r'([/\w-]*\.rn)') script_name = script_name_regx.search(cmd).group(1) @@ -2053,8 +2053,8 @@ def check_required_files(self): continue for option in cmd.split(): if option.startswith('profile='): - option = option.split('=', 1) - if len(option) < 2: + option = option.split('=', 1) # noqa: PLW2901 + if len(option) < 2: # noqa: PLR2004 continue profile_path = option[1] if 'scylla-qa-internal' in profile_path: @@ -2095,7 +2095,7 @@ def verify_configuration(self): self._validate_nemesis_can_run_on_non_seed() self._validate_number_of_db_nodes_divides_by_az_number() - if 'extra_network_interface' in self and len(self.region_names) >= 2: + if 'extra_network_interface' in self and len(self.region_names) >= 2: # noqa: PLR2004 raise ValueError("extra_network_interface isn't supported for multi region use cases") self._check_partition_range_with_data_validation_correctness() self._verify_scylla_bench_mode_and_workload_parameters() @@ -2258,7 +2258,7 @@ def _check_partition_range_with_data_validation_correctness(self): if int(partition_range_splitted[1]) < int(partition_range_splitted[0]): raise ValueError(error_message_template.format(' should be bigger then . 
')) - def verify_configuration_urls_validity(self): # pylint: disable=too-many-branches + def verify_configuration_urls_validity(self): # pylint: disable=too-many-branches # noqa: PLR0912 """ Check if ami_id and repo urls are valid """ @@ -2459,9 +2459,9 @@ def _verify_scylla_bench_mode_and_workload_parameters(self): if not stress_cmd: continue if not isinstance(stress_cmd, list): - stress_cmd = [stress_cmd] + stress_cmd = [stress_cmd] # noqa: PLW2901 for cmd in stress_cmd: - cmd = cmd.strip(' ') + cmd = cmd.strip(' ') # noqa: PLW2901 if not cmd.startswith('scylla-bench'): continue if "-mode=" not in cmd: diff --git a/sdcm/sct_events/base.py b/sdcm/sct_events/base.py index f5de3c3e8c3..ce97b79f969 100644 --- a/sdcm/sct_events/base.py +++ b/sdcm/sct_events/base.py @@ -136,7 +136,7 @@ def add_subevent_type(cls, # Check if we can add a new sub-event type: # 1) only 2 levels of sub-events allowed (i.e., `Event.TYPE.subtype') - assert len(cls.__name__.split(".")) < 3, "max level of the event's nesting is already reached" + assert len(cls.__name__.split(".")) < 3, "max level of the event's nesting is already reached" # noqa: PLR2004 # 2) name of sub-event should be a correct Python identifier. 
assert name.isidentifier() and not iskeyword(name), \ @@ -217,7 +217,7 @@ def add_subcontext(self): # pylint: disable=import-outside-toplevel; to avoid cyclic imports from sdcm.sct_events.continuous_event import ContinuousEventsRegistry # Add subcontext for event with ERROR and CRITICAL severity only - if self.severity.value < 3: + if self.severity.value < 3: # noqa: PLR2004 return # Issue https://github.com/scylladb/scylla-cluster-tests/issues/5544 @@ -438,7 +438,7 @@ def __init__(self, regex: str, severity=Severity.ERROR): # .* patterns works extremely slow, on big log message pattern evaluation can take >1s # in order to keep log reading fast we must avoid them at all costs if regex: - assert '.*'.count(regex) < 2 + assert '.*'.count(regex) < 2 # noqa: PLR2004 self.regex = regex self.node = None self.line = None diff --git a/sdcm/sct_events/database.py b/sdcm/sct_events/database.py index 4e5915ba82b..53322fc44d3 100644 --- a/sdcm/sct_events/database.py +++ b/sdcm/sct_events/database.py @@ -393,7 +393,7 @@ class CompactionEvent(ScyllaDatabaseContinuousEvent): save_to_files = False continuous_hash_fields = ('node', 'shard', 'table', 'compaction_process_id') - def __init__(self, node: str, shard: int, table: str, compaction_process_id: str, # pylint: disable=too-many-arguments + def __init__(self, node: str, shard: int, table: str, compaction_process_id: str, # pylint: disable=too-many-arguments # noqa: PLR0913 severity=Severity.NORMAL, **__): self.table = table self.compaction_process_id = compaction_process_id diff --git a/sdcm/sct_events/events_processes.py b/sdcm/sct_events/events_processes.py index 54d89d95f1b..9ef0cd3137e 100644 --- a/sdcm/sct_events/events_processes.py +++ b/sdcm/sct_events/events_processes.py @@ -166,7 +166,7 @@ def __str__(self): def create_default_events_process_registry(log_dir: Union[str, Path]): - global _EVENTS_PROCESSES # pylint: disable=global-statement + global _EVENTS_PROCESSES # pylint: disable=global-statement # noqa: PLW0603 
with _EVENTS_PROCESSES_LOCK: if _EVENTS_PROCESSES is None: diff --git a/sdcm/sct_events/loaders.py b/sdcm/sct_events/loaders.py index ebb16897cf4..5a7e9979724 100644 --- a/sdcm/sct_events/loaders.py +++ b/sdcm/sct_events/loaders.py @@ -29,7 +29,7 @@ class GeminiStressEvent(BaseStressEvent): # pylint: disable=too-many-arguments, too-many-instance-attributes - def __init__(self, node: Any, + def __init__(self, node: Any, # noqa: PLR0913 cmd: str, log_file_name: Optional[str] = None, severity: Severity = Severity.NORMAL, diff --git a/sdcm/sct_events/nodetool.py b/sdcm/sct_events/nodetool.py index 1d7c66e37bd..f20158c7b0b 100644 --- a/sdcm/sct_events/nodetool.py +++ b/sdcm/sct_events/nodetool.py @@ -41,7 +41,7 @@ def default(self, o: Any) -> Any: # pylint: disable=too-many-instance-attributes class NodetoolEvent(ContinuousEvent): - def __init__(self, # pylint: disable=too-many-arguments + def __init__(self, # pylint: disable=too-many-arguments # noqa: PLR0913 nodetool_command, severity=Severity.NORMAL, node=None, diff --git a/sdcm/sct_events/operator.py b/sdcm/sct_events/operator.py index b71b592d0ef..047271c0354 100644 --- a/sdcm/sct_events/operator.py +++ b/sdcm/sct_events/operator.py @@ -42,7 +42,7 @@ def add_info(self: T_log_event, node, line: str, line_number: int) -> T_log_even # 06 - Month # 28 - Day splits = line.split(maxsplit=4) - if len(splits) != 5 or len(splits[0]) != 5: + if len(splits) != 5 or len(splits[0]) != 5: # noqa: PLR2004 return self type_month_date, time_string, *_ = splits try: diff --git a/sdcm/sct_events/stress_events.py b/sdcm/sct_events/stress_events.py index e44ccda1143..c0d7e12d9ca 100644 --- a/sdcm/sct_events/stress_events.py +++ b/sdcm/sct_events/stress_events.py @@ -19,7 +19,7 @@ class BaseStressEvent(ContinuousEvent, abstract=True): # pylint: disable=too-many-arguments @classmethod - def add_stress_subevents(cls, + def add_stress_subevents(cls, # noqa: PLR0913 failure: Optional[Severity] = None, error: Optional[Severity] = None, 
timeout: Optional[Severity] = None, @@ -53,7 +53,7 @@ def errors_formatted(self): class StressEvent(BaseStressEvent, abstract=True): # pylint: disable=too-many-arguments - def __init__(self, + def __init__(self, # noqa: PLR0913 node: Any, stress_cmd: Optional[str] = None, log_file_name: Optional[str] = None, diff --git a/sdcm/sct_events/system.py b/sdcm/sct_events/system.py index c5607d04751..b9db7de6f1b 100644 --- a/sdcm/sct_events/system.py +++ b/sdcm/sct_events/system.py @@ -53,7 +53,7 @@ class TestFrameworkEvent(InformationalEvent): # pylint: disable=too-many-instan __test__ = False # Mark this class to be not collected by pytest. # pylint: disable=too-many-arguments - def __init__(self, + def __init__(self, # noqa: PLR0913 source: Any, source_method: Optional = None, args: Optional[Sequence] = None, @@ -201,7 +201,7 @@ class AwsKmsEvent(ThreadFailedEvent): class CoreDumpEvent(InformationalEvent): # pylint: disable=too-many-arguments - def __init__(self, + def __init__(self, # noqa: PLR0913 node: Any, corefile_url: str, backtrace: str, diff --git a/sdcm/sct_provision/region_definition_builder.py b/sdcm/sct_provision/region_definition_builder.py index 145e6be26e2..0517184b88e 100644 --- a/sdcm/sct_provision/region_definition_builder.py +++ b/sdcm/sct_provision/region_definition_builder.py @@ -89,7 +89,7 @@ def build_instance_definition(self, region: str, node_type: NodeTypeType, index: user_data=user_data ) - def build_region_definition(self, region: str, availability_zone: str, n_db_nodes: int, # pylint: disable=too-many-arguments + def build_region_definition(self, region: str, availability_zone: str, n_db_nodes: int, # pylint: disable=too-many-arguments # noqa: PLR0913 n_loader_nodes: int, n_monitor_nodes: int) -> RegionDefinition: """Builds instances definitions for given region""" definitions = [] diff --git a/sdcm/sct_runner.py b/sdcm/sct_runner.py index 7e62afd720c..2e1cce94d4a 100644 --- a/sdcm/sct_runner.py +++ b/sdcm/sct_runner.py @@ -279,7 +279,7 @@ 
def instance(self, new_instance_value): @abstractmethod # pylint: disable=too-many-arguments - def _create_instance(self, + def _create_instance(self, # noqa: PLR0913 instance_type: str, base_image: Any, tags: dict[str, str], @@ -380,7 +380,7 @@ def create_image(self) -> None: def _get_base_image(self, image: Optional[Any] = None) -> Any: ... - def create_instance(self, # pylint: disable=too-many-arguments + def create_instance(self, # pylint: disable=too-many-arguments # noqa: PLR0913 test_id: str, test_name: str, test_duration: int, @@ -499,7 +499,7 @@ def _image(self, image_type: ImageType = ImageType.SOURCE) -> Any: return aws_region.resource.Image(existing_amis[0]["ImageId"]) # pylint: disable=too-many-arguments - def _create_instance(self, + def _create_instance(self, # noqa: PLR0913 instance_type: InstanceTypeType, base_image: Any, tags: dict[str, str], @@ -753,7 +753,7 @@ def tags_to_labels(tags: dict[str, str]) -> dict[str, str]: return {key.lower(): value.lower().replace(".", "_") for key, value in tags.items()} # pylint: disable=too-many-arguments - def _create_instance(self, + def _create_instance(self, # noqa: PLR0913 instance_type: str, base_image: Any, tags: dict[str, str], @@ -947,7 +947,7 @@ def _image(self, image_type: ImageType = ImageType.SOURCE) -> Any: return gallery_image_version return None - def _create_instance(self, # pylint: disable=too-many-arguments + def _create_instance(self, # pylint: disable=too-many-arguments # noqa: PLR0913 instance_type: str, base_image: Any, tags: dict[str, str], @@ -1173,10 +1173,9 @@ def _manage_runner_keep_tag_value(utc_now: datetime, return sct_runner_info LOGGER.info("No changes to make to runner tags.") - return sct_runner_info -def clean_sct_runners(test_status: str, +def clean_sct_runners(test_status: str, # noqa: PLR0912 test_runner_ip: str = None, dry_run: bool = False, force: bool = False) -> None: @@ -1214,9 +1213,9 @@ def clean_sct_runners(test_status: str, LOGGER.info("UTC now: %s", utc_now) if 
not dry_run and test_runner_ip: - sct_runner_info = _manage_runner_keep_tag_value(test_status=test_status, utc_now=utc_now, - timeout_flag=timeout_flag, sct_runner_info=sct_runner_info, - dry_run=dry_run) + _manage_runner_keep_tag_value(test_status=test_status, utc_now=utc_now, + timeout_flag=timeout_flag, sct_runner_info=sct_runner_info, + dry_run=dry_run) if sct_runner_info.keep: if "alive" in str(sct_runner_info.keep): diff --git a/sdcm/scylla_bench_thread.py b/sdcm/scylla_bench_thread.py index 79b9c2cc7ef..1db672e6b5a 100644 --- a/sdcm/scylla_bench_thread.py +++ b/sdcm/scylla_bench_thread.py @@ -104,7 +104,7 @@ class ScyllaBenchThread(DockerBasedStressThread): # pylint: disable=too-many-in } # pylint: disable=too-many-arguments - def __init__(self, stress_cmd, loader_set, timeout, node_list=None, round_robin=False, + def __init__(self, stress_cmd, loader_set, timeout, node_list=None, round_robin=False, # noqa: PLR0913 stop_test_on_failure=False, stress_num=1, credentials=None, params=None): super().__init__(loader_set=loader_set, stress_cmd=stress_cmd, timeout=timeout, stress_num=stress_num, node_list=node_list, round_robin=round_robin, params=params, @@ -249,7 +249,7 @@ def _parse_bench_summary(cls, lines): break split = line.split(':', maxsplit=1) - if len(split) < 2: + if len(split) < 2: # noqa: PLR2004 continue key = split[0].strip() value = ' '.join(split[1].split()) diff --git a/sdcm/send_email.py b/sdcm/send_email.py index fa286af14a3..3774cec57e3 100644 --- a/sdcm/send_email.py +++ b/sdcm/send_email.py @@ -78,7 +78,7 @@ def _connect(self): self.conn.starttls() self.conn.login(user=self._user, password=self._password) - def prepare_email(self, subject, content, recipients, html=True, files=()): # pylint: disable=too-many-arguments + def prepare_email(self, subject, content, recipients, html=True, files=()): # pylint: disable=too-many-arguments # noqa: PLR0913 msg = MIMEMultipart() msg['subject'] = subject msg['from'] = self.sender @@ -106,7 +106,7 @@ def 
prepare_email(self, subject, content, recipients, html=True, files=()): # p raise BodySizeExceeded(current_size=len(email), limit=self._body_size_limit) return email - def send(self, subject, content, recipients, html=True, files=()): # pylint: disable=too-many-arguments + def send(self, subject, content, recipients, html=True, files=()): # pylint: disable=too-many-arguments # noqa: PLR0913 """ :param subject: text :param content: text/html @@ -571,7 +571,7 @@ class PerfSimpleQueryReporter(BaseEmailReporter): email_template_file = "results_perf_simple_query.html" -def build_reporter(name: str, +def build_reporter(name: str, # noqa: PLR0912, PLR0911 email_recipients: Sequence[str] = (), logdir: Optional[str] = None) -> Optional[BaseEmailReporter]: # pylint: disable=too-many-return-statements,too-many-branches @@ -673,7 +673,7 @@ def get_running_instances_for_email_report(test_id: str, ip_filter: str = None): return nodes -def send_perf_email(reporter, test_results, logs, email_recipients, testrun_dir, start_time): # pylint: disable=too-many-arguments +def send_perf_email(reporter, test_results, logs, email_recipients, testrun_dir, start_time): # pylint: disable=too-many-arguments # noqa: PLR0913 for subject, content in test_results.items(): if 'email_body' not in content: content['email_body'] = {} diff --git a/sdcm/sla/libs/sla_utils.py b/sdcm/sla/libs/sla_utils.py index c2f5eb8e1c2..98a39b11c70 100644 --- a/sdcm/sla/libs/sla_utils.py +++ b/sdcm/sla/libs/sla_utils.py @@ -35,7 +35,7 @@ class SlaUtils: @staticmethod # pylint: disable=too-many-arguments, too-many-locals - def define_read_cassandra_stress_command(role: Role, + def define_read_cassandra_stress_command(role: Role, # noqa: PLR0913 load_type: str, c_s_workload_type: str, threads: int, @@ -140,7 +140,7 @@ def get_c_s_stats(self, read_queue, users, statistic_name, tester): return results # pylint: disable=too-many-arguments,too-many-locals,too-many-branches - def validate_scheduler_runtime(self, start_time, 
end_time, read_users, prometheus_stats, db_cluster, + def validate_scheduler_runtime(self, start_time, end_time, read_users, prometheus_stats, db_cluster, # noqa: PLR0913, PLR0912 expected_ratio=None, load_high_enough=None, publish_wp_error_event=False, possible_issue=None): # roles_full_info example: @@ -200,7 +200,7 @@ def validate_scheduler_runtime(self, start_time, end_time, read_users, prometheu # Zero Service Level group runtime is not expected. It may happen due to Prometheus problem # (connection or else) or issue https://github.com/scylladb/scylla-enterprise/issues/2572 - if role_sl_attribute['sl_group_runtime'] == 0.0: + if role_sl_attribute['sl_group_runtime'] == 0.0: # noqa: PLR2004 sl_group_runtime_zero = True LOGGER.debug('RUN TIME PER ROLE: {}'.format(roles_full_info)) @@ -236,7 +236,7 @@ def validate_scheduler_runtime(self, start_time, end_time, read_users, prometheu # pylint: disable=too-many-branches @staticmethod - def validate_runtime_relatively_to_share(roles_full_info: dict, node_ip: str, + def validate_runtime_relatively_to_share(roles_full_info: dict, node_ip: str, # noqa: PLR0913, PLR0912 load_high_enough: bool = None, node_cpu: float = None, publish_wp_error_event=False, possible_issue=None): @@ -247,7 +247,7 @@ def validate_runtime_relatively_to_share(roles_full_info: dict, node_ip: str, # 'sl_group_runtime': 181.23794405382156}} shares = [sl['service_level_shares'] for sl in roles_full_info.values()] - if not shares or len([s for s in shares if s]) < 2: + if not shares or len([s for s in shares if s]) < 2: # noqa: PLR2004 WorkloadPrioritisationEvent.RatioValidationEvent( message='Not enough service level shares for validation. Expected two Service Levels, received ' f'{len([s for s in shares if s])}: {shares}. 
Runtime can not be validated', @@ -279,7 +279,7 @@ def validate_runtime_relatively_to_share(roles_full_info: dict, node_ip: str, runtimes_ratio = False # Validate that role with higher shares get more resources and vice versa - if 0.0 not in runtimes and shares_ratio == runtimes_ratio: + if 0.0 not in runtimes and shares_ratio == runtimes_ratio: # noqa: PLR2004 WorkloadPrioritisationEvent.RatioValidationEvent( message=f'Role with higher shares got more resources on the node with IP {node_ip} as expected', severity=Severity.NORMAL).publish() @@ -293,7 +293,7 @@ def validate_runtime_relatively_to_share(roles_full_info: dict, node_ip: str, runtime_per_sl_group.append(f"{service_level['sl_group']} (shares " f"{service_level['service_level_shares']}): " f"{round(service_level['sl_group_runtime'], 2)}") - if service_level['sl_group_runtime'] == 0.0: + if service_level['sl_group_runtime'] == 0.0: # noqa: PLR2004 zero_runtime_service_level.append(service_level['sl_group']) runtime_per_sl_group_str = "\n ".join(runtime_per_sl_group) diff --git a/sdcm/sla/sla_tests.py b/sdcm/sla/sla_tests.py index e4af5bc2ecd..72ed1685cd3 100644 --- a/sdcm/sla/sla_tests.py +++ b/sdcm/sla/sla_tests.py @@ -17,7 +17,7 @@ class Steps(SlaUtils): # pylint: disable=too-many-arguments - def run_stress_and_validate_scheduler_runtime_during_load(self, tester, read_cmds, prometheus_stats, read_roles, + def run_stress_and_validate_scheduler_runtime_during_load(self, tester, read_cmds, prometheus_stats, read_roles, # noqa: PLR0913 stress_queue, sleep=600): # pylint: disable=not-context-manager with TestStepEvent(step="Run stress command and validate scheduler runtime during load") as wp_event: @@ -43,7 +43,7 @@ def run_stress_and_validate_scheduler_runtime_during_load(self, tester, read_cmd return wp_event # pylint: disable=too-many-arguments - def alter_sl_and_validate_scheduler_runtime(self, tester, service_level, new_shares, read_roles, prometheus_stats, + def 
alter_sl_and_validate_scheduler_runtime(self, tester, service_level, new_shares, read_roles, prometheus_stats, # noqa: PLR0913 sleep=600): # pylint: disable=not-context-manager with TestStepEvent(step=f"Alter shares from {service_level.shares} to {new_shares} Service " @@ -105,7 +105,7 @@ def drop_service_level_and_run_load(sl_for_drop, role_with_sl_to_drop, sleep=600 return wp_event # pylint: disable=too-many-arguments - def attach_sl_and_validate_scheduler_runtime(self, tester, new_service_level, role_for_attach, + def attach_sl_and_validate_scheduler_runtime(self, tester, new_service_level, role_for_attach, # noqa: PLR0913 read_roles, prometheus_stats, sleep=600, # restart_scylla parameter is temporary - wWorkaround for issue # https://github.com/scylladb/scylla-enterprise/issues/2572 @@ -158,7 +158,7 @@ def unique_subsrtr_for_name(): return str(uuid.uuid1()).split("-", maxsplit=1)[0] # pylint: disable=too-many-arguments - def _create_sla_auth(self, session, db_cluster, shares: int, index: str, superuser: bool = True) -> Role: + def _create_sla_auth(self, session, db_cluster, shares: int, index: str, superuser: bool = True) -> Role: # noqa: PLR0913 role = None try: role = create_sla_auth(session=session, shares=shares, index=index, superuser=superuser) @@ -174,7 +174,7 @@ def _create_sla_auth(self, session, db_cluster, shares: int, index: str, superus raise # pylint: disable=too-many-arguments - def _create_new_service_level(self, session, auth_entity_name_index, shares, db_cluster, service_level_for_test_step: str = None): + def _create_new_service_level(self, session, auth_entity_name_index, shares, db_cluster, service_level_for_test_step: str = None): # noqa: PLR0913 new_sl = ServiceLevel(session=session, name=SERVICE_LEVEL_NAME_TEMPLATE % ('50', auth_entity_name_index), shares=shares).create() @@ -526,7 +526,7 @@ def test_replace_service_level_using_drop_during_load(self, tester, prometheus_s return error_events # pylint: disable=lost-exception # pylint: 
disable=too-many-locals,too-many-arguments - def test_maximum_allowed_sls_with_max_shares_during_load(self, tester, prometheus_stats, num_of_partitions, + def test_maximum_allowed_sls_with_max_shares_during_load(self, tester, prometheus_stats, num_of_partitions, # noqa: PLR0913 cassandra_stress_column_definition=None, service_levels_amount=7): error_events = [] diff --git a/sdcm/stress/base.py b/sdcm/stress/base.py index 5a6305879f1..5147d292b8b 100644 --- a/sdcm/stress/base.py +++ b/sdcm/stress/base.py @@ -29,7 +29,7 @@ class DockerBasedStressThread: # pylint: disable=too-many-instance-attributes DOCKER_IMAGE_PARAM_NAME = "" # test yaml param that stores image - def __init__(self, loader_set, stress_cmd, timeout, stress_num=1, node_list=None, # pylint: disable=too-many-arguments + def __init__(self, loader_set, stress_cmd, timeout, stress_num=1, node_list=None, # pylint: disable=too-many-arguments # noqa: PLR0913 round_robin=False, params=None, stop_test_on_failure=True): self.loader_set: BaseLoaderSet = loader_set self.stress_cmd = stress_cmd @@ -132,7 +132,7 @@ def target_connection_bundle_file(self) -> str: def configure_event_on_failure(self, stress_event: StressEvent, exc: Exception | Failure): error_msg = format_stress_cmd_error(exc) - if (hasattr(exc, "result") and exc.result.failed) and exc.result.exited == 137: + if (hasattr(exc, "result") and exc.result.failed) and exc.result.exited == 137: # noqa: PLR2004 error_msg = f"Stress killed by test/teardown\n{error_msg}" stress_event.severity = Severity.WARNING elif self.stop_test_on_failure: diff --git a/sdcm/stress/latte_thread.py b/sdcm/stress/latte_thread.py index 359e6aeebf7..bdaaf9a75f4 100644 --- a/sdcm/stress/latte_thread.py +++ b/sdcm/stress/latte_thread.py @@ -86,9 +86,9 @@ def run(self): try: match = regex.search(line) if match: - for key, value in match.groupdict().items(): - value = float(value) - self.set_metric(self.operation, key, float(value)) + for key, _value in match.groupdict().items(): + 
value = float(_value) + self.set_metric(self.operation, key, value) except Exception: # pylint: disable=broad-except LOGGER.exception("fail to send metric") diff --git a/sdcm/stress_thread.py b/sdcm/stress_thread.py index f8617a66307..7033ebd12c5 100644 --- a/sdcm/stress_thread.py +++ b/sdcm/stress_thread.py @@ -64,7 +64,7 @@ def run(self) -> None: if pattern.search(line): if event.severity == Severity.CRITICAL and not self.stop_test_on_failure: - event = event.clone() # so we don't change the severity to other stress threads + event = event.clone() # so we don't change the severity to other stress threads # noqa: PLW2901 event.severity = Severity.ERROR event.add_info(node=self.node, line=line, line_number=line_number).publish() break # Stop iterating patterns to avoid creating two events for one line of the log @@ -92,7 +92,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): class CassandraStressThread(DockerBasedStressThread): # pylint: disable=too-many-instance-attributes DOCKER_IMAGE_PARAM_NAME = 'stress_image.cassandra-stress' - def __init__(self, loader_set, stress_cmd, timeout, stress_num=1, keyspace_num=1, keyspace_name='', compaction_strategy='', # pylint: disable=too-many-arguments + def __init__(self, loader_set, stress_cmd, timeout, stress_num=1, keyspace_num=1, keyspace_name='', compaction_strategy='', # pylint: disable=too-many-arguments # noqa: PLR0913 profile=None, node_list=None, round_robin=False, client_encrypt=False, stop_test_on_failure=True, params=None): super().__init__(loader_set=loader_set, stress_cmd=stress_cmd, timeout=timeout, @@ -105,7 +105,7 @@ def __init__(self, loader_set, stress_cmd, timeout, stress_num=1, keyspace_num=1 self.stop_test_on_failure = stop_test_on_failure self.compaction_strategy = compaction_strategy - def create_stress_cmd(self, cmd_runner, keyspace_idx, loader): # pylint: disable=too-many-branches + def create_stress_cmd(self, cmd_runner, keyspace_idx, loader): # pylint: disable=too-many-branches # noqa: PLR0912 
stress_cmd = self.stress_cmd if "no-warmup" not in stress_cmd: @@ -200,7 +200,7 @@ def _add_hdr_log_option(stress_cmd: str, hdr_log_name: str) -> str: cs_log_option = match.group(1) if "hdrfile" not in cs_log_option: stress_cmd = stress_cmd.replace("-log", f"-log hdrfile={hdr_log_name}") - else: + else: # noqa: PLR5501 if replacing_hdr_file := re.search(r"hdrfile=(.*?)\s", cs_log_option): stress_cmd = stress_cmd.replace( f"hdrfile={replacing_hdr_file.group(1)}", f"hdrfile={hdr_log_name}") @@ -223,7 +223,7 @@ def docker_image_name(self): else: return get_docker_image_by_version(self.node_list[0].get_scylla_binary_version()) - def _run_cs_stress(self, loader, loader_idx, cpu_idx, keyspace_idx): # pylint: disable=too-many-locals,too-many-branches,too-many-statements + def _run_cs_stress(self, loader, loader_idx, cpu_idx, keyspace_idx): # pylint: disable=too-many-locals,too-many-branches,too-many-statements # noqa: PLR0912, PLR0915 cleanup_context = contextlib.nullcontext() os.makedirs(loader.logdir, exist_ok=True) diff --git a/sdcm/tester.py b/sdcm/tester.py index 27c9f91cbbc..c460d31c698 100644 --- a/sdcm/tester.py +++ b/sdcm/tester.py @@ -287,7 +287,7 @@ class ClusterTester(db_stats.TestStatsMixin, unittest.TestCase): # pylint: disa def k8s_cluster(self): return self.k8s_clusters[0] if getattr(self, 'k8s_clusters', None) else None - def __init__(self, *args, **kwargs): # pylint: disable=too-many-statements,too-many-locals,too-many-branches + def __init__(self, *args, **kwargs): # pylint: disable=too-many-statements,too-many-locals,too-many-branches # noqa: PLR0915 super().__init__(*args) self.result = None self._results = [] @@ -420,7 +420,7 @@ def send_argus_heartbeat(client: ArgusSCTClient, stop_signal: threading.Event): return fail_count = 0 while not stop_signal.is_set(): - if fail_count > 5: + if fail_count > 5: # noqa: PLR2004 break try: client.sct_heartbeat() @@ -548,8 +548,8 @@ def argus_collect_logs(self, log_links: dict[str, list[str] | str]): try: 
logs_to_save = [] for name, link in log_links.items(): - link = LogLink(log_name=name, log_link=link) - logs_to_save.append(link) + argus_link = LogLink(log_name=name, log_link=link) + logs_to_save.append(argus_link) self.test_config.argus_client().submit_sct_logs(logs_to_save) except Exception: # pylint: disable=broad-except self.log.error("Error saving logs to Argus", exc_info=True) @@ -838,7 +838,7 @@ def prepare_kms_host(self) -> None: @teardown_on_exception @log_run_info - def setUp(self): # pylint: disable=too-many-branches,too-many-statements + def setUp(self): # pylint: disable=too-many-branches,too-many-statements # noqa: PLR0912, PLR0915 self.credentials = [] self.db_cluster = None @@ -1049,7 +1049,7 @@ def get_nemesis_class(self): self.log.debug("Nemesis threads %s", nemesis_threads) return nemesis_threads - def get_cluster_gce(self, loader_info, db_info, monitor_info): + def get_cluster_gce(self, loader_info, db_info, monitor_info): # noqa: PLR0912, PLR0915 # pylint: disable=too-many-locals,too-many-statements,too-many-branches if loader_info['n_nodes'] is None: loader_info['n_nodes'] = int(self.params.get('n_loaders')) @@ -1166,7 +1166,7 @@ def get_cluster_gce(self, loader_info, db_info, monitor_info): else: self.monitors = NoMonitorSet() - def get_cluster_azure(self, loader_info, db_info, monitor_info): + def get_cluster_azure(self, loader_info, db_info, monitor_info): # noqa: PLR0912 # pylint: disable=too-many-branches,too-many-statements,too-many-locals regions = self.params.get('azure_region_name') test_id = str(TestConfig().test_id()) @@ -1234,7 +1234,7 @@ def get_cluster_azure(self, loader_info, db_info, monitor_info): else: self.monitors = NoMonitorSet() - def get_cluster_aws(self, loader_info, db_info, monitor_info): + def get_cluster_aws(self, loader_info, db_info, monitor_info): # noqa: PLR0912, PLR0915 # pylint: disable=too-many-locals,too-many-statements,too-many-branches regions = self.params.get('region_name').split() @@ -1574,7 +1574,7 
@@ def get_cluster_k8s_gke(self, n_k8s_clusters: int): availability_zone=availability_zones[i % len(availability_zones)], cluster_uuid=( f"{self.test_config.test_id()[:8]}" - if n_k8s_clusters < 2 else f"{self.test_config.test_id()[:6]}-{i + 1}"), + if n_k8s_clusters < 2 else f"{self.test_config.test_id()[:6]}-{i + 1}"), # noqa: PLR2004 params=self.params, )) ParallelObject(timeout=7200, num_workers=n_k8s_clusters, objects=self.k8s_clusters).run( @@ -1666,7 +1666,7 @@ def get_cluster_k8s_eks(self, n_k8s_clusters: int): availability_zone=availability_zones[i % len(availability_zones)], cluster_uuid=( f"{self.test_config.test_id()[:8]}" - if n_k8s_clusters < 2 else f"{self.test_config.test_id()[:6]}-{i + 1}"), + if n_k8s_clusters < 2 else f"{self.test_config.test_id()[:6]}-{i + 1}"), # noqa: PLR2004 params=self.params, credentials=self.credentials, )) @@ -1806,7 +1806,7 @@ def run_stress(self, stress_cmd, duration=None): self.verify_stress_thread(cs_thread_pool=cs_thread_pool) # pylint: disable=too-many-arguments,too-many-return-statements - def run_stress_thread(self, stress_cmd, duration=None, stress_num=1, keyspace_num=1, profile=None, prefix='', # pylint: disable=too-many-arguments + def run_stress_thread(self, stress_cmd, duration=None, stress_num=1, keyspace_num=1, profile=None, prefix='', # pylint: disable=too-many-arguments # noqa: PLR0911, PLR0913 round_robin=False, stats_aggregate_cmds=True, keyspace_name=None, compaction_strategy='', use_single_loader=False, stop_test_on_failure=True): @@ -1843,7 +1843,7 @@ def run_stress_thread(self, stress_cmd, duration=None, stress_num=1, keyspace_nu raise ValueError(f'Unsupported stress command: "{stress_cmd[:50]}..."') # pylint: disable=too-many-arguments - def run_stress_cassandra_thread( + def run_stress_cassandra_thread( # noqa: PLR0913 self, stress_cmd, duration=None, stress_num=1, keyspace_num=1, profile=None, prefix='', round_robin=False, stats_aggregate_cmds=True, keyspace_name=None, compaction_strategy='', 
stop_test_on_failure=True, params=None, **_): # pylint: disable=too-many-locals @@ -1882,7 +1882,7 @@ def run_stress_cassandra_thread( return cs_thread # pylint: disable=too-many-arguments,unused-argument - def run_stress_thread_bench(self, stress_cmd, duration=None, round_robin=False, stats_aggregate_cmds=True, + def run_stress_thread_bench(self, stress_cmd, duration=None, round_robin=False, stats_aggregate_cmds=True, # noqa: PLR0913 stop_test_on_failure=True, **_): if duration: @@ -1915,7 +1915,7 @@ def run_stress_thread_bench(self, stress_cmd, duration=None, round_robin=False, self.alter_test_tables_encryption(scylla_encryption_options=scylla_encryption_options) return bench_thread - def run_stress_thread_harry(self, stress_cmd, duration=None, + def run_stress_thread_harry(self, stress_cmd, duration=None, # noqa: PLR0913 # pylint: disable=too-many-arguments,unused-argument round_robin=False, stats_aggregate_cmds=True, stop_test_on_failure=True, **_): # pylint: disable=too-many-arguments,unused-argument @@ -1941,7 +1941,7 @@ def run_stress_thread_harry(self, stress_cmd, duration=None, return harry_thread # pylint: disable=too-many-arguments - def run_ycsb_thread(self, stress_cmd, duration=None, stress_num=1, prefix='', + def run_ycsb_thread(self, stress_cmd, duration=None, stress_num=1, prefix='', # noqa: PLR0913 round_robin=False, stats_aggregate_cmds=True, **_): timeout = self.get_duration(duration) @@ -1956,7 +1956,7 @@ def run_ycsb_thread(self, stress_cmd, duration=None, stress_num=1, prefix='', node_list=self.db_cluster.nodes, round_robin=round_robin, params=self.params).run() - def run_latte_thread(self, stress_cmd, duration=None, stress_num=1, prefix='', + def run_latte_thread(self, stress_cmd, duration=None, stress_num=1, prefix='', # noqa: PLR0913 round_robin=False, stats_aggregate_cmds=True, stop_test_on_failure=True, **_): if duration: timeout = self.get_duration(duration) @@ -1980,7 +1980,7 @@ def run_latte_thread(self, stress_cmd, duration=None, 
stress_num=1, prefix='', params=self.params).run() # pylint: disable=too-many-arguments - def run_hydra_kcl_thread(self, stress_cmd, duration=None, stress_num=1, prefix='', + def run_hydra_kcl_thread(self, stress_cmd, duration=None, stress_num=1, prefix='', # noqa: PLR0913 round_robin=False, stats_aggregate_cmds=True, **_): timeout = self.get_duration(duration) @@ -1996,7 +1996,7 @@ def run_hydra_kcl_thread(self, stress_cmd, duration=None, stress_num=1, prefix=' round_robin=round_robin, params=self.params).run() # pylint: disable=too-many-arguments - def run_nosqlbench_thread(self, stress_cmd, duration=None, stress_num=1, prefix='', round_robin=False, + def run_nosqlbench_thread(self, stress_cmd, duration=None, stress_num=1, prefix='', round_robin=False, # noqa: PLR0913 stats_aggregate_cmds=True, stop_test_on_failure=True, **_): timeout = self.get_duration(duration) @@ -2028,7 +2028,7 @@ def run_table_compare_thread(self, stress_cmd, duration=None, stress_num=1, roun round_robin=round_robin, params=self.params).run() # pylint: disable=too-many-arguments - def run_ndbench_thread(self, stress_cmd, duration=None, stress_num=1, prefix='', + def run_ndbench_thread(self, stress_cmd, duration=None, stress_num=1, prefix='', # noqa: PLR0913 round_robin=False, stats_aggregate_cmds=True, **_): timeout = self.get_duration(duration) @@ -2044,7 +2044,7 @@ def run_ndbench_thread(self, stress_cmd, duration=None, stress_num=1, prefix='', round_robin=round_robin, params=self.params).run() # pylint: disable=too-many-arguments - def run_cdclog_reader_thread(self, stress_cmd, duration=None, stress_num=1, prefix='', + def run_cdclog_reader_thread(self, stress_cmd, duration=None, stress_num=1, prefix='', # noqa: PLR0913 round_robin=False, stats_aggregate_cmds=True, enable_batching=True, keyspace_name=None, base_table_name=None): timeout = self.get_duration(duration) @@ -2218,7 +2218,7 @@ def create_keyspace(self, keyspace_name, replication_factor): does_keyspace_exist = 
self.wait_validate_keyspace_existence(session, keyspace_name) return does_keyspace_exist - def create_table(self, name, key_type="varchar", # pylint: disable=too-many-arguments,too-many-branches + def create_table(self, name, key_type="varchar", # pylint: disable=too-many-arguments,too-many-branches # noqa: PLR0912, PLR0913 speculative_retry=None, read_repair=None, compression=None, gc_grace=None, columns=None, compaction=None, compact_storage=False, scylla_encryption_options=None, keyspace_name=None, @@ -2279,7 +2279,7 @@ def truncate_cf(self, ks_name: str, table_name: str, session: Session, truncate_ except Exception as ex: # pylint: disable=broad-except self.log.debug('Failed to truncate base table {0}.{1}. Error: {2}'.format(ks_name, table_name, str(ex))) - def create_materialized_view(self, ks_name, base_table_name, mv_name, mv_partition_key, mv_clustering_key, session, + def create_materialized_view(self, ks_name, base_table_name, mv_name, mv_partition_key, mv_clustering_key, session, # noqa: PLR0913 # pylint: disable=too-many-arguments mv_columns='*', speculative_retry=None, read_repair=None, compression=None, gc_grace=None, compact_storage=False): @@ -2372,7 +2372,7 @@ def _check_build_started(): def rows_to_list(rows): return [list(row) for row in rows] - def copy_table(self, node, src_keyspace, src_table, dest_keyspace, # pylint: disable=too-many-arguments + def copy_table(self, node, src_keyspace, src_table, dest_keyspace, # pylint: disable=too-many-arguments # noqa: PLR0913 dest_table, columns_list=None, copy_data=False): """ Create table with same structure as .. 
@@ -2397,7 +2397,7 @@ def copy_table(self, node, src_keyspace, src_table, dest_keyspace, # pylint: di return result - def copy_view(self, node, src_keyspace, src_view, dest_keyspace, # pylint: disable=too-many-arguments + def copy_view(self, node, src_keyspace, src_view, dest_keyspace, # pylint: disable=too-many-arguments # noqa: PLR0913 dest_table, columns_list=None, copy_data=False): """ Create table with same structure as .. @@ -2423,7 +2423,7 @@ def copy_view(self, node, src_keyspace, src_view, dest_keyspace, # pylint: disa return result - def create_table_as(self, node, src_keyspace, src_table, + def create_table_as(self, node, src_keyspace, src_table, # noqa: PLR0913 # pylint: disable=too-many-arguments,too-many-locals,inconsistent-return-statements dest_keyspace, dest_table, create_statement, columns_list=None): @@ -2476,7 +2476,7 @@ def create_table_as(self, node, src_keyspace, src_table, return True return False - def copy_data_between_tables(self, node, src_keyspace, src_table, dest_keyspace, + def copy_data_between_tables(self, node, src_keyspace, src_table, dest_keyspace, # noqa: PLR0913 # pylint: disable=too-many-arguments,too-many-locals dest_table, columns_list=None): """ Copy all data from one table/view to another table @@ -3342,12 +3342,11 @@ def get_nemesises_stats(self): nemesis_stats = {} if self.create_stats: nemesis_stats = self.get_doc_data(key='nemesis') + elif self.db_cluster: + for nem in self.db_cluster.nemesis: + nemesis_stats.update(nem.stats) else: - if self.db_cluster: - for nem in self.db_cluster.nemesis: - nemesis_stats.update(nem.stats) - else: - self.log.warning("No nemesises as cluster was not created") + self.log.warning("No nemesises as cluster was not created") if nemesis_stats: for detail in nemesis_stats.values(): @@ -3567,7 +3566,7 @@ def get_cs_range_histogram(self, stress_operation: str, tag_type=tag_type) return histogram_data[0] if histogram_data else {} - def get_cs_range_histogram_by_interval( + def 
get_cs_range_histogram_by_interval( # noqa: PLR0913 self, stress_operation: str, start_time: float, end_time: float, time_interval: int = 600, tag_type: CSHistogramTagTypes = CSHistogramTagTypes.LATENCY) -> list[dict[str, Any]]: diff --git a/sdcm/tombstone_gc_verification_thread.py b/sdcm/tombstone_gc_verification_thread.py index df42cde5a3c..f987b080a33 100644 --- a/sdcm/tombstone_gc_verification_thread.py +++ b/sdcm/tombstone_gc_verification_thread.py @@ -20,7 +20,7 @@ class TombstoneGcVerificationThread: # pylint: disable=too-many-arguments - def __init__(self, db_cluster: [BaseScyllaCluster, BaseCluster], duration: int, interval: int, + def __init__(self, db_cluster: [BaseScyllaCluster, BaseCluster], duration: int, interval: int, # noqa: PLR0913 termination_event: threading.Event, **kwargs): self._sstable_utils = SstableUtils(**kwargs) diff --git a/sdcm/utils/adaptive_timeouts/load_info_store.py b/sdcm/utils/adaptive_timeouts/load_info_store.py index 11660d30584..0f30ded02fd 100644 --- a/sdcm/utils/adaptive_timeouts/load_info_store.py +++ b/sdcm/utils/adaptive_timeouts/load_info_store.py @@ -189,7 +189,7 @@ class AdaptiveTimeoutStore(metaclass=Singleton): Used for future reference/node operations time tracking and calculations optimization.""" # pylint: disable=too-many-arguments - def store(self, metrics: dict[str, Any], operation: str, duration: int | float, timeout: int, + def store(self, metrics: dict[str, Any], operation: str, duration: int | float, timeout: int, # noqa: PLR0913 timeout_occurred: bool) -> None: pass @@ -209,7 +209,7 @@ def __init__(self): self._es = None # pylint: disable=too-many-arguments - def store(self, metrics: dict[str, Any], operation: str, duration: float, timeout: float, + def store(self, metrics: dict[str, Any], operation: str, duration: float, timeout: float, # noqa: PLR0913 timeout_occurred: bool): body = metrics body["test_id"] = TestConfig.test_id() diff --git a/sdcm/utils/alternator/api.py b/sdcm/utils/alternator/api.py 
index 9cf5f6a43ce..a7d5c56db7c 100644 --- a/sdcm/utils/alternator/api.py +++ b/sdcm/utils/alternator/api.py @@ -56,7 +56,7 @@ def set_write_isolation(self, node, isolation, table_name=consts.TABLE_NAME): ] dynamodb_api.client.tag_resource(ResourceArn=arn, Tags=tags) - def create_table(self, node, # pylint: disable=too-many-arguments + def create_table(self, node, # pylint: disable=too-many-arguments # noqa: PLR0913 schema=enums.YCSBSchemaTypes.HASH_AND_RANGE, isolation=None, table_name=consts.TABLE_NAME, wait_until_table_exists=True, **kwargs) -> Table: if isinstance(schema, enums.YCSBSchemaTypes): @@ -114,7 +114,7 @@ def _scan_table(part_scan_idx=None): return list(chain(*scan_result)) if len(scan_result) > 1 else scan_result return _scan_table() - def batch_write_actions(self, node, # pylint:disable=too-many-arguments,dangerous-default-value + def batch_write_actions(self, node, # pylint:disable=too-many-arguments,dangerous-default-value # noqa: PLR0913 table_name=consts.TABLE_NAME, new_items=None, delete_items=None, schema=schemas.HASH_SCHEMA): dynamodb_api = self.get_dynamodb_api(node=node) diff --git a/sdcm/utils/argus.py b/sdcm/utils/argus.py index 21e99ee5e9f..6e435ab80c8 100644 --- a/sdcm/utils/argus.py +++ b/sdcm/utils/argus.py @@ -43,7 +43,7 @@ def terminate_resource_in_argus(client: ArgusSCTClient, resource_name: str): try: client.terminate_resource(name=resource_name, reason="clean-resources: Graceful Termination") except ArgusClientError as exc: - if len(exc.args) >= 3 and exc.args[2] == 404: + if len(exc.args) >= 3 and exc.args[2] == 404: # noqa: PLR2004 LOGGER.warning("%s doesn't exist in Argus", resource_name) else: LOGGER.error("Failure to communicate resource deletion to Argus", exc_info=True) diff --git a/sdcm/utils/azure_region.py b/sdcm/utils/azure_region.py index 9fde5a88596..55fb03d81a5 100644 --- a/sdcm/utils/azure_region.py +++ b/sdcm/utils/azure_region.py @@ -289,7 +289,7 @@ def create_network_interface(self, parameters=parameters, 
).result() - def create_virtual_machine(self, # pylint: disable=too-many-arguments + def create_virtual_machine(self, # pylint: disable=too-many-arguments # noqa: PLR0913 vm_name: str, vm_size: str, image: dict[str, str], diff --git a/sdcm/utils/cdc/options.py b/sdcm/utils/cdc/options.py index 9a9a4f21a66..a57907f4c35 100644 --- a/sdcm/utils/cdc/options.py +++ b/sdcm/utils/cdc/options.py @@ -60,12 +60,13 @@ def parse_cdc_blob_settings(blob: bytes) -> Dict[str, Union[bool, str]]: for regexp in CDC_SETTINGS_REGEXP: res = re.search(regexp, blob.decode()) if res: - for key, value in res.groupdict().items(): - if value in ("false", "off"): + for key, _value in res.groupdict().items(): + if _value in ("false", "off"): value = False - elif value == 'true': + elif _value == 'true': value = True - + else: + value = _value cdc_settings[key] = value return cdc_settings diff --git a/sdcm/utils/cloud_monitor/resources/__init__.py b/sdcm/utils/cloud_monitor/resources/__init__.py index 5e46b1e76c7..9ece49d5cc7 100644 --- a/sdcm/utils/cloud_monitor/resources/__init__.py +++ b/sdcm/utils/cloud_monitor/resources/__init__.py @@ -7,7 +7,7 @@ class CloudInstance: # pylint: disable=too-few-public-methods,too-many-instance-attributes pricing = None # need to be set in the child class - def __init__(self, cloud, name, instance_id, region_az, state, lifecycle, instance_type, owner, create_time, keep, project='N/A'): # pylint: disable=too-many-arguments + def __init__(self, cloud, name, instance_id, region_az, state, lifecycle, instance_type, owner, create_time, keep, project='N/A'): # pylint: disable=too-many-arguments # noqa: PLR0913 self.cloud = cloud self.name = name self.instance_id = instance_id diff --git a/sdcm/utils/cloud_monitor/resources/static_ips.py b/sdcm/utils/cloud_monitor/resources/static_ips.py index d408f3afe09..ca8995257fb 100644 --- a/sdcm/utils/cloud_monitor/resources/static_ips.py +++ b/sdcm/utils/cloud_monitor/resources/static_ips.py @@ -11,7 +11,7 @@ class StaticIP: 
# pylint: disable=too-few-public-methods - def __init__(self, cloud, name, address, region, used_by, owner): # pylint: disable=too-many-arguments + def __init__(self, cloud, name, address, region, used_by, owner): # pylint: disable=too-many-arguments # noqa: PLR0913 self.cloud = cloud self.name = name self.address = address diff --git a/sdcm/utils/common.py b/sdcm/utils/common.py index d303793cb25..7a7c6a89cba 100644 --- a/sdcm/utils/common.py +++ b/sdcm/utils/common.py @@ -131,7 +131,7 @@ def _remote_get_file(remoter, src, dst, user_agent=None): return remoter.run(cmd, ignore_status=True) -def remote_get_file(remoter, src, dst, hash_expected=None, retries=1, user_agent=None): # pylint: disable=too-many-arguments +def remote_get_file(remoter, src, dst, hash_expected=None, retries=1, user_agent=None): # pylint: disable=too-many-arguments # noqa: PLR0913 _remote_get_file(remoter, src, dst, user_agent) if not hash_expected: return @@ -789,7 +789,7 @@ def aws_tags_to_dict(tags_list): # pylint: disable=too-many-arguments -def list_instances_aws(tags_dict=None, region_name=None, running=False, group_as_region=False, verbose=False, availability_zone=None): +def list_instances_aws(tags_dict=None, region_name=None, running=False, group_as_region=False, verbose=False, availability_zone=None): # noqa: PLR0913 """ list all instances with specific tags AWS @@ -1461,7 +1461,7 @@ def delete_instance(instance_with_tags: tuple[GceInstance, dict]): timeout=60).run(delete_instance, ignore_exceptions=False) -def clean_instances_azure(tags_dict: dict, regions=None, dry_run=False): +def clean_instances_azure(tags_dict: dict, regions=None, dry_run=False): # noqa: PLR0912 """ Cleans instances by tags. 
@@ -2191,15 +2191,14 @@ def download_dir_from_cloud(url): LOGGER.info("Downloading [%s] to [%s]", url, tmp_dir) if os.path.isdir(tmp_dir) and os.listdir(tmp_dir): LOGGER.warning("[{}] already exists, skipping download".format(tmp_dir)) + elif url.startswith('s3://'): + s3_download_dir(parsed.hostname, parsed.path, tmp_dir) + elif url.startswith('gs://'): + gce_download_dir(parsed.hostname, parsed.path, tmp_dir) + elif os.path.isdir(url): + tmp_dir = url else: - if url.startswith('s3://'): - s3_download_dir(parsed.hostname, parsed.path, tmp_dir) - elif url.startswith('gs://'): - gce_download_dir(parsed.hostname, parsed.path, tmp_dir) - elif os.path.isdir(url): - tmp_dir = url - else: - raise ValueError("Unsupported url schema or non-existing directory [{}]".format(url)) + raise ValueError("Unsupported url schema or non-existing directory [{}]".format(url)) if not tmp_dir.endswith('/'): tmp_dir += '/' LOGGER.info("Finished downloading [%s]", url) @@ -2855,7 +2854,7 @@ def walk_thru_data(data, path: str, separator: str = '/') -> Any: if not name: continue if name[0] == '[' and name[-1] == ']': - name = name[1:-1] + name = name[1:-1] # noqa: PLW2901 if name.isalnum() and isinstance(current_value, (list, tuple, set)): try: current_value = current_value[int(name)] diff --git a/sdcm/utils/csrangehistogram.py b/sdcm/utils/csrangehistogram.py index a31e7e00b83..0c12095f31f 100644 --- a/sdcm/utils/csrangehistogram.py +++ b/sdcm/utils/csrangehistogram.py @@ -34,7 +34,7 @@ class CSWorkloadTypes(Enum): MIXED = "mixed" -def make_cs_range_histogram_summary( # pylint: disable=too-many-arguments,unused-argument +def make_cs_range_histogram_summary( # pylint: disable=too-many-arguments,unused-argument # noqa: PLR0913 workload: CSWorkloadTypes, pattern: str = "", base_path="", start_time: int | float = 0, end_time: int | float = sys.maxsize, absolute_time: bool = True, tag_type: CSHistogramTagTypes = CSHistogramTagTypes.LATENCY) -> list[dict[str, dict[str, int]]]: """ @@ -49,7 +49,7 
@@ def make_cs_range_histogram_summary( # pylint: disable=too-many-arguments,unuse return builder.build_histogram_summary(base_path) -def make_cs_range_histogram_summary_by_interval( # pylint: disable=too-many-arguments,unused-argument +def make_cs_range_histogram_summary_by_interval( # pylint: disable=too-many-arguments,unused-argument # noqa: PLR0913 workload: CSWorkloadTypes, path: str, start_time: int | float, end_time: int | float, interval=TIME_INTERVAL, absolute_time=True, tag_type: CSHistogramTagTypes = CSHistogramTagTypes.LATENCY) -> list[dict[str, dict[str, int]]]: """ diff --git a/sdcm/utils/data_validator.py b/sdcm/utils/data_validator.py index 37bfc1a07fc..32442b003a9 100644 --- a/sdcm/utils/data_validator.py +++ b/sdcm/utils/data_validator.py @@ -462,33 +462,31 @@ def validate_range_not_expected_to_change(self, session, during_nemesis=False): message=f"Actual dataset length more then expected ({len(actual_result)} > {len(expected_result)}). " f"Issue #6181" ).publish() - else: - if not during_nemesis: - assert len(actual_result) == len(expected_result), \ - 'One or more rows are not as expected, suspected LWT wrong update. ' \ - 'Actual dataset length: {}, Expected dataset length: {}'.format(len(actual_result), - len(expected_result)) + elif not during_nemesis: + assert len(actual_result) == len(expected_result), \ + 'One or more rows are not as expected, suspected LWT wrong update. ' \ + 'Actual dataset length: {}, Expected dataset length: {}'.format(len(actual_result), + len(expected_result)) - assert actual_result == expected_result, \ - 'One or more rows are not as expected, suspected LWT wrong update' + assert actual_result == expected_result, \ + 'One or more rows are not as expected, suspected LWT wrong update' - # Raise info event at the end of the test only. 
- DataValidatorEvent.ImmutableRowsValidator( - severity=Severity.NORMAL, - message="Validation immutable rows finished successfully" - ).publish() - else: - if len(actual_result) < len(expected_result): - DataValidatorEvent.ImmutableRowsValidator( - severity=Severity.ERROR, - error=f"Verify immutable rows. " - f"One or more rows not found as expected, suspected LWT wrong update. " - f"Actual dataset length: {len(actual_result)}, " - f"Expected dataset length: {len(expected_result)}" - ).publish() - else: - LOGGER.debug('Verify immutable rows. Actual dataset length: %s, Expected dataset length: %s', - len(actual_result), len(expected_result)) + # Raise info event at the end of the test only. + DataValidatorEvent.ImmutableRowsValidator( + severity=Severity.NORMAL, + message="Validation immutable rows finished successfully" + ).publish() + elif len(actual_result) < len(expected_result): + DataValidatorEvent.ImmutableRowsValidator( + severity=Severity.ERROR, + error=f"Verify immutable rows. " + f"One or more rows not found as expected, suspected LWT wrong update. " + f"Actual dataset length: {len(actual_result)}, " + f"Expected dataset length: {len(expected_result)}" + ).publish() + else: + LOGGER.debug('Verify immutable rows. 
Actual dataset length: %s, Expected dataset length: %s', + len(actual_result), len(expected_result)) def list_of_view_names_for_update_test(self): # List of tuples of correlated view names for validation: before update, after update, expected data @@ -575,7 +573,7 @@ def save_data_for_debugging(data_for_validation: DataForValidation) -> str: return logdir # pylint: disable=too-many-locals,too-many-branches - def analyze_updated_data_and_save_in_file(self, data_for_validation: DataForValidation, session, logdir: str): + def analyze_updated_data_and_save_in_file(self, data_for_validation: DataForValidation, session, logdir: str): # noqa: PLR0912 actual_data_set = {tuple(item) for item in sorted(data_for_validation.actual_data)} expected_data_set = {tuple(item) for item in sorted(data_for_validation.expected_data)} difference_set = expected_data_set - actual_data_set # missed in the actual data after update diff --git a/sdcm/utils/database_query_utils.py b/sdcm/utils/database_query_utils.py index dbc954bed36..c1c7f5c9aca 100644 --- a/sdcm/utils/database_query_utils.py +++ b/sdcm/utils/database_query_utils.py @@ -39,7 +39,7 @@ class PartitionsValidationAttributes: # pylint: disable=too-few-public-methods, PARTITIONS_ROWS_BEFORE = "partitions_rows_before" PARTITIONS_ROWS_AFTER = "partitions_rows_after" - def __init__(self, tester, table_name: str, primary_key_column: str, limit_rows_number: int = 0, # pylint: disable=too-many-arguments + def __init__(self, tester, table_name: str, primary_key_column: str, limit_rows_number: int = 0, # pylint: disable=too-many-arguments # noqa: PLR0913 max_partitions_in_test_table: str | None = None, partition_range_with_data_validation: str | None = None, validate_partitions: bool = False): """ @@ -198,7 +198,7 @@ def get_partition_keys(ks_cf: str, session, pk_name: str = 'pk', limit: int = No return pks_list -def fetch_all_rows(session, default_fetch_size, statement, retries: int = 4, timeout: int = None, # pylint: 
disable=too-many-arguments +def fetch_all_rows(session, default_fetch_size, statement, retries: int = 4, timeout: int = None, # pylint: disable=too-many-arguments # noqa: PLR0913 raise_on_exceeded: bool = False, verbose=True): """ ******* Caution ******* diff --git a/sdcm/utils/decorators.py b/sdcm/utils/decorators.py index 12292f42318..878343f488d 100644 --- a/sdcm/utils/decorators.py +++ b/sdcm/utils/decorators.py @@ -38,7 +38,7 @@ class retrying: # pylint: disable=invalid-name,too-few-public-methods """ # pylint: disable=too-many-arguments,redefined-outer-name - def __init__(self, n=3, sleep_time=1, + def __init__(self, n=3, sleep_time=1, # noqa: PLR0913 allowed_exceptions=(Exception,), message="", timeout=0, raise_on_exceeded=True): if n: @@ -155,7 +155,7 @@ def wrapped(*args, **kwargs): return wrapped -def latency_calculator_decorator(original_function: Optional[Callable] = None, *, legend: Optional[str] = None): +def latency_calculator_decorator(original_function: Optional[Callable] = None, *, legend: Optional[str] = None): # noqa: PLR0915 """ Gets the start time, end time and then calculates the latency based on function 'calculate_latency'. 
@@ -168,7 +168,7 @@ def latency_calculator_decorator(original_function: Optional[Callable] = None, * def wrapper(func): @wraps(func) - def wrapped(*args, **kwargs): # pylint: disable=too-many-branches, too-many-locals + def wrapped(*args, **kwargs): # pylint: disable=too-many-branches, too-many-locals # noqa: PLR0912 start = time.time() start_node_list = args[0].cluster.nodes[:] reactor_stall_stats = {} diff --git a/sdcm/utils/docker_remote.py b/sdcm/utils/docker_remote.py index 3c49abf174a..395255c9751 100644 --- a/sdcm/utils/docker_remote.py +++ b/sdcm/utils/docker_remote.py @@ -9,7 +9,7 @@ class RemoteDocker(BaseNode): - def __init__(self, node, image_name, ports=None, command_line="tail -f /dev/null", extra_docker_opts="", docker_network=None): # pylint: disable=too-many-arguments + def __init__(self, node, image_name, ports=None, command_line="tail -f /dev/null", extra_docker_opts="", docker_network=None): # pylint: disable=too-many-arguments # noqa: PLR0913 self.node = node self._internal_ip_address = None self.log = LOGGER diff --git a/sdcm/utils/docker_utils.py b/sdcm/utils/docker_utils.py index 75664ce80f7..1f52ad513e4 100644 --- a/sdcm/utils/docker_utils.py +++ b/sdcm/utils/docker_utils.py @@ -174,8 +174,8 @@ def _get_attr_for_name(instance: INodeWithContainerManager, if not name_only_lookup: attr_candidate_list.append((attr, ())) - for attr_candidate, args in attr_candidate_list: - attr_candidate = getattr(instance, attr_candidate, None) + for _attr_candidate, args in attr_candidate_list: + attr_candidate = getattr(instance, _attr_candidate, None) if callable(attr_candidate): attr_candidate = attr_candidate(*args) if attr_candidate is not None: @@ -287,7 +287,7 @@ def set_all_containers_keep_alive(cls, instance: INodeWithContainerManager) -> N cls.set_container_keep_alive(instance, name) @classmethod - def ssh_copy_id(cls, # pylint: disable=too-many-arguments + def ssh_copy_id(cls, # pylint: disable=too-many-arguments # noqa: PLR0913 instance: 
INodeWithContainerManager, name: str, user: str, diff --git a/sdcm/utils/file.py b/sdcm/utils/file.py index cba7073b675..53dca2e29fc 100644 --- a/sdcm/utils/file.py +++ b/sdcm/utils/file.py @@ -35,7 +35,7 @@ class File: """ # pylint: disable=too-many-arguments - def __init__(self, path: str, mode: str = 'r', buffering: Optional[int] = None, + def __init__(self, path: str, mode: str = 'r', buffering: Optional[int] = None, # noqa: PLR0913 encoding: Optional[str] = None, errors: Optional[str] = None, newline: Optional[str] = None, closefd: bool = True): self.path = path diff --git a/sdcm/utils/gce_region.py b/sdcm/utils/gce_region.py index 2c49bfe2020..b4fbfcbcc38 100644 --- a/sdcm/utils/gce_region.py +++ b/sdcm/utils/gce_region.py @@ -178,7 +178,7 @@ def create_backup_service_account(self): }).execute() LOGGER.info('Created service account: %s', backup_service_account['email']) except googleapiclient.errors.HttpError as exc: - if not exc.status_code == 409: + if not exc.status_code == 409: # noqa: PLR2004 raise service_accounts = self.iam.projects().serviceAccounts().list( # pylint: disable=no-member name=f'projects/{self.project}', pageSize=100).execute() diff --git a/sdcm/utils/gce_utils.py b/sdcm/utils/gce_utils.py index 89feb4dd44d..21268fa035a 100644 --- a/sdcm/utils/gce_utils.py +++ b/sdcm/utils/gce_utils.py @@ -318,7 +318,7 @@ def wait_for_extended_operation( return result -def disk_from_image( # pylint: disable=too-many-arguments +def disk_from_image( # pylint: disable=too-many-arguments # noqa: PLR0913 disk_type: str, boot: bool, disk_size_gb: int = None, @@ -370,7 +370,7 @@ def disk_from_image( # pylint: disable=too-many-arguments return boot_disk -def create_instance( # pylint: disable=too-many-arguments,too-many-locals,too-many-branches,too-many-statements +def create_instance( # pylint: disable=too-many-arguments,too-many-locals,too-many-branches,too-many-statements # noqa: PLR0913, PLR0912, PLR0915 project_id: str, zone: str, instance_name: str, diff 
--git a/sdcm/utils/get_username.py b/sdcm/utils/get_username.py index 68e87ef2f70..c0dc06ab5b5 100644 --- a/sdcm/utils/get_username.py +++ b/sdcm/utils/get_username.py @@ -24,7 +24,7 @@ def get_email_user(email_addr: str) -> str: return email_addr.strip().split("@")[0] -def get_username() -> str: # pylint: disable=too-many-return-statements +def get_username() -> str: # pylint: disable=too-many-return-statements # noqa: PLR0911 # First check that we running on Jenkins try to get user email email = os.environ.get('BUILD_USER_EMAIL') if is_email_in_scylladb_domain(email): diff --git a/sdcm/utils/k8s/__init__.py b/sdcm/utils/k8s/__init__.py index 643f0e60e32..cf34c4265c1 100644 --- a/sdcm/utils/k8s/__init__.py +++ b/sdcm/utils/k8s/__init__.py @@ -300,7 +300,7 @@ def kubectl_cmd(kluster, *command, namespace=None, ignore_k8s_server_url=False): return " ".join(cmd) @classmethod - def kubectl(cls, kluster, *command, namespace: Optional[str] = None, timeout: int = KUBECTL_TIMEOUT, + def kubectl(cls, kluster, *command, namespace: Optional[str] = None, timeout: int = KUBECTL_TIMEOUT, # noqa: PLR0913 remoter: Optional['KubernetesCmdRunner'] = None, ignore_status: bool = False, verbose: bool = True): cmd = cls.kubectl_cmd(kluster, *command, namespace=namespace, ignore_k8s_server_url=bool(remoter)) if remoter is None: @@ -308,7 +308,7 @@ def kubectl(cls, kluster, *command, namespace: Optional[str] = None, timeout: in return remoter.run(cmd, timeout=timeout, ignore_status=ignore_status, verbose=verbose) @classmethod - def kubectl_multi_cmd(cls, kluster, *command, namespace: Optional[str] = None, timeout: int = KUBECTL_TIMEOUT, + def kubectl_multi_cmd(cls, kluster, *command, namespace: Optional[str] = None, timeout: int = KUBECTL_TIMEOUT, # noqa: PLR0913 remoter: Optional['KubernetesCmdRunner'] = None, ignore_status: bool = False, verbose: bool = True): total_command = ' '.join(command) @@ -325,7 +325,7 @@ def kubectl_multi_cmd(cls, kluster, *command, namespace: Optional[str] = 
None, t return remoter.run(final_command, timeout=timeout, ignore_status=ignore_status, verbose=verbose) @classmethod - def apply_file(cls, kluster, config_path, namespace=None, # pylint: disable=too-many-locals,too-many-branches + def apply_file(cls, kluster, config_path, namespace=None, # pylint: disable=too-many-locals,too-many-branches # noqa: PLR0913 timeout=KUBECTL_TIMEOUT, environ=None, envsubst=True, modifiers: List[Callable] = None, server_side=False): if environ: @@ -378,14 +378,14 @@ def run_kubectl(file_name): run_kubectl(temp_file.name) @classmethod - def copy_file(cls, kluster, src, dst, container=None, timeout=KUBECTL_TIMEOUT): + def copy_file(cls, kluster, src, dst, container=None, timeout=KUBECTL_TIMEOUT): # noqa: PLR0913 command = ["cp", src, dst] if container: command.extend(("-c", container)) cls.kubectl(kluster, *command, timeout=timeout) @classmethod - def expose_pod_ports(cls, kluster, pod_name, ports, labels=None, selector=None, namespace=None, timeout=KUBECTL_TIMEOUT): + def expose_pod_ports(cls, kluster, pod_name, ports, labels=None, selector=None, namespace=None, timeout=KUBECTL_TIMEOUT): # noqa: PLR0913 command = ["expose pod", pod_name, "--type=LoadBalancer", "--port", ",".join(map(str, ports)), f"--name={pod_name}-loadbalancer", ] @@ -421,7 +421,7 @@ def patch_kubectl_auth_config(cls, config, auth_type, cmd: str, args: list): raise ValueError(f'Unknown auth-type {auth_type}') @staticmethod - def wait_for_pods_with_condition(kluster, condition: str, total_pods: Union[int, Callable], + def wait_for_pods_with_condition(kluster, condition: str, total_pods: Union[int, Callable], # noqa: PLR0913 timeout: float, namespace: str, selector: str = '', sleep_between_retries: int = 10): @@ -432,7 +432,7 @@ def wait_for_pods_with_condition(kluster, condition: str, total_pods: Union[int, f"from the '{namespace}' namespace with '{condition}' condition to be true...") @timeout_decor(message=waiter_message, timeout=timeout * 60, 
sleep_time=sleep_between_retries) - def wait_for_condition(kluster, condition, total_pods, timeout, namespace, selector): + def wait_for_condition(kluster, condition, total_pods, timeout, namespace, selector): # noqa: PLR0913 # To make it more informative in worst case scenario made it repeat 5 times, by readiness_timeout // 5 if selector: selector = selector if selector.startswith("--selector=") else f'--selector={selector}' @@ -453,7 +453,7 @@ def wait_for_condition(kluster, condition, total_pods, timeout, namespace, selec wait_for_condition(kluster, condition, total_pods, timeout, namespace, selector) @staticmethod - def wait_for_pods_readiness(kluster, total_pods: Union[int, Callable], readiness_timeout: float, + def wait_for_pods_readiness(kluster, total_pods: Union[int, Callable], readiness_timeout: float, # noqa: PLR0913 namespace: str, selector: str = '', sleep_between_retries: int = 10): KubernetesOps.wait_for_pods_with_condition(kluster, condition='condition=Ready', @@ -464,7 +464,7 @@ def wait_for_pods_readiness(kluster, total_pods: Union[int, Callable], readiness sleep_between_retries=sleep_between_retries) @staticmethod - def wait_for_pods_running(kluster, total_pods: Union[int, Callable], timeout: float, + def wait_for_pods_running(kluster, total_pods: Union[int, Callable], timeout: float, # noqa: PLR0913 namespace: str, selector: str = '', sleep_between_retries: int = 10): KubernetesOps.wait_for_pods_with_condition(kluster, condition="jsonpath='{.status.phase}'=Running", @@ -571,7 +571,7 @@ def gather_k8s_logs_by_operator(cls, kluster, logdir_path=None): operator_bin_path, exc, extra={'prefix': kluster.region_name}) @classmethod - def gather_k8s_logs(cls, logdir_path, kubectl=None, namespaces=None) -> None: # pylint: disable=too-many-locals,too-many-branches,too-many-statements + def gather_k8s_logs(cls, logdir_path, kubectl=None, namespaces=None) -> None: # pylint: disable=too-many-locals,too-many-branches,too-many-statements # noqa: PLR0912, 
PLR0915 # NOTE: reuse data where possible to minimize spent time due to API limiter restrictions LOGGER.info("K8S-LOGS: starting logs gathering") logdir = Path(logdir_path) @@ -627,10 +627,10 @@ def gather_k8s_logs(cls, logdir_path, kubectl=None, namespaces=None) -> None: # resource_type, namespace) resource_dir = logdir / namespace_scope_dir / namespace / resource_type os.makedirs(resource_dir, exist_ok=True) - for res in resources_wide.split("\n"): - if not re.match(f"{namespace} ", res): + for _res in resources_wide.split("\n"): + if not re.match(f"{namespace} ", _res): continue - res = res.split()[1] + res = _res.split()[1] logfile = resource_dir / f"{res}.yaml" res_stdout = kubectl( f"get {resource_type}/{res} -o yaml 2>&1 | tee {logfile}", @@ -730,7 +730,7 @@ def helm(self, kluster, *command: str, namespace: Optional[str] = None, values: if values_file: values_file.close() - def _helm_install_or_upgrade(self, + def _helm_install_or_upgrade(self, # noqa: PLR0913 operation_type: str, kluster, target_chart_name: str, @@ -923,7 +923,7 @@ def stop(self, timeout=None) -> None: self.watcher.close() self.join(timeout) - def _process_line(self, line: str) -> None: # pylint: disable=too-many-branches,inconsistent-return-statements + def _process_line(self, line: str) -> None: # pylint: disable=too-many-branches,inconsistent-return-statements # noqa: PLR0912 # NOTE: line is expected to have following structure: # {"type": "ADDED", # "object": { @@ -1070,9 +1070,9 @@ def register_callbacks(self, callbacks: Union[Callable, list[Callable]], for callback in callbacks: if callable(callback): - callback = [callback, [], {}] + callback = [callback, [], {}] # noqa: PLW2901 if (isinstance(callback, (tuple, list)) - and len(callback) == 3 + and len(callback) == 3 # noqa: PLR2004 and callable(callback[0]) and isinstance(callback[1], (tuple, list)) and isinstance(callback[2], dict)): diff --git a/sdcm/utils/k8s/chaos_mesh.py b/sdcm/utils/k8s/chaos_mesh.py index 
790eada1aa3..1649a66a64d 100644 --- a/sdcm/utils/k8s/chaos_mesh.py +++ b/sdcm/utils/k8s/chaos_mesh.py @@ -165,7 +165,7 @@ def start(self): self._end_time = time.time() + self._timeout # pylint: disable=too-many-return-statements - def get_status(self) -> ExperimentStatus: + def get_status(self) -> ExperimentStatus: # noqa: PLR0911 """Gets status of chaos-mesh experiment.""" result = self._k8s_cluster.kubectl( f"get {self.CHAOS_KIND} {self._name} -n {self._namespace} -o jsonpath='{{.status.conditions}}'", verbose=False) @@ -242,7 +242,7 @@ class MemoryStressExperiment(ChaosMeshExperiment): CHAOS_KIND = "StressChaos" # pylint: disable=too-many-arguments - def __init__(self, pod: "sdcm.cluster_k8s.BasePodContainer", duration: str, workers: int, size: str, time_to_reach: str | None = None): + def __init__(self, pod: "sdcm.cluster_k8s.BasePodContainer", duration: str, workers: int, size: str, time_to_reach: str | None = None): # noqa: PLR0913 """Stresses memory on scylla pod using https://github.com/chaos-mesh/memStress :param sdcm.cluster_k8s.BasePodContainer pod: affected scylla pod @@ -306,7 +306,7 @@ class IOFaultChaosExperiment(ChaosMeshExperiment): CHAOS_KIND = "IOChaos" # pylint: disable=too-many-arguments - def __init__(self, pod: "sdcm.cluster_k8s.BasePodContainer", duration: str, error: DiskError, error_probability: int, + def __init__(self, pod: "sdcm.cluster_k8s.BasePodContainer", duration: str, error: DiskError, error_probability: int, # noqa: PLR0913 methods: list[DiskMethod], volume_path: str, path: str | None = None): """Induces disk fault (programatically) using IOChaos: https://chaos-mesh.org/docs/simulate-io-chaos-on-kubernetes/ @@ -410,7 +410,7 @@ class NetworkDelayExperiment(ChaosMeshExperiment): CHAOS_KIND = "NetworkChaos" # pylint: disable=too-many-arguments - def __init__(self, pod: "sdcm.cluster_k8s.BasePodContainer", duration: str, latency: str, + def __init__(self, pod: "sdcm.cluster_k8s.BasePodContainer", duration: str, latency: str, # noqa: 
PLR0913 correlation: int = 0, jitter: str = "0"): """Simulate network delay fault into a specified Pod for a period of time. @@ -444,7 +444,7 @@ class NetworkBandwidthLimitExperiment(ChaosMeshExperiment): CHAOS_KIND = "NetworkChaos" # pylint: disable=too-many-arguments - def __init__(self, pod: "sdcm.cluster_k8s.BasePodContainer", duration: str, + def __init__(self, pod: "sdcm.cluster_k8s.BasePodContainer", duration: str, # noqa: PLR0913 rate: str, limit: int, buffer: int): """Simulate network bandwidth limit fault into a specified Pod for a period of time. diff --git a/sdcm/utils/latency.py b/sdcm/utils/latency.py index 2b1013572bc..7454951101c 100644 --- a/sdcm/utils/latency.py +++ b/sdcm/utils/latency.py @@ -21,7 +21,7 @@ def avg(values): # pylint: disable=too-many-arguments,too-many-locals,too-many-nested-blocks,too-many-branches -def collect_latency(monitor_node, start, end, load_type, cluster, nodes_list): +def collect_latency(monitor_node, start, end, load_type, cluster, nodes_list): # noqa: PLR0913, PLR0912 res = {} prometheus = PrometheusDBStats(host=monitor_node.external_address) duration = int(end - start) @@ -32,7 +32,7 @@ def collect_latency(monitor_node, start, end, load_type, cluster, nodes_list): for precision in cassandra_stress_precision: metric = f'c-s {precision}' if precision == 'max' else f'c-s P{precision}' if not precision == 'max': - precision = f'perc_{precision}' + precision = f'perc_{precision}' # noqa: PLW2901 query = f'sct_cassandra_stress_{load_type}_gauge{{type="lat_{precision}"}}' query_res = prometheus.query(query, start, end) latency_values_lst = [] @@ -88,7 +88,7 @@ def collect_latency(monitor_node, start, end, load_type, cluster, nodes_list): NON_METRIC_FIELDS = ["screenshots", "hdr", "hdr_summary", "duration", "duration_in_sec", "reactor_stalls_stats"] -def calculate_latency(latency_results): +def calculate_latency(latency_results): # noqa: PLR0912 result_dict = {} all_keys = list(latency_results.keys()) steady_key = '' @@ 
-126,16 +126,16 @@ def calculate_latency(latency_results): float(format((average - steady_val), '.2f')) if 'color' not in result_dict[key]: result_dict[key]['color'] = {} - if average - steady_val >= 10: + if average - steady_val >= 10: # noqa: PLR2004 result_dict[key]['color'][temp_key] = 'red' - elif average - steady_val >= 5: + elif average - steady_val >= 5: # noqa: PLR2004 result_dict[key]['color'][temp_key] = 'yellow' else: result_dict[key]['color'][temp_key] = 'blue' return result_dict -def analyze_hdr_percentiles(result_stats: dict[str, Any]) -> dict[str, Any]: +def analyze_hdr_percentiles(result_stats: dict[str, Any]) -> dict[str, Any]: # noqa: PLR0912 top_limit_perc_values = { "replace_node": { "percentile_90": 15, diff --git a/sdcm/utils/ldap.py b/sdcm/utils/ldap.py index b9944b0c9b5..54f01b43e2f 100644 --- a/sdcm/utils/ldap.py +++ b/sdcm/utils/ldap.py @@ -34,7 +34,7 @@ LDAP_PASSWORD = 'scylla-0' LDAP_ROLE = 'scylla_ldap' LDAP_USERS = ['scylla-qa', 'dummy-user'] -LDAP_BASE_OBJECT = (lambda l: ','.join([f'dc={part}' for part in l.split('.')]))(LDAP_DOMAIN) +LDAP_BASE_OBJECT = (lambda l: ','.join([f'dc={part}' for part in l.split('.')]))(LDAP_DOMAIN) # noqa: PLC3002 SASLAUTHD_AUTHENTICATOR = 'com.scylladb.auth.SaslauthdAuthenticator' diff --git a/sdcm/utils/log.py b/sdcm/utils/log.py index 0cd9383c82a..c0f04788abd 100644 --- a/sdcm/utils/log.py +++ b/sdcm/utils/log.py @@ -79,14 +79,14 @@ def replace_vars(obj, variables, obj_type=None): if issubclass(obj_type, dict): output = {} for attr_name, attr_value in obj.items(): - attr_name = replace_vars(attr_name, variables) - attr_value = replace_vars(attr_value, variables) + attr_name = replace_vars(attr_name, variables) # noqa: PLW2901 + attr_value = replace_vars(attr_value, variables) # noqa: PLW2901 output[attr_name] = attr_value # deepcode ignore UnhashableKey: you get same keys type as source return output if issubclass(obj_type, list): output = [] for element in obj: - element = replace_vars(element, 
variables) + element = replace_vars(element, variables) # noqa: PLW2901 output.append(element) # deepcode ignore InfiniteLoopByCollectionModification: Not even close return output if issubclass(obj_type, tuple): @@ -96,7 +96,7 @@ def replace_vars(obj, variables, obj_type=None): return obj -def configure_logging(exception_handler=None, # pylint: disable=too-many-arguments +def configure_logging(exception_handler=None, # pylint: disable=too-many-arguments # noqa: PLR0913 formatters=None, filters=None, handlers=None, loggers=None, config=None, variables=None): urllib3.disable_warnings() warnings.filterwarnings(action="ignore", message="unclosed", category=ResourceWarning) diff --git a/sdcm/utils/microbenchmarking/perf_simple_query_reporter.py b/sdcm/utils/microbenchmarking/perf_simple_query_reporter.py index e303ec09bf5..3c305a9c3a3 100644 --- a/sdcm/utils/microbenchmarking/perf_simple_query_reporter.py +++ b/sdcm/utils/microbenchmarking/perf_simple_query_reporter.py @@ -47,7 +47,7 @@ def get_sorted_results_as_list(results): keys.sort(reverse=True) return [results[key] for key in keys] - def check_regression(self, test_id, mad_deviation_limit=0.02, regression_limit=0.05, is_gce=False): # pylint: disable=too-many-locals,too-many-statements + def check_regression(self, test_id, mad_deviation_limit=0.02, regression_limit=0.05, is_gce=False): # pylint: disable=too-many-locals,too-many-statements # noqa: PLR0915 doc = self.get_test_by_id(test_id) if not doc: self.log.error('Cannot find test by id: {}!'.format(test_id)) @@ -117,9 +117,8 @@ def make_table_line_for_render(data): if "_per_op" in key: if diff < 1 + regression_limit: table_line["is_" + key + "_within_limits"] = True - else: - if diff > 1 - regression_limit: - table_line["is_" + key + "_within_limits"] = True + elif diff > 1 - regression_limit: + table_line["is_" + key + "_within_limits"] = True table_line[key + "_diff"] = round((diff - 1) * 100, 2) table_line[key] = round(table_line[key], 2) table_line["mad 
tps"] = round(table_line["mad tps"], 2) diff --git a/sdcm/utils/operator/multitenant_common.py b/sdcm/utils/operator/multitenant_common.py index d126965b8c7..ad77dcee125 100644 --- a/sdcm/utils/operator/multitenant_common.py +++ b/sdcm/utils/operator/multitenant_common.py @@ -28,7 +28,7 @@ class TenantMixin: # pylint: disable=too-many-instance-attributes _testMethodName = "runTest" # pylint: disable=too-many-instance-attributes - def __init__(self, db_cluster, loaders, monitors, # pylint: disable=too-many-arguments + def __init__(self, db_cluster, loaders, monitors, # pylint: disable=too-many-arguments # noqa: PLR0913 prometheus_db, params, test_config, cluster_index): self.db_cluster = db_cluster self.loaders = loaders diff --git a/sdcm/utils/properties.py b/sdcm/utils/properties.py index e0ffebafafc..a1c4f7ffe32 100644 --- a/sdcm/utils/properties.py +++ b/sdcm/utils/properties.py @@ -49,11 +49,11 @@ def deserialize(data: Union[str, TextIO]) -> PropertiesDict: if not line.strip() or line.lstrip()[0] == '#': output[line] = None continue - line = line.split('=', 1) - if len(line) == 2: - value = line[1] + line_splitted = line.split('=', 1) + if len(line_splitted) == 2: # noqa: PLR2004 + value = line_splitted[1] comment_pos = value.find('#') if comment_pos >= 0: value = value[0:value] - output[line[0].strip()] = value.strip().strip('"').strip("'") + output[line_splitted[0].strip()] = value.strip().strip('"').strip("'") return output diff --git a/sdcm/utils/remote_logger.py b/sdcm/utils/remote_logger.py index eed47dc7bee..bc8a844564e 100644 --- a/sdcm/utils/remote_logger.py +++ b/sdcm/utils/remote_logger.py @@ -575,7 +575,7 @@ def _logger_cmd(self) -> str: return f"echo \"I`date -u +\"%m%d %H:%M:%S\"` {message}\" >> {self._target_log_file} 2>&1" -def get_system_logging_thread(logs_transport, node, target_log_file): # pylint: disable=too-many-return-statements +def get_system_logging_thread(logs_transport, node, target_log_file): # pylint: 
disable=too-many-return-statements # noqa: PLR0911 if logs_transport == 'docker': if 'db-node' in node.name: return DockerScyllaLogger(node, target_log_file) diff --git a/sdcm/utils/s3_remote_uploader.py b/sdcm/utils/s3_remote_uploader.py index 6207a762fdf..6f4175bb7ae 100644 --- a/sdcm/utils/s3_remote_uploader.py +++ b/sdcm/utils/s3_remote_uploader.py @@ -40,7 +40,7 @@ def read(self, amt=1024): return buff -def upload_remote_files_directly_to_s3(ssh_info: dict[str, str], files: List[str], # pylint: disable=too-many-arguments +def upload_remote_files_directly_to_s3(ssh_info: dict[str, str], files: List[str], # pylint: disable=too-many-arguments # noqa: PLR0913 s3_bucket: str, s3_key: str, max_size_gb: int = 400, public_read_acl: bool = False): """Streams given remote files/directories straight to S3 as tar.gz file. Returns download link.""" diff --git a/sdcm/utils/sstable/load_utils.py b/sdcm/utils/sstable/load_utils.py index 1f1df4dc0c1..e08b07fb927 100644 --- a/sdcm/utils/sstable/load_utils.py +++ b/sdcm/utils/sstable/load_utils.py @@ -58,7 +58,7 @@ def distribute_test_files_to_cluster_nodes(cls, nodes, test_data: List[TestDataI @staticmethod # pylint: disable=too-many-arguments,too-many-locals - def upload_sstables(node, test_data: TestDataInventory, keyspace_name: str = 'keyspace1', table_name=None, + def upload_sstables(node, test_data: TestDataInventory, keyspace_name: str = 'keyspace1', table_name=None, # noqa: PLR0913 create_schema: bool = False, is_cloud_cluster=False, **kwargs): key_store = KeyStore() creds = key_store.get_scylladb_upload_credentials() @@ -111,7 +111,7 @@ def upload_sstables(node, test_data: TestDataInventory, keyspace_name: str = 'ke node.remoter.sudo(f'rm -f {table_folder}/upload/manifest.json') @classmethod - def run_load_and_stream(cls, node, # pylint: disable=too-many-arguments + def run_load_and_stream(cls, node, # pylint: disable=too-many-arguments # noqa: PLR0913 keyspace_name: str = 'keyspace1', table_name: str = 'standard1', 
start_timeout=60, end_timeout=300): """runs load and stream using API request and waits for it to finish""" @@ -189,7 +189,7 @@ def get_load_test_data_inventory(cls, column_number: int, big_sstable: bool, return COLUMN_1_DATA - if column_number >= 5: + if column_number >= 5: # noqa: PLR2004 # The snapshot has 5 columns, the snapshot (col=5) can be loaded to table (col > 5). # they rest columns will be filled to 'null'. if load_and_stream: diff --git a/sdcm/utils/sstable/sstable_utils.py b/sdcm/utils/sstable/sstable_utils.py index 0b489188df6..e741f9f4462 100644 --- a/sdcm/utils/sstable/sstable_utils.py +++ b/sdcm/utils/sstable/sstable_utils.py @@ -55,7 +55,7 @@ def get_sstables(self, from_minutes_ago: int = 0): self.log.debug('Got %s sstables %s', len(selected_sstables), message) return selected_sstables - def is_sstable_encrypted(self, sstables=None) -> list: # pylint: disable=too-many-branches + def is_sstable_encrypted(self, sstables=None) -> list: # pylint: disable=too-many-branches # noqa: PLR0912 if not sstables: sstables = self.get_sstables() if isinstance(sstables, str): diff --git a/sdcm/utils/toppartition_util.py b/sdcm/utils/toppartition_util.py index 7e1d1ff8729..2f5cd36fb12 100644 --- a/sdcm/utils/toppartition_util.py +++ b/sdcm/utils/toppartition_util.py @@ -83,8 +83,8 @@ def _parse_toppartitions_output(output: str) -> dict: def verify_output(self, output: str): toppartition_result = self._parse_toppartitions_output(output) - for sampler in self._built_args['samplers'].split(','): - sampler = sampler.upper() + for _sampler in self._built_args['samplers'].split(','): + sampler = _sampler.upper() assert sampler in toppartition_result, "{} sampler not found in result".format(sampler) assert toppartition_result[sampler]['toppartitions'] == self._built_args['toppartition'], \ "Wrong expected and actual top partitions number for {} sampler".format(sampler) diff --git a/sdcm/utils/version_utils.py b/sdcm/utils/version_utils.py index 622dae18255..34ebd9165a5 
100644 --- a/sdcm/utils/version_utils.py +++ b/sdcm/utils/version_utils.py @@ -142,9 +142,9 @@ def parse(version_string: str): # NOTE: make short scylla version like '5.2' be correct semver string _scylla_version_parts = _scylla_version.split('.') - if len(_scylla_version_parts) == 2: + if len(_scylla_version_parts) == 2: # noqa: PLR2004 _scylla_version = f"{_scylla_version}.0" - elif len(_scylla_version_parts) > 2 and re.search( + elif len(_scylla_version_parts) > 2 and re.search( # noqa: PLR2004 r"\D+", _scylla_version_parts[2].split("-")[0]): _scylla_version = f"{_scylla_version_parts[0]}.{_scylla_version_parts[1]}.0-{_scylla_version_parts[2]}" for part in _scylla_version_parts[3:]: @@ -256,7 +256,7 @@ def parse(version_string: str): @retrying(n=10, sleep_time=0.1) def get_url_content(url, return_url_data=True): response = requests.get(url=url) - if response.status_code != 200: + if response.status_code != 200: # noqa: PLR2004 raise ValueError(f"The following repository URL '{url}' is incorrect") response_data = response.text if not response_data: @@ -509,7 +509,7 @@ def _list_repo_file_etag(s3_client: S3Client, prefix: str) -> Optional[dict]: return repo_file["Contents"][0]["ETag"] -def resolve_latest_repo_symlink(url: str) -> str: +def resolve_latest_repo_symlink(url: str) -> str: # noqa: PLR0912 """Resolve `url' to the actual repo link if it contains `latest' substring, otherwise, return `url' as is. If `url' doesn't point to the latest repo file then raise ScyllaRepoEvent (warning severity). 
@@ -547,11 +547,11 @@ def resolve_latest_repo_symlink(url: str) -> str: continuation_token = "BEGIN" while continuation_token: for build in s3_objects.get("CommonPrefixes", []): - build = build.get("Prefix", "").rstrip("/").rsplit("/", 1)[-1] + build = build.get("Prefix", "").rstrip("/").rsplit("/", 1)[-1] # noqa: PLW2901 if build == LATEST_SYMLINK_NAME: continue timestamp = NO_TIMESTAMP - if len(build) >= 12: # `build' should be a string like `202001010000' or `2020-01-01T00:00:00Z' + if len(build) >= 12: # `build' should be a string like `202001010000' or `2020-01-01T00:00:00Z' # noqa: PLR2004 try: timestamp = dateutil.parser.parse(build, ignoretz=True) except ValueError: @@ -618,7 +618,7 @@ def transform_non_semver_scylla_version_to_semver(scylla_version: str): if SEMVER_REGEX.match(scylla_version): return scylla_version version_parts = scylla_version.split(".") - if len(version_parts) > 2: + if len(version_parts) > 2: # noqa: PLR2004 new_scylla_version = f"{version_parts[0]}.{version_parts[1]}.0-{'.'.join(version_parts[2:])}" if SEMVER_REGEX.match(new_scylla_version): return new_scylla_version @@ -697,13 +697,13 @@ def __call__(self, func): self.VERSIONS[(func.__name__, func.__code__.co_filename)] = {} for min_v, max_v in self.min_max_version_pairs: scylla_type = "enterprise" if any((is_enterprise(v) for v in (min_v, max_v) if v)) else "oss" - min_v = min_v or ("3.0.0" if scylla_type == "oss" else "2019.1.rc0") - max_v = max_v or ("99.99.99" if scylla_type == "oss" else "2099.99.99") + min_v = min_v or ("3.0.0" if scylla_type == "oss" else "2019.1.rc0") # noqa: PLW2901 + max_v = max_v or ("99.99.99" if scylla_type == "oss" else "2099.99.99") # noqa: PLW2901 if max_v.count(".") == 1: # NOTE: version parse function considers 4.4 as lower than 4.4.1, # but we expect it to be any of the 4.4.x versions. # So, update all such short versions with the patch part and make it to be huge. 
- max_v = f"{max_v}.999" + max_v = f"{max_v}.999" # noqa: PLW2901 self.VERSIONS[(func.__name__, func.__code__.co_filename)].update({(min_v, max_v): func}) @wraps(func) @@ -821,7 +821,7 @@ def find_scylla_repo(scylla_version, dist_type='centos', dist_version=None): for key in repo_map: if scylla_version.startswith(key): return repo_map[key] - else: + else: # noqa: PLW0120 raise ValueError(f"repo for scylla version {scylla_version} wasn't found") diff --git a/sdcm/wait.py b/sdcm/wait.py index d7bb84f46ee..7b582359ad8 100644 --- a/sdcm/wait.py +++ b/sdcm/wait.py @@ -29,7 +29,7 @@ R = TypeVar("R") # pylint: disable=invalid-name -def wait_for(func, step=1, text=None, timeout=None, throw_exc=True, stop_event=None, **kwargs): # pylint: disable=too-many-arguments +def wait_for(func, step=1, text=None, timeout=None, throw_exc=True, stop_event=None, **kwargs): # pylint: disable=too-many-arguments # noqa: PLR0913 """ Wrapper function to wait with timeout option. diff --git a/sdcm/ycsb_thread.py b/sdcm/ycsb_thread.py index c49b6bc83b0..0a7512e1407 100644 --- a/sdcm/ycsb_thread.py +++ b/sdcm/ycsb_thread.py @@ -66,7 +66,7 @@ def handle_verify_metric(self, line): stat = status_match.groupdict() self.set_metric('verify', stat['status'], float(stat['value'])) - def run(self): + def run(self): # noqa: PLR0912 # pylint: disable=too-many-nested-blocks # 729.39 current ops/sec; @@ -103,9 +103,9 @@ def run(self): for key, value in match.groupdict().items(): if not key == 'count': try: - value = float(value) / 1000.0 + value = float(value) / 1000.0 # noqa: PLW2901 except ValueError: - value = float(0) + value = float(0) # noqa: PLW2901 self.set_metric(operation, key, float(value)) except Exception: # pylint: disable=broad-except @@ -119,7 +119,7 @@ class YcsbStressThread(DockerBasedStressThread): # pylint: disable=too-many-ins def copy_template(self, docker): if self.params.get('alternator_use_dns_routing'): target_address = 'alternator' - else: + else: # noqa: PLR5501 if 
hasattr(self.node_list[0], 'parent_cluster'): target_address = self.node_list[0].parent_cluster.get_node().cql_address else: diff --git a/sla_per_user_system_test.py b/sla_per_user_system_test.py index b6c3ffdab9a..4ea9a600391 100644 --- a/sla_per_user_system_test.py +++ b/sla_per_user_system_test.py @@ -286,7 +286,7 @@ def warm_up_cache_before_test(self, max_key_for_read, stress_duration): self.run_stress_and_verify_threads(params={'stress_cmd': read_cmds}) # pylint: disable=too-many-arguments, too-many-locals - def define_read_cassandra_stress_command(self, + def define_read_cassandra_stress_command(self, # noqa: PLR0913 role: Role, load_type: str, c_s_workload_type: str, threads: int, stress_duration_min: int, @@ -360,7 +360,7 @@ def test_read_throughput_1to5_ratio(self): # http://13.48.103.68/test/71402aa7-051b-4803-a6b4-384529680fb7/runs?additionalRuns[]=1adf34d1-15cf-4973-80ce-9de130be0b09 expected_shares_ratio = 3.5 release = parse_version(self.db_cluster.nodes[0].scylla_version.replace("~", "-")).release[0] - if release >= 2023: + if release >= 2023: # noqa: PLR2004 # Running the test with 2023.1 - ratio is improved expected_shares_ratio = 4.2 self._two_users_load_througput_workload(shares=[190, 950], load=self.MIXED_LOAD, @@ -831,9 +831,10 @@ def _compare_workloads_c_s_metrics(self, workloads_queue: list) -> dict: workloads_results.update({result[0].get("username"): result[0]}) + # noqa: PLR2004 assert len(workloads_results) == 2, \ - "Expected workload_results length to be 2, got: %s. workload_results: %s" % ( - len(workloads_results), workloads_results) + "Expected workload_results length to be 2, got: %s. 
workload_results: %s" % ( # noqa: PLR2004 + len(workloads_results), workloads_results) comparison_results = {} try: for item, target_margin in comparison_axis.items(): @@ -911,7 +912,7 @@ def get_test_status(self) -> str: else: return super().get_test_status() - def _throughput_latency_parallel_run(self, read_cmds, test_start_time, latency_99_for_latency_workload, + def _throughput_latency_parallel_run(self, read_cmds, test_start_time, latency_99_for_latency_workload, # noqa: PLR0913 latency_user, throughput_user, throughput_cmd_name, latency_cmd_name): def __get_stat_for_user(read, user_name): # This is handle case when both loads (latency and throughput) are run for the same user diff --git a/test_add_remove_ldap_role_permission.py b/test_add_remove_ldap_role_permission.py index b41040ffdf6..a20eff6a302 100644 --- a/test_add_remove_ldap_role_permission.py +++ b/test_add_remove_ldap_role_permission.py @@ -11,7 +11,7 @@ class AddRemoveLdapRolePermissionTest(LongevityTest, LdapUtilsMixin): - def test_add_remove_ldap_role_permission(self): # pylint: disable=too-many-statements + def test_add_remove_ldap_role_permission(self): # pylint: disable=too-many-statements # noqa: PLR0915 """ Test adding a new user with Ldap permissions, and run some load for it. 
diff --git a/test_lib/compaction.py b/test_lib/compaction.py index 1b65200a98c..da623fd6fdd 100644 --- a/test_lib/compaction.py +++ b/test_lib/compaction.py @@ -44,7 +44,7 @@ def get_gc_mode(node: BaseNode, keyspace: str, table: str) -> str | GcMode: split=True) LOGGER.debug("Query result for %s.%s GC mode is: %s", keyspace, table, table_gc_mode_result) gc_mode = 'N/A' - if table_gc_mode_result and len(table_gc_mode_result) >= 4: + if table_gc_mode_result and len(table_gc_mode_result) >= 4: # noqa: PLR2004 extensions_value = table_gc_mode_result[3] # TODO: A temporary workaround until 5.0 query-table-extensions issue is fixed: # https://github.com/scylladb/scylla/issues/10309 diff --git a/test_lib/cql_types.py b/test_lib/cql_types.py index edd2d8de559..d2339835fcb 100644 --- a/test_lib/cql_types.py +++ b/test_lib/cql_types.py @@ -33,7 +33,7 @@ def _create_instance(cls, self_type, *args, **kwargs): return target_class(self_type, *args, **kwargs) @classmethod - def get_random(cls, already_created_info: dict, avoid_types: list = None, # pylint: disable=too-many-arguments + def get_random(cls, already_created_info: dict, avoid_types: list = None, # pylint: disable=too-many-arguments # noqa: PLR0913 allow_levels: int = 1, allowed_types: list = None, forget_on_exhaust=False) -> 'CQLColumnType': return CQLColumnType.get_random( already_created_info, @@ -71,7 +71,7 @@ def _get_available_variants(already_created_info, avoid_types=None, allowed_type return [e for e in allowed_types if e not in excluded_types] @classmethod - def get_random(cls, already_created_info: dict, avoid_types: list = None, # pylint: disable=too-many-arguments + def get_random(cls, already_created_info: dict, avoid_types: list = None, # pylint: disable=too-many-arguments # noqa: PLR0913 allow_levels: int = 1, allowed_types: list = None, forget_on_exhaust=False): """ Randomly generates CQLColumnType instance diff --git a/test_lib/sla.py b/test_lib/sla.py index e58a417040b..3eb1ee34a09 100644 --- 
a/test_lib/sla.py +++ b/test_lib/sla.py @@ -63,7 +63,7 @@ def _generate_query_string(self): class ServiceLevel: # The class provide interface to manage SERVICE LEVEL # pylint: disable=too-many-arguments - def __init__(self, session, + def __init__(self, session, # noqa: PLR0913 name: str, shares: Optional[int] = 1000, timeout: str = None, @@ -204,7 +204,7 @@ class UserRoleBase: # Base class for ROLES and USERS AUTHENTICATION_ENTITY = '' - def __init__(self, session, name, password=None, superuser=None, verbose=False, **kwargs): + def __init__(self, session, name, password=None, superuser=None, verbose=False, **kwargs): # noqa: PLR0913 self._name = name self.password = password self.session = session @@ -335,7 +335,7 @@ class Role(UserRoleBase): # The class provide interface to manage ROLES AUTHENTICATION_ENTITY = 'ROLE' - def __init__(self, session, name, password=None, login=False, superuser=False, options_dict=None, verbose=True): + def __init__(self, session, name, password=None, login=False, superuser=False, options_dict=None, verbose=True): # noqa: PLR0913 super().__init__(session, name, password, superuser, verbose) self.login = login self.options_dict = options_dict @@ -399,7 +399,7 @@ class User(UserRoleBase): # The class provide interface to manage USERS AUTHENTICATION_ENTITY = 'USER' - def __init__(self, session, name, password=None, superuser=None, verbose=True): + def __init__(self, session, name, password=None, superuser=None, verbose=True): # noqa: PLR0913 super().__init__(session, name, password, superuser, verbose) def create(self) -> User: diff --git a/test_lib/utils.py b/test_lib/utils.py index 007166daf44..064d3dc1961 100644 --- a/test_lib/utils.py +++ b/test_lib/utils.py @@ -34,7 +34,7 @@ def get_data_by_path( value = getattr(current, param_name) elif hasattr(current, 'get'): value = current.get(param_name, __DEFAULT__) - elif len(param_name) > 3 and param_name[0] == '[' and param_name[-1] == ']': + elif len(param_name) > 3 and param_name[0] == 
'[' and param_name[-1] == ']': # noqa: PLR2004 tmp = param_name[1:-1] if tmp.isdecimal(): if hasattr(current, '__getitem__'): diff --git a/unit_tests/dummy_remote.py b/unit_tests/dummy_remote.py index ad26a6dec5f..a543540d7da 100644 --- a/unit_tests/dummy_remote.py +++ b/unit_tests/dummy_remote.py @@ -44,7 +44,7 @@ def receive_files(src, dst): class LocalNode(BaseNode): # pylint: disable=too-many-arguments - def __init__(self, name, parent_cluster, ssh_login_info=None, base_logdir=None, node_prefix=None, dc_idx=0): + def __init__(self, name, parent_cluster, ssh_login_info=None, base_logdir=None, node_prefix=None, dc_idx=0): # noqa: PLR0913 super().__init__(name, parent_cluster) self.remoter = LocalCmdRunner() self.logdir = os.path.dirname(__file__) diff --git a/unit_tests/lib/fake_provisioner.py b/unit_tests/lib/fake_provisioner.py index 55fdeba40e9..028a40085c7 100644 --- a/unit_tests/lib/fake_provisioner.py +++ b/unit_tests/lib/fake_provisioner.py @@ -76,7 +76,7 @@ def reboot_instance(self, name: str, wait: bool, hard: bool = False) -> None: def run_command(self, name: str, command: str) -> Result: """Runs command on instance.""" - return subprocess.run(command, shell=True, capture_output=True, text=True) # pylint: disable=subprocess-run-check + return subprocess.run(command, shell=True, capture_output=True, text=True, check=False) # pylint: disable=subprocess-run-check @classmethod def discover_regions(cls, test_id: str, **kwargs) -> List[Provisioner]: # pylint: disable=unused-argument diff --git a/unit_tests/lib/fake_remoter.py b/unit_tests/lib/fake_remoter.py index f81bf8311c0..0dea801647e 100644 --- a/unit_tests/lib/fake_remoter.py +++ b/unit_tests/lib/fake_remoter.py @@ -24,7 +24,7 @@ class FakeRemoter(RemoteCmdRunnerBase): result_map: Dict[Pattern, Result] = {} - def run(self, # pylint: disable=too-many-arguments + def run(self, # pylint: disable=too-many-arguments # noqa: PLR0913 cmd: str, timeout=None, ignore_status=False, diff --git 
a/unit_tests/lib/mock_remoter.py b/unit_tests/lib/mock_remoter.py index 3cad727c825..b322bf29b3d 100644 --- a/unit_tests/lib/mock_remoter.py +++ b/unit_tests/lib/mock_remoter.py @@ -48,7 +48,7 @@ def _process_response(self, response): # pylint: disable=no-self-use return None # pylint: disable=too-many-arguments,unused-argument - def run(self, cmd: str, timeout: Optional[float] = None, + def run(self, cmd: str, timeout: Optional[float] = None, # noqa: PLR0913 ignore_status: bool = False, verbose: bool = True, new_session: bool = False, log_file: Optional[str] = None, retry: int = 1, watchers: Optional[List[StreamWatcher]] = None, change_context: bool = False) -> Result: diff --git a/unit_tests/lib/remoter_recorder.py b/unit_tests/lib/remoter_recorder.py index b97939c7b1c..fd56df4834f 100644 --- a/unit_tests/lib/remoter_recorder.py +++ b/unit_tests/lib/remoter_recorder.py @@ -29,7 +29,7 @@ class RemoterRecorder(RemoteCmdRunner): """ responses = {} - def run(self, cmd: str, timeout: Optional[float] = None, # pylint: disable=too-many-arguments + def run(self, cmd: str, timeout: Optional[float] = None, # pylint: disable=too-many-arguments # noqa: PLR0913 ignore_status: bool = False, verbose: bool = True, new_session: bool = False, log_file: Optional[str] = None, retry: int = 1, watchers: Optional[List[StreamWatcher]] = None, change_context: bool = False) -> Result: diff --git a/unit_tests/provisioner/fake_azure_service.py b/unit_tests/provisioner/fake_azure_service.py index e866967f597..4fdf13de060 100644 --- a/unit_tests/provisioner/fake_azure_service.py +++ b/unit_tests/provisioner/fake_azure_service.py @@ -73,8 +73,8 @@ def create_or_update(self, resource_group_name: str, parameters: Dict[str, Any]) } res_group.update(**parameters) (self.path / resource_group_name).mkdir(exist_ok=True) - with open(self.path / resource_group_name / "resource_group.json", "w", encoding="utf-8") as file: - json.dump(res_group, fp=file, indent=2) + with open(self.path / 
resource_group_name / "resource_group.json", "w", encoding="utf-8") as file_obj: + json.dump(res_group, fp=file_obj, indent=2) return ResourceGroup.deserialize(res_group) def get(self, name) -> ResourceGroup: @@ -109,9 +109,9 @@ def list(self, resource_group_name: str) -> List[NetworkSecurityGroup]: except FileNotFoundError: raise ResourceNotFoundError("No resource group") from None elements = [] - for file in files: - with open(self.path / resource_group_name / file, "r", encoding="utf-8") as file: - elements.append(NetworkSecurityGroup.deserialize(json.load(file))) + for file_name in files: + with open(self.path / resource_group_name / file_name, "r", encoding="utf-8") as file_obj: + elements.append(NetworkSecurityGroup.deserialize(json.load(file_obj))) return elements def begin_create_or_update(self, resource_group_name: str, network_security_group_name: str, @@ -179,9 +179,9 @@ def list(self, resource_group_name: str) -> List[VirtualNetwork]: except FileNotFoundError: raise ResourceNotFoundError("No resource group") from None elements = [] - for file in files: - with open(self.path / resource_group_name / file, "r", encoding="utf-8") as file: - elements.append(VirtualNetwork.deserialize(json.load(file))) + for file_name in files: + with open(self.path / resource_group_name / file_name, "r", encoding="utf-8") as file_obj: + elements.append(VirtualNetwork.deserialize(json.load(file_obj))) return elements def begin_create_or_update(self, resource_group_name: str, virtual_network_name: str, parameters: Dict[str, Any] @@ -230,9 +230,9 @@ def list(self, resource_group_name: str, virtual_network_name: str) -> List[Subn except FileNotFoundError: raise ResourceNotFoundError("No resource group") from None elements = [] - for file in files: - with open(self.path / resource_group_name / file, "r", encoding="utf-8") as file: - elements.append(Subnet.deserialize(json.load(file))) + for file_name in files: + with open(self.path / resource_group_name / file_name, "r", 
encoding="utf-8") as file_obj: + elements.append(Subnet.deserialize(json.load(file_obj))) return elements def begin_create_or_update(self, resource_group_name: str, virtual_network_name: str, subnet_name: str, @@ -280,9 +280,9 @@ def list(self, resource_group_name: str) -> List[PublicIPAddress]: except FileNotFoundError: raise ResourceNotFoundError("No resource group") from None elements = [] - for file in files: - with open(self.path / resource_group_name / file, "r", encoding="utf-8") as file: - elements.append(PublicIPAddress.deserialize(json.load(file))) + for file_name in files: + with open(self.path / resource_group_name / file_name, "r", encoding="utf-8") as file_obj: + elements.append(PublicIPAddress.deserialize(json.load(file_obj))) return elements def begin_create_or_update(self, resource_group_name: str, public_ip_address_name: str, parameters: Dict[str, Any] @@ -310,8 +310,8 @@ def begin_create_or_update(self, resource_group_name: str, public_ip_address_nam "provisioningState": "Succeeded" } } - with open(self.path / resource_group_name / f"ip-{public_ip_address_name}.json", "w", encoding="utf-8") as file: - json.dump(base, fp=file, indent=2) + with open(self.path / resource_group_name / f"ip-{public_ip_address_name}.json", "w", encoding="utf-8") as file_obj: + json.dump(base, fp=file_obj, indent=2) return WaitableObject() def get(self, resource_group_name: str, public_ip_address_name: str) -> PublicIPAddress: @@ -337,9 +337,9 @@ def list(self, resource_group_name: str) -> List[NetworkInterface]: except FileNotFoundError: raise ResourceNotFoundError("No resource group") from None elements = [] - for file in files: - with open(self.path / resource_group_name / file, "r", encoding="utf-8") as file: - elements.append(NetworkInterface.deserialize(json.load(file))) + for file_name in files: + with open(self.path / resource_group_name / file_name, "r", encoding="utf-8") as file_obj: + elements.append(NetworkInterface.deserialize(json.load(file_obj))) return 
elements def begin_create_or_update(self, resource_group_name: str, network_interface_name: str, parameters: Dict[str, Any] @@ -584,7 +584,7 @@ def begin_restart(self, resource_group_name, vm_name # pylint: disable=unused-a # pylint: disable=unused-argument,no-self-use def begin_run_command(self, resource_group_name, vm_name, parameters) -> ResultableObject: result = subprocess.run(parameters.script[0], shell=True, capture_output=True, # pylint: disable=subprocess-run-check - text=True) + text=True, check=False) return ResultableObject(result.stdout, result.stderr) diff --git a/unit_tests/provisioner/test_azure_get_scylla_images.py b/unit_tests/provisioner/test_azure_get_scylla_images.py index 40778b80fbc..88fce1dfcf6 100644 --- a/unit_tests/provisioner/test_azure_get_scylla_images.py +++ b/unit_tests/provisioner/test_azure_get_scylla_images.py @@ -37,7 +37,7 @@ def test_can_get_scylla_images_based_on_branch(azure_service): def test_can_get_scylla_images_based_on_scylla_version(azure_service): images = get_scylla_images("4.6.4", "eastus", azure_service=azure_service) assert images[0].name == "ScyllaDB-4.6.4-0.20220718.b60f14601-1-build-28" - assert len(images) == 2 + assert len(images) == 2 # noqa: PLR2004 def test_can_get_scylla_images_based_on_revision_id(azure_service): diff --git a/unit_tests/provisioner/test_azure_region_definition_builder.py b/unit_tests/provisioner/test_azure_region_definition_builder.py index 366bc2a01d3..8885c44e296 100644 --- a/unit_tests/provisioner/test_azure_region_definition_builder.py +++ b/unit_tests/provisioner/test_azure_region_definition_builder.py @@ -54,7 +54,7 @@ def test_can_create_basic_scylla_instance_definition_from_sct_config(): type="Standard_L8s_v3", user_name="scyllaadm", root_disk_size=30, tags=tags | {"NodeType": "scylla-db", "keep_action": "", 'NodeIndex': '1'}, ssh_key=ssh_key) - assert len(region_definitions) == 2 + assert len(region_definitions) == 2 # noqa: PLR2004 actual_region_definition = 
region_definitions[0] assert actual_region_definition.test_id == env_config.SCT_TEST_ID diff --git a/unit_tests/provisioner/test_provision_sct_resources.py b/unit_tests/provisioner/test_provision_sct_resources.py index 62f82b1dfd3..1bbb24d9ba0 100644 --- a/unit_tests/provisioner/test_provision_sct_resources.py +++ b/unit_tests/provisioner/test_provision_sct_resources.py @@ -30,8 +30,8 @@ def test_can_provision_instances_according_to_sct_configuration(params, test_con loader_nodes = [node for node in eastus_instances if node.tags['NodeType'] == "loader"] monitor_nodes = [node for node in eastus_instances if node.tags['NodeType'] == "monitor"] - assert len(db_nodes) == 3 - assert len(loader_nodes) == 2 + assert len(db_nodes) == 3 # noqa: PLR2004 + assert len(loader_nodes) == 2 # noqa: PLR2004 assert len(monitor_nodes) == 1 db_node = db_nodes[0] assert db_node.region == "eastus" @@ -66,8 +66,8 @@ def test_fallback_on_demand_when_spot_fails(fallback_on_demand, params, test_con loader_nodes = [node for node in eastus_instances if node.tags['NodeType'] == "loader"] monitor_nodes = [node for node in eastus_instances if node.tags['NodeType'] == "monitor"] - assert len(db_nodes) == 3 - assert len(loader_nodes) == 2 + assert len(db_nodes) == 3 # noqa: PLR2004 + assert len(loader_nodes) == 2 # noqa: PLR2004 assert len(monitor_nodes) == 1 for node in db_nodes: assert node.pricing_model == PricingModel.ON_DEMAND diff --git a/unit_tests/provisioner/test_user_data_builder.py b/unit_tests/provisioner/test_user_data_builder.py index 02e1a1ce593..fe5c3c807b3 100644 --- a/unit_tests/provisioner/test_user_data_builder.py +++ b/unit_tests/provisioner/test_user_data_builder.py @@ -88,7 +88,7 @@ def test_user_data_can_merge_user_data_objects_yaml(): assert sorted(loaded_yaml['packages']) == sorted( ['some-pkg-to-install', 'another-pkg-to-install', 'pkg-from-empty']) script_files = loaded_yaml['write_files'] - assert len(script_files) == 2, "empty script user data object should not be 
added" + assert len(script_files) == 2, "empty script user data object should not be added" # noqa: PLR2004 assert user_data_object_1.script_to_run in script_files[0]["content"] assert user_data_object_2.script_to_run in script_files[1]["content"] diff --git a/unit_tests/test_adaptive_timeouts.py b/unit_tests/test_adaptive_timeouts.py index d929d83a2df..d3edaf91a1e 100644 --- a/unit_tests/test_adaptive_timeouts.py +++ b/unit_tests/test_adaptive_timeouts.py @@ -41,7 +41,7 @@ class MemoryAdaptiveTimeoutStore(AdaptiveTimeoutStore): def __init__(self): self._data = {} - def store(self, metrics: dict[str, Any], operation: str, duration: int, timeout: int, + def store(self, metrics: dict[str, Any], operation: str, duration: int, timeout: int, # noqa: PLR0913 timeout_occurred: bool) -> None: metrics.update({ "operation": operation, @@ -122,16 +122,16 @@ def adaptive_timeout_store(): @mock.patch('sdcm.sct_events.base.SctEvent.publish_or_dump') def test_soft_timeout_is_raised_when_timeout_reached(publish_or_dump, fake_node, adaptive_timeout_store): with adaptive_timeout(operation=Operations.SOFT_TIMEOUT, node=fake_node, timeout=0.1, stats_storage=adaptive_timeout_store) as timeout: - assert timeout == 0.1 + assert timeout == 0.1 # noqa: PLR2004 time.sleep(0.2) publish_or_dump.assert_called_once() metrics = MemoryAdaptiveTimeoutStore().get(operation=Operations.SOFT_TIMEOUT.name, timeout_occurred=True) - assert metrics[0]["duration"] > 0.2 - assert metrics[0]["timeout"] == 0.1 + assert metrics[0]["duration"] > 0.2 # noqa: PLR2004 + assert metrics[0]["timeout"] == 0.1 # noqa: PLR2004 assert metrics[0]["timeout_occurred"] is True assert metrics[0]["operation"] == "SOFT_TIMEOUT" assert metrics[0]["node_name"] == "test-node" - assert metrics[0]["shards_count"] == 3 + assert metrics[0]["shards_count"] == 3 # noqa: PLR2004 @mock.patch('sdcm.sct_events.base.SctEvent.publish_or_dump') @@ -147,6 +147,6 @@ def test_soft_timeout_is_not_raised_when_timeout_not_reached(publish_or_dump, fa 
@mock.patch('sdcm.sct_events.base.SctEvent.publish_or_dump') def test_decommission_timeout_is_calculated_and_stored(publish_or_dump, fake_node, adaptive_timeout_store): with adaptive_timeout(operation=Operations.DECOMMISSION, node=fake_node, stats_storage=adaptive_timeout_store) as timeout: - assert timeout == 7200 # based on data size + assert timeout == 7200 # based on data size # noqa: PLR2004 publish_or_dump.assert_not_called() assert MemoryAdaptiveTimeoutStore().get(operation=Operations.DECOMMISSION.name, timeout_occurred=False) diff --git a/unit_tests/test_audit.py b/unit_tests/test_audit.py index 9692543801c..b556e9db5ac 100644 --- a/unit_tests/test_audit.py +++ b/unit_tests/test_audit.py @@ -14,13 +14,13 @@ def test_get_audit_log_rows_can_be_filtered_by_time(): node = DummyAuditNode(name='dummy-node', parent_cluster=None) # no date filter provided rows = get_audit_log_rows(node, from_datetime=None) - assert len(list(rows)) == 95 + assert len(list(rows)) == 95 # noqa: PLR2004 # filter by date start_time = datetime(2023, 7, 24, 11, 39, 1, 123) # 2023-07-24T11:39:01.123 rows = get_audit_log_rows(node, from_datetime=start_time) rows = list(rows) - assert len(rows) == 21 + assert len(rows) == 21 # noqa: PLR2004 assert not [row for row in rows if row.event_time < start_time.replace(microsecond=0)] diff --git a/unit_tests/test_cluster.py b/unit_tests/test_cluster.py index 29bb1fd7030..623eefbf553 100644 --- a/unit_tests/test_cluster.py +++ b/unit_tests/test_cluster.py @@ -128,7 +128,7 @@ def test_search_system_interlace_reactor_stall(self): assert event_a["type"] == "REACTOR_STALLED" assert event_a["line_number"] == 0 assert event_b["type"] == "REACTOR_STALLED" - assert event_b["line_number"] == 3 + assert event_b["line_number"] == 3 # noqa: PLR2004 def test_search_kernel_callstack(self): self.node.parent_cluster = {'params': {'print_kernel_callstack': True}} @@ -142,9 +142,9 @@ def test_search_kernel_callstack(self): print(event_b) assert event_a["type"] == 
"KERNEL_CALLSTACK" - assert event_a["line_number"] == 2 + assert event_a["line_number"] == 2 # noqa: PLR2004 assert event_b["type"] == "KERNEL_CALLSTACK" - assert event_b["line_number"] == 5 + assert event_b["line_number"] == 5 # noqa: PLR2004 def test_search_cdc_invalid_request(self): self.node.system_log = os.path.join(os.path.dirname(__file__), 'test_data', 'system_cdc_invalid_request.log') @@ -185,7 +185,7 @@ def test_search_system_suppressed_messages(self): print(event_a) assert event_a["type"] == "SUPPRESSED_MESSAGES", 'Not expected event type {}'.format(event_a["type"]) - assert event_a["line_number"] == 6, 'Not expected event line number {}'.format(event_a["line_number"]) + assert event_a["line_number"] == 6, 'Not expected event line number {}'.format(event_a["line_number"]) # noqa: PLR2004 def test_search_one_line_backtraces(self): self.node.system_log = os.path.join(os.path.dirname(__file__), 'test_data', 'system_one_line_backtrace.log') @@ -219,7 +219,7 @@ def test_gate_closed_ignored_exception_is_catched(self): assert event_backtrace1["type"] == "GATE_CLOSED" assert event_backtrace1["line_number"] == 1 assert event_backtrace2["type"] == "GATE_CLOSED" - assert event_backtrace2["line_number"] == 3 + assert event_backtrace2["line_number"] == 3 # noqa: PLR2004 def test_compaction_stopped_exception_is_catched(self): self.node.system_log = os.path.join(os.path.dirname(__file__), 'test_data', 'compaction_stopped_exception.log') @@ -254,7 +254,7 @@ def test_appending_to_log(self): assert len(reactor_stalls) == 1 event = reactor_stalls[0] assert event["type"] == "REACTOR_STALLED" - assert event["line_number"] == 2 + assert event["line_number"] == 2 # noqa: PLR2004 assert 'Reactor stalled for 32 ms on shard 1' in event['line'] @@ -728,5 +728,5 @@ def test_describering_parsing(self): # pylint: disable=no-self-use 'end_token': 9156354786201613199, 'endpoints': '127.0.49.3', 'rpc_endpoints': '127.0.49.3'}] min_token, max_token = keyspace_min_max_tokens(node=node, 
keyspace="") - assert min_token == -9193109213506951143 - assert max_token == 9202125676696964746 + assert min_token == -9193109213506951143 # noqa: PLR2004 + assert max_token == 9202125676696964746 # noqa: PLR2004 diff --git a/unit_tests/test_config.py b/unit_tests/test_config.py index 130d4ba1c1b..460068da30d 100644 --- a/unit_tests/test_config.py +++ b/unit_tests/test_config.py @@ -187,7 +187,7 @@ def test_12_scylla_version_ami(self): conf = sct_config.SCTConfiguration() conf.verify_configuration() amis = conf.get('ami_id_db_scylla').split() - assert len(amis) == 2 + assert len(amis) == 2 # noqa: PLR2004 assert all(ami.startswith('ami-') for ami in amis) def test_12_scylla_version_ami_case1(self): # pylint: disable=invalid-name @@ -291,7 +291,7 @@ def test_13_scylla_version_ami_branch(self): # pylint: disable=invalid-name conf.verify_configuration() amis = conf.get('ami_id_db_scylla').split() - assert len(amis) == 2 + assert len(amis) == 2 # noqa: PLR2004 assert all(ami.startswith('ami-') for ami in amis) @pytest.mark.integration @@ -304,7 +304,7 @@ def test_13_scylla_version_ami_branch_latest(self): # pylint: disable=invalid-n conf.verify_configuration() amis = conf.get('ami_id_db_scylla').split() - assert len(amis) == 2 + assert len(amis) == 2 # noqa: PLR2004 assert all(ami.startswith('ami-') for ami in amis) def test_conf_check_required_files(self): # pylint: disable=no-self-use diff --git a/unit_tests/test_events.py b/unit_tests/test_events.py index e7d43ef1e0d..782ef0d19a8 100644 --- a/unit_tests/test_events.py +++ b/unit_tests/test_events.py @@ -510,7 +510,7 @@ def test_count_reactor_stall(self): statistics = counter_manager.get_stats().copy() assert len(statistics.keys()) == 1, "Number of events in statistics is wrong" - assert statistics["DatabaseLogEvent.REACTOR_STALLED"]["counter"] == 4 + assert statistics["DatabaseLogEvent.REACTOR_STALLED"]["counter"] == 4 # noqa: PLR2004 assert statistics["DatabaseLogEvent.REACTOR_STALLED"]["ms"] == {50: 2, 10: 1, 
20: 1} reason_stat_dir = self.get_events_counter().events_stat_dir / Path(count_condition_name) / \ Path("DatabaseLogEvent.REACTOR_STALLED") @@ -538,7 +538,7 @@ def test_count_reactor_stall_with_obj_instance(self): counter.stop_event_counter() statistics = counter.get_stats() assert len(statistics.keys()) == 1, "Number of events in statistics is wrong" - assert statistics["DatabaseLogEvent.REACTOR_STALLED"]["counter"] == 4 + assert statistics["DatabaseLogEvent.REACTOR_STALLED"]["counter"] == 4 # noqa: PLR2004 assert statistics["DatabaseLogEvent.REACTOR_STALLED"]["ms"] == {50: 2, 10: 1, 20: 1} reason_stat_dir = self.get_events_counter().events_stat_dir / Path(count_condition_name) / \ Path("DatabaseLogEvent.REACTOR_STALLED") @@ -564,7 +564,7 @@ def test_count_reactor_stall_not_matched_by_regexp(self): time.sleep(1) statistics = counter_manager.get_stats().copy() assert len(statistics.keys()) == 1, "Number of events in statistics is wrong" - assert statistics["DatabaseLogEvent.REACTOR_STALLED"]["counter"] == 4 + assert statistics["DatabaseLogEvent.REACTOR_STALLED"]["counter"] == 4 # noqa: PLR2004 assert statistics["DatabaseLogEvent.REACTOR_STALLED"]["ms"] == {0: 4} def test_count_several_events(self): @@ -585,8 +585,8 @@ def test_count_several_events(self): # assert not counter_manager._counter_device._cm_register, f"{dict(counter_manager._counter_device._cm_register)}" statistics = counter_manager.get_stats().copy() - assert len(statistics.keys()) == 2, "Number of events in statistics is wrong" - assert statistics["DatabaseLogEvent.REACTOR_STALLED"]["counter"] == 3 + assert len(statistics.keys()) == 2, "Number of events in statistics is wrong" # noqa: PLR2004 + assert statistics["DatabaseLogEvent.REACTOR_STALLED"]["counter"] == 3 # noqa: PLR2004 assert statistics["DatabaseLogEvent.REACTOR_STALLED"]["ms"] == {50: 2, 30: 1} assert statistics["DatabaseLogEvent.KERNEL_CALLSTACK"]["counter"] == 1 reason_stat_dir = self.get_events_counter().events_stat_dir / 
Path(count_condition_name) @@ -620,8 +620,8 @@ def test_skip_not_required_count_required_events(self): time.sleep(1) statistics = counter_manager.get_stats().copy() - assert len(statistics.keys()) == 2, "Number of events in statistics is wrong" - assert statistics["DatabaseLogEvent.REACTOR_STALLED"]["counter"] == 2 + assert len(statistics.keys()) == 2, "Number of events in statistics is wrong" # noqa: PLR2004 + assert statistics["DatabaseLogEvent.REACTOR_STALLED"]["counter"] == 2 # noqa: PLR2004 assert statistics["DatabaseLogEvent.REACTOR_STALLED"]["ms"] == {50: 1, 2501: 1} assert statistics["DatabaseLogEvent.KERNEL_CALLSTACK"]["counter"] == 1 reason_stat_dir = self.get_events_counter().events_stat_dir / Path(count_condition_name) @@ -663,14 +663,14 @@ def test_count_with_several_count_managers(self): statistics1 = counter_manager1.get_stats().copy() statistics2 = counter_manager2.get_stats().copy() - assert len(statistics1.keys()) == 2, "Number of events in statistics is wrong" - assert len(statistics2.keys()) == 2, "Number of events in statistics is wrong" - assert statistics1["DatabaseLogEvent.REACTOR_STALLED"]["counter"] == 2 + assert len(statistics1.keys()) == 2, "Number of events in statistics is wrong" # noqa: PLR2004 + assert len(statistics2.keys()) == 2, "Number of events in statistics is wrong" # noqa: PLR2004 + assert statistics1["DatabaseLogEvent.REACTOR_STALLED"]["counter"] == 2 # noqa: PLR2004 assert statistics1["DatabaseLogEvent.REACTOR_STALLED"]["ms"] == {50: 1, 200: 1} assert statistics1["DatabaseLogEvent.KERNEL_CALLSTACK"]["counter"] == 1 - assert statistics2["DatabaseLogEvent.REACTOR_STALLED"]["counter"] == 2 + assert statistics2["DatabaseLogEvent.REACTOR_STALLED"]["counter"] == 2 # noqa: PLR2004 assert statistics2["DatabaseLogEvent.REACTOR_STALLED"]["ms"] == {50: 1, 200: 1} - assert statistics2["DatabaseLogEvent.BACKTRACE"]["counter"] == 3 + assert statistics2["DatabaseLogEvent.BACKTRACE"]["counter"] == 3 # noqa: PLR2004 reason_stat_dir = 
self.get_events_counter().events_stat_dir / Path(count_condition_name1) assert reason_stat_dir.is_dir() @@ -726,14 +726,14 @@ def test_count_with_several_nested_count_managers(self): statistics1 = counter_manager1.get_stats().copy() - assert len(statistics1.keys()) == 2, "Number of events in statistics is wrong" - assert len(statistics2.keys()) == 2, "Number of events in statistics is wrong" - assert statistics1["DatabaseLogEvent.REACTOR_STALLED"]["counter"] == 2 + assert len(statistics1.keys()) == 2, "Number of events in statistics is wrong" # noqa: PLR2004 + assert len(statistics2.keys()) == 2, "Number of events in statistics is wrong" # noqa: PLR2004 + assert statistics1["DatabaseLogEvent.REACTOR_STALLED"]["counter"] == 2 # noqa: PLR2004 assert statistics1["DatabaseLogEvent.REACTOR_STALLED"]["ms"] == {50: 1, 1000: 1} assert statistics1["DatabaseLogEvent.KERNEL_CALLSTACK"]["counter"] == 1 assert statistics2["DatabaseLogEvent.REACTOR_STALLED"]["counter"] == 1 assert statistics2["DatabaseLogEvent.REACTOR_STALLED"]["ms"] == {1000: 1} - assert statistics2["DatabaseLogEvent.BACKTRACE"]["counter"] == 2 + assert statistics2["DatabaseLogEvent.BACKTRACE"]["counter"] == 2 # noqa: PLR2004 reason_stat_dir = self.get_events_counter().events_stat_dir / Path(count_condition_name1) assert reason_stat_dir.is_dir() diff --git a/unit_tests/test_ndbench_thread.py b/unit_tests/test_ndbench_thread.py index 721f2f0dfa9..d8c3640b6b0 100644 --- a/unit_tests/test_ndbench_thread.py +++ b/unit_tests/test_ndbench_thread.py @@ -86,7 +86,7 @@ def cleanup_thread(): assert len(cat["ERROR"]) >= 1 assert any("Encountered an exception when driving load" in err for err in cat["ERROR"]) - assert len(cat["CRITICAL"]) >= 2 + assert len(cat["CRITICAL"]) >= 2 # noqa: PLR2004 assert any("BUILD FAILED" in critical for critical in cat["CRITICAL"]) diff --git a/unit_tests/test_nemesis_sisyphus.py b/unit_tests/test_nemesis_sisyphus.py index e18a7e32c75..5e18160f311 100644 --- 
a/unit_tests/test_nemesis_sisyphus.py +++ b/unit_tests/test_nemesis_sisyphus.py @@ -71,7 +71,7 @@ def test_list_all_available_nemesis(generate_file=True): disruption_list, disruptions_dict, disruption_classes = sisyphus.get_list_of_disrupt_methods( subclasses_list=subclasses, export_properties=True) - assert len(disruption_list) == 85 + assert len(disruption_list) == 85 # noqa: PLR2004 if generate_file: with open('data_dir/nemesis.yml', 'w', encoding="utf-8") as outfile1: diff --git a/unit_tests/test_profiler.py b/unit_tests/test_profiler.py index 22121a39084..e52a9e43372 100644 --- a/unit_tests/test_profiler.py +++ b/unit_tests/test_profiler.py @@ -182,7 +182,7 @@ def test_profile_disabled(self): break tmp.stop() - def _add_tests(self): # pylint: disable=too-many-statements + def _add_tests(self): # pylint: disable=too-many-statements # noqa: PLR0915 self._subroutines.append(Thread(target=thread_body, name="Thread.daemon", daemon=True)) self._subroutines.append(LibThread(target=thread_body, name="lib.Thread.daemon", daemon=True)) self._subroutines.append(ThreadCustomClass(target=thread_body, name="Thread.CustomClass.daemon", daemon=True)) diff --git a/unit_tests/test_remoter.py b/unit_tests/test_remoter.py index ca92859b74b..7e8920f0b34 100644 --- a/unit_tests/test_remoter.py +++ b/unit_tests/test_remoter.py @@ -110,7 +110,7 @@ def _create_and_run_twice_in_same_thread(remoter_type, key_file, stmt, kwargs, p # pylint: disable=too-many-arguments @staticmethod - def _create_and_run_in_same_thread(remoter_type, host, key_file, stmt, kwargs, paramiko_thread_results): + def _create_and_run_in_same_thread(remoter_type, host, key_file, stmt, kwargs, paramiko_thread_results): # noqa: PLR0913 if issubclass(remoter_type, (RemoteCmdRunner, RemoteLibSSH2CmdRunner)): remoter = remoter_type(hostname=host, user=getpass.getuser(), key_file=key_file) else: @@ -200,7 +200,7 @@ def _run_parallel(thread_count, thread_body, args, kwargs): # 
@parameterized.expand(ALL_COMMANDS_WITH_ALL_OPTIONS) @unittest.skip('To be ran manually') - def test_run_in_mainthread( # pylint: disable=too-many-arguments + def test_run_in_mainthread( # pylint: disable=too-many-arguments # noqa: PLR0913 self, remoter_type, host: str, stmt: str, verbose: bool, ignore_status: bool, new_session: bool, retry: int, timeout: Union[float, None]): kwargs = { @@ -237,7 +237,7 @@ def test_run_in_mainthread( # pylint: disable=too-many-arguments # @parameterized.expand(ALL_COMMANDS_WITH_ALL_OPTIONS) @unittest.skip('To be ran manually') - def test_create_and_run_in_same_thread( # pylint: disable=too-many-arguments,too-many-locals + def test_create_and_run_in_same_thread( # pylint: disable=too-many-arguments,too-many-locals # noqa: PLR0913 self, remoter_type, host: str, stmt: str, verbose: bool, ignore_status: bool, new_session: bool, retry: int, timeout: Union[float, None]): kwargs = { @@ -264,7 +264,7 @@ def test_create_and_run_in_same_thread( # pylint: disable=too-many-arguments,to # @parameterized.expand(ALL_COMMANDS_WITH_ALL_OPTIONS) @unittest.skip('To be ran manually') - def test_create_and_run_in_separate_thread( # pylint: disable=too-many-arguments + def test_create_and_run_in_separate_thread( # pylint: disable=too-many-arguments # noqa: PLR0913 self, remoter_type, host: str, stmt: str, verbose: bool, ignore_status: bool, new_session: bool, retry: int, timeout: Union[float, None]): kwargs = { diff --git a/unit_tests/test_scan_operation_thread.py b/unit_tests/test_scan_operation_thread.py index 296d03f637a..ef922ba3a31 100644 --- a/unit_tests/test_scan_operation_thread.py +++ b/unit_tests/test_scan_operation_thread.py @@ -173,7 +173,7 @@ def execute_async(*args, **kwargs): [['partition', 'WARNING', 0, 'execute_async'], ['aggregate', 'ERROR', 60*30, 'execute'], ['table', 'WARNING', 0, 'execute']]) -def test_scan_negative_operation_timed_out(mode, severity, timeout, execute_mock, events, node): +def 
test_scan_negative_operation_timed_out(mode, severity, timeout, execute_mock, events, node): # noqa: PLR0913 # pylint: disable=redefined-outer-name # pylint: disable=too-many-arguments if execute_mock == 'execute_async': @@ -256,7 +256,7 @@ def execute_async(*args, **kwargs): ['partition', 'execute_async'], ['aggregate', 'execute'], ['table', 'execute']]) -def test_scan_negative_exception(mode, severity, running_nemesis, execute_mock, events, node): +def test_scan_negative_exception(mode, severity, running_nemesis, execute_mock, events, node): # noqa: PLR0913 # pylint: disable=redefined-outer-name # pylint: disable=too-many-arguments if running_nemesis: diff --git a/unit_tests/test_sct_events_base.py b/unit_tests/test_sct_events_base.py index f418b2299b7..c7045006af5 100644 --- a/unit_tests/test_sct_events_base.py +++ b/unit_tests/test_sct_events_base.py @@ -137,7 +137,7 @@ class Z(SctEvent): self.assertNotEqual(z, y) def test_equal_pickle_unpickle(self): - global Y # pylint: disable=global-variable-not-assigned; assigned by class definition + global Y # pylint: disable=global-variable-not-assigned; assigned by class definition # noqa: PLW0603 class Y(SctEvent): pass @@ -372,7 +372,7 @@ class Mixin: self.assertEqual(yt.attr1, "value1") def test_add_subevent_type_pickle(self): - global Y # pylint: disable=global-variable-not-assigned; assigned by class definition + global Y # pylint: disable=global-variable-not-assigned; assigned by class definition # noqa: PLW0603 class Y(SctEvent): T: Type[SctEvent] @@ -493,7 +493,7 @@ class Y(LogEvent): self.assertTrue(y._ready_to_publish) def test_clone_fresh(self): - global Y # pylint: disable=global-variable-not-assigned; assigned by class definition + global Y # pylint: disable=global-variable-not-assigned; assigned by class definition # noqa: PLW0603 class Y(LogEvent): pass @@ -516,7 +516,7 @@ class Y(LogEvent): self.assertIsInstance(y, SctEventProtocol) def test_clone_with_info(self): - global Y # pylint: 
disable=global-variable-not-assigned; assigned by class definition + global Y # pylint: disable=global-variable-not-assigned; assigned by class definition # noqa: PLW0603 class Y(LogEvent): pass diff --git a/unit_tests/test_sct_events_continuous_events_registry.py b/unit_tests/test_sct_events_continuous_events_registry.py index d25243df4dc..0a9700c4d74 100644 --- a/unit_tests/test_sct_events_continuous_events_registry.py +++ b/unit_tests/test_sct_events_continuous_events_registry.py @@ -172,6 +172,6 @@ def test_get_compact_events_by_continues_hash_from_log(self, populated_registry: assert found_event assert isinstance(found_event, CompactionEvent) assert found_event.node == 'node1' - assert found_event.shard == 2 + assert found_event.shard == 2 # noqa: PLR2004 assert found_event.table == 'system.local' assert found_event.compaction_process_id == 'edc49670-2a65-11ec-a8b8-b62621e7624c' diff --git a/unit_tests/test_scylla_yaml_builders.py b/unit_tests/test_scylla_yaml_builders.py index a436cc98e4b..7fce0c9cf27 100644 --- a/unit_tests/test_scylla_yaml_builders.py +++ b/unit_tests/test_scylla_yaml_builders.py @@ -415,7 +415,7 @@ def init_nodes(self): node.config_setup(append_scylla_args=self.get_scylla_args()) # pylint: disable=too-many-arguments - def add_nodes(self, count, ec2_user_data='', dc_idx=0, rack=0, enable_auto_bootstrap=False, instance_type=None): + def add_nodes(self, count, ec2_user_data='', dc_idx=0, rack=0, enable_auto_bootstrap=False, instance_type=None): # noqa: PLR0913 pass diff --git a/unit_tests/test_seed_selector.py b/unit_tests/test_seed_selector.py index 51ed83c2657..a0106ed27da 100644 --- a/unit_tests/test_seed_selector.py +++ b/unit_tests/test_seed_selector.py @@ -90,9 +90,9 @@ def test_random_2_seeds(self): self.setup_cluster(nodes_number=3) self.cluster.set_test_params(seeds_selector='random', seeds_num=2, db_type='scylla') self.cluster.set_seeds() - self.assertTrue(len(self.cluster.seed_nodes) == 2) + 
self.assertTrue(len(self.cluster.seed_nodes) == 2) # noqa: PLR2004 self.assertTrue(len(self.cluster.non_seed_nodes) == 1) - self.assertTrue(len(self.cluster.seed_nodes_addresses) == 2) + self.assertTrue(len(self.cluster.seed_nodes_addresses) == 2) # noqa: PLR2004 def test_first_1_seed(self): self.setup_cluster(nodes_number=1) diff --git a/unit_tests/test_utils_docker.py b/unit_tests/test_utils_docker.py index a33464117cf..5d0fbf1be19 100644 --- a/unit_tests/test_utils_docker.py +++ b/unit_tests/test_utils_docker.py @@ -151,7 +151,7 @@ def setUp(self) -> None: self.node = DummyNode() self.node._containers["c1"] = self.container = DummyContainer() - def test_get_docker_client(self): + def test_get_docker_client(self): # noqa: PLR0915 with self.subTest("Default Docker client"): self.assertEqual(ContainerManager.get_docker_client(self.node, "c2"), ContainerManager.default_docker_client) diff --git a/unit_tests/test_utils_k8s.py b/unit_tests/test_utils_k8s.py index 8315d3a0946..3de51a54e2f 100644 --- a/unit_tests/test_utils_k8s.py +++ b/unit_tests/test_utils_k8s.py @@ -168,7 +168,7 @@ def get_k8s_endpoint_update(namespace, pod_name, ip, required_labels_in_place=Tr # pylint: disable=protected-access -def test_scylla_pods_ip_change_tracker_01_positive_scenario(): # pylint: disable=too-many-statements +def test_scylla_pods_ip_change_tracker_01_positive_scenario(): # pylint: disable=too-many-statements # noqa: PLR0915 # Init objects core_v1_api_mock, k8s_kluster, ip_mapper = mock.Mock(), FakeK8SKluster(), {} namespace1, pod_names1 = 'scylla', ("pod-name-1", "pod-name-2", "pod-name-3") @@ -200,7 +200,7 @@ def test_scylla_pods_ip_change_tracker_01_positive_scenario(): # pylint: disabl ip_tracker.register_callbacks( callbacks=ns2_eachpod_callbacks_as_list, namespace=namespace2, add_pod_name_as_kwarg=True) - assert len(ip_mapper) == 2, ip_mapper + assert len(ip_mapper) == 2, ip_mapper # noqa: PLR2004 assert namespace1 in ip_mapper and namespace2 in ip_mapper, ip_mapper assert 
pod_names1[0] in ip_mapper[namespace1], ip_mapper diff --git a/unit_tests/test_version_utils.py b/unit_tests/test_version_utils.py index 10c5468fd06..a8faca8ee60 100644 --- a/unit_tests/test_version_utils.py +++ b/unit_tests/test_version_utils.py @@ -216,14 +216,13 @@ def __init__(self, scylla_version, nemesis_like_class): node_scylla_version = "2023.1.dev" elif scylla_version.startswith('master:') or scylla_version == "": node_scylla_version = "4.7.dev" + elif ":" in scylla_version: + node_scylla_version = scylla_version.split(":")[0] + if node_scylla_version.count(".") < 1: + node_scylla_version += ".0" + node_scylla_version += ".dev" else: - if ":" in scylla_version: - node_scylla_version = scylla_version.split(":")[0] - if node_scylla_version.count(".") < 1: - node_scylla_version += ".0" - node_scylla_version += ".dev" - else: - node_scylla_version = scylla_version + node_scylla_version = scylla_version nodes = [type("Node", (object,), {"scylla_version": node_scylla_version})] if nemesis_like_class: self.cluster = type("Cluster", (object,), { diff --git a/unit_tests/test_wait.py b/unit_tests/test_wait.py index bfde7c1007c..6f089231235 100644 --- a/unit_tests/test_wait.py +++ b/unit_tests/test_wait.py @@ -120,7 +120,7 @@ def test_04_stop_by_event(self, throw_exc): res = wait_for(self.callback, timeout=3, step=.5, throw_exc=throw_exc, stop_event=self.ev, arg1=1, arg2=3) self.assertFalse(res) - self.assertTrue(len(self.calls) < 6, f"{len(self.calls)}") + self.assertTrue(len(self.calls) < 6, f"{len(self.calls)}") # noqa: PLR2004 def test_04_stop_by_event_in_main_thread(self): self.callback_return_true_after = 3 @@ -132,7 +132,7 @@ def test_04_stop_by_event_in_main_thread(self): exc = th.exception() self.assertFalse(exc, f"{exc}") self.assertFalse(res, f"{res}") - self.assertTrue(len(self.calls) < 5) + self.assertTrue(len(self.calls) < 5) # noqa: PLR2004 def test_04_return_result_before_stop_event_and_wait_timeout(self): self.callback_return_true_after = 2 @@ 
-156,10 +156,10 @@ def test_04_raise_exception_in_func_before_set_event(self, throw_exc): def callback(arg1, arg2): self.calls.append((arg1, arg2)) - if len(self.calls) == 3: + if len(self.calls) == 3: # noqa: PLR2004 raise Exception("Raise before event") - if len(self.calls) == 10: + if len(self.calls) == 10: # noqa: PLR2004 return "what ever" return False th = threading.Thread(target=self.set_stop_in_timeout, kwargs={"ev": self.ev, "set_after": 5}) diff --git a/unit_tests/test_ycsb_thread.py b/unit_tests/test_ycsb_thread.py index 95079846e7d..c5c428060e5 100644 --- a/unit_tests/test_ycsb_thread.py +++ b/unit_tests/test_ycsb_thread.py @@ -173,7 +173,7 @@ def check_metrics(): # 5. check that events with the expected error were raised cat = file_logger.get_events_by_category() - assert len(cat["ERROR"]) == 2 + assert len(cat["ERROR"]) == 2 # noqa: PLR2004 assert "=UNEXPECTED_STATE" in cat["ERROR"][0] assert "=ERROR" in cat["ERROR"][1] diff --git a/update_java.sh b/update_java.sh new file mode 100755 index 00000000000..f92d8883b93 --- /dev/null +++ b/update_java.sh @@ -0,0 +1,6 @@ +for ip in 52.5.51.115 54.196.10.64 52.21.196.141 54.242.253.21 34.237.65.206 +do +#ssh -i ~/.ssh/scylla-qa-ec2 jenkins@$ip sudo yum install -y java-11-openjdk +ssh -i ~/.ssh/scylla-qa-ec2 jenkins@$ip sudo apt-get install -y openjdk-11-jdk-headless +ssh -i ~/.ssh/scylla-qa-ec2 jenkins@$ip sudo update-alternatives --set java java-11-openjdk.x86_64 +done diff --git a/upgrade_schema_test.py b/upgrade_schema_test.py index d361f3d7066..9954b245d58 100644 --- a/upgrade_schema_test.py +++ b/upgrade_schema_test.py @@ -258,8 +258,8 @@ def _get_thrift_client(self, host, port=9160): # 9160 def test_upgrade_schema(self): - global thrift_client - global cql_client + global thrift_client # noqa: PLW0603 + global cql_client # noqa: PLW0603 ips = [] for node in self.db_cluster.nodes: ips.append(node.public_ip_address) diff --git a/upgrade_test.py b/upgrade_test.py index 004f7374304..6a2289a4489 100644 --- 
a/upgrade_test.py +++ b/upgrade_test.py @@ -184,7 +184,7 @@ def upgrade_node(self, node, upgrade_sstables=True): @decorate_with_context(ignore_abort_requested_errors) # https://github.com/scylladb/scylla/issues/10447#issuecomment-1194155163 - def _upgrade_node(self, node, upgrade_sstables=True, new_scylla_repo=None, new_version=None): + def _upgrade_node(self, node, upgrade_sstables=True, new_scylla_repo=None, new_version=None): # noqa: PLR0912, PLR0915 # pylint: disable=too-many-branches,too-many-statements new_scylla_repo = new_scylla_repo or self.params.get('new_scylla_repo') new_version = new_version or self.params.get('new_version') @@ -296,7 +296,7 @@ def _upgrade_node(self, node, upgrade_sstables=True, new_scylla_repo=None, new_v InfoEvent(message='upgrade_node - starting to "daemon-reload"').publish() node.remoter.run('sudo systemctl daemon-reload') InfoEvent(message='upgrade_node - ended to "daemon-reload"').publish() - else: + else: # noqa: PLR5501 if node.distro.is_rhel_like: InfoEvent(message='upgrade_node - starting to "yum update"').publish() node.remoter.run(r'sudo yum update {}\* -y'.format(scylla_pkg_ver)) @@ -345,7 +345,7 @@ def rollback_node(self, node, upgrade_sstables=True): self._rollback_node(node=node, upgrade_sstables=upgrade_sstables) @decorate_with_context(ignore_abort_requested_errors) - def _rollback_node(self, node, upgrade_sstables=True): + def _rollback_node(self, node, upgrade_sstables=True): # noqa: PLR0912, PLR0915 # pylint: disable=too-many-branches,too-many-statements InfoEvent(message='Rollbacking a Node').publish() result = node.remoter.run('scylla --version') @@ -599,7 +599,7 @@ def _update_scylla_yaml_on_node(node_to_update: BaseNode, updates: dict): with node_to_update.remote_scylla_yaml() as scylla_yaml: scylla_yaml.update(updates) - def test_rolling_upgrade(self): # pylint: disable=too-many-locals,too-many-statements + def test_rolling_upgrade(self): # pylint: disable=too-many-locals,too-many-statements # noqa: PLR0915 
""" Upgrade half of nodes in the cluster, and start special read workload during the stage. Checksum method is changed to xxhash from Scylla 2.2, @@ -1338,7 +1338,7 @@ def prepare_data_before_upgrade(self): return cs_user_profiles - def _custom_profile_rolling_upgrade(self, cs_user_profiles, new_scylla_repo=None, new_version=None): # pylint: disable=too-many-locals,too-many-statements + def _custom_profile_rolling_upgrade(self, cs_user_profiles, new_scylla_repo=None, new_version=None): # pylint: disable=too-many-locals,too-many-statements # noqa: PLR0915 InfoEvent(message='Starting write workload during entire test').publish() user_profiles, duration_per_cs_profile = self.parse_cs_user_profiles_param(cs_user_profiles) entire_write_thread_pool = self.run_cs_user_profiles(cs_profiles=user_profiles, diff --git a/utils/build_system/create_test_release_jobs.py b/utils/build_system/create_test_release_jobs.py index 971536e66a4..f8ad9fbda4e 100644 --- a/utils/build_system/create_test_release_jobs.py +++ b/utils/build_system/create_test_release_jobs.py @@ -25,7 +25,7 @@ class JenkinsPipelines: - def __init__(self, username, password, base_job_dir, sct_branch_name, sct_repo): # pylint: disable=too-many-arguments + def __init__(self, username, password, base_job_dir, sct_branch_name, sct_repo): # pylint: disable=too-many-arguments # noqa: PLR0913 self.jenkins = jenkins.Jenkins('https://jenkins.scylladb.com', username=username, password=password) self.base_sct_dir = Path(__file__).parent.parent.parent self.base_job_dir = base_job_dir diff --git a/utils/get_supported_scylla_base_versions.py b/utils/get_supported_scylla_base_versions.py index 08c9d4a37d1..f8c3f5d9c53 100755 --- a/utils/get_supported_scylla_base_versions.py +++ b/utils/get_supported_scylla_base_versions.py @@ -75,7 +75,7 @@ def set_start_support_version(self, backend: str = None) -> None: LOGGER.info("Support start versions set: oss=%s enterprise=%s", self.oss_start_support_version, 
self.ent_start_support_version) - def get_supported_scylla_base_versions(self, supported_versions) -> list: # pylint: disable=too-many-branches + def get_supported_scylla_base_versions(self, supported_versions) -> list: # pylint: disable=too-many-branches # noqa: PLR0912 """ We have special base versions list for each release, and we don't support to upgraded from enterprise to opensource. This function is used to get the base versions list which will be used in the upgrade test.