feature(pre-commit): replace pylint with ruff scylladb#5799
Ruff is a Rust-based linter for Python that works incredibly fast.
Since pylint's speed has been causing us issues on SCT CI,
we should start using ruff.
For now it uses the set of pylint rules that ruff has implemented,
and we can slowly extend its configuration to enable more
of the rule sets it offers.

Ref: https://github.com/charliermarsh/ruff
Ref: https://www.youtube.com/watch?v=jeoL4qsSLbE
fruch committed Dec 17, 2023
1 parent 9016c20 commit bd87079
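Most of the hunks below follow one pattern: the existing # pylint: disable=... pragma is kept and the matching ruff rule code is appended as a # noqa: PL... comment, so both linters stay quiet during the transition. A minimal sketch of that pattern with made-up names (the pylint-to-ruff code pairings are paraphrased from the ruff docs, not taken from this diff):

# Illustrative sketch only -- names invented; rule pairings per the ruff docs:
#   too-many-arguments  -> PLR0913    too-many-statements       -> PLR0915
#   too-many-branches   -> PLR0912    magic value in comparison -> PLR2004
#   global-statement    -> PLW0603    loop variable overwritten -> PLW2901

def make_report(host, port, user, keyspace, table, timeout):  # pylint: disable=too-many-arguments # noqa: PLR0913
    return f"{user}@{host}:{port}/{keyspace}.{table} (timeout={timeout})"


if __name__ == "__main__":
    print(make_report("db-1", 9042, "scylla", "ks1", "standard1", 30))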
Showing 170 changed files with 873 additions and 939 deletions.
8 changes: 4 additions & 4 deletions .pre-commit-config.yaml
@@ -39,12 +39,12 @@ repos:
types: [python]
exclude: '\.sh$'

- id: pylint
name: pylint
entry: pylint -j 2 -d consider-using-f-string
- id: ruff
name: ruff
entry: ruff --force-exclude --fix
language: system
exclude: ^docker/alternator-dns/.*$
types: [python]
'types_or': [python, pyi]

- repo: https://github.com/alessandrojcm/commitlint-pre-commit-hook
rev: v5.0.0
2 changes: 1 addition & 1 deletion add_new_dc_test.py
@@ -90,7 +90,7 @@ def add_node_in_new_dc(self) -> BaseNode:
self.monitors.reconfigure_scylla_monitoring()

status = self.db_cluster.get_nodetool_status()
assert len(status.keys()) == 2, f"new datacenter was not registered. Cluster status: {status}"
assert len(status.keys()) == 2, f"new datacenter was not registered. Cluster status: {status}" # noqa: PLR2004
self.log.info("New DC to cluster has been added")
return new_node

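PLR2004 is ruff's magic-value-comparison check, and the trailing noqa is how this commit keeps the bare literals; the noqa-free alternative is a named constant, roughly like this (illustrative sketch, not part of the change):

EXPECTED_DATACENTERS = 2  # naming the literal satisfies PLR2004 without a noqa


def assert_two_datacenters(status: dict) -> None:
    assert len(status) == EXPECTED_DATACENTERS, f"new datacenter was not registered. Cluster status: {status}"


if __name__ == "__main__":
    assert_two_datacenters({"dc1": {}, "dc2": {}})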
4 changes: 2 additions & 2 deletions artifacts_test.py
@@ -178,7 +178,7 @@ def verify_users(self):
out = self.node.remoter.run(cmd="ls -ltr --full-time /home", verbose=True).stdout.strip()
for line in out.splitlines():
splitted_line = line.split()
if len(splitted_line) <= 2:
if len(splitted_line) <= 2: # noqa: PLR2004
continue
user = splitted_line[-1]
if user == "centos":
@@ -299,7 +299,7 @@ def run_pre_create_schema(self, replication_factor=1):
compaction=compaction_strategy, sstable_size=sstable_size)

# pylint: disable=too-many-statements,too-many-branches
def test_scylla_service(self):
def test_scylla_service(self): # noqa: PLR0915

self.run_pre_create_schema()

2 changes: 1 addition & 1 deletion cdc_replication_test.py
@@ -227,7 +227,7 @@ def test_replication_longevity(self) -> None:

# pylint: disable=too-many-statements,too-many-branches,too-many-locals

def test_replication(self, is_gemini_test: bool, mode: Mode) -> None:
def test_replication(self, is_gemini_test: bool, mode: Mode) -> None: # noqa: PLR0915
assert is_gemini_test or (mode == Mode.DELTA), "cassandra-stress doesn't work with preimage/postimage modes"

self.consistency_ok = False
2 changes: 1 addition & 1 deletion docker/alternator-dns/dns_server.py
@@ -20,7 +20,7 @@


def livenodes_update():
global alternator_port
global alternator_port # noqa: PLW0602
global livenodes
while True:
# Contact one of the already known nodes by random, to fetch a new
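The two rules touched here are close cousins: PLW0602 fires when a name is declared global but never assigned in the function (as with alternator_port above), while PLW0603 fires when a function really does rebind a module-level name. A small sketch of the distinction, with invented names:

port = 8000      # only read inside refresh()
livenodes = []   # rebound inside refresh()


def refresh():
    global port       # PLW0602: declared global but never assigned here, so the statement can simply be dropped
    global livenodes  # PLW0603: the function genuinely rebinds a module-level name
    livenodes = [f"127.0.0.{i}:{port}" for i in range(1, 4)]


if __name__ == "__main__":
    refresh()
    print(livenodes)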
2 changes: 1 addition & 1 deletion docker/env/version
@@ -1 +1 @@
1.54-update-k8s-components-1
1.55-introduce-ruff
2 changes: 1 addition & 1 deletion functional_tests/scylla_operator/conftest.py
@@ -70,7 +70,7 @@ def publish_test_result():

@pytest.fixture(autouse=True, scope='package', name="tester")
def fixture_tester() -> ScyllaOperatorFunctionalClusterTester:
global TESTER # pylint: disable=global-statement
global TESTER # pylint: disable=global-statement # noqa: PLW0603
os.chdir(sct_abs_path())
tester_inst = ScyllaOperatorFunctionalClusterTester()
TESTER = tester_inst # putting tester global, so we can report skipped test (one with mark.skip)
2 changes: 1 addition & 1 deletion functional_tests/scylla_operator/libs/auxiliary.py
@@ -42,7 +42,7 @@ def update_test_status(self, test_name, status, error=None):

def get_test_failures(self):
for test_name, test_data in self.test_data.items():
status, message = (test_data[0], test_data[1]) if len(test_data) == 2 else ('UNKNOWN', '')
status, message = (test_data[0], test_data[1]) if len(test_data) == 2 else ('UNKNOWN', '') # noqa: PLR2004
if status != 'SUCCESS':
TestFrameworkEvent(
source=self.__class__.__name__,
4 changes: 2 additions & 2 deletions functional_tests/scylla_operator/libs/helpers.py
@@ -100,7 +100,7 @@ def scylla_services_names(db_cluster: ScyllaPodCluster) -> list:
if name not in ('NAME', f"{scylla_cluster_name}-client")]


def wait_for_resource_absence(db_cluster: ScyllaPodCluster, # pylint: disable=too-many-arguments
def wait_for_resource_absence(db_cluster: ScyllaPodCluster, # pylint: disable=too-many-arguments # noqa: PLR0913
resource_type: str, resource_name: str,
namespace: str = SCYLLA_NAMESPACE,
step: int = 2, timeout: int = 60) -> None:
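PLR0913 is ruff's counterpart of pylint's too-many-arguments (by default it objects to more than five parameters). Besides the noqa used here, a common suppression-free refactor is to bundle related parameters into one object, sketched below with invented names:

from dataclasses import dataclass


@dataclass
class WaitSpec:
    resource_type: str
    resource_name: str
    namespace: str = "scylla"
    step: int = 2
    timeout: int = 60


def wait_for_absence(cluster: str, spec: WaitSpec) -> str:
    # one spec object instead of six positional parameters keeps PLR0913 quiet
    return f"waiting up to {spec.timeout}s for {spec.resource_type}/{spec.resource_name} in {cluster}/{spec.namespace}"


if __name__ == "__main__":
    print(wait_for_absence("k8s-1", WaitSpec("pod", "scylla-0")))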
@@ -216,7 +216,7 @@ def verify_resharding_on_k8s(db_cluster: ScyllaPodCluster, cpus: Union[str, int,
# Calculate the time spent for resharding. We need to have it be bigger than 2minutes
# because it is the timeout of the liveness probe for Scylla pods.
resharding_time = time.time() - resharding_started
if resharding_time < 120:
if resharding_time < 120: # noqa: PLR2004
log.warning(
"Resharding was too fast - '%s's (<120s) on the '%s' node",
resharding_time, node.name)
12 changes: 6 additions & 6 deletions functional_tests/scylla_operator/test_functional.py
@@ -94,7 +94,7 @@ def test_single_operator_image_tag_is_everywhere(db_cluster):


@pytest.mark.required_operator("v1.11.0")
def test_deploy_quasi_multidc_db_cluster(db_cluster: ScyllaPodCluster): # pylint: disable=too-many-locals,too-many-statements,too-many-branches
def test_deploy_quasi_multidc_db_cluster(db_cluster: ScyllaPodCluster): # pylint: disable=too-many-locals,too-many-statements,too-many-branches # noqa: PLR0915
"""
Deploy 2 'ScyllaCluster' K8S objects in 2 different namespaces in the single K8S cluster
and combine them into a single DB cluster.
@@ -144,7 +144,7 @@ def get_pod_names_and_ips(cluster_name: str, namespace: str):
namespace=namespace).stdout.split("\n")
pod_names_and_ips = [row.strip() for row in pod_names_and_ips if row.strip()]
assert pod_names_and_ips
assert len(pod_names_and_ips) == 3
assert len(pod_names_and_ips) == 3 # noqa: PLR2004
pod_data = {namespace: {}}
for pod_name_and_ip in pod_names_and_ips:
pod_name, pod_ip = pod_name_and_ip.split()
@@ -165,7 +165,7 @@ def get_pod_names_and_ips(cluster_name: str, namespace: str):
f" -l scylla/cluster={cluster_name} -l scylla-operator.scylladb.com/scylla-service-type=member",
namespace=namespace).stdout.split()
assert svc_ips
assert len(svc_ips) == 3
assert len(svc_ips) == 3 # noqa: PLR2004
assert all(svc_ip in ('', 'None') for svc_ip in svc_ips), "SVC IPs were expected to be absent"

# NOTE: read Scylla pods IPs
@@ -196,7 +196,7 @@ def get_pod_names_and_ips(cluster_name: str, namespace: str):
f"exec {pod_name} -- /bin/cqlsh -e \"{cqlsh_cmd}\"",
namespace=current_namespace).stdout.split("---\n")[-1].split("\n")
table_rows = [yaml.safe_load(row) for row in cqlsh_results if "{" in row]
assert len(table_rows) == 5, "Expected 5 peers"
assert len(table_rows) == 5, "Expected 5 peers" # noqa: PLR2004
for row in table_rows:
assert row["peer"] == row["rpc_address"]
assert row["peer"] != pod_ip
@@ -705,7 +705,7 @@ def test_scylla_operator_pods(db_cluster: ScyllaPodCluster):
scylla_operator_pods = get_pods_and_statuses(db_cluster=db_cluster, namespace=SCYLLA_OPERATOR_NAMESPACE,
label='app.kubernetes.io/instance=scylla-operator')

assert len(scylla_operator_pods) == 2, f'Expected 2 scylla-operator pods, but exists {len(scylla_operator_pods)}'
assert len(scylla_operator_pods) == 2, f'Expected 2 scylla-operator pods, but exists {len(scylla_operator_pods)}' # noqa: PLR2004

not_running_pods = ','.join(
[pods_info['name'] for pods_info in scylla_operator_pods if pods_info['status'] != 'Running'])
@@ -780,7 +780,7 @@ def test_deploy_helm_with_default_values(db_cluster: ScyllaPodCluster):

pods_name_and_status = get_pods_and_statuses(db_cluster, namespace=namespace)

assert len(pods_name_and_status) == 3, (
assert len(pods_name_and_status) == 3, ( # noqa: PLR2004
f"Expected 3 pods to be created in {namespace} namespace "
f"but actually {len(pods_name_and_status)}: {pods_name_and_status}")

2 changes: 1 addition & 1 deletion grow_cluster_test.py
@@ -158,7 +158,7 @@ def test_add_remove_nodes(self):
for _ in range(add_cnt):
self.add_nodes(1)
time.sleep(wait_interval)
rm_cnt = random.randint(1, max_random_cnt) if len(self.db_cluster.nodes) >= 10 else 0
rm_cnt = random.randint(1, max_random_cnt) if len(self.db_cluster.nodes) >= 10 else 0 # noqa: PLR2004
if rm_cnt > 0:
self.log.info('Remove %s nodes from cluster', rm_cnt)
for _ in range(rm_cnt):
2 changes: 1 addition & 1 deletion hinted_handoff_test.py
@@ -33,7 +33,7 @@ def test_stop_nodes_under_stress(self):
Stop node3.
Read all data n=X with CL=ONE.
"""
assert len(self.db_cluster.nodes) == 3, "The test requires 3 DB nodes!"
assert len(self.db_cluster.nodes) == 3, "The test requires 3 DB nodes!" # noqa: PLR2004
node1 = self.db_cluster.nodes[0]
node2 = self.db_cluster.nodes[1]
node3 = self.db_cluster.nodes[2]
4 changes: 2 additions & 2 deletions longevity_test.py
@@ -113,7 +113,7 @@ def _run_validate_large_collections_warning_in_logs(self, node):
if not res:
InfoEvent("Did not find expected log message warning: {}".format(msg), severity=Severity.ERROR)

def test_custom_time(self):
def test_custom_time(self): # noqa: PLR0912, PLR0915
"""
Run cassandra-stress with params defined in data_dir/scylla.yaml
"""
@@ -327,7 +327,7 @@ def chunks(_list, chunk_size):
self._pre_create_templated_user_schema(batch_start=extra_tables_idx,
batch_end=extra_tables_idx+num_of_newly_created_tables)
for i in range(num_of_newly_created_tables):
batch += self.create_templated_user_stress_params(extra_tables_idx + i, cs_profile=cs_profile)
batch.append(self.create_templated_user_stress_params(extra_tables_idx + i, cs_profile=cs_profile))

nodes_ips = self.all_node_ips_for_stress_command
for params in batch:
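Besides the noqa comments, the longevity_test.py hunk above changes batch += ... to batch.append(...). On a list those two operations are not equivalent, which the generic sketch below illustrates (nothing in it comes from SCT):

params = {"stress_cmd": "write", "round_robin": True}

batch_plus_equals = []
batch_plus_equals += params        # += iterates the right-hand side, adding the dict's *keys*

batch_append = []
batch_append.append(params)        # append adds the dict itself as a single element

print(batch_plus_equals)           # ['stress_cmd', 'round_robin']
print(batch_append)                # [{'stress_cmd': 'write', 'round_robin': True}]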
9 changes: 5 additions & 4 deletions mgmt_cli_test.py
@@ -172,7 +172,7 @@ def restore_backup_from_backup_task(self, mgr_cluster, backup_task, keyspace_and
keyspace_and_table_list=keyspace_and_table_list)

# pylint: disable=too-many-arguments
def verify_backup_success(self, mgr_cluster, backup_task, keyspace_name='keyspace1', tables_names=None,
def verify_backup_success(self, mgr_cluster, backup_task, keyspace_name='keyspace1', tables_names=None, # noqa: PLR0913
truncate=True, restore_data_with_task=False, timeout=None):
if tables_names is None:
tables_names = ['standard1']
@@ -188,7 +188,7 @@ def verify_backup_success(self, mgr_cluster, backup_task, keyspace_name='keyspac
self.restore_backup_from_backup_task(mgr_cluster=mgr_cluster, backup_task=backup_task,
keyspace_and_table_list=per_keyspace_tables_dict)

def restore_backup_with_task(self, mgr_cluster, snapshot_tag, timeout, restore_schema=False, restore_data=False,
def restore_backup_with_task(self, mgr_cluster, snapshot_tag, timeout, restore_schema=False, restore_data=False, # noqa: PLR0913
location_list=None):
location_list = location_list if location_list else self.locations
restore_task = mgr_cluster.create_restore_task(restore_schema=restore_schema, restore_data=restore_data,
@@ -251,7 +251,7 @@ def generate_background_read_load(self):
number_of_loaders = self.params.get("n_loaders")

scylla_version = self.db_cluster.nodes[0].scylla_version
if parse_version(scylla_version).release[0] == 2019:
if parse_version(scylla_version).release[0] == 2019: # noqa: PLR2004
# Making sure scylla version is 2019.1.x
throttle_per_node = 10666
else:
@@ -866,8 +866,9 @@ def test_repair_multiple_keyspace_types(self): # pylint: disable=invalid-name
keyspace_repair_percentage = per_keyspace_progress.get(keyspace_name, None)
assert keyspace_repair_percentage is not None, \
"The keyspace {} was not included in the repair!".format(keyspace_name)
# noqa: PLR2004
assert keyspace_repair_percentage == 100, \
"The repair of the keyspace {} stopped at {}%".format(
"The repair of the keyspace {} stopped at {}%".format( # noqa: PLR2004
keyspace_name, keyspace_repair_percentage)

localstrategy_keyspace_percentage = per_keyspace_progress.get(self.LOCALSTRATEGY_KEYSPACE_NAME, None)
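Worth noting for hunks like the one above: ruff, like flake8, only honours a # noqa written as a trailing comment on the line that triggers the diagnostic; a # noqa comment placed on a line of its own does not silence anything. Illustrative sketch:

count = 3

# noqa: PLR2004                    (on its own line this suppresses nothing)
too_many = count > 2               # PLR2004 would still be reported for this comparison

too_many = count > 2  # noqa: PLR2004   (a trailing comment on the flagged line does work)

print(too_many)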
9 changes: 4 additions & 5 deletions mgmt_upgrade_test.py
@@ -52,7 +52,7 @@ def _create_and_add_cluster(self):
auth_token=self.monitors.mgmt_auth_token)
return mgr_cluster, current_manager_version

def test_upgrade(self): # pylint: disable=too-many-locals,too-many-statements
def test_upgrade(self): # pylint: disable=too-many-locals,too-many-statements # noqa: PLR0915
manager_node = self.monitors.nodes[0]

target_upgrade_server_version = self.params.get('target_scylla_mgmt_server_address')
@@ -275,11 +275,10 @@ def validate_previous_task_details(task, previous_task_details):
delta = current_value - previous_task_details[detail_name]
# I check that the time delta is smaller than 60 seconds since we calculate the next run time on our own,
# and as a result it could be a BIT imprecise
if abs(delta.total_seconds()) > 60:
mismatched_details_name_list.append(detail_name)
else:
if current_value != previous_task_details[detail_name]:
if abs(delta.total_seconds()) > 60: # noqa: PLR2004
mismatched_details_name_list.append(detail_name)
elif current_value != previous_task_details[detail_name]:
mismatched_details_name_list.append(detail_name)
complete_error_description = _create_mismatched_details_error_message(previous_task_details,
current_task_details,
mismatched_details_name_list)
2 changes: 1 addition & 1 deletion performance_regression_alternator_test.py
@@ -27,7 +27,7 @@ def __init__(self, *args):
self.stack.enter_context(ignore_alternator_client_errors())
self.stack.enter_context(ignore_operation_errors())

def _workload(self, stress_cmd, stress_num, test_name=None, sub_type=None, keyspace_num=1, prefix='', debug_message='', # pylint: disable=too-many-arguments,arguments-differ
def _workload(self, stress_cmd, stress_num, test_name=None, sub_type=None, keyspace_num=1, prefix='', debug_message='', # pylint: disable=too-many-arguments,arguments-differ # noqa: PLR0913
save_stats=True, is_alternator=True):
if not is_alternator:
stress_cmd = stress_cmd.replace('dynamodb', 'cassandra-cql')
2 changes: 1 addition & 1 deletion performance_regression_cdc_test.py
@@ -152,7 +152,7 @@ def cdc_workflow(self, use_cdclog_reader=False): # pylint: disable=unused-varia

self.check_regression_with_baseline(subtest_baseline="cdc_disabled")

def _workload_cdc(self, stress_cmd, stress_num, test_name, sub_type=None, # pylint: disable=too-many-arguments
def _workload_cdc(self, stress_cmd, stress_num, test_name, sub_type=None, # pylint: disable=too-many-arguments # noqa: PLR0913
save_stats=True, read_cdclog_cmd=None, update_cdclog_stats=False, enable_batching=True):
cdc_stress_queue = None

2 changes: 1 addition & 1 deletion performance_regression_gradual_grow_throughput.py
@@ -129,7 +129,7 @@ def preload_data(self, compaction_strategy=None):
self.log.info("Dataset has been populated")

# pylint: disable=too-many-arguments,too-many-locals
def run_gradual_increase_load(self, stress_cmd_templ,
def run_gradual_increase_load(self, stress_cmd_templ, # noqa: PLR0913
start_ops, max_ops, throttle_step,
stress_num, num_loaders, compaction_strategy, test_name):
self.warmup_cache(compaction_strategy)
2 changes: 1 addition & 1 deletion performance_regression_row_level_repair_test.py
@@ -80,7 +80,7 @@ def preload_data(self, consistency_level=None):

for stress_cmd in prepare_write_cmd:
if consistency_level:
stress_cmd = self._update_cl_in_stress_cmd(
stress_cmd = self._update_cl_in_stress_cmd( # noqa: PLW2901
str_stress_cmd=stress_cmd, consistency_level=consistency_level)
params.update({'stress_cmd': stress_cmd})

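PLW2901 corresponds to pylint's redefined-loop-name: the loop variable stress_cmd is rebound inside the loop body. The noqa keeps the in-place rebinding; the suppression-free alternative is simply a new name, as in this generic sketch:

commands = ["write cl=QUORUM", "read cl=QUORUM"]

# What PLW2901 points at: the loop variable itself is rebound in the body.
for cmd in commands:
    cmd = cmd.replace("QUORUM", "ONE")  # noqa: PLW2901
    print(cmd)

# The noqa-free version binds the transformed value to a new name instead.
for cmd in commands:
    updated_cmd = cmd.replace("QUORUM", "ONE")
    print(updated_cmd)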
2 changes: 1 addition & 1 deletion performance_regression_test.py
@@ -179,7 +179,7 @@ def display_results(self, results, test_name=''):
self.log.debug('Failed to display results: {0}'.format(results))
self.log.debug('Exception: {0}'.format(ex))

def _workload(self, stress_cmd, stress_num, test_name, sub_type=None, keyspace_num=1, prefix='', debug_message='', # pylint: disable=too-many-arguments
def _workload(self, stress_cmd, stress_num, test_name, sub_type=None, keyspace_num=1, prefix='', debug_message='', # pylint: disable=too-many-arguments # noqa: PLR0913
save_stats=True):
if debug_message:
self.log.debug(debug_message)
2 changes: 1 addition & 1 deletion performance_search_max_throughput_test.py
@@ -47,7 +47,7 @@ def test_search_best_mixed_throughput(self): # pylint: disable=too-many-locals

self.run_search_best_performance(**stress_params)

def run_search_best_performance(self, stress_cmd_tmpl: str, # pylint: disable=too-many-arguments,too-many-locals,too-many-statements
def run_search_best_performance(self, stress_cmd_tmpl: str, # pylint: disable=too-many-arguments,too-many-locals,too-many-statements # noqa: PLR0913, PLR0915
stress_num: int,
stress_num_step: int,
stress_step_duration: str,
6 changes: 6 additions & 0 deletions pyproject.toml
@@ -0,0 +1,6 @@
[tool.ruff]
select = ["PL"]

ignore = ["E501"]

target-version = "py311"
2 changes: 1 addition & 1 deletion requirements.in
@@ -31,7 +31,7 @@ python-jenkins==1.7.0
ssh2-python==1.0.0
argus-alm==0.11.7
parameterized==0.8.1
pylint==2.11.1 # Needed for pre-commit hooks
ruff==0.1.8 # Needed for pre-commit hooks
autopep8==1.5.7 # Needed for pre-commit hooks
kubernetes==24.2.0
packaging==21.3