feature(ruff): pyupgrade and isort checks
fruch committed Dec 17, 2023
Parent: 982734c · Commit: e7c5788
Showing 343 changed files with 3,989 additions and 3,431 deletions.
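
Note: the file-by-file hunks below apply two mechanical rewrites. The pyupgrade rules (ruff's UP codes) replace typing.List / typing.Tuple / typing.Optional / typing.Union annotations with builtin generics (PEP 585) and union syntax (PEP 604), and turn str.format() calls into f-strings; the isort rules (ruff's I codes) reorder and merge the import blocks. A minimal before/after sketch of the pyupgrade side, using hypothetical function names (make_pair, describe) that are not from this repository, and assuming Python 3.10+ (or from __future__ import annotations) for the "|" annotation syntax:

# Hypothetical example, not taken from the repository.
# Before:
#     from typing import List, Optional, Tuple
#     def make_pair(items: List[str]) -> Tuple[str, str]: ...
#     def describe(name: Optional[str] = None) -> str:
#         return "name is {}".format(name)

# After the pyupgrade-style rewrite:
def make_pair(items: list[str]) -> tuple[str, str]:
    # builtin generics (PEP 585) need no typing import on Python 3.9+
    return items[0], items[-1]

def describe(name: str | None = None) -> str:
    # "str | None" (PEP 604) replaces Optional[str]; an f-string replaces str.format()
    return f"name is {name}"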
add_new_dc_test.py (7 changes: 3 additions & 4 deletions)
@@ -1,5 +1,4 @@
import warnings
from typing import Tuple, List

from cassandra import ConsistencyLevel
from cassandra.query import SimpleStatement # pylint: disable=no-name-in-module
@@ -56,15 +55,15 @@ def test_add_new_dc(self) -> None: # pylint: disable=too-many-locals
self.verify_data_can_be_read_from_new_dc(new_node)
self.log.info("Test completed.")

def reconfigure_keyspaces_to_use_network_topology_strategy(self, keyspaces: List[str], replication_factors: dict[str, int]) -> None:
def reconfigure_keyspaces_to_use_network_topology_strategy(self, keyspaces: list[str], replication_factors: dict[str, int]) -> None:
node = self.db_cluster.nodes[0]
self.log.info("Reconfiguring keyspace Replication Strategy")
network_topology_strategy = NetworkTopologyReplicationStrategy(
**replication_factors)
for keyspace in keyspaces:
cql = f"ALTER KEYSPACE {keyspace} WITH replication = {network_topology_strategy}"
node.run_cqlsh(cql)
self.log.info("Replication Strategies for {} reconfigured".format(keyspaces))
self.log.info(f"Replication Strategies for {keyspaces} reconfigured")

def prewrite_db_with_data(self) -> None:
self.log.info("Prewriting database...")
@@ -73,7 +72,7 @@ def prewrite_db_with_data(self) -> None:
self.verify_stress_thread(cs_thread_pool=pre_thread)
self.log.info("Database pre write completed")

def start_stress_during_adding_new_dc(self) -> Tuple[CassandraStressThread, CassandraStressThread]:
def start_stress_during_adding_new_dc(self) -> tuple[CassandraStressThread, CassandraStressThread]:
self.log.info("Running stress during adding new DC")
stress_cmds = self.params.get('stress_cmd')
read_thread = self.run_stress_thread(stress_cmd=stress_cmds[0], stats_aggregate_cmds=False, round_robin=False)
admission_control_overload_test.py (6 changes: 4 additions & 2 deletions)
@@ -1,7 +1,9 @@
import time

from invoke import exceptions
from sdcm.tester import ClusterTester

from sdcm.db_stats import PrometheusDBStats
from sdcm.tester import ClusterTester


class AdmissionControlOverloadTest(ClusterTester):
@@ -53,7 +55,7 @@ def run_load(self, job_num, job_cmd, is_prepare=False):
try:
results.append(self.get_stress_results(stress))
except exceptions.CommandTimedOut as ex:
self.log.debug('some c-s timed out\n{}'.format(ex))
self.log.debug(f'some c-s timed out\n{ex}')

return is_ever_triggered

artifacts_test.py (8 changes: 4 additions & 4 deletions)
@@ -11,21 +11,21 @@
#
# Copyright (c) 2020 ScyllaDB
import datetime
import json
import pprint
import re
import typing
from functools import cached_property
import json

import yaml
import requests
import yaml

from sdcm.sct_events import Severity
from sdcm.sct_events.database import ScyllaHousekeepingServiceEvent
from sdcm.tester import ClusterTester
from sdcm.utils.adaptive_timeouts import NodeLoadInfoServices
from sdcm.utils.common import ScyllaProduct, get_latest_scylla_release
from sdcm.utils.housekeeping import HousekeepingDB
from sdcm.utils.common import get_latest_scylla_release, ScyllaProduct

STRESS_CMD: str = "/usr/bin/cassandra-stress"

@@ -285,7 +285,7 @@ def run_pre_create_schema(self, replication_factor=1):
scylla_encryption_options = self.params.get('scylla_encryption_options')
self.log.debug('Pre Creating Schema for c-s with %s keyspaces', keyspace_num)
for i in range(1, keyspace_num+1):
keyspace_name = 'keyspace{}'.format(i)
keyspace_name = f'keyspace{i}'
self.create_keyspace(keyspace_name=keyspace_name, replication_factor=replication_factor)
self.log.debug('%s Created', keyspace_name)
col_num = 5
big_cluster_test.py (3 changes: 1 addition & 2 deletions)
@@ -18,8 +18,7 @@

import logging

from sdcm.tester import ClusterTester
from sdcm.tester import teardown_on_exception
from sdcm.tester import ClusterTester, teardown_on_exception


class HugeClusterTest(ClusterTester):
cdc_replication_test.py (25 changes: 12 additions & 13 deletions)
@@ -13,22 +13,21 @@
#
# Copyright (c) 2020 ScyllaDB

import os
import random
import shutil
import sys
import os
import time
import random
from enum import Enum
from textwrap import dedent
from typing import Optional, Tuple

from cassandra import ConsistencyLevel
from cassandra.query import SimpleStatement # pylint: disable=no-name-in-module

from sdcm import cluster
from sdcm.tester import ClusterTester
from sdcm.gemini_thread import GeminiStressThread
from sdcm.nemesis import CategoricalMonkey
from sdcm.tester import ClusterTester


class Mode(Enum):
@@ -92,7 +91,7 @@ def test_replication_cs(self) -> None:
self.test_replication(False, Mode.DELTA)

def test_replication_gemini(self, mode: Mode) -> None:
self.log.info('Using gemini to generate workload. Mode: {}'.format(mode.name))
self.log.info(f'Using gemini to generate workload. Mode: {mode.name}')
self.test_replication(True, mode)

def test_replication_gemini_delta(self) -> None:
@@ -174,14 +173,14 @@ def test_replication_longevity(self) -> None:
# One more round would cause the nodes to run out of disk space.
no_rounds = 9
for rnd in range(no_rounds):
self.log.info('Starting round {}'.format(rnd))
self.log.info(f'Starting round {rnd}')

self.log.info('Starting nemesis')
self.db_cluster.start_nemesis()

self.log.info('Waiting for workload generation to finish (~30 minutes)...')
stress_results = self.verify_gemini_results(queue=stress_thread)
self.log.info('gemini results: {}'.format(stress_results))
self.log.info(f'gemini results: {stress_results}')

self.log.info('Waiting for replicator to finish (sleeping 180s)...')
time.sleep(180)
@@ -279,10 +278,10 @@ def test_replication(self, is_gemini_test: bool, mode: Mode) -> None:  # noqa: P
self.log.info('Waiting for stressor to finish...')
if is_gemini_test:
stress_results = self.verify_gemini_results(queue=stress_thread)
self.log.info('gemini results: {}'.format(stress_results))
self.log.info(f'gemini results: {stress_results}')
else:
stress_results = stress_thread.get_results()
self.log.info('cassandra-stress results: {}'.format(list(stress_results)))
self.log.info(f'cassandra-stress results: {list(stress_results)}')

self.log.info('Waiting for replicator to finish (sleeping 60s)...')
time.sleep(60)
@@ -318,7 +317,7 @@ def test_replication(self, is_gemini_test: bool, mode: Mode) -> None:  # noqa: P
self.fail('Consistency check failed.')

# Compares tables using the scylla-migrate tool.
def check_consistency(self, migrate_log_dst_path: str, compare_timestamps: bool = True) -> Tuple[bool, bool]:
def check_consistency(self, migrate_log_dst_path: str, compare_timestamps: bool = True) -> tuple[bool, bool]:
loader_node = self.loaders.nodes[0]
self.log.info('Comparing table contents using scylla-migrate...')
res = loader_node.remoter.run(cmd='./scylla-migrate check --master-address {} --replica-address {}'
@@ -331,7 +330,7 @@ def check_consistency(self, migrate_log_dst_path: str, compare_timestamps: bool

migrate_ok = res.ok
if not migrate_ok:
self.log.error('scylla-migrate command returned status {}'.format(res.exit_status))
self.log.error(f'scylla-migrate command returned status {res.exit_status}')
with open(migrate_log_dst_path, encoding="utf-8") as file:
consistency_ok = 'Consistency check OK.\n' in (line for line in file)

@@ -380,14 +379,14 @@ def start_replicator(self, mode: Mode) -> None:
self.cs_db_cluster.nodes[0].external_address,
mode_str(mode)))

self.log.info('Replicator script:\n{}'.format(replicator_script))
self.log.info(f'Replicator script:\n{replicator_script}')

self.log.info('Starting replicator.')
res = self.loaders.nodes[0].remoter.run(cmd=replicator_script)
if res.exit_status != 0:
self.fail('Could not start CDC replicator.')

def start_gemini(self, seed: Optional[int] = None) -> GeminiStressThread:
def start_gemini(self, seed: int | None = None) -> GeminiStressThread:
params = {'gemini_seed': seed} if seed else {}
return GeminiStressThread(
test_cluster=self.db_cluster,
corrupt_then_rebuild_test.py (2 changes: 1 addition & 1 deletion)
@@ -13,8 +13,8 @@
#
# Copyright (c) 2017 ScyllaDB

from sdcm.tester import ClusterTester
from sdcm import nemesis
from sdcm.tester import ClusterTester


class CorruptThenRebuildTest(ClusterTester):
destroy_data_then_repair_test.py (2 changes: 1 addition & 1 deletion)
@@ -13,8 +13,8 @@
#
# Copyright (c) 2017 ScyllaDB

from sdcm.tester import ClusterTester
from sdcm import nemesis
from sdcm.tester import ClusterTester


class CorruptThenRepair(ClusterTester):
enospc_test.py (2 changes: 1 addition & 1 deletion)
@@ -13,8 +13,8 @@
#
# Copyright (c) 2017 ScyllaDB

from sdcm.tester import ClusterTester
from sdcm.nemesis import EnospcAllNodesMonkey
from sdcm.tester import ClusterTester


class EnospcTest(ClusterTester):
full_cluster_stop_start_test.py (2 changes: 1 addition & 1 deletion)
@@ -30,7 +30,7 @@ def test_full_cluster_stop_start(self):
time.sleep(60)
# making sure all nodes are up after RestartSecs are over
for node in nodes:
self.log.info("making sure node '{}' is up".format(node))
self.log.info(f"making sure node '{node}' is up")
node.wait_db_up(verbose=True, timeout=300)

stress_queue = self.run_stress_thread(stress_cmd=self.params.get('stress_read_cmd'))
functional_tests/mocked/conftest.py (1 change: 0 additions & 1 deletion)
@@ -17,7 +17,6 @@

from sdcm.utils.aws_region import AwsRegion


AWS_REGION = "us-east-1"


functional_tests/scylla_operator/conftest.py (12 changes: 6 additions & 6 deletions)
@@ -13,23 +13,23 @@
#
# Copyright (c) 2021 ScyllaDB

import contextlib
import logging
import os
import time
import traceback
import contextlib

from typing import Optional

import pytest
from deepdiff import DeepDiff

from functional_tests.scylla_operator.libs.auxiliary import ScyllaOperatorFunctionalClusterTester, sct_abs_path
from functional_tests.scylla_operator.libs.auxiliary import (
ScyllaOperatorFunctionalClusterTester,
sct_abs_path,
)
from sdcm.cluster_k8s import ScyllaPodCluster
from sdcm.utils import version_utils


TESTER: Optional[ScyllaOperatorFunctionalClusterTester] = None
TESTER: ScyllaOperatorFunctionalClusterTester | None = None
LOGGER = logging.getLogger(__name__)


functional_tests/scylla_operator/libs/auxiliary.py (3 changes: 1 addition & 2 deletions)
@@ -15,10 +15,9 @@
import os

from sdcm.cluster_k8s import ScyllaPodCluster
from sdcm.tester import ClusterTester
from sdcm.sct_events import Severity
from sdcm.sct_events.system import TestFrameworkEvent

from sdcm.tester import ClusterTester

SCT_ROOT = os.path.realpath(os.path.join(__file__, '..', '..', '..', '..'))

functional_tests/scylla_operator/libs/helpers.py (9 changes: 4 additions & 5 deletions)
@@ -12,17 +12,16 @@
# See LICENSE for more details.
#
# Copyright (c) 2021 ScyllaDB
from enum import Enum
import logging
import time
from typing import Union
import yaml
from enum import Enum

import yaml
from kubernetes.client import exceptions as k8s_exceptions

from sdcm.cluster import (
DB_LOG_PATTERN_RESHARDING_START,
DB_LOG_PATTERN_RESHARDING_FINISH,
DB_LOG_PATTERN_RESHARDING_START,
)
from sdcm.cluster_k8s import (
SCYLLA_MANAGER_NAMESPACE,
@@ -169,7 +168,7 @@ def reinstall_scylla_manager(db_cluster: ScyllaPodCluster, manager_version: str)
log.info("Scylla Manager '%s' has successfully been installed", manager_version)


def verify_resharding_on_k8s(db_cluster: ScyllaPodCluster, cpus: Union[str, int, float]):
def verify_resharding_on_k8s(db_cluster: ScyllaPodCluster, cpus: str | int | float):
nodes_data = []
for node in reversed(db_cluster.nodes):
liveness_probe_failures = node.follow_system_log(
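
Note: the verify_resharding_on_k8s hunk above is one of several places where typing.Union becomes a PEP 604 union. A small sketch of the equivalence, using a hypothetical helper (normalize_cpus) that is not part of this repository and assuming Python 3.10+, where the "|" form also works with isinstance():

# Hypothetical helper, not from the repository.
def normalize_cpus(cpus: str | int | float) -> float:
    # "str | int | float" is equivalent to typing.Union[str, int, float]
    if isinstance(cpus, str):
        # e.g. a Kubernetes-style millicore value such as "500m"
        return float(cpus[:-1]) / 1000 if cpus.endswith("m") else float(cpus)
    return float(cpus)

# normalize_cpus("500m") == 0.5; normalize_cpus(2) == 2.0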
functional_tests/scylla_operator/test_functional.py (43 changes: 21 additions & 22 deletions)
@@ -14,53 +14,52 @@
# Copyright (c) 2021 ScyllaDB

# pylint: disable=too-many-lines
import base64
import logging
import os
import random
import ssl
import threading
import time
import ssl
import base64
import path

import path
import pytest
import yaml
from cassandra.cluster import ( # pylint: disable=no-name-in-module
EXEC_PROFILE_DEFAULT,
Cluster,
ExecutionProfile,
EXEC_PROFILE_DEFAULT,
)
from cassandra.policies import WhiteListRoundRobinPolicy

from functional_tests.scylla_operator.libs.helpers import (
PodStatuses,
get_orphaned_services,
get_pod_storage_capacity,
get_pods_and_statuses,
get_pods_without_probe,
get_scylla_sysctl_value,
reinstall_scylla_manager,
set_scylla_sysctl_value,
verify_resharding_on_k8s,
wait_for_resource_absence,
)
from sdcm.cluster_k8s import (
ScyllaPodCluster,
SCYLLA_NAMESPACE,
SCYLLA_MANAGER_NAMESPACE,
SCYLLA_OPERATOR_NAMESPACE
SCYLLA_NAMESPACE,
SCYLLA_OPERATOR_NAMESPACE,
ScyllaPodCluster,
)
from sdcm.mgmt import TaskStatus
from sdcm.utils.common import ParallelObject
from sdcm.utils.k8s import (
convert_cpu_units_to_k8s_value,
convert_cpu_value_from_k8s_to_units,
HelmValues,
KubernetesOps,
convert_cpu_units_to_k8s_value,
convert_cpu_value_from_k8s_to_units,
)
from sdcm.utils.k8s.chaos_mesh import PodFailureExperiment

from functional_tests.scylla_operator.libs.helpers import (
get_scylla_sysctl_value,
get_orphaned_services,
get_pods_without_probe,
get_pods_and_statuses,
get_pod_storage_capacity,
PodStatuses,
reinstall_scylla_manager,
set_scylla_sysctl_value,
verify_resharding_on_k8s,
wait_for_resource_absence,
)

log = logging.getLogger()

# TODO: add support for multiDC setups
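
Note: the import rewrites in this and the files above are the isort side of the commit (ruff's I codes): standard-library imports first, then third-party packages, then first-party modules, each group alphabetized, with imports from the same module merged and long import lists wrapped in parentheses with a trailing comma. A short illustrative sketch built from module names that appear in this diff (it assumes the repository's dependencies such as requests, yaml, sdcm and functional_tests are importable):

# Before (as seen in several files above): mixed ordering, split imports.
#     import yaml
#     import requests
#     from sdcm.tester import ClusterTester
#     from sdcm.tester import teardown_on_exception
#     from functional_tests.scylla_operator.libs.auxiliary import ScyllaOperatorFunctionalClusterTester, sct_abs_path
#     import time

# After isort-style ordering: stdlib, third-party, first-party; same-module imports merged.
import time

import requests
import yaml

from functional_tests.scylla_operator.libs.auxiliary import (
    ScyllaOperatorFunctionalClusterTester,
    sct_abs_path,
)
from sdcm.tester import ClusterTester, teardown_on_exception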
(Diffs for the remaining changed files are not shown here.)
