diff --git a/.flake8 b/.flake8 index ece5e3fbb1..961528bc88 100644 --- a/.flake8 +++ b/.flake8 @@ -1,6 +1,5 @@ [flake8] - -max-line-length = 120 +select=FCN,M511 exclude = doc, @@ -11,12 +10,10 @@ exclude = docs/*, .cache/* -fcfn_exclude_functions = +fcn_exclude_functions = update, - getLogger, loads, get, - appens, next, setdefault, dumps, @@ -44,6 +41,9 @@ fcfn_exclude_functions = re, requests, signal, + yaml, + benedict, + logger, enable-extensions = FCN, diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 89c131b024..5116dcf8ac 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -2,33 +2,8 @@ default_language_version: python: python3 repos: - - repo: https://github.com/PyCQA/autoflake - rev: "v2.2.0" - hooks: - - id: autoflake - args: - - --in-place - - --remove-unused-variables - - --remove-all-unused-imports - - - repo: https://github.com/hadialqattan/pycln - rev: "v2.1.5" - hooks: - - id: pycln - - - repo: https://github.com/PyCQA/isort - rev: "5.12.0" - hooks: - - id: isort - - - repo: https://github.com/psf/black - rev: "23.7.0" - hooks: - - id: black - args: [--preview] - - repo: https://github.com/pre-commit/pre-commit-hooks - rev: "v4.4.0" + rev: "v4.5.0" hooks: - id: check-merge-conflict - id: debug-statements @@ -37,21 +12,26 @@ repos: - id: end-of-file-fixer - id: check-ast - id: check-builtin-literals + - id: check-docstring-first + - id: check-toml - repo: https://github.com/PyCQA/flake8 - rev: "6.0.0" + rev: "6.1.0" hooks: - id: flake8 - args: [--max-line-length=120] + args: [--config=.flake8] additional_dependencies: [ - "git+https://github.com/RedHatQE/flake8-plugins.git", - "pep8-naming", - "flake8-comprehensions", + "git+https://github.com/RedHatQE/flake8-plugins.git@v0.0.2", "flake8-mutable", - "flake8-simplify", ] - repo: https://github.com/Yelp/detect-secrets rev: v1.4.0 hooks: - id: detect-secrets + + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.1.5 + hooks: + - id: ruff + - id: 
ruff-format diff --git a/examples/special_cases.py b/examples/special_cases.py index 81408eeb70..6032241b73 100644 --- a/examples/special_cases.py +++ b/examples/special_cases.py @@ -2,6 +2,7 @@ Some resources have the same `kind` but different API groups. For example: `Network` which exists in both operator.openshift.io and config.openshift.io API groups """ + from ocp_resources.network import Network # To get the Network resource which uses the default API in the class ("config.openshift.io") diff --git a/ocp_resources/catalog_source.py b/ocp_resources/catalog_source.py index 1e03861c98..8da1c7b302 100644 --- a/ocp_resources/catalog_source.py +++ b/ocp_resources/catalog_source.py @@ -31,16 +31,12 @@ def __init__( self.image = image self.display_name = display_name self.publisher = publisher - self.update_strategy_registry_poll_interval = ( - update_strategy_registry_poll_interval - ) + self.update_strategy_registry_poll_interval = update_strategy_registry_poll_interval def to_dict(self): super().to_dict() if not self.yaml_file: - if not all( - [self.source_type, self.image, self.display_name, self.publisher] - ): + if not all([self.source_type, self.image, self.display_name, self.publisher]): raise ValueError( "Passing yaml_file or all parameters 'source_type', 'image'," " 'display_name' and 'publisher' is required." 
diff --git a/ocp_resources/catalog_source_config.py b/ocp_resources/catalog_source_config.py index 6c0ea5d21a..35a5e0f0d0 100644 --- a/ocp_resources/catalog_source_config.py +++ b/ocp_resources/catalog_source_config.py @@ -80,7 +80,5 @@ def wait_for_csc_status(self, status, timeout=120): except TimeoutExpiredError: if current_status: - self.logger.error( - f"Status of {self.kind} {self.name} is {current_status}" - ) + self.logger.error(f"Status of {self.kind} {self.name} is {current_status}") raise diff --git a/ocp_resources/cdi_config.py b/ocp_resources/cdi_config.py index 58056bef87..1ee34e9cd1 100644 --- a/ocp_resources/cdi_config.py +++ b/ocp_resources/cdi_config.py @@ -34,9 +34,7 @@ def wait_until_upload_url_changed(self, uploadproxy_url, timeout=TIMEOUT_4MINUTE Returns: bool: True if url is equal to uploadProxyURL. """ - self.logger.info( - f"Wait for {self.kind} {self.name} to ensure current URL == uploadProxyURL" - ) + self.logger.info(f"Wait for {self.kind} {self.name} to ensure current URL == uploadProxyURL") samples = TimeoutSampler( wait_timeout=timeout, sleep=1, diff --git a/ocp_resources/cluster_role_binding.py b/ocp_resources/cluster_role_binding.py index c0ee72ad9d..f547ccbc78 100644 --- a/ocp_resources/cluster_role_binding.py +++ b/ocp_resources/cluster_role_binding.py @@ -30,9 +30,7 @@ def to_dict(self): super().to_dict() if not self.yaml_file: if not self.cluster_role: - raise ValueError( - "Passing yaml_file or parameter 'cluster_role' is required." 
- ) + raise ValueError("Passing yaml_file or parameter 'cluster_role' is required.") self.res.setdefault("roleRef", {}) self.res["roleRef"] = { "apiGroup": self.api_group, diff --git a/ocp_resources/controller_revision.py b/ocp_resources/controller_revision.py index bfd489fbf1..f72cd1cda8 100644 --- a/ocp_resources/controller_revision.py +++ b/ocp_resources/controller_revision.py @@ -38,8 +38,6 @@ def to_dict(self): super().to_dict() if not self.yaml_file: if self.owner_references: - self.res.setdefault("metadata", {}).update( - {"ownerReference": self.owner_references} - ) + self.res.setdefault("metadata", {}).update({"ownerReference": self.owner_references}) if self.revision_object: self.res.update({"data": self.revision_object.res}) diff --git a/ocp_resources/cron_job.py b/ocp_resources/cron_job.py index c5e76d0c98..e3da594a86 100644 --- a/ocp_resources/cron_job.py +++ b/ocp_resources/cron_job.py @@ -48,10 +48,7 @@ def to_dict(self): super().to_dict() if not self.yaml_file: if not (self.job_template and self.schedule): - raise ValueError( - "yaml_file or parameters 'job_template' and 'schedule' are" - " required." 
- ) + raise ValueError("yaml_file or parameters 'job_template' and 'schedule' are" " required.") self.res.update( { "spec": { @@ -65,12 +62,8 @@ def to_dict(self): if self.suspend: self.res["spec"]["suspend"] = self.suspend if self.successful_jobs_history_limit: - self.res["spec"][ - "successfulJobsHistoryLimit" - ] = self.successful_jobs_history_limit + self.res["spec"]["successfulJobsHistoryLimit"] = self.successful_jobs_history_limit if self.failed_jobs_history_limit: - self.res["spec"][ - "failedJobsHistoryLimit" - ] = self.failed_jobs_history_limit + self.res["spec"]["failedJobsHistoryLimit"] = self.failed_jobs_history_limit if self.concurrency_policy: self.res["spec"]["concurrencyPolicy"] = self.concurrency_policy diff --git a/ocp_resources/csi_storage_capacity.py b/ocp_resources/csi_storage_capacity.py index d19fda9858..df3e83865f 100644 --- a/ocp_resources/csi_storage_capacity.py +++ b/ocp_resources/csi_storage_capacity.py @@ -61,9 +61,7 @@ def to_dict(self): super().to_dict() if not self.yaml_file: if not self.storage_class_name: - raise ValueError( - "yaml_file or parameter 'storage_class_name' is required." 
- ) + raise ValueError("yaml_file or parameter 'storage_class_name' is required.") self.res.update( { "storageClassName": self.storage_class_name, diff --git a/ocp_resources/daemonset.py b/ocp_resources/daemonset.py index 372692334f..1961cc1daa 100644 --- a/ocp_resources/daemonset.py +++ b/ocp_resources/daemonset.py @@ -36,10 +36,7 @@ def wait_until_deployed(self, timeout=TIMEOUT_4MINUTES): status = sample.items[0].status desired_number_scheduled = status.desiredNumberScheduled number_ready = status.numberReady - if ( - desired_number_scheduled > 0 - and desired_number_scheduled == number_ready - ): + if desired_number_scheduled > 0 and desired_number_scheduled == number_ready: return def delete(self, wait=False, timeout=TIMEOUT_4MINUTES, body=None): diff --git a/ocp_resources/data_import_cron.py b/ocp_resources/data_import_cron.py index aea9bd24d7..fcc14451bb 100644 --- a/ocp_resources/data_import_cron.py +++ b/ocp_resources/data_import_cron.py @@ -64,12 +64,8 @@ def to_dict(self): "spec": { "template": { "spec": { - "source": { - "registry": {"pullMethod": self.pull_method} - }, - "storage": { - "resources": {"requests": {"storage": self.size}} - }, + "source": {"registry": {"pullMethod": self.pull_method}}, + "storage": {"resources": {"requests": {"storage": self.size}}}, } } } @@ -78,11 +74,7 @@ def to_dict(self): spec = self.res["spec"]["template"]["spec"] if self.bind_immediate_annotation: self.res["metadata"].setdefault("annotations", {}).update( - { - f"{NamespacedResource.ApiGroup.CDI_KUBEVIRT_IO}/storage.bind.immediate.requested": ( - "true" - ) - } + {f"{NamespacedResource.ApiGroup.CDI_KUBEVIRT_IO}/storage.bind.immediate.requested": ("true")} ) if self.image_stream: spec["source"]["registry"]["imageStream"] = self.image_stream diff --git a/ocp_resources/datavolume.py b/ocp_resources/datavolume.py index 051d475cf9..6c7118f838 100644 --- a/ocp_resources/datavolume.py +++ b/ocp_resources/datavolume.py @@ -179,26 +179,16 @@ def to_dict(self): if self.source 
== "http" or "registry": self.res["spec"]["source"][self.source]["url"] = self.url if self.cert_configmap: - self.res["spec"]["source"][self.source][ - "certConfigMap" - ] = self.cert_configmap + self.res["spec"]["source"][self.source]["certConfigMap"] = self.cert_configmap if self.source == "upload" or self.source == "blank": self.res["spec"]["source"][self.source] = {} if self.hostpath_node: self.res["metadata"].setdefault("annotations", {}).update( - { - f"{NamespacedResource.ApiGroup.KUBEVIRT_IO}/provisionOnNode": ( - self.hostpath_node - ) - } + {f"{NamespacedResource.ApiGroup.KUBEVIRT_IO}/provisionOnNode": (self.hostpath_node)} ) if self.multus_annotation: self.res["metadata"].setdefault("annotations", {}).update( - { - f"{NamespacedResource.ApiGroup.K8S_V1_CNI_CNCF_IO}/networks": ( - self.multus_annotation - ) - } + {f"{NamespacedResource.ApiGroup.K8S_V1_CNI_CNCF_IO}/networks": (self.multus_annotation)} ) if self.bind_immediate_annotation: self.res["metadata"].setdefault("annotations", {}).update( @@ -213,11 +203,7 @@ def to_dict(self): self.res["spec"]["preallocation"] = self.preallocation if self.delete_after_completion: self.res["metadata"].setdefault("annotations", {}).update( - { - f"{self.api_group}/storage.deleteAfterCompletion": ( - self.delete_after_completion - ) - } + {f"{self.api_group}/storage.deleteAfterCompletion": (self.delete_after_completion)} ) def wait_deleted(self, timeout=TIMEOUT_4MINUTES): @@ -238,9 +224,7 @@ def wait(self, timeout=TIMEOUT_10MINUTES, failure_timeout=TIMEOUT_2MINUTES): # If DV's status is not Pending, continue with the flow self.wait_for_status(status=self.Status.SUCCEEDED, timeout=timeout) - self.pvc.wait_for_status( - status=PersistentVolumeClaim.Status.BOUND, timeout=timeout - ) + self.pvc.wait_for_status(status=PersistentVolumeClaim.Status.BOUND, timeout=timeout) @property def pvc(self): @@ -252,9 +236,7 @@ def pvc(self): @property def scratch_pvc(self): - scratch_pvc_prefix = ( - self.pvc.prime_pvc.name if 
self.pvc.use_populator else self.name - ) + scratch_pvc_prefix = self.pvc.prime_pvc.name if self.pvc.use_populator else self.name return PersistentVolumeClaim( name=f"{scratch_pvc_prefix}-scratch", namespace=self.namespace, @@ -318,9 +300,7 @@ def test_dv(): self._check_none_pending_status(failure_timeout=failure_timeout) sample = None - status_of_dv_str = ( - f"Status of {self.kind} '{self.name}' in namespace '{self.namespace}':\n" - ) + status_of_dv_str = f"Status of {self.kind} '{self.name}' in namespace '{self.namespace}':\n" try: for sample in TimeoutSampler( sleep=1, @@ -328,14 +308,9 @@ def test_dv(): func=lambda: self.exists, ): # DV reach to success if the status is succeeded or if the DV does not exist - if ( - sample is None - or sample.get("status", {}).get("phase") == self.Status.SUCCEEDED - ): + if sample is None or sample.get("status", {}).get("phase") == self.Status.SUCCEEDED: break - elif stop_status_func and stop_status_func( - *stop_status_func_args, **stop_status_func_kwargs - ): + elif stop_status_func and stop_status_func(*stop_status_func_args, **stop_status_func_kwargs): raise TimeoutExpiredError( value=( "Exited on the stop_status_func" @@ -349,9 +324,7 @@ def test_dv(): raise # For CSI storage, PVC gets Bound after DV succeeded - return self.pvc.wait_for_status( - status=PersistentVolumeClaim.Status.BOUND, timeout=TIMEOUT_1MINUTE - ) + return self.pvc.wait_for_status(status=PersistentVolumeClaim.Status.BOUND, timeout=TIMEOUT_1MINUTE) def delete(self, wait=False, timeout=TIMEOUT_4MINUTES, body=None): """ diff --git a/ocp_resources/deployment.py b/ocp_resources/deployment.py index 4dd790e684..8aa6857438 100644 --- a/ocp_resources/deployment.py +++ b/ocp_resources/deployment.py @@ -67,9 +67,6 @@ def wait_for_replicas(self, deployed=True, timeout=TIMEOUT_4MINUTES): if ( (deployed and spec_replicas) - and spec_replicas - == updated_replicas - == available_replicas - == ready_replicas + and spec_replicas == updated_replicas == available_replicas 
== ready_replicas ) or not (deployed or spec_replicas or total_replicas): return diff --git a/ocp_resources/endpoint_slice.py b/ocp_resources/endpoint_slice.py index 9fee7d166c..4903471262 100644 --- a/ocp_resources/endpoint_slice.py +++ b/ocp_resources/endpoint_slice.py @@ -55,10 +55,7 @@ def to_dict(self): super().to_dict() if not self.yaml_file: if not (self.address_type and self.endpoints): - raise ValueError( - "yaml_file or parameters 'address_type' and 'endpoints' are" - " required." - ) + raise ValueError("yaml_file or parameters 'address_type' and 'endpoints' are" " required.") self.res.update( { "addressTypes": self.address_type, diff --git a/ocp_resources/endpoints.py b/ocp_resources/endpoints.py index 9cce321b46..3dd33d4e1f 100644 --- a/ocp_resources/endpoints.py +++ b/ocp_resources/endpoints.py @@ -52,9 +52,7 @@ def to_dict(self): super().to_dict() if not self.yaml_file: if not (self.addresses and self.ports): - raise ValueError( - "yaml_file or parameters 'addresses' and 'ports' are required." 
- ) + raise ValueError("yaml_file or parameters 'addresses' and 'ports' are required.") self.res.update( { "subsets": { diff --git a/ocp_resources/event.py b/ocp_resources/event.py index df5037d92a..fbc51218ee 100644 --- a/ocp_resources/event.py +++ b/ocp_resources/event.py @@ -54,9 +54,7 @@ def get( f" resource_version={resource_version}, timeout={timeout}" ) - event_listener = dyn_client.resources.get( - api_version=cls.api_version, kind=cls.__name__ - ) + event_listener = dyn_client.resources.get(api_version=cls.api_version, kind=cls.__name__) yield from event_listener.watch( namespace=namespace, name=name, diff --git a/ocp_resources/hyperconverged.py b/ocp_resources/hyperconverged.py index 2889931d8d..e21771f597 100644 --- a/ocp_resources/hyperconverged.py +++ b/ocp_resources/hyperconverged.py @@ -33,11 +33,7 @@ def to_dict(self): super().to_dict() if not self.yaml_file: if self.infra: - self.res.setdefault("spec", {}).setdefault("infra", {}).update( - self.infra - ) + self.res.setdefault("spec", {}).setdefault("infra", {}).update(self.infra) if self.workloads: - self.res.setdefault("spec", {}).setdefault("workloads", {}).update( - self.workloads - ) + self.res.setdefault("spec", {}).setdefault("workloads", {}).update(self.workloads) diff --git a/ocp_resources/image_content_source_policy.py b/ocp_resources/image_content_source_policy.py index 62e48048ef..298c36096e 100644 --- a/ocp_resources/image_content_source_policy.py +++ b/ocp_resources/image_content_source_policy.py @@ -28,6 +28,4 @@ def to_dict(self): if not self.yaml_file: if not self.repository_digest_mirrors: raise ValueError("repository_digest_mirrors must be defined") - self.res["spec"] = { - "repositoryDigestMirrors": self.repository_digest_mirrors - } + self.res["spec"] = {"repositoryDigestMirrors": self.repository_digest_mirrors} diff --git a/ocp_resources/ip_address_pool.py b/ocp_resources/ip_address_pool.py index 527852f203..618cf1432c 100644 --- a/ocp_resources/ip_address_pool.py +++ 
b/ocp_resources/ip_address_pool.py @@ -43,10 +43,7 @@ def to_dict(self): super().to_dict() if not self.yaml_file: if not self.addresses: - raise ValueError( - "Either required parameter is missing 'addresses' or provide" - " yaml_file." - ) + raise ValueError("Either required parameter is missing 'addresses' or provide" " yaml_file.") self.res["spec"] = {"addresses": self.addresses} diff --git a/ocp_resources/job.py b/ocp_resources/job.py index 705df08850..4e336f55a4 100644 --- a/ocp_resources/job.py +++ b/ocp_resources/job.py @@ -76,14 +76,10 @@ def to_dict(self): self.res["spec"]["template"]["spec"]["containers"] = self.containers if self.service_account: - self.res["spec"]["template"]["spec"][ - "serviceAccount" - ] = self.service_account + self.res["spec"]["template"]["spec"]["serviceAccount"] = self.service_account if self.restart_policy: - self.res["spec"]["template"]["spec"][ - "restartPolicy" - ] = self.restart_policy + self.res["spec"]["template"]["spec"]["restartPolicy"] = self.restart_policy def delete(self, wait=False, timeout=TIMEOUT_4MINUTES, body=None): """ @@ -98,7 +94,5 @@ def delete(self, wait=False, timeout=TIMEOUT_4MINUTES, body=None): bool: True if delete succeeded, False otherwise. 
""" if not body and self.background_propagation_policy: - body = kubernetes.client.V1DeleteOptions( - propagation_policy=self.background_propagation_policy - ) + body = kubernetes.client.V1DeleteOptions(propagation_policy=self.background_propagation_policy) return super().delete(wait=wait, timeout=timeout, body=body) diff --git a/ocp_resources/l2_advertisement.py b/ocp_resources/l2_advertisement.py index f5c27a25e3..4eeb3fa697 100644 --- a/ocp_resources/l2_advertisement.py +++ b/ocp_resources/l2_advertisement.py @@ -51,6 +51,4 @@ def to_dict(self): self.res["spec"]["ipAddressPools"] = self.ip_address_pools if self.ip_address_pools_selectors: - self.res["spec"][ - "ipAddressPoolSelectors" - ] = self.ip_address_pools_selectors + self.res["spec"]["ipAddressPoolSelectors"] = self.ip_address_pools_selectors diff --git a/ocp_resources/machine.py b/ocp_resources/machine.py index 1a7e88a674..d37c504ec8 100644 --- a/ocp_resources/machine.py +++ b/ocp_resources/machine.py @@ -35,15 +35,11 @@ def cluster_name(self): @property def machine_role(self): - return self.instance.metadata.labels[ - f"{self.api_group}/cluster-api-machine-role" - ] + return self.instance.metadata.labels[f"{self.api_group}/cluster-api-machine-role"] @property def machine_type(self): - return self.instance.metadata.labels[ - f"{self.api_group}/cluster-api-machine-type" - ] + return self.instance.metadata.labels[f"{self.api_group}/cluster-api-machine-type"] @property def machineset_name(self): diff --git a/ocp_resources/machine_health_check.py b/ocp_resources/machine_health_check.py index 46af0fb1fe..2b4deb4d0f 100644 --- a/ocp_resources/machine_health_check.py +++ b/ocp_resources/machine_health_check.py @@ -49,9 +49,7 @@ def to_dict(self): super().to_dict() if not self.yaml_file: if self.reboot_strategy: - self.res["metadata"]["annotations"] = { - f"{self.api_group}/remediation-strategy": "external-baremetal" - } + self.res["metadata"]["annotations"] = {f"{self.api_group}/remediation-strategy": 
"external-baremetal"} self.res.setdefault("spec", {}) self.res["spec"]["nodeStartupTimeout"] = self.node_startup_timeout self.res["spec"]["maxUnhealthy"] = self.max_unhealthy diff --git a/ocp_resources/machine_set.py b/ocp_resources/machine_set.py index ca0e94ee18..7c3fcf2182 100644 --- a/ocp_resources/machine_set.py +++ b/ocp_resources/machine_set.py @@ -104,26 +104,16 @@ def to_dict(self): "selector": { "matchLabels": { f"{self.api_group}/{_cluster_api_cluster}": self.cluster_name, - f"{self.api_group}/{_cluster_api_machineset}": ( - f"{self.cluster_name}-{self.machine_role}" - ), + f"{self.api_group}/{_cluster_api_machineset}": (f"{self.cluster_name}-{self.machine_role}"), } }, "template": { _metadata: { _labels: { - f"{self.api_group}/{_cluster_api_cluster}": ( - self.cluster_name - ), - f"{self.api_group}/{_cluster_api_machine_role}": ( - self.machine_role - ), - f"{self.api_group}/{_cluster_api_machine_type}": ( - self.machine_type - ), - f"{self.api_group}/{_cluster_api_machineset}": ( - f"{self.cluster_name}-{self.machine_role}" - ), + f"{self.api_group}/{_cluster_api_cluster}": (self.cluster_name), + f"{self.api_group}/{_cluster_api_machine_role}": (self.machine_role), + f"{self.api_group}/{_cluster_api_machine_type}": (self.machine_type), + f"{self.api_group}/{_cluster_api_machineset}": (f"{self.cluster_name}-{self.machine_role}"), } }, _spec: {"providerSpec": self.provider_spec}, @@ -172,9 +162,7 @@ def wait_for_replicas(self, timeout=TIMEOUT_5MINUTES, sleep=1): ) return False - def scale_replicas( - self, replicas, wait_timeout=TIMEOUT_5MINUTES, sleep=1, wait=True - ): + def scale_replicas(self, replicas, wait_timeout=TIMEOUT_5MINUTES, sleep=1, wait=True): """ Scale down/up a machine-set replicas. 
@@ -190,10 +178,7 @@ def scale_replicas( super().to_dict() self.res.update({"spec": {"replicas": replicas}}) - self.logger.info( - f"Scale machine-set from {self.desired_replicas} replicas to" - f" {replicas} replicas" - ) + self.logger.info(f"Scale machine-set from {self.desired_replicas} replicas to {replicas} replicas") self.update(resource_dict=self.res) if wait: return self.wait_for_replicas(timeout=wait_timeout, sleep=sleep) diff --git a/ocp_resources/migration.py b/ocp_resources/migration.py index 1ea9194f00..98e5dc7028 100644 --- a/ocp_resources/migration.py +++ b/ocp_resources/migration.py @@ -58,6 +58,4 @@ def to_dict(self): ) if self.cut_over: - self.res["spec"]["cutover"] = self.cut_over.strftime( - format="%Y-%m-%dT%H:%M:%SZ" - ) + self.res["spec"]["cutover"] = self.cut_over.strftime(format="%Y-%m-%dT%H:%M:%SZ") diff --git a/ocp_resources/migration_policy.py b/ocp_resources/migration_policy.py index 75b3e51ac9..92333ef489 100644 --- a/ocp_resources/migration_policy.py +++ b/ocp_resources/migration_policy.py @@ -51,14 +51,10 @@ def to_dict(self): if self.bandwidth_per_migration: self.res["spec"]["bandwidthPerMigration"] = self.bandwidth_per_migration if self.completion_timeout_per_gb: - self.res["spec"][ - "completionTimeoutPerGiB" - ] = self.completion_timeout_per_gb + self.res["spec"]["completionTimeoutPerGiB"] = self.completion_timeout_per_gb if self.namespace_selector: selectors.setdefault("namespaceSelector", self.namespace_selector) if self.vmi_selector: - selectors.setdefault( - "virtualMachineInstanceSelector", self.vmi_selector - ) + selectors.setdefault("virtualMachineInstanceSelector", self.vmi_selector) diff --git a/ocp_resources/mtv.py b/ocp_resources/mtv.py index 9932ac6279..41ac45fdc3 100644 --- a/ocp_resources/mtv.py +++ b/ocp_resources/mtv.py @@ -4,11 +4,7 @@ def _get_status_condition_log_message(**status_condition): log_msg = "Waiting For: \n" for status_condition_name, status_condition in status_condition.items(): - log_msg += ( - 
f"{status_condition_name}->{status_condition} \n" - if status_condition - else "" - ) + log_msg += f"{status_condition_name}->{status_condition} \n" if status_condition else "" return log_msg @@ -104,39 +100,19 @@ def wait_for_resource_status( try: for sample in samples: current_conditions = ( - sample.items[0].status.get("conditions") - if sample.items and sample.items[0].status - else [] + sample.items[0].status.get("conditions") if sample.items and sample.items[0].status else [] ) for condition in current_conditions: last_condition = condition - valid_status_type = ( - condition_status == condition.status - and condition_type == condition.type - ) - valid_message = ( - condition_message == condition.message - or condition_message is None - ) - valid_reason = ( - condition_reason == condition.reason or condition_reason is None - ) - valid_category = ( - condition_category == condition.category - or condition_category is None - ) - if all( - [valid_status_type, valid_message, valid_reason, valid_category] - ): + valid_status_type = condition_status == condition.status and condition_type == condition.type + valid_message = condition_message == condition.message or condition_message is None + valid_reason = condition_reason == condition.reason or condition_reason is None + valid_category = condition_category == condition.category or condition_category is None + if all([valid_status_type, valid_message, valid_reason, valid_category]): return except TimeoutExpiredError: - self.logger.error( - msg=( - f"Last Status Condition of {self.kind} {self.name} was:" - f" {last_condition}" - ) - ) + self.logger.error(msg=(f"Last Status Condition of {self.kind} {self.name} was: {last_condition}")) raise def wait_for_condition_ready(self, wait_timeout=360): diff --git a/ocp_resources/multi_cluster_observability.py b/ocp_resources/multi_cluster_observability.py index 64f0d32948..a383eba02b 100644 --- a/ocp_resources/multi_cluster_observability.py +++ 
b/ocp_resources/multi_cluster_observability.py @@ -43,9 +43,7 @@ def to_dict(self): if not self.metric_object_storage: raise ValueError("metric_object_storage or yaml file is required") spec_dict = {"observabilityAddonSpec": self.observability_addon_spec} - spec_dict.setdefault("storageConfig", {})[ - "metricObjectStorage" - ] = self.metric_object_storage + spec_dict.setdefault("storageConfig", {})["metricObjectStorage"] = self.metric_object_storage if self.enable_downsampling: spec_dict["enableDownsampling"] = self.enable_downsampling diff --git a/ocp_resources/network_attachment_definition.py b/ocp_resources/network_attachment_definition.py index 0988666666..e74fd17cf0 100644 --- a/ocp_resources/network_attachment_definition.py +++ b/ocp_resources/network_attachment_definition.py @@ -52,9 +52,7 @@ def to_dict(self): if not self.yaml_file: if self.resource_name is not None: self.res["metadata"]["annotations"] = { - f"{NamespacedResource.ApiGroup.K8S_V1_CNI_CNCF_IO}/resourceName": ( - self.resource_name - ) + f"{NamespacedResource.ApiGroup.K8S_V1_CNI_CNCF_IO}/resourceName": (self.resource_name) } self.res["spec"] = {} if self.config: @@ -154,9 +152,7 @@ def to_dict(self): super().to_dict() if self.tuning_type: self.old_nad_format = True - self.res["spec"]["config"].setdefault("plugins", []).append( - {"type": self.tuning_type} - ) + self.res["spec"]["config"].setdefault("plugins", []).append({"type": self.tuning_type}) self.res["spec"]["config"] = json.dumps(self.res["spec"]["config"]) diff --git a/ocp_resources/node.py b/ocp_resources/node.py index 511c44dd31..3719849e82 100644 --- a/ocp_resources/node.py +++ b/ocp_resources/node.py @@ -14,16 +14,13 @@ class Status(Resource.Status): @property def kubelet_ready(self): return any( - stat["reason"] == "KubeletReady" - and stat["status"] == self.Condition.Status.TRUE + stat["reason"] == "KubeletReady" and stat["status"] == self.Condition.Status.TRUE for stat in self.instance.status.conditions ) @property def 
machine_name(self): - return self.instance.metadata.annotations[ - f"{self.ApiGroup.MACHINE_OPENSHIFT_IO}/machine" - ].split("/")[-1] + return self.instance.metadata.annotations[f"{self.ApiGroup.MACHINE_OPENSHIFT_IO}/machine"].split("/")[-1] @property def internal_ip(self): diff --git a/ocp_resources/node_network_configuration_policy.py b/ocp_resources/node_network_configuration_policy.py index 7b0e08fd68..0a9921f83b 100644 --- a/ocp_resources/node_network_configuration_policy.py +++ b/ocp_resources/node_network_configuration_policy.py @@ -115,10 +115,7 @@ def _nodes(self): return list(Node.get(dyn_client=self.client, name=self.node_selector)) if self.node_selector_labels: node_labels = ",".join( - [ - f"{label_key}={label_value}" - for label_key, label_value in self.node_selector_labels.items() - ] + [f"{label_key}={label_value}" for label_key, label_value in self.node_selector_labels.items()] ) return list(Node.get(dyn_client=self.client, label_selector=node_labels)) @@ -126,17 +123,11 @@ def set_interface(self, interface): if not self.res: super().to_dict() # First drop the interface if it's already in the list - interfaces = [ - iface - for iface in self.desired_state["interfaces"] - if iface["name"] != interface["name"] - ] + interfaces = [iface for iface in self.desired_state["interfaces"] if iface["name"] != interface["name"]] # Add the interface interfaces.append(interface) self.desired_state["interfaces"] = interfaces - self.res.setdefault("spec", {}).setdefault("desiredState", {})["interfaces"] = ( - self.desired_state["interfaces"] - ) + self.res.setdefault("spec", {}).setdefault("desiredState", {})["interfaces"] = self.desired_state["interfaces"] def to_dict(self): super().to_dict() @@ -145,9 +136,7 @@ def to_dict(self): self.res.setdefault("spec", {}).setdefault("desiredState", {}) if self.node_selector_spec: - self.res.setdefault("spec", {}).setdefault( - "nodeSelector", self.node_selector_spec - ) + self.res.setdefault("spec", 
{}).setdefault("nodeSelector", self.node_selector_spec) if self.capture: self.res["spec"]["capture"] = self.capture @@ -159,9 +148,7 @@ def to_dict(self): self.res["spec"]["desiredState"]["routes"] = self.routes if self.max_unavailable: - self.res.setdefault("spec", {}).setdefault( - "maxUnavailable", self.max_unavailable - ) + self.res.setdefault("spec", {}).setdefault("maxUnavailable", self.max_unavailable) if self.iface: """ @@ -262,9 +249,7 @@ def _ports_backup(self, ip_family): elif ip_family == IPV6_STR: self.ipv6_ports_backup_dict[port] = _port[ip_family] else: - raise ValueError( - f"'ip_family' must be either '{IPV4_STR}' or '{IPV6_STR}'" - ) + raise ValueError(f"'ip_family' must be either '{IPV4_STR}' or '{IPV6_STR}'") def ipv4_ports_backup(self): self._ports_backup(ip_family=IPV4_STR) @@ -339,13 +324,7 @@ def _absent_interface(self): self.add_ports() ResourceEditor( - patches={ - self: { - "spec": { - "desiredState": {"interfaces": self.desired_state["interfaces"]} - } - } - } + patches={self: {"spec": {"desiredState": {"interfaces": self.desired_state["interfaces"]}}}} ).update() @property @@ -375,8 +354,7 @@ def wait_for_configuration_conditions_unknown_or_progressing(self, wait_timeout= and sample[0]["type"] == self.Conditions.Type.AVAILABLE and ( sample[0]["status"] == self.Condition.Status.UNKNOWN - or sample[0]["reason"] - == self.Conditions.Reason.CONFIGURATION_PROGRESSING + or sample[0]["reason"] == self.Conditions.Reason.CONFIGURATION_PROGRESSING ) ): return sample @@ -387,15 +365,11 @@ def _process_failed_status(self, failed_condition_reason): nnce_name = failed_nnce.instance.metadata.name nnce_dict = failed_nnce.instance.to_dict() for cond in nnce_dict["status"]["conditions"]: - err_msg = self._get_nnce_error_msg( - nnce_name=nnce_name, nnce_condition=cond - ) + err_msg = self._get_nnce_error_msg(nnce_name=nnce_name, nnce_condition=cond) if err_msg: last_err_msg = err_msg - raise NNCPConfigurationFailed( - f"Reason: 
{failed_condition_reason}\n{last_err_msg}" - ) + raise NNCPConfigurationFailed(f"Reason: {failed_condition_reason}\n{last_err_msg}") def wait_for_status_success(self): failed_condition_reason = self.Conditions.Reason.FAILED_TO_CONFIGURE @@ -404,9 +378,7 @@ def wait_for_status_success(self): # if we get here too fast there are no conditions, we need to wait. self.wait_for_configuration_conditions_unknown_or_progressing() - samples = TimeoutSampler( - wait_timeout=self.success_timeout, sleep=1, func=lambda: self.status - ) + samples = TimeoutSampler(wait_timeout=self.success_timeout, sleep=1, func=lambda: self.status) try: for sample in samples: if sample == self.Conditions.Reason.SUCCESSFULLY_CONFIGURED: @@ -414,14 +386,10 @@ def wait_for_status_success(self): return sample elif sample == no_match_node_condition_reason: - raise NNCPConfigurationFailed( - f"{self.name}. Reason: {no_match_node_condition_reason}" - ) + raise NNCPConfigurationFailed(f"{self.name}. Reason: {no_match_node_condition_reason}") elif sample == failed_condition_reason: - self._process_failed_status( - failed_condition_reason=failed_condition_reason - ) + self._process_failed_status(failed_condition_reason=failed_condition_reason) except (TimeoutExpiredError, NNCPConfigurationFailed): self.logger.error( @@ -439,9 +407,7 @@ def nnces(self): return nnces def node_nnce(self, node_name): - nnce = [ - nnce for nnce in self.nnces if nnce.labels["nmstate.io/node"] == node_name - ] + nnce = [nnce for nnce in self.nnces if nnce.labels["nmstate.io/node"] == node_name] return nnce[0] if nnce else None @staticmethod @@ -473,8 +439,5 @@ def _get_failed_nnce(self): continue for nnce_cond in nnce.instance.status.conditions: - if ( - nnce_cond.type == "Failing" - and nnce_cond.status == Resource.Condition.Status.TRUE - ): + if nnce_cond.type == "Failing" and nnce_cond.status == Resource.Condition.Status.TRUE: yield nnce diff --git a/ocp_resources/node_network_state.py b/ocp_resources/node_network_state.py 
index dd48a55f7b..b4c506d7a7 100644 --- a/ocp_resources/node_network_state.py +++ b/ocp_resources/node_network_state.py @@ -34,11 +34,7 @@ def __init__( def set_interface(self, interface): # First drop the interface is's already in the list - interfaces = [ - iface - for iface in self.desired_state["interfaces"] - if iface["name"] != interface["name"] - ] + interfaces = [iface for iface in self.desired_state["interfaces"] if iface["name"] != interface["name"]] # Add the interface interfaces.append(interface) @@ -79,9 +75,7 @@ def _find_up_interface(): return None self.logger.info(f"Checking if interface {name} is up -- {self.name}") - samples = TimeoutSampler( - wait_timeout=TIMEOUT_4MINUTES, sleep=SLEEP, func=_find_up_interface - ) + samples = TimeoutSampler(wait_timeout=TIMEOUT_4MINUTES, sleep=SLEEP, func=_find_up_interface) for sample in samples: if sample: return diff --git a/ocp_resources/persistent_volume.py b/ocp_resources/persistent_volume.py index 79f090cf8c..a6377cebc7 100644 --- a/ocp_resources/persistent_volume.py +++ b/ocp_resources/persistent_volume.py @@ -18,10 +18,4 @@ def max_available_pvs(self): """ Returns the maximum number (int) of PV's which are in 'Available' state """ - return len( - [ - pv - for pv in self.api.get()["items"] - if pv.status.phase == Resource.Condition.AVAILABLE - ] - ) + return len([pv for pv in self.api.get()["items"] if pv.status.phase == Resource.Condition.AVAILABLE]) diff --git a/ocp_resources/persistent_volume_claim.py b/ocp_resources/persistent_volume_claim.py index 21e938ec59..fe4e9bfd93 100644 --- a/ocp_resources/persistent_volume_claim.py +++ b/ocp_resources/persistent_volume_claim.py @@ -82,16 +82,12 @@ def to_dict(self): kubevirt.io/provisionOnNode: """ if self.hostpath_node: - self.res["metadata"]["annotations"] = { - "kubevirt.io/provisionOnNode": self.hostpath_node - } + self.res["metadata"]["annotations"] = {"kubevirt.io/provisionOnNode": self.hostpath_node} if self.storage_class: 
self.res["spec"]["storageClassName"] = self.storage_class if self.pvlabel: - self.res["spec"]["selector"] = { - "matchLabels": {"pvLabel": self.pvlabel} - } + self.res["spec"]["selector"] = {"matchLabels": {"pvLabel": self.pvlabel}} def bound(self): """ @@ -105,18 +101,11 @@ def bound(self): @property def selected_node(self): - return self.instance.metadata.annotations.get( - "volume.kubernetes.io/selected-node" - ) + return self.instance.metadata.annotations.get("volume.kubernetes.io/selected-node") @property def use_populator(self): - return ( - self.instance.metadata.annotations.get( - f"{self.ApiGroup.CDI_KUBEVIRT_IO}/storage.usePopulator" - ) - == "true" - ) + return self.instance.metadata.annotations.get(f"{self.ApiGroup.CDI_KUBEVIRT_IO}/storage.usePopulator") == "true" @property def prime_pvc(self): diff --git a/ocp_resources/pipeline.py b/ocp_resources/pipeline.py index b86389b309..bfed2504c1 100644 --- a/ocp_resources/pipeline.py +++ b/ocp_resources/pipeline.py @@ -46,10 +46,7 @@ def to_dict(self): super().to_dict() if not self.yaml_file: if not (self.tasks or self.params or self.final_parallel_tasks): - raise ValueError( - "spec is expected to have at least one of the optional fields, got" - " none" - ) + raise ValueError("spec is expected to have at least one of the optional fields, got none") self.res["spec"] = {} if self.params: self.res["spec"]["params"] = self.params diff --git a/ocp_resources/pipelineruns.py b/ocp_resources/pipelineruns.py index b8f597947d..1f03b5f71e 100644 --- a/ocp_resources/pipelineruns.py +++ b/ocp_resources/pipelineruns.py @@ -48,8 +48,7 @@ def to_dict(self): if self.params: self.res["spec"]["params"] = [ - {"name": params_name, "value": params_value} - for params_name, params_value in self.params.items() + {"name": params_name, "value": params_value} for params_name, params_value in self.params.items() ] if self.service_account_name: diff --git a/ocp_resources/pod.py b/ocp_resources/pod.py index 6fc3c58bfa..f76ca63557 
100644 --- a/ocp_resources/pod.py +++ b/ocp_resources/pod.py @@ -16,13 +16,7 @@ def __init__(self, command, rc, out, err): self.err = err def __str__(self): - return ( - "Command execution failure: " - f"{self.cmd}, " - f"RC: {self.rc}, " - f"OUT: {self.out}, " - f"ERR: {self.err}" - ) + return f"Command execution failure: {self.cmd}, RC: {self.rc}, OUT: {self.out}, ERR: {self.err}" class Pod(NamespacedResource): @@ -105,23 +99,17 @@ def execute(self, command, timeout=60, container=None, ignore_rc=False): while resp.is_open(): resp.run_forever(timeout=2) try: - error_channel = json.loads( - resp.read_channel(kubernetes.stream.ws_client.ERROR_CHANNEL) - ) + error_channel = json.loads(resp.read_channel(kubernetes.stream.ws_client.ERROR_CHANNEL)) break except json.decoder.JSONDecodeError: # Check remaining time, in order to throw exception # if remaining time reached zero if timeout_watch.remaining_time() <= 0: - raise ExecOnPodError( - command=command, rc=-1, out="", err=stream_closed_error - ) + raise ExecOnPodError(command=command, rc=-1, out="", err=stream_closed_error) rcstring = error_channel.get("status") if rcstring is None: - raise ExecOnPodError( - command=command, rc=-1, out="", err=stream_closed_error - ) + raise ExecOnPodError(command=command, rc=-1, out="", err=stream_closed_error) stdout = resp.read_stdout(timeout=5) stderr = resp.read_stderr(timeout=5) @@ -133,9 +121,7 @@ def execute(self, command, timeout=60, container=None, ignore_rc=False): raise ExecOnPodError(command=command, rc=-1, out="", err=error_channel) returncode = [ - int(cause["message"]) - for cause in error_channel["details"]["causes"] - if cause["reason"] == "ExitCode" + int(cause["message"]) for cause in error_channel["details"]["causes"] if cause["reason"] == "ExitCode" ][0] raise ExecOnPodError(command=command, rc=returncode, out=stdout, err=stderr) @@ -147,9 +133,7 @@ def log(self, **kwargs): Returns: str: Pod logs. 
""" - return self._kube_v1_api.read_namespaced_pod_log( - name=self.name, namespace=self.namespace, **kwargs - ) + return self._kube_v1_api.read_namespaced_pod_log(name=self.name, namespace=self.namespace, **kwargs) @property def node(self): diff --git a/ocp_resources/resource.py b/ocp_resources/resource.py index 15d7412880..64b3398d4a 100644 --- a/ocp_resources/resource.py +++ b/ocp_resources/resource.py @@ -35,27 +35,21 @@ skip_existing_resource_creation_teardown, ) -LOGGER = get_logger(__name__) +LOGGER = get_logger(name=__name__) MAX_SUPPORTED_API_VERSION = "v2" def _find_supported_resource(dyn_client, api_group, kind): results = dyn_client.resources.search(group=api_group, kind=kind) - sorted_results = sorted( - results, key=lambda result: KubeAPIVersion(result.api_version), reverse=True - ) + sorted_results = sorted(results, key=lambda result: KubeAPIVersion(result.api_version), reverse=True) for result in sorted_results: - if KubeAPIVersion(result.api_version) <= KubeAPIVersion( - MAX_SUPPORTED_API_VERSION - ): + if KubeAPIVersion(result.api_version) <= KubeAPIVersion(MAX_SUPPORTED_API_VERSION): return result def _get_api_version(dyn_client, api_group, kind): # Returns api_group/api_version - res = _find_supported_resource( - dyn_client=dyn_client, api_group=api_group, kind=kind - ) + res = _find_supported_resource(dyn_client=dyn_client, api_group=api_group, kind=kind) if not res: log = f"Couldn't find {kind} in {api_group} api group" LOGGER.warning(log) @@ -103,8 +97,7 @@ def sub_resource_level(current_class, owner_class, parent_class): [ class_iterator for class_iterator in current_class.mro() - if class_iterator not in owner_class.mro() - and issubclass(class_iterator, parent_class) + if class_iterator not in owner_class.mro() and issubclass(class_iterator, parent_class) ] ): return class_iterator.__name__ @@ -129,20 +122,11 @@ def parse(self, vstring): with contextlib.suppress(ValueError): components[idx] = int(obj) - errmsg = ( - f"version '{vstring}' 
does not conform to kubernetes api versioning" - " guidelines" - ) + errmsg = f"version '{vstring}' does not conform to kubernetes api versioning guidelines" - if ( - len(components) not in (2, 4) - or components[0] != "v" - or not isinstance(components[1], int) - ): + if len(components) not in (2, 4) or components[0] != "v" or not isinstance(components[1], int): raise ValueError(errmsg) - if len(components) == 4 and ( - components[2] not in ("alpha", "beta") or not isinstance(components[3], int) - ): + if len(components) == 4 and (components[2] not in ("alpha", "beta") or not isinstance(components[3], int)): raise ValueError(errmsg) self.version = components @@ -274,17 +258,13 @@ class ApiGroup: METRICS_K8S_IO = "metrics.k8s.io" MIGRATIONS_KUBEVIRT_IO = "migrations.kubevirt.io" MONITORING_COREOS_COM = "monitoring.coreos.com" - NETWORKADDONSOPERATOR_NETWORK_KUBEVIRT_IO = ( - "networkaddonsoperator.network.kubevirt.io" - ) + NETWORKADDONSOPERATOR_NETWORK_KUBEVIRT_IO = "networkaddonsoperator.network.kubevirt.io" NETWORKING_ISTIO_IO = "networking.istio.io" NETWORKING_K8S_IO = "networking.k8s.io" NODE_LABELLER_KUBEVIRT_IO = "node-labeller.kubevirt.io" NMSTATE_IO = "nmstate.io" NODEMAINTENANCE_KUBEVIRT_IO = "nodemaintenance.kubevirt.io" - OBSERVABILITY_OPEN_CLUSTER_MANAGEMENT_IO = ( - "observability.open-cluster-management.io" - ) + OBSERVABILITY_OPEN_CLUSTER_MANAGEMENT_IO = "observability.open-cluster-management.io" OCS_OPENSHIFT_IO = "ocs.openshift.io" OPERATOR_OPEN_CLUSTER_MANAGEMENT_IO = "operator.open-cluster-management.io" OPERATOR_OPENSHIFT_IO = "operator.openshift.io" @@ -367,8 +347,7 @@ def __init__( self.api_group = api_group or self.api_group if not self.api_group and not self.api_version: raise NotImplementedError( - "Subclasses of Resource require self.api_group or self.api_version to" - " be defined" + "Subclasses of Resource require self.api_group or self.api_version to be defined" ) self.namespace = None self.name = name @@ -442,9 +421,7 @@ def 
_base_body(self): "metadata": {"name": self.name}, } if self.label: - self.res.setdefault("metadata", {}).setdefault("labels", {}).update( - self.label - ) + self.res.setdefault("metadata", {}).setdefault("labels", {}).update(self.label) def to_dict(self): """ @@ -525,8 +502,7 @@ def clean_up(self): check_exists=False, ): self.logger.warning( - f"Skip resource {self.kind} {self.name} teardown. Got" - f" {_export_str}={skip_resource_teardown}" + f"Skip resource {self.kind} {self.name} teardown. Got {_export_str}={skip_resource_teardown}" ) return @@ -535,9 +511,7 @@ def clean_up(self): @classmethod def _prepare_resources(cls, dyn_client, singular_name, *args, **kwargs): if not cls.api_version: - cls.api_version = _get_api_version( - dyn_client=dyn_client, api_group=cls.api_group, kind=cls.kind - ) + cls.api_version = _get_api_version(dyn_client=dyn_client, api_group=cls.api_group, kind=cls.kind) get_kwargs = {"singular_name": singular_name} if singular_name else {} return dyn_client.resources.get( @@ -558,9 +532,7 @@ def _set_client_and_api_version(self): self.client = get_client(config_file=self.config_file, context=self.context) if not self.api_version: - self.api_version = _get_api_version( - dyn_client=self.client, api_group=self.api_group, kind=self.kind - ) + self.api_version = _get_api_version(dyn_client=self.client, api_group=self.api_group, kind=self.kind) def full_api(self, **kwargs): """ @@ -585,9 +557,7 @@ def full_api(self, **kwargs): kwargs = self._prepare_singular_name_kwargs(**kwargs) - return self.client.resources.get( - api_version=self.api_version, kind=self.kind, **kwargs - ) + return self.client.resources.get(api_version=self.api_version, kind=self.kind, **kwargs) @property def api(self): @@ -652,16 +622,12 @@ def client_wait_deleted(self, timeout): Raises: TimeoutExpiredError: If resource still exists. 
""" - samples = TimeoutSampler( - wait_timeout=timeout, sleep=1, func=lambda: self.exists - ) + samples = TimeoutSampler(wait_timeout=timeout, sleep=1, func=lambda: self.exists) for sample in samples: if not sample: return - def wait_for_status( - self, status, timeout=TIMEOUT_4MINUTES, stop_status=None, sleep=1 - ): + def wait_for_status(self, status, timeout=TIMEOUT_4MINUTES, stop_status=None, sleep=1): """ Wait for resource to be in status @@ -696,23 +662,17 @@ def wait_for_status( current_status = sample_status.phase if current_status != last_logged_status: last_logged_status = current_status - self.logger.info( - f"Status of {self.kind} {self.name} is {current_status}" - ) + self.logger.info(f"Status of {self.kind} {self.name} is {current_status}") if current_status == status: return if current_status == stop_status: - raise TimeoutExpiredError( - f"Status of {self.kind} {self.name} is {current_status}" - ) + raise TimeoutExpiredError(f"Status of {self.kind} {self.name} is {current_status}") except TimeoutExpiredError: if current_status: - self.logger.error( - f"Status of {self.kind} {self.name} is {current_status}" - ) + self.logger.error(f"Status of {self.kind} {self.name} is {current_status}") raise def create(self, wait=False): @@ -735,9 +695,7 @@ def create(self, wait=False): self.logger.info(f"Create {self.kind} {self.name}") self.logger.info(f"Posting {hashed_res}") self.logger.debug(f"\n{yaml.dump(hashed_res)}") - resource_ = self.api.create( - body=self.res, namespace=self.namespace, dry_run=self.dry_run - ) + resource_ = self.api.create(body=self.res, namespace=self.namespace, dry_run=self.dry_run) with contextlib.suppress(TimeoutExpiredError): # some resources do not support get() (no instance) or the client do not have permissions self.initial_resource_version = self.instance.metadata.resourceVersion @@ -803,9 +761,7 @@ def update_replace(self, resource_dict): self.api.replace(body=resource_dict, name=self.name, namespace=self.namespace) 
@staticmethod - def retry_cluster_exceptions( - func, exceptions_dict=DEFAULT_CLUSTER_RETRY_EXCEPTIONS, **kwargs - ): + def retry_cluster_exceptions(func, exceptions_dict=DEFAULT_CLUSTER_RETRY_EXCEPTIONS, **kwargs): sampler = TimeoutSampler( wait_timeout=10, sleep=1, @@ -845,18 +801,14 @@ def get( dyn_client = get_client(config_file=config_file, context=context) def _get(): - _resources = cls._prepare_resources( - dyn_client=dyn_client, singular_name=singular_name, *args, **kwargs - ) + _resources = cls._prepare_resources(dyn_client=dyn_client, singular_name=singular_name, *args, **kwargs) try: for resource_field in _resources.items: yield cls(client=dyn_client, name=resource_field.metadata.name) except TypeError: yield cls(client=dyn_client, name=_resources.metadata.name) - return Resource.retry_cluster_exceptions( - func=_get, exceptions_dict=exceptions_dict - ) + return Resource.retry_cluster_exceptions(func=_get, exceptions_dict=exceptions_dict) @property def instance(self): @@ -916,10 +868,7 @@ def wait_for_condition(self, condition, status, timeout=300): Raises: TimeoutExpiredError: If Resource condition in not in desire status. 
""" - self.logger.info( - f"Wait for {self.kind}/{self.name}'s '{condition}' condition to be" - f" '{status}'" - ) + self.logger.info(f"Wait for {self.kind}/{self.name}'s '{condition}' condition to be '{status}'") timeout_watcher = TimeoutWatch(timeout=timeout) for sample in TimeoutSampler( @@ -1031,9 +980,7 @@ def events( ) @staticmethod - def get_all_cluster_resources( - config_file=None, config_dict=None, context=None, *args, **kwargs - ): + def get_all_cluster_resources(config_file=None, config_dict=None, context=None, *args, **kwargs): """ Get all cluster resources @@ -1052,9 +999,7 @@ def get_all_cluster_resources( print(f"Resource: {resource}") """ - client = get_client( - config_file=config_file, config_dict=config_dict, context=context - ) + client = get_client(config_file=config_file, config_dict=config_dict, context=context) for _resource in client.resources.search(): try: _resources = client.get(_resource, *args, **kwargs) @@ -1162,9 +1107,7 @@ def get( if not dyn_client: dyn_client = get_client(config_file=config_file, context=context) - _resources = cls._prepare_resources( - dyn_client=dyn_client, singular_name=singular_name, *args, **kwargs - ) + _resources = cls._prepare_resources(dyn_client=dyn_client, singular_name=singular_name, *args, **kwargs) try: for resource_field in _resources.items: if raw: @@ -1284,9 +1227,7 @@ def update(self, backup_resources=False): )[0].to_dict() namespace = update.get("metadata", {}).get("namespace") - backup = self._create_backup( - original=original_resource_dict, patch=update - ) + backup = self._create_backup(original=original_resource_dict, patch=update) if namespace: # Add namespace to metadata for restore. 
backup["metadata"]["namespace"] = namespace @@ -1297,28 +1238,19 @@ def update(self, backup_resources=False): resource_to_patch.append(resource) self._backups[resource] = backup else: - LOGGER.warning( - "ResourceEdit: no diff found in patch for " - f"{resource.name} -- skipping" - ) + LOGGER.warning(f"ResourceEdit: no diff found in patch for {resource.name} -- skipping") if not resource_to_patch: return else: resource_to_patch = self._patches - patches_to_apply = { - resource: self._patches[resource] for resource in resource_to_patch - } + patches_to_apply = {resource: self._patches[resource] for resource in resource_to_patch} # apply changes - self._apply_patches_sampler( - patches=patches_to_apply, action_text="Updating", action=self.action - ) + self._apply_patches_sampler(patches=patches_to_apply, action_text="Updating", action=self.action) def restore(self): - self._apply_patches_sampler( - patches=self._backups, action_text="Restoring", action=self.action - ) + self._apply_patches_sampler(patches=self._backups, action_text="Restoring", action=self.action) def __enter__(self): self.update(backup_resources=True) @@ -1335,9 +1267,7 @@ def _dictify_resourcefield(res): return ResourceEditor._dictify_resourcefield(res=dict(res.items())) elif isinstance(res, dict): return { - ResourceEditor._dictify_resourcefield( - res=key - ): ResourceEditor._dictify_resourcefield(res=value) + ResourceEditor._dictify_resourcefield(res=key): ResourceEditor._dictify_resourcefield(res=value) for key, value in res.items() } elif isinstance(res, list): @@ -1372,9 +1302,7 @@ def _create_backup(original, patch): continue # recursive call - key_diff = ResourceEditor._create_backup( - original=original[key], patch=value - ) + key_diff = ResourceEditor._create_backup(original=original[key], patch=value) if key_diff is not None: diff_dict[key] = key_diff @@ -1401,10 +1329,7 @@ def _apply_patches(patches, action_text, action): """ for resource, patch in patches.items(): - LOGGER.info( - 
f"ResourceEdits: {action_text} data for " - f"resource {resource.kind} {resource.name}" - ) + LOGGER.info(f"ResourceEdits: {action_text} data for resource {resource.kind} {resource.name}") # add name to patch if "metadata" not in patch: @@ -1424,15 +1349,11 @@ def _apply_patches(patches, action_text, action): patch["metadata"]["name"] = resource.name patch["metadata"]["namespace"] = resource.namespace - patch["metadata"][ - "resourceVersion" - ] = resource.instance.metadata.resourceVersion + patch["metadata"]["resourceVersion"] = resource.instance.metadata.resourceVersion patch["kind"] = resource.kind patch["apiVersion"] = resource.api_version - resource.update_replace( - resource_dict=patch - ) # replace the resource metadata + resource.update_replace(resource_dict=patch) # replace the resource metadata def _apply_patches_sampler(self, patches, action_text, action): exceptions_dict = {ConflictError: []} diff --git a/ocp_resources/route.py b/ocp_resources/route.py index 771b7466dc..28671b6245 100644 --- a/ocp_resources/route.py +++ b/ocp_resources/route.py @@ -38,9 +38,7 @@ def to_dict(self): super().to_dict() if not self.yaml_file: if self.service: - self.res.update( - {"spec": {"to": {"kind": "Service", "name": self.service}}} - ) + self.res.update({"spec": {"to": {"kind": "Service", "name": self.service}}}) if self.destination_ca_cert: self.res["spec"]["tls"] = { "destinationCACertificate": self.destination_ca_cert, diff --git a/ocp_resources/secret.py b/ocp_resources/secret.py index a3cfcee31e..1db5be737a 100644 --- a/ocp_resources/secret.py +++ b/ocp_resources/secret.py @@ -64,15 +64,11 @@ def to_dict(self): @property def certificate_not_after(self): - return self.instance.metadata.annotations[ - "auth.openshift.io/certificate-not-after" - ] + return self.instance.metadata.annotations["auth.openshift.io/certificate-not-after"] @property def certificate_not_before(self): - return self.instance.metadata.annotations[ - "auth.openshift.io/certificate-not-before" - 
] + return self.instance.metadata.annotations["auth.openshift.io/certificate-not-before"] @property def keys_to_hash(self): diff --git a/ocp_resources/sriov_network_node_policy.py b/ocp_resources/sriov_network_node_policy.py index d08e537dea..2d9f47ad51 100644 --- a/ocp_resources/sriov_network_node_policy.py +++ b/ocp_resources/sriov_network_node_policy.py @@ -62,6 +62,4 @@ def to_dict(self): if self.node_selector: self.res["spec"]["nodeSelector"] = self.node_selector else: - self.res["spec"]["nodeSelector"] = { - "feature.node.kubernetes.io/network-sriov.capable": "true" - } + self.res["spec"]["nodeSelector"] = {"feature.node.kubernetes.io/network-sriov.capable": "true"} diff --git a/ocp_resources/sriov_network_node_state.py b/ocp_resources/sriov_network_node_state.py index d345a0a5f4..ebea159918 100644 --- a/ocp_resources/sriov_network_node_state.py +++ b/ocp_resources/sriov_network_node_state.py @@ -26,9 +26,7 @@ def totalvfs(iface): return iface.totalvfs def wait_for_status_sync(self, wanted_status, timeout=1000): - self.logger.info( - f"Wait for {self.kind} {self.name} status to be {wanted_status}" - ) + self.logger.info(f"Wait for {self.kind} {self.name} status to be {wanted_status}") try: timeout_watcher = TimeoutWatch(timeout=timeout) for sample in TimeoutSampler( @@ -47,8 +45,5 @@ def wait_for_status_sync(self, wanted_status, timeout=1000): if sample == wanted_status: return except TimeoutExpiredError: - self.logger.error( - f"after {timeout} seconds, {self.name} status is" - f" {self.instance.status.syncStatus}" - ) + self.logger.error(f"after {timeout} seconds, {self.name} status is {self.instance.status.syncStatus}") raise diff --git a/ocp_resources/storage_class.py b/ocp_resources/storage_class.py index 4f10436456..a7d4a33f00 100644 --- a/ocp_resources/storage_class.py +++ b/ocp_resources/storage_class.py @@ -42,9 +42,7 @@ class VolumeBindingMode: WaitForFirstConsumer = "WaitForFirstConsumer" class Annotations: - IS_DEFAULT_CLASS = ( - 
f"{Resource.ApiGroup.STORAGECLASS_KUBERNETES_IO}/is-default-class" - ) + IS_DEFAULT_CLASS = f"{Resource.ApiGroup.STORAGECLASS_KUBERNETES_IO}/is-default-class" class ReclaimPolicy: DELETE = "Delete" diff --git a/ocp_resources/subscription.py b/ocp_resources/subscription.py index 0dcee0cd81..c37d2a76c8 100644 --- a/ocp_resources/subscription.py +++ b/ocp_resources/subscription.py @@ -58,11 +58,7 @@ def to_dict(self): ) if self.node_selector: - self.res["spec"].setdefault("config", {}).setdefault( - "nodeSelector", {} - ).update(self.node_selector) + self.res["spec"].setdefault("config", {}).setdefault("nodeSelector", {}).update(self.node_selector) if self.tolerations: - self.res["spec"].setdefault("config", {}).setdefault( - "tolerations", [] - ).append(self.tolerations) + self.res["spec"].setdefault("config", {}).setdefault("tolerations", []).append(self.tolerations) diff --git a/ocp_resources/task_run.py b/ocp_resources/task_run.py index 6c5e748819..e7ed759177 100644 --- a/ocp_resources/task_run.py +++ b/ocp_resources/task_run.py @@ -42,10 +42,7 @@ def to_dict(self): if not (self.task_ref or self.task_spec): raise ValueError("Mandatory to have either task_ref or task_spec") if self.task_ref and self.task_spec: - raise ValueError( - "Validation failed: expected exactly one either task_ref or" - " task_spec, got both" - ) + raise ValueError("Validation failed: expected exactly one either task_ref or task_spec, got both") self.res["spec"] = {} if self.task_ref: self.res["spec"]["taskRef"] = {"name": self.task_ref} @@ -55,8 +52,7 @@ def to_dict(self): if self.params: self.res["spec"]["params"] = [ - {"name": params_name, "value": params_value} - for params_name, params_value in self.params.items() + {"name": params_name, "value": params_value} for params_name, params_value in self.params.items() ] if self.taskrun_timeout: diff --git a/ocp_resources/template.py b/ocp_resources/template.py index 41a9731ce1..92892128da 100644 --- a/ocp_resources/template.py +++ 
b/ocp_resources/template.py @@ -28,12 +28,8 @@ class Flavor: class Annotations: DEPRECATED = f"{NamespacedResource.ApiGroup.TEMPLATE_KUBEVIRT_IO}/deprecated" PROVIDER = f"{NamespacedResource.ApiGroup.TEMPLATE_KUBEVIRT_IO}/provider" - PROVIDER_SUPPORT_LEVEL = ( - f"{NamespacedResource.ApiGroup.TEMPLATE_KUBEVIRT_IO}/provider-support-level" - ) - PROVIDER_URL = ( - f"{NamespacedResource.ApiGroup.TEMPLATE_KUBEVIRT_IO}/provider-url" - ) + PROVIDER_SUPPORT_LEVEL = f"{NamespacedResource.ApiGroup.TEMPLATE_KUBEVIRT_IO}/provider-support-level" + PROVIDER_URL = f"{NamespacedResource.ApiGroup.TEMPLATE_KUBEVIRT_IO}/provider-url" class VMAnnotations: OS = f"{Resource.ApiGroup.VM_KUBEVIRT_IO}/os" @@ -53,9 +49,7 @@ def process(self, client=None, **kwargs): instance_dict["parameters"] = params # namespace label - If not defined, the template is expected to belong to the same namespace as the VM. instance_namespace = instance_dict["metadata"]["namespace"] - instance_dict["objects"][0]["metadata"]["labels"][ - "vm.kubevirt.io/template.namespace" - ] = instance_namespace + instance_dict["objects"][0]["metadata"]["labels"]["vm.kubevirt.io/template.namespace"] = instance_namespace instance_json = json.dumps(instance_dict) body = json.loads(instance_json) @@ -70,8 +64,6 @@ def process(self, client=None, **kwargs): def generate_template_labels(os, workload, flavor): return [ f"{Template.Labels.OS}/{os}", - ( - f"{Template.Labels.WORKLOAD}/{getattr(Template.Workload, workload.upper())}" - ), + (f"{Template.Labels.WORKLOAD}/{getattr(Template.Workload, workload.upper())}"), f"{Template.Labels.FLAVOR}/{getattr(Template.Flavor, flavor.upper())}", ] diff --git a/ocp_resources/utils.py b/ocp_resources/utils.py index 2b757dcf04..608af4e29b 100644 --- a/ocp_resources/utils.py +++ b/ocp_resources/utils.py @@ -148,10 +148,7 @@ def __iter__(self): finally: if self.elapsed_time and self.print_log: - LOGGER.info( - "Elapsed time:" - f" {self.elapsed_time} 
[{datetime.timedelta(seconds=self.elapsed_time)}]" - ) + LOGGER.info(f"Elapsed time: {self.elapsed_time} [{datetime.timedelta(seconds=self.elapsed_time)}]") raise TimeoutExpiredError(self._get_exception_log(exp=last_exp)) @@ -186,9 +183,7 @@ def _is_raisable_exception(self, exp): for entry in self.exceptions_dict: if isinstance(exp, entry): # Check inheritance for raised exception exception_messages = self.exceptions_dict.get(entry) - if self._is_exception_matched( - exp=exp, exception_messages=exception_messages - ): + if self._is_exception_matched(exp=exp, exception_messages=exception_messages): return False return True @@ -224,9 +219,7 @@ def remaining_time(self): return self.start_time + self.timeout - time.time() -def skip_existing_resource_creation_teardown( - resource, export_str, user_exported_args, check_exists=True -): +def skip_existing_resource_creation_teardown(resource, export_str, user_exported_args, check_exists=True): """ Args: resource (Resource): Resource to match against. 
@@ -269,10 +262,7 @@ def _return_resource(_resource, _check_exists, _msg): ) for _name, _namespace in _resource_args.items(): - if resource_name == _name and ( - resource_namespace == _namespace - or not (resource_namespace and _namespace) - ): + if resource_name == _name and (resource_namespace == _namespace or not (resource_namespace and _namespace)): return _return_resource( _resource=resource, _check_exists=check_exists, diff --git a/ocp_resources/virtual_machine.py b/ocp_resources/virtual_machine.py index 3ce76a4824..73378c686e 100644 --- a/ocp_resources/virtual_machine.py +++ b/ocp_resources/virtual_machine.py @@ -73,9 +73,7 @@ def _subresource_api_url(self): ) def api_request(self, method, action, **params): - return super().api_request( - method=method, action=action, url=self._subresource_api_url, **params - ) + return super().api_request(method=method, action=action, url=self._subresource_api_url, **params) def to_dict(self): super().to_dict() @@ -94,9 +92,7 @@ def restart(self, timeout=TIMEOUT_4MINUTES, wait=False): self.vmi.virt_launcher_pod.wait_deleted() return self.vmi.wait_until_running(timeout=timeout, stop_status="dummy") - def stop( - self, timeout=TIMEOUT_4MINUTES, vmi_delete_timeout=TIMEOUT_4MINUTES, wait=False - ): + def stop(self, timeout=TIMEOUT_4MINUTES, vmi_delete_timeout=TIMEOUT_4MINUTES, wait=False): self.api_request(method="PUT", action="stop") if wait: self.wait_for_ready_status(timeout=timeout, status=None) @@ -113,10 +109,7 @@ def wait_for_ready_status(self, status, timeout=TIMEOUT_4MINUTES, sleep=1): Raises: TimeoutExpiredError: If timeout reached. 
""" - self.logger.info( - f"Wait for {self.kind} {self.name} status to be" - f" {'ready' if status == True else status}" - ) + self.logger.info(f"Wait for {self.kind} {self.name} status to be {'ready' if status is True else status}") samples = TimeoutSampler( wait_timeout=timeout, sleep=sleep, diff --git a/ocp_resources/virtual_machine_clone.py b/ocp_resources/virtual_machine_clone.py index 0c8c77f84a..953428822e 100644 --- a/ocp_resources/virtual_machine_clone.py +++ b/ocp_resources/virtual_machine_clone.py @@ -48,9 +48,7 @@ def to_dict(self): source = spec.setdefault("source", {}) source["apiGroup"] = NamespacedResource.ApiGroup.KUBEVIRT_IO - source["kind"] = ( - self.source_kind if self.source_kind else VirtualMachine.kind - ) + source["kind"] = self.source_kind if self.source_kind else VirtualMachine.kind source["name"] = self.source_name if self.target_name: diff --git a/ocp_resources/virtual_machine_export.py b/ocp_resources/virtual_machine_export.py index 42d782ca22..05c2c64995 100644 --- a/ocp_resources/virtual_machine_export.py +++ b/ocp_resources/virtual_machine_export.py @@ -53,9 +53,7 @@ def to_dict(self): super().to_dict() if not self.yaml_file: if not (self.source_kind and self.source_name): - raise ValueError( - "source_kind and source_name or a yaml_file is required" - ) + raise ValueError("source_kind and source_name or a yaml_file is required") self.res.update( { "spec": { diff --git a/ocp_resources/virtual_machine_import.py b/ocp_resources/virtual_machine_import.py index 6386c06562..5a4f3492f3 100644 --- a/ocp_resources/virtual_machine_import.py +++ b/ocp_resources/virtual_machine_import.py @@ -125,9 +125,7 @@ def __init__( self.target_vm_name = target_vm_name self.start_vm = start_vm self.provider_credentials_secret_name = provider_credentials_secret_name - self.provider_credentials_secret_namespace = ( - provider_credentials_secret_namespace - ) + self.provider_credentials_secret_namespace = provider_credentials_secret_namespace 
self.provider_mappings = provider_mappings self.resource_mapping_name = resource_mapping_name self.resource_mapping_namespace = resource_mapping_namespace @@ -156,13 +154,9 @@ def to_dict(self): secret["namespace"] = self.provider_credentials_secret_namespace if self.resource_mapping_name: - spec.setdefault("resourceMapping", {})[ - "name" - ] = self.resource_mapping_name + spec.setdefault("resourceMapping", {})["name"] = self.resource_mapping_name if self.resource_mapping_namespace: - spec.setdefault("resourceMapping", {})[ - "namespace" - ] = self.resource_mapping_namespace + spec.setdefault("resourceMapping", {})["namespace"] = self.resource_mapping_namespace if self.target_vm_name: spec["targetVmName"] = self.target_vm_name @@ -173,13 +167,9 @@ def to_dict(self): if self.warm: spec["warm"] = self.warm if self.finalize_date: - spec["finalizeDate"] = self.finalize_date.strftime( - format="%Y-%m-%dT%H:%M:%SZ" - ) + spec["finalizeDate"] = self.finalize_date.strftime(format="%Y-%m-%dT%H:%M:%SZ") - provider_source = spec.setdefault("source", {}).setdefault( - self.provider_type, {} - ) + provider_source = spec.setdefault("source", {}).setdefault(self.provider_type, {}) vm = provider_source.setdefault("vm", {}) if self.vm_id: vm["id"] = self.vm_id @@ -193,28 +183,16 @@ def to_dict(self): if self.provider_mappings: if self.provider_mappings.disk_mappings: - mappings = _map_mappings( - mappings=self.provider_mappings.disk_mappings - ) - provider_source.setdefault("mappings", {}).setdefault( - "diskMappings", mappings - ) + mappings = _map_mappings(mappings=self.provider_mappings.disk_mappings) + provider_source.setdefault("mappings", {}).setdefault("diskMappings", mappings) if self.provider_mappings.network_mappings: - mappings = _map_mappings( - mappings=self.provider_mappings.network_mappings - ) - provider_source.setdefault("mappings", {}).setdefault( - "networkMappings", mappings - ) + mappings = _map_mappings(mappings=self.provider_mappings.network_mappings) + 
provider_source.setdefault("mappings", {}).setdefault("networkMappings", mappings) if self.provider_mappings.storage_mappings: - mappings = _map_mappings( - mappings=self.provider_mappings.storage_mappings - ) - provider_source.setdefault("mappings", {}).setdefault( - "storageMappings", mappings - ) + mappings = _map_mappings(mappings=self.provider_mappings.storage_mappings) + provider_source.setdefault("mappings", {}).setdefault("storageMappings", mappings) def wait( self, @@ -223,10 +201,7 @@ def wait( cond_status=Condition.Status.TRUE, cond_type=Condition.SUCCEEDED, ): - self.logger.info( - f"Wait for {self.kind} {self.name} {cond_reason} condition to be" - f" {cond_status}" - ) + self.logger.info(f"Wait for {self.kind} {self.name} {cond_reason} condition to be {cond_status}") samples = TimeoutSampler( wait_timeout=timeout, sleep=1, @@ -244,11 +219,7 @@ def wait( current_conditions = sample_status.conditions for cond in current_conditions: last_condition = cond - if ( - cond.type == cond_type - and cond.status == cond_status - and cond.reason == cond_reason - ): + if cond.type == cond_type and cond.status == cond_status and cond.reason == cond_reason: msg = ( f"Status of {self.kind} {self.name} {cond.type} is " f"{cond.status} ({cond.reason}: {cond.message})" @@ -294,9 +265,7 @@ def to_dict(self): super().to_dict() if not self.yaml_file: for provider, mapping in self.mapping.items(): - res_provider_section = self.res.setdefault("spec", {}).setdefault( - provider, {} - ) + res_provider_section = self.res.setdefault("spec", {}).setdefault(provider, {}) if mapping.network_mappings is not None: res_provider_section.setdefault( "networkMappings", diff --git a/ocp_resources/virtual_machine_instance.py b/ocp_resources/virtual_machine_instance.py index 8fb5453e87..fb560b51da 100644 --- a/ocp_resources/virtual_machine_instance.py +++ b/ocp_resources/virtual_machine_instance.py @@ -49,9 +49,7 @@ def _subresource_api_url(self): ) def api_request(self, method, action, 
**params): - return super().api_request( - method=method, action=action, url=self._subresource_api_url, **params - ) + return super().api_request(method=method, action=action, url=self._subresource_api_url, **params) def pause(self, timeout=TIMEOUT_4MINUTES, wait=False): self.api_request(method="PUT", action="pause") @@ -86,9 +84,7 @@ def virt_launcher_pod(self): else: return pods[0] - raise ResourceNotFoundError( - f"VIRT launcher POD not found for {self.kind}:{self.name}" - ) + raise ResourceNotFoundError(f"VIRT launcher POD not found for {self.kind}:{self.name}") @property def virt_handler_pod(self): @@ -117,17 +113,13 @@ def wait_until_running(self, timeout=TIMEOUT_4MINUTES, logs=True, stop_status=No TimeoutExpiredError: If VMI failed to run. """ try: - self.wait_for_status( - status=self.Status.RUNNING, timeout=timeout, stop_status=stop_status - ) + self.wait_for_status(status=self.Status.RUNNING, timeout=timeout, stop_status=stop_status) except TimeoutExpiredError as sampler_ex: if not logs: raise try: virt_pod = self.virt_launcher_pod - self.logger.error( - f"Status of virt-launcher pod {virt_pod.name}: {virt_pod.status}" - ) + self.logger.error(f"Status of virt-launcher pod {virt_pod.name}: {virt_pod.status}") self.logger.debug(f"{virt_pod.name} *****LOGS*****") self.logger.debug(virt_pod.log(container="compute")) except ResourceNotFoundError as virt_pod_ex: @@ -148,9 +140,7 @@ def wait_for_pause_status(self, pause, timeout=TIMEOUT_4MINUTES): Raises: TimeoutExpiredError: If resource not exists. 
""" - self.logger.info( - f"Wait until {self.kind} {self.name} is {'Paused' if pause else 'Unpuased'}" - ) + self.logger.info(f"Wait until {self.kind} {self.name} is {'Paused' if pause else 'Unpuased'}") self.wait_for_domstate_pause_status(pause=pause, timeout=timeout) self.wait_for_vmi_condition_pause_status(pause=pause, timeout=timeout) @@ -179,9 +169,7 @@ def wait_for_vmi_condition_pause_status(self, pause, timeout=TIMEOUT_4MINUTES): # 'reason' may not exist yet # or # 'reason' may still exist after unpause if the CR has not been updated before we perform this check - if (pause and not sample.get("reason")) or ( - sample.get("reason") == "PausedByUser" and not pause - ): + if (pause and not sample.get("reason")) or (sample.get("reason") == "PausedByUser" and not pause): continue # Paused VM if pause and sample["reason"] == "PausedByUser": @@ -205,8 +193,7 @@ def node(self): def virsh_cmd(self, action): return shlex.split( - "virsh" - f" {self.virt_launcher_pod_hypervisor_connection_uri} {action} {self.namespace}_{self.name}" + f"virsh {self.virt_launcher_pod_hypervisor_connection_uri} {action} {self.namespace}_{self.name}" ) def get_xml(self): @@ -256,14 +243,10 @@ def virt_launcher_pod_hypervisor_connection_uri(self): socket = ( virtqemud_socket if virtqemud_socket - in self.virt_launcher_pod.execute( - command=["ls", "/var/run/libvirt/"], container="compute" - ) + in self.virt_launcher_pod.execute(command=["ls", "/var/run/libvirt/"], container="compute") else "libvirt" ) - hypervisor_connection_uri = ( - f"-c qemu+unix:///session?socket=/var/run/libvirt/{socket}-sock" - ) + hypervisor_connection_uri = f"-c qemu+unix:///session?socket=/var/run/libvirt/{socket}-sock" return hypervisor_connection_uri def get_domstate(self): @@ -320,17 +303,11 @@ def guest_user_info(self): def os_version(self): vmi_os_version = self.instance.status.guestOSInfo.get("version", {}) if not vmi_os_version: - self.logger.warning( - "Guest agent is not installed on the VM; OS version is 
not available." - ) + self.logger.warning("Guest agent is not installed on the VM; OS version is not available.") return vmi_os_version def interface_ip(self, interface): - iface_ip = [ - iface["ipAddress"] - for iface in self.interfaces - if iface["interfaceName"] == interface - ] + iface_ip = [iface["ipAddress"] for iface in self.interfaces if iface["interfaceName"] == interface] return iface_ip[0] if iface_ip else None def execute_virsh_command(self, command): diff --git a/ocp_resources/virtual_machine_restore.py b/ocp_resources/virtual_machine_restore.py index fe513fc249..4bfce094e6 100644 --- a/ocp_resources/virtual_machine_restore.py +++ b/ocp_resources/virtual_machine_restore.py @@ -43,9 +43,7 @@ def to_dict(self): super().to_dict() if not self.yaml_file: spec = self.res.setdefault("spec", {}) - spec.setdefault("target", {})[ - "apiGroup" - ] = NamespacedResource.ApiGroup.KUBEVIRT_IO + spec.setdefault("target", {})["apiGroup"] = NamespacedResource.ApiGroup.KUBEVIRT_IO spec["target"]["kind"] = VirtualMachine.kind spec["target"]["name"] = self.vm_name spec["virtualMachineSnapshotName"] = self.snapshot_name @@ -61,9 +59,7 @@ def wait_complete(self, status=True, timeout=TIMEOUT_4MINUTES): Raises: TimeoutExpiredError: If timeout reached. 
""" - self.logger.info( - f"Wait for {self.kind} {self.name} status to be complete = {status}" - ) + self.logger.info(f"Wait for {self.kind} {self.name} status to be complete = {status}") timeout_watcher = TimeoutWatch(timeout=timeout) for sample in TimeoutSampler( diff --git a/ocp_resources/virtual_machine_snapshot.py b/ocp_resources/virtual_machine_snapshot.py index 1cb5f18442..0e6d77f1cd 100644 --- a/ocp_resources/virtual_machine_snapshot.py +++ b/ocp_resources/virtual_machine_snapshot.py @@ -41,9 +41,7 @@ def to_dict(self): super().to_dict() if not self.yaml_file: spec = self.res.setdefault("spec", {}) - spec.setdefault("source", {})[ - "apiGroup" - ] = NamespacedResource.ApiGroup.KUBEVIRT_IO + spec.setdefault("source", {})["apiGroup"] = NamespacedResource.ApiGroup.KUBEVIRT_IO spec["source"]["kind"] = VirtualMachine.kind spec["source"]["name"] = self.vm_name @@ -58,10 +56,7 @@ def wait_ready_to_use(self, status=True, timeout=TIMEOUT_4MINUTES): Raises: TimeoutExpiredError: If timeout reached. 
""" - self.logger.info( - f"Wait for {self.kind} {self.name} status to be" - f" {'' if status else 'not '}ready to use" - ) + self.logger.info(f"Wait for {self.kind} {self.name} status to be {'' if status else 'not '}ready to use") timeout_watcher = TimeoutWatch(timeout=timeout) for sample in TimeoutSampler( diff --git a/pyproject.toml b/pyproject.toml index e759351aac..ec824c8adc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,20 +1,11 @@ -[tool.black] -line-length = 88 -target_version = ['py37', 'py38', 'py39', 'py310', "py311"] -exclude = ''' -( - /( - | \.git - | \.venv - | \.mypy_cache - | \.tox - )/ -) -''' - -[tool.isort] -line_length = 88 -profile = "black" +[tool.ruff] +preview = true +line-length = 120 +fix = true +output-format = "grouped" + +[tool.ruff.format] +exclude = [".git", ".venv", ".mypy_cache", ".tox", "__pycache__"] [tool.poetry] name = "openshift-python-wrapper" diff --git a/tests/test_resources.py b/tests/test_resources.py index 57d17c1ae1..5e7e496106 100644 --- a/tests/test_resources.py +++ b/tests/test_resources.py @@ -31,7 +31,5 @@ def test_create(): @pytest.mark.kubevirt def test_vm(namespace): name = "test-vm" - with VirtualMachine( - name=name, namespace=namespace.name, body=generate_yaml_from_template(name=name) - ): + with VirtualMachine(name=name, namespace=namespace.name, body=generate_yaml_from_template(name=name)): pass diff --git a/tests/test_validate_resources.py b/tests/test_validate_resources.py index 355c09ef7e..1c35372650 100644 --- a/tests/test_validate_resources.py +++ b/tests/test_validate_resources.py @@ -51,9 +51,7 @@ def _process_api_type(api_type, api_value, resource_dict, cls): def _get_api_group_and_version(bodies): for targets in bodies: api_type = targets.targets[0].id - return api_type, getattr( - targets.value, "attr", getattr(targets.value, "s", None) - ) + return api_type, getattr(targets.value, "attr", getattr(targets.value, "s", None)) def _get_namespaced(cls, resource_dict, api_value): @@ -61,9 
+59,7 @@ def _get_namespaced(cls, resource_dict, api_value): for base in getattr(cls, "bases", []): api_group_name = _api_group_name(api_value=api_value) namespaced = base.id == "NamespacedResource" - api_group = _api_group_dict( - resource_dict=resource_dict, api_group_name=api_group_name - ) + api_group = _api_group_dict(resource_dict=resource_dict, api_group_name=api_group_name) should_be_namespaced = api_group["namespaced"] == "true" if namespaced != should_be_namespaced: @@ -79,24 +75,18 @@ def _get_api_group(api_value, cls, resource_dict): api_group_name = _api_group_name(api_value=api_value) if api_group_name not in resource_dict["api_group"]: - errors.append( - f"Resource {cls.name} api_group should be " - f"{resource_dict['api_group']}. got {api_group_name}" - ) + errors.append(f"Resource {cls.name} api_group should be {resource_dict['api_group']}. got {api_group_name}") return errors def _get_api_version(api_value, cls, resource_dict): errors = [] api_group_name = _api_group_name(api_value=api_value) - api_group = _api_group_dict( - resource_dict=resource_dict, api_group_name=api_group_name - ) + api_group = _api_group_dict(resource_dict=resource_dict, api_group_name=api_group_name) if api_value.lower() != api_group["api_version"]: desire_api_group = resource_dict["api_version"].split("/")[0] errors.append( - f"Resource {cls.name} have api_version {api_value} " - f"but should have api_group = {desire_api_group}" + f"Resource {cls.name} have api_version {api_value} but should have api_group = {desire_api_group}" ) return errors @@ -114,8 +104,7 @@ def _resource_file(): @pytest.fixture() def resources_definitions(): file_ = ( - "https://raw.githubusercontent.com/RedHatQE/" - "openshift-resources-definitions/main/resources_definitions.json" + "https://raw.githubusercontent.com/RedHatQE/" "openshift-resources-definitions/main/resources_definitions.json" ) content = requests.get(file_).content return json.loads(content) @@ -133,17 +122,9 @@ def 
resources_definitions_errors(resources_definitions): if not resource_dict: continue - bodies = [ - body_ - for body_ in getattr(cls, "body") - if isinstance(body_, ast.Assign) - ] + bodies = [body_ for body_ in getattr(cls, "body") if isinstance(body_, ast.Assign)] api_type, api_value = _get_api_group_and_version(bodies=bodies) - errors.extend( - _get_namespaced( - cls=cls, resource_dict=resource_dict, api_value=api_value - ) - ) + errors.extend(_get_namespaced(cls=cls, resource_dict=resource_dict, api_value=api_value)) errors.extend( _process_api_type( api_type=api_type, diff --git a/tests/unittests/test_utils.py b/tests/unittests/test_utils.py index 990b6c7dc5..9e170e6fc7 100644 --- a/tests/unittests/test_utils.py +++ b/tests/unittests/test_utils.py @@ -113,9 +113,7 @@ def test_timeout_sampler_raises(self, test_params): "runtime_exception": IndexError("my allowed exception text"), }, { - "exception_log_regex": ( - "^.*\nLast exception: IndexError: my allowed exception text$" - ), + "exception_log_regex": ("^.*\nLast exception: IndexError: my allowed exception text$"), }, id="init_multi_exceptions_raise_allowed_with_allowed_msg", ), @@ -131,11 +129,8 @@ def test_timeout_sampler_raises_timeout(self, test_params, expected): ) except TimeoutExpiredError as exp: exception_log = str(exp) - exception_match = re.compile( - pattern=expected["exception_log_regex"], flags=re.DOTALL - ).match(string=exception_log) + exception_match = re.compile(pattern=expected["exception_log_regex"], flags=re.DOTALL).match( + string=exception_log + ) - assert exception_match, ( - f"Expected Regex: {expected['exception_log_regex']!r} Exception Log:" - f" {exception_log!r}" - ) + assert exception_match, f"Expected Regex: {expected['exception_log_regex']!r} Exception Log: {exception_log!r}"