From 387924c16881e8da8c3f5e283b9da997ed6c0da9 Mon Sep 17 00:00:00 2001
From: Teddy Andrieux
Date: Thu, 26 Aug 2021 17:18:39 +0200
Subject: [PATCH] tests: Rework pytest to use `DynamicClient`

Since we no longer use python-kubernetes models in the product, let's
also move to `DynamicClient` in the tests.

NOTE: Tests can likely be rewritten a bit more, but this commit just
moves to `DynamicClient`.
---
 tests/conftest.py                              |  74 +++-----
 tests/install/steps/test_expansion.py          |  12 +-
 tests/kube_utils.py                            | 170 +++++-------------
 .../features/service_configuration.feature     |   2 +-
 tests/post/steps/conftest.py                   |  12 +-
 tests/post/steps/test_authentication.py        |   2 +-
 tests/post/steps/test_dns.py                   |   6 +-
 tests/post/steps/test_ingress.py               |  19 +-
 tests/post/steps/test_logging.py               |  25 ++-
 tests/post/steps/test_monitoring.py            |  25 +--
 tests/post/steps/test_network.py               |   2 +-
 tests/post/steps/test_salt_api.py              |   8 +-
 tests/post/steps/test_sanity.py                |  51 +++---
 tests/post/steps/test_seccomp.py               |   6 +-
 .../post/steps/test_service_configuration.py   |  19 +-
 tests/post/steps/test_solutions.py             |  14 +-
 tests/post/steps/test_static_pods.py           |   8 +-
 tests/post/steps/test_versions.py              |   6 +-
 tests/post/steps/test_volume.py                |  16 +-
 19 files changed, 202 insertions(+), 275 deletions(-)

diff --git a/tests/conftest.py b/tests/conftest.py
index 5b7754972f..879064b958 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -84,17 +84,17 @@ def kubeconfig(kubeconfig_data, tmp_path):
 @pytest.fixture
 def control_plane_ingress_ip(k8s_client):
     """Return the Control Plane Ingress IP from Kubernetes service"""
-    ingress_svc = k8s_client.read_namespaced_service(
+    ingress_svc = k8s_client.resources.get(api_version="v1", kind="Service").get(
         name="ingress-nginx-control-plane-controller",
         namespace="metalk8s-ingress",
     )
-    return ingress_svc.spec.load_balancer_ip or ingress_svc.spec.external_i_ps[0]
+    return ingress_svc.spec.loadBalancerIP or ingress_svc.spec.externalIPs[0]
 
 
 @pytest.fixture
 def control_plane_ingress_ep(k8s_client, control_plane_ingress_ip):
     """Return the Control Plane Ingress Endpoint from Kubernetes service"""
-    ingress_svc = k8s_client.read_namespaced_service(
+    ingress_svc = k8s_client.resources.get(api_version="v1", kind="Service").get(
         name="ingress-nginx-control-plane-controller",
         namespace="metalk8s-ingress",
     )
@@ -104,49 +104,23 @@ def control_plane_ingress_ep(k8s_client, control_plane_ingress_ip):
 
 
 @pytest.fixture
-def k8s_apiclient(kubeconfig):
-    """Return an ApiClient to use for interacting with all K8s APIs."""
-    return kubernetes.config.new_client_from_config(
+def k8s_client(request, kubeconfig):
+    """Return a DynamicClient to use for interacting with all K8s APIs."""
+    k8s_apiclient = kubernetes.config.new_client_from_config(
         config_file=kubeconfig, persist_config=False
     )
+    return kubernetes.dynamic.DynamicClient(k8s_apiclient)
 
 
 @pytest.fixture
-def k8s_client(request, k8s_apiclient):
-    """Parametrized fixture to instantiate a client for a single K8s API.
-
-    By default, this will return a CoreV1Api client.
-    One can decorate a test function to use another API, like so:
-
-    ```
-    @pytest.mark.parametrize(
-        'k8s_client', ['AppsV1Api'], indirect=True
-    )
-    def test_something(k8s_client):
-        assert k8s_client.list_namespaced_deployment(namespace="default")
-    ```
-
-    FIXME: this is not working as of right now, since `pytest-bdd` manipulates
-    fixtures in its own way through the various scenario/when/then/given
-    decorators.
-    """
-    api_name = getattr(request, "param", "CoreV1Api")
-    api_cls = getattr(kubernetes.client, api_name, None)
-
-    if api_cls is None:
-        pytest.fail(
-            "Unknown K8s API '{}' to use with `k8s_client` fixture.".format(api_name)
-        )
-
-    return api_cls(api_client=k8s_apiclient)
-
-
-@pytest.fixture
-def admin_sa(k8s_client, k8s_apiclient):
+def admin_sa(k8s_client):
     """Fixture to create a ServiceAccount which is bind to `cluster-admin`
     ClusterRole and return the ServiceAccount name
     """
-    rbac_k8s_client = kubernetes.client.RbacAuthorizationV1Api(api_client=k8s_apiclient)
+    sa_k8s_client = k8s_client.resources.get(api_version="v1", kind="ServiceAccount")
+    crb_k8s_client = k8s_client.resources.get(
+        api_version="rbac.authorization.k8s.io/v1", kind="ClusterRoleBinding"
+    )
     sa_name = "test-admin"
     sa_namespace = "default"
     sa_manifest = {
@@ -168,14 +142,12 @@ def admin_sa(k8s_client):
         ],
     }
 
-    k8s_client.create_namespaced_service_account(
-        body=sa_manifest, namespace=sa_namespace
-    )
-    rbac_k8s_client.create_cluster_role_binding(body=crb_manifest)
+    sa_k8s_client.create(body=sa_manifest, namespace=sa_namespace)
+    crb_k8s_client.create(body=crb_manifest)
 
     def _check_crb_exists():
         try:
-            rbac_k8s_client.read_cluster_role_binding(name=sa_name)
+            crb_k8s_client.get(name=sa_name)
         except kubernetes.client.rest.ApiException as err:
             if err.status == 404:
                 raise AssertionError("ClusterRoleBinding not yet created")
@@ -183,9 +155,7 @@ def _check_crb_exists():
 
     def _check_sa_exists():
         try:
-            sa_obj = k8s_client.read_namespaced_service_account(
-                name=sa_name, namespace=sa_namespace
-            )
+            sa_obj = sa_k8s_client.get(name=sa_name, namespace=sa_namespace)
         except kubernetes.client.rest.ApiException as err:
             if err.status == 404:
                 raise AssertionError("ServiceAccount not yet created")
@@ -195,7 +165,7 @@ def _check_sa_exists():
         assert sa_obj.secrets[0].name
 
         try:
-            secret_obj = k8s_client.read_namespaced_secret(
+            secret_obj = k8s_client.resources.get(api_version="v1", kind="Secret").get(
                 sa_obj.secrets[0].name, sa_namespace
             )
         except kubernetes.client.rest.ApiException as err:
@@ -214,14 +184,14 @@ def _check_sa_exists():
     yield (sa_name, sa_namespace)
 
     try:
-        rbac_k8s_client.delete_cluster_role_binding(
+        crb_k8s_client.delete(
             name=sa_name,
             body=kubernetes.client.V1DeleteOptions(propagation_policy="Foreground"),
         )
    except kubernetes.client.rest.ApiException:
        pass

-    k8s_client.delete_namespaced_service_account(
+    sa_k8s_client.delete(
         name=sa_name,
         namespace=sa_namespace,
         body=kubernetes.client.V1DeleteOptions(propagation_policy="Foreground"),
@@ -477,6 +447,10 @@ def _verify_kubeapi_service(host):
 def etcdctl(k8s_client, command, ssh_config):
     """Run an etcdctl command inside the etcd container."""
+    # NOTE: We use the Kubernetes client instead of DynamicClient as it
+    # eases the execution of commands in a Pod
+    client = kubernetes.client.CoreV1Api(api_client=k8s_client.client)
+
     name = "etcd-{}".format(utils.get_node_name("bootstrap", ssh_config))
 
     etcd_command = [
@@ -491,7 +465,7 @@ def etcdctl(k8s_client, command, ssh_config):
         "/etc/kubernetes/pki/etcd/server.crt",
     ] + command
     output = kubernetes.stream.stream(
-        k8s_client.connect_get_namespaced_pod_exec,
+        client.connect_get_namespaced_pod_exec,
         name=name,
         namespace="kube-system",
         command=etcd_command,
diff --git a/tests/install/steps/test_expansion.py b/tests/install/steps/test_expansion.py
index 6b983060c0..d67d562748 100644
--- a/tests/install/steps/test_expansion.py
+++ b/tests/install/steps/test_expansion.py
@@ -30,7 +30,9 @@ def declare_node(host, ssh_config, version, k8s_client, node_type, node_name):
     """Declare the given node in Kubernetes."""
     node_ip = get_node_ip(host, node_name, ssh_config)
     node_manifest = get_node_manifest(node_type, version, node_ip, node_name)
-    k8s_client.create_node(body=node_from_manifest(node_manifest))
+    k8s_client.resources.get(api_version="v1", kind="Node").create(
+        body=node_from_manifest(node_manifest)
+    )
 
 
 @when(parsers.parse('we deploy the node "{node_name}"'))
@@ -72,7 +74,7 @@ def deploy_node(host, ssh_config, version, node_name):
 def check_node_is_registered(k8s_client, node_name):
     """Check if the given node is registered in Kubernetes."""
     try:
-        k8s_client.read_node(node_name)
+        k8s_client.resources.get(api_version="v1", kind="Node").get(name=node_name)
     except k8s.client.rest.ApiException as exn:
         pytest.fail(str(exn))
 
@@ -83,7 +85,11 @@ def check_node_status(k8s_client, node_name, expected_status):
 
     def _check_node_status():
         try:
-            status = k8s_client.read_node_status(node_name).status
+            status = (
+                k8s_client.resources.get(api_version="v1", kind="Node")
+                .get(name=node_name)
+                .status
+            )
         except k8s.client.rest.ApiException as exn:
             raise AssertionError(exn)
         # If really not ready, status may not have been pushed yet.
diff --git a/tests/kube_utils.py b/tests/kube_utils.py
index e052d5589d..2ead41aacb 100644
--- a/tests/kube_utils.py
+++ b/tests/kube_utils.py
@@ -108,9 +108,7 @@ def get_pods(
     if label:
         kwargs["label_selector"] = label
 
-    if namespace:
-        return k8s_client.list_namespaced_pod(namespace=namespace, **kwargs).items
-    return k8s_client.list_pod_for_all_namespaces(**kwargs).items
+    return k8s_client.resources.get(api_version="v1", kind="Pod").get(**kwargs).items
 
 
 def check_pod_status(k8s_client, name, namespace="default", state="Running"):
@@ -123,7 +121,9 @@ def check_pod_status(k8s_client, name, namespace="default", state="Running"):
 
     def _check_pod_status():
         try:
-            pod = k8s_client.read_namespaced_pod(name=name, namespace=namespace)
+            pod = k8s_client.resources.get(api_version="v1", kind="Pod").get(
+                name=name, namespace=namespace
+            )
         except ApiException as err:
             if err.status == 404:
                 raise AssertionError("Pod not yet created")
@@ -142,9 +142,19 @@ def _check_pod_status():
 
 class Client(abc.ABC):
     """Helper class for manipulation of K8s resources in tests."""
 
-    def __init__(self, k8s_client, kind, retry_count, retry_delay):
-        self._client = k8s_client
+    def __init__(
+        self,
+        k8s_client,
+        kind,
+        api_version="v1",
+        namespace=None,
+        retry_count=10,
+        retry_delay=2,
+    ):
         self._kind = kind
+        self._api_version = api_version
+        self._namespace = namespace
+        self._client = k8s_client.resources.get(api_version=api_version, kind=kind)
         self._count = retry_count
         self._delay = retry_delay
 
@@ -202,10 +212,7 @@ def check_deletion_marker(self, name):
         def _check_deletion_marker():
             obj = self.get(name)
             assert obj is not None, "{} {} not found".format(self._kind, name)
-            if isinstance(obj, dict):
-                tstamp = obj["metadata"].get("deletionTimestamp")
-            else:
-                tstamp = obj.metadata.deletion_timestamp
+            tstamp = obj["metadata"].get("deletionTimestamp")
             assert tstamp is not None, "{} {} is not marked for deletion".format(
                 self._kind, name
             )
@@ -217,29 +224,26 @@ def _check_deletion_marker():
             name="checking that {} {} is marked for deletion".format(self._kind, name),
         )
 
-    @abc.abstractmethod
     def list(self):
         """Return a list of existing objects."""
-        pass
+        return self._client.get(namespace=self._namespace).items
 
-    @abc.abstractmethod
     def _create(self, body):
         """Create a new object using the given body."""
-        pass
+        return self._client.create(body=body, namespace=self._namespace)
 
-    @abc.abstractmethod
     def _get(self, name):
         """Return the object identified by `name`, raise if not found."""
-        pass
+        return self._client.get(name=name, namespace=self._namespace)
 
-    @abc.abstractmethod
     def _delete(self, name):
         """Delete the object identified by `name`.
 
         The object may be simply marked for deletion and stay around for a
         while.
         """
-        pass
+        body = kubernetes.client.V1DeleteOptions()
+        return self._client.delete(name=name, namespace=self._namespace, body=body)
 
 
 # }}}
 
@@ -248,53 +252,29 @@ def _delete(self, name):
 
 class VolumeClient(Client):
     def __init__(self, k8s_client, ssh_config):
-        super().__init__(k8s_client, kind="Volume", retry_count=30, retry_delay=4)
         self._ssh_config = ssh_config
-        self._group = "storage.metalk8s.scality.com"
-        self._version = "v1alpha1"
-        self._plural = "volumes"
-
-    def list(self):
-        return self._client.list_cluster_custom_object(
-            group=self._group, version=self._version, plural=self._plural
-        )["items"]
+        super().__init__(
+            k8s_client,
+            kind="Volume",
+            api_version="storage.metalk8s.scality.com/v1alpha1",
+            retry_count=30,
+            retry_delay=4,
+        )
 
     def _create(self, body):
         # Fixup the node name.
         body["spec"]["nodeName"] = utils.get_node_name(
             body["spec"]["nodeName"], self._ssh_config
         )
-        self._client.create_cluster_custom_object(
-            group=self._group, version=self._version, plural=self._plural, body=body
-        )
-
-    def _get(self, name):
-        return self._client.get_cluster_custom_object(
-            group=self._group, version=self._version, plural=self._plural, name=name
-        )
-
-    def _delete(self, name):
-        body = kubernetes.client.V1DeleteOptions()
-        self._client.delete_cluster_custom_object(
-            group=self._group,
-            version=self._version,
-            plural=self._plural,
-            name=name,
-            body=body,
-            grace_period_seconds=0,
-        )
+        self._client.create(body=body)
 
     def wait_for_status(self, name, status, wait_for_device_name=False):
         def _wait_for_status():
             volume = self.get(name)
             assert volume is not None, "Volume not found"
 
-            try:
-                actual_status = volume["status"]
-            except KeyError:
-                assert (
-                    status == "Unknown"
-                ), "Unexpected status: expected {}, got none".format(status)
+            actual_status = volume.get("status")
+            assert actual_status, f"Unexpected status: expected {status}, got none"
 
             phase = self.compute_phase(actual_status)
             assert phase == status, "Unexpected status: expected {}, got {}".format(
@@ -303,7 +283,7 @@ def _wait_for_status():
 
             if wait_for_device_name:
                 assert (
-                    "deviceName" in actual_status
+                    "deviceName" in actual_status.keys()
                 ), "Volume status.deviceName has not been reconciled"
 
             return volume
@@ -345,24 +325,7 @@ def get_error(volume_status):
 
 class PersistentVolumeClient(Client):
     def __init__(self, k8s_client):
-        super().__init__(
-            k8s_client, kind="PersistentVolume", retry_count=10, retry_delay=2
-        )
-
-    def list(self):
-        return self._client.list_persistent_volume().items
-
-    def _create(self, body):
-        self._client.create_persistent_volume(body=body)
-
-    def _get(self, name):
-        return self._client.read_persistent_volume(name)
-
-    def _delete(self, name):
-        body = kubernetes.client.V1DeleteOptions()
-        self._client.delete_persistent_volume(
-            name=name, body=body, grace_period_seconds=0
-        )
+        super().__init__(k8s_client, kind="PersistentVolume")
 
 
 # }}}
 
@@ -371,42 +334,19 @@ def _delete(self, name):
 
 class PersistentVolumeClaimClient(Client):
     def __init__(self, k8s_client, namespace="default"):
-        super().__init__(
-            k8s_client, kind="PersistentVolumeClaim", retry_count=10, retry_delay=2
-        )
-        self._namespace = namespace
+        super().__init__(k8s_client, kind="PersistentVolumeClaim", namespace=namespace)
 
     def create_for_volume(self, volume, pv):
         """Create a PVC matching the given volume."""
         assert pv is not None, "PersistentVolume {} not found".format(volume)
         body = PVC_TEMPLATE.format(
             volume_name=volume,
-            storage_class=pv.spec.storage_class_name,
-            access=pv.spec.access_modes[0],
+            storage_class=pv.spec.storageClassName,
+            access=pv.spec.accessModes[0],
             size=pv.spec.capacity["storage"],
         )
         self.create_from_yaml(body)
 
-    def list(self):
-        return self._client.list_namespaced_persistent_volume_claim(
-            namespace=self._namespace
-        ).items
-
-    def _create(self, body):
-        self._client.create_namespaced_persistent_volume_claim(
-            namespace=self._namespace, body=body
-        )
-
-    def _get(self, name):
-        return self._client.read_namespaced_persistent_volume_claim(
-            name=name, namespace=self._namespace
-        )
-
-    def _delete(self, name):
-        self._client.delete_namespaced_persistent_volume_claim(
-            name=name, namespace=self._namespace, grace_period_seconds=0
-        )
-
 
 # }}}
 # PodClient {{{
 
 
 class PodClient(Client):
     def __init__(self, k8s_client, image, namespace="default"):
-        super().__init__(k8s_client, kind="Pod", retry_count=30, retry_delay=2)
+        super().__init__(k8s_client, kind="Pod", namespace=namespace, retry_count=30)
         self._image = image
-        self._namespace = namespace
 
     def create_with_volume(self, volume_name, command):
         """Create a pod using the specified volume."""
@@ -437,19 +376,9 @@ def create_with_volume(self, volume_name, command):
             name="wait for pod {}".format(pod_name),
         )
 
-    def list(self):
-        return self._client.list_namespaced_pod(namespace=self._namespace).items
-
-    def _create(self, body):
-        self._client.create_namespaced_pod(namespace=self._namespace, body=body)
-
-    def _get(self, name):
-        return self._client.read_namespaced_pod(name=name, namespace=self._namespace)
-
     def _delete(self, name):
-        self._client.delete_namespaced_pod(
-            name=name, namespace=self._namespace, grace_period_seconds=0
-        )
+        body = kubernetes.client.V1DeleteOptions(grace_period_seconds=0)
+        return self._client.delete(name=name, namespace=self._namespace, body=body)
 
 
 # }}}
 
@@ -458,19 +387,12 @@ def _delete(self, name):
 
 class StorageClassClient(Client):
     def __init__(self, k8s_client):
-        super().__init__(k8s_client, kind="StorageClass", retry_count=10, retry_delay=2)
-
-    def list(self):
-        return self._client.list_storage_class().items
-
-    def _create(self, body):
-        self._client.create_storage_class(body=body)
-
-    def _get(self, name):
-        return self._client.read_storage_class(name=name)
-
-    def _delete(self, name):
-        self._client.delete_storage_class(name=name, grace_period_seconds=0)
+        super().__init__(
+            k8s_client,
+            kind="StorageClass",
+            api_version="storage.k8s.io/v1",
+            retry_count=10,
+        )
 
 
 # }}}
diff --git a/tests/post/features/service_configuration.feature b/tests/post/features/service_configuration.feature
index a792c92c6b..f885c3a476 100644
--- a/tests/post/features/service_configuration.feature
+++ b/tests/post/features/service_configuration.feature
@@ -8,7 +8,7 @@ Feature: Cluster and Services Configurations
     When we update the CSC 'spec.deployment.replicas' to '3'
     And we apply the 'metalk8s.addons.dex.deployed' state
     And we wait for the rollout of 'deploy/dex' in namespace 'metalk8s-auth' to complete
-    Then we have '3' at 'status.available_replicas' for 'dex' Deployment in namespace 'metalk8s-auth'
+    Then we have '3' at 'status.availableReplicas' for 'dex' Deployment in namespace 'metalk8s-auth'
 
   Scenario: Update Admin static user password
     Given the Kubernetes API is available
diff --git a/tests/post/steps/conftest.py b/tests/post/steps/conftest.py
index 4bfb5ec93c..fa3974c814 100644
--- a/tests/post/steps/conftest.py
+++ b/tests/post/steps/conftest.py
@@ -12,10 +12,8 @@
 
 
 @pytest.fixture
-def volume_client(k8s_apiclient, ssh_config):
-    return kube_utils.VolumeClient(
-        CustomObjectsApi(api_client=k8s_apiclient), ssh_config
-    )
+def volume_client(k8s_client, ssh_config):
+    return kube_utils.VolumeClient(k8s_client, ssh_config)
 
 
 @pytest.fixture
@@ -34,8 +32,8 @@ def pod_client(k8s_client, utils_image):
 
 
 @pytest.fixture
-def sc_client(k8s_apiclient):
-    return kube_utils.StorageClassClient(StorageV1Api(api_client=k8s_apiclient))
+def sc_client(k8s_client):
+    return kube_utils.StorageClassClient(k8s_client)
 
 
 # }}}
@@ -119,7 +117,7 @@ def test_volume(volume_client, name):
 
 @given("we are on a multi node cluster")
 def check_multi_node(k8s_client):
-    nodes = k8s_client.list_node()
+    nodes = k8s_client.resources.get(api_version="v1", kind="Node").get()
 
     if len(nodes.items) == 1:
         pytest.skip("We skip single node cluster for this test")
diff --git a/tests/post/steps/test_authentication.py b/tests/post/steps/test_authentication.py
index 670d71afef..97c4ca556e 100644
--- a/tests/post/steps/test_authentication.py
+++ b/tests/post/steps/test_authentication.py
@@ -80,7 +80,7 @@ def _wait_for_ingress_pod_and_container():
 
         for pod in pods:
             assert all(
-                container.ready == True for container in pod.status.container_statuses
+                container.ready == True for container in pod.status.containerStatuses
             )
 
     utils.retry(
diff --git a/tests/post/steps/test_dns.py b/tests/post/steps/test_dns.py
index 3ee9027533..0d8edf0ddb 100644
--- a/tests/post/steps/test_dns.py
+++ b/tests/post/steps/test_dns.py
@@ -21,7 +21,9 @@ def utils_pod(k8s_client, utils_image):
     manifest["spec"]["containers"][0]["image"] = utils_image
     pod_name = manifest["metadata"]["name"]
 
-    k8s_client.create_namespaced_pod(body=manifest, namespace="default")
+    pod_k8s_client = k8s_client.resources.get(api_version="v1", kind="Pod")
+
+    pod_k8s_client.create(body=manifest, namespace="default")
 
     # Wait for the Pod to be ready
     utils.retry(
@@ -36,7 +38,7 @@ def utils_pod(k8s_client, utils_image):
     yield pod_name
 
     # Clean-up resources
-    k8s_client.delete_namespaced_pod(
+    pod_k8s_client.delete(
         name=pod_name,
         namespace="default",
         body=client.V1DeleteOptions(
diff --git a/tests/post/steps/test_ingress.py b/tests/post/steps/test_ingress.py
index 4b7ee65cf2..0187b6292f 100644
--- a/tests/post/steps/test_ingress.py
+++ b/tests/post/steps/test_ingress.py
@@ -52,8 +52,8 @@ def context():
 def teardown(context, host, ssh_config, version, k8s_client):
     yield
     if "node_to_uncordon" in context:
-        k8s_client.patch_node(
-            context["node_to_uncordon"], {"spec": {"unschedulable": False}}
+        k8s_client.resources.get(api_version="v1", kind="Node").patch(
+            name=context["node_to_uncordon"], body={"spec": {"unschedulable": False}}
         )
 
     if "bootstrap_to_restore" in context:
@@ -146,16 +146,19 @@ def stop_cp_ingress_vip_node(context, k8s_client):
     context["node_to_uncordon"] = node_name
 
     # Cordon node
-    k8s_client.patch_node(node_name, {"spec": {"unschedulable": True}})
+    k8s_client.resources.get(api_version="v1", kind="Node").patch(
+        name=node_name, body={"spec": {"unschedulable": True}}
+    )
 
+    pod_k8s_client = k8s_client.resources.get(api_version="v1", kind="Pod")
     # Delete Control Plane Ingress Controller from node
-    cp_ingress_pods = k8s_client.list_namespaced_pod(
-        "metalk8s-ingress",
+    cp_ingress_pods = pod_k8s_client.get(
+        namespace="metalk8s-ingress",
         label_selector="app.kubernetes.io/instance=ingress-nginx-control-plane",
         field_selector="spec.nodeName={}".format(node_name),
     )
     for pod in cp_ingress_pods.items:
-        k8s_client.delete_namespaced_pod(pod.metadata.name, pod.metadata.namespace)
+        pod_k8s_client.delete(name=pod.metadata.name, namespace=pod.metadata.namespace)
 
 
 @when(parsers.parse("we set control plane ingress IP to node '{node_name}' IP"))
@@ -230,8 +233,8 @@ def get_node_hosting_cp_ingress_vip(k8s_client):
         "involvedObject.kind=Service",
         "involvedObject.name=ingress-nginx-control-plane-controller",
     ]
-    events = k8s_client.list_namespaced_event(
-        "metalk8s-ingress",
+    events = k8s_client.resources.get(api_version="v1", kind="Event").get(
+        namespace="metalk8s-ingress",
         field_selector=",".join(field_selectors),
     )
diff --git a/tests/post/steps/test_logging.py b/tests/post/steps/test_logging.py
index 40afb9b07c..2fa1b531d0 100644
--- a/tests/post/steps/test_logging.py
+++ b/tests/post/steps/test_logging.py
@@ -1,10 +1,11 @@
+import datetime
 import os
 import pathlib
 import time
 import uuid
 
 import yaml
-from kubernetes import client
+import kubernetes.client
 import pytest
 from pytest_bdd import scenario, given, when, then, parsers
 
@@ -58,8 +59,11 @@ def test_logging_pipeline_is_working(host):
 @given("the Loki API is available")
 def check_loki_api(k8s_client):
     def _check_loki_ready():
+        # NOTE: We use the Kubernetes client instead of DynamicClient as it
+        # eases calling the "service proxy path"
+        client = kubernetes.client.CoreV1Api(api_client=k8s_client.client)
         try:
-            response = k8s_client.connect_get_namespaced_service_proxy_with_path(
+            response = client.connect_get_namespaced_service_proxy_with_path(
                 "loki:http-metrics", "metalk8s-logging", path="ready"
             )
         except Exception as exc:  # pylint: disable=broad-except
@@ -81,8 +85,13 @@ def set_up_logger_pod(k8s_client, utils_image):
     name = manifest["metadata"]["name"]
     namespace = manifest["metadata"]["namespace"]
 
-    result = k8s_client.create_namespaced_pod(body=manifest, namespace=namespace)
-    pod_creation_ts = int(result.metadata.creation_timestamp.timestamp())
+    pod_k8s_client = k8s_client.resources.get(api_version="v1", kind="Pod")
+    result = pod_k8s_client.create(body=manifest, namespace=namespace)
+    pod_creation_ts = int(
+        datetime.datetime.strptime(
+            result.metadata.creationTimestamp, "%Y-%m-%dT%H:%M:%SZ"
+        ).replace(tzinfo=datetime.timezone.utc).timestamp()
+    )
 
     utils.retry(
         kube_utils.check_pod_status(
@@ -98,10 +107,10 @@ def set_up_logger_pod(k8s_client, utils_image):
 
     yield pod_creation_ts
 
-    k8s_client.delete_namespaced_pod(
+    pod_k8s_client.delete(
         name=name,
         namespace=namespace,
-        body=client.V1DeleteOptions(
+        body=kubernetes.client.V1DeleteOptions(
             grace_period_seconds=0,
         ),
     )
@@ -131,7 +140,7 @@ def push_log_to_loki(k8s_client, context):
             }
         ]
     }
-    response = k8s_client.api_client.call_api(
+    response = k8s_client.client.call_api(
         "/api/v1/namespaces/{namespace}/services/{name}/proxy/{path}",
         "POST",
         path_params,
@@ -236,7 +245,7 @@ def query_loki_api(k8s_client, content, route="query"):
         "namespace": "metalk8s-logging",
         "path": "loki/api/v1/{0}".format(route),
     }
-    response = k8s_client.api_client.call_api(
+    response = k8s_client.client.call_api(
         "/api/v1/namespaces/{namespace}/services/{name}/proxy/{path}",
         "GET",
         path_params,
diff --git a/tests/post/steps/test_monitoring.py b/tests/post/steps/test_monitoring.py
index e3a6fe46ea..958517174d 100644
--- a/tests/post/steps/test_monitoring.py
+++ b/tests/post/steps/test_monitoring.py
@@ -3,7 +3,6 @@
 import random
 import string
 
-import kubernetes.client
 from kubernetes.client.rest import ApiException
 
 import pytest
@@ -104,12 +103,14 @@ def check_grafana_api(grafana_api):
 
 
 @given(parsers.parse("the '{name}' APIService exists"))
-def apiservice_exists(host, name, k8s_apiclient, request):
-    client = kubernetes.client.ApiregistrationV1Api(api_client=k8s_apiclient)
+def apiservice_exists(host, name, k8s_client, request):
+    client = k8s_client.resources.get(
+        api_version="apiregistration.k8s.io/v1", kind="APIService"
+    )
 
     def _check_object_exists():
         try:
-            _ = client.read_api_service(name)
+            _ = client.get(name=name)
         except ApiException as err:
             if err.status == 404:
                 raise AssertionError("APIService not yet created")
@@ -156,12 +157,14 @@ def _wait_job_status():
 
 
 @then(parsers.parse("the '{name}' APIService is {condition}"))
-def apiservice_condition_met(name, condition, k8s_apiclient):
-    client = kubernetes.client.ApiregistrationV1Api(api_client=k8s_apiclient)
+def apiservice_condition_met(name, condition, k8s_client):
+    client = k8s_client.resources.get(
+        api_version="apiregistration.k8s.io/v1", kind="APIService"
+    )
 
     def _check_object_exists():
         try:
-            svc = client.read_api_service(name)
+            svc = client.get(name=name)
 
             ok = False
             for cond in svc.status.conditions:
@@ -183,9 +186,9 @@ def _check_object_exists():
 @then(
     parsers.parse("a pod with label '{label}' in namespace '{namespace}' has metrics")
 )
-def pod_has_metrics(label, namespace, k8s_apiclient):
+def pod_has_metrics(label, namespace, k8s_client):
     def _pod_has_metrics():
-        result = k8s_apiclient.call_api(
+        result = k8s_client.client.call_api(
             resource_path="/apis/metrics.k8s.io/v1beta1/"
             "namespaces/{namespace}/pods",
             method="GET",
             response_type=object,
@@ -210,9 +213,9 @@ def _pod_has_metrics():
 
 
 @then(parsers.parse("a node with label '{label}' has metrics"))
-def node_has_metrics(label, k8s_apiclient):
+def node_has_metrics(label, k8s_client):
     def _node_has_metrics():
-        result = k8s_apiclient.call_api(
+        result = k8s_client.client.call_api(
             resource_path="/apis/metrics.k8s.io/v1beta1/nodes",
             method="GET",
             response_type=object,
diff --git a/tests/post/steps/test_network.py b/tests/post/steps/test_network.py
index 84c34dd3ad..936431d5de 100644
--- a/tests/post/steps/test_network.py
+++ b/tests/post/steps/test_network.py
@@ -13,7 +13,7 @@ def test_all_listening_processes(host):
 
 @given("we run on an untainted single node")
 def running_on_single_node_untainted(k8s_client):
-    nodes = k8s_client.list_node()
+    nodes = k8s_client.resources.get(api_version="v1", kind="Node").get()
 
     if len(nodes.items) != 1:
         pytest.skip("We skip multi nodes clusters for this test")
diff --git a/tests/post/steps/test_salt_api.py b/tests/post/steps/test_salt_api.py
index 7615ead4de..2d0db31b98 100644
--- a/tests/post/steps/test_salt_api.py
+++ b/tests/post/steps/test_salt_api.py
@@ -184,10 +184,10 @@ def have_no_perms(host, context):
 
 
 def _login_salt_api_sa(address, k8s_client, name, namespace, username=None):
-    service_account = k8s_client.read_namespaced_service_account(
-        name=name, namespace=namespace
-    )
-    secret = k8s_client.read_namespaced_secret(
+    service_account = k8s_client.resources.get(
+        api_version="v1", kind="ServiceAccount"
+    ).get(name=name, namespace=namespace)
+    secret = k8s_client.resources.get(api_version="v1", kind="Secret").get(
         name=service_account.secrets[0].name, namespace=namespace
     )
     token = base64.decodebytes(secret.data["token"].encode("utf-8"))
diff --git a/tests/post/steps/test_sanity.py b/tests/post/steps/test_sanity.py
index b003e28997..041107abf7 100644
--- a/tests/post/steps/test_sanity.py
+++ b/tests/post/steps/test_sanity.py
@@ -1,3 +1,4 @@
+import kubernetes.client
 from kubernetes.client import AppsV1Api
 from kubernetes.client.rest import ApiException
 import pytest
@@ -6,15 +7,6 @@
 from tests import kube_utils
 from tests import utils
 
-# Fixtures {{{
-
-
-@pytest.fixture
-def apps_client(k8s_apiclient):
-    return AppsV1Api(api_client=k8s_apiclient)
-
-
-# }}}
 
 # Scenarios {{{
@@ -142,8 +134,12 @@ def _wait_for_pod():
         _wait_for_pod, times=10, wait=3, name="wait for pod labeled '{}'".format(label)
     )
 
+    # NOTE: We use the Kubernetes client instead of DynamicClient as it
+    # eases retrieving Pod logs
+    client = kubernetes.client.CoreV1Api(api_client=k8s_client.client)
+
     for container in pod.spec.containers:
-        logs = k8s_client.read_namespaced_pod_log(
+        logs = client.read_namespaced_pod_log(
             pod.metadata.name, namespace, container=container.name
         )
 
@@ -154,11 +150,12 @@ def _wait_for_pod():
 
 @then("the static Pod in the namespace runs on nodes")
 def check_static_pod(k8s_client, name, namespace, role):
+    node_k8s_client = k8s_client.resources.get(api_version="v1", kind="Node")
     if role == "all":
-        nodes = k8s_client.list_node()
+        nodes = node_k8s_client.get()
     else:
         role_label = "node-role.kubernetes.io/{}=".format(role)
-        nodes = k8s_client.list_node(label_selector=role_label)
+        nodes = node_k8s_client.get(label_selector=role_label)
 
     pod_names = ["{}-{}".format(name, node.metadata.name) for node in nodes.items]
     for pod_name in pod_names:
@@ -185,18 +182,18 @@ def check_static_pod(k8s_client, name, namespace, role):
         "replicas available"
     )
 )
-def check_deployment(apps_client, name, namespace):
+def check_deployment(k8s_client, name, namespace):
     def _wait_for_deployment():
         try:
-            deploy = apps_client.read_namespaced_deployment(
-                name=name, namespace=namespace
-            )
+            deploy = k8s_client.resources.get(
+                api_version="apps/v1", kind="Deployment"
+            ).get(name=name, namespace=namespace)
         except ApiException as exc:
             if exc.status == 404:
                 pytest.fail("Deployment '{}/{}' does not exist".format(namespace, name))
             raise
 
-        assert deploy.spec.replicas == deploy.status.available_replicas, (
+        assert deploy.spec.replicas == deploy.status.availableReplicas, (
             "Deployment is not ready yet (desired={desired}, "
-            "available={status.available_replicas}, "
-            "unavailable={status.unavailable_replicas})"
+            "available={status.availableReplicas}, "
+            "unavailable={status.unavailableReplicas})"
@@ -217,12 +214,12 @@ def _wait_for_deployment():
         "Pods ready"
     )
 )
-def check_daemonset(apps_client, name, namespace):
+def check_daemonset(k8s_client, name, namespace):
     def _wait_for_daemon_set():
         try:
-            daemon_set = apps_client.read_namespaced_daemon_set(
-                name=name, namespace=namespace
-            )
+            daemon_set = k8s_client.resources.get(
+                api_version="apps/v1", kind="DaemonSet"
+            ).get(name=name, namespace=namespace)
         except ApiException as exc:
             if exc.status == 404:
                 pytest.fail("DaemonSet '{}/{}' does not exist".format(namespace, name))
             raise
@@ -250,12 +247,12 @@ def _wait_for_daemon_set():
     "the StatefulSet in the namespace has all desired "
     "replicas available"
 )
-def check_statefulset(apps_client, name, namespace):
+def check_statefulset(k8s_client, name, namespace):
     def _wait_for_stateful_set():
         try:
-            stateful_set = apps_client.read_namespaced_stateful_set(
-                name=name, namespace=namespace
-            )
+            stateful_set = k8s_client.resources.get(
+                api_version="apps/v1", kind="StatefulSet"
+            ).get(name=name, namespace=namespace)
         except ApiException as exc:
             if exc.status == 404:
                pytest.fail(
@@ -264,10 +261,10 @@ def _wait_for_stateful_set():
             raise
 
         desired = stateful_set.spec.replicas
-        ready = stateful_set.status.ready_replicas
+        ready = stateful_set.status.readyReplicas
         assert desired == ready, (
             "StatefulSet is not ready yet (desired={}, ready={})"
-        ).format(desired, available)
+        ).format(desired, ready)
 
     utils.retry(
         _wait_for_stateful_set,
diff --git a/tests/post/steps/test_seccomp.py b/tests/post/steps/test_seccomp.py
index 034b325b0c..5e694bf5f3 100644
--- a/tests/post/steps/test_seccomp.py
+++ b/tests/post/steps/test_seccomp.py
@@ -47,12 +47,14 @@ def utils_pod(k8s_client, utils_image):
         "test": "seccomp1",
     }
 
-    k8s_client.create_namespaced_pod(body=manifest, namespace="default")
+    pod_k8s_client = k8s_client.resources.get(api_version="v1", kind="Pod")
+
+    pod_k8s_client.create(body=manifest, namespace="default")
 
     try:
         yield pod_name
     finally:
-        k8s_client.delete_namespaced_pod(
+        pod_k8s_client.delete(
             name=pod_name,
             namespace="default",
             body=client.V1DeleteOptions(
diff --git a/tests/post/steps/test_service_configuration.py b/tests/post/steps/test_service_configuration.py
index fe5070fb79..94048b82d0 100644
--- a/tests/post/steps/test_service_configuration.py
+++ b/tests/post/steps/test_service_configuration.py
@@ -5,8 +5,6 @@
 import pytest
 from pytest_bdd import scenario, given, then, when, parsers
 
-from kubernetes.client import AppsV1Api
-
 from tests import utils
 
 # Scenarios {{{
@@ -88,7 +86,7 @@ def apply_csc(csc, state):
     )
 )
 def get_deployments(
-    k8s_apiclient,
+    k8s_client,
     value,
     path,
     deployment,
     namespace,
 ):
     def _wait_for_deployment():
         try:
-            k8s_appsv1_client = AppsV1Api(api_client=k8s_apiclient)
-            response = k8s_appsv1_client.read_namespaced_deployment(
-                name=deployment, namespace=namespace
-            ).to_dict()
+            response = (
+                k8s_client.resources.get(api_version="apps/v1", kind="Deployment")
+                .get(name=deployment, namespace=namespace)
+                .to_dict()
+            )
         except Exception as exc:
             pytest.fail(
                 "Unable to read {} Deployment with error: {!s}".format(deployment, exc)
             )
@@ -164,7 +163,7 @@ class ClusterServiceConfiguration:
     CSC_KEY = "config.yaml"
 
     def __init__(self, k8s_client, name, namespace, host, ssh_config, version):
-        self.client = k8s_client
+        self.client = k8s_client.resources.get(api_version="v1", kind="ConfigMap")
         self.name = name
         self.namespace = namespace
         self.host = host
@@ -178,7 +177,7 @@ def get(self):
             return self.csc
 
         try:
-            response = self.client.read_namespaced_config_map(self.name, self.namespace)
+            response = self.client.get(name=self.name, namespace=self.namespace)
         except Exception as exc:
             raise ClusterServiceConfigurationError(
                 "Unable to read {} ConfigMap in namespace {} with error: {!s}".format(
@@ -225,7 +224,7 @@ def update(self, config, apply_config=True):
         }
 
         try:
-            self.client.patch_namespaced_config_map(self.name, self.namespace, patch)
+            self.client.patch(name=self.name, namespace=self.namespace, body=patch)
         except Exception as exc:
             raise ClusterServiceConfigurationError(
                 "Unable to patch ConfigMap {} in namespace {} with error {!s}".format(
diff --git a/tests/post/steps/test_solutions.py b/tests/post/steps/test_solutions.py
index ed17d43d67..a4c53b2128 100644
--- a/tests/post/steps/test_solutions.py
+++ b/tests/post/steps/test_solutions.py
@@ -7,8 +7,6 @@
 import pytest
 from pytest_bdd import scenario, given, then, when, parsers
 
-import kubernetes.client
-from kubernetes.client import AppsV1Api
 from kubernetes.client.rest import ApiException
 
 from tests import kube_utils
@@ -53,7 +51,7 @@ def test_deploy_solution(host):
 
 
 @given(parsers.parse("no Solution '{name}' is imported"))
-def is_absent_solution(host, name, k8s_client):
+def is_absent_solution(host, name):
     with host.sudo():
         assert (
             name not in host.mount_point.get_mountpoints()
@@ -285,7 +283,7 @@ def read_solution_environment(k8s_client, name):
 
 
 @then(parsers.parse("we have no Solution '{name}' archive mounted"))
-def no_solution_mountpoint(host, name, k8s_client):
+def no_solution_mountpoint(host, name):
     with host.sudo():
         assert (
             name not in host.mount_point.get_mountpoints()
@@ -323,7 +321,7 @@ def no_solution_config(host):
 
 def get_configmap(k8s_client, name, namespace):
     try:
-        response = k8s_client.read_namespaced_config_map(name, namespace).to_dict()
+        response = (
+            k8s_client.resources.get(api_version="v1", kind="ConfigMap")
+            .get(name=name, namespace=namespace)
+            .to_dict()
+        )
     except Exception as exc:
         if isinstance(exc, ApiException) and exc.status == 404:
             return None
@@ -333,7 +335,7 @@ def get_configmap(k8s_client, name, namespace):
 
 def get_environment(k8s_client, name):
     try:
-        response = k8s_client.list_namespace(
+        response = k8s_client.resources.get(api_version="v1", kind="Namespace").get(
             label_selector="{}={}".format(ENVIRONMENT_LABEL, name)
         )
     except (ApiException) as exc:
diff --git a/tests/post/steps/test_static_pods.py b/tests/post/steps/test_static_pods.py
index 5ba899e1ac..ce4778011a 100644
--- a/tests/post/steps/test_static_pods.py
+++ b/tests/post/steps/test_static_pods.py
@@ -106,7 +106,9 @@ def set_up_static_pod(host, nodename, k8s_client, utils_image, transient_files):
         name="wait for Pod '{}'".format(fullname),
     )
 
-    pod = k8s_client.read_namespaced_pod(name=fullname, namespace="default")
+    pod = k8s_client.resources.get(api_version="v1", kind="Pod").get(
+        name=fullname, namespace="default"
+    )
 
     return pod.metadata.uid
@@ -150,7 +152,9 @@ def wait_for_pod_reloaded():
         name="wait for Pod '{}' to be reloaded".format(fullname),
     )
 
-    pod = k8s_client.read_namespaced_pod(name=fullname, namespace="default")
+    pod = k8s_client.resources.get(api_version="v1", kind="Pod").get(
+        name=fullname, namespace="default"
+    )
     assert pod.metadata.uid != static_pod_id
diff --git a/tests/post/steps/test_versions.py b/tests/post/steps/test_versions.py
index 3e904d7541..537041c21b 100644
--- a/tests/post/steps/test_versions.py
+++ b/tests/post/steps/test_versions.py
@@ -1,4 +1,3 @@
-from kubernetes.client import VersionApi
 from pytest_bdd import scenario, then
 
 from tests import versions
@@ -12,12 +11,11 @@ def test_cluster_version(host):
 
 # Then
 @then("the Kubernetes version deployed is the same as the configured one")
-def check_kubernetes_version(k8s_apiclient):
+def check_kubernetes_version(k8s_client):
     # NOTE: the `vX.Y.Z` format is used by Kubernetes, not our buildchain
     configured_version = "v{}".format(versions.K8S_VERSION)
 
-    k8s_client = VersionApi(api_client=k8s_apiclient)
-    observed_version = k8s_client.get_code().git_version
+    observed_version = k8s_client.version["kubernetes"]["gitVersion"]
 
     assert configured_version == observed_version, (
         "The running version of Kubernetes is '{}', while the expected version"
diff --git a/tests/post/steps/test_volume.py b/tests/post/steps/test_volume.py
index 8c6c374ca9..0b40cff3c4 100644
--- a/tests/post/steps/test_volume.py
+++ b/tests/post/steps/test_volume.py
@@ -301,7 +301,7 @@ def _check_pv_label():
         pv = pv_client.get(name)
         assert pv is not None, "PersistentVolume {} not found".format(name)
         labels = pv.metadata.labels
-        assert key in labels, "Label {} is missing".format(key)
+        assert key in labels.keys(), "Label {} is missing".format(key)
         assert (
             labels[key] == value
         ), "Unexpected value for label {}: expected {}, got {}".format(
@@ -388,10 +388,14 @@ def check_volume_deletion_marker(name, volume_client):
 def check_file_content_inside_pod(volume_name, path, content, k8s_client):
     name = "{}-pod".format(volume_name)
 
+    # NOTE: We use the Kubernetes client instead of DynamicClient as it
+    # eases the execution of commands in a Pod
+    client = k8s.client.CoreV1Api(api_client=k8s_client.client)
+
     def _check_file_content():
         try:
             result = k8s.stream.stream(
-                k8s_client.connect_get_namespaced_pod_exec,
+                client.connect_get_namespaced_pod_exec,
                 name=name,
                 namespace="default",
                 command=["cat", path],
@@ -420,7 +424,9 @@ def _check_file_content():
 def check_storage_is_created(context, host, name):
     volume = context.get(name)
     assert volume is not None, "volume {} not found in context".format(name)
-    assert "sparseLoopDevice" in volume["spec"], "unsupported volume type for this step"
+    assert (
+        "sparseLoopDevice" in volume["spec"].keys()
+    ), "unsupported volume type for this step"
     uuid = volume["metadata"]["uid"]
     capacity = volume["spec"]["sparseLoopDevice"]["size"]
     # Check that the sparse file exists and has the proper size.
@@ -438,7 +444,9 @@ def check_storage_is_deleted(context, host, name):
     volume = context.get(name)
     assert volume is not None, "volume {} not found in context".format(name)
-    assert "sparseLoopDevice" in volume["spec"], "unsupported volume type for this step"
+    assert (
+        "sparseLoopDevice" in volume["spec"].keys()
+    ), "unsupported volume type for this step"
     uuid = volume["metadata"]["uid"]
     # Check that the sparse file is deleted.
     path = "/var/lib/metalk8s/storage/sparse/{}".format(uuid)
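
---

For readers new to `kubernetes.dynamic`, the access pattern the reworked
fixtures rely on is sketched below. This is a minimal illustration, not part
of the patch itself; the `admin.conf` kubeconfig path is a placeholder.

```
import kubernetes.config
import kubernetes.dynamic

# Build a low-level ApiClient from a kubeconfig, then wrap it in a
# DynamicClient ("admin.conf" is a placeholder path for this sketch).
api_client = kubernetes.config.new_client_from_config(
    config_file="admin.conf", persist_config=False
)
dyn_client = kubernetes.dynamic.DynamicClient(api_client)

# One Resource handle per (api_version, kind); the CRUD verbs
# (get/create/patch/delete) hang off that handle.
nodes = dyn_client.resources.get(api_version="v1", kind="Node")

for node in nodes.get().items:
    # Returned objects expose the server-side camelCase field names
    # (e.g. creationTimestamp) rather than the snake_case attributes of
    # the typed models, hence the attribute renames throughout this patch.
    print(node.metadata.name, node.metadata.creationTimestamp)
```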