diff --git a/cloudman/clusterman/clients/kube_client.py b/cloudman/clusterman/clients/kube_client.py
index 4a390617..45457180 100644
--- a/cloudman/clusterman/clients/kube_client.py
+++ b/cloudman/clusterman/clients/kube_client.py
@@ -100,7 +100,7 @@ def _get_job_pods_in_node(self, node_name, state):
              f"spec.nodeName={node_name},status.phase={state}",
              "--selector", "job-name", "-o", "yaml"])
 
-    def wait_till_jobs_complete(self, node, timeout=3600):
+    def wait_till_jobs_complete(self, node, timeout=3600*24*7):
         name = node.get('metadata', {}).get('name')
         retryer = tenacity.Retrying(
             stop=tenacity.stop_after_delay(timeout),
@@ -117,6 +117,12 @@ def drain(self, node, force=True, timeout=120, ignore_daemonsets=True):
             f"--ignore-daemonsets={'true' if ignore_daemonsets else 'false'}"]
         )
 
+    def delete(self, node):
+        name = node.get('metadata', {}).get('name')
+        return helpers.run_command(
+            ["kubectl", "delete", "node", name]
+        )
+
 
 class KubeSecretService(KubeService):
diff --git a/cloudman/clusterman/clients/rancher.py b/cloudman/clusterman/clients/rancher.py
deleted file mode 100644
index e40f10ed..00000000
--- a/cloudman/clusterman/clients/rancher.py
+++ /dev/null
@@ -1,81 +0,0 @@
-import requests
-from string import Template
-from requests.auth import AuthBase
-
-
-class RancherAuth(AuthBase):
-
-    def __init__(self, client):
-        # setup any auth-related data here
-        self.client = client
-
-    def __call__(self, r):
-        # modify and return the request
-        r.headers['content-type'] = "application/json"
-        r.headers['authorization'] = "Bearer " + self.client.api_key
-        return r
-
-
-class RancherClient(object):
-
-    INSTALLED_APP_URL = ("$rancher_url/v3/projects/$project_id/app"
-                         "?targetNamespace=galaxy-ns")
-    NODE_COMMAND_URL = "$rancher_url/v3/clusterregistrationtoken"
-    NODE_LIST_URL = "$rancher_url/v3/nodes/?clusterId=$cluster_id"
-    NODE_DRAIN_URL = "$rancher_url/v3/nodes/$node_id?action=drain"
-    NODE_DELETE_URL = "$rancher_url/v3/nodes/$node_id"
-
-    def __init__(self, rancher_url, api_key, cluster_id, project_id):
-        self.rancher_url = rancher_url
-        self.api_key = api_key
-        self.cluster_id = cluster_id
-        self.project_id = project_id
-
-    def _format_url(self, url):
-        result = Template(url).safe_substitute({
-            'rancher_url': self.rancher_url,
-            'cluster_id': self.cluster_id,
-            'project_id': self.project_id
-        })
-        return result
-
-    def _get_auth(self):
-        return RancherAuth(self)
-
-    def _api_get(self, url, data):
-        return requests.get(self._format_url(url), auth=self._get_auth(),
-                            verify=False, json=data).json()
-
-    def _api_post(self, url, data, json_response=True):
-        r = requests.post(self._format_url(url), auth=self._get_auth(),
-                          verify=False, json=data)
-        if json_response:
-            return r.json()
-        else:
-            return r
-
-    def _api_delete(self, url, data):
-        return requests.delete(self._format_url(url), auth=self._get_auth(),
-                               verify=False, json=data).json()
-
-    def get_cluster_registration_command(self):
-        return self._api_post(
-            self.NODE_COMMAND_URL,
-            data={"type": "clusterRegistrationToken",
-                  "clusterId": f"{self.cluster_id}"}
-        ).get('nodeCommand')
-
-    def get_nodes(self):
-        return self._api_get(self.NODE_LIST_URL, data=None)
-
-    def find_node(self, ip):
-        matches = [n for n in self.get_nodes()['data']
-                   if n.get('ipAddress') == ip or
-                   n.get('externalIpAddress') == ip]
-        return matches[0]['id'] if matches else None
-
-    def delete_node(self, node_id):
-        node_url = Template(self.NODE_DELETE_URL).safe_substitute({
-            'node_id': node_id
-        })
-        return self._api_delete(node_url, data=None)
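With the Rancher REST client gone, node removal is handled entirely through `kubectl` via `KubeClient`. A minimal sketch of how the new `KubeNodeService` methods compose into a decommission sequence (it mirrors the plugin's `delete()` later in this diff; the `decommission_node` wrapper itself is illustrative, not part of the change):

```python
from clusterman.clients.kube_client import KubeClient

def decommission_node(node_ip):
    """Illustrative wrapper: cordon, wait, drain, then delete a node."""
    kube_client = KubeClient()
    k8s_node = kube_client.nodes.find(node_ip)[0]
    try:
        # stop new jobs being scheduled on this node
        kube_client.nodes.cordon(k8s_node)
        # let existing jobs finish (default timeout is now 7 days)
        kube_client.nodes.wait_till_jobs_complete(k8s_node)
        # evict remaining pods
        kube_client.nodes.drain(k8s_node, timeout=120)
    finally:
        # remove the node object from the cluster
        kube_client.nodes.delete(k8s_node)
```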
diff --git a/cloudman/clusterman/cluster_templates.py b/cloudman/clusterman/cluster_templates.py
index 5b328bef..80069b15 100644
--- a/cloudman/clusterman/cluster_templates.py
+++ b/cloudman/clusterman/cluster_templates.py
@@ -1,6 +1,5 @@
 import abc
 from rest_framework.exceptions import ValidationError
-from .clients.rancher import RancherClient
 from cloudlaunch import models as cl_models
 
 
@@ -32,44 +31,34 @@ def deactivate_autoscaling(self):
 
     @staticmethod
     def get_template_for(context, cluster):
-        if cluster.cluster_type == "KUBE_RANCHER":
-            return CMRancherTemplate(context, cluster)
+        if cluster.cluster_type == "KUBE_RKE":
+            return CMRKETemplate(context, cluster)
         else:
             raise KeyError("Cannon get cluster template for unknown cluster "
                            "type: %s" % cluster.cluster_type)
 
 
-class CMRancherTemplate(CMClusterTemplate):
+class CMRKETemplate(CMClusterTemplate):
 
     def __init__(self, context, cluster):
-        super(CMRancherTemplate, self).__init__(context, cluster)
-        settings = cluster.connection_settings.get('rancher_config')
-        self._rancher_url = settings.get('rancher_url')
-        self._rancher_api_key = settings.get('rancher_api_key')
-        self._rancher_cluster_id = settings.get('rancher_cluster_id')
-        self._rancher_project_id = settings.get('rancher_project_id')
+        super(CMRKETemplate, self).__init__(context, cluster)
+        settings = cluster.connection_settings.get('rke_config')
+        self._rke_registration_server = settings.get('rke_registration_server')
+        self._rke_registration_token = settings.get('rke_registration_token')
+        self._rke_cluster_id = settings.get('rke_cluster_id')
 
     @property
-    def rancher_url(self):
-        return self._rancher_url
+    def rke_registration_server(self):
+        return self._rke_registration_server
 
     @property
-    def rancher_api_key(self):
-        return self._rancher_api_key
+    def rke_registration_token(self):
+        return self._rke_registration_token
 
     @property
-    def rancher_cluster_id(self):
-        return self._rancher_cluster_id
+    def rke_cluster_id(self):
+        return self._rke_cluster_id
 
-    @property
-    def rancher_project_id(self):
-        return self._rancher_project_id
-
-    @property
-    def rancher_client(self):
-        return RancherClient(self.rancher_url, self.rancher_api_key,
-                             self.rancher_cluster_id,
-                             self.rancher_project_id)
 
     def _find_matching_vm_type(self, zone_model=None, default_vm_type=None,
                                min_vcpus=0, min_ram=0, vm_family=""):
@@ -114,30 +103,33 @@ def add_node(self, name, vm_type=None, zone=None, min_vcpus=0, min_ram=0, vm_fam
             target_zone=zone)
         params = {
             'name': name,
-            'application': 'cm_rancher_kubernetes_plugin',
+            'application': 'cm_rke_kubernetes_plugin',
             'deployment_target_id': deployment_target.id,
             'application_version': '0.1.0',
             'config_app': {
-                'rancher_action': 'add_node',
-                'config_rancher_kube': {
-                    'rancher_url': self.rancher_url,
-                    'rancher_api_key': self.rancher_api_key,
-                    'rancher_cluster_id': self.rancher_cluster_id,
-                    'rancher_project_id': self.rancher_project_id,
-                    'rancher_node_command': (
-                        self.rancher_client.get_cluster_registration_command()
-                        + " --worker")
+                'action': 'add_node',
+                'config_kube_rke': {
+                    'rke_registration_server': self.rke_registration_server,
+                    'rke_registration_token': self.rke_registration_token,
+                    'rke_cluster_id': self.rke_cluster_id
                 },
                 "config_appliance": {
                     "sshUser": "ubuntu",
                     "runner": "ansible",
-                    "repository": "https://github.com/CloudVE/ansible-docker-boot",
-                    "inventoryTemplate": "${host}\n\n"
-                    "[all:vars]\n"
-                    "ansible_ssh_port=22\n"
-                    "ansible_user='${user}'\n"
-                    "ansible_ssh_private_key_file=pk\n"
-                    "ansible_ssh_extra_args='-o StrictHostKeyChecking=no'\n"
+                    "repository": "https://github.com/CloudVE/cloudman-boot",
+                    "inventoryTemplate":
+                        "[controllers]\n\n"
+                        "[agents]\n"
+                        "${host}\n\n"
+                        "[rke_cluster:children]\n"
+                        "controllers\n"
+                        "agents\n\n"
+                        "[all:vars]\n"
+                        "ansible_ssh_port=22\n"
+                        "ansible_user='${user}'\n"
+                        "ansible_ssh_private_key_file=pk\n"
+                        "ansible_ssh_extra_args='-o StrictHostKeyChecking=no"
+                        " -o ControlMaster=no'\n"
                 },
                 'config_cloudlaunch': (settings.get('app_config', {})
                                        .get('config_cloudlaunch', {})),
@@ -166,4 +158,3 @@ def add_node(self, name, vm_type=None, zone=None, min_vcpus=0, min_ram=0, vm_fam
     def remove_node(self, node):
         return self.context.cloudlaunch_client.deployments.tasks.create(
             action='DELETE', deployment_pk=node.deployment.pk)
-
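The new `inventoryTemplate` registers each added VM as an RKE agent rather than a bare Docker host. Assuming the appliance runner substitutes `${host}` and `${user}` with `string.Template`-style semantics (the same `${...}` syntax the deleted Rancher client used), a rendered inventory for one worker would look like this (template abridged; host and user values are made up):

```python
from string import Template

INVENTORY_TEMPLATE = (
    "[controllers]\n\n"
    "[agents]\n"
    "${host}\n\n"
    "[rke_cluster:children]\n"
    "controllers\n"
    "agents\n\n"
    "[all:vars]\n"
    "ansible_ssh_port=22\n"
    "ansible_user='${user}'\n"
)

# example rendering for a single worker node
print(Template(INVENTORY_TEMPLATE).safe_substitute(
    host="10.1.1.37", user="ubuntu"))
```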
"repository": "https://github.com/CloudVE/cloudman-boot", + "inventoryTemplate": + "[controllers]\n\n" + "[agents]\n" + "${host}\n\n" + "[rke_cluster:children]\n" + "controllers\n" + "agents\n\n" + "[all:vars]\n" + "ansible_ssh_port=22\n" + "ansible_user='${user}'\n" + "ansible_ssh_private_key_file=pk\n" + "ansible_ssh_extra_args='-o StrictHostKeyChecking=no" + " -o ControlMaster=no'\n" }, 'config_cloudlaunch': (settings.get('app_config', {}) .get('config_cloudlaunch', {})), @@ -166,4 +158,3 @@ def add_node(self, name, vm_type=None, zone=None, min_vcpus=0, min_ram=0, vm_fam def remove_node(self, node): return self.context.cloudlaunch_client.deployments.tasks.create( action='DELETE', deployment_pk=node.deployment.pk) - diff --git a/cloudman/clusterman/fixtures/rancher_app_def.json b/cloudman/clusterman/fixtures/rancher_app_def.json index 4c9418cb..c477468d 100644 --- a/cloudman/clusterman/fixtures/rancher_app_def.json +++ b/cloudman/clusterman/fixtures/rancher_app_def.json @@ -1,17 +1,17 @@ [ { "model": "cloudlaunch.application", - "pk": "cm_rancher_kubernetes_plugin", + "pk": "cm_rke_kubernetes_plugin", "fields": { "added": "2016-06-27T22:10:17.212Z", "updated": "2017-12-22T21:38:44.060Z", - "name": "Rancher Kubernetes Plugin", + "name": "RKE Kubernetes Plugin", "status": "LIVE", - "summary": "A rancher kubernetes plugin for cloudman", + "summary": "A RKE kubernetes plugin for cloudman", "maintainer": "cloudve.org", - "description": "A rancher kubernetes plugin for cloudman", + "description": "A RKE kubernetes plugin for cloudman", "info_url": "", - "icon_url": "https://upload.wikimedia.org/wikipedia/commons/3/3a/Logo-ubuntu_no%28r%29-black_orange-hex.svg?download", + "icon_url": "https://docs.rke2.io/assets/logo-horizontal-rke.svg", "default_launch_config": "", "default_version": 1, "display_order": 1000, @@ -22,12 +22,12 @@ "model": "cloudlaunch.applicationversion", "pk": 1, "fields": { - "application": "cm_rancher_kubernetes_plugin", + "application": "cm_rke_kubernetes_plugin", "version": "0.1.0", "frontend_component_path": "", "frontend_component_name": "", - "backend_component_name": "clusterman.plugins.rancher_kubernetes_app.RancherKubernetesApp", - "default_launch_config": "{\r\n \"config_cloudlaunch\":{\r\n \"firewall\":[\r\n {\r\n \"securityGroup\":\"cloudlaunch-vm\",\r\n \"rules\":[\r\n {\r\n \"protocol\":\"tcp\",\r\n \"from\":\"22\",\r\n \"to\":\"22\",\r\n \"cidr\":\"0.0.0.0/0\"\r\n },\r\n {\r\n \"protocol\":\"tcp\",\r\n \"from\":\"443\",\r\n \"to\":\"443\",\r\n \"cidr\":\"0.0.0.0/0\"\r\n },\r\n {\r\n \"protocol\":\"udp\",\r\n \"from\":\"8472\",\r\n \"to\":\"8472\",\r\n \"cidr\":\"0.0.0.0/0\"\r\n },\r\n {\r\n \"protocol\":\"tcp\",\r\n \"from\":\"9099\",\r\n \"to\":\"9099\",\r\n \"cidr\":\"0.0.0.0/0\"\r\n },\r\n {\r\n \"protocol\":\"tcp\",\r\n \"from\":\"10254\",\r\n \"to\":\"10254\",\r\n \"cidr\":\"0.0.0.0/0\"\r\n },\r\n {\r\n \"protocol\":\"tcp\",\r\n \"from\":\"6443\",\r\n \"to\":\"6443\",\r\n \"cidr\":\"0.0.0.0/0\"\r\n }\r\n ]\r\n }\r\n ]\r\n }\r\n}", + "backend_component_name": "clusterman.plugins.rke_kubernetes_app.RKEKubernetesApp", + "default_launch_config": "{\r\n \"config_cloudlaunch\":{\r\n \"firewall\":[\r\n {\r\n \"securityGroup\":\"cloudlaunch-vm\",\r\n \"rules\":[\r\n {\r\n \"protocol\":\"tcp\",\r\n \"from\":\"22\",\r\n \"to\":\"22\",\r\n \"cidr\":\"0.0.0.0/0\"\r\n },\r\n {\r\n \"protocol\":\"tcp\",\r\n \"from\":\"443\",\r\n \"to\":\"443\",\r\n \"cidr\":\"0.0.0.0/0\"\r\n },\r\n ]\r\n }\r\n ]\r\n }\r\n}", "default_target": null } } diff --git 
diff --git a/cloudman/clusterman/management/commands/create_cluster.py b/cloudman/clusterman/management/commands/create_cluster.py
index 02e16216..e8686820 100644
--- a/cloudman/clusterman/management/commands/create_cluster.py
+++ b/cloudman/clusterman/management/commands/create_cluster.py
@@ -12,7 +12,7 @@
 class Command(BaseCommand):
     help = 'Creates a CloudMan cluster. Currently supported cluster' \
-           'types: RANCHER_KUBE. Specify rancher connection settings in yaml' \
+           ' types: KUBE_RKE. Specify RKE connection settings in yaml ' \
            'format in the settings_file.'
 
     def add_arguments(self, parser):
diff --git a/cloudman/clusterman/management/commands/import_cloud_data.py b/cloudman/clusterman/management/commands/import_cloud_data.py
index 44a48e78..5a074ad4 100644
--- a/cloudman/clusterman/management/commands/import_cloud_data.py
+++ b/cloudman/clusterman/management/commands/import_cloud_data.py
@@ -97,9 +97,9 @@ def load_cloud_data(json_data):
         image_obj, _ = cl_models.Image.objects.get_or_create(
             name=name,
             defaults={**image, "region": region_obj})
-        # connect rancher app as target
+        # connect rke app as target
         version = cl_models.ApplicationVersion.objects.filter(
-            application='cm_rancher_kubernetes_plugin').first()
+            application='cm_rke_kubernetes_plugin').first()
         target = cl_models.CloudDeploymentTarget.objects.filter(
             target_zone=zone_obj).first()
         cl_models.ApplicationVersionCloudConfig.objects.create(
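For reference, the command is exercised exactly this way by the management-command tests at the end of this diff; a minimal sketch (the settings-file path is illustrative, and the `cm_models` import path is assumed from the tests' alias):

```python
from django.core.management import call_command
from clusterman import models as cm_models  # alias as used in the tests

call_command('create_cluster', 'test_cluster', 'KUBE_RKE',
             '/path/to/initial_cluster_data.yaml')  # RKE settings in yaml
cluster = cm_models.CMCluster.objects.get(name='test_cluster')
assert cluster.cluster_type == 'KUBE_RKE'
```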
diff --git a/cloudman/clusterman/plugins/rancher_kubernetes_app.py b/cloudman/clusterman/plugins/rke_kubernetes_app.py
similarity index 51%
rename from cloudman/clusterman/plugins/rancher_kubernetes_app.py
rename to cloudman/clusterman/plugins/rke_kubernetes_app.py
index 8e80ba60..ae6b81b2 100644
--- a/cloudman/clusterman/plugins/rancher_kubernetes_app.py
+++ b/cloudman/clusterman/plugins/rke_kubernetes_app.py
@@ -1,13 +1,10 @@
 """Plugin implementation for a simple web application."""
-import time
-
 from celery.utils.log import get_task_logger
 from cloudlaunch.backend_plugins.base_vm_app import BaseVMAppPlugin
 from cloudlaunch.backend_plugins.cloudman2_app import get_iam_handler_for
 from cloudlaunch.configurers import AnsibleAppConfigurer
 
-from clusterman.clients.rancher import RancherClient
 from clusterman.clients.kube_client import KubeClient
 
 from rest_framework.serializers import ValidationError
@@ -22,23 +19,18 @@ def get_required_val(data, name, message):
     return val
 
 
-class RancherKubernetesApp(BaseVMAppPlugin):
+class RKEKubernetesApp(BaseVMAppPlugin):
     """
-    Rancher Kubernetes Appliance.
+    RKE Kubernetes Appliance.
     """
     @staticmethod
     def validate_app_config(provider, name, cloud_config, app_config):
-        rancher_config = get_required_val(
-            app_config, "config_rancher_kube", "Rancher configuration data"
-            " must be provided. config_rancher_kube entry not found in"
+        rke_config = get_required_val(
+            app_config, "config_kube_rke", "RKE configuration data"
+            " must be provided. config_kube_rke entry not found in"
             " app_config.")
-        #user_data = "#!/bin/bash\n"
-        #user_data += get_required_val(
-        #    rancher_config, "rancher_node_command",
-        #    "The rancher node command for adding the worker node must be"
-        #    "included as part of config_rancher_kube")
-        #user_data += "\n"
-        #return user_data
+        assert 'rke_registration_server' in rke_config
+        assert 'rke_registration_token' in rke_config
         return app_config
 
     def deploy(self, name, task, app_config, provider_config, **kwargs):
@@ -53,12 +45,6 @@ def deploy(self, name, task, app_config, provider_config, **kwargs):
             name, task, app_config, provider_config)
         return result
 
-    def _create_rancher_client(self, rancher_cfg):
-        return RancherClient(rancher_cfg.get('rancher_url'),
-                             rancher_cfg.get('rancher_api_key'),
-                             rancher_cfg.get('rancher_cluster_id'),
-                             rancher_cfg.get('rancher_project_id'))
-
     def delete(self, provider, deployment):
         """
         Delete resource(s) associated with the supplied deployment.
@@ -69,33 +55,29 @@ def delete(self, provider, deployment):
 
         *Note* that this method will delete resource(s) associated with
         the deployment - this is an un-recoverable action.
         """
-        app_config = deployment.get('app_config')
-        rancher_cfg = app_config.get('config_rancher_kube')
-        rancher_client = self._create_rancher_client(rancher_cfg)
         node_ip = deployment.get(
             'launch_result', {}).get('cloudLaunch', {}).get('publicIP')
         try:
-            rancher_node_id = rancher_client.find_node(ip=node_ip)
-            if rancher_node_id:
-                try:
-                    kube_client = KubeClient()
-                    k8s_node = kube_client.nodes.find(node_ip)[0]
-                    # stop new jobs being scheduled on this node
-                    kube_client.nodes.cordon(k8s_node)
-                    # let existing jobs finish
-                    kube_client.nodes.wait_till_jobs_complete(k8s_node)
-                    # drain remaining pods
-                    kube_client.nodes.drain(k8s_node, timeout=120)
-                finally:
-                    # remove node from rancher
-                    rancher_client.delete_node(rancher_node_id)
+            kube_client = KubeClient()
+            k8s_node = kube_client.nodes.find(node_ip)[0]
+            try:
+                # stop new jobs being scheduled on this node
+                kube_client.nodes.cordon(k8s_node)
+                # let existing jobs finish
+                kube_client.nodes.wait_till_jobs_complete(k8s_node)
+                # drain remaining pods
+                kube_client.nodes.drain(k8s_node, timeout=120)
+
+            finally:
+                # delete the k8s node
+                kube_client.nodes.delete(k8s_node)
         finally:
             # delete the VM
             return super().delete(provider, deployment)
 
     def _get_configurer(self, app_config):
         # CloudMan2 can only be configured with ansible
-        return RancherKubernetesAnsibleAppConfigurer()
+        return RKEKubernetesAnsibleAppConfigurer()
 
     def _provision_host(self, name, task, app_config, provider_config):
         provider = provider_config.get('cloud_provider')
@@ -108,26 +90,19 @@ def _provision_host(self, name, task, app_config, provider_config):
             provider_config['extra_provider_args'] = \
                 handler.create_iam_policy()
         result = super()._provision_host(name, task, app_config,
                                          provider_config)
-        # Add required cluster tag for AWS
-        if provider.PROVIDER_ID == "aws":
-            inst_id = result['cloudLaunch'].get('instance').get('id')
-            cluster_id = app_config.get('config_rancher_kube', {}).get(
-                'rancher_cluster_id')
-            inst = provider.compute.instances.get(inst_id)
-            # pylint:disable=protected-access
-            inst._ec2_instance.create_tags(
-                Tags=[{'Key': f'kubernetes.io/cluster/{cluster_id}',
-                       'Value': "owned"}])
         return result
 
 
-class RancherKubernetesAnsibleAppConfigurer(AnsibleAppConfigurer):
+class RKEKubernetesAnsibleAppConfigurer(AnsibleAppConfigurer):
     """Add CloudMan2 specific vars to playbook."""
 
     def configure(self, app_config, provider_config):
-        playbook_vars = [
-            ('ansible_shell_command', app_config.get('config_rancher_kube', {}).get(
-                'rancher_node_command'))
-        ]
+        playbook_vars = {
+            'kube_cloud_provider': provider_config.get('cloud_provider'),
+            'rke_registration_server': app_config.get('config_kube_rke', {}).get(
+                'rke_registration_server'),
+            'rke_registration_token': app_config.get('config_kube_rke', {}).get(
+                'rke_registration_token')
+        }
         return super().configure(app_config, provider_config,
                                  playbook_vars=playbook_vars)
diff --git a/cloudman/clusterman/tests/data/initial_cluster_data_aws.yaml b/cloudman/clusterman/tests/data/initial_cluster_data_aws.yaml
index 5d20bf32..d61634ba 100644
--- a/cloudman/clusterman/tests/data/initial_cluster_data_aws.yaml
+++ b/cloudman/clusterman/tests/data/initial_cluster_data_aws.yaml
@@ -63,8 +63,7 @@ host_config:
     -----END PRIVATE KEY-----
   ssh_public_key: ssh-rsa AAAASomeKey/0DV
   ssh_user: ubuntu
-rancher_config:
-  rancher_url: https://127.0.0.1:4430
-  rancher_api_key: token-bf4j5:sometoken
-  rancher_cluster_id: c-abcd1
-  rancher_project_id: c-abcd1:p-7zr5p
+rke_config:
+  rke_registration_server: 10.1.1.210
+  rke_registration_token: token-bf4j5:sometoken
+  rke_cluster_id: cluster.hostname.com
diff --git a/cloudman/clusterman/tests/data/initial_cluster_data_azure.yaml b/cloudman/clusterman/tests/data/initial_cluster_data_azure.yaml
index cf99bc2f..d8509726 100644
--- a/cloudman/clusterman/tests/data/initial_cluster_data_azure.yaml
+++ b/cloudman/clusterman/tests/data/initial_cluster_data_azure.yaml
@@ -68,8 +68,8 @@ host_config:
     -----END PRIVATE KEY-----
   ssh_public_key: ssh-rsa AAAASomeKey/0DV
   ssh_user: ubuntu
-rancher_config:
-  rancher_url: https://127.0.0.1:4430
-  rancher_api_key: token-bf4j5:sometoken
-  rancher_cluster_id: c-abcd1
-  rancher_project_id: c-abcd1:p-7zr5p
+rke_config:
+  rke_registration_server: 10.1.1.210
+  rke_registration_token: token-bf4j5:sometoken
+  rke_cluster_id: cluster.hostname.com
+
diff --git a/cloudman/clusterman/tests/data/initial_cluster_data_gcp.yaml b/cloudman/clusterman/tests/data/initial_cluster_data_gcp.yaml
index 0b37f8f7..b9d7452e 100644
--- a/cloudman/clusterman/tests/data/initial_cluster_data_gcp.yaml
+++ b/cloudman/clusterman/tests/data/initial_cluster_data_gcp.yaml
@@ -1,4 +1,4 @@
 app_config:
   config_appliance:
     inventoryTemplate:
     repository: https://github.com/CloudVE/ansible-cloudman2
@@ -65,8 +65,7 @@ host_config:
     -----END PRIVATE KEY-----
   ssh_public_key: ssh-rsa AAAASomeKey/0DV
   ssh_user: ubuntu
-rancher_config:
-  rancher_url: https://127.0.0.1:4430
-  rancher_api_key: token-bf4j5:sometoken
-  rancher_cluster_id: c-abcd1
-  rancher_project_id: c-abcd1:p-7zr5p
+rke_config:
+  rke_registration_server: 10.1.1.210
+  rke_registration_token: token-bf4j5:sometoken
+  rke_cluster_id: cluster.hostname.com
diff --git a/cloudman/clusterman/tests/data/initial_cluster_data_openstack.yaml b/cloudman/clusterman/tests/data/initial_cluster_data_openstack.yaml
index 9aed726e..7e803833 100644
--- a/cloudman/clusterman/tests/data/initial_cluster_data_openstack.yaml
+++ b/cloudman/clusterman/tests/data/initial_cluster_data_openstack.yaml
@@ -66,8 +66,7 @@ host_config:
    -----END PRIVATE KEY-----
   ssh_public_key: ssh-rsa AAAASomeKey/0DV
   ssh_user: ubuntu
-rancher_config:
-  rancher_url: https://127.0.0.1:4430
-  rancher_api_key: token-bf4j5:sometoken
-  rancher_cluster_id: c-abcd1
-  rancher_project_id: c-abcd1:p-7zr5p
+rke_config:
+  rke_registration_server: 10.1.1.210
+  rke_registration_token: token-bf4j5:sometoken
+  rke_cluster_id: cluster.hostname.com
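All four cloud fixtures now share the same `rke_config` block, which is exactly the contract `RKEKubernetesApp.validate_app_config` enforces. A small self-contained check, with values copied from the fixtures:

```python
import yaml

settings = yaml.safe_load("""
rke_config:
  rke_registration_server: 10.1.1.210
  rke_registration_token: token-bf4j5:sometoken
  rke_cluster_id: cluster.hostname.com
""")

# mirrors the asserts in RKEKubernetesApp.validate_app_config
rke_config = settings['rke_config']
assert 'rke_registration_server' in rke_config
assert 'rke_registration_token' in rke_config
```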
diff --git a/cloudman/clusterman/tests/mock_kubectl.py b/cloudman/clusterman/tests/mock_kubectl.py
index f3ec6cae..5f7cb011 100644
--- a/cloudman/clusterman/tests/mock_kubectl.py
+++ b/cloudman/clusterman/tests/mock_kubectl.py
@@ -177,6 +177,12 @@ def _create_parser(self):
         parser_delete_ns.add_argument(
             'namespace', type=str, help='namespace name')
         parser_delete_ns.set_defaults(func=self._kubectl_delete_namespace)
+        # Kubectl delete node
+        parser_delete_node = subparsers_delete.add_parser(
+            'node', help='delete a node')
+        parser_delete_node.add_argument(
+            'node', type=str, help='node name')
+        parser_delete_node.set_defaults(func=self._kubectl_delete_node)
 
         # kubectl cordon
         parser_cordon = subparsers.add_parser('cordon', help='cordon node')
@@ -251,6 +257,10 @@ def _kubectl_get_nodes(self, args):
             yaml.dump(response, stream=output, default_flow_style=False)
         return output.getvalue()
 
+    def _kubectl_delete_node(self, args):
+        # pretend to succeed
+        pass
+
     def _kubectl_get_pods(self, args):
         # get a copy of the response template
         response = dict(self.list_template)
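The mock gains a `delete node` subcommand using the same nested-argparse pattern as the rest of `_create_parser`. A standalone sketch of just that pattern (the real mock wires the handler to `self._kubectl_delete_node`, which pretends to succeed):

```python
import argparse

parser = argparse.ArgumentParser(prog='kubectl')
subparsers = parser.add_subparsers()

parser_delete = subparsers.add_parser('delete', help='delete resources')
subparsers_delete = parser_delete.add_subparsers()

parser_delete_node = subparsers_delete.add_parser('node', help='delete a node')
parser_delete_node.add_argument('node', type=str, help='node name')
parser_delete_node.set_defaults(func=lambda args: None)  # pretend to succeed

args = parser.parse_args(['delete', 'node', 'ip-10-1-1-1'])
args.func(args)
```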
diff --git a/cloudman/clusterman/tests/test_cluster_api.py b/cloudman/clusterman/tests/test_cluster_api.py
index 36f385ae..b631ac53 100644
--- a/cloudman/clusterman/tests/test_cluster_api.py
+++ b/cloudman/clusterman/tests/test_cluster_api.py
@@ -13,8 +13,6 @@
 from rest_framework import status
 from rest_framework.test import APITestCase, APILiveServerTestCase
 
-import responses
-
 from .client_mocker import ClientMocker
 
 
@@ -39,7 +37,7 @@ class CMClusterServiceTestBase(APITestCase):
 
     CLUSTER_DATA = {
         'name': 'testcluster2',
-        'cluster_type': 'KUBE_RANCHER',
+        'cluster_type': 'KUBE_RKE',
         'connection_settings': load_cluster_data()
     }
 
@@ -60,8 +58,6 @@ def setUp(self):
         self.client.force_login(
             User.objects.get_or_create(username='clusteradmin',
                                        is_superuser=True, is_staff=True)[0])
-        responses.add(responses.POST, 'https://127.0.0.1:4430/v3/clusters/c-abcd1?action=generateKubeconfig',
-                      json={'config': load_kube_config()}, status=200)
 
 
 class CMClusterServiceTests(CMClusterServiceTestBase):
@@ -111,7 +107,6 @@ def _check_no_clusters_exist(self):
         self.assertEqual(response.status_code, status.HTTP_200_OK)
         self.assertEqual(len(response.data['results']), 0)
 
-    @responses.activate
     def test_crud_cluster(self):
         """
         Ensure we can register a new cluster with cloudman.
@@ -224,23 +219,6 @@ def create_mock_provider(self, name, config):
         patcher4.start()
         self.addCleanup(patcher4.stop)
 
-        responses.add_passthru('http://localhost')
-        responses.add(responses.POST, 'https://127.0.0.1:4430/v3/clusterregistrationtoken',
-                      json={'nodeCommand': 'docker run rancher --worker'}, status=200)
-        responses.add(responses.GET, 'https://127.0.0.1:4430/v3/nodes/?clusterId=c-abcd1',
-                      json=
-                      {'data': [
-                          {'id': 'c-ph9ck:m-01606aca4649',
-                           'ipAddress': '10.1.1.1',
-                           'externalIpAddress': None
-                           }
-                      ]},
-                      status=200)
-        responses.add(responses.POST, 'https://127.0.0.1:4430/v3/nodes/c-ph9ck:m-01606aca4649?action=drain',
-                      json={}, status=200)
-        responses.add(responses.DELETE, 'https://127.0.0.1:4430/v3/nodes/c-ph9ck:m-01606aca4649',
-                      json={}, status=200)
-
         super().setUp()
 
 
@@ -281,19 +259,6 @@ def _check_cluster_node_exists(self, cluster_id, node_id):
         return response.data['id']
 
     def _delete_cluster_node(self, cluster_id, node_id):
-        responses.add(responses.GET, 'https://127.0.0.1:4430/v3/nodes/?clusterId=c-abcd1',
-                      json=
-                      {'data': [
-                          {'id': 'c-ph9ck:m-01606aca4649',
-                           'ipAddress': '10.1.1.1',
-                           'externalIpAddress': None
-                           }
-                      ]},
-                      status=200)
-        responses.add(responses.POST, 'https://127.0.0.1:4430/v3/nodes/c-ph9ck:m-01606aca4649?action=drain',
-                      json={}, status=200)
-        responses.add(responses.DELETE, 'https://127.0.0.1:4430/v3/nodes/c-ph9ck:m-01606aca4649',
-                      json={}, status=200)
         url = reverse('clusterman:node-detail', args=[cluster_id, node_id])
         return self.client.delete(url)
 
@@ -303,7 +268,6 @@ def _check_no_cluster_nodes_exist(self, cluster_id):
         self.assertEqual(response.status_code, status.HTTP_200_OK)
         self.assertEqual(len(response.data['results']), 0)
 
-    @responses.activate
     def test_crud_cluster_node(self):
         """
         Ensure we can register a new node with cloudman.
@@ -328,7 +292,6 @@ def test_crud_cluster_node(self):
         # check it no longer exists
         self._check_no_cluster_nodes_exist(cluster_id)
 
-    @responses.activate
     def test_node_create_unauthorized(self):
         cluster_id = self._create_cluster()
         self.client.force_login(
@@ -336,7 +299,6 @@ def test_node_create_unauthorized(self):
         response = self._create_cluster_node(cluster_id)
         self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data)
 
-    @responses.activate
     def test_node_delete_unauthorized(self):
         cluster_id = self._create_cluster()
         self._create_cluster_node(cluster_id)
@@ -416,7 +378,6 @@ def _check_no_autoscalers_exist(self, cluster_id):
         self.assertEqual(response.status_code, status.HTTP_200_OK)
         self.assertEqual(len(response.data['results']), 0)
 
-    @responses.activate
     def test_crud_autoscaler(self):
         """
         Ensure we can register a new node with cloudman.
@@ -445,7 +406,6 @@ def test_crud_autoscaler(self):
         # check it no longer exists
         self._check_no_autoscalers_exist(cluster_id)
 
-    @responses.activate
     def test_autoscaler_create_unauthorized(self):
         cluster_id = self._create_cluster()
         self.client.force_login(
@@ -453,7 +413,6 @@ def test_autoscaler_create_unauthorized(self):
         response = self._create_autoscaler(cluster_id)
         self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data)
 
-    @responses.activate
     def test_autoscaler_delete_unauthorized(self):
         cluster_id = self._create_cluster()
         self._create_autoscaler(cluster_id)
@@ -670,7 +629,6 @@ def _count_nodes_in_scale_group(self, cluster_id, autoscaler_id):
         return len([n for n in response.data['results']
                     if n['autoscaler'] == int(autoscaler_id)])
 
-    @responses.activate
     def test_scale_up_default(self):
         # create the parent cluster
         cluster_id = self._create_cluster()
@@ -690,7 +648,6 @@ def test_scale_up_default(self):
         self.assertEqual(len(vm_types), 1)
         self.assertTrue("m5.24xlarge" in vm_types)
 
-    @responses.activate
     def test_scale_down_default(self):
         # create the parent cluster
         cluster_id = self._create_cluster()
@@ -709,7 +666,6 @@ def test_scale_down_default(self):
         count = self._count_cluster_nodes(cluster_id)
         self.assertEqual(count, 0)
 
-    @responses.activate
     def test_scaling_while_deactivated(self):
         # create the parent cluster
         cluster_id = self._create_cluster()
@@ -738,7 +694,6 @@ def test_scaling_while_deactivated(self):
         count = self._count_cluster_nodes(cluster_id)
         self.assertEqual(count, 1)
 
-    @responses.activate
     def test_scaling_is_within_bounds(self):
         # create the parent cluster
         cluster_id = self._create_cluster()
@@ -772,7 +727,6 @@ def test_scaling_is_within_bounds(self):
         self.assertEqual(len(vm_types), 1)
         self.assertTrue("m1.medium" in vm_types)
 
-    @responses.activate
     def test_scaling_with_manual_nodes(self):
         # create the parent cluster
         cluster_id = self._create_cluster()
@@ -798,7 +752,6 @@ def test_scaling_with_manual_nodes(self):
         count = self._count_cluster_nodes(cluster_id)
         self.assertEqual(count, 1)
 
-    @responses.activate
     def test_scaling_within_zone_group(self):
         # create the parent cluster
         cluster_id = self._create_cluster()
@@ -875,7 +828,6 @@ def _login_as_autoscaling_user(self, impersonate_user=None):
         self.client.force_login(
             User.objects.get_or_create(username='autoscaletestuser')[0])
 
-    @responses.activate
     def test_autoscaling_user_scale_up_permissions(self):
         # create the parent cluster
         cluster_id = self._create_cluster()
@@ -887,7 +839,6 @@ def test_autoscaling_user_scale_up_permissions(self):
         count = self._count_cluster_nodes(cluster_id)
         self.assertEqual(count, 1)
 
-    @responses.activate
     def test_autoscaling_user_scale_down_permissions(self):
         # create the parent cluster
         cluster_id = self._create_cluster()
@@ -900,7 +851,6 @@ def test_autoscaling_user_scale_down_permissions(self):
         count = self._count_cluster_nodes(cluster_id)
         self.assertEqual(count, 0)
 
-    @responses.activate
     def test_autoscaling_user_no_extra_permissions(self):
         # create a parent cluster
         cluster_id = self._create_cluster()
@@ -915,7 +865,6 @@ def test_autoscaling_user_no_extra_permissions(self):
         response = self._update_cluster(cluster_id)
         self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data)
 
-    @responses.activate
     def test_autoscale_up_signal_unauthorized(self):
         cluster_id = self._create_cluster()
         self._create_autoscaler(cluster_id)
@@ -924,7 +873,6 @@ def test_autoscale_up_signal_unauthorized(self):
         response = self._signal_scaleup(cluster_id)
         self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data)
 
-    @responses.activate
     def test_autoscale_down_signal_unauthorized(self):
         cluster_id = self._create_cluster()
         self._create_autoscaler(cluster_id)
@@ -933,7 +881,6 @@ def test_autoscale_down_signal_unauthorized(self):
         response = self._signal_scaledown(cluster_id)
         self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN, response.data)
 
-    @responses.activate
     def test_create_autoscale_user_impersonate(self):
         # create a parent cluster
         cluster_id = self._create_cluster()
@@ -945,7 +892,6 @@ def test_create_autoscale_user_impersonate(self):
         count = self._count_cluster_nodes(cluster_id)
         self.assertEqual(count, 1)
 
-    @responses.activate
     def test_create_autoscale_user_impersonate_no_perms(self):
         # create a parent cluster
         cluster_id = self._create_cluster()
@@ -964,7 +910,6 @@ def test_create_autoscale_user_impersonate_no_perms(self):
         count = self._count_cluster_nodes(cluster_id)
         self.assertEqual(count, 0)
 
-    @responses.activate
     def test_scale_up_unschedulable(self):
         # create the parent cluster
         cluster_id = self._create_cluster()
@@ -982,4 +927,4 @@ def test_scale_up_unschedulable(self):
         # Ensure that the created node has the correct size
         vm_types = self._get_cluster_node_vm_types(cluster_id)
         self.assertEqual(len(vm_types), 1)
-        self.assertTrue("r5.24xlarge" in vm_types or "r5d.24xlarge" in vm_types)
+        self.assertTrue("m5.24xlarge" in vm_types)
diff --git a/cloudman/clusterman/tests/test_mgmt_commands.py b/cloudman/clusterman/tests/test_mgmt_commands.py
index ea1a3c74..f8044bdc 100644
--- a/cloudman/clusterman/tests/test_mgmt_commands.py
+++ b/cloudman/clusterman/tests/test_mgmt_commands.py
@@ -74,16 +74,16 @@ def test_create_cluster_no_args(self):
             call_command('create_cluster')
 
     def test_create_cluster(self):
-        call_command('create_cluster', 'test_cluster', 'KUBE_RANCHER', self.INITIAL_CLUSTER_DATA)
+        call_command('create_cluster', 'test_cluster', 'KUBE_RKE', self.INITIAL_CLUSTER_DATA)
         cluster = cm_models.CMCluster.objects.get(name='test_cluster')
-        self.assertEquals(cluster.cluster_type, 'KUBE_RANCHER')
+        self.assertEquals(cluster.cluster_type, 'KUBE_RKE')
 
     def test_create_cluster_existing(self):
         with transaction.atomic():
-            call_command('create_cluster', 'test_cluster', 'KUBE_RANCHER', self.INITIAL_CLUSTER_DATA)
+            call_command('create_cluster', 'test_cluster', 'KUBE_RKE', self.INITIAL_CLUSTER_DATA)
         self.assertEqual(cm_models.CMCluster.objects.all().count(), 1)
         with transaction.atomic():
-            call_command('create_cluster', 'test_cluster', 'KUBE_RANCHER', self.INITIAL_CLUSTER_DATA)
+            call_command('create_cluster', 'test_cluster', 'KUBE_RKE', self.INITIAL_CLUSTER_DATA)
         self.assertEqual(cm_models.CMCluster.objects.all().count(), 1)
diff --git a/cloudman/helmsman/tests/test_mgmt_commands.py b/cloudman/helmsman/tests/test_mgmt_commands.py
index cd473653..2b66ca20 100644
--- a/cloudman/helmsman/tests/test_mgmt_commands.py
+++ b/cloudman/helmsman/tests/test_mgmt_commands.py
@@ -59,7 +59,7 @@ def test_helmsman_load_config_template_registry(self):
         call_command('helmsman_load_config', self.INITIAL_HELMSMAN_DATA)
         template = hm_models.HMInstallTemplate.objects.get(name='terminalman')
         self.assertEqual(template.chart, "terminalman")
-        self.assertIn("domain", template.context)
+        self.assertIn("starting_dir", template.context)
 
     def test_update_install_template(self):
         call_command('helmsman_load_config', self.INITIAL_HELMSMAN_DATA)
diff --git a/requirements_test.txt b/requirements_test.txt
index 6b5db374..650d0347 100644
--- a/requirements_test.txt
+++ b/requirements_test.txt
@@ -1,5 +1,7 @@
+responses<=0.12
 git+https://github.com/celery/django-celery-results
 brotlipy
+paramiko
 git+https://github.com/CloudVE/cloudbridge#egg=cloudbridge[test]
 git+https://github.com/CloudVE/djcloudbridge#egg=djcloudbridge[test]
 # Leave cloudlaunch-cli before cloudlaunch-server due to coreapi version mismatch