From c094082d062e7a93a0e6440bf9b2b76ad3e03942 Mon Sep 17 00:00:00 2001
From: Pulkit Tandon
Date: Thu, 24 May 2018 09:04:41 +0530
Subject: [PATCH] Introduced Kubernetes introspect utilities.

Introduced Kubernetes introspect utilities and the corresponding checks
for k8s objects in kube manager.
Also added a few introspect checks in the agent.

Change-Id: I064a664383b126ee3467c15ab433ce360d02a21e
Closes-bug: #1773072
---
 common/connections.py                           |   5 +-
 common/contrail_test_init.py                    |   3 +
 fixtures/k8s/ingress.py                         |  21 +-
 fixtures/k8s/namespace.py                       |  23 +++
 fixtures/k8s/network_policy.py                  |  97 +++++++++-
 fixtures/k8s/pod.py                             |  18 ++
 fixtures/k8s/service.py                         |  26 ++-
 scripts/k8s_scripts/test_policy.py              | 120 +++++++++---
 scripts/k8s_scripts/test_service.py             |   6 +
 serial_scripts/k8s_scripts/test_policy.py       |  11 +-
 serial_scripts/k8s_scripts/test_service.py      |   2 +
 tcutils/agent/vna_introspect_utils.py           |  38 ++++
 .../config/kube_manager_introspect_utils.py     |  14 --
 tcutils/kubernetes/k8s_introspect_utils.py      | 183 ++++++++++++++++++
 14 files changed, 511 insertions(+), 56 deletions(-)
 delete mode 100755 tcutils/config/kube_manager_introspect_utils.py
 create mode 100644 tcutils/kubernetes/k8s_introspect_utils.py

diff --git a/common/connections.py b/common/connections.py
index f15a22109..4e4473ffe 100755
--- a/common/connections.py
+++ b/common/connections.py
@@ -5,7 +5,8 @@
 from tcutils.agent.vna_introspect_utils import *
 from tcutils.collector.opserver_introspect_utils import *
 from tcutils.collector.analytics_tests import *
-from tcutils.config.kube_manager_introspect_utils import KubeManagerInspect
+
+from tcutils.kubernetes.k8s_introspect_utils import KubeManagerInspect
 from vnc_api.vnc_api import *
 from tcutils.vdns.dns_introspect_utils import DnsAgentInspect
 from tcutils.util import custom_dict, get_plain_uuid
@@ -228,7 +229,7 @@ def get_kube_manager_h(self, refresh=False):
         if not getattr(self, '_kube_manager_inspect', None) or refresh:
             for km_ip in self.inputs.kube_manager_ips:
                 #contrail-status would increase run time hence netstat approach
-                cmd = 'netstat -antp | grep :8108 | grep LISTEN'
+                cmd = 'netstat -antp | grep :%s | grep LISTEN' % self.inputs.k8s_port
                 if 'LISTEN' in self.inputs.run_cmd_on_server(km_ip, cmd,
                         container='contrail-kube-manager'):
                     self._kube_manager_inspect = KubeManagerInspect(km_ip,
diff --git a/common/contrail_test_init.py b/common/contrail_test_init.py
index 0209230be..1bcb609dd 100755
--- a/common/contrail_test_init.py
+++ b/common/contrail_test_init.py
@@ -281,6 +281,8 @@ def parse_ini_file(self):
                                            'dns_port', '8092')
         self.agent_port = read_config_option(self.config, 'services',
                                              'agent_port', '8085')
+        self.k8s_port = read_config_option(self.config, 'services',
+                                           'k8s_port', '8108')
         self.api_server_ip = read_config_option(self.config, 'services',
                                                 'config_api_ip', None)
         self.analytics_api_ip = read_config_option(self.config, 'services',
@@ -798,6 +800,7 @@ def parse_yml_file(self):
         self.analytics_api_port = contrail_configs.get('ANALYTICS_API_PORT') or '8081'
         self.bgp_port = contrail_configs.get('CONTROL_INTROSPECT_PORT') or '8083'
         self.dns_port = contrail_configs.get('DNS_INTROSPECT_PORT') or '8092'
+        self.k8s_port = contrail_configs.get('K8S_INTROSPECT_PORT') or '8108'
         self.agent_port = '8085'
         self.api_server_ip = contrail_configs.get('CONFIG_API_VIP')
         self.analytics_api_ip = contrail_configs.get('ANALYTICS_API_VIP')
diff --git a/fixtures/k8s/ingress.py b/fixtures/k8s/ingress.py
index b4eb20734..51333202e 100644
--- a/fixtures/k8s/ingress.py
+++ b/fixtures/k8s/ingress.py
@@ -32,7 +32,8 @@ def __init__(self,
         self.tls = [] if tls is None else tls
         self.default_backend = {} if default_backend is None else default_backend
         self.v1_beta_h = self.k8s_client.v1_beta_h
-
+        self.connections = connections
+
         self.already_exists = None

     def setUp(self):
@@ -48,6 +49,10 @@ def verify_on_setup(self):
             self.logger.error('Ingress %s verification in Contrail api failed'
                               % (self.name))
             return False
+        if not self.verify_ingress_in_kube_manager():
+            self.logger.error('Ingress %s verification in Kube Manager failed'
+                              % (self.name))
+            return False
         self.logger.info('Ingress %s verification passed' % (self.name))
         return True
     # end verify_on_setup
@@ -148,3 +153,17 @@ def verify_ingress_in_k8s(self):
                 self.name))
         return True
     # end verify_ingress_in_k8s
+
+    @retry(delay=1, tries=10)
+    def verify_ingress_in_kube_manager(self):
+        km_h = self.connections.get_kube_manager_h()
+        self.lb_info = km_h.get_svc_or_ingress_lb_info(uuid = self.uuid)
+        if self.lb_info:
+            self.logger.info('Ingress %s with uuid %s found in kube manager'
+                             % (self.name, self.uuid))
+        else:
+            self.logger.warn('Ingress %s with uuid %s not found in kube manager'
+                             % (self.name, self.uuid))
+            return False
+        return True
+    # end verify_ingress_in_kube_manager
diff --git a/fixtures/k8s/namespace.py b/fixtures/k8s/namespace.py
index 513ffb744..ea7c1a1e8 100644
--- a/fixtures/k8s/namespace.py
+++ b/fixtures/k8s/namespace.py
@@ -52,6 +52,10 @@ def verify_on_setup(self):
             self.logger.error('Namespace %s not seen in Contrail API' % (
                 self.name))
             return False
+        if not self.verify_namespace_in_kube_manager():
+            self.logger.error('Namespace %s not seen in Kube Manager' % (
+                self.name))
+            return False
         self.logger.info('Namespace %s verification passed' % (self.name))
         self.verify_is_run = True
         return True
@@ -114,6 +118,25 @@ def verify_namespace_in_contrail_api(self):
                               self.api_s_obj.uuid))
         return True
     # end verify_namespace_in_contrail_api
+
+    @retry(delay=2, tries=10)
+    def verify_namespace_in_kube_manager(self):
+        km_h = self.connections.get_kube_manager_h()
+        self.namespace_info = km_h.get_namespace_info(ns_uuid = self.uuid)
+        if self.namespace_info:
+            if self.namespace_info['phase'] == "Active":
+                self.logger.info('Namespace %s with uuid %s found in kube manager'
+                                 % (self.name, self.uuid))
+            else:
+                self.logger.warn("Namespace present in kube manager but phase is %s"
+                                 % self.namespace_info['phase'])
+                return False
+        else:
+            self.logger.warn('Namespace %s with uuid %s not found in kube manager'
+                             % (self.name, self.uuid))
+            return False
+        return True
+    # end verify_namespace_in_kube_manager

     def cleanUp(self):
         super(NamespaceFixture, self).cleanUp()
diff --git a/fixtures/k8s/network_policy.py b/fixtures/k8s/network_policy.py
index f38c9441f..8c777680c 100644
--- a/fixtures/k8s/network_policy.py
+++ b/fixtures/k8s/network_policy.py
@@ -25,7 +25,14 @@ def __init__(self,
         self.metadata = {} if metadata is None else metadata
         self.spec = {} if spec is None else spec
         self.v1_networking = self.k8s_client.v1_networking
-
+        self.agent_inspect = connections.agent_inspect
+        self.connections = connections
+        self.inputs = connections.inputs
+        self.k8s_default_network_policies = ['default-policy-management:k8s-allowall',
+                                             'default-policy-management:k8s-Ingress',
+                                             'default-policy-management:k8s-denyall']
+        self.k8s_default_aps = "default-policy-management:k8s"
+
         self.already_exists = None

     def setUp(self):
@@ -33,7 +40,23 @@ def setUp(self):
         self.create()

     def verify_on_setup(self):
-        pass
+        if not self.verify_network_policy_in_k8s():
+            self.logger.error('Network Policy %s verification in Kubernetes failed'
+                              % (self.name))
+            return False
+        if not self.verify_network_policy_in_kube_manager():
+            self.logger.error('Network Policy %s verification in Kube Manager failed'
+                              % (self.name))
+            return False
+        if not self.verify_default_policies_in_agent():
+            self.logger.error('Default k8s Policy verification in Agent failed')
+            return False
+        if not self.verify_firewall_policy_in_agent():
+            self.logger.error('Network Policy %s verification in Agent failed'
+                              % (self.name))
+            return False
+        self.logger.info('Network Policy %s verification passed' % (self.name))
+        return True
     # end verify_on_setup

     def cleanUp(self):
@@ -87,4 +110,72 @@ def update(self, metadata=None, spec=None):
                                           metadata=self.metadata,
                                           spec=self.spec)
         self._populate_attr()
-    # end create
+    # end update
+
+    @retry(delay=1, tries=10)
+    def verify_network_policy_in_k8s(self):
+        if self.read():
+            self.logger.info("Network policy found in k8s")
+        else:
+            self.logger.warn('Network policy not found in k8s')
+            return False
+        return True
+    # end verify_network_policy_in_k8s
+
+    @retry(delay=1, tries=10)
+    def verify_network_policy_in_kube_manager(self):
+        km_h = self.connections.get_kube_manager_h()
+        self.np_info = km_h.get_network_policy_info(np_uuid = self.uuid)
+        if self.np_info:
+            self.logger.info('Network Policy %s with uuid %s found in kube manager'
+                             % (self.name, self.uuid))
+        else:
+            self.logger.warn('Network Policy %s with uuid %s not found in kube manager'
+                             % (self.name, self.uuid))
+            return False
+        return True
+    # end verify_network_policy_in_kube_manager
+
+    @retry(delay=1, tries=10)
+    def verify_firewall_policy_in_agent(self):
+        km_h = self.connections.get_kube_manager_h()
+        agent_h = self.agent_inspect[self.inputs.compute_ips[0]]
+        # Get the associated firewall policy.
+        self.np_info = km_h.get_network_policy_info(np_uuid = self.uuid)
+        fw_policy_fq_name = self.np_info['vnc_firewall_policy_fqname']
+        # Search for the corresponding firewall policy in agent
+        fwPolicy = agent_h.get_fw_policy(policy_fq_name = fw_policy_fq_name)
+        #fw_policyNames = [elem['name'] for elem in fwPolicyList]
+        if not fwPolicy:
+            self.logger.warn("Network policy with name %s not found in agent"
+                             % self.name)
+            return False
+        return True
+    #end verify_firewall_policy_in_agent
+
+    @retry(delay=1, tries=10)
+    def verify_default_policies_in_agent(self):
+        km_h = self.connections.get_kube_manager_h()
+        agent_h = self.agent_inspect[self.inputs.compute_ips[0]]
+
+        default_aps = agent_h.get_aps(aps_fq_name = self.k8s_default_aps)
+        if not default_aps:
+            self.logger.warn("Default APS %s for k8s not found in agent"
+                             % self.k8s_default_aps)
+            return False
+        aps_fw_policy_uuid = [elem['firewall_policy'] for elem in default_aps['firewall_policy_list']]
+        for elem in self.k8s_default_network_policies:
+            fw_policy = agent_h.get_fw_policy(policy_fq_name = elem)
+            if not fw_policy:
+                self.logger.warn("Network policy with name %s not found in agent"
+                                 % elem)
+                return False
+            if fw_policy['uuid'] not in aps_fw_policy_uuid:
+                self.logger.warn("Network policy %s not associated with the default k8s APS"
+                                 % elem)
+                return False
+        return True
+    #end verify_default_policies_in_agent
+
+
+
diff --git a/fixtures/k8s/pod.py b/fixtures/k8s/pod.py
index dece5a7ac..deb40c947 100644
--- a/fixtures/k8s/pod.py
+++ b/fixtures/k8s/pod.py
@@ -71,6 +71,10 @@ def verify_on_setup(self):
             self.logger.error('Pod %s not seen in Contrail agent' % (
                 self.name))
             return False
+        if not self.verify_pod_in_kube_manager():
+            self.logger.error('Pod %s not seen in Contrail Kube Manager' % (
+                self.name))
+            return False
         self.logger.info('Pod %s verification passed' % (self.name))
         return True
     # end verify_on_setup
@@ -278,6 +282,20 @@ def set_compute_ip(self):
         self.compute_ip = self.host_ip
     # end set_compute_ip

+    @retry(delay=2, tries=10)
+    def verify_pod_in_kube_manager(self):
+        km_h = self.connections.get_kube_manager_h()
+        self.pod_info = km_h.get_pod_info(pod_uuid = self.uuid)
+        if self.pod_info:
+            self.logger.info('Pod %s with uuid %s found in kube manager'
+                             % (self.name, self.uuid))
+        else:
+            self.logger.warn('Pod %s with uuid %s not found in kube manager'
+                             % (self.name, self.uuid))
+            return False
+        return True
+    # end verify_pod_in_kube_manager
+
     @retry(delay=2, tries=10)
     def verify_pod_in_contrail_agent(self):
         self.set_compute_ip()
diff --git a/fixtures/k8s/service.py b/fixtures/k8s/service.py
index 09a4a6e50..6734b7dd9 100644
--- a/fixtures/k8s/service.py
+++ b/fixtures/k8s/service.py
@@ -23,6 +23,7 @@ def __init__(self,
         self.metadata = {} if metadata is None else metadata
         self.spec = {} if spec is None else spec
         self.v1_h = self.k8s_client.v1_h
+        self.connections = connections

         self.already_exists = None

@@ -35,6 +36,10 @@ def verify_on_setup(self):
             self.logger.error('Service %s verification in Contrail api failed'
                               % (self.name))
             return False
+        if not self.verify_service_in_kube_manager():
+            self.logger.error('Service %s verification in Kube Manager failed'
+                              % (self.name))
+            return False
         self.logger.info('Service %s verification passed' % (self.name))
         return True
     # end verify_on_setup
@@ -97,17 +102,30 @@ def verify_service_in_contrail_api(self):
                 'Service UUID %s not yet found in contrail-api' % (self.uuid))
             return False
-        exp_name = 'service-%s' % (self.name)
-        if obj.name != exp_name:
+        if self.name not in obj.name:
             self.logger.warn(
-                'Service %s name not matching that in contrail-api'
+                'Service name not matching that in contrail-api. '
                 'Expected : %s, Got : %s' %
-                (self.name, exp_name, obj.name))
+                (self.name, obj.name))
             return False
         self.logger.info('Validated that Service %s is seen in '
                          'contrail-api' % (self.name))
         return True
     # end verify_service_in_contrail_api
+
+    @retry(delay=1, tries=10)
+    def verify_service_in_kube_manager(self):
+        km_h = self.connections.get_kube_manager_h()
+        self.lb_info = km_h.get_svc_or_ingress_lb_info(uuid = self.uuid)
+        if self.lb_info:
+            self.logger.info('Service %s with uuid %s found in kube manager'
+                             % (self.name, self.uuid))
+        else:
+            self.logger.warn('Service %s with uuid %s not found in kube manager'
+                             % (self.name, self.uuid))
+            return False
+        return True
+    # end verify_service_in_kube_manager

     @retry(delay=1, tries=10)
     def get_external_ips(self):
diff --git a/scripts/k8s_scripts/test_policy.py b/scripts/k8s_scripts/test_policy.py
index d82abc69a..97c7e4aad 100644
--- a/scripts/k8s_scripts/test_policy.py
+++ b/scripts/k8s_scripts/test_policy.py
@@ -153,9 +153,10 @@ def test_allow_all_ingress(self):
         assert self.client1_pod_ns1.ping_with_certainty(self.client2_pod_ns1.pod_ip)
         assert self.client1_pod_ns2.ping_with_certainty(self.client1_pod_ns1.pod_ip)
         assert self.client1_pod_ns1.ping_with_certainty(self.client1_pod_ns2.pod_ip)
-        self.setup_update_simple_policy(name="allow-all-ingress",
+        policy = self.setup_update_simple_policy(name="allow-all-ingress",
                                        namespace= self.ns1.name,
                                        ingress_all =True)
+        assert policy.verify_on_setup()
         # All traffic should still work as it is an ingress allow-all policy
         assert self.client1_pod_ns1.ping_with_certainty(self.client2_pod_ns1.pod_ip)
         assert
self.client1_pod_ns2.ping_with_certainty(self.client1_pod_ns1.pod_ip) @@ -178,9 +179,10 @@ def test_deny_all_ingress(self): assert self.client1_pod_ns2.ping_with_certainty(self.client1_pod_ns1.pod_ip) assert self.client1_pod_ns1.ping_with_certainty(self.client1_pod_ns2.pod_ip) policy_types = ["Ingress"] - self.setup_update_simple_policy(name="deny-all-ingress", + policy = self.setup_update_simple_policy(name="deny-all-ingress", namespace= self.ns1.name, policy_types = policy_types) + assert policy.verify_on_setup() #All ingress traffic to all pods of namespace "default" should be dropped. assert self.client1_pod_ns1.ping_with_certainty(self.client2_pod_ns1.pod_ip, expectation=False) @@ -206,11 +208,12 @@ def test_ingress_podselector_for_pod(self): # All traffic between everyone should work assert self.client1_pod_ns1.ping_with_certainty(self.web_pod_ns1.pod_ip) assert self.client1_pod_ns2.ping_with_certainty(self.web_pod_ns1.pod_ip) - self.setup_update_simple_policy(name="ingress-pod-to-pod", + policy = self.setup_update_simple_policy(name="ingress-pod-to-pod", pod_selector = self.web_pod_ns1.labels, policy_types = ["Ingress"], namespace= self.ns1.name, ingress_pods= self.client1_pod_ns1.labels) + assert policy.verify_on_setup() # All ingress traffic to pod cls.web_pod_ns1 will be dropped except from pod self.client1_pod_ns1.labels assert self.client1_pod_ns1.ping_with_certainty(self.web_pod_ns1.pod_ip) assert self.client2_pod_ns1.ping_with_certainty(self.web_pod_ns1.pod_ip, @@ -236,11 +239,12 @@ def test_ingress_namespaceselector_for_pod(self): # All traffic between everyone should work assert self.client1_pod_ns1.ping_with_certainty(self.web_pod_ns1.pod_ip) assert self.client1_pod_ns2.ping_with_certainty(self.web_pod_ns1.pod_ip) - self.setup_update_simple_policy(name="ingress-ns-to-pod", + policy = self.setup_update_simple_policy(name="ingress-ns-to-pod", pod_selector = self.web_pod_ns1.labels, policy_types = ["Ingress"], namespace= self.ns1.name, ingress_namespaces= self.ns2.labels) + assert policy.verify_on_setup() # All ingress traffic to pod cls.web_pod_ns1 will be allowed only from all pods of namespace "non-default" assert self.client1_pod_ns2.ping_with_certainty(self.web_pod_ns1.pod_ip) assert self.client2_pod_ns2.ping_with_certainty(self.web_pod_ns1.pod_ip) @@ -272,12 +276,13 @@ def test_ingress_ipblock_for_pod(self): allowed_ip.append("0/24") ingress_allow_cidr = ".".join(allowed_ip) deny_cidr = self.client1_pod_ns1.pod_ip + "/32" - self.setup_update_simple_policy(name="ingress-ipblock-to-pod", + policy = self.setup_update_simple_policy(name="ingress-ipblock-to-pod", pod_selector = self.web_pod_ns1.labels, policy_types = ["Ingress"], namespace= self.ns1.name, ingress_ipblock= {"cidr" : ingress_allow_cidr, "_except" : [deny_cidr]}) + assert policy.verify_on_setup() # INgress traffic should be allowed from pods lying in "ingress_allow_cidr" but not from host ip of self.client1_pod_ns1 assert self.client1_pod_ns2.ping_with_certainty(self.web_pod_ns1.pod_ip) if self.client2_pod_ns1.pod_ip.split(".")[:3] == self.web_pod_ns1.pod_ip.split(".")[:3]: @@ -308,12 +313,13 @@ def test_ingress_port_for_pod(self): url2 = 'http://%s' % (self.web_pod_ns2.pod_ip) assert self.validate_wget(self.client1_pod_ns1, url) assert self.validate_wget(self.client1_pod_ns2, url) - self.setup_update_simple_policy(name="ingress-port-to-pod", + policy = self.setup_update_simple_policy(name="ingress-port-to-pod", pod_selector = self.web_pod_ns1.labels, policy_types = ["Ingress"], namespace= self.ns1.name, 
ports=['TCP/80'], ingress_all =True) + assert policy.verify_on_setup() # Ingress TCP traffic should be allowed and ICMP traffic should drop assert self.validate_wget(self.client1_pod_ns1, url) assert self.validate_wget(self.client1_pod_ns2, url) @@ -344,10 +350,11 @@ def test_ingress_podselector_for_namespace(self): url = 'http://%s' % (self.web_pod_ns1.pod_ip) assert self.validate_wget(self.client1_pod_ns1, url) assert self.validate_wget(self.client1_pod_ns2, url) - self.setup_update_simple_policy(name="ingress-pod-to-ns", + policy = self.setup_update_simple_policy(name="ingress-pod-to-ns", policy_types = ["Ingress"], namespace= self.ns1.name, ingress_pods= self.client1_pod_ns1.labels) + assert policy.verify_on_setup() # Traffic should only be allowed from self.client1_pod_ns1 inside namespace default assert self.client1_pod_ns1.ping_with_certainty(self.client2_pod_ns1.pod_ip) assert self.validate_wget(self.client1_pod_ns1, url) @@ -380,10 +387,11 @@ def test_ingress_podselector_for_namespace_negative(self): url = 'http://%s' % (self.web_pod_ns1.pod_ip) assert self.validate_wget(self.client1_pod_ns1, url) assert self.validate_wget(self.client1_pod_ns2, url) - self.setup_update_simple_policy(name="ingress-pod-to-ns", + policy = self.setup_update_simple_policy(name="ingress-pod-to-ns", policy_types = ["Ingress"], namespace= self.ns1.name, ingress_pods= self.client1_pod_ns2.labels) + assert policy.verify_on_setup() # Being a negative case where PodSelector "ingress_pods" is from different namespace, this policy is similar as deny all policy on namespace "default" assert self.client1_pod_ns2.ping_with_certainty(self.client1_pod_ns1.pod_ip, expectation=False) @@ -417,10 +425,11 @@ def test_ingress_namespaceselector_for_namespace(self): url = 'http://%s' % (self.web_pod_ns1.pod_ip) assert self.validate_wget(self.client1_pod_ns1, url) assert self.validate_wget(self.client1_pod_ns2, url) - self.setup_update_simple_policy(name="ingress-ns-to-ns", + policy = self.setup_update_simple_policy(name="ingress-ns-to-ns", policy_types = ["Ingress"], namespace= self.ns1.name, ingress_namespaces= self.ns2.labels) + assert policy.verify_on_setup() # Traffic from "non-default" namespace to "default" namespace should work assert self.client1_pod_ns2.ping_with_certainty(self.client1_pod_ns1.pod_ip) assert self.validate_wget(self.client1_pod_ns2, url) @@ -456,11 +465,12 @@ def test_ingress_ipblock_for_namespace(self): allowed_ip.append("0/24") ingress_allow_cidr = ".".join(allowed_ip) deny_cidr = self.client2_pod_ns2.pod_ip + "/32" - self.setup_update_simple_policy(name="ingress-ipblock-to-ns", + policy = self.setup_update_simple_policy(name="ingress-ipblock-to-ns", policy_types = ["Ingress"], namespace= self.ns1.name, ingress_ipblock= {"cidr" : ingress_allow_cidr, "_except" : [deny_cidr]}) + assert policy.verify_on_setup() # Ingress traffic should be allowed from pods lying in "ingress_allow_cidr" assert self.validate_wget(self.client1_pod_ns2, url) assert self.client1_pod_ns2.ping_with_certainty(self.client1_pod_ns1.pod_ip) @@ -492,11 +502,12 @@ def test_ingress_port_for_namespace(self): url2 = 'http://%s' % (self.web_pod_ns2.pod_ip) assert self.validate_wget(self.client1_pod_ns2, url) assert self.validate_wget(self.client1_pod_ns1, url2) - self.setup_update_simple_policy(name="ingress-port-to-ns", + policy = self.setup_update_simple_policy(name="ingress-port-to-ns", policy_types = ["Ingress"], namespace= self.ns2.name, ports=['TCP/80'], ingress_all =True) + assert policy.verify_on_setup() # Ingress TCP traffic 
should be allowed in namespace "non default" assert self.validate_wget(self.client1_pod_ns2, url2) assert self.validate_wget(self.client1_pod_ns1, url2) @@ -542,6 +553,7 @@ def test_policy_with_multiple_ingress_rules(self): namespace = self.ns2.name, policy_types = policy_types, ingress= ingress_list) + assert policy1.verify_on_setup() # Ingress traffic to self.web_pod_ns2 should only be allowed from self.client1_pod_ns2 within namespace "non-defaut" assert self.validate_wget(self.client1_pod_ns2, url2) assert self.validate_wget(self.client2_pod_ns2, url2, expectation = False) @@ -571,7 +583,7 @@ def test_policy_with_multiple_ingress_rules(self): namespace = self.ns2.name, policy_types = policy_types, ingress= ingress_list) - + assert policy2.verify_on_setup() # INgess traffic to self.web_pod_ns2 should be allowed from self.client2_pod_ns1 as per cidr rule assert self.validate_wget(self.client2_pod_ns1, url2) # INgess traffic to self.web_pod_ns2 should not be allowed from self.client1_pod_ns1 as per except rule @@ -629,7 +641,7 @@ def test_ingress_rules_edit(self): namespace = self.ns2.name, policy_types = policy_types, ingress= ingress_list) - + assert policy1.verify_on_setup() # Ingress traffic to self.web_pod_ns2 should only be allowed from self.client1_pod_ns2 within namespace "non-defaut" assert self.validate_wget(self.client1_pod_ns2, url2) assert self.validate_wget(self.client2_pod_ns2, url2, expectation = False) @@ -724,6 +736,8 @@ def test_multiple_ingress_policies(self): policy_types = ["Ingress"], namespace= self.ns1.name, ingress_namespaces= self.ns2.labels) + assert policy1.verify_on_setup() + assert policy2.verify_on_setup() ingress_list = [ {'from': [ {'pod_selector': self.client2_pod_ns1.labels}, @@ -740,6 +754,8 @@ def test_multiple_ingress_policies(self): policy_types = ["Ingress"], namespace= self.ns2.name, ingress_pods= self.client1_pod_ns2.labels) + assert policy3.verify_on_setup() + assert policy4.verify_on_setup() allowed_ip = self.client1_pod_ns1.pod_ip.split(".")[:3] allowed_ip.append("0/24") ingress_allow_cidr = ".".join(allowed_ip) @@ -834,7 +850,7 @@ def test_ingress_rules_label_edit(self): namespace = self.ns2.name, policy_types = policy_types, ingress= ingress_list) - + assert policy1.verify_on_setup() # Ingress behavior as per the above policy should be as follows: assert self.validate_wget(self.client1_pod_ns2, url2) assert self.validate_wget(self.client2_pod_ns2, url2, expectation = False) @@ -895,6 +911,7 @@ def test_allow_all_egress(self): namespace = self.ns1.name, policy_types = policy_types, egress= egress_list) + assert policy1.verify_on_setup() # All traffic should still work as it is ingress allow all policy assert self.client1_pod_ns1.ping_with_certainty(self.client2_pod_ns1.pod_ip) assert self.client1_pod_ns1.ping_with_certainty(self.client1_pod_ns2.pod_ip) @@ -915,9 +932,10 @@ def test_deny_all_egress(self): assert self.client1_pod_ns2.ping_with_certainty(self.client1_pod_ns1.pod_ip) assert self.client1_pod_ns1.ping_with_certainty(self.client1_pod_ns2.pod_ip) policy_types = ["Egress"] - self.setup_update_simple_policy(name="deny-all-egress", + policy = self.setup_update_simple_policy(name="deny-all-egress", namespace= self.ns1.name, policy_types = policy_types) + assert policy.verify_on_setup() #All egress traffic from all pods of namespace "default" should be dropped. 
assert self.client1_pod_ns1.ping_with_certainty(self.client2_pod_ns1.pod_ip, expectation=False) @@ -943,11 +961,12 @@ def test_egress_podselector_for_pod(self): # All traffic between everyone should work assert self.client1_pod_ns1.ping_with_certainty(self.web_pod_ns1.pod_ip) assert self.client1_pod_ns2.ping_with_certainty(self.web_pod_ns1.pod_ip) - self.setup_update_simple_policy(name="egress-pod-to-pod", + policy = self.setup_update_simple_policy(name="egress-pod-to-pod", pod_selector = self.client2_pod_ns1.labels, policy_types = ["Egress"], namespace= self.ns1.name, egress_pods= self.client1_pod_ns1.labels) + assert policy.verify_on_setup() # All egress traffic from pod cls.client2_pod_ns1 will be dropped except to pod self.client1_pod_ns1.labels assert self.client2_pod_ns1.ping_with_certainty(self.client1_pod_ns1.pod_ip) assert self.client2_pod_ns1.ping_with_certainty(self.web_pod_ns1.pod_ip, @@ -979,10 +998,11 @@ def test_egress_podselector_for_namespace(self): url = 'http://%s' % (self.web_pod_ns1.pod_ip) assert self.validate_wget(self.client1_pod_ns1, url) assert self.validate_wget(self.client1_pod_ns2, url) - self.setup_update_simple_policy(name="egress-pod-to-ns", + policy = self.setup_update_simple_policy(name="egress-pod-to-ns", policy_types = ["Egress"], namespace= self.ns1.name, egress_pods= self.client1_pod_ns1.labels) + assert policy.verify_on_setup() # Traffic should only be allowed to self.client1_pod_ns1 inside namespace default assert self.client2_pod_ns1.ping_with_certainty(self.client1_pod_ns1.pod_ip) # Traffic to any other Pod inside defaut namespace should not be allowed @@ -1015,11 +1035,12 @@ def test_egress_namespaceselector_for_pod(self): # All traffic between everyone should work assert self.client1_pod_ns1.ping_with_certainty(self.web_pod_ns1.pod_ip) assert self.client1_pod_ns2.ping_with_certainty(self.web_pod_ns1.pod_ip) - self.setup_update_simple_policy(name="egress-ns-to-pod", + policy = self.setup_update_simple_policy(name="egress-ns-to-pod", pod_selector = self.client1_pod_ns1.labels, policy_types = ["Egress"], namespace= self.ns1.name, egress_namespaces= self.ns2.labels) + assert policy.verify_on_setup() # All egress traffic from pod cls.client1_pod_ns1 will be allowed only to all pods of namespace "non-default" assert self.client1_pod_ns1.ping_with_certainty(self.web_pod_ns2.pod_ip) assert self.client1_pod_ns1.ping_with_certainty(self.client1_pod_ns2.pod_ip) @@ -1052,10 +1073,11 @@ def test_egress_namespaceselector_for_namespace(self): url2 = 'http://%s' % (self.web_pod_ns2.pod_ip) assert self.validate_wget(self.client1_pod_ns1, url) assert self.validate_wget(self.client1_pod_ns2, url) - self.setup_update_simple_policy(name="egress-ns-to-ns", + policy = self.setup_update_simple_policy(name="egress-ns-to-ns", policy_types = ["Egress"], namespace= self.ns1.name, egress_namespaces= self.ns2.labels) + assert policy.verify_on_setup() # Traffic to "non-default" namespace from "default" namespace should work assert self.client1_pod_ns1.ping_with_certainty(self.client1_pod_ns2.pod_ip) assert self.validate_wget(self.client1_pod_ns1, url2) @@ -1090,12 +1112,13 @@ def test_egress_ipblock_for_pod(self): allowed_ip.append("0/24") egress_allow_cidr = ".".join(allowed_ip) deny_cidr = self.client1_pod_ns1.pod_ip + "/32" - self.setup_update_simple_policy(name="egress-ipblock-to-pod", + policy = self.setup_update_simple_policy(name="egress-ipblock-to-pod", pod_selector = self.client2_pod_ns1.labels, policy_types = ["Egress"], namespace= self.ns1.name, egress_ipblock= 
{"cidr" : egress_allow_cidr, "_except" : [deny_cidr]}) + assert policy.verify_on_setup() # Egress traffic should be allowed from self.client2_pod_ns1 to self.client1_pod_ns2 only. assert self.client2_pod_ns1.ping_with_certainty(self.client1_pod_ns2.pod_ip) # Verify egress to other pods: @@ -1136,10 +1159,11 @@ def test_egress_ipblock_for_namespace(self): assert self.validate_wget(self.client1_pod_ns1, url2) assert self.client2_pod_ns1.ping_with_certainty(self.client2_pod_ns2.pod_ip) egress_allow_cidr = self.client1_pod_ns2.pod_ip + "/32" - self.setup_update_simple_policy(name="egress-ipblock-ns", + policy = self.setup_update_simple_policy(name="egress-ipblock-ns", policy_types = ["Egress"], namespace= self.ns1.name, egress_ipblock= {"cidr" : egress_allow_cidr}) + assert policy.verify_on_setup() # Egress traffic should be allowed from any pod of namespace "default" to self.client1_pod_ns2 only. assert self.client1_pod_ns1.ping_with_certainty(self.client1_pod_ns2.pod_ip) assert self.client2_pod_ns1.ping_with_certainty(self.client1_pod_ns2.pod_ip) @@ -1175,12 +1199,13 @@ def test_egress_port_for_pod(self): assert self.validate_wget(self.client1_pod_ns1, url2) assert self.client1_pod_ns1.ping_with_certainty(self.web_pod_ns1.pod_ip) assert self.client1_pod_ns1.ping_with_certainty(self.client1_pod_ns2.pod_ip) - self.setup_update_simple_policy(name="egress-ports-pod", + policy = self.setup_update_simple_policy(name="egress-ports-pod", pod_selector = self.client1_pod_ns1.labels, policy_types = ["Egress"], namespace= self.ns1.name, egress_ports=['TCP/80'], egress_all =True) + assert policy.verify_on_setup() # Only TCP Egress traffic should be allowed from self.client1_pod_ns1 assert self.validate_wget(self.client1_pod_ns1, url) assert self.validate_wget(self.client1_pod_ns1, url2) @@ -1217,11 +1242,12 @@ def test_egress_port_for_namespace(self): assert self.validate_wget(self.client1_pod_ns2, url2) assert self.client1_pod_ns2.ping_with_certainty(self.web_pod_ns1.pod_ip) assert self.client1_pod_ns2.ping_with_certainty(self.client2_pod_ns2.pod_ip) - self.setup_update_simple_policy(name="egress-ports-ns", + policy = self.setup_update_simple_policy(name="egress-ports-ns", policy_types = ["Egress"], namespace= self.ns2.name, egress_ports=['TCP/80'], egress_all =True) + assert policy.verify_on_setup() # Only TCP Egress traffic should be allowed from any pod of "non-default" namespace assert self.validate_wget(self.client1_pod_ns2, url) assert self.validate_wget(self.client1_pod_ns2, url2) @@ -1269,6 +1295,7 @@ def test_policy_with_multiple_egress_rules(self): namespace = self.ns2.name, policy_types = policy_types, egress= egress_list) + assert policy1.verify_on_setup() # Egress traffic within "non-default" namespace should only be allowed to web_pod_ns2 assert self.validate_wget(self.client1_pod_ns2, url2) assert self.client1_pod_ns2.ping_with_certainty(self.client1_pod_ns1.pod_ip, @@ -1298,6 +1325,7 @@ def test_policy_with_multiple_egress_rules(self): namespace = self.ns2.name, policy_types = policy_types, egress= egress_list) + assert policy2.verify_on_setup() # Pod client1_pod_ns2 can only reach pods mentioned in the rule assert self.validate_wget(self.client1_pod_ns2, url2) # podSelector rule assert self.validate_wget(self.client1_pod_ns2, url) # ip_block cidr rule @@ -1354,6 +1382,7 @@ def test_egress_rules_edit(self): namespace = self.ns2.name, policy_types = policy_types, egress= egress_list) + assert policy1.verify_on_setup() # Egress traffic within "non-default" namespace should only be allowed 
to web_pod_ns2 assert self.validate_wget(self.client1_pod_ns2, url2) assert self.client1_pod_ns2.ping_with_certainty(self.client1_pod_ns1.pod_ip, @@ -1452,6 +1481,8 @@ def test_multiple_egress_policies(self): namespace= self.ns1.name, egress_ports=['TCP/80'], egress_all =True) + assert policy1.verify_on_setup() + assert policy2.verify_on_setup() egress_allow_cidr = self.web_pod_ns1.pod_ip + "/32" policy3 = self.setup_update_simple_policy(name="egress-ipblock-ns", policy_types = ["Egress"], @@ -1462,6 +1493,8 @@ def test_multiple_egress_policies(self): namespace= self.ns2.name, egress_ports=['TCP/80'], egress_all =True) + assert policy3.verify_on_setup() + assert policy4.verify_on_setup() # Verifying egress behavior on namspace "default" as per 1st 2 policies assert self.validate_wget(self.client1_pod_ns1, url2) assert self.client1_pod_ns1.ping_with_certainty(self.web_pod_ns2.pod_ip) @@ -1557,7 +1590,7 @@ def test_egress_rules_label_edit(self): namespace = self.ns2.name, policy_types = policy_types, egress= egress_list) - + assert policy1.verify_on_setup() # Egress behavior as per the above policy should be as follows: assert self.validate_wget(self.client2_pod_ns2, url) assert self.validate_wget(self.client2_pod_ns2, url2, expectation = False) @@ -1623,6 +1656,7 @@ def test_ingress_allow_egress_allow_all(self): ingress= ingress_list, egress= egress_list, policy_types= policy_types) + assert policy1.verify_on_setup() assert self.client2_pod_ns2.ping_with_certainty(self.client1_pod_ns1.pod_ip) assert self.client1_pod_ns1.ping_with_certainty(self.client2_pod_ns1.pod_ip) assert self.validate_wget(self.client1_pod_ns2, url) @@ -1655,6 +1689,7 @@ def test_ingress_allow_egress_deny_all(self): ingress= ingress_list, egress= egress_list, policy_types= policy_types) + assert policy1.verify_on_setup() # Ingress allow all assert self.client2_pod_ns2.ping_with_certainty(self.client1_pod_ns1.pod_ip) assert self.client1_pod_ns1.ping_with_certainty(self.client2_pod_ns1.pod_ip) @@ -1704,6 +1739,7 @@ def test_ingress_deny_egress_allow_all(self): ingress= ingress_list, egress= egress_list, policy_types= policy_types) + assert policy1.verify_on_setup() # Ingress deny all assert self.client2_pod_ns2.ping_with_certainty(self.client1_pod_ns1.pod_ip, expectation=False) @@ -1738,6 +1774,7 @@ def test_ingress_deny_egress_deny_all(self): name="ingress-deny-egress-deny", namespace= self.ns1.name, policy_types = policy_types) + assert policy1.verify_on_setup() # Verify that all ingress traffic is dropped assert self.client2_pod_ns2.ping_with_certainty(self.client1_pod_ns1.pod_ip, expectation=False) @@ -1810,6 +1847,7 @@ def test_ingress_egress_on_pod(self): policy_types = policy_types, ingress= ingress_list, egress= egress_list) + assert policy1.verify_on_setup() # Verify that all ingress rules are operational assert self.client2_pod_ns3.ping_with_certainty(self.client1_pod_ns3.pod_ip) assert self.client1_pod_ns2.ping_with_certainty(self.client1_pod_ns3.pod_ip) @@ -1882,6 +1920,7 @@ def test_ingress_egress_on_namespace(self): policy_types = policy_types, ingress= ingress_list, egress= egress_list) + assert policy1.verify_on_setup() # Verify that all ingress rules are operational # Note that below 2 checks should fail as per egress port policy but they pass # as the egress port rule will fall after ingress pod selector rule in priority @@ -2023,6 +2062,7 @@ def test_ingress_policy_over_isolated_namespace(self): namespace = ns3_clients[3].name, policy_types = policy_types, ingress= ingress_list) + assert 
policy1.verify_on_setup() # Verify that network policy do not take precedence over network isolation assert self.validate_wget(ns3_clients[1], url3) assert self.validate_wget(ns3_clients[0], url3, expectation=False) @@ -2100,6 +2140,7 @@ def test_egress_policy_over_isolated_namespace(self): namespace = ns3_clients[3].name, policy_types = policy_types, egress= egress_list) + assert policy1.verify_on_setup() # Verify that network policy works as expected assert self.validate_wget(ns3_clients[1], url3) assert self.validate_wget(ns3_clients[0], url3) @@ -2188,6 +2229,7 @@ def test_k8s_configurations_post_policy_creation(self): policy_types = policy_types, ingress= ingress_list, egress= egress_list) + assert policy1.verify_on_setup() namespace2 = self.setup_namespace(name = get_random_name("ns2")) namespace2.verify_on_setup() namespace1.set_labels({'test_site': "nss1"}) @@ -2270,6 +2312,7 @@ def test_policy_negative(self): policy_types = policy_types, ingress= ingress_list, egress= egress_list) + assert policy1.verify_on_setup() #Verify that there is no impact on client2_ns2 due to above policy as it is applied over different namespace assert client2_ns2.ping_with_certainty(client1_ns1.pod_ip) assert client2_ns2.ping_with_certainty(client1_ns2.pod_ip) @@ -2338,6 +2381,7 @@ def test_deployment_replica_updation(self): namespace = namespace1.name, policy_types = policy_types, ingress= ingress_list) + assert policy1.verify_on_setup() pod_ip_list = [] for pod in deployment.get_pods_list(): pod_ip_list.append(pod.status.pod_ip) @@ -2424,6 +2468,7 @@ def test_multiple_pod_selector(self): policy_types = policy_types, ingress= ingress_list, egress= egress_list) + assert policy1.verify_on_setup() #Verify ingress rules assert client4_pod_ns1.ping_with_certainty(client1_pod_ns1.pod_ip, expectation=False) @@ -2511,14 +2556,14 @@ def test_multiple_values_in_ingress_rule(self): namespace = namespace1.name, policy_types = policy_types, ingress= ingress_list_1) - + assert policy1.verify_on_setup() ingress_list_2 = [{'from': [{'namespace_selector': {'new_site_for': "ns2", 'site_for': "ns3"}}]}] policy2 = self.setup_update_policy(pod_selector=client2_pod_ns1.labels, name="ingress-policy-ns-rule-multiple-values", namespace = namespace1.name, policy_types = policy_types, ingress= ingress_list_2) - + assert policy2.verify_on_setup() cidr_1 = client2_pod_ns2.pod_ip + "/32" cidr_2 = client2_pod_ns4.pod_ip + "/32" ingress_allow_cidr = client1_pod_ns4.pod_ip.split(".")[:1][0] + ".0.0.0/8" @@ -2530,7 +2575,7 @@ def test_multiple_values_in_ingress_rule(self): namespace = namespace1.name, policy_types = policy_types, ingress= ingress_list_3) - + assert policy3.verify_on_setup() #Verify ingress rules for podSelector assert client2_pod_ns1.ping_with_certainty(client1_pod_ns1.pod_ip) assert client3_pod_ns1.ping_with_certainty(client1_pod_ns1.pod_ip) @@ -2630,14 +2675,14 @@ def test_multiple_values_in_egress_rule(self): namespace = namespace1.name, policy_types = policy_types, egress= egress_list_1) - + assert policy1.verify_on_setup() egress_list_2 = [{'to': [{'namespace_selector': {'new_site_for': "ns2", 'site_for': "ns3"}}]}] policy2 = self.setup_update_policy(pod_selector=client2_pod_ns1.labels, name="egress-policy-ns-rule-multiple-values", namespace = namespace1.name, policy_types = policy_types, egress= egress_list_2) - + assert policy2.verify_on_setup() cidr_1 = client2_pod_ns2.pod_ip + "/32" cidr_2 = client2_pod_ns4.pod_ip + "/32" egress_allow_cidr = client1_pod_ns4.pod_ip.split(".")[:1][0] + ".0.0.0/8" @@ -2649,6 
+2694,7 @@ def test_multiple_values_in_egress_rule(self): namespace = namespace1.name, policy_types = policy_types, egress= egress_list_3) + assert policy3.verify_on_setup() #Verify egress rules for podSelector assert client1_pod_ns1.ping_with_certainty(client2_pod_ns1.pod_ip) assert client1_pod_ns1.ping_with_certainty(client3_pod_ns1.pod_ip) @@ -2827,6 +2873,7 @@ def test_ingress_rule_on_namespace_with_service(self): % (self.web2_pod_ns2.name)) service_ns1 = self.setup_http_service(namespace=self.ns1.name, labels={'app': 'web_ns1'}) + assert service_ns1.verify_on_setup() assert self.validate_nginx_lb([self.web1_pod_ns1,self.web2_pod_ns1], service_ns1.cluster_ip, test_pod=self.client1_pod_ns1) @@ -2839,6 +2886,7 @@ def test_ingress_rule_on_namespace_with_service(self): namespace = self.ns1.name, policy_types = policy_types, ingress= ingress_list) + assert policy1.verify_on_setup() # Verify is policy works as expected assert self.validate_nginx_lb([self.web1_pod_ns1,self.web2_pod_ns1], service_ns1.cluster_ip, @@ -2914,8 +2962,10 @@ def test_egress_rule_on_namespace_with_service(self): % (self.web2_pod_ns2.name)) service_ns1 = self.setup_http_service(namespace=self.ns1.name, labels={'app': 'web_ns1'}) + assert service_ns1.verify_on_setup() service_ns2 = self.setup_http_service(namespace=self.ns2.name, labels={'app': 'web_ns2'}) + assert service_ns2.verify_on_setup() assert self.validate_nginx_lb([self.web1_pod_ns1,self.web2_pod_ns1], service_ns1.cluster_ip, test_pod=self.client1_pod_ns1) @@ -2935,6 +2985,7 @@ def test_egress_rule_on_namespace_with_service(self): namespace = self.ns1.name, policy_types = policy_types, egress= egress_list) + assert policy1.verify_on_setup() # Verify that policy should fail as the DNAT at VIP of load balancer(Service) wont be resolved assert self.validate_nginx_lb([self.web1_pod_ns1,self.web2_pod_ns1], service_ns1.cluster_ip, @@ -3034,6 +3085,7 @@ def test_ingress_rule_on_pod_with_service(self): % (self.web2_pod_ns2.name)) service_ns1 = self.setup_http_service(namespace=self.ns1.name, labels={'app2': 'common_label'}) + assert service_ns1.verify_on_setup() assert self.validate_nginx_lb([self.web1_pod_ns1,self.web2_pod_ns1], service_ns1.cluster_ip, test_pod=self.client1_pod_ns1) @@ -3047,6 +3099,7 @@ def test_ingress_rule_on_pod_with_service(self): namespace = self.ns1.name, policy_types = policy_types, ingress= ingress_list) + assert policy1.verify_on_setup() # Verify is policy works as expected assert self.validate_nginx_lb([self.web1_pod_ns1,self.web2_pod_ns1], service_ns1.cluster_ip, @@ -3120,6 +3173,8 @@ def test_egress_rule_on_pod_with_service(self): labels={'app': 'web_ns1'}) service_ns2 = self.setup_http_service(namespace=self.ns2.name, labels={'app': 'web_ns2'}) + assert service_ns1.verify_on_setup() + assert service_ns2.verify_on_setup() assert self.validate_nginx_lb([self.web1_pod_ns1,self.web2_pod_ns1], service_ns1.cluster_ip, test_pod=self.client1_pod_ns1) @@ -3140,6 +3195,7 @@ def test_egress_rule_on_pod_with_service(self): namespace = self.ns1.name, policy_types = policy_types, egress= egress_list) + assert policy1.verify_on_setup() # Verify if policy works as expected assert self.validate_nginx_lb([self.web1_pod_ns1,self.web2_pod_ns1], service_ns1.cluster_ip, @@ -3238,6 +3294,7 @@ def test_ingress_rule_on_namespace_with_k8s_ingress(self): % (self.web2_pod_ns2.name)) service_ns1 = self.setup_http_service(namespace=self.ns1.name, labels={'app': 'web_ns1'}) + assert service_ns1.verify_on_setup() k8s_ingress = 
self.setup_simple_nginx_ingress(service_ns1.name, namespace=self.ns1.name) assert k8s_ingress.verify_on_setup() @@ -3258,6 +3315,7 @@ def test_ingress_rule_on_namespace_with_k8s_ingress(self): namespace = self.ns1.name, policy_types = policy_types, ingress= ingress_list) + assert policy1.verify_on_setup() # Verify is policy works as expected assert self.validate_nginx_lb([self.web1_pod_ns1,self.web2_pod_ns1], k8s_ingress.cluster_ip, @@ -3321,6 +3379,7 @@ def test_ingress_rule_on_pod_with_k8s_ingress(self): % (self.web2_pod_ns2.name)) service_ns1 = self.setup_http_service(namespace=self.ns1.name, labels={'app': 'web_ns1'}) + assert service_ns1.verify_on_setup() k8s_ingress = self.setup_simple_nginx_ingress(service_ns1.name, namespace=self.ns1.name) assert k8s_ingress.verify_on_setup() @@ -3342,6 +3401,7 @@ def test_ingress_rule_on_pod_with_k8s_ingress(self): namespace = self.ns1.name, policy_types = policy_types, ingress= ingress_list) + assert policy1.verify_on_setup() # Verify is policy works as expected assert self.validate_nginx_lb([self.web1_pod_ns1,self.web2_pod_ns1], k8s_ingress.cluster_ip, @@ -3461,7 +3521,7 @@ def test_ingress_rule_on_namespace_with_k8s_ingress_fanout(self): namespace = self.ns1.name, policy_types = policy_types, ingress= ingress_list) - + assert policy1.verify_on_setup() # Now validate ingress from within the cluster network assert self.validate_nginx_lb([temp_pod1, temp_pod2], ingress.cluster_ip, test_pod=self.client1_pod_ns1, path=path1, host=host1, diff --git a/scripts/k8s_scripts/test_service.py b/scripts/k8s_scripts/test_service.py index 6a6f207b9..5c638c8b1 100644 --- a/scripts/k8s_scripts/test_service.py +++ b/scripts/k8s_scripts/test_service.py @@ -39,6 +39,7 @@ def test_service_1(self): assert self.verify_nginx_pod(pod1) assert self.verify_nginx_pod(pod2) assert pod3.verify_on_setup() + assert service.verify_on_setup() # Now validate load-balancing on the service assert self.validate_nginx_lb([pod1, pod2], service.cluster_ip, @@ -71,6 +72,7 @@ def test_service_with_type_loadbalancer(self): assert self.verify_nginx_pod(pod1) assert self.verify_nginx_pod(pod2) assert pod3.verify_on_setup() + assert service.verify_on_setup() # Now validate load-balancing on the service assert self.validate_nginx_lb([pod1, pod2], service.cluster_ip, @@ -112,6 +114,7 @@ def test_service_access_from_different_ns(self): assert self.verify_nginx_pod(pod1) assert self.verify_nginx_pod(pod2) assert pod3.verify_on_setup() + assert service.verify_on_setup() if self.setup_namespace_isolation: expectation = False @@ -142,6 +145,8 @@ def test_service_scale_up_down(self): client_pod = self.setup_busybox_pod(namespace=namespace.name) service = self.setup_http_service(namespace=namespace.name, labels=labels) + assert service.verify_on_setup() + for i in range(0, 10): pod = self.setup_nginx_pod(namespace=namespace.name, labels=labels) all_pods.append(pod) @@ -237,6 +242,7 @@ def test_service_with_external_ip(self): assert self.verify_nginx_pod(pod1) assert self.verify_nginx_pod(pod2) assert pod3.verify_on_setup() + assert service.verify_on_setup() # Now validate load-balancing on the service assert self.validate_nginx_lb([pod1, pod2], service.cluster_ip, diff --git a/serial_scripts/k8s_scripts/test_policy.py b/serial_scripts/k8s_scripts/test_policy.py index 63d16c0c8..de4ebb684 100644 --- a/serial_scripts/k8s_scripts/test_policy.py +++ b/serial_scripts/k8s_scripts/test_policy.py @@ -88,7 +88,7 @@ def test_ingress_policy_over_project_isolation(self): namespace = ns1_clients[3].name, 
policy_types = policy_types, ingress= ingress_list) - + assert policy1.verify_on_setup() # Verify that ingress policy works as expected assert ns1_clients[1].ping_with_certainty(ns1_clients[0].pod_ip) assert ns1_clients[0].ping_with_certainty(ns1_clients[1].pod_ip, @@ -162,7 +162,7 @@ def test_egress_policy_over_project_isolation(self): namespace = ns1_clients[3].name, policy_types = policy_types, egress= egress_list) - + assert policy1.verify_on_setup() # Verify that egress policy works as expected assert ns1_clients[1].ping_with_certainty(ns1_clients[0].pod_ip, expectation=False) @@ -487,6 +487,7 @@ def test_policy_kube_manager_restart(self): self.validate_reachability_pre_policy_common(pod_list_ns1, pod_list_ns2, pod_list_ns3) # Create a network Policy policy1 = self.create_update_policy_common(pod_list_ns1, pod_list_ns2, pod_list_ns3) + assert policy1.verify_on_setup() # Restart Kube manager self.restart_kube_manager() # Verify that policy works fine after restart @@ -517,6 +518,7 @@ def test_policy_vrouter_agent_restart(self): self.validate_reachability_pre_policy_common(pod_list_ns1, pod_list_ns2, pod_list_ns3) # Create a network Policy policy1 = self.create_update_policy_common(pod_list_ns1, pod_list_ns2, pod_list_ns3) + assert policy1.verify_on_setup() # Restart Vrouter agent self.restart_vrouter_agent() # Verify that policy works fine after restart @@ -546,6 +548,7 @@ def test_policy_rule_pod_restart(self): self.validate_reachability_pre_policy_common(pod_list_ns1, pod_list_ns2, pod_list_ns3) # Create a network Policy policy1 = self.create_update_policy_common(pod_list_ns1, pod_list_ns2, pod_list_ns3) + assert policy1.verify_on_setup() # Restart POD used as PodSelector in the rule assert self.restart_pod(self.client2_pod_ns3) # Verify that policy works fine after restart @@ -575,6 +578,7 @@ def test_policy_pod_restart(self): self.validate_reachability_pre_policy_common(pod_list_ns1, pod_list_ns2, pod_list_ns3) # Create a network Policy policy1 = self.create_update_policy_common(pod_list_ns1, pod_list_ns2, pod_list_ns3) + assert policy1.verify_on_setup() #Updating the policy self.create_update_policy_common(pod_list_ns1, pod_list_ns2, pod_list_ns3, update=True, policy_fixture=policy1) @@ -603,6 +607,7 @@ def test_policy_docker_restart(self): self.validate_reachability_pre_policy_common(pod_list_ns1, pod_list_ns2, pod_list_ns3) # Create a network Policy policy1 = self.create_update_policy_common(pod_list_ns1, pod_list_ns2, pod_list_ns3) + assert policy1.verify_on_setup() # Restart POD used as PodSelector in the rule self.inputs.restart_service(service_name = "docker", host_ips = self.inputs.k8s_slave_ips) @@ -636,6 +641,7 @@ def test_policy_kubelet_restart_on_slave(self): self.validate_reachability_pre_policy_common(pod_list_ns1, pod_list_ns2, pod_list_ns3) # Create a network Policy policy1 = self.create_update_policy_common(pod_list_ns1, pod_list_ns2, pod_list_ns3) + assert policy1.verify_on_setup() # Restart POD used as PodSelector in the rule self.inputs.restart_service(service_name = "kubelet", host_ips = self.inputs.k8s_slave_ips) @@ -668,6 +674,7 @@ def test_policy_kubelet_restart_on_master(self): self.validate_reachability_pre_policy_common(pod_list_ns1, pod_list_ns2, pod_list_ns3) # Create a network Policy policy1 = self.create_update_policy_common(pod_list_ns1, pod_list_ns2, pod_list_ns3) + assert policy1.verify_on_setup() # Restart POD used as PodSelector in the rule self.inputs.restart_service(service_name = "kubelet", host_ips = [self.inputs.k8s_master_ip]) diff --git 
a/serial_scripts/k8s_scripts/test_service.py b/serial_scripts/k8s_scripts/test_service.py
index 56b9d87b3..74d82bc4e 100644
--- a/serial_scripts/k8s_scripts/test_service.py
+++ b/serial_scripts/k8s_scripts/test_service.py
@@ -43,9 +43,11 @@ def test_service_with_kube_manager_restart(self):
                                       labels=labels)
         pod3 = self.setup_busybox_pod(namespace=namespace.name)
+
         assert self.verify_nginx_pod(pod1)
         assert self.verify_nginx_pod(pod2)
         assert pod3.verify_on_setup()
+        assert service.verify_on_setup()

         # Now validate load-balancing on the service
         assert self.validate_nginx_lb([pod1, pod2], service.cluster_ip,
diff --git a/tcutils/agent/vna_introspect_utils.py b/tcutils/agent/vna_introspect_utils.py
index d2a5db192..bed12d7e9 100644
--- a/tcutils/agent/vna_introspect_utils.py
+++ b/tcutils/agent/vna_introspect_utils.py
@@ -1320,6 +1320,44 @@ def get_vna_snat_port_config(self):
                 port_pool[protocol]['bound_port_list'].append(bound_port.text)
         return port_pool

+    def get_fw_policy(self, policy_fq_name = None):
+        '''
+        Get the list of all firewall policies created on the agent; if policy_fq_name is given, return only that policy.
+        '''
+        path = 'Snh_AclReq?'
+        xpath = './__AclResp_list/AclResp/acl_list'
+        p = self.dict_get(path)
+        fwPolicydict = EtreeToDict(xpath).get_all_entry(p)
+        fwPolicyList = fwPolicydict['acl_list']
+        if not policy_fq_name:
+            return fwPolicyList
+        else:
+            policy_index = [i for i, x in enumerate(fwPolicyList) if x['name']==policy_fq_name]
+            if policy_index:
+                return fwPolicyList[policy_index[0]]
+            else:
+                return []
+    # end get_fw_policy
+
+    def get_aps(self, aps_fq_name = None):
+        '''
+        Get the list of all Application Policy Sets created on the agent; if aps_fq_name is given, return only that APS.
+        '''
+        path = 'Snh_ApplicationPolicySetReq?'
+        xpath = './__ApplicationPolicySetResp_list/ApplicationPolicySetResp/application_policy_set_list'
+        p = self.dict_get(path)
+        apsdict = EtreeToDict(xpath).get_all_entry(p)
+        apsList = apsdict['application_policy_set_list']
+        if not aps_fq_name:
+            return apsList
+        else:
+            policy_index = [i for i, x in enumerate(apsList) if x['name']==aps_fq_name]
+            if policy_index:
+                return apsList[policy_index[0]]
+            else:
+                return []
+    # end get_aps
+
 if __name__ == '__main__':
     v = AgentInspect('10.204.217.198')
     import pdb; pdb.set_trace()
diff --git a/tcutils/config/kube_manager_introspect_utils.py b/tcutils/config/kube_manager_introspect_utils.py
deleted file mode 100755
index 3f8569885..000000000
--- a/tcutils/config/kube_manager_introspect_utils.py
+++ /dev/null
@@ -1,14 +0,0 @@
-import logging as LOG
-
-from tcutils.verification_util import *
-
-LOG.basicConfig(format='%(levelname)s: %(message)s', level=LOG.DEBUG)
-
-
-class KubeManagerInspect(VerificationUtilBase):
-
-    def __init__(self, ip, logger=LOG, args=None, port=8108):
-        super(KubeManagerInspect, self).__init__(
-            ip, port, XmlDrv, logger=logger, args=args)
-        self.ip = ip
-
diff --git a/tcutils/kubernetes/k8s_introspect_utils.py b/tcutils/kubernetes/k8s_introspect_utils.py
new file mode 100644
index 000000000..d092b0f1e
--- /dev/null
+++ b/tcutils/kubernetes/k8s_introspect_utils.py
@@ -0,0 +1,183 @@
+import logging as LOG
+from lxml import etree
+import re
+
+from tcutils.verification_util import *
+
+
+LOG.basicConfig(format='%(levelname)s: %(message)s', level=LOG.DEBUG)
+
+class KubeManagerInspect(VerificationUtilBase):
+
+    def __init__(self, ip, port=8108, logger=LOG, args=None):
+        super(KubeManagerInspect, self).__init__(ip, port, XmlDrv,
+                                                 logger=logger, args=args)
+        self.ip = ip
+
+    def _join(self, *args):
+        """Joins the args with ':'"""
+        return ':'.join(args)
+
+    def get_pod_list(self):
+        '''
+        Return the list of all Pods known to kube manager.
+
+        Return Info format:
+        [{'nodename': 'nodec61', 'name': 'ctest-nginx-pod-92002975', 'phase': 'Running', 'ip': '10.204.217.101',
+          'labels': {'map': {'element': 'ctest-namespace-02645930'}}, 'uuid': '456fad44-5d86-11e8-8b98-002590c55f6a'},
+         {'nodename': 'nodec60', 'name': 'ctest-nginx-pod-25952679', 'phase': 'Running', 'ip': '10.204.217.100',
+          'labels': {'map': {'element': 'ctest-namespace-02645930'}}, 'uuid': '456b97e5-5d86-11e8-8b98-002590c55f6a'},
+         {'phase': 'Running', 'ip': '10.204.217.100', 'nodename': 'nodec60', 'uuid': '457330fe-5d86-11e8-8b98-002590c55f6a',
+          'name': 'ctest-busybox-pod-87380339'}, {'nodename': 'nodec61', 'name': 'kube-dns-6f4fd4bdf-hbjss',
+          'phase': 'Running', 'ip': '10.204.217.101', 'labels': {'map': {'element': 'kube-system'}}, 'uuid': '6b8eee9a-5d2f-11e8-b80b-002590c55f6a'}]
+
+        Sample URL:
+        http://10.204.217.71:8108/Snh_PodDatabaseList?
+        '''
+        pod_list = None
+        try:
+            p = self.dict_get('Snh_PodDatabaseList?')
+            xpath = './PodDatabaseListResp/pods'
+            podsInstances = EtreeToDict(xpath).get_all_entry(p)
+            pod_list = podsInstances.get('pods', [])
+        except Exception as e:
+            print e
+        finally:
+            return pod_list
+
+    def get_pod_info(self, pod_uuid):
+        '''
+        Return detailed info about the given Pod.
+
+        Return Info format :
+        {'pod_node': 'nodec61', 'vm_interfaces': [{'vmi_uuid': '4611175a-5d86-11e8-9bc9-002590c55f6a'}],
+         'pod_labels': {'map': {'element': 'ctest-namespace-02645930'}}, 'uuid': '456fad44-5d86-11e8-8b98-002590c55f6a',
+         'node_ip': '10.204.217.101', 'owner': 'k8s', 'vrouter_uuid': {'VRUuid': {'vr_uuid': 'c140acf4-adab-40d4-ac0d-8a06c6f2e192'}},
+         'pod_namespace': 'ctest-namespace-02645930', 'annotations': {'map': {'element': '456fad44-5d86-11e8-8b98-002590c55f6a'}},
+         'name': 'ctest-nginx-pod-92002975__456fad44-5d86-11e8-8b98-002590c55f6a'}
+
+        Sample URL:
+        http://10.204.217.71:8108/Snh_VirtualMachineDatabaseList?x=456fad44-5d86-11e8-8b98-002590c55f6a
+        '''
+        podsInfo = None
+        try:
+            pod_req = 'Snh_VirtualMachineDatabaseList?x=%s' % pod_uuid
+            p = self.dict_get(pod_req)
+            xpath = './VirtualMachineDatabaseListResp/vms/list/VirtualMachineInstance'
+            podsInfo = EtreeToDict(xpath).get_all_entry(p)
+        except Exception as e:
+            print e
+        finally:
+            return podsInfo
+
+    def get_namespace_list(self):
+        '''
+        Return the list of all namespaces.
+
+        Return Info format:
+        [{'phase': 'Active', 'isolated': 'false', 'labels': {'map': None}, 'uuid': '693cb9d2-5d2f-11e8-b80b-002590c55f6a',
+          'name': 'contrail'}, {'phase': 'Active', 'isolated': 'false', 'labels': {'map': None},
+          'uuid': '65da2d5f-5d2f-11e8-b80b-002590c55f6a', 'name': 'kube-public'},
+         {'phase': 'Active', 'isolated': 'false', 'labels': {'map': {'element': 'default'}},
+          'uuid': '6376554d-5d2f-11e8-b80b-002590c55f6a', 'name': 'default'},
+         {'phase': 'Active', 'isolated': 'false', 'labels': {'map': None}, 'uuid': '64080bdd-5d2f-11e8-b80b-002590c55f6a',
+          'name': 'kube-system'}, {'phase': 'Active', 'isolated': 'false', 'labels': {'map': None},
+          'uuid': '439df569-5d86-11e8-8b98-002590c55f6a', 'name': 'ctest-namespace-02645930'}]
+
+        Sample URL:
+        http://10.204.217.71:8108/Snh_NamespaceDatabaseList?namespace_uuid=
+        '''
+        ns_list = None
+        try:
+            p = self.dict_get('Snh_NamespaceDatabaseList?namespace_uuid=')
+            xpath = './NamespaceDatabaseListResp/namespaces'
+            nsInstances = EtreeToDict(xpath).get_all_entry(p)
+            ns_list = nsInstances.get('namespaces', [])
+        except Exception as e:
+            print e
+        finally:
+            return ns_list
+
+    def get_namespace_info(self, ns_uuid):
+        '''
+        Return detailed info about the given Namespace.
+
+        Return Info format :
+        {'phase': 'Active', 'isolated': 'false', 'labels': {'map': {'element': 'default'}},
+         'uuid': '6376554d-5d2f-11e8-b80b-002590c55f6a', 'name': 'default'}
+
+        Sample URL:
+        http://10.204.217.71:8108/Snh_NamespaceDatabaseList?namespace_uuid=6376554d-5d2f-11e8-b80b-002590c55f6a
+        '''
+        nsInfo = None
+        try:
+            ns_req = 'Snh_NamespaceDatabaseList?namespace_uuid=%s' % ns_uuid
+            p = self.dict_get(ns_req)
+            xpath = './NamespaceDatabaseListResp/namespaces/list/NamespaceInstance'
+            nsInfo = EtreeToDict(xpath).get_all_entry(p)
+        except Exception as e:
+            print e
+        finally:
+            return nsInfo
+
+    def get_svc_or_ingress_lb_info(self, uuid):
+        '''
+        Return detailed info about the load balancer backing the given Service or Ingress.
+
+        Return Info format :
+        {'uuid_to_service': '4569ec7e-5d86-11e8-8b98-002590c55f6a',
+         'lb_listeners': [{'lb_listener_uuid': '30ad28ed-43ae-4755-a2df-833ce8562906'}],
+         'vm_interfaces': [{'vmi_uuid': '0f7128a9-705a-41d7-8aa0-50d618d20dfa'}],
+         'fq_name': ['default-domain', 'ctest-namespace-02645930', 'ctest-nginx-svc-48573694__4569ec7e-5d86-11e8-8b98-002590c55f6a'],
+         'name': 'ctest-nginx-svc-48573694__4569ec7e-5d86-11e8-8b98-002590c55f6a',
+         'external_ip': 'None', 'annotations': {'map': {'element': 'k8s'}}, 'selectors': None}
+
+        Sample URL:
+        http://10.204.217.71:8108/Snh_LoadbalancerDatabaseList?x=4569ec7e-5d86-11e8-8b98-002590c55f6a
+        '''
+        lbInfo = None
+        try:
+            lb_req = 'Snh_LoadbalancerDatabaseList?x=%s' % uuid
+            p = self.dict_get(lb_req)
+            xpath = './LoadbalancerDatabaseListResp/lbs/list/LoadbalancerInstance'
+            lbInfo = EtreeToDict(xpath).get_all_entry(p)
+        except Exception as e:
+            print e
+        finally:
+            return lbInfo
+
+    def get_network_policy_info(self, np_uuid):
+        '''
+        Return detailed info about the given Network Policy.
+
+        Return Info format :
+        {'name_space': 'default', 'uuid': '3855b8f9-5da1-11e8-8b98-002590c55f6a',
+         'spec_string': "{u'policyTypes': [u'Egress'], u'egress': [{u'ports': [{u'protocol': u'TCP', u'port': 80}]}],
+         u'podSelector': {u'matchLabels': {u'app': u'client1_ns1'}}}",
+         'spec': {'NetworkPolicySpec': {'ingress': None, 'egress': [{'toPolicy': None, 'ports': [{'protocol': 'TCP', 'port': '80'}]}],
+         'podSelector': {'NetworkPolicyLabelSelectors': {'matchLabels': {'map': {'element': 'client1_ns1'}}}}}},
+         'vnc_firewall_policy_fqname': 'default-policy-management:default-egress-ports-pod', 'name': 'egress-ports-pod'}
+
+        Sample URL:
+        http://10.204.217.52:8108/Snh_NetworkPolicyDatabaseList?network_policy_uuid=42735572-5d9d-11e8-8b98-002590c55f6a
+        '''
+        npInstance = None
+        try:
+            np_req = 'Snh_NetworkPolicyDatabaseList?network_policy_uuid=%s' % np_uuid
+            p = self.dict_get(np_req)
+            xpath = './NetworkPolicyDatabaseListResp/network_policies/list/NetworkPolicyInstance'
+            npInstance = EtreeToDict(xpath).get_all_entry(p)
+        except Exception as e:
+            print e
+        finally:
+            return npInstance
+
+if __name__ == '__main__':
+    k8s = KubeManagerInspect('10.204.217.52')
+    import pdb; pdb.set_trace()
+    v = k8s.get_pod_list()
+    #y = k8s.get_pod_info(pod_uuid = "456fad44-5d86-11e8-8b98-002590c55f6a")
+    z = k8s.get_namespace_list()
+    #zz = k8s.get_namespace_info(ns_uuid = "6376554d-5d2f-11e8-b80b-002590c55f6a")
+    #vv = k8s.get_svc_or_ingress_lb_info(uuid = "4088278e-5d9d-11e8-8b98-002590c55f6a")
+    p = k8s.get_network_policy_info(np_uuid = "3855b8f9-5da1-11e8-8b98-002590c55f6a")
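
For reviewers, a minimal standalone usage sketch of the new helper (the IP and
UUIDs below are placeholders, not values from this change; the lookup methods
return None when the object is not yet known to kube manager, which is why the
fixture checks above wrap them in @retry):

    from tcutils.kubernetes.k8s_introspect_utils import KubeManagerInspect

    # Point at the node running contrail-kube-manager; 8108 is the default
    # introspect port, now overridable via k8s_port / K8S_INTROSPECT_PORT.
    km = KubeManagerInspect('192.0.2.10', port=8108)

    # Bulk listings, as returned by Snh_PodDatabaseList / Snh_NamespaceDatabaseList
    for pod in km.get_pod_list() or []:
        print pod.get('name'), pod.get('phase')

    # Per-object lookups used by the new fixture verifications
    pod_info = km.get_pod_info(pod_uuid='<pod-uuid>')
    ns_info = km.get_namespace_info(ns_uuid='<namespace-uuid>')
    lb_info = km.get_svc_or_ingress_lb_info(uuid='<svc-or-ingress-uuid>')
    np_info = km.get_network_policy_info(np_uuid='<network-policy-uuid>')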