diff --git a/fixtures/k8s/deployment.py b/fixtures/k8s/deployment.py
index 09ed82414..3faf00e07 100644
--- a/fixtures/k8s/deployment.py
+++ b/fixtures/k8s/deployment.py
@@ -90,7 +90,7 @@ def delete(self):
         return self.k8s_client.delete_deployment(self.namespace, self.name)
     # end delete
 
-    @retry(delay=5, tries=40)
+    @retry(delay=5, tries=60)
     def verify_deployment_in_k8s(self):
         self.read()
         self.logger.debug('Replicas: %s, Available: %s' %(
diff --git a/serial_scripts/k8s_scripts/test_fabric_snat_restart_scenarios.py b/serial_scripts/k8s_scripts/test_fabric_snat_restart_scenarios.py
index 5e995831e..e7f57ca25 100644
--- a/serial_scripts/k8s_scripts/test_fabric_snat_restart_scenarios.py
+++ b/serial_scripts/k8s_scripts/test_fabric_snat_restart_scenarios.py
@@ -7,7 +7,7 @@
 from tcutils.util import get_random_name
 import test
 import time
-
+from tcutils.contrail_status_check import ContrailStatusChecker
 
 
 class TestFabricSNATRestarts(BaseK8sTest):
@@ -207,7 +207,6 @@ def test_snat_with_kubelet_restart_on_master(self):
                                                                    client3, client4)
     #end test_snat_with_kubelet_restart_on_master
 
-    @test.attr(type=['k8s_sanity'])
     @preposttest_wrapper
     def test_snat_with_docker_restart_on_master(self):
         """
@@ -222,7 +221,10 @@ def test_snat_with_docker_restart_on_master(self):
                                                                    client3, client4)
         self.inputs.restart_service(service_name = "docker",
                                     host_ips = [self.inputs.k8s_master_ip])
-        time.sleep(60) # Wait timer for all contrail service to come up.
+        time.sleep(30)
+        cluster_status, error_nodes = ContrailStatusChecker(self.inputs).wait_till_contrail_cluster_stable()
+        assert cluster_status, 'All nodes and services not up. Failure nodes are: %s' % (
+            error_nodes)
         self.verify_ping_between_pods_across_namespaces_and_public_network(client1, client2,
                                                                            client3, client4)
     #end test_snat_with_docker_restart
diff --git a/serial_scripts/k8s_scripts/test_ingress.py b/serial_scripts/k8s_scripts/test_ingress.py
index 4ac02d8cf..2fd1aad3c 100644
--- a/serial_scripts/k8s_scripts/test_ingress.py
+++ b/serial_scripts/k8s_scripts/test_ingress.py
@@ -268,7 +268,6 @@ def test_ingress_fanout_with_vrouter_agent_restart(self):
         assert self.validate_nginx_lb([pod3, pod4], ingress.cluster_ip,
                                       test_pod=pod5, path=path2, host=host2)
 
-    @test.attr(type=['k8s_sanity'])
     @skip_because(mx_gw = False)
     @preposttest_wrapper
     def test_ingress_fanout_with_node_reboot(self):
@@ -276,7 +275,7 @@ def test_ingress_fanout_with_node_reboot(self):
         This host are supported by repective service. Service has required backend pod with required path mentioned in ingress rule.
         From the local node, do a wget on the ingress public ip
         Validate that service and its loadbalancing works.
-        Reboot the nodes
+        Reboot the compute nodes
         Re verify the loadbalancing works after the nodes reboot
         '''
         app1 = 'http_test1'
@@ -354,7 +353,13 @@ def test_ingress_fanout_with_node_reboot(self):
         # Now validate ingress from public network
         assert self.validate_nginx_lb([pod1, pod2], ingress.external_ips[0], path=path1, host=host1)
         assert self.validate_nginx_lb([pod3, pod4], ingress.external_ips[0], path=path2, host=host2)
-        self.restart_vrouter_agent()
+        for node in self.inputs.k8s_slave_ips:
+            self.inputs.reboot(node)
+        time.sleep(60) # Noticed that services are not ready after reboot. Thus, giving some time for service
+        # like docker, kubelet and contrail services to start
+        cluster_status, error_nodes = ContrailStatusChecker(self.inputs).wait_till_contrail_cluster_stable(
+            nodes=self.inputs.compute_ips, roles="vrouter")
+        assert cluster_status, 'Cluster is not stable after restart'
         assert self.validate_nginx_lb([pod1, pod2], ingress.cluster_ip,
                                       test_pod=pod5, path=path1, host=host1)
         assert self.validate_nginx_lb([pod3, pod4], ingress.cluster_ip,
diff --git a/serial_scripts/k8s_scripts/test_policy.py b/serial_scripts/k8s_scripts/test_policy.py
index 04a11ff6d..f5540a9a1 100644
--- a/serial_scripts/k8s_scripts/test_policy.py
+++ b/serial_scripts/k8s_scripts/test_policy.py
@@ -664,7 +664,6 @@ def test_policy_kubelet_restart_on_slave(self):
         self.verify_policy_post_modification_common(pod_list_ns1, pod_list_ns2, pod_list_ns3)
     #end test_policy_kubelet_restart_on_slave
 
-    @test.attr(type=['k8s_sanity'])
     @preposttest_wrapper
     def test_policy_kubelet_restart_on_master(self):
         """
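
The recurring change in this diff is to replace long fixed sleeps after a service restart or node reboot with an explicit poll of contrail-status via ContrailStatusChecker.wait_till_contrail_cluster_stable(). The sketch below is not part of the patch; it only illustrates, under assumptions, how the calls introduced above fit together. It assumes a test class that exposes self.inputs with restart_service(), as the k8s test cases here do, and the helper name restart_and_wait_for_stable is hypothetical.

    # Illustrative sketch only (not part of the patch): restart a service, then
    # poll contrail-status instead of relying on a bare time.sleep(60).
    # Assumes self.inputs is the object ContrailStatusChecker and
    # restart_service() already accept in these tests.
    import time

    from tcutils.contrail_status_check import ContrailStatusChecker


    class RestartStabilityMixin(object):  # hypothetical helper, for illustration

        def restart_and_wait_for_stable(self, service_name, host_ips, settle_time=30):
            # Restart the service on the given hosts, e.g. "docker" on the k8s master.
            self.inputs.restart_service(service_name=service_name, host_ips=host_ips)
            # Short grace period so the restart has actually begun before polling.
            time.sleep(settle_time)
            # Poll contrail-status until all nodes and services report up, rather
            # than guessing a fixed sleep that may be too short or too long.
            cluster_status, error_nodes = ContrailStatusChecker(
                self.inputs).wait_till_contrail_cluster_stable()
            assert cluster_status, (
                'All nodes and services not up. Failure nodes are: %s' % (error_nodes,))

After compute node reboots (test_ingress_fanout_with_node_reboot above), the patch narrows the same check to the affected role by calling wait_till_contrail_cluster_stable(nodes=self.inputs.compute_ips, roles="vrouter").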