Skip to content

Commit

Permalink
Adding new testcases for reboot/restart scenarios; currently these
Browse files Browse the repository at this point in the history
cases will fail due to open bug 1750354.
Closes-Bug: #1750571

Change-Id: Iefeb13da4210c2599427077f4cc79f2698a506d1
  • Loading branch information
vvelpula committed Feb 21, 2018
1 parent 2b56289 commit cd6b1cf
Show file tree
Hide file tree
Showing 2 changed files with 221 additions and 4 deletions.
192 changes: 192 additions & 0 deletions serial_scripts/k8s_scripts/test_ingress.py
Expand Up @@ -2,6 +2,7 @@
from common.k8s.base import BaseK8sTest
from k8s.ingress import IngressFixture
from tcutils.wrappers import preposttest_wrapper
from tcutils.contrail_status_check import ContrailStatusChecker


class TestIngress(BaseK8sTest):
Expand Down Expand Up @@ -66,3 +67,194 @@ def test_ingress_with_kube_manager_restart(self):
assert self.validate_nginx_lb([pod1, pod2], ingress.external_ips[0])
# end test_ingress_with_kube_manager_restart


@preposttest_wrapper
def test_ingress_fanout_with_vrouter_agent_restart(self):
    '''Create a fanout ingress with 2 different hosts, each with its own
    path, along with a default backend.
    Each host is served by its respective service; each service has the
    required backend pods serving the path mentioned in the ingress rule.
    From the local node, do a wget on the ingress public ip.
    Validate that the service and its loadbalancing work.
    Restart the contrail-vrouter-agent on every compute node.
    Re-verify that loadbalancing works after the vrouter agent restart.
    '''

    app1 = 'http_test1'
    app2 = 'http_test2'
    labels1 = {'app':app1}
    labels2 = {'app':app2}
    service_name1 = 's1'
    service_name2 = 's2'
    path1 = 'foo'
    path2 = 'bar'
    host1 = 'foo.bar.com'
    host2 = 'bar.foo.com'
    ingress_name = 'testingress'
    namespace = self.setup_namespace(name='default')
    assert namespace.verify_on_setup()

    # One HTTP service per app label; the label selects the backend pods.
    service1 = self.setup_http_service(namespace=namespace.name,
                                       labels=labels1,
                                       name=service_name1)

    service2 = self.setup_http_service(namespace=namespace.name,
                                       labels=labels2,
                                       name=service_name2)

    # Two nginx backend pods per service (pod1/pod2 -> s1, pod3/pod4 -> s2).
    pod1 = self.setup_nginx_pod(namespace=namespace.name,
                                labels=labels1)
    pod2 = self.setup_nginx_pod(namespace=namespace.name,
                                labels=labels1)
    pod3 = self.setup_nginx_pod(namespace=namespace.name,
                                labels=labels2)
    pod4 = self.setup_nginx_pod(namespace=namespace.name,
                                labels=labels2)

    # Fanout rules: host1/path1 -> s1:80, host2/path2 -> s2:80.
    rules = [{'host': host1,
              'http': {'paths': [{
                  'path':'/'+path1,
                  'backend': { 'service_name': service_name1,
                               'service_port': 80
                             }
              }]
              }
             },
             {'host': host2,
              'http': {'paths': [{
                  'path': '/'+path2,
                  'backend': { 'service_name': service_name2,
                               'service_port': 80
                             }
              }]
              }
             }]
    # Requests matching no rule fall through to s1:80.
    default_backend = {'service_name': service_name1,
                       'service_port': 80}

    ingress = self.setup_ingress(name=ingress_name,
                                 namespace=namespace.name,
                                 rules=rules,
                                 default_backend=default_backend)
    assert ingress.verify_on_setup()

    # Busybox pod used as an in-cluster client for the wget checks.
    pod5 = self.setup_busybox_pod(namespace=namespace.name)
    self.verify_nginx_pod(pod1,path=path1)
    self.verify_nginx_pod(pod2,path=path1)
    self.verify_nginx_pod(pod3,path=path2)
    self.verify_nginx_pod(pod4,path=path2)

    assert pod5.verify_on_setup()

    # Now validate ingress from within the cluster network
    assert self.validate_nginx_lb([pod1, pod2], ingress.cluster_ip,
                                  test_pod=pod5, path=path1, host=host1)
    assert self.validate_nginx_lb([pod3, pod4], ingress.cluster_ip,
                                  test_pod=pod5, path=path2, host=host2)

    # Now validate ingress from public network
    assert self.validate_nginx_lb([pod1, pod2], ingress.external_ips[0], path=path1, host=host1)
    assert self.validate_nginx_lb([pod3, pod4], ingress.external_ips[0], path=path2, host=host2)
    # Restart the vrouter agent on every compute, then wait for the
    # contrail cluster to report stable before re-validating.
    for compute_ip in self.inputs.compute_ips:
        self.inputs.restart_service('contrail-vrouter-agent',[compute_ip],
                                    container='agent')
    cluster_status, error_nodes = ContrailStatusChecker().wait_till_contrail_cluster_stable()
    assert cluster_status, 'Cluster is not stable after restart'
    # Re-verify in-cluster loadbalancing still works post agent restart.
    assert self.validate_nginx_lb([pod1, pod2], ingress.cluster_ip,
                                  test_pod=pod5, path=path1, host=host1)
    assert self.validate_nginx_lb([pod3, pod4], ingress.cluster_ip,
                                  test_pod=pod5, path=path2, host=host2)


@preposttest_wrapper
def test_ingress_fanout_with_node_reboot(self):
    '''Create a fanout ingress with two hosts, each routed through its own
    path to its own service, plus a default backend.
    Each service is backed by two nginx pods serving the path named in the
    ingress rule. Validate load-balancing through the ingress from inside
    the cluster and from the public network, reboot every compute node,
    wait for the contrail cluster to stabilize, then re-validate that
    load-balancing still works.
    '''
    labels1 = {'app': 'http_test1'}
    labels2 = {'app': 'http_test2'}
    service_name1, service_name2 = 's1', 's2'
    path1, path2 = 'foo', 'bar'
    host1, host2 = 'foo.bar.com', 'bar.foo.com'
    ingress_name = 'testingress'

    namespace = self.setup_namespace(name='default')
    assert namespace.verify_on_setup()

    # One HTTP service per app label.
    service1 = self.setup_http_service(namespace=namespace.name,
                                       labels=labels1,
                                       name=service_name1)
    service2 = self.setup_http_service(namespace=namespace.name,
                                       labels=labels2,
                                       name=service_name2)

    # Two nginx backend pods per service, created in the same order as
    # before (pod1/pod2 back s1, pod3/pod4 back s2).
    pod1, pod2, pod3, pod4 = [
        self.setup_nginx_pod(namespace=namespace.name, labels=lbl)
        for lbl in (labels1, labels1, labels2, labels2)]

    def _fanout_rule(host, path, svc_name):
        # Single ingress rule: http://<host>/<path> -> <svc_name>:80
        return {'host': host,
                'http': {'paths': [{
                    'path': '/' + path,
                    'backend': {'service_name': svc_name,
                                'service_port': 80}
                }]}}

    rules = [_fanout_rule(host1, path1, service_name1),
             _fanout_rule(host2, path2, service_name2)]
    # Requests matching no rule fall through to s1:80.
    default_backend = {'service_name': service_name1,
                       'service_port': 80}

    ingress = self.setup_ingress(name=ingress_name,
                                 namespace=namespace.name,
                                 rules=rules,
                                 default_backend=default_backend)
    assert ingress.verify_on_setup()

    # Busybox pod used as an in-cluster client for the wget checks.
    pod5 = self.setup_busybox_pod(namespace=namespace.name)
    for backend_pod, backend_path in ((pod1, path1), (pod2, path1),
                                      (pod3, path2), (pod4, path2)):
        self.verify_nginx_pod(backend_pod, path=backend_path)

    assert pod5.verify_on_setup()

    # Validate ingress from within the cluster network.
    assert self.validate_nginx_lb([pod1, pod2], ingress.cluster_ip,
                                  test_pod=pod5, path=path1, host=host1)
    assert self.validate_nginx_lb([pod3, pod4], ingress.cluster_ip,
                                  test_pod=pod5, path=path2, host=host2)

    # Validate ingress from the public network.
    assert self.validate_nginx_lb([pod1, pod2], ingress.external_ips[0],
                                  path=path1, host=host1)
    assert self.validate_nginx_lb([pod3, pod4], ingress.external_ips[0],
                                  path=path2, host=host2)

    # Reboot each compute node, pausing briefly after each, then wait
    # until the contrail cluster reports stable.
    for compute_ip in self.inputs.compute_ips:
        self.inputs.reboot(compute_ip)
        self.sleep(10)
    cluster_status, error_nodes = \
        ContrailStatusChecker().wait_till_contrail_cluster_stable()
    assert cluster_status, 'Cluster is not stable after restart'

    # Re-verify in-cluster loadbalancing still works after the reboots.
    assert self.validate_nginx_lb([pod1, pod2], ingress.cluster_ip,
                                  test_pod=pod5, path=path1, host=host1)
    assert self.validate_nginx_lb([pod3, pod4], ingress.cluster_ip,
                                  test_pod=pod5, path=path2, host=host2)

33 changes: 29 additions & 4 deletions serial_scripts/k8s_scripts/test_isolation.py
@@ -1,8 +1,8 @@
from common.k8s.base import BaseK8sTest
from tcutils.wrappers import preposttest_wrapper
from time import sleep

from tcutils.util import get_random_name
from tcutils.contrail_status_check import ContrailStatusChecker

class TestNSIsolationSerial(BaseK8sTest):

Expand Down Expand Up @@ -80,7 +80,7 @@ def setup_common_namespaces_pods(self, prov_service = False, prov_ingress = Fals
@preposttest_wrapper
def test_pods_isolation_post_kube_manager_restart(self):
"""
This test case verifies the connectivity between pods of different namespaces with
This test case verifies the connectivity between pods of different namespaces with
namespace isolation enabled post restart of contrail-kube-manager
Verify:
1. Pods in other namespaces in the Kubernetes cluster will NOT be able to reach pods in the isolated namespace.
Expand All @@ -105,7 +105,7 @@ def test_pods_isolation_post_kube_manager_restart(self):
@preposttest_wrapper
def test_service_isolation_post_kube_manager_restart(self):
"""
This test case verifies the connectivity between pods and service of different namespaces with
This test case verifies the connectivity between pods and service of different namespaces with
namespace isolation enabled post restart of contrail-kube-manager
Verify:
1. Pods in isolated namespace will be able to reach ALL Services created in any namespace in the kubernetes cluster.
Expand Down Expand Up @@ -156,6 +156,32 @@ def test_ingress_isolation_post_kube_manager_restart(self):
assert self.validate_nginx_lb([client1[0], client1[1]], client1[5].external_ips[0])
#end test_ingress_isolation_post_kube_manager_restart

@preposttest_wrapper
def test_ingress_isolation_vrouter_agent_restart(self):
    """
    This test case verifies ingress operations post restart of vrouter-agent
    Verify:
    1. This test case verifies the connectivity to ingress existing in isolated namespace
    2. Also verifies connectivity of ingress existing in an non isolated namespace from pod in isolated namespace
    Restart vrouter-agent and verify both the points again
    """
    # client1/client2/client3 are fixture tuples from the shared setup;
    # indices used below: [0]/[1] backend pods, [2] client pod,
    # [4] namespace fixture, [5] ingress fixture — per setup_common_namespaces_pods.
    client1, client2, client3 = self.setup_common_namespaces_pods(prov_service = True,
                                                                  prov_ingress = True)
    # Reach client3's ingress cluster IP from a pod in client1's namespace.
    assert self.validate_nginx_lb([client3[0], client3[1]], client3[5].cluster_ip,
                                  test_pod=client1[2])
    # Drop isolation on client1's namespace before checking its public VIP.
    client1[4].disable_service_isolation()
    assert self.validate_nginx_lb([client1[0], client1[1]], client1[5].external_ips[0])
    # Restart the vrouter agent on every compute node.
    for compute_ip in self.inputs.compute_ips:
        self.inputs.restart_service('contrail-vrouter-agent',[compute_ip],
                                    container='agent')
    cluster_status, error_nodes = ContrailStatusChecker().wait_till_contrail_cluster_stable()
    assert cluster_status, 'Cluster is not stable after restart'
    # Short settle time before re-validating datapath post restart.
    self.sleep(5)
    assert self.validate_nginx_lb([client3[0], client3[1]], client3[5].cluster_ip,
                                  test_pod=client1[2])
    assert self.validate_nginx_lb([client1[0], client1[1]], client1[5].external_ips[0])


class TestCustomIsolationSerial(BaseK8sTest):

@classmethod
Expand Down Expand Up @@ -406,7 +432,6 @@ def test_reachability_across_projects_with_isolated_namespace(self):
"""
client1, client2 = self.setup_common_namespaces_pods(prov_service = True,
isolation = True)
import pdb;pdb.set_trace()
# Reachability of Pods
assert client1[2].ping_to_ip(client1[0].pod_ip)
assert client2[2].ping_to_ip(client2[0].pod_ip)
Expand Down

0 comments on commit cd6b1cf

Please sign in to comment.