/
base.py
1144 lines (1051 loc) · 44.9 KB
/
base.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
from fabric.api import local, settings
import time
import re
import test
import ipaddress
import vnc_api_test
import uuid
from tcutils.util import get_random_name, retry
from tcutils.gevent_lib import exec_in_parallel
from tcutils.verification_util import *
from lxml import etree
from k8s.pod import PodFixture
from k8s.service import ServiceFixture
from k8s.ingress import IngressFixture
from k8s.namespace import NamespaceFixture
from k8s.tls_secret import TLSSecretFixture
from k8s.deployment import DeploymentFixture
from k8s.network_policy import NetworkPolicyFixture
from common.connections import ContrailConnections
from common import create_public_vn
from common.base import GenericTestBase
from vn_test import VNFixture
import gevent
# FQ name of the IPAM that kube-manager uses for kubernetes service networks.
K8S_SERVICE_IPAM = ['default-domain', 'default', 'service-ipam']
# Name of the shared public VN created in setUpClass for external access tests.
K8S_PUBLIC_VN_NAME = '__public__'
# Name of the floating-IP pool carved out of the public VN.
K8S_PUBLIC_FIP_POOL_NAME = '__fip_pool_public__'
class BaseK8sTest(GenericTestBase, vnc_api_test.VncLibFixture):
@classmethod
def setUpClass(cls):
    """One-time setup shared by all k8s test cases in this class.

    Builds an admin-scoped ContrailConnections object, caches the
    frequently used handles (VNC lib, agent/control-node/analytics
    introspects, k8s client) on the class, and creates the shared
    public VN plus floating-IP pool used by service/ingress tests.
    """
    super(BaseK8sTest, cls).setUpClass()
    # All fixtures created through this base class share these
    # admin-tenant connections.
    cls.connections = ContrailConnections(cls.inputs,
                                          project_name=cls.inputs.admin_tenant,
                                          username=cls.inputs.admin_username,
                                          password=cls.inputs.admin_password,
                                          logger=cls.logger)
    cls.vnc_lib_fixture = cls.connections.vnc_lib_fixture
    cls.vnc_lib = cls.connections.vnc_lib
    cls.vnc_h = cls.vnc_lib_fixture.vnc_h
    cls.agent_inspect = cls.connections.agent_inspect
    cls.cn_inspect = cls.connections.cn_inspect
    cls.analytics_obj = cls.connections.analytics_obj
    cls.api_s_inspect = cls.connections.api_server_inspect
    cls.logger = cls.connections.logger
    cls.k8s_client = cls.connections.k8s_client
    # Subclasses flip these to exercise isolated-namespace scenarios
    # (see setup_namespace()).
    cls.setup_namespace_isolation = False
    cls.setup_custom_isolation = False
    # Shared public VN + FIP pool, created through the contrail API.
    cls.public_vn = create_public_vn.PublicVn(connections=cls.connections,
                                              public_vn=K8S_PUBLIC_VN_NAME,
                                              public_tenant=cls.inputs.admin_tenant,
                                              logger=cls.logger,
                                              fip_pool_name=K8S_PUBLIC_FIP_POOL_NAME,
                                              api_option='contrail')
# end setUpClass
@classmethod
def tearDownClass(cls):
    """One-time teardown; fixture cleanup is driven by the parent class."""
    super(BaseK8sTest, cls).tearDownClass()
# end tearDownClass
def setup_http_service(self,
                       name=None,
                       namespace='default',
                       labels=None,
                       metadata=None,
                       spec=None,
                       type=None,
                       external_ips=None,
                       frontend_port=80,
                       nodePort=None,
                       backend_port=80):
    '''
    Create a kubernetes Service via ServiceFixture.

    Noticed that nginx continues to listen on port 80 even if target port
    is different. So, recommended not to change backend_port for now.
    '''
    metadata = metadata or {}
    spec = spec or {}
    labels = labels or {}
    name = name or get_random_name('nginx-svc')
    metadata['name'] = name
    port_entry = {
        'protocol': 'TCP',
        'port': int(frontend_port),
        'targetPort': int(backend_port),
    }
    if nodePort:
        port_entry['nodePort'] = int(nodePort)
    spec['ports'] = [port_entry]
    if labels:
        spec['selector'] = labels
    if type:
        spec['type'] = type
    if external_ips:
        # kubernetes python client snake-cases 'externalIPs' as
        # 'external_i_ps'.
        spec['external_i_ps'] = external_ips
    return self.useFixture(ServiceFixture(
        connections=self.connections,
        name=name,
        namespace=namespace,
        metadata=metadata,
        spec=spec))
# end setup_http_service
def setup_simple_nginx_ingress(self,
                               service_name,
                               name=None,
                               namespace='default',
                               service_port=80,
                               **kwargs):
    '''Create an ingress whose default backend is the given service/port.'''
    backend = {
        'service_name': service_name,
        'service_port': service_port,
    }
    return self.setup_ingress(name=name,
                              namespace=namespace,
                              default_backend=backend,
                              **kwargs)
# end setup_simple_nginx_ingress
def setup_ingress(self,
                  name=None,
                  namespace='default',
                  metadata=None,
                  default_backend=None,
                  rules=None,
                  spec=None,
                  **kwargs):
    '''
    A very basic helper method to create an ingress.
    Only the 'tls' keyword argument is consumed from kwargs.
    '''
    metadata = {} if metadata is None else metadata
    spec = {} if spec is None else spec
    default_backend = {} if default_backend is None else default_backend
    rules = [] if rules is None else rules
    tls = kwargs.get('tls', None)
    name = name or get_random_name('nginx-ingress')
    metadata['name'] = name
    if default_backend:
        spec['backend'] = default_backend
    if rules:
        spec['rules'] = rules
    if tls:
        spec['tls'] = tls
    return self.useFixture(IngressFixture(
        connections=self.connections,
        name=name,
        namespace=namespace,
        metadata=metadata,
        spec=spec,
        tls=tls))
# end setup_ingress
def setup_namespace(self,
                    name=None,
                    isolation=None,
                    ip_fabric_snat=None,
                    ip_fabric_forwarding=None,
                    custom_isolation=False,
                    fq_network_name=None):
    """Create a NamespaceFixture.

    :param isolation: falls back to the class-level
        setup_namespace_isolation flag when not given.
    :param custom_isolation: when True, the namespace is backed by the
        VN named by fq_network_name.
    """
    isolation = isolation or self.setup_namespace_isolation
    # NOTE: the previous version set an unused local ('vn_fq_name = None')
    # when custom_isolation was False; that dead assignment is removed.
    return self.useFixture(NamespaceFixture(
        connections=self.connections,
        name=name, isolation=isolation,
        ip_fabric_snat=ip_fabric_snat,
        ip_fabric_forwarding=ip_fabric_forwarding,
        custom_isolation=custom_isolation,
        fq_network_name=fq_network_name))
# end create_namespace
def setup_pod(self,
              name=None,
              namespace='default',
              metadata=None,
              spec=None,
              labels=None,
              custom_isolation=False,
              fq_network_name=None,
              **kwargs):
    """Create a PodFixture; extra kwargs are passed through to the fixture.

    Any labels given are merged into metadata['labels'].
    """
    name = name or get_random_name('pod')
    metadata = metadata or {}
    spec = spec or {}
    labels = labels or {}
    # The default used to be a literal '{}', a shared mutable default that
    # persists mutations across calls; normalize from None instead.
    if fq_network_name is None:
        fq_network_name = {}
    metadata['name'] = metadata.get('name') or name
    if labels:
        metadata['labels'] = metadata.get('labels', {})
        metadata['labels'].update(labels)
    return self.useFixture(PodFixture(
        connections=self.connections,
        namespace=namespace,
        metadata=metadata,
        spec=spec,
        custom_isolation=custom_isolation,
        fq_network_name=fq_network_name,
        **kwargs))
# end setup_pod
def setup_nginx_pod(self,
                    name=None,
                    namespace='default',
                    metadata=None,
                    container_port=80,
                    labels=None,
                    spec=None,
                    custom_isolation=False,
                    fq_network_name=None):
    '''
    Create an nginx pod.
    Noticed that nginx continues to listen on port 80 even if target port
    (container_port) is different
    '''
    metadata = metadata or {}
    labels = labels or {}
    # Mutable-default fix: default used to be a shared '{}' literal.
    if fq_network_name is None:
        fq_network_name = {}
    name = name or get_random_name('nginx-pod')
    if labels:
        metadata['labels'] = metadata.get('labels', {})
        metadata['labels'].update(labels)
    # A redundant 'spec = spec or {}' preceded this; an empty dict is falsy
    # and fell through to this default anyway, so it is dropped.
    spec = spec or {
        'containers': [
            {'image': 'nginx',
             'ports': [
                 {'container_port': int(container_port)}
             ],
             }
        ]
    }
    return self.setup_pod(name=name,
                          namespace=namespace,
                          metadata=metadata,
                          spec=spec,
                          shell='/bin/bash',
                          custom_isolation=custom_isolation,
                          fq_network_name=fq_network_name)
# end setup_nginx_pod
def verify_nginx_pod(self, pod, path=None):
    '''Verify the pod came up and seed its nginx docroot with the pod name.

    When 'path' is given the index page is also copied to that path so the
    pod can be identified via http://<ip>/<path>.
    '''
    result = pod.verify_on_setup()
    if result:
        pod.run_cmd('echo %s > /usr/share/nginx/html/index.html' % (pod.name))
        if path:
            copy_cmd = ("cp /usr/share/nginx/html/index.html "
                        "/usr/share/nginx/html/%s" % (path))
            pod.run_cmd(copy_cmd)
    return result
# end verify_nginx_pod
def setup_busybox_pod(self,
                      name=None,
                      namespace='default',
                      metadata=None,
                      spec=None,
                      labels=None,
                      custom_isolation=False,
                      fq_network_name=None):
    """Create a long-sleeping busybox pod, handy as a traffic source."""
    metadata = metadata or {}
    labels = labels or {}
    # Mutable-default fix: default used to be a shared '{}' literal.
    if fq_network_name is None:
        fq_network_name = {}
    name = name or get_random_name('busybox-pod')
    # A redundant 'spec = spec or {}' preceded this; an empty dict fell
    # through to this default anyway, so it is dropped.
    spec = spec or {
        'containers': [
            {'image': 'busybox',
             'command': ['sleep', '1000000'],
             'image_pull_policy': 'IfNotPresent',
             }
        ],
        'restart_policy': 'Always',
    }
    return self.setup_pod(name=name,
                          namespace=namespace,
                          metadata=metadata,
                          spec=spec,
                          labels=labels,
                          shell='/bin/sh',
                          custom_isolation=custom_isolation,
                          fq_network_name=fq_network_name)
# end setup_busybox_pod
def setup_ubuntuapp_pod(self,
                        name=None,
                        namespace='default',
                        metadata=None,
                        spec=None,
                        labels=None):
    """Create a long-sleeping ubuntu-upstart pod."""
    metadata = metadata or {}
    labels = labels or {}
    name = name or get_random_name('ubuntuapp-pod')
    # A redundant 'spec = spec or {}' preceded this; an empty dict fell
    # through to this default anyway, so it is dropped.
    spec = spec or {
        'containers': [
            {'image': 'ubuntu-upstart',
             'command': ['sleep', '1000000'],
             'image_pull_policy': 'IfNotPresent',
             }
        ],
        'restart_policy': 'Always',
    }
    return self.setup_pod(name=name,
                          namespace=namespace,
                          metadata=metadata,
                          spec=spec,
                          labels=labels,
                          shell='/bin/sh')
# end setup_ubuntuapp_pod
@retry(delay=1, tries=5)
def validate_wget(self, pod, link, expectation=True, **kwargs):
    """Check (with retries) that wget of 'link' from 'pod' matches expectation.

    :param expectation: True if the fetch should succeed, False if it
        should fail. Extra kwargs go to do_wget().
    :returns: True when the observed result equals the expectation.
    """
    ret_val = self.do_wget(link, pod=pod, **kwargs)
    result = ret_val == expectation
    # Typo fix: messages used to read 'wget check of of %s'.
    if result:
        self.logger.info('wget check of %s from %s passed' % (link,
                                                              pod.name))
    else:
        self.logger.warn('wget check of %s from %s failed' % (link,
                                                              pod.name))
    return result
# end validate_wget
def do_wget(self, link, pod=None, output_file='/dev/null', host='',
            timeout=5, return_output=False, tries=1,
            cert=None):
    '''
    Run wget against 'link', either locally (pod=None) or inside a pod.
    Success is judged by a '100%' progress marker in wget's output.
    Returns boolean by default
    Returns (boolean, output) if return_output is True
    '''
    host_str = ''
    cert_str = ''
    output = ''
    if host:
        # Override the Host header, e.g. for virtual-host ingress checks.
        host_str = '--header "Host:%s" ' % (host)
    if 'https' in link and not cert:
        cert_str = ' --no-check-certificate'
    cmd = 'wget %s %s -O %s -T %s -t %s %s' % (link, host_str, output_file,
                                               timeout, tries, cert_str)
    if not pod:
        with settings(warn_only=True):
            output = local(cmd, capture=True)
        pod_str = 'local'
    else:
        output = pod.run_cmd(cmd, shell='/bin/sh -l -c')
        pod_str = 'Pod %s' % (pod.name)
    # Log-format fix: pod_str already carries the 'Pod ' prefix (or is
    # 'local'), so the old '[Pod %s]' format printed '[Pod Pod x]'.
    if '100%' in output:
        self.logger.debug('[%s] Cmd %s passed' % (pod_str, cmd))
        self.logger.debug('[%s] Cmd output: %s' % (pod_str, output))
        result = True
    else:
        self.logger.debug('[%s] Cmd %s failed. Output :%s' % (pod_str,
                                                              cmd, output))
        self.logger.debug('[%s] Cmd output: %s' % (pod_str, output))
        result = False
    if return_output:
        return (result, output)
    else:
        return result
# end do_wget
@retry(delay=1, tries=5)
def validate_nginx_lb(self,
                      lb_pods,
                      service_ip,
                      test_pod=None,
                      host=None,
                      path='',
                      port='80',
                      nodePort=None,
                      barred_pods=None,
                      protocol=None,
                      cert=None,
                      expectation=True):
    '''
    From test_pod , run wget on http://<service_ip>:<port> and check
    if the all the lb_pods respond to atleast one of the requests over
    5*len(lb_pods) attempts
    barred_pods : pods where the http requests should never be seen
    expectation : when False, success means NO lb_pod responded
    '''
    # NOTE(review): host_str is assigned but never used below.
    host_str = ''
    protocol = protocol or 'http'
    barred_pods = barred_pods or []
    attempts = len(lb_pods) * 5
    # Per-pod counters of how often each pod's name appeared in responses.
    hit = {}
    hit_me_not = {}
    for x in lb_pods:
        hit[x.name] = 0
    for x in barred_pods:
        hit_me_not[x.name] = 0
    if nodePort:
        # nodePort overrides the service port when testing NodePort services.
        port = nodePort
    link = '%s://%s:%s/%s' % (protocol, service_ip, port, path)
    for i in range(0, attempts):
        # Each backend serves its own pod name as the index page
        # (see verify_nginx_pod), so the response body identifies the pod.
        (ret_val, output) = self.do_wget(link, pod=test_pod, host=host,
                                         output_file='-',
                                         return_output=True,
                                         cert=cert)
        for pod in lb_pods:
            if pod.name in output:
                hit[pod.name] += 1
        for pod in barred_pods:
            if pod.name in output:
                hit_me_not[pod.name] += 1
        # Any barred pod with a non-zero count means traffic leaked.
        if hit_me_not and 0 not in hit_me_not.values():
            self.logger.warn('HTTP request seem to have hit an unexpected '
                             ' pod. Stats : %s' % (hit_me_not))
            return False
        # Early exit once every lb pod has answered at least once.
        if 0 not in hit.values() and expectation == True:
            self.logger.info('Responses seen from all pods, lb seems fine.'
                             'Hits : %s' % (hit))
            return True
    if 0 in hit.values():
        # At least one pod never answered.
        if expectation == False:
            self.logger.info('As expected, responses not seen from pods'
                             'Hits : %s' % (hit))
            return True
        else:
            self.logger.warn('No http hit seen for one or more pods.'
                             'Pls check. Hits: %s' % (hit))
            return False
    else:
        # Every pod answered at least once.
        if expectation == False:
            self.logger.warn('Pods responding even if expectation was False'
                             'Hits : %s' % (hit))
            return False
        else:
            self.logger.info('Nginx lb hits seem to be ok: %s' % (hit))
            return True
# end validate_nginx_lb
def setup_update_policy(self,
                        pod_selector=None,
                        name=None,
                        namespace='default',
                        metadata=None,
                        spec=None,
                        policy_types=None,
                        ingress=None,
                        egress=None,
                        update=False,
                        np_fixture=None):
    '''
    A helper method to create (update=False) or update (update=True,
    np_fixture given) a generic network policy.
    Ex :
    ingress = [
        { 'from': [
            { 'pod_selector': {'role': 'frontend' }
            },
            { 'namespace_selector': {'a': 'b' }
            }
          ],
          'ports': [ 'tcp/80', 'UDP/53' ]'
        },
        ...
        ]
    egress = [
        { 'to':
            [
              { 'pod_selector': {'role': 'temp' }
              },
              {'ip_block':
                  {"cidr" : "1.2.3.4/24"},
              },
            ],
          "ports" : [ 'tcp/80', 'UDP/53' ]
        },
        ...
        ]
    '''
    # Py3 compat fix: dict.iteritems() is Python-2 only; .items() works on
    # both interpreters.
    metadata = metadata or {}
    spec = spec or {}
    ingress = ingress or {}
    egress = egress or {}
    ingress_list = []
    egress_list = []
    name = name or get_random_name('np-')
    metadata.update({'name': name})
    pod_selector_dict = {}
    if pod_selector is not None:
        pod_selector_dict = {'match_labels': pod_selector}
    if ingress is not None:
        ingress_item_dict = {}
        for ingress_item in ingress:
            from_entries = []
            # An empty rule ({}) means allow-all; no further rules needed.
            if ingress_item == {}:
                ingress_list.append({})
                break
            for from_item in ingress_item.get('from', {}):
                ingress_pod_dict = {}
                ingress_ns_dict = {}
                ingress_ip_block_dict = {}
                ingress_pod_selector = None
                ingress_ns_selector = None
                ingress_ip_block = None
                from_item_dict = from_item.get('pod_selector') or {}
                for k, v in from_item_dict.items():
                    if not ingress_pod_dict:
                        ingress_pod_dict = {'match_labels': {}}
                    ingress_pod_dict['match_labels'].update({k: v})
                    ingress_pod_selector = {
                        'pod_selector': ingress_pod_dict}
                from_item_dict = from_item.get('namespace_selector') or {}
                for k, v in from_item_dict.items():
                    if not ingress_ns_dict:
                        ingress_ns_dict = {'match_labels': {}}
                    ingress_ns_dict['match_labels'].update({k: v})
                    ingress_ns_selector = {
                        'namespace_selector': ingress_ns_dict}
                from_item_dict = from_item.get('ip_block') or {}
                for k, v in from_item_dict.items():
                    if not ingress_ip_block_dict:
                        ingress_ip_block_dict = {'cidr': ""}
                    if k == "cidr":
                        ingress_ip_block_dict.update({k: v})
                    if k == "_except":
                        ingress_ip_block_dict.update({k: v})
                    ingress_ip_block = {
                        'ip_block': ingress_ip_block_dict}
                # Exactly one of the three selector kinds is expected
                # per 'from' item.
                from_entries.append(ingress_pod_selector or
                                    ingress_ns_selector or
                                    ingress_ip_block)
            # end for from_item
            port_list = []
            for port_str in ingress_item.get('ports', {}):
                # Ports are given as '<protocol>/<port>' strings.
                protocol, port = port_str.split('/')
                port_list.append({'protocol': protocol, 'port': int(port)})
            # end for port_str
            if len(from_entries) > 0:
                ingress_item_dict = {'from': from_entries}
            if port_list:
                ingress_item_dict.update({'ports': port_list})
            ingress_list.append(ingress_item_dict)
        # end for ingress_item
    # end if ingress
    if egress is not None:
        egress_item_dict = {}
        for egress_item in egress:
            to_entries = []
            if egress_item == {}:
                egress_list.append({})
                break
            for to_item in egress_item.get('to', {}):
                egress_pod_dict = {}
                egress_ns_dict = {}
                egress_ip_block_dict = {}
                egress_pod_selector = None
                egress_ns_selector = None
                egress_ip_block = None
                to_item_dict = to_item.get('pod_selector') or {}
                for k, v in to_item_dict.items():
                    if not egress_pod_dict:
                        egress_pod_dict = {'match_labels': {}}
                    egress_pod_dict['match_labels'].update({k: v})
                    egress_pod_selector = {
                        'pod_selector': egress_pod_dict}
                to_item_dict = to_item.get('namespace_selector') or {}
                for k, v in to_item_dict.items():
                    if not egress_ns_dict:
                        egress_ns_dict = {'match_labels': {}}
                    egress_ns_dict['match_labels'].update({k: v})
                    egress_ns_selector = {
                        'namespace_selector': egress_ns_dict}
                to_item_dict = to_item.get('ip_block') or {}
                for k, v in to_item_dict.items():
                    if not egress_ip_block_dict:
                        egress_ip_block_dict = {'cidr': ""}
                    if k == "cidr":
                        egress_ip_block_dict.update({k: v})
                    if k == "_except":
                        egress_ip_block_dict.update({k: v})
                    egress_ip_block = {
                        'ip_block': egress_ip_block_dict}
                to_entries.append(egress_pod_selector or
                                  egress_ns_selector or
                                  egress_ip_block)
            # end for to_item
            port_list = []
            for port_str in egress_item.get('egress_ports', {}):
                protocol, port = port_str.split('/')
                port_list.append({'protocol': protocol, 'port': int(port)})
            # end for port_str
            if len(to_entries) > 0:
                egress_item_dict = {'to': to_entries}
            if port_list:
                egress_item_dict.update({'egress_ports': port_list})
            egress_list.append(egress_item_dict)
        # end for egress_item
    # end of egress
    if policy_types:
        spec['policy_types'] = policy_types
    if ingress:
        spec['ingress'] = ingress_list
    if egress:
        spec['egress'] = egress_list
    spec['pod_selector'] = pod_selector_dict
    if update == False:
        return self.useFixture(NetworkPolicyFixture(
            connections=self.connections,
            name=name,
            namespace=namespace,
            metadata=metadata,
            spec=spec))
    else:
        return np_fixture.update(metadata=np_fixture.metadata,
                                 spec=spec)
# end setup_policy
def setup_update_simple_policy(self,
                               pod_selector=None,
                               name=None,
                               namespace='default',
                               metadata=None,
                               spec=None,
                               ingress_pods=None,
                               ingress_namespaces=None,
                               ingress_ipblock=None,
                               egress_pods=None,
                               egress_namespaces=None,
                               egress_ipblock=None,
                               ingress_all=False,
                               egress_all=False,
                               policy_types=None,
                               ports=None,
                               egress_ports=None,
                               update=False,
                               np_fixture=None):
    '''
    A simple helper method to create a network policy with a single
    ingress entry and a single from condition
    Ex :
    ingress_pod : { 'role': 'frontend'}
    ingress_namespace : { 'project': 'mynamespace'}
    ingress_ipblock : { "cidr" : "10.204.217.0/24", "_except" : ["10.204.217.4/30"] }
    egress_pod : { 'role': 'frontend'}
    egress_namespace : { 'project': 'mynamespace'}
    egress_ipblock : { "cidr" : "10.204.217.0/24"}
    ports = ['tcp/80']
    egress_ports = ['tcp/80']
    policy_types = ["Ingress"] or ["Egress"]
    '''
    # Py3 compat fix: .iteritems() replaced by .items(); also removed the
    # block of no-op self-assignments (e.g. 'ingress_pods = ingress_pods').
    metadata = metadata or {}
    spec = spec or {}
    ingress_pod_selector = None
    ingress_ns_selector = None
    ingress_ipblock_selector = None
    egress_pod_selector = None
    egress_ns_selector = None
    egress_ipblock_selector = None
    port_list = []
    egress_port_list = []
    name = name or get_random_name('np-')
    metadata.update({'name': name})
    pod_selector_dict = {}
    if pod_selector is not None:
        pod_selector_dict = {'match_labels': pod_selector}
    if ingress_pods is not None:
        ingress_pod_dict = {'match_labels': {}}
        for k, v in ingress_pods.items():
            ingress_pod_dict['match_labels'].update({k: v})
        ingress_pod_selector = {'pod_selector': ingress_pod_dict}
    if ingress_namespaces is not None:
        ingress_ns_dict = {'match_labels': {}}
        for k, v in ingress_namespaces.items():
            ingress_ns_dict['match_labels'].update({k: v})
        ingress_ns_selector = {'namespace_selector': ingress_ns_dict}
    if ingress_ipblock is not None:
        ingress_ipblock_selector = {'ip_block': ingress_ipblock}
    if egress_pods is not None:
        egress_pod_dict = {'match_labels': {}}
        for k, v in egress_pods.items():
            egress_pod_dict['match_labels'].update({k: v})
        egress_pod_selector = {'pod_selector': egress_pod_dict}
    if egress_namespaces is not None:
        egress_ns_dict = {'match_labels': {}}
        for k, v in egress_namespaces.items():
            egress_ns_dict['match_labels'].update({k: v})
        egress_ns_selector = {'namespace_selector': egress_ns_dict}
    if egress_ipblock is not None:
        egress_ipblock_selector = {'ip_block': egress_ipblock}
    if ports is not None:
        for port_str in ports:
            protocol, port = port_str.split('/')
            port_list.append({'protocol': protocol, 'port': int(port)})
    if egress_ports is not None:
        for port_str in egress_ports:
            protocol, port = port_str.split('/')
            egress_port_list.append({'protocol': protocol, 'port': int(port)})
    # An empty rule dict ({}) means allow-all in k8s network policy.
    if ingress_all == True:
        spec.update({
            'ingress': [{}]
        })
    elif ingress_pod_selector or ingress_ns_selector or ingress_ipblock_selector:
        spec.update({
            'ingress': [
                {'from': [ingress_pod_selector or ingress_ns_selector or ingress_ipblock_selector],
                 }
            ]
        })
    elif egress_all == True:
        spec.update({
            'egress': [{}]
        })
    elif egress_pod_selector or egress_ns_selector or egress_ipblock_selector:
        spec.update({
            'egress': [
                {'to': [egress_pod_selector or egress_ns_selector or egress_ipblock_selector],
                 }
            ]
        })
    spec.update({'pod_selector': pod_selector_dict})
    if ports is not None and (policy_types == ["Ingress"] or policy_types == []):
        spec['ingress'][0]['ports'] = port_list
    if egress_ports is not None and policy_types == ["Egress"]:
        spec['egress'][0]['egress_ports'] = egress_port_list
    if policy_types:
        spec["policy_types"] = policy_types
    if update == False:
        return self.useFixture(NetworkPolicyFixture(
            connections=self.connections,
            name=name,
            namespace=namespace,
            metadata=metadata,
            spec=spec))
    else:
        return np_fixture.update(metadata=np_fixture.metadata,
                                 spec=spec)
# end setup_simple_policy
def setup_isolation(self, namespace_fixture):
    """Enable isolation on the namespace and revert it automatically on cleanup."""
    namespace_fixture.enable_isolation()
    self.addCleanup(namespace_fixture.disable_isolation)
# end self.setup_isolation
def setup_deployment(self,
                     name=None,
                     namespace='default',
                     metadata=None,
                     spec=None,
                     min_ready_seconds=None,
                     paused=None,
                     progress_deadline_seconds=None,
                     replicas=None,
                     revision_history_limit=None,
                     rollback_to=None,
                     strategy=None,
                     template=None):
    '''
    A helper method to create a deployment.
    Each optional keyword, when set, is copied into the deployment spec.
    Ref https://github.com/kubernetes-incubator/client-python/blob/master/kubernetes/docs/AppsV1beta1DeploymentSpec.md
    '''
    metadata = metadata or {}
    spec = spec or {}
    name = name or get_random_name('dep-')
    metadata.update({'name': name})
    if min_ready_seconds:
        spec.update({'min_ready_seconds': min_ready_seconds})
    if paused:
        spec.update({'paused': paused})
    if progress_deadline_seconds:
        spec.update(
            {'progress_deadline_seconds': progress_deadline_seconds})
    if replicas:
        spec.update({'replicas': replicas})
    # The previous version repeated this block twice; once is enough.
    if revision_history_limit:
        spec.update({'revision_history_limit': revision_history_limit})
    if rollback_to:
        spec.update({'rollback_to': rollback_to})
    if strategy:
        spec.update({'strategy': strategy})
    if template:
        spec.update({'template': template})
    obj = self.useFixture(DeploymentFixture(
        connections=self.connections,
        namespace=namespace,
        metadata=metadata,
        spec=spec))
    return obj
# end setup_deployment
def setup_nginx_deployment(self,
                           name=None,
                           namespace='default',
                           replicas=1,
                           pod_labels=None,
                           container_port=80,
                           metadata=None,
                           spec=None,
                           template_metadata=None,
                           template_spec=None):
    '''Create an nginx Deployment with the given replica count and labels.'''
    metadata = metadata or {}
    spec = spec or {}
    pod_labels = pod_labels or {}
    name = name or get_random_name('nginx-dep')
    template_metadata = template_metadata or {}
    if pod_labels:
        existing = template_metadata.get('labels', {})
        template_metadata['labels'] = existing
        template_metadata['labels'].update(pod_labels)
    if template_spec is None or not template_spec:
        template_spec = {
            'containers': [
                {'image': 'nginx',
                 'ports': [
                     {'container_port': int(container_port)}
                 ],
                 }
            ]
        }
    if replicas:
        spec['replicas'] = replicas
    spec['template'] = {
        'metadata': template_metadata,
        'spec': template_spec,
    }
    return self.setup_deployment(name=name,
                                 namespace=namespace,
                                 metadata=metadata,
                                 spec=spec)
# end setup_nginx_deployment
def restart_kube_manager(self, ips=None):
    '''
    Restarts kube-managers
    If no ips is specified, restarts all kube-managers on all nodes
    '''
    ips = ips or self.inputs.kube_manager_ips
    self.logger.info('Will restart contrail-kube-manager services now on'
                     ' %s' % (ips))
    # verify_service=True makes the restart wait until the service
    # reports healthy again.
    self.inputs.restart_service('contrail-kube-manager', ips,
                                container='contrail-kube-manager',
                                verify_service=True)
# end restart_kube_manager
def restart_vrouter_agent(self, ips=None):
    '''
    Restarts vrouter agent
    If no ips is specified, restarts all agents
    '''
    ips = ips or self.inputs.compute_ips
    self.logger.info('Will restart contrail-vrouter-agent services now on'
                     ' %s' % (ips))
    # verify_service=True makes the restart wait until the agent
    # reports healthy again.
    self.inputs.restart_service('contrail-vrouter-agent', ips,
                                container='agent',
                                verify_service=True)
# end restart_vrouter_agent
def restart_pod(self, pod_fixture):
    '''
    Restarts a specific container using docker restart.
    Returns True when the container is found, restarted and stays Up
    across three checks; False otherwise.
    '''
    host = pod_fixture.compute_ip
    username = self.inputs.host_data[host]['username']
    password = self.inputs.host_data[host]['password']
    # Pick the app container (not the pause/POD container) of the pod:
    # 'grep -v POD' drops the infra container, 'sed -n 2p' skips the
    # ps header line, awk extracts the container id.
    cmd = "docker ps -f NAME=%s -f status=running 2>/dev/null | grep -v POD | sed -n 2p | awk '{print $1}'" \
        % (pod_fixture.name + "_" + pod_fixture.namespace)
    self.logger.info('Running %s on %s' %
                     (cmd, self.inputs.host_data[host]['name']))
    container_id = self.inputs.run_cmd_on_server(host, cmd, username, password,
                                                 as_sudo=True)
    if not container_id:
        self.logger.warn('Container cant be found on host')
        return False
    # -t 60: allow up to 60s for graceful stop before SIGKILL.
    issue_cmd = 'docker restart %s -t 60' % (container_id)
    self.logger.info('Running %s on %s' %
                     (issue_cmd, self.inputs.host_data[host]['name']))
    self.inputs.run_cmd_on_server(host, issue_cmd, username, password, pty=True,
                                  as_sudo=True)
    verify_command = "docker ps -f NAME=%s -f status=running 2>/dev/null | grep -v POD" \
        % (pod_fixture.name + "_" + pod_fixture.namespace)
    # Poll three times (3s apart) to confirm the container stays Up.
    for i in range(3):
        output = self.inputs.run_cmd_on_server(host, verify_command, username,
                                               password, as_sudo=True)
        if not output or 'Up' not in output:
            self.logger.warn('Container is not up on host %s' % (host))
            return False
        time.sleep(3)
    return True
# end restart_pod
def create_snat_router(self, name):
    '''Create a logical router in the admin project; deletion is
    registered as a cleanup. Returns the router object.'''
    vnc_h = self.connections.vnc_lib_fixture.vnc_h
    project_obj = self.connections.vnc_lib_fixture.get_project_obj()
    router = vnc_h.create_router(name=name, project_obj=project_obj)
    self.addCleanup(vnc_h.delete_router, router)
    return router
def connect_vn_with_router(self, router_obj, vn_fq_name):
    """Attach the VN identified by vn_fq_name to the logical router.

    A dummy VMI on the VN is created and added to the router, since a
    logical router connects to VNs through interfaces. Cleanups for the
    VMI and its detachment are registered. Returns the router object.
    """
    # Configure VN name from namespace
    # Read VN from API
    vn_fq_name_str = ':'.join(vn_fq_name)
    vn_obj = self.vnc_lib.virtual_network_read(fq_name_str=vn_fq_name_str)
    # To associate VN to logical router need to create a dummy port
    vmi_id = str(uuid.uuid4())
    vmi_obj = vnc_api_test.VirtualMachineInterface(name=vmi_id,
        parent_obj=self.connections.vnc_lib_fixture.get_project_obj())
    vmi_obj.add_virtual_network(vn_obj)
    self.vnc_lib.virtual_machine_interface_create(vmi_obj)
    self.addCleanup(self.vnc_lib.virtual_machine_interface_delete, id=vmi_obj.uuid)
    # Connect namespace VN to router
    router_obj.add_virtual_machine_interface(vmi_obj)
    # Detach must happen before the VMI delete cleanup above runs
    # (cleanups execute in reverse registration order).
    self.addCleanup(self._remove_namespace_from_router, router_obj, vmi_obj)
    # Update logical router object
    self.vnc_lib.logical_router_update(router_obj)
    return router_obj
def _remove_namespace_from_router(self, router_obj, vmi_obj):
    """Cleanup helper: detach the dummy VMI from the router and push the change."""
    router_obj.del_virtual_machine_interface(vmi_obj)
    # Update logical router object
    self.vnc_lib.logical_router_update(router_obj)
def configure_snat_for_pod(self, pod):
    """Give the pod's VN external (SNAT) connectivity.

    Creates a logical router, attaches the pod's first VN to it, and
    sets the shared public VN as the router's external gateway.
    """
    # Create logical router
    router_obj = self.create_snat_router("snat_router")
    # Connect router with virtual network associated to pod
    self.connect_vn_with_router(router_obj, pod.vn_fq_names[0])
    # Configure external_gateway
    self.connections.vnc_lib_fixture.vnc_h.connect_gateway_with_router(router_obj,\
        self.public_vn.public_vn_fixture.obj)
# end configure_snat_for_pod
def verify_reachability(self, source_pod, dest_pods):
'''
Returns (boolean, list of booleans)
'''
results = []
for dest_pod in dest_pods:
result = source_pod.ping_with_certainty(dest_pod.pod_ip)