tkgs_ha_utils.go
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"context"
"fmt"
"math/rand"
"os/exec"
"strings"
"time"
ginkgo "github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
cnstypes "github.com/vmware/govmomi/cns/types"
"github.com/vmware/govmomi/object"
vim25types "github.com/vmware/govmomi/vim25/types"
"golang.org/x/crypto/ssh"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
fnodes "k8s.io/kubernetes/test/e2e/framework/node"
fpod "k8s.io/kubernetes/test/e2e/framework/pod"
fpv "k8s.io/kubernetes/test/e2e/framework/pv"
fssh "k8s.io/kubernetes/test/e2e/framework/ssh"
fss "k8s.io/kubernetes/test/e2e/framework/statefulset"
)
// checkAnnotationOnSvcPvc checks TKG HA specific annotations on the SVC PVC
func checkAnnotationOnSvcPvc(svcPVC *v1.PersistentVolumeClaim,
allowedTopologies map[string][]string, categories []string) error {
annotationsMap := svcPVC.Annotations
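// The accessible-topology annotation value is expected to look roughly like
// {"<topology-category-key>":"<zone-value>"}; the parsing below pulls out the category name
// after the "/" in the key and the zone value between the braces.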
if accessibleTopoString, x := annotationsMap[tkgHAccessibleAnnotationKey]; x {
accessibleTopology := strings.Split(accessibleTopoString, ":")
topoKey := strings.Split(accessibleTopology[0], "{")[1]
topoVal := strings.Split(accessibleTopology[1], "}")[0]
category := strings.SplitAfter(topoKey, "/")[1]
categoryKey := strings.Split(category, `"`)[0]
if isValuePresentInTheList(categories, categoryKey) {
if !isValuePresentInTheList(allowedTopologies[topoKey], topoVal) {
return fmt.Errorf("couldn't find allowed accessible topology: %v on svc pvc: %s, "+
"instead found: %v", allowedTopologies[topoKey], svcPVC.Name, topoVal)
}
} else {
return fmt.Errorf("couldn't find key: %s on allowed categories %v",
category, categories)
}
} else {
return fmt.Errorf("couldn't find annotation key: %s on svc pvc: %s",
tkgHAccessibleAnnotationKey, svcPVC.Name)
}
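// The requested-topology annotation may carry multiple comma-separated topology entries;
// each entry is validated against the allowed topologies below.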
if requestedTopoString, y := annotationsMap[tkgHARequestedAnnotationKey]; y {
availabilityTopo := strings.Split(requestedTopoString, ",")
for _, avlTopo := range availabilityTopo {
requestedTopology := strings.Split(avlTopo, ":")
topoKey := strings.Split(requestedTopology[0], "{")[1]
topoVal := strings.Split(requestedTopology[1], "}")[0]
category := strings.SplitAfter(topoKey, "/")[1]
categoryKey := strings.Split(category, `"`)[0]
if isValuePresentInTheList(categories, categoryKey) {
if !isValuePresentInTheList(allowedTopologies[topoKey], topoVal) {
return fmt.Errorf("couldn't find allowed requested topology: %v on svc pvc: %s, "+
"instead found: %v", allowedTopologies[topoKey], svcPVC.Name, topoVal)
}
} else {
return fmt.Errorf("couldn't find key: %s on allowed categories %v",
category, categories)
}
}
} else {
return fmt.Errorf("couldn't find annotation key: %s on svc pvc: %s",
tkgHARequestedAnnotationKey, svcPVC.Name)
}
return nil
}
// isValuePresentInTheList is a util method which checks whether a particular string
// is present (via substring match) in any element of the given list
func isValuePresentInTheList(strArr []string, str string) bool {
for _, s := range strArr {
if strings.Contains(s, str) {
return true
}
}
return false
}
// verifyAnnotationsAndNodeAffinity verifies TKG HA annotations on the SVC PVC, PV node
// affinity on the GC and SVC PVs, and that the pod is located in the correct zone
func verifyAnnotationsAndNodeAffinity(allowedTopologyHAMap map[string][]string,
categories []string, pod *v1.Pod, nodeList *v1.NodeList,
svcPVC *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, svcPVCName string) {
framework.Logf("Verify SV PVC has TKG HA annotations set")
err := checkAnnotationOnSvcPvc(svcPVC, allowedTopologyHAMap, categories)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.Logf("SVC PVC: %s has TKG HA annotations set", svcPVC.Name)
framework.Logf("Verify GV PV has has required PV node affinity details")
_, err = verifyVolumeTopologyForLevel5(pv, allowedTopologyHAMap)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.Logf("GC PV: %s has required Pv node affinity details", pv.Name)
framework.Logf("Verify SV PV has has required PV node affinity details")
svcPV := getPvFromSupervisorCluster(svcPVCName)
_, err = verifyVolumeTopologyForLevel5(svcPV, allowedTopologyHAMap)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.Logf("SVC PV: %s has required PV node affinity details", svcPV.Name)
_, err = verifyPodLocationLevel5(pod, nodeList, allowedTopologyHAMap)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
// verifyVolumeProvisioningWithServiceDown stops the given vCenter service, creates a statefulset,
// then restarts the service and validates that the volumes get bound and that the required
// annotations and node affinity are present
func verifyVolumeProvisioningWithServiceDown(serviceName string, namespace string, client clientset.Interface,
storagePolicyName string, allowedTopologyHAMap map[string][]string, categories []string, isServiceStopped bool,
f *framework.Framework) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ginkgo.By("CNS_TEST: Running for GC setup")
nodeList, err := fnodes.GetReadySchedulableNodes(client)
framework.ExpectNoError(err, "Unable to find ready and schedulable Node")
if !(len(nodeList.Items) > 0) {
framework.Failf("Unable to find ready and schedulable Node")
}
ginkgo.By(fmt.Sprintf("Stopping %v on the vCenter host", serviceName))
vcAddress := e2eVSphere.Config.Global.VCenterHostname + ":" + sshdPort
err = invokeVCenterServiceControl(stopOperation, serviceName, vcAddress)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
isServiceStopped = true
err = waitVCenterServiceToBeInState(serviceName, vcAddress, svcStoppedMessage)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
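// Restore the vCenter service if it is still stopped when this function returns.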
defer func() {
if isServiceStopped {
ginkgo.By(fmt.Sprintf("Starting %v on the vCenter host", serviceName))
err = invokeVCenterServiceControl(startOperation, serviceName, vcAddress)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
err = waitVCenterServiceToBeInState(serviceName, vcAddress, svcRunningMessage)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
bootstrap()
isServiceStopped = false
}
}()
ginkgo.By("Create statefulset with default pod management policy with replica 3")
createResourceQuota(client, namespace, rqLimit, storagePolicyName)
storageclass, err := client.StorageV1().StorageClasses().Get(ctx, storagePolicyName, metav1.GetOptions{})
if !apierrors.IsNotFound(err) {
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
// Creating StatefulSet service
ginkgo.By("Creating service")
service := CreateService(namespace, client)
defer func() {
deleteService(namespace, client, service)
}()
statefulset := GetStatefulSetFromManifest(namespace)
ginkgo.By("Creating statefulset")
statefulset.Spec.VolumeClaimTemplates[len(statefulset.Spec.VolumeClaimTemplates)-1].
Spec.StorageClassName = &storageclass.Name
*statefulset.Spec.Replicas = 3
_, err = client.AppsV1().StatefulSets(namespace).Create(ctx, statefulset, metav1.CreateOptions{})
framework.ExpectNoError(err)
replicas := *(statefulset.Spec.Replicas)
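// Clean up the statefulset, its PVCs, and the backing CNS volumes once this verification is done.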
defer func() {
scaleDownNDeleteStsDeploymentsInNamespace(ctx, client, namespace)
pvcs, err := client.CoreV1().PersistentVolumeClaims(namespace).List(ctx, metav1.ListOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
for _, claim := range pvcs.Items {
pv := getPvFromClaim(client, namespace, claim.Name)
err := fpv.DeletePersistentVolumeClaim(client, claim.Name, namespace)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Verify it's PV and corresponding volumes are deleted from CNS")
err = fpv.WaitForPersistentVolumeDeleted(client, pv.Name, poll,
pollTimeout)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
volumeHandle := pv.Spec.CSI.VolumeHandle
err = e2eVSphere.waitForCNSVolumeToBeDeleted(volumeHandle)
gomega.Expect(err).NotTo(gomega.HaveOccurred(),
fmt.Sprintf("Volume: %s should not be present in the CNS after it is deleted from "+
"kubernetes", volumeHandle))
}
}()
ginkgo.By(fmt.Sprintf("PVC and POD creations should be in pending state since %s is down", serviceName))
pvcs, err := client.CoreV1().PersistentVolumeClaims(namespace).List(ctx, metav1.ListOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
for _, pvc := range pvcs.Items {
err = fpv.WaitForPersistentVolumeClaimPhase(v1.ClaimPending, client,
pvc.Namespace, pvc.Name, framework.Poll, time.Minute)
gomega.Expect(err).NotTo(gomega.HaveOccurred(),
fmt.Sprintf("Failed to find the volume in pending state with err: %v", err))
}
pods := fss.GetPodList(client, statefulset)
for _, pod := range pods.Items {
if pod.Status.Phase != v1.PodPending {
framework.Failf("Expected pod to be in: %s state but is in: %s state", v1.PodPending,
pod.Status.Phase)
}
}
ginkgo.By(fmt.Sprintf("Starting %v on the vCenter host", serviceName))
err = invokeVCenterServiceControl(startOperation, serviceName, vcAddress)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
isServiceStopped = false
err = waitVCenterServiceToBeInState(serviceName, vcAddress, svcRunningMessage)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
bootstrap()
verifyVolumeMetadataOnStatefulsets(client, ctx, namespace, statefulset, replicas,
allowedTopologyHAMap, categories, storagePolicyName, nodeList, f)
}
// verifyOnlineVolumeExpansionOnGc is a util method which verifies online volume expansion on a guest cluster
func verifyOnlineVolumeExpansionOnGc(client clientset.Interface, namespace string, svcPVCName string,
volHandle string, pvclaim *v1.PersistentVolumeClaim, pod *v1.Pod, f *framework.Framework) {
rand.New(rand.NewSource(time.Now().Unix()))
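// Use a timestamp and random suffix for the temp file name so repeated runs do not collide.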
testdataFile := fmt.Sprintf("/tmp/testdata_%v_%v", time.Now().Unix(), rand.Intn(1000))
ginkgo.By(fmt.Sprintf("Creating a 512mb test data file %v", testdataFile))
op, err := exec.Command("dd", "if=/dev/urandom", fmt.Sprintf("of=%v", testdataFile),
"bs=64k", "count=8000").Output()
gomega.Expect(err).NotTo(gomega.HaveOccurred())
defer func() {
op, err = exec.Command("rm", "-f", testdataFile).Output()
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}()
_ = e2ekubectl.RunKubectlOrDie(namespace, "cp", testdataFile,
fmt.Sprintf("%v/%v:/mnt/volume1/testdata", namespace, pod.Name))
onlineVolumeResizeCheck(f, client, namespace, svcPVCName, volHandle, pvclaim, pod)
ginkgo.By("Checking data consistency after PVC resize")
_ = e2ekubectl.RunKubectlOrDie(namespace, "cp",
fmt.Sprintf("%v/%v:/mnt/volume1/testdata", namespace, pod.Name), testdataFile+"_pod")
defer func() {
op, err = exec.Command("rm", "-f", testdataFile+"_pod").Output()
fmt.Println("rm: ", op)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}()
ginkgo.By("Running diff...")
op, err = exec.Command("diff", testdataFile, testdataFile+"_pod").Output()
fmt.Println("diff: ", op)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(len(op)).To(gomega.BeZero())
ginkgo.By("File system resize finished successfully in GC")
ginkgo.By("Checking for PVC resize completion on SVC PVC")
_, err = waitForFSResizeInSvc(svcPVCName)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
// verifyOfflineVolumeExpansionOnGc is a util method which verifies offline volume expansion on a guest cluster
func verifyOfflineVolumeExpansionOnGc(client clientset.Interface, pvclaim *v1.PersistentVolumeClaim, svcPVCName string,
namespace string, volHandle string, pod *v1.Pod, pv *v1.PersistentVolume, f *framework.Framework) {
ginkgo.By("Check filesystem size for mount point /mnt/volume1 before expansion")
originalFsSize, err := getFSSizeMb(f, pod)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Delete POD.
ginkgo.By(fmt.Sprintf("Deleting the pod %s in namespace %s before expansion", pod.Name, namespace))
err = fpod.DeletePodWithWait(client, pod)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Verify volume is detached from the node before expansion")
isDiskDetached, err := e2eVSphere.waitForVolumeDetachedFromNode(client,
pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(isDiskDetached).To(gomega.BeTrue(),
fmt.Sprintf("Volume %q is not detached from the node %q", volHandle, pod.Spec.NodeName))
// Modify PVC spec to trigger volume expansion. We expand the PVC while
// no pod is using it to ensure offline expansion.
ginkgo.By("Expanding current pvc")
currentPvcSize := pvclaim.Spec.Resources.Requests[v1.ResourceStorage]
newSize := currentPvcSize.DeepCopy()
newSize.Add(resource.MustParse(diskSize))
framework.Logf("currentPvcSize %v, newSize %v", currentPvcSize, newSize)
pvclaim, err = expandPVCSize(pvclaim, newSize, client)
framework.ExpectNoError(err, "While updating pvc for more size")
gomega.Expect(pvclaim).NotTo(gomega.BeNil())
pvcSize := pvclaim.Spec.Resources.Requests[v1.ResourceStorage]
if pvcSize.Cmp(newSize) != 0 {
framework.Failf("error updating pvc size %q", pvclaim.Name)
}
ginkgo.By("Checking for PVC request size change on SVC PVC")
b, err := verifyPvcRequestedSizeUpdateInSupervisorWithWait(svcPVCName, newSize)
gomega.Expect(b).To(gomega.BeTrue())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Waiting for controller volume resize to finish")
err = waitForPvResizeForGivenPvc(pvclaim, client, totalResizeWaitPeriod)
framework.ExpectNoError(err, "While waiting for pvc resize to finish")
ginkgo.By("Checking for resize on SVC PV")
verifyPVSizeinSupervisor(svcPVCName, newSize)
ginkgo.By("Checking for 'FileSystemResizePending' status condition on SVC PVC")
err = waitForSvcPvcToReachFileSystemResizePendingCondition(svcPVCName, pollTimeout)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Checking for conditions on pvc")
pvclaim, err = waitForPVCToReachFileSystemResizePendingCondition(client, namespace, pvclaim.Name, pollTimeout)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By(fmt.Sprintf("Invoking QueryCNSVolumeWithResult with VolumeID: %s", volHandle))
queryResult, err := e2eVSphere.queryCNSVolumeWithResult(volHandle)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
if len(queryResult.Volumes) == 0 {
err = fmt.Errorf("QueryCNSVolumeWithResult returned no volume")
}
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Verifying disk size requested in volume expansion is honored")
newSizeInMb := convertGiStrToMibInt64(newSize)
if queryResult.Volumes[0].BackingObjectDetails.(*cnstypes.CnsBlockBackingDetails).CapacityInMb != newSizeInMb {
err = fmt.Errorf("got wrong disk size after volume expansion")
}
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Create a new Pod to use this PVC, and verify volume has been attached.
ginkgo.By("Creating a new pod to attach PV again to the node")
pod, err = createPod(client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim}, false, execCommand)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
defer func() {
ginkgo.By("Delete pod")
err = fpod.DeletePodWithWait(client, pod)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}()
ginkgo.By(fmt.Sprintf("Verify volume after expansion: %s is attached to the node: %s",
pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName))
vmUUID, err := getVMUUIDFromNodeName(pod.Spec.NodeName)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, volHandle, vmUUID)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node")
ginkgo.By("Waiting for file system resize to finish")
pvclaim, err = waitForFSResize(pvclaim, client)
framework.ExpectNoError(err, "while waiting for fs resize to finish")
pvcConditions := pvclaim.Status.Conditions
expectEqual(len(pvcConditions), 0, "pvc should not have conditions")
ginkgo.By("Verify filesystem size for mount point /mnt/volume1 after expansion")
fsSize, err := getFSSizeMb(f, pod)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Filesystem size may be smaller than the size of the block volume.
// Here since filesystem was already formatted on the original volume,
// we can compare the new filesystem size with the original filesystem size.
if fsSize < originalFsSize {
framework.Failf("error updating filesystem size for %q. Resulting filesystem size is %d", pvclaim.Name, fsSize)
}
ginkgo.By("File system resize finished successfully in GC")
ginkgo.By("Checking for PVC resize completion on SVC PVC")
_, err = waitForFSResizeInSvc(svcPVCName)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
// verifyVolumeMetadataOnStatefulsets verifies sts pod replicas, TKG HA annotations and
// node affinities on the SVC PVC, and verifies CNS volume metadata
func verifyVolumeMetadataOnStatefulsets(client clientset.Interface, ctx context.Context, namespace string,
statefulset *appsv1.StatefulSet, replicas int32, allowedTopologyHAMap map[string][]string,
categories []string, storagePolicyName string, nodeList *v1.NodeList, f *framework.Framework) {
// Waiting for pods status to be Ready
fss.WaitForStatusReadyReplicas(client, statefulset, replicas)
gomega.Expect(fss.CheckMount(client, statefulset, mountPath)).NotTo(gomega.HaveOccurred())
ssPodsBeforeScaleDown := fss.GetPodList(client, statefulset)
gomega.Expect(ssPodsBeforeScaleDown.Items).NotTo(gomega.BeEmpty(),
fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name))
gomega.Expect(len(ssPodsBeforeScaleDown.Items) == int(replicas)).To(gomega.BeTrue(),
"Number of Pods in the statefulset should match with number of replicas")
ginkgo.By("Verify GV PV and SV PV has has required PV node affinity details")
ginkgo.By("Verify SV PVC has TKG HA annotations set")
// Get the list of Volumes attached to Pods before scale down
for _, sspod := range ssPodsBeforeScaleDown.Items {
pod, err := client.CoreV1().Pods(namespace).Get(ctx, sspod.Name, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
for _, volumespec := range sspod.Spec.Volumes {
if volumespec.PersistentVolumeClaim != nil {
pvcName := volumespec.PersistentVolumeClaim.ClaimName
pv := getPvFromClaim(client, statefulset.Namespace, pvcName)
pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx,
pvcName, metav1.GetOptions{})
gomega.Expect(pvclaim).NotTo(gomega.BeNil())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
volHandle := getVolumeIDFromSupervisorCluster(pv.Spec.CSI.VolumeHandle)
gomega.Expect(volHandle).NotTo(gomega.BeEmpty())
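// In a guest cluster, the GC PV's CSI volume handle is the name of the corresponding PVC on the supervisor cluster.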
svcPVCName := pv.Spec.CSI.VolumeHandle
svcPVC := getPVCFromSupervisorCluster(svcPVCName)
gomega.Expect(*svcPVC.Spec.StorageClassName == storagePolicyName).To(
gomega.BeTrue(), "SV Pvc storageclass does not match with SV storageclass")
framework.Logf("GC PVC's storageclass matches SVC PVC's storageclass")
verifyAnnotationsAndNodeAffinity(allowedTopologyHAMap, categories, pod,
nodeList, svcPVC, pv, svcPVCName)
// Verify the attached volume match the one in CNS cache
err = waitAndVerifyCnsVolumeMetadata4GCVol(volHandle, svcPVCName, pvclaim,
pv, pod)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
}
}
replicas = 5
framework.Logf(fmt.Sprintf("Scaling up statefulset: %v to number of Replica: %v",
statefulset.Name, replicas))
_, scaleupErr := fss.Scale(client, statefulset, replicas)
gomega.Expect(scaleupErr).NotTo(gomega.HaveOccurred())
fss.WaitForStatusReplicas(client, statefulset, replicas)
fss.WaitForStatusReadyReplicas(client, statefulset, replicas)
ssPodsAfterScaleUp := fss.GetPodList(client, statefulset)
gomega.Expect(ssPodsAfterScaleUp.Items).NotTo(gomega.BeEmpty(),
fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name))
gomega.Expect(len(ssPodsAfterScaleUp.Items) == int(replicas)).To(gomega.BeTrue(),
"Number of Pods in the statefulset %s, %v, should match with number of replicas %v",
statefulset.Name, len(ssPodsAfterScaleUp.Items), replicas,
)
// Get the list of Volumes attached to Pods before scale down
for _, sspod := range ssPodsBeforeScaleDown.Items {
pod, err := client.CoreV1().Pods(namespace).Get(ctx, sspod.Name, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
for _, volumespec := range sspod.Spec.Volumes {
if volumespec.PersistentVolumeClaim != nil {
pvcName := volumespec.PersistentVolumeClaim.ClaimName
pv := getPvFromClaim(client, statefulset.Namespace, pvcName)
pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx,
pvcName, metav1.GetOptions{})
gomega.Expect(pvclaim).NotTo(gomega.BeNil())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
volHandle := getVolumeIDFromSupervisorCluster(pv.Spec.CSI.VolumeHandle)
gomega.Expect(volHandle).NotTo(gomega.BeEmpty())
svcPVCName := pv.Spec.CSI.VolumeHandle
svcPVC := getPVCFromSupervisorCluster(svcPVCName)
verifyAnnotationsAndNodeAffinity(allowedTopologyHAMap, categories, pod,
nodeList, svcPVC, pv, svcPVCName)
framework.Logf(fmt.Sprintf("Verify volume: %s is attached to the node: %s",
pv.Spec.CSI.VolumeHandle, sspod.Spec.NodeName))
var vmUUID string
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
vmUUID, err = getVMUUIDFromNodeName(pod.Spec.NodeName)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
verifyCRDInSupervisorWithWait(ctx, f, pod.Spec.NodeName+"-"+svcPVCName,
crdCNSNodeVMAttachment, crdVersion, crdGroup, true)
isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, volHandle, vmUUID)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Disk is not attached to the node")
framework.Logf("After scale up, verify the attached volumes match those in CNS Cache")
err = waitAndVerifyCnsVolumeMetadata4GCVol(volHandle, svcPVCName, pvclaim,
pv, pod)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
}
}
}
// verifyVolumeMetadataOnDeployments verifies TKG HA annotations and
// node affinities on the SVC PVC and verifies CNS volume metadata
func verifyVolumeMetadataOnDeployments(ctx context.Context,
client clientset.Interface, deployment *appsv1.Deployment, namespace string,
allowedTopologyHAMap map[string][]string, categories []string,
nodeList *v1.NodeList, storagePolicyName string) {
pods, err := GetPodsForMultipleDeployment(client, deployment)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
for _, depPod := range pods.Items {
pod, err := client.CoreV1().Pods(namespace).Get(ctx, depPod.Name, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
for _, volumespec := range depPod.Spec.Volumes {
if volumespec.PersistentVolumeClaim != nil {
pvcName := volumespec.PersistentVolumeClaim.ClaimName
pv := getPvFromClaim(client, namespace, pvcName)
pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx,
pvcName, metav1.GetOptions{})
gomega.Expect(pvclaim).NotTo(gomega.BeNil())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
if guestCluster {
volHandle := getVolumeIDFromSupervisorCluster(pv.Spec.CSI.VolumeHandle)
gomega.Expect(volHandle).NotTo(gomega.BeEmpty())
svcPVCName := pv.Spec.CSI.VolumeHandle
svcPVC := getPVCFromSupervisorCluster(svcPVCName)
gomega.Expect(*svcPVC.Spec.StorageClassName == storagePolicyName).To(
gomega.BeTrue(), "SV Pvc storageclass does not match with SV storageclass")
framework.Logf("GC PVC's storageclass matches SVC PVC's storageclass")
verifyAnnotationsAndNodeAffinity(allowedTopologyHAMap, categories, pod,
nodeList, svcPVC, pv, svcPVCName)
// Verify the attached volume match the one in CNS cache
err = waitAndVerifyCnsVolumeMetadata4GCVol(volHandle, svcPVCName, pvclaim,
pv, pod)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
} else if vanillaCluster {
err = waitAndVerifyCnsVolumeMetadata(pv.Spec.CSI.VolumeHandle, pvclaim, pv, pod)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
}
}
}
}
// execStopContainerOnGc logs into the GC master node from the SVC master node using the SSH
// private key and stops the given container running on that node
func execStopContainerOnGc(sshClientConfig *ssh.ClientConfig, svcMasterIP string, containerName string,
gcMasterIP string, svcNamespace string) error {
sshSecretName := GetAndExpectStringEnvVar(sshSecretName)
cmdToGetPrivateKey := fmt.Sprintf("kubectl get secret %s -n %s -o"+
"jsonpath={'.data.ssh-privatekey'} | base64 -d > key", sshSecretName, svcNamespace)
framework.Logf("Invoking command '%v' on host %v", cmdToGetPrivateKey,
svcMasterIP)
cmdResult, err := sshExec(sshClientConfig, svcMasterIP,
cmdToGetPrivateKey)
if err != nil || cmdResult.Code != 0 {
fssh.LogResult(cmdResult)
return fmt.Errorf("couldn't execute command: %s on host: %v , error: %s",
cmdToGetPrivateKey, svcMasterIP, err)
}
enablePermissionCmd := "chmod 600 key"
framework.Logf("Invoking command '%v' on host %v", enablePermissionCmd,
svcMasterIP)
cmdResult, err = sshExec(sshClientConfig, svcMasterIP,
enablePermissionCmd)
if err != nil || cmdResult.Code != 0 {
fssh.LogResult(cmdResult)
return fmt.Errorf("couldn't execute command: %s on host: %v , error: %s",
enablePermissionCmd, svcMasterIP, err)
}
cmdToGetContainerInfo := fmt.Sprintf("ssh -o 'StrictHostKeyChecking=no' -i key %s@%s "+
"'sudo -i crictl ps| grep %s' > container.log 2> /dev/null", gcNodeUser, gcMasterIP, containerName)
framework.Logf("Invoking command '%v' on host %v", cmdToGetContainerInfo,
svcMasterIP)
cmdResult, err = sshExec(sshClientConfig, svcMasterIP,
cmdToGetContainerInfo)
if err != nil || cmdResult.Code != 0 {
fssh.LogResult(cmdResult)
return fmt.Errorf("couldn't execute command: %s on host: %v , error: %s",
cmdToGetContainerInfo, svcMasterIP, err)
}
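// Extract the container ID (first column of the crictl ps output captured above) from container.log.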
cmdToGetContainerId := "cat container.log | awk '{print $1}' | tr -d '\n'"
framework.Logf("Invoking command '%v' on host %v", cmdToGetContainerInfo,
svcMasterIP)
cmdResult, err = sshExec(sshClientConfig, svcMasterIP,
cmdToGetContainerId)
if err != nil || cmdResult.Code != 0 {
fssh.LogResult(cmdResult)
return fmt.Errorf("couldn't execute command: %s on host: %v , error: %s",
cmdToGetContainerId, svcMasterIP, err)
}
containerID := cmdResult.Stdout
containerStopCmd := fmt.Sprintf("ssh -o 'StrictHostKeyChecking=no' -i key %s@%s "+
"'sudo -i crictl stop %s' 2> /dev/null", gcNodeUser, gcMasterIP, containerID)
framework.Logf("Invoking command '%v' on host %v", containerStopCmd,
svcMasterIP)
cmdResult, err = sshExec(sshClientConfig, svcMasterIP,
containerStopCmd)
if err != nil || cmdResult.Code != 0 {
fssh.LogResult(cmdResult)
return fmt.Errorf("couldn't execute command: %s on host: %v , error: %s",
containerStopCmd, svcMasterIP, err)
}
// delete the temporary log file
cmd := "rm container.log"
framework.Logf("Invoking command '%v' on host %v", cmd,
svcMasterIP)
result, err := sshExec(sshClientConfig, svcMasterIP,
cmd)
if err != nil || result.Code != 0 {
fssh.LogResult(result)
return fmt.Errorf("couldn't execute command: %s on host: %v , error: %s",
cmd, svcMasterIP, err)
}
return nil
}
// getPodsFromNodeNames fetches list of pods scheduled on a given list of nodes
func getPodsFromNodeNames(pods []*v1.Pod, nodeNames []string) []string {
var podScheduledOnNodes []string
for _, pod := range pods {
if isValuePresentInTheList(nodeNames, pod.Spec.NodeName) {
podScheduledOnNodes = append(podScheduledOnNodes, pod.Name)
}
}
return podScheduledOnNodes
}
// getNodesOfZone fetches list of k8s node names for a given availability zone
func getNodesOfZone(nodeList *v1.NodeList, availabilityZone string) []string {
var nodeNames []string
for _, node := range nodeList.Items {
nodeLabels := node.Labels
if nodeLabels[zoneKey] == availabilityZone {
nodeNames = append(nodeNames, node.Name)
}
}
return nodeNames
}
// getClusterNameFromZone fetches clusterName for a given availability zone
func getClusterNameFromZone(ctx context.Context, availabilityZone string) string {
clusterName := ""
clusterComputeResourceList, _, err := getClusterName(ctx, &e2eVSphere)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
nimbusGeneratedVcPwd := GetAndExpectStringEnvVar(vcUIPwd)
cmd := fmt.Sprintf("dcli +username %s +password %s +skip +show com vmware "+
"vcenter consumptiondomains zones cluster associations get --zone "+
"%s", adminUser, nimbusGeneratedVcPwd, availabilityZone)
vcAddress := e2eVSphere.Config.Global.VCenterHostname + ":" + sshdPort
framework.Logf("Invoking command %v on vCenter host %v", cmd, vcAddress)
result, err := fssh.SSH(cmd, vcAddress, framework.TestContext.Provider)
framework.Logf("result: %v", result)
if err != nil || result.Code != 0 {
fssh.LogResult(result)
framework.Failf("couldn't execute command: %s on vCenter host: %v, error: %v", cmd, vcAddress, err)
}
// dcli prints the associated cluster id after a "- " separator; extract and trim it
clusterID := strings.TrimSpace(strings.Split(result.Stdout, "- ")[1])
framework.Logf("clusterId: %v", clusterID)
for _, cluster := range clusterComputeResourceList {
clusterMoId := cluster.Reference().Value
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.Logf("cluster MOID %v", clusterMoId)
if clusterMoId == clusterID {
framework.Logf("Found matching cluster domain!!")
clusterName = cluster.Name()
break
}
}
framework.Logf("cluster on zone is: %s", clusterName)
if clusterName == "" {
framework.Failf("couldn't find cluster on zone %s", availabilityZone)
}
return clusterName
}
// waitForPodsToBeInTerminatingPhase waits for a pod to reach the Terminating state
// in the guest cluster by running kubectl commands
func waitForPodsToBeInTerminatingPhase(sshClientConfig *ssh.ClientConfig, svcMasterIP string,
podName string, namespace string, timeout time.Duration) error {
kubeConfigPath := GetAndExpectStringEnvVar(gcKubeConfigPath)
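// kubectl runs on the SVC master but targets the guest cluster through its kubeconfig.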
waitErr := wait.PollImmediate(poll, timeout, func() (bool, error) {
cmd := fmt.Sprintf("kubectl get pod %s --kubeconfig %s -n %s --no-headers|awk '{print $3}'",
podName, kubeConfigPath, namespace)
framework.Logf("Invoking command '%v' on host %v", cmd,
svcMasterIP)
cmdResult, err := sshExec(sshClientConfig, svcMasterIP,
cmd)
if err != nil || cmdResult.Code != 0 {
fssh.LogResult(cmdResult)
return false, fmt.Errorf("couldn't execute command: %s on host: %v , error: %s",
cmd, svcMasterIP, err)
}
framework.Logf("result %v", cmdResult)
framework.Logf("stdout %s", cmdResult.Stdout)
podPhase := strings.TrimSpace(cmdResult.Stdout)
if podPhase == "Terminating" {
framework.Logf("Pod %s is in terminating state", podName)
return true, nil
}
return false, nil
})
return waitErr
}
// getApiServerIpOfZone fetches the supervisor apiserver ip of a particular zone
func getApiServerIpOfZone(ctx context.Context, zone string) string {
var hostNames []string
apiServerIpInZone := ""
// Get Cluster details
clusterComputeResource, _, err := getClusterName(ctx, &e2eVSphere)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
apiServerIPs := GetAndExpectStringEnvVar(apiServerIPs)
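// The apiServerIPs env var is expected to hold a comma-separated list of supervisor apiserver IPs.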
apiServerIps := strings.Split(apiServerIPs, ",")
framework.Logf("apiServerIps: %v", apiServerIps)
clusterName := getClusterNameFromZone(ctx, zone)
hostsInCluster := getHostsByClusterName(ctx, clusterComputeResource, clusterName)
for _, hostObj := range hostsInCluster {
hostNames = append(hostNames, hostObj.Name())
}
framework.Logf("hostNames: %v", hostNames)
for _, apiServer := range apiServerIps {
host := getHostIpWhereVmIsPresent(apiServer)
if isValuePresentInTheList(hostNames, host) {
framework.Logf("Found apiserver in zone: %v", apiServer)
apiServerIpInZone = apiServer
break
}
}
if apiServerIpInZone == "" {
framework.Failf("couldn't find cluster on zone %s", zone)
}
return apiServerIpInZone
}
// waitForApiServerToBeUp waits for supervisor apiserver ip to be up by running kubectl commands
func waitForApiServerToBeUp(svcMasterIp string, sshClientConfig *ssh.ClientConfig,
timeout time.Duration) error {
kubeConfigPath := GetAndExpectStringEnvVar(gcKubeConfigPath)
waitErr := wait.PollImmediate(poll, timeout, func() (bool, error) {
cmd := fmt.Sprintf("kubectl get ns,sc --kubeconfig %s",
kubeConfigPath)
framework.Logf("Invoking command '%v' on host %v", cmd,
svcMasterIp)
cmdResult, err := sshExec(sshClientConfig, svcMasterIp,
cmd)
framework.Logf("result %v", cmdResult)
if err != nil {
return false, nil
}
framework.Logf("Apiserver is fully up")
return true, nil
})
return waitErr
}
// enterHostIntoMM puts a host into maintenance mode with a particular timeout and
// maintenance mode type
func enterHostIntoMM(ctx context.Context, host *object.HostSystem, mmModeType string,
timeout int32, evacuateVms bool) {
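// Build the vSAN decommission spec for the requested mode and wrap it in a host maintenance spec.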
mmSpec := vim25types.VsanHostDecommissionMode{
ObjectAction: mmModeType,
}
hostMMSpec := vim25types.HostMaintenanceSpec{
VsanMode: &mmSpec,
Purpose: "",
}
task, err := host.EnterMaintenanceMode(ctx, timeout, evacuateVms, &hostMMSpec)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
_, err = task.WaitForResult(ctx, nil)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.Logf("Host: %v in in maintenance mode", host)
}
// exitHostMM exits a host from maintenance mode with a particular timeout
func exitHostMM(ctx context.Context, host *object.HostSystem, timeout int32) {
task, err := host.ExitMaintenanceMode(ctx, timeout)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
_, err = task.WaitForResult(ctx, nil)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.Logf("Host: %v exited from maintenance mode", host)
}
// getPodAffinityTerm builds pod affinity terms with the "app: nginx" label selector and the given allowed topology key
func getPodAffinityTerm(allowedTopologyHAMap map[string][]string) []v1.PodAffinityTerm {
var podAffinityTerm v1.PodAffinityTerm
var podAffinityTerms []v1.PodAffinityTerm
var labelSelector *metav1.LabelSelector
var labelSelectorRequirements []metav1.LabelSelectorRequirement
var labelSelectorRequirement metav1.LabelSelectorRequirement
labelSelectorRequirement.Key = "app"
labelSelectorRequirement.Operator = "In"
labelSelectorRequirement.Values = []string{"nginx"}
labelSelectorRequirements = append(labelSelectorRequirements, labelSelectorRequirement)
labelSelector = new(metav1.LabelSelector)
labelSelector.MatchExpressions = labelSelectorRequirements
podAffinityTerm.LabelSelector = labelSelector
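// Use the allowed topology key as the pod affinity topology key; if the map holds
// multiple keys, only the last one iterated is retained.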
for key := range allowedTopologyHAMap {
podAffinityTerm.TopologyKey = key
}
podAffinityTerms = append(podAffinityTerms, podAffinityTerm)
return podAffinityTerms
}
// verifyStsVolumeMetadata verifies sts pod replicas, TKG HA annotations and
// node affinities on the SVC PVC, and verifies CNS volume metadata
func verifyStsVolumeMetadata(client clientset.Interface, ctx context.Context, namespace string,
statefulset *appsv1.StatefulSet, replicas int32, allowedTopologyHAMap map[string][]string,
categories []string, storagePolicyName string, nodeList *v1.NodeList, f *framework.Framework) {
// Waiting for pods status to be Ready
fss.WaitForStatusReadyReplicas(client, statefulset, replicas)
gomega.Expect(fss.CheckMount(client, statefulset, mountPath)).NotTo(gomega.HaveOccurred())
ssPodsBeforeScaleDown := fss.GetPodList(client, statefulset)
gomega.Expect(ssPodsBeforeScaleDown.Items).NotTo(gomega.BeEmpty(),
fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name))
gomega.Expect(len(ssPodsBeforeScaleDown.Items) == int(replicas)).To(gomega.BeTrue(),
"Number of Pods in the statefulset should match with number of replicas")
ginkgo.By("Verify GV PV and SV PV has has required PV node affinity details")
ginkgo.By("Verify SV PVC has TKG HA annotations set")
// Get the list of Volumes attached to Pods before scale down
for _, sspod := range ssPodsBeforeScaleDown.Items {
pod, err := client.CoreV1().Pods(namespace).Get(ctx, sspod.Name, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
for _, volumespec := range sspod.Spec.Volumes {
if volumespec.PersistentVolumeClaim != nil {
pvcName := volumespec.PersistentVolumeClaim.ClaimName
pv := getPvFromClaim(client, statefulset.Namespace, pvcName)
pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx,
pvcName, metav1.GetOptions{})
gomega.Expect(pvclaim).NotTo(gomega.BeNil())
gomega.Expect(err).NotTo(gomega.HaveOccurred())
volHandle := getVolumeIDFromSupervisorCluster(pv.Spec.CSI.VolumeHandle)
gomega.Expect(volHandle).NotTo(gomega.BeEmpty())
svcPVCName := pv.Spec.CSI.VolumeHandle
svcPVC := getPVCFromSupervisorCluster(svcPVCName)
gomega.Expect(*svcPVC.Spec.StorageClassName == storagePolicyName).To(
gomega.BeTrue(), "SV Pvc storageclass does not match with SV storageclass")
framework.Logf("GC PVC's storageclass matches SVC PVC's storageclass")
verifyAnnotationsAndNodeAffinity(allowedTopologyHAMap, categories, pod,
nodeList, svcPVC, pv, svcPVCName)
// Verify the attached volume match the one in CNS cache
err = waitAndVerifyCnsVolumeMetadata4GCVol(volHandle, svcPVCName, pvclaim,
pv, pod)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.Logf(fmt.Sprintf("Verify volume: %s is attached to the node: %s",
pv.Spec.CSI.VolumeHandle, sspod.Spec.NodeName))
var vmUUID string
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
vmUUID, err = getVMUUIDFromNodeName(pod.Spec.NodeName)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
verifyCRDInSupervisorWithWait(ctx, f, pod.Spec.NodeName+"-"+svcPVCName,
crdCNSNodeVMAttachment, crdVersion, crdGroup, true)
isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, volHandle, vmUUID)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Disk is not attached to the node")
framework.Logf("verify the attached volumes match those in CNS Cache")
err = waitAndVerifyCnsVolumeMetadata4GCVol(volHandle, svcPVCName, pvclaim,
pv, pod)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
}
}
}