binding_modes_with_topology.go
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"context"
"fmt"
"time"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
fnodes "k8s.io/kubernetes/test/e2e/framework/node"
fpod "k8s.io/kubernetes/test/e2e/framework/pod"
fpv "k8s.io/kubernetes/test/e2e/framework/pv"
admissionapi "k8s.io/pod-security-admission/api"
)
var _ = ginkgo.Describe("[csi-topology-vanilla] Topology-Aware-Provisioning-With-Volume-Binding-Modes", func() {
f := framework.NewDefaultFramework("e2e-vsphere-topology-aware-provisioning")
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
var (
client clientset.Interface
namespace string
zoneValues []string
regionValues []string
pvZone string
pvRegion string
allowedTopologies []v1.TopologySelectorLabelRequirement
nodeList *v1.NodeList
pod *v1.Pod
pvclaim *v1.PersistentVolumeClaim
pv *v1.PersistentVolume
storageclass *storagev1.StorageClass
bindingMode storagev1.VolumeBindingMode
err error
)
ginkgo.BeforeEach(func() {
client = f.ClientSet
namespace = f.Namespace.Name
bootstrap()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
nodeList, err = fnodes.GetReadySchedulableNodes(ctx, f.ClientSet)
framework.ExpectNoError(err, "Unable to find ready and schedulable Node")
if len(nodeList.Items) == 0 {
framework.Failf("Unable to find ready and schedulable Node")
}
bindingMode = storagev1.VolumeBindingWaitForFirstConsumer
})
ginkgo.AfterEach(func() {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
ginkgo.By("Performing test cleanup")
if pvclaim != nil {
framework.ExpectNoError(fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace),
"Failed to delete PVC ", pvclaim.Name)
}
if pv != nil {
framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, poll, pollTimeoutShort))
framework.ExpectNoError(e2eVSphere.waitForCNSVolumeToBeDeleted(pv.Spec.CSI.VolumeHandle))
}
})
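// verifyTopologyAwareProvisioning creates a StorageClass (with the supplied
// allowedTopologies, if any) and a PVC, verifies the claim stays Pending until
// a Pod consumes it, then checks that the provisioned volume's node affinity
// matches the Pod's zone and region before cleaning up the Pod, PVC and PV.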
verifyTopologyAwareProvisioning := func(f *framework.Framework, client clientset.Interface, namespace string,
scParameters map[string]string, allowedTopologies []v1.TopologySelectorLabelRequirement) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
storageclass, pvclaim, err = createPVCAndStorageClass(ctx, client, namespace,
nil, nil, "", allowedTopologies, bindingMode, false, "")
gomega.Expect(err).NotTo(gomega.HaveOccurred())
defer func() {
ginkgo.By("Deleting the Storage Class")
err = client.StorageV1().StorageClasses().Delete(ctx, storageclass.Name, *metav1.NewDeleteOptions(0))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}()
// Wait an additional 30 seconds to make sure the volume claim stays in
// Pending state while it waits for its first consumer.
ginkgo.By("Waiting for 30 seconds and verifying whether the PVC is still in pending state")
time.Sleep(time.Duration(sleepTimeOut) * time.Second)
ginkgo.By("Expect claim status to be in Pending state")
err = fpv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimPending, client,
pvclaim.Namespace, pvclaim.Name, framework.Poll, time.Minute)
gomega.Expect(err).NotTo(gomega.HaveOccurred(),
fmt.Sprintf("Failed to find the volume in pending state with err: %v", err))
ginkgo.By("Creating a pod")
pod, err = createPod(ctx, client, namespace, nil, []*v1.PersistentVolumeClaim{pvclaim}, false, "")
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Expect claim to be in Bound state and provisioning volume passes")
err = fpv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, client,
pvclaim.Namespace, pvclaim.Name, framework.Poll, time.Minute)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Failed to provision volume with err: %v", err))
pv = getPvFromClaim(client, pvclaim.Namespace, pvclaim.Name)
ginkgo.By(fmt.Sprintf("Verify volume: %s is attached to the node: %s",
pv.Spec.CSI.VolumeHandle, pod.Spec.NodeName))
vmUUID := getNodeUUID(ctx, client, pod.Spec.NodeName)
isDiskAttached, err := e2eVSphere.isVolumeAttachedToVM(client, pv.Spec.CSI.VolumeHandle, vmUUID)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(isDiskAttached).To(gomega.BeTrue(), "Volume is not attached to the node")
if allowedTopologies == nil {
// Get the topology values from the pod's location and verify that they
// match the volume's node affinity rules.
ginkgo.By("Verify volume is provisioned in same zone and region as that of the Pod")
podRegion, podZone, err := getTopologyFromPod(pod, nodeList)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
zones := []string{podZone}
regions := []string{podRegion}
pvRegion, pvZone, err = verifyVolumeTopology(pv, zones, regions)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
} else {
ginkgo.By("Verify if volume is provisioned in the selected zone and region")
pvRegion, pvZone, err = verifyVolumeTopology(pv, zoneValues, regionValues)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Verify Pod is scheduled on a node belonging to same topology as the PV it is attached to")
err = verifyPodLocation(pod, nodeList, pvZone, pvRegion)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
}
ginkgo.By("Performing cleanup")
ginkgo.By("Deleting the pod and wait for disk to detach")
err = fpod.DeletePodWithWait(ctx, client, pod)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
ginkgo.By("Deleting the PVC")
err = fpv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
pvclaim = nil
ginkgo.By("Verify if PV is deleted")
framework.ExpectNoError(fpv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, poll, pollTimeoutShort))
pv = nil
}
// Test to verify provisioning with "VolumeBindingMode = WaitForFirstConsumer"
// in Storage Class and "AllowedTopologies" not specified.
// When AllowedTopologies is not specified, the volume create request will
// carry all zones and regions in its "AccessibilityRequirement".
//
// Steps
// 1. Create a Storage Class with "VolumeBindingMode = WaitForFirstConsumer".
// 2. Create a PVC using the above SC.
// 3. Verify that the PVC is not in Bound phase.
// 4. Create a Pod using the above PVC.
// 5. Verify volume is created and contains NodeAffinity rules.
// 6. Verify Pod is scheduled in zone and region where volume was created.
// 7. Delete Pod and wait for disk to be detached.
// 8. Delete PVC.
// 9. Delete SC.
ginkgo.It("Verify provisioning succeeds with VolumeBindingMode set to "+
"WaitForFirstConsumer and without AllowedTopologies in the storage class ", func() {
verifyTopologyAwareProvisioning(f, client, namespace, nil, nil)
})
// Test to verify provisioning with "VolumeBindingMode = WaitForFirstConsumer"
// in Storage Class and "AllowedTopologies" also specified.
//
// Steps
// 1. Create a Storage Class with "VolumeBindingMode = WaitForFirstConsumer"
// and "AllowedTopologies" set to the selected zone and region.
// 2. Create a PVC using the above SC.
// 3. Verify that the PVC is not in Bound phase.
// 4. Create a Pod using the above PVC.
// 5. Verify volume is created in the selected zone and region and contains NodeAffinity rules.
// 6. Verify Pod is scheduled on a node in the same zone and region as the volume.
// 7. Delete Pod and wait for disk to be detached.
// 8. Delete PVC.
// 9. Delete SC.
ginkgo.It("Verify topology aware provisioning succeeds with VolumeBindingMode set to WaitForFirstConsumer", func() {
// Preparing allowedTopologies using topologies with shared datastores.
regionZoneValue := GetAndExpectStringEnvVar(envRegionZoneWithSharedDS)
regionValues, zoneValues, allowedTopologies = topologyParameterForStorageClass(regionZoneValue)
verifyTopologyAwareProvisioning(f, client, namespace, nil, allowedTopologies)
})
})
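// Illustrative sketch (not part of the original suite): roughly how a
// StorageClass with VolumeBindingMode=WaitForFirstConsumer and optional
// AllowedTopologies could be built with client-go. The tests above rely on the
// createPVCAndStorageClass helper defined elsewhere in this package; the
// function name here is hypothetical and the provisioner name is assumed to be
// the vSphere CSI driver. With WaitForFirstConsumer, binding and provisioning
// are delayed until a Pod using the PVC is scheduled, which is the behavior
// these tests exercise.
func newWaitForFirstConsumerStorageClassSketch(ctx context.Context, client clientset.Interface,
name string, allowedTopologies []v1.TopologySelectorLabelRequirement) (*storagev1.StorageClass, error) {
waitForFirstConsumer := storagev1.VolumeBindingWaitForFirstConsumer
sc := &storagev1.StorageClass{
ObjectMeta:        metav1.ObjectMeta{Name: name},
Provisioner:       "csi.vsphere.vmware.com", // assumed provisioner name
VolumeBindingMode: &waitForFirstConsumer,
}
if len(allowedTopologies) > 0 {
// Restrict provisioning to the given zone/region label requirements.
sc.AllowedTopologies = []v1.TopologySelectorTerm{
{MatchLabelExpressions: allowedTopologies},
}
}
return client.StorageV1().StorageClasses().Create(ctx, sc, metav1.CreateOptions{})
}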