vsphere_scale.go
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vsphere
import (
"fmt"
"strconv"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
storageV1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
/*
Perform vSphere volume life cycle management at scale, based on a user-configurable number of volumes.
The following actions are performed as part of this test:
1. Create Storage Classes of 4 categories (Default, SC with Non-Default Datastore, SC with SPBM Policy, SC with VSAN Storage Capabilities).
2. Read VCP_SCALE_VOLUME_COUNT, VCP_SCALE_INSTANCES, VCP_SCALE_VOLUMES_PER_POD, VSPHERE_SPBM_POLICY_NAME, VSPHERE_DATASTORE from the system environment.
3. Launch VCP_SCALE_INSTANCES goroutines for creating VCP_SCALE_VOLUME_COUNT volumes. Each goroutine is responsible for creating/attaching VCP_SCALE_VOLUME_COUNT/VCP_SCALE_INSTANCES volumes.
4. Read VCP_SCALE_VOLUMES_PER_POD from the system environment. Each pod will have VCP_SCALE_VOLUMES_PER_POD volumes attached to it.
5. Once all the goroutines are completed, delete all the pods and volumes.
*/
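// The test reads its scale parameters from the environment. A hypothetical
// example configuration (values chosen only for illustration):
//
//	export VCP_SCALE_VOLUME_COUNT=100
//	export VCP_SCALE_INSTANCES=5
//	export VCP_SCALE_VOLUMES_PER_POD=5
//	export VSPHERE_SPBM_POLICY_NAME=gold
//	export VSPHERE_DATASTORE=sharedVmfs-0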
const (
NodeLabelKey = "vsphere_e2e_label"
)
// NodeSelector holds a node label key/value pair, used to schedule pods onto specific nodes.
type NodeSelector struct {
labelKey string
labelValue string
}
var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
f := framework.NewDefaultFramework("vcp-at-scale")
var (
client clientset.Interface
namespace string
nodeSelectorList []*NodeSelector
volumeCount int
numberOfInstances int
volumesPerPod int
policyName string
datastoreName string
nodeVolumeMapChan chan map[string][]string
nodes *v1.NodeList
scNames = []string{storageclass1, storageclass2, storageclass3, storageclass4}
)
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f)
client = f.ClientSet
namespace = f.Namespace.Name
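// nodeVolumeMapChan is unbuffered: each VolumeCreateAndAttach goroutine
// sends its node->volume map exactly once, and the test body receives
// numberOfInstances maps before proceeding.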
nodeVolumeMapChan = make(chan map[string][]string)
// Read the environment variables
volumeCount = GetAndExpectIntEnvVar(VCPScaleVolumeCount)
volumesPerPod = GetAndExpectIntEnvVar(VCPScaleVolumesPerPod)
numberOfInstances = GetAndExpectIntEnvVar(VCPScaleInstances)
Expect(numberOfInstances).NotTo(BeNumerically(">", 5), "The maximum allowed number of instances is 5")
Expect(numberOfInstances).NotTo(BeNumerically(">", volumeCount), "The number of instances should not exceed the total volume count")
policyName = GetAndExpectStringEnvVar(SPBMPolicyName)
datastoreName = GetAndExpectStringEnvVar(StorageClassDatastoreName)
nodes = framework.GetReadySchedulableNodesOrDie(client)
if len(nodes.Items) < 2 {
framework.Skipf("Requires at least %d nodes (not %d)", 2, len(nodes.Items))
}
// Verify volume count specified by the user can be satisfied
if volumeCount > volumesPerNode*len(nodes.Items) {
framework.Skipf("Cannot attach %d volumes to %d nodes. Maximum volumes that can be attached on %d nodes is %d", volumeCount, len(nodes.Items), len(nodes.Items), volumesPerNode*len(nodes.Items))
}
nodeSelectorList = createNodeLabels(client, namespace, nodes)
})
/*
Remove labels from all the nodes
*/
framework.AddCleanupAction(func() {
// Cleanup actions will be called even when the tests are skipped, in which case the namespace is left unset.
if len(namespace) > 0 {
for _, node := range nodes.Items {
framework.RemoveLabelOffNode(client, node.Name, NodeLabelKey)
}
}
})
It("vsphere scale tests", func() {
var pvcClaimList []string
nodeVolumeMap := make(map[string][]string)
// Volumes will be provisioned with each of the different types of Storage Class
scArrays := make([]*storageV1.StorageClass, len(scNames))
for index, scname := range scNames {
// Create vSphere Storage Class
By(fmt.Sprintf("Creating Storage Class : %q", scname))
var sc *storageV1.StorageClass
scParams := make(map[string]string)
var err error
switch scname {
case storageclass1:
scParams = nil
case storageclass2:
scParams[Policy_HostFailuresToTolerate] = "1"
case storageclass3:
scParams[SpbmStoragePolicy] = policyName
case storageclass4:
scParams[Datastore] = datastoreName
}
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(scname, scParams))
Expect(err).NotTo(HaveOccurred(), "Failed to create storage class")
Expect(sc).NotTo(BeNil(), "Storage class is empty")
defer client.StorageV1().StorageClasses().Delete(scname, nil)
scArrays[index] = sc
}
volumeCountPerInstance := volumeCount / numberOfInstances
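// Distribute volumes across the instances; the last instance picks up any
// remainder so that exactly volumeCount volumes are created in total
// (e.g., hypothetically, 10 volumes over 3 instances -> 3 + 3 + 4).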
for instanceCount := 0; instanceCount < numberOfInstances; instanceCount++ {
if instanceCount == numberOfInstances-1 {
volumeCountPerInstance = volumeCount
}
volumeCount = volumeCount - volumeCountPerInstance
go VolumeCreateAndAttach(client, namespace, scArrays, volumeCountPerInstance, volumesPerPod, nodeSelectorList, nodeVolumeMapChan)
}
// Get the list of all volumes attached to each node from the go routines by reading the data from the channel
for instanceCount := 0; instanceCount < numberOfInstances; instanceCount++ {
for node, volumeList := range <-nodeVolumeMapChan {
nodeVolumeMap[node] = append(nodeVolumeMap[node], volumeList...)
}
}
podList, err := client.CoreV1().Pods(namespace).List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred(), "Failed to list pods")
for _, pod := range podList.Items {
pvcClaimList = append(pvcClaimList, getClaimsForPod(&pod, volumesPerPod)...)
By("Deleting pod")
err = framework.DeletePodWithWait(f, client, &pod)
Expect(err).NotTo(HaveOccurred())
}
By("Waiting for volumes to be detached from the node")
err = waitForVSphereDisksToDetach(nodeVolumeMap)
Expect(err).NotTo(HaveOccurred())
for _, pvcClaim := range pvcClaimList {
err = framework.DeletePersistentVolumeClaim(client, pvcClaim, namespace)
Expect(err).NotTo(HaveOccurred())
}
})
})
// Get PVC claims for the pod
func getClaimsForPod(pod *v1.Pod, volumesPerPod int) []string {
pvcClaimList := make([]string, volumesPerPod)
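// Pods in this test are created by the framework with exactly volumesPerPod
// PVC-backed volumes, so indexing pvcClaimList by the volume index is safe.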
for i, volumespec := range pod.Spec.Volumes {
if volumespec.PersistentVolumeClaim != nil {
pvcClaimList[i] = volumespec.PersistentVolumeClaim.ClaimName
}
}
return pvcClaimList
}
// VolumeCreateAndAttach performs create and attach operations of vSphere persistent volumes at scale
func VolumeCreateAndAttach(client clientset.Interface, namespace string, sc []*storageV1.StorageClass, volumeCountPerInstance int, volumesPerPod int, nodeSelectorList []*NodeSelector, nodeVolumeMapChan chan map[string][]string) {
defer GinkgoRecover()
nodeVolumeMap := make(map[string][]string)
nodeSelectorIndex := 0
for index := 0; index < volumeCountPerInstance; index = index + volumesPerPod {
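// The final pod of this instance may receive fewer volumes when
// volumeCountPerInstance is not an exact multiple of volumesPerPod.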
if (volumeCountPerInstance - index) < volumesPerPod {
volumesPerPod = volumeCountPerInstance - index
}
pvclaims := make([]*v1.PersistentVolumeClaim, volumesPerPod)
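// Storage classes are assigned round-robin per pod: every PVC created for
// this pod uses the same class, sc[index%len(sc)].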
for i := 0; i < volumesPerPod; i++ {
By("Creating PVC using the Storage Class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", sc[index%len(sc)]))
Expect(err).NotTo(HaveOccurred())
pvclaims[i] = pvclaim
}
By("Waiting for claim to be in bound phase")
persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred())
By("Creating pod to attach PV to the node")
nodeSelector := nodeSelectorList[nodeSelectorIndex%len(nodeSelectorList)]
// Create pod to attach Volume to Node
pod, err := framework.CreatePod(client, namespace, map[string]string{nodeSelector.labelKey: nodeSelector.labelValue}, pvclaims, false, "")
Expect(err).NotTo(HaveOccurred())
for _, pv := range persistentvolumes {
nodeVolumeMap[pod.Spec.NodeName] = append(nodeVolumeMap[pod.Spec.NodeName], pv.Spec.VsphereVolume.VolumePath)
}
By("Verify the volume is accessible and available in the pod")
verifyVSphereVolumesAccessible(client, pod, persistentvolumes)
nodeSelectorIndex++
}
// Send this instance's node->volume map back to the test goroutine. The
// channel is deliberately left open: all numberOfInstances goroutines share
// it, so closing it here could panic when another instance later sends.
nodeVolumeMapChan <- nodeVolumeMap
}
func createNodeLabels(client clientset.Interface, namespace string, nodes *v1.NodeList) []*NodeSelector {
var nodeSelectorList []*NodeSelector
for i, node := range nodes.Items {
labelVal := "vsphere_e2e_" + strconv.Itoa(i)
nodeSelector := &NodeSelector{
labelKey: NodeLabelKey,
labelValue: labelVal,
}
nodeSelectorList = append(nodeSelectorList, nodeSelector)
framework.AddOrUpdateLabelOnNode(client, node.Name, NodeLabelKey, labelVal)
}
return nodeSelectorList
}