vsphere_stress.go
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage

import (
	"fmt"
	"os"
	"strconv"
	"sync"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	"k8s.io/api/core/v1"
	storageV1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	k8stype "k8s.io/apimachinery/pkg/types"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)
/*
	Induce stress by creating volumes in parallel with multiple threads, based on user configurable
	values for the number of threads and the number of iterations per thread.
	The following actions are performed as part of this test:

	1. Create storage classes of 4 categories (default, SC with a non-default datastore, SC with an
	   SPBM policy, SC with VSAN storage capabilities).
	2. Read VCP_STRESS_INSTANCES, VCP_STRESS_ITERATIONS, VSPHERE_SPBM_POLICY_NAME and
	   VSPHERE_DATASTORE from the system environment.
	3. Launch a goroutine per instance for volume lifecycle operations.
	4. Each instance of the routine iterates n times, where n is read from the system env
	   variable VCP_STRESS_ITERATIONS.
	5. Each iteration creates 1 PVC and 1 pod using the provisioned PV, verifies the disk is
	   attached to the node, verifies the pod can access the volume, deletes the pod, and
	   finally deletes the PVC.
*/
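// Illustrative environment configuration for running this spec. The values below are
// examples only; the SPBM policy and datastore names are assumptions and must refer to
// objects that actually exist in the target vSphere environment:
//
//	export VCP_STRESS_INSTANCES=8
//	export VCP_STRESS_ITERATIONS=5
//	export VSPHERE_SPBM_POLICY_NAME=gold
//	export VSPHERE_DATASTORE=vsanDatastore
//
// The spec itself can then be selected with a Ginkgo focus expression such as
// --ginkgo.focus="vsphere cloud provider stress".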
var _ = SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", func() {
	f := framework.NewDefaultFramework("vcp-stress")
	var (
		client        clientset.Interface
		namespace     string
		instances     int
		iterations    int
		err           error
		scNames       = []string{storageclass1, storageclass2, storageclass3, storageclass4}
		policyName    string
		datastoreName string
	)
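
	// BeforeEach skips the test unless the provider is vsphere and validates that all
	// required environment variables are set before any volumes are provisioned.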
	BeforeEach(func() {
		framework.SkipUnlessProviderIs("vsphere")
		client = f.ClientSet
		namespace = f.Namespace.Name
		nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
		Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node")

		// If VCP_STRESS_INSTANCES = 12 and VCP_STRESS_ITERATIONS = 10, 12 threads run in parallel for 10 iterations each,
		// resulting in 120 volume and pod creations. Volumes are provisioned with each of the different types of storage class.
		// Each iteration creates a PVC, verifies the PV is provisioned, creates a pod, verifies the volume is attached to the
		// node, and then deletes the pod and deletes the PVC.
		instancesStr := os.Getenv("VCP_STRESS_INSTANCES")
		Expect(instancesStr).NotTo(BeEmpty(), "ENV VCP_STRESS_INSTANCES is not set")
		instances, err = strconv.Atoi(instancesStr)
		Expect(err).NotTo(HaveOccurred(), "Error Parsing VCP_STRESS_INSTANCES")
		Expect(instances <= volumesPerNode*len(nodeList.Items)).To(BeTrue(), fmt.Sprintf("Number of Instances should be less than or equal to: %v", volumesPerNode*len(nodeList.Items)))
		Expect(instances > len(scNames)).To(BeTrue(), "VCP_STRESS_INSTANCES should be greater than the number of storage classes so that all 4 types are utilized")

		iterationStr := os.Getenv("VCP_STRESS_ITERATIONS")
		Expect(iterationStr).NotTo(BeEmpty(), "ENV VCP_STRESS_ITERATIONS is not set")
		iterations, err = strconv.Atoi(iterationStr)
		Expect(err).NotTo(HaveOccurred(), "Error Parsing VCP_STRESS_ITERATIONS")
		Expect(iterations > 0).To(BeTrue(), "VCP_STRESS_ITERATIONS should be greater than 0")

		policyName = os.Getenv("VSPHERE_SPBM_POLICY_NAME")
		datastoreName = os.Getenv("VSPHERE_DATASTORE")
		Expect(policyName).NotTo(BeEmpty(), "ENV VSPHERE_SPBM_POLICY_NAME is not set")
		Expect(datastoreName).NotTo(BeEmpty(), "ENV VSPHERE_DATASTORE is not set")
	})
It("vsphere stress tests", func() {
scArrays := make([]*storageV1.StorageClass, len(scNames))
for index, scname := range scNames {
// Create vSphere Storage Class
By(fmt.Sprintf("Creating Storage Class : %v", scname))
var sc *storageV1.StorageClass
var err error
switch scname {
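			// storageclass1 uses default parameters, storageclass2 sets a VSAN storage
			// capability, storageclass3 references the SPBM policy from VSPHERE_SPBM_POLICY_NAME,
			// and storageclass4 pins provisioning to the datastore from VSPHERE_DATASTORE.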
			case storageclass1:
				sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass1, nil))
			case storageclass2:
				scVSanParameters := make(map[string]string)
				scVSanParameters[Policy_HostFailuresToTolerate] = "1"
				sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass2, scVSanParameters))
			case storageclass3:
				scSPBMPolicyParameters := make(map[string]string)
				scSPBMPolicyParameters[SpbmStoragePolicy] = policyName
				sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass3, scSPBMPolicyParameters))
			case storageclass4:
				scWithDSParameters := make(map[string]string)
				scWithDSParameters[Datastore] = datastoreName
				scWithDatastoreSpec := getVSphereStorageClassSpec(storageclass4, scWithDSParameters)
				sc, err = client.StorageV1().StorageClasses().Create(scWithDatastoreSpec)
			}
			Expect(sc).NotTo(BeNil())
			Expect(err).NotTo(HaveOccurred())
			defer client.StorageV1().StorageClasses().Delete(scname, nil)
			scArrays[index] = sc
		}
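
		// Fan out one goroutine per instance; storage classes are assigned round-robin
		// (instanceCount % len(scArrays)) so that all four class types are exercised.
		// The WaitGroup blocks the spec until every instance has completed its iterations.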
		var wg sync.WaitGroup
		wg.Add(instances)
		for instanceCount := 0; instanceCount < instances; instanceCount++ {
			instanceId := fmt.Sprintf("Thread:%v", instanceCount+1)
			go PerformVolumeLifeCycleInParallel(f, client, namespace, instanceId, scArrays[instanceCount%len(scArrays)], iterations, &wg)
		}
		wg.Wait()
	})
})
// PerformVolumeLifeCycleInParallel performs volume lifecycle operations in parallel.
// Each call runs in its own goroutine and repeats the create/attach/verify/delete cycle
// for the given number of iterations using the supplied storage class.
func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.Interface, namespace string, instanceId string, sc *storageV1.StorageClass, iterations int, wg *sync.WaitGroup) {
	defer wg.Done()
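	// GinkgoRecover must be deferred in every goroutine that makes Gomega assertions so
	// that a failed expectation is reported as a test failure instead of crashing the
	// test binary with an unhandled panic.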
	defer GinkgoRecover()
	vsp, err := getVSphere(f.ClientSet)
	Expect(err).NotTo(HaveOccurred())
	for iterationCount := 0; iterationCount < iterations; iterationCount++ {
		logPrefix := fmt.Sprintf("Instance: [%v], Iteration: [%v] :", instanceId, iterationCount+1)
		By(fmt.Sprintf("%v Creating PVC using the Storage Class: %v", logPrefix, sc.Name))
		pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClassAnnotation(namespace, "1Gi", sc))
		Expect(err).NotTo(HaveOccurred())
		defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)

		var pvclaims []*v1.PersistentVolumeClaim
		pvclaims = append(pvclaims, pvclaim)
		By(fmt.Sprintf("%v Waiting for claim: %v to be in bound phase", logPrefix, pvclaim.Name))
		persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
		Expect(err).NotTo(HaveOccurred())

		By(fmt.Sprintf("%v Creating Pod using the claim: %v", logPrefix, pvclaim.Name))
		// Create pod to attach Volume to Node
		pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, "")
		Expect(err).NotTo(HaveOccurred())

		By(fmt.Sprintf("%v Waiting for the Pod: %v to be in the running state", logPrefix, pod.Name))
		Expect(f.WaitForPodRunningSlow(pod.Name)).NotTo(HaveOccurred())

		// Get the copy of the Pod to know the assigned node name.
		pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
		Expect(err).NotTo(HaveOccurred())

		By(fmt.Sprintf("%v Verifying the volume: %v is attached to the node VM: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName))
		isVolumeAttached, verifyDiskAttachedError := verifyVSphereDiskAttached(client, vsp, persistentvolumes[0].Spec.VsphereVolume.VolumePath, types.NodeName(pod.Spec.NodeName))
		Expect(isVolumeAttached).To(BeTrue())
		Expect(verifyDiskAttachedError).NotTo(HaveOccurred())

		By(fmt.Sprintf("%v Verifying the volume: %v is accessible in the pod: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Name))
		verifyVSphereVolumesAccessible(client, pod, persistentvolumes, vsp)

		By(fmt.Sprintf("%v Deleting pod: %v", logPrefix, pod.Name))
		err = framework.DeletePodWithWait(f, client, pod)
		Expect(err).NotTo(HaveOccurred())

		By(fmt.Sprintf("%v Waiting for volume: %v to be detached from the node: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName))
		err = waitForVSphereDiskToDetach(client, vsp, persistentvolumes[0].Spec.VsphereVolume.VolumePath, k8stype.NodeName(pod.Spec.NodeName))
		Expect(err).NotTo(HaveOccurred())

		By(fmt.Sprintf("%v Deleting the Claim: %v", logPrefix, pvclaim.Name))
		Expect(framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)).NotTo(HaveOccurred())
	}
}