-
Notifications
You must be signed in to change notification settings - Fork 18
/
util.go
128 lines (114 loc) · 4.83 KB
/
util.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
package custom_testsuites
import (
"context"
"crypto/sha256"
"encoding/base64"
"fmt"
"math/rand"
"os"
"github.com/onsi/ginkgo/v2"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
)
const NamespacePrefix = "aws-s3-csi-e2e-"
// genBinDataFromSeed deterministically generates length bytes of
// pseudo-random binary data from the given seed. The same
// (length, seed) pair always produces the same bytes, so writers and
// readers can independently reproduce the expected file content.
func genBinDataFromSeed(length int, seed int64) []byte {
	binData := make([]byte, length)
	randLocal := rand.New(rand.NewSource(seed))
	// rand.Rand.Read is documented to always return len(p), nil; the
	// check is kept defensively so an unexpected failure is visible.
	_, err := randLocal.Read(binData)
	if err != nil {
		fmt.Printf("Error: %v\n", err)
	}
	return binData
}
// checkWriteToPath writes toWrite bytes of seed-derived data to path
// inside pod, and reports (via ginkgo) the SHA-256 of the payload so it
// can be matched against a later checkReadFromPath with the same
// (toWrite, seed) pair.
func checkWriteToPath(f *framework.Framework, pod *v1.Pod, path string, toWrite int, seed int64) {
	payload := genBinDataFromSeed(toWrite, seed)
	b64 := base64.StdEncoding.EncodeToString(payload)
	// First print the payload's checksum from inside the pod, then decode
	// it onto the target path with a single dd write.
	e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo %s | base64 -d | sha256sum", b64))
	e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo %s | base64 -d | dd of=%s bs=%d count=1", b64, path, toWrite))
	ginkgo.By(fmt.Sprintf("written data with sha: %x", sha256.Sum256(payload)))
}
// checkReadFromPath reads toWrite bytes back from path inside pod and
// verifies their SHA-256 matches the digest of the seed-derived data
// that checkWriteToPath would have written for the same (toWrite, seed).
func checkReadFromPath(f *framework.Framework, pod *v1.Pod, path string, toWrite int, seed int64) {
	expected := sha256.Sum256(genBinDataFromSeed(toWrite, seed))
	// Print the on-disk checksum for the logs, then assert it matches the
	// expected digest via grep's exit status.
	e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s bs=%d count=1 | sha256sum", path, toWrite))
	e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s bs=%d count=1 | sha256sum | grep -Fq %x", path, toWrite, expected))
}
// createVolumeResourceWithMountOptions creates a pre-provisioned volume plus
// a bound PV/PVC pair, setting the given mountOptions on the PersistentVolume.
// It exists because storageframework.CreateVolumeResource does not expose a
// way to set PV mount options. It blocks until the PVC and PV are bound and
// fails the test (via framework.ExpectNoError) on any error.
func createVolumeResourceWithMountOptions(ctx context.Context, config *storageframework.PerTestConfig, pattern storageframework.TestPattern, mountOptions []string) *storageframework.VolumeResource {
	f := config.Framework
	r := storageframework.VolumeResource{
		Config:  config,
		Pattern: pattern,
	}
	// The driver is assumed to support pre-provisioned PVs; the type
	// assertion error is deliberately ignored here.
	pDriver, _ := config.Driver.(storageframework.PreprovisionedPVTestDriver)
	r.Volume = pDriver.CreateVolume(ctx, config, storageframework.PreprovisionedPV)
	pvSource, volumeNodeAffinity := pDriver.GetPersistentVolumeSource(false, "", r.Volume)
	pv := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			// Unique PV name derived from the driver name.
			GenerateName: fmt.Sprintf("%s-", config.Driver.GetDriverInfo().Name),
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeSource: *pvSource,
			// The namespace name doubles as the storage class so this PV
			// only binds to the PVC created below.
			StorageClassName: f.Namespace.Name,
			NodeAffinity:     volumeNodeAffinity,
			MountOptions:     mountOptions, // this is not set by storageframework.CreateVolumeResource, which is why we need to implement our own function
			AccessModes:      []v1.PersistentVolumeAccessMode{v1.ReadWriteMany},
			Capacity: v1.ResourceList{
				// Capacity is nominal; object storage has no real size limit.
				v1.ResourceStorage: resource.MustParse("1200Gi"),
			},
		},
	}
	pvc := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "pvc-",
			Namespace:    f.Namespace.Name,
		},
		Spec: v1.PersistentVolumeClaimSpec{
			// Must match the PV's StorageClassName above for binding.
			StorageClassName: &f.Namespace.Name,
			AccessModes:      []v1.PersistentVolumeAccessMode{v1.ReadWriteMany},
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{
					// Request matches the PV's declared capacity.
					v1.ResourceStorage: resource.MustParse("1200Gi"),
				},
			},
		},
	}
	framework.Logf("Creating PVC and PV")
	var err error
	// Create the PVC first, then the PV, then wait for them to bind.
	r.Pvc, err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Create(ctx, pvc, metav1.CreateOptions{})
	framework.ExpectNoError(err, "PVC, PVC creation failed")
	r.Pv, err = f.ClientSet.CoreV1().PersistentVolumes().Create(ctx, pv, metav1.CreateOptions{})
	framework.ExpectNoError(err, "PVC, PV creation failed")
	err = e2epv.WaitOnPVandPVC(ctx, f.ClientSet, f.Timeouts, f.Namespace.Name, r.Pv, r.Pvc)
	framework.ExpectNoError(err, "PVC, PV failed to bind")
	return &r
}
// createPod creates pod in the given namespace, waits until it reaches
// the Running phase, and returns a freshly fetched copy of the pod
// object. On wait/get failures it returns the pod as known so far
// together with a wrapped error.
func createPod(ctx context.Context, client clientset.Interface, namespace string, pod *v1.Pod) (*v1.Pod, error) {
	created, err := client.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{})
	if err != nil {
		return nil, fmt.Errorf("pod Create API error: %w", err)
	}
	// Block until the pod is Running.
	if err := e2epod.WaitForPodNameRunningInNamespace(ctx, client, created.Name, namespace); err != nil {
		return created, fmt.Errorf("pod %q is not Running: %w", created.Name, err)
	}
	// Re-read the pod so callers see up-to-date status fields.
	fresh, err := client.CoreV1().Pods(namespace).Get(ctx, created.Name, metav1.GetOptions{})
	if err != nil {
		return created, fmt.Errorf("pod Get API error: %w", err)
	}
	return fresh, nil
}
// copySmallFileToPod copies the file at hostPath into the pod at podPath
// by base64-encoding its contents into a shell command line. Because the
// whole payload travels as a single argv string, this only works for
// small files; it fails the test on a host-side read error.
func copySmallFileToPod(ctx context.Context, f *framework.Framework, pod *v1.Pod, hostPath, podPath string) {
	contents, err := os.ReadFile(hostPath)
	framework.ExpectNoError(err)
	b64 := base64.StdEncoding.EncodeToString(contents)
	e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo %s | base64 -d > %s", b64, podPath))
}