chaos_test.go
//go:build e2e
// +build e2e

package e2e

import (
	"context"
	"math/rand"
	"strings"
	"sync"
	"testing"
	"time"

	. "github.com/onsi/gomega"

	cpomanifests "github.com/openshift/hypershift/control-plane-operator/controllers/hostedcontrolplane/manifests"
	"github.com/openshift/hypershift/hypershift-operator/controllers/manifests"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/utils/pointer"

	hyperv1 "github.com/openshift/hypershift/api/v1beta1"
	e2eutil "github.com/openshift/hypershift/test/e2e/util"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	crclient "sigs.k8s.io/controller-runtime/pkg/client"
)
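
// These chaos tests are compiled only under the e2e build tag. A typical
// invocation (illustrative; the exact flags, package path, and required
// global options depend on the test harness) might look like:
//
//	go test -tags e2e -run TestHAEtcdChaos ./test/e2e/...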
// TestHAEtcdChaos launches a HighlyAvailable control plane and executes a suite
// of chaotic etcd tests which ensure no data is lost in the chaos.
func TestHAEtcdChaos(t *testing.T) {
t.Parallel()
ctx, cancel := context.WithCancel(testContext)
defer cancel()
// Create a cluster
clusterOpts := globalOpts.DefaultClusterOptions(t)
clusterOpts.ControlPlaneAvailabilityPolicy = string(hyperv1.HighlyAvailable)
clusterOpts.NodePoolReplicas = 0
e2eutil.NewHypershiftTest(t, ctx, func(t *testing.T, g Gomega, mgtClient crclient.Client, hostedCluster *hyperv1.HostedCluster) {
t.Run("SingleMemberRecovery", testSingleMemberRecovery(ctx, mgtClient, hostedCluster))
t.Run("KillRandomMembers", testKillRandomMembers(ctx, mgtClient, hostedCluster))
t.Run("KillAllMembers", testKillAllMembers(ctx, mgtClient, hostedCluster))
}).Execute(&clusterOpts, hyperv1.NonePlatform, globalOpts.ArtifactDir, globalOpts.ServiceAccountSigningKey)
}

// testKillRandomMembers ensures that data is preserved following a period where
// random etcd members are repeatedly killed.
func testKillRandomMembers(parentCtx context.Context, client crclient.Client, cluster *hyperv1.HostedCluster) func(t *testing.T) {
return func(t *testing.T) {
g := NewWithT(t)
ctx, cancel := context.WithCancel(parentCtx)
defer cancel()
guestNamespace := manifests.HostedControlPlaneNamespace(cluster.Namespace, cluster.Name).Name
t.Logf("Hosted control plane namespace is %s", guestNamespace)
// Get a client for the cluster
t.Logf("Waiting for guest client to become available")
guestClient := e2eutil.WaitForGuestClient(t, ctx, client, cluster)
		// Create data in the cluster which should survive the ensuing chaos
value, _ := time.Now().MarshalText()
cm := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: e2eutil.SimpleNameGenerator.GenerateName("marker-"),
},
Data: map[string]string{"value": string(value)},
}
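		// Retry creation: the hosted API server may still be settling, so a
		// transient failure here should not fail the test outright.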
err := wait.PollImmediateUntil(5*time.Second, func() (bool, error) {
if err := guestClient.Create(ctx, cm); err != nil {
return false, nil
}
return true, nil
}, ctx.Done())
g.Expect(err).NotTo(HaveOccurred(), "failed to create marker configmap")
// Find etcd pods in the control plane namespace
etcdSts := cpomanifests.EtcdStatefulSet(guestNamespace)
err = client.Get(ctx, crclient.ObjectKeyFromObject(etcdSts), etcdSts)
g.Expect(err).NotTo(HaveOccurred(), "failed to get etcd statefulset")
etcdPods := &corev1.PodList{}
err = client.List(ctx, etcdPods, &crclient.ListOptions{
Namespace: manifests.HostedControlPlaneNamespace(cluster.Namespace, cluster.Name).Name,
LabelSelector: labels.Set(etcdSts.Spec.Selector.MatchLabels).AsSelector(),
})
g.Expect(err).NotTo(HaveOccurred(), "failed to list etcd pods")
g.Expect(etcdPods.Items).NotTo(BeEmpty(), "couldn't find any etcd pods")
t.Logf("found %d etcd pods", len(etcdPods.Items))
// Delete random etcd pods for a while
func() {
duration, period := 30*time.Second, 5*time.Second
t.Logf("deleting random etcd pods every %s for %s", period, duration)
ctx, cancel := context.WithTimeout(ctx, duration)
defer cancel()
wait.UntilWithContext(ctx, func(ctx context.Context) {
pod := randomPods(etcdPods.Items, 1)[0]
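				// A zero grace period forces immediate termination rather than a graceful shutdown.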
err := client.Delete(ctx, &pod, &crclient.DeleteOptions{
GracePeriodSeconds: pointer.Int64(0),
})
if err != nil {
t.Errorf("failed to delete pod %s: %s", pod.Name, err)
} else {
t.Logf("deleted pod %s", pod.Name)
}
}, period)
}()
// The etcd cluster should eventually roll out completely
err = wait.PollImmediateUntil(5*time.Second, func() (bool, error) {
err := client.Get(ctx, crclient.ObjectKeyFromObject(etcdSts), etcdSts)
if err != nil {
t.Logf("failed to get statefulset %s/%s: %s", etcdSts.Namespace, etcdSts.Name, err)
return false, nil
}
return *etcdSts.Spec.Replicas == etcdSts.Status.ReadyReplicas, nil
}, ctx.Done())
g.Expect(err).NotTo(HaveOccurred(), "etcd statefulset available replicas never converged")
t.Logf("etcd statefulset recovered successfully")
// The data should eventually be observed to have survived
err = wait.PollImmediateUntil(5*time.Second, func() (bool, error) {
actual := &corev1.ConfigMap{}
if err := guestClient.Get(ctx, crclient.ObjectKeyFromObject(cm), actual); err != nil {
t.Logf("failed to get marker configmap: %s", err)
return false, nil
}
g.Expect(actual.Data).ToNot(BeNil(), "marker configmap is missing data")
g.Expect(actual.Data["value"]).To(Equal(string(value)), "marker data value doesn't match original")
t.Logf("marker data was verified")
return true, nil
}, ctx.Done())
g.Expect(err).NotTo(HaveOccurred(), "failed to verify data following disruption")
}
}
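
// The statefulset-convergence poll above recurs verbatim in the tests below;
// a hypothetical shared helper (an illustrative sketch, not part of the
// original file; it would also need an appsv1 "k8s.io/api/apps/v1" import)
// could consolidate it:
//
//	func waitForEtcdRollout(ctx context.Context, t *testing.T, c crclient.Client, sts *appsv1.StatefulSet) error {
//		return wait.PollImmediateUntil(5*time.Second, func() (bool, error) {
//			if err := c.Get(ctx, crclient.ObjectKeyFromObject(sts), sts); err != nil {
//				t.Logf("failed to get statefulset %s/%s: %s", sts.Namespace, sts.Name, err)
//				return false, nil
//			}
//			return *sts.Spec.Replicas == sts.Status.ReadyReplicas, nil
//		}, ctx.Done())
//	}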

// testKillAllMembers ensures that data is preserved following the simultaneous
// loss of all etcd members.
func testKillAllMembers(parentCtx context.Context, client crclient.Client, cluster *hyperv1.HostedCluster) func(t *testing.T) {
return func(t *testing.T) {
g := NewWithT(t)
ctx, cancel := context.WithCancel(parentCtx)
defer cancel()
guestNamespace := manifests.HostedControlPlaneNamespace(cluster.Namespace, cluster.Name).Name
t.Logf("Hosted control plane namespace is %s", guestNamespace)
// Get a client for the cluster
t.Logf("Waiting for guest client to become available")
guestClient := e2eutil.WaitForGuestClient(t, ctx, client, cluster)
		// Create data in the cluster which should survive the ensuing chaos
value, _ := time.Now().MarshalText()
cm := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: e2eutil.SimpleNameGenerator.GenerateName("marker-"),
},
Data: map[string]string{"value": string(value)},
}
err := wait.PollImmediateUntil(5*time.Second, func() (bool, error) {
if err := guestClient.Create(ctx, cm); err != nil {
return false, nil
}
return true, nil
}, ctx.Done())
g.Expect(err).NotTo(HaveOccurred(), "failed to create marker configmap")
// Find etcd pods in the control plane namespace
etcdSts := cpomanifests.EtcdStatefulSet(guestNamespace)
err = client.Get(ctx, crclient.ObjectKeyFromObject(etcdSts), etcdSts)
g.Expect(err).NotTo(HaveOccurred(), "failed to get etcd statefulset")
etcdPods := &corev1.PodList{}
err = client.List(ctx, etcdPods, &crclient.ListOptions{
Namespace: manifests.HostedControlPlaneNamespace(cluster.Namespace, cluster.Name).Name,
LabelSelector: labels.Set(etcdSts.Spec.Selector.MatchLabels).AsSelector(),
})
g.Expect(err).NotTo(HaveOccurred(), "failed to list etcd pods")
g.Expect(etcdPods.Items).NotTo(BeEmpty(), "couldn't find any etcd pods")
t.Logf("found %d etcd pods", len(etcdPods.Items))
		// Delete all etcd pods at once, which should cause a majority (quorum) outage
var wg sync.WaitGroup
wg.Add(len(etcdPods.Items))
for i := range etcdPods.Items {
go func(pod *corev1.Pod) {
timeout, cancel := context.WithTimeout(ctx, 5*time.Second)
defer cancel()
err := client.Delete(timeout, pod, &crclient.DeleteOptions{
GracePeriodSeconds: pointer.Int64(0),
})
if err != nil {
t.Errorf("failed to delete pod %s: %s", pod.Name, err)
} else {
t.Logf("deleted pod %s", pod.Name)
}
wg.Done()
}(&etcdPods.Items[i])
}
wg.Wait()
// Ensure that all etcd pods are replaced
err = wait.PollImmediateUntil(5*time.Second, func() (bool, error) {
for _, pod := range etcdPods.Items {
actual := &corev1.Pod{}
if err := client.Get(ctx, crclient.ObjectKeyFromObject(&pod), actual); err != nil {
t.Logf("failed to get pod %s: %v", pod.Name, err)
return false, nil
}
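				// The UID changes only if the pod object was recreated; a container
				// restart by the kubelet alone would preserve the original UID.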
if pod.UID == actual.UID {
t.Logf("pod %s not replaced yet", pod.Name)
return false, nil
}
}
return true, nil
}, ctx.Done())
g.Expect(err).NotTo(HaveOccurred(), "failed to wait for etcd pods to be replaced")
// The etcd cluster should eventually roll out completely
err = wait.PollImmediateUntil(5*time.Second, func() (bool, error) {
err := client.Get(ctx, crclient.ObjectKeyFromObject(etcdSts), etcdSts)
if err != nil {
t.Logf("failed to get statefulset %s/%s: %s", etcdSts.Namespace, etcdSts.Name, err)
return false, nil
}
return *etcdSts.Spec.Replicas == etcdSts.Status.ReadyReplicas, nil
}, ctx.Done())
g.Expect(err).NotTo(HaveOccurred(), "etcd statefulset available replicas never converged")
t.Logf("etcd statefulset recovered successfully")
// The data should eventually be observed to have survived
err = wait.PollImmediateUntil(5*time.Second, func() (bool, error) {
actual := &corev1.ConfigMap{}
if err := guestClient.Get(ctx, crclient.ObjectKeyFromObject(cm), actual); err != nil {
t.Logf("failed to get marker configmap: %s", err)
return false, nil
}
g.Expect(actual.Data).ToNot(BeNil(), "marker configmap is missing data")
g.Expect(actual.Data["value"]).To(Equal(string(value)), "marker data value doesn't match original")
t.Logf("marker data was verified")
return true, nil
}, ctx.Done())
g.Expect(err).NotTo(HaveOccurred(), "failed to verify data following disruption")
}
}

// testSingleMemberRecovery ensures that the etcd cluster can recover from a single member losing its data
func testSingleMemberRecovery(parentCtx context.Context, client crclient.Client, cluster *hyperv1.HostedCluster) func(t *testing.T) {
return func(t *testing.T) {
g := NewWithT(t)
ctx, cancel := context.WithCancel(parentCtx)
defer cancel()
guestNamespace := manifests.HostedControlPlaneNamespace(cluster.Namespace, cluster.Name).Name
t.Logf("Hosted control plane namespace is %s", guestNamespace)
// Wait for a guest client to become available
t.Logf("Waiting for guest client to become available")
_ = e2eutil.WaitForGuestClient(t, ctx, client, cluster)
// Find etcd pods in the control plane namespace
etcdSts := cpomanifests.EtcdStatefulSet(guestNamespace)
err := client.Get(ctx, crclient.ObjectKeyFromObject(etcdSts), etcdSts)
g.Expect(err).NotTo(HaveOccurred(), "failed to get etcd statefulset")
etcdPods := &corev1.PodList{}
		err = client.List(ctx, etcdPods, &crclient.ListOptions{
			Namespace:     manifests.HostedControlPlaneNamespace(cluster.Namespace, cluster.Name).Name,
			LabelSelector: labels.Set(etcdSts.Spec.Selector.MatchLabels).AsSelector(),
		})
		g.Expect(err).NotTo(HaveOccurred(), "failed to list etcd pods")
		g.Expect(etcdPods.Items).NotTo(BeEmpty(), "couldn't find any etcd pods")
// Delete a single etcd pod along with its pvc
randomPod := randomPods(etcdPods.Items, 1)[0]
originalPodID := randomPod.UID
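		// StatefulSet claims are named <claimTemplate>-<podName>; the etcd claim
		// template is evidently named "data", so pod etcd-N owns claim data-etcd-N.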
pvcName := "data-etcd" + strings.TrimPrefix(randomPod.Name, "etcd")
pvc := &corev1.PersistentVolumeClaim{}
pvc.Name = pvcName
pvc.Namespace = randomPod.Namespace
var wg sync.WaitGroup
wg.Add(2)
go func(pod *corev1.Pod) {
defer wg.Done()
err := client.Delete(ctx, pod)
g.Expect(err).ToNot(HaveOccurred(), "failed to delete etcd pod")
t.Logf("Deleted etcd pod %s", pod.Name)
}(&randomPod)
go func(pvc *corev1.PersistentVolumeClaim) {
defer wg.Done()
err := client.Delete(ctx, pvc)
g.Expect(err).ToNot(HaveOccurred(), "failed to delete etcd pvc")
t.Logf("Deleted etcd pvc %s", pvc.Name)
}(pvc)
wg.Wait()
// Wait for etcd pod to be replaced
t.Log("Waiting for deleted pod to be replaced")
err = wait.PollImmediateUntil(5*time.Second, func() (bool, error) {
err := client.Get(ctx, crclient.ObjectKeyFromObject(&randomPod), &randomPod)
if err != nil {
t.Logf("failed to get pod %s/%s: %v", randomPod.Namespace, randomPod.Name, err)
return false, nil
}
if randomPod.UID == originalPodID {
t.Log("original pod has not been replaced")
return false, nil
}
t.Log("original pod is now replaced")
return true, nil
		}, ctx.Done())
		g.Expect(err).NotTo(HaveOccurred(), "deleted etcd pod was never replaced")

		// The etcd cluster should eventually roll out completely
		t.Log("Waiting for etcd statefulset to recover")
err = wait.PollImmediateUntil(5*time.Second, func() (bool, error) {
err := client.Get(ctx, crclient.ObjectKeyFromObject(etcdSts), etcdSts)
if err != nil {
t.Logf("failed to get statefulset %s/%s: %s", etcdSts.Namespace, etcdSts.Name, err)
return false, nil
}
return *etcdSts.Spec.Replicas == etcdSts.Status.ReadyReplicas, nil
}, ctx.Done())
g.Expect(err).NotTo(HaveOccurred(), "etcd statefulset available replicas never converged")
t.Logf("etcd statefulset recovered successfully")
}
}

// TODO: Generics :-)
func randomPods(pods []corev1.Pod, count int) []corev1.Pod {
var selected []corev1.Pod
indexes := rand.Perm(len(pods))
for i := 0; i < count; i++ {
selected = append(selected, pods[indexes[i]])
}
return selected
}
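
// randomItems is a minimal generic sketch of the TODO above: an illustrative,
// hypothetical alternative to randomPods, not part of the original file. Like
// randomPods, it panics when count exceeds len(items).
func randomItems[T any](items []T, count int) []T {
	selected := make([]T, 0, count)
	for _, i := range rand.Perm(len(items))[:count] {
		selected = append(selected, items[i])
	}
	return selected
}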