mongodb_replica_statefulset.go
package image_ecosystem

import (
	"fmt"
	"time"

	g "github.com/onsi/ginkgo"
	o "github.com/onsi/gomega"

	exutil "github.com/openshift/origin/test/extended/util"
	dbutil "github.com/openshift/origin/test/extended/util/db"

	kapiv1 "k8s.io/api/core/v1"
)
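
// This suite instantiates the MongoDB petset (StatefulSet) example template,
// verifies that a record written on the primary replicates to every member,
// and checks that the data is still readable after all pods are deleted and
// recreated by the StatefulSet controller.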
var _ = g.Describe("[Conformance][image_ecosystem][mongodb][Slow] openshift mongodb replication (with statefulset)", func() {
	defer g.GinkgoRecover()

	const templatePath = "https://raw.githubusercontent.com/sclorg/mongodb-container/master/examples/petset/mongodb-petset-persistent.yaml"

	oc := exutil.NewCLI("mongodb-petset-replica", exutil.KubeConfigPath()).Verbose()

	g.Context("", func() {
		g.BeforeEach(func() {
			exutil.DumpDockerInfo()
		})

		g.AfterEach(func() {
			if g.CurrentGinkgoTestDescription().Failed {
				exutil.DumpPodStates(oc)
				exutil.DumpPodLogsStartingWith("", oc)
			}
		})

g.Describe("creating from a template", func() {
g.AfterEach(func() {
for i := 0; i < 3; i++ {
pod := fmt.Sprintf("mongodb-replicaset-%d", i)
podLogs, err := oc.Run("logs").Args(pod, "--timestamps").Output()
if err != nil {
ginkgolog("error retrieving pod logs for %s: %v", pod, err)
continue
}
ginkgolog("pod logs for %s:\n%s", podLogs, err)
}
})
			g.It("should instantiate the template", func() {
				oc.SetOutputDir(exutil.TestContext.OutputDir)

				g.By("creating persistent volumes")
				_, err := exutil.SetupHostPathVolumes(
					oc.AdminKubeClient().Core().PersistentVolumes(),
					oc.Namespace(),
					"256Mi",
					3,
				)
				o.Expect(err).NotTo(o.HaveOccurred())

				defer cleanup(oc)

				g.By("creating a new app")
				o.Expect(
					oc.Run("new-app").Args(
						"-f", templatePath,
						"-p", "VOLUME_CAPACITY=256Mi",
						"-p", "MEMORY_LIMIT=512Mi",
						"-p", "MONGODB_IMAGE=centos/mongodb-32-centos7",
						"-p", "MONGODB_SERVICE_NAME=mongodb-replicaset",
					).Execute(),
				).Should(o.Succeed())

				g.By("waiting for all pods to reach ready status")
				podNames, err := exutil.WaitForPods(
					oc.KubeClient().Core().Pods(oc.Namespace()),
					exutil.ParseLabelsOrDie("name=mongodb-replicaset"),
					exutil.CheckPodIsReadyFn,
					3,
					8*time.Minute,
				)
				if err != nil {
					desc, _ := oc.Run("describe").Args("statefulset").Output()
					ginkgolog("\n\nStatefulset at failure:\n%s\n\n", desc)
					desc, _ = oc.Run("describe").Args("pods").Output()
					ginkgolog("\n\nPods at statefulset failure:\n%s\n\n", desc)
				}
				o.Expect(err).NotTo(o.HaveOccurred())

				g.By("expecting that we can insert a new record on primary node")
				mongo := dbutil.NewMongoDB(podNames[0])
				replicaSet := mongo.(exutil.ReplicaSet)
				out, err := replicaSet.QueryPrimary(oc, `db.test.save({ "status" : "passed" })`)
				ginkgolog("save result: %s\n", out)
				o.Expect(err).ShouldNot(o.HaveOccurred())

				g.By("expecting that we can read a record from all members")
				for _, podName := range podNames {
					o.Expect(readRecordFromPod(oc, podName)).To(o.Succeed())
				}

				g.By("restarting replica set")
				err = oc.Run("delete").Args("pods", "--all", "-n", oc.Namespace()).Execute()
				o.Expect(err).ShouldNot(o.HaveOccurred())
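				// The StatefulSet controller recreates the deleted pods; first wait
				// until no terminating pod (one with a DeletionTimestamp set) remains,
				// then wait for the replacements to become ready.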
g.By("waiting for all pods to be gracefully deleted")
podNames, err = exutil.WaitForPods(
oc.KubeClient().Core().Pods(oc.Namespace()),
exutil.ParseLabelsOrDie("name=mongodb-replicaset"),
func(pod kapiv1.Pod) bool { return pod.DeletionTimestamp != nil },
0,
4*time.Minute,
)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("waiting for all pods to reach ready status")
podNames, err = exutil.WaitForPods(
oc.KubeClient().Core().Pods(oc.Namespace()),
exutil.ParseLabelsOrDie("name=mongodb-replicaset"),
exutil.CheckPodIsReadyFn,
3,
4*time.Minute,
)
o.Expect(err).NotTo(o.HaveOccurred())
g.By("expecting that we can read a record from all members after its restart")
for _, podName := range podNames {
o.Expect(readRecordFromPod(oc, podName)).To(o.Succeed())
}
})
})
})
})
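// readRecordFromPod checks that the test record written on the primary is
// visible from the given replica set member.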
func readRecordFromPod(oc *exutil.CLI, podName string) error {
	// Exclude the _id field from the output because it changes on every insert.
	findCmd := "rs.slaveOk(); printjson(db.test.find({}, {_id: 0}).toArray())"

	fmt.Fprintf(g.GinkgoWriter, "DEBUG: reading record from the pod %v\n", podName)

	mongoPod := dbutil.NewMongoDB(podName)
	// The pod may be running before it is really ready, i.e. before it has
	// joined the replica set and finished its data sync, so poll until the
	// query returns the expected record.
	return exutil.WaitForQueryOutputContains(oc, mongoPod, 1*time.Minute, false, findCmd, `{ "status" : "passed" }`)
}