voltestkube.go
/*
Copyright 2018 Docker, Inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"os"
	"time"

	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	types "k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	// Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters).
	// _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
)
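
// Config carries the two inputs the test needs: the path to a kubeconfig
// file and the base URL of the voltest pod's HTTP endpoint.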
type Config struct {
	KubeConfigFile string
	PodUrl         string
}

type TestCheck struct {
	Name    string
	Passed  bool
	Message string
}

var testList []TestCheck
var exitStatus int

type patchBoolValue struct {
	Op    string `json:"op"`
	Path  string `json:"path"`
	Value bool   `json:"value"`
}
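
// A []patchBoolValue marshals to a JSON Patch document such as
//   [{"op":"replace","path":"/spec/unschedulable","value":false}]
// which is the wire format types.JSONPatchType expects in the cleanup
// Nodes().Patch call at the end of main.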
func main() {
	exitStatus = 0
	configVars := getConfig()
	var err error
	var test TestCheck
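
	// With an empty master URL, BuildConfigFromFlags builds the client
	// config entirely from the kubeconfig file named in our config.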
	config, err := clientcmd.BuildConfigFromFlags("", configVars.KubeConfigFile)
	if err != nil {
		log.Fatal(err)
	}
	c, err := kubernetes.NewForConfig(config)
	if err != nil {
		log.Fatal(err)
	}

	// Test coverage:

	// Confirm Kube version
	test.Name = "Kubernetes Version"
	version, err := c.Discovery().ServerVersion()
	if err != nil {
		test.Passed = false
		log.Println(err)
	} else {
		test.Passed = true
		test.Message = version.String()
	}
	testList = appendTestCheck(testList, test)

	// Confirm test pod exists
	namespace := "default"
	pod := "voltest-0"
	test.Name = "Test Pod Existence"
	_, err = c.CoreV1().Pods(namespace).Get(pod, metav1.GetOptions{})
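	// Distinguish a missing pod (NotFound) from other API errors
	// (StatusError) and from any remaining transport-level failure.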
	if errors.IsNotFound(err) {
		test.Passed = false
		test.Message = "Pod " + pod + " in namespace " + namespace + " not found"
	} else if statusError, isStatus := err.(*errors.StatusError); isStatus {
		test.Passed = false
		test.Message = "Error getting pod " + pod + " in namespace " +
			namespace + ": " + statusError.ErrStatus.Message
	} else if err != nil {
		test.Passed = false
		test.Message = err.Error()
	} else {
		test.Passed = true
		test.Message = "Found pod " + pod + " in namespace " + namespace
	}
	testList = appendTestCheck(testList, test)
	// Confirm test pod is running
	test.Name = "Confirm Running Pod"
	p, err := c.CoreV1().Pods(namespace).Get(pod, metav1.GetOptions{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("Pod %s is %s\n", pod, p.Status.Phase)

	// Confirm that the status page of the container is happy
	statusUrl := configVars.PodUrl + "/status"
	fmt.Println(statusUrl)
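	// getContainerCall is defined later in this file; from its call sites it
	// evidently issues an HTTP GET and returns the response body as a string.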
	resp := getContainerCall(statusUrl)
	if resp == "OK" {
		test.Passed = true
		test.Message = "Pod running"
	} else {
		test.Passed = false
		test.Message = "Pod not running"
	}
	testList = appendTestCheck(testList, test)
	// Clear storage data.
	// Slight bug here; the workaround is explained below:
	// the /resetfilecheck call should return "1", but my test environment is
	// running an older version of the container. So, for now, we just run
	// /textcheck and /bincheck and confirm that we expect "0" for those
	// after running the reset.
	// Refactor note: this isn't a test, but probably should be; we're
	// ignoring that until I get the test container pushed into DockerHub.
	resp = getContainerCall(configVars.PodUrl + "/resetfilecheck")
	if resp == "1" {
		fmt.Println("Reset test data for clean run")
	} else {
		fmt.Println("Couldn't reset test data, check environment.")
		os.Exit(1)
	}

	// Initialize storage data
	getContainerCall(configVars.PodUrl + "/runfilecheck")
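	// Judging by the endpoint names, /runfilecheck writes the text and
	// binary test payloads that textCheck and binCheck verify below.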
	// Confirm textfile
	test = textCheck(configVars.PodUrl, "Initial Textfile Content Confirmation")
	testList = appendTestCheck(testList, test)

	// Confirm binfile
	test = binCheck(configVars.PodUrl, "Initial Binary Content Confirmation")
	testList = appendTestCheck(testList, test)

	// Reschedule container.
	// We're not using getContainerCall because an http error here is
	// expected and okay.
	// Refactor note: should check whether we can use getContainerCall now.
	fmt.Println("Shutting down container")
	sresp, err := http.Get(configVars.PodUrl + "/shutdown")
	if err != nil {
		fmt.Println("Expected http error on container shutdown")
	} else {
		sresp.Body.Close()
	}
	// Wait for the restarted container before re-checking file content.
	// This can take a little time, so we'll loop around a sleep.
	fmt.Println("Waiting for container restart - we wait up to 10 minutes")
	fmt.Println("Should be pulling status from " + configVars.PodUrl + "/status")
	for i := 0; i < 60; i++ {
		time.Sleep(10 * time.Second)
		hresp, err := http.Get(configVars.PodUrl + "/status")
		if err != nil {
			fmt.Print(".")
			fmt.Println(err.Error())
		} else {
			body, err := ioutil.ReadAll(hresp.Body)
			hresp.Body.Close()
			if err != nil {
				fmt.Print(".")
			} else if string(body) == "OK" {
				fmt.Println("\nContainer restarted successfully, moving on")
				break
			}
		}
	}
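	// If the container never comes back, the loop simply falls through after
	// 10 minutes and the content checks below will fail and be reported.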
	// Confirm textfile and binfile on the restarted container
	test = textCheck(configVars.PodUrl, "Post-restart Textfile Content Confirmation")
	testList = appendTestCheck(testList, test)
	test = binCheck(configVars.PodUrl, "Post-restart Binaryfile Content Confirmation")
	testList = appendTestCheck(testList, test)

	// Force the failover test onto a different node:
	// first, get the node name; then set the node unschedulable;
	// then kill the container.
	p, err = c.CoreV1().Pods(namespace).Get(pod, metav1.GetOptions{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("Pod node %s is %s\n", pod, p.Spec.NodeName)
	n, err := c.CoreV1().Nodes().Get(p.Spec.NodeName, metav1.GetOptions{})
	if err != nil {
		log.Fatal(err)
	}
	n.Spec.Unschedulable = true
	n, err = c.CoreV1().Nodes().Update(n)
	if err != nil {
		log.Fatal(err)
	}
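	// Marking the node unschedulable is the API equivalent of
	// `kubectl cordon`: existing pods keep running, but nothing new is
	// scheduled there, so the deletion below forces the pod onto a
	// different node.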
fmt.Println("Pod was running on " + p.Spec.NodeName)
fmt.Println("Shutting down container for forced reschedule")
// We're not using getContainerCall because an http error here
// is expected and okay
fmt.Println("http error okay here")
	gracePeriodSeconds := int64(0)
	err = c.CoreV1().Pods(namespace).Delete(p.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gracePeriodSeconds})
	if err != nil {
		log.Fatal(err)
	}
	// Wait for the rescheduled container before re-checking file content.
	// This can take a little time, so we'll loop around a sleep.
	fmt.Println("Waiting for container reschedule - we wait up to 10 minutes")
	for i := 0; i < 60; i++ {
		time.Sleep(10 * time.Second)
		hresp, err := http.Get(configVars.PodUrl + "/status")
		if err != nil {
			fmt.Print(".")
		} else {
			body, err := ioutil.ReadAll(hresp.Body)
			hresp.Body.Close()
			if err != nil {
				fmt.Println(err.Error())
			} else if string(body) == "OK" {
				fmt.Println("\nContainer rescheduled successfully, moving on")
				break
			}
		}
	}
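	// Refactor note: this wait loop duplicates the restart loop above and
	// could be factored into a shared helper.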
	p, err = c.CoreV1().Pods(namespace).Get(pod, metav1.GetOptions{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("Pod is now running on " + p.Spec.NodeName)

	// Confirm textfile and binfile on the rescheduled container
	test = textCheck(configVars.PodUrl, "Rescheduled Textfile Content Confirmation")
	testList = appendTestCheck(testList, test)
	test = binCheck(configVars.PodUrl, "Rescheduled Binaryfile Content Confirmation")
	testList = appendTestCheck(testList, test)
	// Cleanup post test:
	// reset the unschedulable node back to schedulable.
	// They had to make things this complicated, huh?
	fmt.Println("Going into cleanup...")
	// A plain update (n.Spec.Unschedulable = false) would work too, but a
	// patch avoids a conflict if the node object changed since we fetched it.
	fmt.Println("Cleaning up taint on " + n.Name)
	patchVal := []patchBoolValue{{
		Op:    "replace",
		Path:  "/spec/unschedulable",
		Value: false,
	}}
	patchValBytes, err := json.Marshal(patchVal)
	if err != nil {
		log.Fatal(err)
	}
	// Equivalent to: kubectl patch node <name> -p '{"spec": {"unschedulable": false}}'
	n, err = c.CoreV1().Nodes().Patch(n.GetName(), types.JSONPatchType, patchValBytes)
	if err != nil {
		log.Println(err)
	}

	exitStatus = reportAndOutput(testList)
	os.Exit(exitStatus)
}
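
// The helpers used above (getConfig, getContainerCall, appendTestCheck,
// textCheck, binCheck, reportAndOutput) are defined in the remainder of
// this file. As a rough sketch of the shape getContainerCall likely takes
// (an assumption based on its call sites, not the actual definition):
//
//	func getContainerCall(url string) string {
//		resp, err := http.Get(url)
//		if err != nil {
//			log.Fatal(err)
//		}
//		defer resp.Body.Close()
//		body, err := ioutil.ReadAll(resp.Body)
//		if err != nil {
//			log.Fatal(err)
//		}
//		return string(body)
//	}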