-
Notifications
You must be signed in to change notification settings - Fork 0
/
test_reconciler.go
183 lines (155 loc) · 4.76 KB
/
test_reconciler.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
package canary
import (
"context"
v1 "github.com/petomalina/krane/pkg/apis/krane/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"strconv"
)
// reconcileTestJob is an idempotent reconciliation step for the canary's test
// pod: it creates the pod if missing and mirrors the pod's container state
// into canary.Status.Testing. It returns the test pod (possibly the freshly
// built spec, before the cluster has populated its status) or nil when the
// canary is not in the Testing phase.
func (r *ReconcileCanary) reconcileTestJob(ctx context.Context, canary *v1.Canary) (*corev1.Pod, error) {
	// Nothing to do once the canary has moved on to cleanup.
	if canary.Status.Progress == v1.CanaryProgress_Cleanup {
		return nil, nil
	}
	L := log.WithValues("canary", canary.Name, "job", "test")
	// if not testing, we are just gonna skip
	if canary.Status.Progress != v1.CanaryProgress_Testing {
		return nil, nil
	}
	L.Info("Reconciling test step")
	defer L.Info("Reconciliation complete")
	// The policy carries the test image/command and boundary settings.
	policy, err := r.GetCanaryPolicy(ctx, canary)
	if err != nil {
		return nil, err
	}
	// get the test pod so we can handle it
	testJob := &corev1.Pod{}
	err = r.client.Get(ctx, types.NamespacedName{
		Name:      GetTestJobname(canary),
		Namespace: canary.Namespace,
	}, testJob)
	if err != nil {
		// Any error other than NotFound is fatal for this pass.
		if !errors.IsNotFound(err) {
			return nil, err
		}
		// Pod does not exist yet: build it, create it, and record the
		// InProgress status on the canary before continuing.
		testJob, err = r.CreateTestJob(canary, policy)
		if err != nil {
			return nil, err
		}
		err = r.client.Create(ctx, testJob)
		if err != nil {
			return nil, err
		}
		canary.Status.Testing.Status = v1.CanaryPhaseStatus_InProgress
		canary.Status.Testing.Message = "Job initialized"
		canary.Status.Testing.PodName = testJob.Name
		err = r.client.Status().Update(ctx, canary)
		if err != nil {
			return nil, err
		}
	}
	// new values for the test job
	var newStatus v1.CanaryPhaseStatus
	var newMessage string
	// update run state if the pod is running
	var runState *corev1.ContainerStateRunning
	if runState = GetContainerRunningState(testJob, "testjob"); runState != nil {
		newStatus = v1.CanaryPhaseStatus_InProgress
		newMessage = "Job running"
	}
	// update terminal state if the pod has died
	// NOTE(review): checked after the running state so a terminated container
	// wins if both lookups somehow return non-nil.
	var termState *corev1.ContainerStateTerminated
	if termState = GetContainerTerminalState(testJob, "testjob"); termState != nil {
		switch termState.ExitCode {
		case 0:
			newStatus = v1.CanaryPhaseStatus_Success
			newMessage = "Job finished successfully"
		default:
			newStatus = v1.CanaryPhaseStatus_Failure
			newMessage = "Job failed"
		}
	}
	// Only touch the API server when the observed state actually changed.
	if newStatus != canary.Status.Testing.Status || canary.Status.Testing.Message != newMessage {
		canary.Status.Testing.Status = newStatus
		canary.Status.Testing.Message = newMessage
		err = r.client.Status().Update(ctx, canary)
		if err != nil {
			return nil, err
		}
	}
	return testJob, nil
}
// GetTestJobname derives the name of the canary's test pod from the canary name.
func GetTestJobname(c *v1.Canary) string {
	const suffix = "-testjob"
	return c.Name + suffix
}
// CreateTestJob builds (but does not submit) the test pod for the given canary,
// wiring in the policy's test image, command, and boundary settings as
// environment variables, and sets the canary as the pod's controller owner.
func (r *ReconcileCanary) CreateTestJob(canary *v1.Canary, policy *v1.CanaryPolicy) (*corev1.Pod, error) {
	jobName := GetTestJobname(canary)

	// The target env var always comes first; boundary vars follow only when set.
	env := []corev1.EnvVar{
		{
			// canary services have the same name as canaries themselves
			Name:  "KRANE_TARGET",
			Value: canary.Name,
		},
	}
	if reqs := policy.Spec.TestSpec.Boundary.Requests; reqs != 0 {
		env = append(env, corev1.EnvVar{
			Name:  "KRANE_BOUNDARY_REQUESTS",
			Value: strconv.Itoa(reqs),
		})
	}
	if t := policy.Spec.TestSpec.Boundary.Time; t != "" {
		env = append(env, corev1.EnvVar{
			Name:  "KRANE_BOUNDARY_TIME",
			Value: t,
		})
	}

	testPod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      jobName,
			Namespace: canary.Namespace,
			Labels: map[string]string{
				"app":     jobName,
				"version": "stable",
			},
		},
		Spec: corev1.PodSpec{
			Containers: []corev1.Container{
				{
					Name:            "testjob",
					Image:           policy.Spec.TestSpec.Image,
					Command:         policy.Spec.TestSpec.Cmd,
					Env:             env,
					ImagePullPolicy: corev1.PullAlways,
				},
			},
			// The test runs exactly once; failures are reported via status.
			RestartPolicy: corev1.RestartPolicyNever,
		},
	}

	// Owning the pod lets garbage collection remove it with the canary.
	if err := controllerutil.SetControllerReference(canary, testPod, r.scheme); err != nil {
		return nil, err
	}
	return testPod, nil
}
// GetContainerTerminalState returns the terminated state of the named
// container in the pod, or nil if that container has not terminated. Other
// containers (e.g. injected sidecars such as istio-proxy) are ignored.
func GetContainerTerminalState(job *corev1.Pod, containerName string) *corev1.ContainerStateTerminated {
	statuses := job.Status.ContainerStatuses
	for i := range statuses {
		if statuses[i].Name != containerName {
			continue
		}
		if term := statuses[i].State.Terminated; term != nil {
			return term
		}
	}
	return nil
}
// GetContainerRunningState returns the running state of the named container in
// the pod, or nil if that container is not currently running. Other containers
// (e.g. injected sidecars such as istio-proxy) are ignored.
func GetContainerRunningState(job *corev1.Pod, containerName string) *corev1.ContainerStateRunning {
	for _, c := range job.Status.ContainerStatuses {
		// BUG FIX: the guard previously tested c.State.Terminated != nil,
		// which made this function always return nil — a running container
		// has a nil Terminated state, and a terminated one has a nil Running
		// state. Test the Running field itself instead.
		if c.Name == containerName && c.State.Running != nil {
			return c.State.Running
		}
	}
	return nil
}