// kube.go
package deployer
import (
"errors"
"fmt"
"strconv"
"strings"
"time"
"github.com/papabearsoftware/eks-lambda-deployer/internal/util"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
// KubeClient wraps the Kubernetes clientset behind the kubernetes.Interface
// abstraction — presumably so a fake clientset can be injected in tests;
// verify against the construction site (not visible in this file).
type KubeClient struct {
	Client kubernetes.Interface
}
// getExistingDeployment fetches the deployment named by the package-level
// deploymentJSON config from its configured namespace using the shared kube
// client. The error, if any, is logged before being returned to the caller.
func getExistingDeployment() (*appsv1.Deployment, error) {
	deployment, err := kube.Client.AppsV1().Deployments(deploymentJSON.Namespace).Get(deploymentJSON.Deployment, v1.GetOptions{})
	if err == nil {
		return deployment, nil
	}
	util.LogError(fmt.Sprintf("Error getting deployment %s in namespace %s", deploymentJSON.Deployment, deploymentJSON.Namespace), err.Error())
	return nil, err
}
// checkDeploymentStatus waits briefly for the rollout to progress, then lists
// the pods carrying the lambda-deploy-timestamp label ts and inspects their
// phase to decide whether the deployment succeeded.
//
// NOTE(review): rv (the deployment's ResourceVersion) is accepted but never
// used in this body — confirm whether it was meant to gate the pod list.
// NOTE(review): every switch case returns, so only the FIRST pod in the list
// is ever examined; confirm whether all pods should be checked.
func checkDeploymentStatus(rv string, ts string) error {
	// TODO either make this an env var or handle checking pending pods better
	// Fixed grace period before sampling pod state.
	time.Sleep(15 * time.Second)
	// Select only the pods created by this rollout via the timestamp label
	// stamped onto the pod template in deploy().
	pods, err := kube.Client.CoreV1().Pods(deploymentJSON.Namespace).List(v1.ListOptions{
		LabelSelector: fmt.Sprintf("lambda-deploy-timestamp=%s", ts),
	})
	if err != nil {
		util.LogError("Error getting pods to check status", err.Error())
		return err
	}
	for _, pod := range pods.Items {
		// NOTE(review): debug dump is suppressed for pods named exactly
		// "nginx" — presumably to cut log noise; confirm intent.
		if pod.Name != "nginx" {
			util.LogDebug(fmt.Sprintf("%+v", pod.Status))
		}
		switch pod.Status.Phase {
		case corev1.PodFailed:
			// Terminal failure: dump the pod list so the log captures why,
			// and signal the caller to roll back.
			util.LogError(fmt.Sprintf("Pod %s is in a failed state. Dumping pods and rolling back.", pod.Name), errors.New("PodFailed").Error())
			util.LogError(fmt.Sprintf("%+v", pods), "")
			return errors.New("PodFailed")
		case corev1.PodSucceeded:
			util.LogInfo("Pods are reporting successful run")
			return nil
		case corev1.PodRunning:
			util.LogInfo("Pods are in running state. Deployment was successful")
			return nil
		case corev1.PodUnknown:
			util.LogError(fmt.Sprintf("Pod %s is in a unknown state. Dumping pods and rolling back.", pod.Name), errors.New("PodUnknown").Error())
			util.LogError(fmt.Sprintf("%+v", pods), "")
			return errors.New("PodUnknown")
		case corev1.PodPending:
			// Pending may be benign (still scheduling) or a stuck container;
			// checkPendingPods distinguishes the two.
			util.LogInfo(fmt.Sprintf("Pod %s is still in pending state. Checking if there are any issues", pod.Name))
			if err = checkPendingPods(pod); err != nil {
				return err
			} else {
				return nil
			}
		default:
			util.LogDebug("We should never be here")
			return nil
		}
	}
	// Reached only when the label selector matched no pods.
	return nil
}
// checkPendingPods examines the container statuses of a pending pod and
// returns an error when any container is stuck in a waiting state that is
// known to be unrecoverable (image-pull problems, crash loops, bad config).
// A pod with no such container is treated as benignly pending.
func checkPendingPods(p corev1.Pod) error {
	// Waiting reasons that indicate the rollout will never converge.
	fatalReasons := map[string]bool{
		"ErrImagePull":               true,
		"CrashLoopBackOff":           true,
		"ImagePullBackOff":           true,
		"InvalidImageName":           true,
		"CreateContainerConfigError": true,
	}
	for _, status := range p.Status.ContainerStatuses {
		// State.Waiting will be nil if the container is running
		waiting := status.State.Waiting
		if waiting == nil {
			continue
		}
		if fatalReasons[waiting.Reason] {
			util.LogError(fmt.Sprintf("Container %s is in %s state", status.Name, waiting.Reason), errors.New("ContainerError").Error())
			return errors.New("ContainerError")
		}
	}
	return nil
}
// revert re-submits the previously captured deployment object d to restore
// the pre-deploy state. Rollback is best-effort: failures are logged but not
// returned, matching the original contract.
func revert(d *appsv1.Deployment) {
	//util.LogDebug(fmt.Sprintf("%+v", d))
	// Fix: the original assigned to a shared package-level err variable
	// (`_, err = ...`), silently mutating global state; keep the error
	// tightly scoped to this call instead.
	if _, err := kube.Client.AppsV1().Deployments(deploymentJSON.Namespace).Update(d); err != nil {
		util.LogError("Tried to rollback deployment but received an error", err.Error())
	} else {
		util.LogInfo("Successfully rolled back deployment")
	}
}
// deploy fetches the target deployment, swaps in the container image tags
// requested in deploymentJSON, applies the update, and then verifies the
// rollout via checkDeploymentStatus. If verification fails and
// RollbackOnFail is set, the pre-update spec is restored.
func deploy() error {
	deployment, err := getExistingDeployment()
	if err != nil {
		util.LogError("Received error when retrieving deployment", err.Error())
		return err
	}
	util.LogDebug(fmt.Sprintf("checking deployment %+v", deployment))

	// Fix: the original discarded this error; a failed fetch left a nil
	// rollback copy that would panic later when RollbackOnFail fired.
	existingDeploymentCopy, err := getExistingDeployment()
	if err != nil {
		util.LogError("Received error when retrieving deployment copy for rollback", err.Error())
		return err
	}

	// Map container name -> requested image tag.
	deploymentContainerMap := make(map[string]string, len(deploymentJSON.Containers))
	for _, container := range deploymentJSON.Containers {
		util.LogInfo(fmt.Sprintf("%+v", container))
		deploymentContainerMap[container.ContainerName] = container.Tag
	}

	// Short progress deadline so a stuck rollout surfaces quickly.
	progressDeadline := int32(10)
	deployment.Spec.ProgressDeadlineSeconds = &progressDeadline

	for i, c := range deployment.Spec.Template.Spec.Containers {
		if tag, ok := deploymentContainerMap[c.Name]; ok {
			// Replace only the tag portion of "repo/image:tag".
			image := strings.Split(c.Image, ":")
			deployment.Spec.Template.Spec.Containers[i].Image = fmt.Sprintf("%s:%s", image[0], tag)
		} else {
			util.LogInfo(fmt.Sprintf("Did not find a matching deployment for container for %s", c.Name))
		}
	}

	// Convert with strconv otherwise we get a unicode character
	stringTS := strconv.FormatInt(time.Now().Unix(), 10)
	// Stamp the pod template so checkDeploymentStatus can find the new pods.
	deployment.Spec.Template.ObjectMeta.Labels["lambda-deploy-timestamp"] = stringTS

	dd, err := kube.Client.AppsV1().Deployments(deploymentJSON.Namespace).Update(deployment)
	if err != nil {
		// Fix: the original never checked this error and dereferenced dd
		// unconditionally, panicking on a failed update.
		util.LogError("Error updating deployment", err.Error())
		return err
	}
	if err = checkDeploymentStatus(dd.ResourceVersion, stringTS); err != nil {
		if deploymentJSON.RollbackOnFail {
			// ResourceVersion must be cleared before re-submitting an
			// object captured earlier, or the API server rejects it.
			existingDeploymentCopy.ResourceVersion = ""
			revert(existingDeploymentCopy)
		}
		return err
	}
	return nil
}