/
app.go
126 lines (119 loc) · 3.89 KB
/
app.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
package app_create
import (
"fmt"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
kapi "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/kubectl"
apps "github.com/openshift/origin/pkg/apps/apis/apps"
)
// createAndCheckAppDC stamps the app trial's begin time, creates the test
// DeploymentConfig, and then waits for its pod to reach the running state.
// The trial result is recorded on exit regardless of outcome.
func (d *AppCreate) createAndCheckAppDC() bool {
	trial := &d.result.App
	trial.BeginTime = jsonTime(time.Now())
	defer recordTrial(trial)

	// No point watching for a pod if the DC itself could not be created.
	if created := d.createAppDC(); !created {
		return false
	}
	trial.Success = d.checkPodRunning()
	return trial.Success
}
// create the DC
// createAppDC builds and submits the diagnostic app's DeploymentConfig.
// The app is a single socat container that answers any TCP connection on
// d.appPort with a canned HTTP 200 "Hello" response; a readiness probe
// polls that port every second. The creation time is recorded on exit.
func (d *AppCreate) createAppDC() bool {
	defer recordTime(&d.result.App.CreatedTime)

	// Test pods should terminate immediately on deletion; nothing to drain.
	gracePeriod := int64(0)

	container := kapi.Container{
		Name:  d.appName,
		Image: d.appImage,
		Ports: []kapi.ContainerPort{{
			Name:          "http",
			ContainerPort: int32(d.appPort),
			Protocol:      kapi.ProtocolTCP,
		}},
		ImagePullPolicy: kapi.PullIfNotPresent,
		// socat listens on appPort and forks a responder per connection,
		// emitting a minimal static HTTP response.
		Command: []string{
			"socat", "-T", "1", "-d",
			fmt.Sprintf("%s-l:%d,reuseaddr,fork,crlf", kapi.ProtocolTCP, d.appPort),
			"system:\"echo 'HTTP/1.0 200 OK'; echo 'Content-Type: text/plain'; echo; echo 'Hello'\"",
		},
		ReadinessProbe: &kapi.Probe{
			// The action taken to determine the health of a container
			Handler: kapi.Handler{
				HTTPGet: &kapi.HTTPGetAction{
					Path: "/",
					Port: intstr.FromInt(d.appPort),
				},
			},
			InitialDelaySeconds: 0,
			TimeoutSeconds:      1,
			PeriodSeconds:       1,
		},
	}

	template := &kapi.PodTemplateSpec{
		ObjectMeta: metav1.ObjectMeta{Labels: d.label},
		Spec: kapi.PodSpec{
			TerminationGracePeriodSeconds: &gracePeriod,
			Containers:                    []kapi.Container{container},
		},
	}

	dc := &apps.DeploymentConfig{
		ObjectMeta: metav1.ObjectMeta{
			Name:   d.appName,
			Labels: d.label,
		},
		Spec: apps.DeploymentConfigSpec{
			Replicas: 1,
			Selector: d.label,
			// Deploy once on creation; no image-change trigger is needed.
			Triggers: []apps.DeploymentTriggerPolicy{
				{Type: apps.DeploymentTriggerOnConfigChange},
			},
			Template: template,
		},
	}

	if _, err := d.AppsClient.Apps().DeploymentConfigs(d.project).Create(dc); err != nil {
		d.out.Error("DCluAC006", err, fmt.Sprintf("%s: Creating deploymentconfig '%s' failed:\n%v", now(), d.appName, err))
		return false
	}
	return true
}
// wait for a pod to become active
// checkPodRunning watches the project for the app's pod and reports whether
// its container reached the running state before the watch's server-side
// timeout (d.deployTimeout seconds) expired. The time at which the outcome
// was determined is recorded on exit.
func (d *AppCreate) checkPodRunning() bool {
	defer recordTime(&d.result.App.ReadyTime)
	d.out.Debug("DCluAC007", fmt.Sprintf("%s: Waiting %ds for pod to reach running state.", now(), d.deployTimeout))

	podWatch, err := d.KubeClient.Core().Pods(d.project).Watch(metav1.ListOptions{LabelSelector: d.labelSelector, TimeoutSeconds: &d.deployTimeout})
	if err != nil {
		d.out.Error("DCluAC008", err, fmt.Sprintf(`
%s: Failed to establish a watch for '%s' to deploy a pod:
%v
This may be a transient error. Check the master API logs for anomalies near this time.
`, now(), d.appName, err))
		return false
	}
	defer stopWatcher(podWatch)

	// Condition is loop-invariant: it only depends on the app name.
	isRunning := kubectl.PodContainerRunning(d.appName)

	// Consume events until the container runs, an error occurs, or the watch
	// times out (the result channel closes).
	for ev := range podWatch.ResultChan() {
		running, err := isRunning(ev)
		switch {
		case err != nil:
			d.out.Error("DCluAC009", err, fmt.Sprintf(`
%s: Error while watching for app pod to deploy:
%v
This may be a transient error. Check the master API logs for anomalies near this time.
`, now(), err))
			return false
		case running:
			d.out.Info("DCluAC010", fmt.Sprintf("%s: App '%s' is running", now(), d.appName))
			return true
		}
	}

	// The watch closed without ever seeing a running container: timed out.
	d.out.Error("DCluAC011", nil, fmt.Sprintf(`
%s: App pod was not in running state before timeout (%d sec)
There are many reasons why this can occur; for example:
* The app or deployer image may not be available (check pod status)
* Downloading an image may have timed out (consider increasing timeout)
* The scheduler may be unable to find an appropriate node for it to run (check deployer logs)
* The node container runtime may be malfunctioning (check node and docker/cri-o logs)
`, now(), d.deployTimeout))
	return false
}