/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package prober

import (
	"time"

	"github.com/golang/glog"

	"k8s.io/kubernetes/pkg/api"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
	"k8s.io/kubernetes/pkg/kubelet/prober/results"
	kubeletutil "k8s.io/kubernetes/pkg/kubelet/util"
	"k8s.io/kubernetes/pkg/probe"
	"k8s.io/kubernetes/pkg/util"
)

// worker handles the periodic probing of its assigned container. Each worker has a goroutine
// associated with it which runs the probe loop until the container permanently terminates, or the
// stop channel is closed. The worker uses the probe manager's statusManager to get up-to-date
// container IDs.
// TODO: Handle liveness probing
type worker struct {
	// Channel for stopping the probe; it should be closed to trigger a stop.
	stop chan struct{}

	// The pod containing this probe (read-only)
	pod *api.Pod

	// The container to probe (read-only)
	container api.Container

	// Describes the probe configuration (read-only)
	spec *api.Probe

	// The last known container ID for this worker.
	containerID kubecontainer.ContainerID
}

// Creates and starts a new probe worker.
func (m *manager) newWorker(
	pod *api.Pod,
	container api.Container) *worker {

	w := &worker{
		stop:      make(chan struct{}),
		pod:       pod,
		container: container,
		spec:      container.ReadinessProbe,
	}

	// Start the worker goroutine.
	go run(m, w)

	return w
}
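
// A minimal usage sketch (not part of the original file): how a caller might
// start a readiness worker and later stop it. The readinessProbes map and the
// probeKey are hypothetical bookkeeping invented for illustration; only
// newWorker and the stop channel come from this file.
//
//	w := m.newWorker(pod, container) // spawns the probe goroutine
//	m.readinessProbes[probeKey] = w  // hypothetical bookkeeping
//	// ... later, to stop probing:
//	close(w.stop) // run() breaks out of probeLoop and its deferred
//	              // cleanup removes the cache entry and deregisters
//	              // the worker.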

// run periodically probes the container.
func run(m *manager, w *worker) {
	probeTicker := time.NewTicker(m.defaultProbePeriod)

	defer func() {
		// Clean up.
		probeTicker.Stop()
		if !w.containerID.IsEmpty() {
			m.readinessCache.Remove(w.containerID)
		}
		m.removeReadinessProbe(w.pod.UID, w.container.Name)
	}()
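
	// Note: the loop condition runs doProbe immediately on entry, so the
	// first probe fires right away; the ticker only paces the probes that
	// follow.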
probeLoop:
	for doProbe(m, w) {
		// Wait for next probe tick.
		select {
		case <-w.stop:
			break probeLoop
		case <-probeTicker.C:
			// continue
		}
	}
}

// doProbe probes the container once and records the result.
// Returns whether the worker should continue.
func doProbe(m *manager, w *worker) (keepGoing bool) {
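	// Recover from any panic in the probe below via util.HandleCrash; the
	// extra handler forces keepGoing to true so a crash does not kill the
	// probe loop.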
	defer util.HandleCrash(func(_ interface{}) { keepGoing = true })

	status, ok := m.statusManager.GetPodStatus(w.pod.UID)
	if !ok {
		// Either the pod has not been created yet, or it was already deleted.
		glog.V(3).Infof("No status for pod: %v", kubeletutil.FormatPodName(w.pod))
		return true
	}

	// Worker should terminate if pod is terminated.
	if status.Phase == api.PodFailed || status.Phase == api.PodSucceeded {
		glog.V(3).Infof("Pod %v %v, exiting probe worker",
			kubeletutil.FormatPodName(w.pod), status.Phase)
		return false
	}

	c, ok := api.GetContainerStatus(status.ContainerStatuses, w.container.Name)
	if !ok {
		// Either the container has not been created yet, or it was deleted.
		glog.V(3).Infof("Non-existent container probed: %v - %v",
			kubeletutil.FormatPodName(w.pod), w.container.Name)
		return true // Wait for more information.
	}
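
	// The container ID has changed, i.e. the container was restarted (or this
	// is the first status seen): drop any stale cache entry for the old ID
	// before adopting the new one.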
	if w.containerID.String() != c.ContainerID {
		if !w.containerID.IsEmpty() {
			m.readinessCache.Remove(w.containerID)
		}
		w.containerID = kubecontainer.ParseContainerID(c.ContainerID)
	}

	if c.State.Running == nil {
		glog.V(3).Infof("Non-running container probed: %v - %v",
			kubeletutil.FormatPodName(w.pod), w.container.Name)
		m.readinessCache.Set(w.containerID, results.Failure)

		// Abort if the container will not be restarted.
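		// That is, stop only once the container has terminated and the pod's
		// restart policy is Never; otherwise keep waiting for a restart.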
		return c.State.Terminated == nil ||
			w.pod.Spec.RestartPolicy != api.RestartPolicyNever
	}

	if int64(time.Since(c.State.Running.StartedAt.Time).Seconds()) < w.spec.InitialDelaySeconds {
		// Readiness defaults to false during the initial delay.
		m.readinessCache.Set(w.containerID, results.Failure)
		return true
	}

	// TODO: Move error handling out of prober.
	result, _ := m.prober.ProbeReadiness(w.pod, status, w.container, w.containerID)
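	// Only record definitive results; an Unknown result (e.g. a probe error)
	// leaves the previously cached readiness value untouched. With Unknown
	// filtered out, probe.Success maps to ready and probe.Failure to not ready.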
	if result != probe.Unknown {
		m.readinessCache.Set(w.containerID, result != probe.Failure)
	}

	return true
}