-
Notifications
You must be signed in to change notification settings - Fork 303
/
conv.go
367 lines (322 loc) · 12.3 KB
/
conv.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
package session
import (
"fmt"
"strings"
"time"
v1 "k8s.io/api/core/v1"
ctrl "sigs.k8s.io/controller-runtime"
"github.com/tilt-dev/tilt/internal/engine/buildcontrol"
"github.com/tilt-dev/tilt/internal/store/k8sconv"
"github.com/tilt-dev/tilt/internal/store"
"github.com/tilt-dev/tilt/pkg/apis"
"github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"
session "github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"
"github.com/tilt-dev/tilt/pkg/model"
)
// targetsForResource flattens a single resource into its session targets:
// at most one "update" (build) target plus at most one runtime target.
// Either may be absent (e.g. a local resource with no serve_cmd has no
// runtime target), in which case it is simply omitted from the result.
func (r *Reconciler) targetsForResource(mt *store.ManifestTarget, holds buildcontrol.HoldSet, ci *v1alpha1.SessionCISpec, result *ctrl.Result) []session.Target {
	var out []session.Target
	if build := buildTarget(mt, holds); build != nil {
		out = append(out, *build)
	}
	if run := r.runtimeTarget(mt, holds, ci, result); run != nil {
		out = append(out, *run)
	}
	return out
}
// k8sRuntimeTarget converts a Kubernetes resource's runtime state into a
// session runtime target, reporting it as waiting/active/terminated based on
// the aggregate runtime status and the most recent pod's phase.
//
// If a CI grace period is configured, a failing target within the grace
// window is marked TargetGraceTolerated and a requeue is scheduled (via the
// ctrl.Result out-parameter) so the target is re-evaluated when the window
// expires.
//
// Returns nil when there is genuinely no runtime to monitor (see the
// PodReadinessIgnore hack below).
func (r *Reconciler) k8sRuntimeTarget(mt *store.ManifestTarget, ci *v1alpha1.SessionCISpec, result *ctrl.Result) *session.Target {
	krs := mt.State.K8sRuntimeState()
	if mt.Manifest.PodReadinessMode() == model.PodReadinessIgnore && krs.HasEverDeployedSuccessfully && krs.PodLen() == 0 {
		// HACK: engine assumes anything with an image will create a pod; PodReadinessIgnore is used in these
		// instances to avoid getting stuck in pending forever; in reality, there's no "runtime" target being
		// monitored by Tilt, so instead of faking it, just omit it (note: only applies AFTER first deploy so
		// that we can determine there are no pods, so it will appear in waiting until then, which is actually
		// desirable and matches behavior in K8sRuntimeState::RuntimeStatus())
		// see https://github.com/tilt-dev/tilt/issues/3619
		return nil
	}
	target := &session.Target{
		Name:      fmt.Sprintf("%s:runtime", mt.Manifest.Name.String()),
		Type:      k8sTargetType(mt),
		Resources: []string{mt.Manifest.Name.String()},
	}
	// Disabled resources short-circuit: no status evaluation is performed.
	if mt.State.DisableState == session.DisableStateDisabled {
		target.State.Disabled = &session.TargetStateDisabled{}
		return target
	}
	status := mt.RuntimeStatus()
	pod := krs.MostRecentPod()
	phase := v1.PodPhase(pod.Phase)
	// A Target's StartTime / FinishTime is meant to be a total representation
	// of when the YAML started deploying until when it became ready. We
	// also want it to persist across pod restarts, so we can use it
	// to check if the pod is within the grace period.
	//
	// Ideally, we'd use KubernetesApply's LastApplyStartTime, but
	// LastSuccessfulDeployTime is good enough.
	createdAt := apis.NewMicroTime(mt.State.LastSuccessfulDeployTime)
	// Zero duration means "no grace period configured" and leaves
	// graceStatus as TargetGraceNotApplicable.
	k8sGracePeriod := time.Duration(0)
	if ci != nil && ci.K8sGracePeriod != nil {
		k8sGracePeriod = ci.K8sGracePeriod.Duration
	}
	graceStatus := v1alpha1.TargetGraceNotApplicable
	if k8sGracePeriod > 0 && !createdAt.Time.IsZero() {
		graceSoFar := r.clock.Since(createdAt.Time)
		if k8sGracePeriod <= graceSoFar {
			graceStatus = v1alpha1.TargetGraceExceeded
		} else {
			graceStatus = v1alpha1.TargetGraceTolerated
			// Use the ctrl.Result to schedule a reconcile.
			// Keep the soonest requeue time if one is already pending.
			requeueAfter := k8sGracePeriod - graceSoFar
			if result.RequeueAfter == 0 || result.RequeueAfter > requeueAfter {
				result.RequeueAfter = requeueAfter
			}
		}
	}
	if status == v1alpha1.RuntimeStatusOK {
		// A succeeded pod (e.g. a Job) is terminated-without-error;
		// anything else that's OK is an active, ready server.
		if v1.PodSucceeded == phase {
			target.State.Terminated = &session.TargetStateTerminated{
				StartTime: createdAt,
			}
			return target
		}
		target.State.Active = &session.TargetStateActive{
			StartTime: createdAt,
			Ready:     true,
		}
		return target
	}
	if status == v1alpha1.RuntimeStatusError {
		// Prefer the pod-level failure message when the whole pod failed.
		if phase == v1.PodFailed {
			podErr := strings.Join(pod.Errors, "; ")
			if podErr == "" {
				podErr = fmt.Sprintf("Pod %q failed", pod.Name)
			}
			target.State.Terminated = &session.TargetStateTerminated{
				StartTime:   createdAt,
				Error:       podErr,
				GraceStatus: graceStatus,
			}
			return target
		}
		// Otherwise attribute the error to the first container in an error
		// state.
		for _, ctr := range store.AllPodContainers(pod) {
			if k8sconv.ContainerStatusToRuntimeState(ctr) == v1alpha1.RuntimeStatusError {
				target.State.Terminated = &session.TargetStateTerminated{
					// NOTE(review): this branch uses the pod's creation time
					// rather than createdAt like the sibling branches —
					// presumably intentional, but worth confirming.
					StartTime: apis.NewMicroTime(pod.CreatedAt.Time),
					Error: fmt.Sprintf("Pod %s in error state due to container %s: %s",
						pod.Name, ctr.Name, pod.Status),
					GraceStatus: graceStatus,
				}
				return target
			}
		}
		// Error status but no identifiable pod/container cause.
		target.State.Terminated = &session.TargetStateTerminated{
			StartTime:   createdAt,
			Error:       "unknown error",
			GraceStatus: graceStatus,
		}
		return target
	}
	if status == v1alpha1.RuntimeStatusPending {
		// A running-but-not-ready pod is active (readiness probes pending);
		// anything else is still waiting, with a best-effort reason.
		if v1.PodRunning == phase {
			target.State.Active = &session.TargetStateActive{
				StartTime: createdAt,
				Ready:     false,
			}
			return target
		}
		waitReason := pod.Status
		if waitReason == "" {
			if pod.Name == "" {
				waitReason = "waiting-for-pod"
			} else {
				waitReason = "unknown"
			}
		}
		target.State.Waiting = &session.TargetStateWaiting{
			WaitReason: waitReason,
		}
	}
	return target
}
// localServeTarget converts a local resource's serve_cmd state into a session
// runtime target. Resources without a serve_cmd contribute no runtime target
// at all (they still get a build target from their update cmd), so nil is
// returned for them.
func (r *Reconciler) localServeTarget(mt *store.ManifestTarget, holds buildcontrol.HoldSet) *session.Target {
	if mt.Manifest.LocalTarget().ServeCmd.Empty() {
		// No serve_cmd means there is nothing running to monitor.
		return nil
	}
	resourceName := mt.Manifest.Name.String()
	target := &session.Target{
		Name:      fmt.Sprintf("%s:serve", resourceName),
		Resources: []string{resourceName},
		Type:      session.TargetTypeServer,
	}
	// Disabled resources short-circuit: no status evaluation is performed.
	if mt.State.DisableState == session.DisableStateDisabled {
		target.State.Disabled = &session.TargetStateDisabled{}
		return target
	}
	lrs := mt.State.LocalRuntimeState()
	runtimeErr := lrs.RuntimeStatusError()
	switch {
	case runtimeErr != nil:
		// The serve process exited abnormally.
		target.State.Terminated = &session.TargetStateTerminated{
			StartTime:  apis.NewMicroTime(lrs.StartTime),
			FinishTime: apis.NewMicroTime(lrs.FinishTime),
			Error:      errToString(runtimeErr),
		}
	case lrs.PID != 0:
		// The serve process is up; readiness comes from its probe state.
		target.State.Active = &session.TargetStateActive{
			StartTime: apis.NewMicroTime(lrs.StartTime),
			Ready:     lrs.Ready,
		}
	case mt.Manifest.TriggerMode.AutoInitial() || mt.State.StartedFirstBuild():
		// default to waiting unless this resource has auto_init=False and has never
		// had a build triggered for other reasons (e.g. trigger_mode=TRIGGER_MODE_AUTO and
		// a relevant file change or being manually invoked via UI)
		// the latter case ensures there's no race condition between a build being
		// triggered and the local process actually being launched
		//
		// otherwise, Terminated/Active/Waiting will all be nil, which indicates that
		// the target is currently inactive
		target.State.Waiting = waitingFromHolds(mt.Manifest.Name, holds)
	}
	return target
}
// genericRuntimeTarget creates a target from the RuntimeState interface without any domain-specific considerations.
//
// This is both used for target types that don't require specialized logic (Docker Compose) as well as a fallback for
// any new types that don't have deeper support here.
//
// The target is reported as waiting (pending), active (OK), or terminated
// (error); any other status leaves all state fields nil (inactive).
func (r *Reconciler) genericRuntimeTarget(mt *store.ManifestTarget, holds buildcontrol.HoldSet) *session.Target {
	target := &session.Target{
		Name:      fmt.Sprintf("%s:runtime", mt.Manifest.Name.String()),
		Resources: []string{mt.Manifest.Name.String()},
		Type:      session.TargetTypeServer,
	}
	// Disabled resources short-circuit: no status evaluation is performed.
	if mt.State.DisableState == session.DisableStateDisabled {
		target.State.Disabled = &session.TargetStateDisabled{}
		return target
	}
	runtimeStatus := mt.RuntimeStatus()
	switch runtimeStatus {
	case v1alpha1.RuntimeStatusPending:
		target.State.Waiting = waitingFromHolds(mt.Manifest.Name, holds)
	case v1alpha1.RuntimeStatusOK:
		target.State.Active = &session.TargetStateActive{
			StartTime: apis.NewMicroTime(mt.State.LastSuccessfulDeployTime),
			// generic resources have no readiness concept so they're just ready by default
			// (this also applies to Docker Compose, since we don't support its health checks)
			Ready: true,
		}
	case v1alpha1.RuntimeStatusError:
		errMsg := errToString(mt.State.RuntimeState.RuntimeStatusError())
		if errMsg == "" {
			// Bug fix: previously the raw format string "Server target %q failed"
			// was assigned verbatim, so users saw a literal %q placeholder.
			// Substitute the resource name so the message identifies the target.
			errMsg = fmt.Sprintf("Server target %q failed", mt.Manifest.Name)
		}
		target.State.Terminated = &session.TargetStateTerminated{
			Error: errMsg,
		}
	}
	return target
}
// runtimeTarget dispatches to the appropriate runtime-target builder for the
// resource's type: Kubernetes, local serve_cmd, or the generic fallback
// (Docker Compose and anything else).
func (r *Reconciler) runtimeTarget(mt *store.ManifestTarget, holds buildcontrol.HoldSet, ci *v1alpha1.SessionCISpec, result *ctrl.Result) *session.Target {
	switch {
	case mt.Manifest.IsK8s():
		return r.k8sRuntimeTarget(mt, ci, result)
	case mt.Manifest.IsLocal():
		return r.localServeTarget(mt, holds)
	default:
		return r.genericRuntimeTarget(mt, holds)
	}
}
// buildTarget creates a "build" (or update) target for the resource.
//
// Currently, the engine aggregates many different targets into a single build record, and that's reflected here.
// Ideally, as the internals change, more granularity will provided and this might actually return a slice of targets
// rather than a single target. For example, a K8s resource might have an image build step and then a deployment (i.e.
// kubectl apply) step - currently, both of these will be aggregated together, which can make it harder to diagnose
// where something is stuck or slow.
func buildTarget(mt *store.ManifestTarget, holds buildcontrol.HoldSet) *session.Target {
	// Local resources without an update cmd have nothing to build.
	if mt.Manifest.IsLocal() && mt.Manifest.LocalTarget().UpdateCmdSpec == nil {
		return nil
	}
	resourceName := mt.Manifest.Name.String()
	res := &session.Target{
		Name:      fmt.Sprintf("%s:update", resourceName),
		Resources: []string{resourceName},
		Type:      session.TargetTypeJob,
	}
	// Disabled resources short-circuit: no status evaluation is performed.
	if mt.State.DisableState == session.DisableStateDisabled {
		res.State.Disabled = &session.TargetStateDisabled{}
		return res
	}
	currentBuild := mt.State.EarliestCurrentBuild()
	switch {
	case mt.NextBuildReason() != model.BuildReasonNone:
		// A build is queued but hasn't started (possibly held back).
		res.State.Waiting = waitingFromHolds(mt.Manifest.Name, holds)
	case !currentBuild.Empty():
		// A build is in flight right now.
		res.State.Active = &session.TargetStateActive{
			StartTime: apis.NewMicroTime(currentBuild.StartTime),
		}
	case len(mt.State.BuildHistory) != 0:
		// Idle: report the outcome of the most recent completed build.
		lastBuild := mt.State.LastBuild()
		res.State.Terminated = &session.TargetStateTerminated{
			StartTime:  apis.NewMicroTime(lastBuild.StartTime),
			FinishTime: apis.NewMicroTime(lastBuild.FinishTime),
			Error:      errToString(lastBuild.Error),
		}
	}
	return res
}
// k8sTargetType classifies a K8s resource as a job (expected to run to
// completion, i.e. PodReadinessSucceeded) or a long-running server.
// Returns the empty type for non-K8s resources.
func k8sTargetType(mt *store.ManifestTarget) session.TargetType {
	if !mt.Manifest.IsK8s() {
		return ""
	}
	if mt.State.K8sRuntimeState().PodReadinessMode == model.PodReadinessSucceeded {
		return session.TargetTypeJob
	}
	return session.TargetTypeServer
}
// waitingFromHolds builds a waiting state for a pending target, using the
// resource's hold reason (why the build is being held back, e.g. waiting on
// dependencies) when one exists, and "unknown" otherwise.
//
// Note that in the API, the reason is not _why_ the target "exists", but an
// explanation for why it's not yet active.
func waitingFromHolds(mn model.ManifestName, holds buildcontrol.HoldSet) *session.TargetStateWaiting {
	reason := "unknown"
	hold, ok := holds[mn]
	if ok && hold.Reason != store.HoldReasonNone {
		reason = string(hold.Reason)
	}
	return &session.TargetStateWaiting{
		WaitReason: reason,
	}
}
// tiltfileTarget creates a session.Target object from a Tiltfile ManifestState.
//
// This is slightly different from generic resource handling because there is
// no ManifestTarget in the engine for the Tiltfile (just ManifestState) and
// config file changes are stored top-level on state, but conceptually it does
// similar things. As a result, this logic largely mirrors the generic
// resource build logic.
func tiltfileTarget(name model.ManifestName, ms *store.ManifestState) session.Target {
	tfTarget := session.Target{
		Name:      "tiltfile:update",
		Resources: []string{name.String()},
		Type:      session.TargetTypeJob,
	}
	if ms.IsBuilding() {
		// A Tiltfile execution is in flight right now.
		tfTarget.State.Active = &session.TargetStateActive{
			StartTime: apis.NewMicroTime(ms.EarliestCurrentBuild().StartTime),
		}
		return tfTarget
	}
	if hasPendingChanges, _ := ms.HasPendingChanges(); hasPendingChanges {
		// Config files changed; a re-execution is queued.
		tfTarget.State.Waiting = &session.TargetStateWaiting{
			WaitReason: "config-changed",
		}
		return tfTarget
	}
	if len(ms.BuildHistory) != 0 {
		// Idle: report the outcome of the most recent execution.
		lastBuild := ms.LastBuild()
		tfTarget.State.Terminated = &session.TargetStateTerminated{
			StartTime:  apis.NewMicroTime(lastBuild.StartTime),
			FinishTime: apis.NewMicroTime(lastBuild.FinishTime),
			Error:      errToString(lastBuild.Error),
		}
		return tfTarget
	}
	// given the current engine behavior, this doesn't actually occur because
	// the first build happens as part of initialization
	tfTarget.State.Waiting = &session.TargetStateWaiting{
		WaitReason: "initial-build",
	}
	return tfTarget
}