-
Notifications
You must be signed in to change notification settings - Fork 289
/
build_result.go
404 lines (341 loc) · 10.7 KB
/
build_result.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
package store
import (
"fmt"
"sort"
"github.com/docker/distribution/reference"
"k8s.io/apimachinery/pkg/types"
"github.com/windmilleng/tilt/internal/container"
"github.com/windmilleng/tilt/internal/dockercompose"
"github.com/windmilleng/tilt/internal/k8s"
"github.com/windmilleng/tilt/pkg/model"
)
// BuildResult is the result of a successful build.
//
// Each implementation reports the target it was built for and the kind of
// build (local, image, live-update, docker-compose, k8s) that produced it.
type BuildResult interface {
	TargetID() model.TargetID
	BuildType() model.BuildType
}
// LocalBuildResult is the result of a successful build of a local target.
type LocalBuildResult struct {
	id model.TargetID
}

// TargetID returns the ID of the target this result was built for.
func (res LocalBuildResult) TargetID() model.TargetID { return res.id }

// BuildType identifies this as a local build.
func (res LocalBuildResult) BuildType() model.BuildType { return model.BuildTypeLocal }

// NewLocalBuildResult creates a build result for a local target.
func NewLocalBuildResult(id model.TargetID) LocalBuildResult {
	return LocalBuildResult{id: id}
}
// ImageBuildResult is the result of a successful image build.
type ImageBuildResult struct {
	id model.TargetID

	// The name+tag of the image that the pod is running.
	//
	// The tag is derived from a content-addressable digest.
	Image reference.NamedTagged
}

// TargetID returns the ID of the target this result was built for.
func (res ImageBuildResult) TargetID() model.TargetID { return res.id }

// BuildType identifies this as an image build.
func (res ImageBuildResult) BuildType() model.BuildType { return model.BuildTypeImage }

// NewImageBuildResult creates a build result for an image target.
func NewImageBuildResult(id model.TargetID, image reference.NamedTagged) ImageBuildResult {
	return ImageBuildResult{id: id, Image: image}
}
// LiveUpdateBuildResult is the result of a successful in-place container
// update.
type LiveUpdateBuildResult struct {
	id model.TargetID

	// The name+tag of the image that the pod is running.
	Image reference.NamedTagged

	// The ID of the container(s) that we live-updated in-place.
	//
	// The contents of the container have diverged from the image it's built on,
	// so we need to keep track of that.
	LiveUpdatedContainerIDs []container.ID
}

// TargetID returns the ID of the target this result was built for.
func (res LiveUpdateBuildResult) TargetID() model.TargetID { return res.id }

// BuildType identifies this as a live-update build.
func (res LiveUpdateBuildResult) BuildType() model.BuildType { return model.BuildTypeLiveUpdate }

// NewLiveUpdateBuildResult creates a build result for an in-place container
// update of the given containers.
func NewLiveUpdateBuildResult(id model.TargetID, image reference.NamedTagged, containerIDs []container.ID) LiveUpdateBuildResult {
	return LiveUpdateBuildResult{
		id:                      id,
		Image:                   image,
		LiveUpdatedContainerIDs: containerIDs,
	}
}
// DockerComposeBuildResult is the result of a successful Docker Compose
// deploy.
type DockerComposeBuildResult struct {
	id model.TargetID

	// The ID of the container that Docker Compose created.
	//
	// When we deploy a Docker Compose service, we wait synchronously for the
	// container to start. Note that this is a different concurrency model than
	// we use for Kubernetes, where the pods appear some time later via an
	// asynchronous event.
	DockerComposeContainerID container.ID
}

// TargetID returns the ID of the target this result was built for.
func (res DockerComposeBuildResult) TargetID() model.TargetID { return res.id }

// BuildType identifies this as a docker-compose deploy.
func (res DockerComposeBuildResult) BuildType() model.BuildType { return model.BuildTypeDockerCompose }

// NewDockerComposeDeployResult creates a build result for a docker compose
// deploy target.
func NewDockerComposeDeployResult(id model.TargetID, containerID container.ID) DockerComposeBuildResult {
	return DockerComposeBuildResult{id: id, DockerComposeContainerID: containerID}
}
// K8sBuildResult is the result of a successful deploy of entities to a
// Kubernetes cluster.
type K8sBuildResult struct {
	id model.TargetID

	// The UIDs that we deployed to a Kubernetes cluster.
	DeployedUIDs []types.UID
}

// TargetID returns the ID of the target this result was built for.
func (r K8sBuildResult) TargetID() model.TargetID { return r.id }

// BuildType identifies this as a kubernetes deploy.
func (r K8sBuildResult) BuildType() model.BuildType { return model.BuildTypeK8s }

// NewK8sDeployResult creates a build result for a kubernetes deploy target.
//
// Returns the concrete K8sBuildResult rather than the BuildResult interface,
// matching every other constructor in this file ("return concrete types");
// the value still satisfies BuildResult at all call sites.
func NewK8sDeployResult(id model.TargetID, uids []types.UID) K8sBuildResult {
	return K8sBuildResult{
		id:           id,
		DeployedUIDs: uids,
	}
}
// ImageFromBuildResult returns the image carried by the given result, or nil
// for result kinds that do not carry one.
func ImageFromBuildResult(r BuildResult) reference.NamedTagged {
	if ib, ok := r.(ImageBuildResult); ok {
		return ib.Image
	}
	if lu, ok := r.(LiveUpdateBuildResult); ok {
		return lu.Image
	}
	return nil
}
// BuildResultSet maps a target ID to the result of its build.
type BuildResultSet map[model.TargetID]BuildResult

// LiveUpdatedContainerIDs collects the IDs of every container that was
// live-updated in-place by any result in the set.
func (set BuildResultSet) LiveUpdatedContainerIDs() []container.ID {
	ids := []container.ID{}
	for _, br := range set {
		if lur, ok := br.(LiveUpdateBuildResult); ok {
			ids = append(ids, lur.LiveUpdatedContainerIDs...)
		}
	}
	return ids
}
// DeployedUIDSet collects the UIDs deployed to Kubernetes by every
// K8sBuildResult in the set.
func (set BuildResultSet) DeployedUIDSet() UIDSet {
	uids := NewUIDSet()
	for _, br := range set {
		if kr, ok := br.(K8sBuildResult); ok {
			uids.Add(kr.DeployedUIDs...)
		}
	}
	return uids
}
// MergeBuildResultsSet combines two result sets into a new set. Where both
// sets contain the same target ID, the entry from b wins.
func MergeBuildResultsSet(a, b BuildResultSet) BuildResultSet {
	merged := make(BuildResultSet, len(a)+len(b))
	for id, result := range a {
		merged[id] = result
	}
	for id, result := range b {
		merged[id] = result
	}
	return merged
}
// BuildTypes returns the distinct build types present in the set, in no
// particular order. Nil results are skipped.
func (set BuildResultSet) BuildTypes() []model.BuildType {
	seen := make(map[model.BuildType]bool, len(set))
	for _, br := range set {
		if br == nil {
			continue
		}
		seen[br.BuildType()] = true
	}
	buildTypes := make([]model.BuildType, 0, len(seen))
	for bt := range seen {
		buildTypes = append(buildTypes, bt)
	}
	return buildTypes
}
// OneAndOnlyLiveUpdatedContainerID returns a container ID iff it's the only
// container ID in the result set. If there are multiple container IDs, we
// have to give up and return the empty ID.
func (set BuildResultSet) OneAndOnlyLiveUpdatedContainerID() container.ID {
	var only container.ID
	for _, br := range set {
		lur, ok := br.(LiveUpdateBuildResult)
		if !ok || len(lur.LiveUpdatedContainerIDs) == 0 {
			continue
		}

		// A single result updating several containers already disqualifies us.
		if len(lur.LiveUpdatedContainerIDs) > 1 {
			return ""
		}

		cID := lur.LiveUpdatedContainerIDs[0]
		if cID == "" {
			continue
		}
		// Two different results updating different containers also disqualify us.
		if only != "" && only != cID {
			return ""
		}
		only = cID
	}
	return only
}
// BuildState is the state of the system since the last successful build.
// This data structure should be considered immutable.
// All methods that return a new BuildState should first clone the existing build state.
type BuildState struct {
	// The last successful build.
	LastResult BuildResult

	// Files changed since the last result was built.
	// This must be liberal: it's ok if this has too many files, but not ok if it has too few.
	FilesChangedSet map[string]bool

	// Containers currently running this target, as attached by
	// WithRunningContainers.
	RunningContainers []ContainerInfo
}
// NewBuildState returns a BuildState recording the given last result and the
// files changed since that result.
func NewBuildState(result BuildResult, files []string) BuildState {
	changed := make(map[string]bool, len(files))
	for _, f := range files {
		changed[f] = true
	}
	return BuildState{LastResult: result, FilesChangedSet: changed}
}
// WithRunningContainers returns a copy of this BuildState with its running
// containers replaced by cInfos.
func (b BuildState) WithRunningContainers(cInfos []ContainerInfo) BuildState {
	next := b
	next.RunningContainers = cInfos
	return next
}
// OneContainerInfo returns the first running container, or the zero
// ContainerInfo when none are running.
//
// NOTE(maia): Interim method to replicate old behavior where every
// BuildState had a single ContainerInfo.
func (b BuildState) OneContainerInfo() ContainerInfo {
	if len(b.RunningContainers) > 0 {
		return b.RunningContainers[0]
	}
	return ContainerInfo{}
}
// LastImageAsString renders the last build's image reference, or "" when the
// last result carries no image.
func (b BuildState) LastImageAsString() string {
	if img := ImageFromBuildResult(b.LastResult); img != nil {
		return img.String()
	}
	return ""
}
// FilesChanged returns the files changed since the last result in sorted
// order. The sorting helps ensure that this is deterministic, both for
// testing and for deterministic builds.
func (b BuildState) FilesChanged() []string {
	result := make([]string, 0, len(b.FilesChangedSet))
	// `for file := range` — the redundant `, _` in the original is
	// non-idiomatic (gofmt -s / vet flag it).
	for file := range b.FilesChangedSet {
		result = append(result, file)
	}
	sort.Strings(result)
	return result
}
// IsEmpty reports whether there are no previous build results.
func (b BuildState) IsEmpty() bool {
	return b.LastResult == nil
}

// HasImage reports whether the last build result carries an image.
func (b BuildState) HasImage() bool {
	img := ImageFromBuildResult(b.LastResult)
	return img != nil
}
// NeedsImageBuild reports whether the image represented by this state needs
// to be built. If the image has already been built, and no files have been
// changed since then, then we can re-use the previous result.
func (b BuildState) NeedsImageBuild() bool {
	if b.LastResult == nil || b.LastResult.BuildType() != model.BuildTypeImage {
		return true
	}
	return len(b.FilesChangedSet) > 0
}
// BuildStateSet maps a target ID to its BuildState.
type BuildStateSet map[model.TargetID]BuildState

// Empty reports whether the set holds no build states.
func (set BuildStateSet) Empty() bool {
	return len(set) == 0
}
// FilesChanged returns the deduplicated union of files changed across every
// state in the set, in sorted order.
func (set BuildStateSet) FilesChanged() []string {
	seen := map[string]bool{}
	for _, state := range set {
		for f := range state.FilesChangedSet {
			seen[f] = true
		}
	}
	files := make([]string, 0, len(seen))
	for f := range seen {
		files = append(files, f)
	}
	sort.Strings(files)
	return files
}
// ContainerInfo describes a single running & ready container.
type ContainerInfo struct {
	PodID         k8s.PodID
	ContainerID   container.ID
	ContainerName container.Name
	Namespace     k8s.Namespace
}

// Empty reports whether this ContainerInfo is the zero value.
func (c ContainerInfo) Empty() bool {
	var zero ContainerInfo
	return c == zero
}
// IDsForInfos extracts the container ID from each of the given infos,
// preserving order.
func IDsForInfos(infos []ContainerInfo) []container.ID {
	ids := make([]container.ID, len(infos))
	for i := range infos {
		ids[i] = infos[i].ContainerID
	}
	return ids
}
// AllRunningContainers collects ContainerInfos for the given manifest target:
// for Docker Compose manifests, the DC container; otherwise, the containers
// found for each image target's single pod.
func AllRunningContainers(mt *ManifestTarget) []ContainerInfo {
	if mt.Manifest.IsDC() {
		return RunningContainersForDC(mt.State.DCRuntimeState())
	}

	var result []ContainerInfo
	for _, iTarget := range mt.Manifest.ImageTargets {
		cInfos, err := RunningContainersForTargetForOnePod(iTarget, mt.State.K8sRuntimeState())
		if err != nil {
			// HACK(maia): just don't collect container info for targets running
			// more than one pod -- we don't support LiveUpdating them anyway,
			// so no need to monitor those containers for crashes.
			continue
		}
		result = append(result, cInfos...)
	}
	return result
}
// RunningContainersForTargetForOnePod returns info for all ready containers
// matching the image target, provided they all run on a single pod.
// (If this image is running on multiple pods, return an error.)
//
// Returns (nil, nil) — no containers, no error — when there is no pod yet,
// the pod has no ID, the pod predates the most recent deploy, or any matching
// container is missing info / not ready.
func RunningContainersForTargetForOnePod(iTarget model.ImageTarget, runtimeState K8sRuntimeState) ([]ContainerInfo, error) {
	if runtimeState.PodLen() > 1 {
		return nil, fmt.Errorf("can only get container info for a single pod; image target %s has %d pods", iTarget.ID(), runtimeState.PodLen())
	}

	if runtimeState.PodLen() == 0 {
		return nil, nil
	}

	pod := runtimeState.MostRecentPod()
	if pod.PodID == "" {
		return nil, nil
	}

	// If there was a recent deploy, the runtime state might not have the
	// new pods yet. We check the PodAncestorID and see if it's in the most
	// recent deploy set. If it's not, then we should ignore these pods.
	ancestorUID := runtimeState.PodAncestorUID
	if ancestorUID != "" && !runtimeState.DeployedUIDSet.Contains(ancestorUID) {
		return nil, nil
	}

	var containers []ContainerInfo
	for _, c := range pod.Containers {
		// Only return containers matching our image
		if c.ImageRef == nil || iTarget.DeploymentRef.Name() != c.ImageRef.Name() {
			continue
		}
		if c.ID == "" || c.Name == "" || !c.Ready {
			// If we're missing any relevant info for this container, OR if the
			// container isn't ready, we can't update it in place.
			// (Since we'll need to fully rebuild this image, we shouldn't bother
			// in-place updating ANY containers on this pod -- they'll all
			// be recreated when we image build. So don't return ANY ContainerInfos.)
			return nil, nil
		}
		containers = append(containers, ContainerInfo{
			PodID:         pod.PodID,
			ContainerID:   c.ID,
			ContainerName: c.Name,
			Namespace:     pod.Namespace,
		})
	}

	return containers, nil
}
// RunningContainersForDC returns a single-element slice holding the
// container ID that Docker Compose reports for this service.
func RunningContainersForDC(state dockercompose.State) []ContainerInfo {
	return []ContainerInfo{{ContainerID: state.ContainerID}}
}
var BuildStateClean = BuildState{}