// live_update_build_and_deployer.go

package engine

import (
	"context"
	"fmt"
	"strings"
	"time"

	"github.com/opentracing/opentracing-go"
	"github.com/pkg/errors"

	"github.com/windmilleng/tilt/internal/analytics"
	"github.com/windmilleng/tilt/internal/build"
	"github.com/windmilleng/tilt/internal/container"
	"github.com/windmilleng/tilt/internal/containerupdate"
	"github.com/windmilleng/tilt/internal/engine/buildcontrol"
	"github.com/windmilleng/tilt/internal/ignore"
	"github.com/windmilleng/tilt/internal/k8s"
	"github.com/windmilleng/tilt/internal/ospath"
	"github.com/windmilleng/tilt/internal/store"
	"github.com/windmilleng/tilt/pkg/logger"
	"github.com/windmilleng/tilt/pkg/model"
)

var _ BuildAndDeployer = &LiveUpdateBuildAndDeployer{}
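
// LiveUpdateBuildAndDeployer is a BuildAndDeployer that updates running
// containers in place: it copies changed files into them and re-executes the
// configured run steps, rather than rebuilding and redeploying the image.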
type LiveUpdateBuildAndDeployer struct {
	dcu     *containerupdate.DockerContainerUpdater
	scu     *containerupdate.SyncletUpdater
	ecu     *containerupdate.ExecUpdater
	updMode buildcontrol.UpdateMode
	env     k8s.Env
	runtime container.Runtime
	clock   build.Clock
}
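
// NewLiveUpdateBuildAndDeployer wires up a LiveUpdateBuildAndDeployer with the
// available container updaters, plus the update mode, environment, and
// container runtime used to choose between them.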
func NewLiveUpdateBuildAndDeployer(dcu *containerupdate.DockerContainerUpdater,
	scu *containerupdate.SyncletUpdater, ecu *containerupdate.ExecUpdater,
	updMode buildcontrol.UpdateMode, env k8s.Env, runtime container.Runtime, c build.Clock) *LiveUpdateBuildAndDeployer {
	return &LiveUpdateBuildAndDeployer{
		dcu:     dcu,
		scu:     scu,
		ecu:     ecu,
		updMode: updMode,
		env:     env,
		runtime: runtime,
		clock:   c,
	}
}

// liveUpdInfo is the info needed to perform a live update.
type liveUpdInfo struct {
	iTarget      model.ImageTarget
	state        store.BuildState
	changedFiles []build.PathMapping
	runs         []model.Run
	hotReload    bool
}
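
// Empty reports whether this liveUpdInfo contains no image target to update.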
func (lui liveUpdInfo) Empty() bool { return lui.iTarget.ID() == model.ImageTarget{}.ID() }
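
// BuildAndDeploy extracts the image targets eligible for a Live Update, then
// updates the running containers for each one. Errors that aren't the user's
// fault are returned as-is, so the build falls back to the next
// BuildAndDeployer; user errors (failed run steps) are wrapped so we don't
// fall back.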
func (lubad *LiveUpdateBuildAndDeployer) BuildAndDeploy(ctx context.Context, st store.RStore, specs []model.TargetSpec, stateSet store.BuildStateSet) (store.BuildResultSet, error) {
	liveUpdateStateSet, err := extractImageTargetsForLiveUpdates(specs, stateSet)
	if err != nil {
		return store.BuildResultSet{}, err
	}

	if len(liveUpdateStateSet) == 0 {
		return nil, buildcontrol.SilentRedirectToNextBuilderf("no targets for Live Update found")
	}

	containerUpdater := lubad.containerUpdaterForSpecs(specs)
	liveUpdInfos := make([]liveUpdInfo, 0, len(liveUpdateStateSet))

	for _, luStateTree := range liveUpdateStateSet {
		luInfo, err := liveUpdateInfoForStateTree(luStateTree)
		if err != nil {
			return store.BuildResultSet{}, err
		}

		if !luInfo.Empty() {
			liveUpdInfos = append(liveUpdInfos, luInfo)
		}
	}

	ps := build.NewPipelineState(ctx, len(liveUpdInfos), lubad.clock)
	// The deferred ps.End reads err through the closure, so it reports
	// whatever error value we've assigned by the time this function returns.
	err = nil
	defer func() {
		ps.End(ctx, err)
	}()

	var dontFallBackErr error
	for _, info := range liveUpdInfos {
		ps.StartPipelineStep(ctx, "updating image %s", info.iTarget.Refs.ClusterRef().Name())
		err = lubad.buildAndDeploy(ctx, ps, containerUpdater, info.iTarget, info.state, info.changedFiles, info.runs, info.hotReload)
		if err != nil {
			if !buildcontrol.IsDontFallBackError(err) {
				// Something went wrong, and we want to fall back -- bail and
				// let the next builder take care of it.
				ps.EndPipelineStep(ctx)
				return store.BuildResultSet{}, err
			}
			// If something went wrong due to USER failure (i.e. a run step failed),
			// run the rest of the container updates so all the containers are in
			// a consistent state, then return this error, i.e. don't fall back.
			dontFallBackErr = err
		}
		ps.EndPipelineStep(ctx)
	}

	err = dontFallBackErr
	return createResultSet(liveUpdateStateSet, liveUpdInfos), err
}
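
// buildAndDeploy performs the Live Update for a single image target: it tars
// up the locally changed files, copies them into each running container,
// deletes any files that were removed locally, and runs the boiled run steps.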
func (lubad *LiveUpdateBuildAndDeployer) buildAndDeploy(ctx context.Context, ps *build.PipelineState, cu containerupdate.ContainerUpdater, iTarget model.ImageTarget, state store.BuildState, changedFiles []build.PathMapping, runs []model.Run, hotReload bool) error {
	span, ctx := opentracing.StartSpanFromContext(ctx, "LiveUpdateBuildAndDeployer-buildAndDeploy")
	span.SetTag("target", iTarget.Refs.ConfigurationRef.String())
	defer span.Finish()

	startTime := time.Now()
	defer func() {
		analytics.Get(ctx).Timer("build.container", time.Since(startTime), nil)
	}()

	l := logger.Get(ctx)
	cIDStr := container.ShortStrs(store.IDsForInfos(state.RunningContainers))
	suffix := ""
	if len(state.RunningContainers) != 1 {
		suffix = "(s)"
	}
	ps.StartBuildStep(ctx, "Updating container%s: %s", suffix, cIDStr)

	filter := ignore.CreateBuildContextFilter(iTarget)
	boiledSteps, err := build.BoilRuns(runs, changedFiles)
	if err != nil {
		return err
	}

	// Split the changed files into those deleted locally (to be removed from
	// the container) and those to be copied into it.
	toRemove, toArchive, err := build.MissingLocalPaths(ctx, changedFiles)
	if err != nil {
		return errors.Wrap(err, "MissingLocalPaths")
	}

	if len(toRemove) > 0 {
		l.Infof("Will delete %d file(s) from container%s: %s", len(toRemove), suffix, cIDStr)
		for _, pm := range toRemove {
			l.Infof("- '%s' (matched local path: '%s')", pm.ContainerPath, pm.LocalPath)
		}
	}

	if len(toArchive) > 0 {
		l.Infof("Will copy %d file(s) to container%s: %s", len(toArchive), suffix, cIDStr)
		for _, pm := range toArchive {
			l.Infof("- %s", pm.PrettyStr())
		}
	}

	var lastUserBuildFailure error
	for _, cInfo := range state.RunningContainers {
		archive := build.TarArchiveForPaths(ctx, toArchive, filter)
		err = cu.UpdateContainer(ctx, cInfo, archive,
			build.PathMappingsToContainerPaths(toRemove), boiledSteps, hotReload)
		if err != nil {
			if runFail, ok := build.MaybeRunStepFailure(err); ok {
				// Keep running updates -- we want all containers to have the same
				// files on them even if the Runs don't succeed.
				lastUserBuildFailure = err
				logger.Get(ctx).Infof(" → Failed to update container %s: run step %q failed with exit code: %d",
					cInfo.ContainerID.ShortStr(), runFail.Cmd.String(), runFail.ExitCode)
				continue
			}

			// Something went wrong with this update and it's NOT the user's fault --
			// likely an infrastructure error. Bail, and fall back to a full build.
			return err
		}

		logger.Get(ctx).Infof(" → Container %s updated!", cInfo.ContainerID.ShortStr())
		if lastUserBuildFailure != nil {
			// This build succeeded, but previously at least one failed due to user
			// error. We may have inconsistent state -- bail, and fall back to a
			// full build.
			return fmt.Errorf("Failed to update container: container %s successfully updated, "+
				"but last update failed with '%v'", cInfo.ContainerID.ShortStr(), lastUserBuildFailure)
		}
	}

	if lastUserBuildFailure != nil {
		return buildcontrol.WrapDontFallBackError(lastUserBuildFailure)
	}
	return nil
}

// liveUpdateInfoForStateTree validates the state tree for LiveUpdate and returns
// all the info we need to execute the update.
func liveUpdateInfoForStateTree(stateTree liveUpdateStateTree) (liveUpdInfo, error) {
	iTarget := stateTree.iTarget
	state := stateTree.iTargetState
	filesChanged := stateTree.filesChanged

	var err error
	var fileMappings []build.PathMapping
	var runs []model.Run
	var hotReload bool

	if luInfo := iTarget.LiveUpdateInfo(); !luInfo.Empty() {
		var pathsMatchingNoSync []string
		fileMappings, pathsMatchingNoSync, err = build.FilesToPathMappings(filesChanged, luInfo.SyncSteps())
		if err != nil {
			return liveUpdInfo{}, err
		}
		if len(pathsMatchingNoSync) > 0 {
			prettyPaths := ospath.FileListDisplayNames(iTarget.LocalPaths(), pathsMatchingNoSync)
			return liveUpdInfo{}, buildcontrol.RedirectToNextBuilderInfof(
				"Found file(s) not matching any sync (files: %s)", strings.Join(prettyPaths, ", "))
		}

		// If any changed files match a FallBackOn file, fall back to next BuildAndDeployer
		anyMatch, file, err := luInfo.FallBackOnFiles().AnyMatch(build.PathMappingsToLocalPaths(fileMappings))
		if err != nil {
			return liveUpdInfo{}, err
		}
		if anyMatch {
			prettyFile := ospath.FileListDisplayNames(iTarget.LocalPaths(), []string{file})[0]
			return liveUpdInfo{}, buildcontrol.RedirectToNextBuilderInfof(
				"Detected change to fall_back_on file %q", prettyFile)
		}

		runs = luInfo.RunSteps()
		hotReload = !luInfo.ShouldRestart()
	} else {
		// We should have validated this when generating the LiveUpdateStateTrees, but double check!
		panic(fmt.Sprintf("did not find Live Update info on target %s, "+
			"which should have already been validated for Live Update", iTarget.ID()))
	}

	if len(fileMappings) == 0 {
		// No files matched a sync for this image, no Live Update to run
		return liveUpdInfo{}, nil
	}

	return liveUpdInfo{
		iTarget:      iTarget,
		state:        state,
		changedFiles: fileMappings,
		runs:         runs,
		hotReload:    hotReload,
	}, nil
}
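
// containerUpdaterForSpecs chooses which ContainerUpdater to use: Docker
// Compose targets always get the Docker updater; otherwise the decision is
// driven by the configured update mode, the environment, and the runtime.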
func (lubad *LiveUpdateBuildAndDeployer) containerUpdaterForSpecs(specs []model.TargetSpec) containerupdate.ContainerUpdater {
	isDC := len(model.ExtractDockerComposeTargets(specs)) > 0
	if isDC || lubad.updMode == buildcontrol.UpdateModeContainer {
		return lubad.dcu
	}

	if lubad.updMode == buildcontrol.UpdateModeSynclet {
		return lubad.scu
	}

	if lubad.updMode == buildcontrol.UpdateModeKubectlExec {
		return lubad.ecu
	}

	if shouldUseSynclet(lubad.updMode, lubad.env, lubad.runtime) {
		return lubad.scu
	}

	if lubad.runtime == container.RuntimeDocker && lubad.env.UsesLocalDockerRegistry() {
		return lubad.dcu
	}

	return lubad.ecu
}