buildcontrol: remove crash rebuilds (#5633)
nicks committed Mar 28, 2022
1 parent: f4ae5b3 · commit: 044acd5
Showing 24 changed files with 44 additions and 585 deletions.
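
For orientation (an editor's note, not part of the commit page): nearly every deletion below revolves around a single piece of manifest state. Its shape can be inferred from how the removed code reads and writes it; a minimal sketch, with all unrelated fields of store.ManifestState elided:

    // Inferred from usage in the diffs below (e.g. mt.State.NeedsRebuildFromCrash = true).
    type ManifestState struct {
        // ...
        // True when a crash left the running container without up-to-date code
        // (per the removed comment in build_control.go). Retired by this commit.
        NeedsRebuildFromCrash bool
    }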
8 changes: 0 additions & 8 deletions internal/container/container.go
@@ -73,11 +73,3 @@ func MustWithTag(name reference.Named, tag string) reference.NamedTagged {
}
return nt
}

func NewIDSet(ids ...ID) map[ID]bool {
result := make(map[ID]bool, len(ids))
for _, id := range ids {
result[id] = true
}
return result
}
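
For reference, the helper deleted above builds a boolean set keyed by container ID. A minimal usage sketch (the call site is hypothetical, not taken from the repository):

    // container.ID behaves like a string; NewIDSet turns a list of IDs
    // into a map for O(1) membership checks.
    updated := container.NewIDSet(container.ID("a1b2c3"), container.ID("d4e5f6"))
    if updated[container.ID("a1b2c3")] {
        // this container was touched
    }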
14 changes: 1 addition & 13 deletions internal/engine/buildcontrol/build_control.go
@@ -88,13 +88,6 @@ func NextTargetToBuild(state store.EngineState) (*store.ManifestTarget, HoldSet)
return NextUnbuiltTargetToBuild(unbuilt), holds
}

// Next, prioritize builds that crashed and need a rebuild to have up-to-date code.
for _, mt := range targets {
if mt.State.NeedsRebuildFromCrash {
return mt, holds
}
}

// Check to see if any targets are currently being successfully reconciled,
// and so full rebuilds should be held back. This takes manual triggers into account.
HoldLiveUpdateTargetsHandledByReconciler(state, targets, holds)
@@ -180,7 +173,7 @@ func canReuseImageTargetHeuristic(spec model.TargetSpec, status store.BuildStatu
}

switch result.(type) {
case store.ImageBuildResult, store.LiveUpdateBuildResult:
case store.ImageBuildResult:
return true
}
return false
@@ -466,11 +459,6 @@ func FindTargetsNeedingAnyBuild(state store.EngineState) []*store.ManifestTarget
continue
}

if target.State.NeedsRebuildFromCrash {
result = append(result, target)
continue
}

if queue[target.Manifest.Name] {
result = append(result, target)
continue
18 changes: 1 addition & 17 deletions internal/engine/buildcontrol/build_control_test.go
@@ -28,7 +28,7 @@ import (
func TestNextTargetToBuildDoesntReturnCurrentlyBuildingTarget(t *testing.T) {
f := newTestFixture(t)

mt := f.manifestNeedingCrashRebuild()
mt := f.upsertK8sManifest("k8s1")
f.st.UpsertManifestTarget(mt)

// Verify this target is normally next-to-build
@@ -741,22 +741,6 @@ func (f *testFixture) upsertLocalManifest(name model.ManifestName, opts ...manif
return f.upsertManifest(b.WithLocalResource(fmt.Sprintf("exec-%s", name), nil).Build())
}

func (f *testFixture) manifestNeedingCrashRebuild() *store.ManifestTarget {
m := manifestbuilder.New(f, "needs-crash-rebuild").
WithK8sYAML(testyaml.SanchoYAML).
Build()
mt := store.NewManifestTarget(m)
mt.State.BuildHistory = []model.BuildRecord{
model.BuildRecord{
StartTime: time.Now().Add(-5 * time.Second),
FinishTime: time.Now(),
},
}
mt.State.NeedsRebuildFromCrash = true
mt.State.DisableState = v1alpha1.DisableStateEnabled
return mt
}

type manifestOption func(manifestbuilder.ManifestBuilder) manifestbuilder.ManifestBuilder

func withResourceDeps(deps ...string) manifestOption {
23 changes: 0 additions & 23 deletions internal/engine/buildcontrol/image_build_and_deployer_test.go
@@ -258,29 +258,6 @@ func TestImageIsClean(t *testing.T) {
assert.Equal(t, 0, f.docker.PushCount)
}

func TestImageIsDirtyAfterContainerBuild(t *testing.T) {
f := newIBDFixture(t, clusterid.ProductGKE)

manifest := NewSanchoDockerBuildManifest(f)
iTargetID1 := manifest.ImageTargets[0].ID()
result1 := store.NewLiveUpdateBuildResult(
iTargetID1,
[]container.ID{container.ID("12345")})

stateSet := store.BuildStateSet{
iTargetID1: store.NewBuildState(result1, []string{}, nil),
}
_, err := f.BuildAndDeploy(BuildTargets(manifest), stateSet)
if err != nil {
t.Fatal(err)
}

// Expect build + push; last result has a container ID, which implies that it was an in-place
// update, so the current state of this manifest is NOT reflected in an existing image.
assert.Equal(t, 1, f.docker.BuildCount)
assert.Equal(t, 1, f.docker.PushCount)
}

func TestMultiStageDockerBuild(t *testing.T) {
f := newIBDFixture(t, clusterid.ProductGKE)

22 changes: 10 additions & 12 deletions internal/engine/buildcontroller.go
@@ -233,18 +233,16 @@ func buildStateSet(ctx context.Context, manifest model.Manifest,
//
// This will probably need to change as the mapping between containers and
// manifests becomes many-to-one.
if !ms.NeedsRebuildFromCrash {
iTarget, ok := spec.(model.ImageTarget)
if ok {
selector := iTarget.LiveUpdateSpec.Selector
if manifest.IsK8s() && selector.Kubernetes != nil {
buildState.KubernetesSelector = selector.Kubernetes
buildState.KubernetesResource = kresource
}

if manifest.IsDC() {
buildState.DockerComposeService = dcs
}
iTarget, ok := spec.(model.ImageTarget)
if ok {
selector := iTarget.LiveUpdateSpec.Selector
if manifest.IsK8s() && selector.Kubernetes != nil {
buildState.KubernetesSelector = selector.Kubernetes
buildState.KubernetesResource = kresource
}

if manifest.IsDC() {
buildState.DockerComposeService = dcs
}
}
result[id] = buildState
46 changes: 0 additions & 46 deletions internal/engine/buildcontroller_test.go
@@ -232,52 +232,6 @@ func TestBuildControllerImageBuildTrigger(t *testing.T) {
}
}

// Make sure we don't try to display messages about live update after a full build trigger.
// https://github.com/tilt-dev/tilt/issues/3915
func TestFullBuildTriggerClearsLiveUpdate(t *testing.T) {
f := newTestFixture(t)
mName := model.ManifestName("foobar")

manifest := f.newManifest(mName.String())
basePB := f.registerForDeployer(manifest)
opt := func(ia InitAction) InitAction {
ia.TerminalMode = store.TerminalModeStream
return ia
}
f.Start([]model.Manifest{manifest}, opt)

f.nextCallComplete()

f.podEvent(basePB.Build())
f.WaitUntilManifestState("foobar loaded", "foobar", func(ms store.ManifestState) bool {
return ms.K8sRuntimeState().PodLen() == 1
})
f.WaitUntil("foobar k8sresource loaded", func(s store.EngineState) bool {
return s.KubernetesResources["foobar"] != nil && len(s.KubernetesResources["foobar"].FilteredPods) == 1
})
f.withManifestState("foobar", func(ms store.ManifestState) {
ms.LiveUpdatedContainerIDs["containerID"] = true
})

f.b.completeBuildsManually = true
f.store.Dispatch(server.AppendToTriggerQueueAction{Name: mName})
f.WaitUntilManifestState("foobar building", "foobar", func(ms store.ManifestState) bool {
return ms.IsBuilding()
})

f.podEvent(basePB.WithDeletionTime(time.Now()).Build())
f.WaitUntilManifestState("foobar deleting", "foobar", func(ms store.ManifestState) bool {
return ms.K8sRuntimeState().PodLen() == 0
})
assert.Contains(t, f.log.String(), "Initial Build")
f.WaitUntil("Trigger appears", func(st store.EngineState) bool {
return strings.Contains(f.log.String(), "Unknown Trigger")
})
assert.NotContains(t, f.log.String(), "Detected a container change")

f.completeBuildForManifest(manifest)
}

func TestBuildQueueOrdering(t *testing.T) {
f := newTestFixture(t)

40 changes: 2 additions & 38 deletions internal/engine/upper_test.go
@@ -222,10 +222,6 @@ type fakeBuildAndDeployer struct {

buildCount int

// Set this to simulate a container update that returns the container IDs
// it updated.
nextLiveUpdateContainerIDs []container.ID

// Inject the container ID of the container started by Docker Compose.
// If not set, we will auto-generate an ID.
nextDockerComposeContainerID container.ID
@@ -239,13 +235,6 @@ type fakeBuildAndDeployer struct {
// Do not set this directly, use fixture.SetNextBuildError
nextBuildError error

// Set this to simulate a live-update compile error
//
// This is slightly different from a regular compile error, because the containers are
// still running with the synced files. The build system returns a
// result with the container ID.
nextLiveUpdateCompileError error

buildLogOutput map[model.TargetID]string

resultsByID store.BuildResultSet
@@ -350,14 +339,7 @@ func (b *fakeBuildAndDeployer) BuildAndDeploy(ctx context.Context, st store.RSto
return result, err
}

containerIDs := b.nextLiveUpdateContainerIDs
if len(containerIDs) > 0 {
for k := range result {
result[k] = store.NewLiveUpdateBuildResult(k, containerIDs)
}
}

if !call.dc().Empty() && len(b.nextLiveUpdateContainerIDs) == 0 {
if !call.dc().Empty() {
dcContainerID := container.ID(fmt.Sprintf("dc-%s", path.Base(call.dc().ID().Name.String())))
if b.nextDockerComposeContainerID != "" {
dcContainerID = b.nextDockerComposeContainerID
@@ -383,16 +365,13 @@
result[call.k8s().ID()] = nextK8sResult
}

err = b.nextLiveUpdateCompileError
b.nextLiveUpdateCompileError = nil
b.nextLiveUpdateContainerIDs = nil
b.nextDockerComposeContainerID = ""

for key, val := range result {
b.resultsByID[key] = val
}

return result, err
return result, nil
}

func (b *fakeBuildAndDeployer) updateKubernetesApplyStatus(ctx context.Context, kTarg model.K8sTarget, iTargets []model.ImageTarget) error {
@@ -3647,21 +3626,6 @@ func (f *testFixture) SetNextBuildError(err error) {
f.store.RUnlockState()
}

func (f *testFixture) SetNextLiveUpdateCompileError(err error, containerIDs []container.ID) {
f.WaitUntil("any in-flight builds have hit the buildAndDeployer", func(state store.EngineState) bool {
f.b.mu.Lock()
defer f.b.mu.Unlock()
return f.b.buildCount == state.BuildControllerStartCount
})

_ = f.store.RLockState()
f.b.mu.Lock()
f.b.nextLiveUpdateCompileError = err
f.b.nextLiveUpdateContainerIDs = containerIDs
f.b.mu.Unlock()
f.store.RUnlockState()
}

// Wait until the given view test passes.
func (f *testFixture) WaitUntilHUD(msg string, isDone func(view.View) bool) {
f.fakeHud().WaitUntil(f.T(), f.ctx, msg, isDone)
4 changes: 2 additions & 2 deletions internal/hud/buildstatus.go
@@ -39,12 +39,12 @@ func makeBuildStatus(res view.Resource, triggerMode model.TriggerMode) buildStat
}
}

if !res.CurrentBuild.Empty() && !res.CurrentBuild.Reason.IsCrashOnly() {
if !res.CurrentBuild.Empty() {
status = "In prog."
duration = time.Since(res.CurrentBuild.StartTime)
edits = res.CurrentBuild.Edits
reason = res.CurrentBuild.Reason
} else if !res.PendingBuildSince.IsZero() && !res.PendingBuildReason.IsCrashOnly() {
} else if !res.PendingBuildSince.IsZero() {
status = "Pending"
if triggerMode.AutoOnChange() {
duration = time.Since(res.PendingBuildSince)
2 changes: 0 additions & 2 deletions internal/hud/edits.go
@@ -87,8 +87,6 @@ func (esl *EditStatusLineComponent) Render(w rty.Writer, width, height int) erro
if len(bs.edits) == 0 {
if bs.reason.Has(model.BuildReasonFlagInit) {
sb.Fg(cLightText).Text("FIRST BUILD ")
} else if bs.reason.Has(model.BuildReasonFlagCrash) {
sb.Fg(cLightText).Text("CRASH BUILD ")
}
} else {
sb.Fg(cLightText).Text("EDITED FILES ")
4 changes: 0 additions & 4 deletions internal/hud/renderer.go
@@ -11,7 +11,6 @@ import (
"github.com/tilt-dev/tilt/internal/hud/view"
"github.com/tilt-dev/tilt/internal/rty"
"github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"
"github.com/tilt-dev/tilt/pkg/model"
)

const defaultLogPaneHeight = 8
@@ -202,9 +201,6 @@ func isInError(res view.Resource) bool {

func isCrashing(res view.Resource) bool {
return (res.IsK8s() && res.K8sInfo().PodRestarts > 0) ||
res.LastBuild().Reason.Has(model.BuildReasonFlagCrash) ||
res.CurrentBuild.Reason.Has(model.BuildReasonFlagCrash) ||
res.PendingBuildReason.Has(model.BuildReasonFlagCrash) ||
res.IsDC() && res.DockerComposeTarget().RuntimeStatus() == v1alpha1.RuntimeStatusError
}

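With the crash build reason gone, isCrashing keys off runtime signals alone. The resulting function, reconstructed from the surviving lines of the hunk above (a sketch; surrounding code may differ):

    func isCrashing(res view.Resource) bool {
        // A resource counts as crashing if its Kubernetes pod has restarted,
        // or its Docker Compose container is in an error state.
        return (res.IsK8s() && res.K8sInfo().PodRestarts > 0) ||
            res.IsDC() && res.DockerComposeTarget().RuntimeStatus() == v1alpha1.RuntimeStatusError
    }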
