Skip to content

Commit

Permalink
tiltfile: add an api for changing the readiness mode of a resource (#3640)
Browse files Browse the repository at this point in the history

Helps resolve #3619
  • Loading branch information
nicks committed Jul 30, 2020
1 parent a55bbf4 commit f64f63f
Show file tree
Hide file tree
Showing 16 changed files with 239 additions and 68 deletions.
13 changes: 8 additions & 5 deletions internal/engine/buildcontrol/build_control_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -89,16 +89,19 @@ func TestLocalDependsOnNonWorkloadK8s(t *testing.T) {
defer f.TearDown()

local1 := f.upsertLocalManifest("local1", withResourceDeps("k8s1"))
k8s1 := f.upsertK8sManifest("k8s1", withK8sNonWorkload())
k8s2 := f.upsertK8sManifest("k8s2", withK8sNonWorkload())
k8s1 := f.upsertK8sManifest("k8s1", withK8sPodReadiness(model.PodReadinessIgnore))
k8s2 := f.upsertK8sManifest("k8s2", withK8sPodReadiness(model.PodReadinessIgnore))

f.assertNextTargetToBuild("k8s1")

k8s1.State.AddCompletedBuild(model.BuildRecord{
StartTime: time.Now(),
FinishTime: time.Now(),
})
k8s1.State.RuntimeState = store.K8sRuntimeState{NonWorkload: true, HasEverDeployedSuccessfully: true}
k8s1.State.RuntimeState = store.K8sRuntimeState{
PodReadinessMode: model.PodReadinessIgnore,
HasEverDeployedSuccessfully: true,
}

f.assertNextTargetToBuild("local1")
local1.State.AddCompletedBuild(model.BuildRecord{
Expand Down Expand Up @@ -260,8 +263,8 @@ func withResourceDeps(deps ...string) manifestOption {
return m.WithResourceDeps(deps...)
})
}
func withK8sNonWorkload() manifestOption {
func withK8sPodReadiness(pr model.PodReadinessMode) manifestOption {
return manifestOption(func(m manifestbuilder.ManifestBuilder) manifestbuilder.ManifestBuilder {
return m.WithK8sNonWorkload()
return m.WithK8sPodReadiness(pr)
})
}
2 changes: 1 addition & 1 deletion internal/engine/buildcontroller_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1378,7 +1378,7 @@ func TestLocalDependsOnNonWorkloadK8s(t *testing.T) {
Build()
k8s1 := manifestbuilder.New(f, "k8s1").
WithK8sYAML(testyaml.SanchoYAML).
WithK8sNonWorkload().
WithK8sPodReadiness(model.PodReadinessIgnore).
Build()
f.Start([]model.Manifest{local1, k8s1})

Expand Down
5 changes: 0 additions & 5 deletions internal/hud/webview/analytics_nudge.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,12 +18,7 @@ func NeedsNudge(st store.EngineState) bool {
}

for _, targ := range manifestTargs {
if targ.Manifest.NonWorkloadManifest() {
continue
}

if !targ.State.LastSuccessfulDeployTime.IsZero() {
// A resource has been green at some point
return true
}
}
Expand Down
16 changes: 0 additions & 16 deletions internal/hud/webview/analytics_nudge_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,26 +7,10 @@ import (
"github.com/stretchr/testify/assert"
"github.com/tilt-dev/wmclient/pkg/analytics"

"github.com/tilt-dev/tilt/internal/k8s"
"github.com/tilt-dev/tilt/internal/k8s/testyaml"
"github.com/tilt-dev/tilt/internal/store"
"github.com/tilt-dev/tilt/pkg/model"
)

func TestNeedsNudgeK8sYaml(t *testing.T) {
state := newState(nil)

m, err := k8s.NewK8sOnlyManifestFromYAML(testyaml.SanchoYAML)
assert.NoError(t, err)
targ := store.NewManifestTarget(m)
targ.State = &store.ManifestState{LastSuccessfulDeployTime: time.Now()}
state.UpsertManifestTarget(targ)

nudge := NeedsNudge(*state)
assert.False(t, nudge,
"manifest is k8s_yaml, expected needsNudge = false")
}

func TestNeedsNudgeRedManifest(t *testing.T) {
state := newState(nil)

Expand Down
2 changes: 1 addition & 1 deletion internal/hud/webview/convert.go
Original file line number Diff line number Diff line change
Expand Up @@ -212,7 +212,7 @@ func tiltfileResourceProtoView(s store.EngineState) (*proto_webview.Resource, er
func protoPopulateResourceInfoView(mt *store.ManifestTarget, r *proto_webview.Resource) error {
r.RuntimeStatus = string(model.RuntimeStatusNotApplicable)

if mt.Manifest.NonWorkloadManifest() {
if mt.Manifest.PodReadinessMode() == model.PodReadinessIgnore {
r.YamlResourceInfo = &proto_webview.YAMLResourceInfo{
K8SResources: mt.Manifest.K8sTarget().DisplayNames,
}
Expand Down
8 changes: 4 additions & 4 deletions internal/k8s/target.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ func MustTarget(name model.TargetName, yaml string) model.K8sTarget {
if err != nil {
panic(fmt.Errorf("MustTarget: %v", err))
}
target, err := NewTarget(name, entities, nil, nil, nil, nil, false, nil)
target, err := NewTarget(name, entities, nil, nil, nil, nil, model.PodReadinessIgnore, nil)
if err != nil {
panic(fmt.Errorf("MustTarget: %v", err))
}
Expand All @@ -31,7 +31,7 @@ func NewTarget(
extraPodSelectors []labels.Selector,
dependencyIDs []model.TargetID,
refInjectCounts map[string]int,
nonWorkload bool,
podReadinessMode model.PodReadinessMode,
allLocators []ImageLocator) (model.K8sTarget, error) {
sorted := SortedEntities(entities)
yaml, err := SerializeSpecYAML(sorted)
Expand Down Expand Up @@ -65,13 +65,13 @@ func NewTarget(
ExtraPodSelectors: extraPodSelectors,
DisplayNames: displayNames,
ObjectRefs: objectRefs,
NonWorkload: nonWorkload,
PodReadinessMode: podReadinessMode,
ImageLocators: myLocators,
}.WithDependencyIDs(dependencyIDs).WithRefInjectCounts(refInjectCounts), nil
}

func NewK8sOnlyManifest(name model.ManifestName, entities []K8sEntity, allLocators []ImageLocator) (model.Manifest, error) {
kTarget, err := NewTarget(name.TargetName(), entities, nil, nil, nil, nil, true, allLocators)
kTarget, err := NewTarget(name.TargetName(), entities, nil, nil, nil, nil, model.PodReadinessIgnore, allLocators)
if err != nil {
return model.Manifest{}, err
}
Expand Down
3 changes: 2 additions & 1 deletion internal/k8s/target_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -7,11 +7,12 @@ import (
"github.com/stretchr/testify/require"

"github.com/tilt-dev/tilt/internal/k8s/testyaml"
"github.com/tilt-dev/tilt/pkg/model"
)

func TestNewTargetSortsK8sEntities(t *testing.T) {
entities := MustParseYAMLFromString(t, testyaml.OutOfOrderYaml)
targ, err := NewTarget("foo", entities, nil, nil, nil, nil, false, nil)
targ, err := NewTarget("foo", entities, nil, nil, nil, nil, model.PodReadinessWait, nil)
require.NoError(t, err)

expectedKindOrder := []string{"PersistentVolume", "PersistentVolumeClaim", "ConfigMap", "Service", "StatefulSet", "Job", "Pod"}
Expand Down
2 changes: 1 addition & 1 deletion internal/store/engine_state.go
Original file line number Diff line number Diff line change
Expand Up @@ -756,7 +756,7 @@ func resourceInfoView(mt *ManifestTarget) view.ResourceInfoView {
runStatus = mt.State.RuntimeState.RuntimeStatus()
}

if mt.Manifest.NonWorkloadManifest() {
if mt.Manifest.PodReadinessMode() == model.PodReadinessIgnore {
return view.YAMLResourceInfo{
K8sDisplayNames: mt.Manifest.K8sTarget().DisplayNames,
}
Expand Down
8 changes: 4 additions & 4 deletions internal/store/runtime_state.go
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,7 @@ type K8sRuntimeState struct {
LastReadyOrSucceededTime time.Time
HasEverDeployedSuccessfully bool

NonWorkload bool
PodReadinessMode model.PodReadinessMode
}

func (K8sRuntimeState) RuntimeState() {}
Expand All @@ -94,7 +94,7 @@ func NewK8sRuntimeStateWithPods(m model.Manifest, pods ...Pod) K8sRuntimeState {

func NewK8sRuntimeState(m model.Manifest) K8sRuntimeState {
return K8sRuntimeState{
NonWorkload: m.K8sTarget().NonWorkload,
PodReadinessMode: m.PodReadinessMode(),
Pods: make(map[k8s.PodID]*Pod),
LBs: make(map[k8s.ServiceName]*url.URL),
DeployedUIDSet: NewUIDSet(),
Expand All @@ -116,7 +116,7 @@ func (s K8sRuntimeState) RuntimeStatus() model.RuntimeStatus {
return model.RuntimeStatusPending
}

if s.NonWorkload {
if s.PodReadinessMode == model.PodReadinessIgnore {
return model.RuntimeStatusOK
}

Expand Down Expand Up @@ -149,7 +149,7 @@ func (s K8sRuntimeState) HasEverBeenReadyOrSucceeded() bool {
if !s.HasEverDeployedSuccessfully {
return false
}
if s.NonWorkload {
if s.PodReadinessMode == model.PodReadinessIgnore {
return true
}
return !s.LastReadyOrSucceededTime.IsZero()
Expand Down
24 changes: 15 additions & 9 deletions internal/testutils/manifestbuilder/manifestbuilder.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ type ManifestBuilder struct {
f Fixture
name model.ManifestName

k8sNonWorkload bool
k8sPodReadiness model.PodReadinessMode
k8sYAML string
k8sPodSelectors []labels.Selector
k8sImageLocators []k8s.ImageLocator
Expand All @@ -38,10 +38,18 @@ type ManifestBuilder struct {
}

func New(f Fixture, name model.ManifestName) ManifestBuilder {
k8sPodReadiness := model.PodReadinessWait

// TODO(nick): A better solution would be to identify whether this
// is a workload based on the YAML.
if name == model.UnresourcedYAMLManifestName {
k8sPodReadiness = model.PodReadinessIgnore
}

return ManifestBuilder{
f: f,
name: name,
k8sNonWorkload: name == model.UnresourcedYAMLManifestName,
f: f,
name: name,
k8sPodReadiness: k8sPodReadiness,
}
}

Expand All @@ -52,10 +60,8 @@ func (b ManifestBuilder) WithNamedJSONPathImageLocator(name, path string) Manife
return b
}

// TODO(nick): A better solution would be to identify whether this
// is a workload based on the YAML.
func (b ManifestBuilder) WithK8sNonWorkload() ManifestBuilder {
b.k8sNonWorkload = true
func (b ManifestBuilder) WithK8sPodReadiness(pr model.PodReadinessMode) ManifestBuilder {
b.k8sPodReadiness = pr
return b
}

Expand Down Expand Up @@ -142,7 +148,7 @@ func (b ManifestBuilder) Build() model.Manifest {
for _, locator := range b.k8sImageLocators {
k8sTarget.ImageLocators = append(k8sTarget.ImageLocators, locator)
}
k8sTarget.NonWorkload = b.k8sNonWorkload
k8sTarget.PodReadinessMode = b.k8sPodReadiness

m = assembleK8s(
model.Manifest{Name: b.name, ResourceDependencies: rds},
Expand Down
8 changes: 8 additions & 0 deletions internal/tiltfile/k8s.go
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,8 @@ type k8sResource struct {
// labels for pods that we should watch and associate with this resource
extraPodSelectors []labels.Selector

podReadinessMode model.PodReadinessMode

dependencyIDs []model.TargetID

triggerMode triggerMode
Expand Down Expand Up @@ -75,6 +77,7 @@ type k8sResourceOptions struct {
resourceDeps []string
objects []string
manuallyGrouped bool
podReadinessMode model.PodReadinessMode
}

func (r *k8sResource) addRefSelector(selector container.RefSelector) {
Expand Down Expand Up @@ -401,6 +404,7 @@ func (s *tiltfileState) k8sResourceV2(thread *starlark.Thread, fn *starlark.Buil
var triggerMode triggerMode
var resourceDepsVal starlark.Sequence
var objectsVal starlark.Sequence
var podReadinessMode tiltfile_k8s.PodReadinessMode
autoInit := true

if err := s.unpackArgs(fn.Name(), args, kwargs,
Expand All @@ -412,6 +416,7 @@ func (s *tiltfileState) k8sResourceV2(thread *starlark.Thread, fn *starlark.Buil
"resource_deps?", &resourceDepsVal,
"objects?", &objectsVal,
"auto_init?", &autoInit,
"pod_readiness?", &podReadinessMode,
); err != nil {
return nil, err
}
Expand Down Expand Up @@ -456,6 +461,8 @@ func (s *tiltfileState) k8sResourceV2(thread *starlark.Thread, fn *starlark.Buil
return nil, fmt.Errorf("k8s_resource has only non-workload objects but doesn't provide a new_name")
}

// NOTE(nick): right now this overwrites all previously set options on this
// resource. Is it worthwhile to make this additive?
s.k8sResourceOptions[resourceName] = k8sResourceOptions{
newName: newName,
portForwards: portForwards,
Expand All @@ -466,6 +473,7 @@ func (s *tiltfileState) k8sResourceV2(thread *starlark.Thread, fn *starlark.Buil
resourceDeps: resourceDeps,
objects: objects,
manuallyGrouped: manuallyGrouped,
podReadinessMode: podReadinessMode.Value,
}

return starlark.None, nil
Expand Down
39 changes: 39 additions & 0 deletions internal/tiltfile/k8s/readiness.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,39 @@
package k8s

import (
"fmt"

"go.starlark.net/starlark"

"github.com/tilt-dev/tilt/internal/tiltfile/value"
"github.com/tilt-dev/tilt/pkg/model"
)

// PodReadinessMode is a starlark-unpackable wrapper around
// model.PodReadinessMode, used to deserialize the pod readiness
// setting passed as a Tiltfile argument.
type PodReadinessMode struct {
// Value is the deserialized mode, populated by Unpack.
Value model.PodReadinessMode
}

// Unpack implements starlark unpacking for PodReadinessMode, converting a
// starlark string value into the corresponding model.PodReadinessMode.
//
// Accepted inputs are the string forms of model.PodReadinessIgnore and
// model.PodReadinessWait; the empty string maps to model.PodReadinessNone
// (i.e., the caller did not specify a mode). Any other value is an error.
func (m *PodReadinessMode) Unpack(v starlark.Value) error {
	s, ok := value.AsString(v)
	if !ok {
		// Go convention (staticcheck ST1005): error strings are not capitalized.
		return fmt.Errorf("must be a string. Got: %s", v.Type())
	}

	switch s {
	case string(model.PodReadinessIgnore):
		m.Value = model.PodReadinessIgnore
	case string(model.PodReadinessWait):
		m.Value = model.PodReadinessWait
	case "":
		// Unspecified; downstream code is expected to infer a default.
		m.Value = model.PodReadinessNone
	default:
		return fmt.Errorf("invalid value. Allowed: {%s, %s}. Got: %s",
			model.PodReadinessIgnore, model.PodReadinessWait, s)
	}
	return nil
}
32 changes: 28 additions & 4 deletions internal/tiltfile/tiltfile_state.go
Original file line number Diff line number Diff line change
Expand Up @@ -693,6 +693,7 @@ func (s *tiltfileState) assembleK8sV2() error {
}
if r, ok := s.k8sByName[workload]; ok {
r.extraPodSelectors = opts.extraPodSelectors
r.podReadinessMode = opts.podReadinessMode
r.portForwards = opts.portForwards
r.triggerMode = opts.triggerMode
r.autoInit = opts.autoInit
Expand Down Expand Up @@ -1106,13 +1107,36 @@ func (s *tiltfileState) translateK8s(resources []*k8sResource) ([]model.Manifest
ResourceDependencies: mds,
}

nonWorkload := false
if r.manuallyGrouped {
nonWorkload = len(r.extraPodSelectors) == 0 && len(r.dependencyIDs) == 0
podReadinessMode := r.podReadinessMode
if podReadinessMode == model.PodReadinessNone {
// Auto-infer the readiness mode
//
// CONVO:
// jazzdan: This still feels overloaded to me
// nicks: i think whenever we define a new CRD, we need to know:

// how to find the images in it
// how to find any pods it deploys (if they can't be found by owner references)
// if it should not expect pods at all (e.g., PostgresVersion)
// if it should wait for the pods to be ready before building the next resource (e.g., servers)
// if it should wait for the pods to be complete before building the next resource (e.g., jobs)
// and it's complicated a bit by the fact that there are both normal CRDs where the image shows up in the same place each time, and more meta CRDs (like HelmRelease) where it might appear in different places
//
// feels like we're still doing this very ad hoc rather than holistically
podReadinessMode = model.PodReadinessWait

// If the resource was
// 1) manually grouped (i.e., we didn't find any images in it)
// 2) doesn't have pod selectors, and
// 3) doesn't depend on images
// assume that it will never create pods.
if r.manuallyGrouped && len(r.extraPodSelectors) == 0 && len(r.dependencyIDs) == 0 {
podReadinessMode = model.PodReadinessIgnore
}
}

k8sTarget, err := k8s.NewTarget(mn.TargetName(), r.entities, s.defaultedPortForwards(r.portForwards),
r.extraPodSelectors, r.dependencyIDs, r.imageRefMap, nonWorkload, locators)
r.extraPodSelectors, r.dependencyIDs, r.imageRefMap, podReadinessMode, locators)
if err != nil {
return nil, err
}
Expand Down

0 comments on commit f64f63f

Please sign in to comment.