Skip to content

Commit

Permalink
tiltfile: add support for labels in *_resource functions (#4767)
Browse files Browse the repository at this point in the history
* initial commit for k8s resource labels

* add labels argument to docker_compose and local_resource functions

* create UIResource objects as tiltfile objects, fix local+dc labels

* (refactor) Simplify the typings for labels within the Tiltfile

* (test) Add tests for label Tiltfile functionality

* (fix) Add label name validation so that labels cannot be empty strings

* (fix) Fix broken tests by setting a default empty map value for labels in Manifest items

Co-authored-by: Matt Landis <matt@windmill.engineering>
  • Loading branch information
lizzthabet and Matt Landis committed Jul 20, 2021
1 parent 477aad4 commit 85f21fe
Show file tree
Hide file tree
Showing 14 changed files with 324 additions and 9 deletions.
34 changes: 31 additions & 3 deletions internal/engine/configs/api.go
Expand Up @@ -94,6 +94,7 @@ func getExistingAPIObjects(ctx context.Context, client ctrlclient.Client) (objec
&v1alpha1.ImageMap{},
&v1alpha1.FileWatch{},
&v1alpha1.Cmd{},
&v1alpha1.UIResource{},
}

// TODO(nick): Parallelize this?
Expand Down Expand Up @@ -129,6 +130,7 @@ func toAPIObjects(tlr tiltfile.TiltfileLoadResult, mode store.EngineMode) object
WatchSettings: tlr.WatchSettings,
EngineMode: mode,
})
result[(&v1alpha1.UIResource{}).GetGroupVersionResource()] = toUIResourceObjects(tlr)
return result
}

Expand Down Expand Up @@ -225,6 +227,32 @@ func toCmdObjects(tlr tiltfile.TiltfileLoadResult) typedObjectSet {
return result
}

// Pulls out all the UIResource objects generated by the Tiltfile.
func toUIResourceObjects(tlr tiltfile.TiltfileLoadResult) typedObjectSet {
	result := typedObjectSet{}

	for _, m := range tlr.Manifests {
		name := string(m.Name)

		// Copy the manifest's labels into a fresh map before adding the
		// owner-kind label: writing into m.Labels directly would mutate
		// shared manifest state (and panic if the map is nil).
		labels := make(map[string]string, len(m.Labels)+1)
		for k, v := range m.Labels {
			labels[k] = v
		}
		// TODO(lizz): consider moving this label assignment to the Tiltfile itself
		labels[LabelOwnerKind] = LabelOwnerKindTiltfile

		r := &v1alpha1.UIResource{
			ObjectMeta: metav1.ObjectMeta{
				Name:   name,
				Labels: labels,
				Annotations: map[string]string{
					v1alpha1.AnnotationManifest: m.Name.String(),
				},
			},
		}
		result[name] = r
	}
	return result
}

// Reconcile the new API objects against the existing API objects.
func updateNewObjects(ctx context.Context, client ctrlclient.Client, newObjects, oldObjects objectSet) error {
// TODO(nick): Does it make sense to parallelize the API calls?
Expand All @@ -248,9 +276,9 @@ func updateNewObjects(ctx context.Context, client ctrlclient.Client, newObjects,
}

// Are there other fields here we should check?
// e.g., once labels are generated from the Tiltfile, it seems
// we should also update the labels when they change.
if !apicmp.DeepEqual(old.GetSpec(), obj.GetSpec()) {
specChanged := !apicmp.DeepEqual(old.GetSpec(), obj.GetSpec())
labelsChanged := !apicmp.DeepEqual(old.GetLabels(), obj.GetLabels())
if specChanged || labelsChanged {
obj.SetResourceVersion(old.GetResourceVersion())
err := client.Update(ctx, obj)
if err != nil {
Expand Down
5 changes: 4 additions & 1 deletion internal/hud/webview/convert.go
Expand Up @@ -228,10 +228,13 @@ func toUIResource(mt *store.ManifestTarget, s store.EngineState) (*v1alpha1.UIRe
// at once).
hasPendingChanges, pendingBuildSince := ms.HasPendingChanges()

labels := mt.Manifest.Labels

name := mt.Manifest.Name
r := &v1alpha1.UIResource{
ObjectMeta: metav1.ObjectMeta{
Name: name.String(),
Name: name.String(),
Labels: labels,
},
Status: v1alpha1.UIResourceStatus{
LastDeployTime: lastDeploy,
Expand Down
8 changes: 8 additions & 0 deletions internal/tiltfile/docker_compose.go
Expand Up @@ -93,6 +93,7 @@ func (s *tiltfileState) dcResource(thread *starlark.Thread, fn *starlark.Builtin
var triggerMode triggerMode
var resourceDepsVal starlark.Sequence
var links links.LinkList
var labels value.LabelSet

if err := s.unpackArgs(fn.Name(), args, kwargs,
"name", &name,
Expand All @@ -109,6 +110,7 @@ func (s *tiltfileState) dcResource(thread *starlark.Thread, fn *starlark.Builtin
"trigger_mode?", &triggerMode,
"resource_deps?", &resourceDepsVal,
"links?", &links,
"labels?", &labels,
); err != nil {
return nil, err
}
Expand Down Expand Up @@ -137,6 +139,8 @@ func (s *tiltfileState) dcResource(thread *starlark.Thread, fn *starlark.Builtin
}
svc.Links = append(svc.Links, links.Links...)

svc.Labels = labels.Values

if imageRefAsStr != nil {
normalized, err := container.ParseNamed(*imageRefAsStr)
if err != nil {
Expand Down Expand Up @@ -190,6 +194,8 @@ type dcService struct {
TriggerMode triggerMode
Links []model.Link

Labels map[string]string

resourceDeps []string
}

Expand Down Expand Up @@ -307,6 +313,8 @@ func (s *tiltfileState) dcServiceToManifest(service *dcService, dcSet dcResource
ResourceDependencies: mds,
}.WithDeployTarget(dcInfo)

m = m.WithLabels(service.Labels)

if service.DfPath == "" {
// DC service may not have Dockerfile -- e.g. may be just an image that we pull and run.
return m, nil
Expand Down
13 changes: 13 additions & 0 deletions internal/tiltfile/k8s.go
Expand Up @@ -61,6 +61,8 @@ type k8sResource struct {
manuallyGrouped bool

links []model.Link

labels map[string]string
}

// holds options passed to `k8s_resource` until assembly happens
Expand All @@ -76,6 +78,7 @@ type k8sResourceOptions struct {
manuallyGrouped bool
podReadinessMode model.PodReadinessMode
links []model.Link
labels map[string]string
}

func (r *k8sResource) addEntities(entities []k8s.K8sEntity,
Expand Down Expand Up @@ -263,6 +266,7 @@ func (s *tiltfileState) k8sResource(thread *starlark.Thread, fn *starlark.Builti
var podReadinessMode tiltfile_k8s.PodReadinessMode
var links links.LinkList
var autoInit = value.BoolOrNone{Value: true}
var labels value.LabelSet

if err := s.unpackArgs(fn.Name(), args, kwargs,
"workload?", &workload,
Expand All @@ -275,6 +279,7 @@ func (s *tiltfileState) k8sResource(thread *starlark.Thread, fn *starlark.Builti
"auto_init?", &autoInit,
"pod_readiness?", &podReadinessMode,
"links?", &links,
"labels?", &labels,
); err != nil {
return nil, err
}
Expand Down Expand Up @@ -341,6 +346,14 @@ func (s *tiltfileState) k8sResource(thread *starlark.Thread, fn *starlark.Builti
}
o.links = append(o.links, links.Links...)

if o.labels == nil {
o.labels = make(map[string]string)
}

for k, v := range labels.Values {
o.labels[k] = v
}

s.k8sResourceOptions[resourceName] = o

return starlark.None, nil
Expand Down
4 changes: 4 additions & 0 deletions internal/tiltfile/local_resource.go
Expand Up @@ -30,6 +30,7 @@ type localResource struct {
ignores []string
allowParallel bool
links []model.Link
labels map[string]string

// for use in testing mvp
tags []string
Expand All @@ -52,6 +53,7 @@ func (s *tiltfileState) localResource(thread *starlark.Thread, fn *starlark.Buil
var ignoresVal starlark.Value
var allowParallel bool
var links links.LinkList
var labels value.LabelSet
autoInit := true

var isTest bool
Expand Down Expand Up @@ -80,6 +82,7 @@ func (s *tiltfileState) localResource(thread *starlark.Thread, fn *starlark.Buil
"serve_cmd_bat?", &serveCmdBatVal,
"allow_parallel?", &allowParallel,
"links?", &links,
"labels?", &labels,
"tags?", &tagsVal,
"env?", &updateEnv,
"serve_env?", &serveEnv,
Expand Down Expand Up @@ -132,6 +135,7 @@ func (s *tiltfileState) localResource(thread *starlark.Thread, fn *starlark.Buil
ignores: ignores,
allowParallel: allowParallel,
links: links.Links,
labels: labels.Values,
tags: tags,
isTest: isTest,
readinessProbe: readinessProbe.Spec(),
Expand Down
15 changes: 15 additions & 0 deletions internal/tiltfile/tiltfile_docker_compose_test.go
Expand Up @@ -664,6 +664,21 @@ default_registry('bar.com')
f.loadErrString("default_registry is not supported with docker compose")
}

// TestDockerComposeLabels verifies that dc_resource's labels argument
// lands on the resulting manifest.
func TestDockerComposeLabels(t *testing.T) {
	f := newFixture(t)
	defer f.TearDown()

	tf := `
docker_compose('docker-compose.yml')
dc_resource("foo", labels="test")
`
	f.dockerfile(filepath.Join("foo", "Dockerfile"))
	f.file("docker-compose.yml", simpleConfig)
	f.file("Tiltfile", tf)

	f.load("foo")
	f.assertNextManifest("foo", resourceLabels("test"))
}

func TestTriggerModeDC(t *testing.T) {
for _, testCase := range []struct {
name string
Expand Down
5 changes: 5 additions & 0 deletions internal/tiltfile/tiltfile_state.go
Expand Up @@ -704,6 +704,7 @@ func (s *tiltfileState) assembleK8s() error {
r.autoInit = opts.autoInit
r.resourceDeps = opts.resourceDeps
r.links = opts.links
r.labels = opts.labels
if opts.newName != "" && opts.newName != r.name {
if _, ok := s.k8sByName[opts.newName]; ok {
return fmt.Errorf("k8s_resource() specified to rename %q to %q, but there already exists a resource with that name", r.name, opts.newName)
Expand Down Expand Up @@ -1035,6 +1036,8 @@ func (s *tiltfileState) translateK8s(resources []*k8sResource, updateSettings mo
ResourceDependencies: mds,
}

m = m.WithLabels(r.labels)

iTargets, err := s.imgTargetsForDependencyIDs(r.dependencyIDs, registry)
if err != nil {
return nil, errors.Wrapf(err, "getting image build info for %s", r.name)
Expand Down Expand Up @@ -1425,6 +1428,8 @@ func (s *tiltfileState) translateLocal() ([]model.Manifest, error) {
ResourceDependencies: mds,
}.WithDeployTarget(lt)

m = m.WithLabels(r.labels)

result = append(result, m)
}

Expand Down
64 changes: 64 additions & 0 deletions internal/tiltfile/tiltfile_test.go
Expand Up @@ -5636,6 +5636,54 @@ k8s_resource(
f.assertNoMoreManifests()
}

// TestK8sResourceLabels verifies that k8s_resource's labels argument
// lands on the resulting manifest.
func TestK8sResourceLabels(t *testing.T) {
	f := newFixture(t)
	defer f.TearDown()

	f.setupFoo()
	f.file("Tiltfile", `
k8s_yaml('foo.yaml')
k8s_resource('foo', labels="test")
`)

	f.load()

	f.assertNumManifests(1)
	f.assertNextManifest("foo", resourceLabels("test"))
}

// TestK8sResourceLabelsAppend verifies that repeated k8s_resource calls
// for the same resource accumulate labels rather than replacing them.
func TestK8sResourceLabelsAppend(t *testing.T) {
	f := newFixture(t)
	defer f.TearDown()

	f.setupFoo()
	f.file("Tiltfile", `
k8s_yaml('foo.yaml')
k8s_resource('foo', labels="test")
k8s_resource('foo', labels="test2")
`)

	f.load()

	f.assertNumManifests(1)
	f.assertNextManifest("foo", resourceLabels("test", "test2"))
}

// TestLocalResourceLabels verifies that local_resource accepts labels
// both as a single string and as a list of strings.
func TestLocalResourceLabels(t *testing.T) {
	f := newFixture(t)
	defer f.TearDown()

	tf := `
local_resource("test", cmd="echo hi", labels="foo")
local_resource("test2", cmd="echo hi2", labels=["bar", "baz"])
`
	f.file("Tiltfile", tf)

	f.load()

	f.assertNumManifests(2)
	f.assertNextManifest("test", resourceLabels("foo"))
	f.assertNextManifest("test2", resourceLabels("bar", "baz"))
}

type fixture struct {
ctx context.Context
out *bytes.Buffer
Expand Down Expand Up @@ -6147,6 +6195,8 @@ func (f *fixture) assertNextManifest(name model.ManifestName, opts ...interface{
f.t.Fatalf("unknown matcher for local target %T", matcher)
}
}
case resourceLabelsHelper:
assert.Equal(f.t, opt.labels, m.Labels)
default:
f.t.Fatalf("unexpected arg to assertNextManifest: %T %v", opt, opt)
}
Expand Down Expand Up @@ -6379,6 +6429,20 @@ func resourceDeps(deps ...string) resourceDependenciesHelper {
return resourceDependenciesHelper{deps: mns}
}

// resourceLabelsHelper is an assertNextManifest matcher for a
// manifest's label set.
type resourceLabelsHelper struct {
	labels map[string]string
}

// resourceLabels builds a matcher that expects each given label to be
// present with the label name as both key and value.
func resourceLabels(labels ...string) resourceLabelsHelper {
	m := make(map[string]string, len(labels))
	for _, name := range labels {
		m[name] = name
	}
	return resourceLabelsHelper{labels: m}
}

type imageHelper struct {
ref string
localRef string
Expand Down

0 comments on commit 85f21fe

Please sign in to comment.