Skip to content

Commit

Permalink
k8s: apply resources individually. Fixes #3719 (#3739)
Browse files Browse the repository at this point in the history
  • Loading branch information
nicks committed Sep 1, 2020
1 parent b4a738a commit 780867f
Show file tree
Hide file tree
Showing 6 changed files with 130 additions and 37 deletions.
6 changes: 6 additions & 0 deletions integration/configmap/Tiltfile
@@ -0,0 +1,6 @@
# Integration fixture for the configmap test: generates two configmaps
# (one large, one small) from local text files via `kubectl create --dry-run`.
include('../Tiltfile')

# Re-run the Tiltfile (and regenerate the YAML below) whenever either
# source file changes.
watch_file('small.txt')
watch_file('large.txt')
k8s_yaml(local('kubectl create configmap -n tilt-integration large-configmap --from-file=key.txt=./large.txt --dry-run=client -o=yaml', quiet=True))
k8s_yaml(local('kubectl create configmap -n tilt-integration small-configmap --from-file=key.txt=./small.txt --dry-run=client -o=yaml', quiet=True))
28 changes: 28 additions & 0 deletions integration/configmap/large.txt

Large diffs are not rendered by default.

1 change: 1 addition & 0 deletions integration/configmap/small.txt
@@ -0,0 +1 @@
hello world
45 changes: 45 additions & 0 deletions integration/configmap_test.go
@@ -0,0 +1,45 @@
//+build integration

package integration

import (
"context"
"testing"
"time"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

// TestConfigMap checks that Tilt updates a configmap in place when its
// source file changes: the object's data must change while its
// creationTimestamp stays the same (i.e. apply, not delete-and-recreate).
func TestConfigMap(t *testing.T) {
	f := newK8sFixture(t, "configmap")
	defer f.TearDown()

	f.TiltWatch()

	ctx, cancel := context.WithTimeout(f.ctx, time.Minute)
	defer cancel()

	// Poll the small configmap's data. The kubectl error is deliberately
	// ignored: while we're waiting, the object may not exist yet.
	smallConfigMapData := func() (string, error) {
		out, _ := f.runCommand("kubectl", "get", "configmap", "small-configmap", namespaceFlag, "-o=go-template", "--template='{{.data}}'")
		return out.String(), nil
	}

	f.WaitUntil(ctx, "Waiting for small configmap to show up", smallConfigMapData, "hello world")

	createdAtBefore, err := f.runCommand("kubectl", "get", "configmap", "small-configmap", namespaceFlag, "-o=go-template", "--template='{{.metadata.creationTimestamp}}'")
	require.NoError(t, err)
	require.NotEqual(t, "", createdAtBefore.String())

	// Edit the source file; Tilt should re-apply the generated configmap.
	f.ReplaceContents("small.txt", "hello world", "goodbye world")

	f.WaitUntil(ctx, "Waiting for small configmap to get replaced", smallConfigMapData, "goodbye world")

	createdAtAfter, err := f.runCommand("kubectl", "get", "configmap", "small-configmap", namespaceFlag, "-o=go-template", "--template='{{.metadata.creationTimestamp}}'")
	require.NoError(t, err)
	require.NotEqual(t, "", createdAtAfter.String())

	// Make sure we applied the configmap instead of recreating it
	assert.Equal(t, createdAtBefore, createdAtAfter)
}
49 changes: 26 additions & 23 deletions internal/k8s/client.go
Expand Up @@ -246,26 +246,29 @@ func timeoutError(timeout time.Duration) error {
}

func (k K8sClient) Upsert(ctx context.Context, entities []K8sEntity, timeout time.Duration) ([]K8sEntity, error) {
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()

result := make([]K8sEntity, 0, len(entities))

mutable, immutable := MutableAndImmutableEntities(entities)

if len(mutable) > 0 {
newEntities, err := k.applyEntitiesAndMaybeForce(ctx, mutable)
for _, e := range mutable {
innerCtx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()

newEntity, err := k.applyEntityAndMaybeForce(innerCtx, e)
if err != nil {
if ctx.Err() == context.DeadlineExceeded {
return nil, timeoutError(timeout)
}
return nil, err
}
result = append(result, newEntities...)
result = append(result, newEntity...)
}

if len(immutable) > 0 {
newEntities, err := k.forceReplaceEntities(ctx, immutable)
for _, e := range immutable {
innerCtx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()

newEntities, err := k.forceReplaceEntity(innerCtx, e)
if err != nil {
if ctx.Err() == context.DeadlineExceeded {
return nil, timeoutError(timeout)
Expand All @@ -278,19 +281,19 @@ func (k K8sClient) Upsert(ctx context.Context, entities []K8sEntity, timeout tim
return result, nil
}

func (k K8sClient) forceReplaceEntities(ctx context.Context, entities []K8sEntity) ([]K8sEntity, error) {
stdout, stderr, err := k.actOnEntities(ctx, []string{"replace", "-o", "yaml", "--force"}, entities)
func (k K8sClient) forceReplaceEntity(ctx context.Context, entity K8sEntity) ([]K8sEntity, error) {
stdout, stderr, err := k.actOnEntity(ctx, []string{"replace", "-o", "yaml", "--force"}, entity)
if err != nil {
return nil, errors.Wrapf(err, "kubectl replace:\nstderr: %s", stderr)
}

return parseYAMLFromStringWithDeletedResources(stdout)
}

// applyEntitiesAndMaybeForce `kubectl apply`'s the given entities, and if the call fails with
// an immutible field error, attempts to `replace --force` them.
func (k K8sClient) applyEntitiesAndMaybeForce(ctx context.Context, entities []K8sEntity) ([]K8sEntity, error) {
stdout, stderr, err := k.actOnEntities(ctx, []string{"apply", "-o", "yaml"}, entities)
// applyEntityAndMaybeForce `kubectl apply`'s the given entity, and if the call fails with
// an immutable field error, attempts to `replace --force` it.
func (k K8sClient) applyEntityAndMaybeForce(ctx context.Context, entity K8sEntity) ([]K8sEntity, error) {
stdout, stderr, err := k.actOnEntity(ctx, []string{"apply", "-o", "yaml"}, entity)
if err != nil {
reason, shouldTryReplace := maybeShouldTryReplaceReason(stderr)

Expand All @@ -301,13 +304,13 @@ func (k K8sClient) applyEntitiesAndMaybeForce(ctx context.Context, entities []K8
// NOTE(maia): we don't use `kubectl replace --force`, because we want to ensure that all
// dependant pods get deleted rather than orphaned. We WANT these pods to be deleted
// and recreated so they have all the new labels, etc. of their controlling k8s entity.
logger.Get(ctx).Infof("Falling back to 'kubectl delete && create': %s", reason)
logger.Get(ctx).Infof("Applying %s failed. Retrying with 'kubectl delete && create': %s", entity.Name(), reason)
// --ignore-not-found because, e.g., if we fell back due to large metadata.annotations, the object might not exist
_, stderr, err = k.actOnEntities(ctx, []string{"delete", "--ignore-not-found=true"}, entities)
_, stderr, err = k.actOnEntity(ctx, []string{"delete", "--ignore-not-found=true"}, entity)
if err != nil {
return nil, errors.Wrapf(err, "kubectl delete (as part of delete && create):\nstderr: %s", stderr)
}
stdout, stderr, err = k.actOnEntities(ctx, []string{"create", "-o", "yaml"}, entities)
stdout, stderr, err = k.actOnEntity(ctx, []string{"create", "-o", "yaml"}, entity)
if err != nil {
return nil, errors.Wrapf(err, "kubectl create (as part of delete && create):\nstderr: %s", stderr)
}
Expand Down Expand Up @@ -371,20 +374,20 @@ func (k K8sClient) Delete(ctx context.Context, entities []K8sEntity) error {
l.Infof("Deleting via kubectl:")
for _, e := range entities {
l.Infof("→ %s/%s", e.GVK().Kind, e.Name())
}

_, stderr, err := k.actOnEntities(ctx, []string{"delete", "--ignore-not-found"}, entities)
if err != nil {
return errors.Wrapf(err, "kubectl delete:\nstderr: %s", stderr)
_, stderr, err := k.actOnEntity(ctx, []string{"delete", "--ignore-not-found"}, e)
if err != nil {
return errors.Wrapf(err, "kubectl delete:\nstderr: %s", stderr)
}
}
return nil
}

func (k K8sClient) actOnEntities(ctx context.Context, cmdArgs []string, entities []K8sEntity) (stdout string, stderr string, err error) {
func (k K8sClient) actOnEntity(ctx context.Context, cmdArgs []string, entity K8sEntity) (stdout string, stderr string, err error) {
args := append([]string{}, cmdArgs...)
args = append(args, "-f", "-")

rawYAML, err := SerializeSpecYAML(entities)
rawYAML, err := SerializeSpecYAML([]K8sEntity{entity})
if err != nil {
return "", "", errors.Wrapf(err, "serializeYaml for kubectl %s", cmdArgs)
}
Expand Down
38 changes: 24 additions & 14 deletions internal/k8s/client_test.go
Expand Up @@ -44,7 +44,7 @@ func TestUpsert(t *testing.T) {
assert.Nil(t, err)
_, err = f.k8sUpsert(f.ctx, postgres)
assert.Nil(t, err)
assert.Equal(t, 1, len(f.runner.calls))
assert.Equal(t, 5, len(f.runner.calls))
assert.Equal(t, []string{"apply", "-o", "yaml", "-f", "-"}, f.runner.calls[0].argv)
}

Expand All @@ -61,24 +61,26 @@ func TestUpsertMutableAndImmutable(t *testing.T) {

// two different calls: one for mutable entities (namespace, deployment),
// one for immutable (job)
require.Len(t, f.runner.calls, 2)
require.Len(t, f.runner.calls, 3)

call0 := f.runner.calls[0]
call1 := f.runner.calls[1]
require.Equal(t, []string{"apply", "-o", "yaml", "-f", "-"}, call0.argv, "expected args for call 0")
require.Equal(t, []string{"apply", "-o", "yaml", "-f", "-"}, call1.argv, "expected args for call 1")

// compare entities instead of strings because str > entity > string gets weird
call0Entities := mustParseYAML(t, call0.stdin)
require.Len(t, call0Entities, 2, "expect two mutable entities applied")
call0Entity := mustParseYAML(t, call0.stdin)[0]
call1Entity := mustParseYAML(t, call1.stdin)[0]

// `apply` should preserve input order of entities (we sort them further upstream)
require.Equal(t, eDeploy, call0Entities[0], "expect call 0 to have applied deployment first (preserve input order)")
require.Equal(t, eNamespace, call0Entities[1], "expect call 0 to have applied namespace second (preserve input order)")

call1 := f.runner.calls[1]
require.Equal(t, []string{"replace", "-o", "yaml", "--force", "-f", "-"}, call1.argv, "expected args for call 1")
call1Entities := mustParseYAML(t, call1.stdin)
require.Len(t, call1Entities, 1, "expect only one immutable entity applied")
require.Equal(t, eJob, call1Entities[0], "expect call 1 to have applied job")
require.Equal(t, eDeploy, call0Entity, "expect call 0 to have applied deployment first (preserve input order)")
require.Equal(t, eNamespace, call1Entity, "expect call 0 to have applied namespace second (preserve input order)")

call2 := f.runner.calls[2]
require.Equal(t, []string{"replace", "-o", "yaml", "--force", "-f", "-"}, call2.argv, "expected args for call 1")
call2Entities := mustParseYAML(t, call2.stdin)
require.Len(t, call2Entities, 1, "expect only one immutable entity applied")
require.Equal(t, eJob, call2Entities[0], "expect call 1 to have applied job")
}

func TestUpsertAnnotationTooLong(t *testing.T) {
Expand All @@ -95,13 +97,17 @@ func TestUpsertAnnotationTooLong(t *testing.T) {
{"apply", "-o", "yaml", "-f", "-"},
{"delete", "--ignore-not-found=true", "-f", "-"},
{"create", "-o", "yaml", "-f", "-"},
{"apply", "-o", "yaml", "-f", "-"},
{"apply", "-o", "yaml", "-f", "-"},
{"apply", "-o", "yaml", "-f", "-"},
{"apply", "-o", "yaml", "-f", "-"},
}
require.Len(t, f.runner.calls, len(expectedArgs))

for i, call := range f.runner.calls {
require.Equalf(t, expectedArgs[i], call.argv, "expected args for call %d", i)
observedEntities := mustParseYAML(t, call.stdin)
require.Lenf(t, observedEntities, len(postgres), "expect %d entities", len(postgres))
require.Len(t, observedEntities, 1, "expect 1 entity")
}
}

Expand All @@ -112,10 +118,14 @@ func TestUpsertStatefulsetForbidden(t *testing.T) {

f.setStderr(`The StatefulSet "postgres" is invalid: spec: Forbidden: updates to statefulset spec for fields other than 'replicas', 'template', and 'updateStrategy' are forbidden.`)
_, err = f.k8sUpsert(f.ctx, postgres)
if assert.Nil(t, err) && assert.Equal(t, 3, len(f.runner.calls)) {
if assert.Nil(t, err) && assert.Equal(t, 7, len(f.runner.calls)) {
assert.Equal(t, []string{"apply", "-o", "yaml", "-f", "-"}, f.runner.calls[0].argv)
assert.Equal(t, []string{"delete", "--ignore-not-found=true", "-f", "-"}, f.runner.calls[1].argv)
assert.Equal(t, []string{"create", "-o", "yaml", "-f", "-"}, f.runner.calls[2].argv)
assert.Equal(t, []string{"apply", "-o", "yaml", "-f", "-"}, f.runner.calls[3].argv)
assert.Equal(t, []string{"apply", "-o", "yaml", "-f", "-"}, f.runner.calls[4].argv)
assert.Equal(t, []string{"apply", "-o", "yaml", "-f", "-"}, f.runner.calls[5].argv)
assert.Equal(t, []string{"apply", "-o", "yaml", "-f", "-"}, f.runner.calls[6].argv)
}
}

Expand Down

0 comments on commit 780867f

Please sign in to comment.