From 939e4b4d51917990b1a0ef310f56bfac6f8d5d16 Mon Sep 17 00:00:00 2001
From: Josh Hawn
Date: Mon, 14 Aug 2017 15:49:05 -0700
Subject: [PATCH] IsTaskDirty: Ignore PullOptions for running tasks

This patch causes the orchestrator to ignore the PullOptions field of a
ContainerSpec when determining whether a task is 'dirty'. The field is
ignored if and only if the current state of the task is READY, STARTING,
or RUNNING.

Docker-DCO-1.1-Signed-off-by: Josh Hawn (github: jlhawn)
---
 manager/orchestrator/task.go                | 26 +++++++++-
 manager/orchestrator/update/updater_test.go | 53 +++++++++++++++++++++
 2 files changed, 78 insertions(+), 1 deletion(-)

diff --git a/manager/orchestrator/task.go b/manager/orchestrator/task.go
index 8ae3a4cc26..5134d00956 100644
--- a/manager/orchestrator/task.go
+++ b/manager/orchestrator/task.go
@@ -67,7 +67,31 @@ func IsTaskDirty(s *api.Service, t *api.Task) bool {
 		return false
 	}
 
-	return !reflect.DeepEqual(s.Spec.Task, t.Spec) ||
+	// Make a shallow copy of the service for the comparison.
+	service := *s
+
+	// For non-failed tasks with a container spec runtime that have already
+	// pulled the required image (i.e., current state is between READY and
+	// RUNNING inclusive), ignore the value of the `PullOptions` field by
+	// setting the copied service to have the same PullOptions value as the
+	// task. A difference in only the `PullOptions` field should not cause
+	// a running (or ready to run) task to be considered 'dirty' when we
+	// handle updates.
+	// See https://github.com/docker/swarmkit/issues/971
+	currentState := t.Status.State
+	ignorePullOpts := api.TaskStateReady <= currentState && currentState <= api.TaskStateRunning
+
+	if ignorePullOpts && service.Spec.Task.GetContainer() != nil && t.Spec.GetContainer() != nil {
+		// Modify a copy of the service's container spec.
+		serviceContainer := *service.Spec.Task.GetContainer()
+		serviceContainer.PullOptions = t.Spec.GetContainer().PullOptions
+
+		service.Spec.Task.Runtime = &api.TaskSpec_Container{
+			Container: &serviceContainer,
+		}
+	}
+
+	return !reflect.DeepEqual(service.Spec.Task, t.Spec) ||
 		(t.Endpoint != nil && !reflect.DeepEqual(s.Spec.Endpoint, t.Endpoint.Spec))
 }
 
diff --git a/manager/orchestrator/update/updater_test.go b/manager/orchestrator/update/updater_test.go
index e5e476243d..52492ee9cd 100644
--- a/manager/orchestrator/update/updater_test.go
+++ b/manager/orchestrator/update/updater_test.go
@@ -118,6 +118,7 @@ func TestUpdater(t *testing.T) {
 		},
 	}
 
+	// Create the cluster, service, and tasks for the service.
 	err := s.Update(func(tx store.Tx) error {
 		assert.NoError(t, store.CreateCluster(tx, cluster))
 		assert.NoError(t, store.CreateService(tx, service))
@@ -136,6 +137,7 @@ func TestUpdater(t *testing.T) {
 		}
 	}
 
+	// Change the image and log driver to force an update.
 	service.Spec.Task.GetContainer().Image = "v:2"
 	service.Spec.Task.LogDriver = &api.Driver{Name: "tasklogdriver"}
 	updater := NewUpdater(s, restart.NewSupervisor(s), cluster, service)
@@ -148,6 +150,7 @@ func TestUpdater(t *testing.T) {
 		}
 	}
 
+	// Update the spec again to force an update.
 	service.Spec.Task.GetContainer().Image = "v:3"
 	cluster.Spec.TaskDefaults.LogDriver = &api.Driver{Name: "clusterlogdriver"} // make cluster default logdriver.
 	service.Spec.Update = &api.UpdateConfig{
@@ -197,6 +200,56 @@ func TestUpdater(t *testing.T) {
 			assert.Equal(t, "v:5", task.Spec.GetContainer().Image)
 		}
 	}
+
+	// Update pull options with new registry auth.
+	service.Spec.Task.GetContainer().PullOptions = &api.ContainerSpec_PullOptions{
+		RegistryAuth: "opaque-token-1",
+	}
+	originalTasks = getRunnableSlotSlice(t, s, service)
+	updater = NewUpdater(s, restart.NewSupervisor(s), cluster, service)
+	updater.Run(ctx, originalTasks)
+	updatedTasks = getRunnableSlotSlice(t, s, service)
+	assert.Len(t, updatedTasks, instances)
+
+	// Confirm that the original runnable tasks are all still there.
+	runnableTaskIDs := make(map[string]struct{}, len(updatedTasks))
+	for _, slot := range updatedTasks {
+		for _, task := range slot {
+			runnableTaskIDs[task.ID] = struct{}{}
+		}
+	}
+	assert.Len(t, runnableTaskIDs, instances)
+	for _, slot := range originalTasks {
+		for _, task := range slot {
+			assert.Contains(t, runnableTaskIDs, task.ID)
+		}
+	}
+
+	// Update the desired state of the tasks to SHUTDOWN to simulate the
+	// case where images failed to pull due to bad registry auth.
+	taskSlots := make([]orchestrator.Slot, len(updatedTasks))
+	assert.NoError(t, s.Update(func(tx store.Tx) error {
+		for i, slot := range updatedTasks {
+			taskSlots[i] = make(orchestrator.Slot, len(slot))
+			for j, task := range slot {
+				task = store.GetTask(tx, task.ID)
+				task.DesiredState = api.TaskStateShutdown
+				task.Status.State = task.DesiredState
+				assert.NoError(t, store.UpdateTask(tx, task))
+				taskSlots[i][j] = task
+			}
+		}
+		return nil
+	}))
+
+	// Update pull options again with a different registry auth.
+	service.Spec.Task.GetContainer().PullOptions = &api.ContainerSpec_PullOptions{
+		RegistryAuth: "opaque-token-2",
+	}
+	updater = NewUpdater(s, restart.NewSupervisor(s), cluster, service)
+	updater.Run(ctx, taskSlots) // Note that these tasks are all shutdown.
+	updatedTasks = getRunnableSlotSlice(t, s, service)
+	assert.Len(t, updatedTasks, instances)
 }
 
 func TestUpdaterFailureAction(t *testing.T) {