IsTaskDirty: Ignore PullOptions for running tasks

This patch causes the orchestrator to ignore the PullOptions field of a
ContainerSpec when determining whether a task is considered 'dirty'. The
field is ignored only when the current state of the task is READY,
STARTING, or RUNNING.

Docker-DCO-1.1-Signed-off-by: Josh Hawn <josh.hawn@docker.com> (github: jlhawn)
Josh Hawn committed Aug 16, 2017
1 parent b5ca4a2 commit 939e4b4
Showing 2 changed files with 78 additions and 1 deletion.
26 changes: 25 additions & 1 deletion manager/orchestrator/task.go
@@ -67,7 +67,31 @@ func IsTaskDirty(s *api.Service, t *api.Task) bool {
         return false
     }
 
-    return !reflect.DeepEqual(s.Spec.Task, t.Spec) ||
+    // Make shallow copy of the service for the comparison.
+    service := *s
+
+    // For non-failed tasks with a container spec runtime that have already
+    // pulled the required image (i.e., current state is between READY and
+    // RUNNING inclusively), ignore the value of the `PullOptions` field by
+    // setting the copied service to have the same PullOptions value as the
+    // task. A difference in only the `PullOptions` field should not cause
+    // a running (or ready to run) task to be considered 'dirty' when we
+    // handle updates.
+    // See https://github.com/docker/swarmkit/issues/971
+    currentState := t.Status.State
+    ignorePullOpts := api.TaskStateReady <= currentState && currentState <= api.TaskStateRunning
+
+    if ignorePullOpts && service.Spec.Task.GetContainer() != nil && t.Spec.GetContainer() != nil {
+        // Modify a copy of the service's container spec.
+        serviceContainer := *service.Spec.Task.GetContainer()
+        serviceContainer.PullOptions = t.Spec.GetContainer().PullOptions
+
+        service.Spec.Task.Runtime = &api.TaskSpec_Container{
+            Container: &serviceContainer,
+        }
+    }
+
+    return !reflect.DeepEqual(service.Spec.Task, t.Spec) ||
         (t.Endpoint != nil && !reflect.DeepEqual(s.Spec.Endpoint, t.Endpoint.Spec))
 }
 
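For illustration only (not part of the commit): a minimal sketch of how the reworked check behaves for a task that is already RUNNING, assuming the swarmkit api and manager/orchestrator packages as they exist at this commit. The newTaskSpec closure is just local scaffolding for the example, and the expected results are noted in the comments.

package main

import (
    "fmt"

    "github.com/docker/swarmkit/api"
    "github.com/docker/swarmkit/manager/orchestrator"
)

func main() {
    // Build a service spec and an identical task spec, each with its own
    // ContainerSpec so the two sides do not share pointers.
    newTaskSpec := func(image string) api.TaskSpec {
        return api.TaskSpec{
            Runtime: &api.TaskSpec_Container{
                Container: &api.ContainerSpec{Image: image},
            },
        }
    }

    service := &api.Service{Spec: api.ServiceSpec{Task: newTaskSpec("v:1")}}
    task := &api.Task{
        Spec:   newTaskSpec("v:1"),
        Status: api.TaskStatus{State: api.TaskStateRunning},
    }

    // A difference only in PullOptions: with this patch, a RUNNING task is
    // not reported dirty.
    service.Spec.Task.GetContainer().PullOptions = &api.ContainerSpec_PullOptions{
        RegistryAuth: "opaque-token",
    }
    fmt.Println(orchestrator.IsTaskDirty(service, task)) // expected: false

    // A difference in the image as well: the task is dirty again.
    service.Spec.Task.GetContainer().Image = "v:2"
    fmt.Println(orchestrator.IsTaskDirty(service, task)) // expected: true
}

Note how the patch copies the container spec before overwriting PullOptions: the masking stays local to the comparison, and the caller's service object is never mutated.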
53 changes: 53 additions & 0 deletions manager/orchestrator/update/updater_test.go
@@ -118,6 +118,7 @@ func TestUpdater(t *testing.T) {
         },
     }
 
+    // Create the cluster, service, and tasks for the service.
     err := s.Update(func(tx store.Tx) error {
         assert.NoError(t, store.CreateCluster(tx, cluster))
         assert.NoError(t, store.CreateService(tx, service))
@@ -136,6 +137,7 @@ func TestUpdater(t *testing.T) {
         }
     }
 
+    // Change the image and log driver to force an update.
     service.Spec.Task.GetContainer().Image = "v:2"
     service.Spec.Task.LogDriver = &api.Driver{Name: "tasklogdriver"}
     updater := NewUpdater(s, restart.NewSupervisor(s), cluster, service)
@@ -148,6 +150,7 @@ func TestUpdater(t *testing.T) {
         }
     }
 
+    // Update the spec again to force an update.
     service.Spec.Task.GetContainer().Image = "v:3"
     cluster.Spec.TaskDefaults.LogDriver = &api.Driver{Name: "clusterlogdriver"} // make cluster default logdriver.
     service.Spec.Update = &api.UpdateConfig{
@@ -197,6 +200,56 @@ func TestUpdater(t *testing.T) {
             assert.Equal(t, "v:5", task.Spec.GetContainer().Image)
         }
     }
+
+    // Update pull options with new registry auth.
+    service.Spec.Task.GetContainer().PullOptions = &api.ContainerSpec_PullOptions{
+        RegistryAuth: "opaque-token-1",
+    }
+    originalTasks = getRunnableSlotSlice(t, s, service)
+    updater = NewUpdater(s, restart.NewSupervisor(s), cluster, service)
+    updater.Run(ctx, originalTasks)
+    updatedTasks = getRunnableSlotSlice(t, s, service)
+    assert.Len(t, updatedTasks, instances)
+
+    // Confirm that the original runnable tasks are all still there.
+    runnableTaskIDs := make(map[string]struct{}, len(updatedTasks))
+    for _, slot := range updatedTasks {
+        for _, task := range slot {
+            runnableTaskIDs[task.ID] = struct{}{}
+        }
+    }
+    assert.Len(t, runnableTaskIDs, instances)
+    for _, slot := range originalTasks {
+        for _, task := range slot {
+            assert.Contains(t, runnableTaskIDs, task.ID)
+        }
+    }
+
+    // Update the desired state of the tasks to SHUTDOWN to simulate the
+    // case where images failed to pull due to bad registry auth.
+    taskSlots := make([]orchestrator.Slot, len(updatedTasks))
+    assert.NoError(t, s.Update(func(tx store.Tx) error {
+        for i, slot := range updatedTasks {
+            taskSlots[i] = make(orchestrator.Slot, len(slot))
+            for j, task := range slot {
+                task = store.GetTask(tx, task.ID)
+                task.DesiredState = api.TaskStateShutdown
+                task.Status.State = task.DesiredState
+                assert.NoError(t, store.UpdateTask(tx, task))
+                taskSlots[i][j] = task
+            }
+        }
+        return nil
+    }))
+
+    // Update pull options again with a different registry auth.
+    service.Spec.Task.GetContainer().PullOptions = &api.ContainerSpec_PullOptions{
+        RegistryAuth: "opaque-token-2",
+    }
+    updater = NewUpdater(s, restart.NewSupervisor(s), cluster, service)
+    updater.Run(ctx, taskSlots) // Note that these tasks are all shutdown.
+    updatedTasks = getRunnableSlotSlice(t, s, service)
+    assert.Len(t, updatedTasks, instances)
 }
 
 func TestUpdaterFailureAction(t *testing.T) {
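The final section of the test exercises the state guard from the other direction: once a task's observed state is SHUTDOWN, it falls outside the READY-to-RUNNING range, so a change that only touches PullOptions makes it dirty again and the updater replaces it. A minimal sketch of that case at the IsTaskDirty level, under the same assumptions as the earlier example (not part of the commit):

package main

import (
    "fmt"

    "github.com/docker/swarmkit/api"
    "github.com/docker/swarmkit/manager/orchestrator"
)

func main() {
    // Service and task specs that differ only in registry auth.
    newTaskSpec := func(auth string) api.TaskSpec {
        return api.TaskSpec{
            Runtime: &api.TaskSpec_Container{
                Container: &api.ContainerSpec{
                    Image:       "v:1",
                    PullOptions: &api.ContainerSpec_PullOptions{RegistryAuth: auth},
                },
            },
        }
    }

    service := &api.Service{Spec: api.ServiceSpec{Task: newTaskSpec("opaque-token-2")}}
    task := &api.Task{
        Spec:   newTaskSpec("opaque-token-1"),
        Status: api.TaskStatus{State: api.TaskStateShutdown},
    }

    // SHUTDOWN is outside the READY..RUNNING range, so PullOptions are
    // compared again and the task is reported dirty.
    fmt.Println(orchestrator.IsTaskDirty(service, task)) // expected: true
}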
