Skip to content

Commit

Permalink
Merge pull request #2351 from jlhawn/temp_971
Browse files · Browse the repository at this point in the history
IsTaskDirty: Ignore PullOptions for running tasks
  • Branch information (not captured in this snapshot)
aluzzardi committed Aug 17, 2017
2 parents 3882d21 + 2c47a1e commit 163a8c2
Show file tree
Hide file tree
Showing 2 changed files with 76 additions and 1 deletion.
24 changes: 23 additions & 1 deletion manager/orchestrator/task.go
Expand Up @@ -67,7 +67,29 @@ func IsTaskDirty(s *api.Service, t *api.Task) bool {
return false
}

return !reflect.DeepEqual(s.Spec.Task, t.Spec) ||
// Make a deep copy of the service and task spec for the comparison.
serviceTaskSpec := *s.Spec.Task.Copy()

// For non-failed tasks with a container spec runtime that have already
// pulled the required image (i.e., current state is between READY and
// RUNNING inclusively), ignore the value of the `PullOptions` field by
// setting the copied service to have the same PullOptions value as the
// task. A difference in only the `PullOptions` field should not cause
// a running (or ready to run) task to be considered 'dirty' when we
// handle updates.
// See https://github.com/docker/swarmkit/issues/971
currentState := t.Status.State
// Ignore PullOpts if the task is desired to be in a "runnable" state
// and its last known current state is between READY and RUNNING in
// which case we know that the task either successfully pulled its
// container image or didn't need to.
ignorePullOpts := t.DesiredState <= api.TaskStateRunning && currentState >= api.TaskStateReady && currentState <= api.TaskStateRunning
if ignorePullOpts && serviceTaskSpec.GetContainer() != nil && t.Spec.GetContainer() != nil {
// Modify the service's container spec.
serviceTaskSpec.GetContainer().PullOptions = t.Spec.GetContainer().PullOptions
}

return !reflect.DeepEqual(serviceTaskSpec, t.Spec) ||
(t.Endpoint != nil && !reflect.DeepEqual(s.Spec.Endpoint, t.Endpoint.Spec))
}

Expand Down
53 changes: 53 additions & 0 deletions manager/orchestrator/update/updater_test.go
Expand Up @@ -118,6 +118,7 @@ func TestUpdater(t *testing.T) {
},
}

// Create the cluster, service, and tasks for the service.
err := s.Update(func(tx store.Tx) error {
assert.NoError(t, store.CreateCluster(tx, cluster))
assert.NoError(t, store.CreateService(tx, service))
Expand All @@ -136,6 +137,7 @@ func TestUpdater(t *testing.T) {
}
}

// Change the image and log driver to force an update.
service.Spec.Task.GetContainer().Image = "v:2"
service.Spec.Task.LogDriver = &api.Driver{Name: "tasklogdriver"}
updater := NewUpdater(s, restart.NewSupervisor(s), cluster, service)
Expand All @@ -148,6 +150,7 @@ func TestUpdater(t *testing.T) {
}
}

// Update the spec again to force an update.
service.Spec.Task.GetContainer().Image = "v:3"
cluster.Spec.TaskDefaults.LogDriver = &api.Driver{Name: "clusterlogdriver"} // make cluster default logdriver.
service.Spec.Update = &api.UpdateConfig{
Expand Down Expand Up @@ -197,6 +200,56 @@ func TestUpdater(t *testing.T) {
assert.Equal(t, "v:5", task.Spec.GetContainer().Image)
}
}

// Update pull options with new registry auth.
service.Spec.Task.GetContainer().PullOptions = &api.ContainerSpec_PullOptions{
RegistryAuth: "opaque-token-1",
}
originalTasks = getRunnableSlotSlice(t, s, service)
updater = NewUpdater(s, restart.NewSupervisor(s), cluster, service)
updater.Run(ctx, originalTasks)
updatedTasks = getRunnableSlotSlice(t, s, service)
assert.Len(t, updatedTasks, instances)

// Confirm that the original runnable tasks are all still there.
runnableTaskIDs := make(map[string]struct{}, len(updatedTasks))
for _, slot := range updatedTasks {
for _, task := range slot {
runnableTaskIDs[task.ID] = struct{}{}
}
}
assert.Len(t, runnableTaskIDs, instances)
for _, slot := range originalTasks {
for _, task := range slot {
assert.Contains(t, runnableTaskIDs, task.ID)
}
}

// Update the desired state of the tasks to SHUTDOWN to simulate the
// case where images failed to pull due to bad registry auth.
taskSlots := make([]orchestrator.Slot, len(updatedTasks))
assert.NoError(t, s.Update(func(tx store.Tx) error {
for i, slot := range updatedTasks {
taskSlots[i] = make(orchestrator.Slot, len(slot))
for j, task := range slot {
task = store.GetTask(tx, task.ID)
task.DesiredState = api.TaskStateShutdown
task.Status.State = task.DesiredState
assert.NoError(t, store.UpdateTask(tx, task))
taskSlots[i][j] = task
}
}
return nil
}))

// Update pull options again with a different registry auth.
service.Spec.Task.GetContainer().PullOptions = &api.ContainerSpec_PullOptions{
RegistryAuth: "opaque-token-2",
}
updater = NewUpdater(s, restart.NewSupervisor(s), cluster, service)
updater.Run(ctx, taskSlots) // Note that these tasks are all shutdown.
updatedTasks = getRunnableSlotSlice(t, s, service)
assert.Len(t, updatedTasks, instances)
}

func TestUpdaterFailureAction(t *testing.T) {
Expand Down

0 comments on commit 163a8c2

Please sign in to comment.