init.go
package taskinit

import (
    "context"
    "sort"
    "time"

    "github.com/docker/swarmkit/api"
    "github.com/docker/swarmkit/api/defaults"
    "github.com/docker/swarmkit/log"
    "github.com/docker/swarmkit/manager/orchestrator"
    "github.com/docker/swarmkit/manager/orchestrator/restart"
    "github.com/docker/swarmkit/manager/state/store"
    gogotypes "github.com/gogo/protobuf/types"
)

// InitHandler defines the orchestrator's actions to fix tasks at startup.
type InitHandler interface {
    // IsRelatedService reports whether the service is managed by this orchestrator.
    IsRelatedService(service *api.Service) bool
    // FixTask reconciles the task's state as part of the given batch.
    FixTask(ctx context.Context, batch *store.Batch, t *api.Task)
    // SlotTuple returns the slot tuple identifying the task's slot.
    SlotTuple(t *api.Task) orchestrator.SlotTuple
}

// CheckTasks fixes tasks in the store before the orchestrator runs. The
// previous leader might not have finished processing task updates, leaving
// tasks in an inconsistent state.
func CheckTasks(ctx context.Context, s *store.MemoryStore, readTx store.ReadTx, initHandler InitHandler, startSupervisor *restart.Supervisor) error {
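    // instances collects each slot's tasks; it is consulted after the batch
    // completes to rebuild the restart supervisor's history.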
    instances := make(map[orchestrator.SlotTuple][]*api.Task)
    err := s.Batch(func(batch *store.Batch) error {
        tasks, err := store.FindTasks(readTx, store.All)
        if err != nil {
            return err
        }
        for _, t := range tasks {
            if t.ServiceID == "" {
                continue
            }
            // TODO(aluzzardi): We should NOT retrieve the service here.
            service := store.GetService(readTx, t.ServiceID)
            if service == nil {
                // Service was deleted
                err := batch.Update(func(tx store.Tx) error {
                    return store.DeleteTask(tx, t.ID)
                })
                if err != nil {
                    log.G(ctx).WithError(err).Error("failed to delete task")
                }
                continue
            }
            if !initHandler.IsRelatedService(service) {
                continue
            }

            tuple := initHandler.SlotTuple(t)
            instances[tuple] = append(instances[tuple], t)

            // Apply any task updates from the agent that should already have
            // been handled by task update events.
            initHandler.FixTask(ctx, batch, t)

            // A desired state of "ready" is transient: such a task should be
            // moved to started. The previous leader may not have started it,
            // so retry the start here.
            if t.DesiredState != api.TaskStateReady || t.Status.State > api.TaskStateRunning {
                continue
            }
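
            // Determine the restart delay: prefer the delay from the task's
            // own restart policy, falling back to the cluster default if it
            // is absent or invalid.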
            restartDelay, _ := gogotypes.DurationFromProto(defaults.Service.Task.Restart.Delay)
            if t.Spec.Restart != nil && t.Spec.Restart.Delay != nil {
                var err error
                restartDelay, err = gogotypes.DurationFromProto(t.Spec.Restart.Delay)
                if err != nil {
                    log.G(ctx).WithError(err).Error("invalid restart delay")
                    restartDelay, _ = gogotypes.DurationFromProto(defaults.Service.Task.Restart.Delay)
                }
            }
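
            // Measure the delay from the task's last status timestamp so that
            // time already spent waiting counts toward it; only the remainder
            // is waited out below.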
            if restartDelay != 0 {
                var timestamp time.Time
                if t.Status.AppliedAt != nil {
                    timestamp, err = gogotypes.TimestampFromProto(t.Status.AppliedAt)
                } else {
                    timestamp, err = gogotypes.TimestampFromProto(t.Status.Timestamp)
                }
                if err == nil {
                    restartTime := timestamp.Add(restartDelay)
                    calculatedRestartDelay := time.Until(restartTime)
                    if calculatedRestartDelay < restartDelay {
                        restartDelay = calculatedRestartDelay
                    }
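                    // If some of the delay remains, hand the task to the
                    // restart supervisor; otherwise fall through and start it
                    // immediately.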
                    if restartDelay > 0 {
                        _ = batch.Update(func(tx store.Tx) error {
                            t := store.GetTask(tx, t.ID)
                            // TODO(aluzzardi): This is shady as well. We should have a more generic condition.
                            if t == nil || t.DesiredState != api.TaskStateReady {
                                return nil
                            }
                            startSupervisor.DelayStart(ctx, tx, nil, t.ID, restartDelay, true)
                            return nil
                        })
                        continue
                    }
                } else {
                    log.G(ctx).WithError(err).Error("invalid status timestamp")
                }
            }

            // No delay remaining: start the task now.
            err := batch.Update(func(tx store.Tx) error {
                return startSupervisor.StartNow(tx, t.ID)
            })
            if err != nil {
                log.G(ctx).WithError(err).WithField("task.id", t.ID).Error("moving task out of delayed state failed")
            }
        }
        return nil
    })
    if err != nil {
        return err
    }
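
    // Rebuild restart history for each slot from the tasks collected above.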
    for tuple, instance := range instances {
        // Find the most current spec version. That's the only one
        // we care about for the purpose of reconstructing restart
        // history.
        maxVersion := uint64(0)
        for _, t := range instance {
            if t.SpecVersion != nil && t.SpecVersion.Index > maxVersion {
                maxVersion = t.SpecVersion.Index
            }
        }

        // Create a new slice with just the current spec version tasks.
        var upToDate []*api.Task
        for _, t := range instance {
            if t.SpecVersion != nil && t.SpecVersion.Index == maxVersion {
                upToDate = append(upToDate, t)
            }
        }

        // Sort by creation timestamp.
        sort.Sort(tasksByCreationTimestamp(upToDate))

        // All up-to-date tasks in this instance except the first one
        // should be considered restarted.
        if len(upToDate) < 2 {
            continue
        }
        for _, t := range upToDate[1:] {
            startSupervisor.RecordRestartHistory(tuple, t)
        }
    }
    return nil
}
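
// tasksByCreationTimestamp implements sort.Interface for []*api.Task, ordering
// tasks by Meta.CreatedAt; tasks without a creation timestamp sort first.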
type tasksByCreationTimestamp []*api.Task

func (t tasksByCreationTimestamp) Len() int {
    return len(t)
}

func (t tasksByCreationTimestamp) Swap(i, j int) {
    t[i], t[j] = t[j], t[i]
}

func (t tasksByCreationTimestamp) Less(i, j int) bool {
    if t[i].Meta.CreatedAt == nil {
        return true
    }
    if t[j].Meta.CreatedAt == nil {
        return false
    }
    if t[i].Meta.CreatedAt.Seconds < t[j].Meta.CreatedAt.Seconds {
        return true
    }
    if t[i].Meta.CreatedAt.Seconds > t[j].Meta.CreatedAt.Seconds {
        return false
    }
    return t[i].Meta.CreatedAt.Nanos < t[j].Meta.CreatedAt.Nanos
}