package scheduler

import (
	"fmt"
	"log"

	"github.com/hashicorp/nomad/nomad/structs"
)

const (
	// maxServiceScheduleAttempts is used to limit the number of times
	// we will attempt to schedule if we continue to hit conflicts for services.
	maxServiceScheduleAttempts = 5

	// maxBatchScheduleAttempts is used to limit the number of times
	// we will attempt to schedule if we continue to hit conflicts for batch.
	maxBatchScheduleAttempts = 2

	// allocNotNeeded is the status used when a job no longer requires an allocation
	allocNotNeeded = "alloc not needed due to job update"

	// allocMigrating is the status used when we must migrate an allocation
	allocMigrating = "alloc is being migrated"

	// allocUpdating is the status used when a job requires an update
	allocUpdating = "alloc is being updated due to job update"

	// allocInPlace is the status used when speculating on an in-place update
	allocInPlace = "alloc updating in-place"
)

// SetStatusError wraps an error together with the evaluation status that
// should be recorded when it is returned.
type SetStatusError struct {
	Err        error
	EvalStatus string
}

func (s *SetStatusError) Error() string {
	return s.Err.Error()
}
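
// A minimal usage sketch (the values are illustrative, not from this file):
// a retry handler can abort with a specific terminal status by returning a
// *SetStatusError, which Process unwraps below via a type assertion.
//
//	if job == nil {
//		return false, &SetStatusError{
//			Err:        fmt.Errorf("job %q not found", jobID),
//			EvalStatus: structs.EvalStatusFailed,
//		}
//	}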

// GenericScheduler is used for 'service' and 'batch' type jobs. This scheduler is
// designed for long-lived services, and as such spends more time attempting
// to make a high quality placement. This is the primary scheduler for
// most workloads. It also supports a 'batch' mode to optimize for fast decision
// making at the cost of quality.
type GenericScheduler struct {
	logger  *log.Logger
	state   State
	planner Planner
	batch   bool

	eval         *structs.Evaluation
	job          *structs.Job
	plan         *structs.Plan
	ctx          *EvalContext
	stack        *GenericStack
	limitReached bool
	nextEval     *structs.Evaluation
}

// NewServiceScheduler is a factory function to instantiate a new service scheduler
func NewServiceScheduler(logger *log.Logger, state State, planner Planner) Scheduler {
	s := &GenericScheduler{
		logger:  logger,
		state:   state,
		planner: planner,
		batch:   false,
	}
	return s
}

// NewBatchScheduler is a factory function to instantiate a new batch scheduler
func NewBatchScheduler(logger *log.Logger, state State, planner Planner) Scheduler {
	s := &GenericScheduler{
		logger:  logger,
		state:   state,
		planner: planner,
		batch:   true,
	}
	return s
}
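
// A hypothetical dispatch sketch showing how these two factories are
// typically selected by job type; newSchedulerForEval is illustrative and
// not part of this package's API.
//
//	func newSchedulerForEval(logger *log.Logger, state State, planner Planner, eval *structs.Evaluation) Scheduler {
//		if eval.Type == structs.JobTypeBatch {
//			return NewBatchScheduler(logger, state, planner)
//		}
//		return NewServiceScheduler(logger, state, planner)
//	}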

// Process is used to handle a single evaluation
func (s *GenericScheduler) Process(eval *structs.Evaluation) error {
	// Store the evaluation
	s.eval = eval

	// Verify the evaluation trigger reason is understood
	switch eval.TriggeredBy {
	case structs.EvalTriggerJobRegister, structs.EvalTriggerNodeUpdate,
		structs.EvalTriggerJobDeregister, structs.EvalTriggerRollingUpdate:
	default:
		desc := fmt.Sprintf("scheduler cannot handle '%s' evaluation reason",
			eval.TriggeredBy)
		return setStatus(s.logger, s.planner, s.eval, s.nextEval, structs.EvalStatusFailed, desc)
	}

	// Retry up to the schedule-attempt limit for the job type
	limit := maxServiceScheduleAttempts
	if s.batch {
		limit = maxBatchScheduleAttempts
	}
	if err := retryMax(limit, s.process); err != nil {
		if statusErr, ok := err.(*SetStatusError); ok {
			return setStatus(s.logger, s.planner, s.eval, s.nextEval, statusErr.EvalStatus, err.Error())
		}
		return err
	}

	// Update the status to complete
	return setStatus(s.logger, s.planner, s.eval, s.nextEval, structs.EvalStatusComplete, "")
}
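
// retryMax is defined elsewhere in this package; a minimal sketch of the
// contract assumed above: the callback reports done=true to stop cleanly,
// any error aborts immediately, and exhausting the attempts yields a
// *SetStatusError so the evaluation is marked failed.
//
//	func retryMax(max int, cb func() (bool, error)) error {
//		attempts := 0
//		for attempts < max {
//			done, err := cb()
//			if err != nil {
//				return err
//			}
//			if done {
//				return nil
//			}
//			attempts++
//		}
//		return &SetStatusError{
//			Err:        fmt.Errorf("maximum attempts reached (%d)", max),
//			EvalStatus: structs.EvalStatusFailed,
//		}
//	}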

// process is wrapped in retryMax to iteratively run the handler until we have no
// further work or we've made the maximum number of attempts.
func (s *GenericScheduler) process() (bool, error) {
	// Lookup the Job by ID
	var err error
	s.job, err = s.state.JobByID(s.eval.JobID)
	if err != nil {
		return false, fmt.Errorf("failed to get job '%s': %v",
			s.eval.JobID, err)
	}

	// Create a plan
	s.plan = s.eval.MakePlan(s.job)

	// Create an evaluation context
	s.ctx = NewEvalContext(s.state, s.plan, s.logger)

	// Construct the placement stack; the job may be nil if it was deregistered
	s.stack = NewGenericStack(s.batch, s.ctx)
	if s.job != nil {
		s.stack.SetJob(s.job)
	}

	// Compute the target job allocations
	if err := s.computeJobAllocs(); err != nil {
		s.logger.Printf("[ERR] sched: %#v: %v", s.eval, err)
		return false, err
	}

	// If the plan is a no-op, we can bail
	if s.plan.IsNoOp() {
		return true, nil
	}

	// If the limit of placements was reached we need to create an evaluation
	// to pick up from here after the stagger period.
	if s.limitReached && s.nextEval == nil {
		s.nextEval = s.eval.NextRollingEval(s.job.Update.Stagger)
		if err := s.planner.CreateEval(s.nextEval); err != nil {
			s.logger.Printf("[ERR] sched: %#v failed to make next eval for rolling update: %v", s.eval, err)
			return false, err
		}
		s.logger.Printf("[DEBUG] sched: %#v: rolling update limit reached, next eval '%s' created", s.eval, s.nextEval.ID)
	}

	// Submit the plan
	result, newState, err := s.planner.SubmitPlan(s.plan)
	if err != nil {
		return false, err
	}

	// If we got a state refresh, try again since we have stale data
	if newState != nil {
		s.logger.Printf("[DEBUG] sched: %#v: refresh forced", s.eval)
		s.state = newState
		return false, nil
	}

	// Try again if the plan was not fully committed, since that indicates a
	// likely conflict with another concurrent plan
	fullCommit, expected, actual := result.FullCommit(s.plan)
	if !fullCommit {
		s.logger.Printf("[DEBUG] sched: %#v: attempted %d placements, %d placed",
			s.eval, expected, actual)
		return false, nil
	}

	// Success!
	return true, nil
}
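
// FullCommit is a method on the plan result; a minimal sketch of the
// comparison it is assumed to perform, node by node, between what the plan
// attempted and what the leader actually applied:
//
//	func (p *PlanResult) FullCommit(plan *Plan) (bool, int, int) {
//		expected, actual := 0, 0
//		for name, allocList := range plan.NodeAllocation {
//			expected += len(allocList)
//			actual += len(p.NodeAllocation[name])
//		}
//		return expected == actual, expected, actual
//	}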

// computeJobAllocs is used to reconcile differences between the job,
// existing allocations and node status to update the allocations.
func (s *GenericScheduler) computeJobAllocs() error {
	// Materialize all the task groups; the job could be missing if deregistered
	var groups map[string]*structs.TaskGroup
	if s.job != nil {
		groups = materializeTaskGroups(s.job)
	}

	// Lookup the allocations by JobID
	allocs, err := s.state.AllocsByJob(s.eval.JobID)
	if err != nil {
		return fmt.Errorf("failed to get allocs for job '%s': %v",
			s.eval.JobID, err)
	}

	// Filter out the allocations in a terminal state
	allocs = structs.FilterTerminalAllocs(allocs)

	// Determine the tainted nodes containing job allocs
	tainted, err := taintedNodes(s.state, allocs)
	if err != nil {
		return fmt.Errorf("failed to get tainted nodes for job '%s': %v",
			s.eval.JobID, err)
	}

	// Diff the required and existing allocations
	diff := diffAllocs(s.job, tainted, groups, allocs)
	s.logger.Printf("[DEBUG] sched: %#v: %#v", s.eval, diff)

	// Add all the allocs to stop
	for _, e := range diff.stop {
		s.plan.AppendUpdate(e.Alloc, structs.AllocDesiredStatusStop, allocNotNeeded)
	}

	// Attempt to do the upgrades in place
	diff.update = inplaceUpdate(s.ctx, s.eval, s.job, s.stack, diff.update)

	// Check if a rolling upgrade strategy is being used
	limit := len(diff.update) + len(diff.migrate)
	if s.job != nil && s.job.Update.Rolling() {
		limit = s.job.Update.MaxParallel
	}

	// Treat migrations as an eviction and a new placement.
	if evictAndPlace(s.ctx, diff, diff.migrate, allocMigrating, &limit) {
		s.limitReached = true
	}

	// Treat non in-place updates as an eviction and a new placement. Both
	// helpers must run, and either one hitting the limit marks it reached.
	if evictAndPlace(s.ctx, diff, diff.update, allocUpdating, &limit) {
		s.limitReached = true
	}

	// Nothing remaining to do if placement is not required
	if len(diff.place) == 0 {
		return nil
	}

	// Compute the placements
	return s.computePlacements(diff.place)
}
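
// evictAndPlace is a package helper; a minimal sketch of the behavior
// assumed above: convert up to *limit tuples into a stop update plus a
// pending placement, and report whether the limit cut the work short.
//
//	func evictAndPlace(ctx Context, diff *diffResult, allocs []allocTuple, desc string, limit *int) bool {
//		n := len(allocs)
//		for i := 0; i < n && i < *limit; i++ {
//			a := allocs[i]
//			ctx.Plan().AppendUpdate(a.Alloc, structs.AllocDesiredStatusStop, desc)
//			diff.place = append(diff.place, a)
//		}
//		if n <= *limit {
//			*limit -= n
//			return false
//		}
//		*limit = 0
//		return true
//	}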

// computePlacements computes placements for allocations
func (s *GenericScheduler) computePlacements(place []allocTuple) error {
	// Get the base nodes
	nodes, err := readyNodesInDCs(s.state, s.job.Datacenters)
	if err != nil {
		return err
	}

	// Update the set of placement nodes
	s.stack.SetNodes(nodes)

	// Track the failed task groups so that we can coalesce
	// the failures together to avoid creating many failed allocs.
	failedTG := make(map[*structs.TaskGroup]*structs.Allocation)

	for _, missing := range place {
		// Check if this task group has already failed
		if alloc, ok := failedTG[missing.TaskGroup]; ok {
			alloc.Metrics.CoalescedFailures += 1
			continue
		}

		// Attempt to match the task group
		option, size := s.stack.Select(missing.TaskGroup)

		// Create an allocation for this
		alloc := &structs.Allocation{
			ID:        structs.GenerateUUID(),
			EvalID:    s.eval.ID,
			Name:      missing.Name,
			JobID:     s.job.ID,
			Job:       s.job,
			TaskGroup: missing.TaskGroup.Name,
			Resources: size,
			Metrics:   s.ctx.Metrics(),
		}

		// Set fields based on if we found an allocation option
		if option != nil {
			alloc.NodeID = option.Node.ID
			alloc.TaskResources = option.TaskResources
			alloc.DesiredStatus = structs.AllocDesiredStatusRun
			alloc.ClientStatus = structs.AllocClientStatusPending
			alloc.TaskStates = initTaskState(missing.TaskGroup, structs.TaskStatePending)
			s.plan.AppendAlloc(alloc)
		} else {
			alloc.DesiredStatus = structs.AllocDesiredStatusFailed
			alloc.DesiredDescription = "failed to find a node for placement"
			alloc.ClientStatus = structs.AllocClientStatusFailed
			alloc.TaskStates = initTaskState(missing.TaskGroup, structs.TaskStateDead)
			s.plan.AppendFailed(alloc)
			failedTG[missing.TaskGroup] = alloc
		}
	}
	return nil
}
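
// initTaskState is a package helper; a minimal sketch of the behavior
// assumed above: seed one TaskState entry per task in the group so clients
// can report per-task progress.
//
//	func initTaskState(tg *structs.TaskGroup, state string) map[string]*structs.TaskState {
//		states := make(map[string]*structs.TaskState, len(tg.Tasks))
//		for _, task := range tg.Tasks {
//			states[task.Name] = &structs.TaskState{State: state}
//		}
//		return states
//	}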