/
resolver.go
416 lines (369 loc) · 13.7 KB
/
resolver.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
// Copyright 2015 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package uniter
import (
"fmt"
jujucharm "github.com/juju/charm/v12"
"github.com/juju/charm/v12/hooks"
"github.com/juju/errors"
"github.com/juju/juju/core/life"
"github.com/juju/juju/core/model"
"github.com/juju/juju/rpc/params"
"github.com/juju/juju/worker"
"github.com/juju/juju/worker/uniter/hook"
"github.com/juju/juju/worker/uniter/operation"
"github.com/juju/juju/worker/uniter/remotestate"
"github.com/juju/juju/worker/uniter/resolver"
"github.com/juju/juju/wrench"
)
// ResolverConfig defines configuration for the uniter resolver.
type ResolverConfig struct {
	// ModelType is the type of the model the unit belongs to.
	ModelType model.ModelType

	// ClearResolved clears the unit's "resolved" marker; it is called
	// once an operator-requested resolution has been acted upon.
	ClearResolved func() error

	// ReportHookError reports a failed hook so the error can be
	// surfaced while the resolver awaits resolution.
	ReportHookError func(hook.Info) error

	// ShouldRetryHooks enables automatic retry of failed hooks via the
	// retry-hook timer below.
	ShouldRetryHooks bool

	// StartRetryHookTimer starts the backoff timer used to retry a
	// failed hook; StopRetryHookTimer cancels it and resets backoff.
	StartRetryHookTimer func()
	StopRetryHookTimer  func()

	// VerifyCharmProfile is consulted before upgrade operations run;
	// no hooks run until the correct charm profile is in place.
	VerifyCharmProfile resolver.Resolver

	// The resolvers below are consulted by NextOp in a fixed priority
	// order; the first to return something other than ErrNoOperation
	// determines the next operation.
	UpgradeSeries    resolver.Resolver
	Reboot           resolver.Resolver
	Leadership       resolver.Resolver
	Actions          resolver.Resolver
	CreatedRelations resolver.Resolver
	Relations        resolver.Resolver
	Storage          resolver.Resolver
	Commands         resolver.Resolver
	Secrets          resolver.Resolver

	// OptionalResolvers are additional resolvers consulted (in order)
	// after Leadership and before Secrets.
	OptionalResolvers []resolver.Resolver

	// Logger is used for resolver diagnostics.
	Logger Logger
}
// uniterResolver is the top-level resolver for the uniter. It delegates to
// the sub-resolvers held in its config in a fixed priority order, and owns
// the bookkeeping for the hook-retry backoff timer.
type uniterResolver struct {
	config ResolverConfig
	// retryHookTimerStarted tracks whether the retry-hook timer is
	// currently running, so it is started/stopped exactly once per
	// error/recovery cycle.
	retryHookTimerStarted bool
}
// NewUniterResolver returns a new resolver.Resolver for the uniter.
func NewUniterResolver(cfg ResolverConfig) resolver.Resolver {
	// retryHookTimerStarted starts out false (the zero value): no retry
	// timer is running until a hook actually fails.
	r := &uniterResolver{config: cfg}
	return r
}
// NextOp returns the next operation to run given the local and remote state,
// or resolver.ErrNoOperation when there is nothing to do. Sub-resolvers are
// consulted in a fixed priority order: upgrade-series, reboot, charm upgrade,
// created-relations, leadership, optional resolvers, secrets, actions,
// commands, storage, and finally the lifecycle/hook logic in nextOp.
func (s *uniterResolver) NextOp(
	localState resolver.LocalState,
	remoteState remotestate.Snapshot,
	opFactory operation.Factory,
) (_ operation.Operation, err error) {
	// badge names the phase currently being resolved; it is used only by
	// the deferred error log below.
	badge := "<unspecified>"
	defer func() {
		// ErrNoOperation and ErrRestart are expected control-flow
		// signals, not failures, so they are not logged.
		if err != nil && errors.Cause(err) != resolver.ErrNoOperation && err != resolver.ErrRestart {
			s.config.Logger.Debugf("next %q operation could not be resolved: %v", badge, err)
		}
	}()

	if remoteState.Life == life.Dead || localState.Removed {
		return nil, resolver.ErrUnitDead
	}
	logger := s.config.Logger

	// Operations for series-upgrade need to be resolved early,
	// in particular because no other operations should be run when the unit
	// has completed preparation and is waiting for upgrade completion.
	badge = "upgrade series"
	op, err := s.config.UpgradeSeries.NextOp(localState, remoteState, opFactory)
	if errors.Cause(err) != resolver.ErrNoOperation {
		// ErrDoNotProceed means "run nothing at all right now".
		if errors.Cause(err) == resolver.ErrDoNotProceed {
			return nil, resolver.ErrNoOperation
		}
		return op, err
	}

	// Check if we need to notify the charms because a reboot was detected.
	badge = "reboot"
	op, err = s.config.Reboot.NextOp(localState, remoteState, opFactory)
	if errors.Cause(err) != resolver.ErrNoOperation {
		return op, err
	}

	if localState.Kind == operation.Upgrade {
		badge = "upgrade"
		if localState.Conflicted {
			return s.nextOpConflicted(localState, remoteState, opFactory)
		}
		// continue upgrading the charm
		logger.Infof("resuming charm upgrade")
		return s.newUpgradeOperation(localState, remoteState, opFactory)
	}

	if localState.Restart {
		// We've just run the upgrade op, which will change the
		// unit's charm URL. We need to restart the resolver
		// loop so that we start watching the correct events.
		return nil, resolver.ErrRestart
	}

	if s.retryHookTimerStarted && (localState.Kind != operation.RunHook || localState.Step != operation.Pending) {
		// The hook-retry timer is running, but there is no pending
		// hook operation. We're not in an error state, so stop the
		// timer now to reset the backoff state.
		s.config.StopRetryHookTimer()
		s.retryHookTimerStarted = false
	}

	badge = "relations"
	op, err = s.config.CreatedRelations.NextOp(localState, remoteState, opFactory)
	if errors.Cause(err) != resolver.ErrNoOperation {
		return op, err
	}

	badge = "leadership"
	op, err = s.config.Leadership.NextOp(localState, remoteState, opFactory)
	if errors.Cause(err) != resolver.ErrNoOperation {
		return op, err
	}

	badge = "optional"
	for _, r := range s.config.OptionalResolvers {
		op, err = r.NextOp(localState, remoteState, opFactory)
		if errors.Cause(err) != resolver.ErrNoOperation {
			return op, err
		}
	}

	badge = "secrets"
	op, err = s.config.Secrets.NextOp(localState, remoteState, opFactory)
	if errors.Cause(err) != resolver.ErrNoOperation {
		return op, err
	}

	badge = "actions"
	op, err = s.config.Actions.NextOp(localState, remoteState, opFactory)
	if errors.Cause(err) != resolver.ErrNoOperation {
		return op, err
	}

	badge = "commands"
	op, err = s.config.Commands.NextOp(localState, remoteState, opFactory)
	if errors.Cause(err) != resolver.ErrNoOperation {
		return op, err
	}

	badge = "storage"
	op, err = s.config.Storage.NextOp(localState, remoteState, opFactory)
	if errors.Cause(err) != resolver.ErrNoOperation {
		return op, err
	}

	// If we are to shut down, we don't want to start running any more queued/pending hooks.
	if remoteState.Shutdown {
		badge = "shutdown"
		logger.Debugf("unit agent is shutting down, will not run pending/queued hooks")
		return s.nextOp(localState, remoteState, opFactory)
	}

	switch localState.Kind {
	case operation.RunHook:
		// Prefer the explicit hook step, falling back to the
		// operation-level step when it is unset.
		step := localState.Step
		if localState.HookStep != nil {
			step = *localState.HookStep
		}
		switch step {
		case operation.Pending:
			badge = "resolve hook"
			logger.Infof("awaiting error resolution for %q hook", localState.Hook.Kind)
			return s.nextOpHookError(localState, remoteState, opFactory)
		case operation.Queued:
			badge = "queued hook"
			logger.Infof("found queued %q hook", localState.Hook.Kind)
			if localState.Hook.Kind == hooks.Install {
				// Special case: handle install in nextOp,
				// so we do nothing when the unit is dying.
				return s.nextOp(localState, remoteState, opFactory)
			}
			return opFactory.NewRunHook(*localState.Hook)
		case operation.Done:
			// Only check for the wrench if trace logging is enabled. Otherwise,
			// we'd have to parse the charm url every time just to check to see
			// if a wrench existed.
			badge = "commit hook"
			if localState.CharmURL != "" && logger.IsTraceEnabled() {
				// If it's set, the charm url will parse.
				// NOTE(review): MustParseURL panics on a bad URL rather
				// than returning nil, so the nil check below looks
				// redundant — confirm before removing.
				curl := jujucharm.MustParseURL(localState.CharmURL)
				if curl != nil && wrench.IsActive("hooks", fmt.Sprintf("%s-%s-error", curl.Name, localState.Hook.Kind)) {
					s.config.Logger.Errorf("commit hook %q failed due to a wrench in the works", localState.Hook.Kind)
					return nil, errors.Errorf("commit hook %q failed due to a wrench in the works", localState.Hook.Kind)
				}
			}
			logger.Infof("committing %q hook", localState.Hook.Kind)
			return opFactory.NewSkipHook(*localState.Hook)
		default:
			return nil, errors.Errorf("unknown hook operation step %v", step)
		}
	case operation.Continue:
		badge = "idle"
		logger.Debugf("no operations in progress; waiting for changes")
		return s.nextOp(localState, remoteState, opFactory)
	default:
		return nil, errors.Errorf("unknown operation kind %v", localState.Kind)
	}
}
// nextOpConflicted is called after an upgrade operation has failed, and hasn't
// yet been resolved or reverted. When in this mode, the resolver will only
// consider those two possibilities for progressing.
func (s *uniterResolver) nextOpConflicted(
	localState resolver.LocalState,
	remoteState remotestate.Snapshot,
	opFactory operation.Factory,
) (operation.Operation, error) {
	// Only IAAS models deal with conflicted upgrades.
	// TODO(caas) - what to do here.
	// Verify the charm profile before proceeding. No hooks to run, if the
	// correct one is not yet applied.
	_, verifyErr := s.config.VerifyCharmProfile.NextOp(localState, remoteState, opFactory)
	switch errors.Cause(verifyErr) {
	case resolver.ErrDoNotProceed:
		return nil, resolver.ErrNoOperation
	case resolver.ErrNoOperation:
		// Profile verified; carry on below.
	default:
		// Includes a nil verifyErr, matching the original behaviour.
		return nil, verifyErr
	}
	// An explicit "resolved" from the operator completes the upgrade.
	if remoteState.ResolvedMode != params.ResolvedNone {
		if clearErr := s.config.ClearResolved(); clearErr != nil {
			return nil, errors.Trace(clearErr)
		}
		return opFactory.NewResolvedUpgrade(localState.CharmURL)
	}
	// A forced upgrade to a modified charm reverts the conflicted one.
	if remoteState.ForceCharmUpgrade && s.charmModified(localState, remoteState) {
		return opFactory.NewRevertUpgrade(remoteState.CharmURL)
	}
	// Otherwise there is nothing to do but wait for the operator.
	return nil, resolver.ErrWaiting
}
// newUpgradeOperation returns an operation that upgrades the unit to the
// charm URL reported in the remote state snapshot, once the charm profile
// has been verified.
func (s *uniterResolver) newUpgradeOperation(
	localState resolver.LocalState,
	remoteState remotestate.Snapshot,
	opFactory operation.Factory,
) (operation.Operation, error) {
	// Verify the charm profile before proceeding. No hooks to run, if the
	// correct one is not yet applied.
	_, verifyErr := s.config.VerifyCharmProfile.NextOp(localState, remoteState, opFactory)
	switch errors.Cause(verifyErr) {
	case resolver.ErrDoNotProceed:
		return nil, resolver.ErrNoOperation
	case resolver.ErrNoOperation:
		// Profile verified; proceed with the upgrade.
		return opFactory.NewUpgrade(remoteState.CharmURL)
	default:
		// Includes a nil verifyErr, matching the original behaviour.
		return nil, verifyErr
	}
}
// nextOpHookError decides how to proceed when the unit is stuck on a failed
// hook: retry it, skip it, or wait, depending on the resolved mode in the
// remote state and the hook-retry configuration. It also manages the
// retry-hook backoff timer state.
func (s *uniterResolver) nextOpHookError(
	localState resolver.LocalState,
	remoteState remotestate.Snapshot,
	opFactory operation.Factory,
) (operation.Operation, error) {
	// Report the hook error.
	if err := s.config.ReportHookError(*localState.Hook); err != nil {
		return nil, errors.Trace(err)
	}
	// A forced charm upgrade to a modified charm takes precedence over
	// error resolution.
	if remoteState.ForceCharmUpgrade && s.charmModified(localState, remoteState) {
		return s.newUpgradeOperation(localState, remoteState, opFactory)
	}
	switch remoteState.ResolvedMode {
	case params.ResolvedNone:
		if remoteState.RetryHookVersion > localState.RetryHookVersion {
			// We've been asked to retry: clear the hook timer
			// started state so we'll restart it if this fails.
			//
			// If the hook fails again, we'll re-enter this method
			// with the retry hook versions equal and restart the
			// timer. If the hook succeeds, we'll enter nextOp
			// and stop the timer.
			s.retryHookTimerStarted = false
			return opFactory.NewRunHook(*localState.Hook)
		}
		if !s.retryHookTimerStarted && s.config.ShouldRetryHooks {
			// We haven't yet started a retry timer, so start one
			// now. If we retry and fail, retryHookTimerStarted is
			// cleared so that we'll still start it again.
			s.config.StartRetryHookTimer()
			s.retryHookTimerStarted = true
		}
		return nil, resolver.ErrNoOperation
	case params.ResolvedRetryHooks:
		// Operator asked for the failed hook to be re-run: stop the
		// automatic retry timer and clear the resolved marker first.
		s.config.StopRetryHookTimer()
		s.retryHookTimerStarted = false
		if err := s.config.ClearResolved(); err != nil {
			return nil, errors.Trace(err)
		}
		return opFactory.NewRunHook(*localState.Hook)
	case params.ResolvedNoHooks:
		// Operator asked for the failed hook to be skipped: same timer
		// and resolved-marker bookkeeping, but skip instead of run.
		s.config.StopRetryHookTimer()
		s.retryHookTimerStarted = false
		if err := s.config.ClearResolved(); err != nil {
			return nil, errors.Trace(err)
		}
		return opFactory.NewSkipHook(*localState.Hook)
	default:
		return nil, errors.Errorf(
			"unknown resolved mode %q", remoteState.ResolvedMode,
		)
	}
}
// charmModified reports whether the remote charm state differs from the
// local state — either a different charm URL or a different modified
// version — such that an upgrade operation is warranted.
func (s *uniterResolver) charmModified(local resolver.LocalState, remote remotestate.Snapshot) bool {
	// CAAS models may not yet have read the charm url from state.
	if remote.CharmURL == "" {
		return false
	}
	switch {
	case local.CharmURL != remote.CharmURL:
		s.config.Logger.Debugf("upgrade from %v to %v", local.CharmURL, remote.CharmURL)
		return true
	case local.CharmModifiedVersion != remote.CharmModifiedVersion:
		s.config.Logger.Debugf("upgrade from CharmModifiedVersion %v to %v", local.CharmModifiedVersion, remote.CharmModifiedVersion)
		return true
	default:
		return false
	}
}
// nextOp chooses an operation based on unit lifecycle and pending changes:
// stop/remove hooks when dying or shutting down, the install hook when not
// yet installed, then charm upgrade, config-changed, relations, and finally
// update-status when nothing else needs to run.
func (s *uniterResolver) nextOp(
	localState resolver.LocalState,
	remoteState remotestate.Snapshot,
	opFactory operation.Factory,
) (operation.Operation, error) {
	switch remoteState.Life {
	case life.Alive:
		if remoteState.Shutdown {
			// On shutdown: run the stop hook if we started and haven't
			// stopped; otherwise terminate the agent.
			if localState.Started && !localState.Stopped {
				return opFactory.NewRunHook(hook.Info{Kind: hooks.Stop})
			} else if !localState.Started || localState.Stopped {
				return nil, worker.ErrTerminateAgent
			}
		}
	case life.Dying:
		// Normally we handle relations last, but if we're dying we
		// must ensure that all relations are broken first.
		op, err := s.config.Relations.NextOp(localState, remoteState, opFactory)
		if errors.Cause(err) != resolver.ErrNoOperation {
			return op, err
		}
		// We're not in a hook error and the unit is Dying,
		// so we should proceed to tear down.
		//
		// TODO(axw) move logic for cascading destruction of
		// subordinates, relation units and storage
		// attachments into state, via cleanups.
		if localState.Started && !localState.Stopped {
			return opFactory.NewRunHook(hook.Info{Kind: hooks.Stop})
		} else if localState.Installed && !localState.Removed {
			return opFactory.NewRunHook(hook.Info{Kind: hooks.Remove})
		}
		// Fully stopped and removed: treat the same as Dead.
		fallthrough
	case life.Dead:
		// The unit is dying/dead and stopped, so tell the uniter
		// to terminate.
		return nil, resolver.ErrUnitDead
	}
	// Now that storage hooks have run at least once, before anything else,
	// we need to run the install hook.
	// TODO(cmars): remove !localState.Started. It's here as a temporary
	// measure because unit agent upgrades aren't being performed yet.
	if !localState.Installed && !localState.Started {
		return opFactory.NewRunHook(hook.Info{Kind: hooks.Install})
	}
	if s.charmModified(localState, remoteState) {
		return s.newUpgradeOperation(localState, remoteState, opFactory)
	}
	// A change to any of the config, trust or address hashes triggers
	// a single config-changed hook.
	configHashChanged := localState.ConfigHash != remoteState.ConfigHash
	trustHashChanged := localState.TrustHash != remoteState.TrustHash
	addressesHashChanged := localState.AddressesHash != remoteState.AddressesHash
	if configHashChanged || trustHashChanged || addressesHashChanged {
		return opFactory.NewRunHook(hook.Info{Kind: hooks.ConfigChanged})
	}
	op, err := s.config.Relations.NextOp(localState, remoteState, opFactory)
	if errors.Cause(err) != resolver.ErrNoOperation {
		return op, err
	}
	// UpdateStatus hook runs if nothing else needs to.
	if localState.UpdateStatusVersion != remoteState.UpdateStatusVersion {
		return opFactory.NewRunHook(hook.Info{Kind: hooks.UpdateStatus})
	}
	return nil, resolver.ErrNoOperation
}