forked from juju/juju
/
mutater.go
319 lines (284 loc) · 9.72 KB
/
mutater.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
// Copyright 2019 Canonical Ltd.
// Licensed under the AGPLv3, see LICENCE file for details.
package instancemutater
import (
"fmt"
"strings"
"sync"
"time"
"github.com/juju/clock"
"github.com/juju/collections/set"
"github.com/juju/errors"
"github.com/juju/names/v4"
"github.com/juju/worker/v3"
"github.com/DavinZhang/juju/api/instancemutater"
"github.com/DavinZhang/juju/apiserver/params"
"github.com/DavinZhang/juju/core/instance"
"github.com/DavinZhang/juju/core/life"
"github.com/DavinZhang/juju/core/lxdprofile"
"github.com/DavinZhang/juju/core/status"
"github.com/DavinZhang/juju/core/watcher"
"github.com/DavinZhang/juju/environs"
"github.com/DavinZhang/juju/wrench"
)
//go:generate go run github.com/golang/mock/mockgen -package mocks -destination mocks/mutatercontext_mock.go github.com/DavinZhang/juju/worker/instancemutater MutaterContext
// lifetimeContext was extracted to allow the various Context clients to get
// the benefits of the catacomb encapsulating everything that should happen
// here. A clean implementation would almost certainly not need this.
type lifetimeContext interface {
	// KillWithError stops the worker, recording the given error as the
	// reason for its death.
	KillWithError(error)
	// add registers a worker (e.g. a watcher) so its lifetime is tied to
	// the enclosing catacomb.
	add(worker.Worker) error
	// dying returns a channel that is closed when the worker is shutting down.
	dying() <-chan struct{}
	// errDying returns the error to propagate while the worker is dying.
	errDying() error
}
// MachineContext provides a single machine's view of the worker: lifecycle
// management plus access to the LXD broker and the model's required profiles.
type MachineContext interface {
	lifetimeContext
	// getBroker returns the LXD profiler used to query and assign profiles.
	getBroker() environs.LXDProfiler
	// getRequiredLXDProfiles returns the baseline profile names required
	// for the given model name (i.e. non-charm profiles).
	getRequiredLXDProfiles(string) []string
}
// MutaterMachine bundles everything needed to watch and mutate the LXD
// profiles of a single machine.
type MutaterMachine struct {
	// context supplies lifecycle control and broker access.
	context MachineContext
	logger  Logger
	// machineApi is the instancemutater API facade for this machine.
	machineApi instancemutater.MutaterMachine
	// id is the machine's tag id, used mainly for logging.
	id string
}
// MutaterContext is the top-level context for the mutater loop: it can mint
// per-machine contexts and resolve machine tags into API facades.
type MutaterContext interface {
	MachineContext
	// newMachineContext returns a fresh context for a single machine worker.
	newMachineContext() MachineContext
	// getMachine returns the instancemutater facade for the given machine tag.
	getMachine(tag names.MachineTag) (instancemutater.MutaterMachine, error)
}
// mutater is the central loop state: it tracks one goroutine per watched
// machine and the channels used to signal removal and death.
type mutater struct {
	context MutaterContext
	logger  Logger
	// wg tracks the per-machine goroutines started by startMachines.
	wg *sync.WaitGroup
	// machines maps a machine tag to the channel used to notify its
	// goroutine that the machine was removed from the model cache.
	machines map[names.MachineTag]chan struct{}
	// machineDead receives machines whose goroutines have exited.
	machineDead chan instancemutater.MutaterMachine
}
// startMachines ensures a watching goroutine exists for each supplied
// machine tag. A tag seen for the first time is resolved to a machine;
// KVM containers and manual machines are skipped (profile watching is
// unsupported for them), otherwise a profile-verification watcher is
// created and runMachine is spawned. A tag seen again signals the
// existing goroutine on its channel, prompting it to re-check the
// machine's life (see watchProfileChangesLoop). Any error returned
// causes the worker to restart.
func (m *mutater) startMachines(tags []names.MachineTag) error {
	for _, tag := range tags {
		// Bail out promptly if the worker is shutting down.
		select {
		case <-m.context.dying():
			return m.context.errDying()
		default:
		}
		m.logger.Tracef("received tag %q", tag.String())
		if ch := m.machines[tag]; ch == nil {
			// First time we receive the tag, setup watchers.
			api, err := m.context.getMachine(tag)
			if err != nil {
				return errors.Trace(err)
			}
			id := api.Tag().Id()

			// Ensure we do not watch any KVM containers.
			containerType, err := api.ContainerType()
			if err != nil {
				return errors.Trace(err)
			}
			if containerType == instance.KVM {
				m.logger.Tracef("ignoring KVM container machine-%s", id)
				continue
			}

			profileChangeWatcher, err := api.WatchLXDProfileVerificationNeeded()
			if err != nil {
				if errors.IsNotSupported(err) {
					// NotSupported identifies a manual machine; its LXD
					// profiles are not managed by juju.
					m.logger.Tracef("ignoring manual machine-%s", id)
					continue
				}
				return errors.Annotatef(err, "failed to start watching application lxd profiles for machine-%s", id)
			}

			ch = make(chan struct{})
			m.machines[tag] = ch

			machine := MutaterMachine{
				context:    m.context.newMachineContext(),
				logger:     m.logger,
				machineApi: api,
				id:         id,
			}

			m.wg.Add(1)
			go runMachine(machine, profileChangeWatcher, ch, m.machineDead, func() { m.wg.Done() })
		} else {
			// We've received this tag before, therefore
			// the machine has been removed from the model
			// cache and no longer needed
			// NOTE(review): this send is unbuffered; it relies on the
			// machine goroutine draining `removed` even while exiting
			// (see runMachine's deferred loop) to avoid blocking here.
			ch <- struct{}{}
		}
	}
	return nil
}
// runMachine drives profile watching for a single machine until the
// machine dies or the worker stops. On exit it reports the machine on the
// died channel so the central loop can clean up, while continuing to drain
// the removed channel so a concurrent sender cannot deadlock against it.
func runMachine(
	machine MutaterMachine,
	profileChangeWatcher watcher.NotifyWatcher,
	removed <-chan struct{}, died chan<- instancemutater.MutaterMachine, cleanup func(),
) {
	defer cleanup()
	defer func() {
		// We can't just send on the dead channel because the
		// central loop might be trying to write to us on the
		// removed channel.
		for {
			select {
			case <-machine.context.dying():
				return
			case died <- machine.machineApi:
				return
			case <-removed:
				// Drain a pending removal signal and retry the send.
			}
		}
	}()

	// Tie the watcher's lifetime to the worker's catacomb so it is
	// stopped and its errors surfaced when the worker dies.
	if err := machine.context.add(profileChangeWatcher); err != nil {
		machine.context.KillWithError(err)
		return
	}
	if err := machine.watchProfileChangesLoop(removed, profileChangeWatcher); err != nil {
		machine.context.KillWithError(err)
	}
}
// watchProfileChangesLoop reacts to LXD profile change notifications for
// this machine until the worker dies or the machine is removed and found
// dead. Any error returned will cause the worker to restart.
func (m MutaterMachine) watchProfileChangesLoop(removed <-chan struct{}, profileChangeWatcher watcher.NotifyWatcher) error {
	m.logger.Tracef("watching change on MutaterMachine %s", m.id)
	for {
		select {
		case <-m.context.dying():
			return m.context.errDying()
		case <-profileChangeWatcher.Changes():
			info, err := m.machineApi.CharmProfilingInfo()
			if err != nil {
				// If the machine is not provisioned then we need to wait for
				// new changes from the watcher.
				if params.IsCodeNotProvisioned(errors.Cause(err)) {
					m.logger.Tracef("got not provisioned machine-%s on charm profiling info, wait for another change", m.id)
					continue
				}
				return errors.Trace(err)
			}
			if err = m.processMachineProfileChanges(info); err != nil && errors.IsNotValid(err) {
				// Return to stop mutating the machine, but no need to restart
				// the worker.
				return nil
			} else if err != nil {
				return errors.Trace(err)
			}
		case <-removed:
			// The central loop signalled a removal: re-check the machine's
			// life and exit cleanly once it is dead.
			if err := m.machineApi.Refresh(); err != nil {
				return errors.Trace(err)
			}
			if m.machineApi.Life() == life.Dead {
				return nil
			}
		}
	}
}
// processMachineProfileChanges applies any required LXD profile changes to
// the machine's instance and records the outcome in the machine's
// modification status. A nil info, or one with no current profiles and no
// pending changes, is a no-op. A NotValid error indicates the machine is
// dead; callers treat that as "stop mutating, no restart needed". Any other
// error causes the worker to restart.
func (m MutaterMachine) processMachineProfileChanges(info *instancemutater.UnitProfileInfo) error {
	if info == nil || (len(info.CurrentProfiles) == 0 && len(info.ProfileChanges) == 0) {
		// no changes to be made, return now.
		return nil
	}

	if err := m.machineApi.Refresh(); err != nil {
		return err
	}
	if m.machineApi.Life() == life.Dead {
		return errors.NotValidf("machine %q", m.id)
	}

	// Set the modification status to idle, that way we have a baseline for
	// future changes.
	if err := m.machineApi.SetModificationStatus(status.Idle, "", nil); err != nil {
		return errors.Annotatef(err, "cannot set status for machine %q modification status", m.id)
	}

	// report records success (Applied) or failure (Error) in the machine's
	// modification status and hands the original error back to the caller.
	report := func(retErr error) error {
		if retErr != nil {
			m.logger.Errorf("cannot upgrade machine-%s lxd profiles: %s", m.id, retErr.Error())
			if err := m.machineApi.SetModificationStatus(status.Error, fmt.Sprintf("cannot upgrade machine's lxd profile: %s", retErr.Error()), nil); err != nil {
				m.logger.Errorf("cannot set modification status of machine %q error: %v", m.id, err)
			}
		} else {
			if err := m.machineApi.SetModificationStatus(status.Applied, "", nil); err != nil {
				m.logger.Errorf("cannot reset modification status of machine %q applied: %v", m.id, err)
			}
		}
		return retErr
	}

	// Convert info.ProfileChanges into a struct which can be used to
	// add or remove profiles from a machine. Use it to create a list
	// of expected profiles.
	post, err := m.gatherProfileData(info)
	if err != nil {
		return report(errors.Annotatef(err, "%s", m.id))
	}
	expectedProfiles := m.context.getRequiredLXDProfiles(info.ModelName)
	for _, p := range post {
		// Only entries carrying a profile body are expected to remain
		// applied; name-only entries mark profiles to be removed.
		if p.Profile != nil {
			expectedProfiles = append(expectedProfiles, p.Name)
		}
	}

	verified, err := m.verifyCurrentProfiles(string(info.InstanceId), expectedProfiles)
	if err != nil {
		return report(errors.Annotatef(err, "%s", m.id))
	}
	if verified {
		m.logger.Infof("no changes necessary to machine-%s lxd profiles (%v)", m.id, expectedProfiles)
		return report(nil)
	}

	// Adding a wrench to test charm not running hooks before profile can be applied.
	// Do not bother for the default or model profile. We're not interested in non
	// charm profiles.
	if wrench.IsActive("instance-mutater", "disable-apply-lxdprofile") && len(expectedProfiles) > 1 {
		m.logger.Warningf("waiting 3 minutes to apply lxd profiles %q due to wrench in the works", strings.Join(expectedProfiles, ", "))
		// A select with a single receive case and no default blocks exactly
		// like a plain receive, so receive directly (staticcheck S1000).
		<-clock.WallClock.After(3 * time.Minute)
		m.logger.Warningf("continue with apply lxd profiles")
	}

	m.logger.Infof("machine-%s (%s) assign lxd profiles %q, %#v", m.id, string(info.InstanceId), expectedProfiles, post)
	broker := m.context.getBroker()
	currentProfiles, err := broker.AssignLXDProfiles(string(info.InstanceId), expectedProfiles, post)
	if err != nil {
		m.logger.Errorf("failure to assign lxd profiles %s to machine-%s: %s", expectedProfiles, m.id, err)
		return report(err)
	}

	return report(m.machineApi.SetCharmProfiles(currentProfiles))
}
// gatherProfileData translates the pending profile changes in info into a
// list of ProfilePost values: a name-only entry requests deletion of a
// superseded profile on the LXD server, while an entry carrying a profile
// body requests that profile be written/applied.
func (m MutaterMachine) gatherProfileData(info *instancemutater.UnitProfileInfo) ([]lxdprofile.ProfilePost, error) {
	var posts []lxdprofile.ProfilePost
	for _, change := range info.ProfileChanges {
		previousName, err := lxdprofile.MatchProfileNameByAppName(info.CurrentProfiles, change.ApplicationName)
		if err != nil {
			return nil, err
		}
		hasProfile := !change.Profile.Empty()
		if !hasProfile && previousName == "" {
			// There is no new Profile and no Profile for this application applied
			// already, move on. A charm without an lxd profile.
			continue
		}
		newName := lxdprofile.Name(info.ModelName, change.ApplicationName, change.Revision)
		if previousName != "" && previousName != newName {
			// add the old profile name to the result, so the profile can
			// be deleted from the lxd server.
			posts = append(posts, lxdprofile.ProfilePost{Name: previousName})
		}
		post := lxdprofile.ProfilePost{Name: newName}
		// should not happen, but you never know.
		if hasProfile {
			// We make a copy since the loop var keeps the same pointer.
			profileCopy := change.Profile
			post.Profile = &profileCopy
		}
		posts = append(posts, post)
	}
	return posts, nil
}
// verifyCurrentProfiles reports whether the LXD profiles currently applied
// to the given instance are exactly the expected set. An error from the
// broker while listing profile names is returned unchanged.
func (m MutaterMachine) verifyCurrentProfiles(instID string, expectedProfiles []string) (bool, error) {
	broker := m.context.getBroker()
	obtainedProfiles, err := broker.LXDProfileNames(instID)
	if err != nil {
		return false, err
	}
	obtainedSet := set.NewStrings(obtainedProfiles...)
	expectedSet := set.NewStrings(expectedProfiles...)

	// The two sets are equal iff their union is no larger than either of
	// them; union is symmetric, so one union covers both inclusion checks
	// (the original computed the union twice).
	union := obtainedSet.Union(expectedSet)
	return union.Size() == obtainedSet.Size() && union.Size() == expectedSet.Size(), nil
}