forked from Cloud-Foundations/Dominator
/
mdb.go
116 lines (112 loc) · 3.52 KB
/
mdb.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
package herd
import (
"reflect"
"time"
filegenclient "github.com/Cloud-Foundations/Dominator/lib/filegen/client"
"github.com/Cloud-Foundations/Dominator/lib/mdb"
"github.com/Cloud-Foundations/Dominator/lib/srpc"
)
// mdbUpdate processes a new MDB snapshot. It reconciles the herd's sub
// tables under the herd lock (via mdbUpdateGetLock), schedules the close of
// client resources belonging to removed subs, trims the image interest list
// to the images still wanted, and logs a summary of the changes.
func (herd *Herd) mdbUpdate(mdb *mdb.Mdb) {
	numNew, numDeleted, numChanged, wantedImages, clientResourcesToDelete :=
		herd.mdbUpdateGetLock(mdb)
	// The herd lock was already released inside mdbUpdateGetLock, so these
	// closes do not run while the lock is held.
	for _, clientResource := range clientResourcesToDelete {
		clientResource.ScheduleClose()
	}
	// Clean up unreferenced images.
	herd.imageManager.SetImageInterestList(wantedImages, true)
	// plural returns the English plural suffix for a count: "" when the
	// count is exactly one, "s" otherwise. Replaces three copies of the
	// same if-block.
	plural := func(count int) string {
		if count == 1 {
			return ""
		}
		return "s"
	}
	herd.logger.Printf(
		"MDB update: %d new sub%s, %d removed sub%s, %d changed sub%s",
		numNew, plural(numNew), numDeleted, plural(numDeleted),
		numChanged, plural(numChanged))
}
// mdbUpdateGetLock reconciles the herd's sub tables with a new MDB snapshot
// while holding the herd lock, which it releases before returning. It
// returns the number of new, removed and changed subs, the set of image
// names still wanted, and the client resources of removed subs — the caller
// must schedule their close after this function has unlocked.
func (herd *Herd) mdbUpdateGetLock(mdb *mdb.Mdb) (
	int, int, int, map[string]struct{}, []*srpc.ClientResource) {
	// NOTE(review): the result of LockWithTimeout is not checked here —
	// presumably it panics or logs on timeout rather than returning; verify.
	herd.LockWithTimeout(time.Minute)
	defer herd.Unlock()
	startTime := time.Now()
	numNew := 0
	numDeleted := 0
	numChanged := 0
	// Rebuild the ordered index from scratch, pre-sized to the new MDB.
	herd.subsByIndex = make([]*Sub, 0, len(mdb.Machines))
	// Mark for delete all current subs, then later unmark ones in the new MDB.
	subsToDelete := make(map[string]struct{})
	for _, sub := range herd.subsByName {
		subsToDelete[sub.mdb.Hostname] = struct{}{}
	}
	// The default and next-default images are always wanted, independent of
	// any machine's Required/Planned image.
	wantedImages := make(map[string]struct{})
	wantedImages[herd.defaultImageName] = struct{}{}
	wantedImages[herd.nextDefaultImageName] = struct{}{}
	for _, machine := range mdb.Machines { // Sorted by Hostname.
		if machine.Hostname == "" {
			// Skip malformed entries. NOTE(review): "%s" on machine assumes
			// mdb.Machine implements Stringer — confirm.
			herd.logger.Printf("Empty Hostname field, ignoring \"%s\"\n",
				machine)
			continue
		}
		sub := herd.subsByName[machine.Hostname]
		// Empty image names may be inserted here; they are removed in one
		// go after the loop (delete(wantedImages, "")).
		wantedImages[machine.RequiredImage] = struct{}{}
		wantedImages[machine.PlannedImage] = struct{}{}
		img := herd.imageManager.GetNoError(machine.RequiredImage)
		if sub == nil {
			// Previously unknown machine: create a sub and register it for
			// computed-file updates (channel buffered to 16).
			// NOTE(review): unkeyed composite literals below — go vet flags
			// these; verify the filegenclient.Machine field order.
			sub = &Sub{
				herd:          herd,
				mdb:           machine,
				cancelChannel: make(chan struct{}),
			}
			herd.subsByName[machine.Hostname] = sub
			sub.fileUpdateChannel = herd.computedFilesManager.Add(
				filegenclient.Machine{machine, sub.getComputedFiles(img)}, 16)
			numNew++
		} else {
			// Existing sub: if its required image changed, a synced sub must
			// be re-polled.
			if sub.mdb.RequiredImage != machine.RequiredImage {
				if sub.status == statusSynced {
					sub.status = statusWaitingToPoll
				}
			}
			// Any MDB field change: take the new data, force a full poll,
			// refresh computed files and cancel in-flight work.
			if !reflect.DeepEqual(sub.mdb, machine) {
				sub.mdb = machine
				sub.generationCount = 0 // Force a full poll.
				herd.computedFilesManager.Update(
					filegenclient.Machine{machine, sub.getComputedFiles(img)})
				sub.sendCancel()
				numChanged++
			}
		}
		// Machine is present in the new MDB: keep its sub.
		delete(subsToDelete, machine.Hostname)
		herd.subsByIndex = append(herd.subsByIndex, sub)
		img = herd.imageManager.GetNoError(machine.PlannedImage)
		if img == nil {
			sub.havePlannedImage = false
		} else {
			sub.havePlannedImage = true
		}
	}
	// Drop the placeholder entry produced by machines with empty
	// Required/Planned image names.
	delete(wantedImages, "")
	// Delete flagged subs (those not in the new MDB).
	clientResourcesToDelete := make([]*srpc.ClientResource, 0)
	for subHostname := range subsToDelete {
		sub := herd.subsByName[subHostname]
		// The deleting flag is guarded by its own mutex; the client
		// resource (if any) is handed back for the caller to close after
		// the herd lock is released.
		sub.deletingFlagMutex.Lock()
		sub.deleting = true
		if sub.clientResource != nil {
			clientResourcesToDelete = append(clientResourcesToDelete,
				sub.clientResource)
		}
		sub.deletingFlagMutex.Unlock()
		herd.computedFilesManager.Remove(subHostname)
		delete(herd.subsByName, subHostname)
		numDeleted++
	}
	// Record how long the reconciliation took while holding the lock.
	mdbUpdateTimeDistribution.Add(time.Since(startTime))
	return numNew, numDeleted, numChanged, wantedImages, clientResourcesToDelete
}