// Copyright 2014 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License. See the AUTHORS file
// for names of contributors.
//
// Author: Spencer Kimball (spencer.kimball@gmail.com)
// Author: Kathy Spradlin (kathyspradlin@gmail.com)
// Author: Matt Tracy (matt@cockroachlabs.com)

package storage

import (
	"math"
	"math/rand"

	"github.com/cockroachdb/cockroach/config"
	"github.com/cockroachdb/cockroach/proto"
	"github.com/cockroachdb/cockroach/util"
	"github.com/cockroachdb/cockroach/util/log"
)

const (
	// maxFractionUsedThreshold: if the fraction used of a store descriptor
	// capacity is greater than this value, it will never be used as a rebalance
	// target and it will always be eligible to rebalance replicas to other
	// stores.
	maxFractionUsedThreshold = 0.95
	// minFractionUsedThreshold: if the mean fraction used of a list of store
	// descriptors is less than this, then range count will be used to make
	// rebalancing decisions instead of the fraction of bytes used. This is
	// useful for distributing load evenly on nascent deployments.
	minFractionUsedThreshold = 0.02
	// rebalanceFromMean is used to declare a range above and below the average
	// used capacity of the cluster. If a store's usage is below this range, it
	// is a rebalancing target and can accept new replicas; if usage is above
	// this range, the store is eligible to rebalance replicas to other stores.
	rebalanceFromMean = 0.025 // 2.5%
	// rebalanceShouldRebalanceChance is the chance that an individual
	// replica should attempt to rebalance. This introduces probabilistic
	// "jitter" into the shouldRebalance() function: the store will not take
	// every rebalancing opportunity available.
	rebalanceShouldRebalanceChance = 0.05

	// Priorities for various repair operations.
	removeDeadReplicaPriority  float64 = 10000
	addMissingReplicaPriority  float64 = 1000
	removeExtraReplicaPriority float64 = 100
)
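
// As a worked example of the rebalanceFromMean band (illustrative numbers,
// not taken from a real cluster): with a mean fraction used of 0.40, a store
// using less than 0.40 * (1 - 0.025) = 0.39 of its capacity is a rebalancing
// target, while a store using more than 0.40 * (1 + 0.025) = 0.41 may
// rebalance replicas to other stores.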

// AllocatorAction enumerates the various replication adjustments that may be
// recommended by the allocator.
type AllocatorAction int

const (
	aaNoop AllocatorAction = iota
	aaRemove
	aaAdd
	aaRemoveDead
)

// RebalancingOptions are configurable options which affect the way that the
// replicate queue will handle rebalancing opportunities.
type RebalancingOptions struct {
	// AllowRebalance allows this store to attempt to rebalance its own
	// replicas to other stores.
	AllowRebalance bool

	// Deterministic makes rebalance decisions deterministic, based on
	// current cluster statistics. If this flag is not set, rebalance
	// operations will have random behavior. This flag is intended to be set
	// for testing purposes only.
	Deterministic bool
}

// Allocator makes allocation decisions based on available capacity
// in other stores which match the required attributes for a desired
// range replica.
//
// When choosing a new allocation target, three candidates from
// available stores meeting a max fraction of bytes used threshold
// (maxFractionUsedThreshold) are chosen at random and the least
// loaded of the three is selected in order to bias loading towards a
// more balanced cluster, while still spreading load over all
// available servers. "Load" is defined according to fraction of bytes
// used, if greater than minFractionUsedThreshold; otherwise it's
// defined according to range count.
//
// When choosing a rebalance target, a random store is selected from
// amongst the set of stores with fraction of bytes used within
// rebalanceFromMean of the mean.
type Allocator struct {
	storePool *StorePool
	randGen   *rand.Rand
	options   RebalancingOptions
}

// MakeAllocator creates a new allocator using the specified StorePool.
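//
// A minimal usage sketch (hypothetical call site; the storePool, required
// attributes, and existing replicas are assumed to come from elsewhere):
//
//	alloc := MakeAllocator(storePool, RebalancingOptions{AllowRebalance: true})
//	target, err := alloc.AllocateTarget(required, existing, true /* relaxConstraints */, nil)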
func MakeAllocator(storePool *StorePool, options RebalancingOptions) Allocator {
	return Allocator{
		storePool: storePool,
		randGen:   rand.New(rand.NewSource(rand.Int63())),
		options:   options,
	}
}

// getUsedNodes returns a set of node IDs which are already being used
// to store replicas.
func getUsedNodes(existing []proto.Replica) map[proto.NodeID]struct{} {
	usedNodes := map[proto.NodeID]struct{}{}
	for _, replica := range existing {
		usedNodes[replica.NodeID] = struct{}{}
	}
	return usedNodes
}

// ComputeAction determines the exact operation needed to repair the supplied
// range, as governed by the supplied zone configuration. It returns the
// required action that should be taken and a priority with which that action
// should be scheduled.
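//
// For example, with five replicas required and only three present, the
// returned action is aaAdd with priority
// addMissingReplicaPriority + (computeQuorum(5) - 3) = 1000 + 0 = 1000;
// with only two present, the priority rises to 1001.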
func (a *Allocator) ComputeAction(zone config.ZoneConfig, desc *proto.RangeDescriptor) (
	AllocatorAction, float64) {
	deadReplicas := a.storePool.deadReplicas(desc.Replicas)
	if len(deadReplicas) > 0 {
		// The range has dead replicas, which should be removed immediately.
		// Adjust the priority by the number of dead replicas the range has.
		quorum := computeQuorum(len(desc.Replicas))
		liveReplicas := len(desc.Replicas) - len(deadReplicas)
		return aaRemoveDead, removeDeadReplicaPriority + float64(quorum-liveReplicas)
	}

	// TODO(mrtracy): Handle non-homogenous and mismatched attribute sets.
	need := len(zone.ReplicaAttrs)
	have := len(desc.Replicas)
	if have < need {
		// Range is under-replicated, and should add an additional replica.
		// Priority is adjusted by the difference between the current replica
		// count and the quorum of the desired replica count.
		neededQuorum := computeQuorum(need)
		return aaAdd, addMissingReplicaPriority + float64(neededQuorum-have)
	}
	if have > need {
		// Range is over-replicated, and should remove a replica.
		// Ranges with an even number of replicas get extra priority because
		// they have a more fragile quorum.
		return aaRemove, removeExtraReplicaPriority - float64(have%2)
	}

	// Nothing to do.
	return aaNoop, 0
}

// AllocateTarget returns a suitable store for a new allocation with the
// required attributes. Nodes already accommodating existing replicas are ruled
// out as targets. If relaxConstraints is true, then the required attributes
// will be relaxed as necessary, from least specific to most specific, in order
// to allocate a target. If needed, a filter function can be supplied that
// further filters the results: it is passed the store descriptor along with
// the count and used stats, and returns a bool indicating inclusion or
// exclusion from the set of stores being considered.
func (a *Allocator) AllocateTarget(required proto.Attributes, existing []proto.Replica, relaxConstraints bool,
	filter func(storeDesc *proto.StoreDescriptor, count, used *stat) bool) (*proto.StoreDescriptor, error) {
	// Because more redundancy is better than less, if relaxConstraints, the
	// matching here is lenient, and tries to find a target by relaxing an
	// attribute constraint, from last attribute to first.
	for attrs := append([]string(nil), required.Attrs...); ; attrs = attrs[:len(attrs)-1] {
		stores, sl := a.selectRandom(3, proto.Attributes{Attrs: attrs}, existing)

		// Choose the store with the least fraction of bytes used.
		var leastStore *proto.StoreDescriptor
		for _, s := range stores {
			// Filter store descriptor.
			if filter != nil && !filter(s, &sl.count, &sl.used) {
				continue
			}
			if leastStore == nil {
				leastStore = s
				continue
			}
			// Use counts instead of capacities if the cluster has mean
			// fraction used below a threshold level. This is primarily useful
			// for balancing load evenly in nascent deployments.
			if sl.used.mean < minFractionUsedThreshold {
				if s.Capacity.RangeCount < leastStore.Capacity.RangeCount {
					leastStore = s
				}
			} else if s.Capacity.FractionUsed() < leastStore.Capacity.FractionUsed() {
				leastStore = s
			}
		}
		if leastStore != nil {
			return leastStore, nil
		}
		if len(attrs) == 0 {
			return nil, util.Errorf("unable to allocate a target store; no candidates available")
		} else if !relaxConstraints {
			return nil, util.Errorf("unable to allocate a target store; no candidates available with attributes %s", required)
		}
	}
}

// RemoveTarget returns a suitable replica to remove from the provided replica
// set. It attempts to consider which of the provided replicas would be the best
// candidate for removal.
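//
// For example (illustrative numbers): among three replicas whose stores are
// 50%, 60%, and 70% full, the replica on the 70%-full store is returned;
// if mean usage were below minFractionUsedThreshold, the replica on the
// store holding the most ranges would be chosen instead.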
//
// TODO(mrtracy): removeTarget eventually needs to accept the attributes from
// the zone config associated with the provided replicas. This will allow it to
// make correct decisions in the case of ranges with heterogeneous replica
// requirements (i.e. multiple data centers).
func (a Allocator) RemoveTarget(existing []proto.Replica) (proto.Replica, error) {
	if len(existing) == 0 {
		return proto.Replica{}, util.Errorf("must supply at least one replica to allocator.RemoveTarget()")
	}

	// Retrieve store descriptors for the provided replicas from the StorePool.
	type replStore struct {
		repl  proto.Replica
		store *proto.StoreDescriptor
	}
	// Skip replicas whose store descriptor is unavailable; keeping them in
	// the slice with a nil store would panic in the selection loop below.
	replStores := make([]replStore, 0, len(existing))
	usedStat := stat{}
	for i := range existing {
		desc := a.storePool.getStoreDescriptor(existing[i].StoreID)
		if desc == nil {
			continue
		}
		replStores = append(replStores, replStore{
			repl:  existing[i],
			store: desc,
		})
		usedStat.update(desc.Capacity.FractionUsed())
	}

	// Based on store statistics, determine which replica is the "worst" and
	// thus should be removed.
	var worst replStore
	for i, rs := range replStores {
		if i == 0 {
			worst = rs
			continue
		}
		if usedStat.mean < minFractionUsedThreshold {
			if rs.store.Capacity.RangeCount > worst.store.Capacity.RangeCount {
				worst = rs
			}
			continue
		}
		if rs.store.Capacity.FractionUsed() > worst.store.Capacity.FractionUsed() {
			worst = rs
		}
	}
	return worst.repl, nil
}

// RebalanceTarget returns a suitable store for a rebalance target
// with required attributes. Rebalance targets are selected via the
// same mechanism as AllocateTarget(), except the chosen target must
// follow some additional criteria. Namely, if chosen, it must further
// the goal of balancing the cluster.
//
// Simply ignoring a rebalance opportunity in the event that the
// target chosen by AllocateTarget() doesn't fit balancing criteria
// is perfectly fine, as other stores in the cluster will also be
// doing their probabilistic best to rebalance. This helps prevent
// a stampeding herd targeting an abnormally under-utilized store.
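//
// For example (illustrative numbers): with a mean fraction used of 0.98, the
// target cutoff 0.98 * (1 - rebalanceFromMean) = 0.9555 exceeds
// maxFractionUsedThreshold, so eligibility is clamped at 0.95: only stores
// under 95% full are accepted as rebalance targets.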
func (a Allocator) RebalanceTarget(required proto.Attributes, existing []proto.Replica) *proto.StoreDescriptor {
	if !a.options.AllowRebalance {
		return nil
	}
	filter := func(s *proto.StoreDescriptor, count, used *stat) bool {
		// In clusters with very low disk usage, a store is eligible to be a
		// rebalancing target if the number of ranges on that store is below
		// average. This is primarily useful for distributing load evenly in a
		// nascent deployment.
		if used.mean < minFractionUsedThreshold {
			return float64(s.Capacity.RangeCount) < count.mean
		}
		// A store is eligible to be a rebalancing target if its disk usage is
		// sufficiently below the mean usage for stores with matching
		// attributes.
		maxFractionUsed := used.mean * (1 - rebalanceFromMean)
		if maxFractionUsedThreshold < maxFractionUsed {
			// In clusters with very high average usage, rebalancing is clamped
			// at maxFractionUsedThreshold: even if a store's usage is below
			// average, it will not be a rebalancing target if usage is above
			// this maximum threshold.
			maxFractionUsed = maxFractionUsedThreshold
		}
		return s.Capacity.FractionUsed() < maxFractionUsed
	}

	// Note that relaxConstraints is false; on a rebalance, there is
	// no sense in relaxing constraints; wait until a better option
	// is available.
	s, err := a.AllocateTarget(required, existing, false /* relaxConstraints */, filter)
	if err != nil {
		return nil
	}
	return s
}

// shouldRebalance returns whether the specified store should attempt to
// rebalance a replica to another store.
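//
// For example (illustrative numbers): with a mean fraction used of 0.40, a
// store becomes eligible once its own usage exceeds
// 0.40 * (1 + rebalanceFromMean) = 0.41, subject to the random jitter applied
// in non-deterministic mode.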
func (a Allocator) shouldRebalance(storeID proto.StoreID) bool {
	if !a.options.AllowRebalance {
		return false
	}
	// In production, add some random jitter to shouldRebalance.
	if !a.options.Deterministic && a.randGen.Float32() > rebalanceShouldRebalanceChance {
		return false
	}
	storeDesc := a.storePool.getStoreDescriptor(storeID)
	if storeDesc == nil {
		if log.V(1) {
			log.Warningf(
				"shouldRebalance couldn't find store with id %d in StorePool",
				storeID)
		}
		return false
	}
	sl := a.storePool.getStoreList(*storeDesc.CombinedAttrs(), a.options.Deterministic)

	// In clusters with very low disk usage, a store is eligible for rebalancing
	// if the number of ranges on the store is above average. This is primarily
	// useful for distributing load in a nascent deployment.
	if sl.used.mean < minFractionUsedThreshold {
		return float64(storeDesc.Capacity.RangeCount) > math.Ceil(sl.count.mean)
	}
	// A store is eligible for rebalancing if its disk usage is sufficiently above
	// the mean usage for stores with matching attributes.
	minFractionUsed := sl.used.mean * (1 + rebalanceFromMean)
	if maxFractionUsedThreshold < minFractionUsed {
		// In clusters with very high usage, we will allow replicas to seek
		// rebalancing opportunities even if they are below the cluster's average
		// usage.
		minFractionUsed = maxFractionUsedThreshold
	}
	return storeDesc.Capacity.FractionUsed() > minFractionUsed
}

// selectRandom chooses up to count random store descriptors which match the
// required attributes and do not include any of the existing replicas. It
// returns the list of matching descriptors and the store list matching the
// required attributes.
func (a Allocator) selectRandom(count int, required proto.Attributes, existing []proto.Replica) ([]*proto.StoreDescriptor, *StoreList) {
	var descs []*proto.StoreDescriptor
	sl := a.storePool.getStoreList(required, a.options.Deterministic)
	used := getUsedNodes(existing)

	// Randomly permute available stores matching the required attributes.
	for _, idx := range a.randGen.Perm(len(sl.stores)) {
		// Skip used nodes.
		if _, ok := used[sl.stores[idx].Node.NodeID]; ok {
			continue
		}
		// Add this store; exit loop if we've satisfied count.
		descs = append(descs, sl.stores[idx])
		if len(descs) >= count {
			break
		}
	}
	if len(descs) == 0 {
		return nil, nil
	}
	return descs, sl
}

// computeQuorum computes the quorum value for the given number of nodes.
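//
// For example, computeQuorum(3) == 2, computeQuorum(4) == 3, and
// computeQuorum(5) == 3.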
func computeQuorum(nodes int) int {
	return (nodes / 2) + 1
}