// Copyright 2014 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
//
// Author: Spencer Kimball (spencer.kimball@gmail.com)

package storage

import (
	"fmt"
	"sync"
	"time"

	"github.com/cockroachdb/cockroach/client"
	"github.com/cockroachdb/cockroach/config"
	"github.com/cockroachdb/cockroach/gossip"
	"github.com/cockroachdb/cockroach/keys"
	"github.com/cockroachdb/cockroach/roachpb"
	"github.com/cockroachdb/cockroach/storage/engine"
	"github.com/cockroachdb/cockroach/util"
	"github.com/cockroachdb/cockroach/util/log"

	"github.com/gogo/protobuf/proto"
)

const (
	// gcQueueMaxSize is the max size of the gc queue.
	gcQueueMaxSize = 100

	// gcQueueTimerDuration is the duration between GCs of queued replicas.
	gcQueueTimerDuration = 1 * time.Second

	// gcByteCountNormalization is the count of GC'able bytes which
	// amount to a score of "1" added to total replica priority.
	gcByteCountNormalization = 1 << 20 // 1 MB

	// intentAgeNormalization is the average age of outstanding intents
	// which amount to a score of "1" added to total replica priority.
	intentAgeNormalization = 24 * time.Hour // 1 day

	// intentAgeThreshold is the threshold after which an extant intent
	// will be resolved.
	intentAgeThreshold = 2 * time.Hour // 2 hours

	// txnCleanupThreshold is the threshold after which a transaction is
	// considered abandoned and fit for removal, as measured by the maximum
	// of its last heartbeat and timestamp.
	// TODO(tschottdorf): need to enforce at all times that this is much
	// larger than the heartbeat interval used by the coordinator.
	txnCleanupThreshold = time.Hour
)

// gcQueue manages a queue of replicas slated to be scanned in their
// entirety using the MVCC versions iterator. The gc queue manages the
// following tasks:
//
//  - GC of version data via TTL expiration (and more complex schemes
//    as implemented going forward).
//  - Resolve extant write intents (pushing their transactions).
//  - GC of old transaction and sequence cache entries. This should include
//    most committed entries almost immediately and, after a threshold on
//    inactivity, all others.
//
// The shouldQueue function combines the need for the above tasks into a
// single priority. If any task is overdue, shouldQueue returns true.
type gcQueue struct {
	baseQueue
}

// newGCQueue returns a new instance of gcQueue.
func newGCQueue(gossip *gossip.Gossip) *gcQueue {
	gcq := &gcQueue{}
	gcq.baseQueue = makeBaseQueue("gc", gcq, gossip, gcQueueMaxSize)
	return gcq
}

func (*gcQueue) needsLeaderLease() bool {
	return true
}

// acceptsUnsplitRanges is false because the proper GC
// policy cannot be determined for ranges that span zone configs.
func (*gcQueue) acceptsUnsplitRanges() bool {
	return false
}

// shouldQueue determines whether a replica should be queued for garbage
// collection, and if so, at what priority. Returns true for shouldQ
// in the event that the cumulative ages of GC'able bytes or extant
// intents exceed thresholds.
func (*gcQueue) shouldQueue(now roachpb.Timestamp, repl *Replica,
	sysCfg *config.SystemConfig) (shouldQ bool, priority float64) {
	desc := repl.Desc()
	zone, err := sysCfg.GetZoneConfigForKey(desc.StartKey)
	if err != nil {
		log.Errorf("could not find zone config for range %s: %s", repl, err)
		return
	}

	// GC score is the total GC'able bytes age normalized by 1 MB * the replica's TTL in seconds.
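	// As a rough worked example (numbers assumed for illustration, not taken
	// from this code): with the 1 MB normalization constant and a zone TTL of
	// 86400s (one day), a replica whose accumulated GC'able byte-age equals
	// 86400 * (1 << 20) byte-seconds scores exactly 1 -- the equivalent of
	// one megabyte of data sitting expired for a full TTL period.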
	gcScore := float64(repl.stats.GetGCBytesAge(now.WallTime)) / float64(zone.GC.TTLSeconds) / float64(gcByteCountNormalization)

	// Intent score. This computes the average age of outstanding intents
	// and normalizes.
	intentScore := repl.stats.GetAvgIntentAge(now.WallTime) / float64(intentAgeNormalization.Nanoseconds()/1E9)

	// Compute priority.
	if gcScore >= 1 {
		priority += gcScore
	}
	if intentScore >= 1 {
		priority += intentScore
	}
	shouldQ = priority > 0
	return
}

// process iterates through all keys in a replica's range, calling the garbage
// collector for each key and associated set of values. GC'd keys are batched
// into GC calls. Extant intents are resolved if intents are older than
// intentAgeThreshold. The transaction and sequence cache records are also
// scanned and old entries evicted. During normal operation, both of these
// records are cleaned up when their respective transaction finishes, so the
// amount of work done here is expected to be small.
//
// Some care needs to be taken to avoid cyclic recreation of entries during GC:
// * a Push initiated due to an intent may recreate a transaction entry
// * resolving an intent may write a new sequence cache entry
// * obtaining the transaction for a sequence cache entry requires a Push
//
// The following order is taken below:
// 1) collect all intents with sufficiently old txn record
// 2) collect these intents' transactions
// 3) scan the transaction table, collecting abandoned or completed txns
// 4) push all of these transactions (possibly recreating entries)
// 5) resolve all intents (unless the txn is still PENDING), which will recreate
//    sequence cache entries (but with the txn timestamp; i.e. likely GC'able)
// 6) scan the sequence table for old entries
// 7) push these transactions (again, recreating txn entries)
// 8) send a GCRequest
func (gcq *gcQueue) process(now roachpb.Timestamp, repl *Replica,
	sysCfg *config.SystemConfig) error {
	snap := repl.store.Engine().NewSnapshot()
	desc := repl.Desc()
	iter := newReplicaDataIterator(desc, snap)
	defer iter.Close()
	defer snap.Close()

	// Lookup the GC policy for the zone containing this key range.
	zone, err := sysCfg.GetZoneConfigForKey(desc.StartKey)
	if err != nil {
		return util.Errorf("could not find zone config for range %s: %s", repl, err)
	}
	gc := engine.NewGarbageCollector(now, zone.GC)

	// Compute intent expiration (intent age at which we attempt to resolve).
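	// For example (an illustrative timeline, not taken from this code): with
	// intentAgeThreshold of 2h, an intent whose timestamp lies before
	// now minus 2h falls below intentExp and becomes a candidate for
	// resolution; likewise, txnExp marks the point before which transaction
	// records count as abandoned.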
	intentExp := now
	intentExp.WallTime -= intentAgeThreshold.Nanoseconds()
	txnExp := now
	txnExp.WallTime -= txnCleanupThreshold.Nanoseconds()

	gcArgs := &roachpb.GCRequest{}
	// TODO(tschottdorf): This is one of these instances in which we want
	// to be more careful that the request ends up on the correct Replica,
	// and we might have to worry about mixing range-local and global keys
	// in a batch which might end up spanning Ranges by the time it executes.
	gcArgs.Key = desc.StartKey.AsRawKey()
	gcArgs.EndKey = desc.EndKey.AsRawKey()

	var expBaseKey roachpb.Key
	var keys []engine.MVCCKey
	var vals [][]byte

	// Maps from txn ID to txn and intent key slice.
	txnMap := map[string]*roachpb.Transaction{}
	intentSpanMap := map[string][]roachpb.Span{}

	// processKeysAndValues is invoked with each key and its set of
	// values. Intents older than the intent age threshold are sent for
	// resolution and values after the MVCC metadata, and possible
	// intent, are sent for garbage collection.
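	// The layout of keys/vals (an observation on how the code below uses the
	// slices, not an addition to it): keys[0]/vals[0] hold the MVCC metadata
	// for expBaseKey, keys[1]/vals[1] hold the provisional intent value when
	// meta.Txn is set, and the remaining entries are versioned values; hence
	// the startIdx of 1 or 2 below.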
	processKeysAndValues := func() {
		// If there's more than a single value for the key, possibly send for GC.
		if len(keys) > 1 {
			meta := &engine.MVCCMetadata{}
			if err := proto.Unmarshal(vals[0], meta); err != nil {
				log.Errorf("unable to unmarshal MVCC metadata for key %q: %s", keys[0], err)
			} else {
				// In the event that there's an active intent, send for
				// intent resolution if older than the threshold.
				startIdx := 1
				if meta.Txn != nil {
					// Keep track of intent to resolve if older than the intent
					// expiration threshold.
					if meta.Timestamp.Less(intentExp) {
						id := string(meta.Txn.ID)
						txnMap[id] = meta.Txn
						intentSpanMap[id] = append(intentSpanMap[id], roachpb.Span{Key: expBaseKey})
					}
					// With an active intent, GC ignores MVCC metadata & intent value.
					startIdx = 2
				}
				// See if any values may be GC'd.
				if gcTS := gc.Filter(keys[startIdx:], vals[startIdx:]); !gcTS.Equal(roachpb.ZeroTimestamp) {
					// TODO(spencer): need to split the requests up into
					// multiple requests in the event that more than X keys
					// are added to the request.
					gcArgs.Keys = append(gcArgs.Keys, roachpb.GCRequest_GCKey{Key: expBaseKey, Timestamp: gcTS})
				}
			}
		}
	}

	// Iterate through the keys and values of this replica's range.
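	// (An ordering note, stated here as an assumption about the engine's key
	// encoding rather than something this file enforces: for each user key
	// the iterator yields the MVCC metadata key first, followed by versioned
	// values from newest to oldest, which is the order gc.Filter expects.)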
	for ; iter.Valid(); iter.Next() {
		iterKey := iter.Key()
		if !iterKey.IsValue() || !iterKey.Key.Equal(expBaseKey) {
			// Moving to the next key (& values).
			processKeysAndValues()
			expBaseKey = iterKey.Key
			if !iterKey.IsValue() {
				keys = []engine.MVCCKey{iter.Key()}
				vals = [][]byte{iter.Value()}
				continue
			}
			// An implicit metadata.
			keys = []engine.MVCCKey{engine.MakeMVCCMetadataKey(iterKey.Key)}
			// A nil value for the encoded MVCCMetadata. This will unmarshal to an
			// empty MVCCMetadata which is sufficient for processKeysAndValues to
			// determine that there is no intent.
			vals = [][]byte{nil}
		}
		keys = append(keys, iter.Key())
		vals = append(vals, iter.Value())
	}
	if iter.Error() != nil {
		return iter.Error()
	}

	// Handle last collected set of keys/vals.
	processKeysAndValues()

	txnKeys, err := processTransactionTable(repl, txnMap, txnExp)
	if err != nil {
		return err
	}
	// From now on, all newly added keys are range-local.
	// TODO(tschottdorf): Might need to use two requests at some point since we
	// hard-coded the full non-local key range in the header, but that does
	// not take into account the range-local keys. It will be OK as long as
	// we send directly to the Replica, though.
	gcArgs.Keys = append(gcArgs.Keys, txnKeys...)

	// Process push transactions in parallel.
	var wg sync.WaitGroup
	for _, txn := range txnMap {
		if txn.Status != roachpb.PENDING {
			continue
		}
		wg.Add(1)
		go pushTxn(repl, now, txn, roachpb.PUSH_ABORT, &wg)
	}
	wg.Wait()

	// Resolve all intents.
	var intents []roachpb.Intent
	for id, txn := range txnMap {
		if txn.Status != roachpb.PENDING {
			for _, intent := range intentSpanMap[id] {
				intents = append(intents, roachpb.Intent{Span: intent, Txn: *txn})
			}
		}
	}
	if pErr := repl.resolveIntents(repl.context(), intents, true /* wait */, false /* !poison */); pErr != nil {
		return pErr.GoError()
	}

	// Deal with any leftover sequence cache keys. There shouldn't be many of
	// them.
	gcArgs.Keys = append(gcArgs.Keys, processSequenceCache(repl, now, txnExp, txnMap)...)

	var ba roachpb.BatchRequest
	// Technically not needed since we're talking directly to the Range.
	ba.RangeID = desc.RangeID
	ba.Timestamp = now
	ba.Add(gcArgs)
	if _, pErr := repl.Send(repl.context(), ba); pErr != nil {
		return pErr.GoError()
	}

	// Store current timestamp as last verification for this replica, as
	// we've just successfully scanned.
	if err := repl.SetLastVerificationTimestamp(now); err != nil {
		log.Errorf("failed to set last verification timestamp for replica %s: %s", repl, err)
	}
	return nil
}

// processTransactionTable scans the transaction table and updates txnMap with
// those transactions which are old and either PENDING or with intents
// registered. In the first case we want to push the transaction so that it is
// aborted, and in the second case we may have to resolve the intents
// successfully before GCing the entry. The transaction records which can be
// GC'ed are returned separately and are not added to txnMap nor intentSpanMap.
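// A record's age is measured against the maximum of its original timestamp
// and its last heartbeat (see the ts.Forward call below), so an actively
// heartbeating transaction is never considered for removal here.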
func processTransactionTable(r *Replica, txnMap map[string]*roachpb.Transaction, cutoff roachpb.Timestamp) ([]roachpb.GCRequest_GCKey, error) {
	snap := r.store.Engine().NewSnapshot()
	defer snap.Close()

	var gcKeys []roachpb.GCRequest_GCKey
	handleOne := func(kv roachpb.KeyValue) error {
		var txn roachpb.Transaction
		if err := kv.Value.GetProto(&txn); err != nil {
			return err
		}
		ts := txn.Timestamp
		if heartbeatTS := txn.LastHeartbeat; heartbeatTS != nil {
			ts.Forward(*heartbeatTS)
		}
		if !ts.Less(cutoff) {
			return nil
		}
		id := string(txn.ID)

		// The transaction record should be considered for removal.
		switch txn.Status {
		case roachpb.PENDING:
			// Marked as running, so we need to push it to abort it but won't
			// try to GC it in this cycle (for convenience).
			// TODO(tschottdorf): refactor so that we can GC PENDING entries
			// in the same cycle, but keeping the calls to pushTxn in a central
			// location (keeping it easy to batch them up in the future).
			txnMap[id] = &txn
			return nil
		case roachpb.ABORTED:
			// If we remove this transaction, it effectively still counts as
			// ABORTED (by design). So this can be GC'ed even if we can't
			// resolve the intents.
			// Note: Most aborted transactions weren't aborted by their client,
			// but instead by the coordinator - those will not have any intents
			// persisted, though they still might exist in the system.
			if err := r.resolveIntents(r.context(),
				roachpb.AsIntents(txn.Intents, &txn), true /* wait */, false /* !poison */); err != nil {
				log.Warningf("failed to resolve intents of aborted txn on gc: %s", err)
			}
		case roachpb.COMMITTED:
			// It's committed, so it doesn't need a push but we can only
			// GC it after its intents are resolved.
			if err := r.resolveIntents(r.context(),
				roachpb.AsIntents(txn.Intents, &txn), true /* wait */, false /* !poison */); err != nil {
				log.Warningf("unable to resolve intents of committed txn on gc: %s", err)
				// Returning the error here would abort the whole GC run, and
				// we don't want that. Instead, we simply don't GC this entry.
				return nil
			}
		default:
			panic(fmt.Sprintf("invalid transaction state: %s", txn))
		}
		gcKeys = append(gcKeys, roachpb.GCRequest_GCKey{Key: kv.Key}) // zero timestamp
		return nil
	}

	startKey := keys.TransactionKey(roachpb.KeyMin, nil)
	endKey := keys.TransactionKey(roachpb.KeyMax, nil)

	_, err := engine.MVCCIterate(snap, startKey, endKey, roachpb.ZeroTimestamp, true /* consistent */, nil /* txn */, false /* !reverse */, func(kv roachpb.KeyValue) (bool, error) {
		return false, handleOne(kv)
	})
	return gcKeys, err
}

// processSequenceCache iterates through the local sequence cache entries,
// pushing the transactions (in cleanup mode) for those entries which appear
// to be old enough. In case the transaction indicates that it's terminated,
// the sequence cache keys are included in the result.
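// The flow is two-phased: first, old entries are collected and their
// transactions pushed with PUSH_TOUCH (or reused from prevTxns if already
// pushed this cycle); then, entries whose transactions came back terminated
// and whose activity predates the cutoff are handed back for GC.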
func processSequenceCache(r *Replica, now, cutoff roachpb.Timestamp, prevTxns map[string]*roachpb.Transaction) []roachpb.GCRequest_GCKey {
	snap := r.store.Engine().NewSnapshot()
	defer snap.Close()

	txns := make(map[string]*roachpb.Transaction)
	idToKeys := make(map[string][]roachpb.GCRequest_GCKey)
	r.sequence.Iterate(snap, func(key, id []byte, v roachpb.SequenceCacheEntry) {
		idStr := string(id)
		// If we've pushed this Txn previously, attempt cleanup (in case the
		// push was successful). Initiate new pushes only for newly discovered
		// "old" entries.
		if prevTxn, ok := prevTxns[idStr]; ok && prevTxn.Status != roachpb.PENDING {
			txns[idStr] = prevTxn
			idToKeys[idStr] = append(idToKeys[idStr], roachpb.GCRequest_GCKey{Key: key})
		} else if !cutoff.Less(v.Timestamp) {
			txns[idStr] = &roachpb.Transaction{ID: id, Key: v.Key, Status: roachpb.PENDING}
			idToKeys[idStr] = append(idToKeys[idStr], roachpb.GCRequest_GCKey{Key: key})
		}
	})

	var wg sync.WaitGroup
	wg.Add(len(txns))
	for _, txn := range txns {
		// Check if the Txn is still alive. If this indicates that the Txn is
		// aborted and old enough to guarantee that any running coordinator
		// would have realized that the transaction wasn't running by means
		// of a heartbeat, then we're free to remove the sequence cache entry.
		// In the most likely case, there isn't even an entry (which will
		// be apparent by a zero timestamp and nil last heartbeat).
		go pushTxn(r, now, txn, roachpb.PUSH_TOUCH, &wg)
	}
	wg.Wait()

	var gcKeys []roachpb.GCRequest_GCKey
	for idStr, txn := range txns {
		if txn.Status == roachpb.PENDING {
			continue
		}
		ts := txn.Timestamp
		if txn.LastHeartbeat != nil {
			ts.Forward(*txn.LastHeartbeat)
		}
		if !cutoff.Less(ts) {
			// This is it, we can delete our sequence cache entries.
			gcKeys = append(gcKeys, idToKeys[idStr]...)
		}
	}
	return gcKeys
}

// timer returns a constant duration to space out GC processing
// for successive queued replicas.
func (*gcQueue) timer() time.Duration {
	return gcQueueTimerDuration
}

// pushTxn attempts to abort the txn via push. The wait group is signaled on
// completion.
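// (Callers above use PUSH_ABORT to force abandoned PENDING transactions to
// abort, and PUSH_TOUCH to merely learn a transaction's current status; the
// pusher is given MaxUserPriority below so that, under the priority-based
// push protocol, it should win against any pushee.)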
func pushTxn(repl *Replica, now roachpb.Timestamp, txn *roachpb.Transaction,
	typ roachpb.PushTxnType, wg *sync.WaitGroup) {
	defer wg.Done() // signal wait group always on completion

	if log.V(1) {
		log.Infof("pushing txn %s ts=%s", txn, txn.OrigTimestamp)
	}

	// Attempt to push the transaction which created the intent.
	pushArgs := &roachpb.PushTxnRequest{
		Span: roachpb.Span{
			Key: txn.Key,
		},
		Now:       now,
		PusherTxn: roachpb.Transaction{Priority: roachpb.MaxUserPriority},
		PusheeTxn: *txn,
		PushType:  typ,
	}
	b := &client.Batch{}
	b.InternalAddRequest(pushArgs)
	br, err := repl.store.DB().RunWithResponse(b)
	if err != nil {
		log.Warningf("push of txn %s failed: %s", txn, err)
		return
	}

	// Update the supplied txn on successful push.
	*txn = br.Responses[0].GetInner().(*roachpb.PushTxnResponse).PusheeTxn
}