// Copyright 2014 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License. See the AUTHORS file
// for names of contributors.
//
// Author: Spencer Kimball (spencer.kimball@gmail.com)

package storage
import (
"bytes"
"fmt"
"sync"
"sync/atomic"
"time"
"github.com/cockroachdb/cockroach/client"
"github.com/cockroachdb/cockroach/config"
"github.com/cockroachdb/cockroach/gossip"
"github.com/cockroachdb/cockroach/keys"
"github.com/cockroachdb/cockroach/multiraft"
"github.com/cockroachdb/cockroach/proto"
"github.com/cockroachdb/cockroach/security"
"github.com/cockroachdb/cockroach/storage/engine"
"github.com/cockroachdb/cockroach/structured"
"github.com/cockroachdb/cockroach/util"
"github.com/cockroachdb/cockroach/util/encoding"
"github.com/cockroachdb/cockroach/util/hlc"
"github.com/cockroachdb/cockroach/util/log"
"github.com/cockroachdb/cockroach/util/retry"
"github.com/cockroachdb/cockroach/util/stop"
"github.com/cockroachdb/cockroach/util/tracer"
"github.com/coreos/etcd/raft"
"github.com/coreos/etcd/raft/raftpb"
gogoproto "github.com/gogo/protobuf/proto"
"github.com/google/btree"
"golang.org/x/net/context"
)
const (
// GCResponseCacheExpiration is the expiration duration for response
// cache entries.
GCResponseCacheExpiration = 1 * time.Hour
// rangeIDAllocCount is the number of Range IDs to allocate per allocation.
rangeIDAllocCount = 10
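// Raft timing defaults. The heartbeat and election values below are
// expressed as multiples of the tick interval; see StoreContext.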
defaultRaftTickInterval = 100 * time.Millisecond
defaultHeartbeatIntervalTicks = 3
defaultRaftElectionTimeoutTicks = 15
// ttlCapacityGossip is time-to-live for capacity-related info.
ttlCapacityGossip = 2 * time.Minute
)
var (
// defaultRangeRetryOptions are default retry options for retrying commands
// sent to the store's ranges, for WriteTooOld and WriteIntent errors.
defaultRangeRetryOptions = retry.Options{
InitialBackoff: 50 * time.Millisecond,
MaxBackoff: 5 * time.Second,
Multiplier: 2,
}
// TestStoreContext has some fields initialized with values relevant
// in tests.
TestStoreContext = StoreContext{
RaftTickInterval: 100 * time.Millisecond,
RaftHeartbeatIntervalTicks: 1,
RaftElectionTimeoutTicks: 2,
ScanInterval: 10 * time.Minute,
}
)
var (
changeTypeRaftToInternal = map[raftpb.ConfChangeType]proto.ReplicaChangeType{
raftpb.ConfChangeAddNode: proto.ADD_REPLICA,
raftpb.ConfChangeRemoveNode: proto.REMOVE_REPLICA,
}
changeTypeInternalToRaft = map[proto.ReplicaChangeType]raftpb.ConfChangeType{
proto.ADD_REPLICA: raftpb.ConfChangeAddNode,
proto.REMOVE_REPLICA: raftpb.ConfChangeRemoveNode,
}
)
// verifyKeyLength verifies key length. Extra key length is allowed for
// the local key prefix (for example, a transaction record), and also for
// keys prefixed with the meta1 or meta2 addressing prefixes. There is a
// special case for both key-local AND meta1 or meta2 addressing prefixes.
func verifyKeyLength(key proto.Key) error {
maxLength := proto.KeyMaxLength
if bytes.HasPrefix(key, keys.LocalRangePrefix) {
key = key[len(keys.LocalRangePrefix):]
_, key = encoding.DecodeBytes(key, nil)
}
if bytes.HasPrefix(key, keys.MetaPrefix) {
key = key[len(keys.Meta1Prefix):]
}
if len(key) > maxLength {
return util.Errorf("maximum key length exceeded for %q", key)
}
return nil
}
// verifyKeys verifies keys. If checkEndKey is true, then the end key
// is verified to be non-nil and greater than start key. If
// checkEndKey is false, end key is verified to be nil. Additionally,
// verifies that start key is less than KeyMax and end key is less
// than or equal to KeyMax. It also verifies that a key range that
// contains range-local keys is completely range-local.
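// For example, with checkEndKey set, the pair ("a", "c") verifies, while a
// pair whose end key sorts at or before its start key (or beyond KeyMax)
// does not.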
func verifyKeys(start, end proto.Key, checkEndKey bool) error {
if err := verifyKeyLength(start); err != nil {
return err
}
if !start.Less(proto.KeyMax) {
return util.Errorf("start key %q must be less than KeyMax", start)
}
if !checkEndKey {
if len(end) != 0 {
return util.Errorf("end key %q should not be specified for this operation", end)
}
return nil
}
if end == nil {
return util.Errorf("end key must be specified")
}
if err := verifyKeyLength(end); err != nil {
return err
}
if proto.KeyMax.Less(end) {
return util.Errorf("end key %q must be less than or equal to KeyMax", end)
}
if !start.Less(end) {
return util.Errorf("end key %q must be greater than start %q", end, start)
}
if bytes.HasPrefix(start, keys.LocalRangePrefix) && !bytes.HasPrefix(end, keys.LocalRangePrefix) {
return util.Errorf("start key is range-local, but end key is not")
}
return nil
}
type rangeAlreadyExists struct {
rng *Replica
}
// Error implements the error interface.
func (e *rangeAlreadyExists) Error() string {
return fmt.Sprintf("range for Range ID %d already exists on store", e.rng.Desc().RangeID)
}
// rangeKeyItem is a common interface implemented by rangeBTreeKey and Replica.
type rangeKeyItem interface {
getKey() proto.Key
}
// rangeBTreeKey is a type alias of proto.Key that implements the
// rangeKeyItem interface and the btree.Item interface.
type rangeBTreeKey proto.Key
var _ rangeKeyItem = rangeBTreeKey{}
func (k rangeBTreeKey) getKey() proto.Key {
return (proto.Key)(k)
}
var _ btree.Item = rangeBTreeKey{}
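// Less returns true if the key is less than the given item's key.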
func (k rangeBTreeKey) Less(i btree.Item) bool {
return k.getKey().Less(i.(rangeKeyItem).getKey())
}
var _ rangeKeyItem = &Replica{}
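// getKey returns the replica's end key, the key by which replicas are
// ordered in the store's replicasByKey btree.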
func (r *Replica) getKey() proto.Key {
return r.Desc().EndKey
}
var _ btree.Item = &Replica{}
// Less returns true if the range's end key is less than the given item's key.
func (r *Replica) Less(i btree.Item) bool {
return r.getKey().Less(i.(rangeKeyItem).getKey())
}
// A NotBootstrappedError indicates that an engine has not yet been
// bootstrapped due to a store identifier not being present.
type NotBootstrappedError struct{}
// Error formats error.
func (e *NotBootstrappedError) Error() string {
return "store has not been bootstrapped"
}
// storeRangeSet is an implementation of rangeSet which
// cycles through a store's replicasByKey btree.
type storeRangeSet struct {
store *Store
rangeIDs []proto.RangeID // Range IDs of ranges to be visited.
visited int // Number of visited ranges. 0 when Visit() is not being called.
}
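// newStoreRangeSet creates a storeRangeSet which iterates over the given
// store's replicas.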
func newStoreRangeSet(store *Store) *storeRangeSet {
return &storeRangeSet{
store: store,
visited: 0,
}
}
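// Visit calls the visitor on each replica in the store, iterating over a
// snapshot of range IDs taken at call time. It stops early if the visitor
// returns false and skips replicas that have since been removed.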
func (rs *storeRangeSet) Visit(visitor func(*Replica) bool) {
// Copy the range IDs to a slice and iterate over the slice so
// that we can safely (e.g., no race, no range skip) iterate
// over ranges regardless of how BTree is implemented.
rs.store.mu.RLock()
rs.rangeIDs = make([]proto.RangeID, rs.store.replicasByKey.Len())
i := 0
rs.store.replicasByKey.Ascend(func(item btree.Item) bool {
rs.rangeIDs[i] = item.(*Replica).Desc().RangeID
i++
return true
})
rs.store.mu.RUnlock()
rs.visited = 0
for _, rangeID := range rs.rangeIDs {
rs.visited++
rs.store.mu.RLock()
rng, ok := rs.store.replicas[rangeID]
rs.store.mu.RUnlock()
if ok {
if !visitor(rng) {
break
}
}
}
rs.visited = 0
}
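// EstimatedCount returns the number of ranges left to be visited by an
// in-progress Visit, or the total number of replicas when no Visit is in
// progress.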
func (rs *storeRangeSet) EstimatedCount() int {
rs.store.mu.RLock()
defer rs.store.mu.RUnlock()
if rs.visited <= 0 {
return rs.store.replicasByKey.Len()
}
return len(rs.rangeIDs) - rs.visited
}
// A Store maintains a map of replicas keyed by range ID and a btree of
// replicas keyed by end key. A Store corresponds to one physical device.
type Store struct {
Ident proto.StoreIdent
ctx StoreContext
db *client.DB
engine engine.Engine // The underlying key-value store
_allocator *allocator // Makes allocation decisions
rangeIDAlloc *idAllocator // Range ID allocator
gcQueue *gcQueue // Garbage collection queue
_splitQueue *splitQueue // Range splitting queue
verifyQueue *verifyQueue // Checksum verification queue
replicateQueue *replicateQueue // Replication queue
_rangeGCQueue *rangeGCQueue // Range GC queue
scanner *replicaScanner // Range scanner
feed StoreEventFeed // Event Feed
multiraft *multiraft.MultiRaft
started int32
stopper *stop.Stopper
startedAt int64
nodeDesc *proto.NodeDescriptor
initComplete sync.WaitGroup // Signaled by async init tasks
mu sync.RWMutex // Protects variables below...
replicas map[proto.RangeID]*Replica // Map of replicas by Range ID
replicasByKey *btree.BTree // btree keyed by the ranges' end keys.
uninitReplicas map[proto.RangeID]*Replica // Map of uninitialized replicas by Range ID
}
var _ multiraft.Storage = &Store{}
// A StoreContext encompasses the auxiliary objects and configuration
// required to create a store.
// All fields holding a pointer or an interface are required to create
// a store; the rest will have sane defaults set if omitted.
type StoreContext struct {
Clock *hlc.Clock
DB *client.DB
Gossip *gossip.Gossip
Transport multiraft.Transport
// RangeRetryOptions are the retry options when retryable errors are
// encountered sending commands to ranges.
RangeRetryOptions retry.Options
// RaftTickInterval is the resolution of the Raft timer; other raft timeouts
// are defined in terms of multiples of this value.
RaftTickInterval time.Duration
// RaftHeartbeatIntervalTicks is the number of ticks that pass between heartbeats.
RaftHeartbeatIntervalTicks int
// RaftElectionTimeoutTicks is the number of ticks that must pass before a follower
// considers a leader to have failed and calls a new election. Should be significantly
// higher than RaftHeartbeatIntervalTicks. The raft paper recommends a value of 150ms
// for local networks.
RaftElectionTimeoutTicks int
// ScanInterval is the default value for the scan interval.
ScanInterval time.Duration
// ScanMaxIdleTime is the maximum time the scanner will be idle between ranges.
// If enabled (> 0), the scanner may complete in less than ScanInterval for small
// stores.
ScanMaxIdleTime time.Duration
// EventFeed is a feed to which this store will publish events.
EventFeed *util.Feed
// Tracer is a request tracer.
Tracer *tracer.Tracer
}
// Valid returns true if the StoreContext is populated correctly.
// We don't check for Gossip and DB since some of our tests pass
// them as nil.
func (sc *StoreContext) Valid() bool {
return sc.Clock != nil && sc.Transport != nil &&
sc.RaftTickInterval != 0 && sc.RaftHeartbeatIntervalTicks > 0 &&
sc.RaftElectionTimeoutTicks > 0 && sc.ScanInterval > 0
}
// setDefaults initializes unset fields in StoreContext to values
// suitable for use on a local network.
// TODO(tschottdorf) see if this ought to be configurable via flags.
func (sc *StoreContext) setDefaults() {
sc.RangeRetryOptions = defaultRangeRetryOptions
if sc.RaftTickInterval == 0 {
sc.RaftTickInterval = defaultRaftTickInterval
}
if sc.RaftHeartbeatIntervalTicks == 0 {
sc.RaftHeartbeatIntervalTicks = defaultHeartbeatIntervalTicks
}
if sc.RaftElectionTimeoutTicks == 0 {
sc.RaftElectionTimeoutTicks = defaultRaftElectionTimeoutTicks
}
}
// NewStore returns a new instance of a store.
func NewStore(ctx StoreContext, eng engine.Engine, nodeDesc *proto.NodeDescriptor) *Store {
// TODO(tschottdorf) find better place to set these defaults.
ctx.setDefaults()
if !ctx.Valid() {
panic(fmt.Sprintf("invalid store configuration: %+v", &ctx))
}
s := &Store{
ctx: ctx,
db: ctx.DB, // TODO(tschottdorf) remove redundancy.
engine: eng,
_allocator: newAllocator(ctx.Gossip),
replicas: map[proto.RangeID]*Replica{},
replicasByKey: btree.New(64 /* degree */),
uninitReplicas: map[proto.RangeID]*Replica{},
nodeDesc: nodeDesc,
}
// Add range scanner and configure with queues.
s.scanner = newReplicaScanner(ctx.ScanInterval, ctx.ScanMaxIdleTime, newStoreRangeSet(s))
s.gcQueue = newGCQueue()
s._splitQueue = newSplitQueue(s.db, s.ctx.Gossip)
s.verifyQueue = newVerifyQueue(s.ReplicaCount)
s.replicateQueue = newReplicateQueue(s.ctx.Gossip, s.allocator(), s.ctx.Clock)
s._rangeGCQueue = newRangeGCQueue(s.db)
s.scanner.AddQueues(s.gcQueue, s._splitQueue, s.verifyQueue, s.replicateQueue, s._rangeGCQueue)
return s
}
// String formats a store for debug output.
func (s *Store) String() string {
return fmt.Sprintf("store=%d:%d (%s)", s.Ident.NodeID, s.Ident.StoreID, s.engine)
}
// Context returns a base context to pass along with commands being executed,
// derived from the supplied context (which is allowed to be nil).
func (s *Store) Context(ctx context.Context) context.Context {
if ctx == nil {
ctx = context.Background()
}
return log.Add(ctx,
log.NodeID, s.Ident.NodeID,
log.StoreID, s.Ident.StoreID)
}
// IsStarted returns true if the Store has been started.
func (s *Store) IsStarted() bool {
return atomic.LoadInt32(&s.started) == 1
}
// StartedAt returns the timestamp at which the store was most recently started.
func (s *Store) StartedAt() int64 {
return s.startedAt
}
// Start the engine, set the GC and read the StoreIdent.
func (s *Store) Start(stopper *stop.Stopper) error {
s.stopper = stopper
if s.Ident.NodeID == 0 {
// Open engine (i.e. initialize RocksDB database). "NodeID != 0"
// implies the engine has already been opened.
if err := s.engine.Open(); err != nil {
return err
}
s.stopper.AddCloser(s.engine)
// Read store ident and return a not-bootstrapped error if necessary.
ok, err := engine.MVCCGetProto(s.engine, keys.StoreIdentKey(), proto.ZeroTimestamp, true,
nil, &s.Ident)
if err != nil {
return err
} else if !ok {
return &NotBootstrappedError{}
}
}
// If the nodeID is 0, it has not been assigned yet.
// TODO(bram): Figure out how to remove this special case.
if s.nodeDesc.NodeID != 0 && s.Ident.NodeID != s.nodeDesc.NodeID {
return util.Errorf("node id:%d does not equal the one in node descriptor:%d", s.Ident.NodeID, s.nodeDesc.NodeID)
}
// Create ID allocators.
idAlloc, err := newIDAllocator(keys.RangeIDGenerator, s.db, 2 /* min ID */, rangeIDAllocCount, s.stopper)
if err != nil {
return err
}
s.rangeIDAlloc = idAlloc
now := s.ctx.Clock.Now()
s.startedAt = now.WallTime
// Start store event feed.
s.feed = NewStoreEventFeed(s.Ident.StoreID, s.ctx.EventFeed)
s.feed.startStore(s.startedAt)
// GCTimeouts method is called each time an engine compaction is
// underway. It sets minimum timeouts for transaction records and
// response cache entries.
minTxnTS := int64(0) // disable GC of transactions until we know minimum write intent age
minRCacheTS := now.WallTime - GCResponseCacheExpiration.Nanoseconds()
s.engine.SetGCTimeouts(minTxnTS, minRCacheTS)
// Iterate over all range-local key-based data.
start := keys.RangeDescriptorKey(proto.KeyMin)
end := keys.RangeDescriptorKey(proto.KeyMax)
if s.multiraft, err = multiraft.NewMultiRaft(s.RaftNodeID(), &multiraft.Config{
Transport: s.ctx.Transport,
Storage: s,
StateMachine: s,
TickInterval: s.ctx.RaftTickInterval,
ElectionTimeoutTicks: s.ctx.RaftElectionTimeoutTicks,
HeartbeatIntervalTicks: s.ctx.RaftHeartbeatIntervalTicks,
EntryFormatter: raftEntryFormatter,
// TODO(bdarnell): Multiraft deadlocks if the Events channel is
// unbuffered. Temporarily give it some breathing room until the underlying
// deadlock is fixed. See #1185, #1193.
EventBufferSize: 1000,
}, s.stopper); err != nil {
return err
}
// Iterate over all range descriptors, ignoring uncommitted versions
// (consistent=false). Uncommitted intents which have been abandoned
// due to a split crashing halfway will simply be resolved on the
// next split attempt. They can otherwise be ignored.
s.mu.Lock()
s.feed.beginScanRanges()
if _, err := engine.MVCCIterate(s.engine, start, end, now, false /* !consistent */, nil, /* txn */
false /* !reverse */, func(kv proto.KeyValue) (bool, error) {
// Only consider range metadata entries; ignore others.
_, suffix, _ := keys.DecodeRangeKey(kv.Key)
if !suffix.Equal(keys.LocalRangeDescriptorSuffix) {
return false, nil
}
var desc proto.RangeDescriptor
if err := gogoproto.Unmarshal(kv.Value.Bytes, &desc); err != nil {
return false, err
}
rng, err := NewReplica(&desc, s)
if err != nil {
return false, err
}
if err = s.addReplicaInternal(rng); err != nil {
return false, err
}
s.feed.registerRange(rng, true /* scan */)
// Note that we do not create raft groups at this time; they will be created
// on-demand the first time they are needed. This helps reduce the amount of
// election-related traffic in a cold start.
// Raft initialization occurs when we propose a command on this range or
// receive a raft message addressed to it.
// TODO(bdarnell): Also initialize raft groups when read leases are needed.
// TODO(bdarnell): Scan all ranges at startup for unapplied log entries
// and initialize those groups.
return false, nil
}); err != nil {
return err
}
s.feed.endScanRanges()
s.mu.Unlock()
// Start Raft processing goroutines.
s.multiraft.Start()
s.processRaft()
// Gossip is only ever nil while bootstrapping a cluster and
// in unittests.
if s.ctx.Gossip != nil {
// Register callbacks for any changes to accounting and zone
// configurations; we split ranges along prefix boundaries to
// avoid having a range that has two different accounting/zone
// configs. (We don't need a callback for permissions since
// permissions don't have such a requirement.)
s.ctx.Gossip.RegisterCallback(gossip.KeyConfigAccounting, s.configGossipUpdate)
s.ctx.Gossip.RegisterCallback(gossip.KeyConfigZone, s.configGossipUpdate)
// Start a single goroutine in charge of periodically gossiping the
// sentinel and first range metadata if we have a first range.
// This may wake up ranges and requires everything to be set up and
// running.
s.startGossip()
// Start the scanner. The construction here makes sure that the scanner
// only starts after Gossip has connected, and that it does not block Start
// from returning (as doing so might prevent Gossip from ever connecting).
s.stopper.RunWorker(func() {
select {
case <-s.ctx.Gossip.Connected:
s.scanner.Start(s.ctx.Clock, s.stopper)
case <-s.stopper.ShouldStop():
return
}
})
}
// Set the started flag (for unittests).
atomic.StoreInt32(&s.started, 1)
return nil
}
// WaitForInit waits for any asynchronous processes begun in Start()
// to complete their initialization. In particular, this includes
// gossiping. In some cases this may block until the range GC queue
// has completed its scan. Only for testing.
func (s *Store) WaitForInit() {
s.initComplete.Wait()
}
// startGossip runs an infinite loop in a goroutine which regularly checks
// whether the store has a first range or config replica and asks those ranges
// to gossip accordingly.
func (s *Store) startGossip() {
ctx := s.Context(nil)
// Periodic updates run in a goroutine and signal a WaitGroup upon completion
// of their first iteration.
s.initComplete.Add(2)
s.stopper.RunWorker(func() {
// Run the first time without waiting for the Ticker and signal the WaitGroup.
if err := s.maybeGossipFirstRange(); err != nil {
log.Warningc(ctx, "error gossiping first range data: %s", err)
}
s.initComplete.Done()
ticker := time.NewTicker(clusterIDGossipInterval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
if err := s.maybeGossipFirstRange(); err != nil {
log.Warningc(ctx, "error gossiping first range data: %s", err)
}
case <-s.stopper.ShouldStop():
return
}
}
})
s.stopper.RunWorker(func() {
if err := s.maybeGossipConfigs(); err != nil {
log.Warningc(ctx, "error gossiping configs: %s", err)
}
s.initComplete.Done()
ticker := time.NewTicker(configGossipInterval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
if err := s.maybeGossipConfigs(); err != nil {
log.Warningc(ctx, "error gossiping configs: %s", err)
}
case <-s.stopper.ShouldStop():
return
}
}
})
}
// maybeGossipFirstRange checks whether the store has a replica of the first
// range and if so, reminds it to gossip the first range descriptor and
// sentinel gossip.
func (s *Store) maybeGossipFirstRange() error {
rng := s.LookupReplica(proto.KeyMin, nil)
if rng != nil {
return rng.maybeGossipFirstRange()
}
return nil
}
// maybeGossipConfigs checks which of the store's ranges contain config
// descriptors and lets these ranges gossip them. Config gossip entries do not
// expire, so this is a rarely needed action in a working cluster; if values
// change, ranges will update gossip autonomously. However, the lease holder,
// who is normally in charge of that, might crash after an update before
// gossiping, and a new leader lease is only acquired if needed. To account for
// this rare scenario, we periodically activate the very few ranges that hold
// config maps.
func (s *Store) maybeGossipConfigs() error {
for _, cd := range configDescriptors {
rng := s.LookupReplica(cd.keyPrefix, nil)
if rng == nil {
// This store has no range with this configuration.
continue
}
// Wake up the replica. If it acquires a fresh lease, it will
// gossip. If an unexpected error occurs (i.e. nobody else seems to
// have an active lease but we still failed to obtain it), return
// that error. If we ignored it we would run the risk of running a
// cluster without configs gossiped.
if _, err := rng.getLeaseForGossip(s.Context(nil)); err != nil {
return err
}
}
return nil
}
// configGossipUpdate is a callback for gossip updates to
// configuration maps which affect range split boundaries.
func (s *Store) configGossipUpdate(key string, contentsChanged bool) {
if !contentsChanged {
return // Skip update if it's just a newer timestamp or fewer hops to info
}
ctx := s.Context(nil)
info, err := s.ctx.Gossip.GetInfo(key)
if err != nil {
log.Errorc(ctx, "unable to fetch %s config from gossip: %s", key, err)
return
}
configMap, ok := info.(config.PrefixConfigMap)
if !ok {
log.Errorc(ctx, "gossiped info is not a prefix configuration map: %+v", info)
return
}
s.maybeSplitRangesByConfigs(configMap)
// If the zone configs changed, run through ranges and set max bytes.
if key == gossip.KeyConfigZone {
s.setRangesMaxBytes(configMap)
}
}
// GossipCapacity broadcasts the node's capacity on the gossip network.
func (s *Store) GossipCapacity() {
storeDesc, err := s.Descriptor()
ctx := s.Context(nil)
if err != nil {
log.Warningc(ctx, "problem getting store descriptor for store %+v: %v", s.Ident, err)
return
}
// Unique gossip key per store.
keyMaxCapacity := gossip.MakeCapacityKey(storeDesc.Node.NodeID, storeDesc.StoreID)
// Gossip store descriptor.
err = s.ctx.Gossip.AddInfo(keyMaxCapacity, *storeDesc, ttlCapacityGossip)
if err != nil {
log.Warningc(ctx, "%s", err)
}
}
// maybeSplitRangesByConfigs determines ranges which should be
// split by the boundaries of the prefix config map, if any, and
// adds them to the split queue.
func (s *Store) maybeSplitRangesByConfigs(configMap config.PrefixConfigMap) {
s.mu.Lock()
defer s.mu.Unlock()
for _, config := range configMap {
// Find the range which contains this config prefix, if any.
var rng *Replica
s.replicasByKey.AscendGreaterOrEqual((rangeBTreeKey)(config.Prefix.Next()), func(i btree.Item) bool {
rng = i.(*Replica)
return false
})
// If the config doesn't split the range, continue.
if rng == nil || !rng.Desc().ContainsKey(config.Prefix) {
continue
}
s.splitQueue().MaybeAdd(rng, s.ctx.Clock.Now())
}
}
// DisableRangeGCQueue disables or enables the range GC queue.
// Exposed only for testing.
func (s *Store) DisableRangeGCQueue(disabled bool) {
s.rangeGCQueue().SetDisabled(disabled)
}
// ForceReplicationScan iterates over all ranges and enqueues any that
// need to be replicated. Exposed only for testing.
func (s *Store) ForceReplicationScan(t util.Tester) {
s.mu.Lock()
defer s.mu.Unlock()
for _, r := range s.replicas {
s.replicateQueue.MaybeAdd(r, s.ctx.Clock.Now())
}
}
// ForceRangeGCScan iterates over all ranges and enqueues any that
// may need to be GC'd. Exposed only for testing.
func (s *Store) ForceRangeGCScan(t util.Tester) {
s.mu.Lock()
defer s.mu.Unlock()
for _, r := range s.replicas {
s._rangeGCQueue.MaybeAdd(r, s.ctx.Clock.Now())
}
}
// setRangesMaxBytes sets the max bytes for every range according
// to the zone configs.
//
// TODO(spencer): scanning all ranges with the lock held could cause
// perf issues if the number of ranges grows large enough.
func (s *Store) setRangesMaxBytes(zoneMap config.PrefixConfigMap) {
s.mu.Lock()
defer s.mu.Unlock()
zone := zoneMap[0].Config.(*config.ZoneConfig)
idx := 0
// Note that we must iterate through the ranges in lexicographic
// order to match the ordering of the zoneMap.
s.replicasByKey.Ascend(func(i btree.Item) bool {
rng := i.(*Replica)
if idx < len(zoneMap)-1 && !rng.Desc().StartKey.Less(zoneMap[idx+1].Prefix) {
idx++
zone = zoneMap[idx].Config.(*config.ZoneConfig)
}
rng.SetMaxBytes(zone.RangeMaxBytes)
return true
})
}
// Bootstrap writes a new store ident to the underlying engine. To
// ensure that no crufty data already exists in the engine, it scans
// the engine contents before writing the new store ident. The engine
// should be completely empty. It returns an error if called on a
// non-empty engine.
func (s *Store) Bootstrap(ident proto.StoreIdent, stopper *stop.Stopper) error {
if s.Ident.NodeID != 0 {
return util.Errorf("engine already bootstrapped")
}
if err := s.engine.Open(); err != nil {
return err
}
stopper.AddCloser(s.engine)
s.Ident = ident
kvs, err := engine.Scan(s.engine, proto.EncodedKey(proto.KeyMin), proto.EncodedKey(proto.KeyMax), 1)
if err != nil {
return util.Errorf("store %s: unable to access: %s", s.engine, err)
} else if len(kvs) > 0 {
// See if this is an already-bootstrapped store.
ok, err := engine.MVCCGetProto(s.engine, keys.StoreIdentKey(), proto.ZeroTimestamp, true, nil, &s.Ident)
if err != nil {
return util.Errorf("store %s is non-empty but cluster ID could not be determined: %s", s.engine, err)
}
if ok {
return util.Errorf("store %s already belongs to cockroach cluster %s", s.engine, s.Ident.ClusterID)
}
return util.Errorf("store %s is not-empty and has invalid contents (first key: %q)", s.engine, kvs[0].Key)
}
err = engine.MVCCPutProto(s.engine, nil, keys.StoreIdentKey(), proto.ZeroTimestamp, nil, &s.Ident)
return err
}
// GetReplica fetches a replica by Range ID. Returns an error if no replica is found.
func (s *Store) GetReplica(rangeID proto.RangeID) (*Replica, error) {
s.mu.RLock()
defer s.mu.RUnlock()
if rng, ok := s.replicas[rangeID]; ok {
return rng, nil
}
return nil, proto.NewRangeNotFoundError(rangeID)
}
// LookupReplica looks up a replica via binary search over the
// "replicasByKey" btree. Returns nil if no replica is found for
// specified key range. Note that the specified keys are transformed
// using Key.Address() to ensure we lookup replicas correctly for local
// keys. When end is nil, a replica that contains start is looked up.
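// For example, LookupReplica(proto.Key("a"), nil) returns the replica whose
// range contains the key "a", if this store holds such a replica.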
func (s *Store) LookupReplica(start, end proto.Key) *Replica {
s.mu.RLock()
defer s.mu.RUnlock()
startAddr := keys.KeyAddress(start)
endAddr := keys.KeyAddress(end)
var rng *Replica
s.replicasByKey.AscendGreaterOrEqual((rangeBTreeKey)(startAddr.Next()), func(i btree.Item) bool {
rng = i.(*Replica)
return false
})
if rng == nil || !rng.Desc().ContainsKeyRange(startAddr, endAddr) {
return nil
}
return rng
}
// RaftStatus returns the current raft status of the given range.
func (s *Store) RaftStatus(rangeID proto.RangeID) *raft.Status {
return s.multiraft.Status(rangeID)
}
// BootstrapRange creates the first range in the cluster and manually
// writes it to the store. Default range addressing records are
// created for meta1 and meta2. Default configurations for accounting,
// permissions, users, and zones are created. All configs are specified
// for the empty key prefix, meaning they apply to the entire
// database. Permissions are granted to all users and the zone
// requires three replicas with no other specifications. It also adds
// the range tree and the root node, the first range, to it.
func (s *Store) BootstrapRange() error {
desc := &proto.RangeDescriptor{
RangeID: 1,
StartKey: proto.KeyMin,
EndKey: proto.KeyMax,
NextReplicaID: 2,
Replicas: []proto.Replica{
{
NodeID: 1,
StoreID: 1,
ReplicaID: 1,
},
},
}
if err := desc.Validate(); err != nil {
return err
}
batch := s.engine.NewBatch()
ms := &engine.MVCCStats{}
now := s.ctx.Clock.Now()
// Range descriptor.
if err := engine.MVCCPutProto(batch, ms, keys.RangeDescriptorKey(desc.StartKey), now, nil, desc); err != nil {
return err
}
// GC Metadata.
gcMeta := proto.NewGCMetadata(now.WallTime)
if err := engine.MVCCPutProto(batch, ms, keys.RangeGCMetadataKey(desc.RangeID), proto.ZeroTimestamp, nil, gcMeta); err != nil {
return err
}
// Verification timestamp.
if err := engine.MVCCPutProto(batch, ms, keys.RangeLastVerificationTimestampKey(desc.RangeID), proto.ZeroTimestamp, nil, &now); err != nil {
return err
}
// Range addressing for meta2.
meta2Key := keys.RangeMetaKey(proto.KeyMax)
if err := engine.MVCCPutProto(batch, ms, meta2Key, now, nil, desc); err != nil {
return err
}
// Range addressing for meta1.
meta1Key := keys.RangeMetaKey(meta2Key)
if err := engine.MVCCPutProto(batch, ms, meta1Key, now, nil, desc); err != nil {
return err
}
// Accounting config.
acctConfig := &config.AcctConfig{}
key := keys.MakeKey(keys.ConfigAccountingPrefix, proto.KeyMin)
if err := engine.MVCCPutProto(batch, ms, key, now, nil, acctConfig); err != nil {
return err
}
// Permission config.
permConfig := &config.PermConfig{
Read: []string{security.RootUser}, // root user
Write: []string{security.RootUser}, // root user
}
key = keys.MakeKey(keys.ConfigPermissionPrefix, proto.KeyMin)
if err := engine.MVCCPutProto(batch, ms, key, now, nil, permConfig); err != nil {
return err
}
// User config.
// TODO(marc): instead of a root entry, maybe we should have a default "node".
userConfig := &config.UserConfig{}
key = keys.MakeKey(keys.ConfigUserPrefix, proto.KeyMin)
if err := engine.MVCCPutProto(batch, ms, key, now, nil, userConfig); err != nil {
return err
}
// Zone config.
zoneConfig := &config.ZoneConfig{
ReplicaAttrs: []proto.Attributes{
{},
{},
{},
},
RangeMinBytes: 1048576,  // 1 MiB
RangeMaxBytes: 67108864, // 64 MiB
GC: &config.GCPolicy{
TTLSeconds: 24 * 60 * 60, // 1 day
},
}
key = keys.MakeKey(keys.ConfigZonePrefix, proto.KeyMin)
if err := engine.MVCCPutProto(batch, ms, key, now, nil, zoneConfig); err != nil {
return err
}
// We reserve the first 1000 descriptor IDs.
key = keys.DescIDGenerator
value := proto.Value{}
value.SetInteger(int64(structured.MaxReservedDescID + 1))
value.InitChecksum(key)
if err := engine.MVCCPut(batch, nil, key, now, value, nil); err != nil {
return err
}
// Range Tree setup.
if err := SetupRangeTree(batch, ms, now, desc.StartKey); err != nil {
return err
}
if err := engine.MVCCSetRangeStats(batch, 1, ms); err != nil {
return err
}
if err := batch.Commit(); err != nil {
return err
}
return nil
}
// The following methods implement the RangeManager interface.
// ClusterID accessor.
func (s *Store) ClusterID() string { return s.Ident.ClusterID }
// StoreID accessor.
func (s *Store) StoreID() proto.StoreID { return s.Ident.StoreID }
// RaftNodeID accessor.
func (s *Store) RaftNodeID() proto.RaftNodeID {
return proto.MakeRaftNodeID(s.Ident.NodeID, s.Ident.StoreID)
}
// Clock accessor.
func (s *Store) Clock() *hlc.Clock { return s.ctx.Clock }
// Engine accessor.
func (s *Store) Engine() engine.Engine { return s.engine }
// DB accessor.
func (s *Store) DB() *client.DB { return s.ctx.DB }
// Allocator accessor.
func (s *Store) allocator() *allocator { return s._allocator }
// Gossip accessor.
func (s *Store) Gossip() *gossip.Gossip { return s.ctx.Gossip }
// splitQueue accessor.
func (s *Store) splitQueue() *splitQueue { return s._splitQueue }
// rangeGCQueue accessor.
func (s *Store) rangeGCQueue() *rangeGCQueue { return s._rangeGCQueue }
// Stopper accessor.
func (s *Store) Stopper() *stop.Stopper { return s.stopper }
// EventFeed accessor.
func (s *Store) EventFeed() StoreEventFeed { return s.feed }
// Tracer accessor.
func (s *Store) Tracer() *tracer.Tracer { return s.ctx.Tracer }
// NewRangeDescriptor creates a new descriptor based on start and end
// keys and the supplied proto.Replicas slice. It allocates new
// replica IDs to fill out the supplied replicas.
func (s *Store) NewRangeDescriptor(start, end proto.Key, replicas []proto.Replica) (*proto.RangeDescriptor, error) {