// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package etcdserver
import (
"context"
"encoding/json"
"expvar"
"fmt"
"math"
"math/rand"
"net/http"
"os"
"path"
"regexp"
"sync"
"sync/atomic"
"time"
"github.com/coreos/etcd/alarm"
"github.com/coreos/etcd/auth"
"github.com/coreos/etcd/compactor"
"github.com/coreos/etcd/discovery"
"github.com/coreos/etcd/etcdserver/api"
"github.com/coreos/etcd/etcdserver/api/v2http/httptypes"
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
"github.com/coreos/etcd/etcdserver/membership"
"github.com/coreos/etcd/etcdserver/stats"
"github.com/coreos/etcd/lease"
"github.com/coreos/etcd/lease/leasehttp"
"github.com/coreos/etcd/mvcc"
"github.com/coreos/etcd/mvcc/backend"
"github.com/coreos/etcd/pkg/fileutil"
"github.com/coreos/etcd/pkg/idutil"
"github.com/coreos/etcd/pkg/pbutil"
"github.com/coreos/etcd/pkg/runtime"
"github.com/coreos/etcd/pkg/schedule"
"github.com/coreos/etcd/pkg/types"
"github.com/coreos/etcd/pkg/wait"
"github.com/coreos/etcd/raft"
"github.com/coreos/etcd/raft/raftpb"
"github.com/coreos/etcd/rafthttp"
"github.com/coreos/etcd/snap"
"github.com/coreos/etcd/store"
"github.com/coreos/etcd/version"
"github.com/coreos/etcd/wal"
"github.com/coreos/go-semver/semver"
"github.com/coreos/pkg/capnslog"
"github.com/prometheus/client_golang/prometheus"
)
const (
DefaultSnapCount = 100000
StoreClusterPrefix = "/0"
StoreKeysPrefix = "/1"
// HealthInterval is the minimum time the cluster should be healthy
// before accepting add member requests.
HealthInterval = 5 * time.Second
purgeFileInterval = 30 * time.Second
// monitorVersionInterval should be smaller than the timeout
// on the connection. Or we will not be able to reuse the connection
// (since it will timeout).
monitorVersionInterval = rafthttp.ConnWriteTimeout - time.Second
// max number of in-flight snapshot messages etcdserver allows to have
// This number is more than enough for most clusters with 5 machines.
maxInFlightMsgSnap = 16
releaseDelayAfterSnapshot = 30 * time.Second
// maxPendingRevokes is the maximum number of outstanding expired lease revocations.
maxPendingRevokes = 16
recommendedMaxRequestBytes = 10 * 1024 * 1024
)
var (
plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver")
storeMemberAttributeRegexp = regexp.MustCompile(path.Join(membership.StoreMembersPrefix, "[[:xdigit:]]{1,16}", "attributes"))
)
func init() {
rand.Seed(time.Now().UnixNano())
expvar.Publish(
"file_descriptor_limit",
expvar.Func(
func() interface{} {
n, _ := runtime.FDLimit()
return n
},
),
)
}
type Response struct {
Term uint64
Index uint64
Event *store.Event
Watcher store.Watcher
Err error
}
type ServerV2 interface {
Server
// Do takes a V2 request and attempts to fulfill it, returning a Response.
Do(ctx context.Context, r pb.Request) (Response, error)
stats.Stats
ClientCertAuthEnabled() bool
}
type ServerV3 interface {
Server
ID() types.ID
RaftTimer
}
func (s *EtcdServer) ClientCertAuthEnabled() bool { return s.Cfg.ClientCertAuthEnabled }
// Server is the core interface of the etcd server; it defines the server's main functionality.
type Server interface {
// Leader returns the ID of the leader Server, i.e. the current cluster leader.
Leader() types.ID
// AddMember attempts to add a member into the cluster. It will return
// ErrIDRemoved if member ID is removed from the cluster, or return
// ErrIDExists if member ID exists in the cluster.
AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) // add a member to the current etcd cluster
// RemoveMember attempts to remove a member from the cluster. It will
// return ErrIDRemoved if member ID is removed from the cluster, or return
// ErrIDNotFound if member ID is not in the cluster.
RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error) // remove a member from the current etcd cluster
// UpdateMember attempts to update an existing member's attributes in the cluster. It will
// return ErrIDNotFound if the member ID does not exist.
UpdateMember(ctx context.Context, updateMemb membership.Member) ([]*membership.Member, error)
// ClusterVersion is the cluster-wide minimum major.minor version.
// Cluster version is set to the min version that an etcd member is
// compatible with when first bootstrap.
//
// ClusterVersion is nil until the cluster is bootstrapped (has a quorum).
//
// During a rolling upgrade, the ClusterVersion will be updated
// automatically after a sync (5 seconds by default).
//
// The API/raft component can utilize ClusterVersion to determine if
// it can accept a client request or a raft RPC.
// NOTE: ClusterVersion might be nil when etcd 2.1 works with etcd 2.0 and
// the leader is etcd 2.0. etcd 2.0 leader will not update clusterVersion since
// this feature is introduced post 2.0.
ClusterVersion() *semver.Version
Cluster() api.Cluster
Alarms() []*pb.AlarmMember
}
// EtcdServer is the production implementation of the Server interface
type EtcdServer struct {
// inflightSnapshots holds count the number of snapshots currently inflight.
inflightSnapshots int64 // must use atomic operations to access; keep 64-bit aligned. Number of snapshots sent out but not yet acknowledged.
appliedIndex uint64 // must use atomic operations to access; keep 64-bit aligned. Highest index of the Entry records applied on this node.
committedIndex uint64 // must use atomic operations to access; keep 64-bit aligned. Index of the Entry records known to be committed.
// consistIndex used to hold the offset of current executing entry
// It is initialized to 0 before executing any entry.
consistIndex consistentIndex // must use atomic operations to access; keep 64-bit aligned.
r raftNode // uses 64-bit atomics; keep 64-bit aligned. Bridge between the EtcdServer instance and the underlying etcd-raft module.
// readych is closed after the current node has published its own information to the other members of the
// cluster; it also serves as a signal that this EtcdServer instance is ready to serve requests.
readych chan struct{}
Cfg ServerConfig // wraps the server configuration
w wait.Wait // Wait coordinates the execution of multiple background goroutines.
readMu sync.RWMutex
// read routine notifies etcd server that it waits for reading by sending an empty struct to
// readwaitC
readwaitc chan struct{}
// readNotifier is used to notify the read routine that it can process the request
// when there is no error
readNotifier *notifier
// EtcdServer.start() launches several background goroutines; one of them runs EtcdServer.run(), which
// listens on the stop channel. EtcdServer.Stop() closes the stop channel, which terminates that run
// goroutine; before the run goroutine exits it also closes the stopping and done channels, which in turn
// shuts down the remaining background goroutines (see the note after this struct definition).
// stop signals the run goroutine should shutdown.
stop chan struct{}
// stopping is closed by run goroutine on shutdown.
stopping chan struct{}
// done is closed when all goroutines from start() complete.
done chan struct{}
errorc chan error
id types.ID // ID of the current node
attributes membership.Attributes // name of the current node and the URLs on which it accepts requests from other members
cluster *membership.RaftCluster // information about all the members of the current cluster
store store.Store // etcd v2 store
snapshotter *snap.Snapshotter // reads and writes snapshot files
applyV2 ApplierV2 // the ApplierV2 interface applies v2 Entry records; it wraps the v2 store underneath
// applyV3 is the applier with auth and quotas; the applierV3 interface applies v3 Entry records and wraps the v3 store underneath
applyV3 applierV3
// applyV3Base is the core applier without auth or quotas
applyV3Base applierV3
applyWait wait.WaitTime
kv mvcc.ConsistentWatchableKV // etcd v3 store
lessor lease.Lessor
bemu sync.Mutex
be backend.Backend // backend storage for etcd v3
authStore auth.AuthStore // records authentication and authorization information
alarmStore *alarm.AlarmStore // records alarm information
stats *stats.ServerStats
lstats *stats.LeaderStats
SyncTicker *time.Ticker // controls how often the leader sends SYNC messages
// compactor is used to auto-compact the KV.
compactor compactor.Compactor // the leader compacts the store periodically; this field controls the compaction frequency
// peerRt used to send requests (version, lease) to peers.
peerRt http.RoundTripper
reqIDGen *idutil.Generator // generates unique identifiers for requests
// forceVersionC is used to force the version monitor loop
// to detect the cluster version immediately.
forceVersionC chan struct{}
// wgMu blocks concurrent waitgroup mutation while server stopping
wgMu sync.RWMutex
// wg is used to wait for the goroutines that depend on the server state
// to exit when stopping the server. EtcdServer.Stop() uses this field to wait for all background goroutines to exit.
wg sync.WaitGroup
// ctx is used for etcd-initiated requests that may need to be canceled
// on etcd server shutdown.
ctx context.Context
cancel context.CancelFunc
leadTimeMu sync.RWMutex
leadElectedTime time.Time // timestamp of the most recent transition of this node to the leader state
}
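// A rough sketch of the shutdown signalling summarized above, based on the channel comments in this
// struct and on the deferred cleanup in run():
//
//	Stop()       -> close(stop)      // asks the run goroutine to shut down
//	run() defer  -> close(stopping)  // tells goroutines started via goAttach to exit
//	                s.wg.Wait()      // waits for those goroutines to finish
//	                close(done)      // signals that everything started by start() has completed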
// This function completes the initialization of the EtcdServer and is the starting point of the etcd
// server's lifecycle. The rough initialization flow is: (1) define the variables used during initialization
// and create the directories used by this node; (2) initialize the components used by the etcd-raft module
// according to the configuration, e.g. check whether WAL log files exist under the wal directory,
// initialize the v2 store, locate the BoltDB database file, create the Backend instance, create the
// RoundTripper instance, and so on; (3) initialize the etcd-raft Node instance based on the WAL lookup
// result and the node's startup configuration; (4) create the EtcdServer instance and initialize its fields.
// NewServer creates a new EtcdServer from the supplied configuration. The
// configuration is considered static for the lifetime of the EtcdServer.
func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) {
st := store.New(StoreClusterPrefix, StoreKeysPrefix)
var (
w *wal.WAL // WAL instance that manages the WAL log files
n raft.Node // Node instance of the etcd-raft module
s *raft.MemoryStorage // MemoryStorage instance
id types.ID // ID of the current node
cl *membership.RaftCluster // information about all the members of the current cluster
)
if cfg.MaxRequestBytes > recommendedMaxRequestBytes {
plog.Warningf("MaxRequestBytes %v exceeds maximum recommended size %v", cfg.MaxRequestBytes, recommendedMaxRequestBytes)
}
// Every etcd node stores its data under the "<node name>.etcd/member" directory. Unless stated otherwise,
// the directories mentioned below are subdirectories of it. Here we check whether the data directory
// exists and create it if it does not.
if terr := fileutil.TouchDirAll(cfg.DataDir); terr != nil {
return nil, fmt.Errorf("cannot access data directory: %v", terr)
}
haveWAL := wal.Exist(cfg.WALDir()) // check whether WAL log files exist under the wal directory
// make sure the snap directory, which stores the snapshot files, exists
if err = fileutil.TouchDirAll(cfg.SnapDir()); err != nil {
plog.Fatalf("create snapshot directory error: %v", err)
}
ss := snap.New(cfg.SnapDir()) // create the Snapshotter instance used to read and write snapshot files under the snap directory
bepath := cfg.backendPath() // path where the BoltDB database file is stored
beExist := fileutil.Exist(bepath) // check whether the BoltDB database file already exists
be := openBackend(cfg) // create the Backend instance; openBackend starts a separate goroutine to open it
defer func() {
if err != nil {
be.Close()
}
}()
// create the RoundTripper instance according to the configuration; it handles network requests to peers
prt, err := rafthttp.NewRoundTripper(cfg.PeerTLSInfo, cfg.peerDialTimeout())
if err != nil {
return nil, err
}
var (
remotes []*membership.Member
snapshot *raftpb.Snapshot
)
switch {
// Scenario 1: there are no WAL log files under this node's wal directory and the node is joining a cluster that is already running.
case !haveWAL && !cfg.NewCluster: // no WAL log files and the node is joining an existing, running cluster
// validate the configuration, e.g. whether it contains the information of the current node and whether the URLs exposed by the members are duplicated
if err = cfg.VerifyJoinExisting(); err != nil {
return nil, err
}
// create the RaftCluster instance and its Member instances from the configuration
cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, cfg.InitialPeerURLsMap)
if err != nil {
return nil, err
}
// getRemotePeerURLs() filters out the current node and returns the sorted URLs exposed by the other members.
// GetClusterFromRemotePeers() requests the cluster information from those members and builds a corresponding
// RaftCluster instance, which is then validated against the local configuration.
existingCluster, gerr := GetClusterFromRemotePeers(getRemotePeerURLs(cl, cfg.Name), prt)
if gerr != nil {
return nil, fmt.Errorf("cannot fetch cluster info from peer urls: %v", gerr)
}
if err = membership.ValidateClusterAndAssignIDs(cl, existingCluster); err != nil {
return nil, fmt.Errorf("error validating peerURLs %s: %v", existingCluster, err)
}
if !isCompatibleWithCluster(cl, cl.MemberByName(cfg.Name).ID, prt) {
return nil, fmt.Errorf("incompatible with current running cluster")
}
remotes = existingCluster.Members() // record the members of the existing cluster as remotes
cl.SetID(existingCluster.ID()) // update the cluster ID in the local RaftCluster instance
cl.SetStore(st) // set the store field of the local RaftCluster instance
cl.SetBackend(be) // set the be field of the local RaftCluster instance; this also initializes the BoltDB Buckets used later ("member", "members_removed", "cluster")
cfg.Print()
id, n, s, w = startNode(cfg, cl, nil) // call startNode() to initialize the raft.Node instance and related components
// Scenario 2: there are no WAL log files under this node's wal directory and the cluster is newly created.
case !haveWAL && cfg.NewCluster: // no WAL log files and the cluster is being bootstrapped
if err = cfg.VerifyBootstrap(); err != nil { // run a series of checks on the configuration used to start this node
return nil, err
}
cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, cfg.InitialPeerURLsMap) // create the local RaftCluster instance from the configuration
if err != nil {
return nil, err
}
m := cl.MemberByName(cfg.Name) // look up the Member instance for this node in the RaftCluster by name
// fetch the cluster information from the other members and check whether a member with the same name has already been started
if isMemberBootstrapped(cl, cfg.Name, prt, cfg.bootstrapTimeout()) {
return nil, fmt.Errorf("member %s has already been bootstrapped", m.ID)
}
if cfg.ShouldDiscover() { // check whether the node should bootstrap via the discovery service according to the configuration
var str string
str, err = discovery.JoinCluster(cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.InitialPeerURLsMap.String())
if err != nil {
return nil, &DiscoveryError{Op: "join", Err: err}
}
var urlsmap types.URLsMap
urlsmap, err = types.NewURLsMap(str)
if err != nil {
return nil, err
}
if checkDuplicateURL(urlsmap) {
return nil, fmt.Errorf("discovery cluster %s has duplicate url", urlsmap)
}
if cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, urlsmap); err != nil {
return nil, err
}
}
cl.SetStore(st) // set the store field of the local RaftCluster instance
cl.SetBackend(be) // set the be field of the local RaftCluster instance; this also initializes the BoltDB Buckets used later
cfg.PrintWithInitial()
id, n, s, w = startNode(cfg, cl, cl.MemberIDs()) // call startNode()
// Scenario 3: WAL log files exist under the wal directory.
case haveWAL: // WAL log files exist
if err = fileutil.IsDirWriteable(cfg.MemberDir()); err != nil { // make sure both the member and wal directories are writable
return nil, fmt.Errorf("cannot write to member directory: %v", err)
}
if err = fileutil.IsDirWriteable(cfg.WALDir()); err != nil {
return nil, fmt.Errorf("cannot write to WAL directory: %v", err)
}
if cfg.ShouldDiscover() {
plog.Warningf("discovery token ignored since a cluster has already been initialized. Valid log found at %q", cfg.WALDir())
}
snapshot, err = ss.Load() // load the snapshot data
if err != nil && err != snap.ErrNoSnapshot {
return nil, err
}
if snapshot != nil { // recover the v2 and v3 stores from the loaded snapshot data
if err = st.Recovery(snapshot.Data); err != nil { // recover the v2 store from the snapshot data
plog.Panicf("recovered store from snapshot error: %v", err)
}
plog.Infof("recovered store from snapshot at index %d", snapshot.Metadata.Index)
if be, err = recoverSnapshotBackend(cfg, be, *snapshot); err != nil { // recover the v3 store from the snapshot data
plog.Panicf("recovering backend from snapshot error: %v", err)
}
}
cfg.Print()
if !cfg.ForceNewCluster {
id, cl, n, s, w = restartNode(cfg, snapshot) // restart the raft.Node
} else { // force-start as a standalone, single-node cluster
id, cl, n, s, w = restartAsStandaloneNode(cfg, snapshot)
}
cl.SetStore(st) // set the relevant fields of the local RaftCluster instance
cl.SetBackend(be)
cl.Recover(api.UpdateCapability) // recover the information about the other members from the v2 store; this also checks the compatibility between the server version and the versions of the WAL log files and snapshot data
if cl.Version() != nil && !cl.Version().LessThan(semver.Version{Major: 3}) && !beExist {
os.RemoveAll(bepath)
return nil, fmt.Errorf("database file (%v) of the backend is missing", bepath)
}
// This scenario recovers the node's v2 and v3 stores from the snapshot data and then restores the Node
// instance of the etcd-raft module. The v2 store is recovered by loading the JSON data stored in the
// snapshot file; the recovery of the v3 store is implemented in recoverSnapshotBackend().
default: // no other scenarios are supported
return nil, fmt.Errorf("unsupported bootstrap config")
}
if terr := fileutil.TouchDirAll(cfg.MemberDir()); terr != nil {
return nil, fmt.Errorf("cannot access member directory: %v", terr)
}
sstats := stats.NewServerStats(cfg.Name, id.String())
lstats := stats.NewLeaderStats(id.String())
heartbeat := time.Duration(cfg.TickMs) * time.Millisecond
srv = &EtcdServer{ // create the EtcdServer instance
readych: make(chan struct{}),
Cfg: cfg,
errorc: make(chan error, 1),
store: st,
snapshotter: ss,
r: *newRaftNode( // create the etcdserver.raftNode instance from the components created above
raftNodeConfig{
isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) },
Node: n,
heartbeat: heartbeat,
raftStorage: s,
storage: NewStorage(w, ss),
},
),
id: id,
attributes: membership.Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()},
cluster: cl,
stats: sstats,
lstats: lstats,
SyncTicker: time.NewTicker(500 * time.Millisecond),
peerRt: prt,
reqIDGen: idutil.NewGenerator(uint16(id), time.Now()),
forceVersionC: make(chan struct{}),
}
serverID.With(prometheus.Labels{"server_id": id.String()}).Set(1)
srv.applyV2 = &applierV2store{store: srv.store, cluster: srv.cluster} // initialize the EtcdServer.applyV2 field
srv.be = be // initialize the EtcdServer.be field
minTTL := time.Duration((3*cfg.ElectionTicks)/2) * heartbeat
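// Illustrative note: assuming the default TickMs=100 and ElectionTicks=10,
// minTTL = (3*10)/2 ticks * 100ms = 1.5s.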
// always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases.
// If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers.
// (store.restore() not only rebuilds the in-memory index but also re-binds key-value pairs to their
// leases, so EtcdServer.lessor must be recovered before EtcdServer.kv.)
srv.lessor = lease.NewLessor(srv.be, int64(math.Ceil(minTTL.Seconds())))
srv.kv = mvcc.New(srv.be, srv.lessor, &srv.consistIndex)
if beExist {
kvindex := srv.kv.ConsistentIndex()
// TODO: remove kvindex != 0 checking when we do not expect users to upgrade
// etcd from pre-3.0 release.
if snapshot != nil && kvindex < snapshot.Metadata.Index {
if kvindex != 0 {
return nil, fmt.Errorf("database file (%v index %d) does not match with snapshot (index %d).", bepath, kvindex, snapshot.Metadata.Index)
}
plog.Warningf("consistent index never saved (snapshot index=%d)", snapshot.Metadata.Index)
}
}
newSrv := srv // since srv == nil in defer if srv is returned as nil
defer func() {
// closing backend without first closing kv can cause
// resumed compactions to fail with closed tx errors
if err != nil {
newSrv.kv.Close()
}
}()
srv.consistIndex.setConsistentIndex(srv.kv.ConsistentIndex()) // initialize consistIndex from the consistent index recorded in the KV store
tp, err := auth.NewTokenProvider(cfg.AuthToken,
func(index uint64) <-chan struct{} {
return srv.applyWait.Wait(index)
},
)
if err != nil {
plog.Errorf("failed to create token provider: %s", err)
return nil, err
}
srv.authStore = auth.NewAuthStore(srv.be, tp) // initialize the EtcdServer.authStore field
if num := cfg.AutoCompactionRetention; num != 0 { // start a background goroutine that performs automatic compaction
srv.compactor, err = compactor.New(cfg.AutoCompactionMode, num, srv.kv, srv)
if err != nil {
return nil, err
}
srv.compactor.Run()
}
srv.applyV3Base = srv.newApplierV3Backend() // initialize the applyV3Base field
if err = srv.restoreAlarms(); err != nil { // initialize the alarmStore and applyV3 fields
return nil, err
}
// TODO: move transport initialization near the definition of remote
tr := &rafthttp.Transport{ // create the rafthttp.Transport instance
TLSInfo: cfg.PeerTLSInfo,
DialTimeout: cfg.peerDialTimeout(),
ID: id,
URLs: cfg.PeerURLs,
ClusterID: cl.ID(),
Raft: srv,
Snapshotter: ss,
ServerStats: sstats,
LeaderStats: lstats,
ErrorC: srv.errorc,
}
if err = tr.Start(); err != nil { // start the rafthttp.Transport instance
return nil, err
}
// add all remotes into transport: register the Peer and Remote instances for each member of the cluster with the rafthttp.Transport instance
for _, m := range remotes {
if m.ID != id {
tr.AddRemote(m.ID, m.PeerURLs)
}
}
for _, m := range cl.Members() {
if m.ID != id {
tr.AddPeer(m.ID, m.PeerURLs)
}
}
srv.r.transport = tr // set the transport field of the raftNode
return srv, nil
}
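// A minimal usage sketch of how NewServer and Start fit together, assuming a fully populated
// ServerConfig named cfg (error handling abbreviated):
//
//	srv, err := NewServer(cfg)
//	if err != nil {
//		plog.Fatalf("failed to create server: %v", err)
//	}
//	srv.Start()
//	defer srv.Stop()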
func (s *EtcdServer) adjustTicks() {
clusterN := len(s.cluster.Members())
// single-node fresh start, or single-node recovers from snapshot
if clusterN == 1 {
ticks := s.Cfg.ElectionTicks - 1
plog.Infof("%s as single-node; fast-forwarding %d ticks (election ticks %d)", s.ID(), ticks, s.Cfg.ElectionTicks)
s.r.advanceTicks(ticks)
return
}
if !s.Cfg.InitialElectionTickAdvance {
plog.Infof("skipping initial election tick advance (election tick %d)", s.Cfg.ElectionTicks)
return
}
// retry up to "rafthttp.ConnReadTimeout", which is 5-sec
// until peer connection reports; otherwise:
// 1. all connections failed, or
// 2. no active peers, or
// 3. restarted single-node with no snapshot
// then, do nothing, because advancing ticks would have no effect
waitTime := rafthttp.ConnReadTimeout
itv := 50 * time.Millisecond
for i := int64(0); i < int64(waitTime/itv); i++ {
select {
case <-time.After(itv):
case <-s.stopping:
return
}
peerN := s.r.transport.ActivePeers()
if peerN > 1 {
// multi-node received peer connection reports
// adjust ticks, in case of slow leader message delivery
ticks := s.Cfg.ElectionTicks - 2
plog.Infof("%s initialized peer connection; fast-forwarding %d ticks (election ticks %d) with %d active peer(s)", s.ID(), ticks, s.Cfg.ElectionTicks, peerN)
s.r.advanceTicks(ticks)
return
}
}
}
// Start performs any initialization of the Server necessary for it to
// begin serving requests. It must be called before Do or Process.
// Start must be non-blocking; any long-running server functionality
// should be implemented in goroutines.
func (s *EtcdServer) Start() { // start the current node
s.start() // starts a background goroutine that runs EtcdServer.run()
s.goAttach(func() { s.adjustTicks() })
s.goAttach(func() { s.publish(s.Cfg.ReqTimeout()) }) // background goroutine that publishes this node's information to the other members
s.goAttach(s.purgeFile) // background goroutine that periodically purges WAL log files and snapshot files
s.goAttach(func() { monitorFileDescriptor(s.stopping) }) // background goroutine for monitoring-related functionality (file descriptor usage)
s.goAttach(s.monitorVersions) // background goroutine that monitors the versions of the other members, mainly used during version upgrades
s.goAttach(s.linearizableReadLoop) // background goroutine that implements linearizable reads
s.goAttach(s.monitorKVHash)
}
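// Note: each goroutine above is started via goAttach, which (per the wg/wgMu comments in the EtcdServer
// struct and the deferred cleanup in run()) registers it with the server's WaitGroup so that shutdown
// can wait for it to exit.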
// start prepares and starts server in a new goroutine. It is no longer safe to
// modify a server's fields after it has been sent to Start.
// This function is just used for testing.
// start initializes the remaining uninitialized fields of the EtcdServer instance and then launches a
// background goroutine that runs EtcdServer.run(). run() is the core of server startup: it starts the
// raftNode instance created earlier and processes the Ready instances returned by the etcd-raft module.
func (s *EtcdServer) start() {
if s.Cfg.SnapCount == 0 {
plog.Infof("set snapshot count to default %d", DefaultSnapCount)
s.Cfg.SnapCount = DefaultSnapCount
}
s.w = wait.New()
s.applyWait = wait.NewTimeList()
s.done = make(chan struct{})
s.stop = make(chan struct{})
s.stopping = make(chan struct{})
s.ctx, s.cancel = context.WithCancel(context.Background())
s.readwaitc = make(chan struct{}, 1)
s.readNotifier = newNotifier()
if s.ClusterVersion() != nil {
plog.Infof("starting server... [version: %v, cluster version: %v]", version.Version, version.Cluster(s.ClusterVersion().String()))
} else {
plog.Infof("starting server... [version: %v, cluster version: to_be_decided]", version.Version)
}
// TODO: if this is an empty log, writes all peer infos
// into the first entry
go s.run()
}
// purgeFile starts background goroutines that periodically purge the WAL log files and the snapshot files.
func (s *EtcdServer) purgeFile() {
var dberrc, serrc, werrc <-chan error
if s.Cfg.MaxSnapFiles > 0 {
dberrc = fileutil.PurgeFile(s.Cfg.SnapDir(), "snap.db", s.Cfg.MaxSnapFiles, purgeFileInterval, s.done)
// this starts a background goroutine that periodically purges snapshot files (purgeFileInterval defaults to 30s)
serrc = fileutil.PurgeFile(s.Cfg.SnapDir(), "snap", s.Cfg.MaxSnapFiles, purgeFileInterval, s.done)
}
if s.Cfg.MaxWALFiles > 0 {
// start a background goroutine that periodically purges WAL log files (purgeFileInterval defaults to 30s)
werrc = fileutil.PurgeFile(s.Cfg.WALDir(), "wal", s.Cfg.MaxWALFiles, purgeFileInterval, s.done)
}
select {
case e := <-dberrc:
plog.Fatalf("failed to purge snap db file %v", e)
case e := <-serrc:
plog.Fatalf("failed to purge snap file %v", e)
case e := <-werrc:
plog.Fatalf("failed to purge wal file %v", e)
case <-s.stopping:
return
}
}
func (s *EtcdServer) ID() types.ID { return s.id }
func (s *EtcdServer) Cluster() api.Cluster { return s.cluster }
func (s *EtcdServer) ApplyWait() <-chan struct{} { return s.applyWait.Wait(s.getCommittedIndex()) }
type ServerPeer interface {
ServerV2
RaftHandler() http.Handler
LeaseHandler() http.Handler
}
func (s *EtcdServer) LeaseHandler() http.Handler {
if s.lessor == nil {
return nil
}
return leasehttp.NewHandler(s.lessor, s.ApplyWait)
}
func (s *EtcdServer) RaftHandler() http.Handler { return s.r.transport.Handler() }
// Process takes a raft message and applies it to the server's raft state
// machine, respecting any timeout of the given context.
func (s *EtcdServer) Process(ctx context.Context, m raftpb.Message) error {
if s.cluster.IsIDRemoved(types.ID(m.From)) {
plog.Warningf("reject message from removed member %s", types.ID(m.From).String())
return httptypes.NewHTTPError(http.StatusForbidden, "cannot process message from removed member")
}
if m.Type == raftpb.MsgApp {
s.stats.RecvAppendReq(types.ID(m.From).String(), m.Size())
}
return s.r.Step(ctx, m)
}
func (s *EtcdServer) IsIDRemoved(id uint64) bool { return s.cluster.IsIDRemoved(types.ID(id)) }
func (s *EtcdServer) ReportUnreachable(id uint64) { s.r.ReportUnreachable(id) }
// ReportSnapshot reports snapshot sent status to the raft state machine,
// and clears the used snapshot from the snapshot store.
func (s *EtcdServer) ReportSnapshot(id uint64, status raft.SnapshotStatus) {
s.r.ReportSnapshot(id, status)
}
type etcdProgress struct {
confState raftpb.ConfState
snapi uint64
appliedt uint64
appliedi uint64
}
// Purpose of this struct: it lets raftNode update state recorded in EtcdServer, e.g. whether this node is the leader and the commit position of the Entry records.
// raftReadyHandler contains a set of EtcdServer operations to be called by raftNode,
// and helps decouple state machine logic from Raft algorithms.
// TODO: add a state machine interface to apply the commit entries and do snapshot/recover
type raftReadyHandler struct {
updateLeadership func(newLeader bool)
updateCommittedIndex func(uint64)
}
func (s *EtcdServer) run() {
sn, err := s.r.raftStorage.Snapshot()
if err != nil {
plog.Panicf("get snapshot from raft storage error: %v", err)
}
// asynchronously accept apply packets, dispatch progress in-order
sched := schedule.NewFIFOScheduler() // FIFO scheduler
var (
smu sync.RWMutex
syncC <-chan time.Time
)
setSyncC := func(ch <-chan time.Time) { // setSyncC() and getSyncC() manage the timer channel used to send SYNC messages
smu.Lock()
syncC = ch
smu.Unlock()
}
getSyncC := func() (ch <-chan time.Time) {
smu.RLock()
ch = syncC
smu.RUnlock()
return
}
// While processing the Ready.SoftState field returned by the etcd-raft module, raftNode calls the
// raftReadyHandler.updateLeadership() callback, which performs the appropriate actions depending on the
// state of the current node and on whether the leader has changed.
rh := &raftReadyHandler{
updateLeadership: func(newLeader bool) {
if !s.isLeader() {
if s.lessor != nil {
s.lessor.Demote() // demote this node's Lessor instance
}
if s.compactor != nil { // non-leader nodes pause automatic compaction
s.compactor.Pause()
}
setSyncC(nil) // non-leader nodes do not send SYNC messages, so clear the timer channel
} else {
if newLeader {
t := time.Now()
s.leadTimeMu.Lock()
// if the leadership changed and this node became the leader, record the time in leadElectedTime, which tracks the most recent transition of this node to the leader state
s.leadElectedTime = t
s.leadTimeMu.Unlock()
}
setSyncC(s.SyncTicker.C) // the leader sends SYNC messages periodically, so restore the timer channel
if s.compactor != nil { // resume automatic compaction
s.compactor.Resume()
}
}
// TODO: remove the nil checking
// current test utility does not provide the stats
if s.stats != nil {
s.stats.BecomeLeader()
}
},
// While raftNode processes an apply instance, it calls updateCommittedIndex(), which determines the current
// committedIndex from the Entry records and snapshot data carried by the apply instance and then invokes
// this callback to update the EtcdServer.committedIndex field.
updateCommittedIndex: func(ci uint64) {
cci := s.getCommittedIndex()
if ci > cci {
s.setCommittedIndex(ci)
}
},
}
// start raftNode, which launches a background goroutine that processes the Ready instances returned by the etcd-raft module
s.r.start(rh)
// record the metadata of the current snapshot and the position of the applied Entry records
ep := etcdProgress{
confState: sn.Metadata.ConfState,
snapi: sn.Metadata.Index,
appliedt: sn.Metadata.Term,
appliedi: sn.Metadata.Index,
}
defer func() {
s.wgMu.Lock() // block concurrent waitgroup adds in goAttach while stopping
close(s.stopping)
s.wgMu.Unlock()
s.cancel()
sched.Stop()
// wait for goroutines before closing raft so wal stays open
s.wg.Wait()
s.SyncTicker.Stop()
// must stop raft after scheduler-- etcdserver can leak rafthttp pipelines
// by adding a peer after raft stops the transport
s.r.stop()
// kv, lessor and backend can be nil if running without v3 enabled
// or running unit tests.
if s.lessor != nil {
s.lessor.Stop()
}
if s.kv != nil {
s.kv.Close()
}
if s.authStore != nil {
s.authStore.Close()
}
if s.be != nil {
s.be.Close()
}
if s.compactor != nil {
s.compactor.Stop()
}
close(s.done)
}()
var expiredLeaseC <-chan []*lease.Lease
if s.lessor != nil {
expiredLeaseC = s.lessor.ExpiredLeasesC()
}
for {
select {
case ap := <-s.r.apply(): // read apply instances from the raftNode.applyc channel and process them
f := func(context.Context) { s.applyAll(&ep, &ap) }
sched.Schedule(f)
case leases := <-expiredLeaseC: // listen on the expiredLeaseC channel
s.goAttach(func() { // start a separate goroutine
// Increases throughput of expired leases deletion process through parallelization
c := make(chan struct{}, maxPendingRevokes) // used as a semaphore to bound concurrent revocations (capacity 16)
for _, lease := range leases { // iterate over the expired Lease instances
select {
case c <- struct{}{}: // acquire a slot by sending an empty struct into c
case <-s.stopping:
return
}
lid := lease.ID
s.goAttach(func() { // start a background goroutine that revokes the given lease
ctx := s.authStore.WithRoot(s.ctx)
_, lerr := s.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: int64(lid)})
if lerr == nil {
leaseExpired.Inc()
} else {
plog.Warningf("failed to revoke %016x (%q)", lid, lerr.Error())
}
<-c
})
}
})
case err := <-s.errorc:
plog.Errorf("%s", err)
plog.Infof("the data-dir used by this member must be removed.")
return
case <-getSyncC(): // send SYNC messages periodically
if s.store.HasTTLKeys() { // if the v2 store contains only permanent nodes, there is no need to send SYNC
s.sync(s.Cfg.ReqTimeout()) // SYNC messages are sent to clean up expired nodes in the v2 store
}
case <-s.stop:
return
}
}
}
// applyAll first calls applySnapshot() to handle the snapshot data carried by the apply instance, then calls
// applyEntries() to handle its Entry records; based on the result it decides whether a new snapshot file
// needs to be generated, and finally it handles MsgSnap messages.
func (s *EtcdServer) applyAll(ep *etcdProgress, apply *apply) {
s.applySnapshot(ep, apply) // handle the snapshot data carried by the apply instance
s.applyEntries(ep, apply) // handle the Entry records carried by the apply instance
proposalsApplied.Set(float64(ep.appliedi))
// etcdProgress.appliedi records the index of the last applied Entry. WaitTime.Trigger() closes all channels
// registered for ids up to ep.appliedi, notifying the goroutines waiting on those channels.
s.applyWait.Trigger(ep.appliedi)
// wait for the raft routine to finish the disk writes before triggering a
// snapshot. or applied index might be greater than the last index in raft
// storage, since the raft routine might be slower than apply routine.
// (Once the Ready instance has mostly been processed, a signal is written to the notifyc channel telling
// this goroutine to check whether a snapshot needs to be generated.)
<-apply.notifyc
s.triggerSnapshot(ep) // decide based on the current state whether to trigger snapshot generation
select {
// snapshot requested via send()
// When raftNode processes a Ready instance it does not send MsgSnap messages directly; instead it writes
// them into the msgSnapC channel. Here the channel is read and the snapshot data is actually sent.
case m := <-s.r.msgSnapC:
// merge the v2 store snapshot data and the v3 store data into a complete snapshot
merged := s.createMergedSnapshotMessage(m, ep.appliedt, ep.appliedi, ep.confState)
s.sendMergedSnap(merged) // send the snapshot data
default:
}
}
// applySnapshot first waits for raftNode to persist the snapshot data to disk, then locates the BoltDB database file referenced by the snapshot metadata and rebuilds the Backend instance, and finally updates the local RaftCluster instance from the rebuilt store.
func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) {
if raft.IsEmptySnap(apply.snapshot) { // if the snapshot to apply is empty, return immediately
return
}
plog.Infof("applying snapshot at index %d...", ep.snapi)
defer plog.Infof("finished applying incoming snapshot at index %d", ep.snapi)
if apply.snapshot.Metadata.Index <= ep.appliedi { // if the index of the last Entry covered by the snapshot is not greater than the applied index of this node, panic
plog.Panicf("snapshot index [%d] should > appliedi[%d] + 1",
apply.snapshot.Metadata.Index, ep.appliedi)
}
// wait for raftNode to persist snapshot onto the disk
// after raftNode has written the snapshot data to the disk file, it writes an empty struct into the notifyc channel as a signal; block here waiting for that signal
<-apply.notifyc
// locate the BoltDB database file referenced by the snapshot and create a new Backend instance from it
newbe, err := openSnapshotBackend(s.Cfg, s.snapshotter, apply.snapshot)
if err != nil {
plog.Panic(err)
}
// always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases.
// If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers.
// (store.restore() not only rebuilds the in-memory index but also re-binds key-value pairs to their
// leases, so EtcdServer.lessor must be recovered before EtcdServer.kv.)
if s.lessor != nil {
plog.Info("recovering lessor...")
s.lessor.Recover(newbe, func() lease.TxnDelete { return s.kv.Write() })
plog.Info("finished recovering lessor")
}
plog.Info("restoring mvcc store...")
if err := s.kv.Restore(newbe); err != nil { // restore the mvcc store from the new backend; consistIndex is reset from it just below
plog.Panicf("restore KV error: %v", err)
}
s.consistIndex.setConsistentIndex(s.kv.ConsistentIndex())
plog.Info("finished restoring mvcc store")
// Closing old backend might block until all the txns
// on the backend are finished.
// We do not want to wait on closing the old backend.
s.bemu.Lock()
oldbe := s.be
go func() {
plog.Info("closing old backend...")
defer plog.Info("finished closing old backend")
// transactions may still be running on the old backend, so closing it may block; close it in a background goroutine
if err := oldbe.Close(); err != nil {
plog.Panicf("close backend error: %v", err)
}
}()
s.be = newbe // switch the EtcdServer instance over to the new Backend instance
s.bemu.Unlock()
plog.Info("recovering alarms...")
// recover the EtcdServer's alarmStore and authStore, which correspond to the alarm and auth Buckets in BoltDB
if err := s.restoreAlarms(); err != nil {
plog.Panicf("restore alarms error: %v", err)
}
plog.Info("finished recovering alarms")
if s.authStore != nil {
plog.Info("recovering auth store...")
s.authStore.Recover(newbe)
plog.Info("finished recovering auth store")
}
plog.Info("recovering store v2...")
if err := s.store.Recovery(apply.snapshot.Data); err != nil { // recover the v2 store
plog.Panicf("recovery store error: %v", err)
}
plog.Info("finished recovering store v2")
s.cluster.SetBackend(s.be) // set the RaftCluster backend instance
plog.Info("recovering cluster configuration...")
s.cluster.Recover(api.UpdateCapability) // recover the local cluster information
plog.Info("finished recovering cluster configuration")
plog.Info("removing old peers from network...")
// recover raft transport
s.r.transport.RemoveAllPeers() // remove all Peer instances from the Transport; they are re-added below from the recovered RaftCluster instance
plog.Info("finished removing old peers from network")
plog.Info("adding peers from new cluster configuration into network...")
for _, m := range s.cluster.Members() {
if m.ID == s.ID() {
continue
}
s.r.transport.AddPeer(m.ID, m.PeerURLs)
}
plog.Info("finished adding peers from new cluster configuration into network...")
// update etcdProgress: the Term and Index of the applied Entry records and the snapshot-related information
ep.appliedt = apply.snapshot.Metadata.Term
ep.appliedi = apply.snapshot.Metadata.Index
ep.snapi = ep.appliedi
ep.confState = apply.snapshot.Metadata.ConfState
}
// After the snapshot data has been applied, the run goroutine calls EtcdServer.applyEntries to process the Entry records waiting to be applied.
func (s *EtcdServer) applyEntries(ep *etcdProgress, apply *apply) {
if len(apply.entries) == 0 {
return
}
firsti := apply.entries[0].Index // index of the first Entry record to apply
if firsti > ep.appliedi+1 { // check that the first Entry record to apply is valid
plog.Panicf("first index of committed entry[%d] should <= appliedi[%d] + 1", firsti, ep.appliedi)