// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: rpcpb/rpc.proto
/*
Package rpcpb is a generated protocol buffer package.
It is generated from these files:
rpcpb/rpc.proto
It has these top-level messages:
Request
SnapshotInfo
Response
Member
Tester
Etcd
*/
package rpcpb
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
import context "golang.org/x/net/context"
import grpc "google.golang.org/grpc"
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type Operation int32
const (
// NOT_STARTED is the agent status before etcd first start.
Operation_NOT_STARTED Operation = 0
// INITIAL_START_ETCD is only called to start etcd, the very first time.
Operation_INITIAL_START_ETCD Operation = 10
// RESTART_ETCD is sent to restart killed etcd.
Operation_RESTART_ETCD Operation = 11
// SIGTERM_ETCD pauses etcd process while keeping data directories
// and previous etcd configurations.
Operation_SIGTERM_ETCD Operation = 20
// SIGQUIT_ETCD_AND_REMOVE_DATA kills etcd process and removes all data
// directories to simulate destroying the whole machine.
Operation_SIGQUIT_ETCD_AND_REMOVE_DATA Operation = 21
// SAVE_SNAPSHOT is sent to trigger local member to download its snapshot
// onto its local disk with the specified path from tester.
Operation_SAVE_SNAPSHOT Operation = 30
// RESTORE_RESTART_FROM_SNAPSHOT is sent to trigger local member to
// restore a cluster from existing snapshot from disk, and restart
// an etcd instance from recovered data.
Operation_RESTORE_RESTART_FROM_SNAPSHOT Operation = 31
// RESTART_FROM_SNAPSHOT is sent to trigger local member to restart
// and join an existing cluster that has been recovered from a snapshot.
// Local member joins this cluster with fresh data.
Operation_RESTART_FROM_SNAPSHOT Operation = 32
// SIGQUIT_ETCD_AND_ARCHIVE_DATA is sent when a consistency check has failed,
// thus etcd data directories need to be archived.
Operation_SIGQUIT_ETCD_AND_ARCHIVE_DATA Operation = 40
// SIGQUIT_ETCD_AND_REMOVE_DATA_AND_STOP_AGENT destroys etcd process,
// etcd data, and agent server.
Operation_SIGQUIT_ETCD_AND_REMOVE_DATA_AND_STOP_AGENT Operation = 41
// BLACKHOLE_PEER_PORT_TX_RX drops all outgoing/incoming packets from/to
// the peer port on the target member.
Operation_BLACKHOLE_PEER_PORT_TX_RX Operation = 100
// UNBLACKHOLE_PEER_PORT_TX_RX removes outgoing/incoming packet dropping.
Operation_UNBLACKHOLE_PEER_PORT_TX_RX Operation = 101
// DELAY_PEER_PORT_TX_RX delays all outgoing/incoming packets from/to
// the peer port on the target member.
Operation_DELAY_PEER_PORT_TX_RX Operation = 200
// UNDELAY_PEER_PORT_TX_RX removes all outgoing/incoming delays.
Operation_UNDELAY_PEER_PORT_TX_RX Operation = 201
)
var Operation_name = map[int32]string{
0: "NOT_STARTED",
10: "INITIAL_START_ETCD",
11: "RESTART_ETCD",
20: "SIGTERM_ETCD",
21: "SIGQUIT_ETCD_AND_REMOVE_DATA",
30: "SAVE_SNAPSHOT",
31: "RESTORE_RESTART_FROM_SNAPSHOT",
32: "RESTART_FROM_SNAPSHOT",
40: "SIGQUIT_ETCD_AND_ARCHIVE_DATA",
41: "SIGQUIT_ETCD_AND_REMOVE_DATA_AND_STOP_AGENT",
100: "BLACKHOLE_PEER_PORT_TX_RX",
101: "UNBLACKHOLE_PEER_PORT_TX_RX",
200: "DELAY_PEER_PORT_TX_RX",
201: "UNDELAY_PEER_PORT_TX_RX",
}
var Operation_value = map[string]int32{
"NOT_STARTED": 0,
"INITIAL_START_ETCD": 10,
"RESTART_ETCD": 11,
"SIGTERM_ETCD": 20,
"SIGQUIT_ETCD_AND_REMOVE_DATA": 21,
"SAVE_SNAPSHOT": 30,
"RESTORE_RESTART_FROM_SNAPSHOT": 31,
"RESTART_FROM_SNAPSHOT": 32,
"SIGQUIT_ETCD_AND_ARCHIVE_DATA": 40,
"SIGQUIT_ETCD_AND_REMOVE_DATA_AND_STOP_AGENT": 41,
"BLACKHOLE_PEER_PORT_TX_RX": 100,
"UNBLACKHOLE_PEER_PORT_TX_RX": 101,
"DELAY_PEER_PORT_TX_RX": 200,
"UNDELAY_PEER_PORT_TX_RX": 201,
}
func (x Operation) String() string {
return proto.EnumName(Operation_name, int32(x))
}
func (Operation) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{0} }
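// exampleOperationLookup is an illustrative sketch, not part of the generated
// protobuf output: it shows how the Operation_name/Operation_value maps above
// translate between enum names and values, for example when reading an
// operation name from a tester configuration file.
func exampleOperationLookup(name string) (Operation, bool) {
	v, ok := Operation_value[name] // e.g. "RESTART_ETCD" -> 11
	if !ok {
		return Operation_NOT_STARTED, false
	}
	return Operation(v), true
}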
// Case defines various system faults or test case in distributed systems,
// in order to verify correct behavior of etcd servers and clients.
type Case int32
const (
// SIGTERM_ONE_FOLLOWER stops a randomly chosen follower (non-leader)
// but does not delete its data directories on disk for next restart.
// It waits "delay-ms" before recovering this failure.
// The expected behavior is that the follower comes back online
// and rejoins the cluster, and then each member continues to process
// client requests ('Put' request that requires Raft consensus).
Case_SIGTERM_ONE_FOLLOWER Case = 0
// SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT stops a randomly chosen
// follower but does not delete its data directories on disk for next
// restart. And waits until most up-to-date node (leader) applies the
// snapshot count of entries since the stop operation.
// The expected behavior is that the follower comes back online and
// rejoins the cluster, and then active leader sends snapshot
// to the follower to force it to follow the leader's log.
// As always, after recovery, each member must be able to process
// client requests.
Case_SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT Case = 1
// SIGTERM_LEADER stops the active leader node but does not delete its
// data directories on disk for next restart. Then it waits "delay-ms"
// before recovering this failure, in order to trigger election timeouts.
// The expected behavior is that a new leader gets elected, and the
// old leader comes back online and rejoins the cluster as a follower.
// As always, after recovery, each member must be able to process
// client requests.
Case_SIGTERM_LEADER Case = 2
// SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT stops the active leader node
// but does not delete its data directories on disk for next restart.
// And waits until most up-to-date node ("new" leader) applies the
// snapshot count of entries since the stop operation.
// The expected behavior is that cluster elects a new leader, and the
// old leader comes back online and rejoins the cluster as a follower.
// And it receives the snapshot from the new leader to overwrite its
// store. As always, after recovery, each member must be able to
// process client requests.
Case_SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT Case = 3
// SIGTERM_QUORUM stops a majority of nodes to make the whole cluster
// inoperable but does not delete data directories on stopped nodes
// for next restart. And it waits "delay-ms" before recovering failure.
// The expected behavior is that nodes come back online, thus cluster
// comes back operative as well. As always, after recovery, each member
// must be able to process client requests.
Case_SIGTERM_QUORUM Case = 4
// SIGTERM_ALL stops the whole cluster but does not delete data directories
// on disk for next restart. And it waits "delay-ms" before recovering
// this failure.
// The expected behavior is that nodes come back online, thus cluster
// comes back operative as well. As always, after recovery, each member
// must be able to process client requests.
Case_SIGTERM_ALL Case = 5
// SIGQUIT_AND_REMOVE_ONE_FOLLOWER stops a randomly chosen follower
// (non-leader), deletes its data directories on disk, and removes
// this member from cluster (membership reconfiguration). On recovery,
// tester adds a new member, and this member joins the existing cluster
// with fresh data. It waits "delay-ms" before recovering this
// failure. This simulates destroying one follower machine, where operator
// needs to add a new member from a fresh machine.
// The expected behavior is that a new member joins the existing cluster,
// and then each member continues to process client requests.
Case_SIGQUIT_AND_REMOVE_ONE_FOLLOWER Case = 10
// SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT stops a randomly
// chosen follower, deletes its data directories on disk, and removes
// this member from cluster (membership reconfiguration). On recovery,
// tester adds a new member, and this member joins the existing cluster
// with fresh data. On member remove, cluster waits until most up-to-date node
// (leader) applies the snapshot count of entries since the stop operation.
// This simulates destroying a leader machine, where operator needs to add
// a new member from a fresh machine.
// The expected behavior is that a new member joins the existing cluster,
// and receives a snapshot from the active leader. As always, after
// recovery, each member must be able to process client requests.
Case_SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT Case = 11
// SIGQUIT_AND_REMOVE_LEADER stops the active leader node, deletes its
// data directories on disk, and removes this member from cluster.
// On recovery, tester adds a new member, and this member joins the
// existing cluster with fresh data. It waits "delay-ms" before
// recovering this failure. This simulates destroying a leader machine,
// where operator needs to add a new member from a fresh machine.
// The expected behavior is that a new member joins the existing cluster,
// and then each member continues to process client requests.
Case_SIGQUIT_AND_REMOVE_LEADER Case = 12
// SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT stops the active leader,
// deletes its data directories on disk, and removes this member from
// cluster (membership reconfiguration). On recovery, tester adds a new
// member, and this member joins the existing cluster with fresh data. On member
// remove, cluster waits until most up-to-date node (new leader) applies
// the snapshot count of entries since the stop operation. This simulates
// destroying a leader machine, where operator needs to add a new member
// from a fresh machine.
// The expected behavior is that on member remove, cluster elects a new
// leader, and a new member joins the existing cluster and receives a
// snapshot from the newly elected leader. As always, after recovery, each
// member must be able to process client requests.
Case_SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT Case = 13
// SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH first
// stops a majority of nodes, deletes data directories on those quorum
// nodes, to make the whole cluster inoperable. Now that quorum and their
// data are totally destroyed, cluster cannot even remove unavailable nodes
// (e.g. 2 out of 3 are lost, so no leader can be elected).
// Let's assume 3-node cluster of node A, B, and C. One day, node A and B
// are destroyed and all their data are gone. The only viable solution is
// to recover from C's latest snapshot.
//
// To simulate:
// 1. Assume node C is the current leader with most up-to-date data.
// 2. Download snapshot from node C, before destroying node A and B.
// 3. Destroy node A and B, and make the whole cluster inoperable.
// 4. Now node C cannot operate either.
// 5. SIGTERM node C and remove its data directories.
// 6. Restore a new seed member from node C's latest snapshot file.
// 7. Add another member to establish 2-node cluster.
// 8. Add another member to establish 3-node cluster.
// 9. Add more if any.
//
// The expected behavior is that etcd successfully recovers from such a
// disastrous situation, where only 1 node survives out of a 3-node cluster:
// new members join the existing cluster, and previous data from the snapshot
// is still preserved after the recovery process. As always, after recovery,
// each member must be able to process client requests.
Case_SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH Case = 14
// BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER drops all outgoing/incoming
// packets from/to the peer port on a randomly chosen follower
// (non-leader), and waits for "delay-ms" until recovery.
// The expected behavior is that once dropping operation is undone,
// each member must be able to process client requests.
Case_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER Case = 100
// BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT drops
// all outgoing/incoming packets from/to the peer port on a randomly
// chosen follower (non-leader), and waits for most up-to-date node
// (leader) applies the snapshot count of entries since the blackhole
// operation.
// The expected behavior is that once packet drop operation is undone,
// the slow follower tries to catch up, possibly receiving the snapshot
// from the active leader. As always, after recovery, each member must
// be able to process client requests.
Case_BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT Case = 101
// BLACKHOLE_PEER_PORT_TX_RX_LEADER drops all outgoing/incoming packets
// from/to the peer port on the active leader (isolated), and waits for
// "delay-ms" until recovery, in order to trigger election timeout.
// The expected behavior is that after election timeout, a new leader gets
// elected, and once dropping operation is undone, the old leader comes
// back and rejoins the cluster as a follower. As always, after recovery,
// each member must be able to process client requests.
Case_BLACKHOLE_PEER_PORT_TX_RX_LEADER Case = 102
// BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT drops all
// outgoing/incoming packets from/to the peer port on the active leader,
// and waits for most up-to-date node (leader) applies the snapshot
// count of entries since the blackhole operation.
// The expected behavior is that cluster elects a new leader, and once
// dropping operation is undone, the old leader comes back and rejoins
// the cluster as a follower. The slow follower tries to catch up, likely
// receiving the snapshot from the new active leader. As always, after
// recovery, each member must be able to process client requests.
Case_BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT Case = 103
// BLACKHOLE_PEER_PORT_TX_RX_QUORUM drops all outgoing/incoming packets
// from/to the peer ports on majority nodes of cluster, thus losing its
// leader and cluster being inoperable. And it waits for "delay-ms"
// until recovery.
// The expected behavior is that once packet drop operation is undone,
// nodes come back online, thus cluster comes back operative. As always,
// after recovery, each member must be able to process client requests.
Case_BLACKHOLE_PEER_PORT_TX_RX_QUORUM Case = 104
// BLACKHOLE_PEER_PORT_TX_RX_ALL drops all outgoing/incoming packets
// from/to the peer ports on all nodes, thus making cluster totally
// inoperable. It waits for "delay-ms" until recovery.
// The expected behavior is that once packet drop operation is undone,
// nodes come back online, thus cluster comes back operative. As always,
// after recovery, each member must be able to process client requests.
Case_BLACKHOLE_PEER_PORT_TX_RX_ALL Case = 105
// DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER delays outgoing/incoming packets
// from/to the peer port on a randomly chosen follower (non-leader).
// It waits for "delay-ms" until recovery.
// The expected behavior is that once packet delay operation is undone,
// the follower comes back and tries to catch up with latest changes from
// cluster. And as always, after recovery, each member must be able to
// process client requests.
Case_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER Case = 200
// RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER delays outgoing/incoming
// packets from/to the peer port on a randomly chosen follower
// (non-leader) with a randomized time duration (thus isolated). It
// waits for "delay-ms" until recovery.
// The expected behavior is that once packet delay operation is undone,
// each member must be able to process client requests.
Case_RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER Case = 201
// DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT delays
// outgoing/incoming packets from/to the peer port on a randomly chosen
// follower (non-leader), and waits for most up-to-date node (leader)
// applies the snapshot count of entries since the delay operation.
// The expected behavior is that the delayed follower gets isolated
// and behind the current active leader, and once delay operation is undone,
// the slow follower comes back and catches up possibly receiving snapshot
// from the active leader. As always, after recovery, each member must be
// able to process client requests.
Case_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT Case = 202
// RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT delays
// outgoing/incoming packets from/to the peer port on a randomly chosen
// follower (non-leader) with a randomized time duration, and waits for
// most up-to-date node (leader) applies the snapshot count of entries
// since the delay operation.
// The expected behavior is that the delayed follower gets isolated
// and behind the current active leader, and once delay operation is undone,
// the slow follower comes back and catches up, possibly receiving a
// snapshot from the active leader. As always, after recovery, each member
// must be able to process client requests.
Case_RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT Case = 203
// DELAY_PEER_PORT_TX_RX_LEADER delays outgoing/incoming packets from/to
// the peer port on the active leader. And waits for "delay-ms" until
// recovery.
// The expected behavior is that cluster may elect a new leader, and
// once packet delay operation is undone, the (old) leader comes back
// and tries to catch up with latest changes from cluster. As always,
// after recovery, each member must be able to process client requests.
Case_DELAY_PEER_PORT_TX_RX_LEADER Case = 204
// RANDOM_DELAY_PEER_PORT_TX_RX_LEADER delays outgoing/incoming packets
// from/to the peer port on the active leader with a randomized time
// duration. And waits for "delay-ms" until recovery.
// The expected behavior is that cluster may elect a new leader, and
// once packet delay operation is undone, the (old) leader comes back
// and tries to catch up with latest changes from cluster. As always,
// after recovery, each member must be able to process client requests.
Case_RANDOM_DELAY_PEER_PORT_TX_RX_LEADER Case = 205
// DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT delays
// outgoing/incoming packets from/to the peer port on the active leader,
// and waits for most up-to-date node (current or new leader) applies the
// snapshot count of entries since the delay operation.
// The expected behavior is that cluster may elect a new leader, and
// the old leader gets isolated and behind the current active leader,
// and once delay operation is undone, the slow follower comes back
// and catches up, likely receiving a snapshot from the active leader.
// As always, after recovery, each member must be able to process client
// requests.
Case_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT Case = 206
// RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT delays
// outgoing/incoming packets from/to the peer port on the active leader,
// with a randomized time duration. And it waits for most up-to-date node
// (current or new leader) applies the snapshot count of entries since the
// delay operation.
// The expected behavior is that cluster may elect a new leader, and
// the old leader gets isolated and behind the current active leader,
// and once delay operation is undone, the slow follower comes back
// and catches up, likely receiving a snapshot from the active leader.
// As always, after recovery, each member must be able to process client
// requests.
Case_RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT Case = 207
// DELAY_PEER_PORT_TX_RX_QUORUM delays outgoing/incoming packets from/to
// the peer ports on majority nodes of cluster. And it waits for
// "delay-ms" until recovery, likely to trigger election timeouts.
// The expected behavior is that cluster may elect a new leader, while
// quorum of nodes struggle with slow networks, and once delay operation
// is undone, nodes come back and cluster comes back operative. As always,
// after recovery, each member must be able to process client requests.
Case_DELAY_PEER_PORT_TX_RX_QUORUM Case = 208
// RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM delays outgoing/incoming packets
// from/to the peer ports on majority nodes of cluster, with randomized
// time durations. And it waits for "delay-ms" until recovery, likely
// to trigger election timeouts.
// The expected behavior is that cluster may elect a new leader, while
// quorum of nodes struggle with slow networks, and once delay operation
// is undone, nodes come back and cluster comes back operative. As always,
// after recovery, each member must be able to process client requests.
Case_RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM Case = 209
// DELAY_PEER_PORT_TX_RX_ALL delays outgoing/incoming packets from/to the
// peer ports on all nodes. And it waits for "delay-ms" until recovery,
// likely to trigger election timeouts.
// The expected behavior is that cluster may become totally inoperable,
// struggling with slow networks across the whole cluster. Once delay
// operation is undone, nodes come back and cluster comes back operative.
// As always, after recovery, each member must be able to process client
// requests.
Case_DELAY_PEER_PORT_TX_RX_ALL Case = 210
// RANDOM_DELAY_PEER_PORT_TX_RX_ALL delays outgoing/incoming packets
// from/to the peer ports on all nodes, with randomized time durations.
// And it waits for "delay-ms" until recovery, likely to trigger
// election timeouts.
// The expected behavior is that cluster may become totally inoperable,
// struggling with slow networks across the whole cluster. Once delay
// operation is undone, nodes come back and cluster comes back operative.
// As always, after recovery, each member must be able to process client
// requests.
Case_RANDOM_DELAY_PEER_PORT_TX_RX_ALL Case = 211
// NO_FAIL_WITH_STRESS stops injecting failures while testing the
// consistency and correctness under pressure loads, for the duration of
// "delay-ms". Goal is to ensure cluster be still making progress
// on recovery, and verify system does not deadlock following a sequence
// of failure injections.
// The expected behavior is that cluster remains fully operative in healthy
// condition. As always, after recovery, each member must be able to process
// client requests.
Case_NO_FAIL_WITH_STRESS Case = 300
// NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS neither injects failures nor
// sends stressing client requests to the cluster, for the duration of
// "delay-ms". The goal is to ensure the cluster is still making progress
// on recovery, and to verify the system does not deadlock following a
// sequence of failure injections.
// The expected behavior is that cluster remains fully operative in healthy
// condition, and clients requests during liveness period succeed without
// errors.
// Note: this is how Google Chubby does failure injection testing
// https://static.googleusercontent.com/media/research.google.com/en//archive/paxos_made_live.pdf.
Case_NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS Case = 301
// FAILPOINTS injects failpoints to etcd server runtime, triggering panics
// in critical code paths.
Case_FAILPOINTS Case = 400
// EXTERNAL runs external failure injection scripts.
Case_EXTERNAL Case = 500
)
var Case_name = map[int32]string{
0: "SIGTERM_ONE_FOLLOWER",
1: "SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT",
2: "SIGTERM_LEADER",
3: "SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT",
4: "SIGTERM_QUORUM",
5: "SIGTERM_ALL",
10: "SIGQUIT_AND_REMOVE_ONE_FOLLOWER",
11: "SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT",
12: "SIGQUIT_AND_REMOVE_LEADER",
13: "SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT",
14: "SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH",
100: "BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER",
101: "BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT",
102: "BLACKHOLE_PEER_PORT_TX_RX_LEADER",
103: "BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT",
104: "BLACKHOLE_PEER_PORT_TX_RX_QUORUM",
105: "BLACKHOLE_PEER_PORT_TX_RX_ALL",
200: "DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER",
201: "RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER",
202: "DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT",
203: "RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT",
204: "DELAY_PEER_PORT_TX_RX_LEADER",
205: "RANDOM_DELAY_PEER_PORT_TX_RX_LEADER",
206: "DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT",
207: "RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT",
208: "DELAY_PEER_PORT_TX_RX_QUORUM",
209: "RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM",
210: "DELAY_PEER_PORT_TX_RX_ALL",
211: "RANDOM_DELAY_PEER_PORT_TX_RX_ALL",
300: "NO_FAIL_WITH_STRESS",
301: "NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS",
400: "FAILPOINTS",
500: "EXTERNAL",
}
var Case_value = map[string]int32{
"SIGTERM_ONE_FOLLOWER": 0,
"SIGTERM_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT": 1,
"SIGTERM_LEADER": 2,
"SIGTERM_LEADER_UNTIL_TRIGGER_SNAPSHOT": 3,
"SIGTERM_QUORUM": 4,
"SIGTERM_ALL": 5,
"SIGQUIT_AND_REMOVE_ONE_FOLLOWER": 10,
"SIGQUIT_AND_REMOVE_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT": 11,
"SIGQUIT_AND_REMOVE_LEADER": 12,
"SIGQUIT_AND_REMOVE_LEADER_UNTIL_TRIGGER_SNAPSHOT": 13,
"SIGQUIT_AND_REMOVE_QUORUM_AND_RESTORE_LEADER_SNAPSHOT_FROM_SCRATCH": 14,
"BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER": 100,
"BLACKHOLE_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT": 101,
"BLACKHOLE_PEER_PORT_TX_RX_LEADER": 102,
"BLACKHOLE_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT": 103,
"BLACKHOLE_PEER_PORT_TX_RX_QUORUM": 104,
"BLACKHOLE_PEER_PORT_TX_RX_ALL": 105,
"DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER": 200,
"RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER": 201,
"DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT": 202,
"RANDOM_DELAY_PEER_PORT_TX_RX_ONE_FOLLOWER_UNTIL_TRIGGER_SNAPSHOT": 203,
"DELAY_PEER_PORT_TX_RX_LEADER": 204,
"RANDOM_DELAY_PEER_PORT_TX_RX_LEADER": 205,
"DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT": 206,
"RANDOM_DELAY_PEER_PORT_TX_RX_LEADER_UNTIL_TRIGGER_SNAPSHOT": 207,
"DELAY_PEER_PORT_TX_RX_QUORUM": 208,
"RANDOM_DELAY_PEER_PORT_TX_RX_QUORUM": 209,
"DELAY_PEER_PORT_TX_RX_ALL": 210,
"RANDOM_DELAY_PEER_PORT_TX_RX_ALL": 211,
"NO_FAIL_WITH_STRESS": 300,
"NO_FAIL_WITH_NO_STRESS_FOR_LIVENESS": 301,
"FAILPOINTS": 400,
"EXTERNAL": 500,
}
func (x Case) String() string {
return proto.EnumName(Case_name, int32(x))
}
func (Case) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{1} }
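// exampleCaseFromName is an illustrative sketch, not generated code: it maps a
// case name (as listed in the Tester.Cases configuration field) back to the
// Case enum defined above, reporting whether the name is known.
func exampleCaseFromName(name string) (Case, bool) {
	v, ok := Case_value[name]
	return Case(v), ok
}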
type Stresser int32
const (
Stresser_KV Stresser = 0
Stresser_LEASE Stresser = 1
Stresser_ELECTION_RUNNER Stresser = 2
Stresser_WATCH_RUNNER Stresser = 3
Stresser_LOCK_RACER_RUNNER Stresser = 4
Stresser_LEASE_RUNNER Stresser = 5
)
var Stresser_name = map[int32]string{
0: "KV",
1: "LEASE",
2: "ELECTION_RUNNER",
3: "WATCH_RUNNER",
4: "LOCK_RACER_RUNNER",
5: "LEASE_RUNNER",
}
var Stresser_value = map[string]int32{
"KV": 0,
"LEASE": 1,
"ELECTION_RUNNER": 2,
"WATCH_RUNNER": 3,
"LOCK_RACER_RUNNER": 4,
"LEASE_RUNNER": 5,
}
func (x Stresser) String() string {
return proto.EnumName(Stresser_name, int32(x))
}
func (Stresser) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{2} }
type Checker int32
const (
Checker_KV_HASH Checker = 0
Checker_LEASE_EXPIRE Checker = 1
Checker_RUNNER Checker = 2
Checker_NO_CHECK Checker = 3
)
var Checker_name = map[int32]string{
0: "KV_HASH",
1: "LEASE_EXPIRE",
2: "RUNNER",
3: "NO_CHECK",
}
var Checker_value = map[string]int32{
"KV_HASH": 0,
"LEASE_EXPIRE": 1,
"RUNNER": 2,
"NO_CHECK": 3,
}
func (x Checker) String() string {
return proto.EnumName(Checker_name, int32(x))
}
func (Checker) EnumDescriptor() ([]byte, []int) { return fileDescriptorRpc, []int{3} }
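// exampleValidateTesterLists is an illustrative sketch, not generated code: it
// checks that the Stressers and Checkers string lists of a Tester
// configuration contain only names defined by the Stresser and Checker enums
// above.
func exampleValidateTesterLists(stressers, checkers []string) error {
	for _, s := range stressers {
		if _, ok := Stresser_value[s]; !ok {
			return fmt.Errorf("unknown stresser %q", s)
		}
	}
	for _, c := range checkers {
		if _, ok := Checker_value[c]; !ok {
			return fmt.Errorf("unknown checker %q", c)
		}
	}
	return nil
}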
type Request struct {
Operation Operation `protobuf:"varint,1,opt,name=Operation,proto3,enum=rpcpb.Operation" json:"Operation,omitempty"`
// Member contains the same Member object from tester configuration.
Member *Member `protobuf:"bytes,2,opt,name=Member" json:"Member,omitempty"`
// Tester contains tester configuration.
Tester *Tester `protobuf:"bytes,3,opt,name=Tester" json:"Tester,omitempty"`
}
func (m *Request) Reset() { *m = Request{} }
func (m *Request) String() string { return proto.CompactTextString(m) }
func (*Request) ProtoMessage() {}
func (*Request) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{0} }
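// exampleRestartRequest is an illustrative sketch, not generated code: it
// builds a Request asking an agent to restart its etcd member, wiring in the
// Member and Tester objects from the tester configuration.
func exampleRestartRequest(m *Member, t *Tester) *Request {
	return &Request{
		Operation: Operation_RESTART_ETCD,
		Member:    m, // same Member object from tester configuration
		Tester:    t, // tester configuration
	}
}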
// SnapshotInfo contains SAVE_SNAPSHOT request results.
type SnapshotInfo struct {
MemberName string `protobuf:"bytes,1,opt,name=MemberName,proto3" json:"MemberName,omitempty"`
MemberClientURLs []string `protobuf:"bytes,2,rep,name=MemberClientURLs" json:"MemberClientURLs,omitempty"`
SnapshotPath string `protobuf:"bytes,3,opt,name=SnapshotPath,proto3" json:"SnapshotPath,omitempty"`
SnapshotFileSize string `protobuf:"bytes,4,opt,name=SnapshotFileSize,proto3" json:"SnapshotFileSize,omitempty"`
SnapshotTotalSize string `protobuf:"bytes,5,opt,name=SnapshotTotalSize,proto3" json:"SnapshotTotalSize,omitempty"`
SnapshotTotalKey int64 `protobuf:"varint,6,opt,name=SnapshotTotalKey,proto3" json:"SnapshotTotalKey,omitempty"`
SnapshotHash int64 `protobuf:"varint,7,opt,name=SnapshotHash,proto3" json:"SnapshotHash,omitempty"`
SnapshotRevision int64 `protobuf:"varint,8,opt,name=SnapshotRevision,proto3" json:"SnapshotRevision,omitempty"`
Took string `protobuf:"bytes,9,opt,name=Took,proto3" json:"Took,omitempty"`
}
func (m *SnapshotInfo) Reset() { *m = SnapshotInfo{} }
func (m *SnapshotInfo) String() string { return proto.CompactTextString(m) }
func (*SnapshotInfo) ProtoMessage() {}
func (*SnapshotInfo) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{1} }
type Response struct {
Success bool `protobuf:"varint,1,opt,name=Success,proto3" json:"Success,omitempty"`
Status string `protobuf:"bytes,2,opt,name=Status,proto3" json:"Status,omitempty"`
// Member contains the same Member object from tester request.
Member *Member `protobuf:"bytes,3,opt,name=Member" json:"Member,omitempty"`
// SnapshotInfo contains SAVE_SNAPSHOT request results.
SnapshotInfo *SnapshotInfo `protobuf:"bytes,4,opt,name=SnapshotInfo" json:"SnapshotInfo,omitempty"`
}
func (m *Response) Reset() { *m = Response{} }
func (m *Response) String() string { return proto.CompactTextString(m) }
func (*Response) ProtoMessage() {}
func (*Response) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{2} }
type Member struct {
// EtcdExecPath is the executable etcd binary path in agent server.
EtcdExecPath string `protobuf:"bytes,1,opt,name=EtcdExecPath,proto3" json:"EtcdExecPath,omitempty" yaml:"etcd-exec-path"`
// AgentAddr is the agent HTTP server address.
AgentAddr string `protobuf:"bytes,11,opt,name=AgentAddr,proto3" json:"AgentAddr,omitempty" yaml:"agent-addr"`
// FailpointHTTPAddr is the agent's failpoints HTTP server address.
FailpointHTTPAddr string `protobuf:"bytes,12,opt,name=FailpointHTTPAddr,proto3" json:"FailpointHTTPAddr,omitempty" yaml:"failpoint-http-addr"`
// BaseDir is the base directory where all logs and etcd data are stored.
BaseDir string `protobuf:"bytes,101,opt,name=BaseDir,proto3" json:"BaseDir,omitempty" yaml:"base-dir"`
// EtcdLogPath is the log file to store current etcd server logs.
EtcdLogPath string `protobuf:"bytes,102,opt,name=EtcdLogPath,proto3" json:"EtcdLogPath,omitempty" yaml:"etcd-log-path"`
// EtcdClientProxy is true when client traffic needs to be proxied.
// If true, listen client URL port must be different than advertise client URL port.
EtcdClientProxy bool `protobuf:"varint,201,opt,name=EtcdClientProxy,proto3" json:"EtcdClientProxy,omitempty" yaml:"etcd-client-proxy"`
// EtcdPeerProxy is true when peer traffic needs to be proxied.
// If true, listen peer URL port must be different than advertise peer URL port.
EtcdPeerProxy bool `protobuf:"varint,202,opt,name=EtcdPeerProxy,proto3" json:"EtcdPeerProxy,omitempty" yaml:"etcd-peer-proxy"`
// EtcdClientEndpoint is the etcd client endpoint.
EtcdClientEndpoint string `protobuf:"bytes,301,opt,name=EtcdClientEndpoint,proto3" json:"EtcdClientEndpoint,omitempty" yaml:"etcd-client-endpoint"`
// Etcd defines etcd binary configuration flags.
Etcd *Etcd `protobuf:"bytes,302,opt,name=Etcd" json:"Etcd,omitempty" yaml:"etcd"`
// EtcdOnSnapshotRestore defines one-time use configuration during etcd
// snapshot recovery process.
EtcdOnSnapshotRestore *Etcd `protobuf:"bytes,303,opt,name=EtcdOnSnapshotRestore" json:"EtcdOnSnapshotRestore,omitempty"`
// ClientCertData contains cert file contents from this member's etcd server.
ClientCertData string `protobuf:"bytes,401,opt,name=ClientCertData,proto3" json:"ClientCertData,omitempty" yaml:"client-cert-data"`
ClientCertPath string `protobuf:"bytes,402,opt,name=ClientCertPath,proto3" json:"ClientCertPath,omitempty" yaml:"client-cert-path"`
// ClientKeyData contains key file contents from this member's etcd server.
ClientKeyData string `protobuf:"bytes,403,opt,name=ClientKeyData,proto3" json:"ClientKeyData,omitempty" yaml:"client-key-data"`
ClientKeyPath string `protobuf:"bytes,404,opt,name=ClientKeyPath,proto3" json:"ClientKeyPath,omitempty" yaml:"client-key-path"`
// ClientTrustedCAData contains trusted CA file contents from this member's etcd server.
ClientTrustedCAData string `protobuf:"bytes,405,opt,name=ClientTrustedCAData,proto3" json:"ClientTrustedCAData,omitempty" yaml:"client-trusted-ca-data"`
ClientTrustedCAPath string `protobuf:"bytes,406,opt,name=ClientTrustedCAPath,proto3" json:"ClientTrustedCAPath,omitempty" yaml:"client-trusted-ca-path"`
// PeerCertData contains cert file contents from this member's etcd server.
PeerCertData string `protobuf:"bytes,501,opt,name=PeerCertData,proto3" json:"PeerCertData,omitempty" yaml:"peer-cert-data"`
PeerCertPath string `protobuf:"bytes,502,opt,name=PeerCertPath,proto3" json:"PeerCertPath,omitempty" yaml:"peer-cert-path"`
// PeerKeyData contains key file contents from this member's etcd server.
PeerKeyData string `protobuf:"bytes,503,opt,name=PeerKeyData,proto3" json:"PeerKeyData,omitempty" yaml:"peer-key-data"`
PeerKeyPath string `protobuf:"bytes,504,opt,name=PeerKeyPath,proto3" json:"PeerKeyPath,omitempty" yaml:"peer-key-path"`
// PeerTrustedCAData contains trusted CA file contents from this member's etcd server.
PeerTrustedCAData string `protobuf:"bytes,505,opt,name=PeerTrustedCAData,proto3" json:"PeerTrustedCAData,omitempty" yaml:"peer-trusted-ca-data"`
PeerTrustedCAPath string `protobuf:"bytes,506,opt,name=PeerTrustedCAPath,proto3" json:"PeerTrustedCAPath,omitempty" yaml:"peer-trusted-ca-path"`
// SnapshotPath is the snapshot file path to store or restore from.
SnapshotPath string `protobuf:"bytes,601,opt,name=SnapshotPath,proto3" json:"SnapshotPath,omitempty" yaml:"snapshot-path"`
// SnapshotInfo contains last SAVE_SNAPSHOT request results.
SnapshotInfo *SnapshotInfo `protobuf:"bytes,602,opt,name=SnapshotInfo" json:"SnapshotInfo,omitempty"`
}
func (m *Member) Reset() { *m = Member{} }
func (m *Member) String() string { return proto.CompactTextString(m) }
func (*Member) ProtoMessage() {}
func (*Member) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{3} }
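// exampleLocalMember is an illustrative sketch, not generated code, of a
// minimal Member describing one etcd process under test; every path and
// address below is hypothetical.
func exampleLocalMember() *Member {
	return &Member{
		EtcdExecPath:       "/usr/local/bin/etcd",
		AgentAddr:          "127.0.0.1:19027",
		BaseDir:            "/tmp/etcd-functional-1",
		EtcdClientEndpoint: "127.0.0.1:2379",
		Etcd: &Etcd{
			Name:    "s1",
			DataDir: "/tmp/etcd-functional-1/etcd.data",
		},
	}
}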
type Tester struct {
DataDir string `protobuf:"bytes,1,opt,name=DataDir,proto3" json:"DataDir,omitempty" yaml:"data-dir"`
Network string `protobuf:"bytes,2,opt,name=Network,proto3" json:"Network,omitempty" yaml:"network"`
Addr string `protobuf:"bytes,3,opt,name=Addr,proto3" json:"Addr,omitempty" yaml:"addr"`
// DelayLatencyMs is the delay latency in milliseconds,
// to inject into the simulated slow network.
DelayLatencyMs uint32 `protobuf:"varint,11,opt,name=DelayLatencyMs,proto3" json:"DelayLatencyMs,omitempty" yaml:"delay-latency-ms"`
// DelayLatencyMsRv is the delay latency random variable in milliseconds.
DelayLatencyMsRv uint32 `protobuf:"varint,12,opt,name=DelayLatencyMsRv,proto3" json:"DelayLatencyMsRv,omitempty" yaml:"delay-latency-ms-rv"`
// UpdatedDelayLatencyMs is the updated delay latency in milliseconds,
// to inject into the simulated slow network. It's the final latency to apply,
// in case the latency numbers are randomly generated from the given delay latency field.
UpdatedDelayLatencyMs uint32 `protobuf:"varint,13,opt,name=UpdatedDelayLatencyMs,proto3" json:"UpdatedDelayLatencyMs,omitempty" yaml:"updated-delay-latency-ms"`
// RoundLimit is the limit of rounds to run failure set (-1 to run without limits).
RoundLimit int32 `protobuf:"varint,21,opt,name=RoundLimit,proto3" json:"RoundLimit,omitempty" yaml:"round-limit"`
// ExitOnCaseFail, if true, exits the tester on the first failure.
ExitOnCaseFail bool `protobuf:"varint,22,opt,name=ExitOnCaseFail,proto3" json:"ExitOnCaseFail,omitempty" yaml:"exit-on-failure"`
// EnablePprof is true to enable profiler.
EnablePprof bool `protobuf:"varint,23,opt,name=EnablePprof,proto3" json:"EnablePprof,omitempty" yaml:"enable-pprof"`
// CaseDelayMs is the delay duration after failure is injected.
// Useful when triggering snapshot or no-op failure cases.
CaseDelayMs uint32 `protobuf:"varint,31,opt,name=CaseDelayMs,proto3" json:"CaseDelayMs,omitempty" yaml:"case-delay-ms"`
// CaseShuffle is true to randomize failure injecting order.
CaseShuffle bool `protobuf:"varint,32,opt,name=CaseShuffle,proto3" json:"CaseShuffle,omitempty" yaml:"case-shuffle"`
// Cases is the selected test cases to schedule.
// If empty, run all failure cases.
Cases []string `protobuf:"bytes,33,rep,name=Cases" json:"Cases,omitempty" yaml:"cases"`
// FailpointCommands is the list of "gofail" commands
// (e.g. panic("etcd-tester"),1*sleep(1000)).
FailpointCommands []string `protobuf:"bytes,34,rep,name=FailpointCommands" json:"FailpointCommands,omitempty" yaml:"failpoint-commands"`
// RunnerExecPath is a path of etcd-runner binary.
RunnerExecPath string `protobuf:"bytes,41,opt,name=RunnerExecPath,proto3" json:"RunnerExecPath,omitempty" yaml:"runner-exec-path"`
// ExternalExecPath is a path of script for enabling/disabling an external fault injector.
ExternalExecPath string `protobuf:"bytes,42,opt,name=ExternalExecPath,proto3" json:"ExternalExecPath,omitempty" yaml:"external-exec-path"`
// Stressers is the list of stresser types:
// KV, LEASE, ELECTION_RUNNER, WATCH_RUNNER, LOCK_RACER_RUNNER, LEASE_RUNNER.
Stressers []string `protobuf:"bytes,101,rep,name=Stressers" json:"Stressers,omitempty" yaml:"stressers"`
// Checkers is the list of consistency checker types:
// KV_HASH, LEASE_EXPIRE, NO_CHECK, RUNNER.
// Leave empty to skip consistency checks.
Checkers []string `protobuf:"bytes,102,rep,name=Checkers" json:"Checkers,omitempty" yaml:"checkers"`
// StressKeySize is the size of each small key written into etcd.
StressKeySize int32 `protobuf:"varint,201,opt,name=StressKeySize,proto3" json:"StressKeySize,omitempty" yaml:"stress-key-size"`
// StressKeySizeLarge is the size of each large key written into etcd.
StressKeySizeLarge int32 `protobuf:"varint,202,opt,name=StressKeySizeLarge,proto3" json:"StressKeySizeLarge,omitempty" yaml:"stress-key-size-large"`
// StressKeySuffixRange is the count of key range written into etcd.
// Stress keys are created with "fmt.Sprintf("foo%016x", rand.Intn(keySuffixRange))".
StressKeySuffixRange int32 `protobuf:"varint,203,opt,name=StressKeySuffixRange,proto3" json:"StressKeySuffixRange,omitempty" yaml:"stress-key-suffix-range"`
// StressKeySuffixRangeTxn is the count of key range written into etcd txn (max 100).
// Stress keys are created with "fmt.Sprintf("/k%03d", i)".
StressKeySuffixRangeTxn int32 `protobuf:"varint,204,opt,name=StressKeySuffixRangeTxn,proto3" json:"StressKeySuffixRangeTxn,omitempty" yaml:"stress-key-suffix-range-txn"`
// StressKeyTxnOps is the number of operations per transaction (max 64).
StressKeyTxnOps int32 `protobuf:"varint,205,opt,name=StressKeyTxnOps,proto3" json:"StressKeyTxnOps,omitempty" yaml:"stress-key-txn-ops"`
// StressClients is the number of concurrent stressing clients
// with "one" shared TCP connection.
StressClients int32 `protobuf:"varint,301,opt,name=StressClients,proto3" json:"StressClients,omitempty" yaml:"stress-clients"`
// StressQPS is the maximum number of stresser requests per second.
StressQPS int32 `protobuf:"varint,302,opt,name=StressQPS,proto3" json:"StressQPS,omitempty" yaml:"stress-qps"`
}
func (m *Tester) Reset() { *m = Tester{} }
func (m *Tester) String() string { return proto.CompactTextString(m) }
func (*Tester) ProtoMessage() {}
func (*Tester) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{4} }
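// exampleTesterConfig is an illustrative sketch, not generated code, of a
// small Tester configuration: one round, two selected cases, the KV stresser,
// and the KV_HASH checker. All values are hypothetical.
func exampleTesterConfig() *Tester {
	return &Tester{
		RoundLimit:  1,
		CaseDelayMs: 7000,
		Cases:       []string{"SIGTERM_ONE_FOLLOWER", "SIGTERM_LEADER"},
		Stressers:   []string{"KV"},
		Checkers:    []string{"KV_HASH"},
	}
}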
type Etcd struct {
Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty" yaml:"name"`
DataDir string `protobuf:"bytes,2,opt,name=DataDir,proto3" json:"DataDir,omitempty" yaml:"data-dir"`
WALDir string `protobuf:"bytes,3,opt,name=WALDir,proto3" json:"WALDir,omitempty" yaml:"wal-dir"`
// HeartbeatIntervalMs is the time (in milliseconds) of a heartbeat interval.
// Default value is 100, which is 100ms.
HeartbeatIntervalMs int64 `protobuf:"varint,11,opt,name=HeartbeatIntervalMs,proto3" json:"HeartbeatIntervalMs,omitempty" yaml:"heartbeat-interval"`
// ElectionTimeoutMs is the time (in milliseconds) for an election to timeout.
// Default value is 1000, which is 1s.
ElectionTimeoutMs int64 `protobuf:"varint,12,opt,name=ElectionTimeoutMs,proto3" json:"ElectionTimeoutMs,omitempty" yaml:"election-timeout"`
ListenClientURLs []string `protobuf:"bytes,21,rep,name=ListenClientURLs" json:"ListenClientURLs,omitempty" yaml:"listen-client-urls"`
AdvertiseClientURLs []string `protobuf:"bytes,22,rep,name=AdvertiseClientURLs" json:"AdvertiseClientURLs,omitempty" yaml:"advertise-client-urls"`
ClientAutoTLS bool `protobuf:"varint,23,opt,name=ClientAutoTLS,proto3" json:"ClientAutoTLS,omitempty" yaml:"auto-tls"`
ClientCertAuth bool `protobuf:"varint,24,opt,name=ClientCertAuth,proto3" json:"ClientCertAuth,omitempty" yaml:"client-cert-auth"`
ClientCertFile string `protobuf:"bytes,25,opt,name=ClientCertFile,proto3" json:"ClientCertFile,omitempty" yaml:"cert-file"`
ClientKeyFile string `protobuf:"bytes,26,opt,name=ClientKeyFile,proto3" json:"ClientKeyFile,omitempty" yaml:"key-file"`
ClientTrustedCAFile string `protobuf:"bytes,27,opt,name=ClientTrustedCAFile,proto3" json:"ClientTrustedCAFile,omitempty" yaml:"trusted-ca-file"`
ListenPeerURLs []string `protobuf:"bytes,31,rep,name=ListenPeerURLs" json:"ListenPeerURLs,omitempty" yaml:"listen-peer-urls"`
AdvertisePeerURLs []string `protobuf:"bytes,32,rep,name=AdvertisePeerURLs" json:"AdvertisePeerURLs,omitempty" yaml:"initial-advertise-peer-urls"`
PeerAutoTLS bool `protobuf:"varint,33,opt,name=PeerAutoTLS,proto3" json:"PeerAutoTLS,omitempty" yaml:"peer-auto-tls"`
PeerClientCertAuth bool `protobuf:"varint,34,opt,name=PeerClientCertAuth,proto3" json:"PeerClientCertAuth,omitempty" yaml:"peer-client-cert-auth"`
PeerCertFile string `protobuf:"bytes,35,opt,name=PeerCertFile,proto3" json:"PeerCertFile,omitempty" yaml:"peer-cert-file"`
PeerKeyFile string `protobuf:"bytes,36,opt,name=PeerKeyFile,proto3" json:"PeerKeyFile,omitempty" yaml:"peer-key-file"`
PeerTrustedCAFile string `protobuf:"bytes,37,opt,name=PeerTrustedCAFile,proto3" json:"PeerTrustedCAFile,omitempty" yaml:"peer-trusted-ca-file"`
InitialCluster string `protobuf:"bytes,41,opt,name=InitialCluster,proto3" json:"InitialCluster,omitempty" yaml:"initial-cluster"`
InitialClusterState string `protobuf:"bytes,42,opt,name=InitialClusterState,proto3" json:"InitialClusterState,omitempty" yaml:"initial-cluster-state"`
InitialClusterToken string `protobuf:"bytes,43,opt,name=InitialClusterToken,proto3" json:"InitialClusterToken,omitempty" yaml:"initial-cluster-token"`
SnapshotCount int64 `protobuf:"varint,51,opt,name=SnapshotCount,proto3" json:"SnapshotCount,omitempty" yaml:"snapshot-count"`
QuotaBackendBytes int64 `protobuf:"varint,52,opt,name=QuotaBackendBytes,proto3" json:"QuotaBackendBytes,omitempty" yaml:"quota-backend-bytes"`
PreVote bool `protobuf:"varint,63,opt,name=PreVote,proto3" json:"PreVote,omitempty" yaml:"pre-vote"`
InitialCorruptCheck bool `protobuf:"varint,64,opt,name=InitialCorruptCheck,proto3" json:"InitialCorruptCheck,omitempty" yaml:"initial-corrupt-check"`
}
func (m *Etcd) Reset() { *m = Etcd{} }
func (m *Etcd) String() string { return proto.CompactTextString(m) }
func (*Etcd) ProtoMessage() {}
func (*Etcd) Descriptor() ([]byte, []int) { return fileDescriptorRpc, []int{5} }
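// exampleEtcdFlags is an illustrative sketch, not generated code, of an Etcd
// configuration for a single-node cluster; the name, directories, and URLs
// are hypothetical.
func exampleEtcdFlags() *Etcd {
	return &Etcd{
		Name:                "s1",
		DataDir:             "/tmp/etcd-functional-1/etcd.data",
		ListenClientURLs:    []string{"http://127.0.0.1:2379"},
		AdvertiseClientURLs: []string{"http://127.0.0.1:2379"},
		ListenPeerURLs:      []string{"http://127.0.0.1:2380"},
		AdvertisePeerURLs:   []string{"http://127.0.0.1:2380"},
		InitialCluster:      "s1=http://127.0.0.1:2380",
		InitialClusterState: "new",
		SnapshotCount:       10000,
		PreVote:             true,
	}
}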
func init() {
proto.RegisterType((*Request)(nil), "rpcpb.Request")
proto.RegisterType((*SnapshotInfo)(nil), "rpcpb.SnapshotInfo")
proto.RegisterType((*Response)(nil), "rpcpb.Response")
proto.RegisterType((*Member)(nil), "rpcpb.Member")
proto.RegisterType((*Tester)(nil), "rpcpb.Tester")
proto.RegisterType((*Etcd)(nil), "rpcpb.Etcd")
proto.RegisterEnum("rpcpb.Operation", Operation_name, Operation_value)
proto.RegisterEnum("rpcpb.Case", Case_name, Case_value)
proto.RegisterEnum("rpcpb.Stresser", Stresser_name, Stresser_value)
proto.RegisterEnum("rpcpb.Checker", Checker_name, Checker_value)
}
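// exampleRegisteredEnum is an illustrative sketch, not generated code: the
// init function above registers the message and enum types with the proto
// package, which is what makes name-based lookups such as this one possible.
func exampleRegisteredEnum() bool {
	// proto.EnumValueMap returns the name->value map registered for the enum.
	return proto.EnumValueMap("rpcpb.Operation") != nil
}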
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for Transport service
type TransportClient interface {
Transport(ctx context.Context, opts ...grpc.CallOption) (Transport_TransportClient, error)
}
type transportClient struct {
cc *grpc.ClientConn
}
func NewTransportClient(cc *grpc.ClientConn) TransportClient {
return &transportClient{cc}
}
func (c *transportClient) Transport(ctx context.Context, opts ...grpc.CallOption) (Transport_TransportClient, error) {
stream, err := grpc.NewClientStream(ctx, &_Transport_serviceDesc.Streams[0], c.cc, "/rpcpb.Transport/Transport", opts...)
if err != nil {
return nil, err
}
x := &transportTransportClient{stream}
return x, nil
}
type Transport_TransportClient interface {
Send(*Request) error
Recv() (*Response, error)
grpc.ClientStream
}
type transportTransportClient struct {
grpc.ClientStream
}
func (x *transportTransportClient) Send(m *Request) error {
return x.ClientStream.SendMsg(m)
}
func (x *transportTransportClient) Recv() (*Response, error) {
m := new(Response)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
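// exampleTransportRoundTrip is an illustrative sketch, not generated code: it
// assumes cc is an already-dialed *grpc.ClientConn to an agent, opens the
// bidirectional Transport stream, sends one Request, and waits for one
// Response.
func exampleTransportRoundTrip(ctx context.Context, cc *grpc.ClientConn, req *Request) (*Response, error) {
	stream, err := NewTransportClient(cc).Transport(ctx)
	if err != nil {
		return nil, err
	}
	if err := stream.Send(req); err != nil {
		return nil, err
	}
	return stream.Recv()
}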
// Server API for Transport service
type TransportServer interface {
Transport(Transport_TransportServer) error
}
func RegisterTransportServer(s *grpc.Server, srv TransportServer) {
s.RegisterService(&_Transport_serviceDesc, srv)
}
func _Transport_Transport_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(TransportServer).Transport(&transportTransportServer{stream})
}
type Transport_TransportServer interface {
Send(*Response) error
Recv() (*Request, error)
grpc.ServerStream
}
type transportTransportServer struct {
grpc.ServerStream
}
func (x *transportTransportServer) Send(m *Response) error {
return x.ServerStream.SendMsg(m)
}
func (x *transportTransportServer) Recv() (*Request, error) {
m := new(Request)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
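// exampleEchoServer is an illustrative sketch, not generated code, of a
// minimal TransportServer: it reads Requests until the client closes the
// stream and acknowledges each one with a success Response. It could be
// registered with RegisterTransportServer(grpcServer, exampleEchoServer{}).
type exampleEchoServer struct{}

func (exampleEchoServer) Transport(stream Transport_TransportServer) error {
	for {
		req, err := stream.Recv()
		if err == io.EOF {
			return nil // client finished sending
		}
		if err != nil {
			return err
		}
		resp := &Response{Success: true, Status: "handled " + req.Operation.String()}
		if err := stream.Send(resp); err != nil {
			return err
		}
	}
}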
var _Transport_serviceDesc = grpc.ServiceDesc{
ServiceName: "rpcpb.Transport",
HandlerType: (*TransportServer)(nil),
Methods: []grpc.MethodDesc{},
Streams: []grpc.StreamDesc{
{
StreamName: "Transport",
Handler: _Transport_Transport_Handler,
ServerStreams: true,
ClientStreams: true,
},
},
Metadata: "rpcpb/rpc.proto",
}
func (m *Request) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Request) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Operation != 0 {
dAtA[i] = 0x8
i++
i = encodeVarintRpc(dAtA, i, uint64(m.Operation))
}
if m.Member != nil {
dAtA[i] = 0x12
i++
i = encodeVarintRpc(dAtA, i, uint64(m.Member.Size()))
n1, err := m.Member.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n1
}
if m.Tester != nil {
dAtA[i] = 0x1a
i++
i = encodeVarintRpc(dAtA, i, uint64(m.Tester.Size()))
n2, err := m.Tester.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n2
}
return i, nil
}
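// exampleEncodeRequest is an illustrative sketch, not generated code: it uses
// the hand-written Marshal method above to encode a Request into the protobuf
// wire format and reports the encoded size.
func exampleEncodeRequest(req *Request) ([]byte, error) {
	b, err := req.Marshal()
	if err != nil {
		return nil, err
	}
	fmt.Printf("encoded Request into %d bytes\n", len(b))
	return b, nil
}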
func (m *SnapshotInfo) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *SnapshotInfo) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.MemberName) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintRpc(dAtA, i, uint64(len(m.MemberName)))
i += copy(dAtA[i:], m.MemberName)
}
if len(m.MemberClientURLs) > 0 {
for _, s := range m.MemberClientURLs {
dAtA[i] = 0x12
i++
l = len(s)
for l >= 1<<7 {
dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
l >>= 7
i++
}
dAtA[i] = uint8(l)
i++
i += copy(dAtA[i:], s)
}
}
if len(m.SnapshotPath) > 0 {
dAtA[i] = 0x1a
i++
i = encodeVarintRpc(dAtA, i, uint64(len(m.SnapshotPath)))
i += copy(dAtA[i:], m.SnapshotPath)
}
if len(m.SnapshotFileSize) > 0 {
dAtA[i] = 0x22
i++
i = encodeVarintRpc(dAtA, i, uint64(len(m.SnapshotFileSize)))
i += copy(dAtA[i:], m.SnapshotFileSize)
}
if len(m.SnapshotTotalSize) > 0 {
dAtA[i] = 0x2a
i++
i = encodeVarintRpc(dAtA, i, uint64(len(m.SnapshotTotalSize)))
i += copy(dAtA[i:], m.SnapshotTotalSize)
}
if m.SnapshotTotalKey != 0 {
dAtA[i] = 0x30
i++
i = encodeVarintRpc(dAtA, i, uint64(m.SnapshotTotalKey))
}
if m.SnapshotHash != 0 {
dAtA[i] = 0x38
i++
i = encodeVarintRpc(dAtA, i, uint64(m.SnapshotHash))
}
if m.SnapshotRevision != 0 {
dAtA[i] = 0x40
i++
i = encodeVarintRpc(dAtA, i, uint64(m.SnapshotRevision))
}
if len(m.Took) > 0 {