-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy patheth2fastspec.py
1360 lines (1084 loc) · 57.8 KB
/
eth2fastspec.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
from typing import Iterator, List as PyList, NamedTuple
from hashlib import sha256
import milagro_bls_binding as milagro_bls
from eth2spec.phase0.spec import *
apply_constants_config(globals())
def bls_Verify(PK, message, signature):
    """
    Verify a single BLS signature via the milagro backend.

    Returns True iff ``signature`` is valid for ``message`` under pubkey ``PK``.
    Any backend error (malformed point encoding, wrong input lengths, ...)
    is treated as an invalid signature and yields False.
    """
    try:
        return milagro_bls.Verify(PK, message, signature)
    except Exception:
        # The original used `finally: return result`, which silently swallowed
        # *all* exceptions, including KeyboardInterrupt/SystemExit. Catching
        # only Exception keeps the "invalid on backend error" behavior without
        # masking interpreter-level signals.
        return False
def bls_FastAggregateVerify(pubkeys, message, signature):
    """
    Verify an aggregate BLS signature over a single ``message`` for ``pubkeys``.

    ``pubkeys`` may be any iterable; it is materialized to a list for the
    milagro binding. Backend errors are treated as an invalid signature.
    """
    try:
        return milagro_bls.FastAggregateVerify(list(pubkeys), message, signature)
    except Exception:
        # As with bls_Verify: avoid `finally: return`, which would also
        # swallow BaseException; only backend failures map to False.
        return False
def integer_squareroot(n: uint64) -> uint64:
    """
    Return the largest integer ``x`` such that ``x**2 <= n``.
    """
    # Integer Newton's method: the estimate decreases monotonically until it
    # converges on floor(sqrt(n)).
    guess = n
    better = (guess + 1) // 2
    while better < guess:
        guess, better = better, (better + n // better) // 2
    return guess
def xor(bytes_1: Bytes32, bytes_2: Bytes32) -> Bytes32:
    """
    Return the exclusive-or of two 32-byte strings.
    """
    combined = (left ^ right for left, right in zip(bytes_1, bytes_2))
    return Bytes32(combined)
def hash(x):
    """Return the 32-byte SHA-256 digest of ``x``, as used throughout the spec."""
    hasher = sha256()
    hasher.update(x)
    return hasher.digest()
def compute_fork_data_root(current_version: Version, genesis_validators_root: Root) -> Root:
    """
    Return the 32-byte fork data root for the ``current_version`` and ``genesis_validators_root``.
    This is used primarily in signature domains to avoid collisions across forks/chains.
    """
    fork_data = ForkData(
        current_version=current_version,
        genesis_validators_root=genesis_validators_root,
    )
    return hash_tree_root(fork_data)
# Shuffle a list in-place, seeded by `seed`.
def shuffle_list(input: Sequence[ValidatorIndex], seed: Root):
    """Run the shuffle rounds forward over ``input``. Mutates the input list."""
    _inner_shuffle_list(input, seed, dir=True)
# Undo a shuffling, given the same seed that produced it.
def unshuffle_list(input: Sequence[ValidatorIndex], seed: Root):
    """Run the shuffle rounds in reverse over ``input``. Mutates the input list."""
    _inner_shuffle_list(input, seed, dir=False)
# Layout of the reusable hash-input buffer used by the shuffle:
# [32-byte seed | 1-byte round number | 4-byte position window]
_SHUFFLE_H_SEED_SIZE = 32
_SHUFFLE_H_ROUND_SIZE = 1
_SHUFFLE_H_POSITION_WINDOW_SIZE = 4
# The seed+round prefix is hashed on its own to derive each round's pivot.
_SHUFFLE_H_PIVOT_VIEW_SIZE = _SHUFFLE_H_SEED_SIZE + _SHUFFLE_H_ROUND_SIZE
_SHUFFLE_H_TOTAL_SIZE = _SHUFFLE_H_SEED_SIZE + _SHUFFLE_H_ROUND_SIZE + _SHUFFLE_H_POSITION_WINDOW_SIZE
# Shuffles or unshuffles, depending on the `dir` (true for shuffling, false for unshuffling
def _inner_shuffle_list(input: Sequence[ValidatorIndex], seed: Root, dir: bool):
    """
    Apply the swap-or-not shuffle to the whole list at once, in place.

    Equivalent to calling compute_shuffled_index per element, but amortizes the
    hashing: one pivot hash per round, and one source hash per 256 positions.
    """
    if len(input) <= 1:
        # nothing to (un)shuffle
        return
    listSize = len(input)
    # Reusable hash input buffer: seed | round | position-window (see _SHUFFLE_H_* sizes).
    buf = bytearray([0] * _SHUFFLE_H_TOTAL_SIZE)
    r = 0
    if not dir:
        # Start at last round.
        # Iterating through the rounds in reverse, un-swaps everything, effectively un-shuffling the list.
        r = SHUFFLE_ROUND_COUNT - 1
    # Seed is always the first 32 bytes of the hash input, we never have to change this part of the buffer.
    buf[:_SHUFFLE_H_SEED_SIZE] = seed[:]
    while True:
        # spec: pivot = bytes_to_int(hash(seed + int_to_bytes1(round))[0:8]) % list_size
        # This is the "int_to_bytes1(round)", appended to the seed.
        buf[_SHUFFLE_H_SEED_SIZE] = r
        # Seed is already in place, now just hash the correct part of the buffer, and take a uint64 from it,
        # and modulo it to get a pivot within range.
        h = hash(buf[:_SHUFFLE_H_PIVOT_VIEW_SIZE])
        pivot = int.from_bytes(h[:8], byteorder=ENDIANNESS) % listSize
        # Split up the for-loop in two:
        # 1. Handle the part from 0 (incl) to pivot (incl). This is mirrored around (pivot / 2)
        # 2. Handle the part from pivot (excl) to N (excl). This is mirrored around ((pivot / 2) + (size/2))
        # The pivot defines a split in the array, with each of the splits mirroring their data within the split.
        # Print out some example even/odd sized index lists, with some even/odd pivots,
        # and you can deduce how the mirroring works exactly.
        # Note that the mirror is strict enough to not consider swapping the index @mirror with itself.
        mirror = (pivot + 1) >> 1
        # Since we are iterating through the "positions" in order, we can just repeat the hash every 256th position.
        # No need to pre-compute every possible hash for efficiency like in the example code.
        # We only need it consecutively (we are going through each in reverse order however, but same thing)
        #
        # spec: source = hash(seed + int_to_bytes1(round) + int_to_bytes4(position # 256))
        # - seed is still in 0:32 (excl., 32 bytes)
        # - round number is still in 32
        # - mix in the position for randomness, except the last byte of it,
        #     which will be used later to select a bit from the resulting hash.
        # We start from the pivot position, and work back to the mirror position (of the part left to the pivot).
        # This makes us process each pear exactly once (instead of unnecessarily twice, like in the spec)
        buf[_SHUFFLE_H_PIVOT_VIEW_SIZE:] = ((pivot >> 8) & 0xffff_ffff).to_bytes(length=4, byteorder=ENDIANNESS)
        source = hash(buf)
        byteV = source[(pivot & 0xff) >> 3]
        i, j = 0, pivot
        while i < mirror:
            # The pair is i,j. With j being the bigger of the two, hence the "position" identifier of the pair.
            # Every 256th bit (aligned to j).
            if j & 0xff == 0xff:
                # just overwrite the last part of the buffer, reuse the start (seed, round)
                buf[_SHUFFLE_H_PIVOT_VIEW_SIZE:] = ((j >> 8) & 0xffff_ffff).to_bytes(length=4, byteorder=ENDIANNESS)
                source = hash(buf)
            # Same trick with byte retrieval. Only every 8th.
            if j & 0x7 == 0x7:
                byteV = source[(j & 0xff) >> 3]
            bitV = (byteV >> (j & 0x7)) & 0x1
            if bitV == 1:
                # swap the pair items
                input[i], input[j] = input[j], input[i]
            i, j = i + 1, j - 1
        # Now repeat, but for the part after the pivot.
        mirror = (pivot + listSize + 1) >> 1
        end = listSize - 1
        # Again, seed and round input is in place, just update the position.
        # We start at the end, and work back to the mirror point.
        # This makes us process each pear exactly once (instead of unnecessarily twice, like in the spec)
        buf[_SHUFFLE_H_PIVOT_VIEW_SIZE:] = ((end >> 8) & 0xffff_ffff).to_bytes(length=4, byteorder=ENDIANNESS)
        source = hash(buf)
        byteV = source[(end & 0xff) >> 3]
        i, j = pivot + 1, end
        while i < mirror:
            # Exact same thing (copy of above loop body)
            # --------------------------------------------
            # The pair is i,j. With j being the bigger of the two, hence the "position" identifier of the pair.
            # Every 256th bit (aligned to j).
            if j & 0xff == 0xff:
                # just overwrite the last part of the buffer, reuse the start (seed, round)
                buf[_SHUFFLE_H_PIVOT_VIEW_SIZE:] = ((j >> 8) & 0xffff_ffff).to_bytes(length=4, byteorder=ENDIANNESS)
                source = hash(buf)
            # Same trick with byte retrieval. Only every 8th.
            if j & 0x7 == 0x7:
                byteV = source[(j & 0xff) >> 3]
            bitV = (byteV >> (j & 0x7)) & 0x1
            if bitV == 1:
                # swap the pair items
                input[i], input[j] = input[j], input[i]
            i, j = i + 1, j - 1
            # --------------------------------------------
        # go forwards?
        if dir:
            # -> shuffle
            r += 1
            if r == SHUFFLE_ROUND_COUNT:
                break
        else:
            if r == 0:
                break
            # -> un-shuffle
            r -= 1
def compute_shuffled_index(index: ValidatorIndex, index_count: int, seed: Bytes32) -> ValidatorIndex:
    """
    Return the shuffled validator index corresponding to ``seed`` (and ``index_count``).
    """
    assert index < index_count
    # Swap-or-not shuffle (https://link.springer.com/content/pdf/10.1007%2F978-3-642-32009-5_1.pdf),
    # the 'generalized domain' algorithm on page 3.
    cur = index
    for current_round in range(SHUFFLE_ROUND_COUNT):
        round_byte = int_to_bytes(current_round, length=1)
        pivot = int.from_bytes(hash(seed + round_byte)[0:8], byteorder=ENDIANNESS) % index_count
        flip = int((pivot + index_count - cur) % index_count)
        # The larger of the pair identifies the pair's "position".
        position = cur if cur > flip else flip
        source = hash(seed + round_byte + int_to_bytes(position // 256, length=4))
        # One bit of the source hash decides whether this pair swaps this round.
        selected_bit = (source[(position % 256) // 8] >> (position % 8)) & 1
        if selected_bit:
            cur = flip
    return ValidatorIndex(cur)
def compute_committee_count(active_validators_count: int) -> int:
    """
    Return the number of committees per slot for the given active-validator count,
    clamped to the range [1, MAX_COMMITTEES_PER_SLOT].
    """
    per_slot = (active_validators_count // SLOTS_PER_EPOCH) // TARGET_COMMITTEE_SIZE
    return max(1, min(MAX_COMMITTEES_PER_SLOT, per_slot))
# Type aliases describing the nested committee layout of one epoch.
Committee = Sequence[ValidatorIndex]  # as with indexed attestation order (index of validator within committee)
SlotCommittees = Sequence[Committee]  # by index of committee (len <= MAX_COMMITTEES_PER_SLOT)
EpochCommittees = Sequence[SlotCommittees]  # by slot within the epoch (len == SLOTS_PER_EPOCH)
# With a high amount of shards, or low amount of validators,
# some shards may not have a committee this epoch.
class ShufflingEpoch(object):
    """
    Pre-computed shuffling data for one epoch: the active validator set,
    its shuffled order, and the committees sliced out of that order.
    """
    epoch: Epoch
    active_indices: Sequence[ValidatorIndex]  # non-shuffled active validator indices
    shuffling: Sequence[ValidatorIndex]  # the active validator indices, shuffled into their committee
    committees: EpochCommittees  # list of lists of slices of Shuffling

    # indices_bounded: (index, activation_epoch, exit_epoch) per validator.
    def __init__(self,
                 state: BeaconState,
                 indices_bounded: Sequence[Tuple[ValidatorIndex, Epoch, Epoch]],
                 epoch: Epoch):
        self.epoch = epoch
        seed = get_seed(state, epoch, DOMAIN_BEACON_ATTESTER)
        # Filter to validators active in this epoch, using the pre-extracted epoch bounds.
        self.active_indices = [index for (index, activation_epoch, exit_epoch) in indices_bounded
                               if activation_epoch <= epoch < exit_epoch]
        shuffling = list(self.active_indices)  # copy
        # Whole-list unshuffle produces the same mapping as per-index
        # compute_shuffled_index, but in a single pass.
        unshuffle_list(shuffling, seed)
        self.shuffling = shuffling
        active_validator_count = len(self.active_indices)
        committees_per_slot = compute_committee_count(active_validator_count)
        committee_count = committees_per_slot * uint64(SLOTS_PER_EPOCH)

        def slice_committee(slot: int, comm_index: int):
            # Committees are contiguous, evenly-divided slices of the shuffling.
            index = (slot * committees_per_slot) + comm_index
            start_offset = (active_validator_count * index) // committee_count
            end_offset = (active_validator_count * (index + 1)) // committee_count
            assert start_offset <= end_offset
            return self.shuffling[start_offset:end_offset]

        # committees[slot][committee_index] -> Committee
        self.committees = [[slice_committee(slot, comm_index) for comm_index in range(committees_per_slot)]
                           for slot in range(SLOTS_PER_EPOCH)]
def compute_proposer_index(state: BeaconState, indices: Sequence[ValidatorIndex], seed: Bytes32) -> ValidatorIndex:
    """
    Return from ``indices`` a random index sampled by effective balance.
    """
    assert len(indices) > 0
    MAX_RANDOM_BYTE = 2**8 - 1
    total = len(indices)
    attempt = 0
    while True:
        # Walk candidates in shuffled order; 32 random bytes per hash.
        shuffled = compute_shuffled_index(ValidatorIndex(attempt % total), total, seed)
        candidate_index = indices[shuffled]
        random_byte = hash(seed + int_to_bytes(attempt // 32, length=8))[attempt % 32]
        effective_balance = state.validators[candidate_index].effective_balance
        # Accept with probability proportional to effective balance.
        if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte:
            return ValidatorIndex(candidate_index)
        attempt += 1
class EpochsContext(object):
    """
    Cache of shuffling and proposer data spanning the previous, current and
    next epoch, plus a pubkey registry. Rotated forward per epoch instead of
    recomputed from scratch.
    """
    pubkey2index: Dict[BLSPubkey, ValidatorIndex]
    index2pubkey: PyList[BLSPubkey]
    proposers: Sequence[ValidatorIndex]  # 1 proposer per slot, only of current epoch.
    previous_shuffling: Optional[ShufflingEpoch]
    current_shuffling: Optional[ShufflingEpoch]
    next_shuffling: Optional[ShufflingEpoch]

    def __init__(self):
        self.pubkey2index = {}
        self.index2pubkey = []
        self.proposers = []
        self.previous_shuffling = None
        self.current_shuffling = None
        self.next_shuffling = None

    def load_state(self, state: BeaconState):
        """Initialize all cached data from scratch for the given state."""
        self.sync_pubkeys(state)
        current_epoch = compute_epoch_at_slot(state.slot)
        previous_epoch = GENESIS_EPOCH if current_epoch == GENESIS_EPOCH else Epoch(current_epoch - 1)
        next_epoch = Epoch(current_epoch + 1)
        # Extract (index, activation, exit) once; shared by all three shufflings.
        indices_bounded = [(ValidatorIndex(i), v.activation_epoch, v.exit_epoch)
                           for i, v in enumerate(state.validators.readonly_iter())]
        self.current_shuffling = ShufflingEpoch(state, indices_bounded, current_epoch)
        if previous_epoch == current_epoch:  # In case of genesis
            self.previous_shuffling = self.current_shuffling
        else:
            self.previous_shuffling = ShufflingEpoch(state, indices_bounded, previous_epoch)
        self.next_shuffling = ShufflingEpoch(state, indices_bounded, next_epoch)
        self._reset_proposers(state)

    def _reset_proposers(self, state: BeaconState):
        # Recompute the proposer for every slot of the current epoch.
        epoch_seed = get_seed(state, self.current_shuffling.epoch, DOMAIN_BEACON_PROPOSER)
        start_slot = compute_start_slot_at_epoch(self.current_shuffling.epoch)
        self.proposers = [
            compute_proposer_index(state, self.current_shuffling.active_indices,
                                   hash(epoch_seed + slot.to_bytes(length=8, byteorder=ENDIANNESS)))
            for slot in range(start_slot, start_slot+SLOTS_PER_EPOCH)
        ]

    def copy(self) -> "EpochsContext":
        """Cheap copy: deep for the mutable pubkey registry, shallow for the rest."""
        epochs_ctx = EpochsContext()
        # Full copy of pubkeys, this can mutate
        epochs_ctx.pubkey2index = self.pubkey2index.copy()
        epochs_ctx.index2pubkey = self.index2pubkey.copy()
        # Only shallow-copy the other data, it doesn't mutate (only completely replaced on rotation)
        epochs_ctx.proposers = self.proposers
        epochs_ctx.previous_shuffling = self.previous_shuffling
        epochs_ctx.current_shuffling = self.current_shuffling
        epochs_ctx.next_shuffling = self.next_shuffling
        return epochs_ctx

    def sync_pubkeys(self, state: BeaconState):
        """Append registry entries for validators added since the last sync."""
        if self.pubkey2index is None:
            self.pubkey2index = {}
        if self.index2pubkey is None:
            self.index2pubkey = []
        # Validators are append-only, so only the tail needs processing.
        current_count = len(self.pubkey2index)
        assert current_count == len(self.index2pubkey)
        for i in range(current_count, len(state.validators)):
            pubkey: BLSPubkey = state.validators[i].pubkey
            index = ValidatorIndex(i)
            self.pubkey2index[pubkey] = index
            self.index2pubkey.append(pubkey)

    def rotate_epochs(self, state: BeaconState):
        """Advance one epoch: current->previous, next->current, compute new next."""
        self.previous_shuffling = self.current_shuffling
        self.current_shuffling = self.next_shuffling
        next_epoch = Epoch(self.current_shuffling.epoch + 1)
        indices_bounded = [(ValidatorIndex(i), v.activation_epoch, v.exit_epoch)
                           for i, v in enumerate(state.validators.readonly_iter())]
        self.next_shuffling = ShufflingEpoch(state, indices_bounded, next_epoch)
        self._reset_proposers(state)

    def _get_slot_comms(self, slot: Slot) -> SlotCommittees:
        # Resolve which cached shuffling epoch covers the slot.
        epoch = compute_epoch_at_slot(slot)
        epoch_slot = slot % SLOTS_PER_EPOCH
        if epoch == self.previous_shuffling.epoch:
            return self.previous_shuffling.committees[epoch_slot]
        elif epoch == self.current_shuffling.epoch:
            return self.current_shuffling.committees[epoch_slot]
        elif epoch == self.next_shuffling.epoch:
            return self.next_shuffling.committees[epoch_slot]
        else:
            raise Exception(f"crosslink committee retrieval: out of range epoch: {epoch}")

    # Return the beacon committee at slot for index.
    def get_beacon_committee(self, slot: Slot, index: CommitteeIndex) -> Committee:
        slot_comms = self._get_slot_comms(slot)
        if index >= len(slot_comms):
            raise Exception(f"crosslink committee retrieval: out of range committee index: {index}")
        return slot_comms[index]

    def get_committee_count_at_slot(self, slot: Slot) -> uint64:
        return uint64(len(self._get_slot_comms(slot)))

    def get_beacon_proposer(self, slot: Slot) -> ValidatorIndex:
        # Proposers are cached for the current epoch only.
        epoch = compute_epoch_at_slot(slot)
        assert epoch == self.current_shuffling.epoch
        return self.proposers[slot % SLOTS_PER_EPOCH]
# Per-validator participation flags, accumulated while scanning the pending
# attestations of the previous and current epoch (source/target/head votes).
FLAG_PREV_SOURCE_ATTESTER = 1 << 0
FLAG_PREV_TARGET_ATTESTER = 1 << 1
FLAG_PREV_HEAD_ATTESTER = 1 << 2
FLAG_CURR_SOURCE_ATTESTER = 1 << 3
FLAG_CURR_TARGET_ATTESTER = 1 << 4
FLAG_CURR_HEAD_ATTESTER = 1 << 5
FLAG_UNSLASHED = 1 << 6  # validator is not slashed
FLAG_ELIGIBLE_ATTESTER = 1 << 7  # active in prev epoch, or slashed but not yet withdrawable
class FlatValidator(object):
    """
    Flat, plain-attribute copy of a Validator tree-node, for fast repeated
    access during epoch processing (avoids re-reading the state tree).
    """
    __slots__ = 'effective_balance', 'slashed', 'activation_eligibility_epoch',\
        'activation_epoch', 'exit_epoch', 'withdrawable_epoch'

    effective_balance: Gwei  # Balance at stake
    slashed: boolean
    # Status epochs
    activation_eligibility_epoch: Epoch  # When criteria for activation were met
    activation_epoch: Epoch
    exit_epoch: Epoch
    withdrawable_epoch: Epoch  # When validator can withdraw funds

    def __init__(self, v: Validator):
        # Unpack the Validator container in field order; the first two fields are
        # skipped (presumably pubkey and withdrawal_credentials in the phase0
        # container — confirm against the Validator definition).
        _, _, self.effective_balance, self.slashed, self.activation_eligibility_epoch, \
            self.activation_epoch, self.exit_epoch, self.withdrawable_epoch = v
class AttesterStatus(object):
    """Mutable per-validator scratchpad filled in during epoch processing."""
    __slots__ = 'flags', 'proposer_index', 'inclusion_delay', 'validator', 'active'

    flags: int
    proposer_index: int  # -1 when not included by any proposer
    inclusion_delay: int
    validator: FlatValidator
    active: bool

    def __init__(self, v: FlatValidator):
        # Keep a reference to the flattened validator; everything else starts cleared.
        self.validator = v
        self.active = False
        self.flags = 0
        self.inclusion_delay = 0
        # Sentinel: no proposer has included an attestation by this validator yet.
        self.proposer_index = -1
def has_markers(flags: int, markers: int) -> bool:
    """True iff every bit set in ``markers`` is also set in ``flags``."""
    return not (markers & ~flags)
class EpochStakeSummary(object):
    """Sums of unslashed attesting stake for one epoch, per vote component."""
    __slots__ = 'source_stake', 'target_stake', 'head_stake'

    source_stake: Gwei
    target_stake: Gwei
    head_stake: Gwei

    def __init__(self):
        # All components start at zero; they are accumulated later.
        zero = Gwei(0)
        self.source_stake = zero
        self.target_stake = zero
        self.head_stake = zero
class EpochProcess(object):
    """
    Scratch data gathered in one pass over the validator set, consumed by the
    epoch-transition steps (justification, rewards, registry updates, ...).
    """
    prev_epoch: Epoch
    current_epoch: Epoch
    statuses: PyList[AttesterStatus]  # one entry per validator, by index
    total_active_stake: Gwei
    prev_epoch_unslashed_stake: EpochStakeSummary
    curr_epoch_unslashed_target_stake: Gwei
    active_validators: int  # Thanks to exit delay, this does not change within the epoch processing.
    indices_to_slash: PyList[ValidatorIndex]
    indices_to_set_activation_eligibility: PyList[ValidatorIndex]
    # ignores churn. Apply churn-limit manually.
    # Maybe, because finality affects it still.
    indices_to_maybe_activate: PyList[ValidatorIndex]
    indices_to_eject: PyList[ValidatorIndex]
    exit_queue_end: Epoch
    exit_queue_end_churn: int
    churn_limit: int

    def __init__(self):
        # Zero-initialize everything; prepare_epoch_process_state fills it in.
        self.current_epoch = Epoch(0)
        self.prev_epoch = Epoch(0)
        self.statuses = []
        self.total_active_stake = Gwei(0)
        self.prev_epoch_unslashed_stake = EpochStakeSummary()
        self.curr_epoch_unslashed_target_stake = Gwei(0)
        self.active_validators = 0
        self.indices_to_slash = []
        self.indices_to_set_activation_eligibility = []
        self.indices_to_maybe_activate = []
        self.indices_to_eject = []
        self.exit_queue_end = Epoch(0)
        self.exit_queue_end_churn = 0
        self.churn_limit = 0
def compute_epoch_at_slot(slot: Slot) -> Epoch:
    """
    Return the epoch number at ``slot``.
    """
    epoch_number = slot // SLOTS_PER_EPOCH
    return Epoch(epoch_number)
def get_churn_limit(active_validator_count: uint64) -> uint64:
    """Per-epoch validator churn limit: scales with the active set, floored at MIN_PER_EPOCH_CHURN_LIMIT."""
    scaled = active_validator_count // CHURN_LIMIT_QUOTIENT
    return scaled if scaled > MIN_PER_EPOCH_CHURN_LIMIT else MIN_PER_EPOCH_CHURN_LIMIT
def is_active_validator(v: Validator, epoch: Epoch) -> bool:
    """Check if ``v`` is active at ``epoch``: already activated, not yet exited."""
    return (v.activation_epoch <= epoch) and (epoch < v.exit_epoch)
def is_active_flat_validator(v: FlatValidator, epoch: Epoch) -> bool:
    """Same activity check as is_active_validator, for the flattened representation."""
    return (v.activation_epoch <= epoch) and (epoch < v.exit_epoch)
def compute_activation_exit_epoch(epoch: Epoch) -> Epoch:
    """
    Return the epoch during which validator activations and exits initiated in ``epoch`` take effect.
    """
    # One-epoch delay plus the seed lookahead protection.
    delay = 1 + MAX_SEED_LOOKAHEAD
    return Epoch(epoch + delay)
def compute_start_slot_at_epoch(epoch: Epoch) -> Slot:
    """
    Return the start slot of ``epoch``.
    """
    first_slot = epoch * SLOTS_PER_EPOCH
    return Slot(first_slot)
def get_block_root_at_slot(state: BeaconState, slot: Slot) -> Root:
    """
    Return the block root at a recent ``slot``.
    """
    # Only the last SLOTS_PER_HISTORICAL_ROOT roots are retained; the slot must
    # be strictly in the past and within that window.
    assert slot < state.slot <= slot + SLOTS_PER_HISTORICAL_ROOT
    history_index = slot % SLOTS_PER_HISTORICAL_ROOT
    return state.block_roots[history_index]
def get_block_root(state: BeaconState, epoch: Epoch) -> Root:
    """
    Return the block root at the start of a recent ``epoch``.
    """
    start_slot = compute_start_slot_at_epoch(epoch)
    return get_block_root_at_slot(state, start_slot)
def prepare_epoch_process_state(epochs_ctx: EpochsContext, state: BeaconState) -> EpochProcess:
    """
    Scan the validator set and pending attestations once, and collect
    everything the epoch-transition steps need into an EpochProcess.
    """
    # TODO maybe allocate status array at exact capacity? count = len(state.validators)
    out = EpochProcess()
    current_epoch = epochs_ctx.current_shuffling.epoch
    prev_epoch = epochs_ctx.previous_shuffling.epoch
    out.current_epoch = current_epoch
    out.prev_epoch = prev_epoch

    # Slashed validators whose withdrawable_epoch matches this are due for the
    # proportional slashing penalty this epoch.
    slashings_epoch = current_epoch + (EPOCHS_PER_SLASHINGS_VECTOR // 2)
    exit_queue_end = compute_activation_exit_epoch(current_epoch)

    active_count = uint64(0)
    # fast read-only iterate over tree-structured validator set.
    for i, tree_v in enumerate(state.validators.readonly_iter()):
        v = FlatValidator(tree_v)
        status = AttesterStatus(v)

        if v.slashed:
            if slashings_epoch == v.withdrawable_epoch:
                out.indices_to_slash.append(ValidatorIndex(i))
        else:
            status.flags |= FLAG_UNSLASHED

        # Eligible for previous-epoch accounting: active then, or slashed but
        # not yet withdrawable.
        if is_active_flat_validator(v, prev_epoch) or (v.slashed and (prev_epoch + 1 < v.withdrawable_epoch)):
            status.flags |= FLAG_ELIGIBLE_ATTESTER

        active = is_active_flat_validator(v, current_epoch)
        if active:
            status.active = True
            out.total_active_stake += v.effective_balance
            active_count += 1

        # Track the furthest scheduled exit, to know where the exit queue ends.
        if v.exit_epoch != FAR_FUTURE_EPOCH and v.exit_epoch > exit_queue_end:
            exit_queue_end = v.exit_epoch

        if v.activation_eligibility_epoch == FAR_FUTURE_EPOCH and v.effective_balance == MAX_EFFECTIVE_BALANCE:
            out.indices_to_set_activation_eligibility.append(ValidatorIndex(i))

        if v.activation_epoch == FAR_FUTURE_EPOCH and v.activation_eligibility_epoch <= current_epoch:
            out.indices_to_maybe_activate.append(ValidatorIndex(i))

        if status.active and v.effective_balance <= EJECTION_BALANCE and v.exit_epoch == FAR_FUTURE_EPOCH:
            out.indices_to_eject.append(ValidatorIndex(i))

        out.statuses.append(status)

    out.active_validators = active_count

    # Avoid division by zero downstream when the active stake is tiny.
    if out.total_active_stake < EFFECTIVE_BALANCE_INCREMENT:
        out.total_active_stake = EFFECTIVE_BALANCE_INCREMENT

    # order by the sequence of activation_eligibility_epoch setting and then index
    out.indices_to_maybe_activate = sorted(out.indices_to_maybe_activate,
                                           key=lambda i: (out.statuses[i].validator.activation_eligibility_epoch, i))

    # Count validators already scheduled to exit at the end of the queue, and
    # bump the queue end if the churn limit is saturated.
    exit_queue_end_churn = uint64(0)
    for status in out.statuses:
        if status.validator.exit_epoch == exit_queue_end:
            exit_queue_end_churn += 1

    churn_limit = get_churn_limit(active_count)
    if exit_queue_end_churn >= churn_limit:
        exit_queue_end += 1
        exit_queue_end_churn = 0

    out.exit_queue_end_churn = exit_queue_end_churn
    out.exit_queue_end = exit_queue_end
    out.churn_limit = churn_limit

    def status_process_epoch(statuses: Sequence[AttesterStatus],
                             attestations: Iterator[PendingAttestation],
                             epoch: Epoch, source_flag: int, target_flag: int, head_flag: int):
        # Fold one epoch's pending attestations into the per-validator flags.
        actual_target_block_root = get_block_root_at_slot(state, compute_start_slot_at_epoch(epoch))
        for att in attestations:
            # Load all the attestation details from the state tree once, do not reload for each participant.
            aggregation_bits, att_data, inclusion_delay, proposer_index = att
            att_slot, committee_index, att_beacon_block_root, _, att_target = att_data
            att_bits = list(aggregation_bits)
            att_voted_target_root = att_target.root == actual_target_block_root
            att_voted_head_root = att_beacon_block_root == get_block_root_at_slot(state, att_slot)
            # attestation-target is already known to be this epoch, get it from the pre-computed shuffling directly.
            committee = epochs_ctx.get_beacon_committee(att_slot, committee_index)
            participants = list(index for i, index in enumerate(committee) if att_bits[i])
            if epoch == prev_epoch:
                for p in participants:
                    status = statuses[p]
                    # If the attestation is the earliest, i.e. has the smallest delay
                    if status.proposer_index == -1 or status.inclusion_delay > inclusion_delay:
                        status.proposer_index = proposer_index
                        status.inclusion_delay = inclusion_delay
            for p in participants:
                status = statuses[p]
                # remember the participant as one of the good validators
                status.flags |= source_flag
                # If the attestation is for the boundary:
                if att_voted_target_root:
                    status.flags |= target_flag
                    # Head votes must be a subset of target votes
                    if att_voted_head_root:
                        status.flags |= head_flag

    # When used in a non-epoch transition on top of genesis state, avoid reaching to a block from before genesis.
    if state.slot > 0:
        status_process_epoch(out.statuses, state.previous_epoch_attestations.readonly_iter(), prev_epoch,
                             FLAG_PREV_SOURCE_ATTESTER, FLAG_PREV_TARGET_ATTESTER, FLAG_PREV_HEAD_ATTESTER)
    # When used in a non-epoch transition, it may be the absolute start of the epoch,
    # and the current epoch will not have any attestations (or a target block root to match them against)
    if compute_start_slot_at_epoch(current_epoch) < state.slot:
        status_process_epoch(out.statuses, state.current_epoch_attestations.readonly_iter(), current_epoch,
                             FLAG_CURR_SOURCE_ATTESTER, FLAG_CURR_TARGET_ATTESTER, FLAG_CURR_HEAD_ATTESTER)

    # Python quirk; avoid Gwei during summation here, not worth the __add__ overhead.
    prev_source_unsl_stake, prev_target_unsl_stake, prev_head_unsl_stake = 0, 0, 0
    curr_epoch_unslashed_target_stake = 0
    for status in out.statuses:
        # Target implies source, head implies target — so the sums nest.
        if has_markers(status.flags, FLAG_PREV_SOURCE_ATTESTER | FLAG_UNSLASHED):
            prev_source_unsl_stake += status.validator.effective_balance
            if has_markers(status.flags, FLAG_PREV_TARGET_ATTESTER):
                prev_target_unsl_stake += status.validator.effective_balance
                if has_markers(status.flags, FLAG_PREV_HEAD_ATTESTER):
                    prev_head_unsl_stake += status.validator.effective_balance
        if has_markers(status.flags, FLAG_CURR_TARGET_ATTESTER | FLAG_UNSLASHED):
            curr_epoch_unslashed_target_stake += status.validator.effective_balance
    out.prev_epoch_unslashed_stake.source_stake = max(prev_source_unsl_stake, EFFECTIVE_BALANCE_INCREMENT)
    out.prev_epoch_unslashed_stake.target_stake = max(prev_target_unsl_stake, EFFECTIVE_BALANCE_INCREMENT)
    out.prev_epoch_unslashed_stake.head_stake = max(prev_head_unsl_stake, EFFECTIVE_BALANCE_INCREMENT)
    out.curr_epoch_unslashed_target_stake = max(curr_epoch_unslashed_target_stake, EFFECTIVE_BALANCE_INCREMENT)

    return out
def get_randao_mix(state: BeaconState, epoch: Epoch) -> Bytes32:
    """
    Return the randao mix at a recent ``epoch``.
    """
    mix_index = epoch % EPOCHS_PER_HISTORICAL_VECTOR
    return state.randao_mixes[mix_index]
def int_to_bytes(n: int, length: int) -> bytes:
    """
    Return the ``length``-byte serialization of ``n`` in ``ENDIANNESS``-endian.
    """
    serialized = n.to_bytes(length, ENDIANNESS)
    return serialized
def get_seed(state: BeaconState, epoch: Epoch, domain_type: DomainType) -> Bytes32:
    """
    Return the seed at ``epoch``.
    """
    # Adding EPOCHS_PER_HISTORICAL_VECTOR keeps the lookback subtraction from underflowing.
    lookback_epoch = Epoch(epoch + EPOCHS_PER_HISTORICAL_VECTOR - MIN_SEED_LOOKAHEAD - 1)
    mix = get_randao_mix(state, lookback_epoch)
    return hash(domain_type + int_to_bytes(epoch, length=8) + mix)
def increase_balance(state: BeaconState, index: ValidatorIndex, delta: Gwei) -> None:
    """
    Increase the validator balance at index ``index`` by ``delta``.
    """
    state.balances[index] = state.balances[index] + delta
def decrease_balance(state: BeaconState, index: ValidatorIndex, delta: Gwei) -> None:
    """
    Decrease the validator balance at index ``index`` by ``delta``, with underflow protection.
    """
    current = state.balances[index]
    # Saturating subtraction: never go below zero.
    state.balances[index] = current - delta if delta <= current else 0
def process_justification_and_finalization(epochs_ctx: EpochsContext, process: EpochProcess, state: BeaconState) -> None:
    """
    Update justification bits and checkpoints from the pre-computed attesting
    stake, then apply the four finalization rules.
    """
    previous_epoch = process.prev_epoch
    current_epoch = process.current_epoch
    # Initial FFG checkpoint values have a `0x00` stub for `root`.
    # Skip FFG updates in the first two epochs to avoid corner cases that might result in modifying this stub.
    if current_epoch <= GENESIS_EPOCH + 1:
        return
    old_previous_justified_checkpoint = state.previous_justified_checkpoint
    old_current_justified_checkpoint = state.current_justified_checkpoint
    # Process justifications
    state.previous_justified_checkpoint = state.current_justified_checkpoint
    bits = state.justification_bits
    # shift bits, zero out new bit space
    bits[1:] = bits[:-1]
    bits[0] = 0b0
    # Justify when >= 2/3 of the (unslashed) active stake attested to the epoch's target.
    if process.prev_epoch_unslashed_stake.target_stake * 3 >= process.total_active_stake * 2:
        state.current_justified_checkpoint = Checkpoint(epoch=previous_epoch,
                                                        root=get_block_root(state, previous_epoch))
        bits[1] = 0b1
    if process.curr_epoch_unslashed_target_stake * 3 >= process.total_active_stake * 2:
        state.current_justified_checkpoint = Checkpoint(epoch=current_epoch,
                                                        root=get_block_root(state, current_epoch))
        bits[0] = 0b1
    state.justification_bits = bits
    assert len(bits) == 4
    # Process finalizations
    # The 2nd/3rd/4th most recent epochs are justified, the 2nd using the 4th as source
    if all(bits[1:4]) and old_previous_justified_checkpoint.epoch + 3 == current_epoch:
        state.finalized_checkpoint = old_previous_justified_checkpoint
    # The 2nd/3rd most recent epochs are justified, the 2nd using the 3rd as source
    if all(bits[1:3]) and old_previous_justified_checkpoint.epoch + 2 == current_epoch:
        state.finalized_checkpoint = old_previous_justified_checkpoint
    # The 1st/2nd/3rd most recent epochs are justified, the 1st using the 3rd as source
    if all(bits[0:3]) and old_current_justified_checkpoint.epoch + 2 == current_epoch:
        state.finalized_checkpoint = old_current_justified_checkpoint
    # The 1st/2nd most recent epochs are justified, the 1st using the 2nd as source
    if all(bits[0:2]) and old_current_justified_checkpoint.epoch + 1 == current_epoch:
        state.finalized_checkpoint = old_current_justified_checkpoint
class Deltas(NamedTuple):
    # Parallel per-validator arrays: rewards[i] / penalties[i] apply to validator i.
    rewards: Sequence[Gwei]
    penalties: Sequence[Gwei]
class RewardsAndPenalties(NamedTuple):
    # One Deltas pair per reward component of the epoch transition.
    source: Deltas
    target: Deltas
    head: Deltas
    inclusion_delay: Deltas
    inactivity: Deltas
def mk_rew_pen(size: int) -> RewardsAndPenalties:
    """Allocate zeroed reward/penalty arrays (one slot per validator) for every component."""
    def zeroed() -> Deltas:
        # Fresh lists per call: each component's deltas are mutated independently.
        return Deltas(rewards=[Gwei(0)] * size, penalties=[Gwei(0)] * size)
    return RewardsAndPenalties(
        source=zeroed(),
        target=zeroed(),
        head=zeroed(),
        inclusion_delay=zeroed(),
        inactivity=zeroed(),
    )
def get_attestation_rewards_and_penalties(epochs_ctx: EpochsContext, process: EpochProcess, state: BeaconState) -> RewardsAndPenalties:
    """Compute per-validator attestation reward/penalty deltas for the previous epoch.

    Pure computation: returns a RewardsAndPenalties of parallel arrays indexed by
    validator index; no state is mutated here. Note that the quadratic-leak
    penalty is accumulated into the `inclusion_delay` component's penalties,
    and the `inactivity` component is left all-zero by this function.
    """
    validator_count = len(process.statuses)
    res = mk_rew_pen(validator_count)
    def has_markers(flags: int, markers: int) -> bool:
        # True iff every bit of `markers` is set in `flags`.
        return flags & markers == markers
    increment = EFFECTIVE_BALANCE_INCREMENT
    # Floor each aggregate at one increment so the divisions below never see zero.
    total_balance = max(process.total_active_stake, increment)
    prev_epoch_source_stake = max(process.prev_epoch_unslashed_stake.source_stake, increment)
    prev_epoch_target_stake = max(process.prev_epoch_unslashed_stake.target_stake, increment)
    prev_epoch_head_stake = max(process.prev_epoch_unslashed_stake.head_stake, increment)
    # Sqrt first, before factoring out the increment for later usage.
    balance_sq_root = integer_squareroot(total_balance)
    finality_delay = process.prev_epoch - state.finalized_checkpoint.epoch
    is_inactivity_leak = finality_delay > MIN_EPOCHS_TO_INACTIVITY_PENALTY
    # All summed effective balances are normalized to effective-balance increments, to avoid overflows.
    total_balance //= increment
    prev_epoch_source_stake //= increment
    prev_epoch_target_stake //= increment
    prev_epoch_head_stake //= increment
    for i, status in enumerate(process.statuses):
        eff_balance = status.validator.effective_balance
        base_reward = eff_balance * BASE_REWARD_FACTOR // balance_sq_root // BASE_REWARDS_PER_EPOCH
        proposer_reward = base_reward // PROPOSER_REWARD_QUOTIENT
        # Inclusion speed bonus
        if has_markers(status.flags, FLAG_PREV_SOURCE_ATTESTER | FLAG_UNSLASHED):
            # Proposer share is credited to status.proposer_index — presumably the
            # proposer of the block that first included this attestation; confirm
            # against where `proposer_index` is populated.
            res.inclusion_delay.rewards[status.proposer_index] += proposer_reward
            max_attester_reward = base_reward - proposer_reward
            # Attester share decays with inclusion delay (delay 1 => full share).
            res.inclusion_delay.rewards[i] += max_attester_reward // status.inclusion_delay
        if status.flags & FLAG_ELIGIBLE_ATTESTER != 0:
            # In case of `is_inactivity_leak`:
            # Since full base reward will be canceled out by inactivity penalty deltas,
            # optimal participation receives full base reward compensation here.
            # Expected FFG source
            if has_markers(status.flags, FLAG_PREV_SOURCE_ATTESTER | FLAG_UNSLASHED):
                if is_inactivity_leak:
                    res.source.rewards[i] += base_reward
                else:
                    # Justification-participation reward
                    res.source.rewards[i] += base_reward * prev_epoch_source_stake // total_balance
            else:
                # Justification-non-participation R-penalty
                res.source.penalties[i] += base_reward
            # Expected FFG target
            if has_markers(status.flags, FLAG_PREV_TARGET_ATTESTER | FLAG_UNSLASHED):
                if is_inactivity_leak:
                    res.target.rewards[i] += base_reward
                else:
                    # Boundary-attestation reward
                    res.target.rewards[i] += base_reward * prev_epoch_target_stake // total_balance
            else:
                # Boundary-attestation-non-participation R-penalty
                res.target.penalties[i] += base_reward
            # Expected head
            if has_markers(status.flags, FLAG_PREV_HEAD_ATTESTER | FLAG_UNSLASHED):
                if is_inactivity_leak:
                    res.head.rewards[i] += base_reward
                else:
                    # Canonical-participation reward
                    res.head.rewards[i] += base_reward * prev_epoch_head_stake // total_balance
            else:
                # Non-canonical-participation R-penalty
                res.head.penalties[i] += base_reward
            # Take away max rewards if we're not finalizing
            if is_inactivity_leak:
                # If validator is performing optimally this cancels all rewards for a neutral balance
                res.inclusion_delay.penalties[i] += base_reward * BASE_REWARDS_PER_EPOCH - proposer_reward
                if not has_markers(status.flags, FLAG_PREV_TARGET_ATTESTER | FLAG_UNSLASHED):
                    # Quadratic leak: penalty grows with how long finality has stalled.
                    res.inclusion_delay.penalties[i] += eff_balance * finality_delay // INACTIVITY_PENALTY_QUOTIENT
    return res
def process_rewards_and_penalties(epochs_ctx: EpochsContext, process: EpochProcess, state: BeaconState) -> None:
    """Apply the epoch's attestation reward/penalty deltas to validator balances."""
    # There are no previous-epoch attestations to reward at genesis.
    if process.current_epoch == GENESIS_EPOCH:
        return
    deltas = get_attestation_rewards_and_penalties(epochs_ctx, process, state)
    balances = [int(b) for b in state.balances.readonly_iter()]
    # Apply every component's rewards first, then every component's penalties
    # (NamedTuple iteration yields components in declaration order:
    # source, target, head, inclusion_delay, inactivity).
    for component in deltas:
        for index, amount in enumerate(component.rewards):
            balances[index] += amount
    for component in deltas:
        for index, amount in enumerate(component.penalties):
            # Saturating subtraction: a balance never goes below zero.
            balances[index] = 0 if amount > balances[index] else balances[index] - amount
    # Important: do not change state one balance at a time.
    # Set them all at once, constructing the tree in one go.
    state.balances = balances
def process_registry_updates(epochs_ctx: EpochsContext, process: EpochProcess, state: BeaconState) -> None:
    """Apply registry transitions: ejections, activation eligibility, activations."""
    # Process ejections, appending to the precomputed exit queue while
    # respecting the per-epoch churn limit.
    queue_epoch = process.exit_queue_end
    queue_churn = process.exit_queue_end_churn
    for index in process.indices_to_eject:
        ejected = state.validators[index]
        # Set validator exit epoch and withdrawable epoch
        ejected.exit_epoch = queue_epoch
        ejected.withdrawable_epoch = Epoch(queue_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY)
        queue_churn += 1
        # When this epoch's churn allowance is exhausted, spill into the next epoch.
        if queue_churn >= process.churn_limit:
            queue_churn = 0
            queue_epoch += 1
    # Set new activation eligibilities
    for index in process.indices_to_set_activation_eligibility:
        state.validators[index].activation_eligibility_epoch = epochs_ctx.current_shuffling.epoch + 1
    finality_epoch = state.finalized_checkpoint.epoch
    # Dequeue validators for activation up to churn limit; only those whose
    # queue placement is finalized may activate.
    for index in process.indices_to_maybe_activate[:process.churn_limit]:
        if process.statuses[index].validator.activation_eligibility_epoch > finality_epoch:
            # Remaining candidates all have an even higher eligibility epoch; stop early.
            break
        state.validators[index].activation_epoch = compute_activation_exit_epoch(process.current_epoch)
def process_slashings(epochs_ctx: EpochsContext, process: EpochProcess, state: BeaconState) -> None:
    """Apply the correlated slashing penalty to every validator slashed this epoch.

    The penalty is proportional to the total stake slashed over the recent
    slashings window (tripled, capped at the total active stake).
    """
    total_balance = process.total_active_stake
    slashings_scale = min(sum(state.slashings.readonly_iter()) * 3, total_balance)
    # Factored out from penalty numerator to avoid uint64 overflow.
    # Loop-invariant, so hoisted out of the loop (was re-assigned per iteration).
    increment = EFFECTIVE_BALANCE_INCREMENT
    for index in process.indices_to_slash:
        effective_balance = process.statuses[index].validator.effective_balance
        # Integer-division order matters: divide by increment first, multiply
        # by the scale, then round down to a whole number of increments.
        penalty_numerator = effective_balance // increment * slashings_scale
        penalty = penalty_numerator // total_balance * increment
        decrease_balance(state, index, penalty)
# Hysteresis thresholds for effective-balance updates (used in
# process_final_updates): the effective balance only moves once the actual
# balance drifts beyond these bounds, avoiding churn from tiny fluctuations.
HYSTERESIS_INCREMENT = EFFECTIVE_BALANCE_INCREMENT // HYSTERESIS_QUOTIENT
DOWNWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_DOWNWARD_MULTIPLIER
UPWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_UPWARD_MULTIPLIER
def process_final_updates(epochs_ctx: EpochsContext, process: EpochProcess, state: BeaconState) -> None:
    """End-of-epoch housekeeping: eth1 votes, effective balances, slashings
    vector, randao mixes, historical roots, and attestation rotation."""
    current_epoch = process.current_epoch
    next_epoch = Epoch(current_epoch + 1)
    # Reset eth1 data votes at the start of every voting period.
    if next_epoch % EPOCHS_PER_ETH1_VOTING_PERIOD == 0:
        state.eth1_data_votes = []
    # Update effective balances with hysteresis: adjust only when the actual
    # balance has drifted past the downward/upward threshold.
    balances_iter = state.balances.readonly_iter()
    for index, (status, balance) in enumerate(zip(process.statuses, balances_iter)):
        effective = status.validator.effective_balance
        drifted_down = balance + DOWNWARD_THRESHOLD < effective
        drifted_up = effective + UPWARD_THRESHOLD < balance
        if drifted_down or drifted_up:
            # Round down to a whole number of increments, capped at the maximum.
            rounded = balance - balance % EFFECTIVE_BALANCE_INCREMENT
            state.validators[index].effective_balance = min(rounded, MAX_EFFECTIVE_BALANCE)
    # Reset the slashings slot that rotates in for the next epoch.
    state.slashings[next_epoch % EPOCHS_PER_SLASHINGS_VECTOR] = Gwei(0)
    # Set randao mix
    state.randao_mixes[next_epoch % EPOCHS_PER_HISTORICAL_VECTOR] = get_randao_mix(state, current_epoch)
    # Accumulate block/state roots into the historical accumulator once per period.
    if next_epoch % (SLOTS_PER_HISTORICAL_ROOT // SLOTS_PER_EPOCH) == 0:
        batch = HistoricalBatch(block_roots=state.block_roots, state_roots=state.state_roots)
        state.historical_roots.append(hash_tree_root(batch))
    # Rotate current/previous epoch attestations
    state.previous_epoch_attestations = state.current_epoch_attestations
    state.current_epoch_attestations = []
def process_block_header(epochs_ctx: EpochsContext, state: BeaconState, block: BeaconBlock) -> None:
# Verify that the slots match
assert block.slot == state.slot
# Verify that the block is newer than latest block header
assert block.slot > state.latest_block_header.slot
# Verify that proposer index is the correct index
proposer_index = epochs_ctx.get_beacon_proposer(state.slot)
assert block.proposer_index == proposer_index
# Verify that the parent matches
assert block.parent_root == hash_tree_root(state.latest_block_header)
# Cache current block as the new latest block
state.latest_block_header = BeaconBlockHeader(
slot=block.slot,
proposer_index=block.proposer_index,
parent_root=block.parent_root,