common.go

package verification

import (
	"fmt"
	"sync"

	"github.com/onflow/flow-go/consensus/hotstuff/model"
	"github.com/onflow/flow-go/crypto"
	"github.com/onflow/flow-go/model/flow"
)

// MakeVoteMessage generates the message we have to sign in order to be able
// to verify signatures without having the full block. To that effect, each data
// structure that is signed contains the sometimes redundant view number and
// block ID; this allows us to create the signed message and verify the signed
// message without having the full block contents.
func MakeVoteMessage(view uint64, blockID flow.Identifier) []byte {
	msg := flow.MakeID(struct {
		BlockID flow.Identifier
		View    uint64
	}{
		BlockID: blockID,
		View:    view,
	})
	return msg[:]
}
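
// The following sketch is illustrative and not part of the original file: it
// shows that the vote message is fully determined by the view and block ID, so
// a verifier can reconstruct the exact signed message without the block payload.
// The zero-valued block ID below is a placeholder, not a real block.
func exampleVoteMessage() {
	var blockID flow.Identifier // placeholder; stands in for a real block ID
	msgA := MakeVoteMessage(42, blockID)
	msgB := MakeVoteMessage(42, blockID)
	// both messages are byte-identical, so a signature produced over msgA
	// verifies against the independently reconstructed msgB
	fmt.Println(string(msgA) == string(msgB)) // true
}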

// checkVotesValidity checks the validity of each vote by checking that they are
// all for the same view number, the same block ID and that each vote is from a
// different signer.
func checkVotesValidity(votes []*model.Vote) error {

	// first, we should be sure to have votes at all
	if len(votes) == 0 {
		return fmt.Errorf("need at least one vote")
	}

	// we use this map to check each vote has a different signer
	signerIDs := make(map[flow.Identifier]struct{}, len(votes))

	// we use the view and block ID from the first vote to check that all votes
	// have the same view and block ID
	view := votes[0].View
	blockID := votes[0].BlockID

	// go through all votes to check their validity
	for _, vote := range votes {

		// if we have a view mismatch, bail
		if vote.View != view {
			return fmt.Errorf("view mismatch between votes (%d != %d)", vote.View, view)
		}

		// if we have a block ID mismatch, bail
		if vote.BlockID != blockID {
			return fmt.Errorf("block ID mismatch between votes (%x != %x)", vote.BlockID, blockID)
		}

		// register the signer in our map
		signerIDs[vote.SignerID] = struct{}{}
	}

	// check that we have as many signers as votes
	if len(signerIDs) != len(votes) {
		return fmt.Errorf("fewer signers than votes (signers: %d, votes: %d)", len(signerIDs), len(votes))
	}

	return nil
}
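
// The following sketch is illustrative and not part of the original file: it
// builds two well-formed votes for the same view and block and shows that
// checkVotesValidity accepts them, while a duplicated signer makes the signer
// count smaller than the vote count and is rejected. Only the fields used by
// checkVotesValidity (View, BlockID, SignerID) are set.
func exampleCheckVotes() {
	var blockID flow.Identifier // placeholder block ID
	votes := []*model.Vote{
		{View: 7, BlockID: blockID, SignerID: flow.Identifier{0x01}},
		{View: 7, BlockID: blockID, SignerID: flow.Identifier{0x02}},
	}
	fmt.Println(checkVotesValidity(votes)) // nil: distinct signers, same view and block

	// appending a vote that reuses signer 0x01 triggers the duplicate-signer check
	votes = append(votes, &model.Vote{View: 7, BlockID: blockID, SignerID: flow.Identifier{0x01}})
	fmt.Println(checkVotesValidity(votes)) // error: fewer signers than votes
}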

// stakingKeysAggregator is a structure that aggregates the staking
// public keys for QC verifications.
type stakingKeysAggregator struct {
	lastStakingSigners map[flow.Identifier]*flow.Identity
	lastStakingKey     crypto.PublicKey
	sync.RWMutex
}

// newStakingKeysAggregator creates a new staking keys aggregator.
func newStakingKeysAggregator() *stakingKeysAggregator {
	aggregator := &stakingKeysAggregator{
		lastStakingSigners: map[flow.Identifier]*flow.Identity{},
		lastStakingKey:     NeutralBLSPublicKey(),
		RWMutex:            sync.RWMutex{},
	}
	return aggregator
}

// aggregatedStakingKey returns the aggregated public key of the input signers.
func (s *stakingKeysAggregator) aggregatedStakingKey(signers flow.IdentityList) (crypto.PublicKey, error) {

	// this greedy algorithm assumes the signers set does not vary much from one call
	// to aggregatedStakingKey to another. It computes the delta of signers compared to the
	// latest list of signers and adjusts the latest aggregated public key. This is faster
	// than aggregating the public keys from scratch at each call.
	// Considering votes have equal weights, aggregating keys from scratch takes n*2/3 key operations, where
	// n is the number of HotStuff participants. This corresponds to the worst case of the greedy
	// algorithm. The worst case happens when the 2/3 latest signers and the 2/3 new signers only
	// have 1/3 in common (the minimum common ratio).

	s.RLock()
	lastSet := s.lastStakingSigners
	lastKey := s.lastStakingKey
	s.RUnlock()

	// get the signers delta and update the last list for the next comparison
	newSignerKeys, missingSignerKeys, updatedSignerSet := identitiesDeltaKeys(signers, lastSet)

	// add the new keys
	var err error
	updatedKey, err := AggregateBLSPublicKeys(append(newSignerKeys, lastKey))
	if err != nil {
		return nil, fmt.Errorf("adding new staking keys failed: %w", err)
	}

	// remove the missing keys
	updatedKey, err = RemoveBLSPublicKeys(updatedKey, missingSignerKeys)
	if err != nil {
		return nil, fmt.Errorf("removing missing staking keys failed: %w", err)
	}

	// update the latest list and public key. The current thread may overwrite the result of another thread
	// but the greedy algorithm remains valid.
	s.Lock()
	s.lastStakingSigners = updatedSignerSet
	s.lastStakingKey = updatedKey
	s.Unlock()

	return updatedKey, nil
}
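
// The following sketch is illustrative and not part of the original file: it
// works through the cost argument in the comment above for an assumed committee
// size of n = 300. Aggregating a 2/3 quorum from scratch costs roughly 2n/3 key
// operations per call, while the greedy update only pays for the keys that enter
// or leave the signer set; the two costs only meet in the worst case, where
// consecutive 2/3 quorums overlap by just n/3.
func exampleGreedyCost() {
	n := 300                   // assumed number of HotStuff participants
	scratch := 2 * n / 3       // key operations to aggregate a 2/3 quorum from scratch: 200
	overlap := n / 3           // worst-case overlap between the old and new quorum: 100
	added := 2*n/3 - overlap   // keys to add in the greedy update: 100
	removed := 2*n/3 - overlap // keys to remove in the greedy update: 100
	fmt.Println(scratch, added+removed) // 200 200: the worst case matches the from-scratch cost
}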

// identitiesDeltaKeys computes the delta between the reference s.lastStakingSigners
// and the input identity list.
// It returns a list of the new signer keys, a list of the missing signer keys and the new map of signers.
func identitiesDeltaKeys(signers flow.IdentityList, lastSet map[flow.Identifier]*flow.Identity) (
	[]crypto.PublicKey, []crypto.PublicKey, map[flow.Identifier]*flow.Identity) {

	var newSignerKeys, missingSignerKeys []crypto.PublicKey

	// create a map of the input list,
	// and check the new signers
	signersMap := map[flow.Identifier]*flow.Identity{}
	for _, signer := range signers {
		signersMap[signer.ID()] = signer
		_, ok := lastSet[signer.ID()]
		if !ok {
			newSignerKeys = append(newSignerKeys, signer.StakingPubKey)
		}
	}

	// look for missing signers
	for signerID, signer := range lastSet {
		_, ok := signersMap[signerID]
		if !ok {
			missingSignerKeys = append(missingSignerKeys, signer.StakingPubKey)
		}
	}

	return newSignerKeys, missingSignerKeys, signersMap
}
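
// The following sketch is illustrative and not part of the original file: it
// runs identitiesDeltaKeys on two overlapping signer sets. Only NodeID matters
// for the delta computation here; the staking keys are left nil because the
// function merely collects them into slices.
func exampleDeltaKeys() {
	a := &flow.Identity{NodeID: flow.Identifier{0x0a}}
	b := &flow.Identity{NodeID: flow.Identifier{0x0b}}
	c := &flow.Identity{NodeID: flow.Identifier{0x0c}}

	// previous signer set: {a, b}
	lastSet := map[flow.Identifier]*flow.Identity{a.ID(): a, b.ID(): b}

	// new signer set: {b, c} -> c's key is new, a's key is now missing
	newKeys, missingKeys, updated := identitiesDeltaKeys(flow.IdentityList{b, c}, lastSet)
	fmt.Println(len(newKeys), len(missingKeys), len(updated)) // 1 1 2
}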