-
Notifications
You must be signed in to change notification settings - Fork 107
/
keymanager_replicate.go
176 lines (150 loc) · 4.79 KB
/
keymanager_replicate.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
package runtime
import (
"bytes"
"context"
"fmt"
"slices"
beacon "github.com/oasisprotocol/oasis-core/go/beacon/api"
"github.com/oasisprotocol/oasis-core/go/keymanager/secrets"
"github.com/oasisprotocol/oasis-core/go/oasis-test-runner/env"
"github.com/oasisprotocol/oasis-core/go/oasis-test-runner/oasis"
"github.com/oasisprotocol/oasis-core/go/oasis-test-runner/scenario"
)
// KeymanagerReplicate is the key manager master secret replication scenario.
var KeymanagerReplicate scenario.Scenario = newKmReplicateImpl()
// kmReplicateImpl tests that key managers joining (or rejoining) the committee
// replicate previously generated master secrets from the running nodes.
type kmReplicateImpl struct {
	Scenario
}
// newKmReplicateImpl constructs the key manager replication scenario.
func newKmReplicateImpl() scenario.Scenario {
	base := NewScenario(
		"keymanager-replication",
		NewTestClient().WithScenario(InsertRemoveKeyValueEncScenario),
	)
	return &kmReplicateImpl{Scenario: *base}
}
// Clone implements scenario.Scenario.
func (sc *kmReplicateImpl) Clone() scenario.Scenario {
	inner := sc.Scenario.Clone().(*Scenario)
	return &kmReplicateImpl{Scenario: *inner}
}
// Fixture implements scenario.Scenario.
func (sc *kmReplicateImpl) Fixture() (*oasis.NetworkFixture, error) {
	f, err := sc.Scenario.Fixture()
	if err != nil {
		return nil, err
	}

	// Shorten beacon epochs to speed up the test.
	f.Network.Beacon.VRFParameters = &beacon.VRFParameters{
		Interval:             10,
		ProofSubmissionDelay: 2,
	}

	// Compute workers are not needed for this scenario.
	f.ComputeWorkers = []oasis.ComputeWorkerFixture{}

	// Replication requires multiple key managers: two started immediately,
	// and two held back so they have secrets to replicate later.
	km := oasis.KeymanagerFixture{Runtime: 0, Entity: 1, Policy: 0}
	lateKm := km
	lateKm.NodeFixture = oasis.NodeFixture{NoAutoStart: true}
	f.Keymanagers = []oasis.KeymanagerFixture{km, km, lateKm, lateKm}

	// Enable master secret rotation.
	f.KeymanagerPolicies[0].MasterSecretRotationInterval = 1

	return f, nil
}
// Run implements scenario.Scenario.
//
// The scenario starts two key managers, generates secrets, stops one node
// while further secrets are produced, then starts all remaining key managers
// and verifies they replicate the full secret history: all four nodes must
// join the committee, report matching checksums, and serve identical
// long-term public keys for every generation.
func (sc *kmReplicateImpl) Run(ctx context.Context, _ *env.Env) error {
	// Start the first two key managers (the other two are NoAutoStart).
	if err := sc.Net.Start(); err != nil {
		return err
	}
	// Wait until 3 master secrets are generated (generation index 2).
	if _, err := sc.WaitMasterSecret(ctx, 2); err != nil {
		return fmt.Errorf("master secret not generated: %w", err)
	}
	// Make sure exactly two key managers were generating secrets.
	status, err := sc.KeyManagerStatus(ctx)
	if err != nil {
		return err
	}
	if len(status.Nodes) != 2 {
		return fmt.Errorf("key manager committee should consist of two nodes")
	}
	// Stop the second manager.
	// Upon restarting, its master secrets will be partially synchronized (3 out of 6).
	if err = sc.Net.Keymanagers()[1].Stop(); err != nil {
		return err
	}
	// Generate another 3 master secrets (up to generation index 5).
	if _, err = sc.WaitMasterSecret(ctx, 5); err != nil {
		return fmt.Errorf("master secret not generated: %w", err)
	}
	// Make sure the first key manager was generating secrets.
	status, err = sc.KeyManagerStatus(ctx)
	if err != nil {
		return err
	}
	if len(status.Nodes) != 1 {
		return fmt.Errorf("key manager committee should consist of one node")
	}
	// Start key managers that are not running and wait until they replicate
	// master secrets from the first one.
	if err = sc.StartAndWaitKeymanagers(ctx, []int{1, 2, 3}); err != nil {
		return err
	}
	// If the replication was successful, the next key manager committee should
	// consist of all nodes. Wait for two status updates so the committee has
	// had a chance to re-form after the restarts.
	if status, err = sc.waitKeymanagerStatuses(ctx, 2); err != nil {
		return err
	}
	if !status.IsInitialized {
		return fmt.Errorf("key manager failed to initialize")
	}
	if len(status.Nodes) != len(sc.Net.Keymanagers()) {
		return fmt.Errorf("key manager committee should consist of all nodes")
	}
	for _, km := range sc.Net.Keymanagers() {
		if !slices.Contains(status.Nodes, km.NodeID) {
			return fmt.Errorf("node missing from key manager status")
		}
	}
	// Wait a few blocks so that the key managers transition to the new secret
	// and register with the latest checksum. The latter can take some time.
	if _, err = sc.WaitBlocks(ctx, 8); err != nil {
		return err
	}
	// Check if checksums match across all key managers.
	for idx := range sc.Net.Keymanagers() {
		initRsp, err := sc.KeymanagerInitResponse(ctx, idx)
		if err != nil {
			return err
		}
		if !bytes.Equal(initRsp.Checksum, status.Checksum) {
			return fmt.Errorf("key manager checksum mismatch")
		}
	}
	// If we came this far then all key managers should have the same state.
	// Let's test if they replicated the same secrets by fetching long-term
	// public keys for all generations.
	return sc.CompareLongtermPublicKeys(ctx, []int{0, 1, 2, 3})
}
// waitKeymanagerStatuses blocks until n status updates for the key manager
// runtime have been observed and returns the last one, or the context error
// if ctx is canceled first.
func (sc *kmReplicateImpl) waitKeymanagerStatuses(ctx context.Context, n int) (*secrets.Status, error) {
	sc.Logger.Info("waiting for key manager status", "n", n)

	stCh, stSub, err := sc.Net.Controller().Keymanager.Secrets().WatchStatuses(ctx)
	if err != nil {
		return nil, err
	}
	defer stSub.Close()

	remaining := n
	for {
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		case st := <-stCh:
			// Ignore statuses belonging to other runtimes.
			if !st.ID.Equal(&KeyManagerRuntimeID) {
				continue
			}
			remaining--
			if remaining <= 0 {
				return st, nil
			}
		}
	}
}