Fix eviction of absent participant #20

Merged · 4 commits · Sep 28, 2020
Changes from 2 commits
25 changes: 22 additions & 3 deletions share/dkg/dkg.go
@@ -552,6 +552,7 @@ func (d *DistKeyGenerator) ProcessResponses(bundles []*ResponseBundle) (*Result,
return res, nil, err
}

var validAuthors []Index
var foundComplaint bool
for _, bundle := range bundles {
if bundle == nil {
@@ -566,8 +567,7 @@ func (d *DistKeyGenerator) ProcessResponses(bundles []*ResponseBundle) (*Result,
}

if bytes.Compare(bundle.SessionID, d.c.Nonce) != 0 {
// XXX for the moment we continue,
// TODO: fix it with a proper eviction list of share holders
d.evictedHolders = append(d.evictedHolders, bundle.ShareIndex)
continue
}

@@ -591,8 +591,28 @@ func (d *DistKeyGenerator) ProcessResponses(bundles []*ResponseBundle) (*Result,
if response.Status == Complaint {
foundComplaint = true
}
validAuthors = append(validAuthors, bundle.ShareIndex)
}
}
// In case of fast sync, we want to make sure all share holders have sent a
// valid response. All share holders that did not will be evicted from the
// final group. Since we are using a broadcast channel, if a node is honest,
// its response will be received by all honest nodes.
if d.c.FastSync {
// we only need to look at the nodes that did not send any response,
// since the invalid ones are already marked as evicted
allSent := append(validAuthors, d.evictedHolders...)
isPresent := isEvicted
Member commented on the line above (isPresent := isEvicted):

This is counter-intuitive. From what I can see, this method is only used in this if block. Can you directly call isEvicted in the loop below? Renaming it to a variable named for the inverse property makes it more confusing what is being checked.

for _, n := range d.c.NewNodes {
if d.canReceive && d.nidx == n.Index {
continue // we don't evict ourselves
}
if !isPresent(allSent, n.Index) {
d.evictedHolders = append(d.evictedHolders, n.Index)
}
}
}
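
For reference, the change the reviewer asks for would look roughly like this. This is a sketch only: it assumes isEvicted keeps the signature implied by its call site above, func([]Index, Index) bool.

    // Same loop, calling isEvicted directly instead of aliasing it to the
    // inversely named isPresent (sketch of the review suggestion).
    allSent := append(validAuthors, d.evictedHolders...)
    for _, n := range d.c.NewNodes {
        if d.canReceive && d.nidx == n.Index {
            continue // we don't evict ourselves
        }
        if !isEvicted(allSent, n.Index) {
            d.evictedHolders = append(d.evictedHolders, n.Index)
        }
    }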

if !foundComplaint {
// there is no complaint !
if d.canReceive && d.statuses.CompleteSuccess() {
@@ -883,7 +903,6 @@ func (d *DistKeyGenerator) computeResharingResult() (*Result, error) {
qual = append(qual, newNode)
}
}

if len(qual) < d.c.Threshold {
return nil, fmt.Errorf("dkg: too many uncompliant new participants %d/%d", len(qual), d.c.Threshold)
}
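To see the fast-sync eviction rule from the hunk above in isolation, here is a self-contained sketch of the same logic. The Index type and the contains helper are stand-ins for the package's own types and the isEvicted helper (assumptions for illustration, not the real API); in the actual code this runs on DistKeyGenerator as shown above.

    package main

    import "fmt"

    // Index stands in for the dkg package's node index type (assumed uint32).
    type Index uint32

    // contains plays the role of isEvicted: it reports whether idx is in list.
    func contains(list []Index, idx Index) bool {
        for _, i := range list {
            if i == idx {
                return true
            }
        }
        return false
    }

    // evictAbsent adds to evicted every node that neither authored a valid
    // response nor was already evicted, skipping selfIdx (a node does not
    // evict itself).
    func evictAbsent(nodes, validAuthors, evicted []Index, selfIdx Index) []Index {
        allSent := append(append([]Index{}, validAuthors...), evicted...)
        for _, n := range nodes {
            if n == selfIdx {
                continue
            }
            if !contains(allSent, n) {
                evicted = append(evicted, n)
            }
        }
        return evicted
    }

    func main() {
        nodes := []Index{0, 1, 2, 3, 4}
        valid := []Index{0, 2}  // nodes whose response bundles were valid
        evicted := []Index{3}   // already evicted, e.g. for a bad session ID
        // Node 1 is "us" (selfIdx); node 4 never responded and gets evicted.
        fmt.Println(evictAbsent(nodes, valid, evicted, 1)) // prints [3 4]
    }
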
156 changes: 151 additions & 5 deletions share/dkg/dkg_test.go
@@ -1,6 +1,7 @@
package dkg

import (
"fmt"
"testing"

"github.com/drand/kyber"
@@ -215,6 +216,112 @@ func TestDKGFull(t *testing.T) {
testResults(t, suite, thr, n, results)
}

func TestDKGResharing(t *testing.T) {
n := 5
thr := 4
var suite = bn256.NewSuiteG2()
var sigSuite = bn256.NewSuiteG1()
tns := GenerateTestNodes(suite, n)
list := NodesFromTest(tns)
conf := Config{
Suite: suite,
NewNodes: list,
Threshold: thr,
Auth: schnorr.NewScheme(suite),
}

results := RunDKG(t, tns, conf, nil, nil, nil)
for i, t := range tns {
t.res = results[i]
}
testResults(t, suite, thr, n, results)

// create a partial signature with the share now and make sure the partial
// signature is verifiable and then *not* verifiable after the resharing
oldShare := results[0].Key.Share
msg := []byte("Hello World")
scheme := tbls.NewThresholdSchemeOnG1(sigSuite)
oldPartial, err := scheme.Sign(oldShare, msg)
require.NoError(t, err)
poly := share.NewPubPoly(suite, suite.Point().Base(), results[0].Key.Commits)
require.NoError(t, scheme.VerifyPartial(poly, msg, oldPartial))

// we now set up the second group with a higher node count and a higher
// threshold, and we remove one node from the previous group
newN := n + 5
newT := thr + 4
var newTns = make([]*TestNode, newN)
// remove the last node from the previous group
offline := 1
copy(newTns, tns[:n-offline])
// + offline because we fill the gap left by the offline nodes with new nodes
newNode := newN - n + offline
for i := 0; i < newNode; i++ {
// a new node can have the same index as a previous one; the separation is made by the public key
newTns[n-1+i] = NewTestNode(suite, n-1+i)
}
newList := NodesFromTest(newTns)
newConf := &Config{
Suite: suite,
NewNodes: newList,
OldNodes: list,
Threshold: newT,
OldThreshold: thr,
Auth: schnorr.NewScheme(suite),
}

SetupReshareNodes(newTns, newConf, tns[0].res.Key.Commits)

var deals []*DealBundle
for _, node := range newTns {
if node.res == nil {
// new members don't issue deals
continue
}
d, err := node.dkg.Deals()
require.NoError(t, err)
deals = append(deals, d)
}

var responses []*ResponseBundle
for _, node := range newTns {
resp, err := node.dkg.ProcessDeals(deals)
require.NoError(t, err)
if resp != nil {
// the last node from the old group is not present, so there should be
// some responses!
responses = append(responses, resp)
}
}
require.True(t, len(responses) > 0)

results = nil
for _, node := range newTns {
res, just, err := node.dkg.ProcessResponses(responses)
require.NoError(t, err)
require.Nil(t, res)
// since the last old node is absent, it can't give any justifications
require.Nil(t, just)
}

for _, node := range newTns {
res, err := node.dkg.ProcessJustifications(nil)
require.NoError(t, err)
require.NotNil(t, res)
results = append(results, res)
}
testResults(t, suite, newT, newN, results)

// test a tbls signature is correct
newShare := results[0].Key.Share
newPartial, err := scheme.Sign(newShare, msg)
require.NoError(t, err)
newPoly := share.NewPubPoly(suite, suite.Point().Base(), results[0].Key.Commits)
require.NoError(t, scheme.VerifyPartial(newPoly, msg, newPartial))
// test we can not verify the old partial with the new public polynomial
require.Error(t, scheme.VerifyPartial(poly, msg, newPartial))
}

func TestDKGThreshold(t *testing.T) {
n := 5
thr := 4
@@ -280,8 +387,8 @@ func TestDKGThreshold(t *testing.T) {
testResults(t, suite, thr, n, filtered)
}

func TestDKGResharing(t *testing.T) {
n := 5
func TestDKGResharingFast(t *testing.T) {
n := 6
thr := 4
var suite = bn256.NewSuiteG2()
var sigSuite = bn256.NewSuiteG1()
@@ -346,13 +453,26 @@ func TestDKGResharing(t *testing.T) {
newTns[n-1+i] = NewTestNode(suite, n-1+i)
}
newList := NodesFromTest(newTns)
// key from the previous and the new group which is registered in the
// group but won't participate
p := 1
skipKey := list[p].Public
var skipNew Index
for _, n := range newList {
if n.Public.Equal(skipKey) {
skipNew = n.Index
}
}
fmt.Println("skipping old index: ", list[p].Index, "public key", skipKey, "newIdx", skipNew)

newConf := &Config{
Suite: suite,
NewNodes: newList,
OldNodes: list,
Threshold: newT,
OldThreshold: thr,
Auth: schnorr.NewScheme(suite),
FastSync: true,
}

SetupReshareNodes(newTns, newConf, tns[0].res.Key.Commits)
@@ -363,15 +483,22 @@
// new members don't issue deals
continue
}
if node.Public.Equal(skipKey) {
continue
}
d, err := node.dkg.Deals()
require.NoError(t, err)
deals = append(deals, d)
}

var responses []*ResponseBundle
for _, node := range newTns {
if node.Public.Equal(skipKey) {
continue
}
resp, err := node.dkg.ProcessDeals(deals)
require.NoError(t, err)

if resp != nil {
// the last node from the old group is not present, so there should be
// some responses!
@@ -381,20 +508,39 @@
require.True(t, len(responses) > 0)

results = nil
var justifs []*JustificationBundle
for _, node := range newTns {
if node.Public.Equal(skipKey) {
continue
}
res, just, err := node.dkg.ProcessResponses(responses)
require.NoError(t, err)
require.Nil(t, res)
// since the last old node is absent he can't give any justifications
require.Nil(t, just)
if node.res == nil {
// new members don't issue justifications
continue
}
require.NotNil(t, just.Justifications)
fmt.Println(just)
require.Equal(t, just.Justifications[0].ShareIndex, skipNew)
justifs = append(justifs, just)
}

for _, node := range newTns {
res, err := node.dkg.ProcessJustifications(nil)
if node.Public.Equal(skipKey) {
continue
}
res, err := node.dkg.ProcessJustifications(justifs)
require.NoError(t, err)
require.NotNil(t, res)
results = append(results, res)
}

for _, res := range results {
for _, n := range res.QUAL {
require.False(t, n.Public.Equal(skipKey))
}
}
testResults(t, suite, newT, newN, results)

// test a tbls signature is correct