Fix eviction of absent participant #20

Merged · 4 commits · Sep 28, 2020
52 changes: 39 additions & 13 deletions share/dkg/dkg.go
@@ -490,7 +490,7 @@ func (d *DistKeyGenerator) ProcessDeals(bundles []*DealBundle) (*ResponseBundle,
 		// if the node is evicted, we don't even need to send a complaint or a
 		// response, since every honest node evicts him as well.
 		// XXX Is that always true? Should we still send a complaint?
-		if isEvicted(d.evicted, node.Index) {
+		if contains(d.evicted, node.Index) {
 			continue
 		}
 
@@ -552,6 +552,7 @@ func (d *DistKeyGenerator) ProcessResponses(bundles []*ResponseBundle) (*Result,
 		return res, nil, err
 	}
 
+	var validAuthors []Index
 	var foundComplaint bool
 	for _, bundle := range bundles {
 		if bundle == nil {
@@ -566,8 +567,7 @@ func (d *DistKeyGenerator) ProcessResponses(bundles []*ResponseBundle) (*Result,
 		}
 
 		if bytes.Compare(bundle.SessionID, d.c.Nonce) != 0 {
-			// XXX for the moment we continue,
-			// TODO: fix it with a proper eviction list of share holders
+			d.evictedHolders = append(d.evictedHolders, bundle.ShareIndex)
			continue
 		}
 
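Aside: the `bytes.Compare(bundle.SessionID, d.c.Nonce) != 0` condition above is equivalent to the more idiomatic `!bytes.Equal(...)` from the standard library, which avoids the three-way comparison. A drop-in rewrite of that condition, purely illustrative and not part of this PR:

	if !bytes.Equal(bundle.SessionID, d.c.Nonce) {
		d.evictedHolders = append(d.evictedHolders, bundle.ShareIndex)
		continue
	}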
@@ -591,8 +591,29 @@ func (d *DistKeyGenerator) ProcessResponses(bundles []*ResponseBundle) (*Result,
 			if response.Status == Complaint {
 				foundComplaint = true
 			}
+			validAuthors = append(validAuthors, bundle.ShareIndex)
 		}
 	}
+
+	// In case of fast sync, we want to make sure all share holders have sent a
+	// valid response (success or complaint). All share holders that did not do
+	// so will be evicted from the final group. Since we are using a broadcast
+	// channel, if a node is honest, its response will be received by all honest
+	// nodes.
+	if d.c.FastSync {
+		// we only need to look at the nodes that did not send any response,
+		// since the invalid ones are already marked as evicted
+		allSent := append(validAuthors, d.evictedHolders...)
+		for _, n := range d.c.NewNodes {
+			if d.canReceive && d.nidx == n.Index {
+				continue // we don't evict ourselves
+			}
+			if !contains(allSent, n.Index) {
+				d.evictedHolders = append(d.evictedHolders, n.Index)
+			}
+		}
+	}
+
 	if !foundComplaint {
 		// there is no complaint!
 		if d.canReceive && d.statuses.CompleteSuccess() {
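For readers skimming the diff, here is a minimal, self-contained sketch of the fast-sync eviction rule added above. evictSilentHolders and the bare Index slices are illustrative stand-ins for the DistKeyGenerator state, not the library's API; the real code walks d.c.NewNodes and checks d.canReceive before skipping the local node.

	package main

	import "fmt"

	// Index stands in for the dkg package's index type (a uint32 alias there).
	type Index = uint32

	// contains mirrors the helper renamed in this PR.
	func contains(nodes []Index, node Index) bool {
		for _, idx := range nodes {
			if node == idx {
				return true
			}
		}
		return false
	}

	// evictSilentHolders adds every node that neither answered with a valid
	// response nor is already evicted. Copying validAuthors first avoids
	// aliasing the caller's slice when appending.
	func evictSilentHolders(newNodes, validAuthors, evicted []Index, self Index) []Index {
		allSent := append(append([]Index{}, validAuthors...), evicted...)
		for _, n := range newNodes {
			if n == self {
				continue // a node never evicts itself
			}
			if !contains(allSent, n) {
				evicted = append(evicted, n)
			}
		}
		return evicted
	}

	func main() {
		newNodes := []Index{0, 1, 2, 3}
		validAuthors := []Index{0, 2} // nodes that sent a valid response
		evicted := []Index{3}         // already evicted for an invalid bundle
		fmt.Println(evictSilentHolders(newNodes, validAuthors, evicted, 0))
		// prints [3 1]: node 1 stayed silent, so it joins the eviction list
	}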
@@ -691,7 +712,7 @@ func (d *DistKeyGenerator) ProcessJustifications(bundles []*JustificationBundle)
 			// index is invalid
 			continue
 		}
-		if isEvicted(d.evicted, bundle.DealerIndex) {
+		if contains(d.evicted, bundle.DealerIndex) {
 			// already evicted node
 			continue
 		}
@@ -746,7 +767,7 @@ func (d *DistKeyGenerator) ProcessJustifications(bundles []*JustificationBundle)
 	// check if there are enough dealer entries marked as all success
 	var allGood int
 	for _, n := range d.c.OldNodes {
-		if isEvicted(d.evicted, n.Index) {
+		if contains(d.evicted, n.Index) {
 			continue
 		}
 		if !d.statuses.AllTrue(n.Index) {
@@ -869,17 +890,22 @@ func (d *DistKeyGenerator) computeResharingResult() (*Result, error) {
 	// protocol (i.e. absent nodes will not be counted)
 	var qual []Node
 	for _, newNode := range d.c.NewNodes {
-		idx := newNode.Index
 		var invalid bool
-		for _, validDealer := range validDealers {
-			if !d.statuses.Get(validDealer, idx) {
+		// look if this node is also a dealer which has been misbehaving
+		for _, oldNode := range d.c.OldNodes {
+			if d.statuses.AllTrue(oldNode.Index) {
+				// it's a valid dealer as well
+				continue
+			}
+			if oldNode.Public.Equal(newNode.Public) {
+				// it's an invalid dealer, so we evict him
 				invalid = true
 				break
 			}
 		}
-		// look if they have not been misbehaving during response phase
-		invalid = invalid || isEvicted(d.evictedHolders, idx)
-		if !invalid {
+		// we also check if he has been misbehaving during the response
+		// phase only
+		if !invalid && !contains(d.evictedHolders, newNode.Index) {
 			qual = append(qual, newNode)
 		}
 	}
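To make the two eviction reasons in the reworked QUAL loop explicit, here is a simplified model: string keys stand in for kyber points (so Equal becomes ==), and the two maps stand in for the status matrix and the response-phase eviction list. All names here are illustrative, not the library's API.

	package main

	import "fmt"

	type node struct {
		index  uint32
		pubKey string // stand-in for a kyber.Point
	}

	// qualified keeps a new node unless its key belongs to a misbehaving old
	// dealer, or it was evicted as a share holder during the response phase.
	func qualified(newNodes []node, badDealerKeys map[string]bool, evictedHolders map[uint32]bool) []node {
		var qual []node
		for _, n := range newNodes {
			if badDealerKeys[n.pubKey] {
				continue // also a dealer whose status row is not all true
			}
			if evictedHolders[n.index] {
				continue // misbehaved during the response phase
			}
			qual = append(qual, n)
		}
		return qual
	}

	func main() {
		nodes := []node{{0, "A"}, {1, "B"}, {2, "C"}}
		fmt.Println(qualified(nodes, map[string]bool{"B": true}, map[uint32]bool{2: true}))
		// prints [{0 A}]: B is a bad dealer, C was evicted as a holder
	}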
@@ -911,7 +937,7 @@ func (d *DistKeyGenerator) computeDKGResult() (*Result, error) {
 
 		// however we do need to check for evicted share holders since in this
 		// case (DKG) both are the same.
-		if isEvicted(d.evictedHolders, n.Index) {
+		if contains(d.evictedHolders, n.Index) {
 			continue
 		}
 
@@ -985,7 +1011,7 @@ func isIndexIncluded(list []Node, index uint32) bool {
 	return false
 }
 
-func isEvicted(nodes []Index, node uint32) bool {
+func contains(nodes []Index, node Index) bool {
 	for _, idx := range nodes {
 		if node == idx {
 			return true
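The rename from isEvicted to contains reflects that the helper now serves two different lists (the evicted dealers in d.evicted and the evicted share holders in d.evictedHolders), and it aligns the second parameter with the slice's Index element type. A quick table-driven check, sketched as it might appear next to the package's other tests (hypothetical, not part of this PR):

	package dkg

	import "testing"

	func TestContains(t *testing.T) {
		evicted := []Index{1, 4, 7}
		cases := []struct {
			node Index
			want bool
		}{
			{1, true},
			{7, true},
			{2, false},
		}
		for _, c := range cases {
			if got := contains(evicted, c.node); got != c.want {
				t.Errorf("contains(%v, %d) = %v, want %v", evicted, c.node, got, c.want)
			}
		}
	}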
155 changes: 150 additions & 5 deletions share/dkg/dkg_test.go
@@ -1,6 +1,7 @@
 package dkg
 
 import (
+	"fmt"
 	"testing"
 
 	"github.com/drand/kyber"
@@ -215,6 +216,112 @@ func TestDKGFull(t *testing.T) {
 	testResults(t, suite, thr, n, results)
 }
 
+func TestDKGResharing(t *testing.T) {
+	n := 5
+	thr := 4
+	var suite = bn256.NewSuiteG2()
+	var sigSuite = bn256.NewSuiteG1()
+	tns := GenerateTestNodes(suite, n)
+	list := NodesFromTest(tns)
+	conf := Config{
+		Suite:     suite,
+		NewNodes:  list,
+		Threshold: thr,
+		Auth:      schnorr.NewScheme(suite),
+	}
+
+	results := RunDKG(t, tns, conf, nil, nil, nil)
+	for i, t := range tns {
+		t.res = results[i]
+	}
+	testResults(t, suite, thr, n, results)
+
+	// create a partial signature with the share now and make sure the partial
+	// signature is verifiable and then *not* verifiable after the resharing
+	oldShare := results[0].Key.Share
+	msg := []byte("Hello World")
+	scheme := tbls.NewThresholdSchemeOnG1(sigSuite)
+	oldPartial, err := scheme.Sign(oldShare, msg)
+	require.NoError(t, err)
+	poly := share.NewPubPoly(suite, suite.Point().Base(), results[0].Key.Commits)
+	require.NoError(t, scheme.VerifyPartial(poly, msg, oldPartial))
+
+	// we now set up the second group with a higher node count and a higher
+	// threshold, and we remove one node from the previous group
+	newN := n + 5
+	newT := thr + 4
+	var newTns = make([]*TestNode, newN)
+	// remove the last node from the previous group
+	offline := 1
+	copy(newTns, tns[:n-offline])
+	// + offline because we fill the gap left by the offline nodes with new nodes
+	newNode := newN - n + offline
+	for i := 0; i < newNode; i++ {
+		// new node can have the same index as a previous one, separation is made
+		newTns[n-1+i] = NewTestNode(suite, n-1+i)
+	}
+	newList := NodesFromTest(newTns)
+	newConf := &Config{
+		Suite:        suite,
+		NewNodes:     newList,
+		OldNodes:     list,
+		Threshold:    newT,
+		OldThreshold: thr,
+		Auth:         schnorr.NewScheme(suite),
+	}
+
+	SetupReshareNodes(newTns, newConf, tns[0].res.Key.Commits)
+
+	var deals []*DealBundle
+	for _, node := range newTns {
+		if node.res == nil {
+			// new members don't issue deals
+			continue
+		}
+		d, err := node.dkg.Deals()
+		require.NoError(t, err)
+		deals = append(deals, d)
+	}
+
+	var responses []*ResponseBundle
+	for _, node := range newTns {
+		resp, err := node.dkg.ProcessDeals(deals)
+		require.NoError(t, err)
+		if resp != nil {
+			// last node from the old group is not present so there should be
+			// some responses!
+			responses = append(responses, resp)
+		}
+	}
+	require.True(t, len(responses) > 0)
+
+	results = nil
+	for _, node := range newTns {
+		res, just, err := node.dkg.ProcessResponses(responses)
+		require.NoError(t, err)
+		require.Nil(t, res)
+		// since the last old node is absent he can't give any justifications
+		require.Nil(t, just)
+	}
+
+	for _, node := range newTns {
+		res, err := node.dkg.ProcessJustifications(nil)
+		require.NoError(t, err)
+		require.NotNil(t, res)
+		results = append(results, res)
+	}
+	testResults(t, suite, newT, newN, results)
+
+	// test a tbls signature is correct
+	newShare := results[0].Key.Share
+	newPartial, err := scheme.Sign(newShare, msg)
+	require.NoError(t, err)
+	newPoly := share.NewPubPoly(suite, suite.Point().Base(), results[0].Key.Commits)
+	require.NoError(t, scheme.VerifyPartial(newPoly, msg, newPartial))
+	// test we cannot verify the old partial with the new public polynomial
+	require.Error(t, scheme.VerifyPartial(poly, msg, newPartial))
+}
+
 func TestDKGThreshold(t *testing.T) {
 	n := 5
 	thr := 4
@@ -280,8 +387,8 @@ func TestDKGThreshold(t *testing.T) {
 	testResults(t, suite, thr, n, filtered)
 }
 
-func TestDKGResharing(t *testing.T) {
-	n := 5
+func TestDKGResharingFast(t *testing.T) {
+	n := 6
 	thr := 4
 	var suite = bn256.NewSuiteG2()
 	var sigSuite = bn256.NewSuiteG1()
@@ -346,13 +453,26 @@ func TestDKGResharing(t *testing.T) {
 		newTns[n-1+i] = NewTestNode(suite, n-1+i)
 	}
 	newList := NodesFromTest(newTns)
+	// key from the previous and new group which is registered in the
+	// group but won't participate
+	p := 1
+	skipKey := list[p].Public
+	var skipNew Index
+	for _, n := range newList {
+		if n.Public.Equal(skipKey) {
+			skipNew = n.Index
+		}
+	}
+	fmt.Println("skipping old index: ", list[p].Index, "public key", skipKey, "newIdx", skipNew)
+
 	newConf := &Config{
 		Suite:        suite,
 		NewNodes:     newList,
 		OldNodes:     list,
 		Threshold:    newT,
 		OldThreshold: thr,
 		Auth:         schnorr.NewScheme(suite),
+		FastSync:     true,
 	}
 
 	SetupReshareNodes(newTns, newConf, tns[0].res.Key.Commits)
@@ -363,15 +483,22 @@ func TestDKGResharing(t *testing.T) {
 			// new members don't issue deals
 			continue
 		}
+		if node.Public.Equal(skipKey) {
+			continue
+		}
 		d, err := node.dkg.Deals()
 		require.NoError(t, err)
 		deals = append(deals, d)
 	}
 
 	var responses []*ResponseBundle
 	for _, node := range newTns {
+		if node.Public.Equal(skipKey) {
+			continue
+		}
 		resp, err := node.dkg.ProcessDeals(deals)
 		require.NoError(t, err)
+
 		if resp != nil {
 			// last node from the old group is not present so there should be
 			// some responses!
@@ -381,20 +508,38 @@ func TestDKGResharing(t *testing.T) {
 	require.True(t, len(responses) > 0)
 
 	results = nil
+	var justifs []*JustificationBundle
 	for _, node := range newTns {
+		if node.Public.Equal(skipKey) {
+			continue
+		}
 		res, just, err := node.dkg.ProcessResponses(responses)
 		require.NoError(t, err)
 		require.Nil(t, res)
-		// since the last old node is absent he can't give any justifications
-		require.Nil(t, just)
+		if node.res == nil {
+			// new members don't issue justifications
+			continue
+		}
+		require.NotNil(t, just.Justifications)
+		require.Equal(t, just.Justifications[0].ShareIndex, skipNew)
+		justifs = append(justifs, just)
 	}
 
 	for _, node := range newTns {
-		res, err := node.dkg.ProcessJustifications(nil)
+		if node.Public.Equal(skipKey) {
+			continue
+		}
+		res, err := node.dkg.ProcessJustifications(justifs)
 		require.NoError(t, err)
 		require.NotNil(t, res)
 		results = append(results, res)
 	}
+
+	for _, res := range results {
+		for _, n := range res.QUAL {
+			require.False(t, n.Public.Equal(skipKey))
+		}
+	}
 	testResults(t, suite, newT, newN, results)
 
 	// test a tbls signature is correct
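To exercise both resharing flows touched by this PR from the repository root, the standard Go tooling suffices; since -run takes an unanchored regexp, the pattern below matches TestDKGResharing as well as TestDKGResharingFast:

	go test -run TestDKGResharing ./share/dkg/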