[FAB-1846] Integration between deliver and election
Creation of the leader election service during channel initialization in gossip,
and wiring of the election callback to the deliver service

Change-Id: Ic2b8b1b5ebc770abcf4b935d39ce06087086b0c9
Signed-off-by: Gennady Laventman <gennady@il.ibm.com>
gennadylaventman committed Mar 1, 2017
1 parent 6a3f766 commit a6edbff
Showing 11 changed files with 753 additions and 63 deletions.
66 changes: 47 additions & 19 deletions core/deliverservice/deliveryclient.go
@@ -21,6 +21,8 @@ import (
"sync"
"time"

"fmt"

"github.com/hyperledger/fabric/core/comm"
"github.com/hyperledger/fabric/core/deliverservice/blocksprovider"
"github.com/hyperledger/fabric/protos/orderer"
@@ -39,10 +41,13 @@ func init() {
// DeliverService used to communicate with orderers to obtain
// new blocks and send them to the committer service
type DeliverService interface {
// JoinChain once a peer joins the chain it needs to check whether
// it has been selected as a leader and open a connection to the configured
// ordering service endpoint
JoinChain(chainID string, ledgerInfo blocksprovider.LedgerInfo) error
// StartDeliverForChannel dynamically starts delivery of new blocks from ordering service
// to channel peers.
StartDeliverForChannel(chainID string, ledgerInfo blocksprovider.LedgerInfo) error

// StopDeliverForChannel dynamically stops delivery of new blocks from ordering service
// to channel peers.
StopDeliverForChannel(chainID string) error

// Stop terminates delivery service and closes the connection
Stop()
@@ -131,27 +136,29 @@ func NewFactoryDeliverService(gossip blocksprovider.GossipServiceAdapter, factor
}
}

// JoinChain initializes the grpc stream for the given chainID and creates a blocks provider instance
// to spawn in a go routine to read new blocks starting from the position provided by the ledger
// StartDeliverForChannel starts block delivery for the channel:
// it initializes the grpc stream for the given chainID and creates a blocks provider instance
// that spawns a go routine to read new blocks starting from the position provided by the ledger
// info instance.
func (d *deliverServiceImpl) JoinChain(chainID string, ledgerInfo blocksprovider.LedgerInfo) error {
isLeader := viper.GetBool("peer.gossip.orgLeader")

if isLeader {
func (d *deliverServiceImpl) StartDeliverForChannel(chainID string, ledgerInfo blocksprovider.LedgerInfo) error {
d.lock.Lock()
defer d.lock.Unlock()
if d.stopping {
errMsg := fmt.Sprintf("Delivery service is stopping cannot join a new channel %s", chainID)
logger.Errorf(errMsg)
return errors.New(errMsg)
}
if _, exist := d.clients[chainID]; exist {
errMsg := fmt.Sprintf("Delivery service - block provider already exists for %s found, can't start delivery", chainID)
logger.Errorf(errMsg)
return errors.New(errMsg)
} else {
abc, err := d.clientsFactory.Create()
if err != nil {
logger.Errorf("Unable to initialize atomic broadcast, due to %s", err)
return err
}

d.lock.Lock()
defer d.lock.Unlock()

if d.stopping {
logger.Errorf("Delivery service is stopping cannot join a new channel")
return errors.New("Delivery service is stopping cannot join a new channel")
}

logger.Debug("This peer will pass blocks from orderer service to other peers")
d.clients[chainID] = blocksprovider.NewBlocksProvider(chainID, abc, d.gossip)

if err := d.clients[chainID].RequestBlocks(ledgerInfo); err == nil {
@@ -162,6 +169,27 @@ func (d *deliverServiceImpl) JoinChain(chainID string, ledgerInfo blocksprovider
return nil
}

// StopDeliverForChannel stops block delivery for the channel by stopping its block provider
func (d *deliverServiceImpl) StopDeliverForChannel(chainID string) error {
d.lock.Lock()
defer d.lock.Unlock()
if d.stopping {
errMsg := fmt.Sprintf("Delivery service is stopping, cannot stop delivery for channel %s", chainID)
logger.Errorf(errMsg)
return errors.New(errMsg)
}
if client, exist := d.clients[chainID]; exist {
client.Stop()
delete(d.clients, chainID)
logger.Debug("This peer will stop pass blocks from orderer service to other peers")
} else {
errMsg := fmt.Sprintf("Delivery service - no block provider for %s found, can't stop delivery", chainID)
logger.Errorf(errMsg)
return errors.New(errMsg)
}
return nil
}

// Stop all service and release resources
func (d *deliverServiceImpl) Stop() {
d.lock.Lock()
13 changes: 12 additions & 1 deletion core/deliverservice/deliveryclient_test.go
@@ -49,9 +49,17 @@ func TestNewDeliverService(t *testing.T) {
}

service := NewFactoryDeliverService(gossipServiceAdapter, factory, nil)
service.JoinChain("TEST_CHAINID", &mocks.MockLedgerInfo{0})
assert.NilError(t, service.StartDeliverForChannel("TEST_CHAINID", &mocks.MockLedgerInfo{0}))

// Let's start delivery twice
assert.Error(t, service.StartDeliverForChannel("TEST_CHAINID", &mocks.MockLedgerInfo{0}), "can't start delivery")
// Let's stop delivery that was not started
assert.Error(t, service.StopDeliverForChannel("TEST_CHAINID2"), "can't stop delivery")

// Let it try to simulate a few recv -> gossip rounds
time.Sleep(time.Duration(10) * time.Millisecond)
assert.NilError(t, service.StopDeliverForChannel("TEST_CHAINID"))

time.Sleep(time.Duration(10) * time.Millisecond)
service.Stop()

@@ -61,4 +69,7 @@ func TestNewDeliverService(t *testing.T) {
assert.Equal(t, atomic.LoadInt32(&blocksDeliverer.RecvCnt), atomic.LoadInt32(&gossipServiceAdapter.AddPayloadsCnt))
assert.Equal(t, atomic.LoadInt32(&blocksDeliverer.RecvCnt), atomic.LoadInt32(&gossipServiceAdapter.GossipCallsCnt))

assert.Error(t, service.StartDeliverForChannel("TEST_CHAINID", &mocks.MockLedgerInfo{0}), "Delivery service is stopping")
assert.Error(t, service.StopDeliverForChannel("TEST_CHAINID"), "Delivery service is stopping")

}
13 changes: 9 additions & 4 deletions core/peer/peer_test.go
@@ -40,10 +40,15 @@ import (
type mockDeliveryClient struct {
}

// JoinChain once a peer joins the chain it needs to check whether
// it has been selected as a leader and open a connection to the configured
// ordering service endpoint
func (*mockDeliveryClient) JoinChain(chainID string, ledgerInfo blocksprovider.LedgerInfo) error {
// StartDeliverForChannel dynamically starts delivery of new blocks from ordering service
// to channel peers.
func (ds *mockDeliveryClient) StartDeliverForChannel(chainID string, ledgerInfo blocksprovider.LedgerInfo) error {
return nil
}

// StopDeliverForChannel dynamically stops delivery of new blocks from ordering service
// to channel peers.
func (ds *mockDeliveryClient) StopDeliverForChannel(chainID string) error {
return nil
}

13 changes: 9 additions & 4 deletions core/scc/cscc/configure_test.go
@@ -46,10 +46,15 @@ import (
type mockDeliveryClient struct {
}

// JoinChain once a peer joins the chain it needs to check whether
// it has been selected as a leader and open a connection to the configured
// ordering service endpoint
func (*mockDeliveryClient) JoinChain(chainID string, ledgerInfo blocksprovider.LedgerInfo) error {
// StartDeliverForChannel dynamically starts delivery of new blocks from ordering service
// to channel peers.
func (ds *mockDeliveryClient) StartDeliverForChannel(chainID string, ledgerInfo blocksprovider.LedgerInfo) error {
return nil
}

// StopDeliverForChannel dynamically stops delivery of new blocks from ordering service
// to channel peers.
func (ds *mockDeliveryClient) StopDeliverForChannel(chainID string) error {
return nil
}

19 changes: 10 additions & 9 deletions gossip/election/adapter.go
@@ -23,6 +23,7 @@ import (

"github.com/hyperledger/fabric/gossip/common"
"github.com/hyperledger/fabric/gossip/discovery"
"github.com/hyperledger/fabric/gossip/util"
proto "github.com/hyperledger/fabric/protos/gossip"
"github.com/op/go-logging"
)
@@ -44,7 +45,7 @@ func (mi *msgImpl) IsDeclaration() bool {
}

type peerImpl struct {
member *discovery.NetworkMember
member discovery.NetworkMember
}

func (pi *peerImpl) ID() peerID {
@@ -66,8 +67,8 @@ type gossip interface {
}

type adapterImpl struct {
gossip gossip
self *discovery.NetworkMember
gossip gossip
selfPKIid common.PKIidType

incTime uint64
seqNum uint64
@@ -81,17 +82,17 @@ type adapterImpl struct {
}

// NewAdapter creates new leader election adapter
func NewAdapter(gossip gossip, self *discovery.NetworkMember, channel common.ChainID) LeaderElectionAdapter {
func NewAdapter(gossip gossip, pkiid common.PKIidType, channel common.ChainID) LeaderElectionAdapter {
return &adapterImpl{
gossip: gossip,
self: self,
gossip: gossip,
selfPKIid: pkiid,

incTime: uint64(time.Now().UnixNano()),
seqNum: uint64(0),

channel: channel,

logger: logging.MustGetLogger("LeaderElectionAdapter"),
logger: util.GetLogger(util.LoggingElectionModule, ""),

doneCh: make(chan struct{}),
stopOnce: &sync.Once{},
@@ -134,7 +135,7 @@ func (ai *adapterImpl) CreateMessage(isDeclaration bool) Msg {
seqNum := ai.seqNum

leadershipMsg := &proto.LeadershipMessage{
PkiID: ai.self.PKIid,
PkiID: ai.selfPKIid,
IsDeclaration: isDeclaration,
Timestamp: &proto.PeerTime{
IncNumber: ai.incTime,
@@ -156,7 +157,7 @@ func (ai *adapterImpl) Peers() []Peer {

var res []Peer
for _, peer := range peers {
res = append(res, &peerImpl{&peer})
res = append(res, &peerImpl{peer})
}

return res
10 changes: 5 additions & 5 deletions gossip/election/adapter_test.go
@@ -41,7 +41,7 @@ func TestNewAdapter(t *testing.T) {
peersCluster := newClusterOfPeers("0")
peersCluster.addPeer("peer0", mockGossip)

NewAdapter(mockGossip, selfNetworkMember, []byte("channel0"))
NewAdapter(mockGossip, selfNetworkMember.PKIid, []byte("channel0"))
}

func TestAdapterImpl_CreateMessage(t *testing.T) {
Expand All @@ -52,7 +52,7 @@ func TestAdapterImpl_CreateMessage(t *testing.T) {
}
mockGossip := newGossip("peer0", selfNetworkMember)

adapter := NewAdapter(mockGossip, selfNetworkMember, []byte("channel0"))
adapter := NewAdapter(mockGossip, selfNetworkMember.PKIid, []byte("channel0"))
msg := adapter.CreateMessage(true)

if !msg.(*msgImpl).msg.IsLeadershipMsg() {
@@ -142,15 +142,15 @@ func TestAdapterImpl_Gossip(t *testing.T) {
case msg := <-channels[fmt.Sprintf("Peer%d", 1)]:
if !msg.IsDeclaration() {
t.Error("Msg should be declaration")
} else if !bytes.Equal(msg.SenderID(), sender.self.PKIid) {
} else if !bytes.Equal(msg.SenderID(), sender.selfPKIid) {
t.Error("Msg Sender is wrong")
} else {
totalMsg++
}
case msg := <-channels[fmt.Sprintf("Peer%d", 2)]:
if !msg.IsDeclaration() {
t.Error("Msg should be declaration")
} else if !bytes.Equal(msg.SenderID(), sender.self.PKIid) {
} else if !bytes.Equal(msg.SenderID(), sender.selfPKIid) {
t.Error("Msg Sender is wrong")
} else {
totalMsg++
@@ -308,7 +308,7 @@ func createCluster(peers ...int) (*clusterOfPeers, map[string]*adapterImpl) {
}

mockGossip := newGossip(peerEndpoint, peerMember)
adapter := NewAdapter(mockGossip, peerMember, []byte("channel0"))
adapter := NewAdapter(mockGossip, peerMember.PKIid, []byte("channel0"))
adapters[peerEndpoint] = adapter.(*adapterImpl)
cluster.addPeer(peerEndpoint, mockGossip)
}
26 changes: 13 additions & 13 deletions gossip/election/election.go
@@ -183,8 +183,8 @@ func (le *leaderElectionSvcImpl) start() {
}

func (le *leaderElectionSvcImpl) handleMessages() {
le.logger.Info(le.id, ": Entering")
defer le.logger.Info(le.id, ": Exiting")
le.logger.Debug(le.id, ": Entering")
defer le.logger.Debug(le.id, ": Exiting")
defer le.stopWG.Done()
msgChan := le.adapter.Accept()
for {
@@ -270,14 +270,14 @@ func (le *leaderElectionSvcImpl) run() {
}

func (le *leaderElectionSvcImpl) leaderElection() {
le.logger.Info(le.id, ": Entering")
defer le.logger.Info(le.id, ": Exiting")
le.logger.Debug(le.id, ": Entering")
defer le.logger.Debug(le.id, ": Exiting")
le.propose()
le.waitForInterrupt(leaderElectionDuration)
// If someone declared itself as a leader, give up
// on trying to become a leader too
if le.isLeaderExists() {
le.logger.Info(le.id, ": Some peer is already a leader")
le.logger.Debug(le.id, ": Some peer is already a leader")
return
}
// Leader doesn't exist, let's see if there is a better candidate than us
@@ -296,8 +296,8 @@

// propose sends a leadership proposal message to remote peers
func (le *leaderElectionSvcImpl) propose() {
le.logger.Info(le.id, ": Entering")
le.logger.Info(le.id, ": Exiting")
le.logger.Debug(le.id, ": Entering")
le.logger.Debug(le.id, ": Exiting")
leadershipProposal := le.adapter.CreateMessage(false)
le.adapter.Gossip(leadershipProposal)
}
@@ -324,8 +324,8 @@ func (le *leaderElectionSvcImpl) leader() {
// waitForMembershipStabilization waits for membership view to stabilize
// or until a time limit expires, or until a peer declares itself as a leader
func (le *leaderElectionSvcImpl) waitForMembershipStabilization(timeLimit time.Duration) {
le.logger.Info(le.id, ": Entering")
defer le.logger.Info(le.id, ": Exiting")
le.logger.Debug(le.id, ": Entering")
defer le.logger.Debug(le.id, ": Exiting, peers found", len(le.adapter.Peers()))
endTime := time.Now().Add(timeLimit)
viewSize := len(le.adapter.Peers())
for !le.shouldStop() {
@@ -368,13 +368,13 @@ func (le *leaderElectionSvcImpl) IsLeader() bool {
}

func (le *leaderElectionSvcImpl) beLeader() {
le.logger.Info(le.id, ": Becoming a leader")
le.logger.Debug(le.id, ": Becoming a leader")
atomic.StoreInt32(&le.isLeader, int32(1))
le.callback(true)
}

func (le *leaderElectionSvcImpl) stopBeingLeader() {
le.logger.Info(le.id, "Stopped being a leader")
le.logger.Debug(le.id, "Stopped being a leader")
atomic.StoreInt32(&le.isLeader, int32(0))
le.callback(false)
}
@@ -385,8 +385,8 @@ func (le *leaderElectionSvcImpl) shouldStop() bool {

// Stop stops the LeaderElectionService
func (le *leaderElectionSvcImpl) Stop() {
le.logger.Info(le.id, ": Entering")
defer le.logger.Info(le.id, ": Exiting")
le.logger.Debug(le.id, ": Entering")
defer le.logger.Debug(le.id, ": Exiting")
atomic.StoreInt32(&le.toDie, int32(1))
le.stopChan <- struct{}{}
le.stopWG.Wait()
9 changes: 4 additions & 5 deletions gossip/gossip/gossip_test.go
@@ -531,8 +531,10 @@ func TestDissemination(t *testing.T) {
leadershipChan, _ := peers[i-1].Accept(acceptLeadershp, false)
go func(index int, ch <-chan *proto.GossipMessage) {
defer wgLeadership.Done()
<-ch
receivedLeadershipMessages[index]++
msg := <-ch
if bytes.Equal(msg.Channel, common.ChainID("A")) {
receivedLeadershipMessages[index]++
}
}(i-1, leadershipChan)
}

@@ -886,9 +888,6 @@ func createDataMsg(seqnum uint64, data []byte, hash string, channel common.Chain

func createLeadershipMsg(isDeclaration bool, channel common.ChainID, incTime uint64, seqNum uint64, endpoint string, pkiid []byte) *proto.GossipMessage {

metadata := []byte{}
metadata = strconv.AppendBool(metadata, isDeclaration)

leadershipMsg := &proto.LeadershipMessage{
IsDeclaration: isDeclaration,
PkiID: pkiid,
