This repository has been archived by the owner on Aug 2, 2021. It is now read-only.

network: Reconnect to the same peers on startup #1844

Merged · 3 commits · Oct 17, 2019
85 changes: 64 additions & 21 deletions network/hive.go
@@ -29,6 +29,9 @@ import (
"github.com/ethersphere/swarm/state"
)

const connectionsKey = "conns"
const addressesKey = "peers"

/*
Hive is the logistic manager of the swarm

@@ -135,32 +138,35 @@ func (h *Hive) Stop() error {
// at each iteration, ask the overlay driver to suggest the most preferred peer to connect to
// as well as advertise saturation depth if needed
func (h *Hive) connect() {
for {
select {
case <-h.ticker.C:
h.tickHive()
case <-h.done:
return
}
}
}

func (h *Hive) tickHive() {
addr, depth, changed := h.SuggestPeer()
if h.Discovery && changed {
NotifyDepth(uint8(depth), h.Kademlia)
}
if addr != nil {
log.Trace(fmt.Sprintf("%08x hive connect() suggested %08x", h.BaseAddr()[:4], addr.Address()[:4]))
under, err := enode.ParseV4(string(addr.Under()))
if err != nil {
log.Warn(fmt.Sprintf("%08x unable to connect to bee %08x: invalid node URL: %v", h.BaseAddr()[:4], addr.Address()[:4], err))
return
}
log.Trace(fmt.Sprintf("%08x attempt to connect to bee %08x", h.BaseAddr()[:4], addr.Address()[:4]))
h.addPeer(under)
}
}

// Run protocol run function
func (h *Hive) Run(p *BzzPeer) error {
h.trackPeer(p)
@@ -232,7 +238,7 @@ func (h *Hive) Peer(id enode.ID) *BzzPeer {
// loadPeers, savePeers implement the persistence callbacks
func (h *Hive) loadPeers() error {
var as []*BzzAddr
err := h.Store.Get("peers", &as)
err := h.Store.Get(addressesKey, &as)
if err != nil {
if err == state.ErrNotFound {
log.Info(fmt.Sprintf("hive %08x: no persisted peers found", h.BaseAddr()[:4]))
@@ -249,13 +255,40 @@ func (h *Hive) loadPeers() error {
}
}
log.Info(fmt.Sprintf("hive %08x: peers loaded", h.BaseAddr()[:4]))
errRegistering := h.Register(as...)
var conns []*BzzAddr
err = h.Store.Get(connectionsKey, &conns)
if err != nil {
if err == state.ErrNotFound {
log.Info(fmt.Sprintf("hive %08x: no persisted peer connections found", h.BaseAddr()[:4]))
} else {
log.Warn(fmt.Sprintf("hive %08x: error loading connections: %v", h.BaseAddr()[:4], err))
}
} else {
go h.connectInitialPeers(conns)
}
return errRegistering
}

func (h *Hive) connectInitialPeers(conns []*BzzAddr) {
log.Info(fmt.Sprintf("%08x hive connectInitialPeers() With %v saved connections", h.BaseAddr()[:4], len(conns)))
for _, addr := range conns {
log.Trace(fmt.Sprintf("%08x hive connect() suggested initial %08x", h.BaseAddr()[:4], addr.Address()[:4]))
under, err := enode.ParseV4(string(addr.Under()))
if err != nil {
log.Warn(fmt.Sprintf("%08x unable to connect to bee %08x: invalid node URL: %v", h.BaseAddr()[:4], addr.Address()[:4], err))
Review comment (Member):

log.Warn("suggestInitialPeers: invalid enode ID", "self", hex.EncodeToString(h.BaseAddr()[:4]), "peer", addr, "err", err)

Better not to use fmt in log.

Update: I see now this is all over the original code, so TODO: clean this up later.
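As an aside, a minimal self-contained sketch of the key/value logging style the reviewer is asking for, assuming go-ethereum's log and hexutil packages; the message, byte values, and error below are made up for illustration:

package main

import (
	"errors"

	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/log"
)

func main() {
	self := []byte{0xde, 0xad, 0xbe, 0xef}
	err := errors.New("invalid node URL")
	// Pass key/value context pairs instead of a pre-formatted fmt.Sprintf string.
	log.Warn("connectInitialPeers: invalid enode ID",
		"self", hexutil.Encode(self),
		"err", err)
}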

continue
}
log.Trace(fmt.Sprintf("%08x attempt to connect to bee %08x", h.BaseAddr()[:4], addr.Address()[:4]))
h.addPeer(under)
}
}

// savePeers, savePeer implement the persistence callbacks
func (h *Hive) savePeers() error {
var peers []*BzzAddr
var conns []*BzzAddr
h.Kademlia.EachAddr(nil, 256, func(pa *BzzAddr, i int) bool {
if pa == nil {
log.Warn(fmt.Sprintf("empty addr: %v", i))
@@ -265,8 +298,18 @@ func (h *Hive) savePeers() error {
peers = append(peers, pa)
return true
})
if err := h.Store.Put("peers", peers); err != nil {

h.Kademlia.EachConn(nil, 256, func(p *Peer, i int) bool {
log.Trace("saving connected peer", "OAddr", hexutil.Encode(p.OAddr), "UAddr", p.UAddr)
conns = append(conns, p.BzzAddr)
return true
})
if err := h.Store.Put(addressesKey, peers); err != nil {
return fmt.Errorf("could not save peers: %v", err)
}

Review comment (Member): For consistency, I would change line 308 to use "addrs" instead of "peers".

Reply (Contributor, author): That will break backward compatibility. Are we sure we want that?
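To illustrate the author's concern, a hypothetical sketch (not part of this PR) of the fallback read that renaming the persisted key would require to stay backward compatible; loadAddrsCompat and the "addrs" key are invented for the example, and the code assumes the package network context with the swarm state package imported:

// loadAddrsCompat is a hypothetical helper showing why renaming the persisted
// key is a breaking change: addresses written by older nodes under "peers" are
// only found again if an explicit fallback read is added.
func loadAddrsCompat(store state.Store, as *[]*BzzAddr) error {
	err := store.Get("addrs", as)
	if err == state.ErrNotFound {
		// Fall back to the key used by previous releases.
		return store.Get("peers", as)
	}
	return err
}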

if err := h.Store.Put(connectionsKey, conns); err != nil {
return fmt.Errorf("could not save peer connections: %v", err)
}
return nil
}
141 changes: 141 additions & 0 deletions network/hive_test.go
@@ -22,9 +22,14 @@ import (
"testing"
"time"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethersphere/swarm/log"
p2ptest "github.com/ethersphere/swarm/p2p/testing"
"github.com/ethersphere/swarm/pot"
"github.com/ethersphere/swarm/state"
)

@@ -175,3 +180,139 @@ func TestHiveStatePersistence(t *testing.T) {
t.Fatalf("%d peers left over: %v", len(peers), peers)
}
}

// TestHiveStateConnections connects the node to some peers and then, after cleanup, the connections saved in the store
// are retrieved and used as the initial suggested peers.
func TestHiveStateConnections(t *testing.T) {
dir, err := ioutil.TempDir("", "hive_test_store")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)

const peersCount = 5

nodeIdToBzzAddr := make(map[string]*BzzAddr)
addedChan := make(chan struct{}, 5)
startHive := func(t *testing.T, dir string) (h *Hive, cleanupFunc func()) {
store, err := state.NewDBStore(dir)
if err != nil {
t.Fatal(err)
}

params := NewHiveParams()
params.Discovery = false

prvkey, err := crypto.GenerateKey()
if err != nil {
t.Fatal(err)
}

h = NewHive(params, NewKademlia(PrivateKeyToBzzKey(prvkey), NewKadParams()), store)
s := p2ptest.NewProtocolTester(prvkey, 0, func(p *p2p.Peer, rw p2p.MsgReadWriter) error { return nil })

if err := h.Start(s.Server); err != nil {
t.Fatal(err)
}
// Stop the ticker to avoid interference with the initial peer suggestion
h.ticker.Stop()
// Overwrite addPeer so the node is added as a peer automatically.
// The related overlay address is retrieved from nodeIdToBzzAddr, where it was saved earlier.
h.addPeer = func(node *enode.Node) {
bzzAddr := nodeIdToBzzAddr[encodeId(node.ID())]
if bzzAddr == nil {
t.Fatalf("Enode [%v] not found in saved peers!", encodeId(node.ID()))
}
bzzPeer := newConnPeerLocal(bzzAddr.Address(), h.Kademlia)
h.On(bzzPeer)
addedChan <- struct{}{}
}

cleanupFunc = func() {
err := h.Stop()
if err != nil {
t.Fatal(err)
}

s.Stop()
}
return h, cleanupFunc
}

h1, cleanup1 := startHive(t, dir)
peers := make(map[string]bool)
for i := 0; i < peersCount; i++ {
raddr := RandomBzzAddr()
h1.Register(raddr)
peers[raddr.String()] = true
}
const initialPeers = 5
for i := 0; i < initialPeers; i++ {
suggestedPeer, _, _ := h1.SuggestPeer()
if suggestedPeer != nil {
testAddPeer(suggestedPeer, h1, nodeIdToBzzAddr)
}

}
numConns := h1.conns.Size()
connAddresses := make(map[string]string)
h1.EachConn(h1.base, 255, func(peer *Peer, i int) bool {
key := hexutil.Encode(peer.Address())
connAddresses[key] = key
return true
})
log.Warn("After 5 suggestions", "numConns", numConns)
cleanup1()

// start the hive again and check that we suggest the previously connected peers
h2, _ := startHive(t, dir)
// at some point there should be the same number of connections as before
connsAfterLoading := 0
iterations := 0
connsAfterLoading = h2.conns.Size()
for connsAfterLoading != numConns && iterations < 5 {
select {
case <-addedChan:
connsAfterLoading = h2.conns.Size()
case <-time.After(1 * time.Second):
iterations++
}
log.Trace("Iteration waiting for initial connections", "numConns", connsAfterLoading, "iterations", iterations)
}
if connsAfterLoading != numConns {
t.Errorf("Expected 5 peer connecteds from previous execution but got %v", connsAfterLoading)
}
h2.EachConn(h2.base, 255, func(peer *Peer, i int) bool {
key := hexutil.Encode(peer.Address())
if connAddresses[key] != key {
t.Errorf("Expected address %v to be in connections as it was a previous peer connected", key)
} else {
log.Warn("Previous peer connected again", "addr", key)
}
return true
})
}

// testAddPeer creates a Peer with the suggested address and stores the relationship enode -> BzzAddr for later retrieval
func testAddPeer(suggestedPeer *BzzAddr, h1 *Hive, nodeIdToBzzAddr map[string]*BzzAddr) {
byteAddresses := suggestedPeer.Address()
bzzPeer := newConnPeerLocal(byteAddresses, h1.Kademlia)
nodeIdToBzzAddr[encodeId(bzzPeer.ID())] = bzzPeer.BzzAddr
bzzPeer.kad = h1.Kademlia
h1.On(bzzPeer)
}

func encodeId(id enode.ID) string {
addr := id[:]
return hexutil.Encode(addr)
}

// newConnPeerLocal creates a test Peer with a localhost underlay address and the provided overlay address
func newConnPeerLocal(addr []byte, kademlia *Kademlia) *Peer {
hash := [common.HashLength]byte{}
copy(hash[:], addr)
potAddress := pot.Address(hash)
peer := newDiscPeer(potAddress)
peer.kad = kademlia
return peer
}