GetValidCustodyPeers: Optimize and add tests.
nalepae committed Jun 17, 2024
1 parent 65f03ac commit b2d0537
Showing 2 changed files with 105 additions and 2 deletions.
22 changes: 21 additions & 1 deletion beacon-chain/p2p/custody.go
@@ -12,6 +12,9 @@ import (

// GetValidCustodyPeers returns a list of peers that custody a superset of the local node's custody columns.
func (s *Service) GetValidCustodyPeers(peers []peer.ID) ([]peer.ID, error) {
// Get the total number of columns.
numberOfColumns := params.BeaconConfig().NumberOfColumns

// Get the custody subnets count of the local node.
localCustodySubnetCount := params.BeaconConfig().CustodyRequirement
if flags.Get().SubscribeToAllSubnets {
@@ -24,7 +27,10 @@ func (s *Service) GetValidCustodyPeers(peers []peer.ID) ([]peer.ID, error) {
return nil, errors.Wrap(err, "custody columns")
}

var validPeers []peer.ID
localCustodyColumnsCount := uint64(len(localCustodyColumns))

// Find the valid peers.
validPeers := make([]peer.ID, 0, len(peers))

loop:
for _, pid := range peers {
@@ -43,6 +49,20 @@ loop:
return nil, errors.Wrap(err, "custody columns")
}

remoteCustodyColumnsCount := uint64(len(remoteCustodyColumns))

// If the remote peer custodies fewer columns than the local node, skip it.
if remoteCustodyColumnsCount < localCustodyColumnsCount {
continue
}

// If the remote peer custodies all the possible columns, add it to the list.
if remoteCustodyColumnsCount == numberOfColumns {
copiedId := pid
validPeers = append(validPeers, copiedId)
continue
}

// Filter out invalid peers.
for c := range localCustodyColumns {
if !remoteCustodyColumns[c] {
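Taken together, the new hunks add two early exits in front of the per-column comparison: a peer custodying fewer columns than the local node can never hold a superset, and a peer custodying every column always does, so the per-column scan only runs for the cases in between (the valid-peer slice is also preallocated with capacity len(peers)). Below is a minimal, self-contained sketch of that predicate; hasSuperset and its map-based signature are illustrative only and not part of the commit, which performs this logic inline on peer IDs inside GetValidCustodyPeers.

package p2p

// hasSuperset reports whether the remote custody columns form a superset of
// the local ones. numberOfColumns is the total number of data columns
// (params.BeaconConfig().NumberOfColumns in the real code).
//
// Illustrative sketch only: the committed code runs this logic inline,
// per peer, inside (*Service).GetValidCustodyPeers.
func hasSuperset(local, remote map[uint64]bool, numberOfColumns uint64) bool {
	localCount := uint64(len(local))
	remoteCount := uint64(len(remote))

	// A peer custodying fewer columns than the local node cannot hold a superset.
	if remoteCount < localCount {
		return false
	}

	// A peer custodying all possible columns trivially holds a superset,
	// so the per-column scan can be skipped.
	if remoteCount == numberOfColumns {
		return true
	}

	// Otherwise, every locally custodied column must be present remotely.
	for column := range local {
		if !remote[column] {
			return false
		}
	}

	return true
}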
85 changes: 84 additions & 1 deletion beacon-chain/p2p/custody_test.go
@@ -2,16 +2,100 @@ package p2p

import (
"context"
"crypto/ecdsa"
"net"
"testing"
"time"

"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers"
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/scorers"
"github.com/prysmaticlabs/prysm/v5/config/params"
ecdsaprysm "github.com/prysmaticlabs/prysm/v5/crypto/ecdsa"
prysmNetwork "github.com/prysmaticlabs/prysm/v5/network"
"github.com/prysmaticlabs/prysm/v5/testing/require"
)

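// createPeer deterministically derives a secp256k1 private key from
// privateKeyOffset, then returns an ENR record advertising custodyCount
// custody subnets, the corresponding peer ID, and the ECDSA private key.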
func createPeer(t *testing.T, privateKeyOffset int, custodyCount uint64) (*enr.Record, peer.ID, *ecdsa.PrivateKey) {
privateKeyBytes := make([]byte, 32)
for i := 0; i < 32; i++ {
privateKeyBytes[i] = byte(privateKeyOffset + i)
}

unmarshalledPrivateKey, err := crypto.UnmarshalSecp256k1PrivateKey(privateKeyBytes)
require.NoError(t, err)

privateKey, err := ecdsaprysm.ConvertFromInterfacePrivKey(unmarshalledPrivateKey)
require.NoError(t, err)

peerID, err := peer.IDFromPrivateKey(unmarshalledPrivateKey)
require.NoError(t, err)

record := &enr.Record{}
record.Set(CustodySubnetCount(custodyCount))
record.Set(enode.Secp256k1(privateKey.PublicKey))

return record, peerID, privateKey
}

func TestGetValidCustodyPeers(t *testing.T) {
genesisValidatorRoot := make([]byte, 32)

for i := 0; i < 32; i++ {
genesisValidatorRoot[i] = byte(i)
}

service := &Service{
cfg: &Config{},
genesisTime: time.Now(),
genesisValidatorsRoot: genesisValidatorRoot,
peers: peers.NewStatus(context.Background(), &peers.StatusConfig{
ScorerParams: &scorers.Config{},
}),
}

ipAddrString, err := prysmNetwork.ExternalIPv4()
require.NoError(t, err)
ipAddr := net.ParseIP(ipAddrString)

custodyRequirement := params.BeaconConfig().CustodyRequirement
dataColumnSidecarSubnetCount := params.BeaconConfig().DataColumnSidecarSubnetCount

// Peer 1 custodies exactly the same columns as we do.
// (We use the same key pair as ours for simplicity.)
peer1Record, peer1ID, localPrivateKey := createPeer(t, 1, custodyRequirement)

// Peer 2 custodies all the columns.
peer2Record, peer2ID, _ := createPeer(t, 2, dataColumnSidecarSubnetCount)

// Peer 3 custodies different columns than we do (but the same count).
// (We use the same public key as peer 2 for simplicity.)
peer3Record, peer3ID, _ := createPeer(t, 3, custodyRequirement)

// Peer 4 custodies fewer columns than we do.
peer4Record, peer4ID, _ := createPeer(t, 4, custodyRequirement-1)

listener, err := service.createListener(ipAddr, localPrivateKey)
require.NoError(t, err)

service.dv5Listener = listener

service.peers.Add(peer1Record, peer1ID, nil, network.DirOutbound)
service.peers.Add(peer2Record, peer2ID, nil, network.DirOutbound)
service.peers.Add(peer3Record, peer3ID, nil, network.DirOutbound)
service.peers.Add(peer4Record, peer4ID, nil, network.DirOutbound)

actual, err := service.GetValidCustodyPeers([]peer.ID{peer1ID, peer2ID, peer3ID, peer4ID})
require.NoError(t, err)

expected := []peer.ID{peer1ID, peer2ID}
require.DeepSSZEqual(t, expected, actual)
}

func TestCustodyCountFromRemotePeer(t *testing.T) {
const (
expected uint64 = 7
@@ -56,7 +140,6 @@ func TestCustodyCountFromRemotePeer(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
// Create peers status.
peers := peers.NewStatus(context.Background(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{},
})

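For reference, the four peers in TestGetValidCustodyPeers map onto four cases of the superset check: same columns, all columns, same count but different columns, and fewer columns. Here is a table-driven sketch of those cases against the hypothetical hasSuperset helper from the earlier sketch (not part of the commit; the committed test exercises the real path through discv5 records and the peer store):

package p2p

import "testing"

func TestHasSupersetSketch(t *testing.T) {
	// Illustrative value; the real total comes from params.BeaconConfig().NumberOfColumns.
	const numberOfColumns uint64 = 128

	local := map[uint64]bool{1: true, 2: true, 3: true, 4: true}

	all := make(map[uint64]bool, numberOfColumns)
	for column := uint64(0); column < numberOfColumns; column++ {
		all[column] = true
	}

	testCases := []struct {
		name   string
		remote map[uint64]bool
		want   bool
	}{
		{name: "same columns as the local node", remote: map[uint64]bool{1: true, 2: true, 3: true, 4: true}, want: true},
		{name: "all columns", remote: all, want: true},
		{name: "same count, different columns", remote: map[uint64]bool{5: true, 6: true, 7: true, 8: true}, want: false},
		{name: "fewer columns than the local node", remote: map[uint64]bool{1: true, 2: true, 3: true}, want: false},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			if got := hasSuperset(local, tc.remote, numberOfColumns); got != tc.want {
				t.Fatalf("hasSuperset() = %v, want %v", got, tc.want)
			}
		})
	}
}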
