From 7a5f2bf7ce9ee5bf88ffb7052e2509d615dc9590 Mon Sep 17 00:00:00 2001 From: Manu NALEPA Date: Wed, 19 Jun 2024 17:36:42 +0200 Subject: [PATCH] wip --- beacon-chain/core/peerdas/helpers.go | 35 ++- beacon-chain/core/peerdas/log.go | 5 + beacon-chain/p2p/custody.go | 35 +-- beacon-chain/p2p/custody_test.go | 5 +- beacon-chain/p2p/discovery.go | 18 +- beacon-chain/p2p/discovery_test.go | 3 +- beacon-chain/p2p/testing/fuzz_p2p.go | 68 ++--- beacon-chain/p2p/testing/mock_host.go | 24 +- beacon-chain/p2p/testing/mock_peermanager.go | 8 +- beacon-chain/p2p/testing/p2p.go | 54 ++-- beacon-chain/sync/data_columns_sampling.go | 152 ++++++++--- .../sync/data_columns_sampling_test.go | 246 ++++++++++++++++++ beacon-chain/sync/rpc_send_request.go | 3 +- 13 files changed, 506 insertions(+), 150 deletions(-) create mode 100644 beacon-chain/core/peerdas/log.go create mode 100644 beacon-chain/sync/data_columns_sampling_test.go diff --git a/beacon-chain/core/peerdas/helpers.go b/beacon-chain/core/peerdas/helpers.go index 741f5397cd2..96a9ca1c4c9 100644 --- a/beacon-chain/core/peerdas/helpers.go +++ b/beacon-chain/core/peerdas/helpers.go @@ -7,6 +7,7 @@ import ( cKzg4844 "github.com/ethereum/c-kzg-4844/bindings/go" "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/ethereum/go-ethereum/p2p/enr" "github.com/holiman/uint256" errors "github.com/pkg/errors" "github.com/prysmaticlabs/prysm/v5/config/params" @@ -18,13 +19,24 @@ import ( ) // Bytes per cell -const bytesPerCell = cKzg4844.FieldElementsPerCell * cKzg4844.BytesPerFieldElement +const ( + CustodySubnetCountEnrKey = "csc" + + bytesPerCell = cKzg4844.FieldElementsPerCell * cKzg4844.BytesPerFieldElement +) + +// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/p2p-interface.md#the-discovery-domain-discv5 +type CustodySubnetCount uint64 + +func (CustodySubnetCount) ENRKey() string { return CustodySubnetCountEnrKey } var ( // Custom errors - errCustodySubnetCountTooLarge = errors.New("custody 
subnet count larger than data column sidecar subnet count") - errIndexTooLarge = errors.New("column index is larger than the specified columns count") - errMismatchLength = errors.New("mismatch in the length of the commitments and proofs") + errCustodySubnetCountTooLarge = errors.New("custody subnet count larger than data column sidecar subnet count") + errIndexTooLarge = errors.New("column index is larger than the specified columns count") + errMismatchLength = errors.New("mismatch in the length of the commitments and proofs") + errRecordNil = errors.New("record is nil") + errCannotLoadCustodySubnetCount = errors.New("cannot load the custody subnet count from peer") // maxUint256 is the maximum value of a uint256. maxUint256 = &uint256.Int{math.MaxUint64, math.MaxUint64, math.MaxUint64, math.MaxUint64} @@ -350,3 +362,18 @@ func ExtendedSampleCount(samplesPerSlot, allowedFailures uint64) uint64 { return sampleCount } + +func CustodyCountFromRecord(record *enr.Record) (uint64, error) { + // By default, we assume the peer custodies the minimum number of subnets. + if record == nil { + return 0, errRecordNil + } + + // Load the `custody_subnet_count` + var csc CustodySubnetCount + if err := record.Load(&csc); err != nil { + return 0, errCannotLoadCustodySubnetCount + } + + return uint64(csc), nil +} diff --git a/beacon-chain/core/peerdas/log.go b/beacon-chain/core/peerdas/log.go new file mode 100644 index 00000000000..ff09a77f828 --- /dev/null +++ b/beacon-chain/core/peerdas/log.go @@ -0,0 +1,5 @@ +package peerdas + +import "github.com/sirupsen/logrus" + +var log = logrus.WithField("prefix", "peerdas") diff --git a/beacon-chain/p2p/custody.go b/beacon-chain/p2p/custody.go index 25b894f1282..160b50cee89 100644 --- a/beacon-chain/p2p/custody.go +++ b/beacon-chain/p2p/custody.go @@ -82,42 +82,29 @@ loop: // CustodyCountFromRemotePeer retrieves the custody count from a remote peer. 
func (s *Service) CustodyCountFromRemotePeer(pid peer.ID) uint64 { // By default, we assume the peer custodies the minimum number of subnets. - custodyCount := params.BeaconConfig().CustodyRequirement + custodyRequirement := params.BeaconConfig().CustodyRequirement // Retrieve the ENR of the peer. record, err := s.peers.ENR(pid) if err != nil { log.WithError(err).WithFields(logrus.Fields{ "peerID": pid, - "defaultValue": custodyCount, + "defaultValue": custodyRequirement, }).Error("Failed to retrieve ENR for peer, defaulting to the default value") - return custodyCount - } - if record == nil { - // This is the case for inbound peers. So we don't log an error for this. - log.WithFields(logrus.Fields{ - "peerID": pid, - "defaultValue": custodyCount, - }).Debug("No ENR found for peer, defaulting to the default value") - return custodyCount + return custodyRequirement } - // Load the `custody_subnet_count` - var csc CustodySubnetCount - if err := record.Load(&csc); err != nil { - log.WithFields(logrus.Fields{ + // Retrieve the custody subnets count from the ENR. 
+ custodyCount, err := peerdas.CustodyCountFromRecord(record) + if err != nil { + log.WithError(err).WithFields(logrus.Fields{ "peerID": pid, - "defaultValue": custodyCount, - }).Warning("Cannot load the custody subnet count from peer, defaulting to the default value") + "defaultValue": custodyRequirement, + }).Error("Failed to retrieve custody count from ENR for peer, defaulting to the default value") - return custodyCount + return custodyRequirement } - log.WithFields(logrus.Fields{ - "peerID": pid, - "custodyCount": csc, - }).Debug("Custody count read from peer's ENR") - - return uint64(csc) + return custodyCount } diff --git a/beacon-chain/p2p/custody_test.go b/beacon-chain/p2p/custody_test.go index c2c662e9c8a..f6cd9be9236 100644 --- a/beacon-chain/p2p/custody_test.go +++ b/beacon-chain/p2p/custody_test.go @@ -12,6 +12,7 @@ import ( "github.com/libp2p/go-libp2p/core/crypto" "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" + "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas" "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers" "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/scorers" "github.com/prysmaticlabs/prysm/v5/config/params" @@ -36,7 +37,7 @@ func createPeer(t *testing.T, privateKeyOffset int, custodyCount uint64) (*enr.R require.NoError(t, err) record := &enr.Record{} - record.Set(CustodySubnetCount(custodyCount)) + record.Set(peerdas.CustodySubnetCount(custodyCount)) record.Set(enode.Secp256k1(privateKey.PublicKey)) return record, peerID, privateKey @@ -102,7 +103,7 @@ func TestCustodyCountFromRemotePeer(t *testing.T) { pid = "test-id" ) - csc := CustodySubnetCount(expected) + csc := peerdas.CustodySubnetCount(expected) // Define a nil record var nilRecord *enr.Record = nil diff --git a/beacon-chain/p2p/discovery.go b/beacon-chain/p2p/discovery.go index a71efe12c77..41ce45f8df9 100644 --- a/beacon-chain/p2p/discovery.go +++ b/beacon-chain/p2p/discovery.go @@ -17,6 +17,7 @@ import ( 
"github.com/prysmaticlabs/go-bitfield" "github.com/prysmaticlabs/prysm/v5/beacon-chain/cache" + "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas" "github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags" "github.com/prysmaticlabs/prysm/v5/config/features" "github.com/prysmaticlabs/prysm/v5/config/params" @@ -43,22 +44,13 @@ const ( udp6 ) -const ( - quickProtocolEnrKey = "quic" - custodySubnetCountEnrKey = "csc" -) +const quickProtocolEnrKey = "quic" -type ( - quicProtocol uint16 - CustodySubnetCount uint64 -) +type quicProtocol uint16 // quicProtocol is the "quic" key, which holds the QUIC port of the node. func (quicProtocol) ENRKey() string { return quickProtocolEnrKey } -// https://github.com/ethereum/consensus-specs/blob/dev/specs/_features/eip7594/p2p-interface.md#the-discovery-domain-discv5 -func (CustodySubnetCount) ENRKey() string { return custodySubnetCountEnrKey } - // RefreshPersistentSubnets checks that we are tracking our local persistent subnets for a variety of gossip topics. // This routine checks for our attestation, sync committee and data column subnets and updates them if they have // been rotated. 
@@ -275,9 +267,9 @@ func (s *Service) createLocalNode( } if features.Get().EnablePeerDAS { - custodySubnetEntry := CustodySubnetCount(params.BeaconConfig().CustodyRequirement) + custodySubnetEntry := peerdas.CustodySubnetCount(params.BeaconConfig().CustodyRequirement) if flags.Get().SubscribeToAllSubnets { - custodySubnetEntry = CustodySubnetCount(params.BeaconConfig().DataColumnSidecarSubnetCount) + custodySubnetEntry = peerdas.CustodySubnetCount(params.BeaconConfig().DataColumnSidecarSubnetCount) } localNode.Set(custodySubnetEntry) } diff --git a/beacon-chain/p2p/discovery_test.go b/beacon-chain/p2p/discovery_test.go index 47737996524..f187d53f99b 100644 --- a/beacon-chain/p2p/discovery_test.go +++ b/beacon-chain/p2p/discovery_test.go @@ -24,6 +24,7 @@ import ( mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing" "github.com/prysmaticlabs/prysm/v5/beacon-chain/cache" + "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas" "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers" "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/peerdata" "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/scorers" @@ -237,7 +238,7 @@ func TestCreateLocalNode(t *testing.T) { // Check custody_subnet_count config. custodySubnetCount := new(uint64) - require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(custodySubnetCountEnrKey, custodySubnetCount))) + require.NoError(t, localNode.Node().Record().Load(enr.WithEntry(peerdas.CustodySubnetCountEnrKey, custodySubnetCount))) require.Equal(t, uint64(1), *custodySubnetCount) }) } diff --git a/beacon-chain/p2p/testing/fuzz_p2p.go b/beacon-chain/p2p/testing/fuzz_p2p.go index 0f358d5b8a1..0bc3f9646a3 100644 --- a/beacon-chain/p2p/testing/fuzz_p2p.go +++ b/beacon-chain/p2p/testing/fuzz_p2p.go @@ -28,166 +28,166 @@ func NewFuzzTestP2P() *FakeP2P { } // Encoding -- fake. 
-func (_ *FakeP2P) Encoding() encoder.NetworkEncoding { +func (*FakeP2P) Encoding() encoder.NetworkEncoding { return &encoder.SszNetworkEncoder{} } // AddConnectionHandler -- fake. -func (_ *FakeP2P) AddConnectionHandler(_, _ func(ctx context.Context, id peer.ID) error) { +func (*FakeP2P) AddConnectionHandler(_, _ func(ctx context.Context, id peer.ID) error) { } // AddDisconnectionHandler -- fake. -func (_ *FakeP2P) AddDisconnectionHandler(_ func(ctx context.Context, id peer.ID) error) { +func (*FakeP2P) AddDisconnectionHandler(_ func(ctx context.Context, id peer.ID) error) { } // AddPingMethod -- fake. -func (_ *FakeP2P) AddPingMethod(_ func(ctx context.Context, id peer.ID) error) { +func (*FakeP2P) AddPingMethod(_ func(ctx context.Context, id peer.ID) error) { } // PeerID -- fake. -func (_ *FakeP2P) PeerID() peer.ID { +func (*FakeP2P) PeerID() peer.ID { return "fake" } // ENR returns the enr of the local peer. -func (_ *FakeP2P) ENR() *enr.Record { +func (*FakeP2P) ENR() *enr.Record { return new(enr.Record) } // NodeID returns the node id of the local peer. -func (_ *FakeP2P) NodeID() enode.ID { +func (*FakeP2P) NodeID() enode.ID { return [32]byte{} } // DiscoveryAddresses -- fake -func (_ *FakeP2P) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) { +func (*FakeP2P) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) { return nil, nil } // FindPeersWithSubnet mocks the p2p func. -func (_ *FakeP2P) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) { +func (*FakeP2P) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) { return false, nil } // RefreshENR mocks the p2p func. -func (_ *FakeP2P) RefreshPersistentSubnets() {} +func (*FakeP2P) RefreshPersistentSubnets() {} // LeaveTopic -- fake. -func (_ *FakeP2P) LeaveTopic(_ string) error { +func (*FakeP2P) LeaveTopic(_ string) error { return nil } // Metadata -- fake. 
-func (_ *FakeP2P) Metadata() metadata.Metadata { +func (*FakeP2P) Metadata() metadata.Metadata { return nil } // Peers -- fake. -func (_ *FakeP2P) Peers() *peers.Status { +func (*FakeP2P) Peers() *peers.Status { return nil } // PublishToTopic -- fake. -func (_ *FakeP2P) PublishToTopic(_ context.Context, _ string, _ []byte, _ ...pubsub.PubOpt) error { +func (*FakeP2P) PublishToTopic(_ context.Context, _ string, _ []byte, _ ...pubsub.PubOpt) error { return nil } // Send -- fake. -func (_ *FakeP2P) Send(_ context.Context, _ interface{}, _ string, _ peer.ID) (network.Stream, error) { +func (*FakeP2P) Send(_ context.Context, _ interface{}, _ string, _ peer.ID) (network.Stream, error) { return nil, nil } // PubSub -- fake. -func (_ *FakeP2P) PubSub() *pubsub.PubSub { +func (*FakeP2P) PubSub() *pubsub.PubSub { return nil } // MetadataSeq -- fake. -func (_ *FakeP2P) MetadataSeq() uint64 { +func (*FakeP2P) MetadataSeq() uint64 { return 0 } // SetStreamHandler -- fake. -func (_ *FakeP2P) SetStreamHandler(_ string, _ network.StreamHandler) { +func (*FakeP2P) SetStreamHandler(_ string, _ network.StreamHandler) { } // SubscribeToTopic -- fake. -func (_ *FakeP2P) SubscribeToTopic(_ string, _ ...pubsub.SubOpt) (*pubsub.Subscription, error) { +func (*FakeP2P) SubscribeToTopic(_ string, _ ...pubsub.SubOpt) (*pubsub.Subscription, error) { return nil, nil } // JoinTopic -- fake. -func (_ *FakeP2P) JoinTopic(_ string, _ ...pubsub.TopicOpt) (*pubsub.Topic, error) { +func (*FakeP2P) JoinTopic(_ string, _ ...pubsub.TopicOpt) (*pubsub.Topic, error) { return nil, nil } // Host -- fake. -func (_ *FakeP2P) Host() host.Host { +func (*FakeP2P) Host() host.Host { return nil } // Disconnect -- fake. -func (_ *FakeP2P) Disconnect(_ peer.ID) error { +func (*FakeP2P) Disconnect(_ peer.ID) error { return nil } // Broadcast -- fake. 
-func (_ *FakeP2P) Broadcast(_ context.Context, _ proto.Message) error { +func (*FakeP2P) Broadcast(_ context.Context, _ proto.Message) error { return nil } // BroadcastAttestation -- fake. -func (_ *FakeP2P) BroadcastAttestation(_ context.Context, _ uint64, _ ethpb.Att) error { +func (*FakeP2P) BroadcastAttestation(_ context.Context, _ uint64, _ ethpb.Att) error { return nil } // BroadcastSyncCommitteeMessage -- fake. -func (_ *FakeP2P) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *ethpb.SyncCommitteeMessage) error { +func (*FakeP2P) BroadcastSyncCommitteeMessage(_ context.Context, _ uint64, _ *ethpb.SyncCommitteeMessage) error { return nil } // BroadcastBlob -- fake. -func (_ *FakeP2P) BroadcastBlob(_ context.Context, _ uint64, _ *ethpb.BlobSidecar) error { +func (*FakeP2P) BroadcastBlob(_ context.Context, _ uint64, _ *ethpb.BlobSidecar) error { return nil } // BroadcastDataColumn -- fake. -func (_ *FakeP2P) BroadcastDataColumn(_ context.Context, _ uint64, _ *ethpb.DataColumnSidecar) error { +func (*FakeP2P) BroadcastDataColumn(_ context.Context, _ uint64, _ *ethpb.DataColumnSidecar) error { return nil } // InterceptPeerDial -- fake. -func (_ *FakeP2P) InterceptPeerDial(peer.ID) (allow bool) { +func (*FakeP2P) InterceptPeerDial(peer.ID) (allow bool) { return true } // InterceptAddrDial -- fake. -func (_ *FakeP2P) InterceptAddrDial(peer.ID, multiaddr.Multiaddr) (allow bool) { +func (*FakeP2P) InterceptAddrDial(peer.ID, multiaddr.Multiaddr) (allow bool) { return true } // InterceptAccept -- fake. -func (_ *FakeP2P) InterceptAccept(_ network.ConnMultiaddrs) (allow bool) { +func (*FakeP2P) InterceptAccept(_ network.ConnMultiaddrs) (allow bool) { return true } // InterceptSecured -- fake. -func (_ *FakeP2P) InterceptSecured(network.Direction, peer.ID, network.ConnMultiaddrs) (allow bool) { +func (*FakeP2P) InterceptSecured(network.Direction, peer.ID, network.ConnMultiaddrs) (allow bool) { return true } // InterceptUpgraded -- fake. 
-func (_ *FakeP2P) InterceptUpgraded(network.Conn) (allow bool, reason control.DisconnectReason) { +func (*FakeP2P) InterceptUpgraded(network.Conn) (allow bool, reason control.DisconnectReason) { return true, 0 } -func (_ *FakeP2P) CustodyCountFromRemotePeer(peer.ID) uint64 { +func (*FakeP2P) CustodyCountFromRemotePeer(peer.ID) uint64 { return 0 } -func (_ *FakeP2P) GetValidCustodyPeers(peers []peer.ID) ([]peer.ID, error) { +func (*FakeP2P) GetValidCustodyPeers(peers []peer.ID) ([]peer.ID, error) { return peers, nil } diff --git a/beacon-chain/p2p/testing/mock_host.go b/beacon-chain/p2p/testing/mock_host.go index 38d66533f3c..88c75930a65 100644 --- a/beacon-chain/p2p/testing/mock_host.go +++ b/beacon-chain/p2p/testing/mock_host.go @@ -18,12 +18,12 @@ type MockHost struct { } // ID -- -func (_ *MockHost) ID() peer.ID { +func (*MockHost) ID() peer.ID { return "" } // Peerstore -- -func (_ *MockHost) Peerstore() peerstore.Peerstore { +func (*MockHost) Peerstore() peerstore.Peerstore { return nil } @@ -33,46 +33,46 @@ func (m *MockHost) Addrs() []ma.Multiaddr { } // Network -- -func (_ *MockHost) Network() network.Network { +func (*MockHost) Network() network.Network { return nil } // Mux -- -func (_ *MockHost) Mux() protocol.Switch { +func (*MockHost) Mux() protocol.Switch { return nil } // Connect -- -func (_ *MockHost) Connect(_ context.Context, _ peer.AddrInfo) error { +func (*MockHost) Connect(_ context.Context, _ peer.AddrInfo) error { return nil } // SetStreamHandler -- -func (_ *MockHost) SetStreamHandler(_ protocol.ID, _ network.StreamHandler) {} +func (*MockHost) SetStreamHandler(_ protocol.ID, _ network.StreamHandler) {} // SetStreamHandlerMatch -- -func (_ *MockHost) SetStreamHandlerMatch(protocol.ID, func(id protocol.ID) bool, network.StreamHandler) { +func (*MockHost) SetStreamHandlerMatch(protocol.ID, func(id protocol.ID) bool, network.StreamHandler) { } // RemoveStreamHandler -- -func (_ *MockHost) RemoveStreamHandler(_ protocol.ID) {} +func (*MockHost) 
RemoveStreamHandler(_ protocol.ID) {} // NewStream -- -func (_ *MockHost) NewStream(_ context.Context, _ peer.ID, _ ...protocol.ID) (network.Stream, error) { +func (*MockHost) NewStream(_ context.Context, _ peer.ID, _ ...protocol.ID) (network.Stream, error) { return nil, nil } // Close -- -func (_ *MockHost) Close() error { +func (*MockHost) Close() error { return nil } // ConnManager -- -func (_ *MockHost) ConnManager() connmgr.ConnManager { +func (*MockHost) ConnManager() connmgr.ConnManager { return nil } // EventBus -- -func (_ *MockHost) EventBus() event.Bus { +func (*MockHost) EventBus() event.Bus { return nil } diff --git a/beacon-chain/p2p/testing/mock_peermanager.go b/beacon-chain/p2p/testing/mock_peermanager.go index 15a6fa266dd..84431d593db 100644 --- a/beacon-chain/p2p/testing/mock_peermanager.go +++ b/beacon-chain/p2p/testing/mock_peermanager.go @@ -21,7 +21,7 @@ type MockPeerManager struct { } // Disconnect . -func (_ *MockPeerManager) Disconnect(peer.ID) error { +func (*MockPeerManager) Disconnect(peer.ID) error { return nil } @@ -54,12 +54,12 @@ func (m MockPeerManager) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) { } // RefreshENR . -func (_ MockPeerManager) RefreshPersistentSubnets() {} +func (MockPeerManager) RefreshPersistentSubnets() {} // FindPeersWithSubnet . -func (_ MockPeerManager) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) { +func (MockPeerManager) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) { return true, nil } // AddPingMethod . 
-func (_ MockPeerManager) AddPingMethod(_ func(ctx context.Context, id peer.ID) error) {} +func (MockPeerManager) AddPingMethod(_ func(ctx context.Context, id peer.ID) error) {} diff --git a/beacon-chain/p2p/testing/p2p.go b/beacon-chain/p2p/testing/p2p.go index 98db07c4ea7..a15d15bdfc5 100644 --- a/beacon-chain/p2p/testing/p2p.go +++ b/beacon-chain/p2p/testing/p2p.go @@ -23,9 +23,11 @@ import ( swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing" "github.com/multiformats/go-multiaddr" ssz "github.com/prysmaticlabs/fastssz" + "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas" "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/encoder" "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers" "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers/scorers" + "github.com/prysmaticlabs/prysm/v5/config/params" ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1" "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1/metadata" "github.com/sirupsen/logrus" @@ -51,9 +53,10 @@ type TestP2P struct { } // NewTestP2P initializes a new p2p test service. -func NewTestP2P(t *testing.T) *TestP2P { +func NewTestP2P(t *testing.T, opts ...swarmt.Option) *TestP2P { + opts = append(opts, swarmt.OptDisableQUIC) ctx := context.Background() - h := bhost.NewBlankHost(swarmt.GenSwarm(t, swarmt.OptDisableQUIC)) + h := bhost.NewBlankHost(swarmt.GenSwarm(t, opts...)) ps, err := pubsub.NewFloodSub(ctx, h, pubsub.WithMessageSigning(false), pubsub.WithStrictSignatureVerification(false), @@ -239,7 +242,7 @@ func (p *TestP2P) LeaveTopic(topic string) error { } // Encoding returns ssz encoding. -func (_ *TestP2P) Encoding() encoder.NetworkEncoding { +func (*TestP2P) Encoding() encoder.NetworkEncoding { return &encoder.SszNetworkEncoder{} } @@ -266,17 +269,17 @@ func (p *TestP2P) Host() host.Host { } // ENR returns the enr of the local peer. 
-func (_ *TestP2P) ENR() *enr.Record { +func (*TestP2P) ENR() *enr.Record { return new(enr.Record) } // NodeID returns the node id of the local peer. -func (_ *TestP2P) NodeID() enode.ID { +func (*TestP2P) NodeID() enode.ID { return [32]byte{} } // DiscoveryAddresses -- -func (_ *TestP2P) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) { +func (*TestP2P) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) { return nil, nil } @@ -358,7 +361,7 @@ func (p *TestP2P) Send(ctx context.Context, msg interface{}, topic string, pid p } // Started always returns true. -func (_ *TestP2P) Started() bool { +func (*TestP2P) Started() bool { return true } @@ -368,12 +371,12 @@ func (p *TestP2P) Peers() *peers.Status { } // FindPeersWithSubnet mocks the p2p func. -func (_ *TestP2P) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) { +func (*TestP2P) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) { return false, nil } // RefreshENR mocks the p2p func. -func (_ *TestP2P) RefreshPersistentSubnets() {} +func (*TestP2P) RefreshPersistentSubnets() {} // ForkDigest mocks the p2p func. func (p *TestP2P) ForkDigest() ([4]byte, error) { @@ -391,39 +394,54 @@ func (p *TestP2P) MetadataSeq() uint64 { } // AddPingMethod mocks the p2p func. -func (_ *TestP2P) AddPingMethod(_ func(ctx context.Context, id peer.ID) error) { +func (*TestP2P) AddPingMethod(_ func(ctx context.Context, id peer.ID) error) { // no-op } // InterceptPeerDial . -func (_ *TestP2P) InterceptPeerDial(peer.ID) (allow bool) { +func (*TestP2P) InterceptPeerDial(peer.ID) (allow bool) { return true } // InterceptAddrDial . -func (_ *TestP2P) InterceptAddrDial(peer.ID, multiaddr.Multiaddr) (allow bool) { +func (*TestP2P) InterceptAddrDial(peer.ID, multiaddr.Multiaddr) (allow bool) { return true } // InterceptAccept . 
-func (_ *TestP2P) InterceptAccept(_ network.ConnMultiaddrs) (allow bool) { +func (*TestP2P) InterceptAccept(_ network.ConnMultiaddrs) (allow bool) { return true } // InterceptSecured . -func (_ *TestP2P) InterceptSecured(network.Direction, peer.ID, network.ConnMultiaddrs) (allow bool) { +func (*TestP2P) InterceptSecured(network.Direction, peer.ID, network.ConnMultiaddrs) (allow bool) { return true } // InterceptUpgraded . -func (_ *TestP2P) InterceptUpgraded(network.Conn) (allow bool, reason control.DisconnectReason) { +func (*TestP2P) InterceptUpgraded(network.Conn) (allow bool, reason control.DisconnectReason) { return true, 0 } -func (_ *TestP2P) CustodyCountFromRemotePeer(peer.ID) uint64 { - return 0 +func (s *TestP2P) CustodyCountFromRemotePeer(pid peer.ID) uint64 { + // By default, we assume the peer custodies the minimum number of subnets. + custodyRequirement := params.BeaconConfig().CustodyRequirement + + // Retrieve the ENR of the peer. + record, err := s.peers.ENR(pid) + if err != nil { + return custodyRequirement + } + + // Retrieve the custody subnets count from the ENR. 
+ custodyCount, err := peerdas.CustodyCountFromRecord(record) + if err != nil { + return custodyRequirement + } + + return custodyCount } -func (_ *TestP2P) GetValidCustodyPeers(peers []peer.ID) ([]peer.ID, error) { +func (*TestP2P) GetValidCustodyPeers(peers []peer.ID) ([]peer.ID, error) { return peers, nil } diff --git a/beacon-chain/sync/data_columns_sampling.go b/beacon-chain/sync/data_columns_sampling.go index b8289ae8fb5..888d7b2c517 100644 --- a/beacon-chain/sync/data_columns_sampling.go +++ b/beacon-chain/sync/data_columns_sampling.go @@ -14,6 +14,7 @@ import ( "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas" "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p" "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types" + "github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags" fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams" "github.com/prysmaticlabs/prysm/v5/config/params" "github.com/prysmaticlabs/prysm/v5/crypto/rand" @@ -21,21 +22,25 @@ import ( "github.com/prysmaticlabs/prysm/v5/runtime/version" ) -// randomSlice returns a slice of `count` random integers in the range [0, count[. -// Each item is unique. -func randomSlice(count uint64) []uint64 { - slice := make([]uint64, count) +type roundSummary struct { + RequestedColumns []uint64 + MissingColumns map[uint64]bool +} - for i := uint64(0); i < count; i++ { - slice[i] = i +// randomizeColumns returns a slice containing all columns in a random order. +func randomizeColumns(columns map[uint64]bool) []uint64 { + // Create a slice from columns. + randomized := make([]uint64, 0, len(columns)) + for column := range columns { + randomized = append(randomized, column) } // Shuffle the slice. 
- rand.NewGenerator().Shuffle(len(slice), func(i, j int) { - slice[i], slice[j] = slice[j], slice[i] + rand.NewGenerator().Shuffle(len(randomized), func(i, j int) { + randomized[i], randomized[j] = randomized[j], randomized[i] }) - return slice + return randomized } // sortedSliceFromMap returns a sorted slices of keys from a map. @@ -160,7 +165,7 @@ func (s *Service) sampleDataColumnsFromPeer( func (s *Service) sampleDataColumnsFromPeers( columnsToSample []uint64, root [fieldparams.RootLength]byte, -) (uint64, error) { +) (map[uint64]bool, error) { // Build all remaining columns to sample. remainingColumnsToSample := make(map[uint64]bool, len(columnsToSample)) for _, column := range columnsToSample { @@ -170,9 +175,9 @@ func (s *Service) sampleDataColumnsFromPeers( // Get the active peers from the p2p service. activePids := s.cfg.p2p.Peers().Active() - // Query all peers until either all columns to request are retrieved or all active peers are queried (whichever comes first). - retrievedColumnsCount := 0 + retrievedColumns := make(map[uint64]bool, len(columnsToSample)) + // Query all peers until either all columns to request are retrieved or all active peers are queried (whichever comes first). for i := 0; len(remainingColumnsToSample) > 0 && i < len(activePids); i++ { // Get the peer ID. pid := activePids[i] @@ -180,7 +185,7 @@ func (s *Service) sampleDataColumnsFromPeers( // Get the custody columns of the peer. peerCustodyColumns, err := s.custodyColumnsFromPeer(pid) if err != nil { - return 0, errors.Wrap(err, "custody columns from peer") + return nil, errors.Wrap(err, "custody columns from peer") } // Compute the intersection of the peer custody columns and the remaining columns to request. @@ -199,57 +204,74 @@ func (s *Service) sampleDataColumnsFromPeers( // Sample data columns from the peer. 
peerRetrievedColumns, err := s.sampleDataColumnsFromPeer(pid, peerRequestedColumns, root) if err != nil { - return 0, errors.Wrap(err, "sample data columns from peer") + return nil, errors.Wrap(err, "sample data columns from peer") } // Update the retrieved columns. - retrievedColumnsCount += len(peerRetrievedColumns) + for column := range peerRetrievedColumns { + retrievedColumns[column] = true + } } - return uint64(retrievedColumnsCount), nil + return retrievedColumns, nil } // incrementalDAS samples data columns from active peers using incremental DAS. // https://ethresear.ch/t/lossydas-lossy-incremental-and-diagonal-sampling-for-data-availability/18963#incrementaldas-dynamically-increase-the-sample-size-10 -func (s *Service) incrementalDAS(root [fieldparams.RootLength]byte, sampleCount uint64) error { - // Retrieve the number of columns. - columnsCount := params.BeaconConfig().NumberOfColumns - - // Ramdomize all columns. - columns := randomSlice(columnsCount) - - // Define the first column to sample. - missingColumnsCount := uint64(0) - +func (s *Service) incrementalDAS( + root [fieldparams.RootLength]byte, + columns []uint64, + sampleCount uint64, +) (bool, []roundSummary, error) { + columnsCount, missingColumnsCount := uint64(len(columns)), uint64(0) firstColumnToSample, extendedSampleCount := uint64(0), peerdas.ExtendedSampleCount(sampleCount, 0) - for i := 1; ; i++ { + roundSummaries := make([]roundSummary, 0, 1) // We optimistically allocate only one round summary. + + for round := 1; ; /*No exit condition */ round++ { if extendedSampleCount > columnsCount { - // We already tried to sample all columns, this is the unhappy path. + // We already tried to sample all possible columns, this is the unhappy path. log.WithField("root", fmt.Sprintf("%#x", root)).Warning("Some columns are still missing after sampling all possible columns") - return nil + return false, roundSummaries, nil } + // Get the columns to sample for this round. 
columnsToSample := columns[firstColumnToSample:extendedSampleCount] columnsToSampleCount := extendedSampleCount - firstColumnToSample - retrievedSampleCount, err := s.sampleDataColumnsFromPeers(columnsToSample, root) + // Sample the data columns from the peers. + retrievedSamples, err := s.sampleDataColumnsFromPeers(columnsToSample, root) if err != nil { - return errors.Wrap(err, "sample data columns from peers") + return false, nil, errors.Wrap(err, "sample data columns from peers") + } + + // Compute the missing samples. + missingSamples := make(map[uint64]bool, max(0, len(columnsToSample)-len(retrievedSamples))) + for _, column := range columnsToSample { + if !retrievedSamples[column] { + missingSamples[column] = true + } } + roundSummaries = append(roundSummaries, roundSummary{ + RequestedColumns: columnsToSample, + MissingColumns: missingSamples, + }) + + retrievedSampleCount := uint64(len(retrievedSamples)) + if retrievedSampleCount == columnsToSampleCount { // All columns were correctly sampled, this is the happy path. log.WithFields(logrus.Fields{ "root": fmt.Sprintf("%#x", root), - "roundsNeeded": i, + "roundsNeeded": round, }).Debug("All columns were successfully sampled") - return nil + return true, roundSummaries, nil } if retrievedSampleCount > columnsToSampleCount { // This should never happen. - return errors.New("retrieved more columns than requested") + return false, nil, errors.New("retrieved more columns than requested") } // Some columns are missing, we need to extend the sample size. 
@@ -261,7 +283,7 @@ func (s *Service) incrementalDAS(root [fieldparams.RootLength]byte, sampleCount log.WithFields(logrus.Fields{ "root": fmt.Sprintf("%#x", root), - "round": i, + "round": round, "missingColumnsCount": missingColumnsCount, "currentSampleCount": oldExtendedSampleCount, "nextSampleCount": extendedSampleCount, @@ -271,10 +293,47 @@ func (s *Service) incrementalDAS(root [fieldparams.RootLength]byte, sampleCount // DataColumnSamplingRoutine runs incremental DAS on block when received. func (s *Service) DataColumnSamplingRoutine(ctx context.Context) { + // Get the custody subnets count. + custodySubnetsCount := params.BeaconConfig().CustodyRequirement + if flags.Get().SubscribeToAllSubnets { + custodySubnetsCount = params.BeaconConfig().DataColumnSidecarSubnetCount + } + // Create a subscription to the state feed. stateChannel := make(chan *feed.Event, 1) stateSub := s.cfg.stateNotifier.StateFeed().Subscribe(stateChannel) + // Retrieve the number of columns. + columnsCount := params.BeaconConfig().NumberOfColumns + + // Retrieve all columns we custody. + custodyColumns, err := peerdas.CustodyColumns(s.cfg.p2p.NodeID(), custodySubnetsCount) + if err != nil { + log.WithError(err).Error("Failed to get custody columns") + return + } + + custodyColumnsCount := uint64(len(custodyColumns)) + + // Compute the number of columns to sample. + if custodyColumnsCount >= columnsCount/2 { + log.WithFields(logrus.Fields{ + "custodyColumnsCount": custodyColumnsCount, + "columnsCount": columnsCount, + }).Debug("At least half of the columns are custody columns, no need to sample") + return + } + + samplesCount := min(params.BeaconConfig().SamplesPerSlot, columnsCount/2-custodyColumnsCount) + + // Compute all the columns we do NOT custody. 
+ nonCustodyColums := make(map[uint64]bool, columnsCount-custodyColumnsCount) + for i := uint64(0); i < columnsCount; i++ { + if !custodyColumns[i] { + nonCustodyColums[i] = true + } + } + // Unsubscribe from the state feed when the function returns. defer stateSub.Unsubscribe() @@ -315,9 +374,28 @@ func (s *Service) DataColumnSamplingRoutine(ctx context.Context) { continue } + // Ramdomize all columns. + randomizedColumns := randomizeColumns(nonCustodyColums) + // Sample data columns with incremental DAS. - if err := s.incrementalDAS(data.BlockRoot, params.BeaconConfig().SamplesPerSlot); err != nil { - log.WithError(err).Error("Failed to sample data columns") + ok, _, err = s.incrementalDAS(data.BlockRoot, randomizedColumns, samplesCount) + if err != nil { + log.WithError(err).Error("Error during incremental DAS") + } + + if ok { + log.WithFields(logrus.Fields{ + "root": fmt.Sprintf("%#x", data.BlockRoot), + "columns": randomizedColumns, + "sampleCount": samplesCount, + }).Debug("Data column sampling successful") + } else { + log.WithFields(logrus.Fields{ + "root": fmt.Sprintf("%#x", data.BlockRoot), + "columns": randomizedColumns, + "sampleCount": samplesCount, + }).Warning("Data column sampling failed") + } case <-s.ctx.Done(): diff --git a/beacon-chain/sync/data_columns_sampling_test.go b/beacon-chain/sync/data_columns_sampling_test.go new file mode 100644 index 00000000000..711ec52a48f --- /dev/null +++ b/beacon-chain/sync/data_columns_sampling_test.go @@ -0,0 +1,246 @@ +package sync + +import ( + "context" + "testing" + + "github.com/ethereum/go-ethereum/p2p/enr" + "github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/network" + swarmt "github.com/libp2p/go-libp2p/p2p/net/swarm/testing" + mock "github.com/prysmaticlabs/prysm/v5/beacon-chain/blockchain/testing" + "github.com/prysmaticlabs/prysm/v5/beacon-chain/core/peerdas" + "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/peers" + p2ptest 
"github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/testing" + p2pTypes "github.com/prysmaticlabs/prysm/v5/beacon-chain/p2p/types" + fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams" + ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1" + "github.com/prysmaticlabs/prysm/v5/runtime/version" + "github.com/prysmaticlabs/prysm/v5/testing/require" +) + +func TestRandomizeColumns(t *testing.T) { + const count uint64 = 128 + + // Generate columns. + columns := make(map[uint64]bool, count) + for i := uint64(0); i < count; i++ { + columns[i] = true + } + + // Randomize columns. + randomizedColumns := randomizeColumns(columns) + + // Convert back to a map. + randomizedColumnsMap := make(map[uint64]bool, count) + for _, column := range randomizedColumns { + randomizedColumnsMap[column] = true + } + + // Check duplicates and missing columns. + require.Equal(t, len(columns), len(randomizedColumnsMap)) + + // Check the values. + for column := range randomizedColumnsMap { + require.Equal(t, true, column < count) + } +} + +// // generateColumns generates a list of columns with the first columns fixed. +// func generateColumns(firstColumns []uint64) []uint64 { +// columnsCount := params.BeaconConfig().NumberOfColumns + +// missingColumns := make(map[uint64]bool, columnsCount) +// for i := uint64(0); i < columnsCount; i++ { +// missingColumns[i] = true +// } + +// for _, column := range firstColumns { +// delete(missingColumns, column) +// } + +// result := make([]uint64, 0, columnsCount) +// result = append(result, firstColumns...) +// for column := range missingColumns { +// result = append(result, column) +// } + +// return result +// } + +// createAndConnectPeer creates a peer with a private key `offset` fixed. 
+// The peer is added and connected to `p2pService` +func createAndConnectPeer( + t *testing.T, + p2pService *p2ptest.TestP2P, + chainService *mock.ChainService, + header *ethpb.BeaconBlockHeader, + custodyCount uint64, + columnsNotToRespond map[uint64]bool, + offset int, +) { + emptyRoot := [fieldparams.RootLength]byte{} + emptySignature := [fieldparams.BLSSignatureLength]byte{} + emptyKzgCommitmentInclusionProof := [4][]byte{ + emptyRoot[:], emptyRoot[:], emptyRoot[:], emptyRoot[:], + } + + // Create the private key, depending on the offset. + privateKeyBytes := make([]byte, 32) + for i := 0; i < 32; i++ { + privateKeyBytes[i] = byte(offset + i) + } + + privateKey, err := crypto.UnmarshalSecp256k1PrivateKey(privateKeyBytes) + require.NoError(t, err) + + // Create the peer. + peer := p2ptest.NewTestP2P(t, swarmt.OptPeerPrivateKey(privateKey)) + + // TODO: Do not hardcode the topic. + peer.SetStreamHandler("/eth2/beacon_chain/req/data_column_sidecars_by_root/1/ssz_snappy", func(stream network.Stream) { + // Decode the request. + req := new(p2pTypes.DataColumnSidecarsByRootReq) + err := peer.Encoding().DecodeWithMaxLength(stream, req) + require.NoError(t, err) + + for _, identifier := range *req { + // Filter out the columns not to respond. + if columnsNotToRespond[identifier.ColumnIndex] { + continue + } + + // Create the response. + resp := ethpb.DataColumnSidecar{ + ColumnIndex: identifier.ColumnIndex, + SignedBlockHeader: ðpb.SignedBeaconBlockHeader{ + Header: header, + Signature: emptySignature[:], + }, + KzgCommitmentsInclusionProof: emptyKzgCommitmentInclusionProof[:], + } + + // Send the response. + err := WriteDataColumnSidecarChunk(stream, chainService, p2pService.Encoding(), &resp) + require.NoError(t, err) + } + + // Close the stream. + closeStream(stream, log) + }) + + // Create the record and set the custody count. + enr := &enr.Record{} + enr.Set(peerdas.CustodySubnetCount(custodyCount)) + + // Add the peer and connect it. 
+ p2pService.Peers().Add(enr, peer.PeerID(), nil, network.DirOutbound) + p2pService.Peers().SetConnectionState(peer.PeerID(), peers.PeerConnected) + p2pService.Connect(peer) +} + +func TestIncrementalDAS(t *testing.T) { + const custodyRequirement uint64 = 1 + + emptyRoot := [fieldparams.RootLength]byte{} + emptyHeader := ðpb.BeaconBlockHeader{ + ParentRoot: emptyRoot[:], + StateRoot: emptyRoot[:], + BodyRoot: emptyRoot[:], + } + + emptyHeaderRoot, err := emptyHeader.HashTreeRoot() + require.NoError(t, err) + + testCases := []struct { + name string + samplesCount uint64 + possibleColumnsToRequest []uint64 + columnsNotToRespond map[uint64]bool + expectedSuccess bool + expectedRoundSummaries []roundSummary + }{ + { + name: "All columns are correctly sampled in a single round", + samplesCount: 5, + possibleColumnsToRequest: []uint64{70, 35, 99, 6, 38, 3, 67, 102, 12, 44, 76, 108}, + columnsNotToRespond: map[uint64]bool{}, + expectedSuccess: true, + expectedRoundSummaries: []roundSummary{ + { + RequestedColumns: []uint64{70, 35, 99, 6, 38}, + MissingColumns: map[uint64]bool{}, + }, + }, + }, + { + name: "Two missing columns in the first round, ok in the second round", + samplesCount: 5, + possibleColumnsToRequest: []uint64{70, 35, 99, 6, 38, 3, 67, 102, 12, 44, 76, 108}, + columnsNotToRespond: map[uint64]bool{6: true, 70: true}, + expectedSuccess: true, + expectedRoundSummaries: []roundSummary{ + { + RequestedColumns: []uint64{70, 35, 99, 6, 38}, + MissingColumns: map[uint64]bool{70: true, 6: true}, + }, + { + RequestedColumns: []uint64{3, 67, 102, 12, 44, 76}, + MissingColumns: map[uint64]bool{}, + }, + }, + }, + { + name: "Two missing columns in the first round, one missing in the second round. 
Fail to sample.", + samplesCount: 5, + possibleColumnsToRequest: []uint64{70, 35, 99, 6, 38, 3, 67, 102, 12, 44, 76, 108}, + columnsNotToRespond: map[uint64]bool{6: true, 70: true, 3: true}, + expectedSuccess: false, + expectedRoundSummaries: []roundSummary{ + { + RequestedColumns: []uint64{70, 35, 99, 6, 38}, + MissingColumns: map[uint64]bool{70: true, 6: true}, + }, + { + RequestedColumns: []uint64{3, 67, 102, 12, 44, 76}, + MissingColumns: map[uint64]bool{3: true}, + }, + }, + }, + } + + for _, tc := range testCases { + // Create a context. + ctx := context.Background() + + // Create the p2p service. + p2pService := p2ptest.NewTestP2P(t) + + // Create a peer custodying `custodyRequirement` subnets. + chainService, clock := defaultMockChain(t) + + // Custody columns: [6, 38, 70, 102] + createAndConnectPeer(t, p2pService, chainService, emptyHeader, custodyRequirement, tc.columnsNotToRespond, 1) + + // Custody columns: [3, 35, 67, 99] + createAndConnectPeer(t, p2pService, chainService, emptyHeader, custodyRequirement, tc.columnsNotToRespond, 2) + + // Custody columns: [12, 44, 76, 108] + createAndConnectPeer(t, p2pService, chainService, emptyHeader, custodyRequirement, tc.columnsNotToRespond, 3) + + service := &Service{ + cfg: &config{ + p2p: p2pService, + clock: clock, + }, + ctx: ctx, + ctxMap: map[[4]byte]int{{245, 165, 253, 66}: version.Deneb}, + } + + actualSuccess, actualRoundSummaries, err := service.incrementalDAS(emptyHeaderRoot, tc.possibleColumnsToRequest, tc.samplesCount) + + require.NoError(t, err) + require.Equal(t, tc.expectedSuccess, actualSuccess) + require.DeepEqual(t, tc.expectedRoundSummaries, actualRoundSummaries) + } +} diff --git a/beacon-chain/sync/rpc_send_request.go b/beacon-chain/sync/rpc_send_request.go index 054c48a96a6..00823c8c872 100644 --- a/beacon-chain/sync/rpc_send_request.go +++ b/beacon-chain/sync/rpc_send_request.go @@ -482,7 +482,8 @@ func dataColumnValidatorFromRootReq(req *p2ptypes.DataColumnSidecarsByRootReq) D 
 		columnIds[blockRoot][sc.ColumnIndex] = true
 	}
 	return func(sc blocks.RODataColumn) error {
-		columnIndices := columnIds[sc.BlockRoot()]
+		root := sc.BlockRoot()
+		columnIndices := columnIds[root]
 		if columnIndices == nil {
 			return errors.Wrapf(errUnrequested, "root=%#x", sc.BlockRoot())
 		}