diff --git a/les/backend.go b/les/backend.go
index 944e7695d21..a50fe0ced5b 100644
--- a/les/backend.go
+++ b/les/backend.go
@@ -119,7 +119,7 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
 	leth.retriever = newRetrieveManager(peers, leth.reqDist, leth.serverPool)
 
 	leth.odr = NewLesOdr(chainDb, light.DefaultClientIndexerConfig, leth.retriever)
-	leth.chtIndexer = light.NewChtIndexer(chainDb, leth.odr, params.CHTFrequencyClient, params.HelperTrieConfirmations)
+	leth.chtIndexer = light.NewChtIndexer(chainDb, leth.odr, params.CHTFrequency, params.HelperTrieConfirmations)
 	leth.bloomTrieIndexer = light.NewBloomTrieIndexer(chainDb, leth.odr, params.BloomBitsBlocksClient, params.BloomTrieFrequency)
 	leth.odr.SetIndexers(leth.chtIndexer, leth.bloomTrieIndexer, leth.bloomIndexer)
 
@@ -179,8 +179,6 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) {
 func lesTopic(genesisHash common.Hash, protocolVersion uint) discv5.Topic {
 	var name string
 	switch protocolVersion {
-	case lpv1:
-		name = "LES"
 	case lpv2:
 		name = "LES2"
 	default:
diff --git a/les/benchmark.go b/les/benchmark.go
index cb302c6ea5c..925d1d89e8d 100644
--- a/les/benchmark.go
+++ b/les/benchmark.go
@@ -135,8 +135,7 @@ func (b *benchmarkHelperTrie) init(pm *ProtocolManager, count int) error {
 		b.sectionCount, b.headNum, _ = pm.server.bloomTrieIndexer.Sections()
 	} else {
 		b.sectionCount, _, _ = pm.server.chtIndexer.Sections()
-		b.sectionCount /= (params.CHTFrequencyClient / params.CHTFrequencyServer)
-		b.headNum = b.sectionCount*params.CHTFrequencyClient - 1
+		b.headNum = b.sectionCount*params.CHTFrequency - 1
 	}
 	if b.sectionCount == 0 {
 		return fmt.Errorf("no processed sections available")
diff --git a/les/commons.go b/les/commons.go
index 21fb25714af..32fd654499d 100644
--- a/les/commons.go
+++ b/les/commons.go
@@ -80,28 +80,16 @@ func (c *lesCommons) nodeInfo() interface{} {
 	sections, _, _ := c.chtIndexer.Sections()
 	sections2, _, _ := c.bloomTrieIndexer.Sections()
 
-	if !c.protocolManager.lightSync {
-		// convert to client section size if running in server mode
-		sections /= c.iConfig.PairChtSize / c.iConfig.ChtSize
-	}
-
 	if sections2 < sections {
 		sections = sections2
 	}
 	if sections > 0 {
 		sectionIndex := sections - 1
 		sectionHead := c.bloomTrieIndexer.SectionHead(sectionIndex)
-		var chtRoot common.Hash
-		if c.protocolManager.lightSync {
-			chtRoot = light.GetChtRoot(c.chainDb, sectionIndex, sectionHead)
-		} else {
-			idxV2 := (sectionIndex+1)*c.iConfig.PairChtSize/c.iConfig.ChtSize - 1
-			chtRoot = light.GetChtRoot(c.chainDb, idxV2, sectionHead)
-		}
 		cht = params.TrustedCheckpoint{
 			SectionIndex: sectionIndex,
 			SectionHead:  sectionHead,
-			CHTRoot:      chtRoot,
+			CHTRoot:      light.GetChtRoot(c.chainDb, sectionIndex, sectionHead),
 			BloomRoot:    light.GetBloomTrieRoot(c.chainDb, sectionIndex, sectionHead),
 		}
 	}
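Note: with CHTFrequencyClient and CHTFrequencyServer merged into a single CHTFrequency, the client/server section-index conversions removed above become unnecessary. A minimal standalone sketch of the arithmetic the removed code in les/commons.go performed (the constants and function name here are illustrative, not part of this patch):

package main

import "fmt"

const (
	oldServerFreq = 4096  // former params.CHTFrequencyServer
	oldClientFreq = 32768 // former params.CHTFrequencyClient
)

// serverToClientSection converts a server-side section count to the
// client-side count, as servers had to do while they indexed CHTs in
// 4096-block sections but advertised 32768-block sections to clients.
// With one unified CHTFrequency, the conversion becomes the identity.
func serverToClientSection(serverSections uint64) uint64 {
	return serverSections / (oldClientFreq / oldServerFreq)
}

func main() {
	// 24 server sections of 4096 blocks cover 3 client sections of 32768.
	fmt.Println(serverToClientSection(24)) // 3
}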
diff --git a/les/costtracker.go b/les/costtracker.go
index 69531937ef5..014b888c01b 100644
--- a/les/costtracker.go
+++ b/les/costtracker.go
@@ -39,11 +39,8 @@ var (
 		GetBlockBodiesMsg:      {0, 700000},
 		GetReceiptsMsg:         {0, 1000000},
 		GetCodeMsg:             {0, 450000},
-		GetProofsV1Msg:         {0, 600000},
 		GetProofsV2Msg:         {0, 600000},
-		GetHeaderProofsMsg:     {0, 1000000},
 		GetHelperTrieProofsMsg: {0, 1000000},
-		SendTxMsg:              {0, 450000},
 		SendTxV2Msg:            {0, 450000},
 		GetTxStatusMsg:         {0, 250000},
 	}
@@ -53,11 +50,8 @@ var (
 		GetBlockBodiesMsg:      {0, 40},
 		GetReceiptsMsg:         {0, 40},
 		GetCodeMsg:             {0, 80},
-		GetProofsV1Msg:         {0, 80},
 		GetProofsV2Msg:         {0, 80},
-		GetHeaderProofsMsg:     {0, 20},
 		GetHelperTrieProofsMsg: {0, 20},
-		SendTxMsg:              {0, 66000},
 		SendTxV2Msg:            {0, 66000},
 		GetTxStatusMsg:         {0, 50},
 	}
@@ -67,11 +61,8 @@ var (
 		GetBlockBodiesMsg:      {0, 100000},
 		GetReceiptsMsg:         {0, 200000},
 		GetCodeMsg:             {0, 50000},
-		GetProofsV1Msg:         {0, 4000},
 		GetProofsV2Msg:         {0, 4000},
-		GetHeaderProofsMsg:     {0, 4000},
 		GetHelperTrieProofsMsg: {0, 4000},
-		SendTxMsg:              {0, 0},
 		SendTxV2Msg:            {0, 100},
 		GetTxStatusMsg:         {0, 100},
 	}
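Each table entry above is a {baseCost, reqCost} pair, and the charge for a request batch is linear in the element count. A sketch of how such a table is applied to a batch (the requestCost type here is illustrative, not the exact les implementation):

package main

import "fmt"

// requestCost mirrors the {baseCost, reqCost} pairs in the tables above;
// the struct and function names are illustrative.
type requestCost struct {
	baseCost, reqCost uint64
}

// cost charges a flat base fee plus a per-element fee, which is how the
// LES flow-control cost tables price a batch of n requests.
func cost(c requestCost, n uint64) uint64 {
	return c.baseCost + c.reqCost*n
}

func main() {
	getProofsV2 := requestCost{0, 600000}
	fmt.Println(cost(getProofsV2, 3)) // 1800000
}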
p.Log().Trace("Received proofs response") - // A batch of merkle proofs arrived to one of our previous requests - var resp struct { - ReqID, BV uint64 - Data []light.NodeList - } - if err := msg.Decode(&resp); err != nil { - return errResp(ErrDecode, "msg %v: %v", msg, err) - } - p.fcServer.ReceivedReply(resp.ReqID, resp.BV) - deliverMsg = &Msg{ - MsgType: MsgProofsV1, - ReqID: resp.ReqID, - Obj: resp.Data, - } - case ProofsV2Msg: if pm.odr == nil { return errResp(ErrUnexpectedResponse, "") @@ -969,54 +874,6 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { Obj: resp.Data, } - case GetHeaderProofsMsg: - p.Log().Trace("Received headers proof request") - // Decode the retrieval message - var req struct { - ReqID uint64 - Reqs []ChtReq - } - if err := msg.Decode(&req); err != nil { - return errResp(ErrDecode, "msg %v: %v", msg, err) - } - // Gather state data until the fetch or network limits is reached - var ( - bytes int - proofs []ChtResp - ) - reqCnt := len(req.Reqs) - if !accept(req.ReqID, uint64(reqCnt), MaxHelperTrieProofsFetch) { - return errResp(ErrRequestRejected, "") - } - go func() { - trieDb := trie.NewDatabase(rawdb.NewTable(pm.chainDb, light.ChtTablePrefix)) - for i, req := range req.Reqs { - if i != 0 && !task.waitOrStop() { - return - } - if header := pm.blockchain.GetHeaderByNumber(req.BlockNum); header != nil { - sectionHead := rawdb.ReadCanonicalHash(pm.chainDb, req.ChtNum*pm.iConfig.ChtSize-1) - if root := light.GetChtRoot(pm.chainDb, req.ChtNum-1, sectionHead); root != (common.Hash{}) { - trie, err := trie.New(root, trieDb) - if err != nil { - continue - } - var encNumber [8]byte - binary.BigEndian.PutUint64(encNumber[:], req.BlockNum) - - var proof light.NodeList - trie.Prove(encNumber[:], 0, &proof) - - proofs = append(proofs, ChtResp{Header: header, Proof: proof}) - if bytes += proof.DataSize() + estHeaderRlpSize; bytes >= softResponseLimit { - break - } - } - } - } - sendResponse(req.ReqID, uint64(reqCnt), p.ReplyHeaderProofs(req.ReqID, proofs), task.done()) - }() - case GetHelperTrieProofsMsg: p.Log().Trace("Received helper trie proof request") // Decode the retrieval message @@ -1081,26 +938,6 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { sendResponse(req.ReqID, uint64(reqCnt), p.ReplyHelperTrieProofs(req.ReqID, HelperTrieResps{Proofs: nodes.NodeList(), AuxData: auxData}), task.done()) }() - case HeaderProofsMsg: - if pm.odr == nil { - return errResp(ErrUnexpectedResponse, "") - } - - p.Log().Trace("Received headers proof response") - var resp struct { - ReqID, BV uint64 - Data []ChtResp - } - if err := msg.Decode(&resp); err != nil { - return errResp(ErrDecode, "msg %v: %v", msg, err) - } - p.fcServer.ReceivedReply(resp.ReqID, resp.BV) - deliverMsg = &Msg{ - MsgType: MsgHeaderProofs, - ReqID: resp.ReqID, - Obj: resp.Data, - } - case HelperTrieProofsMsg: if pm.odr == nil { return errResp(ErrUnexpectedResponse, "") @@ -1122,29 +959,6 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { Obj: resp.Data, } - case SendTxMsg: - if pm.txpool == nil { - return errResp(ErrRequestRejected, "") - } - // Transactions arrived, parse all of them and deliver to the pool - var txs []*types.Transaction - if err := msg.Decode(&txs); err != nil { - return errResp(ErrDecode, "msg %v: %v", msg, err) - } - reqCnt := len(txs) - if !accept(0, uint64(reqCnt), MaxTxSend) { - return errResp(ErrRequestRejected, "") - } - go func() { - for i, tx := range txs { - if i != 0 && !task.waitOrStop() { - return - } - pm.txpool.AddRemotes([]*types.Transaction{tx}) - } - 
sendResponse(0, uint64(reqCnt), nil, task.done()) - }() - case SendTxV2Msg: if pm.txpool == nil { return errResp(ErrRequestRejected, "") @@ -1261,9 +1075,8 @@ func (pm *ProtocolManager) getAccount(triedb *trie.Database, root, hash common.H func (pm *ProtocolManager) getHelperTrie(id uint, idx uint64) (common.Hash, string) { switch id { case htCanonical: - idxV1 := (idx+1)*(pm.iConfig.PairChtSize/pm.iConfig.ChtSize) - 1 - sectionHead := rawdb.ReadCanonicalHash(pm.chainDb, (idxV1+1)*pm.iConfig.ChtSize-1) - return light.GetChtRoot(pm.chainDb, idxV1, sectionHead), light.ChtTablePrefix + sectionHead := rawdb.ReadCanonicalHash(pm.chainDb, (idx+1)*pm.iConfig.ChtSize-1) + return light.GetChtRoot(pm.chainDb, idx, sectionHead), light.ChtTablePrefix case htBloomBits: sectionHead := rawdb.ReadCanonicalHash(pm.chainDb, (idx+1)*pm.iConfig.BloomTrieSize-1) return light.GetBloomTrieRoot(pm.chainDb, idx, sectionHead), light.BloomTrieTablePrefix diff --git a/les/handler_test.go b/les/handler_test.go index 5cf31b8f57c..c1db65cf39a 100644 --- a/les/handler_test.go +++ b/les/handler_test.go @@ -46,7 +46,6 @@ func expectResponse(r p2p.MsgReader, msgcode, reqID, bv uint64, data interface{} } // Tests that block headers can be retrieved from a remote chain based on user queries. -func TestGetBlockHeadersLes1(t *testing.T) { testGetBlockHeaders(t, 1) } func TestGetBlockHeadersLes2(t *testing.T) { testGetBlockHeaders(t, 2) } func testGetBlockHeaders(t *testing.T, protocol int) { @@ -174,7 +173,6 @@ func testGetBlockHeaders(t *testing.T, protocol int) { } // Tests that block contents can be retrieved from a remote chain based on their hashes. -func TestGetBlockBodiesLes1(t *testing.T) { testGetBlockBodies(t, 1) } func TestGetBlockBodiesLes2(t *testing.T) { testGetBlockBodies(t, 2) } func testGetBlockBodies(t *testing.T, protocol int) { @@ -249,7 +247,6 @@ func testGetBlockBodies(t *testing.T, protocol int) { } // Tests that the contract codes can be retrieved based on account addresses. -func TestGetCodeLes1(t *testing.T) { testGetCode(t, 1) } func TestGetCodeLes2(t *testing.T) { testGetCode(t, 2) } func testGetCode(t *testing.T, protocol int) { @@ -281,7 +278,6 @@ func testGetCode(t *testing.T, protocol int) { } // Tests that the transaction receipts can be retrieved based on hashes. 
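With the V1-to-V2 index conversion gone, htCanonical now resolves the CHT root directly from the requested section index. A self-contained sketch of the block-number arithmetic that the new getHelperTrie code uses (chtSize stands in for pm.iConfig.ChtSize):

package main

import "fmt"

// sectionHeadNumber returns the block number of the last block in a CHT
// section, matching the (idx+1)*ChtSize-1 expression in getHelperTrie.
func sectionHeadNumber(idx, chtSize uint64) uint64 {
	return (idx+1)*chtSize - 1
}

func main() {
	// With unified 32768-block sections, section 0 ends at block 32767
	// and section 2 ends at block 98303.
	fmt.Println(sectionHeadNumber(0, 32768)) // 32767
	fmt.Println(sectionHeadNumber(2, 32768)) // 98303
}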
diff --git a/les/handler_test.go b/les/handler_test.go
index 5cf31b8f57c..c1db65cf39a 100644
--- a/les/handler_test.go
+++ b/les/handler_test.go
@@ -46,7 +46,6 @@ func expectResponse(r p2p.MsgReader, msgcode, reqID, bv uint64, data interface{}
 }
 
 // Tests that block headers can be retrieved from a remote chain based on user queries.
-func TestGetBlockHeadersLes1(t *testing.T) { testGetBlockHeaders(t, 1) }
 func TestGetBlockHeadersLes2(t *testing.T) { testGetBlockHeaders(t, 2) }
 
 func testGetBlockHeaders(t *testing.T, protocol int) {
@@ -174,7 +173,6 @@ func testGetBlockHeaders(t *testing.T, protocol int) {
 }
 
 // Tests that block contents can be retrieved from a remote chain based on their hashes.
-func TestGetBlockBodiesLes1(t *testing.T) { testGetBlockBodies(t, 1) }
 func TestGetBlockBodiesLes2(t *testing.T) { testGetBlockBodies(t, 2) }
 
 func testGetBlockBodies(t *testing.T, protocol int) {
@@ -249,7 +247,6 @@ func testGetBlockBodies(t *testing.T, protocol int) {
 }
 
 // Tests that the contract codes can be retrieved based on account addresses.
-func TestGetCodeLes1(t *testing.T) { testGetCode(t, 1) }
 func TestGetCodeLes2(t *testing.T) { testGetCode(t, 2) }
 
 func testGetCode(t *testing.T, protocol int) {
@@ -281,7 +278,6 @@ func testGetCode(t *testing.T, protocol int) {
 }
 
 // Tests that the transaction receipts can be retrieved based on hashes.
-func TestGetReceiptLes1(t *testing.T) { testGetReceipt(t, 1) }
 func TestGetReceiptLes2(t *testing.T) { testGetReceipt(t, 2) }
 
 func testGetReceipt(t *testing.T, protocol int) {
@@ -307,7 +303,6 @@ func testGetReceipt(t *testing.T, protocol int) {
 }
 
 // Tests that trie merkle proofs can be retrieved
-func TestGetProofsLes1(t *testing.T) { testGetProofs(t, 1) }
 func TestGetProofsLes2(t *testing.T) { testGetProofs(t, 2) }
 
 func testGetProofs(t *testing.T, protocol int) {
@@ -316,10 +311,7 @@ func testGetProofs(t *testing.T, protocol int) {
 	defer tearDown()
 	bc := server.pm.blockchain.(*core.BlockChain)
 
-	var (
-		proofreqs []ProofReq
-		proofsV1  [][]rlp.RawValue
-	)
+	var proofreqs []ProofReq
 	proofsV2 := light.NewNodeSet()
 
 	accounts := []common.Address{testBankAddress, acc1Addr, acc2Addr, {}}
@@ -334,112 +326,61 @@ func testGetProofs(t *testing.T, protocol int) {
 				Key:   crypto.Keccak256(acc[:]),
 			}
 			proofreqs = append(proofreqs, req)
-
-			switch protocol {
-			case 1:
-				var proof light.NodeList
-				trie.Prove(crypto.Keccak256(acc[:]), 0, &proof)
-				proofsV1 = append(proofsV1, proof)
-			case 2:
-				trie.Prove(crypto.Keccak256(acc[:]), 0, proofsV2)
-			}
+			trie.Prove(crypto.Keccak256(acc[:]), 0, proofsV2)
 		}
 	}
 	// Send the proof request and verify the response
-	switch protocol {
-	case 1:
-		cost := server.tPeer.GetRequestCost(GetProofsV1Msg, len(proofreqs))
-		sendRequest(server.tPeer.app, GetProofsV1Msg, 42, cost, proofreqs)
-		if err := expectResponse(server.tPeer.app, ProofsV1Msg, 42, testBufLimit, proofsV1); err != nil {
-			t.Errorf("proofs mismatch: %v", err)
-		}
-	case 2:
-		cost := server.tPeer.GetRequestCost(GetProofsV2Msg, len(proofreqs))
-		sendRequest(server.tPeer.app, GetProofsV2Msg, 42, cost, proofreqs)
-		if err := expectResponse(server.tPeer.app, ProofsV2Msg, 42, testBufLimit, proofsV2.NodeList()); err != nil {
-			t.Errorf("proofs mismatch: %v", err)
-		}
-	}
+	cost := server.tPeer.GetRequestCost(GetProofsV2Msg, len(proofreqs))
+	sendRequest(server.tPeer.app, GetProofsV2Msg, 42, cost, proofreqs)
+	if err := expectResponse(server.tPeer.app, ProofsV2Msg, 42, testBufLimit, proofsV2.NodeList()); err != nil {
+		t.Errorf("proofs mismatch: %v", err)
+	}
 }
 
 // Tests that CHT proofs can be correctly retrieved.
-func TestGetCHTProofsLes1(t *testing.T) { testGetCHTProofs(t, 1) }
 func TestGetCHTProofsLes2(t *testing.T) { testGetCHTProofs(t, 2) }
 
 func testGetCHTProofs(t *testing.T, protocol int) {
 	config := light.TestServerIndexerConfig
-	frequency := config.ChtSize
-	if protocol == 2 {
-		frequency = config.PairChtSize
-	}
 
 	waitIndexers := func(cIndexer, bIndexer, btIndexer *core.ChainIndexer) {
-		expectSections := frequency / config.ChtSize
 		for {
 			cs, _, _ := cIndexer.Sections()
-			bs, _, _ := bIndexer.Sections()
-			if cs >= expectSections && bs >= expectSections {
+			if cs >= 1 {
 				break
 			}
 			time.Sleep(10 * time.Millisecond)
 		}
 	}
-	server, tearDown := newServerEnv(t, int(frequency+config.ChtConfirms), protocol, waitIndexers)
+	server, tearDown := newServerEnv(t, int(config.ChtSize+config.ChtConfirms), protocol, waitIndexers)
 	defer tearDown()
 	bc := server.pm.blockchain.(*core.BlockChain)
 
 	// Assemble the proofs from the different protocols
-	header := bc.GetHeaderByNumber(frequency - 1)
+	header := bc.GetHeaderByNumber(config.ChtSize - 1)
 	rlp, _ := rlp.EncodeToBytes(header)
 
 	key := make([]byte, 8)
-	binary.BigEndian.PutUint64(key, frequency-1)
+	binary.BigEndian.PutUint64(key, config.ChtSize-1)
 
-	proofsV1 := []ChtResp{{
-		Header: header,
-	}}
 	proofsV2 := HelperTrieResps{
 		AuxData: [][]byte{rlp},
 	}
-	switch protocol {
-	case 1:
-		root := light.GetChtRoot(server.db, 0, bc.GetHeaderByNumber(frequency-1).Hash())
-		trie, _ := trie.New(root, trie.NewDatabase(rawdb.NewTable(server.db, light.ChtTablePrefix)))
-
-		var proof light.NodeList
-		trie.Prove(key, 0, &proof)
-		proofsV1[0].Proof = proof
-
-	case 2:
-		root := light.GetChtRoot(server.db, (frequency/config.ChtSize)-1, bc.GetHeaderByNumber(frequency-1).Hash())
-		trie, _ := trie.New(root, trie.NewDatabase(rawdb.NewTable(server.db, light.ChtTablePrefix)))
-		trie.Prove(key, 0, &proofsV2.Proofs)
-	}
+	root := light.GetChtRoot(server.db, 0, bc.GetHeaderByNumber(config.ChtSize-1).Hash())
+	trie, _ := trie.New(root, trie.NewDatabase(rawdb.NewTable(server.db, light.ChtTablePrefix)))
+	trie.Prove(key, 0, &proofsV2.Proofs)
 
 	// Assemble the requests for the different protocols
-	requestsV1 := []ChtReq{{
-		ChtNum:   frequency / config.ChtSize,
-		BlockNum: frequency - 1,
-	}}
 	requestsV2 := []HelperTrieReq{{
 		Type:    htCanonical,
-		TrieIdx: frequency/config.PairChtSize - 1,
+		TrieIdx: 0,
 		Key:     key,
 		AuxReq:  auxHeader,
 	}}
 	// Send the proof request and verify the response
-	switch protocol {
-	case 1:
-		cost := server.tPeer.GetRequestCost(GetHeaderProofsMsg, len(requestsV1))
-		sendRequest(server.tPeer.app, GetHeaderProofsMsg, 42, cost, requestsV1)
-		if err := expectResponse(server.tPeer.app, HeaderProofsMsg, 42, testBufLimit, proofsV1); err != nil {
-			t.Errorf("proofs mismatch: %v", err)
-		}
-	case 2:
-		cost := server.tPeer.GetRequestCost(GetHelperTrieProofsMsg, len(requestsV2))
-		sendRequest(server.tPeer.app, GetHelperTrieProofsMsg, 42, cost, requestsV2)
-		if err := expectResponse(server.tPeer.app, HelperTrieProofsMsg, 42, testBufLimit, proofsV2); err != nil {
-			t.Errorf("proofs mismatch: %v", err)
-		}
-	}
+	cost := server.tPeer.GetRequestCost(GetHelperTrieProofsMsg, len(requestsV2))
+	sendRequest(server.tPeer.app, GetHelperTrieProofsMsg, 42, cost, requestsV2)
+	if err := expectResponse(server.tPeer.app, HelperTrieProofsMsg, 42, testBufLimit, proofsV2); err != nil {
+		t.Errorf("proofs mismatch: %v", err)
+	}
 }
 
@@ -449,10 +390,8 @@ func TestGetBloombitsProofs(t *testing.T) {
 	waitIndexers := func(cIndexer, bIndexer, btIndexer *core.ChainIndexer) {
 		for {
-			cs, _, _ := cIndexer.Sections()
-			bs, _, _ := bIndexer.Sections()
 			bts, _, _ := btIndexer.Sections()
-			if cs >= 8 && bs >= 8 && bts >= 1 {
+			if bts >= 1 {
 				break
 			}
 			time.Sleep(10 * time.Millisecond)
diff --git a/les/odr.go b/les/odr.go
index 5d98c66a907..daf2ea19ed4 100644
--- a/les/odr.go
+++ b/les/odr.go
@@ -84,9 +84,7 @@ const (
 	MsgBlockBodies = iota
 	MsgCode
 	MsgReceipts
-	MsgProofsV1
 	MsgProofsV2
-	MsgHeaderProofs
 	MsgHelperTrieProofs
 )
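Dropping MsgProofsV1 and MsgHeaderProofs from the iota block renumbers the remaining constants. That should be safe because these values only tag responses inside the client's ODR delivery path and are not wire message codes; a sketch of the effect (values in comments are derived from the iota pattern above):

package main

import "fmt"

// Same iota pattern as les/odr.go after the removal; the values shift
// down but remain purely internal identifiers.
const (
	MsgBlockBodies = iota // 0
	MsgCode               // 1
	MsgReceipts           // 2
	MsgProofsV2           // 3 (was 4 while MsgProofsV1 existed)
	MsgHelperTrieProofs   // 4 (was 6)
)

func main() {
	fmt.Println(MsgProofsV2, MsgHelperTrieProofs) // 3 4
}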
diff --git a/les/odr_requests.go b/les/odr_requests.go
index 6bd4a29310a..66d6175b8a3 100644
--- a/les/odr_requests.go
+++ b/les/odr_requests.go
@@ -188,14 +188,7 @@ type TrieRequest light.TrieRequest
 
 // GetCost returns the cost of the given ODR request according to the serving
 // peer's cost table (implementation of LesOdrRequest)
 func (r *TrieRequest) GetCost(peer *peer) uint64 {
-	switch peer.version {
-	case lpv1:
-		return peer.GetRequestCost(GetProofsV1Msg, 1)
-	case lpv2:
-		return peer.GetRequestCost(GetProofsV2Msg, 1)
-	default:
-		panic(nil)
-	}
+	return peer.GetRequestCost(GetProofsV2Msg, 1)
 }
 
 // CanSend tells if a certain peer is suitable for serving the given request
@@ -220,38 +213,22 @@ func (r *TrieRequest) Request(reqID uint64, peer *peer) error {
 func (r *TrieRequest) Validate(db ethdb.Database, msg *Msg) error {
 	log.Debug("Validating trie proof", "root", r.Id.Root, "key", r.Key)
 
-	switch msg.MsgType {
-	case MsgProofsV1:
-		proofs := msg.Obj.([]light.NodeList)
-		if len(proofs) != 1 {
-			return errInvalidEntryCount
-		}
-		nodeSet := proofs[0].NodeSet()
-		// Verify the proof and store if checks out
-		if _, _, err := trie.VerifyProof(r.Id.Root, r.Key, nodeSet); err != nil {
-			return fmt.Errorf("merkle proof verification failed: %v", err)
-		}
-		r.Proof = nodeSet
-		return nil
-
-	case MsgProofsV2:
-		proofs := msg.Obj.(light.NodeList)
-		// Verify the proof and store if checks out
-		nodeSet := proofs.NodeSet()
-		reads := &readTraceDB{db: nodeSet}
-		if _, _, err := trie.VerifyProof(r.Id.Root, r.Key, reads); err != nil {
-			return fmt.Errorf("merkle proof verification failed: %v", err)
-		}
-		// check if all nodes have been read by VerifyProof
-		if len(reads.reads) != nodeSet.KeyCount() {
-			return errUselessNodes
-		}
-		r.Proof = nodeSet
-		return nil
-
-	default:
+	if msg.MsgType != MsgProofsV2 {
 		return errInvalidMessageType
 	}
+	proofs := msg.Obj.(light.NodeList)
+	// Verify the proof and store if checks out
+	nodeSet := proofs.NodeSet()
+	reads := &readTraceDB{db: nodeSet}
+	if _, _, err := trie.VerifyProof(r.Id.Root, r.Key, reads); err != nil {
+		return fmt.Errorf("merkle proof verification failed: %v", err)
+	}
+	// check if all nodes have been read by VerifyProof
+	if len(reads.reads) != nodeSet.KeyCount() {
+		return errUselessNodes
+	}
+	r.Proof = nodeSet
+	return nil
 }
 
 type CodeReq struct {
@@ -330,32 +307,13 @@ type HelperTrieResps struct { // describes all responses, not just a single one
 	AuxData [][]byte
 }
 
-// legacy LES/1
-type ChtReq struct {
-	ChtNum, BlockNum uint64
-	FromLevel        uint
-}
-
-// legacy LES/1
-type ChtResp struct {
-	Header *types.Header
-	Proof  []rlp.RawValue
-}
-
 // ODR request type for requesting headers by Canonical Hash Trie, see LesOdrRequest interface
 type ChtRequest light.ChtRequest
 
 // GetCost returns the cost of the given ODR request according to the serving
 // peer's cost table (implementation of LesOdrRequest)
 func (r *ChtRequest) GetCost(peer *peer) uint64 {
-	switch peer.version {
-	case lpv1:
-		return peer.GetRequestCost(GetHeaderProofsMsg, 1)
-	case lpv2:
-		return peer.GetRequestCost(GetHelperTrieProofsMsg, 1)
-	default:
-		panic(nil)
-	}
+	return peer.GetRequestCost(GetHelperTrieProofsMsg, 1)
 }
 
 // CanSend tells if a certain peer is suitable for serving the given request
@@ -377,21 +335,7 @@ func (r *ChtRequest) Request(reqID uint64, peer *peer) error {
 		Key:     encNum[:],
 		AuxReq:  auxHeader,
 	}
-	switch peer.version {
-	case lpv1:
-		var reqsV1 ChtReq
-		if req.Type != htCanonical || req.AuxReq != auxHeader || len(req.Key) != 8 {
-			return fmt.Errorf("Request invalid in LES/1 mode")
-		}
-		blockNum := binary.BigEndian.Uint64(req.Key)
-		// convert HelperTrie request to old CHT request
-		reqsV1 = ChtReq{ChtNum: (req.TrieIdx + 1) * (r.Config.ChtSize / r.Config.PairChtSize), BlockNum: blockNum, FromLevel: req.FromLevel}
-		return peer.RequestHelperTrieProofs(reqID, r.GetCost(peer), []ChtReq{reqsV1})
-	case lpv2:
-		return peer.RequestHelperTrieProofs(reqID, r.GetCost(peer), []HelperTrieReq{req})
-	default:
-		panic(nil)
-	}
+	return peer.RequestHelperTrieProofs(reqID, r.GetCost(peer), []HelperTrieReq{req})
 }
 
 // Valid processes an ODR request reply message from the LES network
@@ -400,78 +344,50 @@ func (r *ChtRequest) Request(reqID uint64, peer *peer) error {
 func (r *ChtRequest) Validate(db ethdb.Database, msg *Msg) error {
 	log.Debug("Validating CHT", "cht", r.ChtNum, "block", r.BlockNum)
 
-	switch msg.MsgType {
-	case MsgHeaderProofs: // LES/1 backwards compatibility
-		proofs := msg.Obj.([]ChtResp)
-		if len(proofs) != 1 {
-			return errInvalidEntryCount
-		}
-		proof := proofs[0]
-
-		// Verify the CHT
-		var encNumber [8]byte
-		binary.BigEndian.PutUint64(encNumber[:], r.BlockNum)
-
-		value, _, err := trie.VerifyProof(r.ChtRoot, encNumber[:], light.NodeList(proof.Proof).NodeSet())
-		if err != nil {
-			return err
-		}
-		var node light.ChtNode
-		if err := rlp.DecodeBytes(value, &node); err != nil {
-			return err
-		}
-		if node.Hash != proof.Header.Hash() {
-			return errCHTHashMismatch
-		}
-		// Verifications passed, store and return
-		r.Header = proof.Header
-		r.Proof = light.NodeList(proof.Proof).NodeSet()
-		r.Td = node.Td
-	case MsgHelperTrieProofs:
-		resp := msg.Obj.(HelperTrieResps)
-		if len(resp.AuxData) != 1 {
-			return errInvalidEntryCount
-		}
-		nodeSet := resp.Proofs.NodeSet()
-		headerEnc := resp.AuxData[0]
-		if len(headerEnc) == 0 {
-			return errHeaderUnavailable
-		}
-		header := new(types.Header)
-		if err := rlp.DecodeBytes(headerEnc, header); err != nil {
-			return errHeaderUnavailable
-		}
+	if msg.MsgType != MsgHelperTrieProofs {
+		return errInvalidMessageType
+	}
+	resp := msg.Obj.(HelperTrieResps)
+	if len(resp.AuxData) != 1 {
+		return errInvalidEntryCount
+	}
+	nodeSet := resp.Proofs.NodeSet()
+	headerEnc := resp.AuxData[0]
+	if len(headerEnc) == 0 {
+		return errHeaderUnavailable
+	}
+	header := new(types.Header)
+	if err := rlp.DecodeBytes(headerEnc, header); err != nil {
+		return errHeaderUnavailable
+	}
 
-		// Verify the CHT
-		var encNumber [8]byte
-		binary.BigEndian.PutUint64(encNumber[:], r.BlockNum)
+	// Verify the CHT
+	var encNumber [8]byte
+	binary.BigEndian.PutUint64(encNumber[:], r.BlockNum)
 
-		reads := &readTraceDB{db: nodeSet}
-		value, _, err := trie.VerifyProof(r.ChtRoot, encNumber[:], reads)
-		if err != nil {
-			return fmt.Errorf("merkle proof verification failed: %v", err)
-		}
-		if len(reads.reads) != nodeSet.KeyCount() {
-			return errUselessNodes
-		}
+	reads := &readTraceDB{db: nodeSet}
+	value, _, err := trie.VerifyProof(r.ChtRoot, encNumber[:], reads)
+	if err != nil {
+		return fmt.Errorf("merkle proof verification failed: %v", err)
+	}
+	if len(reads.reads) != nodeSet.KeyCount() {
+		return errUselessNodes
+	}
 
-		var node light.ChtNode
-		if err := rlp.DecodeBytes(value, &node); err != nil {
-			return err
-		}
-		if node.Hash != header.Hash() {
-			return errCHTHashMismatch
-		}
-		if r.BlockNum != header.Number.Uint64() {
-			return errCHTNumberMismatch
-		}
-		// Verifications passed, store and return
-		r.Header = header
-		r.Proof = nodeSet
-		r.Td = node.Td
-	default:
-		return errInvalidMessageType
+	var node light.ChtNode
+	if err := rlp.DecodeBytes(value, &node); err != nil {
+		return err
+	}
+	if node.Hash != header.Hash() {
+		return errCHTHashMismatch
+	}
+	if r.BlockNum != header.Number.Uint64() {
+		return errCHTNumberMismatch
 	}
+	// Verifications passed, store and return
+	r.Header = header
+	r.Proof = nodeSet
+	r.Td = node.Td
 	return nil
 }
diff --git a/les/odr_test.go b/les/odr_test.go
index ac81fbcf02f..bc587a1832d 100644
--- a/les/odr_test.go
+++ b/les/odr_test.go
@@ -38,8 +38,6 @@ import (
 
 type odrTestFn func(ctx context.Context, db ethdb.Database, config *params.ChainConfig, bc *core.BlockChain, lc *light.LightChain, bhash common.Hash) []byte
 
-func TestOdrGetBlockLes1(t *testing.T) { testOdr(t, 1, 1, odrGetBlock) }
-
 func TestOdrGetBlockLes2(t *testing.T) { testOdr(t, 2, 1, odrGetBlock) }
 
 func odrGetBlock(ctx context.Context, db ethdb.Database, config *params.ChainConfig, bc *core.BlockChain, lc *light.LightChain, bhash common.Hash) []byte {
@@ -56,8 +54,6 @@ func odrGetBlock(ctx context.Context, db ethdb.Database, config *params.ChainCon
 	return rlp
 }
 
-func TestOdrGetReceiptsLes1(t *testing.T) { testOdr(t, 1, 1, odrGetReceipts) }
-
 func TestOdrGetReceiptsLes2(t *testing.T) { testOdr(t, 2, 1, odrGetReceipts) }
 
 func odrGetReceipts(ctx context.Context, db ethdb.Database, config *params.ChainConfig, bc *core.BlockChain, lc *light.LightChain, bhash common.Hash) []byte {
@@ -78,8 +74,6 @@ func odrGetReceipts(ctx context.Context, db ethdb.Database, config *params.Chain
 	return rlp
 }
 
-func TestOdrAccountsLes1(t *testing.T) { testOdr(t, 1, 1, odrAccounts) }
-
 func TestOdrAccountsLes2(t *testing.T) { testOdr(t, 2, 1, odrAccounts) }
 
 func odrAccounts(ctx context.Context, db ethdb.Database, config *params.ChainConfig, bc *core.BlockChain, lc *light.LightChain, bhash common.Hash) []byte {
@@ -108,8 +102,6 @@ func odrAccounts(ctx context.Context, db ethdb.Database, config *params.ChainCon
 	return res
 }
 
-func TestOdrContractCallLes1(t *testing.T) { testOdr(t, 1, 2, odrContractCall) }
-
 func TestOdrContractCallLes2(t *testing.T) { testOdr(t, 2, 2, odrContractCall) }
 
 type callmsg struct {
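The surviving V2 validation path rejects padded responses: every node the server sends must actually be touched by VerifyProof. A self-contained sketch of that "no useless nodes" invariant, with a toy key-value tracer standing in for the les readTraceDB and NodeSet types (all names here are illustrative):

package main

import (
	"errors"
	"fmt"
)

// readTracer records which keys a verifier reads, mimicking the readTraceDB
// wrapper used by TrieRequest.Validate and ChtRequest.Validate.
type readTracer struct {
	db    map[string][]byte
	reads map[string]struct{}
}

func (r *readTracer) Get(key []byte) ([]byte, error) {
	r.reads[string(key)] = struct{}{}
	if v, ok := r.db[string(key)]; ok {
		return v, nil
	}
	return nil, errors.New("not found")
}

var errUselessNodes = errors.New("useless nodes in merkle proof nodeset")

// checkNoUselessNodes enforces the same invariant as the validation above:
// the number of distinct keys read must equal the number of keys supplied.
func checkNoUselessNodes(r *readTracer) error {
	if len(r.reads) != len(r.db) {
		return errUselessNodes
	}
	return nil
}

func main() {
	r := &readTracer{
		db:    map[string][]byte{"a": {1}, "b": {2}},
		reads: map[string]struct{}{},
	}
	r.Get([]byte("a"))                  // simulate verification touching only node "a"
	fmt.Println(checkNoUselessNodes(r)) // useless nodes in merkle proof nodeset
}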
diff --git a/les/peer.go b/les/peer.go
index 0c15add9c6b..bf3f0f7621c 100644
--- a/les/peer.go
+++ b/les/peer.go
@@ -34,10 +34,9 @@ import (
 )
 
 var (
-	errClosed             = errors.New("peer set is closed")
-	errAlreadyRegistered  = errors.New("peer is already registered")
-	errNotRegistered      = errors.New("peer is not registered")
-	errInvalidHelpTrieReq = errors.New("invalid help trie request")
+	errClosed            = errors.New("peer set is closed")
+	errAlreadyRegistered = errors.New("peer is already registered")
+	errNotRegistered     = errors.New("peer is not registered")
 )
 
 const maxResponseErrors = 50 // number of invalid responses tolerated (makes the protocol less brittle but still avoids spam)
@@ -244,18 +243,8 @@ func (p *peer) GetTxRelayCost(amount, size int) uint64 {
 	p.lock.RLock()
 	defer p.lock.RUnlock()
 
-	var msgcode uint64
-	switch p.version {
-	case lpv1:
-		msgcode = SendTxMsg
-	case lpv2:
-		msgcode = SendTxV2Msg
-	default:
-		panic(nil)
-	}
-
-	cost := p.fcCosts[msgcode].baseCost + p.fcCosts[msgcode].reqCost*uint64(amount)
-	sizeCost := p.fcCosts[msgcode].baseCost + p.fcCosts[msgcode].reqCost*uint64(size)/txSizeCostLimit
+	cost := p.fcCosts[SendTxV2Msg].baseCost + p.fcCosts[SendTxV2Msg].reqCost*uint64(amount)
+	sizeCost := p.fcCosts[SendTxV2Msg].baseCost + p.fcCosts[SendTxV2Msg].reqCost*uint64(size)/txSizeCostLimit
 	if sizeCost > cost {
 		cost = sizeCost
 	}
@@ -307,24 +296,12 @@ func (p *peer) ReplyReceiptsRLP(reqID uint64, receipts []rlp.RawValue) *reply {
 	return &reply{p.rw, ReceiptsMsg, reqID, data}
 }
 
-// ReplyProofs creates a reply with a batch of legacy LES/1 merkle proofs, corresponding to the ones requested.
-func (p *peer) ReplyProofs(reqID uint64, proofs proofsData) *reply {
-	data, _ := rlp.EncodeToBytes(proofs)
-	return &reply{p.rw, ProofsV1Msg, reqID, data}
-}
-
 // ReplyProofsV2 creates a reply with a batch of merkle proofs, corresponding to the ones requested.
 func (p *peer) ReplyProofsV2(reqID uint64, proofs light.NodeList) *reply {
 	data, _ := rlp.EncodeToBytes(proofs)
 	return &reply{p.rw, ProofsV2Msg, reqID, data}
 }
 
-// ReplyHeaderProofs creates a reply with a batch of legacy LES/1 header proofs, corresponding to the ones requested.
-func (p *peer) ReplyHeaderProofs(reqID uint64, proofs []ChtResp) *reply {
-	data, _ := rlp.EncodeToBytes(proofs)
-	return &reply{p.rw, HeaderProofsMsg, reqID, data}
-}
-
 // ReplyHelperTrieProofs creates a reply with a batch of HelperTrie proofs, corresponding to the ones requested.
 func (p *peer) ReplyHelperTrieProofs(reqID uint64, resp HelperTrieResps) *reply {
 	data, _ := rlp.EncodeToBytes(resp)
@@ -374,36 +351,13 @@ func (p *peer) RequestReceipts(reqID, cost uint64, hashes []common.Hash) error {
 // RequestProofs fetches a batch of merkle proofs from a remote node.
 func (p *peer) RequestProofs(reqID, cost uint64, reqs []ProofReq) error {
 	p.Log().Debug("Fetching batch of proofs", "count", len(reqs))
-	switch p.version {
-	case lpv1:
-		return sendRequest(p.rw, GetProofsV1Msg, reqID, cost, reqs)
-	case lpv2:
-		return sendRequest(p.rw, GetProofsV2Msg, reqID, cost, reqs)
-	default:
-		panic(nil)
-	}
+	return sendRequest(p.rw, GetProofsV2Msg, reqID, cost, reqs)
 }
 
 // RequestHelperTrieProofs fetches a batch of HelperTrie merkle proofs from a remote node.
-func (p *peer) RequestHelperTrieProofs(reqID, cost uint64, data interface{}) error {
-	switch p.version {
-	case lpv1:
-		reqs, ok := data.([]ChtReq)
-		if !ok {
-			return errInvalidHelpTrieReq
-		}
-		p.Log().Debug("Fetching batch of header proofs", "count", len(reqs))
-		return sendRequest(p.rw, GetHeaderProofsMsg, reqID, cost, reqs)
-	case lpv2:
-		reqs, ok := data.([]HelperTrieReq)
-		if !ok {
-			return errInvalidHelpTrieReq
-		}
-		p.Log().Debug("Fetching batch of HelperTrie proofs", "count", len(reqs))
-		return sendRequest(p.rw, GetHelperTrieProofsMsg, reqID, cost, reqs)
-	default:
-		panic(nil)
-	}
+func (p *peer) RequestHelperTrieProofs(reqID, cost uint64, reqs []HelperTrieReq) error {
+	p.Log().Debug("Fetching batch of HelperTrie proofs", "count", len(reqs))
+	return sendRequest(p.rw, GetHelperTrieProofsMsg, reqID, cost, reqs)
 }
 
 // RequestTxStatus fetches a batch of transaction status records from a remote node.
@@ -415,14 +369,7 @@ func (p *peer) RequestTxStatus(reqID, cost uint64, txHashes []common.Hash) error
 // SendTxStatus creates a reply with a batch of transactions to be added to the remote transaction pool.
 func (p *peer) SendTxs(reqID, cost uint64, txs rlp.RawValue) error {
 	p.Log().Debug("Sending batch of transactions", "size", len(txs))
-	switch p.version {
-	case lpv1:
-		return p2p.Send(p.rw, SendTxMsg, txs) // old message format does not include reqID
-	case lpv2:
-		return sendRequest(p.rw, SendTxV2Msg, reqID, cost, txs)
-	default:
-		panic(nil)
-	}
+	return sendRequest(p.rw, SendTxV2Msg, reqID, cost, txs)
 }
 
 type keyValueEntry struct {
diff --git a/les/protocol.go b/les/protocol.go
index 65395ac0520..86e450d01c5 100644
--- a/les/protocol.go
+++ b/les/protocol.go
@@ -33,19 +33,18 @@ import (
 
 // Constants to match up protocol versions and messages
 const (
-	lpv1 = 1
 	lpv2 = 2
 )
 
 // Supported versions of the les protocol (first is primary)
 var (
-	ClientProtocolVersions    = []uint{lpv2, lpv1}
-	ServerProtocolVersions    = []uint{lpv2, lpv1}
+	ClientProtocolVersions    = []uint{lpv2}
+	ServerProtocolVersions    = []uint{lpv2}
 	AdvertiseProtocolVersions = []uint{lpv2} // clients are searching for the first advertised protocol in the list
 )
 
 // Number of implemented message corresponding to different protocol versions.
-var ProtocolLengths = map[uint]uint64{lpv1: 15, lpv2: 22}
+var ProtocolLengths = map[uint]uint64{lpv2: 22}
 
 const (
 	NetworkId          = 1
@@ -54,7 +53,7 @@ const (
 
 // les protocol message codes
 const (
-	// Protocol messages belonging to LPV1
+	// Protocol messages inherited from LPV1
 	StatusMsg          = 0x00
 	AnnounceMsg        = 0x01
 	GetBlockHeadersMsg = 0x02
@@ -63,14 +62,9 @@ const (
 	BlockBodiesMsg     = 0x05
 	GetReceiptsMsg     = 0x06
 	ReceiptsMsg        = 0x07
-	GetProofsV1Msg     = 0x08
-	ProofsV1Msg        = 0x09
 	GetCodeMsg         = 0x0a
 	CodeMsg            = 0x0b
-	SendTxMsg          = 0x0c
-	GetHeaderProofsMsg = 0x0d
-	HeaderProofsMsg    = 0x0e
-	// Protocol messages belonging to LPV2
+	// Protocol messages introduced in LPV2
 	GetProofsV2Msg         = 0x0f
 	ProofsV2Msg            = 0x10
 	GetHelperTrieProofsMsg = 0x11
@@ -89,10 +83,7 @@ var requests = map[uint64]requestInfo{
 	GetBlockHeadersMsg:     {"GetBlockHeaders", MaxHeaderFetch},
 	GetBlockBodiesMsg:      {"GetBlockBodies", MaxBodyFetch},
 	GetReceiptsMsg:         {"GetReceipts", MaxReceiptFetch},
-	GetProofsV1Msg:         {"GetProofsV1", MaxProofsFetch},
 	GetCodeMsg:             {"GetCode", MaxCodeFetch},
-	SendTxMsg:              {"SendTx", MaxTxSend},
-	GetHeaderProofsMsg:     {"GetHeaderProofs", MaxHelperTrieProofsFetch},
 	GetProofsV2Msg:         {"GetProofsV2", MaxProofsFetch},
 	GetHelperTrieProofsMsg: {"GetHelperTrieProofs", MaxHelperTrieProofsFetch},
 	SendTxV2Msg:            {"SendTxV2", MaxTxSend},
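GetTxRelayCost now always prices against SendTxV2Msg: the peer is charged the larger of a per-transaction cost and a size-derived cost. A sketch of that max-of-two-linear-costs rule (the constant value here is illustrative, standing in for the les txSizeCostLimit):

package main

import "fmt"

const txSizeCostLimit = 0x4000 // illustrative stand-in for the les constant

// txRelayCost reproduces the shape of peer.GetTxRelayCost above: charge the
// larger of the per-transaction cost and the size-derived cost.
func txRelayCost(baseCost, reqCost uint64, amount, size int) uint64 {
	cost := baseCost + reqCost*uint64(amount)
	sizeCost := baseCost + reqCost*uint64(size)/txSizeCostLimit
	if sizeCost > cost {
		cost = sizeCost
	}
	return cost
}

func main() {
	// One small transaction is charged per item...
	fmt.Println(txRelayCost(0, 66000, 1, 200)) // 66000
	// ...but a single huge transaction is charged by size instead.
	fmt.Println(txRelayCost(0, 66000, 1, 1<<20)) // 4224000
}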
diff --git a/les/request_test.go b/les/request_test.go
index c9c18519828..e0d00d18c5d 100644
--- a/les/request_test.go
+++ b/les/request_test.go
@@ -36,24 +36,18 @@ func secAddr(addr common.Address) []byte {
 
 type accessTestFn func(db ethdb.Database, bhash common.Hash, number uint64) light.OdrRequest
 
-func TestBlockAccessLes1(t *testing.T) { testAccess(t, 1, tfBlockAccess) }
-
 func TestBlockAccessLes2(t *testing.T) { testAccess(t, 2, tfBlockAccess) }
 
 func tfBlockAccess(db ethdb.Database, bhash common.Hash, number uint64) light.OdrRequest {
 	return &light.BlockRequest{Hash: bhash, Number: number}
 }
 
-func TestReceiptsAccessLes1(t *testing.T) { testAccess(t, 1, tfReceiptsAccess) }
-
 func TestReceiptsAccessLes2(t *testing.T) { testAccess(t, 2, tfReceiptsAccess) }
 
 func tfReceiptsAccess(db ethdb.Database, bhash common.Hash, number uint64) light.OdrRequest {
 	return &light.ReceiptsRequest{Hash: bhash, Number: number}
 }
 
-func TestTrieEntryAccessLes1(t *testing.T) { testAccess(t, 1, tfTrieEntryAccess) }
-
 func TestTrieEntryAccessLes2(t *testing.T) { testAccess(t, 2, tfTrieEntryAccess) }
 
 func tfTrieEntryAccess(db ethdb.Database, bhash common.Hash, number uint64) light.OdrRequest {
@@ -63,8 +57,6 @@ func tfTrieEntryAccess(db ethdb.Database, bhash common.Hash, number uint64) ligh
 	return nil
 }
 
-func TestCodeAccessLes1(t *testing.T) { testAccess(t, 1, tfCodeAccess) }
-
 func TestCodeAccessLes2(t *testing.T) { testAccess(t, 2, tfCodeAccess) }
 
 func tfCodeAccess(db ethdb.Database, bhash common.Hash, num uint64) light.OdrRequest {
diff --git a/les/server.go b/les/server.go
index 3716685e179..6c2b227f424 100644
--- a/les/server.go
+++ b/les/server.go
@@ -89,7 +89,7 @@ func NewLesServer(eth *eth.Ethereum, config *eth.Config) (*LesServer, error) {
 			config:           config,
 			chainDb:          eth.ChainDb(),
 			iConfig:          light.DefaultServerIndexerConfig,
-			chtIndexer:       light.NewChtIndexer(eth.ChainDb(), nil, params.CHTFrequencyServer, params.HelperTrieProcessConfirmations),
+			chtIndexer:       light.NewChtIndexer(eth.ChainDb(), nil, params.CHTFrequency, params.HelperTrieProcessConfirmations),
 			bloomTrieIndexer: light.NewBloomTrieIndexer(eth.ChainDb(), nil, params.BloomBitsBlocks, params.BloomTrieFrequency),
 			protocolManager:  pm,
 		},
@@ -108,15 +108,11 @@ func NewLesServer(eth *eth.Ethereum, config *eth.Config) (*LesServer, error) {
 	srv.thcBlockProcessing = config.LightServ/100 + 1
 	srv.fcManager = flowcontrol.NewClientManager(nil, &mclock.System{})
 
-	chtV1SectionCount, _, _ := srv.chtIndexer.Sections() // indexer still uses LES/1 4k section size for backwards server compatibility
-	chtV2SectionCount := chtV1SectionCount / (params.CHTFrequencyClient / params.CHTFrequencyServer)
-	if chtV2SectionCount != 0 {
-		// convert to LES/2 section
-		chtLastSection := chtV2SectionCount - 1
-		// convert last LES/2 section index back to LES/1 index for chtIndexer.SectionHead
-		chtLastSectionV1 := (chtLastSection+1)*(params.CHTFrequencyClient/params.CHTFrequencyServer) - 1
-		chtSectionHead := srv.chtIndexer.SectionHead(chtLastSectionV1)
-		chtRoot := light.GetChtRoot(pm.chainDb, chtLastSectionV1, chtSectionHead)
+	chtSectionCount, _, _ := srv.chtIndexer.Sections()
+	if chtSectionCount != 0 {
+		chtLastSection := chtSectionCount - 1
+		chtSectionHead := srv.chtIndexer.SectionHead(chtLastSection)
+		chtRoot := light.GetChtRoot(pm.chainDb, chtLastSection, chtSectionHead)
 		logger.Info("Loaded CHT", "section", chtLastSection, "head", chtSectionHead, "root", chtRoot)
 	}
 	bloomTrieSectionCount, _, _ := srv.bloomTrieIndexer.Sections()
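Server startup now logs the last completed CHT section with no index translation at all. A sketch of the last-section arithmetic, assuming Sections() reports the number of complete sections as core.ChainIndexer does (the helper name is hypothetical):

package main

import "fmt"

// lastSection returns the index of the newest complete section given the
// count reported by the indexer, as NewLesServer now computes it.
func lastSection(sectionCount uint64) (uint64, bool) {
	if sectionCount == 0 {
		return 0, false // no CHT committed yet
	}
	return sectionCount - 1, true
}

func main() {
	if idx, ok := lastSection(3); ok {
		fmt.Println("Loaded CHT", "section", idx) // section 2
	}
}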
diff --git a/light/odr_test.go b/light/odr_test.go
index c1762c43ec9..55725d84e0a 100644
--- a/light/odr_test.go
+++ b/light/odr_test.go
@@ -99,7 +99,7 @@ func (odr *testOdr) IndexerConfig() *IndexerConfig {
 
 type odrTestFn func(ctx context.Context, db ethdb.Database, bc *core.BlockChain, lc *LightChain, bhash common.Hash) ([]byte, error)
 
-func TestOdrGetBlockLes1(t *testing.T) { testChainOdr(t, 1, odrGetBlock) }
+func TestOdrGetBlockLes2(t *testing.T) { testChainOdr(t, 1, odrGetBlock) }
 
 func odrGetBlock(ctx context.Context, db ethdb.Database, bc *core.BlockChain, lc *LightChain, bhash common.Hash) ([]byte, error) {
 	var block *types.Block
@@ -115,7 +115,7 @@ func odrGetBlock(ctx context.Context, db ethdb.Database, bc *core.BlockChain, lc
 	return rlp, nil
 }
 
-func TestOdrGetReceiptsLes1(t *testing.T) { testChainOdr(t, 1, odrGetReceipts) }
+func TestOdrGetReceiptsLes2(t *testing.T) { testChainOdr(t, 1, odrGetReceipts) }
 
 func odrGetReceipts(ctx context.Context, db ethdb.Database, bc *core.BlockChain, lc *LightChain, bhash common.Hash) ([]byte, error) {
 	var receipts types.Receipts
@@ -137,7 +137,7 @@ func odrGetReceipts(ctx context.Context, db ethdb.Database, bc *core.BlockChain,
 	return rlp, nil
 }
 
-func TestOdrAccountsLes1(t *testing.T) { testChainOdr(t, 1, odrAccounts) }
+func TestOdrAccountsLes2(t *testing.T) { testChainOdr(t, 1, odrAccounts) }
 
 func odrAccounts(ctx context.Context, db ethdb.Database, bc *core.BlockChain, lc *LightChain, bhash common.Hash) ([]byte, error) {
 	dummyAddr := common.HexToAddress("1234567812345678123456781234567812345678")
@@ -161,7 +161,7 @@ func odrAccounts(ctx context.Context, db ethdb.Database, bc *core.BlockChain, lc
 	return res, st.Error()
 }
 
-func TestOdrContractCallLes1(t *testing.T) { testChainOdr(t, 1, odrContractCall) }
+func TestOdrContractCallLes2(t *testing.T) { testChainOdr(t, 1, odrContractCall) }
 
 type callmsg struct {
 	types.Message
diff --git a/light/postprocess.go b/light/postprocess.go
index 306de46940c..2a837e6c1d8 100644
--- a/light/postprocess.go
+++ b/light/postprocess.go
@@ -41,9 +41,6 @@ type IndexerConfig struct {
 	// The block frequency for creating CHTs.
 	ChtSize uint64
 
-	// A special auxiliary field represents client's chtsize for server config, otherwise represents server's chtsize.
-	PairChtSize uint64
-
 	// The number of confirmations needed to generate/accept a canonical hash help trie.
 	ChtConfirms uint64
 
@@ -64,8 +61,7 @@ type IndexerConfig struct {
 var (
 	// DefaultServerIndexerConfig wraps a set of configs as a default indexer config for server side.
 	DefaultServerIndexerConfig = &IndexerConfig{
-		ChtSize:           params.CHTFrequencyServer,
-		PairChtSize:       params.CHTFrequencyClient,
+		ChtSize:           params.CHTFrequency,
 		ChtConfirms:       params.HelperTrieProcessConfirmations,
 		BloomSize:         params.BloomBitsBlocks,
 		BloomConfirms:     params.BloomConfirms,
@@ -74,8 +70,7 @@ var (
 	}
 	// DefaultClientIndexerConfig wraps a set of configs as a default indexer config for client side.
 	DefaultClientIndexerConfig = &IndexerConfig{
-		ChtSize:           params.CHTFrequencyClient,
-		PairChtSize:       params.CHTFrequencyServer,
+		ChtSize:           params.CHTFrequency,
 		ChtConfirms:       params.HelperTrieConfirmations,
 		BloomSize:         params.BloomBitsBlocksClient,
 		BloomConfirms:     params.HelperTrieConfirmations,
@@ -84,8 +79,7 @@ var (
 	}
 	// TestServerIndexerConfig wraps a set of configs as a test indexer config for server side.
 	TestServerIndexerConfig = &IndexerConfig{
-		ChtSize:           64,
-		PairChtSize:       512,
+		ChtSize:           512,
 		ChtConfirms:       4,
 		BloomSize:         64,
 		BloomConfirms:     4,
@@ -95,7 +89,6 @@ var (
 	// TestClientIndexerConfig wraps a set of configs as a test indexer config for client side.
 	TestClientIndexerConfig = &IndexerConfig{
 		ChtSize:       512,
-		PairChtSize:   64,
 		ChtConfirms:   32,
 		BloomSize:     512,
 		BloomConfirms: 32,
@@ -116,7 +109,7 @@ var (
 	ErrNoTrustedCht       = errors.New("no trusted canonical hash trie")
 	ErrNoTrustedBloomTrie = errors.New("no trusted bloom trie")
 	ErrNoHeader           = errors.New("header not found")
-	chtPrefix             = []byte("chtRoot-") // chtPrefix + chtNum (uint64 big endian) -> trie root hash
+	chtPrefix             = []byte("chtRootV2-") // chtPrefix + chtNum (uint64 big endian) -> trie root hash
 	ChtTablePrefix        = "cht-"
 )
 
@@ -127,7 +120,6 @@ type ChtNode struct {
 }
 
 // GetChtRoot reads the CHT root associated to the given section from the database
-// Note that sectionIdx is specified according to LES/1 CHT section size.
 func GetChtRoot(db ethdb.Database, sectionIdx uint64, sectionHead common.Hash) common.Hash {
 	var encNumber [8]byte
 	binary.BigEndian.PutUint64(encNumber[:], sectionIdx)
@@ -136,7 +128,6 @@ func GetChtRoot(db ethdb.Database, sectionIdx uint64, sectionHead common.Hash) c
 }
 
 // StoreChtRoot writes the CHT root associated to the given section into the database
-// Note that sectionIdx is specified according to LES/1 CHT section size.
 func StoreChtRoot(db ethdb.Database, sectionIdx uint64, sectionHead, root common.Hash) {
 	var encNumber [8]byte
 	binary.BigEndian.PutUint64(encNumber[:], sectionIdx)
@@ -163,7 +154,7 @@ func NewChtIndexer(db ethdb.Database, odr OdrBackend, size, confirms uint64) *co
 		triedb:      trie.NewDatabaseWithCache(trieTable, 1), // Use a tiny cache only to keep memory down
 		sectionSize: size,
 	}
-	return core.NewChainIndexer(db, rawdb.NewTable(db, "chtIndex-"), backend, size, confirms, time.Millisecond*100, "cht")
+	return core.NewChainIndexer(db, rawdb.NewTable(db, "chtIndexV2-"), backend, size, confirms, time.Millisecond*100, "cht")
 }
 
 // fetchMissingNodes tries to retrieve the last entry of the latest trusted CHT from the
@@ -235,9 +226,7 @@ func (c *ChtIndexerBackend) Commit() error {
 	}
 	c.triedb.Commit(root, false)
 
-	if ((c.section+1)*c.sectionSize)%params.CHTFrequencyClient == 0 {
-		log.Info("Storing CHT", "section", c.section*c.sectionSize/params.CHTFrequencyClient, "head", fmt.Sprintf("%064x", c.lastHash), "root", fmt.Sprintf("%064x", root))
-	}
+	log.Info("Storing CHT", "section", c.section, "head", fmt.Sprintf("%064x", c.lastHash), "root", fmt.Sprintf("%064x", root))
 	StoreChtRoot(c.diskdb, c.section, c.lastHash, root)
 	return nil
 }
diff --git a/params/network_params.go b/params/network_params.go
index f8731e89797..024c4af457e 100644
--- a/params/network_params.go
+++ b/params/network_params.go
@@ -32,13 +32,8 @@ const (
 	// considered probably final and its rotated bits are calculated.
 	BloomConfirms = 256
 
-	// CHTFrequencyClient is the block frequency for creating CHTs on the client side.
-	CHTFrequencyClient = 32768
-
-	// CHTFrequencyServer is the block frequency for creating CHTs on the server side.
-	// Eventually this can be merged back with the client version, but that requires a
-	// full database upgrade, so that should be left for a suitable moment.
-	CHTFrequencyServer = 4096
+	// CHTFrequency is the block frequency for creating CHTs
+	CHTFrequency = 32768
 
 	// BloomTrieFrequency is the block frequency for creating BloomTrie on both
 	// server/client sides.
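Because servers previously committed CHTs every 4096 blocks, existing databases hold roots that are incompatible with the unified 32768-block sections; switching the key prefixes to "chtRootV2-" and "chtIndexV2-" makes the old entries invisible so the indexer regenerates them from scratch. A sketch of the key scheme, following the layout described in the chtPrefix comment above (the sectionHead bytes are illustrative):

package main

import (
	"encoding/binary"
	"fmt"
)

// chtRootKey builds the database key chtPrefix + chtNum (uint64 big endian)
// + sectionHead, per the chtPrefix comment in light/postprocess.go.
func chtRootKey(prefix []byte, sectionIdx uint64, sectionHead []byte) []byte {
	var encNumber [8]byte
	binary.BigEndian.PutUint64(encNumber[:], sectionIdx)
	return append(append(append([]byte{}, prefix...), encNumber[:]...), sectionHead...)
}

func main() {
	head := make([]byte, 32) // stand-in for the section head hash
	oldKey := chtRootKey([]byte("chtRoot-"), 0, head)
	newKey := chtRootKey([]byte("chtRootV2-"), 0, head)
	// Different prefixes mean pre-upgrade roots are simply never found.
	fmt.Println(len(oldKey), len(newKey)) // 48 50
}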