From 109e8ab81492ba4b9548c375c9dfc10884eeb8cd Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Tue, 7 Jun 2022 00:08:33 +0700 Subject: [PATCH 001/136] StageSenders: wrong canonical array size at initial sync with snapshots (#4378) * save * save * save --- cmd/integration/commands/refetence_db.go | 3 +++ eth/stagedsync/stage_bodies.go | 11 +++-------- eth/stagedsync/stage_senders.go | 18 ++++++++++-------- eth/stagedsync/sync.go | 3 +-- 4 files changed, 17 insertions(+), 18 deletions(-) diff --git a/cmd/integration/commands/refetence_db.go b/cmd/integration/commands/refetence_db.go index f58fca9fd00..4e0a1697d17 100644 --- a/cmd/integration/commands/refetence_db.go +++ b/cmd/integration/commands/refetence_db.go @@ -13,9 +13,11 @@ import ( "github.com/ledgerwatch/erigon-lib/kv" mdbx2 "github.com/ledgerwatch/erigon-lib/kv/mdbx" "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/math" "github.com/ledgerwatch/log/v3" "github.com/spf13/cobra" "github.com/torquem-ch/mdbx-go/mdbx" + "go.uber.org/atomic" ) var stateBuckets = []string{ @@ -371,6 +373,7 @@ func kv2kv(ctx context.Context, src, dst kv.RwDB) error { continue } + kv.ReadAhead(ctx, src, atomic.NewBool(false), name, nil, math.MaxUint32) c, err := dstTx.RwCursor(name) if err != nil { return err diff --git a/eth/stagedsync/stage_bodies.go b/eth/stagedsync/stage_bodies.go index 4da5e990913..391a4eaf936 100644 --- a/eth/stagedsync/stage_bodies.go +++ b/eth/stagedsync/stage_bodies.go @@ -58,6 +58,9 @@ func BodiesForward( test bool, // Set to true in tests, allows the stage to fail rather than wait indefinitely firstCycle bool, ) error { + if cfg.snapshots != nil && s.BlockNumber < cfg.snapshots.BlocksAvailable() { + s.BlockNumber = cfg.snapshots.BlocksAvailable() + } var d1, d2, d3, d4, d5, d6 time.Duration var err error @@ -71,14 +74,6 @@ func BodiesForward( } timeout := cfg.timeout - if cfg.snapshots != nil { - if s.BlockNumber < cfg.snapshots.BlocksAvailable() { - if err := s.Update(tx, cfg.snapshots.BlocksAvailable()); err != nil { - return err - } - s.BlockNumber = cfg.snapshots.BlocksAvailable() - } - } // This will update bd.maxProgress if _, _, _, err = cfg.bd.UpdateFromDb(tx); err != nil { return err diff --git a/eth/stagedsync/stage_senders.go b/eth/stagedsync/stage_senders.go index 3ea0f86afb8..4e10937d855 100644 --- a/eth/stagedsync/stage_senders.go +++ b/eth/stagedsync/stage_senders.go @@ -63,6 +63,10 @@ func StageSendersCfg(db kv.RwDB, chainCfg *params.ChainConfig, tmpdir string, pr } func SpawnRecoverSendersStage(cfg SendersCfg, s *StageState, u Unwinder, tx kv.RwTx, toBlock uint64, ctx context.Context) error { + if cfg.blockRetire != nil && cfg.blockRetire.Snapshots() != nil && cfg.blockRetire.Snapshots().Cfg().Enabled && s.BlockNumber < cfg.blockRetire.Snapshots().BlocksAvailable() { + s.BlockNumber = cfg.blockRetire.Snapshots().BlocksAvailable() + } + quitCh := ctx.Done() useExternalTx := tx != nil if !useExternalTx { @@ -94,9 +98,6 @@ func SpawnRecoverSendersStage(cfg SendersCfg, s *StageState, u Unwinder, tx kv.R logEvery := time.NewTicker(30 * time.Second) defer logEvery.Stop() - canonical := make([]common.Hash, to-s.BlockNumber) - currentHeaderIdx := uint64(0) - canonicalC, err := tx.Cursor(kv.HeaderCanonical) if err != nil { return err @@ -104,9 +105,8 @@ func SpawnRecoverSendersStage(cfg SendersCfg, s *StageState, u Unwinder, tx kv.R defer canonicalC.Close() startFrom := s.BlockNumber + 1 - if cfg.blockRetire != nil && cfg.blockRetire.Snapshots() != nil && startFrom < 
cfg.blockRetire.Snapshots().BlocksAvailable() { - startFrom = cfg.blockRetire.Snapshots().BlocksAvailable() - } + currentHeaderIdx := uint64(0) + canonical := make([]common.Hash, to-s.BlockNumber) for k, v, err := canonicalC.Seek(dbutils.EncodeBlockNumber(startFrom)); k != nil; k, v, err = canonicalC.Next() { if err != nil { @@ -209,7 +209,7 @@ func SpawnRecoverSendersStage(cfg SendersCfg, s *StageState, u Unwinder, tx kv.R defer bodiesC.Close() Loop: - for k, _, err := bodiesC.Seek(dbutils.EncodeBlockNumber(s.BlockNumber + 1)); k != nil; k, _, err = bodiesC.Next() { + for k, _, err := bodiesC.Seek(dbutils.EncodeBlockNumber(startFrom)); k != nil; k, _, err = bodiesC.Next() { if err != nil { return err } @@ -219,6 +219,7 @@ Loop: blockNumber := binary.BigEndian.Uint64(k[:8]) blockHash := common.BytesToHash(k[8:]) + if blockNumber > to { break } @@ -227,6 +228,7 @@ Loop: // non-canonical case continue } + body := rawdb.ReadCanonicalBodyWithTransactions(tx, blockHash, blockNumber) select { @@ -364,7 +366,6 @@ func UnwindSendersStage(s *UnwindState, tx kv.RwTx, cfg SendersCfg, ctx context. func PruneSendersStage(s *PruneState, tx kv.RwTx, cfg SendersCfg, ctx context.Context) (err error) { logEvery := time.NewTicker(logInterval) defer logEvery.Stop() - to := cfg.prune.TxIndex.PruneTo(s.ForwardProgress) useExternalTx := tx != nil if !useExternalTx { tx, err = cfg.db.BeginRw(ctx) @@ -392,6 +393,7 @@ func PruneSendersStage(s *PruneState, tx kv.RwTx, cfg SendersCfg, ctx context.Co } } } else if cfg.prune.TxIndex.Enabled() { + to := cfg.prune.TxIndex.PruneTo(s.ForwardProgress) if err = PruneTable(tx, kv.Senders, to, ctx, 1_000); err != nil { return err } diff --git a/eth/stagedsync/sync.go b/eth/stagedsync/sync.go index 3ea16cf4f36..552754b1503 100644 --- a/eth/stagedsync/sync.go +++ b/eth/stagedsync/sync.go @@ -3,7 +3,6 @@ package stagedsync import ( "context" "fmt" - "os" "time" libcommon "github.com/ledgerwatch/erigon-lib/common" @@ -223,7 +222,7 @@ func (s *Sync) Run(db kv.RwDB, tx kv.RwTx, firstCycle bool) error { if string(stage.ID) == debug.StopBeforeStage() { // stop process for debugging reasons log.Warn("STOP_BEFORE_STAGE env flag forced to stop app") - os.Exit(1) + return libcommon.ErrStopped } if stage.Disabled || stage.Forward == nil { From f3575ce46aa547736a8d03b189da2dcd5bac440f Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Mon, 6 Jun 2022 19:22:08 +0200 Subject: [PATCH 002/136] Fix MarkAllVerified (#4380) --- turbo/stages/headerdownload/header_algos.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/turbo/stages/headerdownload/header_algos.go b/turbo/stages/headerdownload/header_algos.go index 4f6b777c05e..ef64c4d0120 100644 --- a/turbo/stages/headerdownload/header_algos.go +++ b/turbo/stages/headerdownload/header_algos.go @@ -174,8 +174,7 @@ func (hd *HeaderDownload) removeUpwards(link *Link) { func (hd *HeaderDownload) MarkAllVerified() { hd.lock.Lock() defer hd.lock.Unlock() - for hd.insertQueue.Len() > 0 { - link := hd.insertQueue[0] + for _, link := range hd.insertQueue { if !link.verified { link.linked = true link.verified = true From b2f9b25300a371f9a62d40f9e89bef5c024529b2 Mon Sep 17 00:00:00 2001 From: ledgerwatch Date: Mon, 6 Jun 2022 18:23:41 +0100 Subject: [PATCH 003/136] For for overwritten anchor (#4379) * For for overwritten anchor * Switch to time types, log retry time in diagnostics Co-authored-by: Alex Sharp Co-authored-by: Alexey Sharp --- cmd/sentry/sentry/sentry_multi_client.go | 4 +- 
eth/stagedsync/stage_headers.go | 6 +-- turbo/stages/headerdownload/header_algos.go | 42 ++++++++++--------- .../headerdownload/header_data_struct.go | 10 +++-- 4 files changed, 34 insertions(+), 28 deletions(-) diff --git a/cmd/sentry/sentry/sentry_multi_client.go b/cmd/sentry/sentry/sentry_multi_client.go index 3b487333da2..70721dae3e6 100644 --- a/cmd/sentry/sentry/sentry_multi_client.go +++ b/cmd/sentry/sentry/sentry_multi_client.go @@ -414,12 +414,12 @@ func (cs *MultiClient) blockHeaders(ctx context.Context, pkt eth.BlockHeadersPac canRequestMore := cs.Hd.ProcessHeaders(csHeaders, false /* newBlock */, ConvertH512ToPeerID(peerID)) if canRequestMore { - currentTime := uint64(time.Now().Unix()) + currentTime := time.Now() req, penalties := cs.Hd.RequestMoreHeaders(currentTime) if req != nil { if _, sentToPeer := cs.SendHeaderRequest(ctx, req); sentToPeer { // If request was actually sent to a peer, we update retry time to be 5 seconds in the future - cs.Hd.UpdateRetryTime(req, currentTime, 5 /* timeout */) + cs.Hd.UpdateRetryTime(req, currentTime, 5*time.Second /* timeout */) log.Trace("Sent request", "height", req.Number) cs.Hd.UpdateStats(req, false /* skeleton */) } diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index df5927a17d1..916722a26f7 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -727,13 +727,13 @@ Loop: } break } - currentTime := uint64(time.Now().Unix()) + currentTime := time.Now() req, penalties := cfg.hd.RequestMoreHeaders(currentTime) if req != nil { _, sentToPeer = cfg.headerReqSend(ctx, req) if sentToPeer { // If request was actually sent to a peer, we update retry time to be 5 seconds in the future - cfg.hd.UpdateRetryTime(req, currentTime, 5 /* timeout */) + cfg.hd.UpdateRetryTime(req, currentTime, 5*time.Second /* timeout */) log.Trace("Sent request", "height", req.Number) } } @@ -747,7 +747,7 @@ Loop: _, sentToPeer = cfg.headerReqSend(ctx, req) if sentToPeer { // If request was actually sent to a peer, we update retry time to be 5 seconds in the future - cfg.hd.UpdateRetryTime(req, currentTime, 5 /*timeout */) + cfg.hd.UpdateRetryTime(req, currentTime, 5*time.Second /*timeout */) log.Trace("Sent request", "height", req.Number) cfg.hd.UpdateStats(req, false /* skeleton */) diff --git a/turbo/stages/headerdownload/header_algos.go b/turbo/stages/headerdownload/header_algos.go index ef64c4d0120..8dbe374dca4 100644 --- a/turbo/stages/headerdownload/header_algos.go +++ b/turbo/stages/headerdownload/header_algos.go @@ -240,6 +240,7 @@ func (hd *HeaderDownload) LogAnchorState() { func (hd *HeaderDownload) logAnchorState() { //nolint:prealloc var ss []string + currentTime := time.Now() for anchorParent, anchor := range hd.anchors { var sb strings.Builder sb.WriteString(fmt.Sprintf("{%8d", anchor.blockHeight)) @@ -293,6 +294,7 @@ func (hd *HeaderDownload) logAnchorState() { sb.WriteString(fmt.Sprintf("-%d links=%d (%s)}", end, len(bs), sbb.String())) sb.WriteString(fmt.Sprintf(" => %x", anchorParent)) sb.WriteString(fmt.Sprintf(", anchorQueue.idx=%d", anchor.idx)) + sb.WriteString(fmt.Sprintf(", next retry in %v", anchor.nextRetryTime.Sub(currentTime))) ss = append(ss, sb.String()) } sort.Strings(ss) @@ -380,7 +382,7 @@ func (hd *HeaderDownload) invalidateAnchor(anchor *Anchor, reason string) { } } -func (hd *HeaderDownload) RequestMoreHeaders(currentTime uint64) (*HeaderRequest, []PenaltyItem) { +func (hd *HeaderDownload) RequestMoreHeaders(currentTime time.Time) (*HeaderRequest, []PenaltyItem) { 
hd.lock.Lock() defer hd.lock.Unlock() var penalties []PenaltyItem @@ -391,7 +393,7 @@ func (hd *HeaderDownload) RequestMoreHeaders(currentTime uint64) (*HeaderRequest for hd.anchorQueue.Len() > 0 { anchor := (*hd.anchorQueue)[0] // Only process the anchors for which the nextRetryTime has already come - if anchor.nextRetryTime > currentTime { + if anchor.nextRetryTime.After(currentTime) { return nil, penalties } if anchor.timeouts < 10 { @@ -412,7 +414,7 @@ func (hd *HeaderDownload) RequestMoreHeaders(currentTime uint64) (*HeaderRequest return nil, penalties } -func (hd *HeaderDownload) requestMoreHeadersForPOS(currentTime uint64) (timeout bool, request *HeaderRequest, penalties []PenaltyItem) { +func (hd *HeaderDownload) requestMoreHeadersForPOS(currentTime time.Time) (timeout bool, request *HeaderRequest, penalties []PenaltyItem) { anchor := hd.posAnchor if anchor == nil { log.Trace("No PoS anchor") @@ -420,7 +422,7 @@ func (hd *HeaderDownload) requestMoreHeadersForPOS(currentTime uint64) (timeout } // Only process the anchors for which the nextRetryTime has already come - if anchor.nextRetryTime > currentTime { + if anchor.nextRetryTime.After(currentTime) { return } @@ -469,7 +471,7 @@ func (hd *HeaderDownload) UpdateStats(req *HeaderRequest, skeleton bool) { } -func (hd *HeaderDownload) UpdateRetryTime(req *HeaderRequest, currentTime, timeout uint64) { +func (hd *HeaderDownload) UpdateRetryTime(req *HeaderRequest, currentTime time.Time, timeout time.Duration) { hd.lock.Lock() defer hd.lock.Unlock() if req.Anchor.idx == -1 { @@ -477,7 +479,7 @@ func (hd *HeaderDownload) UpdateRetryTime(req *HeaderRequest, currentTime, timeo return } req.Anchor.timeouts++ - req.Anchor.nextRetryTime = currentTime + timeout + req.Anchor.nextRetryTime = currentTime.Add(timeout) heap.Fix(hd.anchorQueue, req.Anchor.idx) } @@ -924,9 +926,16 @@ func (hd *HeaderDownload) ProcessHeader(sh ChainSegmentHeader, newBlock bool, pe // Duplicate return false } + if parentAnchor, ok := hd.anchors[sh.Header.ParentHash]; ok { + // Alternative branch connected to an existing anchor + // Adding link as another child to the anchor and quit (not to overwrite the anchor) + link := hd.addHeaderAsLink(sh, false /* persisted */) + link.next = parentAnchor.fLink + parentAnchor.fLink = link + return false + } parent, foundParent := hd.links[sh.Header.ParentHash] anchor, foundAnchor := hd.anchors[sh.Hash] - //fmt.Printf("sh = %d %x, foundParent=%t, foundAnchor=%t\n", sh.Number, sh.Hash, foundParent, foundAnchor) if !foundParent && !foundAnchor { if sh.Number < hd.highestInDb { log.Debug(fmt.Sprintf("new anchor too far in the past: %d, latest header in db: %d", sh.Number, hd.highestInDb)) @@ -939,16 +948,12 @@ func (hd *HeaderDownload) ProcessHeader(sh ChainSegmentHeader, newBlock bool, pe } link := hd.addHeaderAsLink(sh, false /* persisted */) if foundAnchor { + // The new link is what anchor was pointing to, so the link takes over the child links of the anchor and the anchor is removed link.fChild = anchor.fLink hd.removeAnchor(anchor) - //fmt.Printf("removed anchor %d %x\n", anchor.blockHeight, anchor.parentHash) - } - if parentAnchor, ok := hd.anchors[sh.Header.ParentHash]; ok { - link.next = parentAnchor.fLink - parentAnchor.fLink = link } if foundParent { - //fmt.Printf("sh = %d %x, found parent\n", sh.Number, sh.Hash) + // Add this link as another child to the parent that is found link.next = parent.fChild parent.fChild = link if parent.persisted { @@ -956,16 +961,15 @@ func (hd *HeaderDownload) ProcessHeader(sh 
ChainSegmentHeader, newBlock bool, pe hd.moveLinkToQueue(link, InsertQueueID) } } else { + // The link has not known parent, therefore it becomes an anchor, unless it is too far in the past if sh.Number+params.FullImmutabilityThreshold < hd.highestInDb { log.Debug("Remove upwards", "height", link.blockHeight, "hash", link.blockHeight) hd.removeUpwards(link) return false } - //fmt.Printf("sh = %d %x, nof found parent or anchor\n", sh.Number, sh.Hash) - // See if it links existing anchor anchor = &Anchor{ parentHash: sh.Header.ParentHash, - nextRetryTime: 0, // Will ensure this anchor will be top priority + nextRetryTime: time.Time{}, // Will ensure this anchor will be top priority peerID: peerID, blockHeight: sh.Number, } @@ -1219,11 +1223,11 @@ func (hd *HeaderDownload) StartPoSDownloader( for { var req *HeaderRequest var penalties []PenaltyItem - var currentTime uint64 + var currentTime time.Time hd.lock.Lock() if hd.posStatus == Syncing { - currentTime = uint64(time.Now().Unix()) + currentTime = time.Now() var timeout bool timeout, req, penalties = hd.requestMoreHeadersForPOS(currentTime) if timeout { @@ -1240,7 +1244,7 @@ func (hd *HeaderDownload) StartPoSDownloader( _, sentToPeer := headerReqSend(ctx, req) if sentToPeer { // If request was actually sent to a peer, we update retry time to be 5 seconds in the future - hd.UpdateRetryTime(req, currentTime, 5 /* timeout */) + hd.UpdateRetryTime(req, currentTime, 5*time.Second /* timeout */) log.Trace("Sent request", "height", req.Number) } } diff --git a/turbo/stages/headerdownload/header_data_struct.go b/turbo/stages/headerdownload/header_data_struct.go index 3ddd551af63..7041122f7aa 100644 --- a/turbo/stages/headerdownload/header_data_struct.go +++ b/turbo/stages/headerdownload/header_data_struct.go @@ -5,6 +5,7 @@ import ( "fmt" "math/big" "sync" + "time" lru "github.com/hashicorp/golang-lru" "github.com/ledgerwatch/erigon-lib/etl" @@ -105,9 +106,9 @@ type Anchor struct { fLink *Link // Links attached immediately to this anchor (pointer to the first one, the rest can be found by following `next` fields) parentHash common.Hash // Hash of the header this anchor can be connected to (to disappear) blockHeight uint64 - nextRetryTime uint64 // Zero when anchor has just been created, otherwise time when anchor needs to be check to see if retry is needed - timeouts int // Number of timeout that this anchor has experiences - after certain threshold, it gets invalidated - idx int // Index of the anchor in the queue to be able to modify specific items + nextRetryTime time.Time // Zero when anchor has just been created, otherwise time when anchor needs to be check to see if retry is needed + timeouts int // Number of timeout that this anchor has experiences - after certain threshold, it gets invalidated + idx int // Index of the anchor in the queue to be able to modify specific items } // AnchorQueue is a priority queue of anchors that priorises by the time when @@ -133,7 +134,7 @@ func (aq AnchorQueue) Less(i, j int) bool { // When next retry times are the same, we prioritise low block height anchors return aq[i].blockHeight < aq[j].blockHeight } - return aq[i].nextRetryTime < aq[j].nextRetryTime + return aq[i].nextRetryTime.Before(aq[j].nextRetryTime) } func (aq AnchorQueue) Swap(i, j int) { @@ -153,6 +154,7 @@ func (aq *AnchorQueue) Pop() interface{} { n := len(old) x := old[n-1] *aq = old[0 : n-1] + x.idx = -1 return x } From 3d3b190e42a6eaa8e17c9dfe48a7ea022cb5e88d Mon Sep 17 00:00:00 2001 From: ledgerwatch Date: Mon, 6 Jun 2022 23:04:24 +0100 
Subject: [PATCH 004/136] Copy deletion key to prevent deleting more blocks (#4384) Co-authored-by: Alex Sharp --- core/rawdb/accessors_chain.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go index 2334678d580..2e8add00087 100644 --- a/core/rawdb/accessors_chain.go +++ b/core/rawdb/accessors_chain.go @@ -1149,10 +1149,13 @@ func DeleteAncientBlocks(tx kv.RwTx, blockTo uint64, blocksDeleteLimit int) (del } } } - if err = tx.Delete(kv.Headers, k, nil); err != nil { + // Copying k because otherwise the same memory will be reused + // for the next key and Delete below will end up deleting 1 more record than required + kCopy := common.CopyBytes(k) + if err = tx.Delete(kv.Headers, kCopy, nil); err != nil { return } - if err = tx.Delete(kv.BlockBody, k, nil); err != nil { + if err = tx.Delete(kv.BlockBody, kCopy, nil); err != nil { return } } From f6c9d2beeab23eaa7b0e6effaacbab23b536d99a Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Tue, 7 Jun 2022 09:36:42 +0700 Subject: [PATCH 005/136] downloader torrent_hashes --verify: 1 error line per file #4386 --- cmd/downloader/downloader/util.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/cmd/downloader/downloader/util.go b/cmd/downloader/downloader/util.go index ef896335b1e..c31278be860 100644 --- a/cmd/downloader/downloader/util.go +++ b/cmd/downloader/downloader/util.go @@ -347,6 +347,8 @@ func AddTorrentFile(ctx context.Context, torrentFilePath string, torrentClient * return t, nil } +var ErrSkip = fmt.Errorf("skip") + func VerifyDtaFiles(ctx context.Context, snapDir string) error { logEvery := time.NewTicker(5 * time.Second) defer logEvery.Stop() @@ -379,12 +381,12 @@ func VerifyDtaFiles(ctx context.Context, snapDir string) error { return err } - err = verifyTorrent(&info, snapDir, func(i int, good bool) error { + if err = verifyTorrent(&info, snapDir, func(i int, good bool) error { j++ if !good { failsAmount++ log.Error("[Snapshots] Verify hash mismatch", "at piece", i, "file", info.Name) - return nil + return ErrSkip } select { case <-logEvery.C: @@ -394,8 +396,10 @@ func VerifyDtaFiles(ctx context.Context, snapDir string) error { default: } return nil - }) - if err != nil { + }); err != nil { + if errors.Is(ErrSkip, err) { + continue + } return err } } From a53642b4bf80a365ea11a98be86fb692f62ffc9e Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Tue, 7 Jun 2022 10:24:50 +0700 Subject: [PATCH 006/136] datadir.Dirs configuration object to group dir config (#4387) --- cmd/downloader/main.go | 21 +- cmd/integration/commands/reset_state.go | 206 ++----------------- cmd/integration/commands/stages.go | 73 +++---- cmd/integration/commands/state_stages.go | 3 +- cmd/rpcdaemon/cli/config.go | 17 +- cmd/rpcdaemon/cli/httpcfg/http_cfg.go | 3 +- cmd/sentry/main.go | 5 - cmd/txpool/main.go | 11 +- cmd/utils/flags.go | 8 +- core/rawdb/rawdbreset/reset_stages.go | 239 +++++++++++++++++++++++ eth/backend.go | 6 +- eth/ethconfig/config.go | 3 +- node/node.go | 2 + node/node_test.go | 5 +- node/nodecfg/config.go | 2 + node/nodecfg/datadir/dirs.go | 23 +++ node/nodecfg/defaults.go | 2 + turbo/app/make_app.go | 2 + turbo/app/snapshots.go | 46 ++--- turbo/cli/flags.go | 8 +- turbo/stages/mock_sentry.go | 6 +- 21 files changed, 378 insertions(+), 313 deletions(-) create mode 100644 core/rawdb/rawdbreset/reset_stages.go create mode 100644 node/nodecfg/datadir/dirs.go diff --git a/cmd/downloader/main.go b/cmd/downloader/main.go index 
b82c7c237e5..6cf8644ce6b 100644 --- a/cmd/downloader/main.go +++ b/cmd/downloader/main.go @@ -19,6 +19,7 @@ import ( "github.com/ledgerwatch/erigon/cmd/utils" "github.com/ledgerwatch/erigon/common/paths" "github.com/ledgerwatch/erigon/internal/debug" + "github.com/ledgerwatch/erigon/node/nodecfg/datadir" "github.com/ledgerwatch/erigon/p2p/nat" "github.com/ledgerwatch/log/v3" "github.com/pelletier/go-toml/v2" @@ -32,7 +33,7 @@ import ( ) var ( - datadir string + datadirCli string forceRebuild bool forceVerify bool downloaderApiAddr string @@ -74,7 +75,7 @@ func init() { } func withDataDir(cmd *cobra.Command) { - cmd.Flags().StringVar(&datadir, utils.DataDirFlag.Name, paths.DefaultDataDir(), utils.DataDirFlag.Usage) + cmd.Flags().StringVar(&datadirCli, utils.DataDirFlag.Name, paths.DefaultDataDir(), utils.DataDirFlag.Usage) if err := cmd.MarkFlagDirname(utils.DataDirFlag.Name); err != nil { panic(err) } @@ -112,7 +113,7 @@ var rootCmd = &cobra.Command{ } func Downloader(ctx context.Context) error { - snapDir := filepath.Join(datadir, "snapshots") + dirs := datadir.New(datadirCli) torrentLogLevel, err := torrentcfg.Str2LogLevel(torrentVerbosity) if err != nil { return err @@ -126,13 +127,13 @@ func Downloader(ctx context.Context) error { return err } - log.Info("Run snapshot downloader", "addr", downloaderApiAddr, "datadir", datadir, "download.rate", downloadRate.String(), "upload.rate", uploadRate.String()) + log.Info("Run snapshot downloader", "addr", downloaderApiAddr, "datadir", dirs.DataDir, "download.rate", downloadRate.String(), "upload.rate", uploadRate.String()) natif, err := nat.Parse(natSetting) if err != nil { return fmt.Errorf("invalid nat option %s: %w", natSetting, err) } - cfg, err := torrentcfg.New(snapDir, torrentLogLevel, natif, downloadRate, uploadRate, torrentPort, torrentConnsPerFile, torrentDownloadSlots) + cfg, err := torrentcfg.New(dirs.Snap, torrentLogLevel, natif, downloadRate, uploadRate, torrentPort, torrentConnsPerFile, torrentDownloadSlots) if err != nil { return err } @@ -164,16 +165,16 @@ var printTorrentHashes = &cobra.Command{ Use: "torrent_hashes", Example: "go run ./cmd/downloader torrent_hashes --datadir ", RunE: func(cmd *cobra.Command, args []string) error { - snapDir := filepath.Join(datadir, "snapshots") + dirs := datadir.New(datadirCli) ctx := cmd.Context() if forceVerify { // remove and create .torrent files (will re-read all snapshots) - return downloader.VerifyDtaFiles(ctx, snapDir) + return downloader.VerifyDtaFiles(ctx, dirs.Snap) } if forceRebuild { // remove and create .torrent files (will re-read all snapshots) //removePieceCompletionStorage(snapDir) - files, err := downloader.AllTorrentPaths(snapDir) + files, err := downloader.AllTorrentPaths(dirs.Snap) if err != nil { return err } @@ -182,13 +183,13 @@ var printTorrentHashes = &cobra.Command{ return err } } - if err := downloader.BuildTorrentFilesIfNeed(ctx, snapDir); err != nil { + if err := downloader.BuildTorrentFilesIfNeed(ctx, dirs.Snap); err != nil { return err } } res := map[string]string{} - files, err := downloader.AllTorrentPaths(snapDir) + files, err := downloader.AllTorrentPaths(dirs.Snap) if err != nil { return err } diff --git a/cmd/integration/commands/reset_state.go b/cmd/integration/commands/reset_state.go index 17e0cd727cc..76ffc879661 100644 --- a/cmd/integration/commands/reset_state.go +++ b/cmd/integration/commands/reset_state.go @@ -1,7 +1,6 @@ package commands import ( - "context" "encoding/binary" "fmt" "os" @@ -9,9 +8,8 @@ import ( 
"github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/rawdb" - "github.com/ledgerwatch/erigon/eth/stagedsync" + reset2 "github.com/ledgerwatch/erigon/core/rawdb/rawdbreset" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/ethdb/prune" "github.com/ledgerwatch/log/v3" @@ -26,13 +24,23 @@ var cmdResetState = &cobra.Command{ logger := log.New() db := openDB(chaindata, logger, true) defer db.Close() + if err := db.View(ctx, func(tx kv.Tx) error { return printStages(tx) }); err != nil { + return err + } - err := resetState(db, logger, ctx) + genesis, _ := genesisByChain(chain) + err := reset2.ResetState(db, ctx, genesis) if err != nil { log.Error(err.Error()) return err } + // set genesis after reset all buckets + fmt.Printf("After reset: \n") + if err := db.View(ctx, func(tx kv.Tx) error { return printStages(tx) }); err != nil { + return err + } + return nil }, } @@ -44,196 +52,6 @@ func init() { rootCmd.AddCommand(cmdResetState) } -func resetState(db kv.RwDB, logger log.Logger, ctx context.Context) error { - if err := db.View(ctx, func(tx kv.Tx) error { return printStages(tx) }); err != nil { - return err - } - // don't reset senders here - if err := db.Update(ctx, stagedsync.ResetHashState); err != nil { - return err - } - if err := db.Update(ctx, stagedsync.ResetIH); err != nil { - return err - } - if err := db.Update(ctx, resetHistory); err != nil { - return err - } - if err := db.Update(ctx, resetLogIndex); err != nil { - return err - } - if err := db.Update(ctx, resetCallTraces); err != nil { - return err - } - if err := db.Update(ctx, resetTxLookup); err != nil { - return err - } - if err := db.Update(ctx, resetFinish); err != nil { - return err - } - - genesis, _ := byChain(chain) - if err := db.Update(ctx, func(tx kv.RwTx) error { return resetExec(tx, genesis) }); err != nil { - return err - } - - // set genesis after reset all buckets - fmt.Printf("After reset: \n") - if err := db.View(ctx, func(tx kv.Tx) error { return printStages(tx) }); err != nil { - return err - } - return nil -} - -func resetSenders(tx kv.RwTx) error { - if err := tx.ClearBucket(kv.Senders); err != nil { - return err - } - if err := stages.SaveStageProgress(tx, stages.Senders, 0); err != nil { - return err - } - if err := stages.SaveStagePruneProgress(tx, stages.Senders, 0); err != nil { - return err - } - return nil -} - -func resetExec(tx kv.RwTx, g *core.Genesis) error { - if err := tx.ClearBucket(kv.HashedAccounts); err != nil { - return err - } - if err := tx.ClearBucket(kv.HashedStorage); err != nil { - return err - } - if err := tx.ClearBucket(kv.ContractCode); err != nil { - return err - } - if err := tx.ClearBucket(kv.PlainState); err != nil { - return err - } - if err := tx.ClearBucket(kv.AccountChangeSet); err != nil { - return err - } - if err := tx.ClearBucket(kv.StorageChangeSet); err != nil { - return err - } - if err := tx.ClearBucket(kv.PlainContractCode); err != nil { - return err - } - if err := tx.ClearBucket(kv.Receipts); err != nil { - return err - } - if err := tx.ClearBucket(kv.Log); err != nil { - return err - } - if err := tx.ClearBucket(kv.IncarnationMap); err != nil { - return err - } - if err := tx.ClearBucket(kv.Code); err != nil { - return err - } - if err := tx.ClearBucket(kv.CallTraceSet); err != nil { - return err - } - if err := tx.ClearBucket(kv.Epoch); err != nil { - return err - } - if err := tx.ClearBucket(kv.PendingEpoch); err != nil { 
- return err - } - if err := tx.ClearBucket(kv.BorReceipts); err != nil { - return err - } - if err := stages.SaveStageProgress(tx, stages.Execution, 0); err != nil { - return err - } - if err := stages.SaveStagePruneProgress(tx, stages.Execution, 0); err != nil { - return err - } - - if _, _, err := g.WriteGenesisState(tx); err != nil { - return err - } - return nil -} - -func resetHistory(tx kv.RwTx) error { - if err := tx.ClearBucket(kv.AccountsHistory); err != nil { - return err - } - if err := tx.ClearBucket(kv.StorageHistory); err != nil { - return err - } - if err := stages.SaveStageProgress(tx, stages.AccountHistoryIndex, 0); err != nil { - return err - } - if err := stages.SaveStageProgress(tx, stages.StorageHistoryIndex, 0); err != nil { - return err - } - if err := stages.SaveStagePruneProgress(tx, stages.AccountHistoryIndex, 0); err != nil { - return err - } - if err := stages.SaveStagePruneProgress(tx, stages.StorageHistoryIndex, 0); err != nil { - return err - } - - return nil -} - -func resetLogIndex(tx kv.RwTx) error { - if err := tx.ClearBucket(kv.LogAddressIndex); err != nil { - return err - } - if err := tx.ClearBucket(kv.LogTopicIndex); err != nil { - return err - } - if err := stages.SaveStageProgress(tx, stages.LogIndex, 0); err != nil { - return err - } - if err := stages.SaveStagePruneProgress(tx, stages.LogIndex, 0); err != nil { - return err - } - return nil -} - -func resetCallTraces(tx kv.RwTx) error { - if err := tx.ClearBucket(kv.CallFromIndex); err != nil { - return err - } - if err := tx.ClearBucket(kv.CallToIndex); err != nil { - return err - } - if err := stages.SaveStageProgress(tx, stages.CallTraces, 0); err != nil { - return err - } - if err := stages.SaveStagePruneProgress(tx, stages.CallTraces, 0); err != nil { - return err - } - return nil -} - -func resetTxLookup(tx kv.RwTx) error { - if err := tx.ClearBucket(kv.TxLookup); err != nil { - return err - } - if err := stages.SaveStageProgress(tx, stages.TxLookup, 0); err != nil { - return err - } - if err := stages.SaveStagePruneProgress(tx, stages.TxLookup, 0); err != nil { - return err - } - return nil -} - -func resetFinish(tx kv.RwTx) error { - if err := stages.SaveStageProgress(tx, stages.Finish, 0); err != nil { - return err - } - if err := stages.SaveStagePruneProgress(tx, stages.Finish, 0); err != nil { - return err - } - return nil -} - func printStages(db kv.Tx) error { var err error var progress uint64 diff --git a/cmd/integration/commands/stages.go b/cmd/integration/commands/stages.go index 1297a083726..5a384fd02d5 100644 --- a/cmd/integration/commands/stages.go +++ b/cmd/integration/commands/stages.go @@ -15,11 +15,11 @@ import ( "github.com/ledgerwatch/erigon-lib/etl" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/cmd/sentry/sentry" - "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/consensus/ethash" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/rawdb" + reset2 "github.com/ledgerwatch/erigon/core/rawdb/rawdbreset" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/eth/ethconfig" @@ -30,6 +30,7 @@ import ( "github.com/ledgerwatch/erigon/ethdb/privateapi" "github.com/ledgerwatch/erigon/ethdb/prune" "github.com/ledgerwatch/erigon/migrations" + "github.com/ledgerwatch/erigon/node/nodecfg/datadir" "github.com/ledgerwatch/erigon/p2p" "github.com/ledgerwatch/erigon/params" 
"github.com/ledgerwatch/erigon/turbo/services" @@ -290,7 +291,7 @@ var cmdSetSnapshto = &cobra.Command{ logger := log.New() db := openDB(chaindata, logger, true) defer db.Close() - _, chainConfig := byChain(chain) + _, chainConfig := genesisByChain(chain) snapshots := allSnapshots(chainConfig, db) if err := db.Update(context.Background(), func(tx kv.RwTx) error { return snap.ForceSetFlags(tx, snapshots.Cfg()) @@ -446,11 +447,10 @@ func stageHeaders(db kv.RwDB, ctx context.Context) error { } if reset { - progress, err := stages.GetStageProgress(tx, stages.Headers) - if err != nil { - return fmt.Errorf("read Bodies progress: %w", err) + if err := reset2.ResetBlocks(tx); err != nil { + return err } - unwind = progress + return nil } progress, err := stages.GetStageProgress(tx, stages.Headers) @@ -500,24 +500,6 @@ func stageHeaders(db kv.RwDB, ctx context.Context) error { return err } - if reset { - // ensure no grabage records left (it may happen if db is inconsistent) - if err := tx.ForEach(kv.BlockBody, dbutils.EncodeBlockNumber(2), func(k, _ []byte) error { return tx.Delete(kv.BlockBody, k, nil) }); err != nil { - return err - } - if err := tx.ClearBucket(kv.NonCanonicalTxs); err != nil { - return err - } - if err := tx.ClearBucket(kv.EthTx); err != nil { - return err - } - if err := rawdb.ResetSequence(tx, kv.EthTx, 0); err != nil { - return err - } - if err := rawdb.ResetSequence(tx, kv.NonCanonicalTxs, 0); err != nil { - return err - } - } log.Info("Progress", "headers", progress) return nil }) @@ -602,7 +584,7 @@ func stageSenders(db kv.RwDB, ctx context.Context) error { } if reset { - err = resetSenders(tx) + err = reset2.ResetSenders(tx) if err != nil { return err } @@ -656,8 +638,8 @@ func stageExec(db kv.RwDB, ctx context.Context) error { tmpdir := filepath.Join(datadir, etl.TmpDirName) if reset { - genesis, _ := byChain(chain) - if err := db.Update(ctx, func(tx kv.RwTx) error { return resetExec(tx, genesis) }); err != nil { + genesis, _ := genesisByChain(chain) + if err := db.Update(ctx, func(tx kv.RwTx) error { return reset2.ResetExec(tx, genesis) }); err != nil { return err } return nil @@ -830,7 +812,7 @@ func stageLogIndex(db kv.RwDB, ctx context.Context) error { defer tx.Rollback() if reset { - err = resetLogIndex(tx) + err = reset2.ResetLogIndex(tx) if err != nil { return err } @@ -885,7 +867,7 @@ func stageCallTraces(kv kv.RwDB, ctx context.Context) error { defer tx.Rollback() if reset { - err = resetCallTraces(tx) + err = reset2.ResetCallTraces(tx) if err != nil { return err } @@ -946,7 +928,7 @@ func stageHistory(db kv.RwDB, ctx context.Context) error { defer tx.Rollback() if reset { - err = resetHistory(tx) + err = reset2.ResetHistory(tx) if err != nil { return err } @@ -1017,7 +999,7 @@ func stageTxLookup(db kv.RwDB, ctx context.Context) error { defer tx.Rollback() if reset { - err = resetTxLookup(tx) + err = reset2.ResetTxLookup(tx) if err != nil { return err } @@ -1085,19 +1067,6 @@ func removeMigration(db kv.RwDB, ctx context.Context) error { }) } -func byChain(chain string) (*core.Genesis, *params.ChainConfig) { - var chainConfig *params.ChainConfig - var genesis *core.Genesis - if chain == "" { - chainConfig = params.MainnetChainConfig - genesis = core.DefaultGenesisBlock() - } else { - chainConfig = params.ChainConfigByChainName(chain) - genesis = core.DefaultGenesisBlockByChainName(chain) - } - return genesis, chainConfig -} - var openSnapshotOnce sync.Once var _allSnapshotsSingleton *snapshotsync.RoSnapshots @@ -1164,7 +1133,7 @@ func newSync(ctx 
context.Context, db kv.RwDB, miningConfig *params.MiningConfig) } vmConfig := &vm.Config{} - genesis, _ := byChain(chain) + genesis, _ := genesisByChain(chain) events := privateapi.NewEvents() @@ -1189,6 +1158,7 @@ func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig) if miningConfig != nil { cfg.Miner = *miningConfig } + cfg.Dirs = datadir.New(datadir) allSn := allSnapshots(chainConfig, db) cfg.Snapshot = allSn.Cfg() cfg.SnapDir = filepath.Join(datadir, "snapshots") @@ -1282,3 +1252,16 @@ func overrideStorageMode(db kv.RwDB) error { return nil }) } + +func genesisByChain(chain string) (*core.Genesis, *params.ChainConfig) { + var chainConfig *params.ChainConfig + var genesis *core.Genesis + if chain == "" { + chainConfig = params.MainnetChainConfig + genesis = core.DefaultGenesisBlock() + } else { + chainConfig = params.ChainConfigByChainName(chain) + genesis = core.DefaultGenesisBlockByChainName(chain) + } + return genesis, chainConfig +} diff --git a/cmd/integration/commands/state_stages.go b/cmd/integration/commands/state_stages.go index f63021677e0..b10bd65f510 100644 --- a/cmd/integration/commands/state_stages.go +++ b/cmd/integration/commands/state_stages.go @@ -6,7 +6,6 @@ import ( "encoding/json" "fmt" "os" - "path" "path/filepath" "sort" "time" @@ -59,7 +58,7 @@ Examples: miningConfig := params.MiningConfig{} utils.SetupMinerCobra(cmd, &miningConfig) logger := log.New() - db := openDB(path.Join(cfg.DataDir, "chaindata"), logger, true) + db := openDB(cfg.Dirs.Chaindata, logger, true) defer db.Close() if err := syncBySmallSteps(db, miningConfig, ctx); err != nil { diff --git a/cmd/rpcdaemon/cli/config.go b/cmd/rpcdaemon/cli/config.go index 098670bef72..ed913abee58 100644 --- a/cmd/rpcdaemon/cli/config.go +++ b/cmd/rpcdaemon/cli/config.go @@ -15,6 +15,7 @@ import ( "time" "github.com/ledgerwatch/erigon/internal/debug" + "github.com/ledgerwatch/erigon/node/nodecfg/datadir" "github.com/ledgerwatch/erigon/rpc/rpccfg" "github.com/ledgerwatch/erigon-lib/direct" @@ -63,7 +64,6 @@ func RootCommand() (*cobra.Command, *httpcfg.HttpCfg) { cfg := &httpcfg.HttpCfg{StateCache: kvcache.DefaultCoherentConfig} rootCmd.PersistentFlags().StringVar(&cfg.PrivateApiAddr, "private.api.addr", "127.0.0.1:9090", "private api network address, for example: 127.0.0.1:9090") rootCmd.PersistentFlags().StringVar(&cfg.DataDir, "datadir", "", "path to Erigon working directory") - rootCmd.PersistentFlags().StringVar(&cfg.Chaindata, "chaindata", "", "path to the database") rootCmd.PersistentFlags().StringVar(&cfg.HttpListenAddress, "http.addr", nodecfg.DefaultHTTPHost, "HTTP-RPC server listening interface") rootCmd.PersistentFlags().StringVar(&cfg.EngineHTTPListenAddress, "engine.addr", nodecfg.DefaultHTTPHost, "HTTP-RPC server listening interface for engineAPI") rootCmd.PersistentFlags().StringVar(&cfg.TLSCertfile, "tls.cert", "", "certificate for client side TLS handshake") @@ -100,22 +100,17 @@ func RootCommand() (*cobra.Command, *httpcfg.HttpCfg) { if err := rootCmd.MarkPersistentFlagDirname("datadir"); err != nil { panic(err) } - if err := rootCmd.MarkPersistentFlagDirname("chaindata"); err != nil { - panic(err) - } rootCmd.PersistentPreRunE = func(cmd *cobra.Command, args []string) error { if err := utils.SetupCobra(cmd); err != nil { return err } - cfg.WithDatadir = cfg.DataDir != "" || cfg.Chaindata != "" + cfg.WithDatadir = cfg.DataDir != "" if cfg.WithDatadir { if cfg.DataDir == "" { cfg.DataDir = paths.DefaultDataDir() } - if cfg.Chaindata == "" { - cfg.Chaindata = 
filepath.Join(cfg.DataDir, "chaindata") - } + cfg.Dirs = datadir.New(cfg.DataDir) } if cfg.TxPoolApiAddr == "" { cfg.TxPoolApiAddr = cfg.PrivateApiAddr @@ -251,9 +246,9 @@ func RemoteServices(ctx context.Context, cfg httpcfg.HttpCfg, logger log.Logger, // If PrivateApiAddr is checked first, the Chaindata option will never work if cfg.WithDatadir { var rwKv kv.RwDB - log.Trace("Creating chain db", "path", cfg.Chaindata) + log.Trace("Creating chain db", "path", cfg.Dirs.Chaindata) limiter := make(chan struct{}, cfg.DBReadConcurrency) - rwKv, err = kv2.NewMDBX(logger).RoTxsLimiter(limiter).Path(cfg.Chaindata).Readonly().Open() + rwKv, err = kv2.NewMDBX(logger).RoTxsLimiter(limiter).Path(cfg.Dirs.Chaindata).Readonly().Open() if err != nil { return nil, nil, nil, nil, nil, nil, nil, nil, ff, err } @@ -345,7 +340,7 @@ func RemoteServices(ctx context.Context, cfg httpcfg.HttpCfg, logger log.Logger, onNewSnapshot := func() {} if cfg.WithDatadir { if cfg.Snap.Enabled { - allSnapshots := snapshotsync.NewRoSnapshots(cfg.Snap, filepath.Join(cfg.DataDir, "snapshots")) + allSnapshots := snapshotsync.NewRoSnapshots(cfg.Snap, cfg.Dirs.Snap) if err := allSnapshots.Reopen(); err != nil { return nil, nil, nil, nil, nil, nil, nil, nil, ff, fmt.Errorf("allSnapshots.Reopen: %w", err) } diff --git a/cmd/rpcdaemon/cli/httpcfg/http_cfg.go b/cmd/rpcdaemon/cli/httpcfg/http_cfg.go index 59156349851..6a7d3505110 100644 --- a/cmd/rpcdaemon/cli/httpcfg/http_cfg.go +++ b/cmd/rpcdaemon/cli/httpcfg/http_cfg.go @@ -3,6 +3,7 @@ package httpcfg import ( "github.com/ledgerwatch/erigon-lib/kv/kvcache" "github.com/ledgerwatch/erigon/eth/ethconfig" + "github.com/ledgerwatch/erigon/node/nodecfg/datadir" ) type HttpCfg struct { @@ -10,7 +11,7 @@ type HttpCfg struct { PrivateApiAddr string WithDatadir bool // Erigon's database can be read by separated processes on same machine - in read-only mode - with full support of transactions. It will share same "OS PageCache" with Erigon process. 
DataDir string - Chaindata string + Dirs datadir.Dirs HttpListenAddress string EngineHTTPListenAddress string TLSCertfile string diff --git a/cmd/sentry/main.go b/cmd/sentry/main.go index 23522a5a9d4..478c84cfbb8 100644 --- a/cmd/sentry/main.go +++ b/cmd/sentry/main.go @@ -3,7 +3,6 @@ package main import ( "fmt" "os" - "path/filepath" "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon/cmd/sentry/sentry" @@ -19,7 +18,6 @@ import ( var ( sentryAddr string // Address of the sentry : - chaindata string // Path to chaindata datadir string // Path to td working dir natSetting string // NAT setting @@ -64,9 +62,6 @@ var rootCmd = &cobra.Command{ if err := debug.SetupCobra(cmd); err != nil { panic(err) } - if chaindata == "" { - chaindata = filepath.Join(datadir, "chaindata") - } }, PersistentPostRun: func(cmd *cobra.Command, args []string) { debug.Exit() diff --git a/cmd/txpool/main.go b/cmd/txpool/main.go index e09807036ea..0715c5759b2 100644 --- a/cmd/txpool/main.go +++ b/cmd/txpool/main.go @@ -24,6 +24,7 @@ import ( "github.com/ledgerwatch/erigon/common/paths" "github.com/ledgerwatch/erigon/ethdb/privateapi" "github.com/ledgerwatch/erigon/internal/debug" + "github.com/ledgerwatch/erigon/node/nodecfg/datadir" "github.com/ledgerwatch/log/v3" "github.com/spf13/cobra" ) @@ -33,7 +34,7 @@ var ( traceSenders []string privateApiAddr string txpoolApiAddr string - datadir string // Path to td working dir + datadirCli string // Path to td working dir TLSCertfile string TLSCACert string @@ -52,7 +53,7 @@ func init() { rootCmd.Flags().StringSliceVar(&sentryAddr, "sentry.api.addr", []string{"localhost:9091"}, "comma separated sentry addresses ':,:'") rootCmd.Flags().StringVar(&privateApiAddr, "private.api.addr", "localhost:9090", "execution service :") rootCmd.Flags().StringVar(&txpoolApiAddr, "txpool.api.addr", "localhost:9094", "txpool service :") - rootCmd.Flags().StringVar(&datadir, utils.DataDirFlag.Name, paths.DefaultDataDir(), utils.DataDirFlag.Usage) + rootCmd.Flags().StringVar(&datadirCli, utils.DataDirFlag.Name, paths.DefaultDataDir(), utils.DataDirFlag.Usage) if err := rootCmd.MarkFlagDirname(utils.DataDirFlag.Name); err != nil { panic(err) } @@ -95,7 +96,7 @@ var rootCmd = &cobra.Command{ return fmt.Errorf("could not connect to remoteKv: %w", err) } - log.Info("TxPool started", "db", filepath.Join(datadir, "txpool")) + log.Info("TxPool started", "db", filepath.Join(datadirCli, "txpool")) sentryClients := make([]direct.SentryClient, len(sentryAddr)) for i := range sentryAddr { @@ -112,7 +113,9 @@ var rootCmd = &cobra.Command{ } cfg := txpool.DefaultConfig - cfg.DBDir = filepath.Join(datadir, "txpool") + dirs := datadir.New(datadirCli) + + cfg.DBDir = dirs.TxPool cfg.CommitEvery = 30 * time.Second cfg.PendingSubPoolLimit = pendingPoolLimit cfg.BaseFeeSubPoolLimit = baseFeePoolLimit diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 2300d9b70a1..86edbd326d1 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -35,6 +35,7 @@ import ( "github.com/ledgerwatch/erigon-lib/txpool" "github.com/ledgerwatch/erigon/cmd/downloader/downloader/torrentcfg" "github.com/ledgerwatch/erigon/node/nodecfg" + "github.com/ledgerwatch/erigon/node/nodecfg/datadir" "github.com/ledgerwatch/log/v3" "github.com/spf13/cobra" "github.com/spf13/pflag" @@ -1068,6 +1069,7 @@ func setDataDir(ctx *cli.Context, cfg *nodecfg.Config) { } else { cfg.DataDir = DataDirForNetwork(cfg.DataDir, ctx.GlobalString(ChainFlag.Name)) } + cfg.Dirs = datadir.New(cfg.DataDir) if err := 
cfg.MdbxPageSize.UnmarshalText([]byte(ctx.GlobalString(DbPageSizeFlag.Name))); err != nil { panic(err) @@ -1101,7 +1103,7 @@ func setDataDirCobra(f *pflag.FlagSet, cfg *nodecfg.Config) { } cfg.DataDir = DataDirForNetwork(cfg.DataDir, chain) - + cfg.Dirs = datadir.New(cfg.DataDir) } func setGPO(ctx *cli.Context, cfg *gasprice.Config) { @@ -1370,7 +1372,7 @@ func CheckExclusive(ctx *cli.Context, args ...interface{}) { // SetEthConfig applies eth-related command line flags to the config. func SetEthConfig(ctx *cli.Context, nodeConfig *nodecfg.Config, cfg *ethconfig.Config) { cfg.Sync.UseSnapshots = ctx.GlobalBoolT(SnapshotFlag.Name) - cfg.SnapDir = filepath.Join(nodeConfig.DataDir, "snapshots") + cfg.Dirs = datadir.New(nodeConfig.DataDir) cfg.Snapshot.KeepBlocks = ctx.GlobalBool(SnapKeepBlocksFlag.Name) cfg.Snapshot.Produce = !ctx.GlobalBool(SnapStopFlag.Name) if !ctx.GlobalIsSet(DownloaderAddrFlag.Name) { @@ -1388,7 +1390,7 @@ func SetEthConfig(ctx *cli.Context, nodeConfig *nodecfg.Config, cfg *ethconfig.C if err != nil { panic(err) } - cfg.Torrent, err = torrentcfg.New(cfg.SnapDir, + cfg.Torrent, err = torrentcfg.New(cfg.Dirs.Snap, lvl, nodeConfig.P2P.NAT, downloadRate, uploadRate, diff --git a/core/rawdb/rawdbreset/reset_stages.go b/core/rawdb/rawdbreset/reset_stages.go new file mode 100644 index 00000000000..c8842a104ca --- /dev/null +++ b/core/rawdb/rawdbreset/reset_stages.go @@ -0,0 +1,239 @@ +package rawdbreset + +import ( + "context" + "fmt" + + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/common/dbutils" + "github.com/ledgerwatch/erigon/core" + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/eth/stagedsync" + "github.com/ledgerwatch/erigon/eth/stagedsync/stages" +) + +func ResetState(db kv.RwDB, ctx context.Context, g *core.Genesis) error { + // don't reset senders here + if err := db.Update(ctx, stagedsync.ResetHashState); err != nil { + return err + } + if err := db.Update(ctx, stagedsync.ResetIH); err != nil { + return err + } + if err := db.Update(ctx, ResetHistory); err != nil { + return err + } + if err := db.Update(ctx, ResetLogIndex); err != nil { + return err + } + if err := db.Update(ctx, ResetCallTraces); err != nil { + return err + } + if err := db.Update(ctx, ResetTxLookup); err != nil { + return err + } + if err := db.Update(ctx, ResetFinish); err != nil { + return err + } + + if err := db.Update(ctx, func(tx kv.RwTx) error { return ResetExec(tx, g) }); err != nil { + return err + } + return nil +} + +func ResetBlocks(tx kv.RwTx) error { + // keep Genesis + if err := rawdb.TruncateBlocks(context.Background(), tx, 1); err != nil { + return err + } + if err := stages.SaveStageProgress(tx, stages.Bodies, 1); err != nil { + return fmt.Errorf("saving Bodies progress failed: %w", err) + } + if err := stages.SaveStageProgress(tx, stages.Headers, 1); err != nil { + return fmt.Errorf("saving Bodies progress failed: %w", err) + } + + // remove all canonical markers from this point + if err := rawdb.TruncateCanonicalHash(tx, 1); err != nil { + return err + } + if err := rawdb.TruncateTd(tx, 1); err != nil { + return err + } + hash, err := rawdb.ReadCanonicalHash(tx, 0) + if err != nil { + return err + } + if err = rawdb.WriteHeadHeaderHash(tx, hash); err != nil { + return err + } + + // ensure no grabage records left (it may happen if db is inconsistent) + if err := tx.ForEach(kv.BlockBody, dbutils.EncodeBlockNumber(2), func(k, _ []byte) error { return tx.Delete(kv.BlockBody, k, nil) }); err != nil { + return err + } + if err 
:= tx.ClearBucket(kv.NonCanonicalTxs); err != nil { + return err + } + if err := tx.ClearBucket(kv.EthTx); err != nil { + return err + } + if err := rawdb.ResetSequence(tx, kv.EthTx, 0); err != nil { + return err + } + if err := rawdb.ResetSequence(tx, kv.NonCanonicalTxs, 0); err != nil { + return err + } + + return nil +} +func ResetSenders(tx kv.RwTx) error { + if err := tx.ClearBucket(kv.Senders); err != nil { + return err + } + if err := stages.SaveStageProgress(tx, stages.Senders, 0); err != nil { + return err + } + if err := stages.SaveStagePruneProgress(tx, stages.Senders, 0); err != nil { + return err + } + return nil +} + +func ResetExec(tx kv.RwTx, g *core.Genesis) error { + if err := tx.ClearBucket(kv.HashedAccounts); err != nil { + return err + } + if err := tx.ClearBucket(kv.HashedStorage); err != nil { + return err + } + if err := tx.ClearBucket(kv.ContractCode); err != nil { + return err + } + if err := tx.ClearBucket(kv.PlainState); err != nil { + return err + } + if err := tx.ClearBucket(kv.AccountChangeSet); err != nil { + return err + } + if err := tx.ClearBucket(kv.StorageChangeSet); err != nil { + return err + } + if err := tx.ClearBucket(kv.PlainContractCode); err != nil { + return err + } + if err := tx.ClearBucket(kv.Receipts); err != nil { + return err + } + if err := tx.ClearBucket(kv.Log); err != nil { + return err + } + if err := tx.ClearBucket(kv.IncarnationMap); err != nil { + return err + } + if err := tx.ClearBucket(kv.Code); err != nil { + return err + } + if err := tx.ClearBucket(kv.CallTraceSet); err != nil { + return err + } + if err := tx.ClearBucket(kv.Epoch); err != nil { + return err + } + if err := tx.ClearBucket(kv.PendingEpoch); err != nil { + return err + } + if err := tx.ClearBucket(kv.BorReceipts); err != nil { + return err + } + if err := stages.SaveStageProgress(tx, stages.Execution, 0); err != nil { + return err + } + if err := stages.SaveStagePruneProgress(tx, stages.Execution, 0); err != nil { + return err + } + + if _, _, err := g.WriteGenesisState(tx); err != nil { + return err + } + return nil +} + +func ResetHistory(tx kv.RwTx) error { + if err := tx.ClearBucket(kv.AccountsHistory); err != nil { + return err + } + if err := tx.ClearBucket(kv.StorageHistory); err != nil { + return err + } + if err := stages.SaveStageProgress(tx, stages.AccountHistoryIndex, 0); err != nil { + return err + } + if err := stages.SaveStageProgress(tx, stages.StorageHistoryIndex, 0); err != nil { + return err + } + if err := stages.SaveStagePruneProgress(tx, stages.AccountHistoryIndex, 0); err != nil { + return err + } + if err := stages.SaveStagePruneProgress(tx, stages.StorageHistoryIndex, 0); err != nil { + return err + } + + return nil +} + +func ResetLogIndex(tx kv.RwTx) error { + if err := tx.ClearBucket(kv.LogAddressIndex); err != nil { + return err + } + if err := tx.ClearBucket(kv.LogTopicIndex); err != nil { + return err + } + if err := stages.SaveStageProgress(tx, stages.LogIndex, 0); err != nil { + return err + } + if err := stages.SaveStagePruneProgress(tx, stages.LogIndex, 0); err != nil { + return err + } + return nil +} + +func ResetCallTraces(tx kv.RwTx) error { + if err := tx.ClearBucket(kv.CallFromIndex); err != nil { + return err + } + if err := tx.ClearBucket(kv.CallToIndex); err != nil { + return err + } + if err := stages.SaveStageProgress(tx, stages.CallTraces, 0); err != nil { + return err + } + if err := stages.SaveStagePruneProgress(tx, stages.CallTraces, 0); err != nil { + return err + } + return nil +} + +func ResetTxLookup(tx 
kv.RwTx) error { + if err := tx.ClearBucket(kv.TxLookup); err != nil { + return err + } + if err := stages.SaveStageProgress(tx, stages.TxLookup, 0); err != nil { + return err + } + if err := stages.SaveStagePruneProgress(tx, stages.TxLookup, 0); err != nil { + return err + } + return nil +} + +func ResetFinish(tx kv.RwTx) error { + if err := stages.SaveStageProgress(tx, stages.Finish, 0); err != nil { + return err + } + if err := stages.SaveStagePruneProgress(tx, stages.Finish, 0); err != nil { + return err + } + return nil +} diff --git a/eth/backend.go b/eth/backend.go index 31179b946cd..90f39139524 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -766,11 +766,11 @@ func (s *Ethereum) NodesInfo(limit int) (*remote.NodesInfoReply, error) { } // sets up blockReader and client downloader -func (s *Ethereum) setUpBlockReader(ctx context.Context, isSnapshotEnabled bool, config *ethconfig.Config, stack *node.Node) (services.FullBlockReader, *snapshotsync.RoSnapshots, error) { +func (s *Ethereum) setUpBlockReader(ctx context.Context, isSnapshotEnabled bool, cfg *ethconfig.Config, stack *node.Node) (services.FullBlockReader, *snapshotsync.RoSnapshots, error) { var err error if isSnapshotEnabled { - allSnapshots := snapshotsync.NewRoSnapshots(config.Snapshot, config.SnapDir) + allSnapshots := snapshotsync.NewRoSnapshots(cfg.Snapshot, cfg.Dirs.Snap) if err = allSnapshots.Reopen(); err != nil { return nil, nil, fmt.Errorf("[Snapshots] Reopen: %w", err) } @@ -781,7 +781,7 @@ func (s *Ethereum) setUpBlockReader(ctx context.Context, isSnapshotEnabled bool, s.downloaderClient, err = downloadergrpc.NewClient(ctx, stack.Config().DownloaderAddr) } else { // start embedded Downloader - s.downloader, err = downloader.New(config.Torrent) + s.downloader, err = downloader.New(cfg.Torrent) if err != nil { return nil, nil, err } diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index ab1e1f10984..b9165de123c 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -34,6 +34,7 @@ import ( "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/eth/gasprice" "github.com/ledgerwatch/erigon/ethdb/prune" + "github.com/ledgerwatch/erigon/node/nodecfg/datadir" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/params/networkname" ) @@ -175,7 +176,7 @@ type Config struct { Snapshot Snapshot Torrent *torrentcfg.Cfg - SnapDir string + Dirs datadir.Dirs // Address to connect to external snapshot downloader // empty if you want to use internal bittorrent snapshot downloader diff --git a/node/node.go b/node/node.go index 5261177aa84..79f3eed79a6 100644 --- a/node/node.go +++ b/node/node.go @@ -30,6 +30,7 @@ import ( "github.com/c2h5oh/datasize" "github.com/ledgerwatch/erigon/node/nodecfg" + "github.com/ledgerwatch/erigon/node/nodecfg/datadir" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rpc/rpccfg" @@ -83,6 +84,7 @@ func New(conf *nodecfg.Config) (*Node, error) { return nil, err } conf.DataDir = absdatadir + conf.Dirs = datadir.New(conf.DataDir) } if conf.Log == nil { conf.Log = log.New() diff --git a/node/node_test.go b/node/node_test.go index bc0c198d897..dacc4c58a3f 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -31,6 +31,7 @@ import ( "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/crypto" "github.com/ledgerwatch/erigon/node/nodecfg" + "github.com/ledgerwatch/erigon/node/nodecfg/datadir" "github.com/ledgerwatch/erigon/p2p" "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/log/v3" @@ -44,11 
+45,13 @@ var ( ) func testNodeConfig(t *testing.T) *nodecfg.Config { - return &nodecfg.Config{ + cfg := &nodecfg.Config{ Name: "test node", P2P: p2p.Config{PrivateKey: testNodeKey}, DataDir: t.TempDir(), } + cfg.Dirs = datadir.New(cfg.DataDir) + return cfg } // Tests that an empty protocol stack can be closed more than once. diff --git a/node/nodecfg/config.go b/node/nodecfg/config.go index 048e16cb3af..4ebb9f05429 100644 --- a/node/nodecfg/config.go +++ b/node/nodecfg/config.go @@ -29,6 +29,7 @@ import ( "github.com/ledgerwatch/erigon/cmd/rpcdaemon/cli/httpcfg" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/paths" + "github.com/ledgerwatch/erigon/node/nodecfg/datadir" "github.com/ledgerwatch/erigon/p2p" "github.com/ledgerwatch/erigon/p2p/enode" "github.com/ledgerwatch/erigon/rpc/rpccfg" @@ -63,6 +64,7 @@ type Config struct { // databases or flat files. This enables ephemeral nodes which can fully reside // in memory. DataDir string + Dirs datadir.Dirs // Configuration of peer-to-peer networking. P2P p2p.Config diff --git a/node/nodecfg/datadir/dirs.go b/node/nodecfg/datadir/dirs.go new file mode 100644 index 00000000000..524a34b5b00 --- /dev/null +++ b/node/nodecfg/datadir/dirs.go @@ -0,0 +1,23 @@ +package datadir + +import ( + "path/filepath" +) + +type Dirs struct { + DataDir string + Chaindata string + Tmp string + Snap string + TxPool string +} + +func New(datadir string) Dirs { + return Dirs{ + DataDir: datadir, + Chaindata: filepath.Join(datadir, "chaindata"), + Tmp: filepath.Join(datadir, "etl-temp"), + Snap: filepath.Join(datadir, "snapshots"), + TxPool: filepath.Join(datadir, "txpool"), + } +} diff --git a/node/nodecfg/defaults.go b/node/nodecfg/defaults.go index 4f3185994cb..5ac48cfa777 100644 --- a/node/nodecfg/defaults.go +++ b/node/nodecfg/defaults.go @@ -18,6 +18,7 @@ package nodecfg import ( "github.com/ledgerwatch/erigon/common/paths" + "github.com/ledgerwatch/erigon/node/nodecfg/datadir" "github.com/ledgerwatch/erigon/p2p" "github.com/ledgerwatch/erigon/p2p/nat" "github.com/ledgerwatch/erigon/rpc/rpccfg" @@ -36,6 +37,7 @@ const ( // DefaultConfig contains reasonable default settings. 
var DefaultConfig = Config{ DataDir: paths.DefaultDataDir(), + Dirs: datadir.New(paths.DefaultDataDir()), HTTPPort: DefaultHTTPPort, HTTPModules: []string{"net", "web3"}, HTTPVirtualHosts: []string{"localhost"}, diff --git a/turbo/app/make_app.go b/turbo/app/make_app.go index b74bdd7e13c..efafa7afa0a 100644 --- a/turbo/app/make_app.go +++ b/turbo/app/make_app.go @@ -7,6 +7,7 @@ import ( "github.com/ledgerwatch/erigon/internal/flags" "github.com/ledgerwatch/erigon/node" "github.com/ledgerwatch/erigon/node/nodecfg" + "github.com/ledgerwatch/erigon/node/nodecfg/datadir" "github.com/ledgerwatch/erigon/params" "github.com/urfave/cli" @@ -55,6 +56,7 @@ func NewNodeConfig(ctx *cli.Context) *nodecfg.Config { nodeConfig.Name = "erigon" if ctx.GlobalIsSet(utils.DataDirFlag.Name) { nodeConfig.DataDir = ctx.GlobalString(utils.DataDirFlag.Name) + nodeConfig.Dirs = datadir.New(nodeConfig.DataDir) } return &nodeConfig } diff --git a/turbo/app/snapshots.go b/turbo/app/snapshots.go index 01983ff0905..fd87d640159 100644 --- a/turbo/app/snapshots.go +++ b/turbo/app/snapshots.go @@ -8,7 +8,6 @@ import ( "fmt" "io" "os" - "path" "path/filepath" "runtime" @@ -26,6 +25,7 @@ import ( "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/internal/debug" + "github.com/ledgerwatch/erigon/node/nodecfg/datadir" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/turbo/snapshotsync" "github.com/ledgerwatch/erigon/turbo/snapshotsync/snap" @@ -121,13 +121,11 @@ func doIndicesCommand(cliCtx *cli.Context) error { ctx, cancel := common.RootContext() defer cancel() - datadir := cliCtx.String(utils.DataDirFlag.Name) - snapDir := filepath.Join(datadir, "snapshots") - tmpDir := filepath.Join(datadir, etl.TmpDirName) + dirs := datadir.New(cliCtx.String(utils.DataDirFlag.Name)) rebuild := cliCtx.Bool(SnapshotRebuildFlag.Name) from := cliCtx.Uint64(SnapshotFromFlag.Name) - chainDB := mdbx.NewMDBX(log.New()).Path(path.Join(datadir, "chaindata")).Readonly().MustOpen() + chainDB := mdbx.NewMDBX(log.New()).Path(dirs.Chaindata).Readonly().MustOpen() defer chainDB.Close() if rebuild { @@ -139,7 +137,7 @@ func doIndicesCommand(cliCtx *cli.Context) error { if workers > 4 { workers = 4 } - if err := rebuildIndices(ctx, chainDB, cfg, snapDir, tmpDir, from, workers); err != nil { + if err := rebuildIndices(ctx, chainDB, cfg, dirs, from, workers); err != nil { log.Error("Error", "err", err) } } @@ -194,13 +192,12 @@ func doCompress(cliCtx *cli.Context) error { return fmt.Errorf("expecting .seg file path") } f := args[0] - datadir := cliCtx.String(utils.DataDirFlag.Name) - tmpDir := filepath.Join(datadir, etl.TmpDirName) + dirs := datadir.New(cliCtx.String(utils.DataDirFlag.Name)) workers := runtime.GOMAXPROCS(-1) - 1 if workers < 1 { workers = 1 } - c, err := compress.NewCompressor(ctx, "", f, tmpDir, compress.MinPatternScore, workers, log.LvlInfo) + c, err := compress.NewCompressor(ctx, "", f, dirs.Tmp, compress.MinPatternScore, workers, log.LvlInfo) if err != nil { return err } @@ -238,26 +235,24 @@ func doRetireCommand(cliCtx *cli.Context) error { ctx, cancel := common.RootContext() defer cancel() - datadir := cliCtx.String(utils.DataDirFlag.Name) - snapDir := filepath.Join(datadir, "snapshots") - tmpDir := filepath.Join(datadir, etl.TmpDirName) + dirs := datadir.New(cliCtx.String(utils.DataDirFlag.Name)) from := cliCtx.Uint64(SnapshotFromFlag.Name) to := cliCtx.Uint64(SnapshotToFlag.Name) every := cliCtx.Uint64(SnapshotEveryFlag.Name) - chainDB 
:= mdbx.NewMDBX(log.New()).Label(kv.ChainDB).Path(path.Join(datadir, "chaindata")).MustOpen() + chainDB := mdbx.NewMDBX(log.New()).Label(kv.ChainDB).Path(dirs.Chaindata).MustOpen() defer chainDB.Close() cfg := ethconfig.NewSnapCfg(true, true, true) chainConfig := tool.ChainConfigFromDB(chainDB) chainID, _ := uint256.FromBig(chainConfig.ChainID) - snapshots := snapshotsync.NewRoSnapshots(cfg, snapDir) + snapshots := snapshotsync.NewRoSnapshots(cfg, dirs.Snap) if err := snapshots.Reopen(); err != nil { return err } workers := cmp.Max(1, runtime.GOMAXPROCS(-1)-1) - br := snapshotsync.NewBlockRetire(workers, tmpDir, snapshots, chainDB, nil, nil) + br := snapshotsync.NewBlockRetire(workers, dirs.Tmp, snapshots, chainDB, nil, nil) log.Info("Params", "from", from, "to", to, "every", every) for i := from; i < to; i += every { @@ -291,30 +286,29 @@ func doSnapshotCommand(cliCtx *cli.Context) error { if segmentSize < 1000 { return fmt.Errorf("too small --segment.size %d", segmentSize) } - datadir := cliCtx.String(utils.DataDirFlag.Name) - snapDir := filepath.Join(datadir, "snapshots") - dir.MustExist(snapDir) - dir.MustExist(filepath.Join(snapDir, "db")) // this folder will be checked on existance - to understand that snapshots are ready - tmpDir := filepath.Join(datadir, etl.TmpDirName) - dir.MustExist(tmpDir) + dirs := datadir.New(cliCtx.String(utils.DataDirFlag.Name)) + dir.MustExist(dirs.Snap) + dir.MustExist(filepath.Join(dirs.Snap, "db")) // this folder will be checked on existance - to understand that snapshots are ready + dir.MustExist(dirs.Tmp) - chainDB := mdbx.NewMDBX(log.New()).Label(kv.ChainDB).Path(filepath.Join(datadir, "chaindata")).Readonly().MustOpen() + chainDB := mdbx.NewMDBX(log.New()).Label(kv.ChainDB).Path(dirs.Chaindata).Readonly().MustOpen() defer chainDB.Close() - if err := snapshotBlocks(ctx, chainDB, fromBlock, toBlock, segmentSize, snapDir, tmpDir); err != nil { + if err := snapshotBlocks(ctx, chainDB, fromBlock, toBlock, segmentSize, dirs.Snap, dirs.Tmp); err != nil { log.Error("Error", "err", err) } return nil } -func rebuildIndices(ctx context.Context, chainDB kv.RoDB, cfg ethconfig.Snapshot, snapDir, tmpDir string, from uint64, workers int) error { + +func rebuildIndices(ctx context.Context, chainDB kv.RoDB, cfg ethconfig.Snapshot, dirs datadir.Dirs, from uint64, workers int) error { chainConfig := tool.ChainConfigFromDB(chainDB) chainID, _ := uint256.FromBig(chainConfig.ChainID) - allSnapshots := snapshotsync.NewRoSnapshots(cfg, snapDir) + allSnapshots := snapshotsync.NewRoSnapshots(cfg, dirs.Snap) if err := allSnapshots.Reopen(); err != nil { return err } - if err := snapshotsync.BuildIndices(ctx, allSnapshots, *chainID, tmpDir, from, workers, log.LvlInfo); err != nil { + if err := snapshotsync.BuildIndices(ctx, allSnapshots, *chainID, dirs.Tmp, from, workers, log.LvlInfo); err != nil { return err } return nil diff --git a/turbo/cli/flags.go b/turbo/cli/flags.go index 5613d79d9ec..6d43a1ef9ea 100644 --- a/turbo/cli/flags.go +++ b/turbo/cli/flags.go @@ -2,7 +2,6 @@ package cli import ( "fmt" - "path/filepath" "strings" "time" @@ -17,6 +16,7 @@ import ( "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/ethdb/prune" "github.com/ledgerwatch/erigon/node/nodecfg" + "github.com/ledgerwatch/erigon/node/nodecfg/datadir" "github.com/ledgerwatch/log/v3" "github.com/spf13/pflag" "github.com/urfave/cli" @@ -283,9 +283,9 @@ func setEmbeddedRpcDaemon(ctx *cli.Context, cfg *nodecfg.Config) { jwtSecretPath = cfg.DataDir + "/jwt.hex" } c := 
&httpcfg.HttpCfg{ - Enabled: ctx.GlobalBool(utils.HTTPEnabledFlag.Name), - DataDir: cfg.DataDir, - Chaindata: filepath.Join(cfg.DataDir, "chaindata"), + Enabled: ctx.GlobalBool(utils.HTTPEnabledFlag.Name), + DataDir: cfg.DataDir, + Dirs: datadir.New(cfg.DataDir), TLSKeyFile: cfg.TLSKeyFile, TLSCACert: cfg.TLSCACert, diff --git a/turbo/stages/mock_sentry.go b/turbo/stages/mock_sentry.go index 9786442a557..9e7978647d7 100644 --- a/turbo/stages/mock_sentry.go +++ b/turbo/stages/mock_sentry.go @@ -6,7 +6,6 @@ import ( "fmt" "math/big" "os" - "path/filepath" "sync" "testing" @@ -38,6 +37,7 @@ import ( "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/ethdb/privateapi" "github.com/ledgerwatch/erigon/ethdb/prune" + "github.com/ledgerwatch/erigon/node/nodecfg/datadir" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/turbo/engineapi" @@ -193,7 +193,7 @@ func MockWithEverything(t *testing.T, gspec *core.Genesis, key *ecdsa.PrivateKey } else { tmpdir = os.TempDir() } - snapDir := filepath.Join(tmpdir, "snapshots") + dirs := datadir.New(tmpdir) var err error db := memdb.New() @@ -205,7 +205,7 @@ func MockWithEverything(t *testing.T, gspec *core.Genesis, key *ecdsa.PrivateKey t: t, Log: log.New(), tmpdir: tmpdir, - snapDir: snapDir, + snapDir: dirs.Snap, Engine: engine, ChainConfig: gspec.Config, Key: key, From 7c79d6d45389028c0eb85fa919286f1ef2a07837 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Tue, 7 Jun 2022 11:00:37 +0700 Subject: [PATCH 007/136] Cfg dirs (#4388) --- cmd/cons/commands/clique.go | 2 +- cmd/cons/commands/root.go | 4 +-- cmd/integration/commands/flags.go | 6 ++-- cmd/integration/commands/root.go | 4 +-- cmd/integration/commands/stages.go | 35 ++++++++++++------------ cmd/integration/commands/state_stages.go | 24 ++++++++-------- cmd/sentry/main.go | 8 +++--- 7 files changed, 40 insertions(+), 43 deletions(-) diff --git a/cmd/cons/commands/clique.go b/cmd/cons/commands/clique.go index c63e6f23b82..7db1fed7f28 100644 --- a/cmd/cons/commands/clique.go +++ b/cmd/cons/commands/clique.go @@ -84,7 +84,7 @@ func cliqueEngine(ctx context.Context, logger log.Logger) error { return err } } - server.db = openDB(filepath.Join(datadir, "clique", "db"), logger) + server.db = openDB(filepath.Join(datadirCli, "clique", "db"), logger) server.c = clique.New(server.chainConfig, params.CliqueSnapshot, server.db) <-ctx.Done() return nil diff --git a/cmd/cons/commands/root.go b/cmd/cons/commands/root.go index 0e4a57d5c37..023ff094d52 100644 --- a/cmd/cons/commands/root.go +++ b/cmd/cons/commands/root.go @@ -16,7 +16,7 @@ import ( var ( consensusAddr string // Address of the consensus engine : - datadir string // Path to the working dir + datadirCli string // Path to the working dir config string // `file:`` to specify config file in file system, `embed:`` to use embedded file, `test` to register test interface and receive config from test driver ) @@ -52,7 +52,7 @@ func must(err error) { } func withDataDir(cmd *cobra.Command) { - cmd.Flags().StringVar(&datadir, "datadir", paths.DefaultDataDir(), "directory where databases and temporary files are kept") + cmd.Flags().StringVar(&datadirCli, "datadir", paths.DefaultDataDir(), "directory where databases and temporary files are kept") must(cmd.MarkFlagDirname("datadir")) } diff --git a/cmd/integration/commands/flags.go b/cmd/integration/commands/flags.go index 06ebb91fc74..dbbed8ef6d9 100644 --- a/cmd/integration/commands/flags.go +++ 
b/cmd/integration/commands/flags.go @@ -17,7 +17,7 @@ var ( batchSizeStr string reset bool bucket string - datadir, toChaindata string + datadirCli, toChaindata string migration string integrityFast, integritySlow bool file string @@ -90,7 +90,7 @@ func withBucket(cmd *cobra.Command) { } func withDataDir2(cmd *cobra.Command) { - cmd.Flags().StringVar(&datadir, utils.DataDirFlag.Name, paths.DefaultDataDir(), utils.DataDirFlag.Usage) + cmd.Flags().StringVar(&datadirCli, utils.DataDirFlag.Name, paths.DefaultDataDir(), utils.DataDirFlag.Usage) must(cmd.MarkFlagDirname(utils.DataDirFlag.Name)) must(cmd.MarkFlagRequired(utils.DataDirFlag.Name)) cmd.Flags().IntVar(&databaseVerbosity, "database.verbosity", 2, "Enabling internal db logs. Very high verbosity levels may require recompile db. Default: 2, means warning.") @@ -98,7 +98,7 @@ func withDataDir2(cmd *cobra.Command) { } func withDataDir(cmd *cobra.Command) { - cmd.Flags().StringVar(&datadir, "datadir", paths.DefaultDataDir(), "data directory for temporary ELT files") + cmd.Flags().StringVar(&datadirCli, "datadir", paths.DefaultDataDir(), "data directory for temporary ELT files") must(cmd.MarkFlagDirname("datadir")) cmd.Flags().StringVar(&chaindata, "chaindata", "", "path to the db") diff --git a/cmd/integration/commands/root.go b/cmd/integration/commands/root.go index f9111257a0c..c9614c291a9 100644 --- a/cmd/integration/commands/root.go +++ b/cmd/integration/commands/root.go @@ -21,7 +21,7 @@ var rootCmd = &cobra.Command{ panic(err) } if chaindata == "" { - chaindata = filepath.Join(datadir, "chaindata") + chaindata = filepath.Join(datadirCli, "chaindata") } }, PersistentPostRun: func(cmd *cobra.Command, args []string) { @@ -46,7 +46,7 @@ func openDB(path string, logger log.Logger, applyMigrations bool) kv.RwDB { log.Info("Re-Opening DB in exclusive mode to apply DB migrations") db.Close() db = openKV(label, logger, path, true) - if err := migrations.NewMigrator(label).Apply(db, datadir); err != nil { + if err := migrations.NewMigrator(label).Apply(db, datadirCli); err != nil { panic(err) } db.Close() diff --git a/cmd/integration/commands/stages.go b/cmd/integration/commands/stages.go index 5a384fd02d5..52feb4185c9 100644 --- a/cmd/integration/commands/stages.go +++ b/cmd/integration/commands/stages.go @@ -536,7 +536,7 @@ func stageBodies(db kv.RwDB, ctx context.Context) error { } func stageSenders(db kv.RwDB, ctx context.Context) error { - tmpdir := filepath.Join(datadir, etl.TmpDirName) + tmpdir := filepath.Join(datadirCli, etl.TmpDirName) _, _, chainConfig, _, sync, _, _ := newSync(ctx, db, nil) must(sync.SetCurrentStage(stages.Senders)) @@ -635,7 +635,7 @@ func stageSenders(db kv.RwDB, ctx context.Context) error { func stageExec(db kv.RwDB, ctx context.Context) error { pm, engine, chainConfig, vmConfig, sync, _, _ := newSync(ctx, db, nil) must(sync.SetCurrentStage(stages.Execution)) - tmpdir := filepath.Join(datadir, etl.TmpDirName) + tmpdir := filepath.Join(datadirCli, etl.TmpDirName) if reset { genesis, _ := genesisByChain(chain) @@ -695,7 +695,7 @@ func stageExec(db kv.RwDB, ctx context.Context) error { func stageTrie(db kv.RwDB, ctx context.Context) error { pm, _, chainConfig, _, sync, _, _ := newSync(ctx, db, nil) must(sync.SetCurrentStage(stages.IntermediateHashes)) - tmpdir := filepath.Join(datadir, etl.TmpDirName) + tmpdir := filepath.Join(datadirCli, etl.TmpDirName) tx, err := db.BeginRw(ctx) if err != nil { @@ -747,7 +747,7 @@ func stageTrie(db kv.RwDB, ctx context.Context) error { } func stageHashState(db kv.RwDB, ctx 
context.Context) error { - tmpdir := filepath.Join(datadir, etl.TmpDirName) + tmpdir := filepath.Join(datadirCli, etl.TmpDirName) pm, _, _, _, sync, _, _ := newSync(ctx, db, nil) must(sync.SetCurrentStage(stages.HashState)) @@ -801,7 +801,7 @@ func stageHashState(db kv.RwDB, ctx context.Context) error { } func stageLogIndex(db kv.RwDB, ctx context.Context) error { - tmpdir := filepath.Join(datadir, etl.TmpDirName) + tmpdir := filepath.Join(datadirCli, etl.TmpDirName) pm, _, _, _, sync, _, _ := newSync(ctx, db, nil) must(sync.SetCurrentStage(stages.LogIndex)) @@ -856,7 +856,7 @@ func stageLogIndex(db kv.RwDB, ctx context.Context) error { } func stageCallTraces(kv kv.RwDB, ctx context.Context) error { - tmpdir := filepath.Join(datadir, etl.TmpDirName) + tmpdir := filepath.Join(datadirCli, etl.TmpDirName) pm, _, _, _, sync, _, _ := newSync(ctx, kv, nil) must(sync.SetCurrentStage(stages.CallTraces)) @@ -917,7 +917,7 @@ func stageCallTraces(kv kv.RwDB, ctx context.Context) error { } func stageHistory(db kv.RwDB, ctx context.Context) error { - tmpdir := filepath.Join(datadir, etl.TmpDirName) + tmpdir := filepath.Join(datadirCli, etl.TmpDirName) pm, _, _, _, sync, _, _ := newSync(ctx, db, nil) must(sync.SetCurrentStage(stages.AccountHistoryIndex)) @@ -987,7 +987,7 @@ func stageHistory(db kv.RwDB, ctx context.Context) error { } func stageTxLookup(db kv.RwDB, ctx context.Context) error { - tmpdir := filepath.Join(datadir, etl.TmpDirName) + tmpdir := filepath.Join(datadirCli, etl.TmpDirName) pm, _, chainConfig, _, sync, _, _ := newSync(ctx, db, nil) must(sync.SetCurrentStage(stages.TxLookup)) @@ -1089,7 +1089,7 @@ func allSnapshots(cc *params.ChainConfig, db kv.RwDB) *snapshotsync.RoSnapshots }); err != nil { panic(err) } - _allSnapshotsSingleton = snapshotsync.NewRoSnapshots(snapCfg, filepath.Join(datadir, "snapshots")) + _allSnapshotsSingleton = snapshotsync.NewRoSnapshots(snapCfg, filepath.Join(datadirCli, "snapshots")) if useSnapshots { if err := _allSnapshotsSingleton.Reopen(); err != nil { panic(err) @@ -1114,7 +1114,7 @@ func getBlockReader(cc *params.ChainConfig, db kv.RwDB) (blockReader services.Fu } func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig) (prune.Mode, consensus.Engine, *params.ChainConfig, *vm.Config, *stagedsync.Sync, *stagedsync.Sync, stagedsync.MiningState) { - tmpdir := filepath.Join(datadir, etl.TmpDirName) + tmpdir := filepath.Join(datadirCli, etl.TmpDirName) logger := log.New() var pm prune.Mode @@ -1158,24 +1158,23 @@ func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig) if miningConfig != nil { cfg.Miner = *miningConfig } - cfg.Dirs = datadir.New(datadir) + cfg.Dirs = datadir.New(datadirCli) allSn := allSnapshots(chainConfig, db) cfg.Snapshot = allSn.Cfg() - cfg.SnapDir = filepath.Join(datadir, "snapshots") var engine consensus.Engine config := ðconfig.Defaults if chainConfig.Clique != nil { c := params.CliqueSnapshot - c.DBPath = filepath.Join(datadir, "clique", "db") - engine = ethconsensusconfig.CreateConsensusEngine(chainConfig, logger, c, config.Miner.Notify, config.Miner.Noverify, "", true, datadir, allSn) + c.DBPath = filepath.Join(datadirCli, "clique", "db") + engine = ethconsensusconfig.CreateConsensusEngine(chainConfig, logger, c, config.Miner.Notify, config.Miner.Noverify, "", true, datadirCli, allSn) } else if chainConfig.Aura != nil { - engine = ethconsensusconfig.CreateConsensusEngine(chainConfig, logger, ¶ms.AuRaConfig{DBPath: filepath.Join(datadir, "aura")}, config.Miner.Notify, 
config.Miner.Noverify, "", true, datadir, allSn) + engine = ethconsensusconfig.CreateConsensusEngine(chainConfig, logger, ¶ms.AuRaConfig{DBPath: filepath.Join(datadirCli, "aura")}, config.Miner.Notify, config.Miner.Noverify, "", true, datadirCli, allSn) } else if chainConfig.Parlia != nil { - consensusConfig := ¶ms.ParliaConfig{DBPath: filepath.Join(datadir, "parlia")} - engine = ethconsensusconfig.CreateConsensusEngine(chainConfig, logger, consensusConfig, config.Miner.Notify, config.Miner.Noverify, "", true, datadir, allSn) + consensusConfig := ¶ms.ParliaConfig{DBPath: filepath.Join(datadirCli, "parlia")} + engine = ethconsensusconfig.CreateConsensusEngine(chainConfig, logger, consensusConfig, config.Miner.Notify, config.Miner.Noverify, "", true, datadirCli, allSn) } else if chainConfig.Bor != nil { consensusConfig := &config.Bor - engine = ethconsensusconfig.CreateConsensusEngine(chainConfig, logger, consensusConfig, config.Miner.Notify, config.Miner.Noverify, HeimdallURL, false, datadir, allSn) + engine = ethconsensusconfig.CreateConsensusEngine(chainConfig, logger, consensusConfig, config.Miner.Notify, config.Miner.Noverify, HeimdallURL, false, datadirCli, allSn) } else { //ethash engine = ethash.NewFaker() } diff --git a/cmd/integration/commands/state_stages.go b/cmd/integration/commands/state_stages.go index b10bd65f510..556777812c2 100644 --- a/cmd/integration/commands/state_stages.go +++ b/cmd/integration/commands/state_stages.go @@ -6,17 +6,12 @@ import ( "encoding/json" "fmt" "os" - "path/filepath" "sort" "time" "github.com/c2h5oh/datasize" common2 "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/node/nodecfg" - "github.com/spf13/cobra" - - "github.com/ledgerwatch/erigon-lib/etl" "github.com/ledgerwatch/erigon/cmd/utils" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/changeset" @@ -32,9 +27,12 @@ import ( "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/ethdb/bitmapdb" + "github.com/ledgerwatch/erigon/node/nodecfg" + "github.com/ledgerwatch/erigon/node/nodecfg/datadir" "github.com/ledgerwatch/erigon/params" erigoncli "github.com/ledgerwatch/erigon/turbo/cli" "github.com/ledgerwatch/log/v3" + "github.com/spf13/cobra" ) var stateStags = &cobra.Command{ @@ -154,7 +152,7 @@ func syncBySmallSteps(db kv.RwDB, miningConfig params.MiningConfig, ctx context. } defer tx.Rollback() - tmpDir := filepath.Join(datadir, etl.TmpDirName) + dirs := datadir.New(datadirCli) quit := ctx.Done() var batchSize datasize.ByteSize @@ -183,7 +181,7 @@ func syncBySmallSteps(db kv.RwDB, miningConfig params.MiningConfig, ctx context. 
stateStages.DisableStages(stages.Headers, stages.BlockHashes, stages.Bodies, stages.Senders) - execCfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, changeSetHook, chainConfig, engine, vmConfig, nil, false, tmpDir, getBlockReader(chainConfig, db)) + execCfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, changeSetHook, chainConfig, engine, vmConfig, nil, false, dirs.Tmp, getBlockReader(chainConfig, db)) execUntilFunc := func(execToBlock uint64) func(firstCycle bool, badBlockUnwind bool, stageState *stagedsync.StageState, unwinder stagedsync.Unwinder, tx kv.RwTx) error { return func(firstCycle bool, badBlockUnwind bool, s *stagedsync.StageState, unwinder stagedsync.Unwinder, tx kv.RwTx) error { @@ -311,7 +309,7 @@ func syncBySmallSteps(db kv.RwDB, miningConfig params.MiningConfig, ctx context. miner.MiningConfig.ExtraData = nextBlock.Extra() miningStages.MockExecFunc(stages.MiningCreateBlock, func(firstCycle bool, badBlockUnwind bool, s *stagedsync.StageState, u stagedsync.Unwinder, tx kv.RwTx) error { err = stagedsync.SpawnMiningCreateBlockStage(s, tx, - stagedsync.StageMiningCreateBlockCfg(db, miner, *chainConfig, engine, nil, nil, nil, tmpDir), + stagedsync.StageMiningCreateBlockCfg(db, miner, *chainConfig, engine, nil, nil, nil, dirs.Tmp), quit) if err != nil { return err @@ -408,7 +406,7 @@ func checkMinedBlock(b1, b2 *types.Block, chainConfig *params.ChainConfig) { func loopIh(db kv.RwDB, ctx context.Context, unwind uint64) error { _, _, chainConfig, _, sync, _, _ := newSync(ctx, db, nil) - tmpdir := filepath.Join(datadir, etl.TmpDirName) + dirs := datadir.New(datadirCli) tx, err := db.BeginRw(ctx) if err != nil { return err @@ -423,12 +421,12 @@ func loopIh(db kv.RwDB, ctx context.Context, unwind uint64) error { to := execStage.BlockNumber - unwind _ = sync.SetCurrentStage(stages.HashState) u := &stagedsync.UnwindState{ID: stages.HashState, UnwindPoint: to} - if err = stagedsync.UnwindHashStateStage(u, stage(sync, tx, nil, stages.HashState), tx, stagedsync.StageHashStateCfg(db, tmpdir), ctx); err != nil { + if err = stagedsync.UnwindHashStateStage(u, stage(sync, tx, nil, stages.HashState), tx, stagedsync.StageHashStateCfg(db, dirs.Tmp), ctx); err != nil { return err } _ = sync.SetCurrentStage(stages.IntermediateHashes) u = &stagedsync.UnwindState{ID: stages.IntermediateHashes, UnwindPoint: to} - if err = stagedsync.UnwindIntermediateHashesStage(u, stage(sync, tx, nil, stages.IntermediateHashes), tx, stagedsync.StageTrieCfg(db, true, true, tmpdir, getBlockReader(chainConfig, db)), ctx); err != nil { + if err = stagedsync.UnwindIntermediateHashesStage(u, stage(sync, tx, nil, stages.IntermediateHashes), tx, stagedsync.StageTrieCfg(db, true, true, dirs.Tmp, getBlockReader(chainConfig, db)), ctx); err != nil { return err } must(tx.Commit()) @@ -473,7 +471,7 @@ func loopIh(db kv.RwDB, ctx context.Context, unwind uint64) error { func loopExec(db kv.RwDB, ctx context.Context, unwind uint64) error { pm, engine, chainConfig, vmConfig, sync, _, _ := newSync(ctx, db, nil) - tmpdir := filepath.Join(datadir, etl.TmpDirName) + dirs := datadir.New(datadirCli) tx, err := db.BeginRw(ctx) if err != nil { @@ -493,7 +491,7 @@ func loopExec(db kv.RwDB, ctx context.Context, unwind uint64) error { from := progress(tx, stages.Execution) to := from + unwind - cfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, nil, chainConfig, engine, vmConfig, nil, false, tmpdir, getBlockReader(chainConfig, db)) + cfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, nil, chainConfig, engine, 
vmConfig, nil, false, dirs.Tmp, getBlockReader(chainConfig, db))
 // set block limit of execute stage
 sync.MockExecFunc(stages.Execution, func(firstCycle bool, badBlockUnwind bool, stageState *stagedsync.StageState, unwinder stagedsync.Unwinder, tx kv.RwTx) error {
diff --git a/cmd/sentry/main.go
index 478c84cfbb8..cf12e8ff66d 100644
--- a/cmd/sentry/main.go
+++ b/cmd/sentry/main.go
@@ -18,7 +18,7 @@ import (
 var (
 sentryAddr string // Address of the sentry :
- datadir string // Path to td working dir
+ datadirCli string // Path to td working dir
 natSetting string // NAT setting
 port int // Listening port
@@ -37,7 +37,7 @@ func init() {
 utils.CobraFlags(rootCmd, append(debug.Flags, utils.MetricFlags...))
 rootCmd.Flags().StringVar(&sentryAddr, "sentry.api.addr", "localhost:9091", "grpc addresses")
- rootCmd.Flags().StringVar(&datadir, utils.DataDirFlag.Name, paths.DefaultDataDir(), utils.DataDirFlag.Usage)
+ rootCmd.Flags().StringVar(&datadirCli, utils.DataDirFlag.Name, paths.DefaultDataDir(), utils.DataDirFlag.Usage)
 rootCmd.Flags().StringVar(&natSetting, utils.NATFlag.Name, utils.NATFlag.Value, utils.NATFlag.Usage)
 rootCmd.Flags().IntVar(&port, utils.ListenPortFlag.Name, utils.ListenPortFlag.Value, utils.ListenPortFlag.Usage)
 rootCmd.Flags().StringSliceVar(&staticPeers, utils.StaticPeersFlag.Name, []string{}, utils.StaticPeersFlag.Usage)
@@ -72,7 +72,7 @@ var rootCmd = &cobra.Command{
 nodeConfig := node2.NewNodeConfig()
 p2pConfig, err := utils.NewP2PConfig(
 nodiscover,
- datadir,
+ datadirCli,
 netRestrict,
 natSetting,
 maxPeers,
@@ -87,7 +87,7 @@ var rootCmd = &cobra.Command{
 return err
 }
- return sentry.Sentry(cmd.Context(), datadir, sentryAddr, discoveryDNS, p2pConfig, uint(p), healthCheck)
+ return sentry.Sentry(cmd.Context(), datadirCli, sentryAddr, discoveryDNS, p2pConfig, uint(p), healthCheck)
 },
 }

From e146b66e35ac5e2e22b0bbe52bbdbe1d5fc8cb7b Mon Sep 17 00:00:00 2001
From: Alex Sharov
Date: Tue, 7 Jun 2022 11:54:04 +0700
Subject: [PATCH 008/136] more usage of dirs object #4390

---
 cmd/sentry/main.go | 6 +++--
 cmd/sentry/sentry/sentry_grpc_server.go | 5 ++--
 cmd/utils/flags.go | 34 ++++++++++++-------------
 eth/backend.go | 5 ++--
 node/node.go | 27 +++++---------------
 node/node_test.go | 14 +++++-----
 node/nodecfg/config.go | 17 ++++++-------
 node/nodecfg/config_test.go | 9 ++++---
 node/nodecfg/datadir/dirs.go | 14 ++++++++++
 node/nodecfg/defaults.go | 1 -
 turbo/app/make_app.go | 3 +--
 turbo/cli/flags.go | 6 ++---
 12 files changed, 68 insertions(+), 73 deletions(-)

diff --git a/cmd/sentry/main.go
index cf12e8ff66d..80ed49d8522 100644
--- a/cmd/sentry/main.go
+++ b/cmd/sentry/main.go
@@ -10,6 +10,7 @@ import (
 "github.com/ledgerwatch/erigon/common/paths"
 "github.com/ledgerwatch/erigon/eth/protocols/eth"
 "github.com/ledgerwatch/erigon/internal/debug"
+ "github.com/ledgerwatch/erigon/node/nodecfg/datadir"
 node2 "github.com/ledgerwatch/erigon/turbo/node"
 "github.com/spf13/cobra"
 )
@@ -69,10 +70,11 @@ var rootCmd = &cobra.Command{
 RunE: func(cmd *cobra.Command, args []string) error {
 p := eth.ETH66
+ dirs := datadir.New(datadirCli)
 nodeConfig := node2.NewNodeConfig()
 p2pConfig, err := utils.NewP2PConfig(
 nodiscover,
- datadirCli,
+ dirs,
 netRestrict,
 natSetting,
 maxPeers,
@@ -87,7 +89,7 @@ var rootCmd = &cobra.Command{
 return err
 }
- return sentry.Sentry(cmd.Context(), datadirCli, sentryAddr, discoveryDNS, p2pConfig, uint(p), healthCheck)
+ return sentry.Sentry(cmd.Context(), dirs, sentryAddr, discoveryDNS, p2pConfig, uint(p), healthCheck)
 },
 }
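
The hunks above and below all follow the same refactor: build a datadir.Dirs value once from the --datadir flag and pass that struct around, instead of re-deriving sub-paths with filepath.Join at every call site. The following minimal sketch shows how the helper from node/nodecfg/datadir/dirs.go is meant to be used; it is not part of the patch, and the literal "/data/erigon" path is only an example (in the real flow the value comes from ctx.GlobalString(utils.DataDirFlag.Name)).

package main

import (
	"fmt"

	"github.com/ledgerwatch/erigon/node/nodecfg/datadir"
)

func main() {
	// Derive every per-node directory from a single datadir root.
	dirs := datadir.New("/data/erigon") // example path, normally taken from the --datadir flag

	fmt.Println(dirs.Chaindata) // .../chaindata under the datadir root
	fmt.Println(dirs.Snap)      // .../snapshots
	fmt.Println(dirs.Tmp)       // .../etl-temp
	fmt.Println(dirs.TxPool)    // .../txpool
}

Because this patch also makes datadir.New resolve the root with filepath.Abs (see the dirs.go hunk further below), callers such as sentry.Sentry and utils.NewP2PConfig can rely on absolute paths without normalizing them again.
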
diff --git a/cmd/sentry/sentry/sentry_grpc_server.go b/cmd/sentry/sentry/sentry_grpc_server.go index 817972dda60..965a1675795 100644 --- a/cmd/sentry/sentry/sentry_grpc_server.go +++ b/cmd/sentry/sentry/sentry_grpc_server.go @@ -27,6 +27,7 @@ import ( "github.com/ledgerwatch/erigon/common/debug" "github.com/ledgerwatch/erigon/core/forkid" "github.com/ledgerwatch/erigon/eth/protocols/eth" + "github.com/ledgerwatch/erigon/node/nodecfg/datadir" "github.com/ledgerwatch/erigon/p2p" "github.com/ledgerwatch/erigon/p2p/dnsdisc" "github.com/ledgerwatch/erigon/p2p/enode" @@ -532,8 +533,8 @@ func NewGrpcServer(ctx context.Context, dialCandidates enode.Iterator, readNodeI } // Sentry creates and runs standalone sentry -func Sentry(ctx context.Context, datadir string, sentryAddr string, discoveryDNS []string, cfg *p2p.Config, protocolVersion uint, healthCheck bool) error { - dir.MustExist(datadir) +func Sentry(ctx context.Context, dirs datadir.Dirs, sentryAddr string, discoveryDNS []string, cfg *p2p.Config, protocolVersion uint, healthCheck bool) error { + dir.MustExist(dirs.DataDir) sentryServer := NewGrpcServer(ctx, nil, func() *eth.NodeInfo { return nil }, cfg, protocolVersion) sentryServer.discoveryDNS = discoveryDNS diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 86edbd326d1..46b54df4c9b 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -816,7 +816,7 @@ func ParseNodesFromURLs(urls []string) ([]*enode.Node, error) { // - doesn't setup bootnodes - they will set when genesisHash will know func NewP2PConfig( nodiscover bool, - datadir string, + dirs datadir.Dirs, netRestrict string, natSetting string, maxPeers int, @@ -830,12 +830,12 @@ func NewP2PConfig( var enodeDBPath string switch protocol { case eth.ETH66: - enodeDBPath = filepath.Join(datadir, "nodes", "eth66") + enodeDBPath = filepath.Join(dirs.Nodes, "eth66") default: return nil, fmt.Errorf("unknown protocol: %v", protocol) } - serverKey, err := nodeKey(datadir) + serverKey, err := nodeKey(dirs.DataDir) if err != nil { return nil, err } @@ -1019,7 +1019,7 @@ func SetP2PConfig(ctx *cli.Context, cfg *p2p.Config, nodeName, datadir string) { func SetNodeConfig(ctx *cli.Context, cfg *nodecfg.Config) { setDataDir(ctx, cfg) setNodeUserIdent(ctx, cfg) - SetP2PConfig(ctx, &cfg.P2P, cfg.NodeName(), cfg.DataDir) + SetP2PConfig(ctx, &cfg.P2P, cfg.NodeName(), cfg.Dirs.DataDir) cfg.DownloaderAddr = strings.TrimSpace(ctx.GlobalString(DownloaderAddrFlag.Name)) cfg.SentryLogPeerInfo = ctx.GlobalIsSet(SentryLogPeerInfoFlag.Name) @@ -1065,11 +1065,11 @@ func DataDirForNetwork(datadir string, network string) string { func setDataDir(ctx *cli.Context, cfg *nodecfg.Config) { if ctx.GlobalIsSet(DataDirFlag.Name) { - cfg.DataDir = ctx.GlobalString(DataDirFlag.Name) + cfg.Dirs.DataDir = ctx.GlobalString(DataDirFlag.Name) } else { - cfg.DataDir = DataDirForNetwork(cfg.DataDir, ctx.GlobalString(ChainFlag.Name)) + cfg.Dirs.DataDir = DataDirForNetwork(cfg.Dirs.DataDir, ctx.GlobalString(ChainFlag.Name)) } - cfg.Dirs = datadir.New(cfg.DataDir) + cfg.Dirs = datadir.New(cfg.Dirs.DataDir) if err := cfg.MdbxPageSize.UnmarshalText([]byte(ctx.GlobalString(DbPageSizeFlag.Name))); err != nil { panic(err) @@ -1097,13 +1097,13 @@ func setDataDirCobra(f *pflag.FlagSet, cfg *nodecfg.Config) { panic(err) } if dirname != "" { - cfg.DataDir = dirname + cfg.Dirs.DataDir = dirname } else { - cfg.DataDir = DataDirForNetwork(cfg.DataDir, chain) + cfg.Dirs.DataDir = DataDirForNetwork(cfg.Dirs.DataDir, chain) } - cfg.DataDir = DataDirForNetwork(cfg.DataDir, chain) - 
cfg.Dirs = datadir.New(cfg.DataDir) + cfg.Dirs.DataDir = DataDirForNetwork(cfg.Dirs.DataDir, chain) + cfg.Dirs = datadir.New(cfg.Dirs.DataDir) } func setGPO(ctx *cli.Context, cfg *gasprice.Config) { @@ -1372,7 +1372,7 @@ func CheckExclusive(ctx *cli.Context, args ...interface{}) { // SetEthConfig applies eth-related command line flags to the config. func SetEthConfig(ctx *cli.Context, nodeConfig *nodecfg.Config, cfg *ethconfig.Config) { cfg.Sync.UseSnapshots = ctx.GlobalBoolT(SnapshotFlag.Name) - cfg.Dirs = datadir.New(nodeConfig.DataDir) + cfg.Dirs = nodeConfig.Dirs cfg.Snapshot.KeepBlocks = ctx.GlobalBool(SnapKeepBlocksFlag.Name) cfg.Snapshot.Produce = !ctx.GlobalBool(SnapStopFlag.Name) if !ctx.GlobalIsSet(DownloaderAddrFlag.Name) { @@ -1414,12 +1414,12 @@ func SetEthConfig(ctx *cli.Context, nodeConfig *nodecfg.Config, cfg *ethconfig.C setTxPool(ctx, &cfg.DeprecatedTxPool) cfg.TxPool = core.DefaultTxPool2Config(cfg.DeprecatedTxPool) - cfg.TxPool.DBDir = filepath.Join(nodeConfig.DataDir, "txpool") + cfg.TxPool.DBDir = nodeConfig.Dirs.TxPool - setEthash(ctx, nodeConfig.DataDir, cfg) - setClique(ctx, &cfg.Clique, nodeConfig.DataDir) - setAuRa(ctx, &cfg.Aura, nodeConfig.DataDir) - setParlia(ctx, &cfg.Parlia, nodeConfig.DataDir) + setEthash(ctx, nodeConfig.Dirs.DataDir, cfg) + setClique(ctx, &cfg.Clique, nodeConfig.Dirs.DataDir) + setAuRa(ctx, &cfg.Aura, nodeConfig.Dirs.DataDir) + setParlia(ctx, &cfg.Parlia, nodeConfig.Dirs.DataDir) setMiner(ctx, &cfg.Miner) setWhitelist(ctx, cfg) setBorConfig(ctx, cfg) diff --git a/eth/backend.go b/eth/backend.go index 90f39139524..2288d9ca4ad 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -36,7 +36,6 @@ import ( "github.com/holiman/uint256" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/direct" - "github.com/ledgerwatch/erigon-lib/etl" proto_downloader "github.com/ledgerwatch/erigon-lib/gointerfaces/downloader" "github.com/ledgerwatch/erigon-lib/gointerfaces/grpcutil" "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" @@ -150,7 +149,7 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere config.Miner.GasPrice = new(big.Int).Set(ethconfig.Defaults.Miner.GasPrice) } - tmpdir := filepath.Join(stack.Config().DataDir, etl.TmpDirName) + tmpdir := stack.Config().Dirs.Tmp if err := os.RemoveAll(tmpdir); err != nil { // clean it on startup return nil, fmt.Errorf("clean tmp dir: %s, %w", tmpdir, err) } @@ -242,7 +241,7 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere } cfg66 := stack.Config().P2P - cfg66.NodeDatabase = filepath.Join(stack.Config().DataDir, "nodes", "eth66") + cfg66.NodeDatabase = filepath.Join(stack.Config().Dirs.Nodes, "eth66") server66 := sentry.NewGrpcServer(backend.sentryCtx, d66, readNodeInfo, &cfg66, eth.ETH66) backend.sentryServers = append(backend.sentryServers, server66) sentries = []direct.SentryClient{direct.NewSentryClientDirect(eth.ETH66, server66)} diff --git a/node/node.go b/node/node.go index 79f3eed79a6..e0f07f43c4b 100644 --- a/node/node.go +++ b/node/node.go @@ -30,7 +30,6 @@ import ( "github.com/c2h5oh/datasize" "github.com/ledgerwatch/erigon/node/nodecfg" - "github.com/ledgerwatch/erigon/node/nodecfg/datadir" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rpc/rpccfg" @@ -78,14 +77,6 @@ func New(conf *nodecfg.Config) (*Node, error) { // working directory don't affect the node. 
confCopy := *conf conf = &confCopy - if conf.DataDir != "" { - absdatadir, err := filepath.Abs(conf.DataDir) - if err != nil { - return nil, err - } - conf.DataDir = absdatadir - conf.Dirs = datadir.New(conf.DataDir) - } if conf.Log == nil { conf.Log = log.New() } @@ -277,11 +268,11 @@ func (n *Node) stopServices(running []Lifecycle) error { } func (n *Node) openDataDir() error { - if n.config.DataDir == "" { + if n.config.Dirs.DataDir == "" { return nil // ephemeral } - instdir := n.config.DataDir + instdir := n.config.Dirs.DataDir if err := os.MkdirAll(instdir, 0700); err != nil { return err } @@ -477,7 +468,7 @@ func (n *Node) Server() *p2p.Server { // DataDir retrieves the current datadir used by the protocol stack. func (n *Node) DataDir() string { - return n.config.DataDir + return n.config.Dirs.DataDir } // HTTPEndpoint returns the URL of the HTTP server. Note that this URL does not @@ -505,18 +496,12 @@ func OpenDatabase(config *nodecfg.Config, logger log.Logger, label kv.Label) (kv name = "test" } var db kv.RwDB - if config.DataDir == "" { + if config.Dirs.DataDir == "" { db = memdb.New() return db, nil } - oldDbPath := filepath.Join(config.DataDir, "erigon", name) - dbPath := filepath.Join(config.DataDir, name) - if _, err := os.Stat(oldDbPath); err == nil { - log.Error("Old directory location found", "path", oldDbPath, "please move to new path", dbPath) - return nil, fmt.Errorf("safety error, see log message") - } - + dbPath := filepath.Join(config.Dirs.DataDir, name) var openFunc func(exclusive bool) (kv.RwDB, error) log.Info("Opening Database", "label", name, "path", dbPath) openFunc = func(exclusive bool) (kv.RwDB, error) { @@ -550,7 +535,7 @@ func OpenDatabase(config *nodecfg.Config, logger log.Logger, label kv.Label) (kv if err != nil { return nil, err } - if err = migrator.Apply(db, config.DataDir); err != nil { + if err = migrator.Apply(db, config.Dirs.DataDir); err != nil { return nil, err } db.Close() diff --git a/node/node_test.go b/node/node_test.go index dacc4c58a3f..891a11abf5b 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -45,13 +45,11 @@ var ( ) func testNodeConfig(t *testing.T) *nodecfg.Config { - cfg := &nodecfg.Config{ - Name: "test node", - P2P: p2p.Config{PrivateKey: testNodeKey}, - DataDir: t.TempDir(), + return &nodecfg.Config{ + Name: "test node", + P2P: p2p.Config{PrivateKey: testNodeKey}, + Dirs: datadir.New(t.TempDir()), } - cfg.Dirs = datadir.New(cfg.DataDir) - return cfg } // Tests that an empty protocol stack can be closed more than once. @@ -110,7 +108,7 @@ func TestNodeUsedDataDir(t *testing.T) { dir := t.TempDir() // Create a new node based on the data directory - original, originalErr := New(&nodecfg.Config{DataDir: dir}) + original, originalErr := New(&nodecfg.Config{Dirs: datadir.New(dir)}) if originalErr != nil { t.Fatalf("failed to create original protocol stack: %v", originalErr) } @@ -120,7 +118,7 @@ func TestNodeUsedDataDir(t *testing.T) { } // Create a second node based on the same data directory and ensure failure - if _, err := New(&nodecfg.Config{DataDir: dir}); !errors.Is(err, ErrDataDirUsed) { + if _, err := New(&nodecfg.Config{Dirs: datadir.New(dir)}); !errors.Is(err, ErrDataDirUsed) { t.Fatalf("duplicate datadir failure mismatch: have %v, want %v", err, ErrDataDirUsed) } } diff --git a/node/nodecfg/config.go b/node/nodecfg/config.go index 4ebb9f05429..92fa118957c 100644 --- a/node/nodecfg/config.go +++ b/node/nodecfg/config.go @@ -58,13 +58,12 @@ type Config struct { // in the devp2p node identifier. 
Version string `toml:"-"` - // DataDir is the file system folder the node should use for any data storage + // Dirs is the file system folder the node should use for any data storage // requirements. The configured data directory will not be directly shared with // registered services, instead those can use utility methods to create/access // databases or flat files. This enables ephemeral nodes which can fully reside // in memory. - DataDir string - Dirs datadir.Dirs + Dirs datadir.Dirs // Configuration of peer-to-peer networking. P2P p2p.Config @@ -188,10 +187,10 @@ func (c *Config) IPCEndpoint() string { } // Resolve names into the data directory full paths otherwise if filepath.Base(c.IPCPath) == c.IPCPath { - if c.DataDir == "" { + if c.Dirs.DataDir == "" { return filepath.Join(os.TempDir(), c.IPCPath) } - return filepath.Join(c.DataDir, c.IPCPath) + return filepath.Join(c.Dirs.DataDir, c.IPCPath) } return c.IPCPath } @@ -209,7 +208,7 @@ func DefaultIPCEndpoint(clientIdentifier string) string { panic("empty executable name") } } - config := &Config{DataDir: paths.DefaultDataDir(), IPCPath: clientIdentifier + ".ipc"} + config := &Config{Dirs: datadir.New(paths.DefaultDataDir()), IPCPath: clientIdentifier + ".ipc"} return config.IPCEndpoint() } @@ -279,10 +278,10 @@ func (c *Config) ResolvePath(path string) string { if filepath.IsAbs(path) { return path } - if c.DataDir == "" { + if c.Dirs.DataDir == "" { return "" } - return filepath.Join(c.DataDir, path) + return filepath.Join(c.Dirs.DataDir, path) } // StaticNodes returns a list of node enode URLs configured as static nodes. @@ -301,7 +300,7 @@ func (c *Config) TrustedNodes() ([]*enode.Node, error) { // file from within the data directory. func (c *Config) parsePersistentNodes(w *bool, path string) []*enode.Node { // Short circuit if no node config is present - if c.DataDir == "" { + if c.Dirs.DataDir == "" { return nil } if _, err := os.Stat(path); err != nil { diff --git a/node/nodecfg/config_test.go b/node/nodecfg/config_test.go index 5b703ddfee4..2add2eebaca 100644 --- a/node/nodecfg/config_test.go +++ b/node/nodecfg/config_test.go @@ -24,6 +24,7 @@ import ( node2 "github.com/ledgerwatch/erigon/node" "github.com/ledgerwatch/erigon/node/nodecfg" + "github.com/ledgerwatch/erigon/node/nodecfg/datadir" ) // Tests that datadirs can be successfully created, be them manually configured @@ -34,7 +35,7 @@ func TestDataDirCreation(t *testing.T) { } // Create a temporary data dir and check that it can be used by a node dir := t.TempDir() - node, err := node2.New(&nodecfg.Config{DataDir: dir}) + node, err := node2.New(&nodecfg.Config{Dirs: datadir.New(dir)}) if err != nil { t.Fatalf("failed to create stack with existing datadir: %v", err) } @@ -43,7 +44,7 @@ func TestDataDirCreation(t *testing.T) { } // Generate a long non-existing datadir path and check that it gets created by a node dir = filepath.Join(dir, "a", "b", "c", "d", "e", "f") - node, err = node2.New(&nodecfg.Config{DataDir: dir}) + node, err = node2.New(&nodecfg.Config{Dirs: datadir.New(dir)}) if err != nil { t.Fatalf("failed to create stack with creatable datadir: %v", err) } @@ -61,7 +62,7 @@ func TestDataDirCreation(t *testing.T) { defer os.Remove(file.Name()) dir = filepath.Join(file.Name(), "invalid/path") - node, err = node2.New(&nodecfg.Config{DataDir: dir}) + node, err = node2.New(&nodecfg.Config{Dirs: datadir.New(dir)}) if err == nil { t.Fatalf("protocol stack created with an invalid datadir") if err := node.Close(); err != nil { @@ -95,7 +96,7 @@ func 
TestIPCPathResolution(t *testing.T) { for i, test := range tests { // Only run when platform/test match if (runtime.GOOS == "windows") == test.Windows { - if endpoint := (&nodecfg.Config{DataDir: test.DataDir, IPCPath: test.IPCPath}).IPCEndpoint(); endpoint != test.Endpoint { + if endpoint := (&nodecfg.Config{Dirs: datadir.New(test.DataDir), IPCPath: test.IPCPath}).IPCEndpoint(); endpoint != test.Endpoint { t.Errorf("test %d: IPC endpoint mismatch: have %s, want %s", i, endpoint, test.Endpoint) } } diff --git a/node/nodecfg/datadir/dirs.go b/node/nodecfg/datadir/dirs.go index 524a34b5b00..c16625fbf7f 100644 --- a/node/nodecfg/datadir/dirs.go +++ b/node/nodecfg/datadir/dirs.go @@ -4,20 +4,34 @@ import ( "path/filepath" ) +// Dirs is the file system folder the node should use for any data storage +// requirements. The configured data directory will not be directly shared with +// registered services, instead those can use utility methods to create/access +// databases or flat files type Dirs struct { DataDir string Chaindata string Tmp string Snap string TxPool string + Nodes string } func New(datadir string) Dirs { + if datadir != "" { + absdatadir, err := filepath.Abs(datadir) + if err != nil { + panic(err) + } + datadir = absdatadir + } + return Dirs{ DataDir: datadir, Chaindata: filepath.Join(datadir, "chaindata"), Tmp: filepath.Join(datadir, "etl-temp"), Snap: filepath.Join(datadir, "snapshots"), TxPool: filepath.Join(datadir, "txpool"), + Nodes: filepath.Join(datadir, "nodes"), } } diff --git a/node/nodecfg/defaults.go b/node/nodecfg/defaults.go index 5ac48cfa777..d13022b93ee 100644 --- a/node/nodecfg/defaults.go +++ b/node/nodecfg/defaults.go @@ -36,7 +36,6 @@ const ( // DefaultConfig contains reasonable default settings. var DefaultConfig = Config{ - DataDir: paths.DefaultDataDir(), Dirs: datadir.New(paths.DefaultDataDir()), HTTPPort: DefaultHTTPPort, HTTPModules: []string{"net", "web3"}, diff --git a/turbo/app/make_app.go b/turbo/app/make_app.go index efafa7afa0a..81b9e688d69 100644 --- a/turbo/app/make_app.go +++ b/turbo/app/make_app.go @@ -55,8 +55,7 @@ func NewNodeConfig(ctx *cli.Context) *nodecfg.Config { nodeConfig.IPCPath = "" // force-disable IPC endpoint nodeConfig.Name = "erigon" if ctx.GlobalIsSet(utils.DataDirFlag.Name) { - nodeConfig.DataDir = ctx.GlobalString(utils.DataDirFlag.Name) - nodeConfig.Dirs = datadir.New(nodeConfig.DataDir) + nodeConfig.Dirs = datadir.New(ctx.GlobalString(utils.DataDirFlag.Name)) } return &nodeConfig } diff --git a/turbo/cli/flags.go b/turbo/cli/flags.go index 6d43a1ef9ea..8eaf765ca4e 100644 --- a/turbo/cli/flags.go +++ b/turbo/cli/flags.go @@ -16,7 +16,6 @@ import ( "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/ethdb/prune" "github.com/ledgerwatch/erigon/node/nodecfg" - "github.com/ledgerwatch/erigon/node/nodecfg/datadir" "github.com/ledgerwatch/log/v3" "github.com/spf13/pflag" "github.com/urfave/cli" @@ -280,12 +279,11 @@ func ApplyFlagsForNodeConfig(ctx *cli.Context, cfg *nodecfg.Config) { func setEmbeddedRpcDaemon(ctx *cli.Context, cfg *nodecfg.Config) { jwtSecretPath := ctx.GlobalString(utils.JWTSecretPath.Name) if jwtSecretPath == "" { - jwtSecretPath = cfg.DataDir + "/jwt.hex" + jwtSecretPath = cfg.Dirs.DataDir + "/jwt.hex" } c := &httpcfg.HttpCfg{ Enabled: ctx.GlobalBool(utils.HTTPEnabledFlag.Name), - DataDir: cfg.DataDir, - Dirs: datadir.New(cfg.DataDir), + Dirs: cfg.Dirs, TLSKeyFile: cfg.TLSKeyFile, TLSCACert: cfg.TLSCACert, From 974b1d88443251e2c63b50a22ec632fb0f15c568 Mon Sep 17 00:00:00 2001 From: 
Alex Sharov Date: Tue, 7 Jun 2022 12:08:24 +0700 Subject: [PATCH 009/136] more use of dirs config #4391 --- node/nodecfg/config.go | 2 +- node/nodecfg/datadir/dirs.go | 28 ++++++++++++++++------------ 2 files changed, 17 insertions(+), 13 deletions(-) diff --git a/node/nodecfg/config.go b/node/nodecfg/config.go index 92fa118957c..ff8742b1fa1 100644 --- a/node/nodecfg/config.go +++ b/node/nodecfg/config.go @@ -190,7 +190,7 @@ func (c *Config) IPCEndpoint() string { if c.Dirs.DataDir == "" { return filepath.Join(os.TempDir(), c.IPCPath) } - return filepath.Join(c.Dirs.DataDir, c.IPCPath) + return filepath.Join(c.Dirs.RelativeDataDir, c.IPCPath) } return c.IPCPath } diff --git a/node/nodecfg/datadir/dirs.go b/node/nodecfg/datadir/dirs.go index c16625fbf7f..81c031c37b9 100644 --- a/node/nodecfg/datadir/dirs.go +++ b/node/nodecfg/datadir/dirs.go @@ -9,16 +9,19 @@ import ( // registered services, instead those can use utility methods to create/access // databases or flat files type Dirs struct { - DataDir string - Chaindata string - Tmp string - Snap string - TxPool string - Nodes string + DataDir string + RelativeDataDir string // like dataDir, but without filepath.Abs() resolution + Chaindata string + Tmp string + Snap string + TxPool string + Nodes string } func New(datadir string) Dirs { + relativeDataDir := datadir if datadir != "" { + var err error absdatadir, err := filepath.Abs(datadir) if err != nil { panic(err) @@ -27,11 +30,12 @@ func New(datadir string) Dirs { } return Dirs{ - DataDir: datadir, - Chaindata: filepath.Join(datadir, "chaindata"), - Tmp: filepath.Join(datadir, "etl-temp"), - Snap: filepath.Join(datadir, "snapshots"), - TxPool: filepath.Join(datadir, "txpool"), - Nodes: filepath.Join(datadir, "nodes"), + RelativeDataDir: relativeDataDir, + DataDir: datadir, + Chaindata: filepath.Join(datadir, "chaindata"), + Tmp: filepath.Join(datadir, "etl-temp"), + Snap: filepath.Join(datadir, "snapshots"), + TxPool: filepath.Join(datadir, "txpool"), + Nodes: filepath.Join(datadir, "nodes"), } } From 2211088fa8cc74015f1b7a9aad991daa45f74619 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Tue, 7 Jun 2022 12:20:49 +0700 Subject: [PATCH 010/136] don't open snapshots at startup #4392 --- eth/backend.go | 23 +++++++++-------------- 1 file changed, 9 insertions(+), 14 deletions(-) diff --git a/eth/backend.go b/eth/backend.go index 2288d9ca4ad..0e753ba8636 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -267,11 +267,6 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere }() } - blockReader, allSnapshots, err := backend.setUpBlockReader(ctx, config.Snapshot.Enabled, config, stack) - if err != nil { - return nil, err - } - var consensusConfig interface{} if chainConfig.Clique != nil { @@ -287,8 +282,6 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere consensusConfig = &config.Ethash } - backend.engine = ethconsensusconfig.CreateConsensusEngine(chainConfig, logger, consensusConfig, config.Miner.Notify, config.Miner.Noverify, config.HeimdallURL, config.WithoutHeimdall, stack.DataDir(), allSnapshots) - log.Info("Initialising Ethereum protocol", "network", config.NetworkID) log.Info("Using snapshots", "on", config.Snapshot.Enabled) @@ -310,10 +303,6 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere log.Warn("Incorrect snapshot enablement", "got", config.Sync.UseSnapshots, "change_to", useSnapshots) config.Sync.UseSnapshots = useSnapshots config.Snapshot.Enabled = 
ethconfig.UseSnapshotsByChainName(chainConfig.ChainName) && useSnapshots - blockReader, allSnapshots, err = backend.setUpBlockReader(ctx, config.Snapshot.Enabled, config, stack) - if err != nil { - return err - } } log.Info("Effective", "prune_flags", config.Prune.String(), "snapshot_flags", config.Snapshot.String()) @@ -322,6 +311,12 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere return nil, err } + blockReader, allSnapshots, err := backend.setUpBlockReader(ctx, config.Snapshot.Enabled, config, stack) + if err != nil { + return nil, err + } + backend.engine = ethconsensusconfig.CreateConsensusEngine(chainConfig, logger, consensusConfig, config.Miner.Notify, config.Miner.Noverify, config.HeimdallURL, config.WithoutHeimdall, stack.DataDir(), allSnapshots) + backend.sentriesClient, err = sentry.NewMultiClient( chainKv, stack.Config().NodeName(), @@ -770,9 +765,9 @@ func (s *Ethereum) setUpBlockReader(ctx context.Context, isSnapshotEnabled bool, if isSnapshotEnabled { allSnapshots := snapshotsync.NewRoSnapshots(cfg.Snapshot, cfg.Dirs.Snap) - if err = allSnapshots.Reopen(); err != nil { - return nil, nil, fmt.Errorf("[Snapshots] Reopen: %w", err) - } + //if err = allSnapshots.Reopen(); err != nil { + // return nil, nil, fmt.Errorf("[Snapshots] Reopen: %w", err) + //} blockReader := snapshotsync.NewBlockReaderWithSnapshots(allSnapshots) if len(stack.Config().DownloaderAddr) > 0 { From d655854b53888e86bae6cdef0b64e59e73f6cd71 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Tue, 7 Jun 2022 12:33:33 +0700 Subject: [PATCH 011/136] Snapshots: optimisticaly open at app startup (#4393) * save * save --- cmd/rpcdaemon/cli/config.go | 11 ++++++----- eth/backend.go | 4 +--- turbo/snapshotsync/block_snapshots.go | 5 +++++ 3 files changed, 12 insertions(+), 8 deletions(-) diff --git a/cmd/rpcdaemon/cli/config.go b/cmd/rpcdaemon/cli/config.go index ed913abee58..fbc721a25d1 100644 --- a/cmd/rpcdaemon/cli/config.go +++ b/cmd/rpcdaemon/cli/config.go @@ -341,14 +341,15 @@ func RemoteServices(ctx context.Context, cfg httpcfg.HttpCfg, logger log.Logger, if cfg.WithDatadir { if cfg.Snap.Enabled { allSnapshots := snapshotsync.NewRoSnapshots(cfg.Snap, cfg.Dirs.Snap) - if err := allSnapshots.Reopen(); err != nil { - return nil, nil, nil, nil, nil, nil, nil, nil, ff, fmt.Errorf("allSnapshots.Reopen: %w", err) - } + allSnapshots.OptimisticReopen() log.Info("[Snapshots] see new", "blocks", allSnapshots.BlocksAvailable()) // don't reopen it right here, because snapshots may be not ready yet onNewSnapshot = func() { - allSnapshots.Reopen() - log.Info("[Snapshots] see new", "blocks", allSnapshots.BlocksAvailable()) + if err := allSnapshots.Reopen(); err != nil { + log.Error("[Snapshots] reopen", "err", err) + } else { + log.Info("[Snapshots] see new", "blocks", allSnapshots.BlocksAvailable()) + } } blockReader = snapshotsync.NewBlockReaderWithSnapshots(allSnapshots) } else { diff --git a/eth/backend.go b/eth/backend.go index 0e753ba8636..0d5782795fb 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -765,9 +765,7 @@ func (s *Ethereum) setUpBlockReader(ctx context.Context, isSnapshotEnabled bool, if isSnapshotEnabled { allSnapshots := snapshotsync.NewRoSnapshots(cfg.Snapshot, cfg.Dirs.Snap) - //if err = allSnapshots.Reopen(); err != nil { - // return nil, nil, fmt.Errorf("[Snapshots] Reopen: %w", err) - //} + allSnapshots.OptimisticReopen() blockReader := snapshotsync.NewBlockReaderWithSnapshots(allSnapshots) if len(stack.Config().DownloaderAddr) > 0 { diff --git 
a/turbo/snapshotsync/block_snapshots.go b/turbo/snapshotsync/block_snapshots.go index 0350018723a..0e1ddb0d984 100644 --- a/turbo/snapshotsync/block_snapshots.go +++ b/turbo/snapshotsync/block_snapshots.go @@ -375,6 +375,11 @@ func (s *RoSnapshots) AsyncOpenAll(ctx context.Context) { }() } +// OptimisticReopen - optimistically open snapshots (ignoring error), useful at App startup because: +// - user must be able: delete any snapshot file and Erigon will self-heal by re-downloading +// - RPC return Nil for historical blocks if snapshots are not open +func (s *RoSnapshots) OptimisticReopen() { _ = s.Reopen() } + func (s *RoSnapshots) Reopen() error { s.Headers.lock.Lock() defer s.Headers.lock.Unlock() From 454c7aa87f01ce790cf1c86f6dbcf29ccfd346b8 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Tue, 7 Jun 2022 13:12:27 +0700 Subject: [PATCH 012/136] torrent: allow del db #4394 --- cmd/downloader/downloader/downloader.go | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/cmd/downloader/downloader/downloader.go b/cmd/downloader/downloader/downloader.go index b1f6c304239..ca085ca0f09 100644 --- a/cmd/downloader/downloader/downloader.go +++ b/cmd/downloader/downloader/downloader.go @@ -63,7 +63,7 @@ func New(cfg *torrentcfg.Cfg) (*Downloader, error) { if common.FileExist(cfg.DataDir + "_tmp") { // migration from prev versions _ = os.Rename(cfg.DataDir+"_tmp", filepath.Join(cfg.DataDir, "tmp")) // ignore error, because maybe they are on different drive, or target folder already created manually, all is fine } - if !common.FileExist(filepath.Join(cfg.DataDir, "db")) { + if !common.FileExist(filepath.Join(cfg.DataDir, "db")) && !HasSegFile(cfg.DataDir) { // it's ok to remove "datadir/snapshots/db" dir or add .seg files manually cfg.DataDir = filepath.Join(cfg.DataDir, "tmp") } else { if err := copyFromTmp(cfg.DataDir); err != nil { @@ -367,3 +367,19 @@ func MainLoop(ctx context.Context, d *Downloader, silent bool) { } } } + +func HasSegFile(dir string) bool { + files, err := os.ReadDir(dir) + if err != nil { + return false + } + for _, f := range files { + if f.IsDir() { + continue + } + if filepath.Ext(f.Name()) == ".seg" { + return true + } + } + return false +} From e90bc39e04bb5ada5aef5f738cd821b57176a393 Mon Sep 17 00:00:00 2001 From: ledgerwatch Date: Tue, 7 Jun 2022 09:42:58 +0100 Subject: [PATCH 013/136] Another anchor fix (#4395) Co-authored-by: Alexey Sharp --- turbo/stages/headerdownload/header_algos.go | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/turbo/stages/headerdownload/header_algos.go b/turbo/stages/headerdownload/header_algos.go index 8dbe374dca4..16b6e100e7f 100644 --- a/turbo/stages/headerdownload/header_algos.go +++ b/turbo/stages/headerdownload/header_algos.go @@ -926,14 +926,6 @@ func (hd *HeaderDownload) ProcessHeader(sh ChainSegmentHeader, newBlock bool, pe // Duplicate return false } - if parentAnchor, ok := hd.anchors[sh.Header.ParentHash]; ok { - // Alternative branch connected to an existing anchor - // Adding link as another child to the anchor and quit (not to overwrite the anchor) - link := hd.addHeaderAsLink(sh, false /* persisted */) - link.next = parentAnchor.fLink - parentAnchor.fLink = link - return false - } parent, foundParent := hd.links[sh.Header.ParentHash] anchor, foundAnchor := hd.anchors[sh.Hash] if !foundParent && !foundAnchor { @@ -952,6 +944,13 @@ func (hd *HeaderDownload) ProcessHeader(sh ChainSegmentHeader, newBlock bool, pe link.fChild = anchor.fLink hd.removeAnchor(anchor) } + 
if parentAnchor, ok := hd.anchors[sh.Header.ParentHash]; ok { + // Alternative branch connected to an existing anchor + // Adding link as another child to the anchor and quit (not to overwrite the anchor) + link.next = parentAnchor.fLink + parentAnchor.fLink = link + return false + } if foundParent { // Add this link as another child to the parent that is found link.next = parent.fChild From a1f4472f109d400d3abc1322b21d68371def064a Mon Sep 17 00:00:00 2001 From: Enrique Jose Avila Asapche Date: Tue, 7 Jun 2022 10:51:28 +0100 Subject: [PATCH 014/136] added back PR 3806 (#4382) --- cmd/rpcdaemon/commands/rpc_block.go | 10 +++++ core/rawdb/accessors_chain.go | 70 +++++++++++++++++++++++++++++ eth/stagedsync/stage_headers.go | 9 +++- 3 files changed, 87 insertions(+), 2 deletions(-) diff --git a/cmd/rpcdaemon/commands/rpc_block.go b/cmd/rpcdaemon/commands/rpc_block.go index b24451c538b..9c001ba8ac7 100644 --- a/cmd/rpcdaemon/commands/rpc_block.go +++ b/cmd/rpcdaemon/commands/rpc_block.go @@ -4,6 +4,8 @@ import ( "fmt" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/rpc" ) @@ -26,6 +28,14 @@ func getBlockNumber(number rpc.BlockNumber, tx kv.Tx) (uint64, error) { } func getLatestBlockNumber(tx kv.Tx) (uint64, error) { + forkchoiceHeadHash := rawdb.ReadForkchoiceHead(tx) + if forkchoiceHeadHash != (common.Hash{}) { + forkchoiceHeadNum := rawdb.ReadHeaderNumber(tx, forkchoiceHeadHash) + if forkchoiceHeadNum != nil { + return *forkchoiceHeadNum, nil + } + } + blockNum, err := stages.GetStageProgress(tx, stages.Execution) if err != nil { return 0, fmt.Errorf("getting latest block number: %w", err) diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go index 2e8add00087..23be03265d7 100644 --- a/core/rawdb/accessors_chain.go +++ b/core/rawdb/accessors_chain.go @@ -145,6 +145,76 @@ func WriteHeadBlockHash(db kv.Putter, hash common.Hash) { } } +// DeleteHeaderNumber removes hash->number mapping. +func DeleteHeaderNumber(db kv.Deleter, hash common.Hash) { + if err := db.Delete(kv.HeaderNumber, hash[:], nil); err != nil { + log.Crit("Failed to delete hash mapping", "err", err) + } +} + +// ReadForkchoiceHead retrieves headBlockHash from the last Engine API forkChoiceUpdated. +func ReadForkchoiceHead(db kv.Getter) common.Hash { + data, err := db.GetOne(kv.LastForkchoice, []byte("headBlockHash")) + if err != nil { + log.Error("ReadForkchoiceHead failed", "err", err) + } + if len(data) == 0 { + return common.Hash{} + } + return common.BytesToHash(data) +} + +// WriteForkchoiceHead stores headBlockHash from the last Engine API forkChoiceUpdated. +func WriteForkchoiceHead(db kv.Putter, hash common.Hash) { + if err := db.Put(kv.LastForkchoice, []byte("headBlockHash"), hash[:]); err != nil { + log.Crit("Failed to store head block hash", "err", err) + } +} + +// ReadForkchoiceSafe retrieves safeBlockHash from the last Engine API forkChoiceUpdated. +func ReadForkchoiceSafe(db kv.Getter) common.Hash { + data, err := db.GetOne(kv.LastForkchoice, []byte("safeBlockHash")) + if err != nil { + log.Error("ReadForkchoiceSafe failed", "err", err) + return common.Hash{} + } + + if len(data) == 0 { + return common.Hash{} + } + + return common.BytesToHash(data) +} + +// WriteForkchoiceSafe stores safeBlockHash from the last Engine API forkChoiceUpdated. 
+func WriteForkchoiceSafe(db kv.Putter, hash common.Hash) {
+	if err := db.Put(kv.LastForkchoice, []byte("safeBlockHash"), hash[:]); err != nil {
+		log.Crit("Failed to store safe block hash", "err", err)
+	}
+}
+
+// ReadForkchoiceFinalized retrieves finalizedBlockHash from the last Engine API forkChoiceUpdated.
+func ReadForkchoiceFinalized(db kv.Getter) common.Hash {
+	data, err := db.GetOne(kv.LastForkchoice, []byte("finalizedBlockHash"))
+	if err != nil {
+		log.Error("ReadForkchoiceFinalized failed", "err", err)
+		return common.Hash{}
+	}
+
+	if len(data) == 0 {
+		return common.Hash{}
+	}
+
+	return common.BytesToHash(data)
+}
+
+// WriteForkchoiceFinalized stores finalizedBlockHash from the last Engine API forkChoiceUpdated.
+func WriteForkchoiceFinalized(db kv.Putter, hash common.Hash) {
+	if err := db.Put(kv.LastForkchoice, []byte("finalizedBlockHash"), hash[:]); err != nil {
+		log.Crit("Failed to store finalized block hash", "err", err)
+	}
+}
+
 // ReadHeaderRLP retrieves a block header in its raw RLP database encoding.
 func ReadHeaderRLP(db kv.Getter, hash common.Hash, number uint64) rlp.RawValue {
 	data, err := db.GetOne(kv.Headers, dbutils.HeaderKey(number, hash))
diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go
index 916722a26f7..644fa0a9525 100644
--- a/eth/stagedsync/stage_headers.go
+++ b/eth/stagedsync/stage_headers.go
@@ -207,7 +207,9 @@ func safeAndFinalizedBlocksAreCanonical(
 	if err != nil {
 		return false, err
 	}
-	if !safeIsCanonical {
+	if safeIsCanonical {
+		rawdb.WriteForkchoiceSafe(tx, forkChoice.SafeBlockHash)
+	} else {
 		log.Warn(fmt.Sprintf("[%s] Non-canonical SafeBlockHash", s.LogPrefix()), "forkChoice", forkChoice)
 		if sendErrResponse {
 			cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{
@@ -223,7 +225,9 @@ func safeAndFinalizedBlocksAreCanonical(
 	if err != nil {
 		return false, err
 	}
-	if !finalizedIsCanonical {
+	if finalizedIsCanonical {
+		rawdb.WriteForkchoiceFinalized(tx, forkChoice.FinalizedBlockHash)
+	} else {
 		log.Warn(fmt.Sprintf("[%s] Non-canonical FinalizedBlockHash", s.LogPrefix()), "forkChoice", forkChoice)
 		if sendErrResponse {
 			cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{
@@ -366,6 +370,7 @@ func finishHandlingForkChoice(
 	if err := rawdb.WriteHeadHeaderHash(tx, forkChoice.HeadBlockHash); err != nil {
 		return err
 	}
+	rawdb.WriteForkchoiceHead(tx, forkChoice.HeadBlockHash)
 
 	sendErrResponse := cfg.hd.GetPendingPayloadStatus() != (common.Hash{})
 	canonical, err := safeAndFinalizedBlocksAreCanonical(forkChoice, s, tx, cfg, sendErrResponse)

From a4a466bb75413864ca209357296d10557b6c5b66 Mon Sep 17 00:00:00 2001
From: Giulio rebuffo
Date: Tue, 7 Jun 2022 16:13:02 +0200
Subject: [PATCH 015/136] Fixed Last in the mining mutation (#4397)

* removed left over from previous mining algo
* added ethereum database
* test improvement
---
 ethdb/olddb/miningmutation_test.go  | 85 +++++++++++++++++++++++++++++
 ethdb/olddb/miningmutationcursor.go | 74 +++++++++++++++++++++----
 2 files changed, 147 insertions(+), 12 deletions(-)
 create mode 100644 ethdb/olddb/miningmutation_test.go

diff --git a/ethdb/olddb/miningmutation_test.go b/ethdb/olddb/miningmutation_test.go
new file mode 100644
index 00000000000..032a0fe6886
--- /dev/null
+++ b/ethdb/olddb/miningmutation_test.go
@@ -0,0 +1,85 @@
+// Copyright 2014 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +//go:build !js + +package olddb + +import ( + "context" + "testing" + + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/memdb" + "github.com/stretchr/testify/require" +) + +func initializeDB(rwTx kv.RwTx) { + rwTx.Put(kv.HashedAccounts, []byte("AAAA"), []byte("value")) + rwTx.Put(kv.HashedAccounts, []byte("CAAA"), []byte("value1")) + rwTx.Put(kv.HashedAccounts, []byte("CBAA"), []byte("value2")) + rwTx.Put(kv.HashedAccounts, []byte("CCAA"), []byte("value3")) +} + +func TestLastMiningDB(t *testing.T) { + rwTx, err := memdb.New().BeginRw(context.Background()) + require.NoError(t, err) + + initializeDB(rwTx) + + batch := NewMiningBatch(rwTx) + batch.Put(kv.HashedAccounts, []byte("BAAA"), []byte("value4")) + batch.Put(kv.HashedAccounts, []byte("BCAA"), []byte("value5")) + + cursor, err := batch.Cursor(kv.HashedAccounts) + require.NoError(t, err) + + key, value, err := cursor.Last() + require.NoError(t, err) + + require.Equal(t, key, []byte("CCAA")) + require.Equal(t, value, []byte("value3")) + + key, value, err = cursor.Next() + require.NoError(t, err) + require.Equal(t, key, []byte(nil)) + require.Equal(t, value, []byte(nil)) +} + +func TestLastMiningMem(t *testing.T) { + rwTx, err := memdb.New().BeginRw(context.Background()) + require.NoError(t, err) + + initializeDB(rwTx) + + batch := NewMiningBatch(rwTx) + batch.Put(kv.HashedAccounts, []byte("BAAA"), []byte("value4")) + batch.Put(kv.HashedAccounts, []byte("DCAA"), []byte("value5")) + + cursor, err := batch.Cursor(kv.HashedAccounts) + require.NoError(t, err) + + key, value, err := cursor.Last() + require.NoError(t, err) + + require.Equal(t, key, []byte("DCAA")) + require.Equal(t, value, []byte("value5")) + + key, value, err = cursor.Next() + require.NoError(t, err) + require.Equal(t, key, []byte(nil)) + require.Equal(t, value, []byte(nil)) +} diff --git a/ethdb/olddb/miningmutationcursor.go b/ethdb/olddb/miningmutationcursor.go index 2d1db38c8f8..6a9c01d7b4f 100644 --- a/ethdb/olddb/miningmutationcursor.go +++ b/ethdb/olddb/miningmutationcursor.go @@ -54,27 +54,39 @@ func (m *miningmutationcursor) Current() ([]byte, []byte, error) { return common.CopyBytes(m.currentPair.key), common.CopyBytes(m.currentPair.value), nil } -func (m *miningmutationcursor) goForward(memKey, memValue, dbKey, dbValue []byte) ([]byte, []byte, error) { - var err error - if memValue == nil && dbValue == nil { - return nil, nil, nil - } +func (m *miningmutationcursor) skipIntersection(memKey, memValue, dbKey, dbValue []byte) (newDbKey []byte, newDbValue []byte, err error) { + newDbKey = dbKey + newDbValue = dbValue // Check for duplicates if bytes.Compare(memKey, dbKey) == 0 { if !m.isDupsort { - if dbKey, dbValue, err = m.cursor.Next(); err != nil { - return nil, nil, err + if newDbKey, newDbValue, err = m.cursor.Next(); err != nil { + return } } else if 
bytes.Compare(memValue, dbValue) == 0 { - if dbKey, dbValue, err = m.dupCursor.NextDup(); err != nil { - return nil, nil, err + if newDbKey, newDbValue, err = m.dupCursor.NextDup(); err != nil { + return } } else if len(memValue) >= 32 && len(dbValue) >= 32 && m.table == kv.HashedStorage && bytes.Compare(memValue[:32], dbValue[:32]) == 0 { - if dbKey, dbValue, err = m.dupCursor.NextDup(); err != nil { - return nil, nil, err + if newDbKey, newDbValue, err = m.dupCursor.NextDup(); err != nil { + return } } } + return +} + +func (m *miningmutationcursor) goForward(memKey, memValue, dbKey, dbValue []byte) ([]byte, []byte, error) { + var err error + if memValue == nil && dbValue == nil { + return nil, nil, nil + } + + dbKey, dbValue, err = m.skipIntersection(memKey, memValue, dbKey, dbValue) + if err != nil { + return nil, nil, err + } + m.currentDbEntry = cursorentry{dbKey, dbValue} m.currentMemEntry = cursorentry{memKey, memValue} // compare entries @@ -242,7 +254,45 @@ func (m *miningmutationcursor) Last() ([]byte, []byte, error) { return nil, nil, err } - return m.goForward(memKey, memValue, dbKey, dbValue) + dbKey, dbValue, err = m.skipIntersection(memKey, memValue, dbKey, dbValue) + if err != nil { + return nil, nil, err + } + + m.currentDbEntry = cursorentry{dbKey, dbValue} + m.currentMemEntry = cursorentry{memKey, memValue} + // Basic checks + if dbValue == nil { + m.isPrevFromDb = false + return memKey, memValue, nil + } + + if memValue == nil { + m.isPrevFromDb = true + return dbKey, dbValue, nil + } + // Check which one is last and return it + keyCompare := bytes.Compare(memKey, dbKey) + if keyCompare == 0 { + if bytes.Compare(memValue, dbValue) > 0 { + m.currentDbEntry = cursorentry{} + m.isPrevFromDb = false + return memKey, memValue, nil + } + m.currentMemEntry = cursorentry{} + m.isPrevFromDb = true + return dbKey, dbValue, nil + } + + if keyCompare > 0 { + m.currentDbEntry = cursorentry{} + m.isPrevFromDb = false + return memKey, memValue, nil + } + + m.currentMemEntry = cursorentry{} + m.isPrevFromDb = true + return dbKey, dbValue, nil } func (m *miningmutationcursor) Prev() ([]byte, []byte, error) { From 07df1eb5987a2adaa50c51d73a41cd12e516d1b2 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Tue, 7 Jun 2022 18:39:19 +0200 Subject: [PATCH 016/136] Fix mining PoS for Deleting of state entries (#4399) * fixed delete hits * added tests --- ethdb/olddb/miningmutation_test.go | 27 ++++++++++ ethdb/olddb/miningmutationcursor.go | 77 ++++++++++++++++++++++++++--- 2 files changed, 98 insertions(+), 6 deletions(-) diff --git a/ethdb/olddb/miningmutation_test.go b/ethdb/olddb/miningmutation_test.go index 032a0fe6886..8cc6abe91ce 100644 --- a/ethdb/olddb/miningmutation_test.go +++ b/ethdb/olddb/miningmutation_test.go @@ -83,3 +83,30 @@ func TestLastMiningMem(t *testing.T) { require.Equal(t, key, []byte(nil)) require.Equal(t, value, []byte(nil)) } + +func TestDeleteMining(t *testing.T) { + rwTx, err := memdb.New().BeginRw(context.Background()) + require.NoError(t, err) + + initializeDB(rwTx) + batch := NewMiningBatch(rwTx) + batch.Put(kv.HashedAccounts, []byte("BAAA"), []byte("value4")) + batch.Put(kv.HashedAccounts, []byte("DCAA"), []byte("value5")) + batch.Put(kv.HashedAccounts, []byte("FCAA"), []byte("value5")) + + batch.Delete(kv.HashedAccounts, []byte("BAAA"), nil) + batch.Delete(kv.HashedAccounts, []byte("CBAA"), nil) + + cursor, err := batch.Cursor(kv.HashedAccounts) + require.NoError(t, err) + + key, value, err := cursor.SeekExact([]byte("BAAA")) + require.NoError(t, err) + 
require.Equal(t, key, []byte(nil)) + require.Equal(t, value, []byte(nil)) + + key, value, err = cursor.SeekExact([]byte("CBAA")) + require.NoError(t, err) + require.Equal(t, key, []byte(nil)) + require.Equal(t, value, []byte(nil)) +} diff --git a/ethdb/olddb/miningmutationcursor.go b/ethdb/olddb/miningmutationcursor.go index 6a9c01d7b4f..a805a8aad7a 100644 --- a/ethdb/olddb/miningmutationcursor.go +++ b/ethdb/olddb/miningmutationcursor.go @@ -46,9 +46,52 @@ func (m *miningmutationcursor) First() ([]byte, []byte, error) { return nil, nil, err } + if dbKey != nil && m.mutation.isEntryDeleted(m.table, dbKey) { + if dbKey, dbValue, err = m.getNextOnDb(false); err != nil { + return nil, nil, err + } + } + return m.goForward(memKey, memValue, dbKey, dbValue) } +func (m *miningmutationcursor) getNextOnDb(dup bool) (key []byte, value []byte, err error) { + if dup { + key, value, err = m.dupCursor.NextDup() + if err != nil { + return + } + } else { + key, value, err = m.cursor.Next() + if err != nil { + return + } + } + + for key != nil && value != nil && m.mutation.isEntryDeleted(m.table, m.convertAutoDupsort(key, value)) { + if dup { + key, value, err = m.dupCursor.NextDup() + if err != nil { + return + } + } else { + key, value, err = m.cursor.Next() + if err != nil { + return + } + } + } + return +} + +func (m *miningmutationcursor) convertAutoDupsort(key []byte, value []byte) []byte { + // The only dupsorted table we are interested is HashedStorage + if m.table != kv.HashedStorage { + return key + } + return append(key, value[:32]...) +} + // Current return the current key and values the cursor is on. func (m *miningmutationcursor) Current() ([]byte, []byte, error) { return common.CopyBytes(m.currentPair.key), common.CopyBytes(m.currentPair.value), nil @@ -60,15 +103,15 @@ func (m *miningmutationcursor) skipIntersection(memKey, memValue, dbKey, dbValue // Check for duplicates if bytes.Compare(memKey, dbKey) == 0 { if !m.isDupsort { - if newDbKey, newDbValue, err = m.cursor.Next(); err != nil { + if newDbKey, newDbValue, err = m.getNextOnDb(false); err != nil { return } } else if bytes.Compare(memValue, dbValue) == 0 { - if newDbKey, newDbValue, err = m.dupCursor.NextDup(); err != nil { + if newDbKey, newDbValue, err = m.getNextOnDb(true); err != nil { return } } else if len(memValue) >= 32 && len(dbValue) >= 32 && m.table == kv.HashedStorage && bytes.Compare(memValue[:32], dbValue[:32]) == 0 { - if newDbKey, newDbValue, err = m.dupCursor.NextDup(); err != nil { + if newDbKey, newDbValue, err = m.getNextOnDb(true); err != nil { return } } @@ -113,7 +156,7 @@ func (m *miningmutationcursor) goForward(memKey, memValue, dbKey, dbValue []byte // Next returns the next element of the mutation. func (m *miningmutationcursor) Next() ([]byte, []byte, error) { if m.isPrevFromDb { - k, v, err := m.cursor.Next() + k, v, err := m.getNextOnDb(false) if err != nil { return nil, nil, err } @@ -131,7 +174,7 @@ func (m *miningmutationcursor) Next() ([]byte, []byte, error) { // NextDup returns the next element of the mutation. 
func (m *miningmutationcursor) NextDup() ([]byte, []byte, error) { if m.isPrevFromDb { - k, v, err := m.dupCursor.NextDup() + k, v, err := m.getNextOnDb(true) if err != nil { return nil, nil, err @@ -154,6 +197,14 @@ func (m *miningmutationcursor) Seek(seek []byte) ([]byte, []byte, error) { return nil, nil, err } + // If the entry is marked as DB find one that is not + if dbKey != nil && m.mutation.isEntryDeleted(m.table, dbKey) { + dbKey, dbValue, err = m.getNextOnDb(false) + if err != nil { + return nil, nil, err + } + } + memKey, memValue, err := m.memCursor.Seek(seek) if err != nil { return nil, nil, err @@ -182,7 +233,7 @@ func (m *miningmutationcursor) SeekExact(seek []byte) ([]byte, []byte, error) { return nil, nil, err } - if dbKey != nil { + if dbKey != nil && !m.mutation.isEntryDeleted(m.table, seek) { m.currentDbEntry.key = dbKey m.currentDbEntry.value = dbValue m.currentMemEntry.key, m.currentMemEntry.value, err = m.memCursor.Seek(seek) @@ -234,6 +285,13 @@ func (m *miningmutationcursor) SeekBothRange(key, value []byte) ([]byte, error) return nil, err } + if dbValue != nil && m.mutation.isEntryDeleted(m.table, m.convertAutoDupsort(key, dbValue)) { + _, dbValue, err = m.getNextOnDb(true) + if err != nil { + return nil, err + } + } + memValue, err := m.memDupCursor.SeekBothRange(key, value) if err != nil { return nil, err @@ -261,7 +319,14 @@ func (m *miningmutationcursor) Last() ([]byte, []byte, error) { m.currentDbEntry = cursorentry{dbKey, dbValue} m.currentMemEntry = cursorentry{memKey, memValue} + // Basic checks + if dbKey != nil && m.mutation.isEntryDeleted(m.table, dbKey) { + m.currentDbEntry = cursorentry{} + m.isPrevFromDb = false + return memKey, memValue, nil + } + if dbValue == nil { m.isPrevFromDb = false return memKey, memValue, nil From d7d698f565bd3c57981799a1a78f84ce3b0bdb3b Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Wed, 8 Jun 2022 01:59:14 +0700 Subject: [PATCH 017/136] db migration to reset blocks (#4389) * save * save * save * save * save * save * Update reset_blocks.go * Not to remove too many tx lookup files * Fix truncate blocks and add reset txlookup * Fix bodies * Fix nil pointer Co-authored-by: ledgerwatch Co-authored-by: Alexey Sharp Co-authored-by: Alex Sharp --- core/rawdb/accessors_chain.go | 7 ++- eth/stagedsync/stage_bodies.go | 9 ++++ eth/stagedsync/stage_call_traces.go | 2 +- migrations/db_schema_version.go | 3 +- migrations/migrations.go | 10 ++-- migrations/migrations_test.go | 19 ++++---- migrations/reset_blocks.go | 69 +++++++++++++++++++++++++++ migrations/txs_begin_end.go | 3 +- turbo/snapshotsync/block_snapshots.go | 6 +-- turbo/snapshotsync/snap/files.go | 42 ++++++++++++++++ 10 files changed, 150 insertions(+), 20 deletions(-) create mode 100644 migrations/reset_blocks.go diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go index 23be03265d7..7da67b4a896 100644 --- a/core/rawdb/accessors_chain.go +++ b/core/rawdb/accessors_chain.go @@ -1332,10 +1332,13 @@ func TruncateBlocks(ctx context.Context, tx kv.RwTx, blockFrom uint64) error { return err } } - if err := tx.Delete(kv.Headers, k, nil); err != nil { + // Copying k because otherwise the same memory will be reused + // for the next key and Delete below will end up deleting 1 more record than required + kCopy := common.CopyBytes(k) + if err := tx.Delete(kv.Headers, kCopy, nil); err != nil { return err } - if err := tx.Delete(kv.BlockBody, k, nil); err != nil { + if err := tx.Delete(kv.BlockBody, kCopy, nil); err != nil { return err } diff --git 
a/eth/stagedsync/stage_bodies.go b/eth/stagedsync/stage_bodies.go index 391a4eaf936..247e0b04f7a 100644 --- a/eth/stagedsync/stage_bodies.go +++ b/eth/stagedsync/stage_bodies.go @@ -58,8 +58,10 @@ func BodiesForward( test bool, // Set to true in tests, allows the stage to fail rather than wait indefinitely firstCycle bool, ) error { + var doUpdate bool if cfg.snapshots != nil && s.BlockNumber < cfg.snapshots.BlocksAvailable() { s.BlockNumber = cfg.snapshots.BlocksAvailable() + doUpdate = true } var d1, d2, d3, d4, d5, d6 time.Duration @@ -74,6 +76,13 @@ func BodiesForward( } timeout := cfg.timeout + // this update is required, because cfg.bd.UpdateFromDb(tx) below reads it and initialises requestedLow accordingly + // if not done, it will cause downloading from block 1 + if doUpdate { + if err := s.Update(tx, s.BlockNumber); err != nil { + return err + } + } // This will update bd.maxProgress if _, _, _, err = cfg.bd.UpdateFromDb(tx); err != nil { return err diff --git a/eth/stagedsync/stage_call_traces.go b/eth/stagedsync/stage_call_traces.go index e1256b0177e..ce7a9540d0c 100644 --- a/eth/stagedsync/stage_call_traces.go +++ b/eth/stagedsync/stage_call_traces.go @@ -185,7 +185,7 @@ func promoteCallTraces(logPrefix string, tx kv.RwTx, startBlock, endBlock uint64 case <-logEvery.C: var m runtime.MemStats libcommon.ReadMemStats(&m) - log.Info(fmt.Sprintf("[%s] Pruning call trace intermediate table", logPrefix), "number", blockNum, + log.Info(fmt.Sprintf("[%s] Pruning call trace table", logPrefix), "number", blockNum, "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys)) } if err = traceCursor.DeleteCurrentDuplicates(); err != nil { diff --git a/migrations/db_schema_version.go b/migrations/db_schema_version.go index cf2ce83f321..17122e0a155 100644 --- a/migrations/db_schema_version.go +++ b/migrations/db_schema_version.go @@ -4,11 +4,12 @@ import ( "context" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/node/nodecfg/datadir" ) var dbSchemaVersion5 = Migration{ Name: "db_schema_version5", - Up: func(db kv.RwDB, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { + Up: func(db kv.RwDB, dirs datadir.Dirs, progress []byte, BeforeCommit Callback) (err error) { tx, err := db.BeginRw(context.Background()) if err != nil { return err diff --git a/migrations/migrations.go b/migrations/migrations.go index 501805a4b66..2013a0fa833 100644 --- a/migrations/migrations.go +++ b/migrations/migrations.go @@ -10,6 +10,7 @@ import ( "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" + "github.com/ledgerwatch/erigon/node/nodecfg/datadir" "github.com/ledgerwatch/log/v3" "github.com/ugorji/go/codec" ) @@ -33,6 +34,7 @@ var migrations = map[kv.Label][]Migration{ kv.ChainDB: { dbSchemaVersion5, txsBeginEnd, + resetBlocks, }, kv.TxPoolDB: {}, kv.SentryDB: {}, @@ -41,7 +43,7 @@ var migrations = map[kv.Label][]Migration{ type Callback func(tx kv.RwTx, progress []byte, isDone bool) error type Migration struct { Name string - Up func(db kv.RwDB, tmpdir string, progress []byte, BeforeCommit Callback) error + Up func(db kv.RwDB, dirs datadir.Dirs, progress []byte, BeforeCommit Callback) error } var ( @@ -151,10 +153,11 @@ func (m *Migrator) VerifyVersion(db kv.RwDB) error { return nil } -func (m *Migrator) Apply(db kv.RwDB, datadir string) error { +func (m *Migrator) Apply(db kv.RwDB, dataDir string) error { if len(m.Migrations) == 0 { return nil } + dirs := datadir.New(dataDir) var 
applied map[string][]byte if err := db.View(context.Background(), func(tx kv.Tx) error { @@ -198,7 +201,8 @@ func (m *Migrator) Apply(db kv.RwDB, datadir string) error { return fmt.Errorf("migrator.Apply: %w", err) } - if err := v.Up(db, filepath.Join(datadir, "migrations", v.Name), progress, func(tx kv.RwTx, key []byte, isDone bool) error { + dirs.Tmp = filepath.Join(dirs.DataDir, "migrations", v.Name) + if err := v.Up(db, dirs, progress, func(tx kv.RwTx, key []byte, isDone bool) error { if !isDone { if key != nil { if err := tx.Put(kv.Migrations, []byte("_progress_"+v.Name), key); err != nil { diff --git a/migrations/migrations_test.go b/migrations/migrations_test.go index e4e951b6a4d..7082ca8b21c 100644 --- a/migrations/migrations_test.go +++ b/migrations/migrations_test.go @@ -8,6 +8,7 @@ import ( "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/memdb" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" + "github.com/ledgerwatch/erigon/node/nodecfg/datadir" "github.com/stretchr/testify/require" ) @@ -17,7 +18,7 @@ func TestApplyWithInit(t *testing.T) { m := []Migration{ { "one", - func(db kv.RwDB, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { + func(db kv.RwDB, dirs datadir.Dirs, progress []byte, BeforeCommit Callback) (err error) { tx, err := db.BeginRw(context.Background()) if err != nil { return err @@ -32,7 +33,7 @@ func TestApplyWithInit(t *testing.T) { }, { "two", - func(db kv.RwDB, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { + func(db kv.RwDB, dirs datadir.Dirs, progress []byte, BeforeCommit Callback) (err error) { tx, err := db.BeginRw(context.Background()) if err != nil { return err @@ -81,14 +82,14 @@ func TestApplyWithoutInit(t *testing.T) { m := []Migration{ { "one", - func(db kv.RwDB, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { + func(db kv.RwDB, dirs datadir.Dirs, progress []byte, BeforeCommit Callback) (err error) { t.Fatal("shouldn't been executed") return nil }, }, { "two", - func(db kv.RwDB, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { + func(db kv.RwDB, dirs datadir.Dirs, progress []byte, BeforeCommit Callback) (err error) { tx, err := db.BeginRw(context.Background()) if err != nil { return err @@ -145,7 +146,7 @@ func TestWhenNonFirstMigrationAlreadyApplied(t *testing.T) { m := []Migration{ { "one", - func(db kv.RwDB, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { + func(db kv.RwDB, dirs datadir.Dirs, progress []byte, BeforeCommit Callback) (err error) { tx, err := db.BeginRw(context.Background()) if err != nil { return err @@ -160,7 +161,7 @@ func TestWhenNonFirstMigrationAlreadyApplied(t *testing.T) { }, { "two", - func(db kv.RwDB, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { + func(db kv.RwDB, dirs datadir.Dirs, progress []byte, BeforeCommit Callback) (err error) { t.Fatal("shouldn't been executed") return nil }, @@ -226,7 +227,7 @@ func TestValidation(t *testing.T) { m := []Migration{ { Name: "repeated_name", - Up: func(db kv.RwDB, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { + Up: func(db kv.RwDB, dirs datadir.Dirs, progress []byte, BeforeCommit Callback) (err error) { tx, err := db.BeginRw(context.Background()) if err != nil { return err @@ -241,7 +242,7 @@ func TestValidation(t *testing.T) { }, { Name: "repeated_name", - Up: func(db kv.RwDB, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { + Up: func(db kv.RwDB, dirs datadir.Dirs, progress 
[]byte, BeforeCommit Callback) (err error) { tx, err := db.BeginRw(context.Background()) if err != nil { return err @@ -275,7 +276,7 @@ func TestCommitCallRequired(t *testing.T) { m := []Migration{ { Name: "one", - Up: func(db kv.RwDB, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { + Up: func(db kv.RwDB, dirs datadir.Dirs, progress []byte, BeforeCommit Callback) (err error) { //don't call BeforeCommit return nil }, diff --git a/migrations/reset_blocks.go b/migrations/reset_blocks.go new file mode 100644 index 00000000000..add8240bad2 --- /dev/null +++ b/migrations/reset_blocks.go @@ -0,0 +1,69 @@ +package migrations + +import ( + "context" + + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/rawdb/rawdbreset" + "github.com/ledgerwatch/erigon/node/nodecfg/datadir" + "github.com/ledgerwatch/erigon/turbo/snapshotsync/snap" + "github.com/ledgerwatch/log/v3" +) + +var resetBlocks = Migration{ + Name: "reset_blocks_3", + Up: func(db kv.RwDB, dirs datadir.Dirs, progress []byte, BeforeCommit Callback) (err error) { + tx, err := db.BeginRw(context.Background()) + if err != nil { + return err + } + defer tx.Rollback() + + enabled, err := snap.Enabled(tx) + if err != nil { + return err + } + + if !enabled { + if err := BeforeCommit(tx, nil, true); err != nil { + return err + } + return + } + genesisBlock := rawdb.ReadHeaderByNumber(tx, 0) + if genesisBlock == nil { + if err := BeforeCommit(tx, nil, true); err != nil { + return err + } + return nil + } + chainConfig, err := rawdb.ReadChainConfig(tx, genesisBlock.Hash()) + if err != nil { + return err + } + log.Warn("NOTE: this migration will remove recent blocks (and senders) to fix several recent bugs. Your node will re-download last ~400K blocks, should not take very long") + + if err := snap.RemoveNonPreverifiedFiles(chainConfig.ChainName, dirs.Snap); err != nil { + return err + } + + if err := rawdbreset.ResetBlocks(tx); err != nil { + return err + } + + if err := rawdbreset.ResetSenders(tx); err != nil { + return err + } + + if err := rawdbreset.ResetTxLookup(tx); err != nil { + return err + } + + // This migration is no-op, but it forces the migration mechanism to apply it and thus write the DB schema version info + if err := BeforeCommit(tx, nil, true); err != nil { + return err + } + return tx.Commit() + }, +} diff --git a/migrations/txs_begin_end.go b/migrations/txs_begin_end.go index c90a71b4fa4..fbcc16c1667 100644 --- a/migrations/txs_begin_end.go +++ b/migrations/txs_begin_end.go @@ -15,6 +15,7 @@ import ( "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" + "github.com/ledgerwatch/erigon/node/nodecfg/datadir" "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/log/v3" ) @@ -25,7 +26,7 @@ var ErrTxsBeginEndNoMigration = fmt.Errorf("in this Erigon version DB format was var txsBeginEnd = Migration{ Name: "txs_begin_end", - Up: func(db kv.RwDB, tmpdir string, progress []byte, BeforeCommit Callback) (err error) { + Up: func(db kv.RwDB, dirs datadir.Dirs, progress []byte, BeforeCommit Callback) (err error) { logEvery := time.NewTicker(10 * time.Second) defer logEvery.Stop() diff --git a/turbo/snapshotsync/block_snapshots.go b/turbo/snapshotsync/block_snapshots.go index 0e1ddb0d984..48b8d8217b9 100644 --- a/turbo/snapshotsync/block_snapshots.go +++ b/turbo/snapshotsync/block_snapshots.go @@ -388,7 +388,7 @@ func (s *RoSnapshots) Reopen() error { 
 	s.Txs.lock.Lock()
 	defer s.Txs.lock.Unlock()
 	s.closeSegmentsLocked()
-	files, err := segments2(s.dir)
+	files, err := segments(s.dir)
 	if err != nil {
 		return err
 	}
@@ -483,7 +483,7 @@ func (s *RoSnapshots) ReopenSegments() error {
 	s.Txs.lock.Lock()
 	defer s.Txs.lock.Unlock()
 	s.closeSegmentsLocked()
-	files, err := segments2(s.dir)
+	files, err := segments(s.dir)
 	if err != nil {
 		return err
 	}
@@ -830,7 +830,7 @@ func noOverlaps(in []snap.FileInfo) (res []snap.FileInfo) {
 	return res
 }
 
-func segments2(dir string) (res []snap.FileInfo, err error) {
+func segments(dir string) (res []snap.FileInfo, err error) {
 	list, err := snap.Segments(dir)
 	if err != nil {
 		return nil, err
diff --git a/turbo/snapshotsync/snap/files.go b/turbo/snapshotsync/snap/files.go
index 0e1ee2e7d03..4f3328cda48 100644
--- a/turbo/snapshotsync/snap/files.go
+++ b/turbo/snapshotsync/snap/files.go
@@ -9,6 +9,7 @@ import (
 	"strconv"
 	"strings"
 
+	"github.com/ledgerwatch/erigon/turbo/snapshotsync/snapshothashes"
 	"golang.org/x/exp/slices"
 )
 
@@ -208,3 +209,44 @@ func ParseDir(dir string) (res []FileInfo, err error) {
 
 	return res, nil
 }
+
+func RemoveNonPreverifiedFiles(chainName, snapDir string) error {
+	preverified := snapshothashes.KnownConfig(chainName).Preverified
+	keep := map[string]struct{}{}
+	for _, p := range preverified {
+		ext := filepath.Ext(p.Name)
+		withoutExt := p.Name[0 : len(p.Name)-len(ext)]
+		keep[withoutExt] = struct{}{}
+	}
+	list, err := Segments(snapDir)
+	if err != nil {
+		return err
+	}
+	for _, f := range list {
+		_, fname := filepath.Split(f.Path)
+		ext := filepath.Ext(fname)
+		withoutExt := fname[0 : len(fname)-len(ext)]
+		if _, ok := keep[withoutExt]; !ok {
+			_ = os.Remove(f.Path)
+		} else {
+			if f.T == Transactions {
+				idxPath := IdxFileName(f.From, f.To, Transactions2Block.String())
+				idxExt := filepath.Ext(idxPath)
+				keep[idxPath[0:len(idxPath)-len(idxExt)]] = struct{}{}
+			}
+		}
+	}
+	list, err = IdxFiles(snapDir)
+	if err != nil {
+		return err
+	}
+	for _, f := range list {
+		_, fname := filepath.Split(f.Path)
+		ext := filepath.Ext(fname)
+		withoutExt := fname[0 : len(fname)-len(ext)]
+		if _, ok := keep[withoutExt]; !ok {
+			_ = os.Remove(f.Path)
+		}
+	}
+	return nil
+}

From feabb96e69f2b744d009ad6d3c3cbeac8e2e4252 Mon Sep 17 00:00:00 2001
From: Alex Sharov
Date: Wed, 8 Jun 2022 09:29:59 +0700
Subject: [PATCH 018/136] Snapshots: allow delete .seg files #4403

---
 cmd/downloader/downloader/util.go | 8 ++++----
 cmd/downloader/readme.md | 6 ++++--
 go.mod | 2 +-
 go.sum | 4 ++--
 4 files changed, 11 insertions(+), 9 deletions(-)

diff --git a/cmd/downloader/downloader/util.go b/cmd/downloader/downloader/util.go
index c31278be860..4056380d83d 100644
--- a/cmd/downloader/downloader/util.go
+++ b/cmd/downloader/downloader/util.go
@@ -20,6 +20,7 @@ import (
 	"github.com/anacrolix/torrent/mmap_span"
 	"github.com/edsrzf/mmap-go"
 	common2 "github.com/ledgerwatch/erigon-lib/common"
+	"github.com/ledgerwatch/erigon-lib/common/cmp"
 	"github.com/ledgerwatch/erigon-lib/kv"
 	"github.com/ledgerwatch/erigon/cmd/downloader/downloader/torrentcfg"
 	"github.com/ledgerwatch/erigon/cmd/downloader/trackers"
@@ -181,6 +182,7 @@ func BuildTorrentFilesIfNeed(ctx context.Context, snapDir string) error {
 }
 
 // BuildTorrentsAndAdd - create .torrent files from .seg files (big IO) - if .seg files were placed manually to snapDir
+// torrent.Client automatically reads all .torrent files, but we also want to add .seg files even if the corresponding .torrent doesn't exist
 func BuildTorrentsAndAdd(ctx context.Context, snapDir string, client 
*torrent.Client) error { logEvery := time.NewTicker(20 * time.Second) defer logEvery.Stop() @@ -190,10 +192,7 @@ func BuildTorrentsAndAdd(ctx context.Context, snapDir string, client *torrent.Cl } errs := make(chan error, len(files)*2) wg := &sync.WaitGroup{} - workers := runtime.GOMAXPROCS(-1) - 1 - if workers < 1 { - workers = 1 - } + workers := cmp.Max(1, runtime.GOMAXPROCS(-1)-1) var sem = semaphore.NewWeighted(int64(workers)) for i, f := range files { wg.Add(1) @@ -223,6 +222,7 @@ func BuildTorrentsAndAdd(ctx context.Context, snapDir string, client *torrent.Cl return err } } + return nil } diff --git a/cmd/downloader/readme.md b/cmd/downloader/readme.md index 8861dff589c..af9f1724a1d 100644 --- a/cmd/downloader/readme.md +++ b/cmd/downloader/readme.md @@ -1,10 +1,10 @@ -# Downloader +# Downloader Service to seed/download historical data (snapshots, immutable .seg files) by Bittorrent protocol ## How to Start Erigon in snapshot sync mode -As many other Erigon components (txpool, sentry, rpc daemon) it may be built-into Erigon or run as separated process. +As many other Erigon components (txpool, sentry, rpc daemon) it may be built-into Erigon or run as separated process. ```shell # 1. Downloader by default run inside Erigon, by `--syncmode=snap` flag: @@ -54,6 +54,7 @@ downloader --downloader.api.addr=127.0.0.1:9093 --datadir= ``` Additional info: + ```shell # Snapshots creation does not require fully-synced Erigon - few first stages enough. For example: STOP_BEFORE_STAGE=Execution ./build/bin/erigon --syncmode=full --datadir= @@ -90,6 +91,7 @@ Downloader does: Technical details: - To prevent attack - .idx creation using random Seed - all nodes will have different .idx file (and same .seg files) +- If you add/remove any .seg file manually, also need remove `/snapshots/db` folder ## How to verify that .seg files have same checksum withch current .torrent files diff --git a/go.mod b/go.mod index b186dd0b4cc..b572c01a741 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/VictoriaMetrics/metrics v1.18.1 github.com/anacrolix/go-libutp v1.2.0 github.com/anacrolix/log v0.13.1 - github.com/anacrolix/torrent v1.43.1 + github.com/anacrolix/torrent v1.44.0 github.com/btcsuite/btcd v0.22.0-beta github.com/c2h5oh/datasize v0.0.0-20200825124411-48ed595a09d2 github.com/consensys/gnark-crypto v0.4.0 diff --git a/go.sum b/go.sum index b21016665e4..8bf76a97afd 100644 --- a/go.sum +++ b/go.sum @@ -79,8 +79,8 @@ github.com/anacrolix/sync v0.4.0/go.mod h1:BbecHL6jDSExojhNtgTFSBcdGerzNc64tz3DC github.com/anacrolix/tagflag v0.0.0-20180109131632-2146c8d41bf0/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw= github.com/anacrolix/tagflag v1.0.0/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw= github.com/anacrolix/tagflag v1.1.0/go.mod h1:Scxs9CV10NQatSmbyjqmqmeQNwGzlNe0CMUMIxqHIG8= -github.com/anacrolix/torrent v1.43.1 h1:oM9bOaEdOHJuXM8aaEbAHG5C1T0npyB4v5OflrmRNgY= -github.com/anacrolix/torrent v1.43.1/go.mod h1:SsvA8hlN3q1gC1Pf+fJ7QrfWI+5DumO6tEl4bqf+D2U= +github.com/anacrolix/torrent v1.44.0 h1:Yl58hCsX+4O7me5oUWQphg0G46bs22hJWLdEYAq250w= +github.com/anacrolix/torrent v1.44.0/go.mod h1:SsvA8hlN3q1gC1Pf+fJ7QrfWI+5DumO6tEl4bqf+D2U= github.com/anacrolix/upnp v0.1.3-0.20220123035249-922794e51c96 h1:QAVZ3pN/J4/UziniAhJR2OZ9Ox5kOY2053tBbbqUPYA= github.com/anacrolix/upnp v0.1.3-0.20220123035249-922794e51c96/go.mod h1:Wa6n8cYIdaG35x15aH3Zy6d03f7P728QfdcDeD/IEOs= github.com/anacrolix/utp v0.1.0 h1:FOpQOmIwYsnENnz7tAGohA+r6iXpRjrq8ssKSre2Cp4= From cbe25665db89fc35175e1d6afae1f18da7f96ff0 
Mon Sep 17 00:00:00 2001 From: Willian Mitsuda Date: Tue, 7 Jun 2022 23:30:24 -0300 Subject: [PATCH 019/136] Fix --help for --http; embedded rpcdaemon is off by default (#4402) --- cmd/utils/flags.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 46b54df4c9b..0a4bcdb67da 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -308,7 +308,7 @@ var ( } HTTPEnabledFlag = cli.BoolFlag{ Name: "http", - Usage: "Enabled by default. Use --http=false to disable the HTTP-RPC server", + Usage: "Disabled by default. Use --http to enable the HTTP-RPC server", } HTTPListenAddrFlag = cli.StringFlag{ Name: "http.addr", From c97064173e5e85d07b6b2993d320bb3db487c3b2 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Wed, 8 Jun 2022 09:51:07 +0700 Subject: [PATCH 020/136] Open reset blocks migration: to delete .torrent files also #4404 (#4404) --- turbo/snapshotsync/snap/files.go | 1 + 1 file changed, 1 insertion(+) diff --git a/turbo/snapshotsync/snap/files.go b/turbo/snapshotsync/snap/files.go index 4f3328cda48..e5ff027a00e 100644 --- a/turbo/snapshotsync/snap/files.go +++ b/turbo/snapshotsync/snap/files.go @@ -228,6 +228,7 @@ func RemoveNonPreverifiedFiles(chainName, snapDir string) error { withoutExt := fname[0 : len(fname)-len(ext)] if _, ok := keep[withoutExt]; !ok { _ = os.Remove(f.Path) + _ = os.Remove(f.Path + ".torrent") } else { if f.T == Transactions { idxPath := IdxFileName(f.From, f.To, Transactions2Block.String()) From 46d026d8e8e92781e51fd8c19b11eb0c9cae24a3 Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Wed, 8 Jun 2022 11:13:09 +0200 Subject: [PATCH 021/136] Start txpool in AddLocalTxs (#4406) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index b572c01a741..a2a1cb6b846 100644 --- a/go.mod +++ b/go.mod @@ -34,7 +34,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible - github.com/ledgerwatch/erigon-lib v0.0.0-20220605213234-a77e6425eb24 + github.com/ledgerwatch/erigon-lib v0.0.0-20220608083922-5278815cd0b5 github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 diff --git a/go.sum b/go.sum index 8bf76a97afd..e74c291ce8a 100644 --- a/go.sum +++ b/go.sum @@ -383,8 +383,8 @@ github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3P github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220605213234-a77e6425eb24 h1:Ny8a+KpK6oOVLQTbGFnIppYmXgx6wDpHUz/8/3sKfK0= -github.com/ledgerwatch/erigon-lib v0.0.0-20220605213234-a77e6425eb24/go.mod h1:jNDE6PRPIA8wUdikJs8BvKtrFv101qOijIXA3HnDW8E= +github.com/ledgerwatch/erigon-lib v0.0.0-20220608083922-5278815cd0b5 h1:twatDydXUXs8PoFFF6x0AmeyUQAfkIRdfAUO2tbQ2jc= +github.com/ledgerwatch/erigon-lib v0.0.0-20220608083922-5278815cd0b5/go.mod h1:jNDE6PRPIA8wUdikJs8BvKtrFv101qOijIXA3HnDW8E= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 
h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= From 2305d09d0da24d7129cf0d6ffdea59e57840aa1a Mon Sep 17 00:00:00 2001 From: ledgerwatch Date: Wed, 8 Jun 2022 10:26:42 +0100 Subject: [PATCH 022/136] Print blocks to TTD (#4405) Co-authored-by: Alexey Sharp --- turbo/stages/headerdownload/header_algos.go | 44 +++++++++++++++------ 1 file changed, 32 insertions(+), 12 deletions(-) diff --git a/turbo/stages/headerdownload/header_algos.go b/turbo/stages/headerdownload/header_algos.go index 16b6e100e7f..63a22ecdc62 100644 --- a/turbo/stages/headerdownload/header_algos.go +++ b/turbo/stages/headerdownload/header_algos.go @@ -499,9 +499,11 @@ func (hd *HeaderDownload) VerifyHeader(header *types.Header) error { type FeedHeaderFunc = func(header *types.Header, headerRaw []byte, hash common.Hash, blockHeight uint64) (td *big.Int, err error) -func (hd *HeaderDownload) InsertHeader(hf FeedHeaderFunc, terminalTotalDifficulty *big.Int, logPrefix string, logChannel <-chan time.Time) (bool, bool, error) { +func (hd *HeaderDownload) InsertHeader(hf FeedHeaderFunc, terminalTotalDifficulty *big.Int, logPrefix string, logChannel <-chan time.Time) (bool, bool, uint64, error) { hd.lock.Lock() defer hd.lock.Unlock() + var returnTd *big.Int + var lastD *big.Int if hd.insertQueue.Len() > 0 && hd.insertQueue[0].blockHeight <= hd.highestInDb+1 { link := hd.insertQueue[0] _, bad := hd.badHeaders[link.hash] @@ -513,20 +515,20 @@ func (hd *HeaderDownload) InsertHeader(hf FeedHeaderFunc, terminalTotalDifficult hd.moveLinkToQueue(link, NoQueue) delete(hd.links, link.hash) hd.removeUpwards(link) - return true, false, nil + return true, false, 0, nil } if !link.verified { if err := hd.VerifyHeader(link.header); err != nil { if errors.Is(err, consensus.ErrFutureBlock) { // This may become valid later log.Warn("Added future link", "hash", link.hash, "height", link.blockHeight, "timestamp", link.header.Time) - return false, false, nil // prevent removal of the link from the hd.linkQueue + return false, false, 0, nil // prevent removal of the link from the hd.linkQueue } else { log.Debug("Verification failed for header", "hash", link.hash, "height", link.blockHeight, "err", err) hd.moveLinkToQueue(link, NoQueue) delete(hd.links, link.hash) hd.removeUpwards(link) - return true, false, nil + return true, false, 0, nil } } } @@ -539,21 +541,25 @@ func (hd *HeaderDownload) InsertHeader(hf FeedHeaderFunc, terminalTotalDifficult } td, err := hf(link.header, link.headerRaw, link.hash, link.blockHeight) if err != nil { - return false, false, err + return false, false, 0, err } if td != nil { if hd.seenAnnounces.Pop(link.hash) { hd.toAnnounce = append(hd.toAnnounce, Announce{Hash: link.hash, Number: link.blockHeight}) } // Check if transition to proof-of-stake happened and stop forward syncing - if terminalTotalDifficulty != nil && td.Cmp(terminalTotalDifficulty) >= 0 { - hd.highestInDb = link.blockHeight - log.Info(POSPandaBanner) - return true, true, nil + if terminalTotalDifficulty != nil { + if td.Cmp(terminalTotalDifficulty) >= 0 { + hd.highestInDb = link.blockHeight + log.Info(POSPandaBanner) + return true, true, 0, nil + } + returnTd = td + lastD = link.header.Difficulty } } if link.blockHeight == hd.latestMinedBlockNumber { - return false, true, nil + return false, true, 0, nil } if link.blockHeight > hd.highestInDb { @@ -578,7 +584,17 @@ func (hd *HeaderDownload) InsertHeader(hf FeedHeaderFunc, terminalTotalDifficult for child := link.fChild; child != nil; child, child.next = child.next, nil { } } - return hd.insertQueue.Len() 
> 0 && hd.insertQueue[0].blockHeight <= hd.highestInDb+1, false, nil + var blocksToTTD uint64 + if terminalTotalDifficulty != nil && returnTd != nil && lastD != nil { + // Calculate the estimation of when TTD will be hit + var x big.Int + x.Sub(terminalTotalDifficulty, returnTd) + x.Div(&x, lastD) + if x.IsUint64() { + blocksToTTD = x.Uint64() + } + } + return hd.insertQueue.Len() > 0 && hd.insertQueue[0].blockHeight <= hd.highestInDb+1, false, blocksToTTD, nil } // InsertHeaders attempts to insert headers into the database, verifying them first @@ -587,14 +603,18 @@ func (hd *HeaderDownload) InsertHeaders(hf FeedHeaderFunc, terminalTotalDifficul var more bool = true var err error var force bool + var blocksToTTD uint64 for more { - if more, force, err = hd.InsertHeader(hf, terminalTotalDifficulty, logPrefix, logChannel); err != nil { + if more, force, blocksToTTD, err = hd.InsertHeader(hf, terminalTotalDifficulty, logPrefix, logChannel); err != nil { return false, err } if force { return true, nil } } + if blocksToTTD > 0 { + log.Info("Estimated to reaching TTD", "blocks", blocksToTTD) + } hd.lock.RLock() defer hd.lock.RUnlock() return hd.highestInDb >= hd.preverifiedHeight && hd.topSeenHeightPoW > 0 && hd.highestInDb >= hd.topSeenHeightPoW, nil From 644d25d25df7b1ae08b13c6baa32b2779bc8ae33 Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Wed, 8 Jun 2022 12:41:50 +0200 Subject: [PATCH 023/136] MergeForkBlock -> MergeNetsplitBlock (#4407) --- cmd/utils/flags.go | 8 ++++---- core/genesis.go | 16 ++++++++-------- eth/backend.go | 2 +- eth/ethconfig/config.go | 2 +- params/chainspecs/kiln-devnet.json | 2 +- params/config.go | 2 +- turbo/cli/default_flags.go | 2 +- 7 files changed, 17 insertions(+), 17 deletions(-) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 0a4bcdb67da..33146b27ef8 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -134,8 +134,8 @@ var ( Name: "override.terminaltotaldifficulty", Usage: "Manually specify TerminalTotalDifficulty, overriding the bundled setting", } - OverrideMergeForkBlock = BigFlag{ - Name: "override.mergeForkBlock", + OverrideMergeNetsplitBlock = BigFlag{ + Name: "override.mergeNetsplitBlock", Usage: "Manually specify FORK_NEXT_VALUE (see EIP-3675), overriding the bundled setting", } // Ethash settings @@ -1495,8 +1495,8 @@ func SetEthConfig(ctx *cli.Context, nodeConfig *nodecfg.Config, cfg *ethconfig.C if ctx.GlobalIsSet(OverrideTerminalTotalDifficulty.Name) { cfg.OverrideTerminalTotalDifficulty = GlobalBig(ctx, OverrideTerminalTotalDifficulty.Name) } - if ctx.GlobalIsSet(OverrideMergeForkBlock.Name) { - cfg.OverrideMergeForkBlock = GlobalBig(ctx, OverrideMergeForkBlock.Name) + if ctx.GlobalIsSet(OverrideMergeNetsplitBlock.Name) { + cfg.OverrideMergeNetsplitBlock = GlobalBig(ctx, OverrideMergeNetsplitBlock.Name) } } diff --git a/core/genesis.go b/core/genesis.go index e63dda658e4..241972f3364 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -176,13 +176,13 @@ func CommitGenesisBlock(db kv.RwDB, genesis *Genesis) (*params.ChainConfig, *typ return CommitGenesisBlockWithOverride(db, genesis, nil, nil) } -func CommitGenesisBlockWithOverride(db kv.RwDB, genesis *Genesis, overrideMergeForkBlock, overrideTerminalTotalDifficulty *big.Int) (*params.ChainConfig, *types.Block, error) { +func CommitGenesisBlockWithOverride(db kv.RwDB, genesis *Genesis, overrideMergeNetsplitBlock, overrideTerminalTotalDifficulty *big.Int) (*params.ChainConfig, *types.Block, error) { tx, err := 
db.BeginRw(context.Background()) if err != nil { return nil, nil, err } defer tx.Rollback() - c, b, err := WriteGenesisBlock(tx, genesis, overrideMergeForkBlock, overrideTerminalTotalDifficulty) + c, b, err := WriteGenesisBlock(tx, genesis, overrideMergeNetsplitBlock, overrideTerminalTotalDifficulty) if err != nil { return c, b, err } @@ -201,7 +201,7 @@ func MustCommitGenesisBlock(db kv.RwDB, genesis *Genesis) (*params.ChainConfig, return c, b } -func WriteGenesisBlock(db kv.RwTx, genesis *Genesis, overrideMergeForkBlock, overrideTerminalTotalDifficulty *big.Int) (*params.ChainConfig, *types.Block, error) { +func WriteGenesisBlock(db kv.RwTx, genesis *Genesis, overrideMergeNetsplitBlock, overrideTerminalTotalDifficulty *big.Int) (*params.ChainConfig, *types.Block, error) { if genesis != nil && genesis.Config == nil { return params.AllEthashProtocolChanges, nil, ErrGenesisNoConfig } @@ -217,8 +217,8 @@ func WriteGenesisBlock(db kv.RwTx, genesis *Genesis, overrideMergeForkBlock, ove genesis = DefaultGenesisBlock() custom = false } - if overrideMergeForkBlock != nil { - genesis.Config.MergeForkBlock = overrideMergeForkBlock + if overrideMergeNetsplitBlock != nil { + genesis.Config.MergeNetsplitBlock = overrideMergeNetsplitBlock } if overrideTerminalTotalDifficulty != nil { genesis.Config.TerminalTotalDifficulty = overrideTerminalTotalDifficulty @@ -250,8 +250,8 @@ func WriteGenesisBlock(db kv.RwTx, genesis *Genesis, overrideMergeForkBlock, ove } // Get the existing chain configuration. newCfg := genesis.configOrDefault(storedHash) - if overrideMergeForkBlock != nil { - newCfg.MergeForkBlock = overrideMergeForkBlock + if overrideMergeNetsplitBlock != nil { + newCfg.MergeNetsplitBlock = overrideMergeNetsplitBlock } if overrideTerminalTotalDifficulty != nil { newCfg.TerminalTotalDifficulty = overrideTerminalTotalDifficulty @@ -274,7 +274,7 @@ func WriteGenesisBlock(db kv.RwTx, genesis *Genesis, overrideMergeForkBlock, ove // Special case: don't change the existing config of a non-mainnet chain if no new // config is supplied. These chains would get AllProtocolChanges (and a compatibility error) // if we just continued here. - if genesis == nil && storedHash != params.MainnetGenesisHash && overrideMergeForkBlock == nil && overrideTerminalTotalDifficulty == nil { + if genesis == nil && storedHash != params.MainnetGenesisHash && overrideMergeNetsplitBlock == nil && overrideTerminalTotalDifficulty == nil { return storedCfg, storedBlock, nil } // Check config compatibility and write the config. 
Compatibility errors diff --git a/eth/backend.go b/eth/backend.go index 0d5782795fb..8dea5a2d42e 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -175,7 +175,7 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere panic(err) } - chainConfig, genesis, genesisErr := core.CommitGenesisBlockWithOverride(chainKv, config.Genesis, config.OverrideMergeForkBlock, config.OverrideTerminalTotalDifficulty) + chainConfig, genesis, genesisErr := core.CommitGenesisBlockWithOverride(chainKv, config.Genesis, config.OverrideMergeNetsplitBlock, config.OverrideTerminalTotalDifficulty) if _, ok := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !ok { return nil, genesisErr } diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index b9165de123c..65806d3864a 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -224,7 +224,7 @@ type Config struct { Ethstats string // FORK_NEXT_VALUE (see EIP-3675) block override - OverrideMergeForkBlock *big.Int `toml:",omitempty"` + OverrideMergeNetsplitBlock *big.Int `toml:",omitempty"` OverrideTerminalTotalDifficulty *big.Int `toml:",omitempty"` } diff --git a/params/chainspecs/kiln-devnet.json b/params/chainspecs/kiln-devnet.json index 5141f75aab2..3553e08f056 100644 --- a/params/chainspecs/kiln-devnet.json +++ b/params/chainspecs/kiln-devnet.json @@ -14,6 +14,6 @@ "londonBlock": 0, "terminalTotalDifficulty": 20000000000000, "terminalBlockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "mergeForkBlock": 1000, + "mergeNetsplitBlock": 1000, "ethash": {} } diff --git a/params/config.go b/params/config.go index 595b5068001..88cc6fc78f6 100644 --- a/params/config.go +++ b/params/config.go @@ -248,7 +248,7 @@ type ChainConfig struct { TerminalTotalDifficulty *big.Int `json:"terminalTotalDifficulty,omitempty"` // The merge happens when terminal total difficulty is reached TerminalBlockNumber *big.Int `json:"terminalBlockNumber,omitempty"` // Enforce particular terminal block; see TerminalBlockNumber in EIP-3675 TerminalBlockHash common.Hash `json:"terminalBlockHash,omitempty"` // Enforce particular terminal block; see TERMINAL_BLOCK_HASH in EIP-3675 - MergeForkBlock *big.Int `json:"mergeForkBlock,omitempty"` + MergeNetsplitBlock *big.Int `json:"mergeNetsplitBlock,omitempty"` // Virtual fork after The Merge to use as a network splitter; see FORK_NEXT_VALUE in EIP-3675 // Various consensus engines Ethash *EthashConfig `json:"ethash,omitempty"` diff --git a/turbo/cli/default_flags.go b/turbo/cli/default_flags.go index 8d73a12b39b..b127691e1c2 100644 --- a/turbo/cli/default_flags.go +++ b/turbo/cli/default_flags.go @@ -125,5 +125,5 @@ var DefaultFlags = []cli.Flag{ utils.WithoutHeimdallFlag, utils.EthStatsURLFlag, utils.OverrideTerminalTotalDifficulty, - utils.OverrideMergeForkBlock, + utils.OverrideMergeNetsplitBlock, } From bd96c698a49c33a8d0253073e741c6b93a491db1 Mon Sep 17 00:00:00 2001 From: iFA Date: Wed, 8 Jun 2022 22:56:20 +0200 Subject: [PATCH 024/136] Fix operation order during tracing suicide op code (#4409) --- core/vm/instructions.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/vm/instructions.go b/core/vm/instructions.go index 36b52d77b8d..ccea368c2bc 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -837,10 +837,10 @@ func opSuicide(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([] callerAddr := scope.Contract.Address() beneficiaryAddr := common.Address(beneficiary.Bytes20()) balance := 
interpreter.evm.IntraBlockState().GetBalance(callerAddr) - interpreter.evm.IntraBlockState().AddBalance(beneficiaryAddr, balance) if interpreter.evm.Config().Debug { interpreter.evm.Config().Tracer.CaptureSelfDestruct(callerAddr, beneficiaryAddr, balance.ToBig()) } + interpreter.evm.IntraBlockState().AddBalance(beneficiaryAddr, balance) interpreter.evm.IntraBlockState().Suicide(callerAddr) return nil, nil } From f31abfe83b88a9688bfb7415da14f514e45159d3 Mon Sep 17 00:00:00 2001 From: ledgerwatch Date: Thu, 9 Jun 2022 02:48:19 +0100 Subject: [PATCH 025/136] Fixes for RPC notification log (#4410) * Underflow fix * Fix rpc notification to 0 Co-authored-by: Alex Sharp Co-authored-by: Alexey Sharp --- eth/stagedsync/stage_finish.go | 2 +- turbo/stages/stageloop.go | 16 ++++++---------- 2 files changed, 7 insertions(+), 11 deletions(-) diff --git a/eth/stagedsync/stage_finish.go b/eth/stagedsync/stage_finish.go index b9e3bc174b9..2c6ad964399 100644 --- a/eth/stagedsync/stage_finish.go +++ b/eth/stagedsync/stage_finish.go @@ -146,7 +146,7 @@ func NotifyNewHeaders(ctx context.Context, finishStageBeforeSync uint64, finishS } notifyFrom++ - var notifyTo uint64 + var notifyTo uint64 = notifyFrom var headersRlp [][]byte if err := tx.ForEach(kv.Headers, dbutils.EncodeBlockNumber(notifyFrom), func(k, headerRLP []byte) error { if len(headerRLP) == 0 { diff --git a/turbo/stages/stageloop.go b/turbo/stages/stageloop.go index a0a72ef6204..dacd8a2a2ca 100644 --- a/turbo/stages/stageloop.go +++ b/turbo/stages/stageloop.go @@ -132,7 +132,7 @@ func StageLoopStep( return headBlockHash, err } - canRunCycleInOneTransaction := !initialCycle && highestSeenHeader-origin < 8096 && highestSeenHeader-finishProgressBefore < 8096 + canRunCycleInOneTransaction := !initialCycle && highestSeenHeader < origin+8096 && highestSeenHeader < finishProgressBefore+8096 var tx kv.RwTx // on this variable will run sync cycle. 
if canRunCycleInOneTransaction { @@ -186,7 +186,6 @@ func StageLoopStep( log.Error("snapshot migration failed", "err", err) } } - rotx.Rollback() headTd256, overflow := uint256.FromBig(headTd) if overflow { @@ -195,11 +194,8 @@ func StageLoopStep( updateHead(ctx, head, headHash, headTd256) if notifications != nil && notifications.Accumulator != nil { - if err := db.View(ctx, func(tx kv.Tx) error { - header := rawdb.ReadCurrentHeader(tx) - if header == nil { - return nil - } + header := rawdb.ReadCurrentHeader(rotx) + if header != nil { pendingBaseFee := misc.CalcBaseFee(notifications.Accumulator.ChainConfig(), header) if header.Number.Uint64() == 0 { @@ -207,9 +203,9 @@ func StageLoopStep( } notifications.Accumulator.SendAndReset(ctx, notifications.StateChangesConsumer, pendingBaseFee.Uint64(), header.GasLimit) - return stagedsync.NotifyNewHeaders(ctx, finishProgressBefore, head, sync.PrevUnwindPoint(), notifications.Events, tx) - }); err != nil { - return headBlockHash, err + if err = stagedsync.NotifyNewHeaders(ctx, finishProgressBefore, head, sync.PrevUnwindPoint(), notifications.Events, rotx); err != nil { + return headBlockHash, nil + } } } From 935975bebd8cfe575227abbdf41568faf30327bb Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Thu, 9 Jun 2022 09:45:30 +0700 Subject: [PATCH 026/136] methods to configure db (#4412) * save * save * save --- cmd/integration/commands/reset_state.go | 2 +- cmd/integration/commands/root.go | 39 +++++++++++------------- cmd/integration/commands/stages.go | 32 +++++++++---------- cmd/integration/commands/state_stages.go | 6 ++-- go.mod | 2 +- go.sum | 4 +-- 6 files changed, 41 insertions(+), 44 deletions(-) diff --git a/cmd/integration/commands/reset_state.go b/cmd/integration/commands/reset_state.go index 76ffc879661..48b9365db7b 100644 --- a/cmd/integration/commands/reset_state.go +++ b/cmd/integration/commands/reset_state.go @@ -22,7 +22,7 @@ var cmdResetState = &cobra.Command{ RunE: func(cmd *cobra.Command, args []string) error { ctx, _ := common.RootContext() logger := log.New() - db := openDB(chaindata, logger, true) + db := openDB(dbCfg(kv.ChainDB, logger, chaindata), true) defer db.Close() if err := db.View(ctx, func(tx kv.Tx) error { return printStages(tx) }); err != nil { return err diff --git a/cmd/integration/commands/root.go b/cmd/integration/commands/root.go index c9614c291a9..5db26bfeacc 100644 --- a/cmd/integration/commands/root.go +++ b/cmd/integration/commands/root.go @@ -34,38 +34,35 @@ func RootCommand() *cobra.Command { return rootCmd } -func openDB(path string, logger log.Logger, applyMigrations bool) kv.RwDB { - label := kv.ChainDB - db := openKV(label, logger, path, false) +func dbCfg(label kv.Label, logger log.Logger, path string) kv2.MdbxOpts { + opts := kv2.NewMDBX(logger).Path(path).Label(label) + if label == kv.ChainDB { + opts = opts.MapSize(8 * datasize.TB) + } + if databaseVerbosity != -1 { + opts = opts.DBVerbosity(kv.DBVerbosityLvl(databaseVerbosity)) + } + return opts +} + +func openDB(opts kv2.MdbxOpts, applyMigrations bool) kv.RwDB { + db := opts.MustOpen() if applyMigrations { - has, err := migrations.NewMigrator(label).HasPendingMigrations(db) + migrator := migrations.NewMigrator(opts.GetLabel()) + has, err := migrator.HasPendingMigrations(db) if err != nil { panic(err) } if has { log.Info("Re-Opening DB in exclusive mode to apply DB migrations") db.Close() - db = openKV(label, logger, path, true) - if err := migrations.NewMigrator(label).Apply(db, datadirCli); err != nil { + db = opts.Exclusive().MustOpen() + if 
err := migrator.Apply(db, datadirCli); err != nil { panic(err) } db.Close() - db = openKV(label, logger, path, false) + db = opts.MustOpen() } } return db } - -func openKV(label kv.Label, logger log.Logger, path string, exclusive bool) kv.RwDB { - opts := kv2.NewMDBX(logger).Path(path).Label(label) - if label == kv.ChainDB { - opts = opts.MapSize(8 * datasize.TB) - } - if exclusive { - opts = opts.Exclusive() - } - if databaseVerbosity != -1 { - opts = opts.DBVerbosity(kv.DBVerbosityLvl(databaseVerbosity)) - } - return opts.MustOpen() -} diff --git a/cmd/integration/commands/stages.go b/cmd/integration/commands/stages.go index 52feb4185c9..43e970a147b 100644 --- a/cmd/integration/commands/stages.go +++ b/cmd/integration/commands/stages.go @@ -49,7 +49,7 @@ var cmdStageHeaders = &cobra.Command{ RunE: func(cmd *cobra.Command, args []string) error { ctx, _ := common2.RootContext() logger := log.New() - db := openDB(chaindata, logger, true) + db := openDB(dbCfg(kv.ChainDB, logger, chaindata), true) defer db.Close() if err := stageHeaders(db, ctx); err != nil { @@ -66,7 +66,7 @@ var cmdStageBodies = &cobra.Command{ RunE: func(cmd *cobra.Command, args []string) error { ctx, _ := common2.RootContext() logger := log.New() - db := openDB(chaindata, logger, true) + db := openDB(dbCfg(kv.ChainDB, logger, chaindata), true) defer db.Close() if err := stageBodies(db, ctx); err != nil { @@ -83,7 +83,7 @@ var cmdStageSenders = &cobra.Command{ RunE: func(cmd *cobra.Command, args []string) error { logger := log.New() ctx, _ := common2.RootContext() - db := openDB(chaindata, logger, true) + db := openDB(dbCfg(kv.ChainDB, logger, chaindata), true) defer db.Close() if err := stageSenders(db, ctx); err != nil { @@ -100,7 +100,7 @@ var cmdStageExec = &cobra.Command{ RunE: func(cmd *cobra.Command, args []string) error { ctx, _ := common2.RootContext() logger := log.New() - db := openDB(chaindata, logger, true) + db := openDB(dbCfg(kv.ChainDB, logger, chaindata), true) defer db.Close() if err := stageExec(db, ctx); err != nil { @@ -117,7 +117,7 @@ var cmdStageTrie = &cobra.Command{ RunE: func(cmd *cobra.Command, args []string) error { ctx, _ := common2.RootContext() logger := log.New() - db := openDB(chaindata, logger, true) + db := openDB(dbCfg(kv.ChainDB, logger, chaindata), true) defer db.Close() if err := stageTrie(db, ctx); err != nil { @@ -134,7 +134,7 @@ var cmdStageHashState = &cobra.Command{ RunE: func(cmd *cobra.Command, args []string) error { logger := log.New() ctx, _ := common2.RootContext() - db := openDB(chaindata, logger, true) + db := openDB(dbCfg(kv.ChainDB, logger, chaindata), true) defer db.Close() if err := stageHashState(db, ctx); err != nil { @@ -151,7 +151,7 @@ var cmdStageHistory = &cobra.Command{ RunE: func(cmd *cobra.Command, args []string) error { ctx, _ := common2.RootContext() logger := log.New() - db := openDB(chaindata, logger, true) + db := openDB(dbCfg(kv.ChainDB, logger, chaindata), true) defer db.Close() if err := stageHistory(db, ctx); err != nil { @@ -168,7 +168,7 @@ var cmdLogIndex = &cobra.Command{ RunE: func(cmd *cobra.Command, args []string) error { ctx, _ := common2.RootContext() logger := log.New() - db := openDB(chaindata, logger, true) + db := openDB(dbCfg(kv.ChainDB, logger, chaindata), true) defer db.Close() if err := stageLogIndex(db, ctx); err != nil { @@ -185,7 +185,7 @@ var cmdCallTraces = &cobra.Command{ RunE: func(cmd *cobra.Command, args []string) error { ctx, _ := common2.RootContext() logger := log.New() - db := openDB(chaindata, logger, true) + db := 
openDB(dbCfg(kv.ChainDB, logger, chaindata), true) defer db.Close() if err := stageCallTraces(db, ctx); err != nil { @@ -202,7 +202,7 @@ var cmdStageTxLookup = &cobra.Command{ RunE: func(cmd *cobra.Command, args []string) error { ctx, _ := common2.RootContext() logger := log.New() - db := openDB(chaindata, logger, true) + db := openDB(dbCfg(kv.ChainDB, logger, chaindata), true) defer db.Close() if err := stageTxLookup(db, ctx); err != nil { @@ -218,7 +218,7 @@ var cmdPrintStages = &cobra.Command{ RunE: func(cmd *cobra.Command, args []string) error { ctx, _ := common2.RootContext() logger := log.New() - db := openDB(chaindata, logger, false) + db := openDB(dbCfg(kv.ChainDB, logger, chaindata).Readonly(), false) defer db.Close() if err := printAllStages(db, ctx); err != nil { @@ -235,7 +235,7 @@ var cmdPrintMigrations = &cobra.Command{ RunE: func(cmd *cobra.Command, args []string) error { ctx, _ := common2.RootContext() logger := log.New() - db := openDB(chaindata, logger, false) + db := openDB(dbCfg(kv.ChainDB, logger, chaindata), false) defer db.Close() if err := printAppliedMigrations(db, ctx); err != nil { log.Error("Error", "err", err) @@ -251,7 +251,7 @@ var cmdRemoveMigration = &cobra.Command{ RunE: func(cmd *cobra.Command, args []string) error { ctx, _ := common2.RootContext() logger := log.New() - db := openDB(chaindata, logger, false) + db := openDB(dbCfg(kv.ChainDB, logger, chaindata), false) defer db.Close() if err := removeMigration(db, ctx); err != nil { log.Error("Error", "err", err) @@ -266,7 +266,7 @@ var cmdRunMigrations = &cobra.Command{ Short: "", RunE: func(cmd *cobra.Command, args []string) error { logger := log.New() - db := openDB(chaindata, logger, true) + db := openDB(dbCfg(kv.ChainDB, logger, chaindata), true) defer db.Close() // Nothing to do, migrations will be applied automatically return nil @@ -278,7 +278,7 @@ var cmdSetPrune = &cobra.Command{ Short: "Override existing --prune flag value (if you know what you are doing)", RunE: func(cmd *cobra.Command, args []string) error { logger := log.New() - db := openDB(chaindata, logger, true) + db := openDB(dbCfg(kv.ChainDB, logger, chaindata), true) defer db.Close() return overrideStorageMode(db) }, @@ -289,7 +289,7 @@ var cmdSetSnapshto = &cobra.Command{ Short: "Override existing --snapshots flag value (if you know what you are doing)", RunE: func(cmd *cobra.Command, args []string) error { logger := log.New() - db := openDB(chaindata, logger, true) + db := openDB(dbCfg(kv.ChainDB, logger, chaindata), true) defer db.Close() _, chainConfig := genesisByChain(chain) snapshots := allSnapshots(chainConfig, db) diff --git a/cmd/integration/commands/state_stages.go b/cmd/integration/commands/state_stages.go index 556777812c2..a8c3f184b1b 100644 --- a/cmd/integration/commands/state_stages.go +++ b/cmd/integration/commands/state_stages.go @@ -56,7 +56,7 @@ Examples: miningConfig := params.MiningConfig{} utils.SetupMinerCobra(cmd, &miningConfig) logger := log.New() - db := openDB(cfg.Dirs.Chaindata, logger, true) + db := openDB(dbCfg(kv.ChainDB, logger, chaindata), true) defer db.Close() if err := syncBySmallSteps(db, miningConfig, ctx); err != nil { @@ -79,7 +79,7 @@ var loopIhCmd = &cobra.Command{ RunE: func(cmd *cobra.Command, args []string) error { ctx, _ := common2.RootContext() logger := log.New() - db := openDB(chaindata, logger, true) + db := openDB(dbCfg(kv.ChainDB, logger, chaindata), true) defer db.Close() if unwind == 0 { @@ -99,7 +99,7 @@ var loopExecCmd = &cobra.Command{ RunE: func(cmd *cobra.Command, args 
[]string) error { ctx, _ := common2.RootContext() logger := log.New() - db := openDB(chaindata, logger, true) + db := openDB(dbCfg(kv.ChainDB, logger, chaindata), true) defer db.Close() if unwind == 0 { unwind = 1 diff --git a/go.mod b/go.mod index a2a1cb6b846..0c2be44a416 100644 --- a/go.mod +++ b/go.mod @@ -34,7 +34,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible - github.com/ledgerwatch/erigon-lib v0.0.0-20220608083922-5278815cd0b5 + github.com/ledgerwatch/erigon-lib v0.0.0-20220609024229-b33ca06182dc github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 diff --git a/go.sum b/go.sum index e74c291ce8a..82996611e62 100644 --- a/go.sum +++ b/go.sum @@ -383,8 +383,8 @@ github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3P github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220608083922-5278815cd0b5 h1:twatDydXUXs8PoFFF6x0AmeyUQAfkIRdfAUO2tbQ2jc= -github.com/ledgerwatch/erigon-lib v0.0.0-20220608083922-5278815cd0b5/go.mod h1:jNDE6PRPIA8wUdikJs8BvKtrFv101qOijIXA3HnDW8E= +github.com/ledgerwatch/erigon-lib v0.0.0-20220609024229-b33ca06182dc h1:VaaV9YioVaP/3sCXahhB5vR/uUrIkAiYT27x1ze99vI= +github.com/ledgerwatch/erigon-lib v0.0.0-20220609024229-b33ca06182dc/go.mod h1:jNDE6PRPIA8wUdikJs8BvKtrFv101qOijIXA3HnDW8E= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= From 77e29346474a68197fe9dbfa4bdea1d2e362cf80 Mon Sep 17 00:00:00 2001 From: Enrique Jose Avila Asapche Date: Thu, 9 Jun 2022 03:47:33 +0100 Subject: [PATCH 027/136] if no latest valid hash default to empty hash (#4411) --- cmd/rpcdaemon/commands/engine_api.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cmd/rpcdaemon/commands/engine_api.go b/cmd/rpcdaemon/commands/engine_api.go index d5ee5076953..f9f76613c32 100644 --- a/cmd/rpcdaemon/commands/engine_api.go +++ b/cmd/rpcdaemon/commands/engine_api.go @@ -78,6 +78,8 @@ func convertPayloadStatus(x *remote.EnginePayloadStatus) map[string]interface{} } if x.LatestValidHash != nil { json["latestValidHash"] = common.Hash(gointerfaces.ConvertH256ToHash(x.LatestValidHash)) + } else { + json["latestValidHash"] = common.Hash{} } if x.ValidationError != "" { json["validationError"] = x.ValidationError From 27d652b2a46e8eb168f7af82ee239d12307fec6d Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Thu, 9 Jun 2022 12:26:09 +0200 Subject: [PATCH 028/136] WriteForkchoiceHead for no-op FCUs (#4415) * WriteForkchoiceHead for no-op FCUs * Flip WriteForkchoiceHead & safeAndFinalizedBlocksAreCanonical --- eth/stagedsync/stage_headers.go | 1 + 1 file changed, 1 insertion(+) diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 644fa0a9525..337934a25da 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -260,6 +260,7 @@ func startHandlingForkChoice( if currentHeadHash == headerHash { // no-op 
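// Editorial note, not part of the patch: the hunk just below records the forkchoice head with
// WriteForkchoiceHead even when the requested head equals the current one, so a later restart
// still sees the most recent forkchoice update. A self-contained sketch of that idea, with a
// hypothetical in-memory store standing in for Erigon's rawdb/kv layer (the names here are
// illustrative only):
package main

import "fmt"

type hash [32]byte

type forkchoiceStore struct{ head hash }

// onForkChoiceUpdated persists the requested head unconditionally, then reports whether the
// update was a no-op (mirroring the "persist even for a no-op" behaviour of the patch).
func (s *forkchoiceStore) onForkChoiceUpdated(current, requested hash) (noop bool) {
	s.head = requested
	return current == requested
}

func main() {
	var s forkchoiceStore
	h := hash{1}
	fmt.Println(s.onForkChoiceUpdated(h, h)) // true: a no-op, but the head was still recorded
	fmt.Println(s.head == h)                 // true
}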
log.Info(fmt.Sprintf("[%s] Fork choice no-op", s.LogPrefix())) cfg.hd.BeaconRequestList.Remove(requestId) + rawdb.WriteForkchoiceHead(tx, forkChoice.HeadBlockHash) canonical, err := safeAndFinalizedBlocksAreCanonical(forkChoice, s, tx, cfg, requestStatus == engineapi.New) if err != nil { log.Warn(fmt.Sprintf("[%s] Fork choice err", s.LogPrefix()), "err", err) From ace482bd0015e2f91cd0a5412f31d7b999008361 Mon Sep 17 00:00:00 2001 From: Enrique Jose Avila Asapche Date: Thu, 9 Jun 2022 11:26:30 +0100 Subject: [PATCH 029/136] revert #4411 (#4418) --- cmd/rpcdaemon/commands/engine_api.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/cmd/rpcdaemon/commands/engine_api.go b/cmd/rpcdaemon/commands/engine_api.go index f9f76613c32..d5ee5076953 100644 --- a/cmd/rpcdaemon/commands/engine_api.go +++ b/cmd/rpcdaemon/commands/engine_api.go @@ -78,8 +78,6 @@ func convertPayloadStatus(x *remote.EnginePayloadStatus) map[string]interface{} } if x.LatestValidHash != nil { json["latestValidHash"] = common.Hash(gointerfaces.ConvertH256ToHash(x.LatestValidHash)) - } else { - json["latestValidHash"] = common.Hash{} } if x.ValidationError != "" { json["validationError"] = x.ValidationError From 0e142e33a039f229eb196acfe8288eba18c4298b Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Thu, 9 Jun 2022 17:47:38 +0700 Subject: [PATCH 030/136] Reclaimable space in logs (#4417) * save * save * save * save * save * save * save --- eth/stagedsync/sync.go | 15 +++++++++++---- ethdb/olddb/miningmutation.go | 3 +++ go.mod | 2 +- go.sum | 4 ++-- 4 files changed, 17 insertions(+), 7 deletions(-) diff --git a/eth/stagedsync/sync.go b/eth/stagedsync/sync.go index 552754b1503..3b64f1023de 100644 --- a/eth/stagedsync/sync.go +++ b/eth/stagedsync/sync.go @@ -251,14 +251,14 @@ func (s *Sync) Run(db kv.RwDB, tx kv.RwTx, firstCycle bool) error { return err } - if err := printLogs(tx, s.timings); err != nil { + if err := printLogs(db, tx, s.timings); err != nil { return err } s.currentStage = 0 return nil } -func printLogs(tx kv.RwTx, timings []Timing) error { +func printLogs(db kv.RoDB, tx kv.RwTx, timings []Timing) error { var logCtx []interface{} count := 0 for i := range timings { @@ -287,14 +287,13 @@ func printLogs(tx kv.RwTx, timings []Timing) error { if len(logCtx) > 0 { // also don't print this logs if everything is fast buckets := []string{ - "freelist", kv.PlainState, kv.AccountChangeSet, kv.StorageChangeSet, kv.EthTx, kv.Log, } - bucketSizes := make([]interface{}, 0, 2*len(buckets)) + bucketSizes := make([]interface{}, 0, 2*(len(buckets)+2)) for _, bucket := range buckets { sz, err1 := tx.BucketSize(bucket) if err1 != nil { @@ -302,6 +301,14 @@ func printLogs(tx kv.RwTx, timings []Timing) error { } bucketSizes = append(bucketSizes, bucket, libcommon.ByteCount(sz)) } + + sz, err1 := tx.BucketSize("freelist") + if err1 != nil { + return err1 + } + bucketSizes = append(bucketSizes, "FreeList", libcommon.ByteCount(sz)) + amountOfFreePagesInDb := sz / 4 // page_id encoded as bigEndian_u32 + bucketSizes = append(bucketSizes, "ReclaimableSpace", libcommon.ByteCount(amountOfFreePagesInDb*db.PageSize())) log.Info("Tables", bucketSizes...) 
} tx.CollectMetrics() diff --git a/ethdb/olddb/miningmutation.go b/ethdb/olddb/miningmutation.go index 7581ac1f797..7dd94eb9d6d 100644 --- a/ethdb/olddb/miningmutation.go +++ b/ethdb/olddb/miningmutation.go @@ -79,6 +79,9 @@ func (m *miningmutation) getMem(table string, key []byte) ([]byte, bool) { return val, val != nil } +func (m *miningmutation) DBSize() (uint64, error) { panic("not implemented") } +func (m *miningmutation) PageSize() uint64 { panic("not implemented") } + func (m *miningmutation) IncrementSequence(bucket string, amount uint64) (res uint64, err error) { v, ok := m.getMem(kv.Sequence, []byte(bucket)) if !ok && m.db != nil { diff --git a/go.mod b/go.mod index 0c2be44a416..bd027733149 100644 --- a/go.mod +++ b/go.mod @@ -34,7 +34,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible - github.com/ledgerwatch/erigon-lib v0.0.0-20220609024229-b33ca06182dc + github.com/ledgerwatch/erigon-lib v0.0.0-20220609100618-fec29e42265c github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 diff --git a/go.sum b/go.sum index 82996611e62..6388784b663 100644 --- a/go.sum +++ b/go.sum @@ -383,8 +383,8 @@ github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3P github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220609024229-b33ca06182dc h1:VaaV9YioVaP/3sCXahhB5vR/uUrIkAiYT27x1ze99vI= -github.com/ledgerwatch/erigon-lib v0.0.0-20220609024229-b33ca06182dc/go.mod h1:jNDE6PRPIA8wUdikJs8BvKtrFv101qOijIXA3HnDW8E= +github.com/ledgerwatch/erigon-lib v0.0.0-20220609100618-fec29e42265c h1:qlt59N/PVVmdUXzXi6IhAlUvoFM1hDZoiqIWWNPJOIk= +github.com/ledgerwatch/erigon-lib v0.0.0-20220609100618-fec29e42265c/go.mod h1:jNDE6PRPIA8wUdikJs8BvKtrFv101qOijIXA3HnDW8E= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= From b8e5cbf82007010e76ca1ac7903391b50fdad2ea Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Thu, 9 Jun 2022 13:16:11 +0200 Subject: [PATCH 031/136] fixed timeout (#4419) --- eth/backend.go | 2 +- eth/stagedsync/stage_mining_create_block.go | 9 +++++++++ eth/stagedsync/stage_mining_finish.go | 9 +-------- ethdb/privateapi/ethbackend.go | 17 ++++++++--------- 4 files changed, 19 insertions(+), 18 deletions(-) diff --git a/eth/backend.go b/eth/backend.go index 8dea5a2d42e..78ed912232f 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -377,7 +377,7 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere } // proof-of-stake mining assembleBlockPOS := func(param *core.BlockProposerParametersPOS) (*types.Block, error) { - miningStatePos := stagedsync.NewMiningState(&config.Miner) + miningStatePos := stagedsync.NewProposingState(&config.Miner) miningStatePos.MiningConfig.Etherbase = param.SuggestedFeeRecipient proposingSync := stagedsync.New( stagedsync.MiningStages(backend.sentryCtx, diff --git a/eth/stagedsync/stage_mining_create_block.go b/eth/stagedsync/stage_mining_create_block.go 
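// Editorial note, not part of the patch: this commit gives the proof-of-stake proposer its own
// state constructor with a dedicated result channel, and (in the finish stage below) routes an
// assembled block by checking whether that channel is non-nil instead of reading the transition
// status from the database. An illustrative sketch of that routing; the types and names are
// simplified stand-ins, not Erigon's:
package main

import "fmt"

type block struct{ number uint64 }

type miningState struct {
	resultCh    chan *block // consumers of regular (non-proposer) mining
	resultPOSCh chan *block // set only when constructed for the proof-of-stake proposer
}

func finish(s *miningState, b *block) {
	if s.resultPOSCh != nil { // proposer path
		s.resultPOSCh <- b
		return
	}
	s.resultCh <- b // regular mining path
}

func main() {
	pos := &miningState{resultCh: make(chan *block, 1), resultPOSCh: make(chan *block, 1)}
	pow := &miningState{resultCh: make(chan *block, 1)}
	finish(pos, &block{number: 1})
	finish(pow, &block{number: 2})
	fmt.Println((<-pos.resultPOSCh).number, (<-pow.resultCh).number) // 1 2
}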
index db87a2137ac..aa41321d9d7 100644 --- a/eth/stagedsync/stage_mining_create_block.go +++ b/eth/stagedsync/stage_mining_create_block.go @@ -45,6 +45,15 @@ type MiningState struct { } func NewMiningState(cfg *params.MiningConfig) MiningState { + return MiningState{ + MiningConfig: cfg, + PendingResultCh: make(chan *types.Block, 1), + MiningResultCh: make(chan *types.Block, 1), + MiningBlock: &MiningBlock{}, + } +} + +func NewProposingState(cfg *params.MiningConfig) MiningState { return MiningState{ MiningConfig: cfg, PendingResultCh: make(chan *types.Block, 1), diff --git a/eth/stagedsync/stage_mining_finish.go b/eth/stagedsync/stage_mining_finish.go index eab6752c864..97708a30dc3 100644 --- a/eth/stagedsync/stage_mining_finish.go +++ b/eth/stagedsync/stage_mining_finish.go @@ -5,7 +5,6 @@ import ( "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/consensus" - "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/log/v3" @@ -55,13 +54,7 @@ func SpawnMiningFinishStage(s *StageState, tx kv.RwTx, cfg MiningFinishCfg, quit //} //prev = sealHash - // If we are on POS, we will send the result on the POS channel - isTrans, err := rawdb.Transitioned(tx, block.Header().Number.Uint64(), cfg.chainConfig.TerminalTotalDifficulty) - if err != nil { - return err - } - - if isTrans { + if cfg.miningState.MiningResultPOSCh != nil { cfg.miningState.MiningResultPOSCh <- block return nil } diff --git a/ethdb/privateapi/ethbackend.go b/ethdb/privateapi/ethbackend.go index cf6d230dcf9..47128009e9f 100644 --- a/ethdb/privateapi/ethbackend.go +++ b/ethdb/privateapi/ethbackend.go @@ -372,13 +372,6 @@ func (s *EthBackendServer) EngineNewPayloadV1(ctx context.Context, req *types2.E // EngineGetPayloadV1 retrieves previously assembled payload (Validators only) func (s *EthBackendServer) EngineGetPayloadV1(ctx context.Context, req *remote.EngineGetPayloadRequest) (*types2.ExecutionPayload, error) { - // TODO(yperbasis): getPayload should stop block assembly if that's currently in fly - - log.Trace("[GetPayload] acquiring lock") - s.syncCond.L.Lock() - defer s.syncCond.L.Unlock() - log.Trace("[GetPayload] lock acquired") - if !s.proposing { return nil, fmt.Errorf("execution layer not running as a proposer. 
enable proposer by taking out the --proposer.disable flag on startup") } @@ -387,8 +380,15 @@ func (s *EthBackendServer) EngineGetPayloadV1(ctx context.Context, req *remote.E return nil, fmt.Errorf("not a proof-of-stake chain") } + // TODO(yperbasis): getPayload should stop block assembly if that's currently in fly + log.Trace("[GetPayload] acquiring lock") + s.syncCond.L.Lock() + defer s.syncCond.L.Unlock() + log.Trace("[GetPayload] lock acquired") + payload, ok := s.pendingPayloads[req.PayloadId] if !ok { + log.Warn("Payload not stored", "payloadId", req.PayloadId) return nil, &UnknownPayloadErr } @@ -409,6 +409,7 @@ func (s *EthBackendServer) EngineGetPayloadV1(ctx context.Context, req *remote.E if err != nil { return nil, err } + log.Info("Block request successful", "hash", block.Header().Hash(), "transactions count", len(encodedTransactions), "number", block.NumberU64()) return &types2.ExecutionPayload{ ParentHash: gointerfaces.ConvertHashToH256(block.Header().ParentHash), @@ -593,9 +594,7 @@ func (s *EthBackendServer) StartProposer() { } log.Trace("[Proposer] starting assembling...") - s.syncCond.L.Unlock() block, err := s.assemblePayloadPOS(¶m) - s.syncCond.L.Lock() log.Trace("[Proposer] payload assembled") if err != nil { From 367a417d6b80bd708c4682230b3bf8abc2a34fc5 Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Thu, 9 Jun 2022 14:27:14 +0200 Subject: [PATCH 032/136] core/types: remove unused field 'td' in Block (#25010) (#4416) Co-authored-by: Rajaram Gaunker --- core/types/block.go | 20 +++----------------- 1 file changed, 3 insertions(+), 17 deletions(-) diff --git a/core/types/block.go b/core/types/block.go index 62d5c8ea054..cc131f286e9 100644 --- a/core/types/block.go +++ b/core/types/block.go @@ -653,23 +653,12 @@ type Block struct { hash atomic.Value size atomic.Value - // Td is used by package core to store the total difficulty - // of the chain up to and including the block. - td *big.Int - // These fields are used by package eth to track // inter-peer block relay. ReceivedAt time.Time ReceivedFrom interface{} } -// DeprecatedTd is an old relic for extracting the TD of a block. It is in the -// code solely to facilitate upgrading the database from the old format to the -// new, after which it should be deleted. Do not use! -func (b *Block) DeprecatedTd() *big.Int { - return b.td -} - // [deprecated by eth/63] // StorageBlock defines the RLP encoding of a Block stored in the // state database. The StorageBlock encoding contains fields that @@ -945,7 +934,7 @@ func (bb *Body) DecodeRLP(s *rlp.Stream) error { // are ignored and set to values derived from the given txs, uncles // and receipts. 
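// Editorial note, not part of the patch: the EngineGetPayloadV1 change earlier in this patch
// defers taking the shared lock until after the cheap precondition checks, so requests that are
// going to be rejected anyway never contend on it. A generic sketch of that ordering with a
// hypothetical server type (not Erigon's EthBackendServer):
package main

import (
	"errors"
	"fmt"
	"sync"
)

type server struct {
	mu        sync.Mutex
	proposing bool
	payloads  map[uint64]string
}

func (s *server) getPayload(id uint64) (string, error) {
	if !s.proposing { // validate cheap preconditions first, without holding the lock
		return "", errors.New("not running as a proposer")
	}
	s.mu.Lock() // only now touch shared state
	defer s.mu.Unlock()
	p, ok := s.payloads[id]
	if !ok {
		return "", errors.New("unknown payload")
	}
	return p, nil
}

func main() {
	s := &server{proposing: true, payloads: map[uint64]string{7: "block"}}
	fmt.Println(s.getPayload(7))
}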
func NewBlock(header *Header, txs []Transaction, uncles []*Header, receipts []*Receipt) *Block { - b := &Block{header: CopyHeader(header), td: new(big.Int)} + b := &Block{header: CopyHeader(header)} // TODO: panic if len(txs) != len(receipts) if len(txs) == 0 { @@ -980,7 +969,7 @@ func NewBlock(header *Header, txs []Transaction, uncles []*Header, receipts []*R // NewBlockFromStorage like NewBlock but used to create Block object when read it from DB // in this case no reason to copy parts, or re-calculate headers fields - they are all stored in DB func NewBlockFromStorage(hash common.Hash, header *Header, txs []Transaction, uncles []*Header) *Block { - b := &Block{header: header, td: new(big.Int), transactions: txs, uncles: uncles} + b := &Block{header: header, transactions: txs, uncles: uncles} b.hash.Store(hash) return b } @@ -1174,7 +1163,7 @@ func (b *StorageBlock) DecodeRLP(s *rlp.Stream) error { if err := s.Decode(&sb); err != nil { return err } - b.header, b.uncles, b.transactions, b.td = sb.Header, sb.Uncles, sb.Txs, sb.TD + b.header, b.uncles, b.transactions = sb.Header, sb.Uncles, sb.Txs return nil } @@ -1308,8 +1297,6 @@ func (b *Block) Copy() *Block { sizeValue.Store(size) } - td := big.NewInt(0).Set(b.td) - if b.ReceivedFrom != nil { panic("ReceivedFrom deep copy is not supported") } @@ -1320,7 +1307,6 @@ func (b *Block) Copy() *Block { transactions: transactions, hash: hashValue, size: sizeValue, - td: td, ReceivedAt: b.ReceivedAt, ReceivedFrom: nil, } From f8304012585833366a0b3ef2c6fbd3a904fa3438 Mon Sep 17 00:00:00 2001 From: ledgerwatch Date: Thu, 9 Jun 2022 13:38:00 +0100 Subject: [PATCH 033/136] Update skip_analysis.go (#4420) --- core/skip_analysis.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/skip_analysis.go b/core/skip_analysis.go index 35da3ec7b80..cffeee06079 100644 --- a/core/skip_analysis.go +++ b/core/skip_analysis.go @@ -24,9 +24,9 @@ import ( const MainnetNotCheckedFrom uint64 = 14_909_200 // MainnetNotCheckedFrom is the first block number not yet checked for invalid jumps -const BSCNotCheckedFrom uint64 = 17_048_970 +const BSCNotCheckedFrom uint64 = 18_492_482 -const BorMainnetNotCheckedFrom uint64 = 14_232_422 +const BorMainnetNotCheckedFrom uint64 = 21_128_788 const RopstenNotCheckedFrom uint64 = 12_331_664 From 36eacd23c02c76beead52676f0a606e3ab8b14aa Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Thu, 9 Jun 2022 15:53:37 +0200 Subject: [PATCH 034/136] eth_blockNumber should be equal to latest in eth_getBlockByNumber (#4421) --- cmd/rpcdaemon/commands/eth_system.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/rpcdaemon/commands/eth_system.go b/cmd/rpcdaemon/commands/eth_system.go index e9969f5d086..1095cdd82a1 100644 --- a/cmd/rpcdaemon/commands/eth_system.go +++ b/cmd/rpcdaemon/commands/eth_system.go @@ -22,14 +22,14 @@ func (api *APIImpl) BlockNumber(ctx context.Context) (hexutil.Uint64, error) { return 0, err } defer tx.Rollback() - execution, err := stages.GetStageProgress(tx, stages.Finish) + blockNum, err := getLatestBlockNumber(tx) if err != nil { return 0, err } - return hexutil.Uint64(execution), nil + return hexutil.Uint64(blockNum), nil } -// Syncing implements eth_syncing. Returns a data object detaling the status of the sync process or false if not syncing. +// Syncing implements eth_syncing. Returns a data object detailing the status of the sync process or false if not syncing. 
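// Editorial note, not part of the patch: the eth_system.go change above makes eth_blockNumber
// resolve the head with the same helper that eth_getBlockByNumber uses for "latest", so the two
// endpoints can no longer disagree. A sketch of funnelling both through one resolver; the types
// are hypothetical, not the rpcdaemon's:
package main

import "fmt"

type chainView struct{ latest uint64 }

// latestBlockNumber is the single source of truth both endpoints call.
func (c *chainView) latestBlockNumber() uint64 { return c.latest }

func (c *chainView) blockNumber() uint64 { return c.latestBlockNumber() }

func (c *chainView) getBlockByNumber(tag string) uint64 {
	if tag == "latest" {
		return c.latestBlockNumber()
	}
	return 0 // other tags elided in this sketch
}

func main() {
	c := &chainView{latest: 15_000_000}
	fmt.Println(c.blockNumber() == c.getBlockByNumber("latest")) // true
}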
func (api *APIImpl) Syncing(ctx context.Context) (interface{}, error) { tx, err := api.db.BeginRo(ctx) if err != nil { From 4b4218d2f672cbb6df016153d7b0ae1b030c5775 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Thu, 9 Jun 2022 17:57:00 +0200 Subject: [PATCH 035/136] added licensing (#4422) --- ethdb/olddb/miningmutation.go | 13 +++++++++++++ ethdb/olddb/miningmutation_test.go | 29 ++++++++++++----------------- ethdb/olddb/miningmutationcursor.go | 13 +++++++++++++ 3 files changed, 38 insertions(+), 17 deletions(-) diff --git a/ethdb/olddb/miningmutation.go b/ethdb/olddb/miningmutation.go index 7dd94eb9d6d..22f7cc77976 100644 --- a/ethdb/olddb/miningmutation.go +++ b/ethdb/olddb/miningmutation.go @@ -1,3 +1,16 @@ +/* + Copyright 2022 Erigon contributors + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package olddb import ( diff --git a/ethdb/olddb/miningmutation_test.go b/ethdb/olddb/miningmutation_test.go index 8cc6abe91ce..2f046f03809 100644 --- a/ethdb/olddb/miningmutation_test.go +++ b/ethdb/olddb/miningmutation_test.go @@ -1,20 +1,15 @@ -// Copyright 2014 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -//go:build !js +/* + Copyright 2022 Erigon contributors + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ package olddb diff --git a/ethdb/olddb/miningmutationcursor.go b/ethdb/olddb/miningmutationcursor.go index a805a8aad7a..ed5765e1bd9 100644 --- a/ethdb/olddb/miningmutationcursor.go +++ b/ethdb/olddb/miningmutationcursor.go @@ -1,3 +1,16 @@ +/* + Copyright 2022 Erigon contributors + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + package olddb import ( From f9024fed843e14a837c0c679cac7069da8b54a4a Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Fri, 10 Jun 2022 01:54:22 +0200 Subject: [PATCH 036/136] Removed annoying log (#4424) --- turbo/stages/headerdownload/header_algos.go | 1 - 1 file changed, 1 deletion(-) diff --git a/turbo/stages/headerdownload/header_algos.go b/turbo/stages/headerdownload/header_algos.go index 63a22ecdc62..47457aafcf3 100644 --- a/turbo/stages/headerdownload/header_algos.go +++ b/turbo/stages/headerdownload/header_algos.go @@ -433,7 +433,6 @@ func (hd *HeaderDownload) requestMoreHeadersForPOS(currentTime time.Time) (timeo } // Request ancestors - log.Info("Requested", "anchor", anchor.parentHash) request = &HeaderRequest{ Anchor: anchor, Hash: anchor.parentHash, From e2a04f756773ee6ff57c7d89fd8dac4963a12e1c Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Fri, 10 Jun 2022 02:52:35 +0200 Subject: [PATCH 037/136] fix (#4425) --- eth/stagedsync/stage_mining_create_block.go | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/eth/stagedsync/stage_mining_create_block.go b/eth/stagedsync/stage_mining_create_block.go index aa41321d9d7..9bb1bea5170 100644 --- a/eth/stagedsync/stage_mining_create_block.go +++ b/eth/stagedsync/stage_mining_create_block.go @@ -23,6 +23,7 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/eth/ethutils" "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/log/v3" ) @@ -135,13 +136,17 @@ func SpawnMiningCreateBlockStage(s *StageState, tx kv.RwTx, cfg MiningCreateBloc return err } - txs, err = types.DecodeTransactions(txSlots.Txs) - if errors.Is(err, io.EOF) { - return nil - } + for i := range txSlots.Txs { + s := rlp.NewStream(bytes.NewReader(txSlots.Txs[i]), uint64(len(txSlots.Txs[i]))) - if err != nil { - return fmt.Errorf("decode rlp of pending txs: %w", err) + transaction, err := types.DecodeTransaction(s) + if err == io.EOF { + continue + } + if err != nil { + return err + } + txs = append(txs, transaction) } var sender common.Address for i := range txs { From b004589610a33e9ce01557932135b09118b8af5f Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Fri, 10 Jun 2022 08:47:24 +0700 Subject: [PATCH 038/136] up mmap-go version to solve Win problem #4426 --- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index bd027733149..66e0870bda6 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,7 @@ require ( github.com/davecgh/go-spew v1.1.1 github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48 - github.com/edsrzf/mmap-go v1.0.0 + github.com/edsrzf/mmap-go v1.1.0 github.com/emicklei/dot v0.16.0 github.com/emirpasic/gods v1.18.1 github.com/fjl/gencodec v0.0.0-20191126094850-e283372f291f diff --git a/go.sum b/go.sum index 6388784b663..bcbe672bbad 100644 --- a/go.sum +++ b/go.sum @@ -174,8 +174,9 @@ github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25Kn github.com/eapache/go-resiliency v1.1.0/go.mod 
h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= +github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= github.com/emicklei/dot v0.16.0 h1:7PseyizTgeQ/aSF1eo4LcEfWlQSlzamFZpzY/nMB9EY= github.com/emicklei/dot v0.16.0/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= From 059e5d1ea59dbf1303ab59e3319f105efb89c973 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Fri, 10 Jun 2022 09:02:00 +0700 Subject: [PATCH 039/136] roaring: up version #4427 --- go.mod | 10 +++++----- go.sum | 19 ++++++++++--------- 2 files changed, 15 insertions(+), 14 deletions(-) diff --git a/go.mod b/go.mod index 66e0870bda6..e00a9458795 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/ledgerwatch/erigon go 1.18 require ( - github.com/RoaringBitmap/roaring v1.1.0 + github.com/RoaringBitmap/roaring v1.2.0 github.com/VictoriaMetrics/fastcache v1.10.0 github.com/VictoriaMetrics/metrics v1.18.1 github.com/anacrolix/go-libutp v1.2.0 @@ -21,7 +21,7 @@ require ( github.com/fjl/gencodec v0.0.0-20191126094850-e283372f291f github.com/goccy/go-json v0.9.7 github.com/gofrs/flock v0.8.1 - github.com/golang-jwt/jwt/v4 v4.3.0 + github.com/golang-jwt/jwt/v4 v4.4.1 github.com/golang/snappy v0.0.4 github.com/google/btree v1.0.1 github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa @@ -39,13 +39,13 @@ require ( github.com/ledgerwatch/secp256k1 v1.0.0 github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 github.com/pelletier/go-toml v1.9.5 - github.com/pelletier/go-toml/v2 v2.0.1 + github.com/pelletier/go-toml/v2 v2.0.2 github.com/pion/stun v0.3.5 github.com/quasilyte/go-ruleguard/dsl v0.3.21 github.com/rs/cors v1.8.2 github.com/spf13/cobra v1.4.0 github.com/spf13/pflag v1.0.5 - github.com/stretchr/testify v1.7.1 + github.com/stretchr/testify v1.7.2 github.com/tendermint/go-amino v0.14.1 github.com/tendermint/tendermint v0.31.11 github.com/torquem-ch/mdbx-go v0.24.2 @@ -151,7 +151,7 @@ require ( golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect google.golang.org/genproto v0.0.0-20200825200019-8632dd797987 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect - gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/uint128 v1.1.1 // indirect modernc.org/cc/v3 v3.36.0 // indirect modernc.org/ccgo/v3 v3.16.6 // indirect diff --git a/go.sum b/go.sum index bcbe672bbad..dee89d24b8e 100644 --- a/go.sum +++ b/go.sum @@ -11,8 +11,8 @@ github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go github.com/RoaringBitmap/roaring v0.4.7/go.mod h1:8khRDP4HmeXns4xIj9oGrKSz7XTQiJx2zgh7AcNke4w= github.com/RoaringBitmap/roaring v0.4.17/go.mod h1:D3qVegWTmfCaX4Bl5CrBE9hfrSrrXIr8KVNvRsDi1NI= github.com/RoaringBitmap/roaring v0.4.23/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo= -github.com/RoaringBitmap/roaring v1.1.0 h1:b10lZrZXaY6Q6EKIRrmOF519FIyQQ5anPgGr3niw2yY= -github.com/RoaringBitmap/roaring v1.1.0/go.mod 
h1:icnadbWcNyfEHlYdr+tDlOTih1Bf/h+rzPpv4sbomAA= +github.com/RoaringBitmap/roaring v1.2.0 h1:qayex3YgtOmzev8slia4A0jPGsn2o2bnqKDcRpyRUiI= +github.com/RoaringBitmap/roaring v1.2.0/go.mod h1:icnadbWcNyfEHlYdr+tDlOTih1Bf/h+rzPpv4sbomAA= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/VictoriaMetrics/fastcache v1.10.0 h1:5hDJnLsKLpnUEToub7ETuRu8RCkb40woBZAUiKonXzY= @@ -237,8 +237,8 @@ github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.3.0 h1:kHL1vqdqWNfATmA0FNMdmZNMyZI1U6O31X4rlIPoBog= -github.com/golang-jwt/jwt/v4 v4.3.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-jwt/jwt/v4 v4.4.1 h1:pC5DB52sCeK48Wlb9oPcdhnjkz1TKt1D/P7WKJ0kUcQ= +github.com/golang-jwt/jwt/v4 v4.4.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 h1:DACJavvAHhabrF08vX0COfcOBJRhZ8lUbR+ZWIs0Y5g= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -469,8 +469,8 @@ github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FI github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/pelletier/go-toml/v2 v2.0.1 h1:8e3L2cCQzLFi2CR4g7vGFuFxX7Jl1kKX8gW+iV0GUKU= -github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= +github.com/pelletier/go-toml/v2 v2.0.2 h1:+jQXlF3scKIcSEKkdHzXhCTDLPFi5r1wnK6yPS+49Gw= +github.com/pelletier/go-toml/v2 v2.0.2/go.mod h1:MovirKjgVRESsAvNZlAjtFwV867yGuwRkXbG66OzopI= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= @@ -598,8 +598,8 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/tendermint/go-amino v0.14.1 h1:o2WudxNfdLNBwMyl2dqOJxiro5rfrEaU0Ugs6offJMk= github.com/tendermint/go-amino v0.14.1/go.mod h1:i/UKE5Uocn+argJJBb12qTZsCDBcAYMbR92AaJVmKso= github.com/tendermint/tendermint v0.31.11 
h1:TIs//4WfEAG4TOZc2eUfJPI3T8KrywXQCCPnGAaM1Wo= @@ -877,8 +877,9 @@ gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From 1e3c90ba80f684b1d6caf18e521252cdd8d82a1f Mon Sep 17 00:00:00 2001 From: Krishna Upadhyaya Date: Fri, 10 Jun 2022 14:02:04 +0530 Subject: [PATCH 040/136] Bor devnet option (#4428) * bor mining testing * Implemented bor-devent network * Minor fixes * use signer as validator * remove unused spaces * fix typo --- cmd/utils/flags.go | 4 ++- consensus/bor/bor.go | 15 ++++++++++ core/allocs/bor_devnet.json | 48 ++++++++++++++++++++++++++++++ core/genesis.go | 15 ++++++++++ eth/backend.go | 19 ++++++++++++ params/chainspecs/bor-devnet.json | 43 ++++++++++++++++++++++++++ params/config.go | 7 +++++ params/networkname/network_name.go | 2 ++ turbo/node/node.go | 6 +++- 9 files changed, 157 insertions(+), 2 deletions(-) create mode 100644 core/allocs/bor_devnet.json create mode 100644 params/chainspecs/bor-devnet.json diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 33146b27ef8..77abd5aac0c 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -941,7 +941,7 @@ func setEtherbase(ctx *cli.Context, cfg *ethconfig.Config) { } } - if ctx.GlobalString(ChainFlag.Name) == networkname.DevChainName { + if ctx.GlobalString(ChainFlag.Name) == networkname.DevChainName || ctx.GlobalString(ChainFlag.Name) == networkname.BorDevnetChainName { if etherbase == "" { cfg.Miner.SigKey = core.DevnetSignPrivateKey cfg.Miner.Etherbase = core.DevnetEtherbase @@ -1054,6 +1054,8 @@ func DataDirForNetwork(datadir string, network string) string { return filepath.Join(datadir, "mumbai") case networkname.BorMainnetChainName: return filepath.Join(datadir, "bor-mainnet") + case networkname.BorDevnetChainName: + return filepath.Join(datadir, "bor-devnet") case networkname.SepoliaChainName: return filepath.Join(datadir, "sepolia") default: diff --git a/consensus/bor/bor.go b/consensus/bor/bor.go index 24c372d31aa..35b457ef40a 100644 --- a/consensus/bor/bor.go +++ b/consensus/bor/bor.go @@ -27,6 +27,7 @@ import ( "github.com/ledgerwatch/erigon/core/types/accounts" "github.com/ledgerwatch/erigon/crypto" "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/params/networkname" "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/log/v3" @@ -981,6 +982,20 @@ func (c *Bor) GetCurrentSpan(header *types.Header, state *state.IntraBlockState, // GetCurrentValidators get current validators func (c *Bor) GetCurrentValidators(blockNumber uint64) ([]*Validator, error) { + // Use signer as validator in case of bor devent + if 
c.chainConfig.ChainName == networkname.BorDevnetChainName { + validators := []*Validator{ + { + ID: 1, + Address: c.signer, + VotingPower: 1000, + ProposerPriority: 1, + }, + } + + return validators, nil + } + span, err := c.getSpanForBlock(blockNumber) if err != nil { return nil, err diff --git a/core/allocs/bor_devnet.json b/core/allocs/bor_devnet.json new file mode 100644 index 00000000000..8d6cf795b91 --- /dev/null +++ b/core/allocs/bor_devnet.json @@ -0,0 +1,48 @@ +{ + "0000000000000000000000000000000000001000": { + "balance": "0x0", + "code": "0x608060405234801561001057600080fd5b50600436106101f05760003560e01c806360c8614d1161010f578063af26aa96116100a2578063d5b844eb11610071578063d5b844eb14610666578063dcf2793a14610684578063e3b7c924146106b6578063f59cf565146106d4576101f0565b8063af26aa96146105c7578063b71d7a69146105e7578063b7ab4db514610617578063c1b3c91914610636576101f0565b806370ba5707116100de57806370ba57071461052b57806398ab2b621461055b5780639d11b80714610579578063ae756451146105a9576101f0565b806360c8614d1461049c57806365b3a1e2146104bc57806366332354146104db578063687a9bd6146104f9576101f0565b80633434735f1161018757806344d6528f1161015657806344d6528f146103ee5780634dbc959f1461041e57806355614fcc1461043c578063582a8d081461046c576101f0565b80633434735f1461035257806335ddfeea1461037057806343ee8213146103a057806344c15cb1146103be576101f0565b806323f2a73f116101c357806323f2a73f146102a45780632bc06564146102d45780632de3a180146102f25780632eddf35214610322576101f0565b8063047a6c5b146101f55780630c35b1cb146102275780631270b5741461025857806323c2a2b414610288575b600080fd5b61020f600480360361020a9190810190612b24565b610706565b60405161021e93929190613463565b60405180910390f35b610241600480360361023c9190810190612b24565b61075d565b60405161024f929190613284565b60405180910390f35b610272600480360361026d9190810190612b4d565b610939565b60405161027f91906132bb565b60405180910390f35b6102a2600480360361029d9190810190612c2c565b610a91565b005b6102be60048036036102b99190810190612b4d565b61112a565b6040516102cb91906132bb565b60405180910390f35b6102dc611281565b6040516102e99190613411565b60405180910390f35b61030c60048036036103079190810190612a81565b611286565b60405161031991906132d6565b60405180910390f35b61033c60048036036103379190810190612b24565b611307565b6040516103499190613411565b60405180910390f35b61035a611437565b6040516103679190613269565b60405180910390f35b61038a60048036036103859190810190612abd565b61144f565b60405161039791906132bb565b60405180910390f35b6103a861151a565b6040516103b591906132d6565b60405180910390f35b6103d860048036036103d39190810190612b89565b611531565b6040516103e59190613411565b60405180910390f35b61040860048036036104039190810190612b4d565b611619565b60405161041591906133f6565b60405180910390f35b610426611781565b6040516104339190613411565b60405180910390f35b61045660048036036104519190810190612a06565b611791565b60405161046391906132bb565b60405180910390f35b61048660048036036104819190810190612a2f565b6117ab565b60405161049391906132d6565b60405180910390f35b6104a4611829565b6040516104b393929190613463565b60405180910390f35b6104c461189d565b6040516104d2929190613284565b60405180910390f35b6104e3611b6e565b6040516104f09190613411565b60405180910390f35b610513600480360361050e9190810190612bf0565b611b73565b6040516105229392919061342c565b60405180910390f35b61054560048036036105409190810190612a06565b611bd7565b60405161055291906132bb565b60405180910390f35b610563611bf1565b60405161057091906132d6565b60405180910390f35b610593600480360361058e9190810190612b24565b611c08565b6040516105a09190613411565b60405180910390f35b6105b1611d39565b6040516105be91906132d6565b60405180910390f35b6105cf611d50565b6040516105de939291906
13463565b60405180910390f35b61060160048036036105fc9190810190612b24565b611db1565b60405161060e9190613411565b60405180910390f35b61061f611eb1565b60405161062d929190613284565b60405180910390f35b610650600480360361064b9190810190612b24565b611ec5565b60405161065d9190613411565b60405180910390f35b61066e611ee6565b60405161067b919061349a565b60405180910390f35b61069e60048036036106999190810190612bf0565b611eeb565b6040516106ad9392919061342c565b60405180910390f35b6106be611f4f565b6040516106cb9190613411565b60405180910390f35b6106ee60048036036106e99190810190612b24565b611f61565b6040516106fd93929190613463565b60405180910390f35b60008060006002600085815260200190815260200160002060000154600260008681526020019081526020016000206001015460026000878152602001908152602001600020600201549250925092509193909250565b60608060ff83116107795761077061189d565b91509150610934565b600061078484611db1565b9050606060016000838152602001908152602001600020805490506040519080825280602002602001820160405280156107cd5781602001602082028038833980820191505090505b509050606060016000848152602001908152602001600020805490506040519080825280602002602001820160405280156108175781602001602082028038833980820191505090505b50905060008090505b60016000858152602001908152602001600020805490508110156109295760016000858152602001908152602001600020818154811061085c57fe5b906000526020600020906003020160020160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1683828151811061089a57fe5b602002602001019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff16815250506001600085815260200190815260200160002081815481106108f257fe5b90600052602060002090600302016001015482828151811061091057fe5b6020026020010181815250508080600101915050610820565b508181945094505050505b915091565b6000606060016000858152602001908152602001600020805480602002602001604051908101604052809291908181526020016000905b82821015610a0c578382906000526020600020906003020160405180606001604052908160008201548152602001600182015481526020016002820160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152505081526020019060010190610970565b50505050905060008090505b8151811015610a84578373ffffffffffffffffffffffffffffffffffffffff16828281518110610a4457fe5b60200260200101516040015173ffffffffffffffffffffffffffffffffffffffff161415610a7757600192505050610a8b565b8080600101915050610a18565b5060009150505b92915050565b73fffffffffffffffffffffffffffffffffffffffe73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614610b13576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610b0a906133d6565b60405180910390fd5b6000610b1d611781565b90506000811415610b3157610b30611f8b565b5b610b456001826122ac90919063ffffffff16565b8814610b86576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610b7d90613356565b60405180910390fd5b868611610bc8576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610bbf906133b6565b60405180910390fd5b6000604060018989030181610bd957fe5b0614610c1a576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610c1190613396565b60405180910390fd5b8660026000838152602001908152602001600020600101541115610c73576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610c6a90613336565b60405180910390fd5b6000600260008a81526020019081526020016000206000015414610ccc576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401610cc390613376565b60405180910390fd
5b604051806060016040528089815260200188815260200187815250600260008a8152602001908152602001600020600082015181600001556020820151816001015560408201518160020155905050600388908060018154018082558091505090600182039060005260206000200160009091929091909150555060008060008a815260200190815260200160002081610d669190612800565b506000600160008a815260200190815260200160002081610d879190612800565b506060610ddf610dda87878080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f820116905080830192505050505050506122cb565b6122f9565b905060008090505b8151811015610f51576060610e0e838381518110610e0157fe5b60200260200101516122f9565b90506000808c81526020019081526020016000208054809190600101610e349190612800565b506040518060600160405280610e5d83600081518110610e5057fe5b60200260200101516123d6565b8152602001610e7f83600181518110610e7257fe5b60200260200101516123d6565b8152602001610ea183600281518110610e9457fe5b6020026020010151612447565b73ffffffffffffffffffffffffffffffffffffffff168152506000808d81526020019081526020016000208381548110610ed757fe5b9060005260206000209060030201600082015181600001556020820151816001015560408201518160020160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550905050508080600101915050610de7565b506060610fa9610fa486868080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f820116905080830192505050505050506122cb565b6122f9565b905060008090505b815181101561111d576060610fd8838381518110610fcb57fe5b60200260200101516122f9565b9050600160008d81526020019081526020016000208054809190600101610fff9190612800565b5060405180606001604052806110288360008151811061101b57fe5b60200260200101516123d6565b815260200161104a8360018151811061103d57fe5b60200260200101516123d6565b815260200161106c8360028151811061105f57fe5b6020026020010151612447565b73ffffffffffffffffffffffffffffffffffffffff16815250600160008e815260200190815260200160002083815481106110a357fe5b9060005260206000209060030201600082015181600001556020820151816001015560408201518160020160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550905050508080600101915050610fb1565b5050505050505050505050565b60006060600080858152602001908152602001600020805480602002602001604051908101604052809291908181526020016000905b828210156111fc578382906000526020600020906003020160405180606001604052908160008201548152602001600182015481526020016002820160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152505081526020019060010190611160565b50505050905060008090505b8151811015611274578373ffffffffffffffffffffffffffffffffffffffff1682828151811061123457fe5b60200260200101516040015173ffffffffffffffffffffffffffffffffffffffff1614156112675760019250505061127b565b8080600101915050611208565b5060009150505b92915050565b604081565b60006002600160f81b84846040516020016112a3939291906131d6565b6040516020818303038152906040526040516112bf9190613213565b602060405180830381855afa1580156112dc573d6000803e3d6000fd5b5050506040513d601f19601f820116820180604052506112ff9190810190612a58565b905092915050565b60006060600080848152602001908152602001600020805480602002602001604051908101604052809291908181526020016000905b828210156113d9578382906000526020600020906003020160405180606001604052908160008201548152602001600182015481526020016002820160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1673fffffffffffffffffffffff
fffffffffffffffff16815250508152602001906001019061133d565b505050509050600080905060008090505b825181101561142c5761141d83828151811061140257fe5b602002602001015160200151836122ac90919063ffffffff16565b915080806001019150506113ea565b508092505050919050565b73fffffffffffffffffffffffffffffffffffffffe81565b600080600080859050600060218087518161146657fe5b04029050600081111561147f5761147c876117ab565b91505b6000602190505b818111611509576000600182038801519050818801519550806000602081106114ab57fe5b1a60f81b9450600060f81b857effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff191614156114f0576114e98685611286565b93506114fd565b6114fa8487611286565b93505b50602181019050611486565b508782149450505050509392505050565b60405161152690613254565b604051809103902081565b60008060009050600080905060008090505b84518167ffffffffffffffff16101561160c57606061156e868367ffffffffffffffff16604161246a565b9050600061158582896124f690919063ffffffff16565b905061158f612832565b6115998a83611619565b90506115a58a8361112a565b80156115dc57508473ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff16115b156115fe578194506115fb8160200151876122ac90919063ffffffff16565b95505b505050604181019050611543565b5081925050509392505050565b611621612832565b6060600080858152602001908152602001600020805480602002602001604051908101604052809291908181526020016000905b828210156116f1578382906000526020600020906003020160405180606001604052908160008201548152602001600182015481526020016002820160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152505081526020019060010190611655565b50505050905060008090505b8151811015611779578373ffffffffffffffffffffffffffffffffffffffff1682828151811061172957fe5b60200260200101516040015173ffffffffffffffffffffffffffffffffffffffff16141561176c5781818151811061175d57fe5b60200260200101519250611779565b80806001019150506116fd565b505092915050565b600061178c43611db1565b905090565b60006117a461179e611781565b8361112a565b9050919050565b60006002600060f81b836040516020016117c69291906131aa565b6040516020818303038152906040526040516117e29190613213565b602060405180830381855afa1580156117ff573d6000803e3d6000fd5b5050506040513d601f19601f820116820180604052506118229190810190612a58565b9050919050565b60008060008061184a600161183c611781565b6122ac90919063ffffffff16565b905060026000828152602001908152602001600020600001546002600083815260200190815260200160002060010154600260008481526020019081526020016000206002015493509350935050909192565b606080606060056040519080825280602002602001820160405280156118d25781602001602082028038833980820191505090505b50905073c26880a0af2ea0c7e8130e6ec47af756465452e8816000815181106118f757fe5b602002602001019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff168152505073be188d6641e8b680743a4815dfa0f6208038960f8160018151811061195357fe5b602002602001019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff168152505073c275dc8be39f50d12f66b6a63629c39da5bae5bd816002815181106119af57fe5b602002602001019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff168152505073f903ba9e006193c1527bfbe65fe2123704ea3f9981600381518110611a0b57fe5b602002602001019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff168152505073928ed6a3e94437bbd316ccad78479f1d163a6a8c81600481518110611a6757fe5b602002602001019073ffffffffffffffffffffffffffffffffffffffff16908173ffffffffffffffffffffffffffffffffffffffff168152505060606005604051908082528060
200260200182016040528015611ad35781602001602082028038833980820191505090505b50905061271081600081518110611ae657fe5b60200260200101818152505061271081600181518110611b0257fe5b60200260200101818152505061271081600281518110611b1e57fe5b60200260200101818152505061271081600381518110611b3a57fe5b60200260200101818152505061271081600481518110611b5657fe5b60200260200101818152505081819350935050509091565b60ff81565b60016020528160005260406000208181548110611b8c57fe5b9060005260206000209060030201600091509150508060000154908060010154908060020160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905083565b6000611bea611be4611781565b83610939565b9050919050565b604051611bfd9061322a565b604051809103902081565b6000606060016000848152602001908152602001600020805480602002602001604051908101604052809291908181526020016000905b82821015611cdb578382906000526020600020906003020160405180606001604052908160008201548152602001600182015481526020016002820160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152505081526020019060010190611c3f565b505050509050600080905060008090505b8251811015611d2e57611d1f838281518110611d0457fe5b602002602001015160200151836122ac90919063ffffffff16565b91508080600101915050611cec565b508092505050919050565b604051611d459061323f565b604051809103902081565b600080600080611d5e611781565b905060026000828152602001908152602001600020600001546002600083815260200190815260200160002060010154600260008481526020019081526020016000206002015493509350935050909192565b60008060038054905090505b6000811115611e7157611dce612869565b6002600060036001850381548110611de257fe5b906000526020600020015481526020019081526020016000206040518060600160405290816000820154815260200160018201548152602001600282015481525050905083816020015111158015611e3f57506000816040015114155b8015611e4f575080604001518411155b15611e6257806000015192505050611eac565b50808060019003915050611dbd565b5060006003805490501115611ea757600360016003805490500381548110611e9557fe5b90600052602060002001549050611eac565b600090505b919050565b606080611ebd4361075d565b915091509091565b60038181548110611ed257fe5b906000526020600020016000915090505481565b600281565b60006020528160005260406000208181548110611f0457fe5b9060005260206000209060030201600091509150508060000154908060010154908060020160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905083565b600060404381611f5b57fe5b04905090565b60026020528060005260406000206000915090508060000154908060010154908060020154905083565b606080611f9661189d565b8092508193505050600080905060405180606001604052808281526020016000815260200160ff81525060026000838152602001908152602001600020600082015181600001556020820151816001015560408201518160020155905050600381908060018154018082558091505090600182039060005260206000200160009091929091909150555060008060008381526020019081526020016000208161203f9190612800565b50600060016000838152602001908152602001600020816120609190612800565b5060008090505b83518110156121825760008083815260200190815260200160002080548091906001016120949190612800565b5060405180606001604052808281526020018483815181106120b257fe5b602002602001015181526020018583815181106120cb57fe5b602002602001015173ffffffffffffffffffffffffffffffffffffffff16815250600080848152602001908152602001600020828154811061210957fe5b9060005260206000209060030201600082015181600001556020820151816001015560408201518160020160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055509050508080600101915050612067565b5060008090505b83518110156122a657600160008381526020019081526
0200160002080548091906001016121b79190612800565b5060405180606001604052808281526020018483815181106121d557fe5b602002602001015181526020018583815181106121ee57fe5b602002602001015173ffffffffffffffffffffffffffffffffffffffff1681525060016000848152602001908152602001600020828154811061222d57fe5b9060005260206000209060030201600082015181600001556020820151816001015560408201518160020160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055509050508080600101915050612189565b50505050565b6000808284019050838110156122c157600080fd5b8091505092915050565b6122d361288a565b600060208301905060405180604001604052808451815260200182815250915050919050565b606061230482612600565b61230d57600080fd5b60006123188361264e565b905060608160405190808252806020026020018201604052801561235657816020015b6123436128a4565b81526020019060019003908161233b5790505b509050600061236885602001516126bf565b8560200151019050600080600090505b848110156123c95761238983612748565b91506040518060400160405280838152602001848152508482815181106123ac57fe5b602002602001018190525081830192508080600101915050612378565b5082945050505050919050565b60008082600001511180156123f057506021826000015111155b6123f957600080fd5b600061240883602001516126bf565b9050600081846000015103905060008083866020015101905080519150602083101561243b57826020036101000a820491505b81945050505050919050565b6000601582600001511461245a57600080fd5b612463826123d6565b9050919050565b60608183018451101561247c57600080fd5b6060821560008114612499576040519150602082016040526124ea565b6040519150601f8416801560200281840101858101878315602002848b0101015b818310156124d757805183526020830192506020810190506124ba565b50868552601f19601f8301166040525050505b50809150509392505050565b600080600080604185511461251157600093505050506125fa565b602085015192506040850151915060ff6041860151169050601b8160ff16101561253c57601b810190505b601b8160ff16141580156125545750601c8160ff1614155b1561256557600093505050506125fa565b60006001878386866040516000815260200160405260405161258a94939291906132f1565b6020604051602081039080840390855afa1580156125ac573d6000803e3d6000fd5b505050602060405103519050600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1614156125f257600080fd5b809450505050505b92915050565b600080826000015114156126175760009050612649565b60008083602001519050805160001a915060c060ff168260ff16101561264257600092505050612649565b6001925050505b919050565b6000808260000151141561266557600090506126ba565b6000809050600061267984602001516126bf565b84602001510190506000846000015185602001510190505b808210156126b3576126a282612748565b820191508280600101935050612691565b8293505050505b919050565b600080825160001a9050608060ff168110156126df576000915050612743565b60b860ff16811080612704575060c060ff168110158015612703575060f860ff1681105b5b15612713576001915050612743565b60c060ff168110156127335760018060b80360ff16820301915050612743565b60018060f80360ff168203019150505b919050565b6000806000835160001a9050608060ff1681101561276957600191506127f6565b60b860ff16811015612786576001608060ff1682030191506127f5565b60c060ff168110156127b65760b78103600185019450806020036101000a855104600182018101935050506127f4565b60f860ff168110156127d357600160c060ff1682030191506127f3565b60f78103600185019450806020036101000a855104600182018101935050505b5b5b5b8192505050919050565b81548183558181111561282d5760030281600302836000526020600020918201910161282c91906128be565b5b505050565b60405180606001604052806000815260200160008152602001600073ffffffffffffffffffffffffffffffffffffffff1681525090565b60405180606001604052806000815260200160008152602001600081525090565b60405180604001
6040528060008152602001600081525090565b604051806040016040528060008152602001600081525090565b61291191905b8082111561290d5760008082016000905560018201600090556002820160006101000a81549073ffffffffffffffffffffffffffffffffffffffff0219169055506003016128c4565b5090565b90565b60008135905061292381613693565b92915050565b600081359050612938816136aa565b92915050565b60008151905061294d816136aa565b92915050565b60008083601f84011261296557600080fd5b8235905067ffffffffffffffff81111561297e57600080fd5b60208301915083600182028301111561299657600080fd5b9250929050565b600082601f8301126129ae57600080fd5b81356129c16129bc826134e2565b6134b5565b915080825260208301602083018583830111156129dd57600080fd5b6129e883828461363d565b50505092915050565b600081359050612a00816136c1565b92915050565b600060208284031215612a1857600080fd5b6000612a2684828501612914565b91505092915050565b600060208284031215612a4157600080fd5b6000612a4f84828501612929565b91505092915050565b600060208284031215612a6a57600080fd5b6000612a788482850161293e565b91505092915050565b60008060408385031215612a9457600080fd5b6000612aa285828601612929565b9250506020612ab385828601612929565b9150509250929050565b600080600060608486031215612ad257600080fd5b6000612ae086828701612929565b9350506020612af186828701612929565b925050604084013567ffffffffffffffff811115612b0e57600080fd5b612b1a8682870161299d565b9150509250925092565b600060208284031215612b3657600080fd5b6000612b44848285016129f1565b91505092915050565b60008060408385031215612b6057600080fd5b6000612b6e858286016129f1565b9250506020612b7f85828601612914565b9150509250929050565b600080600060608486031215612b9e57600080fd5b6000612bac868287016129f1565b9350506020612bbd86828701612929565b925050604084013567ffffffffffffffff811115612bda57600080fd5b612be68682870161299d565b9150509250925092565b60008060408385031215612c0357600080fd5b6000612c11858286016129f1565b9250506020612c22858286016129f1565b9150509250929050565b600080600080600080600060a0888a031215612c4757600080fd5b6000612c558a828b016129f1565b9750506020612c668a828b016129f1565b9650506040612c778a828b016129f1565b955050606088013567ffffffffffffffff811115612c9457600080fd5b612ca08a828b01612953565b9450945050608088013567ffffffffffffffff811115612cbf57600080fd5b612ccb8a828b01612953565b925092505092959891949750929550565b6000612ce88383612d0c565b60208301905092915050565b6000612d00838361317d565b60208301905092915050565b612d15816135b2565b82525050565b612d24816135b2565b82525050565b6000612d358261352e565b612d3f8185613569565b9350612d4a8361350e565b8060005b83811015612d7b578151612d628882612cdc565b9750612d6d8361354f565b925050600181019050612d4e565b5085935050505092915050565b6000612d9382613539565b612d9d818561357a565b9350612da88361351e565b8060005b83811015612dd9578151612dc08882612cf4565b9750612dcb8361355c565b925050600181019050612dac565b5085935050505092915050565b612def816135c4565b82525050565b612e06612e01826135d0565b61367f565b82525050565b612e15816135fc565b82525050565b612e2c612e27826135fc565b613689565b82525050565b6000612e3d82613544565b612e47818561358b565b9350612e5781856020860161364c565b80840191505092915050565b6000612e706004836135a7565b91507f766f7465000000000000000000000000000000000000000000000000000000006000830152600482019050919050565b6000612eb0602d83613596565b91507f537461727420626c6f636b206d7573742062652067726561746572207468616e60008301527f2063757272656e74207370616e000000000000000000000000000000000000006020830152604082019050919050565b6000612f16600f83613596565b91507f496e76616c6964207370616e20696400000000000000000000000000000000006000830152602082019050919050565b6000612f56601383613596565b91507f5370616e20616c726561647920657869737473000000000000000000000000006000830152602082019
050919050565b6000612f96604583613596565b91507f446966666572656e6365206265747765656e20737461727420616e6420656e6460008301527f20626c6f636b206d75737420626520696e206d756c7469706c6573206f66207360208301527f7072696e740000000000000000000000000000000000000000000000000000006040830152606082019050919050565b6000613022602a83613596565b91507f456e6420626c6f636b206d7573742062652067726561746572207468616e207360008301527f7461727420626c6f636b000000000000000000000000000000000000000000006020830152604082019050919050565b6000613088601283613596565b91507f4e6f742053797374656d204164646573732100000000000000000000000000006000830152602082019050919050565b60006130c86005836135a7565b91507f38303030310000000000000000000000000000000000000000000000000000006000830152600582019050919050565b6000613108600e836135a7565b91507f6865696d64616c6c2d38303030310000000000000000000000000000000000006000830152600e82019050919050565b606082016000820151613151600085018261317d565b506020820151613164602085018261317d565b5060408201516131776040850182612d0c565b50505050565b61318681613626565b82525050565b61319581613626565b82525050565b6131a481613630565b82525050565b60006131b68285612df5565b6001820191506131c68284612e1b565b6020820191508190509392505050565b60006131e28286612df5565b6001820191506131f28285612e1b565b6020820191506132028284612e1b565b602082019150819050949350505050565b600061321f8284612e32565b915081905092915050565b600061323582612e63565b9150819050919050565b600061324a826130bb565b9150819050919050565b600061325f826130fb565b9150819050919050565b600060208201905061327e6000830184612d1b565b92915050565b6000604082019050818103600083015261329e8185612d2a565b905081810360208301526132b28184612d88565b90509392505050565b60006020820190506132d06000830184612de6565b92915050565b60006020820190506132eb6000830184612e0c565b92915050565b60006080820190506133066000830187612e0c565b613313602083018661319b565b6133206040830185612e0c565b61332d6060830184612e0c565b95945050505050565b6000602082019050818103600083015261334f81612ea3565b9050919050565b6000602082019050818103600083015261336f81612f09565b9050919050565b6000602082019050818103600083015261338f81612f49565b9050919050565b600060208201905081810360008301526133af81612f89565b9050919050565b600060208201905081810360008301526133cf81613015565b9050919050565b600060208201905081810360008301526133ef8161307b565b9050919050565b600060608201905061340b600083018461313b565b92915050565b6000602082019050613426600083018461318c565b92915050565b6000606082019050613441600083018661318c565b61344e602083018561318c565b61345b6040830184612d1b565b949350505050565b6000606082019050613478600083018661318c565b613485602083018561318c565b613492604083018461318c565b949350505050565b60006020820190506134af600083018461319b565b92915050565b6000604051905081810181811067ffffffffffffffff821117156134d857600080fd5b8060405250919050565b600067ffffffffffffffff8211156134f957600080fd5b601f19601f8301169050602081019050919050565b6000819050602082019050919050565b6000819050602082019050919050565b600081519050919050565b600081519050919050565b600081519050919050565b6000602082019050919050565b6000602082019050919050565b600082825260208201905092915050565b600082825260208201905092915050565b600081905092915050565b600082825260208201905092915050565b600081905092915050565b60006135bd82613606565b9050919050565b60008115159050919050565b60007fff0000000000000000000000000000000000000000000000000000000000000082169050919050565b6000819050919050565b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b6000819050919050565b600060ff82169050919050565b82818337600083830152505050565b60005b8381101561366a57808201518184015260208101905061364f565b83811115613679576000
848401525b50505050565b6000819050919050565b6000819050919050565b61369c816135b2565b81146136a757600080fd5b50565b6136b3816135fc565b81146136be57600080fd5b50565b6136ca81613626565b81146136d557600080fd5b5056fea365627a7a723158208f52ee07630ffe523cc6ad3e15f437f973dcfa36729cd697f9b0fc4a145a48f06c6578706572696d656e74616cf564736f6c634300050b0040" + }, + "0000000000000000000000000000000000001001": { + "balance": "0x0", + "code": "0x608060405234801561001057600080fd5b50600436106100415760003560e01c806319494a17146100465780633434735f146100e15780635407ca671461012b575b600080fd5b6100c76004803603604081101561005c57600080fd5b81019080803590602001909291908035906020019064010000000081111561008357600080fd5b82018360208201111561009557600080fd5b803590602001918460018302840111640100000000831117156100b757600080fd5b9091929391929390505050610149565b604051808215151515815260200191505060405180910390f35b6100e961047a565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b610133610492565b6040518082815260200191505060405180910390f35b600073fffffffffffffffffffffffffffffffffffffffe73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614610200576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260128152602001807f4e6f742053797374656d2041646465737321000000000000000000000000000081525060200191505060405180910390fd5b606061025761025285858080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f82011690508083019250505050505050610498565b6104c6565b905060006102788260008151811061026b57fe5b60200260200101516105a3565b905080600160005401146102f4576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252601b8152602001807f537461746549647320617265206e6f742073657175656e7469616c000000000081525060200191505060405180910390fd5b600080815480929190600101919050555060006103248360018151811061031757fe5b6020026020010151610614565b905060606103458460028151811061033857fe5b6020026020010151610637565b9050610350826106c3565b1561046f576000624c4b409050606084836040516024018083815260200180602001828103825283818151815260200191508051906020019080838360005b838110156103aa57808201518184015260208101905061038f565b50505050905090810190601f1680156103d75780820380516001836020036101000a031916815260200191505b5093505050506040516020818303038152906040527f26c53bea000000000000000000000000000000000000000000000000000000007bffffffffffffffffffffffffffffffffffffffffffffffffffffffff19166020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff8381831617835250505050905060008082516020840160008887f1965050505b505050509392505050565b73fffffffffffffffffffffffffffffffffffffffe81565b60005481565b6104a0610943565b600060208301905060405180604001604052808451815260200182815250915050919050565b60606104d1826106dc565b6104da57600080fd5b60006104e58361072a565b905060608160405190808252806020026020018201604052801561052357816020015b61051061095d565b8152602001906001900390816105085790505b5090506000610535856020015161079b565b8560200151019050600080600090505b848110156105965761055683610824565b915060405180604001604052808381526020018481525084828151811061057957fe5b602002602001018190525081830192508080600101915050610545565b5082945050505050919050565b60008082600001511180156105bd57506021826000015111155b6105c657600080fd5b60006105d5836020015161079b565b9050600081846000015103905060008083866020015101905080519150602083101561060857826020036101000a820491505b81945050505050919050565b600060158260000151146
1062757600080fd5b610630826105a3565b9050919050565b6060600082600001511161064a57600080fd5b6000610659836020015161079b565b905060008184600001510390506060816040519080825280601f01601f19166020018201604052801561069b5781602001600182028038833980820191505090505b50905060008160200190506106b78487602001510182856108dc565b81945050505050919050565b600080823b905060008163ffffffff1611915050919050565b600080826000015114156106f35760009050610725565b60008083602001519050805160001a915060c060ff168260ff16101561071e57600092505050610725565b6001925050505b919050565b600080826000015114156107415760009050610796565b60008090506000610755846020015161079b565b84602001510190506000846000015185602001510190505b8082101561078f5761077e82610824565b82019150828060010193505061076d565b8293505050505b919050565b600080825160001a9050608060ff168110156107bb57600091505061081f565b60b860ff168110806107e0575060c060ff1681101580156107df575060f860ff1681105b5b156107ef57600191505061081f565b60c060ff1681101561080f5760018060b80360ff1682030191505061081f565b60018060f80360ff168203019150505b919050565b6000806000835160001a9050608060ff1681101561084557600191506108d2565b60b860ff16811015610862576001608060ff1682030191506108d1565b60c060ff168110156108925760b78103600185019450806020036101000a855104600182018101935050506108d0565b60f860ff168110156108af57600160c060ff1682030191506108cf565b60f78103600185019450806020036101000a855104600182018101935050505b5b5b5b8192505050919050565b60008114156108ea5761093e565b5b602060ff16811061091a5782518252602060ff1683019250602060ff1682019150602060ff16810390506108eb565b6000600182602060ff16036101000a03905080198451168184511681811785525050505b505050565b604051806040016040528060008152602001600081525090565b60405180604001604052806000815260200160008152509056fea265627a7a7231582083fbdacb76f32b4112d0f7db9a596937925824798a0026ba0232322390b5263764736f6c634300050b0032" + }, + "0000000000000000000000000000000000001010": { + "balance": "0x204fcd4f31349d83b6e00000", + "code": 
"0x60806040526004361061019c5760003560e01c806377d32e94116100ec578063acd06cb31161008a578063e306f77911610064578063e306f77914610a7b578063e614d0d614610aa6578063f2fde38b14610ad1578063fc0c546a14610b225761019c565b8063acd06cb31461097a578063b789543c146109cd578063cc79f97b14610a505761019c565b80639025e64c116100c65780639025e64c146107c957806395d89b4114610859578063a9059cbb146108e9578063abceeba21461094f5761019c565b806377d32e94146106315780638da5cb5b146107435780638f32d59b1461079a5761019c565b806347e7ef24116101595780637019d41a116101335780637019d41a1461053357806370a082311461058a578063715018a6146105ef578063771282f6146106065761019c565b806347e7ef2414610410578063485cc9551461046b57806360f96a8f146104dc5761019c565b806306fdde03146101a15780631499c5921461023157806318160ddd1461028257806319d27d9c146102ad5780632e1a7d4d146103b1578063313ce567146103df575b600080fd5b3480156101ad57600080fd5b506101b6610b79565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156101f65780820151818401526020810190506101db565b50505050905090810190601f1680156102235780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561023d57600080fd5b506102806004803603602081101561025457600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610bb6565b005b34801561028e57600080fd5b50610297610c24565b6040518082815260200191505060405180910390f35b3480156102b957600080fd5b5061036f600480360360a08110156102d057600080fd5b81019080803590602001906401000000008111156102ed57600080fd5b8201836020820111156102ff57600080fd5b8035906020019184600183028401116401000000008311171561032157600080fd5b9091929391929390803590602001909291908035906020019092919080359060200190929190803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610c3a565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b6103dd600480360360208110156103c757600080fd5b8101908080359060200190929190505050610e06565b005b3480156103eb57600080fd5b506103f4610f58565b604051808260ff1660ff16815260200191505060405180910390f35b34801561041c57600080fd5b506104696004803603604081101561043357600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff16906020019092919080359060200190929190505050610f61565b005b34801561047757600080fd5b506104da6004803603604081101561048e57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803573ffffffffffffffffffffffffffffffffffffffff16906020019092919050505061111d565b005b3480156104e857600080fd5b506104f16111ec565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34801561053f57600080fd5b50610548611212565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34801561059657600080fd5b506105d9600480360360208110156105ad57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050611238565b6040518082815260200191505060405180910390f35b3480156105fb57600080fd5b50610604611259565b005b34801561061257600080fd5b5061061b611329565b6040518082815260200191505060405180910390f35b34801561063d57600080fd5b506107016004803603604081101561065457600080fd5b81019080803590602001909291908035906020019064010000000081111561067b57600080fd5b82018360208201111561068d57600080fd5b803590602001918460018302840111640100000000831117156106af57600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f8201169050808301925050
5050505050919291929050505061132f565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34801561074f57600080fd5b506107586114b4565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b3480156107a657600080fd5b506107af6114dd565b604051808215151515815260200191505060405180910390f35b3480156107d557600080fd5b506107de611534565b6040518080602001828103825283818151815260200191508051906020019080838360005b8381101561081e578082015181840152602081019050610803565b50505050905090810190601f16801561084b5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561086557600080fd5b5061086e61156d565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156108ae578082015181840152602081019050610893565b50505050905090810190601f1680156108db5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b610935600480360360408110156108ff57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803590602001909291905050506115aa565b604051808215151515815260200191505060405180910390f35b34801561095b57600080fd5b506109646115d0565b6040518082815260200191505060405180910390f35b34801561098657600080fd5b506109b36004803603602081101561099d57600080fd5b810190808035906020019092919050505061165d565b604051808215151515815260200191505060405180910390f35b3480156109d957600080fd5b50610a3a600480360360808110156109f057600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff16906020019092919080359060200190929190803590602001909291908035906020019092919050505061167d565b6040518082815260200191505060405180910390f35b348015610a5c57600080fd5b50610a6561169d565b6040518082815260200191505060405180910390f35b348015610a8757600080fd5b50610a906116a4565b6040518082815260200191505060405180910390f35b348015610ab257600080fd5b50610abb6116aa565b6040518082815260200191505060405180910390f35b348015610add57600080fd5b50610b2060048036036020811015610af457600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050611737565b005b348015610b2e57600080fd5b50610b37611754565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b60606040518060400160405280600b81526020017f4d6174696320546f6b656e000000000000000000000000000000000000000000815250905090565b6040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260108152602001807f44697361626c656420666561747572650000000000000000000000000000000081525060200191505060405180910390fd5b6000601260ff16600a0a6402540be40002905090565b6000808511610c4857600080fd5b6000831480610c575750824311155b610cc9576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260148152602001807f5369676e6174757265206973206578706972656400000000000000000000000081525060200191505060405180910390fd5b6000610cd73387878761167d565b9050600015156005600083815260200190815260200160002060009054906101000a900460ff16151514610d73576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252600f8152602001807f536967206465616374697661746564000000000000000000000000000000000081525060200191505060405180910390fd5b60016005600083815260200190815260200160002060006101000a81548160ff021916908315150217905550610ded8189898080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f82011690508083019
25050505050505061132f565b9150610dfa82848861177a565b50509695505050505050565b60003390506000610e1682611238565b9050610e2d83600654611b3790919063ffffffff16565b600681905550600083118015610e4257508234145b610eb4576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260138152602001807f496e73756666696369656e7420616d6f756e740000000000000000000000000081525060200191505060405180910390fd5b8173ffffffffffffffffffffffffffffffffffffffff16600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167febff2602b3f468259e1e99f613fed6691f3a6526effe6ef3e768ba7ae7a36c4f8584610f3087611238565b60405180848152602001838152602001828152602001935050505060405180910390a3505050565b60006012905090565b610f696114dd565b610f7257600080fd5b600081118015610faf5750600073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1614155b611004576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526023815260200180611e636023913960400191505060405180910390fd5b600061100f83611238565b905060008390508073ffffffffffffffffffffffffffffffffffffffff166108fc849081150290604051600060405180830381858888f1935050505015801561105c573d6000803e3d6000fd5b5061107283600654611b5790919063ffffffff16565b6006819055508373ffffffffffffffffffffffffffffffffffffffff16600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167f4e2ca0515ed1aef1395f66b5303bb5d6f1bf9d61a353fa53f73f8ac9973fa9f685856110f489611238565b60405180848152602001838152602001828152602001935050505060405180910390a350505050565b600760009054906101000a900460ff1615611183576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526023815260200180611e406023913960400191505060405180910390fd5b6001600760006101000a81548160ff02191690831515021790555080600260006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055506111e882611b76565b5050565b600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600460009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b60008173ffffffffffffffffffffffffffffffffffffffff16319050919050565b6112616114dd565b61126a57600080fd5b600073ffffffffffffffffffffffffffffffffffffffff166000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a360008060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550565b60065481565b600080600080604185511461134a57600093505050506114ae565b602085015192506040850151915060ff6041860151169050601b8160ff16101561137557601b810190505b601b8160ff161415801561138d5750601c8160ff1614155b1561139e57600093505050506114ae565b60018682858560405160008152602001604052604051808581526020018460ff1660ff1681526020018381526020018281526020019450505050506020604051602081039080840390855afa1580156113fb573d6000803e3d6000fd5b505050602060405103519350600073ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff1614156114aa576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260128152602001807f4572726f7220696e2065637265636f766572000000000000000000000000000081525060200191505060405180910390fd5b5050505b92915050565b60008060009054906101000a900473ffffffffffffffffffffffffffffff
ffffffffff16905090565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614905090565b6040518060400160405280600381526020017f013881000000000000000000000000000000000000000000000000000000000081525081565b60606040518060400160405280600581526020017f4d41544943000000000000000000000000000000000000000000000000000000815250905090565b60008134146115bc57600090506115ca565b6115c733848461177a565b90505b92915050565b6040518060800160405280605b8152602001611ed8605b91396040516020018082805190602001908083835b6020831061161f57805182526020820191506020810190506020830392506115fc565b6001836020036101000a0380198251168184511680821785525050505050509050019150506040516020818303038152906040528051906020012081565b60056020528060005260406000206000915054906101000a900460ff1681565b600061169361168e86868686611c6e565b611d44565b9050949350505050565b6201388181565b60015481565b604051806080016040528060528152602001611e86605291396040516020018082805190602001908083835b602083106116f957805182526020820191506020810190506020830392506116d6565b6001836020036101000a0380198251168184511680821785525050505050509050019150506040516020818303038152906040528051906020012081565b61173f6114dd565b61174857600080fd5b61175181611b76565b50565b600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000803073ffffffffffffffffffffffffffffffffffffffff166370a08231866040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b1580156117fa57600080fd5b505afa15801561180e573d6000803e3d6000fd5b505050506040513d602081101561182457600080fd5b8101908080519060200190929190505050905060003073ffffffffffffffffffffffffffffffffffffffff166370a08231866040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b1580156118b657600080fd5b505afa1580156118ca573d6000803e3d6000fd5b505050506040513d60208110156118e057600080fd5b810190808051906020019092919050505090506118fe868686611d8e565b8473ffffffffffffffffffffffffffffffffffffffff168673ffffffffffffffffffffffffffffffffffffffff16600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167fe6497e3ee548a3372136af2fcb0696db31fc6cf20260707645068bd3fe97f3c48786863073ffffffffffffffffffffffffffffffffffffffff166370a082318e6040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b158015611a0657600080fd5b505afa158015611a1a573d6000803e3d6000fd5b505050506040513d6020811015611a3057600080fd5b81019080805190602001909291905050503073ffffffffffffffffffffffffffffffffffffffff166370a082318e6040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b158015611abe57600080fd5b505afa158015611ad2573d6000803e3d6000fd5b505050506040513d6020811015611ae857600080fd5b8101908080519060200190929190505050604051808681526020018581526020018481526020018381526020018281526020019550505050505060405180910390a46001925050509392505050565b600082821115611b4657600080fd5b600082840390508091505092915050565b600080828401905083811015611b6c57600080fd5b8091505092915050565b600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff161415611bb057600080fd5b8073fffffffffffffffffffffffffffffffffff
fffff166000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a3806000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555050565b6000806040518060800160405280605b8152602001611ed8605b91396040516020018082805190602001908083835b60208310611cc05780518252602082019150602081019050602083039250611c9d565b6001836020036101000a03801982511681845116808217855250505050505090500191505060405160208183030381529060405280519060200120905060405181815273ffffffffffffffffffffffffffffffffffffffff8716602082015285604082015284606082015283608082015260a0812092505081915050949350505050565b60008060015490506040517f190100000000000000000000000000000000000000000000000000000000000081528160028201528360228201526042812092505081915050919050565b8173ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050158015611dd4573d6000803e3d6000fd5b508173ffffffffffffffffffffffffffffffffffffffff168373ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef836040518082815260200191505060405180910390a350505056fe54686520636f6e747261637420697320616c726561647920696e697469616c697a6564496e73756666696369656e7420616d6f756e74206f7220696e76616c69642075736572454950373132446f6d61696e28737472696e67206e616d652c737472696e672076657273696f6e2c75696e7432353620636861696e49642c6164647265737320766572696679696e67436f6e747261637429546f6b656e5472616e736665724f726465722861646472657373207370656e6465722c75696e7432353620746f6b656e49644f72416d6f756e742c6279746573333220646174612c75696e743235362065787069726174696f6e29a265627a7a723158208f81700133738d766ae3d68af591ad588b0125bd91449192179f460893f79f6b64736f6c634300050b0032" + }, + "0x0000000000000000000000000000000000000001": { + "balance": "0x1" + }, + "0x0000000000000000000000000000000000000002": { + "balance": "0x1" + }, + "0x0000000000000000000000000000000000000003": { + "balance": "0x1" + }, + "0x0000000000000000000000000000000000000004": { + "balance": "0x1" + }, + "0x0000000000000000000000000000000000000005": { + "balance": "0x1" + }, + "0x0000000000000000000000000000000000000006": { + "balance": "0x1" + }, + "0x0000000000000000000000000000000000000007": { + "balance": "0x1" + }, + "0x0000000000000000000000000000000000000008": { + "balance": "0x1" + }, + "0x0000000000000000000000000000000000000009": { + "balance": "0x1" + }, + "0x67b1d87101671b127f5f8714789C7192f7ad340e": { + "balance": "0x21e19e0c9bab2400000" + }, + "0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B": { + "balance": "0x21e19e0c9bab2400000" + } + } + \ No newline at end of file diff --git a/core/genesis.go b/core/genesis.go index 241972f3364..6935bf9e4a1 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -704,6 +704,19 @@ func DefaultBorMainnetGenesisBlock() *Genesis { } } +func DefaultBorDevnetGenesisBlock() *Genesis { + return &Genesis{ + Config: params.BorDevnetChainConfig, + Nonce: 0, + Timestamp: 1558348305, + GasLimit: 10000000, + Difficulty: big.NewInt(1), + Mixhash: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"), + Coinbase: common.HexToAddress("0x0000000000000000000000000000000000000000"), + Alloc: readPrealloc("allocs/bor_devnet.json"), + } +} + // Pre-calculated version of: // DevnetSignPrivateKey = crypto.HexToECDSA(sha256.Sum256([]byte("erigon devnet key"))) // 
DevnetEtherbase=crypto.PubkeyToAddress(DevnetSignPrivateKey.PublicKey)
@@ -780,6 +793,8 @@ func DefaultGenesisBlockByChainName(chain string) *Genesis {
 		return DefaultMumbaiGenesisBlock()
 	case networkname.BorMainnetChainName:
 		return DefaultBorMainnetGenesisBlock()
+	case networkname.BorDevnetChainName:
+		return DefaultBorDevnetGenesisBlock()
 	case networkname.KilnDevnetChainName:
 		return DefaultKilnDevnetGenesisBlock()
 	default:
diff --git a/eth/backend.go b/eth/backend.go
index 78ed912232f..e604a1e3df2 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -653,6 +653,25 @@ func (s *Ethereum) StartMining(ctx context.Context, db kv.RwDB, mining *stagedsy
 		})
 	}
 
+	var borcfg *bor.Bor
+	if b, ok := s.engine.(*bor.Bor); ok {
+		borcfg = b
+	} else if br, ok := s.engine.(*serenity.Serenity); ok {
+		if b, ok := br.InnerEngine().(*bor.Bor); ok {
+			borcfg = b
+		}
+	}
+	if borcfg != nil {
+		if cfg.SigKey == nil {
+			log.Error("Etherbase account unavailable locally", "err", err)
+			return fmt.Errorf("signer missing: %w", err)
+		}
+
+		borcfg.Authorize(eb, func(_ common.Address, mimeType string, message []byte) ([]byte, error) {
+			return crypto.Sign(crypto.Keccak256(message), cfg.SigKey)
+		})
+	}
+
 	go func() {
 		defer debug.LogPanic()
 		defer close(s.waitForMiningStop)
diff --git a/params/chainspecs/bor-devnet.json b/params/chainspecs/bor-devnet.json
new file mode 100644
index 00000000000..bdf9eb7ba21
--- /dev/null
+++ b/params/chainspecs/bor-devnet.json
@@ -0,0 +1,43 @@
+{
+  "ChainName": "bor-devnet",
+  "chainId": 1337,
+  "consensus": "bor",
+  "homesteadBlock": 0,
+  "daoForkSupport": true,
+  "eip150Block": 0,
+  "eip150Hash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+  "eip155Block": 0,
+  "byzantiumBlock": 0,
+  "constantinopleBlock": 0,
+  "petersburgBlock": 0,
+  "istanbulBlock": 0,
+  "muirGlacierBlock": 0,
+  "berlinBlock": 0,
+  "londonBlock": 0,
+  "terminalBlockHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+  "bor": {
+    "period": {
+      "0": 5
+    },
+    "producerDelay": 6,
+    "sprint": 64,
+    "backupMultiplier": {
+      "0": 5
+    },
+    "validatorContract": "0x0000000000000000000000000000000000001000",
+    "stateReceiverContract": "0x0000000000000000000000000000000000001001",
+    "blockAlloc": {
+      "22156660": {
+        "0000000000000000000000000000000000001010": {
+          "balance": "0x0",
+          "code": 
"0x60806040526004361061019c5760003560e01c806377d32e94116100ec578063acd06cb31161008a578063e306f77911610064578063e306f77914610a7b578063e614d0d614610aa6578063f2fde38b14610ad1578063fc0c546a14610b225761019c565b8063acd06cb31461097a578063b789543c146109cd578063cc79f97b14610a505761019c565b80639025e64c116100c65780639025e64c146107c957806395d89b4114610859578063a9059cbb146108e9578063abceeba21461094f5761019c565b806377d32e94146106315780638da5cb5b146107435780638f32d59b1461079a5761019c565b806347e7ef24116101595780637019d41a116101335780637019d41a1461053357806370a082311461058a578063715018a6146105ef578063771282f6146106065761019c565b806347e7ef2414610410578063485cc9551461046b57806360f96a8f146104dc5761019c565b806306fdde03146101a15780631499c5921461023157806318160ddd1461028257806319d27d9c146102ad5780632e1a7d4d146103b1578063313ce567146103df575b600080fd5b3480156101ad57600080fd5b506101b6610b79565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156101f65780820151818401526020810190506101db565b50505050905090810190601f1680156102235780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561023d57600080fd5b506102806004803603602081101561025457600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610bb6565b005b34801561028e57600080fd5b50610297610c24565b6040518082815260200191505060405180910390f35b3480156102b957600080fd5b5061036f600480360360a08110156102d057600080fd5b81019080803590602001906401000000008111156102ed57600080fd5b8201836020820111156102ff57600080fd5b8035906020019184600183028401116401000000008311171561032157600080fd5b9091929391929390803590602001909291908035906020019092919080359060200190929190803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610c3a565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b6103dd600480360360208110156103c757600080fd5b8101908080359060200190929190505050610caa565b005b3480156103eb57600080fd5b506103f4610dfc565b604051808260ff1660ff16815260200191505060405180910390f35b34801561041c57600080fd5b506104696004803603604081101561043357600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff16906020019092919080359060200190929190505050610e05565b005b34801561047757600080fd5b506104da6004803603604081101561048e57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610fc1565b005b3480156104e857600080fd5b506104f1611090565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34801561053f57600080fd5b506105486110b6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34801561059657600080fd5b506105d9600480360360208110156105ad57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291905050506110dc565b6040518082815260200191505060405180910390f35b3480156105fb57600080fd5b506106046110fd565b005b34801561061257600080fd5b5061061b6111cd565b6040518082815260200191505060405180910390f35b34801561063d57600080fd5b506107016004803603604081101561065457600080fd5b81019080803590602001909291908035906020019064010000000081111561067b57600080fd5b82018360208201111561068d57600080fd5b803590602001918460018302840111640100000000831117156106af57600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600081840152601f19601f8201169050808301925050
505050505091929192905050506111d3565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34801561074f57600080fd5b50610758611358565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b3480156107a657600080fd5b506107af611381565b604051808215151515815260200191505060405180910390f35b3480156107d557600080fd5b506107de6113d8565b6040518080602001828103825283818151815260200191508051906020019080838360005b8381101561081e578082015181840152602081019050610803565b50505050905090810190601f16801561084b5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561086557600080fd5b5061086e611411565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156108ae578082015181840152602081019050610893565b50505050905090810190601f1680156108db5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b610935600480360360408110156108ff57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291908035906020019092919050505061144e565b604051808215151515815260200191505060405180910390f35b34801561095b57600080fd5b50610964611474565b6040518082815260200191505060405180910390f35b34801561098657600080fd5b506109b36004803603602081101561099d57600080fd5b8101908080359060200190929190505050611501565b604051808215151515815260200191505060405180910390f35b3480156109d957600080fd5b50610a3a600480360360808110156109f057600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190803590602001909291908035906020019092919080359060200190929190505050611521565b6040518082815260200191505060405180910390f35b348015610a5c57600080fd5b50610a65611541565b6040518082815260200191505060405180910390f35b348015610a8757600080fd5b50610a90611546565b6040518082815260200191505060405180910390f35b348015610ab257600080fd5b50610abb61154c565b6040518082815260200191505060405180910390f35b348015610add57600080fd5b50610b2060048036036020811015610af457600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291905050506115d9565b005b348015610b2e57600080fd5b50610b376115f6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b60606040518060400160405280600b81526020017f4d6174696320546f6b656e000000000000000000000000000000000000000000815250905090565b6040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260108152602001807f44697361626c656420666561747572650000000000000000000000000000000081525060200191505060405180910390fd5b6000601260ff16600a0a6402540be40002905090565b60006040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260108152602001807f44697361626c656420666561747572650000000000000000000000000000000081525060200191505060405180910390fd5b60003390506000610cba826110dc565b9050610cd18360065461161c90919063ffffffff16565b600681905550600083118015610ce657508234145b610d58576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260138152602001807f496e73756666696369656e7420616d6f756e740000000000000000000000000081525060200191505060405180910390fd5b8173ffffffffffffffffffffffffffffffffffffffff16600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167febff2602b3f468259e1e99f613fed6691f3a6526effe6ef3e768ba7ae7a36c4f8584610dd4876110dc565b6040518084815260200183815
2602001828152602001935050505060405180910390a3505050565b60006012905090565b610e0d611381565b610e1657600080fd5b600081118015610e535750600073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1614155b610ea8576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526023815260200180611da76023913960400191505060405180910390fd5b6000610eb3836110dc565b905060008390508073ffffffffffffffffffffffffffffffffffffffff166108fc849081150290604051600060405180830381858888f19350505050158015610f00573d6000803e3d6000fd5b50610f168360065461163c90919063ffffffff16565b6006819055508373ffffffffffffffffffffffffffffffffffffffff16600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167f4e2ca0515ed1aef1395f66b5303bb5d6f1bf9d61a353fa53f73f8ac9973fa9f68585610f98896110dc565b60405180848152602001838152602001828152602001935050505060405180910390a350505050565b600760009054906101000a900460ff1615611027576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526023815260200180611d846023913960400191505060405180910390fd5b6001600760006101000a81548160ff02191690831515021790555080600260006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555061108c8261165b565b5050565b600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600460009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b60008173ffffffffffffffffffffffffffffffffffffffff16319050919050565b611105611381565b61110e57600080fd5b600073ffffffffffffffffffffffffffffffffffffffff166000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a360008060006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff160217905550565b60065481565b60008060008060418551146111ee5760009350505050611352565b602085015192506040850151915060ff6041860151169050601b8160ff16101561121957601b810190505b601b8160ff16141580156112315750601c8160ff1614155b156112425760009350505050611352565b60018682858560405160008152602001604052604051808581526020018460ff1660ff1681526020018381526020018281526020019450505050506020604051602081039080840390855afa15801561129f573d6000803e3d6000fd5b505050602060405103519350600073ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff16141561134e576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260128152602001807f4572726f7220696e2065637265636f766572000000000000000000000000000081525060200191505060405180910390fd5b5050505b92915050565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905090565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614905090565b6040518060400160405280600181526020017f890000000000000000000000000000000000000000000000000000000000000081525081565b60606040518060400160405280600581526020017f4d41544943000000000000000000000000000000000000000000000000000000815250905090565b6000813414611460576000905061146e565b61146b338484611753565b90505b92915050565b6040518060800160405280605b8152602001611e1c605b91396040516020018082805190602001908083835b602083106114c357805182526020820191506020810190506020830392506114a0565b6001836020036101000a0380198251
168184511680821785525050505050509050019150506040516020818303038152906040528051906020012081565b60056020528060005260406000206000915054906101000a900460ff1681565b600061153761153286868686611b10565b611be6565b9050949350505050565b608981565b60015481565b604051806080016040528060528152602001611dca605291396040516020018082805190602001908083835b6020831061159b5780518252602082019150602081019050602083039250611578565b6001836020036101000a0380198251168184511680821785525050505050509050019150506040516020818303038152906040528051906020012081565b6115e1611381565b6115ea57600080fd5b6115f38161165b565b50565b600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b60008282111561162b57600080fd5b600082840390508091505092915050565b60008082840190508381101561165157600080fd5b8091505092915050565b600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff16141561169557600080fd5b8073ffffffffffffffffffffffffffffffffffffffff166000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e060405160405180910390a3806000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555050565b6000803073ffffffffffffffffffffffffffffffffffffffff166370a08231866040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b1580156117d357600080fd5b505afa1580156117e7573d6000803e3d6000fd5b505050506040513d60208110156117fd57600080fd5b8101908080519060200190929190505050905060003073ffffffffffffffffffffffffffffffffffffffff166370a08231866040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b15801561188f57600080fd5b505afa1580156118a3573d6000803e3d6000fd5b505050506040513d60208110156118b957600080fd5b810190808051906020019092919050505090506118d7868686611c30565b8473ffffffffffffffffffffffffffffffffffffffff168673ffffffffffffffffffffffffffffffffffffffff16600260009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff167fe6497e3ee548a3372136af2fcb0696db31fc6cf20260707645068bd3fe97f3c48786863073ffffffffffffffffffffffffffffffffffffffff166370a082318e6040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b1580156119df57600080fd5b505afa1580156119f3573d6000803e3d6000fd5b505050506040513d6020811015611a0957600080fd5b81019080805190602001909291905050503073ffffffffffffffffffffffffffffffffffffffff166370a082318e6040518263ffffffff1660e01b8152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060206040518083038186803b158015611a9757600080fd5b505afa158015611aab573d6000803e3d6000fd5b505050506040513d6020811015611ac157600080fd5b8101908080519060200190929190505050604051808681526020018581526020018481526020018381526020018281526020019550505050505060405180910390a46001925050509392505050565b6000806040518060800160405280605b8152602001611e1c605b91396040516020018082805190602001908083835b60208310611b625780518252602082019150602081019050602083039250611b3f565b6001836020036101000a03801982511681845116808217855250505050505090500191505060405160208183030381529060405280519060200120905060405181815273ffffffffffffffffffffffffffffffffffffffff8716602082015285604
082015284606082015283608082015260a0812092505081915050949350505050565b60008060015490506040517f190100000000000000000000000000000000000000000000000000000000000081528160028201528360228201526042812092505081915050919050565b3073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff161415611cd2576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260138152602001807f63616e27742073656e6420746f204d524332300000000000000000000000000081525060200191505060405180910390fd5b8173ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050158015611d18573d6000803e3d6000fd5b508173ffffffffffffffffffffffffffffffffffffffff168373ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef836040518082815260200191505060405180910390a350505056fe54686520636f6e747261637420697320616c726561647920696e697469616c697a6564496e73756666696369656e7420616d6f756e74206f7220696e76616c69642075736572454950373132446f6d61696e28737472696e67206e616d652c737472696e672076657273696f6e2c75696e7432353620636861696e49642c6164647265737320766572696679696e67436f6e747261637429546f6b656e5472616e736665724f726465722861646472657373207370656e6465722c75696e7432353620746f6b656e49644f72416d6f756e742c6279746573333220646174612c75696e743235362065787069726174696f6e29a265627a7a72315820a4a6f71a98ac3fc613c3a8f1e2e11b9eb9b6b39f125f7d9508916c2b8fb02c7164736f6c63430005100032" + } + } + }, + "burntContract": { + "23850000": "0x70bca57f4579f58670ab2d18ef16e02c17553c38" + }, + "jaipurBlock": 0 + } + } + \ No newline at end of file diff --git a/params/config.go b/params/config.go index 88cc6fc78f6..ebd98ae772c 100644 --- a/params/config.go +++ b/params/config.go @@ -73,6 +73,7 @@ var ( RialtoGenesisHash = common.HexToHash("0xaabe549bfa85c84f7aee9da7010b97453ad686f2c2d8ce00503d1a00c72cad54") MumbaiGenesisHash = common.HexToHash("0x7b66506a9ebdbf30d32b43c5f15a3b1216269a1ec3a75aa3182b86176a2b1ca7") BorMainnetGenesisHash = common.HexToHash("0xa9c28ce2141b56c474f1dc504bee9b01eb1bd7d1a507580d5519d4437a97de1b") + BorDevnetGenesisHash = common.HexToHash("0x5a06b25b0c6530708ea0b98a3409290e39dce6be7f558493aeb6e4b99a172a87") ) var ( @@ -162,6 +163,8 @@ var ( BorMainnetChainConfig = readChainSpec("chainspecs/bor-mainnet.json") + BorDevnetChainConfig = readChainSpec("chainspecs/bor-devnet.json") + CliqueSnapshot = NewSnapshotConfig(10, 1024, 16384, true, "") TestChainConfig = &ChainConfig{ @@ -770,6 +773,8 @@ func ChainConfigByChainName(chain string) *ChainConfig { return MumbaiChainConfig case networkname.BorMainnetChainName: return BorMainnetChainConfig + case networkname.BorDevnetChainName: + return BorDevnetChainConfig default: return nil } @@ -803,6 +808,8 @@ func GenesisHashByChainName(chain string) *common.Hash { return &MumbaiGenesisHash case networkname.BorMainnetChainName: return &BorMainnetGenesisHash + case networkname.BorDevnetChainName: + return &BorDevnetGenesisHash default: return nil } diff --git a/params/networkname/network_name.go b/params/networkname/network_name.go index 6bedcf7dbde..958bf698b1d 100644 --- a/params/networkname/network_name.go +++ b/params/networkname/network_name.go @@ -15,6 +15,7 @@ const ( RialtoChainName = "rialto" MumbaiChainName = "mumbai" BorMainnetChainName = "bor-mainnet" + BorDevnetChainName = "bor-devnet" ) var All = []string{ @@ -32,4 +33,5 @@ var All = []string{ //RialtoChainName, MumbaiChainName, BorMainnetChainName, + BorDevnetChainName, } diff --git 
a/turbo/node/node.go b/turbo/node/node.go index eb9620f78e2..9d0f8ff9bb8 100644 --- a/turbo/node/node.go +++ b/turbo/node/node.go @@ -71,8 +71,12 @@ func NewNodConfigUrfave(ctx *cli.Context) *nodecfg.Config { log.Info("Starting Erigon on Chapel testnet...") case networkname.DevChainName: log.Info("Starting Erigon in ephemeral dev mode...") + case networkname.MumbaiChainName: + log.Info("Starting Erigon on Mumbai testnet...") case networkname.BorMainnetChainName: - log.Info("Starting Erigon on Bor Mainnet") + log.Info("Starting Erigon on Bor Mainnet...") + case networkname.BorDevnetChainName: + log.Info("Starting Erigon on Bor Devnet...") case "", networkname.MainnetChainName: if !ctx.GlobalIsSet(utils.NetworkIdFlag.Name) { log.Info("Starting Erigon on Ethereum mainnet...") From 43f5f0ae2796ab905ec5069f95c088938460eaa6 Mon Sep 17 00:00:00 2001 From: ledgerwatch Date: Fri, 10 Jun 2022 11:59:31 +0100 Subject: [PATCH 041/136] Close peerInfo tasks channel (#4429) Co-authored-by: Alexey Sharp --- cmd/sentry/sentry/sentry_grpc_server.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/cmd/sentry/sentry/sentry_grpc_server.go b/cmd/sentry/sentry/sentry_grpc_server.go index 965a1675795..d9cff87b712 100644 --- a/cmd/sentry/sentry/sentry_grpc_server.go +++ b/cmd/sentry/sentry/sentry_grpc_server.go @@ -78,6 +78,14 @@ func NewPeerInfo(peer *p2p.Peer, rw p2p.MsgReadWriter) *PeerInfo { return p } +func (pi *PeerInfo) Close() { + pi.lock.Lock() + defer pi.lock.Unlock() + if pi.tasks != nil { + close(pi.tasks) + } +} + func (pi *PeerInfo) ID() [64]byte { return pi.peer.Pubkey() } @@ -494,6 +502,7 @@ func NewGrpcServer(ctx context.Context, dialCandidates enode.Iterator, readNodeI log.Trace(fmt.Sprintf("[%s] Start with peer", peerID)) peerInfo := NewPeerInfo(peer, rw) + defer peerInfo.Close() defer ss.GoodPeers.Delete(peerID) err := handShake(ctx, ss.GetStatus(), peerID, rw, protocol, protocol, func(bestHash common.Hash) error { From 8e3ac8a21c8f55bdd78338e88e956999ad7e60b8 Mon Sep 17 00:00:00 2001 From: ledgerwatch Date: Fri, 10 Jun 2022 16:18:43 +0100 Subject: [PATCH 042/136] Erigon2 upgrade 2 prototype (#4341) * Erigon2 upgrade 2 prototype * Latest erigon-lib * Fixes * Fix print * Fix maxSpan * Reduce maxSpan * Remove duplicate joins * TxNum * Fix resuming * first draft of history22 * Introduce historical reads * Update to erigon-lib * Update erigon-lib * Update erigon-lib * Fixes and tracing for checkChangeSets * More trace * Print account details * fix getHeader * Update to erigon-lib main * Add tracer indices and event log indices * Fix calltracer * Fix calltracer * Duplicate rpcdaemon into rpcdaemon22 * Fix tests * Fix tests * Fix tests * Update to latest erigon-lib Co-authored-by: Alexey Sharp Co-authored-by: Alex Sharp --- Makefile | 1 + cmd/rpcdaemon/cli/config.go | 12 +- cmd/rpcdaemon/commands/admin_api.go | 6 +- cmd/rpcdaemon/commands/daemon.go | 7 +- cmd/rpcdaemon/commands/engine_api.go | 6 +- cmd/rpcdaemon/commands/erigon_api.go | 6 +- cmd/rpcdaemon/commands/eth_api.go | 11 +- cmd/rpcdaemon/commands/eth_filters.go | 14 +- cmd/rpcdaemon/commands/eth_ming_test.go | 6 +- cmd/rpcdaemon/commands/eth_subscribe_test.go | 3 +- cmd/rpcdaemon/commands/net_api.go | 6 +- .../commands/send_transaction_test.go | 4 +- .../starknet_send_transaction_test.go | 4 +- cmd/rpcdaemon/commands/txpool_api_test.go | 4 +- cmd/rpcdaemon/commands/web3_api.go | 6 +- cmd/rpcdaemon22/.gitignore | 0 cmd/rpcdaemon22/README.md | 485 ++ cmd/rpcdaemon22/cli/config.go | 647 ++ cmd/rpcdaemon22/cli/httpcfg/http_cfg.go | 45 + 
cmd/rpcdaemon22/cli/rpc_allow_list.go | 43 + cmd/rpcdaemon22/commands/admin_api.go | 49 + cmd/rpcdaemon22/commands/bor_api.go | 37 + cmd/rpcdaemon22/commands/bor_helper.go | 156 + cmd/rpcdaemon22/commands/bor_snapshot.go | 424 + cmd/rpcdaemon22/commands/call_traces_test.go | 265 + .../commands/contracts/build/Poly.abi | 1 + .../commands/contracts/build/Poly.bin | 1 + .../commands/contracts/build/Token.abi | 1 + .../commands/contracts/build/Token.bin | 1 + cmd/rpcdaemon22/commands/contracts/gen.go | 4 + .../commands/contracts/gen_poly.go | 364 + .../commands/contracts/gen_token.go | 324 + cmd/rpcdaemon22/commands/contracts/poly.sol | 36 + cmd/rpcdaemon22/commands/contracts/token.sol | 39 + .../commands/corner_cases_support_test.go | 62 + cmd/rpcdaemon22/commands/daemon.go | 134 + cmd/rpcdaemon22/commands/db_api_deprecated.go | 52 + cmd/rpcdaemon22/commands/debug_api.go | 273 + cmd/rpcdaemon22/commands/debug_api_test.go | 185 + cmd/rpcdaemon22/commands/engine_api.go | 262 + cmd/rpcdaemon22/commands/engine_api_test.go | 18 + cmd/rpcdaemon22/commands/erigon_api.go | 52 + cmd/rpcdaemon22/commands/erigon_block.go | 142 + .../erigon_cumulative_chain_traffic.go | 41 + cmd/rpcdaemon22/commands/erigon_issuance.go | 133 + cmd/rpcdaemon22/commands/erigon_nodeInfo.go | 16 + cmd/rpcdaemon22/commands/erigon_receipts.go | 66 + cmd/rpcdaemon22/commands/erigon_system.go | 31 + cmd/rpcdaemon22/commands/error_messages.go | 10 + cmd/rpcdaemon22/commands/eth_accounts.go | 121 + cmd/rpcdaemon22/commands/eth_api.go | 356 + cmd/rpcdaemon22/commands/eth_api_test.go | 219 + cmd/rpcdaemon22/commands/eth_block.go | 320 + cmd/rpcdaemon22/commands/eth_call.go | 453 + cmd/rpcdaemon22/commands/eth_call_test.go | 251 + cmd/rpcdaemon22/commands/eth_deprecated.go | 26 + cmd/rpcdaemon22/commands/eth_filters.go | 248 + cmd/rpcdaemon22/commands/eth_ming_test.go | 64 + cmd/rpcdaemon22/commands/eth_mining.go | 94 + cmd/rpcdaemon22/commands/eth_receipts.go | 452 + .../commands/eth_subscribe_test.go | 59 + cmd/rpcdaemon22/commands/eth_system.go | 218 + cmd/rpcdaemon22/commands/eth_txs.go | 240 + cmd/rpcdaemon22/commands/eth_uncles.go | 133 + .../commands/get_chain_config_test.go | 39 + cmd/rpcdaemon22/commands/net_api.go | 66 + cmd/rpcdaemon22/commands/parity_api.go | 89 + cmd/rpcdaemon22/commands/parity_api_test.go | 105 + cmd/rpcdaemon22/commands/rpc_block.go | 45 + cmd/rpcdaemon22/commands/send_transaction.go | 96 + .../commands/send_transaction_test.go | 110 + cmd/rpcdaemon22/commands/starknet_accounts.go | 39 + cmd/rpcdaemon22/commands/starknet_api.go | 34 + cmd/rpcdaemon22/commands/starknet_call.go | 96 + .../commands/starknet_send_transaction.go | 50 + .../starknet_send_transaction_test.go | 83 + cmd/rpcdaemon22/commands/storage_range.go | 42 + cmd/rpcdaemon22/commands/trace_adhoc.go | 1224 +++ cmd/rpcdaemon22/commands/trace_adhoc_test.go | 108 + cmd/rpcdaemon22/commands/trace_api.go | 49 + cmd/rpcdaemon22/commands/trace_filtering.go | 520 ++ cmd/rpcdaemon22/commands/trace_types.go | 160 + cmd/rpcdaemon22/commands/tracing.go | 241 + cmd/rpcdaemon22/commands/txpool_api.go | 170 + cmd/rpcdaemon22/commands/txpool_api_test.go | 64 + cmd/rpcdaemon22/commands/validator_set.go | 702 ++ cmd/rpcdaemon22/commands/web3_api.go | 38 + cmd/rpcdaemon22/health/check_block.go | 23 + cmd/rpcdaemon22/health/check_peers.go | 23 + cmd/rpcdaemon22/health/health.go | 131 + cmd/rpcdaemon22/health/interfaces.go | 16 + cmd/rpcdaemon22/health/parse_api.go | 22 + cmd/rpcdaemon22/main.go | 42 + cmd/rpcdaemon22/postman/README.md | 18 + 
cmd/rpcdaemon22/postman/RPC_Testing.json | 4235 ++++++++++ cmd/rpcdaemon22/postman/Trace_Testing.json | 7474 +++++++++++++++++ cmd/rpcdaemon22/rpcdaemontest/test_util.go | 321 + cmd/rpcdaemon22/rpcservices/eth_backend.go | 288 + cmd/rpcdaemon22/rpcservices/eth_mining.go | 43 + cmd/rpcdaemon22/rpcservices/eth_starknet.go | 31 + cmd/rpcdaemon22/rpcservices/eth_txpool.go | 50 + cmd/rpcdaemon22/test.http | 222 + cmd/rpcdaemon22/testdata/.gitignore | 5 + cmd/rpcdaemon22/testdata/sed_file | 22 + cmd/rpcdaemon22/testdata/trace_tests | 76 + cmd/state/commands/calltracer22.go | 57 + cmd/state/commands/check_change_sets.go | 4 +- cmd/state/commands/erigon22.go | 534 ++ cmd/state/commands/history22.go | 286 + cmd/state/commands/opcode_tracer.go | 7 +- cmd/state/commands/state_root.go | 2 +- core/state/plain_readonly.go | 6 + go.mod | 2 +- go.sum | 4 +- .../rpchelper}/filters.go | 5 +- turbo/rpchelper/helper.go | 9 +- .../rpchelper}/interface.go | 2 +- .../rpchelper}/logsfilter.go | 2 +- turbo/transactions/call.go | 3 +- 119 files changed, 25816 insertions(+), 68 deletions(-) create mode 100644 cmd/rpcdaemon22/.gitignore create mode 100644 cmd/rpcdaemon22/README.md create mode 100644 cmd/rpcdaemon22/cli/config.go create mode 100644 cmd/rpcdaemon22/cli/httpcfg/http_cfg.go create mode 100644 cmd/rpcdaemon22/cli/rpc_allow_list.go create mode 100644 cmd/rpcdaemon22/commands/admin_api.go create mode 100644 cmd/rpcdaemon22/commands/bor_api.go create mode 100644 cmd/rpcdaemon22/commands/bor_helper.go create mode 100644 cmd/rpcdaemon22/commands/bor_snapshot.go create mode 100644 cmd/rpcdaemon22/commands/call_traces_test.go create mode 100644 cmd/rpcdaemon22/commands/contracts/build/Poly.abi create mode 100644 cmd/rpcdaemon22/commands/contracts/build/Poly.bin create mode 100644 cmd/rpcdaemon22/commands/contracts/build/Token.abi create mode 100644 cmd/rpcdaemon22/commands/contracts/build/Token.bin create mode 100644 cmd/rpcdaemon22/commands/contracts/gen.go create mode 100644 cmd/rpcdaemon22/commands/contracts/gen_poly.go create mode 100644 cmd/rpcdaemon22/commands/contracts/gen_token.go create mode 100644 cmd/rpcdaemon22/commands/contracts/poly.sol create mode 100644 cmd/rpcdaemon22/commands/contracts/token.sol create mode 100644 cmd/rpcdaemon22/commands/corner_cases_support_test.go create mode 100644 cmd/rpcdaemon22/commands/daemon.go create mode 100644 cmd/rpcdaemon22/commands/db_api_deprecated.go create mode 100644 cmd/rpcdaemon22/commands/debug_api.go create mode 100644 cmd/rpcdaemon22/commands/debug_api_test.go create mode 100644 cmd/rpcdaemon22/commands/engine_api.go create mode 100644 cmd/rpcdaemon22/commands/engine_api_test.go create mode 100644 cmd/rpcdaemon22/commands/erigon_api.go create mode 100644 cmd/rpcdaemon22/commands/erigon_block.go create mode 100644 cmd/rpcdaemon22/commands/erigon_cumulative_chain_traffic.go create mode 100644 cmd/rpcdaemon22/commands/erigon_issuance.go create mode 100644 cmd/rpcdaemon22/commands/erigon_nodeInfo.go create mode 100644 cmd/rpcdaemon22/commands/erigon_receipts.go create mode 100644 cmd/rpcdaemon22/commands/erigon_system.go create mode 100644 cmd/rpcdaemon22/commands/error_messages.go create mode 100644 cmd/rpcdaemon22/commands/eth_accounts.go create mode 100644 cmd/rpcdaemon22/commands/eth_api.go create mode 100644 cmd/rpcdaemon22/commands/eth_api_test.go create mode 100644 cmd/rpcdaemon22/commands/eth_block.go create mode 100644 cmd/rpcdaemon22/commands/eth_call.go create mode 100644 cmd/rpcdaemon22/commands/eth_call_test.go create mode 100644 
cmd/rpcdaemon22/commands/eth_deprecated.go create mode 100644 cmd/rpcdaemon22/commands/eth_filters.go create mode 100644 cmd/rpcdaemon22/commands/eth_ming_test.go create mode 100644 cmd/rpcdaemon22/commands/eth_mining.go create mode 100644 cmd/rpcdaemon22/commands/eth_receipts.go create mode 100644 cmd/rpcdaemon22/commands/eth_subscribe_test.go create mode 100644 cmd/rpcdaemon22/commands/eth_system.go create mode 100644 cmd/rpcdaemon22/commands/eth_txs.go create mode 100644 cmd/rpcdaemon22/commands/eth_uncles.go create mode 100644 cmd/rpcdaemon22/commands/get_chain_config_test.go create mode 100644 cmd/rpcdaemon22/commands/net_api.go create mode 100644 cmd/rpcdaemon22/commands/parity_api.go create mode 100644 cmd/rpcdaemon22/commands/parity_api_test.go create mode 100644 cmd/rpcdaemon22/commands/rpc_block.go create mode 100644 cmd/rpcdaemon22/commands/send_transaction.go create mode 100644 cmd/rpcdaemon22/commands/send_transaction_test.go create mode 100644 cmd/rpcdaemon22/commands/starknet_accounts.go create mode 100644 cmd/rpcdaemon22/commands/starknet_api.go create mode 100644 cmd/rpcdaemon22/commands/starknet_call.go create mode 100644 cmd/rpcdaemon22/commands/starknet_send_transaction.go create mode 100644 cmd/rpcdaemon22/commands/starknet_send_transaction_test.go create mode 100644 cmd/rpcdaemon22/commands/storage_range.go create mode 100644 cmd/rpcdaemon22/commands/trace_adhoc.go create mode 100644 cmd/rpcdaemon22/commands/trace_adhoc_test.go create mode 100644 cmd/rpcdaemon22/commands/trace_api.go create mode 100644 cmd/rpcdaemon22/commands/trace_filtering.go create mode 100644 cmd/rpcdaemon22/commands/trace_types.go create mode 100644 cmd/rpcdaemon22/commands/tracing.go create mode 100644 cmd/rpcdaemon22/commands/txpool_api.go create mode 100644 cmd/rpcdaemon22/commands/txpool_api_test.go create mode 100644 cmd/rpcdaemon22/commands/validator_set.go create mode 100644 cmd/rpcdaemon22/commands/web3_api.go create mode 100644 cmd/rpcdaemon22/health/check_block.go create mode 100644 cmd/rpcdaemon22/health/check_peers.go create mode 100644 cmd/rpcdaemon22/health/health.go create mode 100644 cmd/rpcdaemon22/health/interfaces.go create mode 100644 cmd/rpcdaemon22/health/parse_api.go create mode 100644 cmd/rpcdaemon22/main.go create mode 100644 cmd/rpcdaemon22/postman/README.md create mode 100644 cmd/rpcdaemon22/postman/RPC_Testing.json create mode 100644 cmd/rpcdaemon22/postman/Trace_Testing.json create mode 100644 cmd/rpcdaemon22/rpcdaemontest/test_util.go create mode 100644 cmd/rpcdaemon22/rpcservices/eth_backend.go create mode 100644 cmd/rpcdaemon22/rpcservices/eth_mining.go create mode 100644 cmd/rpcdaemon22/rpcservices/eth_starknet.go create mode 100644 cmd/rpcdaemon22/rpcservices/eth_txpool.go create mode 100644 cmd/rpcdaemon22/test.http create mode 100644 cmd/rpcdaemon22/testdata/.gitignore create mode 100644 cmd/rpcdaemon22/testdata/sed_file create mode 100644 cmd/rpcdaemon22/testdata/trace_tests create mode 100644 cmd/state/commands/calltracer22.go create mode 100644 cmd/state/commands/erigon22.go create mode 100644 cmd/state/commands/history22.go rename {cmd/rpcdaemon/rpcservices => turbo/rpchelper}/filters.go (98%) rename {cmd/rpcdaemon/rpcservices/rpcinterfaces => turbo/rpchelper}/interface.go (98%) rename {cmd/rpcdaemon/rpcservices => turbo/rpchelper}/logsfilter.go (99%) diff --git a/Makefile b/Makefile index 15176d5aa15..51104b84947 100644 --- a/Makefile +++ b/Makefile @@ -68,6 +68,7 @@ COMMANDS += integration COMMANDS += observer COMMANDS += pics COMMANDS += rpcdaemon 
+COMMANDS += rpcdaemon22 COMMANDS += rpctest COMMANDS += sentry COMMANDS += state diff --git a/cmd/rpcdaemon/cli/config.go b/cmd/rpcdaemon/cli/config.go index fbc721a25d1..a8f5c1cbb9e 100644 --- a/cmd/rpcdaemon/cli/config.go +++ b/cmd/rpcdaemon/cli/config.go @@ -31,7 +31,6 @@ import ( "github.com/ledgerwatch/erigon/cmd/rpcdaemon/cli/httpcfg" "github.com/ledgerwatch/erigon/cmd/rpcdaemon/health" "github.com/ledgerwatch/erigon/cmd/rpcdaemon/rpcservices" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon/rpcservices/rpcinterfaces" "github.com/ledgerwatch/erigon/cmd/utils" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/hexutil" @@ -41,6 +40,7 @@ import ( "github.com/ledgerwatch/erigon/node/nodecfg" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/turbo/rpchelper" "github.com/ledgerwatch/erigon/turbo/services" "github.com/ledgerwatch/erigon/turbo/snapshotsync" "github.com/ledgerwatch/erigon/turbo/snapshotsync/snap" @@ -209,7 +209,7 @@ func checkDbCompatibility(ctx context.Context, db kv.RoDB) error { func EmbeddedServices(ctx context.Context, erigonDB kv.RoDB, stateCacheCfg kvcache.CoherentConfig, blockReader services.FullBlockReader, ethBackendServer remote.ETHBACKENDServer, txPoolServer txpool.TxpoolServer, miningServer txpool.MiningServer, ) ( - eth rpcinterfaces.ApiBackend, txPool txpool.TxpoolClient, mining txpool.MiningClient, starknet *rpcservices.StarknetService, stateCache kvcache.Cache, ff *rpcservices.Filters, err error, + eth rpchelper.ApiBackend, txPool txpool.TxpoolClient, mining txpool.MiningClient, starknet *rpcservices.StarknetService, stateCache kvcache.Cache, ff *rpchelper.Filters, err error, ) { if stateCacheCfg.KeysLimit > 0 { stateCache = kvcache.New(stateCacheCfg) @@ -226,7 +226,7 @@ func EmbeddedServices(ctx context.Context, erigonDB kv.RoDB, stateCacheCfg kvcac eth = rpcservices.NewRemoteBackend(directClient, erigonDB, blockReader) txPool = direct.NewTxPoolClient(txPoolServer) mining = direct.NewMiningClient(miningServer) - ff = rpcservices.New(ctx, eth, txPool, mining, func() {}) + ff = rpchelper.New(ctx, eth, txPool, mining, func() {}) return } @@ -234,10 +234,10 @@ func EmbeddedServices(ctx context.Context, erigonDB kv.RoDB, stateCacheCfg kvcac // `cfg.WithDatadir` (mode when it on 1 machine with Erigon) func RemoteServices(ctx context.Context, cfg httpcfg.HttpCfg, logger log.Logger, rootCancel context.CancelFunc) ( db kv.RoDB, borDb kv.RoDB, - eth rpcinterfaces.ApiBackend, txPool txpool.TxpoolClient, mining txpool.MiningClient, + eth rpchelper.ApiBackend, txPool txpool.TxpoolClient, mining txpool.MiningClient, starknet *rpcservices.StarknetService, stateCache kvcache.Cache, blockReader services.FullBlockReader, - ff *rpcservices.Filters, err error) { + ff *rpchelper.Filters, err error) { if !cfg.WithDatadir && cfg.PrivateApiAddr == "" { return nil, nil, nil, nil, nil, nil, nil, nil, ff, fmt.Errorf("either remote db or local db must be specified") } @@ -419,7 +419,7 @@ func RemoteServices(ctx context.Context, cfg httpcfg.HttpCfg, logger log.Logger, starknet = rpcservices.NewStarknetService(starknetConn) } - ff = rpcservices.New(ctx, eth, txPool, mining, onNewSnapshot) + ff = rpchelper.New(ctx, eth, txPool, mining, onNewSnapshot) return db, borDb, eth, txPool, mining, starknet, stateCache, blockReader, ff, err } diff --git a/cmd/rpcdaemon/commands/admin_api.go b/cmd/rpcdaemon/commands/admin_api.go index 6e6e11b9811..636e1de30c6 100644 --- a/cmd/rpcdaemon/commands/admin_api.go +++ 
b/cmd/rpcdaemon/commands/admin_api.go @@ -5,8 +5,8 @@ import ( "errors" "fmt" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon/rpcservices/rpcinterfaces" "github.com/ledgerwatch/erigon/p2p" + "github.com/ledgerwatch/erigon/turbo/rpchelper" ) // AdminAPI the interface for the admin_* RPC commands. @@ -21,11 +21,11 @@ type AdminAPI interface { // AdminAPIImpl data structure to store things needed for admin_* commands. type AdminAPIImpl struct { - ethBackend rpcinterfaces.ApiBackend + ethBackend rpchelper.ApiBackend } // NewAdminAPI returns AdminAPIImpl instance. -func NewAdminAPI(eth rpcinterfaces.ApiBackend) *AdminAPIImpl { +func NewAdminAPI(eth rpchelper.ApiBackend) *AdminAPIImpl { return &AdminAPIImpl{ ethBackend: eth, } diff --git a/cmd/rpcdaemon/commands/daemon.go b/cmd/rpcdaemon/commands/daemon.go index 004f2d02542..b77afa729b5 100644 --- a/cmd/rpcdaemon/commands/daemon.go +++ b/cmd/rpcdaemon/commands/daemon.go @@ -6,15 +6,14 @@ import ( "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/kvcache" "github.com/ledgerwatch/erigon/cmd/rpcdaemon/cli/httpcfg" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon/rpcservices" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon/rpcservices/rpcinterfaces" "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/turbo/rpchelper" "github.com/ledgerwatch/erigon/turbo/services" ) // APIList describes the list of available RPC apis -func APIList(db kv.RoDB, borDb kv.RoDB, eth rpcinterfaces.ApiBackend, txPool txpool.TxpoolClient, mining txpool.MiningClient, - starknet starknet.CAIROVMClient, filters *rpcservices.Filters, stateCache kvcache.Cache, +func APIList(db kv.RoDB, borDb kv.RoDB, eth rpchelper.ApiBackend, txPool txpool.TxpoolClient, mining txpool.MiningClient, + starknet starknet.CAIROVMClient, filters *rpchelper.Filters, stateCache kvcache.Cache, blockReader services.FullBlockReader, cfg httpcfg.HttpCfg) (list []rpc.API) { base := NewBaseApi(filters, stateCache, blockReader, cfg.WithDatadir) diff --git a/cmd/rpcdaemon/commands/engine_api.go b/cmd/rpcdaemon/commands/engine_api.go index d5ee5076953..1d7984fa91a 100644 --- a/cmd/rpcdaemon/commands/engine_api.go +++ b/cmd/rpcdaemon/commands/engine_api.go @@ -11,10 +11,10 @@ import ( "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" types2 "github.com/ledgerwatch/erigon-lib/gointerfaces/types" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon/rpcservices/rpcinterfaces" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/hexutil" "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/turbo/rpchelper" "github.com/ledgerwatch/log/v3" ) @@ -69,7 +69,7 @@ type EngineAPI interface { type EngineImpl struct { *BaseAPI db kv.RoDB - api rpcinterfaces.ApiBackend + api rpchelper.ApiBackend } func convertPayloadStatus(x *remote.EnginePayloadStatus) map[string]interface{} { @@ -253,7 +253,7 @@ func (e *EngineImpl) ExchangeTransitionConfigurationV1(ctx context.Context, beac } // NewEngineAPI returns EngineImpl instance -func NewEngineAPI(base *BaseAPI, db kv.RoDB, api rpcinterfaces.ApiBackend) *EngineImpl { +func NewEngineAPI(base *BaseAPI, db kv.RoDB, api rpchelper.ApiBackend) *EngineImpl { return &EngineImpl{ BaseAPI: base, db: db, diff --git a/cmd/rpcdaemon/commands/erigon_api.go b/cmd/rpcdaemon/commands/erigon_api.go index 8f644a2587c..f976cf31f05 100644 --- a/cmd/rpcdaemon/commands/erigon_api.go +++ b/cmd/rpcdaemon/commands/erigon_api.go @@ -4,11 +4,11 @@ import ( "context" 
"github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon/rpcservices/rpcinterfaces" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/p2p" "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/turbo/rpchelper" ) // ErigonAPI Erigon specific routines @@ -39,11 +39,11 @@ type ErigonAPI interface { type ErigonImpl struct { *BaseAPI db kv.RoDB - ethBackend rpcinterfaces.ApiBackend + ethBackend rpchelper.ApiBackend } // NewErigonAPI returns ErigonImpl instance -func NewErigonAPI(base *BaseAPI, db kv.RoDB, eth rpcinterfaces.ApiBackend) *ErigonImpl { +func NewErigonAPI(base *BaseAPI, db kv.RoDB, eth rpchelper.ApiBackend) *ErigonImpl { return &ErigonImpl{ BaseAPI: base, db: db, diff --git a/cmd/rpcdaemon/commands/eth_api.go b/cmd/rpcdaemon/commands/eth_api.go index 830506ad2b7..ab06d94ce5a 100644 --- a/cmd/rpcdaemon/commands/eth_api.go +++ b/cmd/rpcdaemon/commands/eth_api.go @@ -11,8 +11,6 @@ import ( "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/kvcache" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon/rpcservices" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon/rpcservices/rpcinterfaces" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/hexutil" "github.com/ledgerwatch/erigon/common/math" @@ -23,6 +21,7 @@ import ( "github.com/ledgerwatch/erigon/internal/ethapi" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/turbo/rpchelper" "github.com/ledgerwatch/erigon/turbo/services" ) @@ -96,7 +95,7 @@ type EthAPI interface { type BaseAPI struct { stateCache kvcache.Cache // thread-safe blocksLRU *lru.Cache // thread-safe - filters *rpcservices.Filters + filters *rpchelper.Filters _chainConfig *params.ChainConfig _genesis *types.Block _genesisLock sync.RWMutex @@ -106,7 +105,7 @@ type BaseAPI struct { TevmEnabled bool // experiment } -func NewBaseApi(f *rpcservices.Filters, stateCache kvcache.Cache, blockReader services.FullBlockReader, singleNodeMode bool) *BaseAPI { +func NewBaseApi(f *rpchelper.Filters, stateCache kvcache.Cache, blockReader services.FullBlockReader, singleNodeMode bool) *BaseAPI { blocksLRUSize := 128 // ~32Mb if !singleNodeMode { blocksLRUSize = 512 @@ -230,7 +229,7 @@ func (api *BaseAPI) blockByRPCNumber(number rpc.BlockNumber, tx kv.Tx) (*types.B // APIImpl is implementation of the EthAPI interface based on remote Db access type APIImpl struct { *BaseAPI - ethBackend rpcinterfaces.ApiBackend + ethBackend rpchelper.ApiBackend txPool txpool.TxpoolClient mining txpool.MiningClient db kv.RoDB @@ -238,7 +237,7 @@ type APIImpl struct { } // NewEthAPI returns APIImpl instance -func NewEthAPI(base *BaseAPI, db kv.RoDB, eth rpcinterfaces.ApiBackend, txPool txpool.TxpoolClient, mining txpool.MiningClient, gascap uint64) *APIImpl { +func NewEthAPI(base *BaseAPI, db kv.RoDB, eth rpchelper.ApiBackend, txPool txpool.TxpoolClient, mining txpool.MiningClient, gascap uint64) *APIImpl { if gascap == 0 { gascap = uint64(math.MaxUint64 / 2) } diff --git a/cmd/rpcdaemon/commands/eth_filters.go b/cmd/rpcdaemon/commands/eth_filters.go index 596d0ee7ed8..f99ced97f1c 100644 --- a/cmd/rpcdaemon/commands/eth_filters.go +++ b/cmd/rpcdaemon/commands/eth_filters.go @@ -4,13 +4,13 @@ import ( "context" "fmt" - filters2 "github.com/ledgerwatch/erigon/cmd/rpcdaemon/rpcservices" "github.com/ledgerwatch/erigon/common" 
"github.com/ledgerwatch/erigon/common/debug" "github.com/ledgerwatch/erigon/common/hexutil" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/eth/filters" "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/turbo/rpchelper" "github.com/ledgerwatch/log/v3" ) @@ -90,11 +90,11 @@ func (api *APIImpl) UninstallFilter(_ context.Context, index string) (bool, erro if len(index) >= 2 && index[0] == '0' && (index[1] == 'x' || index[1] == 'X') { index = index[2:] } - isDeleted := api.filters.UnsubscribePendingBlock(filters2.PendingBlockSubID(index)) || - api.filters.UnsubscribePendingTxs(filters2.PendingTxsSubID(index)) + isDeleted := api.filters.UnsubscribePendingBlock(rpchelper.PendingBlockSubID(index)) || + api.filters.UnsubscribePendingTxs(rpchelper.PendingTxsSubID(index)) id, err := hexutil.DecodeUint64(index) if err == nil { - return isDeleted || api.filters.UnsubscribeLogs(filters2.LogsSubID(id)), nil + return isDeleted || api.filters.UnsubscribeLogs(rpchelper.LogsSubID(id)), nil } } @@ -112,13 +112,13 @@ func (api *APIImpl) GetFilterChanges(_ context.Context, index string) ([]interfa if len(index) >= 2 && index[0] == '0' && (index[1] == 'x' || index[1] == 'X') { index = index[2:] } - if blocks, ok := api.filters.ReadPendingBlocks(filters2.PendingBlockSubID(index)); ok { + if blocks, ok := api.filters.ReadPendingBlocks(rpchelper.PendingBlockSubID(index)); ok { for _, v := range blocks { stub = append(stub, v.Hash()) } return stub, nil } - if txs, ok := api.filters.ReadPendingTxs(filters2.PendingTxsSubID(index)); ok { + if txs, ok := api.filters.ReadPendingTxs(rpchelper.PendingTxsSubID(index)); ok { for _, v := range txs { for _, tx := range v { stub = append(stub, tx.Hash()) @@ -130,7 +130,7 @@ func (api *APIImpl) GetFilterChanges(_ context.Context, index string) ([]interfa if err != nil { return stub, fmt.Errorf("eth_getFilterChanges, wrong index: %w", err) } - if logs, ok := api.filters.ReadLogs(filters2.LogsSubID(id)); ok { + if logs, ok := api.filters.ReadLogs(rpchelper.LogsSubID(id)); ok { for _, v := range logs { stub = append(stub, v) } diff --git a/cmd/rpcdaemon/commands/eth_ming_test.go b/cmd/rpcdaemon/commands/eth_ming_test.go index bf6919d89b2..6987ee4ca1b 100644 --- a/cmd/rpcdaemon/commands/eth_ming_test.go +++ b/cmd/rpcdaemon/commands/eth_ming_test.go @@ -8,9 +8,9 @@ import ( "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" "github.com/ledgerwatch/erigon-lib/kv/kvcache" "github.com/ledgerwatch/erigon/cmd/rpcdaemon/rpcdaemontest" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon/rpcservices" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/rlp" + "github.com/ledgerwatch/erigon/turbo/rpchelper" "github.com/ledgerwatch/erigon/turbo/snapshotsync" "github.com/ledgerwatch/erigon/turbo/stages" "github.com/stretchr/testify/require" @@ -19,7 +19,7 @@ import ( func TestPendingBlock(t *testing.T) { ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, stages.Mock(t)) mining := txpool.NewMiningClient(conn) - ff := rpcservices.New(ctx, nil, nil, mining, func() {}) + ff := rpchelper.New(ctx, nil, nil, mining, func() {}) stateCache := kvcache.New(kvcache.DefaultCoherentConfig) api := NewEthAPI(NewBaseApi(ff, stateCache, snapshotsync.NewBlockReader(), false), nil, nil, nil, mining, 5000000) expect := uint64(12345) @@ -44,7 +44,7 @@ func TestPendingBlock(t *testing.T) { func TestPendingLogs(t *testing.T) { ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, stages.Mock(t)) mining := txpool.NewMiningClient(conn) - ff := 
rpcservices.New(ctx, nil, nil, mining, func() {}) + ff := rpchelper.New(ctx, nil, nil, mining, func() {}) expect := []byte{211} ch := make(chan types.Logs, 1) diff --git a/cmd/rpcdaemon/commands/eth_subscribe_test.go b/cmd/rpcdaemon/commands/eth_subscribe_test.go index 02002d9bbc1..25007ffeb4b 100644 --- a/cmd/rpcdaemon/commands/eth_subscribe_test.go +++ b/cmd/rpcdaemon/commands/eth_subscribe_test.go @@ -12,6 +12,7 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/eth/protocols/eth" "github.com/ledgerwatch/erigon/rlp" + "github.com/ledgerwatch/erigon/turbo/rpchelper" "github.com/ledgerwatch/erigon/turbo/snapshotsync" "github.com/ledgerwatch/erigon/turbo/stages" "github.com/stretchr/testify/require" @@ -38,7 +39,7 @@ func TestEthSubscribe(t *testing.T) { ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, m) backend := rpcservices.NewRemoteBackend(remote.NewETHBACKENDClient(conn), m.DB, snapshotsync.NewBlockReader()) - ff := rpcservices.New(ctx, backend, nil, nil, func() {}) + ff := rpchelper.New(ctx, backend, nil, nil, func() {}) newHeads := make(chan *types.Header) defer close(newHeads) diff --git a/cmd/rpcdaemon/commands/net_api.go b/cmd/rpcdaemon/commands/net_api.go index c9ea34fefdd..2a094aa2ee7 100644 --- a/cmd/rpcdaemon/commands/net_api.go +++ b/cmd/rpcdaemon/commands/net_api.go @@ -5,8 +5,8 @@ import ( "fmt" "strconv" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon/rpcservices/rpcinterfaces" "github.com/ledgerwatch/erigon/common/hexutil" + "github.com/ledgerwatch/erigon/turbo/rpchelper" ) // NetAPI the interface for the net_ RPC commands @@ -18,11 +18,11 @@ type NetAPI interface { // NetAPIImpl data structure to store things needed for net_ commands type NetAPIImpl struct { - ethBackend rpcinterfaces.ApiBackend + ethBackend rpchelper.ApiBackend } // NewNetAPIImpl returns NetAPIImplImpl instance -func NewNetAPIImpl(eth rpcinterfaces.ApiBackend) *NetAPIImpl { +func NewNetAPIImpl(eth rpchelper.ApiBackend) *NetAPIImpl { return &NetAPIImpl{ ethBackend: eth, } diff --git a/cmd/rpcdaemon/commands/send_transaction_test.go b/cmd/rpcdaemon/commands/send_transaction_test.go index bc65da77ad3..0e848258126 100644 --- a/cmd/rpcdaemon/commands/send_transaction_test.go +++ b/cmd/rpcdaemon/commands/send_transaction_test.go @@ -12,7 +12,6 @@ import ( "github.com/ledgerwatch/erigon-lib/kv/kvcache" "github.com/ledgerwatch/erigon/cmd/rpcdaemon/commands" "github.com/ledgerwatch/erigon/cmd/rpcdaemon/rpcdaemontest" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon/rpcservices" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/u256" "github.com/ledgerwatch/erigon/core" @@ -20,6 +19,7 @@ import ( "github.com/ledgerwatch/erigon/eth/protocols/eth" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rlp" + "github.com/ledgerwatch/erigon/turbo/rpchelper" "github.com/ledgerwatch/erigon/turbo/snapshotsync" "github.com/ledgerwatch/erigon/turbo/stages" "github.com/stretchr/testify/require" @@ -70,7 +70,7 @@ func TestSendRawTransaction(t *testing.T) { ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, m) txPool := txpool.NewTxpoolClient(conn) - ff := rpcservices.New(ctx, nil, txPool, txpool.NewMiningClient(conn), func() {}) + ff := rpchelper.New(ctx, nil, txPool, txpool.NewMiningClient(conn), func() {}) stateCache := kvcache.New(kvcache.DefaultCoherentConfig) api := commands.NewEthAPI(commands.NewBaseApi(ff, stateCache, snapshotsync.NewBlockReader(), false), m.DB, nil, txPool, nil, 5000000) diff --git 
a/cmd/rpcdaemon/commands/starknet_send_transaction_test.go b/cmd/rpcdaemon/commands/starknet_send_transaction_test.go index 5ab2791bd67..8ab1a19497a 100644 --- a/cmd/rpcdaemon/commands/starknet_send_transaction_test.go +++ b/cmd/rpcdaemon/commands/starknet_send_transaction_test.go @@ -10,10 +10,10 @@ import ( "github.com/ledgerwatch/erigon-lib/kv/kvcache" "github.com/ledgerwatch/erigon/cmd/rpcdaemon/commands" "github.com/ledgerwatch/erigon/cmd/rpcdaemon/rpcdaemontest" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon/rpcservices" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/hexutil" "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/turbo/rpchelper" "github.com/ledgerwatch/erigon/turbo/snapshotsync" "github.com/ledgerwatch/erigon/turbo/stages" "github.com/stretchr/testify/require" @@ -33,7 +33,7 @@ func TestErrorStarknetSendRawTransaction(t *testing.T) { ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, m) txPool := txpool.NewTxpoolClient(conn) starknetClient := starknet.NewCAIROVMClient(conn) - ff := rpcservices.New(ctx, nil, txPool, txpool.NewMiningClient(conn), func() {}) + ff := rpchelper.New(ctx, nil, txPool, txpool.NewMiningClient(conn), func() {}) stateCache := kvcache.New(kvcache.DefaultCoherentConfig) for _, tt := range cases { diff --git a/cmd/rpcdaemon/commands/txpool_api_test.go b/cmd/rpcdaemon/commands/txpool_api_test.go index 972f56c5be6..9fbe268e5d1 100644 --- a/cmd/rpcdaemon/commands/txpool_api_test.go +++ b/cmd/rpcdaemon/commands/txpool_api_test.go @@ -10,12 +10,12 @@ import ( txPoolProto "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" "github.com/ledgerwatch/erigon-lib/kv/kvcache" "github.com/ledgerwatch/erigon/cmd/rpcdaemon/rpcdaemontest" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon/rpcservices" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/hexutil" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/turbo/rpchelper" "github.com/ledgerwatch/erigon/turbo/snapshotsync" "github.com/ledgerwatch/erigon/turbo/stages" "github.com/stretchr/testify/require" @@ -32,7 +32,7 @@ func TestTxPoolContent(t *testing.T) { ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, m) txPool := txpool.NewTxpoolClient(conn) - ff := rpcservices.New(ctx, nil, txPool, txpool.NewMiningClient(conn), func() {}) + ff := rpchelper.New(ctx, nil, txPool, txpool.NewMiningClient(conn), func() {}) api := NewTxPoolAPI(NewBaseApi(ff, kvcache.New(kvcache.DefaultCoherentConfig), snapshotsync.NewBlockReader(), false), m.DB, txPool) expectValue := uint64(1234) diff --git a/cmd/rpcdaemon/commands/web3_api.go b/cmd/rpcdaemon/commands/web3_api.go index 7a13f9a8fe1..c35f62c632e 100644 --- a/cmd/rpcdaemon/commands/web3_api.go +++ b/cmd/rpcdaemon/commands/web3_api.go @@ -3,9 +3,9 @@ package commands import ( "context" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon/rpcservices/rpcinterfaces" "github.com/ledgerwatch/erigon/common/hexutil" "github.com/ledgerwatch/erigon/crypto" + "github.com/ledgerwatch/erigon/turbo/rpchelper" ) // Web3API provides interfaces for the web3_ RPC commands @@ -16,11 +16,11 @@ type Web3API interface { type Web3APIImpl struct { *BaseAPI - ethBackend rpcinterfaces.ApiBackend + ethBackend rpchelper.ApiBackend } // NewWeb3APIImpl returns Web3APIImpl instance -func NewWeb3APIImpl(ethBackend rpcinterfaces.ApiBackend) *Web3APIImpl { +func NewWeb3APIImpl(ethBackend rpchelper.ApiBackend) *Web3APIImpl { return 
&Web3APIImpl{ BaseAPI: &BaseAPI{}, ethBackend: ethBackend, diff --git a/cmd/rpcdaemon22/.gitignore b/cmd/rpcdaemon22/.gitignore new file mode 100644 index 00000000000..e69de29bb2d diff --git a/cmd/rpcdaemon22/README.md b/cmd/rpcdaemon22/README.md new file mode 100644 index 00000000000..50ad1581831 --- /dev/null +++ b/cmd/rpcdaemon22/README.md @@ -0,0 +1,485 @@ +- [Introduction](#introduction) +- [Getting Started](#getting-started) + * [Running locally](#running-locally) + * [Running remotely](#running-remotely) + * [Healthcheck](#healthcheck) + * [Testing](#testing) +- [FAQ](#faq) + * [Relations between prune options and rpc methods](#relations-between-prune-options-and-rpc-method) + * [RPC Implementation Status](#rpc-implementation-status) + * [Securing the communication between RPC daemon and Erigon instance via TLS and authentication](#securing-the-communication-between-rpc-daemon-and-erigon-instance-via-tls-and-authentication) + * [Ethstats](#ethstats) + * [Allowing only specific methods (Allowlist)](#allowing-only-specific-methods--allowlist-) + * [Trace transactions progress](#trace-transactions-progress) + * [Clients getting timeout, but server load is low](#clients-getting-timeout--but-server-load-is-low) + * [Server load too high](#server-load-too-high) + * [Faster Batch requests](#faster-batch-requests) +- [For Developers](#for-developers) + * [Code generation](#code-generation) + +## Introduction + +Erigon's `rpcdaemon` runs in its own seperate process. + +This brings many benefits including easier development, the ability to run multiple daemons at once, and the ability to +run the daemon remotely. It is possible to run the daemon locally as well (read-only) if both processes have access to +the data folder. + +## Getting Started + +The `rpcdaemon` gets built as part of the main `erigon` build process, but you can build it directly with this command: + +```[bash] +make rpcdaemon +``` + +### Running locally + +Run `rpcdaemon` on same computer with Erigon. It's default option because it using Shared Memory access to Erigon's db - +it's much faster than TCP access. Provide both `--datadir` and `--private.api.addr` flags: + +```[bash] +make erigon +./build/bin/erigon --datadir= --private.api.addr=localhost:9090 +make rpcdaemon +./build/bin/rpcdaemon --datadir= --txpool.api.addr=localhost:9090 --private.api.addr=localhost:9090 --http.api=eth,erigon,web3,net,debug,trace,txpool +``` + +Note that we've also specified which RPC namespaces to enable in the above command by `--http.api` flag. + +### Running remotely + +To start the daemon remotely - just don't set `--datadir` flag: + +```[bash] +make erigon +./build/bin/erigon --datadir= --private.api.addr=0.0.0.0:9090 +make rpcdaemon +./build/bin/rpcdaemon --private.api.addr=:9090 --txpool.api.addr=localhost:9090 --http.api=eth,erigon,web3,net,debug,trace,txpool +``` + +The daemon should respond with something like: + +```[bash] +INFO [date-time] HTTP endpoint opened url=localhost:8545... +``` + +When RPC daemon runs remotely, by default it maintains a state cache, which is updated every time when Erigon imports a +new block. When state cache is reasonably warm, it allows such remote RPC daemon to execute queries related to `latest` +block (i.e. to current state) with comparable performance to a local RPC daemon +(around 2x slower vs 10x slower without state cache). Since there can be multiple such RPC daemons per one Erigon node, +it may scale well for some workloads that are heavy on the current state queries. 
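+
+For example, a sketch of two such remote daemons sharing one Erigon node (the host name and ports are placeholders; `--http.port` selects the listening port of each daemon):
+
+```[bash]
+# illustrative only: two RPC daemons attached to the same Erigon private API
+./build/bin/rpcdaemon --private.api.addr=erigon-host:9090 --txpool.api.addr=erigon-host:9090 --http.port=8545 --http.api=eth,erigon,web3,net
+./build/bin/rpcdaemon --private.api.addr=erigon-host:9090 --txpool.api.addr=erigon-host:9090 --http.port=8546 --http.api=eth,erigon,web3,net
+```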
+ +### Healthcheck + +Running the daemon also opens an endpoint `/health` that provides a basic health check. + +If the health check is successful it returns 200 OK. + +If the health check fails it returns 500 Internal Server Error. + +Configuration of the health check is sent as POST body of the method. + +``` +{ + "min_peer_count": , + "known_block": +} +``` + +Not adding a check disables that. + +**`min_peer_count`** -- checks for mimimum of healthy node peers. Requires +`net` namespace to be listed in `http.api`. + +**`known_block`** -- sets up the block that node has to know about. Requires +`eth` namespace to be listed in `http.api`. + +Example request +```http POST http://localhost:8545/health --raw '{"min_peer_count": 3, "known_block": "0x1F"}'``` +Example response + +``` +{ + "check_block": "HEALTHY", + "healthcheck_query": "HEALTHY", + "min_peer_count": "HEALTHY" +} +``` + +### Testing + +By default, the `rpcdaemon` serves data from `localhost:8545`. You may send `curl` commands to see if things are +working. + +Try `eth_blockNumber` for example. In a third terminal window enter this command: + +```[bash] +curl -X POST -H "Content-Type: application/json" --data '{"jsonrpc": "2.0", "method": "eth_blockNumber", "params": [], "id":1}' localhost:8545 +``` + +This should return something along the lines of this (depending on how far your Erigon node has synced): + +```[bash] +{ + "jsonrpc": "2.0", + "id": 1, + "result":" 0xa5b9ba" +} +``` + +Also, there +are [extensive instructions for using Postman](https://github.com/ledgerwatch/erigon/wiki/Using-Postman-to-Test-TurboGeth-RPC) +to test the RPC. + +## FAQ + +### Relations between prune options and RPC methods + +Next options available (by `--prune` flag): + +``` +* h - prune history (ChangeSets, HistoryIndices - used to access historical state, like eth_getStorageAt, eth_getBalanceAt, debug_traceTransaction, trace_block, trace_transaction, etc.) +* r - prune receipts (Receipts, Logs, LogTopicIndex, LogAddressIndex - used by eth_getLogs and similar RPC methods) +* t - prune tx lookup (used to get transaction by hash) +* c - prune call traces (used by trace_filter method) +``` + +By default data pruned after 90K blocks, can change it by flags like `--prune.history.after=100_000` + +Some methods, if not found historical data in DB, can fallback to old blocks re-execution - but it require `h`. + +### RPC Implementation Status + +Label "remote" means: `--private.api.addr` flag is required. + +The following table shows the current implementation status of Erigon's RPC daemon. + +| Command | Avail | Notes | +| ------------------------------------------ | ------- | ------------------------------------------ | +| web3_clientVersion | Yes | | +| web3_sha3 | Yes | | +| | | | +| net_listening | HC | (`remote` hard coded returns true) | +| net_peerCount | Limited | internal sentries only | +| net_version | Yes | `remote`. 
| +| | | | +| eth_blockNumber | Yes | | +| eth_chainID/eth_chainId | Yes | | +| eth_protocolVersion | Yes | | +| eth_syncing | Yes | | +| eth_gasPrice | Yes | | +| eth_maxPriorityFeePerGas | Yes | | +| eth_feeHistory | Yes | | +| | | | +| eth_getBlockByHash | Yes | | +| eth_getBlockByNumber | Yes | | +| eth_getBlockTransactionCountByHash | Yes | | +| eth_getBlockTransactionCountByNumber | Yes | | +| eth_getUncleByBlockHashAndIndex | Yes | | +| eth_getUncleByBlockNumberAndIndex | Yes | | +| eth_getUncleCountByBlockHash | Yes | | +| eth_getUncleCountByBlockNumber | Yes | | +| | | | +| eth_getTransactionByHash | Yes | | +| eth_getRawTransactionByHash | Yes | | +| eth_getTransactionByBlockHashAndIndex | Yes | | +| eth_retRawTransactionByBlockHashAndIndex | Yes | | +| eth_getTransactionByBlockNumberAndIndex | Yes | | +| eth_retRawTransactionByBlockNumberAndIndex | Yes | | +| eth_getTransactionReceipt | Yes | | +| eth_getBlockReceipts | Yes | | +| | | | +| eth_estimateGas | Yes | | +| eth_getBalance | Yes | | +| eth_getCode | Yes | | +| eth_getTransactionCount | Yes | | +| eth_getStorageAt | Yes | | +| eth_call | Yes | | +| eth_callBundle | Yes | | +| eth_createAccessList | Yes | +| | | | +| eth_newFilter | - | not yet implemented | +| eth_newBlockFilter | - | not yet implemented | +| eth_newPendingTransactionFilter | - | not yet implemented | +| eth_getFilterChanges | - | not yet implemented | +| eth_uninstallFilter | - | not yet implemented | +| eth_getLogs | Yes | | +| | | | +| eth_accounts | No | deprecated | +| eth_sendRawTransaction | Yes | `remote`. | +| eth_sendTransaction | - | not yet implemented | +| eth_sign | No | deprecated | +| eth_signTransaction | - | not yet implemented | +| eth_signTypedData | - | ???? | +| | | | +| eth_getProof | - | not yet implemented | +| | | | +| eth_mining | Yes | returns true if --mine flag provided | +| eth_coinbase | Yes | | +| eth_hashrate | Yes | | +| eth_submitHashrate | Yes | | +| eth_getWork | Yes | | +| eth_submitWork | Yes | | +| | | | +| eth_subscribe | Limited | Websock Only - newHeads, | +| | | newPendingTransactions | +| eth_unsubscribe | Yes | Websock Only | +| | | | +| engine_newPayloadV1 | Yes | | +| engine_forkchoiceUpdatedV1 | Yes | | +| engine_getPayloadV1 | Yes | | +| engine_exchangeTransitionConfigurationV1 | Yes | | +| | | | +| debug_accountRange | Yes | Private Erigon debug module | +| debug_accountAt | Yes | Private Erigon debug module | +| debug_getModifiedAccountsByNumber | Yes | | +| debug_getModifiedAccountsByHash | Yes | | +| debug_storageRangeAt | Yes | | +| debug_traceBlockByHash | Yes | Streaming (can handle huge results) | +| debug_traceBlockByNumber | Yes | Streaming (can handle huge results) | +| debug_traceTransaction | Yes | Streaming (can handle huge results) | +| debug_traceCall | Yes | Streaming (can handle huge results) | +| | | | +| trace_call | Yes | | +| trace_callMany | Yes | | +| trace_rawTransaction | - | not yet implemented (come help!) | +| trace_replayBlockTransactions | yes | stateDiff only (come help!) | +| trace_replayTransaction | yes | stateDiff only (come help!) 
| +| trace_block | Yes | | +| trace_filter | Yes | no pagination, but streaming | +| trace_get | Yes | | +| trace_transaction | Yes | | +| | | | +| txpool_content | Yes | `remote` | +| txpool_status | Yes | `remote` | +| | | | +| eth_getCompilers | No | deprecated | +| eth_compileLLL | No | deprecated | +| eth_compileSolidity | No | deprecated | +| eth_compileSerpent | No | deprecated | +| | | | +| db_putString | No | deprecated | +| db_getString | No | deprecated | +| db_putHex | No | deprecated | +| db_getHex | No | deprecated | +| | | | +| erigon_getHeaderByHash | Yes | Erigon only | +| erigon_getHeaderByNumber | Yes | Erigon only | +| erigon_getLogsByHash | Yes | Erigon only | +| erigon_forks | Yes | Erigon only | +| erigon_issuance | Yes | Erigon only | +| erigon_GetBlockByTimestamp | Yes | Erigon only | +| | | | +| starknet_call | Yes | Starknet only | +| | | | +| bor_getSnapshot | Yes | Bor only | +| bor_getAuthor | Yes | Bor only | +| bor_getSnapshotAtHash | Yes | Bor only | +| bor_getSigners | Yes | Bor only | +| bor_getSignersAtHash | Yes | Bor only | +| bor_getCurrentProposer | Yes | Bor only | +| bor_getCurrentValidators | Yes | Bor only | +| bor_getRootHash | Yes | Bor only | + +This table is constantly updated. Please visit again. + +### Securing the communication between RPC daemon and Erigon instance via TLS and authentication + +In some cases, it is useful to run Erigon nodes in a different network (for example, in a Public cloud), but RPC daemon +locally. To ensure the integrity of communication and access control to the Erigon node, TLS authentication can be +enabled. On the high level, the process consists of these steps (this process needs to be done for any "cluster" of +Erigon and RPC daemon nodes that are supposed to work together): + +1. Generate key pair for the Certificate Authority (CA). The private key of CA will be used to authorise new Erigon + instances as well as new RPC daemon instances, so that they can mutually authenticate. +2. Create CA certificate file that needs to be deployed on any Erigon instance and any RPC daemon. This CA cerf file is + used as a "root of trust", whatever is in it, will be trusted by the participants when they authenticate their + counterparts. +3. For each Erigon instance and each RPC daemon instance, generate a key pair. If you are lazy, you can generate one + pair for all Erigon nodes, and one pair for all RPC daemons, and copy these keys around. +4. Using the CA private key, create cerificate file for each public key generated on the previous step. This + effectively "inducts" these keys into the "cluster of trust". +5. On each instance, deploy 3 files - CA certificate, instance key, and certificate signed by CA for this instance key. + +Following is the detailed description of how it can be done using `openssl` suite of tools. + +Generate CA key pair using Elliptic Curve (as opposed to RSA). The generated CA key will be in the file `CA-key.pem`. +Access to this file will allow anyone to later include any new instance key pair into the "cluster of trust", so keep it +secure. + +``` +openssl ecparam -name prime256v1 -genkey -noout -out CA-key.pem +``` + +Create CA self-signed certificate (this command will ask questions, answers aren't important for now). 
The file created +by this command is `CA-cert.pem` + +``` +openssl req -x509 -new -nodes -key CA-key.pem -sha256 -days 3650 -out CA-cert.pem +``` + +For Erigon node, generate a key pair: + +``` +openssl ecparam -name prime256v1 -genkey -noout -out erigon-key.pem +``` + +Also, generate one for the RPC daemon: + +``` +openssl ecparam -name prime256v1 -genkey -noout -out RPC-key.pem +``` + +Now create certificate signing request for Erigon key pair: + +``` +openssl req -new -key erigon-key.pem -out erigon.csr +``` + +And from this request, produce the certificate (signed by CA), proving that this key is now part of the "cluster of +trust" + +``` +openssl x509 -req -in erigon.csr -CA CA-cert.pem -CAkey CA-key.pem -CAcreateserial -out erigon.crt -days 3650 -sha256 +``` + +Then, produce the certificate signing request for RPC daemon key pair: + +``` +openssl req -new -key RPC-key.pem -out RPC.csr +``` + +And from this request, produce the certificate (signed by CA), proving that this key is now part of the "cluster of +trust" + +``` +openssl x509 -req -in RPC.csr -CA CA-cert.pem -CAkey CA-key.pem -CAcreateserial -out RPC.crt -days 3650 -sha256 +``` + +When this is all done, these three files need to be placed on the machine where Erigon is running: `CA-cert.pem` +, `erigon-key.pem`, `erigon.crt`. And Erigon needs to be run with these extra options: + +``` +--tls --tls.cacert CA-cert.pem --tls.key erigon-key.pem --tls.cert erigon.crt +``` + +On the RPC daemon machine, these three files need to be placed: `CA-cert.pem`, `RPC-key.pem`, and `RPC.crt`. And RPC +daemon needs to be started with these extra options: + +``` +--tls.key RPC-key.pem --tls.cacert CA-cert.pem --tls.cert RPC.crt +``` + +**WARNING** Normally, the "client side" (which in our case is RPC daemon), verifies that the host name of the server +matches the "Common Name" attribute of the "server" cerificate. At this stage, this verification is turned off, and it +will be turned on again once we have updated the instruction above on how to properly generate cerificates with "Common +Name". + +When running Erigon instance in the Google Cloud, for example, you need to specify the **Internal IP** in +the `--private.api.addr` option. And, you will need to open the firewall on the port you are using, to that connection +to the Erigon instances can be made. + +### Ethstats + +This version of the RPC daemon is compatible with [ethstats-client](https://github.com/goerli/ethstats-client). + +To run ethstats, run the RPC daemon remotely and open some of the APIs. + +`./build/bin/rpcdaemon --private.api.addr=localhost:9090 --http.api=net,eth,web3` + +Then update your `app.json` for ethstats-client like that: + +```json +[ + { + "name": "ethstats", + "script": "app.js", + "log_date_format": "YYYY-MM-DD HH:mm Z", + "merge_logs": false, + "watch": false, + "max_restarts": 10, + "exec_interpreter": "node", + "exec_mode": "fork_mode", + "env": { + "NODE_ENV": "production", + "RPC_HOST": "localhost", + "RPC_PORT": "8545", + "LISTENING_PORT": "30303", + "INSTANCE_NAME": "Erigon node", + "CONTACT_DETAILS": , + "WS_SERVER": "wss://ethstats.net/api", + "WS_SECRET": , + "VERBOSITY": 2 + } + } +] +``` + +Run ethstats-client through pm2 as usual. 
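+
+For example (an illustrative invocation, assuming pm2 is installed and the `app.json` above is saved in the ethstats-client directory; the app name matches the `"name"` field):
+
+```[bash]
+# start ethstats-client via pm2 and follow its logs
+pm2 start app.json
+pm2 logs ethstats
+```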
+
+You will see these warnings in the RPC daemon output, but they are expected:
+
+```
+WARN [11-05|09:03:47.911] Served conn=127.0.0.1:59753 method=eth_newBlockFilter reqid=5 t="21.194µs" err="the method eth_newBlockFilter does not exist/is not available"
+WARN [11-05|09:03:47.911] Served conn=127.0.0.1:59754 method=eth_newPendingTransactionFilter reqid=6 t="9.053µs" err="the method eth_newPendingTransactionFilter does not exist/is not available"
+```
+
+### Allowing only specific methods (Allowlist)
+
+In some cases you might want to allow only certain methods in the namespaces and hide others. That is possible
+with the `rpc.accessList` flag.
+
+1. Create a file, say, `rules.json`
+
+2. Add the following content
+
+```json
+{
+  "allow": [
+    "net_version",
+    "web3_eth_getBlockByHash"
+  ]
+}
+```
+
+3. Provide this file to the rpcdaemon using the `--rpc.accessList` flag
+
+```
+> rpcdaemon --private.api.addr=localhost:9090 --http.api=eth,debug,net,web3 --rpc.accessList=rules.json
+```
+
+Now only these two methods are available.
+
+### Clients getting timeout, but server load is low
+
+In this case, increase the default rate limit - the number of requests the server handles simultaneously; requests over
+this limit will wait. Increase it if your 'hot data' is small, if you have plenty of RAM, or if you see "request timeout"
+errors while server load is low.
+
+```
+./build/bin/erigon --private.api.addr=localhost:9090 --private.api.ratelimit=1024
+```
+
+### Server load too high
+
+Reduce `--private.api.ratelimit`
+
+### Read DB directly without Json-RPC/Graphql
+
+[./../../docs/programmers_guide/db_faq.md](./../../docs/programmers_guide/db_faq.md)
+
+### Faster Batch requests
+
+Currently, batch requests spawn multiple goroutines and process all sub-requests in parallel. To limit the impact of one
+huge batch on other users, the flag `--rpc.batch.concurrency` (default: 2) was added. Increase it to process large batches
+faster (an illustrative batch request is shown after the Code generation section below).
+
+Known Issue: if at least one request is "streamable" (has a parameter of type *jsoniter.Stream), then the whole batch will be
+processed sequentially (on one goroutine).
+
+## For Developers
+
+### Code generation
+
+`go.mod` stores the right versions of the generators; use `make grpc` to install them and generate code (it also installs protoc
+into the ./build/bin folder).
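+
+To make the batch behaviour described under "Faster Batch requests" concrete: a batch is simply a JSON array of requests sent in a single HTTP call. The endpoint, port and methods below are only examples:
+
+```[bash]
+curl -X POST -H "Content-Type: application/json" \
+  --data '[{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1},{"jsonrpc":"2.0","method":"net_version","params":[],"id":2}]' \
+  localhost:8545
+```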
diff --git a/cmd/rpcdaemon22/cli/config.go b/cmd/rpcdaemon22/cli/config.go new file mode 100644 index 00000000000..f82aed55956 --- /dev/null +++ b/cmd/rpcdaemon22/cli/config.go @@ -0,0 +1,647 @@ +package cli + +import ( + "context" + "crypto/rand" + "encoding/binary" + "errors" + "fmt" + "net" + "net/http" + "os" + "path/filepath" + "runtime" + "strings" + "time" + + "github.com/ledgerwatch/erigon/internal/debug" + "github.com/ledgerwatch/erigon/node/nodecfg/datadir" + "github.com/ledgerwatch/erigon/rpc/rpccfg" + + "github.com/ledgerwatch/erigon-lib/direct" + "github.com/ledgerwatch/erigon-lib/gointerfaces" + "github.com/ledgerwatch/erigon-lib/gointerfaces/grpcutil" + "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" + "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/kvcache" + kv2 "github.com/ledgerwatch/erigon-lib/kv/mdbx" + "github.com/ledgerwatch/erigon-lib/kv/remotedb" + "github.com/ledgerwatch/erigon-lib/kv/remotedbserver" + "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/cli/httpcfg" + "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/health" + "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/rpcservices" + "github.com/ledgerwatch/erigon/cmd/utils" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/hexutil" + "github.com/ledgerwatch/erigon/common/paths" + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/node" + "github.com/ledgerwatch/erigon/node/nodecfg" + "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/turbo/rpchelper" + "github.com/ledgerwatch/erigon/turbo/services" + "github.com/ledgerwatch/erigon/turbo/snapshotsync" + "github.com/ledgerwatch/erigon/turbo/snapshotsync/snap" + "github.com/ledgerwatch/log/v3" + "github.com/spf13/cobra" + "google.golang.org/grpc" + grpcHealth "google.golang.org/grpc/health" + "google.golang.org/grpc/health/grpc_health_v1" +) + +var rootCmd = &cobra.Command{ + Use: "rpcdaemon", + Short: "rpcdaemon is JSON RPC server that connects to Erigon node for remote DB access", +} + +const JwtDefaultFile = "jwt.hex" + +func RootCommand() (*cobra.Command, *httpcfg.HttpCfg) { + utils.CobraFlags(rootCmd, append(debug.Flags, utils.MetricFlags...)) + + cfg := &httpcfg.HttpCfg{StateCache: kvcache.DefaultCoherentConfig} + rootCmd.PersistentFlags().StringVar(&cfg.PrivateApiAddr, "private.api.addr", "127.0.0.1:9090", "private api network address, for example: 127.0.0.1:9090") + rootCmd.PersistentFlags().StringVar(&cfg.DataDir, "datadir", "", "path to Erigon working directory") + rootCmd.PersistentFlags().StringVar(&cfg.HttpListenAddress, "http.addr", nodecfg.DefaultHTTPHost, "HTTP-RPC server listening interface") + rootCmd.PersistentFlags().StringVar(&cfg.EngineHTTPListenAddress, "engine.addr", nodecfg.DefaultHTTPHost, "HTTP-RPC server listening interface for engineAPI") + rootCmd.PersistentFlags().StringVar(&cfg.TLSCertfile, "tls.cert", "", "certificate for client side TLS handshake") + rootCmd.PersistentFlags().StringVar(&cfg.TLSKeyFile, "tls.key", "", "key file for client side TLS handshake") + rootCmd.PersistentFlags().StringVar(&cfg.TLSCACert, "tls.cacert", "", "CA certificate for client side TLS handshake") + rootCmd.PersistentFlags().IntVar(&cfg.HttpPort, "http.port", nodecfg.DefaultHTTPPort, "HTTP-RPC server listening port") + rootCmd.PersistentFlags().IntVar(&cfg.EnginePort, "engine.port", nodecfg.DefaultEngineHTTPPort, "HTTP-RPC server listening port for 
the engineAPI") + rootCmd.PersistentFlags().StringSliceVar(&cfg.HttpCORSDomain, "http.corsdomain", []string{}, "Comma separated list of domains from which to accept cross origin requests (browser enforced)") + rootCmd.PersistentFlags().StringSliceVar(&cfg.HttpVirtualHost, "http.vhosts", nodecfg.DefaultConfig.HTTPVirtualHosts, "Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard.") + rootCmd.PersistentFlags().BoolVar(&cfg.HttpCompression, "http.compression", true, "Disable http compression") + rootCmd.PersistentFlags().StringSliceVar(&cfg.API, "http.api", []string{"eth", "erigon", "engine"}, "API's offered over the HTTP-RPC interface: eth,engine,erigon,web3,net,debug,trace,txpool,db,starknet. Supported methods: https://github.com/ledgerwatch/erigon/tree/devel/cmd/rpcdaemon22") + rootCmd.PersistentFlags().Uint64Var(&cfg.Gascap, "rpc.gascap", 50000000, "Sets a cap on gas that can be used in eth_call/estimateGas") + rootCmd.PersistentFlags().Uint64Var(&cfg.MaxTraces, "trace.maxtraces", 200, "Sets a limit on traces that can be returned in trace_filter") + rootCmd.PersistentFlags().BoolVar(&cfg.WebsocketEnabled, "ws", false, "Enable Websockets") + rootCmd.PersistentFlags().BoolVar(&cfg.WebsocketCompression, "ws.compression", false, "Enable Websocket compression (RFC 7692)") + rootCmd.PersistentFlags().StringVar(&cfg.RpcAllowListFilePath, "rpc.accessList", "", "Specify granular (method-by-method) API allowlist") + rootCmd.PersistentFlags().UintVar(&cfg.RpcBatchConcurrency, "rpc.batch.concurrency", 2, "Does limit amount of goroutines to process 1 batch request. Means 1 bach request can't overload server. 1 batch still can have unlimited amount of request") + rootCmd.PersistentFlags().IntVar(&cfg.DBReadConcurrency, "db.read.concurrency", runtime.GOMAXPROCS(-1), "Does limit amount of parallel db reads") + rootCmd.PersistentFlags().BoolVar(&cfg.TraceCompatibility, "trace.compat", false, "Bug for bug compatibility with OE for trace_ routines") + rootCmd.PersistentFlags().StringVar(&cfg.TxPoolApiAddr, "txpool.api.addr", "", "txpool api network address, for example: 127.0.0.1:9090 (default: use value of --private.api.addr)") + rootCmd.PersistentFlags().BoolVar(&cfg.TevmEnabled, utils.TevmFlag.Name, false, utils.TevmFlag.Usage) + rootCmd.PersistentFlags().BoolVar(&cfg.Sync.UseSnapshots, "snapshot", true, utils.SnapshotFlag.Usage) + rootCmd.PersistentFlags().IntVar(&cfg.StateCache.KeysLimit, "state.cache", kvcache.DefaultCoherentConfig.KeysLimit, "Amount of keys to store in StateCache (enabled if no --datadir set). Set 0 to disable StateCache. 
1_000_000 keys ~ equal to 2Gb RAM (maybe we will add RAM accounting in future versions).") + rootCmd.PersistentFlags().BoolVar(&cfg.GRPCServerEnabled, "grpc", false, "Enable GRPC server") + rootCmd.PersistentFlags().StringVar(&cfg.GRPCListenAddress, "grpc.addr", nodecfg.DefaultGRPCHost, "GRPC server listening interface") + rootCmd.PersistentFlags().IntVar(&cfg.GRPCPort, "grpc.port", nodecfg.DefaultGRPCPort, "GRPC server listening port") + rootCmd.PersistentFlags().BoolVar(&cfg.GRPCHealthCheckEnabled, "grpc.healthcheck", false, "Enable GRPC health check") + rootCmd.PersistentFlags().StringVar(&cfg.StarknetGRPCAddress, "starknet.grpc.address", "127.0.0.1:6066", "Starknet GRPC address") + rootCmd.PersistentFlags().StringVar(&cfg.JWTSecretPath, utils.JWTSecretPath.Name, utils.JWTSecretPath.Value, "Token to ensure safe connection between CL and EL") + + if err := rootCmd.MarkPersistentFlagFilename("rpc.accessList", "json"); err != nil { + panic(err) + } + if err := rootCmd.MarkPersistentFlagDirname("datadir"); err != nil { + panic(err) + } + + rootCmd.PersistentPreRunE = func(cmd *cobra.Command, args []string) error { + if err := utils.SetupCobra(cmd); err != nil { + return err + } + cfg.WithDatadir = cfg.DataDir != "" + if cfg.WithDatadir { + if cfg.DataDir == "" { + cfg.DataDir = paths.DefaultDataDir() + } + cfg.Dirs = datadir.New(cfg.DataDir) + } + if cfg.TxPoolApiAddr == "" { + cfg.TxPoolApiAddr = cfg.PrivateApiAddr + } + return nil + } + rootCmd.PersistentPostRunE = func(cmd *cobra.Command, args []string) error { + utils.StopDebug() + return nil + } + + cfg.StateCache.MetricsLabel = "rpc" + + return rootCmd, cfg +} + +type StateChangesClient interface { + StateChanges(ctx context.Context, in *remote.StateChangeRequest, opts ...grpc.CallOption) (remote.KV_StateChangesClient, error) +} + +func subscribeToStateChangesLoop(ctx context.Context, client StateChangesClient, cache kvcache.Cache) { + go func() { + for { + select { + case <-ctx.Done(): + return + default: + } + if err := subscribeToStateChanges(ctx, client, cache); err != nil { + if grpcutil.IsRetryLater(err) || grpcutil.IsEndOfStream(err) { + time.Sleep(3 * time.Second) + continue + } + log.Warn("[txpool.handleStateChanges]", "err", err) + } + } + }() +} + +func subscribeToStateChanges(ctx context.Context, client StateChangesClient, cache kvcache.Cache) error { + streamCtx, cancel := context.WithCancel(ctx) + defer cancel() + stream, err := client.StateChanges(streamCtx, &remote.StateChangeRequest{WithStorage: true, WithTransactions: false}, grpc.WaitForReady(true)) + if err != nil { + return err + } + for req, err := stream.Recv(); ; req, err = stream.Recv() { + if err != nil { + return err + } + if req == nil { + return nil + } + + cache.OnNewBlock(req) + } +} + +func checkDbCompatibility(ctx context.Context, db kv.RoDB) error { + // DB schema version compatibility check + var version []byte + var compatErr error + var compatTx kv.Tx + if compatTx, compatErr = db.BeginRo(ctx); compatErr != nil { + return fmt.Errorf("open Ro Tx for DB schema compability check: %w", compatErr) + } + defer compatTx.Rollback() + if version, compatErr = compatTx.GetOne(kv.DatabaseInfo, kv.DBSchemaVersionKey); compatErr != nil { + return fmt.Errorf("read version for DB schema compability check: %w", compatErr) + } + if len(version) != 12 { + return fmt.Errorf("database does not have major schema version. 
upgrade and restart Erigon core") + } + major := binary.BigEndian.Uint32(version) + minor := binary.BigEndian.Uint32(version[4:]) + patch := binary.BigEndian.Uint32(version[8:]) + var compatible bool + dbSchemaVersion := &kv.DBSchemaVersion + if major != dbSchemaVersion.Major { + compatible = false + } else if minor != dbSchemaVersion.Minor { + compatible = false + } else { + compatible = true + } + if !compatible { + return fmt.Errorf("incompatible DB Schema versions: reader %d.%d.%d, database %d.%d.%d", + dbSchemaVersion.Major, dbSchemaVersion.Minor, dbSchemaVersion.Patch, + major, minor, patch) + } + log.Info("DB schemas compatible", "reader", fmt.Sprintf("%d.%d.%d", dbSchemaVersion.Major, dbSchemaVersion.Minor, dbSchemaVersion.Patch), + "database", fmt.Sprintf("%d.%d.%d", major, minor, patch)) + return nil +} + +func EmbeddedServices(ctx context.Context, erigonDB kv.RoDB, stateCacheCfg kvcache.CoherentConfig, blockReader services.FullBlockReader, ethBackendServer remote.ETHBACKENDServer, + txPoolServer txpool.TxpoolServer, miningServer txpool.MiningServer, +) ( + eth rpchelper.ApiBackend, txPool txpool.TxpoolClient, mining txpool.MiningClient, starknet *rpcservices.StarknetService, stateCache kvcache.Cache, ff *rpchelper.Filters, err error, +) { + if stateCacheCfg.KeysLimit > 0 { + stateCache = kvcache.New(stateCacheCfg) + } else { + stateCache = kvcache.NewDummy() + } + kvRPC := remotedbserver.NewKvServer(ctx, erigonDB) + stateDiffClient := direct.NewStateDiffClientDirect(kvRPC) + _ = stateDiffClient + subscribeToStateChangesLoop(ctx, stateDiffClient, stateCache) + + directClient := direct.NewEthBackendClientDirect(ethBackendServer) + + eth = rpcservices.NewRemoteBackend(directClient, erigonDB, blockReader) + txPool = direct.NewTxPoolClient(txPoolServer) + mining = direct.NewMiningClient(miningServer) + ff = rpchelper.New(ctx, eth, txPool, mining, func() {}) + return +} + +// RemoteServices - use when RPCDaemon run as independent process. Still it can use --datadir flag to enable +// `cfg.WithDatadir` (mode when it on 1 machine with Erigon) +func RemoteServices(ctx context.Context, cfg httpcfg.HttpCfg, logger log.Logger, rootCancel context.CancelFunc) ( + db kv.RoDB, borDb kv.RoDB, + eth rpchelper.ApiBackend, txPool txpool.TxpoolClient, mining txpool.MiningClient, + starknet *rpcservices.StarknetService, + stateCache kvcache.Cache, blockReader services.FullBlockReader, + ff *rpchelper.Filters, err error) { + if !cfg.WithDatadir && cfg.PrivateApiAddr == "" { + return nil, nil, nil, nil, nil, nil, nil, nil, ff, fmt.Errorf("either remote db or local db must be specified") + } + + // Do not change the order of these checks. 
Chaindata needs to be checked first, because PrivateApiAddr has default value which is not "" + // If PrivateApiAddr is checked first, the Chaindata option will never work + if cfg.WithDatadir { + var rwKv kv.RwDB + log.Trace("Creating chain db", "path", cfg.Dirs.Chaindata) + limiter := make(chan struct{}, cfg.DBReadConcurrency) + rwKv, err = kv2.NewMDBX(logger).RoTxsLimiter(limiter).Path(cfg.Dirs.Chaindata).Readonly().Open() + if err != nil { + return nil, nil, nil, nil, nil, nil, nil, nil, ff, err + } + if compatErr := checkDbCompatibility(ctx, rwKv); compatErr != nil { + return nil, nil, nil, nil, nil, nil, nil, nil, ff, compatErr + } + db = rwKv + stateCache = kvcache.NewDummy() + blockReader = snapshotsync.NewBlockReader() + + // bor (consensus) specific db + var borKv kv.RoDB + borDbPath := filepath.Join(cfg.DataDir, "bor") + { + // ensure db exist + tmpDb, err := kv2.NewMDBX(logger).Path(borDbPath).Label(kv.ConsensusDB).Open() + if err != nil { + return nil, nil, nil, nil, nil, nil, nil, nil, ff, err + } + tmpDb.Close() + } + log.Trace("Creating consensus db", "path", borDbPath) + borKv, err = kv2.NewMDBX(logger).Path(borDbPath).Label(kv.ConsensusDB).Readonly().Open() + if err != nil { + return nil, nil, nil, nil, nil, nil, nil, nil, ff, err + } + // Skip the compatibility check, until we have a schema in erigon-lib + borDb = borKv + } else { + if cfg.StateCache.KeysLimit > 0 { + stateCache = kvcache.New(cfg.StateCache) + } else { + stateCache = kvcache.NewDummy() + } + log.Info("if you run RPCDaemon on same machine with Erigon add --datadir option") + } + + if db != nil { + var cc *params.ChainConfig + if err := db.View(context.Background(), func(tx kv.Tx) error { + genesisBlock, err := rawdb.ReadBlockByNumber(tx, 0) + if err != nil { + return err + } + if genesisBlock == nil { + return fmt.Errorf("genesis not found in DB. Likely Erigon was never started on this datadir") + } + cc, err = rawdb.ReadChainConfig(tx, genesisBlock.Hash()) + if err != nil { + return err + } + cfg.Snap.Enabled, err = snap.Enabled(tx) + if err != nil { + return err + } + return nil + }); err != nil { + return nil, nil, nil, nil, nil, nil, nil, nil, ff, err + } + if cc == nil { + return nil, nil, nil, nil, nil, nil, nil, nil, ff, fmt.Errorf("chain config not found in db. 
Need start erigon at least once on this db") + } + cfg.Snap.Enabled = cfg.Snap.Enabled || cfg.Sync.UseSnapshots + + // if chain config has terminal total difficulty then rpc must have eth and engine APIs enableds + if cc.TerminalTotalDifficulty != nil { + hasEthApiEnabled := false + hasEngineApiEnabled := false + + for _, api := range cfg.API { + switch api { + case "eth": + hasEthApiEnabled = true + case "engine": + hasEngineApiEnabled = true + } + } + + if !hasEthApiEnabled { + cfg.API = append(cfg.API, "eth") + } + + if !hasEngineApiEnabled { + cfg.API = append(cfg.API, "engine") + } + } + } + + onNewSnapshot := func() {} + if cfg.WithDatadir { + if cfg.Snap.Enabled { + allSnapshots := snapshotsync.NewRoSnapshots(cfg.Snap, cfg.Dirs.Snap) + allSnapshots.OptimisticReopen() + log.Info("[Snapshots] see new", "blocks", allSnapshots.BlocksAvailable()) + // don't reopen it right here, because snapshots may be not ready yet + onNewSnapshot = func() { + if err := allSnapshots.Reopen(); err != nil { + log.Error("[Snapshots] reopen", "err", err) + } else { + log.Info("[Snapshots] see new", "blocks", allSnapshots.BlocksAvailable()) + } + } + blockReader = snapshotsync.NewBlockReaderWithSnapshots(allSnapshots) + } else { + log.Info("Use --snapshots=false") + } + } + + creds, err := grpcutil.TLS(cfg.TLSCACert, cfg.TLSCertfile, cfg.TLSKeyFile) + if err != nil { + return nil, nil, nil, nil, nil, nil, nil, nil, ff, fmt.Errorf("open tls cert: %w", err) + } + conn, err := grpcutil.Connect(creds, cfg.PrivateApiAddr) + if err != nil { + return nil, nil, nil, nil, nil, nil, nil, nil, ff, fmt.Errorf("could not connect to execution service privateApi: %w", err) + } + + kvClient := remote.NewKVClient(conn) + remoteKv, err := remotedb.NewRemote(gointerfaces.VersionFromProto(remotedbserver.KvServiceAPIVersion), logger, kvClient).Open() + if err != nil { + return nil, nil, nil, nil, nil, nil, nil, nil, ff, fmt.Errorf("could not connect to remoteKv: %w", err) + } + + subscribeToStateChangesLoop(ctx, kvClient, stateCache) + + if !cfg.WithDatadir { + blockReader = snapshotsync.NewRemoteBlockReader(remote.NewETHBACKENDClient(conn)) + } + remoteEth := rpcservices.NewRemoteBackend(remote.NewETHBACKENDClient(conn), db, blockReader) + blockReader = remoteEth + + txpoolConn := conn + if cfg.TxPoolApiAddr != cfg.PrivateApiAddr { + txpoolConn, err = grpcutil.Connect(creds, cfg.TxPoolApiAddr) + if err != nil { + return nil, nil, nil, nil, nil, nil, nil, nil, ff, fmt.Errorf("could not connect to txpool api: %w", err) + } + } + + mining = txpool.NewMiningClient(txpoolConn) + miningService := rpcservices.NewMiningService(mining) + txPool = txpool.NewTxpoolClient(txpoolConn) + txPoolService := rpcservices.NewTxPoolService(txPool) + if db == nil { + db = remoteKv + } + eth = remoteEth + go func() { + if !remoteKv.EnsureVersionCompatibility() { + rootCancel() + } + if !remoteEth.EnsureVersionCompatibility() { + rootCancel() + } + if mining != nil && !miningService.EnsureVersionCompatibility() { + rootCancel() + } + if !txPoolService.EnsureVersionCompatibility() { + rootCancel() + } + }() + + if cfg.StarknetGRPCAddress != "" { + starknetConn, err := grpcutil.Connect(creds, cfg.StarknetGRPCAddress) + if err != nil { + return nil, nil, nil, nil, nil, nil, nil, nil, ff, fmt.Errorf("could not connect to starknet api: %w", err) + } + starknet = rpcservices.NewStarknetService(starknetConn) + } + + ff = rpchelper.New(ctx, eth, txPool, mining, onNewSnapshot) + + return db, borDb, eth, txPool, mining, starknet, stateCache, blockReader, ff, 
err +} + +func StartRpcServer(ctx context.Context, cfg httpcfg.HttpCfg, rpcAPI []rpc.API) error { + var engineListener *http.Server + var engineSrv *rpc.Server + var engineHttpEndpoint string + + // register apis and create handler stack + httpEndpoint := fmt.Sprintf("%s:%d", cfg.HttpListenAddress, cfg.HttpPort) + + srv := rpc.NewServer(cfg.RpcBatchConcurrency) + + allowListForRPC, err := parseAllowListForRPC(cfg.RpcAllowListFilePath) + if err != nil { + return err + } + srv.SetAllowList(allowListForRPC) + + var defaultAPIList []rpc.API + var engineAPI []rpc.API + + for _, api := range rpcAPI { + if api.Namespace != "engine" { + defaultAPIList = append(defaultAPIList, api) + } else { + engineAPI = append(engineAPI, api) + } + } + + if len(engineAPI) != 0 { + // eth API should also be exposed on the same port as engine API + for _, api := range rpcAPI { + if api.Namespace == "eth" { + engineAPI = append(engineAPI, api) + } + } + } + + var apiFlags []string + for _, flag := range cfg.API { + if flag != "engine" { + apiFlags = append(apiFlags, flag) + } + } + + if err := node.RegisterApisFromWhitelist(defaultAPIList, apiFlags, srv, false); err != nil { + return fmt.Errorf("could not start register RPC apis: %w", err) + } + + httpHandler := node.NewHTTPHandlerStack(srv, cfg.HttpCORSDomain, cfg.HttpVirtualHost, cfg.HttpCompression) + var wsHandler http.Handler + if cfg.WebsocketEnabled { + wsHandler = srv.WebsocketHandler([]string{"*"}, nil, cfg.WebsocketCompression) + } + + apiHandler, err := createHandler(cfg, defaultAPIList, httpHandler, wsHandler, nil) + if err != nil { + return err + } + + listener, _, err := node.StartHTTPEndpoint(httpEndpoint, rpccfg.DefaultHTTPTimeouts, apiHandler) + if err != nil { + return fmt.Errorf("could not start RPC api: %w", err) + } + info := []interface{}{"url", httpEndpoint, "ws", cfg.WebsocketEnabled, + "ws.compression", cfg.WebsocketCompression, "grpc", cfg.GRPCServerEnabled} + + if len(engineAPI) > 0 { + engineListener, engineSrv, engineHttpEndpoint, err = createEngineListener(cfg, engineAPI) + if err != nil { + return fmt.Errorf("could not start RPC api for engine: %w", err) + } + } + + var ( + healthServer *grpcHealth.Server + grpcServer *grpc.Server + grpcListener net.Listener + grpcEndpoint string + ) + if cfg.GRPCServerEnabled { + grpcEndpoint = fmt.Sprintf("%s:%d", cfg.GRPCListenAddress, cfg.GRPCPort) + if grpcListener, err = net.Listen("tcp", grpcEndpoint); err != nil { + return fmt.Errorf("could not start GRPC listener: %w", err) + } + grpcServer = grpc.NewServer() + if cfg.GRPCHealthCheckEnabled { + healthServer = grpcHealth.NewServer() + grpc_health_v1.RegisterHealthServer(grpcServer, healthServer) + } + go grpcServer.Serve(grpcListener) + info = append(info, "grpc.port", cfg.GRPCPort) + } + + log.Info("HTTP endpoint opened", info...) 
+ + defer func() { + srv.Stop() + if engineSrv != nil { + engineSrv.Stop() + } + shutdownCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + _ = listener.Shutdown(shutdownCtx) + log.Info("HTTP endpoint closed", "url", httpEndpoint) + + if engineListener != nil { + _ = engineListener.Shutdown(shutdownCtx) + log.Info("Engine HTTP endpoint close", "url", engineHttpEndpoint) + } + + if cfg.GRPCServerEnabled { + if cfg.GRPCHealthCheckEnabled { + healthServer.Shutdown() + } + grpcServer.GracefulStop() + _ = grpcListener.Close() + log.Info("GRPC endpoint closed", "url", grpcEndpoint) + } + }() + <-ctx.Done() + log.Info("Exiting...") + return nil +} + +// isWebsocket checks the header of a http request for a websocket upgrade request. +func isWebsocket(r *http.Request) bool { + return strings.ToLower(r.Header.Get("Upgrade")) == "websocket" && + strings.Contains(strings.ToLower(r.Header.Get("Connection")), "upgrade") +} + +// obtainJWTSecret loads the jwt-secret, either from the provided config, +// or from the default location. If neither of those are present, it generates +// a new secret and stores to the default location. +func obtainJWTSecret(cfg httpcfg.HttpCfg) ([]byte, error) { + // try reading from file + log.Info("Reading JWT secret", "path", cfg.JWTSecretPath) + // If we run the rpcdaemon and datadir is not specified we just use jwt.hex in current directory. + if len(cfg.JWTSecretPath) == 0 { + cfg.JWTSecretPath = "jwt.hex" + } + if data, err := os.ReadFile(cfg.JWTSecretPath); err == nil { + jwtSecret := common.FromHex(strings.TrimSpace(string(data))) + if len(jwtSecret) == 32 { + return jwtSecret, nil + } + log.Error("Invalid JWT secret", "path", cfg.JWTSecretPath, "length", len(jwtSecret)) + return nil, errors.New("invalid JWT secret") + } + // Need to generate one + jwtSecret := make([]byte, 32) + rand.Read(jwtSecret) + + if err := os.WriteFile(cfg.JWTSecretPath, []byte(hexutil.Encode(jwtSecret)), 0600); err != nil { + return nil, err + } + log.Info("Generated JWT secret", "path", cfg.JWTSecretPath) + return jwtSecret, nil +} + +func createHandler(cfg httpcfg.HttpCfg, apiList []rpc.API, httpHandler http.Handler, wsHandler http.Handler, jwtSecret []byte) (http.Handler, error) { + var handler http.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // adding a healthcheck here + if health.ProcessHealthcheckIfNeeded(w, r, apiList) { + return + } + if cfg.WebsocketEnabled && wsHandler != nil && isWebsocket(r) { + wsHandler.ServeHTTP(w, r) + return + } + + if jwtSecret != nil && !rpc.CheckJwtSecret(w, r, jwtSecret) { + return + } + + httpHandler.ServeHTTP(w, r) + }) + + return handler, nil +} + +func createEngineListener(cfg httpcfg.HttpCfg, engineApi []rpc.API) (*http.Server, *rpc.Server, string, error) { + engineHttpEndpoint := fmt.Sprintf("%s:%d", cfg.EngineHTTPListenAddress, cfg.EnginePort) + + engineSrv := rpc.NewServer(cfg.RpcBatchConcurrency) + + allowListForRPC, err := parseAllowListForRPC(cfg.RpcAllowListFilePath) + if err != nil { + return nil, nil, "", err + } + engineSrv.SetAllowList(allowListForRPC) + + if err := node.RegisterApisFromWhitelist(engineApi, nil, engineSrv, true); err != nil { + return nil, nil, "", fmt.Errorf("could not start register RPC engine api: %w", err) + } + + jwtSecret, err := obtainJWTSecret(cfg) + if err != nil { + return nil, nil, "", err + } + + var wsHandler http.Handler + if cfg.WebsocketEnabled { + wsHandler = engineSrv.WebsocketHandler([]string{"*"}, jwtSecret, 
cfg.WebsocketCompression) + } + + engineHttpHandler := node.NewHTTPHandlerStack(engineSrv, cfg.HttpCORSDomain, cfg.HttpVirtualHost, cfg.HttpCompression) + + engineApiHandler, err := createHandler(cfg, engineApi, engineHttpHandler, wsHandler, jwtSecret) + if err != nil { + return nil, nil, "", err + } + + engineListener, _, err := node.StartHTTPEndpoint(engineHttpEndpoint, rpccfg.DefaultHTTPTimeouts, engineApiHandler) + if err != nil { + return nil, nil, "", fmt.Errorf("could not start RPC api: %w", err) + } + + engineInfo := []interface{}{"url", engineHttpEndpoint, "ws", cfg.WebsocketEnabled} + log.Info("HTTP endpoint opened for Engine API", engineInfo...) + + return engineListener, engineSrv, engineHttpEndpoint, nil +} diff --git a/cmd/rpcdaemon22/cli/httpcfg/http_cfg.go b/cmd/rpcdaemon22/cli/httpcfg/http_cfg.go new file mode 100644 index 00000000000..6a7d3505110 --- /dev/null +++ b/cmd/rpcdaemon22/cli/httpcfg/http_cfg.go @@ -0,0 +1,45 @@ +package httpcfg + +import ( + "github.com/ledgerwatch/erigon-lib/kv/kvcache" + "github.com/ledgerwatch/erigon/eth/ethconfig" + "github.com/ledgerwatch/erigon/node/nodecfg/datadir" +) + +type HttpCfg struct { + Enabled bool + PrivateApiAddr string + WithDatadir bool // Erigon's database can be read by separated processes on same machine - in read-only mode - with full support of transactions. It will share same "OS PageCache" with Erigon process. + DataDir string + Dirs datadir.Dirs + HttpListenAddress string + EngineHTTPListenAddress string + TLSCertfile string + TLSCACert string + TLSKeyFile string + HttpPort int + EnginePort int + HttpCORSDomain []string + HttpVirtualHost []string + HttpCompression bool + API []string + Gascap uint64 + MaxTraces uint64 + WebsocketEnabled bool + WebsocketCompression bool + RpcAllowListFilePath string + RpcBatchConcurrency uint + DBReadConcurrency int + TraceCompatibility bool // Bug for bug compatibility for trace_ routines with OpenEthereum + TxPoolApiAddr string + TevmEnabled bool + StateCache kvcache.CoherentConfig + Snap ethconfig.Snapshot + Sync ethconfig.Sync + GRPCServerEnabled bool + GRPCListenAddress string + GRPCPort int + GRPCHealthCheckEnabled bool + StarknetGRPCAddress string + JWTSecretPath string // Engine API Authentication +} diff --git a/cmd/rpcdaemon22/cli/rpc_allow_list.go b/cmd/rpcdaemon22/cli/rpc_allow_list.go new file mode 100644 index 00000000000..dbf6fbff88a --- /dev/null +++ b/cmd/rpcdaemon22/cli/rpc_allow_list.go @@ -0,0 +1,43 @@ +package cli + +import ( + "encoding/json" + "io" + "os" + "strings" + + "github.com/ledgerwatch/erigon/rpc" +) + +type allowListFile struct { + Allow rpc.AllowList `json:"allow"` +} + +func parseAllowListForRPC(path string) (rpc.AllowList, error) { + path = strings.TrimSpace(path) + if path == "" { // no file is provided + return nil, nil + } + + file, err := os.Open(path) + if err != nil { + return nil, err + } + defer func() { + file.Close() //nolint: errcheck + }() + + fileContents, err := io.ReadAll(file) + if err != nil { + return nil, err + } + + var allowListFileObj allowListFile + + err = json.Unmarshal(fileContents, &allowListFileObj) + if err != nil { + return nil, err + } + + return allowListFileObj.Allow, nil +} diff --git a/cmd/rpcdaemon22/commands/admin_api.go b/cmd/rpcdaemon22/commands/admin_api.go new file mode 100644 index 00000000000..636e1de30c6 --- /dev/null +++ b/cmd/rpcdaemon22/commands/admin_api.go @@ -0,0 +1,49 @@ +package commands + +import ( + "context" + "errors" + "fmt" + + "github.com/ledgerwatch/erigon/p2p" + 
"github.com/ledgerwatch/erigon/turbo/rpchelper" +) + +// AdminAPI the interface for the admin_* RPC commands. +type AdminAPI interface { + // NodeInfo returns a collection of metadata known about the host. + NodeInfo(ctx context.Context) (*p2p.NodeInfo, error) + + // Peers returns information about the connected remote nodes. + // https://geth.ethereum.org/docs/rpc/ns-admin#admin_peers + Peers(ctx context.Context) ([]*p2p.PeerInfo, error) +} + +// AdminAPIImpl data structure to store things needed for admin_* commands. +type AdminAPIImpl struct { + ethBackend rpchelper.ApiBackend +} + +// NewAdminAPI returns AdminAPIImpl instance. +func NewAdminAPI(eth rpchelper.ApiBackend) *AdminAPIImpl { + return &AdminAPIImpl{ + ethBackend: eth, + } +} + +func (api *AdminAPIImpl) NodeInfo(ctx context.Context) (*p2p.NodeInfo, error) { + nodes, err := api.ethBackend.NodeInfo(ctx, 1) + if err != nil { + return nil, fmt.Errorf("node info request error: %w", err) + } + + if len(nodes) == 0 { + return nil, errors.New("empty nodesInfo response") + } + + return &nodes[0], nil +} + +func (api *AdminAPIImpl) Peers(ctx context.Context) ([]*p2p.PeerInfo, error) { + return api.ethBackend.Peers(ctx) +} diff --git a/cmd/rpcdaemon22/commands/bor_api.go b/cmd/rpcdaemon22/commands/bor_api.go new file mode 100644 index 00000000000..79eb2a48327 --- /dev/null +++ b/cmd/rpcdaemon22/commands/bor_api.go @@ -0,0 +1,37 @@ +package commands + +import ( + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/consensus/bor" + "github.com/ledgerwatch/erigon/rpc" +) + +// BorAPI Bor specific routines +type BorAPI interface { + // Bor snapshot related (see ./bor_snapshot.go) + GetSnapshot(number *rpc.BlockNumber) (*Snapshot, error) + GetAuthor(number *rpc.BlockNumber) (*common.Address, error) + GetSnapshotAtHash(hash common.Hash) (*Snapshot, error) + GetSigners(number *rpc.BlockNumber) ([]common.Address, error) + GetSignersAtHash(hash common.Hash) ([]common.Address, error) + GetCurrentProposer() (common.Address, error) + GetCurrentValidators() ([]*bor.Validator, error) + GetRootHash(start uint64, end uint64) (string, error) +} + +// BorImpl is implementation of the BorAPI interface +type BorImpl struct { + *BaseAPI + db kv.RoDB // the chain db + borDb kv.RoDB // the consensus db +} + +// NewBorAPI returns BorImpl instance +func NewBorAPI(base *BaseAPI, db kv.RoDB, borDb kv.RoDB) *BorImpl { + return &BorImpl{ + BaseAPI: base, + db: db, + borDb: borDb, + } +} diff --git a/cmd/rpcdaemon22/commands/bor_helper.go b/cmd/rpcdaemon22/commands/bor_helper.go new file mode 100644 index 00000000000..49d074307da --- /dev/null +++ b/cmd/rpcdaemon22/commands/bor_helper.go @@ -0,0 +1,156 @@ +package commands + +import ( + "bytes" + "errors" + "fmt" + + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/consensus/bor" + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/crypto" + "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/rpc" +) + +const ( + checkpointInterval = 1024 // Number of blocks after which vote snapshots are saved to db +) + +var ( + extraVanity = 32 // Fixed number of extra-data prefix bytes reserved for signer vanity + extraSeal = 65 // Fixed number of extra-data suffix bytes reserved for signer seal +) + +var ( + // errUnknownBlock is returned when the list of signers is requested for a block + // that is not part of the 
local blockchain. + errUnknownBlock = errors.New("unknown block") + + // errMissingSignature is returned if a block's extra-data section doesn't seem + // to contain a 65 byte secp256k1 signature. + errMissingSignature = errors.New("extra-data 65 byte signature suffix missing") + + // errOutOfRangeChain is returned if an authorization list is attempted to + // be modified via out-of-range or non-contiguous headers. + errOutOfRangeChain = errors.New("out of range or non-contiguous chain") + + // errMissingVanity is returned if a block's extra-data section is shorter than + // 32 bytes, which is required to store the signer vanity. + errMissingVanity = errors.New("extra-data 32 byte vanity prefix missing") +) + +// getHeaderByNumber returns a block's header given a block number ignoring the block's transaction and uncle list (may be faster). +// derived from erigon_getHeaderByNumber implementation (see ./erigon_block.go) +func getHeaderByNumber(number rpc.BlockNumber, api *BorImpl, tx kv.Tx) (*types.Header, error) { + // Pending block is only known by the miner + if number == rpc.PendingBlockNumber { + block := api.pendingBlock() + if block == nil { + return nil, nil + } + return block.Header(), nil + } + + blockNum, err := getBlockNumber(number, tx) + if err != nil { + return nil, err + } + + header := rawdb.ReadHeaderByNumber(tx, blockNum) + if header == nil { + return nil, fmt.Errorf("block header not found: %d", blockNum) + } + + return header, nil +} + +// getHeaderByHash returns a block's header given a block's hash. +// derived from erigon_getHeaderByHash implementation (see ./erigon_block.go) +func getHeaderByHash(tx kv.Tx, hash common.Hash) (*types.Header, error) { + header, err := rawdb.ReadHeaderByHash(tx, hash) + if err != nil { + return nil, err + } + if header == nil { + return nil, fmt.Errorf("block header not found: %s", hash.String()) + } + + return header, nil +} + +// ecrecover extracts the Ethereum account address from a signed header. +func ecrecover(header *types.Header, c *params.BorConfig) (common.Address, error) { + // Retrieve the signature from the header extra-data + if len(header.Extra) < extraSeal { + return common.Address{}, errMissingSignature + } + signature := header.Extra[len(header.Extra)-extraSeal:] + + // Recover the public key and the Ethereum address + pubkey, err := crypto.Ecrecover(bor.SealHash(header, c).Bytes(), signature) + if err != nil { + return common.Address{}, err + } + var signer common.Address + copy(signer[:], crypto.Keccak256(pubkey[1:])[12:]) + + return signer, nil +} + +// validateHeaderExtraField validates that the extra-data contains both the vanity and signature. 
+// header.Extra = header.Vanity + header.ProducerBytes (optional) + header.Seal +func validateHeaderExtraField(extraBytes []byte) error { + if len(extraBytes) < extraVanity { + return errMissingVanity + } + if len(extraBytes) < extraVanity+extraSeal { + return errMissingSignature + } + return nil +} + +// validatorContains checks for a validator in given validator set +func validatorContains(a []*bor.Validator, x *bor.Validator) (*bor.Validator, bool) { + for _, n := range a { + if bytes.Equal(n.Address.Bytes(), x.Address.Bytes()) { + return n, true + } + } + return nil, false +} + +// getUpdatedValidatorSet applies changes to a validator set and returns a new validator set +func getUpdatedValidatorSet(oldValidatorSet *ValidatorSet, newVals []*bor.Validator) *ValidatorSet { + v := oldValidatorSet + oldVals := v.Validators + + var changes []*bor.Validator + for _, ov := range oldVals { + if f, ok := validatorContains(newVals, ov); ok { + ov.VotingPower = f.VotingPower + } else { + ov.VotingPower = 0 + } + + changes = append(changes, ov) + } + + for _, nv := range newVals { + if _, ok := validatorContains(changes, nv); !ok { + changes = append(changes, nv) + } + } + + v.UpdateWithChangeSet(changes) + return v +} + +// author returns the Ethereum address recovered +// from the signature in the header's extra-data section. +func author(api *BorImpl, tx kv.Tx, header *types.Header) (common.Address, error) { + config, _ := api.BaseAPI.chainConfig(tx) + return ecrecover(header, config.Bor) +} diff --git a/cmd/rpcdaemon22/commands/bor_snapshot.go b/cmd/rpcdaemon22/commands/bor_snapshot.go new file mode 100644 index 00000000000..96d18cc86bf --- /dev/null +++ b/cmd/rpcdaemon22/commands/bor_snapshot.go @@ -0,0 +1,424 @@ +package commands + +import ( + "context" + "encoding/hex" + "encoding/json" + "fmt" + "math/big" + + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/consensus" + "github.com/ledgerwatch/erigon/consensus/bor" + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/crypto" + "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/log/v3" + "github.com/xsleonard/go-merkle" + "golang.org/x/crypto/sha3" +) + +type Snapshot struct { + config *params.BorConfig // Consensus engine parameters to fine tune behavior + + Number uint64 `json:"number"` // Block number where the snapshot was created + Hash common.Hash `json:"hash"` // Block hash where the snapshot was created + ValidatorSet *ValidatorSet `json:"validatorSet"` // Validator set at this moment + Recents map[uint64]common.Address `json:"recents"` // Set of recent signers for spam protections +} + +// GetSnapshot retrieves the state snapshot at a given block. 
+func (api *BorImpl) GetSnapshot(number *rpc.BlockNumber) (*Snapshot, error) { + // init chain db + ctx := context.Background() + tx, err := api.db.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + + // Retrieve the requested block number (or current if none requested) + var header *types.Header + if number == nil || *number == rpc.LatestBlockNumber { + header = rawdb.ReadCurrentHeader(tx) + } else { + header, _ = getHeaderByNumber(*number, api, tx) + } + // Ensure we have an actually valid block + if header == nil { + return nil, errUnknownBlock + } + + // init consensus db + borTx, err := api.borDb.BeginRo(ctx) + if err != nil { + return nil, err + } + defer borTx.Rollback() + return snapshot(api, tx, borTx, header) +} + +// GetAuthor retrieves the author a block. +func (api *BorImpl) GetAuthor(number *rpc.BlockNumber) (*common.Address, error) { + ctx := context.Background() + tx, err := api.db.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + + // Retrieve the requested block number (or current if none requested) + var header *types.Header + if number == nil || *number == rpc.LatestBlockNumber { + header = rawdb.ReadCurrentHeader(tx) + } else { + header, _ = getHeaderByNumber(*number, api, tx) + } + // Ensure we have an actually valid block + if header == nil { + return nil, errUnknownBlock + } + author, err := author(api, tx, header) + return &author, err +} + +// GetSnapshotAtHash retrieves the state snapshot at a given block. +func (api *BorImpl) GetSnapshotAtHash(hash common.Hash) (*Snapshot, error) { + // init chain db + ctx := context.Background() + tx, err := api.db.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + + // Retreive the header + header, _ := getHeaderByHash(tx, hash) + + // Ensure we have an actually valid block + if header == nil { + return nil, errUnknownBlock + } + + // init consensus db + borTx, err := api.borDb.BeginRo(ctx) + if err != nil { + return nil, err + } + defer borTx.Rollback() + return snapshot(api, tx, borTx, header) +} + +// GetSigners retrieves the list of authorized signers at the specified block. +func (api *BorImpl) GetSigners(number *rpc.BlockNumber) ([]common.Address, error) { + // init chain db + ctx := context.Background() + tx, err := api.db.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + + // Retrieve the requested block number (or current if none requested) + var header *types.Header + if number == nil || *number == rpc.LatestBlockNumber { + header = rawdb.ReadCurrentHeader(tx) + } else { + header, _ = getHeaderByNumber(*number, api, tx) + } + // Ensure we have an actually valid block + if header == nil { + return nil, errUnknownBlock + } + + // init consensus db + borTx, err := api.borDb.BeginRo(ctx) + if err != nil { + return nil, err + } + defer borTx.Rollback() + snap, err := snapshot(api, tx, borTx, header) + return snap.signers(), err +} + +// GetSignersAtHash retrieves the list of authorized signers at the specified block. 
+func (api *BorImpl) GetSignersAtHash(hash common.Hash) ([]common.Address, error) { + // init chain db + ctx := context.Background() + tx, err := api.db.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + + // Retreive the header + header, _ := getHeaderByHash(tx, hash) + + // Ensure we have an actually valid block + if header == nil { + return nil, errUnknownBlock + } + + // init consensus db + borTx, err := api.borDb.BeginRo(ctx) + if err != nil { + return nil, err + } + defer borTx.Rollback() + + snap, err := snapshot(api, tx, borTx, header) + return snap.signers(), err +} + +// GetCurrentProposer gets the current proposer +func (api *BorImpl) GetCurrentProposer() (common.Address, error) { + snap, err := api.GetSnapshot(nil) + if err != nil { + return common.Address{}, err + } + return snap.ValidatorSet.GetProposer().Address, nil +} + +// GetCurrentValidators gets the current validators +func (api *BorImpl) GetCurrentValidators() ([]*bor.Validator, error) { + snap, err := api.GetSnapshot(nil) + if err != nil { + return make([]*bor.Validator, 0), err + } + return snap.ValidatorSet.Validators, nil +} + +// GetRootHash returns the merkle root of the start to end block headers +func (api *BorImpl) GetRootHash(start, end uint64) (string, error) { + length := uint64(end - start + 1) + if length > bor.MaxCheckpointLength { + return "", &bor.MaxCheckpointLengthExceededError{Start: start, End: end} + } + ctx := context.Background() + tx, err := api.db.BeginRo(ctx) + if err != nil { + return "", err + } + defer tx.Rollback() + header := rawdb.ReadCurrentHeader(tx) + var currentHeaderNumber uint64 = 0 + if header == nil { + return "", &bor.InvalidStartEndBlockError{Start: start, End: end, CurrentHeader: currentHeaderNumber} + } + currentHeaderNumber = header.Number.Uint64() + if start > end || end > currentHeaderNumber { + return "", &bor.InvalidStartEndBlockError{Start: start, End: end, CurrentHeader: currentHeaderNumber} + } + blockHeaders := make([]*types.Header, end-start+1) + for number := start; number <= end; number++ { + blockHeaders[number-start], _ = getHeaderByNumber(rpc.BlockNumber(number), api, tx) + } + + headers := make([][32]byte, bor.NextPowerOfTwo(length)) + for i := 0; i < len(blockHeaders); i++ { + blockHeader := blockHeaders[i] + header := crypto.Keccak256(bor.AppendBytes32( + blockHeader.Number.Bytes(), + new(big.Int).SetUint64(blockHeader.Time).Bytes(), + blockHeader.TxHash.Bytes(), + blockHeader.ReceiptHash.Bytes(), + )) + + var arr [32]byte + copy(arr[:], header) + headers[i] = arr + } + tree := merkle.NewTreeWithOpts(merkle.TreeOptions{EnableHashSorting: false, DisableHashLeaves: true}) + if err := tree.Generate(bor.Convert(headers), sha3.NewLegacyKeccak256()); err != nil { + return "", err + } + root := hex.EncodeToString(tree.Root().Hash) + return root, nil +} + +// Helper functions for Snapshot Type + +// copy creates a deep copy of the snapshot, though not the individual votes. 
+func (s *Snapshot) copy() *Snapshot { + cpy := &Snapshot{ + config: s.config, + Number: s.Number, + Hash: s.Hash, + ValidatorSet: s.ValidatorSet.Copy(), + Recents: make(map[uint64]common.Address), + } + for block, signer := range s.Recents { + cpy.Recents[block] = signer + } + + return cpy +} + +// GetSignerSuccessionNumber returns the relative position of signer in terms of the in-turn proposer +func (s *Snapshot) GetSignerSuccessionNumber(signer common.Address) (int, error) { + validators := s.ValidatorSet.Validators + proposer := s.ValidatorSet.GetProposer().Address + proposerIndex, _ := s.ValidatorSet.GetByAddress(proposer) + if proposerIndex == -1 { + return -1, &bor.UnauthorizedProposerError{Number: s.Number, Proposer: proposer.Bytes()} + } + signerIndex, _ := s.ValidatorSet.GetByAddress(signer) + if signerIndex == -1 { + return -1, &bor.UnauthorizedSignerError{Number: s.Number, Signer: signer.Bytes()} + } + + tempIndex := signerIndex + if proposerIndex != tempIndex { + if tempIndex < proposerIndex { + tempIndex = tempIndex + len(validators) + } + } + return tempIndex - proposerIndex, nil +} + +// signers retrieves the list of authorized signers in ascending order. +func (s *Snapshot) signers() []common.Address { + sigs := make([]common.Address, 0, len(s.ValidatorSet.Validators)) + for _, sig := range s.ValidatorSet.Validators { + sigs = append(sigs, sig.Address) + } + return sigs +} + +// apply header changes on snapshot +func (s *Snapshot) apply(headers []*types.Header) (*Snapshot, error) { + // Allow passing in no headers for cleaner code + if len(headers) == 0 { + return s, nil + } + // Sanity check that the headers can be applied + for i := 0; i < len(headers)-1; i++ { + if headers[i+1].Number.Uint64() != headers[i].Number.Uint64()+1 { + return nil, errOutOfRangeChain + } + } + if headers[0].Number.Uint64() != s.Number+1 { + return nil, errOutOfRangeChain + } + // Iterate through the headers and create a new snapshot + snap := s.copy() + + for _, header := range headers { + // Remove any votes on checkpoint blocks + number := header.Number.Uint64() + + // Delete the oldest signer from the recent list to allow it signing again + if number >= s.config.Sprint { + delete(snap.Recents, number-s.config.Sprint) + } + + // Resolve the authorization key and check against signers + signer, err := ecrecover(header, s.config) + if err != nil { + return nil, err + } + + // check if signer is in validator set + if !snap.ValidatorSet.HasAddress(signer.Bytes()) { + return nil, &bor.UnauthorizedSignerError{Number: number, Signer: signer.Bytes()} + } + + if _, err = snap.GetSignerSuccessionNumber(signer); err != nil { + return nil, err + } + + // add recents + snap.Recents[number] = signer + + // change validator set and change proposer + if number > 0 && (number+1)%s.config.Sprint == 0 { + if err := validateHeaderExtraField(header.Extra); err != nil { + return nil, err + } + validatorBytes := header.Extra[extraVanity : len(header.Extra)-extraSeal] + + // get validators from headers and use that for new validator set + newVals, _ := bor.ParseValidators(validatorBytes) + v := getUpdatedValidatorSet(snap.ValidatorSet.Copy(), newVals) + v.IncrementProposerPriority(1) + snap.ValidatorSet = v + } + } + snap.Number += uint64(len(headers)) + snap.Hash = headers[len(headers)-1].Hash() + + return snap, nil +} + +// snapshot retrieves the authorization snapshot at a given point in time. 
+func snapshot(api *BorImpl, db kv.Tx, borDb kv.Tx, header *types.Header) (*Snapshot, error) { + // Search for a snapshot on disk or build it from checkpoint + var ( + headers []*types.Header + snap *Snapshot + ) + + number := header.Number.Uint64() + hash := header.Hash() + + for snap == nil { + // If an on-disk checkpoint snapshot can be found, use that + if number%checkpointInterval == 0 { + if s, err := loadSnapshot(api, db, borDb, hash); err == nil { + log.Info("Loaded snapshot from disk", "number", number, "hash", hash) + snap = s + } + break + } + + // No snapshot for this header, move backward and check parent snapshots + if header == nil { + header, _ = getHeaderByNumber(rpc.BlockNumber(number), api, db) + if header == nil { + return nil, consensus.ErrUnknownAncestor + } + } + headers = append(headers, header) + number, hash = number-1, header.ParentHash + header = nil + } + + if snap == nil { + return nil, fmt.Errorf("unknown error while retrieving snapshot at block number %v", number) + } + + // Previous snapshot found, apply any pending headers on top of it + for i := 0; i < len(headers)/2; i++ { + headers[i], headers[len(headers)-1-i] = headers[len(headers)-1-i], headers[i] + } + + snap, err := snap.apply(headers) + if err != nil { + return nil, err + } + return snap, nil +} + +// loadSnapshot loads an existing snapshot from the database. +func loadSnapshot(api *BorImpl, db kv.Tx, borDb kv.Tx, hash common.Hash) (*Snapshot, error) { + blob, err := borDb.GetOne(kv.BorSeparate, append([]byte("bor-"), hash[:]...)) + if err != nil { + return nil, err + } + snap := new(Snapshot) + if err := json.Unmarshal(blob, snap); err != nil { + return nil, err + } + config, _ := api.BaseAPI.chainConfig(db) + snap.config = config.Bor + + // update total voting power + if err := snap.ValidatorSet.updateTotalVotingPower(); err != nil { + return nil, err + } + + return snap, nil +} diff --git a/cmd/rpcdaemon22/commands/call_traces_test.go b/cmd/rpcdaemon22/commands/call_traces_test.go new file mode 100644 index 00000000000..013e3272899 --- /dev/null +++ b/cmd/rpcdaemon22/commands/call_traces_test.go @@ -0,0 +1,265 @@ +package commands + +import ( + "bytes" + "context" + "sync" + "testing" + + "github.com/holiman/uint256" + jsoniter "github.com/json-iterator/go" + "github.com/ledgerwatch/erigon-lib/kv/kvcache" + "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/cli/httpcfg" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/valyala/fastjson" + + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/hexutil" + "github.com/ledgerwatch/erigon/core" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/turbo/snapshotsync" + "github.com/ledgerwatch/erigon/turbo/stages" +) + +func blockNumbersFromTraces(t *testing.T, b []byte) []int { + var err error + var p fastjson.Parser + response := b + var v *fastjson.Value + if v, err = p.ParseBytes(response); err != nil { + t.Fatalf("parsing response: %v", err) + } + var elems []*fastjson.Value + if elems, err = v.Array(); err != nil { + t.Fatalf("expected array in the response: %v", err) + } + var numbers []int + for _, elem := range elems { + bn := elem.GetInt("blockNumber") + numbers = append(numbers, bn) + } + return numbers +} + +func TestCallTraceOneByOne(t *testing.T) { + m := stages.Mock(t) + defer m.DB.Close() + chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 10, func(i int, gen *core.BlockGen) { + gen.SetCoinbase(common.Address{1}) + }, 
false /* intermediateHashes */) + if err != nil { + t.Fatalf("generate chain: %v", err) + } + api := NewTraceAPI( + NewBaseApi(nil, kvcache.New(kvcache.DefaultCoherentConfig), snapshotsync.NewBlockReader(), false), + m.DB, &httpcfg.HttpCfg{}) + // Insert blocks 1 by 1, to tirgget possible "off by one" errors + for i := 0; i < chain.Length; i++ { + if err = m.InsertChain(chain.Slice(i, i+1)); err != nil { + t.Fatalf("inserting chain: %v", err) + } + } + stream := jsoniter.ConfigDefault.BorrowStream(nil) + defer jsoniter.ConfigDefault.ReturnStream(stream) + var fromBlock, toBlock uint64 + fromBlock = 1 + toBlock = 10 + toAddress1 := common.Address{1} + traceReq1 := TraceFilterRequest{ + FromBlock: (*hexutil.Uint64)(&fromBlock), + ToBlock: (*hexutil.Uint64)(&toBlock), + ToAddress: []*common.Address{&toAddress1}, + } + if err = api.Filter(context.Background(), traceReq1, stream); err != nil { + t.Fatalf("trace_filter failed: %v", err) + } + assert.Equal(t, []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, blockNumbersFromTraces(t, stream.Buffer())) +} + +func TestCallTraceUnwind(t *testing.T) { + m := stages.Mock(t) + defer m.DB.Close() + var chainA, chainB *core.ChainPack + var err error + chainA, err = core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 10, func(i int, gen *core.BlockGen) { + gen.SetCoinbase(common.Address{1}) + }, false /* intermediateHashes */) + if err != nil { + t.Fatalf("generate chainA: %v", err) + } + chainB, err = core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 20, func(i int, gen *core.BlockGen) { + if i < 5 || i >= 10 { + gen.SetCoinbase(common.Address{1}) + } else { + gen.SetCoinbase(common.Address{2}) + } + }, false /* intermediateHashes */) + if err != nil { + t.Fatalf("generate chainB: %v", err) + } + api := NewTraceAPI(NewBaseApi(nil, kvcache.New(kvcache.DefaultCoherentConfig), snapshotsync.NewBlockReader(), false), m.DB, &httpcfg.HttpCfg{}) + if err = m.InsertChain(chainA); err != nil { + t.Fatalf("inserting chainA: %v", err) + } + var buf bytes.Buffer + stream := jsoniter.NewStream(jsoniter.ConfigDefault, &buf, 4096) + var fromBlock, toBlock uint64 + fromBlock = 1 + toBlock = 10 + toAddress1 := common.Address{1} + traceReq1 := TraceFilterRequest{ + FromBlock: (*hexutil.Uint64)(&fromBlock), + ToBlock: (*hexutil.Uint64)(&toBlock), + ToAddress: []*common.Address{&toAddress1}, + } + if err = api.Filter(context.Background(), traceReq1, stream); err != nil { + t.Fatalf("trace_filter failed: %v", err) + } + + assert.Equal(t, []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, blockNumbersFromTraces(t, buf.Bytes())) + if err = m.InsertChain(chainB.Slice(0, 12)); err != nil { + t.Fatalf("inserting chainB: %v", err) + } + buf.Reset() + toBlock = 12 + traceReq2 := TraceFilterRequest{ + FromBlock: (*hexutil.Uint64)(&fromBlock), + ToBlock: (*hexutil.Uint64)(&toBlock), + ToAddress: []*common.Address{&toAddress1}, + } + if err = api.Filter(context.Background(), traceReq2, stream); err != nil { + t.Fatalf("trace_filter failed: %v", err) + } + assert.Equal(t, []int{1, 2, 3, 4, 5, 11, 12}, blockNumbersFromTraces(t, buf.Bytes())) + if err = m.InsertChain(chainB.Slice(12, 20)); err != nil { + t.Fatalf("inserting chainB: %v", err) + } + buf.Reset() + fromBlock = 12 + toBlock = 20 + traceReq3 := TraceFilterRequest{ + FromBlock: (*hexutil.Uint64)(&fromBlock), + ToBlock: (*hexutil.Uint64)(&toBlock), + ToAddress: []*common.Address{&toAddress1}, + } + if err = api.Filter(context.Background(), traceReq3, stream); err != nil { + t.Fatalf("trace_filter failed: %v", err) + } + 
assert.Equal(t, []int{12, 13, 14, 15, 16, 17, 18, 19, 20}, blockNumbersFromTraces(t, buf.Bytes())) +} + +func TestFilterNoAddresses(t *testing.T) { + m := stages.Mock(t) + defer m.DB.Close() + chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 10, func(i int, gen *core.BlockGen) { + gen.SetCoinbase(common.Address{1}) + }, false /* intermediateHashes */) + if err != nil { + t.Fatalf("generate chain: %v", err) + } + api := NewTraceAPI(NewBaseApi(nil, kvcache.New(kvcache.DefaultCoherentConfig), snapshotsync.NewBlockReader(), false), m.DB, &httpcfg.HttpCfg{}) + // Insert blocks 1 by 1, to tirgget possible "off by one" errors + for i := 0; i < chain.Length; i++ { + if err = m.InsertChain(chain.Slice(i, i+1)); err != nil { + t.Fatalf("inserting chain: %v", err) + } + } + var buf bytes.Buffer + stream := jsoniter.NewStream(jsoniter.ConfigDefault, &buf, 4096) + var fromBlock, toBlock uint64 + fromBlock = 1 + toBlock = 10 + traceReq1 := TraceFilterRequest{ + FromBlock: (*hexutil.Uint64)(&fromBlock), + ToBlock: (*hexutil.Uint64)(&toBlock), + } + if err = api.Filter(context.Background(), traceReq1, stream); err != nil { + t.Fatalf("trace_filter failed: %v", err) + } + assert.Equal(t, []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, blockNumbersFromTraces(t, buf.Bytes())) +} + +func TestFilterAddressIntersection(t *testing.T) { + m := stages.Mock(t) + defer m.DB.Close() + + api := NewTraceAPI(NewBaseApi(nil, kvcache.New(kvcache.DefaultCoherentConfig), snapshotsync.NewBlockReader(), false), m.DB, &httpcfg.HttpCfg{}) + + toAddress1, toAddress2, other := common.Address{1}, common.Address{2}, common.Address{3} + + once := new(sync.Once) + chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 15, func(i int, block *core.BlockGen) { + once.Do(func() { block.SetCoinbase(common.Address{4}) }) + + var rcv common.Address + if i < 5 { + rcv = toAddress1 + } else if i < 10 { + rcv = toAddress2 + } else { + rcv = other + } + + signer := types.LatestSigner(m.ChainConfig) + txn, err := types.SignTx(types.NewTransaction(block.TxNonce(m.Address), rcv, new(uint256.Int), 21000, new(uint256.Int), nil), *signer, m.Key) + if err != nil { + t.Fatal(err) + } + block.AddTx(txn) + }, false /* intermediateHashes */) + require.NoError(t, err, "generate chain") + + err = m.InsertChain(chain) + require.NoError(t, err, "inserting chain") + + fromBlock, toBlock := uint64(1), uint64(15) + t.Run("second", func(t *testing.T) { + stream := jsoniter.ConfigDefault.BorrowStream(nil) + defer jsoniter.ConfigDefault.ReturnStream(stream) + + traceReq1 := TraceFilterRequest{ + FromBlock: (*hexutil.Uint64)(&fromBlock), + ToBlock: (*hexutil.Uint64)(&toBlock), + FromAddress: []*common.Address{&m.Address, &other}, + ToAddress: []*common.Address{&m.Address, &toAddress2}, + Mode: TraceFilterModeIntersection, + } + if err = api.Filter(context.Background(), traceReq1, stream); err != nil { + t.Fatalf("trace_filter failed: %v", err) + } + assert.Equal(t, []int{6, 7, 8, 9, 10}, blockNumbersFromTraces(t, stream.Buffer())) + }) + t.Run("first", func(t *testing.T) { + stream := jsoniter.ConfigDefault.BorrowStream(nil) + defer jsoniter.ConfigDefault.ReturnStream(stream) + + traceReq1 := TraceFilterRequest{ + FromBlock: (*hexutil.Uint64)(&fromBlock), + ToBlock: (*hexutil.Uint64)(&toBlock), + FromAddress: []*common.Address{&m.Address, &other}, + ToAddress: []*common.Address{&toAddress1, &m.Address}, + Mode: TraceFilterModeIntersection, + } + if err = api.Filter(context.Background(), traceReq1, stream); err != nil { + 
t.Fatalf("trace_filter failed: %v", err) + } + assert.Equal(t, []int{1, 2, 3, 4, 5}, blockNumbersFromTraces(t, stream.Buffer())) + }) + t.Run("empty", func(t *testing.T) { + stream := jsoniter.ConfigDefault.BorrowStream(nil) + defer jsoniter.ConfigDefault.ReturnStream(stream) + + traceReq1 := TraceFilterRequest{ + FromBlock: (*hexutil.Uint64)(&fromBlock), + ToBlock: (*hexutil.Uint64)(&toBlock), + ToAddress: []*common.Address{&other}, + FromAddress: []*common.Address{&toAddress2, &toAddress1, &other}, + Mode: TraceFilterModeIntersection, + } + if err = api.Filter(context.Background(), traceReq1, stream); err != nil { + t.Fatalf("trace_filter failed: %v", err) + } + require.Empty(t, blockNumbersFromTraces(t, stream.Buffer())) + }) +} diff --git a/cmd/rpcdaemon22/commands/contracts/build/Poly.abi b/cmd/rpcdaemon22/commands/contracts/build/Poly.abi new file mode 100644 index 00000000000..95e590dcd3c --- /dev/null +++ b/cmd/rpcdaemon22/commands/contracts/build/Poly.abi @@ -0,0 +1 @@ +[{"inputs":[],"stateMutability":"nonpayable","type":"constructor"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"address","name":"d","type":"address"}],"name":"DeployEvent","type":"event"},{"inputs":[{"internalType":"uint256","name":"salt","type":"uint256"}],"name":"deploy","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"salt","type":"uint256"}],"name":"deployAndDestruct","outputs":[],"stateMutability":"nonpayable","type":"function"}] \ No newline at end of file diff --git a/cmd/rpcdaemon22/commands/contracts/build/Poly.bin b/cmd/rpcdaemon22/commands/contracts/build/Poly.bin new file mode 100644 index 00000000000..4223e1ab18a --- /dev/null +++ b/cmd/rpcdaemon22/commands/contracts/build/Poly.bin @@ -0,0 +1 @@ +608060405234801561001057600080fd5b506101d1806100206000396000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c80639debe9811461003b578063a5e387511461005a575b600080fd5b6100586004803603602081101561005157600080fd5b5035610077565b005b6100586004803603602081101561007057600080fd5b50356100fd565b6040805180820190915260138082527260606000534360015360ff60025360036000f360681b60208301908152600091849183f59050600080600080600085620186a0f150604080516001600160a01b038316815290517f68f6a0f063c25c6678c443b9a484086f15ba8f91f60218695d32a5251f2050eb9181900360200190a1505050565b6040805180820190915260138082527260606000534360015360ff60025360036000f360681b60208301908152600091849183f5604080516001600160a01b038316815290519192507f68f6a0f063c25c6678c443b9a484086f15ba8f91f60218695d32a5251f2050eb919081900360200190a150505056fea2646970667358221220c4436dde70fbebb14cf02477e4d8f270620c7f9f54b9b1a2e09b1edcc8c6db6764736f6c637827302e372e352d646576656c6f702e323032302e31322e392b636f6d6d69742e65623737656430380058 \ No newline at end of file diff --git a/cmd/rpcdaemon22/commands/contracts/build/Token.abi b/cmd/rpcdaemon22/commands/contracts/build/Token.abi new file mode 100644 index 00000000000..20efed58391 --- /dev/null +++ b/cmd/rpcdaemon22/commands/contracts/build/Token.abi @@ -0,0 +1 @@ 
+[{"inputs":[{"internalType":"address","name":"_minter","type":"address"}],"stateMutability":"nonpayable","type":"constructor"},{"inputs":[{"internalType":"address","name":"","type":"address"}],"name":"balanceOf","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"_to","type":"address"},{"internalType":"uint256","name":"_value","type":"uint256"}],"name":"mint","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"minter","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"totalSupply","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"_to","type":"address"},{"internalType":"uint256","name":"_value","type":"uint256"}],"name":"transfer","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"nonpayable","type":"function"}] \ No newline at end of file diff --git a/cmd/rpcdaemon22/commands/contracts/build/Token.bin b/cmd/rpcdaemon22/commands/contracts/build/Token.bin new file mode 100644 index 00000000000..803e31eebc2 --- /dev/null +++ b/cmd/rpcdaemon22/commands/contracts/build/Token.bin @@ -0,0 +1 @@ +608060405234801561001057600080fd5b506040516102cd3803806102cd8339818101604052602081101561003357600080fd5b5051600280546001600160a01b0319166001600160a01b0390921691909117905561026a806100636000396000f3fe608060405234801561001057600080fd5b50600436106100575760003560e01c8063075461721461005c57806318160ddd1461008057806340c10f191461009a57806370a08231146100da578063a9059cbb14610100575b600080fd5b61006461012c565b604080516001600160a01b039092168252519081900360200190f35b61008861013b565b60408051918252519081900360200190f35b6100c6600480360360408110156100b057600080fd5b506001600160a01b038135169060200135610141565b604080519115158252519081900360200190f35b610088600480360360208110156100f057600080fd5b50356001600160a01b03166101b1565b6100c66004803603604081101561011657600080fd5b506001600160a01b0381351690602001356101c3565b6002546001600160a01b031681565b60005481565b6002546000906001600160a01b0316331461015b57600080fd5b6001600160a01b03831660009081526001602052604090205482810181111561018357600080fd5b6001600160a01b03841660009081526001602081905260408220928501909255805484019055905092915050565b60016020526000908152604090205481565b33600090815260016020526040808220546001600160a01b038516835290822054838210156101f157600080fd5b80848201101561020057600080fd5b336000908152600160208190526040808320948790039094556001600160a01b03969096168152919091209201909155509056fea2646970667358221220db4c7b3ba8d073604af68ade92006926639bb4003f2a18929524d580777155fb64736f6c63430007020033 \ No newline at end of file diff --git a/cmd/rpcdaemon22/commands/contracts/gen.go b/cmd/rpcdaemon22/commands/contracts/gen.go new file mode 100644 index 00000000000..96e2eff812c --- /dev/null +++ b/cmd/rpcdaemon22/commands/contracts/gen.go @@ -0,0 +1,4 @@ +package contracts + +//go:generate solc --allow-paths ., --abi --bin --overwrite --optimize -o build token.sol +//go:generate abigen -abi build/Token.abi -bin build/Token.bin -pkg contracts -type token -out ./gen_token.go diff --git a/cmd/rpcdaemon22/commands/contracts/gen_poly.go b/cmd/rpcdaemon22/commands/contracts/gen_poly.go new file mode 100644 index 00000000000..e4bfb1f2997 --- /dev/null +++ b/cmd/rpcdaemon22/commands/contracts/gen_poly.go @@ -0,0 
+1,364 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package contracts + +import ( + "math/big" + "strings" + + ethereum "github.com/ledgerwatch/erigon" + "github.com/ledgerwatch/erigon/accounts/abi" + "github.com/ledgerwatch/erigon/accounts/abi/bind" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/event" +) + +// Reference imports to suppress errors if they are not otherwise used. +var ( + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription +) + +// PolyABI is the input ABI used to generate the binding from. +const PolyABI = "[{\"inputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"d\",\"type\":\"address\"}],\"name\":\"DeployEvent\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"salt\",\"type\":\"uint256\"}],\"name\":\"deploy\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"salt\",\"type\":\"uint256\"}],\"name\":\"deployAndDestruct\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]" + +// PolyBin is the compiled bytecode used for deploying new contracts. +var PolyBin = "0x608060405234801561001057600080fd5b506101d1806100206000396000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c80639debe9811461003b578063a5e387511461005a575b600080fd5b6100586004803603602081101561005157600080fd5b5035610077565b005b6100586004803603602081101561007057600080fd5b50356100fd565b6040805180820190915260138082527260606000534360015360ff60025360036000f360681b60208301908152600091849183f59050600080600080600085620186a0f150604080516001600160a01b038316815290517f68f6a0f063c25c6678c443b9a484086f15ba8f91f60218695d32a5251f2050eb9181900360200190a1505050565b6040805180820190915260138082527260606000534360015360ff60025360036000f360681b60208301908152600091849183f5604080516001600160a01b038316815290519192507f68f6a0f063c25c6678c443b9a484086f15ba8f91f60218695d32a5251f2050eb919081900360200190a150505056fea2646970667358221220c4436dde70fbebb14cf02477e4d8f270620c7f9f54b9b1a2e09b1edcc8c6db6764736f6c637827302e372e352d646576656c6f702e323032302e31322e392b636f6d6d69742e65623737656430380058" + +// DeployPoly deploys a new Ethereum contract, binding an instance of Poly to it. +func DeployPoly(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, types.Transaction, *Poly, error) { + parsed, err := abi.JSON(strings.NewReader(PolyABI)) + if err != nil { + return common.Address{}, nil, nil, err + } + + address, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(PolyBin), backend) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &Poly{PolyCaller: PolyCaller{contract: contract}, PolyTransactor: PolyTransactor{contract: contract}, PolyFilterer: PolyFilterer{contract: contract}}, nil +} + +// Poly is an auto generated Go binding around an Ethereum contract. +type Poly struct { + PolyCaller // Read-only binding to the contract + PolyTransactor // Write-only binding to the contract + PolyFilterer // Log filterer for contract events +} + +// PolyCaller is an auto generated read-only Go binding around an Ethereum contract. 
+type PolyCaller struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// PolyTransactor is an auto generated write-only Go binding around an Ethereum contract. +type PolyTransactor struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// PolyFilterer is an auto generated log filtering Go binding around an Ethereum contract events. +type PolyFilterer struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// PolySession is an auto generated Go binding around an Ethereum contract, +// with pre-set call and transact options. +type PolySession struct { + Contract *Poly // Generic contract binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// PolyCallerSession is an auto generated read-only Go binding around an Ethereum contract, +// with pre-set call options. +type PolyCallerSession struct { + Contract *PolyCaller // Generic contract caller binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session +} + +// PolyTransactorSession is an auto generated write-only Go binding around an Ethereum contract, +// with pre-set transact options. +type PolyTransactorSession struct { + Contract *PolyTransactor // Generic contract transactor binding to set the session for + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// PolyRaw is an auto generated low-level Go binding around an Ethereum contract. +type PolyRaw struct { + Contract *Poly // Generic contract binding to access the raw methods on +} + +// PolyCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract. +type PolyCallerRaw struct { + Contract *PolyCaller // Generic read-only contract binding to access the raw methods on +} + +// PolyTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract. +type PolyTransactorRaw struct { + Contract *PolyTransactor // Generic write-only contract binding to access the raw methods on +} + +// NewPoly creates a new instance of Poly, bound to a specific deployed contract. +func NewPoly(address common.Address, backend bind.ContractBackend) (*Poly, error) { + contract, err := bindPoly(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &Poly{PolyCaller: PolyCaller{contract: contract}, PolyTransactor: PolyTransactor{contract: contract}, PolyFilterer: PolyFilterer{contract: contract}}, nil +} + +// NewPolyCaller creates a new read-only instance of Poly, bound to a specific deployed contract. +func NewPolyCaller(address common.Address, caller bind.ContractCaller) (*PolyCaller, error) { + contract, err := bindPoly(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &PolyCaller{contract: contract}, nil +} + +// NewPolyTransactor creates a new write-only instance of Poly, bound to a specific deployed contract. +func NewPolyTransactor(address common.Address, transactor bind.ContractTransactor) (*PolyTransactor, error) { + contract, err := bindPoly(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &PolyTransactor{contract: contract}, nil +} + +// NewPolyFilterer creates a new log filterer instance of Poly, bound to a specific deployed contract. 
+func NewPolyFilterer(address common.Address, filterer bind.ContractFilterer) (*PolyFilterer, error) { + contract, err := bindPoly(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &PolyFilterer{contract: contract}, nil +} + +// bindPoly binds a generic wrapper to an already deployed contract. +func bindPoly(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := abi.JSON(strings.NewReader(PolyABI)) + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_Poly *PolyRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _Poly.Contract.PolyCaller.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_Poly *PolyRaw) Transfer(opts *bind.TransactOpts) (types.Transaction, error) { + return _Poly.Contract.PolyTransactor.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_Poly *PolyRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (types.Transaction, error) { + return _Poly.Contract.PolyTransactor.contract.Transact(opts, method, params...) +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_Poly *PolyCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _Poly.Contract.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_Poly *PolyTransactorRaw) Transfer(opts *bind.TransactOpts) (types.Transaction, error) { + return _Poly.Contract.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_Poly *PolyTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (types.Transaction, error) { + return _Poly.Contract.contract.Transact(opts, method, params...) +} + +// Deploy is a paid mutator transaction binding the contract method 0xa5e38751. +// +// Solidity: function deploy(uint256 salt) returns() +func (_Poly *PolyTransactor) Deploy(opts *bind.TransactOpts, salt *big.Int) (types.Transaction, error) { + return _Poly.contract.Transact(opts, "deploy", salt) +} + +// Deploy is a paid mutator transaction binding the contract method 0xa5e38751. +// +// Solidity: function deploy(uint256 salt) returns() +func (_Poly *PolySession) Deploy(salt *big.Int) (types.Transaction, error) { + return _Poly.Contract.Deploy(&_Poly.TransactOpts, salt) +} + +// Deploy is a paid mutator transaction binding the contract method 0xa5e38751. 
+// +// Solidity: function deploy(uint256 salt) returns() +func (_Poly *PolyTransactorSession) Deploy(salt *big.Int) (types.Transaction, error) { + return _Poly.Contract.Deploy(&_Poly.TransactOpts, salt) +} + +// DeployAndDestruct is a paid mutator transaction binding the contract method 0x9debe981. +// +// Solidity: function deployAndDestruct(uint256 salt) returns() +func (_Poly *PolyTransactor) DeployAndDestruct(opts *bind.TransactOpts, salt *big.Int) (types.Transaction, error) { + return _Poly.contract.Transact(opts, "deployAndDestruct", salt) +} + +// DeployAndDestruct is a paid mutator transaction binding the contract method 0x9debe981. +// +// Solidity: function deployAndDestruct(uint256 salt) returns() +func (_Poly *PolySession) DeployAndDestruct(salt *big.Int) (types.Transaction, error) { + return _Poly.Contract.DeployAndDestruct(&_Poly.TransactOpts, salt) +} + +// DeployAndDestruct is a paid mutator transaction binding the contract method 0x9debe981. +// +// Solidity: function deployAndDestruct(uint256 salt) returns() +func (_Poly *PolyTransactorSession) DeployAndDestruct(salt *big.Int) (types.Transaction, error) { + return _Poly.Contract.DeployAndDestruct(&_Poly.TransactOpts, salt) +} + +// PolyDeployEventIterator is returned from FilterDeployEvent and is used to iterate over the raw logs and unpacked data for DeployEvent events raised by the Poly contract. +type PolyDeployEventIterator struct { + Event *PolyDeployEvent // Event containing the contract specifics and raw log + + contract *bind.BoundContract // Generic contract to use for unpacking event data + event string // Event name to use for unpacking event data + + logs chan types.Log // Log channel receiving the found contract events + sub ethereum.Subscription // Subscription for errors, completion and termination + done bool // Whether the subscription completed delivering logs + fail error // Occurred error to stop iteration +} + +// Next advances the iterator to the subsequent event, returning whether there +// are any more events found. In case of a retrieval or parsing error, false is +// returned and Error() can be queried for the exact failure. +func (it *PolyDeployEventIterator) Next() bool { + // If the iterator failed, stop iterating + if it.fail != nil { + return false + } + // If the iterator completed, deliver directly whatever's available + if it.done { + select { + case log := <-it.logs: + it.Event = new(PolyDeployEvent) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + // Iterator still in progress, wait for either a data or an error event + select { + case log := <-it.logs: + it.Event = new(PolyDeployEvent) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +// Error returns any retrieval or parsing error occurred during filtering. +func (it *PolyDeployEventIterator) Error() error { + return it.fail +} + +// Close terminates the iteration process, releasing any pending underlying +// resources. +func (it *PolyDeployEventIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +// PolyDeployEvent represents a DeployEvent event raised by the Poly contract. 
+type PolyDeployEvent struct { + D common.Address + Raw types.Log // Blockchain specific contextual infos +} + +// FilterDeployEvent is a free log retrieval operation binding the contract event 0x68f6a0f063c25c6678c443b9a484086f15ba8f91f60218695d32a5251f2050eb. +// +// Solidity: event DeployEvent(address d) +func (_Poly *PolyFilterer) FilterDeployEvent(opts *bind.FilterOpts) (*PolyDeployEventIterator, error) { + + logs, sub, err := _Poly.contract.FilterLogs(opts, "DeployEvent") + if err != nil { + return nil, err + } + return &PolyDeployEventIterator{contract: _Poly.contract, event: "DeployEvent", logs: logs, sub: sub}, nil +} + +// WatchDeployEvent is a free log subscription operation binding the contract event 0x68f6a0f063c25c6678c443b9a484086f15ba8f91f60218695d32a5251f2050eb. +// +// Solidity: event DeployEvent(address d) +func (_Poly *PolyFilterer) WatchDeployEvent(opts *bind.WatchOpts, sink chan<- *PolyDeployEvent) (event.Subscription, error) { + + logs, sub, err := _Poly.contract.WatchLogs(opts, "DeployEvent") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + // New log arrived, parse the event and forward to the user + event := new(PolyDeployEvent) + if err := _Poly.contract.UnpackLog(event, "DeployEvent", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +// ParseDeployEvent is a log parse operation binding the contract event 0x68f6a0f063c25c6678c443b9a484086f15ba8f91f60218695d32a5251f2050eb. +// +// Solidity: event DeployEvent(address d) +func (_Poly *PolyFilterer) ParseDeployEvent(log types.Log) (*PolyDeployEvent, error) { + event := new(PolyDeployEvent) + if err := _Poly.contract.UnpackLog(event, "DeployEvent", log); err != nil { + return nil, err + } + return event, nil +} diff --git a/cmd/rpcdaemon22/commands/contracts/gen_token.go b/cmd/rpcdaemon22/commands/contracts/gen_token.go new file mode 100644 index 00000000000..4d276e0b56c --- /dev/null +++ b/cmd/rpcdaemon22/commands/contracts/gen_token.go @@ -0,0 +1,324 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package contracts + +import ( + "math/big" + "strings" + + ethereum "github.com/ledgerwatch/erigon" + "github.com/ledgerwatch/erigon/accounts/abi" + "github.com/ledgerwatch/erigon/accounts/abi/bind" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/event" +) + +// Reference imports to suppress errors if they are not otherwise used. +var ( + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription +) + +// TokenABI is the input ABI used to generate the binding from. 
+const TokenABI = "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_minter\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"name\":\"balanceOf\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_value\",\"type\":\"uint256\"}],\"name\":\"mint\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"minter\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"totalSupply\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_to\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_value\",\"type\":\"uint256\"}],\"name\":\"transfer\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]" + +// TokenBin is the compiled bytecode used for deploying new contracts. +var TokenBin = "0x608060405234801561001057600080fd5b506040516102cd3803806102cd8339818101604052602081101561003357600080fd5b5051600280546001600160a01b0319166001600160a01b0390921691909117905561026a806100636000396000f3fe608060405234801561001057600080fd5b50600436106100575760003560e01c8063075461721461005c57806318160ddd1461008057806340c10f191461009a57806370a08231146100da578063a9059cbb14610100575b600080fd5b61006461012c565b604080516001600160a01b039092168252519081900360200190f35b61008861013b565b60408051918252519081900360200190f35b6100c6600480360360408110156100b057600080fd5b506001600160a01b038135169060200135610141565b604080519115158252519081900360200190f35b610088600480360360208110156100f057600080fd5b50356001600160a01b03166101b1565b6100c66004803603604081101561011657600080fd5b506001600160a01b0381351690602001356101c3565b6002546001600160a01b031681565b60005481565b6002546000906001600160a01b0316331461015b57600080fd5b6001600160a01b03831660009081526001602052604090205482810181111561018357600080fd5b6001600160a01b03841660009081526001602081905260408220928501909255805484019055905092915050565b60016020526000908152604090205481565b33600090815260016020526040808220546001600160a01b038516835290822054838210156101f157600080fd5b80848201101561020057600080fd5b336000908152600160208190526040808320948790039094556001600160a01b03969096168152919091209201909155509056fea2646970667358221220db4c7b3ba8d073604af68ade92006926639bb4003f2a18929524d580777155fb64736f6c63430007020033" + +// DeployToken deploys a new Ethereum contract, binding an instance of Token to it. 
+func DeployToken(auth *bind.TransactOpts, backend bind.ContractBackend, _minter common.Address) (common.Address, types.Transaction, *Token, error) { + parsed, err := abi.JSON(strings.NewReader(TokenABI)) + if err != nil { + return common.Address{}, nil, nil, err + } + + address, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(TokenBin), backend, _minter) + if err != nil { + return common.Address{}, nil, nil, err + } + return address, tx, &Token{TokenCaller: TokenCaller{contract: contract}, TokenTransactor: TokenTransactor{contract: contract}, TokenFilterer: TokenFilterer{contract: contract}}, nil +} + +// Token is an auto generated Go binding around an Ethereum contract. +type Token struct { + TokenCaller // Read-only binding to the contract + TokenTransactor // Write-only binding to the contract + TokenFilterer // Log filterer for contract events +} + +// TokenCaller is an auto generated read-only Go binding around an Ethereum contract. +type TokenCaller struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// TokenTransactor is an auto generated write-only Go binding around an Ethereum contract. +type TokenTransactor struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// TokenFilterer is an auto generated log filtering Go binding around an Ethereum contract events. +type TokenFilterer struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// TokenSession is an auto generated Go binding around an Ethereum contract, +// with pre-set call and transact options. +type TokenSession struct { + Contract *Token // Generic contract binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// TokenCallerSession is an auto generated read-only Go binding around an Ethereum contract, +// with pre-set call options. +type TokenCallerSession struct { + Contract *TokenCaller // Generic contract caller binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session +} + +// TokenTransactorSession is an auto generated write-only Go binding around an Ethereum contract, +// with pre-set transact options. +type TokenTransactorSession struct { + Contract *TokenTransactor // Generic contract transactor binding to set the session for + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// TokenRaw is an auto generated low-level Go binding around an Ethereum contract. +type TokenRaw struct { + Contract *Token // Generic contract binding to access the raw methods on +} + +// TokenCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract. +type TokenCallerRaw struct { + Contract *TokenCaller // Generic read-only contract binding to access the raw methods on +} + +// TokenTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract. +type TokenTransactorRaw struct { + Contract *TokenTransactor // Generic write-only contract binding to access the raw methods on +} + +// NewToken creates a new instance of Token, bound to a specific deployed contract. 
+func NewToken(address common.Address, backend bind.ContractBackend) (*Token, error) { + contract, err := bindToken(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &Token{TokenCaller: TokenCaller{contract: contract}, TokenTransactor: TokenTransactor{contract: contract}, TokenFilterer: TokenFilterer{contract: contract}}, nil +} + +// NewTokenCaller creates a new read-only instance of Token, bound to a specific deployed contract. +func NewTokenCaller(address common.Address, caller bind.ContractCaller) (*TokenCaller, error) { + contract, err := bindToken(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &TokenCaller{contract: contract}, nil +} + +// NewTokenTransactor creates a new write-only instance of Token, bound to a specific deployed contract. +func NewTokenTransactor(address common.Address, transactor bind.ContractTransactor) (*TokenTransactor, error) { + contract, err := bindToken(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &TokenTransactor{contract: contract}, nil +} + +// NewTokenFilterer creates a new log filterer instance of Token, bound to a specific deployed contract. +func NewTokenFilterer(address common.Address, filterer bind.ContractFilterer) (*TokenFilterer, error) { + contract, err := bindToken(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &TokenFilterer{contract: contract}, nil +} + +// bindToken binds a generic wrapper to an already deployed contract. +func bindToken(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := abi.JSON(strings.NewReader(TokenABI)) + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_Token *TokenRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _Token.Contract.TokenCaller.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_Token *TokenRaw) Transfer(opts *bind.TransactOpts) (types.Transaction, error) { + return _Token.Contract.TokenTransactor.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_Token *TokenRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (types.Transaction, error) { + return _Token.Contract.TokenTransactor.contract.Transact(opts, method, params...) +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_Token *TokenCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _Token.Contract.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. 
+func (_Token *TokenTransactorRaw) Transfer(opts *bind.TransactOpts) (types.Transaction, error) { + return _Token.Contract.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_Token *TokenTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (types.Transaction, error) { + return _Token.Contract.contract.Transact(opts, method, params...) +} + +// BalanceOf is a free data retrieval call binding the contract method 0x70a08231. +// +// Solidity: function balanceOf(address ) view returns(uint256) +func (_Token *TokenCaller) BalanceOf(opts *bind.CallOpts, arg0 common.Address) (*big.Int, error) { + var out []interface{} + err := _Token.contract.Call(opts, &out, "balanceOf", arg0) + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +// BalanceOf is a free data retrieval call binding the contract method 0x70a08231. +// +// Solidity: function balanceOf(address ) view returns(uint256) +func (_Token *TokenSession) BalanceOf(arg0 common.Address) (*big.Int, error) { + return _Token.Contract.BalanceOf(&_Token.CallOpts, arg0) +} + +// BalanceOf is a free data retrieval call binding the contract method 0x70a08231. +// +// Solidity: function balanceOf(address ) view returns(uint256) +func (_Token *TokenCallerSession) BalanceOf(arg0 common.Address) (*big.Int, error) { + return _Token.Contract.BalanceOf(&_Token.CallOpts, arg0) +} + +// Minter is a free data retrieval call binding the contract method 0x07546172. +// +// Solidity: function minter() view returns(address) +func (_Token *TokenCaller) Minter(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _Token.contract.Call(opts, &out, "minter") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +// Minter is a free data retrieval call binding the contract method 0x07546172. +// +// Solidity: function minter() view returns(address) +func (_Token *TokenSession) Minter() (common.Address, error) { + return _Token.Contract.Minter(&_Token.CallOpts) +} + +// Minter is a free data retrieval call binding the contract method 0x07546172. +// +// Solidity: function minter() view returns(address) +func (_Token *TokenCallerSession) Minter() (common.Address, error) { + return _Token.Contract.Minter(&_Token.CallOpts) +} + +// TotalSupply is a free data retrieval call binding the contract method 0x18160ddd. +// +// Solidity: function totalSupply() view returns(uint256) +func (_Token *TokenCaller) TotalSupply(opts *bind.CallOpts) (*big.Int, error) { + var out []interface{} + err := _Token.contract.Call(opts, &out, "totalSupply") + + if err != nil { + return *new(*big.Int), err + } + + out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) + + return out0, err + +} + +// TotalSupply is a free data retrieval call binding the contract method 0x18160ddd. +// +// Solidity: function totalSupply() view returns(uint256) +func (_Token *TokenSession) TotalSupply() (*big.Int, error) { + return _Token.Contract.TotalSupply(&_Token.CallOpts) +} + +// TotalSupply is a free data retrieval call binding the contract method 0x18160ddd. 
+// +// Solidity: function totalSupply() view returns(uint256) +func (_Token *TokenCallerSession) TotalSupply() (*big.Int, error) { + return _Token.Contract.TotalSupply(&_Token.CallOpts) +} + +// Mint is a paid mutator transaction binding the contract method 0x40c10f19. +// +// Solidity: function mint(address _to, uint256 _value) returns(bool) +func (_Token *TokenTransactor) Mint(opts *bind.TransactOpts, _to common.Address, _value *big.Int) (types.Transaction, error) { + return _Token.contract.Transact(opts, "mint", _to, _value) +} + +// Mint is a paid mutator transaction binding the contract method 0x40c10f19. +// +// Solidity: function mint(address _to, uint256 _value) returns(bool) +func (_Token *TokenSession) Mint(_to common.Address, _value *big.Int) (types.Transaction, error) { + return _Token.Contract.Mint(&_Token.TransactOpts, _to, _value) +} + +// Mint is a paid mutator transaction binding the contract method 0x40c10f19. +// +// Solidity: function mint(address _to, uint256 _value) returns(bool) +func (_Token *TokenTransactorSession) Mint(_to common.Address, _value *big.Int) (types.Transaction, error) { + return _Token.Contract.Mint(&_Token.TransactOpts, _to, _value) +} + +// Transfer is a paid mutator transaction binding the contract method 0xa9059cbb. +// +// Solidity: function transfer(address _to, uint256 _value) returns(bool) +func (_Token *TokenTransactor) Transfer(opts *bind.TransactOpts, _to common.Address, _value *big.Int) (types.Transaction, error) { + return _Token.contract.Transact(opts, "transfer", _to, _value) +} + +// Transfer is a paid mutator transaction binding the contract method 0xa9059cbb. +// +// Solidity: function transfer(address _to, uint256 _value) returns(bool) +func (_Token *TokenSession) Transfer(_to common.Address, _value *big.Int) (types.Transaction, error) { + return _Token.Contract.Transfer(&_Token.TransactOpts, _to, _value) +} + +// Transfer is a paid mutator transaction binding the contract method 0xa9059cbb. +// +// Solidity: function transfer(address _to, uint256 _value) returns(bool) +func (_Token *TokenTransactorSession) Transfer(_to common.Address, _value *big.Int) (types.Transaction, error) { + return _Token.Contract.Transfer(&_Token.TransactOpts, _to, _value) +} diff --git a/cmd/rpcdaemon22/commands/contracts/poly.sol b/cmd/rpcdaemon22/commands/contracts/poly.sol new file mode 100644 index 00000000000..fab4fbc068b --- /dev/null +++ b/cmd/rpcdaemon22/commands/contracts/poly.sol @@ -0,0 +1,36 @@ +pragma solidity >=0.5.0; + +// solc --allow-paths ., --abi --bin --overwrite --optimize -o cmd/rpcdaemon/commands/contracts/build cmd/rpcdaemon/commands/contracts/poly.sol +// ./build/bin/abigen -abi cmd/rpcdaemon/commands/contracts/build/Poly.abi -bin cmd/rpcdaemon/commands/contracts/build/Poly.bin -pkg contracts -type poly -out cmd/rpcdaemon/commands/contracts/gen_poly.go +contract Poly { + + constructor() { + } + + event DeployEvent (address d); + + /* Deploys self-destructing contract with given salt and emits DeployEvent with the address of the created contract */ + function deploy(uint256 salt) public { + // PUSH1 0x60; PUSH1 0; MSTORE8; NUMBER; PUSH1 1; MSTORE8; PUSH1 0xff; PUSH1 2; MSTORE8; PUSH1 3; PUSH1 0; RETURN; + // Returns code 60ff, which is PUSH1 ; SELFDESTRUCT. 
Value is determined by the block number where deploy function is called + bytes memory init_code = hex"60606000534360015360ff60025360036000f3"; + address payable d; + assembly{ + d := create2(0, add(init_code, 32), mload(init_code), salt) + } + emit DeployEvent(d); + } + + /* Deploys self-destructing contract with given salt and emits DeployEvent with the address of the created contract */ + function deployAndDestruct(uint256 salt) public { + // PUSH1 0x60; PUSH1 0; MSTORE8; NUMBER; PUSH1 1; MSTORE8; PUSH1 0xff; PUSH1 2; MSTORE8; PUSH1 3; PUSH1 0; RETURN; + // Returns code 60ff, which is PUSH1 ; SELFDESTRUCT. Value is determined by the block number where deploy function is called + bytes memory init_code = hex"60606000534360015360ff60025360036000f3"; + address payable d; + assembly{ + d := create2(0, add(init_code, 32), mload(init_code), salt) + pop(call(100000, d, 0, 0, 0, 0, 0)) + } + emit DeployEvent(d); + } +} diff --git a/cmd/rpcdaemon22/commands/contracts/token.sol b/cmd/rpcdaemon22/commands/contracts/token.sol new file mode 100644 index 00000000000..755bdfddd4f --- /dev/null +++ b/cmd/rpcdaemon22/commands/contracts/token.sol @@ -0,0 +1,39 @@ +pragma solidity >=0.6.0; + +// solc --allow-paths ., --abi --bin --overwrite --optimize -o cmd/rpcdaemon/commands/contracts/build cmd/pics/contracts/token.sol +// ./build/bin/abigen -abi cmd/rpcdaemon/commands/contracts/build/Token.abi -bin cmd/rpcdaemon/commands/contracts/build/Token.bin -pkg contracts -type token -out cmd/rpcdaemon/commands/contracts/gen_token.go +contract Token { + uint256 public totalSupply; + mapping(address => uint256) public balanceOf; + address public minter; + + constructor(address _minter) public { + minter = _minter; + } + + /* Send tokens */ + function transfer(address _to, uint256 _value) public returns (bool) { + uint256 fromBalance = balanceOf[msg.sender]; + uint256 toBalance = balanceOf[_to]; + require(fromBalance >= _value); + // Check if the sender has enough + require(toBalance + _value >= toBalance); + // Check for overflows + balanceOf[msg.sender] = fromBalance - _value; + // Subtract from the sender + balanceOf[_to] = toBalance + _value; + return true; + } + + /* Allows the owner to mint more tokens */ + function mint(address _to, uint256 _value) public returns (bool) { + require(msg.sender == minter); + // Only the minter is allowed to mint + uint256 toBalance = balanceOf[_to]; + require(toBalance + _value >= toBalance); + // Check for overflows + balanceOf[_to] = toBalance + _value; + totalSupply += _value; + return true; + } +} diff --git a/cmd/rpcdaemon22/commands/corner_cases_support_test.go b/cmd/rpcdaemon22/commands/corner_cases_support_test.go new file mode 100644 index 00000000000..762560c7cb9 --- /dev/null +++ b/cmd/rpcdaemon22/commands/corner_cases_support_test.go @@ -0,0 +1,62 @@ +package commands + +import ( + "context" + "testing" + + "github.com/ledgerwatch/erigon-lib/kv/kvcache" + "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/rpcdaemontest" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/turbo/snapshotsync" + "github.com/stretchr/testify/require" +) + +// TestNotFoundMustReturnNil - next methods - when record not found in db - must return nil instead of error +// see https://github.com/ledgerwatch/erigon/issues/1645 +func TestNotFoundMustReturnNil(t *testing.T) { + require := require.New(t) + db := rpcdaemontest.CreateTestKV(t) + defer db.Close() + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + api := NewEthAPI( + 
NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), + db, nil, nil, nil, 5000000) + ctx := context.Background() + + a, err := api.GetTransactionByBlockNumberAndIndex(ctx, 10_000, 1) + require.Nil(a) + require.Nil(err) + + b, err := api.GetTransactionByBlockHashAndIndex(ctx, common.Hash{}, 1) + require.Nil(b) + require.Nil(err) + + c, err := api.GetTransactionByBlockNumberAndIndex(ctx, 10_000, 1) + require.Nil(c) + require.Nil(err) + + d, err := api.GetTransactionReceipt(ctx, common.Hash{}) + require.Nil(d) + require.Nil(err) + + e, err := api.GetBlockByHash(ctx, rpc.BlockNumberOrHashWithHash(common.Hash{}, true), false) + require.Nil(e) + require.Nil(err) + + f, err := api.GetBlockByNumber(ctx, 10_000, false) + require.Nil(f) + require.Nil(err) + + g, err := api.GetUncleByBlockHashAndIndex(ctx, common.Hash{}, 1) + require.Nil(g) + require.Nil(err) + + h, err := api.GetUncleByBlockNumberAndIndex(ctx, 10_000, 1) + require.Nil(h) + require.Nil(err) + + j, err := api.GetBlockTransactionCountByNumber(ctx, 10_000) + require.Nil(j) + require.Nil(err) +} diff --git a/cmd/rpcdaemon22/commands/daemon.go b/cmd/rpcdaemon22/commands/daemon.go new file mode 100644 index 00000000000..1fe8e9e7f55 --- /dev/null +++ b/cmd/rpcdaemon22/commands/daemon.go @@ -0,0 +1,134 @@ +package commands + +import ( + "github.com/ledgerwatch/erigon-lib/gointerfaces/starknet" + "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/kvcache" + "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/cli/httpcfg" + "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/turbo/rpchelper" + "github.com/ledgerwatch/erigon/turbo/services" +) + +// APIList describes the list of available RPC apis +func APIList(db kv.RoDB, borDb kv.RoDB, eth rpchelper.ApiBackend, txPool txpool.TxpoolClient, mining txpool.MiningClient, + starknet starknet.CAIROVMClient, filters *rpchelper.Filters, stateCache kvcache.Cache, + blockReader services.FullBlockReader, cfg httpcfg.HttpCfg) (list []rpc.API) { + + base := NewBaseApi(filters, stateCache, blockReader, cfg.WithDatadir) + if cfg.TevmEnabled { + base.EnableTevmExperiment() + } + ethImpl := NewEthAPI(base, db, eth, txPool, mining, cfg.Gascap) + erigonImpl := NewErigonAPI(base, db, eth) + starknetImpl := NewStarknetAPI(base, db, starknet, txPool) + txpoolImpl := NewTxPoolAPI(base, db, txPool) + netImpl := NewNetAPIImpl(eth) + debugImpl := NewPrivateDebugAPI(base, db, cfg.Gascap) + traceImpl := NewTraceAPI(base, db, &cfg) + web3Impl := NewWeb3APIImpl(eth) + dbImpl := NewDBAPIImpl() /* deprecated */ + engineImpl := NewEngineAPI(base, db, eth) + adminImpl := NewAdminAPI(eth) + parityImpl := NewParityAPIImpl(db) + borImpl := NewBorAPI(base, db, borDb) // bor (consensus) specific + + for _, enabledAPI := range cfg.API { + switch enabledAPI { + case "eth": + list = append(list, rpc.API{ + Namespace: "eth", + Public: true, + Service: EthAPI(ethImpl), + Version: "1.0", + }) + case "debug": + list = append(list, rpc.API{ + Namespace: "debug", + Public: true, + Service: PrivateDebugAPI(debugImpl), + Version: "1.0", + }) + case "net": + list = append(list, rpc.API{ + Namespace: "net", + Public: true, + Service: NetAPI(netImpl), + Version: "1.0", + }) + case "txpool": + list = append(list, rpc.API{ + Namespace: "txpool", + Public: true, + Service: TxPoolAPI(txpoolImpl), + Version: "1.0", + }) + case "web3": + list = append(list, rpc.API{ + Namespace: "web3", + Public: true, + Service: Web3API(web3Impl), + 
Version: "1.0", + }) + case "trace": + list = append(list, rpc.API{ + Namespace: "trace", + Public: true, + Service: TraceAPI(traceImpl), + Version: "1.0", + }) + case "db": /* Deprecated */ + list = append(list, rpc.API{ + Namespace: "db", + Public: true, + Service: DBAPI(dbImpl), + Version: "1.0", + }) + case "erigon": + list = append(list, rpc.API{ + Namespace: "erigon", + Public: true, + Service: ErigonAPI(erigonImpl), + Version: "1.0", + }) + case "starknet": + list = append(list, rpc.API{ + Namespace: "starknet", + Public: true, + Service: StarknetAPI(starknetImpl), + Version: "1.0", + }) + case "engine": + list = append(list, rpc.API{ + Namespace: "engine", + Public: true, + Service: EngineAPI(engineImpl), + Version: "1.0", + }) + case "bor": + list = append(list, rpc.API{ + Namespace: "bor", + Public: true, + Service: BorAPI(borImpl), + Version: "1.0", + }) + case "admin": + list = append(list, rpc.API{ + Namespace: "admin", + Public: false, + Service: AdminAPI(adminImpl), + Version: "1.0", + }) + case "parity": + list = append(list, rpc.API{ + Namespace: "parity", + Public: false, + Service: ParityAPI(parityImpl), + Version: "1.0", + }) + } + } + + return list +} diff --git a/cmd/rpcdaemon22/commands/db_api_deprecated.go b/cmd/rpcdaemon22/commands/db_api_deprecated.go new file mode 100644 index 00000000000..886987e05c1 --- /dev/null +++ b/cmd/rpcdaemon22/commands/db_api_deprecated.go @@ -0,0 +1,52 @@ +package commands + +import ( + "context" + "fmt" + + "github.com/ledgerwatch/erigon/common/hexutil" +) + +// DBAPI the interface for the db_ RPC commands (deprecated) +type DBAPI interface { + GetString(_ context.Context, _ string, _ string) (string, error) + PutString(_ context.Context, _ string, _ string, _ string) (bool, error) + GetHex(_ context.Context, _ string, _ string) (hexutil.Bytes, error) + PutHex(_ context.Context, _ string, _ string, _ hexutil.Bytes) (bool, error) +} + +// DBAPIImpl data structure to store things needed for db_ commands +type DBAPIImpl struct { + unused uint64 +} + +// NewDBAPIImpl returns NetAPIImplImpl instance +func NewDBAPIImpl() *DBAPIImpl { + return &DBAPIImpl{ + unused: uint64(0), + } +} + +// GetString implements db_getString. Returns string from the local database. +// Deprecated: This function will be removed in the future. +func (api *DBAPIImpl) GetString(_ context.Context, _ string, _ string) (string, error) { + return "", fmt.Errorf(NotAvailableDeprecated, "db_getString") +} + +// PutString implements db_putString. Stores a string in the local database. +// Deprecated: This function will be removed in the future. +func (api *DBAPIImpl) PutString(_ context.Context, _ string, _ string, _ string) (bool, error) { + return false, fmt.Errorf(NotAvailableDeprecated, "db_putString") +} + +// GetHex implements db_getHex. Returns binary data from the local database. +// Deprecated: This function will be removed in the future. +func (api *DBAPIImpl) GetHex(_ context.Context, _ string, _ string) (hexutil.Bytes, error) { + return hexutil.Bytes(""), fmt.Errorf(NotAvailableDeprecated, "db_getHex") +} + +// PutHex implements db_putHex. Stores binary data in the local database. +// Deprecated: This function will be removed in the future. 
+func (api *DBAPIImpl) PutHex(_ context.Context, _ string, _ string, _ hexutil.Bytes) (bool, error) { + return false, fmt.Errorf(NotAvailableDeprecated, "db_putHex") +} diff --git a/cmd/rpcdaemon22/commands/debug_api.go b/cmd/rpcdaemon22/commands/debug_api.go new file mode 100644 index 00000000000..f692da238d5 --- /dev/null +++ b/cmd/rpcdaemon22/commands/debug_api.go @@ -0,0 +1,273 @@ +package commands + +import ( + "context" + "fmt" + + jsoniter "github.com/json-iterator/go" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/changeset" + "github.com/ledgerwatch/erigon/common/hexutil" + "github.com/ledgerwatch/erigon/consensus/ethash" + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/state" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/eth/stagedsync/stages" + "github.com/ledgerwatch/erigon/eth/tracers" + "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/internal/ethapi" + "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/turbo/transactions" + "github.com/ledgerwatch/log/v3" +) + +// AccountRangeMaxResults is the maximum number of results to be returned per call +const AccountRangeMaxResults = 256 + +// PrivateDebugAPI Exposed RPC endpoints for debugging use +type PrivateDebugAPI interface { + StorageRangeAt(ctx context.Context, blockHash common.Hash, txIndex uint64, contractAddress common.Address, keyStart hexutil.Bytes, maxResult int) (StorageRangeResult, error) + TraceTransaction(ctx context.Context, hash common.Hash, config *tracers.TraceConfig, stream *jsoniter.Stream) error + TraceBlockByHash(ctx context.Context, hash common.Hash, config *tracers.TraceConfig, stream *jsoniter.Stream) error + TraceBlockByNumber(ctx context.Context, number rpc.BlockNumber, config *tracers.TraceConfig, stream *jsoniter.Stream) error + AccountRange(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash, start []byte, maxResults int, nocode, nostorage bool) (state.IteratorDump, error) + GetModifiedAccountsByNumber(ctx context.Context, startNum rpc.BlockNumber, endNum *rpc.BlockNumber) ([]common.Address, error) + GetModifiedAccountsByHash(_ context.Context, startHash common.Hash, endHash *common.Hash) ([]common.Address, error) + TraceCall(ctx context.Context, args ethapi.CallArgs, blockNrOrHash rpc.BlockNumberOrHash, config *tracers.TraceConfig, stream *jsoniter.Stream) error + AccountAt(ctx context.Context, blockHash common.Hash, txIndex uint64, account common.Address) (*AccountResult, error) +} + +// PrivateDebugAPIImpl is implementation of the PrivateDebugAPI interface based on remote Db access +type PrivateDebugAPIImpl struct { + *BaseAPI + db kv.RoDB + GasCap uint64 +} + +// NewPrivateDebugAPI returns PrivateDebugAPIImpl instance +func NewPrivateDebugAPI(base *BaseAPI, db kv.RoDB, gascap uint64) *PrivateDebugAPIImpl { + return &PrivateDebugAPIImpl{ + BaseAPI: base, + db: db, + GasCap: gascap, + } +} + +// StorageRangeAt implements debug_storageRangeAt. Returns information about a range of storage locations (if any) for the given address. 
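+// For illustration, a hypothetical debug_storageRangeAt request (the hash and address values below are
+// placeholders) has the shape
+//   {"jsonrpc":"2.0","id":1,"method":"debug_storageRangeAt",
+//    "params":["0x<32-byte block hash>", 0, "0x<contract address>", "0x00", 256]}
+// i.e. block hash, transaction index within that block, contract address, storage key to start from,
+// and maximum number of results, matching the parameter order of the method below.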
+func (api *PrivateDebugAPIImpl) StorageRangeAt(ctx context.Context, blockHash common.Hash, txIndex uint64, contractAddress common.Address, keyStart hexutil.Bytes, maxResult int) (StorageRangeResult, error) { + tx, err := api.db.BeginRo(ctx) + if err != nil { + return StorageRangeResult{}, err + } + defer tx.Rollback() + + chainConfig, err := api.chainConfig(tx) + if err != nil { + return StorageRangeResult{}, err + } + + block, err := api.blockByHashWithSenders(tx, blockHash) + if err != nil { + return StorageRangeResult{}, err + } + if block == nil { + return StorageRangeResult{}, nil + } + getHeader := func(hash common.Hash, number uint64) *types.Header { + h, e := api._blockReader.Header(ctx, tx, hash, number) + if e != nil { + log.Error("getHeader error", "number", number, "hash", hash, "err", e) + } + return h + } + + contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } + if api.TevmEnabled { + contractHasTEVM = ethdb.GetHasTEVM(tx) + } + + _, _, _, _, stateReader, err := transactions.ComputeTxEnv(ctx, block, chainConfig, getHeader, contractHasTEVM, ethash.NewFaker(), tx, blockHash, txIndex) + if err != nil { + return StorageRangeResult{}, err + } + return StorageRangeAt(stateReader, contractAddress, keyStart, maxResult) +} + +// AccountRange implements debug_accountRange. Returns a range of accounts involved in the given block rangeb +func (api *PrivateDebugAPIImpl) AccountRange(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash, startKey []byte, maxResults int, excludeCode, excludeStorage bool) (state.IteratorDump, error) { + tx, err := api.db.BeginRo(ctx) + if err != nil { + return state.IteratorDump{}, err + } + defer tx.Rollback() + + var blockNumber uint64 + + if number, ok := blockNrOrHash.Number(); ok { + if number == rpc.PendingBlockNumber { + return state.IteratorDump{}, fmt.Errorf("accountRange for pending block not supported") + } + if number == rpc.LatestBlockNumber { + var err error + + blockNumber, err = stages.GetStageProgress(tx, stages.Execution) + if err != nil { + return state.IteratorDump{}, fmt.Errorf("last block has not found: %w", err) + } + } else { + blockNumber = uint64(number) + } + + } else if hash, ok := blockNrOrHash.Hash(); ok { + block, err1 := api.blockByHashWithSenders(tx, hash) + if err1 != nil { + return state.IteratorDump{}, err1 + } + if block == nil { + return state.IteratorDump{}, fmt.Errorf("block %s not found", hash.Hex()) + } + blockNumber = block.NumberU64() + } + + if maxResults > AccountRangeMaxResults || maxResults <= 0 { + maxResults = AccountRangeMaxResults + } + + dumper := state.NewDumper(tx, blockNumber) + res, err := dumper.IteratorDump(excludeCode, excludeStorage, common.BytesToAddress(startKey), maxResults) + if err != nil { + return state.IteratorDump{}, err + } + + hash, err := rawdb.ReadCanonicalHash(tx, blockNumber) + if err != nil { + return state.IteratorDump{}, err + } + if hash != (common.Hash{}) { + header := rawdb.ReadHeader(tx, hash, blockNumber) + if header != nil { + res.Root = header.Root.String() + } + } + + return res, nil +} + +// GetModifiedAccountsByNumber implements debug_getModifiedAccountsByNumber. Returns a list of accounts modified in the given block. 
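+// The end block parameter is optional; for illustration, a hypothetical request (the block numbers
+// 0x64 and 0x78 below are placeholders) can take either form:
+//   {"jsonrpc":"2.0","id":1,"method":"debug_getModifiedAccountsByNumber","params":["0x64"]}
+//   {"jsonrpc":"2.0","id":1,"method":"debug_getModifiedAccountsByNumber","params":["0x64","0x78"]}
+// The single-parameter form covers just that one block; the two-parameter form covers the range up to
+// and including the end block, matching the startNumber/endNumber parameters of the method below.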
+func (api *PrivateDebugAPIImpl) GetModifiedAccountsByNumber(ctx context.Context, startNumber rpc.BlockNumber, endNumber *rpc.BlockNumber) ([]common.Address, error) { + tx, err := api.db.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + + latestBlock, err := stages.GetStageProgress(tx, stages.Finish) + if err != nil { + return nil, err + } + + // forces negative numbers to fail (too large) but allows zero + startNum := uint64(startNumber.Int64()) + if startNum > latestBlock { + return nil, fmt.Errorf("start block (%d) is later than the latest block (%d)", startNum, latestBlock) + } + + endNum := startNum + 1 // allows for single param calls + if endNumber != nil { + // forces negative numbers to fail (too large) but allows zero + endNum = uint64(endNumber.Int64()) + 1 + } + + // is endNum too big? + if endNum > latestBlock { + return nil, fmt.Errorf("end block (%d) is later than the latest block (%d)", endNum, latestBlock) + } + + if startNum > endNum { + return nil, fmt.Errorf("start block (%d) must be less than or equal to end block (%d)", startNum, endNum) + } + + return changeset.GetModifiedAccounts(tx, startNum, endNum) +} + +// GetModifiedAccountsByHash implements debug_getModifiedAccountsByHash. Returns a list of accounts modified in the given block. +func (api *PrivateDebugAPIImpl) GetModifiedAccountsByHash(ctx context.Context, startHash common.Hash, endHash *common.Hash) ([]common.Address, error) { + tx, err := api.db.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + + startBlock, err := api.blockByHashWithSenders(tx, startHash) + if err != nil { + return nil, err + } + if startBlock == nil { + return nil, fmt.Errorf("start block %x not found", startHash) + } + startNum := startBlock.NumberU64() + endNum := startNum + 1 // allows for single parameter calls + + if endHash != nil { + endBlock, err := api.blockByHashWithSenders(tx, *endHash) + if err != nil { + return nil, err + } + if endBlock == nil { + return nil, fmt.Errorf("end block %x not found", *endHash) + } + endNum = endBlock.NumberU64() + 1 + } + + if startNum > endNum { + return nil, fmt.Errorf("start block (%d) must be less than or equal to end block (%d)", startNum, endNum) + } + + return changeset.GetModifiedAccounts(tx, startNum, endNum) +} + +func (api *PrivateDebugAPIImpl) AccountAt(ctx context.Context, blockHash common.Hash, txIndex uint64, address common.Address) (*AccountResult, error) { + tx, err := api.db.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + + chainConfig, err := api.chainConfig(tx) + if err != nil { + return nil, err + } + + block, err := api.blockByHashWithSenders(tx, blockHash) + if err != nil { + return nil, err + } + if block == nil { + return nil, nil + } + getHeader := func(hash common.Hash, number uint64) *types.Header { + return rawdb.ReadHeader(tx, hash, number) + } + contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } + if api.TevmEnabled { + contractHasTEVM = ethdb.GetHasTEVM(tx) + } + _, _, _, ibs, _, err := transactions.ComputeTxEnv(ctx, block, chainConfig, getHeader, contractHasTEVM, ethash.NewFaker(), tx, blockHash, txIndex) + if err != nil { + return nil, err + } + result := &AccountResult{} + result.Balance.ToInt().Set(ibs.GetBalance(address).ToBig()) + result.Nonce = hexutil.Uint64(ibs.GetNonce(address)) + result.Code = ibs.GetCode(address) + result.CodeHash = ibs.GetCodeHash(address) + return result, nil +} + +type AccountResult struct { + Balance hexutil.Big 
`json:"balance"` + Nonce hexutil.Uint64 `json:"nonce"` + Code hexutil.Bytes `json:"code"` + CodeHash common.Hash `json:"codeHash"` +} diff --git a/cmd/rpcdaemon22/commands/debug_api_test.go b/cmd/rpcdaemon22/commands/debug_api_test.go new file mode 100644 index 00000000000..25c4f2ccd26 --- /dev/null +++ b/cmd/rpcdaemon22/commands/debug_api_test.go @@ -0,0 +1,185 @@ +package commands + +import ( + "bytes" + "context" + "encoding/json" + "testing" + + jsoniter "github.com/json-iterator/go" + "github.com/ledgerwatch/erigon-lib/kv/kvcache" + "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/rpcdaemontest" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/eth/tracers" + "github.com/ledgerwatch/erigon/internal/ethapi" + "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/turbo/snapshotsync" +) + +var debugTraceTransactionTests = []struct { + txHash string + gas uint64 + failed bool + returnValue string +}{ + {"3f3cb8a0e13ed2481f97f53f7095b9cbc78b6ffb779f2d3e565146371a8830ea", 21000, false, ""}, + {"f588c6426861d9ad25d5ccc12324a8d213f35ef1ed4153193f0c13eb81ca7f4a", 49189, false, "0000000000000000000000000000000000000000000000000000000000000001"}, + {"b6449d8e167a8826d050afe4c9f07095236ff769a985f02649b1023c2ded2059", 38899, false, ""}, +} + +var debugTraceTransactionNoRefundTests = []struct { + txHash string + gas uint64 + failed bool + returnValue string +}{ + {"3f3cb8a0e13ed2481f97f53f7095b9cbc78b6ffb779f2d3e565146371a8830ea", 21000, false, ""}, + {"f588c6426861d9ad25d5ccc12324a8d213f35ef1ed4153193f0c13eb81ca7f4a", 49189, false, "0000000000000000000000000000000000000000000000000000000000000001"}, + {"b6449d8e167a8826d050afe4c9f07095236ff769a985f02649b1023c2ded2059", 62899, false, ""}, +} + +func TestTraceBlockByNumber(t *testing.T) { + db := rpcdaemontest.CreateTestKV(t) + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + baseApi := NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false) + ethApi := NewEthAPI(baseApi, db, nil, nil, nil, 5000000) + api := NewPrivateDebugAPI(baseApi, db, 0) + for _, tt := range debugTraceTransactionTests { + var buf bytes.Buffer + stream := jsoniter.NewStream(jsoniter.ConfigDefault, &buf, 4096) + tx, err := ethApi.GetTransactionByHash(context.Background(), common.HexToHash(tt.txHash)) + if err != nil { + t.Errorf("traceBlock %s: %v", tt.txHash, err) + } + txcount, err := ethApi.GetBlockTransactionCountByHash(context.Background(), *tx.BlockHash) + if err != nil { + t.Errorf("traceBlock %s: %v", tt.txHash, err) + } + err = api.TraceBlockByNumber(context.Background(), rpc.BlockNumber(tx.BlockNumber.ToInt().Uint64()), &tracers.TraceConfig{}, stream) + if err != nil { + t.Errorf("traceBlock %s: %v", tt.txHash, err) + } + if err = stream.Flush(); err != nil { + t.Fatalf("error flusing: %v", err) + } + var er []ethapi.ExecutionResult + if err = json.Unmarshal(buf.Bytes(), &er); err != nil { + t.Fatalf("parsing result: %v", err) + } + if len(er) != int(*txcount) { + t.Fatalf("incorrect length: %v", err) + } + } + var buf bytes.Buffer + stream := jsoniter.NewStream(jsoniter.ConfigDefault, &buf, 4096) + err := api.TraceBlockByNumber(context.Background(), rpc.BlockNumber(rpc.LatestBlockNumber), &tracers.TraceConfig{}, stream) + if err != nil { + t.Errorf("traceBlock %v: %v", rpc.LatestBlockNumber, err) + } + if err = stream.Flush(); err != nil { + t.Fatalf("error flusing: %v", err) + } + var er []ethapi.ExecutionResult + if err = json.Unmarshal(buf.Bytes(), &er); err != nil { + t.Fatalf("parsing result: %v", err) + } 
+} + +func TestTraceBlockByHash(t *testing.T) { + db := rpcdaemontest.CreateTestKV(t) + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + baseApi := NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false) + ethApi := NewEthAPI(baseApi, db, nil, nil, nil, 5000000) + api := NewPrivateDebugAPI(baseApi, db, 0) + for _, tt := range debugTraceTransactionTests { + var buf bytes.Buffer + stream := jsoniter.NewStream(jsoniter.ConfigDefault, &buf, 4096) + tx, err := ethApi.GetTransactionByHash(context.Background(), common.HexToHash(tt.txHash)) + if err != nil { + t.Errorf("traceBlock %s: %v", tt.txHash, err) + } + txcount, err := ethApi.GetBlockTransactionCountByHash(context.Background(), *tx.BlockHash) + if err != nil { + t.Errorf("traceBlock %s: %v", tt.txHash, err) + } + err = api.TraceBlockByHash(context.Background(), *tx.BlockHash, &tracers.TraceConfig{}, stream) + if err != nil { + t.Errorf("traceBlock %s: %v", tt.txHash, err) + } + if err = stream.Flush(); err != nil { + t.Fatalf("error flusing: %v", err) + } + var er []ethapi.ExecutionResult + if err = json.Unmarshal(buf.Bytes(), &er); err != nil { + t.Fatalf("parsing result: %v", err) + } + if len(er) != int(*txcount) { + t.Fatalf("incorrect length: %v", err) + } + } +} + +func TestTraceTransaction(t *testing.T) { + db := rpcdaemontest.CreateTestKV(t) + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + api := NewPrivateDebugAPI( + NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), + db, 0) + for _, tt := range debugTraceTransactionTests { + var buf bytes.Buffer + stream := jsoniter.NewStream(jsoniter.ConfigDefault, &buf, 4096) + err := api.TraceTransaction(context.Background(), common.HexToHash(tt.txHash), &tracers.TraceConfig{}, stream) + if err != nil { + t.Errorf("traceTransaction %s: %v", tt.txHash, err) + } + if err = stream.Flush(); err != nil { + t.Fatalf("error flusing: %v", err) + } + var er ethapi.ExecutionResult + if err = json.Unmarshal(buf.Bytes(), &er); err != nil { + t.Fatalf("parsing result: %v", err) + } + if er.Gas != tt.gas { + t.Errorf("wrong gas for transaction %s, got %d, expected %d", tt.txHash, er.Gas, tt.gas) + } + if er.Failed != tt.failed { + t.Errorf("wrong failed flag for transaction %s, got %t, expected %t", tt.txHash, er.Failed, tt.failed) + } + if er.ReturnValue != tt.returnValue { + t.Errorf("wrong return value for transaction %s, got %s, expected %s", tt.txHash, er.ReturnValue, tt.returnValue) + } + } +} + +func TestTraceTransactionNoRefund(t *testing.T) { + db := rpcdaemontest.CreateTestKV(t) + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + api := NewPrivateDebugAPI( + NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), + db, 0) + for _, tt := range debugTraceTransactionNoRefundTests { + var buf bytes.Buffer + stream := jsoniter.NewStream(jsoniter.ConfigDefault, &buf, 4096) + var norefunds = true + err := api.TraceTransaction(context.Background(), common.HexToHash(tt.txHash), &tracers.TraceConfig{NoRefunds: &norefunds}, stream) + if err != nil { + t.Errorf("traceTransaction %s: %v", tt.txHash, err) + } + if err = stream.Flush(); err != nil { + t.Fatalf("error flusing: %v", err) + } + var er ethapi.ExecutionResult + if err = json.Unmarshal(buf.Bytes(), &er); err != nil { + t.Fatalf("parsing result: %v", err) + } + if er.Gas != tt.gas { + t.Errorf("wrong gas for transaction %s, got %d, expected %d", tt.txHash, er.Gas, tt.gas) + } + if er.Failed != tt.failed { + t.Errorf("wrong failed flag for transaction %s, got %t, expected %t", 
tt.txHash, er.Failed, tt.failed) + } + if er.ReturnValue != tt.returnValue { + t.Errorf("wrong return value for transaction %s, got %s, expected %s", tt.txHash, er.ReturnValue, tt.returnValue) + } + } +} diff --git a/cmd/rpcdaemon22/commands/engine_api.go b/cmd/rpcdaemon22/commands/engine_api.go new file mode 100644 index 00000000000..1d7984fa91a --- /dev/null +++ b/cmd/rpcdaemon22/commands/engine_api.go @@ -0,0 +1,262 @@ +package commands + +import ( + "context" + "encoding/binary" + "fmt" + "math/big" + + "github.com/holiman/uint256" + "github.com/ledgerwatch/erigon-lib/gointerfaces" + "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" + types2 "github.com/ledgerwatch/erigon-lib/gointerfaces/types" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/hexutil" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/turbo/rpchelper" + "github.com/ledgerwatch/log/v3" +) + +// ExecutionPayload represents an execution payload (aka slot/block) +type ExecutionPayload struct { + ParentHash common.Hash `json:"parentHash" gencodec:"required"` + FeeRecipient common.Address `json:"feeRecipient" gencodec:"required"` + StateRoot common.Hash `json:"stateRoot" gencodec:"required"` + ReceiptsRoot common.Hash `json:"receiptsRoot" gencodec:"required"` + LogsBloom hexutil.Bytes `json:"logsBloom" gencodec:"required"` + PrevRandao common.Hash `json:"prevRandao" gencodec:"required"` + BlockNumber hexutil.Uint64 `json:"blockNumber" gencodec:"required"` + GasLimit hexutil.Uint64 `json:"gasLimit" gencodec:"required"` + GasUsed hexutil.Uint64 `json:"gasUsed" gencodec:"required"` + Timestamp hexutil.Uint64 `json:"timestamp" gencodec:"required"` + ExtraData hexutil.Bytes `json:"extraData" gencodec:"required"` + BaseFeePerGas *hexutil.Big `json:"baseFeePerGas" gencodec:"required"` + BlockHash common.Hash `json:"blockHash" gencodec:"required"` + Transactions []hexutil.Bytes `json:"transactions" gencodec:"required"` +} + +// PayloadAttributes represent the attributes required to start assembling a payload +type ForkChoiceState struct { + HeadHash common.Hash `json:"headBlockHash" gencodec:"required"` + SafeBlockHash common.Hash `json:"safeBlockHash" gencodec:"required"` + FinalizedBlockHash common.Hash `json:"finalizedBlockHash" gencodec:"required"` +} + +// PayloadAttributes represent the attributes required to start assembling a payload +type PayloadAttributes struct { + Timestamp hexutil.Uint64 `json:"timestamp" gencodec:"required"` + PrevRandao common.Hash `json:"prevRandao" gencodec:"required"` + SuggestedFeeRecipient common.Address `json:"suggestedFeeRecipient" gencodec:"required"` +} + +// TransitionConfiguration represents the correct configurations of the CL and the EL +type TransitionConfiguration struct { + TerminalTotalDifficulty *hexutil.Big `json:"terminalTotalDifficulty" gencodec:"required"` + TerminalBlockHash common.Hash `json:"terminalBlockHash" gencodec:"required"` + TerminalBlockNumber *hexutil.Big `json:"terminalBlockNumber" gencodec:"required"` +} + +// EngineAPI Beacon chain communication endpoint +type EngineAPI interface { + ForkchoiceUpdatedV1(ctx context.Context, forkChoiceState *ForkChoiceState, payloadAttributes *PayloadAttributes) (map[string]interface{}, error) + NewPayloadV1(context.Context, *ExecutionPayload) (map[string]interface{}, error) + GetPayloadV1(ctx context.Context, payloadID hexutil.Bytes) (*ExecutionPayload, error) + ExchangeTransitionConfigurationV1(ctx context.Context, 
transitionConfiguration TransitionConfiguration) (TransitionConfiguration, error) +} + +// EngineImpl is implementation of the EngineAPI interface +type EngineImpl struct { + *BaseAPI + db kv.RoDB + api rpchelper.ApiBackend +} + +func convertPayloadStatus(x *remote.EnginePayloadStatus) map[string]interface{} { + json := map[string]interface{}{ + "status": x.Status.String(), + } + if x.LatestValidHash != nil { + json["latestValidHash"] = common.Hash(gointerfaces.ConvertH256ToHash(x.LatestValidHash)) + } + if x.ValidationError != "" { + json["validationError"] = x.ValidationError + } + + return json +} + +func (e *EngineImpl) ForkchoiceUpdatedV1(ctx context.Context, forkChoiceState *ForkChoiceState, payloadAttributes *PayloadAttributes) (map[string]interface{}, error) { + log.Trace("Received ForkchoiceUpdated", "head", forkChoiceState.HeadHash, "safe", forkChoiceState.HeadHash, "finalized", forkChoiceState.FinalizedBlockHash, + "build", payloadAttributes != nil) + + var prepareParameters *remote.EnginePayloadAttributes + if payloadAttributes != nil { + prepareParameters = &remote.EnginePayloadAttributes{ + Timestamp: uint64(payloadAttributes.Timestamp), + PrevRandao: gointerfaces.ConvertHashToH256(payloadAttributes.PrevRandao), + SuggestedFeeRecipient: gointerfaces.ConvertAddressToH160(payloadAttributes.SuggestedFeeRecipient), + } + } + reply, err := e.api.EngineForkchoiceUpdatedV1(ctx, &remote.EngineForkChoiceUpdatedRequest{ + ForkchoiceState: &remote.EngineForkChoiceState{ + HeadBlockHash: gointerfaces.ConvertHashToH256(forkChoiceState.HeadHash), + SafeBlockHash: gointerfaces.ConvertHashToH256(forkChoiceState.SafeBlockHash), + FinalizedBlockHash: gointerfaces.ConvertHashToH256(forkChoiceState.FinalizedBlockHash), + }, + PayloadAttributes: prepareParameters, + }) + if err != nil { + return nil, err + } + + json := map[string]interface{}{ + "payloadStatus": convertPayloadStatus(reply.PayloadStatus), + } + if reply.PayloadId != 0 { + encodedPayloadId := make([]byte, 8) + binary.BigEndian.PutUint64(encodedPayloadId, reply.PayloadId) + json["payloadId"] = hexutil.Bytes(encodedPayloadId) + } + + return json, nil +} + +// NewPayloadV1 processes new payloads (blocks) from the beacon chain. 
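+// The reply mirrors convertPayloadStatus above: a status string plus an optional
+// latestValidHash and validationError. A successful reply looks roughly like
+// (illustrative values only): {"status":"VALID","latestValidHash":"0x..."}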
+// See https://github.com/ethereum/execution-apis/blob/main/src/engine/specification.md#engine_newpayloadv1 +func (e *EngineImpl) NewPayloadV1(ctx context.Context, payload *ExecutionPayload) (map[string]interface{}, error) { + log.Trace("Received NewPayload", "height", uint64(payload.BlockNumber), "hash", payload.BlockHash) + + var baseFee *uint256.Int + if payload.BaseFeePerGas != nil { + var overflow bool + baseFee, overflow = uint256.FromBig((*big.Int)(payload.BaseFeePerGas)) + if overflow { + log.Warn("NewPayload BaseFeePerGas overflow") + return nil, fmt.Errorf("invalid request") + } + } + + // Convert slice of hexutil.Bytes to a slice of slice of bytes + transactions := make([][]byte, len(payload.Transactions)) + for i, transaction := range payload.Transactions { + transactions[i] = ([]byte)(transaction) + } + res, err := e.api.EngineNewPayloadV1(ctx, &types2.ExecutionPayload{ + ParentHash: gointerfaces.ConvertHashToH256(payload.ParentHash), + Coinbase: gointerfaces.ConvertAddressToH160(payload.FeeRecipient), + StateRoot: gointerfaces.ConvertHashToH256(payload.StateRoot), + ReceiptRoot: gointerfaces.ConvertHashToH256(payload.ReceiptsRoot), + LogsBloom: gointerfaces.ConvertBytesToH2048(([]byte)(payload.LogsBloom)), + PrevRandao: gointerfaces.ConvertHashToH256(payload.PrevRandao), + BlockNumber: uint64(payload.BlockNumber), + GasLimit: uint64(payload.GasLimit), + GasUsed: uint64(payload.GasUsed), + Timestamp: uint64(payload.Timestamp), + ExtraData: payload.ExtraData, + BaseFeePerGas: gointerfaces.ConvertUint256IntToH256(baseFee), + BlockHash: gointerfaces.ConvertHashToH256(payload.BlockHash), + Transactions: transactions, + }) + if err != nil { + log.Warn("NewPayload", "err", err) + return nil, err + } + + return convertPayloadStatus(res), nil +} + +func (e *EngineImpl) GetPayloadV1(ctx context.Context, payloadID hexutil.Bytes) (*ExecutionPayload, error) { + decodedPayloadId := binary.BigEndian.Uint64(payloadID) + log.Info("Received GetPayload", "payloadId", decodedPayloadId) + + payload, err := e.api.EngineGetPayloadV1(ctx, decodedPayloadId) + if err != nil { + return nil, err + } + var bloom types.Bloom = gointerfaces.ConvertH2048ToBloom(payload.LogsBloom) + + var baseFee *big.Int + if payload.BaseFeePerGas != nil { + baseFee = gointerfaces.ConvertH256ToUint256Int(payload.BaseFeePerGas).ToBig() + } + + // Convert slice of hexutil.Bytes to a slice of slice of bytes + transactions := make([]hexutil.Bytes, len(payload.Transactions)) + for i, transaction := range payload.Transactions { + transactions[i] = transaction + } + return &ExecutionPayload{ + ParentHash: gointerfaces.ConvertH256ToHash(payload.ParentHash), + FeeRecipient: gointerfaces.ConvertH160toAddress(payload.Coinbase), + StateRoot: gointerfaces.ConvertH256ToHash(payload.StateRoot), + ReceiptsRoot: gointerfaces.ConvertH256ToHash(payload.ReceiptRoot), + LogsBloom: bloom[:], + PrevRandao: gointerfaces.ConvertH256ToHash(payload.PrevRandao), + BlockNumber: hexutil.Uint64(payload.BlockNumber), + GasLimit: hexutil.Uint64(payload.GasLimit), + GasUsed: hexutil.Uint64(payload.GasUsed), + Timestamp: hexutil.Uint64(payload.Timestamp), + ExtraData: payload.ExtraData, + BaseFeePerGas: (*hexutil.Big)(baseFee), + BlockHash: gointerfaces.ConvertH256ToHash(payload.BlockHash), + Transactions: transactions, + }, nil +} + +// Receives consensus layer's transition configuration and checks if the execution layer has the correct configuration. +// Can also be used to ping the execution layer (heartbeats). 
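+// The terminal total difficulty, terminal block hash and terminal block number supplied by
+// the consensus layer are compared against the chain config; any mismatch is returned as an
+// error, otherwise the execution layer echoes its own values back.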
+// See https://github.com/ethereum/execution-apis/blob/v1.0.0-alpha.7/src/engine/specification.md#engine_exchangetransitionconfigurationv1 +func (e *EngineImpl) ExchangeTransitionConfigurationV1(ctx context.Context, beaconConfig TransitionConfiguration) (TransitionConfiguration, error) { + tx, err := e.db.BeginRo(ctx) + + if err != nil { + return TransitionConfiguration{}, err + } + + defer tx.Rollback() + + chainConfig, err := e.BaseAPI.chainConfig(tx) + + if err != nil { + return TransitionConfiguration{}, err + } + + terminalTotalDifficulty := chainConfig.TerminalTotalDifficulty + + if terminalTotalDifficulty == nil { + return TransitionConfiguration{}, fmt.Errorf("the execution layer doesn't have a terminal total difficulty. expected: %v", beaconConfig.TerminalTotalDifficulty) + } + + if terminalTotalDifficulty.Cmp((*big.Int)(beaconConfig.TerminalTotalDifficulty)) != 0 { + return TransitionConfiguration{}, fmt.Errorf("the execution layer has a wrong terminal total difficulty. expected %v, but instead got: %d", beaconConfig.TerminalTotalDifficulty, terminalTotalDifficulty) + } + + if chainConfig.TerminalBlockHash != beaconConfig.TerminalBlockHash { + return TransitionConfiguration{}, fmt.Errorf("the execution layer has a wrong terminal block hash. expected %s, but instead got: %s", beaconConfig.TerminalBlockHash, chainConfig.TerminalBlockHash) + } + + terminalBlockNumber := chainConfig.TerminalBlockNumber + if terminalBlockNumber == nil { + terminalBlockNumber = common.Big0 + } + + if terminalBlockNumber.Cmp((*big.Int)(beaconConfig.TerminalBlockNumber)) != 0 { + return TransitionConfiguration{}, fmt.Errorf("the execution layer has a wrong terminal block number. expected %v, but instead got: %d", beaconConfig.TerminalBlockNumber, terminalBlockNumber) + } + + return TransitionConfiguration{ + TerminalTotalDifficulty: (*hexutil.Big)(terminalTotalDifficulty), + TerminalBlockHash: chainConfig.TerminalBlockHash, + TerminalBlockNumber: (*hexutil.Big)(terminalBlockNumber), + }, nil +} + +// NewEngineAPI returns EngineImpl instance +func NewEngineAPI(base *BaseAPI, db kv.RoDB, api rpchelper.ApiBackend) *EngineImpl { + return &EngineImpl{ + BaseAPI: base, + db: db, + api: api, + } +} diff --git a/cmd/rpcdaemon22/commands/engine_api_test.go b/cmd/rpcdaemon22/commands/engine_api_test.go new file mode 100644 index 00000000000..a209b8469b6 --- /dev/null +++ b/cmd/rpcdaemon22/commands/engine_api_test.go @@ -0,0 +1,18 @@ +package commands + +import ( + "testing" + + "github.com/ledgerwatch/erigon-lib/gointerfaces" + "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" + "github.com/ledgerwatch/erigon/common" + "github.com/stretchr/testify/assert" +) + +// Test case for https://github.com/ethereum/execution-apis/pull/217 responses +func TestZeroLatestValidHash(t *testing.T) { + payloadStatus := remote.EnginePayloadStatus{Status: remote.EngineStatus_INVALID, LatestValidHash: gointerfaces.ConvertHashToH256(common.Hash{})} + json := convertPayloadStatus(&payloadStatus) + assert.Equal(t, "INVALID", json["status"]) + assert.Equal(t, common.Hash{}, json["latestValidHash"]) +} diff --git a/cmd/rpcdaemon22/commands/erigon_api.go b/cmd/rpcdaemon22/commands/erigon_api.go new file mode 100644 index 00000000000..f976cf31f05 --- /dev/null +++ b/cmd/rpcdaemon22/commands/erigon_api.go @@ -0,0 +1,52 @@ +package commands + +import ( + "context" + + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/p2p" + 
"github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/turbo/rpchelper" +) + +// ErigonAPI Erigon specific routines +type ErigonAPI interface { + // System related (see ./erigon_system.go) + Forks(ctx context.Context) (Forks, error) + + // Blocks related (see ./erigon_blocks.go) + GetHeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) + GetHeaderByHash(_ context.Context, hash common.Hash) (*types.Header, error) + GetBlockByTimestamp(ctx context.Context, timeStamp rpc.Timestamp, fullTx bool) (map[string]interface{}, error) + + // Receipt related (see ./erigon_receipts.go) + GetLogsByHash(ctx context.Context, hash common.Hash) ([][]*types.Log, error) + //GetLogsByNumber(ctx context.Context, number rpc.BlockNumber) ([][]*types.Log, error) + + // WatchTheBurn / reward related (see ./erigon_issuance.go) + WatchTheBurn(ctx context.Context, blockNr rpc.BlockNumber) (Issuance, error) + + // CumulativeChainTraffic / related to chain traffic (see ./erigon_cumulative_index.go) + CumulativeChainTraffic(ctx context.Context, blockNr rpc.BlockNumber) (ChainTraffic, error) + + // NodeInfo returns a collection of metadata known about the host. + NodeInfo(ctx context.Context) ([]p2p.NodeInfo, error) +} + +// ErigonImpl is implementation of the ErigonAPI interface +type ErigonImpl struct { + *BaseAPI + db kv.RoDB + ethBackend rpchelper.ApiBackend +} + +// NewErigonAPI returns ErigonImpl instance +func NewErigonAPI(base *BaseAPI, db kv.RoDB, eth rpchelper.ApiBackend) *ErigonImpl { + return &ErigonImpl{ + BaseAPI: base, + db: db, + ethBackend: eth, + } +} diff --git a/cmd/rpcdaemon22/commands/erigon_block.go b/cmd/rpcdaemon22/commands/erigon_block.go new file mode 100644 index 00000000000..d32557024e4 --- /dev/null +++ b/cmd/rpcdaemon22/commands/erigon_block.go @@ -0,0 +1,142 @@ +package commands + +import ( + "context" + "fmt" + "sort" + + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/internal/ethapi" + "github.com/ledgerwatch/erigon/rpc" +) + +// GetHeaderByNumber implements erigon_getHeaderByNumber. Returns a block's header given a block number ignoring the block's transaction and uncle list (may be faster). +func (api *ErigonImpl) GetHeaderByNumber(ctx context.Context, blockNumber rpc.BlockNumber) (*types.Header, error) { + // Pending block is only known by the miner + if blockNumber == rpc.PendingBlockNumber { + block := api.pendingBlock() + if block == nil { + return nil, nil + } + return block.Header(), nil + } + + tx, err := api.db.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + + blockNum, err := getBlockNumber(blockNumber, tx) + if err != nil { + return nil, err + } + + header := rawdb.ReadHeaderByNumber(tx, blockNum) + if header == nil { + return nil, fmt.Errorf("block header not found: %d", blockNum) + } + + return header, nil +} + +// GetHeaderByHash implements erigon_getHeaderByHash. Returns a block's header given a block's hash. 
+func (api *ErigonImpl) GetHeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) { + tx, err := api.db.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + + header, err := rawdb.ReadHeaderByHash(tx, hash) + if err != nil { + return nil, err + } + if header == nil { + return nil, fmt.Errorf("block header not found: %s", hash.String()) + } + + return header, nil +} + +func (api *ErigonImpl) GetBlockByTimestamp(ctx context.Context, timeStamp rpc.Timestamp, fullTx bool) (map[string]interface{}, error) { + tx, err := api.db.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + + uintTimestamp := timeStamp.TurnIntoUint64() + + currentHeader := rawdb.ReadCurrentHeader(tx) + currenttHeaderTime := currentHeader.Time + highestNumber := currentHeader.Number.Uint64() + + firstHeader := rawdb.ReadHeaderByNumber(tx, 0) + firstHeaderTime := firstHeader.Time + + if currenttHeaderTime <= uintTimestamp { + blockResponse, err := buildBlockResponse(tx, highestNumber, fullTx) + if err != nil { + return nil, err + } + + return blockResponse, nil + } + + if firstHeaderTime >= uintTimestamp { + blockResponse, err := buildBlockResponse(tx, 0, fullTx) + if err != nil { + return nil, err + } + + return blockResponse, nil + } + + blockNum := sort.Search(int(currentHeader.Number.Uint64()), func(blockNum int) bool { + currentHeader := rawdb.ReadHeaderByNumber(tx, uint64(blockNum)) + + return currentHeader.Time >= uintTimestamp + }) + + resultingHeader := rawdb.ReadHeaderByNumber(tx, uint64(blockNum)) + + if resultingHeader.Time > uintTimestamp { + response, err := buildBlockResponse(tx, uint64(blockNum)-1, fullTx) + if err != nil { + return nil, err + } + return response, nil + } + + response, err := buildBlockResponse(tx, uint64(blockNum), fullTx) + if err != nil { + return nil, err + } + + return response, nil +} + +func buildBlockResponse(db kv.Tx, blockNum uint64, fullTx bool) (map[string]interface{}, error) { + block, err := rawdb.ReadBlockByNumber(db, blockNum) + if err != nil { + return nil, err + } + + if block == nil { + return nil, nil + } + + response, err := ethapi.RPCMarshalBlock(block, true, fullTx) + + if err == nil && rpc.BlockNumber(block.NumberU64()) == rpc.PendingBlockNumber { + // Pending blocks need to nil out a few fields + for _, field := range []string{"hash", "nonce", "miner"} { + response[field] = nil + } + } + return response, err +} diff --git a/cmd/rpcdaemon22/commands/erigon_cumulative_chain_traffic.go b/cmd/rpcdaemon22/commands/erigon_cumulative_chain_traffic.go new file mode 100644 index 00000000000..640ececce78 --- /dev/null +++ b/cmd/rpcdaemon22/commands/erigon_cumulative_chain_traffic.go @@ -0,0 +1,41 @@ +package commands + +import ( + "context" + + "github.com/ledgerwatch/erigon/common/hexutil" + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/rpc" +) + +// CumulativeGasIndex implements erigon_cumulativeChainTraffic. Returns how much traffic there has been at the specified block number. +// Aka. 
amount of gas used so far + total transactions issued to the network +func (api *ErigonImpl) CumulativeChainTraffic(ctx context.Context, blockNr rpc.BlockNumber) (ChainTraffic, error) { + tx, err := api.db.BeginRo(ctx) + if err != nil { + return ChainTraffic{}, err + } + defer tx.Rollback() + + blockNumber := uint64(blockNr) + cumulativeGasUsed, err := rawdb.ReadCumulativeGasUsed(tx, blockNumber) + if err != nil { + return ChainTraffic{}, err + } + + _, baseTxId, txCount, err := rawdb.ReadBodyByNumber(tx, blockNumber) + if err != nil { + return ChainTraffic{}, err + } + + cumulativeTransactionCount := baseTxId + uint64(txCount) + return ChainTraffic{ + CumulativeGasUsed: (*hexutil.Big)(cumulativeGasUsed), + CumulativeTransactionsCount: (*hexutil.Uint64)(&cumulativeTransactionCount), + }, nil +} + +type ChainTraffic struct { + CumulativeGasUsed *hexutil.Big `json:"cumulativeGasUsed"` + CumulativeTransactionsCount *hexutil.Uint64 `json:"cumulativeTransactionsCount"` +} diff --git a/cmd/rpcdaemon22/commands/erigon_issuance.go b/cmd/rpcdaemon22/commands/erigon_issuance.go new file mode 100644 index 00000000000..7aef3595e2a --- /dev/null +++ b/cmd/rpcdaemon22/commands/erigon_issuance.go @@ -0,0 +1,133 @@ +package commands + +import ( + "context" + "fmt" + "math/big" + + "github.com/holiman/uint256" + "github.com/ledgerwatch/erigon/common/hexutil" + "github.com/ledgerwatch/erigon/consensus/ethash" + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/rpc" +) + +// BlockReward returns the block reward for this block +// func (api *ErigonImpl) BlockReward(ctx context.Context, blockNr rpc.BlockNumber) (Issuance, error) { +// tx, err := api.db.Begin(ctx, ethdb.RO) +// if err != nil { +// return Issuance{}, err +// } +// defer tx.Rollback() +// +// return api.rewardCalc(tx, blockNr, "block") // nolint goconst +//} + +// UncleReward returns the uncle reward for this block +// func (api *ErigonImpl) UncleReward(ctx context.Context, blockNr rpc.BlockNumber) (Issuance, error) { +// tx, err := api.db.Begin(ctx, ethdb.RO) +// if err != nil { +// return Issuance{}, err +// } +// defer tx.Rollback() +// +// return api.rewardCalc(tx, blockNr, "uncle") // nolint goconst +//} + +// Issuance implements erigon_issuance. Returns the total issuance (block reward plus uncle reward) for the given block. +func (api *ErigonImpl) WatchTheBurn(ctx context.Context, blockNr rpc.BlockNumber) (Issuance, error) { + tx, err := api.db.BeginRo(ctx) + if err != nil { + return Issuance{}, err + } + defer tx.Rollback() + + chainConfig, err := api.chainConfig(tx) + if err != nil { + return Issuance{}, err + } + if chainConfig.Ethash == nil { + // Clique for example has no issuance + return Issuance{}, nil + } + hash, err := rawdb.ReadCanonicalHash(tx, uint64(blockNr)) + if err != nil { + return Issuance{}, err + } + header := rawdb.ReadHeader(tx, hash, uint64(blockNr)) + if header == nil { + return Issuance{}, fmt.Errorf("could not find block header") + } + + body := rawdb.ReadCanonicalBodyWithTransactions(tx, hash, uint64(blockNr)) + + if body == nil { + return Issuance{}, fmt.Errorf("could not find block body") + } + + minerReward, uncleRewards := ethash.AccumulateRewards(chainConfig, header, body.Uncles) + issuance := minerReward + for _, r := range uncleRewards { + p := r // avoids warning? 
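+		// r is the range copy of the uncle reward; p is a second copy so that &p does not
+		// point at the range variable, which keeps linters quiet and avoids accidental aliasing.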
+ issuance.Add(&issuance, &p) + } + + var ret Issuance + ret.BlockReward = (*hexutil.Big)(minerReward.ToBig()) + ret.Issuance = (*hexutil.Big)(issuance.ToBig()) + issuance.Sub(&issuance, &minerReward) + ret.UncleReward = (*hexutil.Big)(issuance.ToBig()) + // Compute how much was burnt + if header.BaseFee != nil { + burnt := header.BaseFee + burnt.Mul(burnt, big.NewInt(int64(header.GasUsed))) + ret.Burnt = (*hexutil.Big)(burnt) + } else { + ret.Burnt = (*hexutil.Big)(big.NewInt(0)) + } + // Compute totalIssued, totalBurnt and the supply of eth + totalIssued, err := rawdb.ReadTotalIssued(tx, uint64(blockNr)) + if err != nil { + return Issuance{}, err + } + totalBurnt, err := rawdb.ReadTotalBurnt(tx, uint64(blockNr)) + if err != nil { + return Issuance{}, err + } + + ret.TotalIssued = (*hexutil.Big)(totalIssued) + ret.TotalBurnt = (*hexutil.Big)(totalBurnt) + + // Compute tips + tips := big.NewInt(0) + + if header.BaseFee != nil { + receipts, err := rawdb.ReadReceiptsByHash(tx, hash) + if err != nil { + return Issuance{}, err + } + + baseFee, overflow := uint256.FromBig(header.BaseFee) + if overflow { + return Issuance{}, fmt.Errorf("baseFee overflow") + } + + for i, transaction := range body.Transactions { + tip := transaction.GetEffectiveGasTip(baseFee).ToBig() + tips.Add(tips, tip.Mul(tip, big.NewInt(int64(receipts[i].GasUsed)))) + } + } + ret.Tips = (*hexutil.Big)(tips) + return ret, nil +} + +// Issuance structure to return information about issuance +type Issuance struct { + BlockReward *hexutil.Big `json:"blockReward"` // Block reward for given block + UncleReward *hexutil.Big `json:"uncleReward"` // Uncle reward for gived block + Issuance *hexutil.Big `json:"issuance"` // Total amount of wei created in the block + Burnt *hexutil.Big `json:"burnt"` // Total amount of wei burned in the block + TotalIssued *hexutil.Big `json:"totalIssued"` // Total amount of wei created in total so far + TotalBurnt *hexutil.Big `json:"totalBurnt"` // Total amount of wei burnt so far + Tips *hexutil.Big `json:"tips"` // Total Tips generated by the block +} diff --git a/cmd/rpcdaemon22/commands/erigon_nodeInfo.go b/cmd/rpcdaemon22/commands/erigon_nodeInfo.go new file mode 100644 index 00000000000..68ef98d9299 --- /dev/null +++ b/cmd/rpcdaemon22/commands/erigon_nodeInfo.go @@ -0,0 +1,16 @@ +package commands + +import ( + "context" + + "github.com/ledgerwatch/erigon/p2p" +) + +const ( + // allNodesInfo used in NodeInfo request to receive meta data from all running sentries. + allNodesInfo = 0 +) + +func (api *ErigonImpl) NodeInfo(ctx context.Context) ([]p2p.NodeInfo, error) { + return api.ethBackend.NodeInfo(ctx, allNodesInfo) +} diff --git a/cmd/rpcdaemon22/commands/erigon_receipts.go b/cmd/rpcdaemon22/commands/erigon_receipts.go new file mode 100644 index 00000000000..cc9a6bae69f --- /dev/null +++ b/cmd/rpcdaemon22/commands/erigon_receipts.go @@ -0,0 +1,66 @@ +package commands + +import ( + "context" + "fmt" + + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/core/types" +) + +// GetLogsByHash implements erigon_getLogsByHash. Returns an array of arrays of logs generated by the transactions in the block given by the block's hash. 
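+// The reply is indexed as logs[transactionIndex][logIndex], assembled from the block's receipts.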
+func (api *ErigonImpl) GetLogsByHash(ctx context.Context, hash common.Hash) ([][]*types.Log, error) { + tx, err := api.db.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + + chainConfig, err := api.chainConfig(tx) + if err != nil { + return nil, err + } + + block, err := api.blockByHashWithSenders(tx, hash) + if err != nil { + return nil, err + } + if block == nil { + return nil, nil + } + receipts, err := api.getReceipts(ctx, tx, chainConfig, block, block.Body().SendersFromTxs()) + if err != nil { + return nil, fmt.Errorf("getReceipts error: %w", err) + } + + logs := make([][]*types.Log, len(receipts)) + for i, receipt := range receipts { + logs[i] = receipt.Logs + } + return logs, nil +} + +// GetLogsByNumber implements erigon_getLogsByHash. Returns all the logs that appear in a block given the block's hash. +// func (api *ErigonImpl) GetLogsByNumber(ctx context.Context, number rpc.BlockNumber) ([][]*types.Log, error) { +// tx, err := api.db.Begin(ctx, false) +// if err != nil { +// return nil, err +// } +// defer tx.Rollback() + +// number := rawdb.ReadHeaderNumber(tx, hash) +// if number == nil { +// return nil, fmt.Errorf("block not found: %x", hash) +// } + +// receipts, err := getReceipts(ctx, tx, *number, hash) +// if err != nil { +// return nil, fmt.Errorf("getReceipts error: %w", err) +// } + +// logs := make([][]*types.Log, len(receipts)) +// for i, receipt := range receipts { +// logs[i] = receipt.Logs +// } +// return logs, nil +// } diff --git a/cmd/rpcdaemon22/commands/erigon_system.go b/cmd/rpcdaemon22/commands/erigon_system.go new file mode 100644 index 00000000000..67f4190fc3d --- /dev/null +++ b/cmd/rpcdaemon22/commands/erigon_system.go @@ -0,0 +1,31 @@ +package commands + +import ( + "context" + + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/core/forkid" +) + +// Forks is a data type to record a list of forks passed by this node +type Forks struct { + GenesisHash common.Hash `json:"genesis"` + Forks []uint64 `json:"forks"` +} + +// Forks implements erigon_forks. Returns the genesis block hash and a sorted list of all forks block numbers +func (api *ErigonImpl) Forks(ctx context.Context) (Forks, error) { + tx, err := api.db.BeginRo(ctx) + if err != nil { + return Forks{}, err + } + defer tx.Rollback() + + chainConfig, genesis, err := api.chainConfigWithGenesis(tx) + if err != nil { + return Forks{}, err + } + forksBlocks := forkid.GatherForks(chainConfig) + + return Forks{genesis.Hash(), forksBlocks}, nil +} diff --git a/cmd/rpcdaemon22/commands/error_messages.go b/cmd/rpcdaemon22/commands/error_messages.go new file mode 100644 index 00000000000..b593ea59516 --- /dev/null +++ b/cmd/rpcdaemon22/commands/error_messages.go @@ -0,0 +1,10 @@ +package commands + +// NotImplemented is the URI prefix for smartcard wallets. 
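+// It is a format string taking the method name; a hypothetical use: fmt.Errorf(NotImplemented, "eth_someMethod").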
+const NotImplemented = "the method is currently not implemented: %s" + +// NotAvailableChainData x +const NotAvailableChainData = "the function %s is not available, please use --private.api.addr option instead of --datadir option" + +// NotAvailableDeprecated x +const NotAvailableDeprecated = "the method has been deprecated: %s" diff --git a/cmd/rpcdaemon22/commands/eth_accounts.go b/cmd/rpcdaemon22/commands/eth_accounts.go new file mode 100644 index 00000000000..c55869d3999 --- /dev/null +++ b/cmd/rpcdaemon22/commands/eth_accounts.go @@ -0,0 +1,121 @@ +package commands + +import ( + "context" + "fmt" + "math/big" + + "github.com/ledgerwatch/erigon-lib/gointerfaces" + "github.com/ledgerwatch/erigon/turbo/rpchelper" + "google.golang.org/grpc" + + txpool_proto "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/hexutil" + "github.com/ledgerwatch/erigon/rpc" +) + +// GetBalance implements eth_getBalance. Returns the balance of an account for a given address. +func (api *APIImpl) GetBalance(ctx context.Context, address common.Address, blockNrOrHash rpc.BlockNumberOrHash) (*hexutil.Big, error) { + tx, err1 := api.db.BeginRo(ctx) + if err1 != nil { + return nil, fmt.Errorf("getBalance cannot open tx: %w", err1) + } + defer tx.Rollback() + reader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, api.filters, api.stateCache) + if err != nil { + return nil, err + } + + acc, err := reader.ReadAccountData(address) + if err != nil { + return nil, fmt.Errorf("cant get a balance for account %x: %w", address.String(), err) + } + if acc == nil { + // Special case - non-existent account is assumed to have zero balance + return (*hexutil.Big)(big.NewInt(0)), nil + } + + return (*hexutil.Big)(acc.Balance.ToBig()), nil +} + +// GetTransactionCount implements eth_getTransactionCount. Returns the number of transactions sent from an address (the nonce). +func (api *APIImpl) GetTransactionCount(ctx context.Context, address common.Address, blockNrOrHash rpc.BlockNumberOrHash) (*hexutil.Uint64, error) { + if blockNrOrHash.BlockNumber != nil && *blockNrOrHash.BlockNumber == rpc.PendingBlockNumber { + reply, err := api.txPool.Nonce(ctx, &txpool_proto.NonceRequest{ + Address: gointerfaces.ConvertAddressToH160(address), + }, &grpc.EmptyCallOption{}) + if err != nil { + return nil, err + } + if reply.Found { + reply.Nonce++ + return (*hexutil.Uint64)(&reply.Nonce), nil + } + } + tx, err1 := api.db.BeginRo(ctx) + if err1 != nil { + return nil, fmt.Errorf("getTransactionCount cannot open tx: %w", err1) + } + defer tx.Rollback() + reader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, api.filters, api.stateCache) + if err != nil { + return nil, err + } + nonce := hexutil.Uint64(0) + acc, err := reader.ReadAccountData(address) + if acc == nil || err != nil { + return &nonce, err + } + return (*hexutil.Uint64)(&acc.Nonce), err +} + +// GetCode implements eth_getCode. Returns the byte code at a given address (if it's a smart contract). 
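+// Request sketch (illustrative address; "latest" selects the head state):
+//   {"jsonrpc":"2.0","method":"eth_getCode","params":["0x71562b71999873db5b286df957af199ec94617f7","latest"],"id":1}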
+func (api *APIImpl) GetCode(ctx context.Context, address common.Address, blockNrOrHash rpc.BlockNumberOrHash) (hexutil.Bytes, error) { + tx, err1 := api.db.BeginRo(ctx) + if err1 != nil { + return nil, fmt.Errorf("getCode cannot open tx: %w", err1) + } + defer tx.Rollback() + reader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, api.filters, api.stateCache) + if err != nil { + return nil, err + } + + acc, err := reader.ReadAccountData(address) + if acc == nil || err != nil { + return hexutil.Bytes(""), nil + } + res, _ := reader.ReadAccountCode(address, acc.Incarnation, acc.CodeHash) + if res == nil { + return hexutil.Bytes(""), nil + } + return res, nil +} + +// GetStorageAt implements eth_getStorageAt. Returns the value from a storage position at a given address. +func (api *APIImpl) GetStorageAt(ctx context.Context, address common.Address, index string, blockNrOrHash rpc.BlockNumberOrHash) (string, error) { + var empty []byte + + tx, err1 := api.db.BeginRo(ctx) + if err1 != nil { + return hexutil.Encode(common.LeftPadBytes(empty, 32)), err1 + } + defer tx.Rollback() + + reader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, api.filters, api.stateCache) + if err != nil { + return hexutil.Encode(common.LeftPadBytes(empty, 32)), err + } + acc, err := reader.ReadAccountData(address) + if acc == nil || err != nil { + return hexutil.Encode(common.LeftPadBytes(empty, 32)), err + } + + location := common.HexToHash(index) + res, err := reader.ReadAccountStorage(address, acc.Incarnation, &location) + if err != nil { + res = empty + } + return hexutil.Encode(common.LeftPadBytes(res, 32)), err +} diff --git a/cmd/rpcdaemon22/commands/eth_api.go b/cmd/rpcdaemon22/commands/eth_api.go new file mode 100644 index 00000000000..ab06d94ce5a --- /dev/null +++ b/cmd/rpcdaemon22/commands/eth_api.go @@ -0,0 +1,356 @@ +package commands + +import ( + "bytes" + "context" + "math/big" + "sync" + + lru "github.com/hashicorp/golang-lru" + "github.com/holiman/uint256" + "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/kvcache" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/hexutil" + "github.com/ledgerwatch/erigon/common/math" + "github.com/ledgerwatch/erigon/consensus/misc" + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/types" + ethFilters "github.com/ledgerwatch/erigon/eth/filters" + "github.com/ledgerwatch/erigon/internal/ethapi" + "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/turbo/rpchelper" + "github.com/ledgerwatch/erigon/turbo/services" +) + +// EthAPI is a collection of functions that are exposed in the +type EthAPI interface { + // Block related (proposed file: ./eth_blocks.go) + GetBlockByNumber(ctx context.Context, number rpc.BlockNumber, fullTx bool) (map[string]interface{}, error) + GetBlockByHash(ctx context.Context, hash rpc.BlockNumberOrHash, fullTx bool) (map[string]interface{}, error) + GetBlockTransactionCountByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*hexutil.Uint, error) + GetBlockTransactionCountByHash(ctx context.Context, blockHash common.Hash) (*hexutil.Uint, error) + + // Transaction related (see ./eth_txs.go) + GetTransactionByHash(ctx context.Context, hash common.Hash) (*RPCTransaction, error) + GetTransactionByBlockHashAndIndex(ctx context.Context, blockHash common.Hash, txIndex hexutil.Uint64) (*RPCTransaction, error) + 
GetTransactionByBlockNumberAndIndex(ctx context.Context, blockNr rpc.BlockNumber, txIndex hexutil.Uint) (*RPCTransaction, error) + GetRawTransactionByBlockNumberAndIndex(ctx context.Context, blockNr rpc.BlockNumber, index hexutil.Uint) (hexutil.Bytes, error) + GetRawTransactionByBlockHashAndIndex(ctx context.Context, blockHash common.Hash, index hexutil.Uint) (hexutil.Bytes, error) + GetRawTransactionByHash(ctx context.Context, hash common.Hash) (hexutil.Bytes, error) + + // Receipt related (see ./eth_receipts.go) + GetTransactionReceipt(ctx context.Context, hash common.Hash) (map[string]interface{}, error) + GetLogs(ctx context.Context, crit ethFilters.FilterCriteria) ([]*types.Log, error) + GetBlockReceipts(ctx context.Context, number rpc.BlockNumber) ([]map[string]interface{}, error) + + // Uncle related (see ./eth_uncles.go) + GetUncleByBlockNumberAndIndex(ctx context.Context, blockNr rpc.BlockNumber, index hexutil.Uint) (map[string]interface{}, error) + GetUncleByBlockHashAndIndex(ctx context.Context, hash common.Hash, index hexutil.Uint) (map[string]interface{}, error) + GetUncleCountByBlockNumber(ctx context.Context, number rpc.BlockNumber) (*hexutil.Uint, error) + GetUncleCountByBlockHash(ctx context.Context, hash common.Hash) (*hexutil.Uint, error) + + // Filter related (see ./eth_filters.go) + NewPendingTransactionFilter(_ context.Context) (common.Hash, error) + NewBlockFilter(_ context.Context) (common.Hash, error) + NewFilter(_ context.Context, crit ethFilters.FilterCriteria) (common.Hash, error) + UninstallFilter(_ context.Context, index string) (bool, error) + GetFilterChanges(_ context.Context, index string) ([]interface{}, error) + + // Account related (see ./eth_accounts.go) + Accounts(ctx context.Context) ([]common.Address, error) + GetBalance(ctx context.Context, address common.Address, blockNrOrHash rpc.BlockNumberOrHash) (*hexutil.Big, error) + GetTransactionCount(ctx context.Context, address common.Address, blockNrOrHash rpc.BlockNumberOrHash) (*hexutil.Uint64, error) + GetStorageAt(ctx context.Context, address common.Address, index string, blockNrOrHash rpc.BlockNumberOrHash) (string, error) + GetCode(ctx context.Context, address common.Address, blockNrOrHash rpc.BlockNumberOrHash) (hexutil.Bytes, error) + + // System related (see ./eth_system.go) + BlockNumber(ctx context.Context) (hexutil.Uint64, error) + Syncing(ctx context.Context) (interface{}, error) + ChainId(ctx context.Context) (hexutil.Uint64, error) /* called eth_protocolVersion elsewhere */ + ProtocolVersion(_ context.Context) (hexutil.Uint, error) + GasPrice(_ context.Context) (*hexutil.Big, error) + + // Sending related (see ./eth_call.go) + Call(ctx context.Context, args ethapi.CallArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *ethapi.StateOverrides) (hexutil.Bytes, error) + EstimateGas(ctx context.Context, argsOrNil *ethapi.CallArgs, blockNrOrHash *rpc.BlockNumberOrHash) (hexutil.Uint64, error) + SendRawTransaction(ctx context.Context, encodedTx hexutil.Bytes) (common.Hash, error) + SendTransaction(_ context.Context, txObject interface{}) (common.Hash, error) + Sign(ctx context.Context, _ common.Address, _ hexutil.Bytes) (hexutil.Bytes, error) + SignTransaction(_ context.Context, txObject interface{}) (common.Hash, error) + GetProof(ctx context.Context, address common.Address, storageKeys []string, blockNr rpc.BlockNumber) (*interface{}, error) + CreateAccessList(ctx context.Context, args ethapi.CallArgs, blockNrOrHash *rpc.BlockNumberOrHash, optimizeGas *bool) (*accessListResult, error) + + // 
Mining related (see ./eth_mining.go) + Coinbase(ctx context.Context) (common.Address, error) + Hashrate(ctx context.Context) (uint64, error) + Mining(ctx context.Context) (bool, error) + GetWork(ctx context.Context) ([4]string, error) + SubmitWork(ctx context.Context, nonce types.BlockNonce, powHash, digest common.Hash) (bool, error) + SubmitHashrate(ctx context.Context, hashRate hexutil.Uint64, id common.Hash) (bool, error) +} + +type BaseAPI struct { + stateCache kvcache.Cache // thread-safe + blocksLRU *lru.Cache // thread-safe + filters *rpchelper.Filters + _chainConfig *params.ChainConfig + _genesis *types.Block + _genesisLock sync.RWMutex + + _blockReader services.FullBlockReader + _txnReader services.TxnReader + TevmEnabled bool // experiment +} + +func NewBaseApi(f *rpchelper.Filters, stateCache kvcache.Cache, blockReader services.FullBlockReader, singleNodeMode bool) *BaseAPI { + blocksLRUSize := 128 // ~32Mb + if !singleNodeMode { + blocksLRUSize = 512 + } + blocksLRU, err := lru.New(blocksLRUSize) + if err != nil { + panic(err) + } + + return &BaseAPI{filters: f, stateCache: stateCache, blocksLRU: blocksLRU, _blockReader: blockReader, _txnReader: blockReader} +} + +func (api *BaseAPI) chainConfig(tx kv.Tx) (*params.ChainConfig, error) { + cfg, _, err := api.chainConfigWithGenesis(tx) + return cfg, err +} + +func (api *BaseAPI) EnableTevmExperiment() { api.TevmEnabled = true } + +// nolint:unused +func (api *BaseAPI) genesis(tx kv.Tx) (*types.Block, error) { + _, genesis, err := api.chainConfigWithGenesis(tx) + return genesis, err +} + +func (api *BaseAPI) txnLookup(ctx context.Context, tx kv.Tx, txnHash common.Hash) (uint64, bool, error) { + return api._txnReader.TxnLookup(ctx, tx, txnHash) +} + +func (api *BaseAPI) blockByNumberWithSenders(tx kv.Tx, number uint64) (*types.Block, error) { + hash, hashErr := rawdb.ReadCanonicalHash(tx, number) + if hashErr != nil { + return nil, hashErr + } + return api.blockWithSenders(tx, hash, number) +} +func (api *BaseAPI) blockByHashWithSenders(tx kv.Tx, hash common.Hash) (*types.Block, error) { + if api.blocksLRU != nil { + if it, ok := api.blocksLRU.Get(hash); ok && it != nil { + return it.(*types.Block), nil + } + } + number := rawdb.ReadHeaderNumber(tx, hash) + if number == nil { + return nil, nil + } + return api.blockWithSenders(tx, hash, *number) +} +func (api *BaseAPI) blockWithSenders(tx kv.Tx, hash common.Hash, number uint64) (*types.Block, error) { + if api.blocksLRU != nil { + if it, ok := api.blocksLRU.Get(hash); ok && it != nil { + return it.(*types.Block), nil + } + } + block, _, err := api._blockReader.BlockWithSenders(context.Background(), tx, hash, number) + if err != nil { + return nil, err + } + if block == nil { // don't save nil's to cache + return nil, nil + } + // don't save empty blocks to cache, because in Erigon + // if block become non-canonical - we remove it's transactions, but block can become canonical in future + if block.Transactions().Len() == 0 { + return block, nil + } + if api.blocksLRU != nil { + // calc fields before put to cache + for _, txn := range block.Transactions() { + txn.Hash() + } + block.Hash() + api.blocksLRU.Add(hash, block) + } + return block, nil +} + +func (api *BaseAPI) chainConfigWithGenesis(tx kv.Tx) (*params.ChainConfig, *types.Block, error) { + api._genesisLock.RLock() + cc, genesisBlock := api._chainConfig, api._genesis + api._genesisLock.RUnlock() + + if cc != nil { + return cc, genesisBlock, nil + } + genesisBlock, err := rawdb.ReadBlockByNumber(tx, 0) + if err != nil { + 
return nil, nil, err + } + cc, err = rawdb.ReadChainConfig(tx, genesisBlock.Hash()) + if err != nil { + return nil, nil, err + } + if cc != nil && genesisBlock != nil { + api._genesisLock.Lock() + api._genesis = genesisBlock + api._chainConfig = cc + api._genesisLock.Unlock() + } + return cc, genesisBlock, nil +} + +func (api *BaseAPI) pendingBlock() *types.Block { + return api.filters.LastPendingBlock() +} + +func (api *BaseAPI) blockByRPCNumber(number rpc.BlockNumber, tx kv.Tx) (*types.Block, error) { + if number == rpc.PendingBlockNumber { + return api.pendingBlock(), nil + } + + n, err := getBlockNumber(number, tx) + if err != nil { + return nil, err + } + + block, err := api.blockByNumberWithSenders(tx, n) + return block, err +} + +// APIImpl is implementation of the EthAPI interface based on remote Db access +type APIImpl struct { + *BaseAPI + ethBackend rpchelper.ApiBackend + txPool txpool.TxpoolClient + mining txpool.MiningClient + db kv.RoDB + GasCap uint64 +} + +// NewEthAPI returns APIImpl instance +func NewEthAPI(base *BaseAPI, db kv.RoDB, eth rpchelper.ApiBackend, txPool txpool.TxpoolClient, mining txpool.MiningClient, gascap uint64) *APIImpl { + if gascap == 0 { + gascap = uint64(math.MaxUint64 / 2) + } + + return &APIImpl{ + BaseAPI: base, + db: db, + ethBackend: eth, + txPool: txPool, + mining: mining, + GasCap: gascap, + } +} + +// RPCTransaction represents a transaction that will serialize to the RPC representation of a transaction +type RPCTransaction struct { + BlockHash *common.Hash `json:"blockHash"` + BlockNumber *hexutil.Big `json:"blockNumber"` + From common.Address `json:"from"` + Gas hexutil.Uint64 `json:"gas"` + GasPrice *hexutil.Big `json:"gasPrice,omitempty"` + Tip *hexutil.Big `json:"maxPriorityFeePerGas,omitempty"` + FeeCap *hexutil.Big `json:"maxFeePerGas,omitempty"` + Hash common.Hash `json:"hash"` + Input hexutil.Bytes `json:"input"` + Nonce hexutil.Uint64 `json:"nonce"` + To *common.Address `json:"to"` + TransactionIndex *hexutil.Uint64 `json:"transactionIndex"` + Value *hexutil.Big `json:"value"` + Type hexutil.Uint64 `json:"type"` + Accesses *types.AccessList `json:"accessList,omitempty"` + ChainID *hexutil.Big `json:"chainId,omitempty"` + V *hexutil.Big `json:"v"` + R *hexutil.Big `json:"r"` + S *hexutil.Big `json:"s"` +} + +// newRPCTransaction returns a transaction that will serialize to the RPC +// representation, with the given location metadata set (if available). +func newRPCTransaction(tx types.Transaction, blockHash common.Hash, blockNumber uint64, index uint64, baseFee *big.Int) *RPCTransaction { + // Determine the signer. For replay-protected transactions, use the most permissive + // signer, because we assume that signers are backwards-compatible with old + // transactions. For non-protected transactions, the homestead signer signer is used + // because the return value of ChainId is zero for those transactions. 
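+	// chainId is filled in from the concrete transaction type in the switch below and then
+	// passed to LatestSignerForChainID to recover the sender for the From field.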
+ var chainId *big.Int + result := &RPCTransaction{ + Type: hexutil.Uint64(tx.Type()), + Gas: hexutil.Uint64(tx.GetGas()), + Hash: tx.Hash(), + Input: hexutil.Bytes(tx.GetData()), + Nonce: hexutil.Uint64(tx.GetNonce()), + To: tx.GetTo(), + Value: (*hexutil.Big)(tx.GetValue().ToBig()), + } + switch t := tx.(type) { + case *types.LegacyTx: + chainId = types.DeriveChainId(&t.V).ToBig() + result.GasPrice = (*hexutil.Big)(t.GasPrice.ToBig()) + result.V = (*hexutil.Big)(t.V.ToBig()) + result.R = (*hexutil.Big)(t.R.ToBig()) + result.S = (*hexutil.Big)(t.S.ToBig()) + case *types.AccessListTx: + chainId = t.ChainID.ToBig() + result.ChainID = (*hexutil.Big)(chainId) + result.GasPrice = (*hexutil.Big)(t.GasPrice.ToBig()) + result.V = (*hexutil.Big)(t.V.ToBig()) + result.R = (*hexutil.Big)(t.R.ToBig()) + result.S = (*hexutil.Big)(t.S.ToBig()) + result.Accesses = &t.AccessList + case *types.DynamicFeeTransaction: + chainId = t.ChainID.ToBig() + result.ChainID = (*hexutil.Big)(chainId) + result.Tip = (*hexutil.Big)(t.Tip.ToBig()) + result.FeeCap = (*hexutil.Big)(t.FeeCap.ToBig()) + result.V = (*hexutil.Big)(t.V.ToBig()) + result.R = (*hexutil.Big)(t.R.ToBig()) + result.S = (*hexutil.Big)(t.S.ToBig()) + result.Accesses = &t.AccessList + baseFee, overflow := uint256.FromBig(baseFee) + if baseFee != nil && !overflow && blockHash != (common.Hash{}) { + // price = min(tip + baseFee, gasFeeCap) + price := math.Min256(new(uint256.Int).Add(tx.GetTip(), baseFee), tx.GetFeeCap()) + result.GasPrice = (*hexutil.Big)(price.ToBig()) + } else { + result.GasPrice = nil + } + } + signer := types.LatestSignerForChainID(chainId) + result.From, _ = tx.Sender(*signer) + if blockHash != (common.Hash{}) { + result.BlockHash = &blockHash + result.BlockNumber = (*hexutil.Big)(new(big.Int).SetUint64(blockNumber)) + result.TransactionIndex = (*hexutil.Uint64)(&index) + } + return result +} + +// newRPCPendingTransaction returns a pending transaction that will serialize to the RPC representation +func newRPCPendingTransaction(tx types.Transaction, current *types.Header, config *params.ChainConfig) *RPCTransaction { + var baseFee *big.Int + if current != nil { + baseFee = misc.CalcBaseFee(config, current) + } + return newRPCTransaction(tx, common.Hash{}, 0, 0, baseFee) +} + +// newRPCRawTransactionFromBlockIndex returns the bytes of a transaction given a block and a transaction index. 
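+// An out-of-range index returns (nil, nil) rather than an error, so callers can surface a
+// null result over RPC.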
+func newRPCRawTransactionFromBlockIndex(b *types.Block, index uint64) (hexutil.Bytes, error) { + txs := b.Transactions() + if index >= uint64(len(txs)) { + return nil, nil + } + var buf bytes.Buffer + err := txs[index].MarshalBinary(&buf) + return buf.Bytes(), err +} diff --git a/cmd/rpcdaemon22/commands/eth_api_test.go b/cmd/rpcdaemon22/commands/eth_api_test.go new file mode 100644 index 00000000000..f271b7cf824 --- /dev/null +++ b/cmd/rpcdaemon22/commands/eth_api_test.go @@ -0,0 +1,219 @@ +package commands + +import ( + "context" + "fmt" + "testing" + + "github.com/ledgerwatch/erigon/core" + "github.com/ledgerwatch/erigon/internal/ethapi" + "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/turbo/snapshotsync" + "github.com/stretchr/testify/assert" + + "github.com/ledgerwatch/erigon-lib/kv/kvcache" + "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/rpcdaemontest" + "github.com/ledgerwatch/erigon/common" +) + +func TestGetTransactionReceipt(t *testing.T) { + db := rpcdaemontest.CreateTestKV(t) + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + // Call GetTransactionReceipt for transaction which is not in the database + if _, err := api.GetTransactionReceipt(context.Background(), common.Hash{}); err != nil { + t.Errorf("calling GetTransactionReceipt with empty hash: %v", err) + } +} + +func TestGetTransactionReceiptUnprotected(t *testing.T) { + db := rpcdaemontest.CreateTestKV(t) + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + // Call GetTransactionReceipt for un-protected transaction + if _, err := api.GetTransactionReceipt(context.Background(), common.HexToHash("0x3f3cb8a0e13ed2481f97f53f7095b9cbc78b6ffb779f2d3e565146371a8830ea")); err != nil { + t.Errorf("calling GetTransactionReceipt for unprotected tx: %v", err) + } +} + +// EIP-1898 test cases + +func TestGetStorageAt_ByBlockNumber_WithRequireCanonicalDefault(t *testing.T) { + assert := assert.New(t) + db := rpcdaemontest.CreateTestKV(t) + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + addr := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") + + result, err := api.GetStorageAt(context.Background(), addr, "0x0", rpc.BlockNumberOrHashWithNumber(0)) + if err != nil { + t.Errorf("calling GetStorageAt: %v", err) + } + + assert.Equal(common.HexToHash("0x0").String(), result) +} + +func TestGetStorageAt_ByBlockHash_WithRequireCanonicalDefault(t *testing.T) { + assert := assert.New(t) + m, _, _ := rpcdaemontest.CreateTestSentry(t) + db := m.DB + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + addr := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") + + result, err := api.GetStorageAt(context.Background(), addr, "0x0", rpc.BlockNumberOrHashWithHash(m.Genesis.Hash(), false)) + if err != nil { + t.Errorf("calling GetStorageAt: %v", err) + } + + assert.Equal(common.HexToHash("0x0").String(), result) +} + +func TestGetStorageAt_ByBlockHash_WithRequireCanonicalTrue(t *testing.T) { + assert := assert.New(t) + m, _, _ := rpcdaemontest.CreateTestSentry(t) + db := m.DB + stateCache := 
kvcache.New(kvcache.DefaultCoherentConfig) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + addr := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") + + result, err := api.GetStorageAt(context.Background(), addr, "0x0", rpc.BlockNumberOrHashWithHash(m.Genesis.Hash(), true)) + if err != nil { + t.Errorf("calling GetStorageAt: %v", err) + } + + assert.Equal(common.HexToHash("0x0").String(), result) +} + +func TestGetStorageAt_ByBlockHash_WithRequireCanonicalDefault_BlockNotFoundError(t *testing.T) { + m, _, _ := rpcdaemontest.CreateTestSentry(t) + db := m.DB + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + addr := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") + + offChain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 1, func(i int, block *core.BlockGen) { + }, true) + if err != nil { + t.Fatal(err) + } + offChainBlock := offChain.Blocks[0] + + if _, err := api.GetStorageAt(context.Background(), addr, "0x0", rpc.BlockNumberOrHashWithHash(offChainBlock.Hash(), false)); err != nil { + if fmt.Sprintf("%v", err) != fmt.Sprintf("block %s not found", offChainBlock.Hash().String()[2:]) { + t.Errorf("wrong error: %v", err) + } + } else { + t.Error("error expected") + } +} + +func TestGetStorageAt_ByBlockHash_WithRequireCanonicalTrue_BlockNotFoundError(t *testing.T) { + m, _, _ := rpcdaemontest.CreateTestSentry(t) + db := m.DB + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + addr := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") + + offChain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 1, func(i int, block *core.BlockGen) { + }, true) + if err != nil { + t.Fatal(err) + } + offChainBlock := offChain.Blocks[0] + + if _, err := api.GetStorageAt(context.Background(), addr, "0x0", rpc.BlockNumberOrHashWithHash(offChainBlock.Hash(), true)); err != nil { + if fmt.Sprintf("%v", err) != fmt.Sprintf("block %s not found", offChainBlock.Hash().String()[2:]) { + t.Errorf("wrong error: %v", err) + } + } else { + t.Error("error expected") + } +} + +func TestGetStorageAt_ByBlockHash_WithRequireCanonicalDefault_NonCanonicalBlock(t *testing.T) { + assert := assert.New(t) + m, _, orphanedChain := rpcdaemontest.CreateTestSentry(t) + db := m.DB + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + addr := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") + + orphanedBlock := orphanedChain[0].Blocks[0] + + result, err := api.GetStorageAt(context.Background(), addr, "0x0", rpc.BlockNumberOrHashWithHash(orphanedBlock.Hash(), false)) + if err != nil { + if fmt.Sprintf("%v", err) != fmt.Sprintf("hash %s is not currently canonical", orphanedBlock.Hash().String()[2:]) { + t.Errorf("wrong error: %v", err) + } + } else { + t.Error("error expected") + } + + assert.Equal(common.HexToHash("0x0").String(), result) +} + +func TestGetStorageAt_ByBlockHash_WithRequireCanonicalTrue_NonCanonicalBlock(t *testing.T) { + m, _, orphanedChain := rpcdaemontest.CreateTestSentry(t) + db := m.DB + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + api := NewEthAPI(NewBaseApi(nil, stateCache, 
snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + addr := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") + + orphanedBlock := orphanedChain[0].Blocks[0] + + if _, err := api.GetStorageAt(context.Background(), addr, "0x0", rpc.BlockNumberOrHashWithHash(orphanedBlock.Hash(), true)); err != nil { + if fmt.Sprintf("%v", err) != fmt.Sprintf("hash %s is not currently canonical", orphanedBlock.Hash().String()[2:]) { + t.Errorf("wrong error: %v", err) + } + } else { + t.Error("error expected") + } +} + +func TestCall_ByBlockHash_WithRequireCanonicalDefault_NonCanonicalBlock(t *testing.T) { + m, _, orphanedChain := rpcdaemontest.CreateTestSentry(t) + db := m.DB + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + from := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") + to := common.HexToAddress("0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e") + + orphanedBlock := orphanedChain[0].Blocks[0] + + if _, err := api.Call(context.Background(), ethapi.CallArgs{ + From: &from, + To: &to, + }, rpc.BlockNumberOrHashWithHash(orphanedBlock.Hash(), false), nil); err != nil { + if fmt.Sprintf("%v", err) != fmt.Sprintf("hash %s is not currently canonical", orphanedBlock.Hash().String()[2:]) { + /* Not sure. Here https://github.com/ethereum/EIPs/blob/master/EIPS/eip-1898.md it is not explicitly said that + eth_call should only work with canonical blocks. + But since there is no point in changing the state of non-canonical block, it ignores RequireCanonical. */ + t.Errorf("wrong error: %v", err) + } + } else { + t.Error("error expected") + } +} + +func TestCall_ByBlockHash_WithRequireCanonicalTrue_NonCanonicalBlock(t *testing.T) { + m, _, orphanedChain := rpcdaemontest.CreateTestSentry(t) + db := m.DB + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + from := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") + to := common.HexToAddress("0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e") + + orphanedBlock := orphanedChain[0].Blocks[0] + + if _, err := api.Call(context.Background(), ethapi.CallArgs{ + From: &from, + To: &to, + }, rpc.BlockNumberOrHashWithHash(orphanedBlock.Hash(), true), nil); err != nil { + if fmt.Sprintf("%v", err) != fmt.Sprintf("hash %s is not currently canonical", orphanedBlock.Hash().String()[2:]) { + t.Errorf("wrong error: %v", err) + } + } else { + t.Error("error expected") + } +} diff --git a/cmd/rpcdaemon22/commands/eth_block.go b/cmd/rpcdaemon22/commands/eth_block.go new file mode 100644 index 00000000000..985f22256ff --- /dev/null +++ b/cmd/rpcdaemon22/commands/eth_block.go @@ -0,0 +1,320 @@ +package commands + +import ( + "context" + "fmt" + "math/big" + "time" + + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/hexutil" + "github.com/ledgerwatch/erigon/common/math" + "github.com/ledgerwatch/erigon/core" + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/state" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/core/vm" + "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/turbo/adapter/ethapi" + "github.com/ledgerwatch/erigon/turbo/rpchelper" + "github.com/ledgerwatch/erigon/turbo/transactions" + "github.com/ledgerwatch/log/v3" + 
"golang.org/x/crypto/sha3" +) + +func (api *APIImpl) CallBundle(ctx context.Context, txHashes []common.Hash, stateBlockNumberOrHash rpc.BlockNumberOrHash, timeoutMilliSecondsPtr *int64) (map[string]interface{}, error) { + tx, err := api.db.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + + chainConfig, err := api.chainConfig(tx) + if err != nil { + return nil, err + } + + if len(txHashes) == 0 { + return nil, nil + } + + var txs types.Transactions + + for _, txHash := range txHashes { + blockNum, ok, err := api.txnLookup(ctx, tx, txHash) + if err != nil { + return nil, err + } + if !ok { + return nil, nil + } + block, err := api.blockByNumberWithSenders(tx, blockNum) + if err != nil { + return nil, err + } + if block == nil { + return nil, nil + } + var txn types.Transaction + for _, transaction := range block.Transactions() { + if transaction.Hash() == txHash { + txn = transaction + break + } + } + if txn == nil { + return nil, nil // not error, see https://github.com/ledgerwatch/turbo-geth/issues/1645 + } + txs = append(txs, txn) + } + defer func(start time.Time) { log.Trace("Executing EVM call finished", "runtime", time.Since(start)) }(time.Now()) + + stateBlockNumber, hash, latest, err := rpchelper.GetBlockNumber(stateBlockNumberOrHash, tx, api.filters) + if err != nil { + return nil, err + } + + var stateReader state.StateReader + if latest { + cacheView, err := api.stateCache.View(ctx, tx) + if err != nil { + return nil, err + } + stateReader = state.NewCachedReader2(cacheView, tx) + } else { + stateReader = state.NewPlainState(tx, stateBlockNumber) + } + st := state.New(stateReader) + + parent := rawdb.ReadHeader(tx, hash, stateBlockNumber) + if parent == nil { + return nil, fmt.Errorf("block %d(%x) not found", stateBlockNumber, hash) + } + + blockNumber := stateBlockNumber + 1 + + timestamp := parent.Time // Dont care about the timestamp + + coinbase := parent.Coinbase + header := &types.Header{ + ParentHash: parent.Hash(), + Number: big.NewInt(int64(blockNumber)), + GasLimit: parent.GasLimit, + Time: timestamp, + Difficulty: parent.Difficulty, + Coinbase: coinbase, + } + + // Get a new instance of the EVM + signer := types.MakeSigner(chainConfig, blockNumber) + rules := chainConfig.Rules(blockNumber) + firstMsg, err := txs[0].AsMessage(*signer, nil, rules) + if err != nil { + return nil, err + } + + contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } + if api.TevmEnabled { + contractHasTEVM = ethdb.GetHasTEVM(tx) + } + + blockCtx, txCtx := transactions.GetEvmContext(firstMsg, header, stateBlockNumberOrHash.RequireCanonical, tx, contractHasTEVM) + evm := vm.NewEVM(blockCtx, txCtx, st, chainConfig, vm.Config{Debug: false}) + + timeoutMilliSeconds := int64(5000) + if timeoutMilliSecondsPtr != nil { + timeoutMilliSeconds = *timeoutMilliSecondsPtr + } + timeout := time.Millisecond * time.Duration(timeoutMilliSeconds) + // Setup context so it may be cancelled the call has completed + // or, in case of unmetered gas, setup a context with a timeout. + var cancel context.CancelFunc + if timeout > 0 { + ctx, cancel = context.WithTimeout(ctx, timeout) + } else { + ctx, cancel = context.WithCancel(ctx) + } + // Make sure the context is cancelled when the call has completed + // this makes sure resources are cleaned up. + defer cancel() + + // Wait for the context to be done and cancel the evm. 
Even if the + // EVM has finished, cancelling may be done (repeatedly) + go func() { + <-ctx.Done() + evm.Cancel() + }() + + // Setup the gas pool (also for unmetered requests) + // and apply the message. + gp := new(core.GasPool).AddGas(math.MaxUint64) + + results := []map[string]interface{}{} + + bundleHash := sha3.NewLegacyKeccak256() + for _, txn := range txs { + msg, err := txn.AsMessage(*signer, nil, rules) + if err != nil { + return nil, err + } + // Execute the transaction message + result, err := core.ApplyMessage(evm, msg, gp, true /* refunds */, false /* gasBailout */) + if err != nil { + return nil, err + } + // If the timer caused an abort, return an appropriate error message + if evm.Cancelled() { + return nil, fmt.Errorf("execution aborted (timeout = %v)", timeout) + } + + txHash := txn.Hash().String() + jsonResult := map[string]interface{}{ + "txHash": txHash, + "gasUsed": result.UsedGas, + } + bundleHash.Write(txn.Hash().Bytes()) + if result.Err != nil { + jsonResult["error"] = result.Err.Error() + } else { + jsonResult["value"] = common.BytesToHash(result.Return()) + } + + results = append(results, jsonResult) + } + + ret := map[string]interface{}{} + ret["results"] = results + ret["bundleHash"] = hexutil.Encode(bundleHash.Sum(nil)) + return ret, nil +} + +// GetBlockByNumber implements eth_getBlockByNumber. Returns information about a block given the block's number. +func (api *APIImpl) GetBlockByNumber(ctx context.Context, number rpc.BlockNumber, fullTx bool) (map[string]interface{}, error) { + tx, err := api.db.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + b, err := api.blockByRPCNumber(number, tx) + if err != nil { + return nil, err + } + if b == nil { + return nil, nil + } + additionalFields := make(map[string]interface{}) + td, err := rawdb.ReadTd(tx, b.Hash(), b.NumberU64()) + if err != nil { + return nil, err + } + additionalFields["totalDifficulty"] = (*hexutil.Big)(td) + response, err := ethapi.RPCMarshalBlock(b, true, fullTx, additionalFields) + + if err == nil && number == rpc.PendingBlockNumber { + // Pending blocks need to nil out a few fields + for _, field := range []string{"hash", "nonce", "miner"} { + response[field] = nil + } + } + return response, err +} + +// GetBlockByHash implements eth_getBlockByHash. Returns information about a block given the block's hash. +func (api *APIImpl) GetBlockByHash(ctx context.Context, numberOrHash rpc.BlockNumberOrHash, fullTx bool) (map[string]interface{}, error) { + if numberOrHash.BlockHash == nil { + // some web3.js based apps (like ethstats client) for some reason call + // eth_getBlockByHash with a block number as a parameter + // so no matter how weird that is, we would love to support that. 
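+	// If only a block number was supplied, delegate to GetBlockByNumber; if
+	// neither a number nor a hash is present, return nil rather than an error.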
+ if numberOrHash.BlockNumber == nil { + return nil, nil // not error, see https://github.com/ledgerwatch/erigon/issues/1645 + } + return api.GetBlockByNumber(ctx, *numberOrHash.BlockNumber, fullTx) + } + + hash := *numberOrHash.BlockHash + tx, err := api.db.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + + additionalFields := make(map[string]interface{}) + + block, err := api.blockByHashWithSenders(tx, hash) + if err != nil { + return nil, err + } + if block == nil { + return nil, nil // not error, see https://github.com/ledgerwatch/erigon/issues/1645 + } + number := block.NumberU64() + + td, err := rawdb.ReadTd(tx, hash, number) + if err != nil { + return nil, err + } + additionalFields["totalDifficulty"] = (*hexutil.Big)(td) + response, err := ethapi.RPCMarshalBlock(block, true, fullTx, additionalFields) + + if err == nil && int64(number) == rpc.PendingBlockNumber.Int64() { + // Pending blocks need to nil out a few fields + for _, field := range []string{"hash", "nonce", "miner"} { + response[field] = nil + } + } + return response, err +} + +// GetBlockTransactionCountByNumber implements eth_getBlockTransactionCountByNumber. Returns the number of transactions in a block given the block's block number. +func (api *APIImpl) GetBlockTransactionCountByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*hexutil.Uint, error) { + tx, err := api.db.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + if blockNr == rpc.PendingBlockNumber { + b, err := api.blockByRPCNumber(blockNr, tx) + if err != nil { + return nil, err + } + if b == nil { + return nil, nil + } + n := hexutil.Uint(len(b.Transactions())) + return &n, nil + } + blockNum, err := getBlockNumber(blockNr, tx) + if err != nil { + return nil, err + } + body, _, txAmount, err := rawdb.ReadBodyByNumber(tx, blockNum) + if err != nil { + return nil, err + } + if body == nil { + return nil, nil + } + n := hexutil.Uint(txAmount) + return &n, nil +} + +// GetBlockTransactionCountByHash implements eth_getBlockTransactionCountByHash. Returns the number of transactions in a block given the block's block hash. 
+func (api *APIImpl) GetBlockTransactionCountByHash(ctx context.Context, blockHash common.Hash) (*hexutil.Uint, error) { + tx, err := api.db.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + + num := rawdb.ReadHeaderNumber(tx, blockHash) + if num == nil { + return nil, nil + } + body, _, txAmount := rawdb.ReadBody(tx, blockHash, *num) + if body == nil { + return nil, nil + } + n := hexutil.Uint(txAmount) + return &n, nil +} diff --git a/cmd/rpcdaemon22/commands/eth_call.go b/cmd/rpcdaemon22/commands/eth_call.go new file mode 100644 index 00000000000..ce09c837479 --- /dev/null +++ b/cmd/rpcdaemon22/commands/eth_call.go @@ -0,0 +1,453 @@ +package commands + +import ( + "context" + "errors" + "fmt" + "math/big" + + "github.com/holiman/uint256" + "github.com/ledgerwatch/erigon-lib/gointerfaces" + txpool_proto "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/hexutil" + "github.com/ledgerwatch/erigon/core" + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/state" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/core/vm" + "github.com/ledgerwatch/erigon/crypto" + "github.com/ledgerwatch/erigon/eth/tracers/logger" + "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/internal/ethapi" + "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/turbo/rpchelper" + "github.com/ledgerwatch/erigon/turbo/transactions" + "github.com/ledgerwatch/log/v3" + "google.golang.org/grpc" +) + +// Call implements eth_call. Executes a new message call immediately without creating a transaction on the block chain. +func (api *APIImpl) Call(ctx context.Context, args ethapi.CallArgs, blockNrOrHash rpc.BlockNumberOrHash, overrides *ethapi.StateOverrides) (hexutil.Bytes, error) { + tx, err := api.db.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + + chainConfig, err := api.chainConfig(tx) + if err != nil { + return nil, err + } + + if args.Gas == nil || uint64(*args.Gas) == 0 { + args.Gas = (*hexutil.Uint64)(&api.GasCap) + } + + contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } + if api.TevmEnabled { + contractHasTEVM = ethdb.GetHasTEVM(tx) + } + + blockNumber, hash, _, err := rpchelper.GetCanonicalBlockNumber(blockNrOrHash, tx, api.filters) // DoCall cannot be executed on non-canonical blocks + if err != nil { + return nil, err + } + block, err := api.BaseAPI.blockWithSenders(tx, hash, blockNumber) + if err != nil { + return nil, err + } + if block == nil { + return nil, nil + } + + result, err := transactions.DoCall(ctx, args, tx, blockNrOrHash, block, overrides, api.GasCap, chainConfig, api.filters, api.stateCache, contractHasTEVM) + if err != nil { + return nil, err + } + + // If the result contains a revert reason, try to unpack and return it. 
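+	// Note: a standard Solidity revert(reason) is encoded as Error(string),
+	// i.e. the 4-byte selector 0x08c379a0 followed by the ABI-encoded reason
+	// string; NewRevertError below surfaces that payload to the caller.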
+ if len(result.Revert()) > 0 { + return nil, ethapi.NewRevertError(result) + } + + return result.Return(), result.Err +} + +func HeaderByNumberOrHash(ctx context.Context, tx kv.Tx, blockNrOrHash rpc.BlockNumberOrHash) (*types.Header, error) { + if blockLabel, ok := blockNrOrHash.Number(); ok { + blockNum, err := getBlockNumber(blockLabel, tx) + if err != nil { + return nil, err + } + return rawdb.ReadHeaderByNumber(tx, blockNum), nil + } + if hash, ok := blockNrOrHash.Hash(); ok { + header, err := rawdb.ReadHeaderByHash(tx, hash) + if err != nil { + return nil, err + } + if header == nil { + return nil, errors.New("header for hash not found") + } + + if blockNrOrHash.RequireCanonical { + can, err := rawdb.ReadCanonicalHash(tx, header.Number.Uint64()) + if err != nil { + return nil, err + } + if can != hash { + return nil, errors.New("hash is not currently canonical") + } + } + + h := rawdb.ReadHeader(tx, hash, header.Number.Uint64()) + if h == nil { + return nil, errors.New("header found, but block body is missing") + } + return h, nil + } + return nil, errors.New("invalid arguments; neither block nor hash specified") +} + +// EstimateGas implements eth_estimateGas. Returns an estimate of how much gas is necessary to allow the transaction to complete. The transaction will not be added to the blockchain. +func (api *APIImpl) EstimateGas(ctx context.Context, argsOrNil *ethapi.CallArgs, blockNrOrHash *rpc.BlockNumberOrHash) (hexutil.Uint64, error) { + var args ethapi.CallArgs + // if we actually get CallArgs here, we use them + if argsOrNil != nil { + args = *argsOrNil + } + + bNrOrHash := rpc.BlockNumberOrHashWithNumber(rpc.PendingBlockNumber) + if blockNrOrHash != nil { + bNrOrHash = *blockNrOrHash + } + + dbtx, err := api.db.BeginRo(ctx) + if err != nil { + return 0, err + } + defer dbtx.Rollback() + + // Binary search the gas requirement, as it may be higher than the amount used + var ( + lo = params.TxGas - 1 + hi uint64 + cap uint64 + ) + // Use zero address if sender unspecified. + if args.From == nil { + args.From = new(common.Address) + } + + // Determine the highest gas limit can be used during the estimation. + if args.Gas != nil && uint64(*args.Gas) >= params.TxGas { + hi = uint64(*args.Gas) + } else { + // Retrieve the block to act as the gas ceiling + h, err := HeaderByNumberOrHash(ctx, dbtx, bNrOrHash) + if err != nil { + return 0, err + } + hi = h.GasLimit + } + + var feeCap *big.Int + if args.GasPrice != nil && (args.MaxFeePerGas != nil || args.MaxPriorityFeePerGas != nil) { + return 0, errors.New("both gasPrice and (maxFeePerGas or maxPriorityFeePerGas) specified") + } else if args.GasPrice != nil { + feeCap = args.GasPrice.ToInt() + } else if args.MaxFeePerGas != nil { + feeCap = args.MaxFeePerGas.ToInt() + } else { + feeCap = common.Big0 + } + // Recap the highest gas limit with account's available balance. 
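+	// In effect: hi = min(hi, (balance - value) / feeCap), so the search below
+	// never proposes a gas limit the sender could not actually pay for.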
+ if feeCap.Sign() != 0 { + cacheView, err := api.stateCache.View(ctx, dbtx) + if err != nil { + return 0, err + } + stateReader := state.NewCachedReader2(cacheView, dbtx) + state := state.New(stateReader) + if state == nil { + return 0, fmt.Errorf("can't get the current state") + } + + balance := state.GetBalance(*args.From) // from can't be nil + available := balance.ToBig() + if args.Value != nil { + if args.Value.ToInt().Cmp(available) >= 0 { + return 0, errors.New("insufficient funds for transfer") + } + available.Sub(available, args.Value.ToInt()) + } + allowance := new(big.Int).Div(available, feeCap) + + // If the allowance is larger than maximum uint64, skip checking + if allowance.IsUint64() && hi > allowance.Uint64() { + transfer := args.Value + if transfer == nil { + transfer = new(hexutil.Big) + } + log.Warn("Gas estimation capped by limited funds", "original", hi, "balance", balance, + "sent", transfer.ToInt(), "maxFeePerGas", feeCap, "fundable", allowance) + hi = allowance.Uint64() + } + } + + // Recap the highest gas allowance with specified gascap. + if hi > api.GasCap { + log.Warn("Caller gas above allowance, capping", "requested", hi, "cap", api.GasCap) + hi = api.GasCap + } + cap = hi + var lastBlockNum = rpc.LatestBlockNumber + + chainConfig, err := api.chainConfig(dbtx) + if err != nil { + return 0, err + } + + contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } + if api.TevmEnabled { + contractHasTEVM = ethdb.GetHasTEVM(dbtx) + } + + // Create a helper to check if a gas allowance results in an executable transaction + executable := func(gas uint64) (bool, *core.ExecutionResult, error) { + args.Gas = (*hexutil.Uint64)(&gas) + + numOrHash := rpc.BlockNumberOrHash{BlockNumber: &lastBlockNum} + blockNumber, hash, _, err := rpchelper.GetCanonicalBlockNumber(numOrHash, dbtx, api.filters) // DoCall cannot be executed on non-canonical blocks + if err != nil { + return false, nil, err + } + block, err := api.BaseAPI.blockWithSenders(dbtx, hash, blockNumber) + if err != nil { + return false, nil, err + } + if block == nil { + return false, nil, nil + } + + result, err := transactions.DoCall(ctx, args, dbtx, numOrHash, block, nil, + api.GasCap, chainConfig, api.filters, api.stateCache, contractHasTEVM) + if err != nil { + if errors.Is(err, core.ErrIntrinsicGas) { + // Special case, raise gas limit + return true, nil, nil + } + + // Bail out + return true, nil, err + } + return result.Failed(), result, nil + } + // Execute the binary search and hone in on an executable gas limit + for lo+1 < hi { + mid := (hi + lo) / 2 + failed, _, err := executable(mid) + + // If the error is not nil(consensus error), it means the provided message + // call or transaction will never be accepted no matter how much gas it is + // assigened. Return the error directly, don't struggle any more. 
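+		// Search invariant: gas amounts at or below lo are known to fail, while
+		// hi is the smallest amount known (or assumed) to succeed, so the loop
+		// converges on the minimal workable gas limit. Note that ErrIntrinsicGas
+		// is reported as "failed" by executable(), which simply pushes lo upwards.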
+ if err != nil { + return 0, err + } + if failed { + lo = mid + } else { + hi = mid + } + } + // Reject the transaction as invalid if it still fails at the highest allowance + if hi == cap { + failed, result, err := executable(hi) + if err != nil { + return 0, err + } + if failed { + if result != nil && !errors.Is(result.Err, vm.ErrOutOfGas) { + if len(result.Revert()) > 0 { + return 0, ethapi.NewRevertError(result) + } + return 0, result.Err + } + // Otherwise, the specified gas cap is too low + return 0, fmt.Errorf("gas required exceeds allowance (%d)", cap) + } + } + return hexutil.Uint64(hi), nil +} + +// GetProof not implemented +func (api *APIImpl) GetProof(ctx context.Context, address common.Address, storageKeys []string, blockNr rpc.BlockNumber) (*interface{}, error) { + var stub interface{} + return &stub, fmt.Errorf(NotImplemented, "eth_getProof") +} + +// accessListResult returns an optional accesslist +// Its the result of the `eth_createAccessList` RPC call. +// It contains an error if the transaction itself failed. +type accessListResult struct { + Accesslist *types.AccessList `json:"accessList"` + Error string `json:"error,omitempty"` + GasUsed hexutil.Uint64 `json:"gasUsed"` +} + +// CreateAccessList implements eth_createAccessList. It creates an access list for the given transaction. +// If the accesslist creation fails an error is returned. +// If the transaction itself fails, an vmErr is returned. +func (api *APIImpl) CreateAccessList(ctx context.Context, args ethapi.CallArgs, blockNrOrHash *rpc.BlockNumberOrHash, optimizeGas *bool) (*accessListResult, error) { + bNrOrHash := rpc.BlockNumberOrHashWithNumber(rpc.PendingBlockNumber) + if blockNrOrHash != nil { + bNrOrHash = *blockNrOrHash + } + + tx, err := api.db.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + + chainConfig, err := api.chainConfig(tx) + if err != nil { + return nil, err + } + contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } + if api.TevmEnabled { + contractHasTEVM = ethdb.GetHasTEVM(tx) + } + blockNumber, hash, latest, err := rpchelper.GetCanonicalBlockNumber(bNrOrHash, tx, api.filters) // DoCall cannot be executed on non-canonical blocks + if err != nil { + return nil, err + } + block, err := api.BaseAPI.blockWithSenders(tx, hash, blockNumber) + if err != nil { + return nil, err + } + if block == nil { + return nil, nil + } + var stateReader state.StateReader + if latest { + cacheView, err := api.stateCache.View(ctx, tx) + if err != nil { + return nil, err + } + stateReader = state.NewCachedReader2(cacheView, tx) + } else { + stateReader = state.NewPlainState(tx, blockNumber) + } + + header := block.Header() + // If the gas amount is not set, extract this as it will depend on access + // lists and we'll need to reestimate every time + nogas := args.Gas == nil + + var to common.Address + if args.To != nil { + to = *args.To + } else { + // Require nonce to calculate address of created contract + if args.Nonce == nil { + var nonce uint64 + reply, err := api.txPool.Nonce(ctx, &txpool_proto.NonceRequest{ + Address: gointerfaces.ConvertAddressToH160(*args.From), + }, &grpc.EmptyCallOption{}) + if err != nil { + return nil, err + } + if reply.Found { + nonce = reply.Nonce + 1 + } + args.Nonce = (*hexutil.Uint64)(&nonce) + } + to = crypto.CreateAddress(*args.From, uint64(*args.Nonce)) + } + + // Retrieve the precompiles since they don't need to be added to the access list + precompiles := vm.ActivePrecompiles(chainConfig.Rules(blockNumber)) + + 
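+	// The loop below is a fixed-point iteration: run the call with the current
+	// candidate access list, record every address and storage slot the EVM
+	// actually touches, then retry with the recorded list until two consecutive
+	// traces agree. Sender, recipient and precompiles are excluded since they
+	// are always warm under EIP-2929.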
// Create an initial tracer + prevTracer := logger.NewAccessListTracer(nil, *args.From, to, precompiles) + if args.AccessList != nil { + prevTracer = logger.NewAccessListTracer(*args.AccessList, *args.From, to, precompiles) + } + for { + state := state.New(stateReader) + // Retrieve the current access list to expand + accessList := prevTracer.AccessList() + log.Trace("Creating access list", "input", accessList) + + // If no gas amount was specified, each unique access list needs it's own + // gas calculation. This is quite expensive, but we need to be accurate + // and it's convered by the sender only anyway. + if nogas { + args.Gas = nil + } + // Set the accesslist to the last al + args.AccessList = &accessList + baseFee, _ := uint256.FromBig(header.BaseFee) + msg, err := args.ToMessage(api.GasCap, baseFee) + if err != nil { + return nil, err + } + + // Apply the transaction with the access list tracer + tracer := logger.NewAccessListTracer(accessList, *args.From, to, precompiles) + config := vm.Config{Tracer: tracer, Debug: true, NoBaseFee: true} + blockCtx, txCtx := transactions.GetEvmContext(msg, header, bNrOrHash.RequireCanonical, tx, contractHasTEVM) + + evm := vm.NewEVM(blockCtx, txCtx, state, chainConfig, config) + gp := new(core.GasPool).AddGas(msg.Gas()) + res, err := core.ApplyMessage(evm, msg, gp, true /* refunds */, false /* gasBailout */) + if err != nil { + return nil, err + } + if tracer.Equal(prevTracer) { + var errString string + if res.Err != nil { + errString = res.Err.Error() + } + accessList := &accessListResult{Accesslist: &accessList, Error: errString, GasUsed: hexutil.Uint64(res.UsedGas)} + if optimizeGas != nil && *optimizeGas { + optimizeToInAccessList(accessList, to) + } + return accessList, nil + } + prevTracer = tracer + } +} + +// to address is warm already, so we can save by adding it to the access list +// only if we are adding a lot of its storage slots as well +func optimizeToInAccessList(accessList *accessListResult, to common.Address) { + indexToRemove := -1 + + for i := 0; i < len(*accessList.Accesslist); i++ { + entry := (*accessList.Accesslist)[i] + if entry.Address != to { + continue + } + + // https://eips.ethereum.org/EIPS/eip-2930#charging-less-for-accesses-in-the-access-list + accessListSavingPerSlot := params.ColdSloadCostEIP2929 - params.WarmStorageReadCostEIP2929 - params.TxAccessListStorageKeyGas + + numSlots := uint64(len(entry.StorageKeys)) + if numSlots*accessListSavingPerSlot <= params.TxAccessListAddressGas { + indexToRemove = i + } + } + + if indexToRemove >= 0 { + *accessList.Accesslist = removeIndex(*accessList.Accesslist, indexToRemove) + } +} + +func removeIndex(s types.AccessList, index int) types.AccessList { + return append(s[:index], s[index+1:]...) 
+} diff --git a/cmd/rpcdaemon22/commands/eth_call_test.go b/cmd/rpcdaemon22/commands/eth_call_test.go new file mode 100644 index 00000000000..a51841d7121 --- /dev/null +++ b/cmd/rpcdaemon22/commands/eth_call_test.go @@ -0,0 +1,251 @@ +package commands + +import ( + "context" + "fmt" + "testing" + + "github.com/ledgerwatch/erigon-lib/kv/kvcache" + "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/rpcdaemontest" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/internal/ethapi" + "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/turbo/snapshotsync" +) + +func TestEstimateGas(t *testing.T) { + db := rpcdaemontest.CreateTestKV(t) + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + var from = common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") + var to = common.HexToAddress("0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e") + if _, err := api.EstimateGas(context.Background(), ðapi.CallArgs{ + From: &from, + To: &to, + }, nil); err != nil { + t.Errorf("calling EstimateGas: %v", err) + } +} + +func TestEthCallNonCanonical(t *testing.T) { + db := rpcdaemontest.CreateTestKV(t) + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + var from = common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") + var to = common.HexToAddress("0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e") + if _, err := api.Call(context.Background(), ethapi.CallArgs{ + From: &from, + To: &to, + }, rpc.BlockNumberOrHashWithHash(common.HexToHash("0x3fcb7c0d4569fddc89cbea54b42f163e0c789351d98810a513895ab44b47020b"), true), nil); err != nil { + if fmt.Sprintf("%v", err) != "hash 3fcb7c0d4569fddc89cbea54b42f163e0c789351d98810a513895ab44b47020b is not currently canonical" { + t.Errorf("wrong error: %v", err) + } + } +} + +func TestGetBlockByTimestampLatestTime(t *testing.T) { + ctx := context.Background() + db := rpcdaemontest.CreateTestKV(t) + + tx, err := db.BeginRo(ctx) + if err != nil { + t.Errorf("fail at beginning tx") + } + defer tx.Rollback() + + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + api := NewErigonAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil) + + latestBlock := rawdb.ReadCurrentBlock(tx) + response, err := ethapi.RPCMarshalBlock(latestBlock, true, false) + + if err != nil { + t.Error("couldn't get the rpc marshal block") + } + + if err == nil && rpc.BlockNumber(latestBlock.NumberU64()) == rpc.PendingBlockNumber { + // Pending blocks need to nil out a few fields + for _, field := range []string{"hash", "nonce", "miner"} { + response[field] = nil + } + } + + block, err := api.GetBlockByTimestamp(ctx, rpc.Timestamp(latestBlock.Header().Time), false) + if err != nil { + t.Errorf("couldn't retrieve block %v", err) + } + + if block["timestamp"] != response["timestamp"] || block["hash"] != response["hash"] { + t.Errorf("Retrieved the wrong block.\nexpected block hash: %s expected timestamp: %d\nblock hash retrieved: %s timestamp retrieved: %d", response["hash"], response["timestamp"], block["hash"], block["timestamp"]) + } +} + +func TestGetBlockByTimestampOldestTime(t *testing.T) { + ctx := context.Background() + db := rpcdaemontest.CreateTestKV(t) + + tx, err := db.BeginRo(ctx) + if err != nil { + t.Errorf("failed at beginning tx") + } + 
defer tx.Rollback() + + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + api := NewErigonAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil) + + oldestBlock, err := rawdb.ReadBlockByNumber(tx, 0) + if err != nil { + t.Error("couldn't retrieve oldest block") + } + + response, err := ethapi.RPCMarshalBlock(oldestBlock, true, false) + + if err != nil { + t.Error("couldn't get the rpc marshal block") + } + + if err == nil && rpc.BlockNumber(oldestBlock.NumberU64()) == rpc.PendingBlockNumber { + // Pending blocks need to nil out a few fields + for _, field := range []string{"hash", "nonce", "miner"} { + response[field] = nil + } + } + + block, err := api.GetBlockByTimestamp(ctx, rpc.Timestamp(oldestBlock.Header().Time), false) + if err != nil { + t.Errorf("couldn't retrieve block %v", err) + } + + if block["timestamp"] != response["timestamp"] || block["hash"] != response["hash"] { + t.Errorf("Retrieved the wrong block.\nexpected block hash: %s expected timestamp: %d\nblock hash retrieved: %s timestamp retrieved: %d", response["hash"], response["timestamp"], block["hash"], block["timestamp"]) + } +} + +func TestGetBlockByTimeHigherThanLatestBlock(t *testing.T) { + ctx := context.Background() + db := rpcdaemontest.CreateTestKV(t) + + tx, err := db.BeginRo(ctx) + if err != nil { + t.Errorf("fail at beginning tx") + } + defer tx.Rollback() + + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + api := NewErigonAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil) + + latestBlock := rawdb.ReadCurrentBlock(tx) + + response, err := ethapi.RPCMarshalBlock(latestBlock, true, false) + + if err != nil { + t.Error("couldn't get the rpc marshal block") + } + + if err == nil && rpc.BlockNumber(latestBlock.NumberU64()) == rpc.PendingBlockNumber { + // Pending blocks need to nil out a few fields + for _, field := range []string{"hash", "nonce", "miner"} { + response[field] = nil + } + } + + block, err := api.GetBlockByTimestamp(ctx, rpc.Timestamp(latestBlock.Header().Time+999999999999), false) + if err != nil { + t.Errorf("couldn't retrieve block %v", err) + } + + if block["timestamp"] != response["timestamp"] || block["hash"] != response["hash"] { + t.Errorf("Retrieved the wrong block.\nexpected block hash: %s expected timestamp: %d\nblock hash retrieved: %s timestamp retrieved: %d", response["hash"], response["timestamp"], block["hash"], block["timestamp"]) + } +} + +func TestGetBlockByTimeMiddle(t *testing.T) { + ctx := context.Background() + db := rpcdaemontest.CreateTestKV(t) + + tx, err := db.BeginRo(ctx) + if err != nil { + t.Errorf("fail at beginning tx") + } + defer tx.Rollback() + + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + api := NewErigonAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil) + + currentHeader := rawdb.ReadCurrentHeader(tx) + oldestHeader := rawdb.ReadHeaderByNumber(tx, 0) + + middleNumber := (currentHeader.Number.Uint64() + oldestHeader.Number.Uint64()) / 2 + middleBlock, err := rawdb.ReadBlockByNumber(tx, middleNumber) + if err != nil { + t.Error("couldn't retrieve middle block") + } + + response, err := ethapi.RPCMarshalBlock(middleBlock, true, false) + + if err != nil { + t.Error("couldn't get the rpc marshal block") + } + + if err == nil && rpc.BlockNumber(middleBlock.NumberU64()) == rpc.PendingBlockNumber { + // Pending blocks need to nil out a few fields + for _, field := range []string{"hash", "nonce", "miner"} { + response[field] = nil + } + } + + block, err := 
api.GetBlockByTimestamp(ctx, rpc.Timestamp(middleBlock.Header().Time), false) + if err != nil { + t.Errorf("couldn't retrieve block %v", err) + } + + if block["timestamp"] != response["timestamp"] || block["hash"] != response["hash"] { + t.Errorf("Retrieved the wrong block.\nexpected block hash: %s expected timestamp: %d\nblock hash retrieved: %s timestamp retrieved: %d", response["hash"], response["timestamp"], block["hash"], block["timestamp"]) + } +} + +func TestGetBlockByTimestamp(t *testing.T) { + ctx := context.Background() + db := rpcdaemontest.CreateTestKV(t) + + tx, err := db.BeginRo(ctx) + if err != nil { + t.Errorf("fail at beginning tx") + } + defer tx.Rollback() + + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + api := NewErigonAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil) + + highestBlockNumber := rawdb.ReadCurrentHeader(tx).Number + pickedBlock, err := rawdb.ReadBlockByNumber(tx, highestBlockNumber.Uint64()/3) + if err != nil { + t.Errorf("couldn't get block %v", pickedBlock.Number()) + } + + if pickedBlock == nil { + t.Error("couldn't retrieve picked block") + } + response, err := ethapi.RPCMarshalBlock(pickedBlock, true, false) + + if err != nil { + t.Error("couldn't get the rpc marshal block") + } + + if err == nil && rpc.BlockNumber(pickedBlock.NumberU64()) == rpc.PendingBlockNumber { + // Pending blocks need to nil out a few fields + for _, field := range []string{"hash", "nonce", "miner"} { + response[field] = nil + } + } + + block, err := api.GetBlockByTimestamp(ctx, rpc.Timestamp(pickedBlock.Header().Time), false) + if err != nil { + t.Errorf("couldn't retrieve block %v", err) + } + + if block["timestamp"] != response["timestamp"] || block["hash"] != response["hash"] { + t.Errorf("Retrieved the wrong block.\nexpected block hash: %s expected timestamp: %d\nblock hash retrieved: %s timestamp retrieved: %d", response["hash"], response["timestamp"], block["hash"], block["timestamp"]) + } +} diff --git a/cmd/rpcdaemon22/commands/eth_deprecated.go b/cmd/rpcdaemon22/commands/eth_deprecated.go new file mode 100644 index 00000000000..46d918f641f --- /dev/null +++ b/cmd/rpcdaemon22/commands/eth_deprecated.go @@ -0,0 +1,26 @@ +package commands + +import ( + "context" + "fmt" + + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/hexutil" +) + +// Accounts implements eth_accounts. Returns a list of addresses owned by the client. +// Deprecated: This function will be removed in the future. +func (api *APIImpl) Accounts(ctx context.Context) ([]common.Address, error) { + return []common.Address{}, fmt.Errorf(NotAvailableDeprecated, "eth_accounts") +} + +// Sign implements eth_sign. Calculates an Ethereum specific signature with: sign(keccak256('\\x19Ethereum Signed Message:\\n' + len(message) + message))). +// Deprecated: This function will be removed in the future. 
+func (api *APIImpl) Sign(ctx context.Context, _ common.Address, _ hexutil.Bytes) (hexutil.Bytes, error) { + return hexutil.Bytes(""), fmt.Errorf(NotAvailableDeprecated, "eth_sign") +} + +// SignTransaction deprecated +func (api *APIImpl) SignTransaction(_ context.Context, txObject interface{}) (common.Hash, error) { + return common.Hash{0}, fmt.Errorf(NotAvailableDeprecated, "eth_signTransaction") +} diff --git a/cmd/rpcdaemon22/commands/eth_filters.go b/cmd/rpcdaemon22/commands/eth_filters.go new file mode 100644 index 00000000000..f99ced97f1c --- /dev/null +++ b/cmd/rpcdaemon22/commands/eth_filters.go @@ -0,0 +1,248 @@ +package commands + +import ( + "context" + "fmt" + + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/debug" + "github.com/ledgerwatch/erigon/common/hexutil" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/eth/filters" + "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/turbo/rpchelper" + "github.com/ledgerwatch/log/v3" +) + +// NewPendingTransactionFilter new transaction filter +func (api *APIImpl) NewPendingTransactionFilter(_ context.Context) (common.Hash, error) { + if api.filters == nil { + return common.Hash{}, rpc.ErrNotificationsUnsupported + } + txsCh := make(chan []types.Transaction, 1) + id := api.filters.SubscribePendingTxs(txsCh) + go func() { + for { + select { + case txs, ok := <-txsCh: + if !ok { + return + } + api.filters.AddPendingTxs(id, txs) + default: + } + } + }() + return common.HexToHash(string(id)), nil +} + +// NewBlockFilter implements eth_newBlockFilter. Creates a filter in the node, to notify when a new block arrives. +func (api *APIImpl) NewBlockFilter(_ context.Context) (common.Hash, error) { + if api.filters == nil { + return common.Hash{}, rpc.ErrNotificationsUnsupported + } + ch := make(chan *types.Block, 1) + id := api.filters.SubscribePendingBlock(ch) + go func() { + for { + select { + case block, ok := <-ch: + if !ok { + return + } + api.filters.AddPendingBlock(id, block) + default: + } + } + }() + return common.HexToHash(string(id)), nil +} + +// NewFilter implements eth_newFilter. Creates an arbitrary filter object, based on filter options, to notify when the state changes (logs). +func (api *APIImpl) NewFilter(_ context.Context, crit filters.FilterCriteria) (common.Hash, error) { + if api.filters == nil { + return common.Hash{}, rpc.ErrNotificationsUnsupported + } + logs := make(chan *types.Log, 1) + id := api.filters.SubscribeLogs(logs, crit) + go func() { + for { + select { + case lg, ok := <-logs: + if !ok { + return + } + api.filters.AddLogs(id, lg) + default: + } + } + }() + return common.HexToHash(hexutil.EncodeUint64(uint64(id))), nil +} + +// UninstallFilter new transaction filter +func (api *APIImpl) UninstallFilter(_ context.Context, index string) (bool, error) { + if api.filters == nil { + return false, rpc.ErrNotificationsUnsupported + } + if common.IsHexAddress32(index) { + // remove 0x + if len(index) >= 2 && index[0] == '0' && (index[1] == 'x' || index[1] == 'X') { + index = index[2:] + } + isDeleted := api.filters.UnsubscribePendingBlock(rpchelper.PendingBlockSubID(index)) || + api.filters.UnsubscribePendingTxs(rpchelper.PendingTxsSubID(index)) + id, err := hexutil.DecodeUint64(index) + if err == nil { + return isDeleted || api.filters.UnsubscribeLogs(rpchelper.LogsSubID(id)), nil + } + } + + return false, nil +} + +// GetFilterChanges implements eth_getFilterChanges. 
Polling method for a previously-created filter, which returns an array of logs which occurred since last poll. +func (api *APIImpl) GetFilterChanges(_ context.Context, index string) ([]interface{}, error) { + if api.filters == nil { + return nil, rpc.ErrNotificationsUnsupported + } + stub := make([]interface{}, 0) + if common.IsHexAddress32(index) { + // remove 0x + if len(index) >= 2 && index[0] == '0' && (index[1] == 'x' || index[1] == 'X') { + index = index[2:] + } + if blocks, ok := api.filters.ReadPendingBlocks(rpchelper.PendingBlockSubID(index)); ok { + for _, v := range blocks { + stub = append(stub, v.Hash()) + } + return stub, nil + } + if txs, ok := api.filters.ReadPendingTxs(rpchelper.PendingTxsSubID(index)); ok { + for _, v := range txs { + for _, tx := range v { + stub = append(stub, tx.Hash()) + } + } + return stub, nil + } + id, err := hexutil.DecodeUint64(index) + if err != nil { + return stub, fmt.Errorf("eth_getFilterChanges, wrong index: %w", err) + } + if logs, ok := api.filters.ReadLogs(rpchelper.LogsSubID(id)); ok { + for _, v := range logs { + stub = append(stub, v) + } + return stub, nil + } + } + return stub, nil +} + +// NewHeads send a notification each time a new (header) block is appended to the chain. +func (api *APIImpl) NewHeads(ctx context.Context) (*rpc.Subscription, error) { + if api.filters == nil { + return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported + } + notifier, supported := rpc.NotifierFromContext(ctx) + if !supported { + return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported + } + + rpcSub := notifier.CreateSubscription() + + go func() { + defer debug.LogPanic() + headers := make(chan *types.Header, 1) + defer close(headers) + id := api.filters.SubscribeNewHeads(headers) + defer api.filters.UnsubscribeHeads(id) + + for { + select { + case h := <-headers: + err := notifier.Notify(rpcSub.ID, h) + if err != nil { + log.Warn("error while notifying subscription", "err", err) + } + case <-rpcSub.Err(): + return + } + } + }() + + return rpcSub, nil +} + +// NewPendingTransactions send a notification each time a new (header) block is appended to the chain. +func (api *APIImpl) NewPendingTransactions(ctx context.Context) (*rpc.Subscription, error) { + if api.filters == nil { + return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported + } + notifier, supported := rpc.NotifierFromContext(ctx) + if !supported { + return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported + } + + rpcSub := notifier.CreateSubscription() + + go func() { + defer debug.LogPanic() + txsCh := make(chan []types.Transaction, 1) + id := api.filters.SubscribePendingTxs(txsCh) + defer api.filters.UnsubscribePendingTxs(id) + + for { + select { + case txs := <-txsCh: + for _, t := range txs { + if t != nil { + err := notifier.Notify(rpcSub.ID, t.Hash()) + if err != nil { + log.Warn("error while notifying subscription", "err", err) + } + } + } + case <-rpcSub.Err(): + return + } + } + }() + + return rpcSub, nil +} + +// Logs send a notification each time a new log appears. 
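+// A typical websocket subscription looks roughly like this (all values are
+// illustrative):
+//
+//	{"jsonrpc":"2.0","id":1,"method":"eth_subscribe","params":["logs",{"address":"0x...","topics":[]}]}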
+func (api *APIImpl) Logs(ctx context.Context, crit filters.FilterCriteria) (*rpc.Subscription, error) { + if api.filters == nil { + return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported + } + notifier, supported := rpc.NotifierFromContext(ctx) + if !supported { + return &rpc.Subscription{}, rpc.ErrNotificationsUnsupported + } + + rpcSub := notifier.CreateSubscription() + + go func() { + defer debug.LogPanic() + logs := make(chan *types.Log, 1) + id := api.filters.SubscribeLogs(logs, crit) + defer api.filters.UnsubscribeLogs(id) + + for { + select { + case h := <-logs: + err := notifier.Notify(rpcSub.ID, h) + if err != nil { + log.Warn("error while notifying subscription", "err", err) + } + case <-rpcSub.Err(): + return + } + } + }() + + return rpcSub, nil +} diff --git a/cmd/rpcdaemon22/commands/eth_ming_test.go b/cmd/rpcdaemon22/commands/eth_ming_test.go new file mode 100644 index 00000000000..835578e7032 --- /dev/null +++ b/cmd/rpcdaemon22/commands/eth_ming_test.go @@ -0,0 +1,64 @@ +package commands + +import ( + "math/big" + "testing" + "time" + + "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" + "github.com/ledgerwatch/erigon-lib/kv/kvcache" + "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/rpcdaemontest" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/rlp" + "github.com/ledgerwatch/erigon/turbo/rpchelper" + "github.com/ledgerwatch/erigon/turbo/snapshotsync" + "github.com/ledgerwatch/erigon/turbo/stages" + "github.com/stretchr/testify/require" +) + +func TestPendingBlock(t *testing.T) { + ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, stages.Mock(t)) + mining := txpool.NewMiningClient(conn) + ff := rpchelper.New(ctx, nil, nil, mining, func() {}) + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + api := NewEthAPI(NewBaseApi(ff, stateCache, snapshotsync.NewBlockReader(), false), nil, nil, nil, mining, 5000000) + expect := uint64(12345) + b, err := rlp.EncodeToBytes(types.NewBlockWithHeader(&types.Header{Number: big.NewInt(int64(expect))})) + require.NoError(t, err) + ch := make(chan *types.Block, 1) + id := ff.SubscribePendingBlock(ch) + defer ff.UnsubscribePendingBlock(id) + + ff.HandlePendingBlock(&txpool.OnPendingBlockReply{RplBlock: b}) + block := api.pendingBlock() + + require.Equal(t, block.NumberU64(), expect) + select { + case got := <-ch: + require.Equal(t, expect, got.NumberU64()) + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout waiting for expected notification") + } +} + +func TestPendingLogs(t *testing.T) { + ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, stages.Mock(t)) + mining := txpool.NewMiningClient(conn) + ff := rpchelper.New(ctx, nil, nil, mining, func() {}) + expect := []byte{211} + + ch := make(chan types.Logs, 1) + defer close(ch) + id := ff.SubscribePendingLogs(ch) + defer ff.UnsubscribePendingLogs(id) + + b, err := rlp.EncodeToBytes([]*types.Log{{Data: expect}}) + require.NoError(t, err) + ff.HandlePendingLogs(&txpool.OnPendingLogsReply{RplLogs: b}) + select { + case logs := <-ch: + require.Equal(t, expect, logs[0].Data) + case <-time.After(100 * time.Millisecond): + t.Fatalf("timeout waiting for expected notification") + } +} diff --git a/cmd/rpcdaemon22/commands/eth_mining.go b/cmd/rpcdaemon22/commands/eth_mining.go new file mode 100644 index 00000000000..9f4cf4982e9 --- /dev/null +++ b/cmd/rpcdaemon22/commands/eth_mining.go @@ -0,0 +1,94 @@ +package commands + +import ( + "context" + "errors" + + "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" + 
"github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/hexutil" + "github.com/ledgerwatch/erigon/core/types" + "google.golang.org/grpc/status" +) + +// Coinbase implements eth_coinbase. Returns the current client coinbase address. +func (api *APIImpl) Coinbase(ctx context.Context) (common.Address, error) { + return api.ethBackend.Etherbase(ctx) +} + +// Hashrate implements eth_hashrate. Returns the number of hashes per second that the node is mining with. +func (api *APIImpl) Hashrate(ctx context.Context) (uint64, error) { + repl, err := api.mining.HashRate(ctx, &txpool.HashRateRequest{}) + if err != nil { + if s, ok := status.FromError(err); ok { + return 0, errors.New(s.Message()) + } + return 0, err + } + return repl.HashRate, err +} + +// Mining returns an indication if this node is currently mining. +func (api *APIImpl) Mining(ctx context.Context) (bool, error) { + repl, err := api.mining.Mining(ctx, &txpool.MiningRequest{}) + if err != nil { + if s, ok := status.FromError(err); ok { + return false, errors.New(s.Message()) + } + return false, err + } + return repl.Enabled && repl.Running, err +} + +// GetWork returns a work package for external miner. +// +// The work package consists of 3 strings: +// result[0] - 32 bytes hex encoded current block header pow-hash +// result[1] - 32 bytes hex encoded seed hash used for DAG +// result[2] - 32 bytes hex encoded boundary condition ("target"), 2^256/difficulty +// result[3] - hex encoded block number +func (api *APIImpl) GetWork(ctx context.Context) ([4]string, error) { + var res [4]string + repl, err := api.mining.GetWork(ctx, &txpool.GetWorkRequest{}) + if err != nil { + if s, ok := status.FromError(err); ok { + return res, errors.New(s.Message()) + } + return res, err + } + res[0] = repl.HeaderHash + res[1] = repl.SeedHash + res[2] = repl.Target + res[3] = repl.BlockNumber + return res, nil +} + +// SubmitWork can be used by external miner to submit their POW solution. +// It returns an indication if the work was accepted. +// Note either an invalid solution, a stale work a non-existent work will return false. +func (api *APIImpl) SubmitWork(ctx context.Context, nonce types.BlockNonce, powHash, digest common.Hash) (bool, error) { + repl, err := api.mining.SubmitWork(ctx, &txpool.SubmitWorkRequest{BlockNonce: nonce[:], PowHash: powHash.Bytes(), Digest: digest.Bytes()}) + if err != nil { + if s, ok := status.FromError(err); ok { + return false, errors.New(s.Message()) + } + return false, err + } + return repl.Ok, nil +} + +// SubmitHashrate can be used for remote miners to submit their hash rate. +// This enables the node to report the combined hash rate of all miners +// which submit work through this node. 
+// +// It accepts the miner hash rate and an identifier which must be unique +func (api *APIImpl) SubmitHashrate(ctx context.Context, hashRate hexutil.Uint64, id common.Hash) (bool, error) { + repl, err := api.mining.SubmitHashRate(ctx, &txpool.SubmitHashRateRequest{Rate: uint64(hashRate), Id: id.Bytes()}) + if err != nil { + if s, ok := status.FromError(err); ok { + return false, errors.New(s.Message()) + } + return false, err + } + return repl.Ok, nil +} diff --git a/cmd/rpcdaemon22/commands/eth_receipts.go b/cmd/rpcdaemon22/commands/eth_receipts.go new file mode 100644 index 00000000000..988dd3389eb --- /dev/null +++ b/cmd/rpcdaemon22/commands/eth_receipts.go @@ -0,0 +1,452 @@ +package commands + +import ( + "bytes" + "context" + "encoding/binary" + "fmt" + "math/big" + + "github.com/holiman/uint256" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/log/v3" + + "github.com/RoaringBitmap/roaring" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/dbutils" + "github.com/ledgerwatch/erigon/common/hexutil" + "github.com/ledgerwatch/erigon/consensus/ethash" + "github.com/ledgerwatch/erigon/core" + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/state" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/core/vm" + "github.com/ledgerwatch/erigon/eth/filters" + "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/bitmapdb" + "github.com/ledgerwatch/erigon/ethdb/cbor" + "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/turbo/transactions" +) + +func (api *BaseAPI) getReceipts(ctx context.Context, tx kv.Tx, chainConfig *params.ChainConfig, block *types.Block, senders []common.Address) (types.Receipts, error) { + if cached := rawdb.ReadReceipts(tx, block, senders); cached != nil { + return cached, nil + } + + getHeader := func(hash common.Hash, number uint64) *types.Header { + h, e := api._blockReader.Header(ctx, tx, hash, number) + if e != nil { + log.Error("getHeader error", "number", number, "hash", hash, "err", e) + } + return h + } + contractHasTEVM := ethdb.GetHasTEVM(tx) + _, _, _, ibs, _, err := transactions.ComputeTxEnv(ctx, block, chainConfig, getHeader, contractHasTEVM, ethash.NewFaker(), tx, block.Hash(), 0) + if err != nil { + return nil, err + } + + usedGas := new(uint64) + gp := new(core.GasPool).AddGas(block.GasLimit()) + + ethashFaker := ethash.NewFaker() + noopWriter := state.NewNoopWriter() + + receipts := make(types.Receipts, len(block.Transactions())) + + for i, txn := range block.Transactions() { + ibs.Prepare(txn.Hash(), block.Hash(), i) + receipt, _, err := core.ApplyTransaction(chainConfig, getHeader, ethashFaker, nil, gp, ibs, noopWriter, block.Header(), txn, usedGas, vm.Config{}, contractHasTEVM) + if err != nil { + return nil, err + } + receipt.BlockHash = block.Hash() + receipts[i] = receipt + } + + return receipts, nil +} + +// GetLogs implements eth_getLogs. Returns an array of logs matching a given filter object. 
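+// An example request body (all values are illustrative):
+//
+//	{"jsonrpc":"2.0","id":1,"method":"eth_getLogs",
+//	 "params":[{"fromBlock":"0x10","toBlock":"latest","address":"0x...","topics":[["0x..."]]}]}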
+func (api *APIImpl) GetLogs(ctx context.Context, crit filters.FilterCriteria) ([]*types.Log, error) { + var begin, end uint64 + logs := []*types.Log{} + + tx, beginErr := api.db.BeginRo(ctx) + if beginErr != nil { + return logs, beginErr + } + defer tx.Rollback() + + if crit.BlockHash != nil { + number := rawdb.ReadHeaderNumber(tx, *crit.BlockHash) + if number == nil { + return nil, fmt.Errorf("block not found: %x", *crit.BlockHash) + } + begin = *number + end = *number + } else { + // Convert the RPC block numbers into internal representations + latest, err := getLatestBlockNumber(tx) + if err != nil { + return nil, err + } + + begin = latest + if crit.FromBlock != nil { + if crit.FromBlock.Sign() >= 0 { + begin = crit.FromBlock.Uint64() + } else if !crit.FromBlock.IsInt64() || crit.FromBlock.Int64() != int64(rpc.LatestBlockNumber) { + return nil, fmt.Errorf("negative value for FromBlock: %v", crit.FromBlock) + } + } + end = latest + if crit.ToBlock != nil { + if crit.ToBlock.Sign() >= 0 { + end = crit.ToBlock.Uint64() + } else if !crit.ToBlock.IsInt64() || crit.ToBlock.Int64() != int64(rpc.LatestBlockNumber) { + return nil, fmt.Errorf("negative value for ToBlock: %v", crit.ToBlock) + } + } + } + if end < begin { + return nil, fmt.Errorf("end (%d) < begin (%d)", end, begin) + } + + blockNumbers := roaring.New() + blockNumbers.AddRange(begin, end+1) // [min,max) + + topicsBitmap, err := getTopicsBitmap(tx, crit.Topics, uint32(begin), uint32(end)) + if err != nil { + return nil, err + } + if topicsBitmap != nil { + blockNumbers.And(topicsBitmap) + } + + var addrBitmap *roaring.Bitmap + for _, addr := range crit.Addresses { + m, err := bitmapdb.Get(tx, kv.LogAddressIndex, addr[:], uint32(begin), uint32(end)) + if err != nil { + return nil, err + } + if addrBitmap == nil { + addrBitmap = m + continue + } + addrBitmap = roaring.Or(addrBitmap, m) + } + + if addrBitmap != nil { + blockNumbers.And(addrBitmap) + } + + if blockNumbers.GetCardinality() == 0 { + return logs, nil + } + + iter := blockNumbers.Iterator() + for iter.HasNext() { + if err = ctx.Err(); err != nil { + return nil, err + } + + block := uint64(iter.Next()) + var logIndex uint + var blockLogs []*types.Log + err := tx.ForPrefix(kv.Log, dbutils.EncodeBlockNumber(block), func(k, v []byte) error { + var logs types.Logs + if err := cbor.Unmarshal(&logs, bytes.NewReader(v)); err != nil { + return fmt.Errorf("receipt unmarshal failed: %w", err) + } + for _, log := range logs { + log.Index = logIndex + logIndex++ + } + filtered := filterLogs(logs, crit.Addresses, crit.Topics) + if len(filtered) == 0 { + return nil + } + txIndex := uint(binary.BigEndian.Uint32(k[8:])) + for _, log := range filtered { + log.TxIndex = txIndex + } + blockLogs = append(blockLogs, filtered...) + + return nil + }) + if err != nil { + return logs, err + } + if len(blockLogs) == 0 { + continue + } + + b, err := api.blockByNumberWithSenders(tx, block) + if err != nil { + return nil, err + } + if b == nil { + return nil, fmt.Errorf("block not found %d", block) + } + blockHash := b.Hash() + for _, log := range blockLogs { + log.BlockNumber = block + log.BlockHash = blockHash + log.TxHash = b.Transactions()[log.TxIndex].Hash() + } + logs = append(logs, blockLogs...) + } + + return logs, nil +} + +// The Topic list restricts matches to particular event topics. Each event has a list +// of topics. Topics matches a prefix of that list. An empty element slice matches any +// topic. 
Non-empty elements represent an alternative that matches any of the +// contained topics. +// +// Examples: +// {} or nil matches any topic list +// {{A}} matches topic A in first position +// {{}, {B}} matches any topic in first position AND B in second position +// {{A}, {B}} matches topic A in first position AND B in second position +// {{A, B}, {C, D}} matches topic (A OR B) in first position AND (C OR D) in second position +func getTopicsBitmap(c kv.Tx, topics [][]common.Hash, from, to uint32) (*roaring.Bitmap, error) { + var result *roaring.Bitmap + for _, sub := range topics { + var bitmapForORing *roaring.Bitmap + for _, topic := range sub { + m, err := bitmapdb.Get(c, kv.LogTopicIndex, topic[:], from, to) + if err != nil { + return nil, err + } + if bitmapForORing == nil { + bitmapForORing = m + continue + } + bitmapForORing.Or(m) + } + + if bitmapForORing == nil { + continue + } + if result == nil { + result = bitmapForORing + continue + } + result = roaring.And(bitmapForORing, result) + } + return result, nil +} + +// GetTransactionReceipt implements eth_getTransactionReceipt. Returns the receipt of a transaction given the transaction's hash. +func (api *APIImpl) GetTransactionReceipt(ctx context.Context, hash common.Hash) (map[string]interface{}, error) { + tx, err := api.db.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + + var borTx *types.Transaction + var blockHash common.Hash + var blockNum uint64 + var ok bool + + chainConfig, err := api.chainConfig(tx) + if err != nil { + return nil, err + } + + blockNum, ok, err = api.txnLookup(ctx, tx, hash) + if blockNum == 0 { + // It is not an ideal solution (ideal solution requires extending TxnLookupReply proto type to include bool flag indicating absense of result), + // but 0 block number is used here to mean that the transaction is not found + return nil, nil + } + if err != nil { + return nil, err + } + if !ok { + if chainConfig.Bor != nil { + var blocN uint64 + borTx, blockHash, blocN, _, err = rawdb.ReadBorTransaction(tx, hash) + if err != nil { + return nil, err + } + if borTx == nil { + return nil, nil // not error, see https://github.com/ledgerwatch/erigon/issues/1645 + } + blockNum = blocN + } else { + return nil, nil // not error, see https://github.com/ledgerwatch/erigon/issues/1645 + } + } + + block, err := api.blockByNumberWithSenders(tx, blockNum) + if err != nil { + return nil, err + } + if block == nil { + return nil, nil // not error, see https://github.com/ledgerwatch/erigon/issues/1645 + } + + cc, err := api.chainConfig(tx) + if err != nil { + return nil, err + } + if borTx != nil { + receipt := rawdb.ReadBorReceipt(tx, blockHash, blockNum) + return marshalReceipt(receipt, *borTx, cc, block, hash), nil + } + var txnIndex uint64 + var txn types.Transaction + for idx, transaction := range block.Transactions() { + if transaction.Hash() == hash { + txn = transaction + txnIndex = uint64(idx) + break + } + } + + if txn == nil { + return nil, nil + } + + receipts, err := api.getReceipts(ctx, tx, cc, block, block.Body().SendersFromTxs()) + if err != nil { + return nil, fmt.Errorf("getReceipts error: %w", err) + } + if len(receipts) <= int(txnIndex) { + return nil, fmt.Errorf("block has less receipts than expected: %d <= %d, block: %d", len(receipts), int(txnIndex), blockNum) + } + return marshalReceipt(receipts[txnIndex], block.Transactions()[txnIndex], cc, block, hash), nil +} + +// GetBlockReceipts - receipts for individual block +// func (api *APIImpl) GetBlockReceipts(ctx 
context.Context, number rpc.BlockNumber) ([]map[string]interface{}, error) { +func (api *APIImpl) GetBlockReceipts(ctx context.Context, number rpc.BlockNumber) ([]map[string]interface{}, error) { + tx, err := api.db.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + + blockNum, err := getBlockNumber(number, tx) + if err != nil { + return nil, err + } + block, err := api.blockByNumberWithSenders(tx, blockNum) + if err != nil { + return nil, err + } + if block == nil { + return nil, nil + } + chainConfig, err := api.chainConfig(tx) + if err != nil { + return nil, err + } + receipts, err := api.getReceipts(ctx, tx, chainConfig, block, block.Body().SendersFromTxs()) + if err != nil { + return nil, fmt.Errorf("getReceipts error: %w", err) + } + result := make([]map[string]interface{}, 0, len(receipts)) + for _, receipt := range receipts { + txn := block.Transactions()[receipt.TransactionIndex] + result = append(result, marshalReceipt(receipt, txn, chainConfig, block, txn.Hash())) + } + + return result, nil +} + +func marshalReceipt(receipt *types.Receipt, txn types.Transaction, chainConfig *params.ChainConfig, block *types.Block, hash common.Hash) map[string]interface{} { + var chainId *big.Int + switch t := txn.(type) { + case *types.LegacyTx: + if t.Protected() { + chainId = types.DeriveChainId(&t.V).ToBig() + } + case *types.AccessListTx: + chainId = t.ChainID.ToBig() + case *types.DynamicFeeTransaction: + chainId = t.ChainID.ToBig() + } + signer := types.LatestSignerForChainID(chainId) + from, _ := txn.Sender(*signer) + + fields := map[string]interface{}{ + "blockHash": receipt.BlockHash, + "blockNumber": hexutil.Uint64(receipt.BlockNumber.Uint64()), + "transactionHash": hash, + "transactionIndex": hexutil.Uint64(receipt.TransactionIndex), + "from": from, + "to": txn.GetTo(), + "type": hexutil.Uint(txn.Type()), + "gasUsed": hexutil.Uint64(receipt.GasUsed), + "cumulativeGasUsed": hexutil.Uint64(receipt.CumulativeGasUsed), + "contractAddress": nil, + "logs": receipt.Logs, + "logsBloom": types.CreateBloom(types.Receipts{receipt}), + } + + if !chainConfig.IsLondon(block.NumberU64()) { + fields["effectiveGasPrice"] = hexutil.Uint64(txn.GetPrice().Uint64()) + } else { + baseFee, _ := uint256.FromBig(block.BaseFee()) + gasPrice := new(big.Int).Add(block.BaseFee(), txn.GetEffectiveGasTip(baseFee).ToBig()) + fields["effectiveGasPrice"] = hexutil.Uint64(gasPrice.Uint64()) + } + // Assign receipt status. + fields["status"] = hexutil.Uint64(receipt.Status) + if receipt.Logs == nil { + fields["logs"] = [][]*types.Log{} + } + // If the ContractAddress is 20 0x0 bytes, assume it is not a contract creation + if receipt.ContractAddress != (common.Address{}) { + fields["contractAddress"] = receipt.ContractAddress + } + return fields +} + +func includes(addresses []common.Address, a common.Address) bool { + for _, addr := range addresses { + if addr == a { + return true + } + } + + return false +} + +// filterLogs creates a slice of logs matching the given criteria. +func filterLogs(logs []*types.Log, addresses []common.Address, topics [][]common.Hash) []*types.Log { + result := make(types.Logs, 0, len(logs)) +Logs: + for _, log := range logs { + + if len(addresses) > 0 && !includes(addresses, log.Address) { + continue + } + // If the to filtered topics is greater than the amount of topics in logs, skip. 
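
The length check that follows, together with the per-position loop after it, applies the same positional topic rule documented above getTopicsBitmap, this time to the decoded logs rather than to the index bitmaps. A minimal, self-contained sketch of that rule, with plain strings standing in for common.Hash (matchTopics is a hypothetical name used only for illustration):

    package main

    import "fmt"

    // matchTopics mirrors filterLogs' positional rule: filter position i must match
    // log topic i, an empty position is a wildcard, and a filter with more positions
    // than the log has topics can never match.
    func matchTopics(logTopics []string, filter [][]string) bool {
        if len(filter) > len(logTopics) {
            return false
        }
        for i, sub := range filter {
            match := len(sub) == 0 // empty rule set == wildcard
            for _, want := range sub {
                if logTopics[i] == want {
                    match = true
                    break
                }
            }
            if !match {
                return false
            }
        }
        return true
    }

    func main() {
        logTopics := []string{"A", "B"}
        fmt.Println(matchTopics(logTopics, nil))                          // true: nil matches any topic list
        fmt.Println(matchTopics(logTopics, [][]string{{}, {"B"}}))        // true: wildcard first, B second
        fmt.Println(matchTopics(logTopics, [][]string{{"C"}}))            // false: wrong topic in first position
        fmt.Println(matchTopics(logTopics, [][]string{{"A"}, {"B"}, {}})) // false: filter longer than the log's topics
    }
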
+ if len(topics) > len(log.Topics) { + continue Logs + } + for i, sub := range topics { + match := len(sub) == 0 // empty rule set == wildcard + for _, topic := range sub { + if log.Topics[i] == topic { + match = true + break + } + } + if !match { + continue Logs + } + } + result = append(result, log) + } + return result +} diff --git a/cmd/rpcdaemon22/commands/eth_subscribe_test.go b/cmd/rpcdaemon22/commands/eth_subscribe_test.go new file mode 100644 index 00000000000..9b29d53c5f4 --- /dev/null +++ b/cmd/rpcdaemon22/commands/eth_subscribe_test.go @@ -0,0 +1,59 @@ +package commands + +import ( + "testing" + + "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" + "github.com/ledgerwatch/erigon-lib/gointerfaces/sentry" + "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/rpcdaemontest" + "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/rpcservices" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/core" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/eth/protocols/eth" + "github.com/ledgerwatch/erigon/rlp" + "github.com/ledgerwatch/erigon/turbo/rpchelper" + "github.com/ledgerwatch/erigon/turbo/snapshotsync" + "github.com/ledgerwatch/erigon/turbo/stages" + "github.com/stretchr/testify/require" +) + +func TestEthSubscribe(t *testing.T) { + m, require := stages.Mock(t), require.New(t) + chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 21, func(i int, b *core.BlockGen) { + b.SetCoinbase(common.Address{1}) + }, false /* intermediateHashes */) + require.NoError(err) + + b, err := rlp.EncodeToBytes(ð.BlockHeadersPacket66{ + RequestId: 1, + BlockHeadersPacket: chain.Headers, + }) + require.NoError(err) + + m.ReceiveWg.Add(1) + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS_66, Data: b, PeerId: m.PeerId}) { + require.NoError(err) + } + m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed + + ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, m) + backend := rpcservices.NewRemoteBackend(remote.NewETHBACKENDClient(conn), m.DB, snapshotsync.NewBlockReader()) + ff := rpchelper.New(ctx, backend, nil, nil, func() {}) + + newHeads := make(chan *types.Header) + defer close(newHeads) + id := ff.SubscribeNewHeads(newHeads) + defer ff.UnsubscribeHeads(id) + + initialCycle := true + highestSeenHeader := chain.TopBlock.NumberU64() + if _, err := stages.StageLoopStep(m.Ctx, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil { + t.Fatal(err) + } + + for i := uint64(1); i <= highestSeenHeader; i++ { + header := <-newHeads + require.Equal(i, header.Number.Uint64()) + } +} diff --git a/cmd/rpcdaemon22/commands/eth_system.go b/cmd/rpcdaemon22/commands/eth_system.go new file mode 100644 index 00000000000..1095cdd82a1 --- /dev/null +++ b/cmd/rpcdaemon22/commands/eth_system.go @@ -0,0 +1,218 @@ +package commands + +import ( + "context" + + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/hexutil" + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/eth/ethconfig" + "github.com/ledgerwatch/erigon/eth/gasprice" + "github.com/ledgerwatch/erigon/eth/stagedsync/stages" + "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/rpc" +) + +// BlockNumber implements eth_blockNumber. Returns the block number of most recent block. 
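
The handler below serves plain eth_blockNumber requests, so exercising it needs nothing beyond the standard JSON-RPC envelope. A minimal client sketch, assuming the rpcdaemon's HTTP interface is reachable on localhost:8545 (the URL is an assumption; adjust it to wherever the daemon is listening):

    package main

    import (
        "bytes"
        "encoding/json"
        "fmt"
        "net/http"
    )

    func main() {
        payload := []byte(`{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}`)

        // Assumed endpoint: the local rpcdaemon HTTP interface.
        resp, err := http.Post("http://localhost:8545", "application/json", bytes.NewReader(payload))
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()

        var out struct {
            Result string `json:"result"` // hex-encoded block number, e.g. "0x10d4f"
        }
        if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
            panic(err)
        }
        fmt.Println("latest block:", out.Result)
    }
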
+func (api *APIImpl) BlockNumber(ctx context.Context) (hexutil.Uint64, error) { + tx, err := api.db.BeginRo(ctx) + if err != nil { + return 0, err + } + defer tx.Rollback() + blockNum, err := getLatestBlockNumber(tx) + if err != nil { + return 0, err + } + return hexutil.Uint64(blockNum), nil +} + +// Syncing implements eth_syncing. Returns a data object detailing the status of the sync process or false if not syncing. +func (api *APIImpl) Syncing(ctx context.Context) (interface{}, error) { + tx, err := api.db.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + highestBlock, err := stages.GetStageProgress(tx, stages.Headers) + if err != nil { + return false, err + } + + currentBlock, err := stages.GetStageProgress(tx, stages.Finish) + if err != nil { + return false, err + } + + if currentBlock > 0 && currentBlock >= highestBlock { // Return not syncing if the synchronisation already completed + return false, nil + } + + // Otherwise gather the block sync stats + type S struct { + StageName string `json:"stage_name"` + BlockNumber hexutil.Uint64 `json:"block_number"` + } + stagesMap := make([]S, len(stages.AllStages)) + for i, stage := range stages.AllStages { + progress, err := stages.GetStageProgress(tx, stage) + if err != nil { + return nil, err + } + stagesMap[i].StageName = string(stage) + stagesMap[i].BlockNumber = hexutil.Uint64(progress) + } + + return map[string]interface{}{ + "currentBlock": hexutil.Uint64(currentBlock), + "highestBlock": hexutil.Uint64(highestBlock), + "stages": stagesMap, + }, nil +} + +// ChainId implements eth_chainId. Returns the current ethereum chainId. +func (api *APIImpl) ChainId(ctx context.Context) (hexutil.Uint64, error) { + tx, err := api.db.BeginRo(ctx) + if err != nil { + return 0, err + } + defer tx.Rollback() + + chainConfig, err := api.chainConfig(tx) + if err != nil { + return 0, err + } + return hexutil.Uint64(chainConfig.ChainID.Uint64()), nil +} + +// ChainID alias of ChainId - just for convenience +func (api *APIImpl) ChainID(ctx context.Context) (hexutil.Uint64, error) { + return api.ChainId(ctx) +} + +// ProtocolVersion implements eth_protocolVersion. Returns the current ethereum protocol version. +func (api *APIImpl) ProtocolVersion(ctx context.Context) (hexutil.Uint, error) { + ver, err := api.ethBackend.ProtocolVersion(ctx) + if err != nil { + return 0, err + } + return hexutil.Uint(ver), nil +} + +// GasPrice implements eth_gasPrice. Returns the current price per gas in wei. +func (api *APIImpl) GasPrice(ctx context.Context) (*hexutil.Big, error) { + tx, err := api.db.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + cc, err := api.chainConfig(tx) + if err != nil { + return nil, err + } + oracle := gasprice.NewOracle(NewGasPriceOracleBackend(tx, cc, api.BaseAPI), ethconfig.Defaults.GPO) + tipcap, err := oracle.SuggestTipCap(ctx) + if err != nil { + return nil, err + } + if head := rawdb.ReadCurrentHeader(tx); head != nil && head.BaseFee != nil { + tipcap.Add(tipcap, head.BaseFee) + } + return (*hexutil.Big)(tipcap), err +} + +// MaxPriorityFeePerGas returns a suggestion for a gas tip cap for dynamic fee transactions. 
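
GasPrice above reports the oracle's suggested tip plus the current head's base fee once a base fee exists, while MaxPriorityFeePerGas below returns the bare tip. The arithmetic in isolation, with made-up numbers (2 gwei tip, 30 gwei base fee):

    package main

    import (
        "fmt"
        "math/big"
    )

    func main() {
        gwei := big.NewInt(1_000_000_000)

        tip := new(big.Int).Mul(big.NewInt(2), gwei)      // suggested tip cap from the oracle
        baseFee := new(big.Int).Mul(big.NewInt(30), gwei) // head block's base fee

        // Legacy-style value reported by eth_gasPrice once a base fee is present.
        gasPrice := new(big.Int).Add(tip, baseFee)

        fmt.Println("eth_maxPriorityFeePerGas ->", tip)      // 2000000000
        fmt.Println("eth_gasPrice             ->", gasPrice) // 32000000000
    }
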
+func (api *APIImpl) MaxPriorityFeePerGas(ctx context.Context) (*hexutil.Big, error) { + tx, err := api.db.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + cc, err := api.chainConfig(tx) + if err != nil { + return nil, err + } + oracle := gasprice.NewOracle(NewGasPriceOracleBackend(tx, cc, api.BaseAPI), ethconfig.Defaults.GPO) + tipcap, err := oracle.SuggestTipCap(ctx) + if err != nil { + return nil, err + } + return (*hexutil.Big)(tipcap), err +} + +type feeHistoryResult struct { + OldestBlock *hexutil.Big `json:"oldestBlock"` + Reward [][]*hexutil.Big `json:"reward,omitempty"` + BaseFee []*hexutil.Big `json:"baseFeePerGas,omitempty"` + GasUsedRatio []float64 `json:"gasUsedRatio"` +} + +func (api *APIImpl) FeeHistory(ctx context.Context, blockCount rpc.DecimalOrHex, lastBlock rpc.BlockNumber, rewardPercentiles []float64) (*feeHistoryResult, error) { + tx, err := api.db.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + cc, err := api.chainConfig(tx) + if err != nil { + return nil, err + } + oracle := gasprice.NewOracle(NewGasPriceOracleBackend(tx, cc, api.BaseAPI), ethconfig.Defaults.GPO) + + oldest, reward, baseFee, gasUsed, err := oracle.FeeHistory(ctx, int(blockCount), lastBlock, rewardPercentiles) + if err != nil { + return nil, err + } + results := &feeHistoryResult{ + OldestBlock: (*hexutil.Big)(oldest), + GasUsedRatio: gasUsed, + } + if reward != nil { + results.Reward = make([][]*hexutil.Big, len(reward)) + for i, w := range reward { + results.Reward[i] = make([]*hexutil.Big, len(w)) + for j, v := range w { + results.Reward[i][j] = (*hexutil.Big)(v) + } + } + } + if baseFee != nil { + results.BaseFee = make([]*hexutil.Big, len(baseFee)) + for i, v := range baseFee { + results.BaseFee[i] = (*hexutil.Big)(v) + } + } + return results, nil +} + +type GasPriceOracleBackend struct { + tx kv.Tx + cc *params.ChainConfig + baseApi *BaseAPI +} + +func NewGasPriceOracleBackend(tx kv.Tx, cc *params.ChainConfig, baseApi *BaseAPI) *GasPriceOracleBackend { + return &GasPriceOracleBackend{tx: tx, cc: cc, baseApi: baseApi} +} + +func (b *GasPriceOracleBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) { + block, err := b.baseApi.blockByRPCNumber(number, b.tx) + if err != nil { + return nil, err + } + return block.Header(), nil +} +func (b *GasPriceOracleBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) { + return b.baseApi.blockByRPCNumber(number, b.tx) +} +func (b *GasPriceOracleBackend) ChainConfig() *params.ChainConfig { + return b.cc +} +func (b *GasPriceOracleBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) { + return rawdb.ReadReceiptsByHash(b.tx, hash) +} +func (b *GasPriceOracleBackend) PendingBlockAndReceipts() (*types.Block, types.Receipts) { + return nil, nil +} diff --git a/cmd/rpcdaemon22/commands/eth_txs.go b/cmd/rpcdaemon22/commands/eth_txs.go new file mode 100644 index 00000000000..2cf26862d70 --- /dev/null +++ b/cmd/rpcdaemon22/commands/eth_txs.go @@ -0,0 +1,240 @@ +package commands + +import ( + "bytes" + "context" + "math/big" + + "github.com/ledgerwatch/erigon-lib/gointerfaces" + "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" + "github.com/ledgerwatch/erigon-lib/gointerfaces/types" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/hexutil" + "github.com/ledgerwatch/erigon/core/rawdb" + types2 "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/rlp" 
+ "github.com/ledgerwatch/erigon/rpc" +) + +// GetTransactionByHash implements eth_getTransactionByHash. Returns information about a transaction given the transaction's hash. +func (api *APIImpl) GetTransactionByHash(ctx context.Context, hash common.Hash) (*RPCTransaction, error) { + tx, err := api.db.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + chainConfig, err := api.chainConfig(tx) + if err != nil { + return nil, err + } + + // https://infura.io/docs/ethereum/json-rpc/eth-getTransactionByHash + blockNum, ok, err := api.txnLookup(ctx, tx, hash) + if err != nil { + return nil, err + } + if ok { + block, err := api.blockByNumberWithSenders(tx, blockNum) + if err != nil { + return nil, err + } + if block == nil { + return nil, nil + } + blockHash := block.Hash() + var txnIndex uint64 + var txn types2.Transaction + for i, transaction := range block.Transactions() { + if transaction.Hash() == hash { + txn = transaction + txnIndex = uint64(i) + break + } + } + + // Add GasPrice for the DynamicFeeTransaction + var baseFee *big.Int + if chainConfig.IsLondon(blockNum) && blockHash != (common.Hash{}) { + baseFee = block.BaseFee() + } + + // if no transaction was found then we return nil + if txn == nil { + return nil, nil + + } + + return newRPCTransaction(txn, blockHash, blockNum, txnIndex, baseFee), nil + } + + curHeader := rawdb.ReadCurrentHeader(tx) + if curHeader == nil { + return nil, nil + } + + // No finalized transaction, try to retrieve it from the pool + reply, err := api.txPool.Transactions(ctx, &txpool.TransactionsRequest{Hashes: []*types.H256{gointerfaces.ConvertHashToH256(hash)}}) + if err != nil { + return nil, err + } + if len(reply.RlpTxs[0]) > 0 { + s := rlp.NewStream(bytes.NewReader(reply.RlpTxs[0]), uint64(len(reply.RlpTxs[0]))) + txn, err := types2.DecodeTransaction(s) + if err != nil { + return nil, err + } + + // if no transaction was found in the txpool then we return nil and an error warning that we didn't find the transaction by the hash + if txn == nil { + return nil, nil + } + + return newRPCPendingTransaction(txn, curHeader, chainConfig), nil + } + + // Transaction unknown, return as such + return nil, nil +} + +// GetRawTransactionByHash returns the bytes of the transaction for the given hash. +func (api *APIImpl) GetRawTransactionByHash(ctx context.Context, hash common.Hash) (hexutil.Bytes, error) { + tx, err := api.db.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + + // https://infura.io/docs/ethereum/json-rpc/eth-getTransactionByHash + blockNum, ok, err := api.txnLookup(ctx, tx, hash) + if err != nil { + return nil, err + } + if !ok { + return nil, nil + } + block, err := api.blockByNumberWithSenders(tx, blockNum) + if err != nil { + return nil, err + } + if block == nil { + return nil, nil + } + var txn types2.Transaction + for _, transaction := range block.Transactions() { + if transaction.Hash() == hash { + txn = transaction + break + } + } + + if txn != nil { + var buf bytes.Buffer + err = txn.MarshalBinary(&buf) + return buf.Bytes(), err + } + + // No finalized transaction, try to retrieve it from the pool + reply, err := api.txPool.Transactions(ctx, &txpool.TransactionsRequest{Hashes: []*types.H256{gointerfaces.ConvertHashToH256(hash)}}) + if err != nil { + return nil, err + } + if len(reply.RlpTxs[0]) > 0 { + return reply.RlpTxs[0], nil + } + return nil, nil +} + +// GetTransactionByBlockHashAndIndex implements eth_getTransactionByBlockHashAndIndex. 
Returns information about a transaction given the block's hash and a transaction index. +func (api *APIImpl) GetTransactionByBlockHashAndIndex(ctx context.Context, blockHash common.Hash, txIndex hexutil.Uint64) (*RPCTransaction, error) { + tx, err := api.db.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + + // https://infura.io/docs/ethereum/json-rpc/eth-getTransactionByBlockHashAndIndex + block, err := api.blockByHashWithSenders(tx, blockHash) + if err != nil { + return nil, err + } + if block == nil { + return nil, nil // not error, see https://github.com/ledgerwatch/erigon/issues/1645 + } + + txs := block.Transactions() + if uint64(txIndex) >= uint64(len(txs)) { + return nil, nil // not error + } + + return newRPCTransaction(txs[txIndex], block.Hash(), block.NumberU64(), uint64(txIndex), block.BaseFee()), nil +} + +// GetRawTransactionByBlockHashAndIndex returns the bytes of the transaction for the given block hash and index. +func (api *APIImpl) GetRawTransactionByBlockHashAndIndex(ctx context.Context, blockHash common.Hash, index hexutil.Uint) (hexutil.Bytes, error) { + tx, err := api.db.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + + // https://infura.io/docs/ethereum/json-rpc/eth-getRawTransactionByBlockHashAndIndex + block, err := api.blockByHashWithSenders(tx, blockHash) + if err != nil { + return nil, err + } + if block == nil { + return nil, nil // not error, see https://github.com/ledgerwatch/erigon/issues/1645 + } + + return newRPCRawTransactionFromBlockIndex(block, uint64(index)) +} + +// GetTransactionByBlockNumberAndIndex implements eth_getTransactionByBlockNumberAndIndex. Returns information about a transaction given a block number and transaction index. +func (api *APIImpl) GetTransactionByBlockNumberAndIndex(ctx context.Context, blockNr rpc.BlockNumber, txIndex hexutil.Uint) (*RPCTransaction, error) { + tx, err := api.db.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + + // https://infura.io/docs/ethereum/json-rpc/eth-getTransactionByBlockNumberAndIndex + blockNum, err := getBlockNumber(blockNr, tx) + if err != nil { + return nil, err + } + + block, err := api.blockByNumberWithSenders(tx, blockNum) + if err != nil { + return nil, err + } + if block == nil { + return nil, nil // not error, see https://github.com/ledgerwatch/erigon/issues/1645 + } + + txs := block.Transactions() + if uint64(txIndex) >= uint64(len(txs)) { + return nil, nil // not error + } + + return newRPCTransaction(txs[txIndex], block.Hash(), block.NumberU64(), uint64(txIndex), block.BaseFee()), nil +} + +// GetRawTransactionByBlockNumberAndIndex returns the bytes of the transaction for the given block number and index. 
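
The raw-transaction endpoints hand back RLP-encoded bytes, which can be turned back into a types.Transaction with the same rlp.NewStream/DecodeTransaction pairing used in GetTransactionByHash above. A sketch, where rawTx stands in for the bytes returned by one of these methods:

    package main

    import (
        "bytes"
        "fmt"

        "github.com/ledgerwatch/erigon/core/types"
        "github.com/ledgerwatch/erigon/rlp"
    )

    // decodeRawTx turns the bytes returned by eth_getRawTransactionBy* back into a
    // transaction object, mirroring the decoding done in GetTransactionByHash.
    func decodeRawTx(rawTx []byte) (types.Transaction, error) {
        s := rlp.NewStream(bytes.NewReader(rawTx), uint64(len(rawTx)))
        return types.DecodeTransaction(s)
    }

    func main() {
        txn, err := decodeRawTx([]byte{ /* RLP bytes from the RPC response */ })
        if err != nil {
            fmt.Println("decode failed:", err)
            return
        }
        fmt.Println("tx hash:", txn.Hash().Hex())
    }
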
+func (api *APIImpl) GetRawTransactionByBlockNumberAndIndex(ctx context.Context, blockNr rpc.BlockNumber, index hexutil.Uint) (hexutil.Bytes, error) { + tx, err := api.db.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + + // https://infura.io/docs/ethereum/json-rpc/eth-getRawTransactionByBlockNumberAndIndex + block, err := api.blockByRPCNumber(blockNr, tx) + if err != nil { + return nil, err + } + if block == nil { + return nil, nil // not error, see https://github.com/ledgerwatch/erigon/issues/1645 + } + + return newRPCRawTransactionFromBlockIndex(block, uint64(index)) +} diff --git a/cmd/rpcdaemon22/commands/eth_uncles.go b/cmd/rpcdaemon22/commands/eth_uncles.go new file mode 100644 index 00000000000..4d21345ffc0 --- /dev/null +++ b/cmd/rpcdaemon22/commands/eth_uncles.go @@ -0,0 +1,133 @@ +package commands + +import ( + "context" + + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/hexutil" + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/turbo/adapter/ethapi" + "github.com/ledgerwatch/log/v3" +) + +// GetUncleByBlockNumberAndIndex implements eth_getUncleByBlockNumberAndIndex. Returns information about an uncle given a block's number and the index of the uncle. +func (api *APIImpl) GetUncleByBlockNumberAndIndex(ctx context.Context, number rpc.BlockNumber, index hexutil.Uint) (map[string]interface{}, error) { + tx, err := api.db.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + + blockNum, err := getBlockNumber(number, tx) + if err != nil { + return nil, err + } + block, err := api.blockByNumberWithSenders(tx, blockNum) + if err != nil { + return nil, err + } + if block == nil { + return nil, nil // not error, see https://github.com/ledgerwatch/erigon/issues/1645 + } + hash := block.Hash() + additionalFields := make(map[string]interface{}) + td, err := rawdb.ReadTd(tx, block.Hash(), blockNum) + if err != nil { + return nil, err + } + additionalFields["totalDifficulty"] = (*hexutil.Big)(td) + + uncles := block.Uncles() + if index >= hexutil.Uint(len(uncles)) { + log.Trace("Requested uncle not found", "number", block.Number(), "hash", hash, "index", index) + return nil, nil + } + uncle := types.NewBlockWithHeader(uncles[index]) + return ethapi.RPCMarshalBlock(uncle, false, false, additionalFields) +} + +// GetUncleByBlockHashAndIndex implements eth_getUncleByBlockHashAndIndex. Returns information about an uncle given a block's hash and the index of the uncle. 
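
Several handlers in this package return (nil, nil) when a block, transaction or uncle is missing, which reaches the client as a JSON null result rather than an error (see the repeated references to erigon issue 1645 in the comments). A caller therefore has to distinguish null from failure; a small sketch with a hard-coded response body standing in for a real reply:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type rpcResponse struct {
        Result json.RawMessage `json:"result"`
        Error  *struct {
            Code    int    `json:"code"`
            Message string `json:"message"`
        } `json:"error"`
    }

    func main() {
        // {"result":null} means "no such uncle/block", not a failure:
        // the handler returned (nil, nil).
        raw := []byte(`{"jsonrpc":"2.0","id":1,"result":null}`)

        var resp rpcResponse
        if err := json.Unmarshal(raw, &resp); err != nil {
            panic(err)
        }
        switch {
        case resp.Error != nil:
            fmt.Println("RPC error:", resp.Error.Message)
        case len(resp.Result) == 0 || string(resp.Result) == "null":
            fmt.Println("not found (null result, no error)")
        default:
            fmt.Println("got result:", string(resp.Result))
        }
    }
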
+func (api *APIImpl) GetUncleByBlockHashAndIndex(ctx context.Context, hash common.Hash, index hexutil.Uint) (map[string]interface{}, error) { + tx, err := api.db.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + + block, err := api.blockByHashWithSenders(tx, hash) + if err != nil { + return nil, err + } + if block == nil { + return nil, nil // not error, see https://github.com/ledgerwatch/erigon/issues/1645 + } + number := block.NumberU64() + additionalFields := make(map[string]interface{}) + td, err := rawdb.ReadTd(tx, hash, number) + if err != nil { + return nil, err + } + additionalFields["totalDifficulty"] = (*hexutil.Big)(td) + + uncles := block.Uncles() + if index >= hexutil.Uint(len(uncles)) { + log.Trace("Requested uncle not found", "number", block.Number(), "hash", hash, "index", index) + return nil, nil + } + uncle := types.NewBlockWithHeader(uncles[index]) + + return ethapi.RPCMarshalBlock(uncle, false, false, additionalFields) +} + +// GetUncleCountByBlockNumber implements eth_getUncleCountByBlockNumber. Returns the number of uncles in the block, if any. +func (api *APIImpl) GetUncleCountByBlockNumber(ctx context.Context, number rpc.BlockNumber) (*hexutil.Uint, error) { + n := hexutil.Uint(0) + + tx, err := api.db.BeginRo(ctx) + if err != nil { + return &n, err + } + defer tx.Rollback() + + blockNum, err := getBlockNumber(number, tx) + if err != nil { + return &n, err + } + + block, err := api.blockByNumberWithSenders(tx, blockNum) + if err != nil { + return nil, err + } + if block == nil { + return nil, nil // not error, see https://github.com/ledgerwatch/erigon/issues/1645 + } + n = hexutil.Uint(len(block.Uncles())) + return &n, nil +} + +// GetUncleCountByBlockHash implements eth_getUncleCountByBlockHash. Returns the number of uncles in the block, if any. 
+func (api *APIImpl) GetUncleCountByBlockHash(ctx context.Context, hash common.Hash) (*hexutil.Uint, error) { + n := hexutil.Uint(0) + tx, err := api.db.BeginRo(ctx) + if err != nil { + return &n, err + } + defer tx.Rollback() + + number := rawdb.ReadHeaderNumber(tx, hash) + if number == nil { + return nil, nil // not error, see https://github.com/ledgerwatch/erigon/issues/1645 + } + + block, err := api.blockWithSenders(tx, hash, *number) + if err != nil { + return nil, err + } + if block == nil { + return nil, nil // not error, see https://github.com/ledgerwatch/erigon/issues/1645 + } + n = hexutil.Uint(len(block.Uncles())) + return &n, nil +} diff --git a/cmd/rpcdaemon22/commands/get_chain_config_test.go b/cmd/rpcdaemon22/commands/get_chain_config_test.go new file mode 100644 index 00000000000..a4ed4fdad65 --- /dev/null +++ b/cmd/rpcdaemon22/commands/get_chain_config_test.go @@ -0,0 +1,39 @@ +package commands + +import ( + "context" + "testing" + + "github.com/ledgerwatch/erigon-lib/kv/memdb" + "github.com/ledgerwatch/erigon/core" +) + +func TestGetChainConfig(t *testing.T) { + db := memdb.NewTestDB(t) + config, _, err := core.CommitGenesisBlock(db, core.DefaultGenesisBlock()) + if err != nil { + t.Fatalf("setting up genensis block: %v", err) + } + + tx, txErr := db.BeginRo(context.Background()) + if txErr != nil { + t.Fatalf("error starting tx: %v", txErr) + } + defer tx.Rollback() + + api := &BaseAPI{} + config1, err1 := api.chainConfig(tx) + if err1 != nil { + t.Fatalf("reading chain config: %v", err1) + } + if config.String() != config1.String() { + t.Fatalf("read different config: %s, expected %s", config1.String(), config.String()) + } + config2, err2 := api.chainConfig(tx) + if err2 != nil { + t.Fatalf("reading chain config: %v", err2) + } + if config.String() != config2.String() { + t.Fatalf("read different config: %s, expected %s", config2.String(), config.String()) + } +} diff --git a/cmd/rpcdaemon22/commands/net_api.go b/cmd/rpcdaemon22/commands/net_api.go new file mode 100644 index 00000000000..2a094aa2ee7 --- /dev/null +++ b/cmd/rpcdaemon22/commands/net_api.go @@ -0,0 +1,66 @@ +package commands + +import ( + "context" + "fmt" + "strconv" + + "github.com/ledgerwatch/erigon/common/hexutil" + "github.com/ledgerwatch/erigon/turbo/rpchelper" +) + +// NetAPI the interface for the net_ RPC commands +type NetAPI interface { + Listening(_ context.Context) (bool, error) + Version(_ context.Context) (string, error) + PeerCount(_ context.Context) (hexutil.Uint, error) +} + +// NetAPIImpl data structure to store things needed for net_ commands +type NetAPIImpl struct { + ethBackend rpchelper.ApiBackend +} + +// NewNetAPIImpl returns NetAPIImplImpl instance +func NewNetAPIImpl(eth rpchelper.ApiBackend) *NetAPIImpl { + return &NetAPIImpl{ + ethBackend: eth, + } +} + +// Listening implements net_listening. Returns true if client is actively listening for network connections. +// TODO: Remove hard coded value +func (api *NetAPIImpl) Listening(_ context.Context) (bool, error) { + return true, nil +} + +// Version implements net_version. Returns the current network id. +func (api *NetAPIImpl) Version(ctx context.Context) (string, error) { + if api.ethBackend == nil { + // We're running in --datadir mode or otherwise cannot get the backend + return "", fmt.Errorf(NotAvailableChainData, "net_version") + } + + res, err := api.ethBackend.NetVersion(ctx) + if err != nil { + return "", err + } + + return strconv.FormatUint(res, 10), nil +} + +// PeerCount implements net_peerCount. 
Returns number of peers currently +// connected to the first sentry server. +func (api *NetAPIImpl) PeerCount(ctx context.Context) (hexutil.Uint, error) { + if api.ethBackend == nil { + // We're running in --datadir mode or otherwise cannot get the backend + return 0, fmt.Errorf(NotAvailableChainData, "net_peerCount") + } + + res, err := api.ethBackend.NetPeerCount(ctx) + if err != nil { + return 0, err + } + + return hexutil.Uint(res), nil +} diff --git a/cmd/rpcdaemon22/commands/parity_api.go b/cmd/rpcdaemon22/commands/parity_api.go new file mode 100644 index 00000000000..5e6d7bac23b --- /dev/null +++ b/cmd/rpcdaemon22/commands/parity_api.go @@ -0,0 +1,89 @@ +package commands + +import ( + "context" + "encoding/binary" + "fmt" + + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/hexutil" + "github.com/ledgerwatch/erigon/core/state" + "github.com/ledgerwatch/erigon/rpc" +) + +var latestTag = common.BytesToHash([]byte("latest")) + +var ErrWrongTag = fmt.Errorf("listStorageKeys wrong block tag or number: must be '%s' ('latest')", latestTag) + +// ParityAPI the interface for the parity_ RPC commands +type ParityAPI interface { + ListStorageKeys(ctx context.Context, account common.Address, quantity int, offset *hexutil.Bytes, blockNumber rpc.BlockNumberOrHash) ([]hexutil.Bytes, error) +} + +// ParityAPIImpl data structure to store things needed for parity_ commands +type ParityAPIImpl struct { + db kv.RoDB +} + +// NewParityAPIImpl returns ParityAPIImpl instance +func NewParityAPIImpl(db kv.RoDB) *ParityAPIImpl { + return &ParityAPIImpl{ + db: db, + } +} + +// ListStorageKeys implements parity_listStorageKeys. Returns all storage keys of the given address +func (api *ParityAPIImpl) ListStorageKeys(ctx context.Context, account common.Address, quantity int, offset *hexutil.Bytes, blockNumberOrTag rpc.BlockNumberOrHash) ([]hexutil.Bytes, error) { + if err := api.checkBlockNumber(blockNumberOrTag); err != nil { + return nil, err + } + + tx, err := api.db.BeginRo(ctx) + if err != nil { + return nil, fmt.Errorf("listStorageKeys cannot open tx: %w", err) + } + defer tx.Rollback() + a, err := state.NewPlainStateReader(tx).ReadAccountData(account) + if err != nil { + return nil, err + } else if a == nil { + return nil, fmt.Errorf("acc not found") + } + + b := make([]byte, 8) + binary.BigEndian.PutUint64(b, a.GetIncarnation()) + seekBytes := append(account.Bytes(), b...) 
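
seekBytes above is the 28-byte prefix used for storage in the dup-sorted PlainState table: the 20-byte account address followed by the 8-byte big-endian incarnation. Each duplicate value under that prefix is the 32-byte storage location followed by its data, which is why the loop below keeps only the first common.HashLength bytes. The prefix layout as a standalone sketch (the address and incarnation are made up):

    package main

    import (
        "encoding/binary"
        "encoding/hex"
        "fmt"
    )

    func main() {
        // Hypothetical account: 20-byte address plus incarnation 1.
        addr, _ := hex.DecodeString("920fd5070602feaea2e251e9e7238b6c376bcae5")
        incarnation := uint64(1)

        inc := make([]byte, 8)
        binary.BigEndian.PutUint64(inc, incarnation)

        seekBytes := append(addr, inc...) // 20 + 8 = 28-byte PlainState storage prefix
        fmt.Printf("prefix (%d bytes): %x\n", len(seekBytes), seekBytes)
    }
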
+ + c, err := tx.CursorDupSort(kv.PlainState) + if err != nil { + return nil, err + } + defer c.Close() + keys := make([]hexutil.Bytes, 0) + var v []byte + var seekVal []byte + if offset != nil { + seekVal = *offset + } + + for v, err = c.SeekBothRange(seekBytes, seekVal); v != nil && len(keys) != quantity && err == nil; _, v, err = c.NextDup() { + if len(v) > common.HashLength { + keys = append(keys, v[:common.HashLength]) + } else { + keys = append(keys, v) + } + } + if err != nil { + return nil, err + } + return keys, nil +} + +func (api *ParityAPIImpl) checkBlockNumber(blockNumber rpc.BlockNumberOrHash) error { + num, isNum := blockNumber.Number() + if isNum && rpc.LatestBlockNumber == num { + return nil + } + return ErrWrongTag +} diff --git a/cmd/rpcdaemon22/commands/parity_api_test.go b/cmd/rpcdaemon22/commands/parity_api_test.go new file mode 100644 index 00000000000..0117eddfab8 --- /dev/null +++ b/cmd/rpcdaemon22/commands/parity_api_test.go @@ -0,0 +1,105 @@ +package commands + +import ( + "context" + "fmt" + "testing" + + "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/rpcdaemontest" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/hexutil" + "github.com/ledgerwatch/erigon/rpc" + "github.com/stretchr/testify/assert" +) + +var latestBlock = rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber) + +func TestParityAPIImpl_ListStorageKeys_NoOffset(t *testing.T) { + assert := assert.New(t) + db := rpcdaemontest.CreateTestKV(t) + api := NewParityAPIImpl(db) + answers := []string{ + "0000000000000000000000000000000000000000000000000000000000000000", + "0000000000000000000000000000000000000000000000000000000000000002", + "0a2127994676ca91e4eb3d2a1e46ec9dcee074dc2643bb5ebd4e9ac6541a3148", + "0fe673b4bc06161f39bc26f4e8831c810a72ffe69e5c8cb26f7f54752618e696", + "120e23dcb7e4437386073613853db77b10011a2404eefc716b97c7767e37f8eb", + } + addr := common.HexToAddress("0x920fd5070602feaea2e251e9e7238b6c376bcae5") + result, err := api.ListStorageKeys(context.Background(), addr, 5, nil, latestBlock) + if err != nil { + t.Errorf("calling ListStorageKeys: %v", err) + } + assert.Equal(len(answers), len(result)) + for k, v := range result { + assert.Equal(answers[k], common.Bytes2Hex(v)) + } +} + +func TestParityAPIImpl_ListStorageKeys_WithOffset_ExistingPrefix(t *testing.T) { + assert := assert.New(t) + db := rpcdaemontest.CreateTestKV(t) + api := NewParityAPIImpl(db) + answers := []string{ + "29d05770ca9ee7088a64e18c8e5160fc62c3c2179dc8ef9b4dbc970c9e51b4d8", + "29edc84535d98b29835079d685b97b41ee8e831e343cc80793057e462353a26d", + "2c05ac60f9aa2df5e64ef977f271e4b9a2d13951f123a2cb5f5d4ad5eb344f1a", + "4644be453c81744b6842ddf615d7fca0e14a23b09734be63d44c23452de95631", + "4974416255391052161ba8184fe652f3bf8c915592c65f7de127af8e637dce5d", + } + addr := common.HexToAddress("0x920fd5070602feaea2e251e9e7238b6c376bcae5") + offset := common.Hex2Bytes("29") + b := hexutil.Bytes(offset) + result, err := api.ListStorageKeys(context.Background(), addr, 5, &b, latestBlock) + if err != nil { + t.Errorf("calling ListStorageKeys: %v", err) + } + assert.Equal(len(answers), len(result)) + for k, v := range result { + assert.Equal(answers[k], common.Bytes2Hex(v)) + } +} + +func TestParityAPIImpl_ListStorageKeys_WithOffset_NonExistingPrefix(t *testing.T) { + assert := assert.New(t) + db := rpcdaemontest.CreateTestKV(t) + api := NewParityAPIImpl(db) + answers := []string{ + "4644be453c81744b6842ddf615d7fca0e14a23b09734be63d44c23452de95631", + 
"4974416255391052161ba8184fe652f3bf8c915592c65f7de127af8e637dce5d", + } + addr := common.HexToAddress("0x920fd5070602feaea2e251e9e7238b6c376bcae5") + offset := common.Hex2Bytes("30") + b := hexutil.Bytes(offset) + result, err := api.ListStorageKeys(context.Background(), addr, 2, &b, latestBlock) + if err != nil { + t.Errorf("calling ListStorageKeys: %v", err) + } + assert.Equal(len(answers), len(result)) + for k, v := range result { + assert.Equal(answers[k], common.Bytes2Hex(v)) + } +} + +func TestParityAPIImpl_ListStorageKeys_WithOffset_EmptyResponse(t *testing.T) { + assert := assert.New(t) + db := rpcdaemontest.CreateTestKV(t) + api := NewParityAPIImpl(db) + addr := common.HexToAddress("0x920fd5070602feaea2e251e9e7238b6c376bcae5") + offset := common.Hex2Bytes("ff") + b := hexutil.Bytes(offset) + result, err := api.ListStorageKeys(context.Background(), addr, 2, &b, latestBlock) + if err != nil { + t.Errorf("calling ListStorageKeys: %v", err) + } + assert.Equal(0, len(result)) +} + +func TestParityAPIImpl_ListStorageKeys_AccNotFound(t *testing.T) { + assert := assert.New(t) + db := rpcdaemontest.CreateTestKV(t) + api := NewParityAPIImpl(db) + addr := common.HexToAddress("0x920fd5070602feaea2e251e9e7238b6c376bcaef") + _, err := api.ListStorageKeys(context.Background(), addr, 2, nil, latestBlock) + assert.Error(err, fmt.Errorf("acc not found")) +} diff --git a/cmd/rpcdaemon22/commands/rpc_block.go b/cmd/rpcdaemon22/commands/rpc_block.go new file mode 100644 index 00000000000..9c001ba8ac7 --- /dev/null +++ b/cmd/rpcdaemon22/commands/rpc_block.go @@ -0,0 +1,45 @@ +package commands + +import ( + "fmt" + + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/eth/stagedsync/stages" + "github.com/ledgerwatch/erigon/rpc" +) + +func getBlockNumber(number rpc.BlockNumber, tx kv.Tx) (uint64, error) { + var blockNum uint64 + var err error + if number == rpc.LatestBlockNumber || number == rpc.PendingBlockNumber { + blockNum, err = getLatestBlockNumber(tx) + if err != nil { + return 0, err + } + } else if number == rpc.EarliestBlockNumber { + blockNum = 0 + } else { + blockNum = uint64(number.Int64()) + } + + return blockNum, nil +} + +func getLatestBlockNumber(tx kv.Tx) (uint64, error) { + forkchoiceHeadHash := rawdb.ReadForkchoiceHead(tx) + if forkchoiceHeadHash != (common.Hash{}) { + forkchoiceHeadNum := rawdb.ReadHeaderNumber(tx, forkchoiceHeadHash) + if forkchoiceHeadNum != nil { + return *forkchoiceHeadNum, nil + } + } + + blockNum, err := stages.GetStageProgress(tx, stages.Execution) + if err != nil { + return 0, fmt.Errorf("getting latest block number: %w", err) + } + + return blockNum, nil +} diff --git a/cmd/rpcdaemon22/commands/send_transaction.go b/cmd/rpcdaemon22/commands/send_transaction.go new file mode 100644 index 00000000000..7ce01f31395 --- /dev/null +++ b/cmd/rpcdaemon22/commands/send_transaction.go @@ -0,0 +1,96 @@ +package commands + +import ( + "bytes" + "context" + "errors" + "fmt" + "math/big" + + txPoolProto "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/hexutil" + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/crypto" + "github.com/ledgerwatch/erigon/eth/ethconfig" + "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/rlp" + "github.com/ledgerwatch/log/v3" +) + +// SendRawTransaction 
implements eth_sendRawTransaction. Creates new message call transaction or a contract creation for previously-signed transactions. +func (api *APIImpl) SendRawTransaction(ctx context.Context, encodedTx hexutil.Bytes) (common.Hash, error) { + txn, err := types.DecodeTransaction(rlp.NewStream(bytes.NewReader(encodedTx), uint64(len(encodedTx)))) + if err != nil { + return common.Hash{}, err + } + + // If the transaction fee cap is already specified, ensure the + // fee of the given transaction is _reasonable_. + if err := checkTxFee(txn.GetPrice().ToBig(), txn.GetGas(), ethconfig.Defaults.RPCTxFeeCap); err != nil { + return common.Hash{}, err + } + if !txn.Protected() { + return common.Hash{}, errors.New("only replay-protected (EIP-155) transactions allowed over RPC") + } + hash := txn.Hash() + res, err := api.txPool.Add(ctx, &txPoolProto.AddRequest{RlpTxs: [][]byte{encodedTx}}) + if err != nil { + return common.Hash{}, err + } + + if res.Imported[0] != txPoolProto.ImportResult_SUCCESS { + return hash, fmt.Errorf("%s: %s", txPoolProto.ImportResult_name[int32(res.Imported[0])], res.Errors[0]) + } + + tx, err := api.db.BeginRo(ctx) + if err != nil { + return common.Hash{}, err + } + defer tx.Rollback() + + // Print a log with full txn details for manual investigations and interventions + blockNum := rawdb.ReadCurrentBlockNumber(tx) + if blockNum == nil { + return common.Hash{}, err + } + cc, err := api.chainConfig(tx) + if err != nil { + return common.Hash{}, err + } + signer := types.MakeSigner(cc, *blockNum) + from, err := txn.Sender(*signer) + if err != nil { + return common.Hash{}, err + } + + if txn.GetTo() == nil { + addr := crypto.CreateAddress(from, txn.GetNonce()) + log.Info("Submitted contract creation", "hash", txn.Hash().Hex(), "from", from, "nonce", txn.GetNonce(), "contract", addr.Hex(), "value", txn.GetValue()) + } else { + log.Info("Submitted transaction", "hash", txn.Hash().Hex(), "from", from, "nonce", txn.GetNonce(), "recipient", txn.GetTo(), "value", txn.GetValue()) + } + + return txn.Hash(), nil +} + +// SendTransaction implements eth_sendTransaction. Creates new message call transaction or a contract creation if the data field contains code. +func (api *APIImpl) SendTransaction(_ context.Context, txObject interface{}) (common.Hash, error) { + return common.Hash{0}, fmt.Errorf(NotImplemented, "eth_sendTransaction") +} + +// checkTxFee is an internal function used to check whether the fee of +// the given transaction is _reasonable_(under the cap). +func checkTxFee(gasPrice *big.Int, gas uint64, cap float64) error { + // Short circuit if there is no cap for transaction fee at all. 
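
The body below short-circuits when no cap is configured; otherwise the fee compared against the cap is gasPrice multiplied by gas, scaled down by params.Ether (1e18 wei) into ether. The same arithmetic with made-up numbers (100 gwei gas price, 21000 gas):

    package main

    import (
        "fmt"
        "math/big"
    )

    func main() {
        ether := new(big.Float).SetInt(big.NewInt(1_000_000_000_000_000_000)) // 1e18 wei, i.e. params.Ether

        gasPrice := big.NewInt(100_000_000_000) // 100 gwei, expressed in wei
        gas := uint64(21_000)

        wei := new(big.Int).Mul(gasPrice, new(big.Int).SetUint64(gas))
        feeEth := new(big.Float).Quo(new(big.Float).SetInt(wei), ether)

        feeFloat, _ := feeEth.Float64()
        fmt.Printf("fee: %.6f ether\n", feeFloat) // 0.002100 ether, the value checked against the cap
    }
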
+ if cap == 0 { + return nil + } + feeEth := new(big.Float).Quo(new(big.Float).SetInt(new(big.Int).Mul(gasPrice, new(big.Int).SetUint64(gas))), new(big.Float).SetInt(big.NewInt(params.Ether))) + feeFloat, _ := feeEth.Float64() + if feeFloat > cap { + return fmt.Errorf("tx fee (%.2f ether) exceeds the configured cap (%.2f ether)", feeFloat, cap) + } + return nil +} diff --git a/cmd/rpcdaemon22/commands/send_transaction_test.go b/cmd/rpcdaemon22/commands/send_transaction_test.go new file mode 100644 index 00000000000..8fe9ffa3613 --- /dev/null +++ b/cmd/rpcdaemon22/commands/send_transaction_test.go @@ -0,0 +1,110 @@ +package commands_test + +import ( + "bytes" + "crypto/ecdsa" + "math/big" + "testing" + + "github.com/holiman/uint256" + "github.com/ledgerwatch/erigon-lib/gointerfaces/sentry" + "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" + "github.com/ledgerwatch/erigon-lib/kv/kvcache" + "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/commands" + "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/rpcdaemontest" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/u256" + "github.com/ledgerwatch/erigon/core" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/eth/protocols/eth" + "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/rlp" + "github.com/ledgerwatch/erigon/turbo/rpchelper" + "github.com/ledgerwatch/erigon/turbo/snapshotsync" + "github.com/ledgerwatch/erigon/turbo/stages" + "github.com/stretchr/testify/require" +) + +func TestSendRawTransaction(t *testing.T) { + t.Skip("Flaky test") + m, require := stages.Mock(t), require.New(t) + + chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 1, func(i int, b *core.BlockGen) { + b.SetCoinbase(common.Address{1}) + }, false /* intermediateHashes */) + require.NoError(err) + { // Do 1 step to start txPool + + // Send NewBlock message + b, err := rlp.EncodeToBytes(ð.NewBlockPacket{ + Block: chain.TopBlock, + TD: big.NewInt(1), // This is ignored anyway + }) + require.NoError(err) + m.ReceiveWg.Add(1) + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK_66, Data: b, PeerId: m.PeerId}) { + require.NoError(err) + } + // Send all the headers + b, err = rlp.EncodeToBytes(ð.BlockHeadersPacket66{ + RequestId: 1, + BlockHeadersPacket: chain.Headers, + }) + require.NoError(err) + m.ReceiveWg.Add(1) + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS_66, Data: b, PeerId: m.PeerId}) { + require.NoError(err) + } + m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed + + initialCycle := true + highestSeenHeader := chain.TopBlock.NumberU64() + if _, err := stages.StageLoopStep(m.Ctx, m.DB, m.Sync, highestSeenHeader, m.Notifications, initialCycle, m.UpdateHead, nil); err != nil { + t.Fatal(err) + } + } + + expectValue := uint64(1234) + txn, err := types.SignTx(types.NewTransaction(0, common.Address{1}, uint256.NewInt(expectValue), params.TxGas, uint256.NewInt(10*params.GWei), nil), *types.LatestSignerForChainID(m.ChainConfig.ChainID), m.Key) + require.NoError(err) + + ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, m) + txPool := txpool.NewTxpoolClient(conn) + ff := rpchelper.New(ctx, nil, txPool, txpool.NewMiningClient(conn), func() {}) + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + api := commands.NewEthAPI(commands.NewBaseApi(ff, stateCache, snapshotsync.NewBlockReader(), false), m.DB, nil, txPool, nil, 5000000) + + buf := bytes.NewBuffer(nil) + err = 
txn.MarshalBinary(buf) + require.NoError(err) + + txsCh := make(chan []types.Transaction, 1) + id := ff.SubscribePendingTxs(txsCh) + defer ff.UnsubscribePendingTxs(id) + + _, err = api.SendRawTransaction(ctx, buf.Bytes()) + require.NoError(err) + + got := <-txsCh + require.Equal(expectValue, got[0].GetValue().Uint64()) + + //send same tx second time and expect error + _, err = api.SendRawTransaction(ctx, buf.Bytes()) + require.NotNil(err) + require.Equal("ALREADY_EXISTS: already known", err.Error()) + m.ReceiveWg.Wait() + + //TODO: make propagation easy to test - now race + //time.Sleep(time.Second) + //sent := m.SentMessage(0) + //require.Equal(eth.ToProto[m.MultiClient.Protocol()][eth.NewPooledTransactionHashesMsg], sent.Id) +} + +func transaction(nonce uint64, gaslimit uint64, key *ecdsa.PrivateKey) types.Transaction { + return pricedTransaction(nonce, gaslimit, u256.Num1, key) +} + +func pricedTransaction(nonce uint64, gaslimit uint64, gasprice *uint256.Int, key *ecdsa.PrivateKey) types.Transaction { + tx, _ := types.SignTx(types.NewTransaction(nonce, common.Address{}, uint256.NewInt(100), gaslimit, gasprice, nil), *types.LatestSignerForChainID(big.NewInt(1337)), key) + return tx +} diff --git a/cmd/rpcdaemon22/commands/starknet_accounts.go b/cmd/rpcdaemon22/commands/starknet_accounts.go new file mode 100644 index 00000000000..abe0e5c7903 --- /dev/null +++ b/cmd/rpcdaemon22/commands/starknet_accounts.go @@ -0,0 +1,39 @@ +package commands + +import ( + "context" + "fmt" + + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/hexutil" + "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/turbo/adapter" + "github.com/ledgerwatch/erigon/turbo/rpchelper" +) + +// GetCode implements starknet_getCode. Returns the byte code at a given address (if it's a smart contract). 
+func (api *StarknetImpl) GetCode(ctx context.Context, address common.Address, blockNrOrHash rpc.BlockNumberOrHash) (hexutil.Bytes, error) { + tx, err1 := api.db.BeginRo(ctx) + if err1 != nil { + return nil, fmt.Errorf("getCode cannot open tx: %w", err1) + } + defer tx.Rollback() + blockNumber, _, _, err := rpchelper.GetBlockNumber(blockNrOrHash, tx, api.filters) + if err != nil { + return nil, err + } + + reader := adapter.NewStateReader(tx, blockNumber) + acc, err := reader.ReadAccountData(address) + if acc == nil || err != nil { + return hexutil.Bytes(""), nil + } + res, err := reader.ReadAccountCode(address, acc.Incarnation, acc.CodeHash) + if res == nil || err != nil { + return hexutil.Bytes(""), nil + } + if res == nil { + return hexutil.Bytes(""), nil + } + return res, nil +} diff --git a/cmd/rpcdaemon22/commands/starknet_api.go b/cmd/rpcdaemon22/commands/starknet_api.go new file mode 100644 index 00000000000..0423e31e725 --- /dev/null +++ b/cmd/rpcdaemon22/commands/starknet_api.go @@ -0,0 +1,34 @@ +package commands + +import ( + "context" + "github.com/ledgerwatch/erigon-lib/gointerfaces/starknet" + "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" + "github.com/ledgerwatch/erigon/common/hexutil" + "github.com/ledgerwatch/erigon/rpc" + + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/common" +) + +type StarknetAPI interface { + SendRawTransaction(ctx context.Context, encodedTx hexutil.Bytes) (common.Hash, error) + GetCode(ctx context.Context, address common.Address, blockNrOrHash rpc.BlockNumberOrHash) (hexutil.Bytes, error) + Call(ctx context.Context, request StarknetCallRequest, blockNrOrHash rpc.BlockNumberOrHash) ([]string, error) +} + +type StarknetImpl struct { + *BaseAPI + db kv.RoDB + client starknet.CAIROVMClient + txPool txpool.TxpoolClient +} + +func NewStarknetAPI(base *BaseAPI, db kv.RoDB, client starknet.CAIROVMClient, txPool txpool.TxpoolClient) *StarknetImpl { + return &StarknetImpl{ + BaseAPI: base, + db: db, + client: client, + txPool: txPool, + } +} diff --git a/cmd/rpcdaemon22/commands/starknet_call.go b/cmd/rpcdaemon22/commands/starknet_call.go new file mode 100644 index 00000000000..4b68eac9c39 --- /dev/null +++ b/cmd/rpcdaemon22/commands/starknet_call.go @@ -0,0 +1,96 @@ +package commands + +import ( + "context" + "github.com/ledgerwatch/erigon-lib/gointerfaces/starknet" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/rpc" + "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/wrapperspb" + "reflect" + "strings" +) + +type StarknetGrpcCallArgs struct { + Inputs string + Address string + Function string + Code string + BlockHash string + BlockNumber int64 + Network string +} + +type StarknetCallRequest struct { + ContractAddress common.Address32 + EntryPointSelector string + CallData []string +} + +func (s StarknetGrpcCallArgs) ToMapAny() (result map[string]*anypb.Any) { + result = make(map[string]*anypb.Any) + + v := reflect.ValueOf(s) + typeOfS := v.Type() + + for i := 0; i < v.NumField(); i++ { + fieldName := strings.ToLower(typeOfS.Field(i).Name) + switch v.Field(i).Kind() { + case reflect.Int64: + result[fieldName], _ = anypb.New(wrapperspb.Int64(v.Field(i).Interface().(int64))) + default: + result[fieldName], _ = anypb.New(wrapperspb.String(v.Field(i).Interface().(string))) + } + } + return result +} + +// Call implements starknet_call. 
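
ToMapAny above packs each request field into an anypb.Any wrapping either a wrapperspb.String or a wrapperspb.Int64, and Call below unpacks the response values the same way. The round trip in isolation (the map keys are arbitrary examples):

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/types/known/anypb"
        "google.golang.org/protobuf/types/known/wrapperspb"
    )

    func main() {
        params := map[string]*anypb.Any{}

        // Pack: mirror ToMapAny's handling of string and int64 fields.
        params["address"], _ = anypb.New(wrapperspb.String("0x01"))
        params["blocknumber"], _ = anypb.New(wrapperspb.Int64(42))

        // Unpack a string value the way Call reads response.Result entries.
        s := wrapperspb.String("")
        if err := params["address"].UnmarshalTo(s); err != nil {
            panic(err)
        }
        fmt.Println("address:", s.GetValue())

        // Unpack the int64 field.
        n := wrapperspb.Int64(0)
        if err := params["blocknumber"].UnmarshalTo(n); err != nil {
            panic(err)
        }
        fmt.Println("blocknumber:", n.GetValue())
    }
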
+func (api *StarknetImpl) Call(ctx context.Context, request StarknetCallRequest, blockNrOrHash rpc.BlockNumberOrHash) ([]string, error) { + tx, err := api.db.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + + code, err := api.GetCode(ctx, request.ContractAddress.ToCommonAddress(), blockNrOrHash) + if err != nil { + return nil, err + } + + requestParams := &StarknetGrpcCallArgs{ + Inputs: strings.Join(request.CallData, ","), + Address: request.ContractAddress.String(), + Function: request.EntryPointSelector, + Code: code.String(), + } + + if blockNrOrHash.BlockHash != nil { + requestParams.BlockHash = blockNrOrHash.BlockHash.String() + } + + if blockNrOrHash.BlockNumber != nil { + requestParams.BlockNumber = blockNrOrHash.BlockNumber.Int64() + } + + requestParamsMap := requestParams.ToMapAny() + + grpcRequest := &starknet.CallRequest{ + Method: "starknet_call", + Params: requestParamsMap, + } + + response, err := api.client.Call(ctx, grpcRequest) + if err != nil { + return nil, err + } + + var result []string + for _, v := range response.Result { + s := wrapperspb.String("") + v.UnmarshalTo(s) + result = append(result, s.GetValue()) + } + + return result, nil +} diff --git a/cmd/rpcdaemon22/commands/starknet_send_transaction.go b/cmd/rpcdaemon22/commands/starknet_send_transaction.go new file mode 100644 index 00000000000..7bb90ea3bf0 --- /dev/null +++ b/cmd/rpcdaemon22/commands/starknet_send_transaction.go @@ -0,0 +1,50 @@ +package commands + +import ( + "bytes" + "context" + "errors" + "fmt" + txPoolProto "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/hexutil" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/rlp" + "github.com/ledgerwatch/log/v3" +) + +var ( + ErrOnlyStarknetTx = errors.New("only support starknet transactions") + ErrOnlyContractDeploy = errors.New("only support contract creation") +) + +// SendRawTransaction deploy new cairo contract +func (api *StarknetImpl) SendRawTransaction(ctx context.Context, encodedTx hexutil.Bytes) (common.Hash, error) { + txn, err := types.DecodeTransaction(rlp.NewStream(bytes.NewReader(encodedTx), uint64(len(encodedTx)))) + + if err != nil { + return common.Hash{}, err + } + + if !txn.IsStarkNet() { + return common.Hash{}, ErrOnlyStarknetTx + } + + if !txn.IsContractDeploy() { + return common.Hash{}, ErrOnlyContractDeploy + } + + hash := txn.Hash() + res, err := api.txPool.Add(ctx, &txPoolProto.AddRequest{RlpTxs: [][]byte{encodedTx}}) + if err != nil { + return common.Hash{}, err + } + + if res.Imported[0] != txPoolProto.ImportResult_SUCCESS { + return hash, fmt.Errorf("%s: %s", txPoolProto.ImportResult_name[int32(res.Imported[0])], res.Errors[0]) + } + + log.Info("Submitted contract creation", "hash", txn.Hash().Hex(), "nonce", txn.GetNonce(), "value", txn.GetValue()) + + return txn.Hash(), nil +} diff --git a/cmd/rpcdaemon22/commands/starknet_send_transaction_test.go b/cmd/rpcdaemon22/commands/starknet_send_transaction_test.go new file mode 100644 index 00000000000..d62f6374ddd --- /dev/null +++ b/cmd/rpcdaemon22/commands/starknet_send_transaction_test.go @@ -0,0 +1,83 @@ +package commands_test + +import ( + "bytes" + "testing" + + "github.com/holiman/uint256" + "github.com/ledgerwatch/erigon-lib/gointerfaces/starknet" + "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" + "github.com/ledgerwatch/erigon-lib/kv/kvcache" + "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/commands" + 
"github.com/ledgerwatch/erigon/cmd/rpcdaemon22/rpcdaemontest" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/hexutil" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/turbo/rpchelper" + "github.com/ledgerwatch/erigon/turbo/snapshotsync" + "github.com/ledgerwatch/erigon/turbo/stages" + "github.com/stretchr/testify/require" +) + +func TestErrorStarknetSendRawTransaction(t *testing.T) { + var cases = []struct { + name string + tx string + error error + }{ + {name: "wrong tx type", tx: generateDynamicFeeTransaction(), error: commands.ErrOnlyStarknetTx}, + {name: "not contract creation", tx: generateStarknetTransaction(), error: commands.ErrOnlyContractDeploy}, + } + + m, require := stages.MockWithTxPool(t), require.New(t) + ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, m) + txPool := txpool.NewTxpoolClient(conn) + starknetClient := starknet.NewCAIROVMClient(conn) + ff := rpchelper.New(ctx, nil, txPool, txpool.NewMiningClient(conn), func() {}) + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + + for _, tt := range cases { + api := commands.NewStarknetAPI(commands.NewBaseApi(ff, stateCache, snapshotsync.NewBlockReader(), false), m.DB, starknetClient, txPool) + + t.Run(tt.name, func(t *testing.T) { + hex, _ := hexutil.Decode(tt.tx) + + _, err := api.SendRawTransaction(ctx, hex) + + require.ErrorIs(err, tt.error) + }) + } +} + +func generateDynamicFeeTransaction() string { + buf := bytes.NewBuffer(nil) + types.DynamicFeeTransaction{ + CommonTx: types.CommonTx{ + ChainID: new(uint256.Int), + Nonce: 1, + Value: uint256.NewInt(1), + Gas: 1, + }, + Tip: new(uint256.Int), + FeeCap: new(uint256.Int), + }.MarshalBinary(buf) + + return hexutil.Encode(buf.Bytes()) +} + +func generateStarknetTransaction() string { + buf := bytes.NewBuffer(nil) + types.StarknetTransaction{ + CommonTx: types.CommonTx{ + ChainID: new(uint256.Int), + Nonce: 1, + Value: uint256.NewInt(1), + Gas: 1, + To: &common.Address{}, + }, + Tip: new(uint256.Int), + FeeCap: new(uint256.Int), + }.MarshalBinary(buf) + + return hexutil.Encode(buf.Bytes()) +} diff --git a/cmd/rpcdaemon22/commands/storage_range.go b/cmd/rpcdaemon22/commands/storage_range.go new file mode 100644 index 00000000000..141be618bbc --- /dev/null +++ b/cmd/rpcdaemon22/commands/storage_range.go @@ -0,0 +1,42 @@ +package commands + +import ( + "fmt" + + "github.com/holiman/uint256" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/core/state" +) + +// StorageRangeResult is the result of a debug_storageRangeAt API call. +type StorageRangeResult struct { + Storage StorageMap `json:"storage"` + NextKey *common.Hash `json:"nextKey"` // nil if Storage includes the last key in the trie. 
+} + +// StorageMap a map from storage locations to StorageEntry items +type StorageMap map[common.Hash]StorageEntry + +// StorageEntry an entry in storage of the account +type StorageEntry struct { + Key *common.Hash `json:"key"` + Value common.Hash `json:"value"` +} + +func StorageRangeAt(stateReader *state.PlainState, contractAddress common.Address, start []byte, maxResult int) (StorageRangeResult, error) { + result := StorageRangeResult{Storage: StorageMap{}} + resultCount := 0 + + if err := stateReader.ForEachStorage(contractAddress, common.BytesToHash(start), func(key, seckey common.Hash, value uint256.Int) bool { + if resultCount < maxResult { + result.Storage[seckey] = StorageEntry{Key: &key, Value: value.Bytes32()} + } else { + result.NextKey = &key + } + resultCount++ + return resultCount <= maxResult + }, maxResult+1); err != nil { + return StorageRangeResult{}, fmt.Errorf("error walking over storage: %w", err) + } + return result, nil +} diff --git a/cmd/rpcdaemon22/commands/trace_adhoc.go b/cmd/rpcdaemon22/commands/trace_adhoc.go new file mode 100644 index 00000000000..5426b869530 --- /dev/null +++ b/cmd/rpcdaemon22/commands/trace_adhoc.go @@ -0,0 +1,1224 @@ +package commands + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "math" + "math/big" + "strings" + "time" + + "github.com/holiman/uint256" + libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/hexutil" + math2 "github.com/ledgerwatch/erigon/common/math" + "github.com/ledgerwatch/erigon/core" + "github.com/ledgerwatch/erigon/core/state" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/core/types/accounts" + "github.com/ledgerwatch/erigon/core/vm" + "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/turbo/rpchelper" + "github.com/ledgerwatch/erigon/turbo/shards" + "github.com/ledgerwatch/erigon/turbo/transactions" + "github.com/ledgerwatch/log/v3" +) + +const callTimeout = 5 * time.Minute + +const ( + CALL = "call" + CALLCODE = "callcode" + DELEGATECALL = "delegatecall" + STATICCALL = "staticcall" + CREATE = "create" + SUICIDE = "suicide" + REWARD = "reward" + TraceTypeTrace = "trace" + TraceTypeStateDiff = "stateDiff" + TraceTypeVmTrace = "vmTrace" +) + +// TraceCallParam (see SendTxArgs -- this allows optional prams plus don't use MixedcaseAddress +type TraceCallParam struct { + From *common.Address `json:"from"` + To *common.Address `json:"to"` + Gas *hexutil.Uint64 `json:"gas"` + GasPrice *hexutil.Big `json:"gasPrice"` + MaxPriorityFeePerGas *hexutil.Big `json:"maxPriorityFeePerGas"` + MaxFeePerGas *hexutil.Big `json:"maxFeePerGas"` + Value *hexutil.Big `json:"value"` + Data hexutil.Bytes `json:"data"` + AccessList *types.AccessList `json:"accessList"` + txHash *common.Hash + traceTypes []string +} + +// TraceCallResult is the response to `trace_call` method +type TraceCallResult struct { + Output hexutil.Bytes `json:"output"` + StateDiff map[common.Address]*StateDiffAccount `json:"stateDiff"` + Trace []*ParityTrace `json:"trace"` + VmTrace *VmTrace `json:"vmTrace"` + TransactionHash *common.Hash `json:"transactionHash,omitempty"` +} + +// StateDiffAccount is the part of `trace_call` response that is under "stateDiff" tag +type StateDiffAccount struct { + Balance interface{} `json:"balance"` // Can be either string "=" or mapping "*" => {"from": "hex", "to": "hex"} + Code interface{} `json:"code"` + 
Nonce interface{} `json:"nonce"` + Storage map[common.Hash]map[string]interface{} `json:"storage"` +} + +type StateDiffBalance struct { + From *hexutil.Big `json:"from"` + To *hexutil.Big `json:"to"` +} + +type StateDiffCode struct { + From hexutil.Bytes `json:"from"` + To hexutil.Bytes `json:"to"` +} + +type StateDiffNonce struct { + From hexutil.Uint64 `json:"from"` + To hexutil.Uint64 `json:"to"` +} + +type StateDiffStorage struct { + From common.Hash `json:"from"` + To common.Hash `json:"to"` +} + +// VmTrace is the part of `trace_call` response that is under "vmTrace" tag +type VmTrace struct { + Code hexutil.Bytes `json:"code"` + Ops []*VmTraceOp `json:"ops"` +} + +// VmTraceOp is one element of the vmTrace ops trace +type VmTraceOp struct { + Cost int `json:"cost"` + Ex *VmTraceEx `json:"ex"` + Pc int `json:"pc"` + Sub *VmTrace `json:"sub"` + Op string `json:"op,omitempty"` + Idx string `json:"idx,omitempty"` +} + +type VmTraceEx struct { + Mem *VmTraceMem `json:"mem"` + Push []string `json:"push"` + Store *VmTraceStore `json:"store"` + Used int `json:"used"` +} + +type VmTraceMem struct { + Data string `json:"data"` + Off int `json:"off"` +} + +type VmTraceStore struct { + Key string `json:"key"` + Val string `json:"val"` +} + +// ToMessage converts CallArgs to the Message type used by the core evm +func (args *TraceCallParam) ToMessage(globalGasCap uint64, baseFee *uint256.Int) (types.Message, error) { + // Set sender address or use zero address if none specified. + var addr common.Address + if args.From != nil { + addr = *args.From + } + + // Set default gas & gas price if none were set + gas := globalGasCap + if gas == 0 { + gas = uint64(math.MaxUint64 / 2) + } + if args.Gas != nil { + gas = uint64(*args.Gas) + } + if globalGasCap != 0 && globalGasCap < gas { + log.Warn("Caller gas above allowance, capping", "requested", gas, "cap", globalGasCap) + gas = globalGasCap + } + var ( + gasPrice *uint256.Int + gasFeeCap *uint256.Int + gasTipCap *uint256.Int + ) + if baseFee == nil { + // If there's no basefee, then it must be a non-1559 execution + gasPrice = new(uint256.Int) + if args.GasPrice != nil { + overflow := gasPrice.SetFromBig(args.GasPrice.ToInt()) + if overflow { + return types.Message{}, fmt.Errorf("args.GasPrice higher than 2^256-1") + } + } + gasFeeCap, gasTipCap = gasPrice, gasPrice + } else { + // A basefee is provided, necessitating 1559-type execution + if args.GasPrice != nil { + var overflow bool + // User specified the legacy gas field, convert to 1559 gas typing + gasPrice, overflow = uint256.FromBig(args.GasPrice.ToInt()) + if overflow { + return types.Message{}, fmt.Errorf("args.GasPrice higher than 2^256-1") + } + gasFeeCap, gasTipCap = gasPrice, gasPrice + } else { + // User specified 1559 gas feilds (or none), use those + gasFeeCap = new(uint256.Int) + if args.MaxFeePerGas != nil { + overflow := gasFeeCap.SetFromBig(args.MaxFeePerGas.ToInt()) + if overflow { + return types.Message{}, fmt.Errorf("args.GasPrice higher than 2^256-1") + } + } + gasTipCap = new(uint256.Int) + if args.MaxPriorityFeePerGas != nil { + overflow := gasTipCap.SetFromBig(args.MaxPriorityFeePerGas.ToInt()) + if overflow { + return types.Message{}, fmt.Errorf("args.GasPrice higher than 2^256-1") + } + } + // Backfill the legacy gasPrice for EVM execution, unless we're all zeroes + gasPrice = new(uint256.Int) + if !gasFeeCap.IsZero() || !gasTipCap.IsZero() { + gasPrice = math2.U256Min(new(uint256.Int).Add(gasTipCap, baseFee), gasFeeCap) + } else { + // This means gasFeeCap == 0, gasTipCap 
== 0 + gasPrice.Set(baseFee) + gasFeeCap, gasTipCap = gasPrice, gasPrice + } + } + } + value := new(uint256.Int) + if args.Value != nil { + overflow := value.SetFromBig(args.Value.ToInt()) + if overflow { + return types.Message{}, fmt.Errorf("args.Value higher than 2^256-1") + } + } + var data []byte + if args.Data != nil { + data = args.Data + } + var accessList types.AccessList + if args.AccessList != nil { + accessList = *args.AccessList + } + msg := types.NewMessage(addr, args.To, 0, value, gas, gasPrice, gasFeeCap, gasTipCap, data, accessList, false /* checkNonce */) + return msg, nil +} + +// OpenEthereum-style tracer +type OeTracer struct { + r *TraceCallResult + traceAddr []int + traceStack []*ParityTrace + precompile bool // Whether the last CaptureStart was called with `precompile = true` + compat bool // Bug for bug compatibility mode + lastVmOp *VmTraceOp + lastOp vm.OpCode + lastMemOff uint64 + lastMemLen uint64 + memOffStack []uint64 + memLenStack []uint64 + lastOffStack *VmTraceOp + vmOpStack []*VmTraceOp // Stack of vmTrace operations as call depth increases + idx []string // Prefix for the "idx" inside operations, for easier navigation +} + +func (ot *OeTracer) CaptureStart(env *vm.EVM, depth int, from common.Address, to common.Address, precompile bool, create bool, calltype vm.CallType, input []byte, gas uint64, value *big.Int, code []byte) { + //fmt.Printf("CaptureStart depth %d, from %x, to %x, create %t, input %x, gas %d, value %d, precompile %t\n", depth, from, to, create, input, gas, value, precompile) + if ot.r.VmTrace != nil { + var vmTrace *VmTrace + if depth > 0 { + var vmT *VmTrace + if len(ot.vmOpStack) > 0 { + vmT = ot.vmOpStack[len(ot.vmOpStack)-1].Sub + } else { + vmT = ot.r.VmTrace + } + if !ot.compat { + ot.idx = append(ot.idx, fmt.Sprintf("%d-", len(vmT.Ops)-1)) + } + } + if ot.lastVmOp != nil { + vmTrace = &VmTrace{Ops: []*VmTraceOp{}} + ot.lastVmOp.Sub = vmTrace + ot.vmOpStack = append(ot.vmOpStack, ot.lastVmOp) + } else { + vmTrace = ot.r.VmTrace + } + if create { + vmTrace.Code = common.CopyBytes(input) + if ot.lastVmOp != nil { + ot.lastVmOp.Cost += int(gas) + } + } else { + vmTrace.Code = code + } + } + if precompile && depth > 0 && value.Sign() <= 0 { + ot.precompile = true + return + } + if gas > 500000000 { + gas = 500000001 - (0x8000000000000000 - gas) + } + trace := &ParityTrace{} + if create { + trResult := &CreateTraceResult{} + trace.Type = CREATE + trResult.Address = new(common.Address) + copy(trResult.Address[:], to.Bytes()) + trace.Result = trResult + } else { + trace.Result = &TraceResult{} + trace.Type = CALL + } + if depth > 0 { + topTrace := ot.traceStack[len(ot.traceStack)-1] + traceIdx := topTrace.Subtraces + ot.traceAddr = append(ot.traceAddr, traceIdx) + topTrace.Subtraces++ + if calltype == vm.DELEGATECALLT { + switch action := topTrace.Action.(type) { + case *CreateTraceAction: + value = action.Value.ToInt() + case *CallTraceAction: + value = action.Value.ToInt() + } + } + if calltype == vm.STATICCALLT { + value = big.NewInt(0) + } + } + trace.TraceAddress = make([]int, len(ot.traceAddr)) + copy(trace.TraceAddress, ot.traceAddr) + if create { + action := CreateTraceAction{} + action.From = from + action.Gas.ToInt().SetUint64(gas) + action.Init = common.CopyBytes(input) + action.Value.ToInt().Set(value) + trace.Action = &action + } else { + action := CallTraceAction{} + switch calltype { + case vm.CALLT: + action.CallType = CALL + case vm.CALLCODET: + action.CallType = CALLCODE + case vm.DELEGATECALLT: + action.CallType = 
DELEGATECALL + case vm.STATICCALLT: + action.CallType = STATICCALL + } + action.From = from + action.To = to + action.Gas.ToInt().SetUint64(gas) + action.Input = common.CopyBytes(input) + action.Value.ToInt().Set(value) + trace.Action = &action + } + ot.r.Trace = append(ot.r.Trace, trace) + ot.traceStack = append(ot.traceStack, trace) +} + +func (ot *OeTracer) CaptureEnd(depth int, output []byte, startGas, endGas uint64, t time.Duration, err error) { + if ot.r.VmTrace != nil { + if len(ot.vmOpStack) > 0 { + ot.lastOffStack = ot.vmOpStack[len(ot.vmOpStack)-1] + ot.vmOpStack = ot.vmOpStack[:len(ot.vmOpStack)-1] + } + if !ot.compat && depth > 0 { + ot.idx = ot.idx[:len(ot.idx)-1] + } + if depth > 0 { + ot.lastMemOff = ot.memOffStack[len(ot.memOffStack)-1] + ot.memOffStack = ot.memOffStack[:len(ot.memOffStack)-1] + ot.lastMemLen = ot.memLenStack[len(ot.memLenStack)-1] + ot.memLenStack = ot.memLenStack[:len(ot.memLenStack)-1] + } + } + if ot.precompile { + ot.precompile = false + return + } + if depth == 0 { + ot.r.Output = common.CopyBytes(output) + } + ignoreError := false + topTrace := ot.traceStack[len(ot.traceStack)-1] + if ot.compat { + ignoreError = depth == 0 && topTrace.Type == CREATE + } + if err != nil && !ignoreError { + switch err { + case vm.ErrInvalidJump: + topTrace.Error = "Bad jump destination" + case vm.ErrContractAddressCollision, vm.ErrCodeStoreOutOfGas, vm.ErrOutOfGas, vm.ErrGasUintOverflow: + topTrace.Error = "Out of gas" + case vm.ErrExecutionReverted: + topTrace.Error = "Reverted" + case vm.ErrWriteProtection: + topTrace.Error = "Mutable Call In Static Context" + default: + switch err.(type) { + case *vm.ErrStackUnderflow: + topTrace.Error = "Stack underflow" + case *vm.ErrInvalidOpCode: + topTrace.Error = "Bad instruction" + default: + topTrace.Error = err.Error() + } + } + topTrace.Result = nil + } else { + if len(output) > 0 { + switch topTrace.Type { + case CALL: + topTrace.Result.(*TraceResult).Output = common.CopyBytes(output) + case CREATE: + topTrace.Result.(*CreateTraceResult).Code = common.CopyBytes(output) + } + } + switch topTrace.Type { + case CALL: + topTrace.Result.(*TraceResult).GasUsed = new(hexutil.Big) + topTrace.Result.(*TraceResult).GasUsed.ToInt().SetUint64(startGas - endGas) + case CREATE: + topTrace.Result.(*CreateTraceResult).GasUsed = new(hexutil.Big) + topTrace.Result.(*CreateTraceResult).GasUsed.ToInt().SetUint64(startGas - endGas) + } + } + ot.traceStack = ot.traceStack[:len(ot.traceStack)-1] + if depth > 0 { + ot.traceAddr = ot.traceAddr[:len(ot.traceAddr)-1] + } +} + +func (ot *OeTracer) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, opDepth int, err error) { + memory := scope.Memory + st := scope.Stack + + if ot.r.VmTrace != nil { + var vmTrace *VmTrace + if len(ot.vmOpStack) > 0 { + vmTrace = ot.vmOpStack[len(ot.vmOpStack)-1].Sub + } else { + vmTrace = ot.r.VmTrace + } + if ot.lastVmOp != nil && ot.lastVmOp.Ex != nil { + // Set the "push" of the last operation + var showStack int + switch { + case ot.lastOp >= vm.PUSH1 && ot.lastOp <= vm.PUSH32: + showStack = 1 + case ot.lastOp >= vm.SWAP1 && ot.lastOp <= vm.SWAP16: + showStack = int(ot.lastOp-vm.SWAP1) + 2 + case ot.lastOp >= vm.DUP1 && ot.lastOp <= vm.DUP16: + showStack = int(ot.lastOp-vm.DUP1) + 2 + } + switch ot.lastOp { + case vm.CALLDATALOAD, vm.SLOAD, vm.MLOAD, vm.CALLDATASIZE, vm.LT, vm.GT, vm.DIV, vm.SDIV, vm.SAR, vm.AND, vm.EQ, vm.CALLVALUE, vm.ISZERO, + vm.ADD, vm.EXP, vm.CALLER, vm.SHA3, vm.SUB, vm.ADDRESS, vm.GAS, 
vm.MUL, vm.RETURNDATASIZE, vm.NOT, vm.SHR, vm.SHL, + vm.EXTCODESIZE, vm.SLT, vm.OR, vm.NUMBER, vm.PC, vm.TIMESTAMP, vm.BALANCE, vm.SELFBALANCE, vm.MULMOD, vm.ADDMOD, vm.BASEFEE, + vm.BLOCKHASH, vm.BYTE, vm.XOR, vm.ORIGIN, vm.CODESIZE, vm.MOD, vm.SIGNEXTEND, vm.GASLIMIT, vm.DIFFICULTY, vm.SGT, vm.GASPRICE, + vm.MSIZE, vm.EXTCODEHASH: + showStack = 1 + } + for i := showStack - 1; i >= 0; i-- { + ot.lastVmOp.Ex.Push = append(ot.lastVmOp.Ex.Push, st.Back(i).String()) + } + // Set the "mem" of the last operation + var setMem bool + switch ot.lastOp { + case vm.MSTORE, vm.MSTORE8, vm.MLOAD, vm.RETURNDATACOPY, vm.CALLDATACOPY, vm.CODECOPY: + setMem = true + } + if setMem && ot.lastMemLen > 0 { + cpy := memory.GetCopy(ot.lastMemOff, ot.lastMemLen) + if len(cpy) == 0 { + cpy = make([]byte, ot.lastMemLen) + } + ot.lastVmOp.Ex.Mem = &VmTraceMem{Data: fmt.Sprintf("0x%0x", cpy), Off: int(ot.lastMemOff)} + } + } + if ot.lastOffStack != nil { + ot.lastOffStack.Ex.Used = int(gas) + ot.lastOffStack.Ex.Push = []string{st.Back(0).String()} + if ot.lastMemLen > 0 && memory != nil { + cpy := memory.GetCopy(ot.lastMemOff, ot.lastMemLen) + if len(cpy) == 0 { + cpy = make([]byte, ot.lastMemLen) + } + ot.lastOffStack.Ex.Mem = &VmTraceMem{Data: fmt.Sprintf("0x%0x", cpy), Off: int(ot.lastMemOff)} + } + ot.lastOffStack = nil + } + if ot.lastOp == vm.STOP && op == vm.STOP && len(ot.vmOpStack) == 0 { + // Looks like OE is "optimising away" the second STOP + return + } + ot.lastVmOp = &VmTraceOp{Ex: &VmTraceEx{}} + vmTrace.Ops = append(vmTrace.Ops, ot.lastVmOp) + if !ot.compat { + var sb strings.Builder + for _, idx := range ot.idx { + sb.WriteString(idx) + } + ot.lastVmOp.Idx = fmt.Sprintf("%s%d", sb.String(), len(vmTrace.Ops)-1) + } + ot.lastOp = op + ot.lastVmOp.Cost = int(cost) + ot.lastVmOp.Pc = int(pc) + ot.lastVmOp.Ex.Push = []string{} + ot.lastVmOp.Ex.Used = int(gas) - int(cost) + if !ot.compat { + ot.lastVmOp.Op = op.String() + } + switch op { + case vm.MSTORE, vm.MLOAD: + ot.lastMemOff = st.Back(0).Uint64() + ot.lastMemLen = 32 + case vm.MSTORE8: + ot.lastMemOff = st.Back(0).Uint64() + ot.lastMemLen = 1 + case vm.RETURNDATACOPY, vm.CALLDATACOPY, vm.CODECOPY: + ot.lastMemOff = st.Back(0).Uint64() + ot.lastMemLen = st.Back(2).Uint64() + case vm.STATICCALL, vm.DELEGATECALL: + ot.memOffStack = append(ot.memOffStack, st.Back(4).Uint64()) + ot.memLenStack = append(ot.memLenStack, st.Back(5).Uint64()) + case vm.CALL, vm.CALLCODE: + ot.memOffStack = append(ot.memOffStack, st.Back(5).Uint64()) + ot.memLenStack = append(ot.memLenStack, st.Back(6).Uint64()) + case vm.CREATE, vm.CREATE2: + // Effectively disable memory output + ot.memOffStack = append(ot.memOffStack, 0) + ot.memLenStack = append(ot.memLenStack, 0) + case vm.SSTORE: + ot.lastVmOp.Ex.Store = &VmTraceStore{Key: st.Back(0).String(), Val: st.Back(1).String()} + } + if ot.lastVmOp.Ex.Used < 0 { + ot.lastVmOp.Ex = nil + } + } +} + +func (ot *OeTracer) CaptureFault(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, opDepth int, err error) { +} + +func (ot *OeTracer) CaptureSelfDestruct(from common.Address, to common.Address, value *big.Int) { + trace := &ParityTrace{} + trace.Type = SUICIDE + action := &SuicideTraceAction{} + action.Address = from + action.RefundAddress = to + action.Balance.ToInt().Set(value) + trace.Action = action + topTrace := ot.traceStack[len(ot.traceStack)-1] + traceIdx := topTrace.Subtraces + ot.traceAddr = append(ot.traceAddr, traceIdx) + topTrace.Subtraces++ + trace.TraceAddress = make([]int, 
len(ot.traceAddr)) + copy(trace.TraceAddress, ot.traceAddr) + ot.traceAddr = ot.traceAddr[:len(ot.traceAddr)-1] + ot.r.Trace = append(ot.r.Trace, trace) +} + +func (ot *OeTracer) CaptureAccountRead(account common.Address) error { + return nil +} +func (ot *OeTracer) CaptureAccountWrite(account common.Address) error { + return nil +} + +// Implements core/state/StateWriter to provide state diffs +type StateDiff struct { + sdMap map[common.Address]*StateDiffAccount +} + +func (sd *StateDiff) UpdateAccountData(address common.Address, original, account *accounts.Account) error { + if _, ok := sd.sdMap[address]; !ok { + sd.sdMap[address] = &StateDiffAccount{Storage: make(map[common.Hash]map[string]interface{})} + } + return nil +} + +func (sd *StateDiff) UpdateAccountCode(address common.Address, incarnation uint64, codeHash common.Hash, code []byte) error { + if _, ok := sd.sdMap[address]; !ok { + sd.sdMap[address] = &StateDiffAccount{Storage: make(map[common.Hash]map[string]interface{})} + } + return nil +} + +func (sd *StateDiff) DeleteAccount(address common.Address, original *accounts.Account) error { + if _, ok := sd.sdMap[address]; !ok { + sd.sdMap[address] = &StateDiffAccount{Storage: make(map[common.Hash]map[string]interface{})} + } + return nil +} + +func (sd *StateDiff) WriteAccountStorage(address common.Address, incarnation uint64, key *common.Hash, original, value *uint256.Int) error { + if *original == *value { + return nil + } + accountDiff := sd.sdMap[address] + if accountDiff == nil { + accountDiff = &StateDiffAccount{Storage: make(map[common.Hash]map[string]interface{})} + sd.sdMap[address] = accountDiff + } + m := make(map[string]interface{}) + m["*"] = &StateDiffStorage{From: common.BytesToHash(original.Bytes()), To: common.BytesToHash(value.Bytes())} + accountDiff.Storage[*key] = m + return nil +} + +func (sd *StateDiff) CreateContract(address common.Address) error { + if _, ok := sd.sdMap[address]; !ok { + sd.sdMap[address] = &StateDiffAccount{Storage: make(map[common.Hash]map[string]interface{})} + } + return nil +} + +// CompareStates uses the addresses accumulated in the sdMap and compares balances, nonces, and codes of the accounts, and fills the rest of the sdMap +func (sd *StateDiff) CompareStates(initialIbs, ibs *state.IntraBlockState) { + var toRemove []common.Address + for addr, accountDiff := range sd.sdMap { + initialExist := initialIbs.Exist(addr) + exist := ibs.Exist(addr) + if initialExist { + if exist { + var allEqual = len(accountDiff.Storage) == 0 + fromBalance := initialIbs.GetBalance(addr).ToBig() + toBalance := ibs.GetBalance(addr).ToBig() + if fromBalance.Cmp(toBalance) == 0 { + accountDiff.Balance = "=" + } else { + m := make(map[string]*StateDiffBalance) + m["*"] = &StateDiffBalance{From: (*hexutil.Big)(fromBalance), To: (*hexutil.Big)(toBalance)} + accountDiff.Balance = m + allEqual = false + } + fromCode := initialIbs.GetCode(addr) + toCode := ibs.GetCode(addr) + if bytes.Equal(fromCode, toCode) { + accountDiff.Code = "=" + } else { + m := make(map[string]*StateDiffCode) + m["*"] = &StateDiffCode{From: fromCode, To: toCode} + accountDiff.Code = m + allEqual = false + } + fromNonce := initialIbs.GetNonce(addr) + toNonce := ibs.GetNonce(addr) + if fromNonce == toNonce { + accountDiff.Nonce = "=" + } else { + m := make(map[string]*StateDiffNonce) + m["*"] = &StateDiffNonce{From: hexutil.Uint64(fromNonce), To: hexutil.Uint64(toNonce)} + accountDiff.Nonce = m + allEqual = false + } + if allEqual { + toRemove = append(toRemove, addr) + } + } else { + { + m 
:= make(map[string]*hexutil.Big) + m["-"] = (*hexutil.Big)(initialIbs.GetBalance(addr).ToBig()) + accountDiff.Balance = m + } + { + m := make(map[string]hexutil.Bytes) + m["-"] = initialIbs.GetCode(addr) + accountDiff.Code = m + } + { + m := make(map[string]hexutil.Uint64) + m["-"] = hexutil.Uint64(initialIbs.GetNonce(addr)) + accountDiff.Nonce = m + } + } + } else if exist { + { + m := make(map[string]*hexutil.Big) + m["+"] = (*hexutil.Big)(ibs.GetBalance(addr).ToBig()) + accountDiff.Balance = m + } + { + m := make(map[string]hexutil.Bytes) + m["+"] = ibs.GetCode(addr) + accountDiff.Code = m + } + { + m := make(map[string]hexutil.Uint64) + m["+"] = hexutil.Uint64(ibs.GetNonce(addr)) + accountDiff.Nonce = m + } + // Transform storage + for _, sm := range accountDiff.Storage { + str := sm["*"].(*StateDiffStorage) + delete(sm, "*") + sm["+"] = &str.To + } + } else { + toRemove = append(toRemove, addr) + } + } + for _, addr := range toRemove { + delete(sd.sdMap, addr) + } +} + +func (api *TraceAPIImpl) ReplayTransaction(ctx context.Context, txHash common.Hash, traceTypes []string) (*TraceCallResult, error) { + tx, err := api.kv.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + chainConfig, err := api.chainConfig(tx) + if err != nil { + return nil, err + } + + blockNum, ok, err := api.txnLookup(ctx, tx, txHash) + if err != nil { + return nil, err + } + if !ok { + return nil, nil + } + block, err := api.blockByNumberWithSenders(tx, blockNum) + if err != nil { + return nil, err + } + if block == nil { + return nil, nil + } + var txnIndex uint64 + for i, transaction := range block.Transactions() { + if transaction.Hash() == txHash { + txnIndex = uint64(i) + break + } + } + + bn := hexutil.Uint64(blockNum) + + parentNr := bn + if parentNr > 0 { + parentNr -= 1 + } + + // Returns an array of trace arrays, one trace array for each transaction + traces, err := api.callManyTransactions(ctx, tx, block.Transactions(), traceTypes, block.ParentHash(), rpc.BlockNumber(parentNr), block.Header(), int(txnIndex), types.MakeSigner(chainConfig, blockNum), chainConfig.Rules(blockNum)) + if err != nil { + return nil, err + } + + var traceTypeTrace, traceTypeStateDiff, traceTypeVmTrace bool + for _, traceType := range traceTypes { + switch traceType { + case TraceTypeTrace: + traceTypeTrace = true + case TraceTypeStateDiff: + traceTypeStateDiff = true + case TraceTypeVmTrace: + traceTypeVmTrace = true + default: + return nil, fmt.Errorf("unrecognized trace type: %s", traceType) + } + } + result := &TraceCallResult{} + + for txno, trace := range traces { + // We're only looking for a specific transaction + if txno == int(txnIndex) { + result.Output = trace.Output + if traceTypeTrace { + result.Trace = trace.Trace + } + if traceTypeStateDiff { + result.StateDiff = trace.StateDiff + } + if traceTypeVmTrace { + result.VmTrace = trace.VmTrace + } + + return trace, nil + } + } + return result, nil + +} + +func (api *TraceAPIImpl) ReplayBlockTransactions(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash, traceTypes []string) ([]*TraceCallResult, error) { + tx, err := api.kv.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + chainConfig, err := api.chainConfig(tx) + if err != nil { + return nil, err + } + + blockNumber, _, _, err := rpchelper.GetBlockNumber(blockNrOrHash, tx, api.filters) + if err != nil { + return nil, err + } + + parentNr := blockNumber + if parentNr > 0 { + parentNr -= 1 + } + // Extract transactions from block + block, bErr := 
api.blockByNumberWithSenders(tx, blockNumber) + if bErr != nil { + return nil, bErr + } + if block == nil { + return nil, fmt.Errorf("could not find block %d", blockNumber) + } + var traceTypeTrace, traceTypeStateDiff, traceTypeVmTrace bool + for _, traceType := range traceTypes { + switch traceType { + case TraceTypeTrace: + traceTypeTrace = true + case TraceTypeStateDiff: + traceTypeStateDiff = true + case TraceTypeVmTrace: + traceTypeVmTrace = true + default: + return nil, fmt.Errorf("unrecognized trace type: %s", traceType) + } + } + + // Returns an array of trace arrays, one trace array for each transaction + traces, err := api.callManyTransactions(ctx, tx, block.Transactions(), traceTypes, block.ParentHash(), rpc.BlockNumber(parentNr), block.Header(), -1 /* all tx indices */, types.MakeSigner(chainConfig, blockNumber), chainConfig.Rules(blockNumber)) + if err != nil { + return nil, err + } + + result := make([]*TraceCallResult, len(traces)) + for i, trace := range traces { + tr := &TraceCallResult{} + tr.Output = trace.Output + if traceTypeTrace { + tr.Trace = trace.Trace + } else { + tr.Trace = []*ParityTrace{} + } + if traceTypeStateDiff { + tr.StateDiff = trace.StateDiff + } + if traceTypeVmTrace { + tr.VmTrace = trace.VmTrace + } + result[i] = tr + txhash := block.Transactions()[i].Hash() + tr.TransactionHash = &txhash + } + + return result, nil +} + +// Call implements trace_call. +func (api *TraceAPIImpl) Call(ctx context.Context, args TraceCallParam, traceTypes []string, blockNrOrHash *rpc.BlockNumberOrHash) (*TraceCallResult, error) { + tx, err := api.kv.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + + chainConfig, err := api.chainConfig(tx) + if err != nil { + return nil, err + } + + if blockNrOrHash == nil { + var num = rpc.LatestBlockNumber + blockNrOrHash = &rpc.BlockNumberOrHash{BlockNumber: &num} + } + + blockNumber, hash, latest, err := rpchelper.GetBlockNumber(*blockNrOrHash, tx, api.filters) + if err != nil { + return nil, err + } + var stateReader state.StateReader + if latest { + cacheView, err := api.stateCache.View(ctx, tx) + if err != nil { + return nil, err + } + stateReader = state.NewCachedReader2(cacheView, tx) + } else { + stateReader = state.NewPlainState(tx, blockNumber) + } + ibs := state.New(stateReader) + + block, err := api.blockWithSenders(tx, hash, blockNumber) + if err != nil { + return nil, err + } + if block == nil { + return nil, fmt.Errorf("block %d(%x) not found", blockNumber, hash) + } + header := block.Header() + + // Setup context so it may be cancelled the call has completed + // or, in case of unmetered gas, setup a context with a timeout. + var cancel context.CancelFunc + if callTimeout > 0 { + ctx, cancel = context.WithTimeout(ctx, callTimeout) + } else { + ctx, cancel = context.WithCancel(ctx) + } + + // Make sure the context is cancelled when the call has completed + // this makes sure resources are cleaned up. 
+ defer cancel() + + traceResult := &TraceCallResult{Trace: []*ParityTrace{}} + var traceTypeTrace, traceTypeStateDiff, traceTypeVmTrace bool + for _, traceType := range traceTypes { + switch traceType { + case TraceTypeTrace: + traceTypeTrace = true + case TraceTypeStateDiff: + traceTypeStateDiff = true + case TraceTypeVmTrace: + traceTypeVmTrace = true + default: + return nil, fmt.Errorf("unrecognized trace type: %s", traceType) + } + } + if traceTypeVmTrace { + traceResult.VmTrace = &VmTrace{Ops: []*VmTraceOp{}} + } + var ot OeTracer + ot.compat = api.compatibility + if traceTypeTrace || traceTypeVmTrace { + ot.r = traceResult + ot.traceAddr = []int{} + } + + // Get a new instance of the EVM. + var baseFee *uint256.Int + if header != nil && header.BaseFee != nil { + var overflow bool + baseFee, overflow = uint256.FromBig(header.BaseFee) + if overflow { + return nil, fmt.Errorf("header.BaseFee uint256 overflow") + } + } + msg, err := args.ToMessage(api.gasCap, baseFee) + if err != nil { + return nil, err + } + + contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } + if api.TevmEnabled { + contractHasTEVM = ethdb.GetHasTEVM(tx) + } + blockCtx, txCtx := transactions.GetEvmContext(msg, header, blockNrOrHash.RequireCanonical, tx, contractHasTEVM) + blockCtx.GasLimit = math.MaxUint64 + blockCtx.MaxGasLimit = true + + evm := vm.NewEVM(blockCtx, txCtx, ibs, chainConfig, vm.Config{Debug: traceTypeTrace, Tracer: &ot}) + + // Wait for the context to be done and cancel the evm. Even if the + // EVM has finished, cancelling may be done (repeatedly) + go func() { + <-ctx.Done() + evm.Cancel() + }() + + gp := new(core.GasPool).AddGas(msg.Gas()) + var execResult *core.ExecutionResult + ibs.Prepare(common.Hash{}, common.Hash{}, 0) + execResult, err = core.ApplyMessage(evm, msg, gp, true /* refunds */, true /* gasBailout */) + if err != nil { + return nil, err + } + traceResult.Output = common.CopyBytes(execResult.ReturnData) + if traceTypeStateDiff { + sdMap := make(map[common.Address]*StateDiffAccount) + traceResult.StateDiff = sdMap + sd := &StateDiff{sdMap: sdMap} + if err = ibs.FinalizeTx(evm.ChainRules(), sd); err != nil { + return nil, err + } + // Create initial IntraBlockState, we will compare it with ibs (IntraBlockState after the transaction) + initialIbs := state.New(stateReader) + sd.CompareStates(initialIbs, ibs) + } + + // If the timer caused an abort, return an appropriate error message + if evm.Cancelled() { + return nil, fmt.Errorf("execution aborted (timeout = %v)", callTimeout) + } + + return traceResult, nil +} + +// CallMany implements trace_callMany. 
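+// The calls payload is expected to be a JSON array of [callParam, traceTypes] pairs,
+// for example (all values illustrative placeholders, not taken from any fixture):
+//
+//	[
+//	  [{"from": "0x...", "to": "0x...", "value": "0x1"}, ["trace", "stateDiff"]],
+//	  [{"from": "0x...", "to": "0x...", "value": "0x2"}, ["vmTrace"]]
+//	]
+//
+// Every call is executed on top of the state at parentNrOrHash (the latest block when
+// nil), and state changes committed by earlier calls in the batch are visible to the
+// calls that follow them.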
+func (api *TraceAPIImpl) CallMany(ctx context.Context, calls json.RawMessage, parentNrOrHash *rpc.BlockNumberOrHash) ([]*TraceCallResult, error) { + dbtx, err := api.kv.BeginRo(ctx) + if err != nil { + return nil, err + } + defer dbtx.Rollback() + + var callParams []TraceCallParam + dec := json.NewDecoder(bytes.NewReader(calls)) + tok, err := dec.Token() + if err != nil { + return nil, err + } + if tok != json.Delim('[') { + return nil, fmt.Errorf("expected array of [callparam, tracetypes]") + } + for dec.More() { + tok, err = dec.Token() + if err != nil { + return nil, err + } + if tok != json.Delim('[') { + return nil, fmt.Errorf("expected [callparam, tracetypes]") + } + callParams = append(callParams, TraceCallParam{}) + args := &callParams[len(callParams)-1] + if err = dec.Decode(args); err != nil { + return nil, err + } + if err = dec.Decode(&args.traceTypes); err != nil { + return nil, err + } + tok, err = dec.Token() + if err != nil { + return nil, err + } + if tok != json.Delim(']') { + return nil, fmt.Errorf("expected end of [callparam, tracetypes]") + } + } + tok, err = dec.Token() + if err != nil { + return nil, err + } + if tok != json.Delim(']') { + return nil, fmt.Errorf("expected end of array of [callparam, tracetypes]") + } + var baseFee *uint256.Int + if parentNrOrHash == nil { + var num = rpc.LatestBlockNumber + parentNrOrHash = &rpc.BlockNumberOrHash{BlockNumber: &num} + } + blockNumber, hash, _, err := rpchelper.GetBlockNumber(*parentNrOrHash, dbtx, api.filters) + if err != nil { + return nil, err + } + + // TODO: can read here only parent header + parentBlock, err := api.blockWithSenders(dbtx, hash, blockNumber) + if err != nil { + return nil, err + } + parentHeader := parentBlock.Header() + if parentHeader == nil { + return nil, fmt.Errorf("parent header %d(%x) not found", blockNumber, hash) + } + if parentHeader != nil && parentHeader.BaseFee != nil { + var overflow bool + baseFee, overflow = uint256.FromBig(parentHeader.BaseFee) + if overflow { + return nil, fmt.Errorf("header.BaseFee uint256 overflow") + } + } + msgs := make([]types.Message, len(callParams)) + for i, args := range callParams { + msgs[i], err = args.ToMessage(api.gasCap, baseFee) + if err != nil { + return nil, fmt.Errorf("convert callParam to msg: %w", err) + } + } + return api.doCallMany(ctx, dbtx, msgs, callParams, parentNrOrHash, nil, true /* gasBailout */, -1 /* all tx indices */) +} + +func (api *TraceAPIImpl) doCallMany(ctx context.Context, dbtx kv.Tx, msgs []types.Message, callParams []TraceCallParam, parentNrOrHash *rpc.BlockNumberOrHash, header *types.Header, + gasBailout bool, txIndexNeeded int) ([]*TraceCallResult, error) { + chainConfig, err := api.chainConfig(dbtx) + if err != nil { + return nil, err + } + + if parentNrOrHash == nil { + var num = rpc.LatestBlockNumber + parentNrOrHash = &rpc.BlockNumberOrHash{BlockNumber: &num} + } + blockNumber, hash, latest, err := rpchelper.GetBlockNumber(*parentNrOrHash, dbtx, api.filters) + if err != nil { + return nil, err + } + var stateReader state.StateReader + if latest { + cacheView, err := api.stateCache.View(ctx, dbtx) + if err != nil { + return nil, err + } + stateReader = state.NewCachedReader2(cacheView, dbtx) // this cache stays between RPC calls + } else { + stateReader = state.NewPlainState(dbtx, blockNumber+1) + } + stateCache := shards.NewStateCache(32, 0 /* no limit */) // this cache living only during current RPC call, but required to store state writes + cachedReader := state.NewCachedReader(stateReader, stateCache) + noop := 
state.NewNoopWriter() + cachedWriter := state.NewCachedWriter(noop, stateCache) + + // TODO: can read here only parent header + parentBlock, err := api.blockWithSenders(dbtx, hash, blockNumber) + if err != nil { + return nil, err + } + parentHeader := parentBlock.Header() + if parentHeader == nil { + return nil, fmt.Errorf("parent header %d(%x) not found", blockNumber, hash) + } + + // Setup context so it may be cancelled the call has completed + // or, in case of unmetered gas, setup a context with a timeout. + var cancel context.CancelFunc + if callTimeout > 0 { + ctx, cancel = context.WithTimeout(ctx, callTimeout) + } else { + ctx, cancel = context.WithCancel(ctx) + } + + // Make sure the context is cancelled when the call has completed + // this makes sure resources are cleaned up. + defer cancel() + results := []*TraceCallResult{} + + useParent := false + if header == nil { + header = parentHeader + useParent = true + } + + contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } + if api.TevmEnabled { + contractHasTEVM = ethdb.GetHasTEVM(dbtx) + } + + for txIndex, msg := range msgs { + if err := libcommon.Stopped(ctx.Done()); err != nil { + return nil, err + } + traceResult := &TraceCallResult{Trace: []*ParityTrace{}} + var traceTypeTrace, traceTypeStateDiff, traceTypeVmTrace bool + args := callParams[txIndex] + for _, traceType := range args.traceTypes { + switch traceType { + case TraceTypeTrace: + traceTypeTrace = true + case TraceTypeStateDiff: + traceTypeStateDiff = true + case TraceTypeVmTrace: + traceTypeVmTrace = true + default: + return nil, fmt.Errorf("unrecognized trace type: %s", traceType) + } + } + vmConfig := vm.Config{} + if (traceTypeTrace && (txIndexNeeded == -1 || txIndex == txIndexNeeded)) || traceTypeVmTrace { + var ot OeTracer + ot.compat = api.compatibility + ot.r = traceResult + ot.idx = []string{fmt.Sprintf("%d-", txIndex)} + if traceTypeTrace && (txIndexNeeded == -1 || txIndex == txIndexNeeded) { + ot.traceAddr = []int{} + } + if traceTypeVmTrace { + traceResult.VmTrace = &VmTrace{Ops: []*VmTraceOp{}} + } + vmConfig.Debug = true + vmConfig.Tracer = &ot + } + + // Get a new instance of the EVM. 
+ blockCtx, txCtx := transactions.GetEvmContext(msg, header, parentNrOrHash.RequireCanonical, dbtx, contractHasTEVM) + if useParent { + blockCtx.GasLimit = math.MaxUint64 + blockCtx.MaxGasLimit = true + } + ibs := state.New(cachedReader) + // Create initial IntraBlockState, we will compare it with ibs (IntraBlockState after the transaction) + + evm := vm.NewEVM(blockCtx, txCtx, ibs, chainConfig, vmConfig) + + gp := new(core.GasPool).AddGas(msg.Gas()) + var execResult *core.ExecutionResult + // Clone the state cache before applying the changes, clone is discarded + var cloneReader state.StateReader + if traceTypeStateDiff { + cloneCache := stateCache.Clone() + cloneReader = state.NewCachedReader(stateReader, cloneCache) + } + if args.txHash != nil { + ibs.Prepare(*args.txHash, header.Hash(), txIndex) + } else { + ibs.Prepare(common.Hash{}, header.Hash(), txIndex) + } + execResult, err = core.ApplyMessage(evm, msg, gp, true /* refunds */, gasBailout /* gasBailout */) + if err != nil { + return nil, fmt.Errorf("first run for txIndex %d error: %w", txIndex, err) + } + traceResult.Output = common.CopyBytes(execResult.ReturnData) + if traceTypeStateDiff { + initialIbs := state.New(cloneReader) + sdMap := make(map[common.Address]*StateDiffAccount) + traceResult.StateDiff = sdMap + sd := &StateDiff{sdMap: sdMap} + if err = ibs.FinalizeTx(evm.ChainRules(), sd); err != nil { + return nil, err + } + sd.CompareStates(initialIbs, ibs) + if err = ibs.CommitBlock(evm.ChainRules(), cachedWriter); err != nil { + return nil, err + } + } else { + if err = ibs.FinalizeTx(evm.ChainRules(), noop); err != nil { + return nil, err + } + if err = ibs.CommitBlock(evm.ChainRules(), cachedWriter); err != nil { + return nil, err + } + } + if !traceTypeTrace { + traceResult.Trace = []*ParityTrace{} + } + results = append(results, traceResult) + } + return results, nil +} + +// RawTransaction implements trace_rawTransaction. 
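+// It is currently a stub: it returns a "not implemented" error regardless of the
+// transaction hash or trace types passed in.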
+func (api *TraceAPIImpl) RawTransaction(ctx context.Context, txHash common.Hash, traceTypes []string) ([]interface{}, error) { + var stub []interface{} + return stub, fmt.Errorf(NotImplemented, "trace_rawTransaction") +} diff --git a/cmd/rpcdaemon22/commands/trace_adhoc_test.go b/cmd/rpcdaemon22/commands/trace_adhoc_test.go new file mode 100644 index 00000000000..84805dd8422 --- /dev/null +++ b/cmd/rpcdaemon22/commands/trace_adhoc_test.go @@ -0,0 +1,108 @@ +package commands + +import ( + "context" + "encoding/json" + "testing" + + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/kvcache" + "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/cli/httpcfg" + "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/rpcdaemontest" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/hexutil" + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/turbo/snapshotsync" + "github.com/stretchr/testify/require" +) + +func TestEmptyQuery(t *testing.T) { + db := rpcdaemontest.CreateTestKV(t) + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + api := NewTraceAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, &httpcfg.HttpCfg{}) + // Call GetTransactionReceipt for transaction which is not in the database + var latest = rpc.LatestBlockNumber + results, err := api.CallMany(context.Background(), json.RawMessage("[]"), &rpc.BlockNumberOrHash{BlockNumber: &latest}) + if err != nil { + t.Errorf("calling CallMany: %v", err) + } + if results == nil { + t.Errorf("expected empty array, got nil") + } + if len(results) > 0 { + t.Errorf("expected empty array, got %d elements", len(results)) + } +} +func TestCoinbaseBalance(t *testing.T) { + db := rpcdaemontest.CreateTestKV(t) + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + api := NewTraceAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, &httpcfg.HttpCfg{}) + // Call GetTransactionReceipt for transaction which is not in the database + var latest = rpc.LatestBlockNumber + results, err := api.CallMany(context.Background(), json.RawMessage(` +[ + [{"from":"0x71562b71999873db5b286df957af199ec94617f7","to":"0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e","gas":"0x15f90","gasPrice":"0x4a817c800","value":"0x1"},["trace", "stateDiff"]], + [{"from":"0x71562b71999873db5b286df957af199ec94617f7","to":"0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e","gas":"0x15f90","gasPrice":"0x4a817c800","value":"0x1"},["trace", "stateDiff"]] +] +`), &rpc.BlockNumberOrHash{BlockNumber: &latest}) + if err != nil { + t.Errorf("calling CallMany: %v", err) + } + if results == nil { + t.Errorf("expected empty array, got nil") + } + if len(results) != 2 { + t.Errorf("expected array with 2 elements, got %d elements", len(results)) + } + // Expect balance increase of the coinbase (zero address) + if _, ok := results[1].StateDiff[common.Address{}]; !ok { + t.Errorf("expected balance increase for coinbase (zero address)") + } +} + +func TestReplayTransaction(t *testing.T) { + db := rpcdaemontest.CreateTestKV(t) + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + api := NewTraceAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, &httpcfg.HttpCfg{}) + var txnHash common.Hash + if err := db.View(context.Background(), func(tx kv.Tx) error { + b, err := rawdb.ReadBlockByNumber(tx, 6) + if err != nil { + return err + } + txnHash = b.Transactions()[5].Hash() + return nil + }); err != nil { + t.Fatal(err) + } + + // 
Call GetTransactionReceipt for transaction which is not in the database + results, err := api.ReplayTransaction(context.Background(), txnHash, []string{"stateDiff"}) + if err != nil { + t.Errorf("calling ReplayTransaction: %v", err) + } + require.NotNil(t, results) + require.NotNil(t, results.StateDiff) + addrDiff := results.StateDiff[common.HexToAddress("0x0000000000000006000000000000000000000000")] + v := addrDiff.Balance.(map[string]*hexutil.Big)["+"].ToInt().Uint64() + require.Equal(t, uint64(1_000_000_000_000_000), v) +} + +func TestReplayBlockTransactions(t *testing.T) { + db := rpcdaemontest.CreateTestKV(t) + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + api := NewTraceAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, &httpcfg.HttpCfg{}) + + // Call GetTransactionReceipt for transaction which is not in the database + n := rpc.BlockNumber(6) + results, err := api.ReplayBlockTransactions(context.Background(), rpc.BlockNumberOrHash{BlockNumber: &n}, []string{"stateDiff"}) + if err != nil { + t.Errorf("calling ReplayBlockTransactions: %v", err) + } + require.NotNil(t, results) + require.NotNil(t, results[0].StateDiff) + addrDiff := results[0].StateDiff[common.HexToAddress("0x0000000000000001000000000000000000000000")] + v := addrDiff.Balance.(map[string]*hexutil.Big)["+"].ToInt().Uint64() + require.Equal(t, uint64(1_000_000_000_000_000), v) +} diff --git a/cmd/rpcdaemon22/commands/trace_api.go b/cmd/rpcdaemon22/commands/trace_api.go new file mode 100644 index 00000000000..c8a34cc2576 --- /dev/null +++ b/cmd/rpcdaemon22/commands/trace_api.go @@ -0,0 +1,49 @@ +package commands + +import ( + "context" + "encoding/json" + + jsoniter "github.com/json-iterator/go" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/cli/httpcfg" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/hexutil" + "github.com/ledgerwatch/erigon/rpc" +) + +// TraceAPI RPC interface into tracing API +type TraceAPI interface { + // Ad-hoc (see ./trace_adhoc.go) + ReplayBlockTransactions(ctx context.Context, blockNr rpc.BlockNumberOrHash, traceTypes []string) ([]*TraceCallResult, error) + ReplayTransaction(ctx context.Context, txHash common.Hash, traceTypes []string) (*TraceCallResult, error) + Call(ctx context.Context, call TraceCallParam, types []string, blockNr *rpc.BlockNumberOrHash) (*TraceCallResult, error) + CallMany(ctx context.Context, calls json.RawMessage, blockNr *rpc.BlockNumberOrHash) ([]*TraceCallResult, error) + RawTransaction(ctx context.Context, txHash common.Hash, traceTypes []string) ([]interface{}, error) + + // Filtering (see ./trace_filtering.go) + Transaction(ctx context.Context, txHash common.Hash) (ParityTraces, error) + Get(ctx context.Context, txHash common.Hash, txIndicies []hexutil.Uint64) (*ParityTrace, error) + Block(ctx context.Context, blockNr rpc.BlockNumber) (ParityTraces, error) + Filter(ctx context.Context, req TraceFilterRequest, stream *jsoniter.Stream) error +} + +// TraceAPIImpl is implementation of the TraceAPI interface based on remote Db access +type TraceAPIImpl struct { + *BaseAPI + kv kv.RoDB + maxTraces uint64 + gasCap uint64 + compatibility bool // Bug for bug compatiblity with OpenEthereum +} + +// NewTraceAPI returns NewTraceAPI instance +func NewTraceAPI(base *BaseAPI, kv kv.RoDB, cfg *httpcfg.HttpCfg) *TraceAPIImpl { + return &TraceAPIImpl{ + BaseAPI: base, + kv: kv, + maxTraces: cfg.MaxTraces, + gasCap: cfg.Gascap, + compatibility: cfg.TraceCompatibility, + } +} 
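For context, the ad-hoc API defined above can be driven directly from Go in the same way the tests in trace_adhoc_test.go do. Below is a minimal sketch of a single trace_call through TraceAPIImpl.Call; it is not part of the patch. The test name is invented, the addresses and gas values are borrowed from TestCoinbaseBalance, and it is only an assumption that such a plain transfer succeeds against the test fixture's latest state.

package commands

import (
	"context"
	"math/big"
	"testing"

	"github.com/ledgerwatch/erigon-lib/kv/kvcache"
	"github.com/ledgerwatch/erigon/cmd/rpcdaemon22/cli/httpcfg"
	"github.com/ledgerwatch/erigon/cmd/rpcdaemon22/rpcdaemontest"
	"github.com/ledgerwatch/erigon/common"
	"github.com/ledgerwatch/erigon/common/hexutil"
	"github.com/ledgerwatch/erigon/rpc"
	"github.com/ledgerwatch/erigon/turbo/snapshotsync"
)

func TestTraceCallSketch(t *testing.T) {
	db := rpcdaemontest.CreateTestKV(t)
	stateCache := kvcache.New(kvcache.DefaultCoherentConfig)
	api := NewTraceAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, &httpcfg.HttpCfg{})

	// A plain value transfer, traced with both "trace" and "stateDiff".
	from := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7")
	to := common.HexToAddress("0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e")
	gas := hexutil.Uint64(90000) // 0x15f90, as in TestCoinbaseBalance
	args := TraceCallParam{
		From:     &from,
		To:       &to,
		Gas:      &gas,
		GasPrice: (*hexutil.Big)(big.NewInt(20000000000)), // 0x4a817c800
		Value:    (*hexutil.Big)(big.NewInt(1)),
	}

	latest := rpc.LatestBlockNumber
	result, err := api.Call(context.Background(), args, []string{TraceTypeTrace, TraceTypeStateDiff},
		&rpc.BlockNumberOrHash{BlockNumber: &latest})
	if err != nil {
		t.Fatalf("calling Call: %v", err)
	}
	if result == nil {
		t.Fatal("expected a non-nil TraceCallResult")
	}
	// result.Trace holds the Parity-style call frames, result.StateDiff the per-account deltas.
}

CallMany works the same way but takes the whole batch as raw JSON, as exercised by TestCoinbaseBalance above.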
diff --git a/cmd/rpcdaemon22/commands/trace_filtering.go b/cmd/rpcdaemon22/commands/trace_filtering.go new file mode 100644 index 00000000000..fdae1610393 --- /dev/null +++ b/cmd/rpcdaemon22/commands/trace_filtering.go @@ -0,0 +1,520 @@ +package commands + +import ( + "context" + "errors" + "fmt" + + "github.com/RoaringBitmap/roaring/roaring64" + jsoniter "github.com/json-iterator/go" + "github.com/ledgerwatch/erigon-lib/kv" + + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/hexutil" + "github.com/ledgerwatch/erigon/consensus/ethash" + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/ethdb/bitmapdb" + "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/rpc" +) + +// Transaction implements trace_transaction +func (api *TraceAPIImpl) Transaction(ctx context.Context, txHash common.Hash) (ParityTraces, error) { + tx, err := api.kv.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + chainConfig, err := api.chainConfig(tx) + if err != nil { + return nil, err + } + + blockNumber, ok, err := api.txnLookup(ctx, tx, txHash) + if err != nil { + return nil, err + } + if !ok { + return nil, nil + } + block, err := api.blockByNumberWithSenders(tx, blockNumber) + if err != nil { + return nil, err + } + if block == nil { + return nil, nil + } + + // Extract transactions from block + block, bErr := api.blockByNumberWithSenders(tx, blockNumber) + if bErr != nil { + return nil, bErr + } + if block == nil { + return nil, fmt.Errorf("could not find block %d", blockNumber) + } + var txIndex int + for idx, txn := range block.Transactions() { + if txn.Hash() == txHash { + txIndex = idx + break + } + } + bn := hexutil.Uint64(blockNumber) + + parentNr := bn + if parentNr > 0 { + parentNr -= 1 + } + hash := block.Hash() + + // Returns an array of trace arrays, one trace array for each transaction + traces, err := api.callManyTransactions(ctx, tx, block.Transactions(), []string{TraceTypeTrace}, block.ParentHash(), rpc.BlockNumber(parentNr), block.Header(), txIndex, types.MakeSigner(chainConfig, blockNumber), chainConfig.Rules(blockNumber)) + if err != nil { + return nil, err + } + + out := make([]ParityTrace, 0, len(traces)) + blockno := uint64(bn) + for txno, trace := range traces { + txhash := block.Transactions()[txno].Hash() + // We're only looking for a specific transaction + if txno == txIndex { + for _, pt := range trace.Trace { + pt.BlockHash = &hash + pt.BlockNumber = &blockno + pt.TransactionHash = &txhash + txpos := uint64(txno) + pt.TransactionPosition = &txpos + out = append(out, *pt) + } + } + } + + return out, err +} + +// Get implements trace_get +func (api *TraceAPIImpl) Get(ctx context.Context, txHash common.Hash, indicies []hexutil.Uint64) (*ParityTrace, error) { + // Parity fails if it gets more than a single index. It returns nothing in this case. Must we? 
+ if len(indicies) > 1 { + return nil, nil + } + + traces, err := api.Transaction(ctx, txHash) + if err != nil { + return nil, err + } + + // 'trace_get' index starts at one (oddly) + firstIndex := int(indicies[0]) + 1 + for i, trace := range traces { + if i == firstIndex { + return &trace, nil + } + } + return nil, err +} + +// Block implements trace_block +func (api *TraceAPIImpl) Block(ctx context.Context, blockNr rpc.BlockNumber) (ParityTraces, error) { + tx, err := api.kv.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + blockNum, err := getBlockNumber(blockNr, tx) + if err != nil { + return nil, err + } + if blockNum == 0 { + return []ParityTrace{}, nil + } + bn := hexutil.Uint64(blockNum) + + // Extract transactions from block + block, bErr := api.blockByNumberWithSenders(tx, blockNum) + if bErr != nil { + return nil, bErr + } + if block == nil { + return nil, fmt.Errorf("could not find block %d", uint64(bn)) + } + hash := block.Hash() + + parentNr := bn + if parentNr > 0 { + parentNr -= 1 + } + + chainConfig, err := api.chainConfig(tx) + if err != nil { + return nil, err + } + traces, err := api.callManyTransactions(ctx, tx, block.Transactions(), []string{TraceTypeTrace}, block.ParentHash(), rpc.BlockNumber(parentNr), block.Header(), -1 /* all tx indices */, types.MakeSigner(chainConfig, blockNum), chainConfig.Rules(blockNum)) + if err != nil { + return nil, err + } + + out := make([]ParityTrace, 0, len(traces)) + blockno := uint64(bn) + for txno, trace := range traces { + txhash := block.Transactions()[txno].Hash() + txpos := uint64(txno) + for _, pt := range trace.Trace { + pt.BlockHash = &hash + pt.BlockNumber = &blockno + pt.TransactionHash = &txhash + pt.TransactionPosition = &txpos + out = append(out, *pt) + } + } + minerReward, uncleRewards := ethash.AccumulateRewards(chainConfig, block.Header(), block.Uncles()) + var tr ParityTrace + var rewardAction = &RewardTraceAction{} + rewardAction.Author = block.Coinbase() + rewardAction.RewardType = "block" // nolint: goconst + rewardAction.Value.ToInt().Set(minerReward.ToBig()) + tr.Action = rewardAction + tr.BlockHash = &common.Hash{} + copy(tr.BlockHash[:], block.Hash().Bytes()) + tr.BlockNumber = new(uint64) + *tr.BlockNumber = block.NumberU64() + tr.Type = "reward" // nolint: goconst + tr.TraceAddress = []int{} + out = append(out, tr) + for i, uncle := range block.Uncles() { + if i < len(uncleRewards) { + var tr ParityTrace + rewardAction = &RewardTraceAction{} + rewardAction.Author = uncle.Coinbase + rewardAction.RewardType = "uncle" // nolint: goconst + rewardAction.Value.ToInt().Set(uncleRewards[i].ToBig()) + tr.Action = rewardAction + tr.BlockHash = &common.Hash{} + copy(tr.BlockHash[:], block.Hash().Bytes()) + tr.BlockNumber = new(uint64) + *tr.BlockNumber = block.NumberU64() + tr.Type = "reward" // nolint: goconst + tr.TraceAddress = []int{} + out = append(out, tr) + } + } + + return out, err +} + +// Filter implements trace_filter +// NOTE: We do not store full traces - we just store index for each address +// Pull blocks which have txs with matching address +func (api *TraceAPIImpl) Filter(ctx context.Context, req TraceFilterRequest, stream *jsoniter.Stream) error { + dbtx, err1 := api.kv.BeginRo(ctx) + if err1 != nil { + stream.WriteNil() + return fmt.Errorf("traceFilter cannot open tx: %w", err1) + } + defer dbtx.Rollback() + + var fromBlock uint64 + var toBlock uint64 + if req.FromBlock == nil { + fromBlock = 0 + } else { + fromBlock = uint64(*req.FromBlock) + } + + if req.ToBlock == nil { 
+ headNumber := rawdb.ReadHeaderNumber(dbtx, rawdb.ReadHeadHeaderHash(dbtx)) + toBlock = *headNumber + } else { + toBlock = uint64(*req.ToBlock) + } + + if fromBlock > toBlock { + stream.WriteNil() + return fmt.Errorf("invalid parameters: fromBlock cannot be greater than toBlock") + } + + fromAddresses := make(map[common.Address]struct{}, len(req.FromAddress)) + toAddresses := make(map[common.Address]struct{}, len(req.ToAddress)) + + var ( + allBlocks roaring64.Bitmap + blocksTo roaring64.Bitmap + ) + + for _, addr := range req.FromAddress { + if addr != nil { + b, err := bitmapdb.Get64(dbtx, kv.CallFromIndex, addr.Bytes(), fromBlock, toBlock) + if err != nil { + if errors.Is(err, ethdb.ErrKeyNotFound) { + continue + } + stream.WriteNil() + return err + } + allBlocks.Or(b) + fromAddresses[*addr] = struct{}{} + } + } + + for _, addr := range req.ToAddress { + if addr != nil { + b, err := bitmapdb.Get64(dbtx, kv.CallToIndex, addr.Bytes(), fromBlock, toBlock) + if err != nil { + if errors.Is(err, ethdb.ErrKeyNotFound) { + continue + } + stream.WriteNil() + return err + } + blocksTo.Or(b) + toAddresses[*addr] = struct{}{} + } + } + + switch req.Mode { + case TraceFilterModeIntersection: + allBlocks.And(&blocksTo) + case TraceFilterModeUnion: + fallthrough + default: + allBlocks.Or(&blocksTo) + } + + // Special case - if no addresses specified, take all traces + if len(req.FromAddress) == 0 && len(req.ToAddress) == 0 { + allBlocks.AddRange(fromBlock, toBlock+1) + } else { + allBlocks.RemoveRange(0, fromBlock) + allBlocks.RemoveRange(toBlock+1, uint64(0x100000000)) + } + + chainConfig, err := api.chainConfig(dbtx) + if err != nil { + stream.WriteNil() + return err + } + + var json = jsoniter.ConfigCompatibleWithStandardLibrary + stream.WriteArrayStart() + first := true + // Execute all transactions in picked blocks + + count := uint64(^uint(0)) // this just makes it easier to use below + if req.Count != nil { + count = *req.Count + } + after := uint64(0) // this just makes it easier to use below + if req.After != nil { + after = *req.After + } + nSeen := uint64(0) + nExported := uint64(0) + + it := allBlocks.Iterator() + for it.HasNext() { + b := uint64(it.Next()) + // Extract transactions from block + hash, hashErr := rawdb.ReadCanonicalHash(dbtx, b) + if hashErr != nil { + stream.WriteNil() + return hashErr + } + + block, bErr := api.blockWithSenders(dbtx, hash, b) + if bErr != nil { + stream.WriteNil() + return bErr + } + if block == nil { + stream.WriteNil() + return fmt.Errorf("could not find block %x %d", hash, b) + } + + blockHash := block.Hash() + blockNumber := block.NumberU64() + txs := block.Transactions() + t, tErr := api.callManyTransactions(ctx, dbtx, txs, []string{TraceTypeTrace}, block.ParentHash(), rpc.BlockNumber(block.NumberU64()-1), block.Header(), -1 /* all tx indices */, types.MakeSigner(chainConfig, b), chainConfig.Rules(b)) + if tErr != nil { + stream.WriteNil() + return tErr + } + includeAll := len(fromAddresses) == 0 && len(toAddresses) == 0 + for i, trace := range t { + txPosition := uint64(i) + txHash := txs[i].Hash() + // Check if transaction concerns any of the addresses we wanted + for _, pt := range trace.Trace { + if includeAll || filter_trace(pt, fromAddresses, toAddresses) { + nSeen++ + pt.BlockHash = &blockHash + pt.BlockNumber = &blockNumber + pt.TransactionHash = &txHash + pt.TransactionPosition = &txPosition + b, err := json.Marshal(pt) + if err != nil { + stream.WriteNil() + return err + } + if nSeen > after && nExported < count { + if first { + first = 
false + } else { + stream.WriteMore() + } + stream.Write(b) + nExported++ + } + } + } + } + minerReward, uncleRewards := ethash.AccumulateRewards(chainConfig, block.Header(), block.Uncles()) + if _, ok := toAddresses[block.Coinbase()]; ok || includeAll { + nSeen++ + var tr ParityTrace + var rewardAction = &RewardTraceAction{} + rewardAction.Author = block.Coinbase() + rewardAction.RewardType = "block" // nolint: goconst + rewardAction.Value.ToInt().Set(minerReward.ToBig()) + tr.Action = rewardAction + tr.BlockHash = &common.Hash{} + copy(tr.BlockHash[:], block.Hash().Bytes()) + tr.BlockNumber = new(uint64) + *tr.BlockNumber = block.NumberU64() + tr.Type = "reward" // nolint: goconst + tr.TraceAddress = []int{} + b, err := json.Marshal(tr) + if err != nil { + stream.WriteNil() + return err + } + if nSeen > after && nExported < count { + if first { + first = false + } else { + stream.WriteMore() + } + stream.Write(b) + nExported++ + } + } + for i, uncle := range block.Uncles() { + if _, ok := toAddresses[uncle.Coinbase]; ok || includeAll { + if i < len(uncleRewards) { + nSeen++ + var tr ParityTrace + rewardAction := &RewardTraceAction{} + rewardAction.Author = uncle.Coinbase + rewardAction.RewardType = "uncle" // nolint: goconst + rewardAction.Value.ToInt().Set(uncleRewards[i].ToBig()) + tr.Action = rewardAction + tr.BlockHash = &common.Hash{} + copy(tr.BlockHash[:], block.Hash().Bytes()) + tr.BlockNumber = new(uint64) + *tr.BlockNumber = block.NumberU64() + tr.Type = "reward" // nolint: goconst + tr.TraceAddress = []int{} + b, err := json.Marshal(tr) + if err != nil { + stream.WriteNil() + return err + } + if nSeen > after && nExported < count { + if first { + first = false + } else { + stream.WriteMore() + } + stream.Write(b) + nExported++ + } + } + } + } + } + stream.WriteArrayEnd() + return stream.Flush() +} + +func filter_trace(pt *ParityTrace, fromAddresses map[common.Address]struct{}, toAddresses map[common.Address]struct{}) bool { + switch action := pt.Action.(type) { + case *CallTraceAction: + _, f := fromAddresses[action.From] + _, t := toAddresses[action.To] + if f || t { + return true + } + case *CreateTraceAction: + _, f := fromAddresses[action.From] + if f { + return true + } + + if res, ok := pt.Result.(*CreateTraceResult); ok { + if res.Address != nil { + if _, t := toAddresses[*res.Address]; t { + return true + } + } + } + case *SuicideTraceAction: + _, f := fromAddresses[action.Address] + _, t := toAddresses[action.RefundAddress] + if f || t { + return true + } + } + + return false +} + +func (api *TraceAPIImpl) callManyTransactions(ctx context.Context, dbtx kv.Tx, txs []types.Transaction, traceTypes []string, parentHash common.Hash, parentNo rpc.BlockNumber, header *types.Header, txIndex int, signer *types.Signer, rules *params.Rules) ([]*TraceCallResult, error) { + callParams := make([]TraceCallParam, 0, len(txs)) + msgs := make([]types.Message, len(txs)) + for i, tx := range txs { + hash := tx.Hash() + callParams = append(callParams, TraceCallParam{ + txHash: &hash, + traceTypes: traceTypes, + }) + var err error + if msgs[i], err = tx.AsMessage(*signer, header.BaseFee, rules); err != nil { + return nil, fmt.Errorf("convert tx into msg: %w", err) + } + } + + traces, cmErr := api.doCallMany(ctx, dbtx, msgs, callParams, &rpc.BlockNumberOrHash{ + BlockNumber: &parentNo, + BlockHash: &parentHash, + RequireCanonical: true, + }, header, false /* gasBailout */, txIndex) + + if cmErr != nil { + return nil, cmErr + } + + return traces, nil +} + +// TraceFilterRequest represents 
the arguments for trace_filter +type TraceFilterRequest struct { + FromBlock *hexutil.Uint64 `json:"fromBlock"` + ToBlock *hexutil.Uint64 `json:"toBlock"` + FromAddress []*common.Address `json:"fromAddress"` + ToAddress []*common.Address `json:"toAddress"` + Mode TraceFilterMode `json:"mode"` + After *uint64 `json:"after"` + Count *uint64 `json:"count"` +} + +type TraceFilterMode string + +const ( + // Default mode for TraceFilter. Unions results referred to addresses from FromAddress or ToAddress + TraceFilterModeUnion = "union" + // IntersectionMode retrives results referred to addresses provided both in FromAddress and ToAddress + TraceFilterModeIntersection = "intersection" +) diff --git a/cmd/rpcdaemon22/commands/trace_types.go b/cmd/rpcdaemon22/commands/trace_types.go new file mode 100644 index 00000000000..2b98c70bdb0 --- /dev/null +++ b/cmd/rpcdaemon22/commands/trace_types.go @@ -0,0 +1,160 @@ +package commands + +import ( + "fmt" + + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/hexutil" + "github.com/ledgerwatch/erigon/core/types" +) + +// TODO:(tjayrush) +// Implementation Notes: +// -- Many of these fields are of string type. I chose to do this for ease of debugging / clarity of code (less +// conversions, etc.).Once we start optimizing this code, many of these fields will be made into their native +// types (Addresses, uint64, etc.) +// -- The ordering of the fields in the Parity types should not be changed. This allows us to compare output +// directly with existing Parity tests + +// GethTrace The trace as received from the existing Geth javascript tracer 'callTracer' +type GethTrace struct { + Type string `json:"type"` + Error string `json:"error"` + From string `json:"from"` + To string `json:"to"` + Value string `json:"value"` + Gas string `json:"gas"` + GasUsed string `json:"gasUsed"` + Input string `json:"input"` + Output string `json:"output"` + Time string `json:"time"` + Calls GethTraces `json:"calls"` +} + +// GethTraces an array of GethTraces +type GethTraces []*GethTrace + +// ParityTrace A trace in the desired format (Parity/OpenEtherum) See: https://openethereum.github.io/wiki/JSONRPC-trace-module +type ParityTrace struct { + // Do not change the ordering of these fields -- allows for easier comparison with other clients + Action interface{} `json:"action"` // Can be either CallTraceAction or CreateTraceAction + BlockHash *common.Hash `json:"blockHash,omitempty"` + BlockNumber *uint64 `json:"blockNumber,omitempty"` + Error string `json:"error,omitempty"` + Result interface{} `json:"result"` + Subtraces int `json:"subtraces"` + TraceAddress []int `json:"traceAddress"` + TransactionHash *common.Hash `json:"transactionHash,omitempty"` + TransactionPosition *uint64 `json:"transactionPosition,omitempty"` + Type string `json:"type"` +} + +// ParityTraces An array of parity traces +type ParityTraces []ParityTrace + +// TraceAction A parity formatted trace action +type TraceAction struct { + // Do not change the ordering of these fields -- allows for easier comparison with other clients + Author string `json:"author,omitempty"` + RewardType string `json:"rewardType,omitempty"` + SelfDestructed string `json:"address,omitempty"` + Balance string `json:"balance,omitempty"` + CallType string `json:"callType,omitempty"` + From common.Address `json:"from"` + Gas hexutil.Big `json:"gas"` + Init hexutil.Bytes `json:"init,omitempty"` + Input hexutil.Bytes `json:"input,omitempty"` + RefundAddress string `json:"refundAddress,omitempty"` + To string 
`json:"to,omitempty"` + Value string `json:"value,omitempty"` +} + +type CallTraceAction struct { + From common.Address `json:"from"` + CallType string `json:"callType"` + Gas hexutil.Big `json:"gas"` + Input hexutil.Bytes `json:"input"` + To common.Address `json:"to"` + Value hexutil.Big `json:"value"` +} + +type CreateTraceAction struct { + From common.Address `json:"from"` + Gas hexutil.Big `json:"gas"` + Init hexutil.Bytes `json:"init"` + Value hexutil.Big `json:"value"` +} + +type SuicideTraceAction struct { + Address common.Address `json:"address"` + RefundAddress common.Address `json:"refundAddress"` + Balance hexutil.Big `json:"balance"` +} + +type RewardTraceAction struct { + Author common.Address `json:"author"` + RewardType string `json:"rewardType"` + Value hexutil.Big `json:"value"` +} + +type CreateTraceResult struct { + // Do not change the ordering of these fields -- allows for easier comparison with other clients + Address *common.Address `json:"address,omitempty"` + Code hexutil.Bytes `json:"code"` + GasUsed *hexutil.Big `json:"gasUsed"` +} + +// TraceResult A parity formatted trace result +type TraceResult struct { + // Do not change the ordering of these fields -- allows for easier comparison with other clients + GasUsed *hexutil.Big `json:"gasUsed"` + Output hexutil.Bytes `json:"output"` +} + +// Allows for easy printing of a geth trace for debugging +func (p GethTrace) String() string { + var ret string + ret += fmt.Sprintf("Type: %s\n", p.Type) + ret += fmt.Sprintf("From: %s\n", p.From) + ret += fmt.Sprintf("To: %s\n", p.To) + ret += fmt.Sprintf("Value: %s\n", p.Value) + ret += fmt.Sprintf("Gas: %s\n", p.Gas) + ret += fmt.Sprintf("GasUsed: %s\n", p.GasUsed) + ret += fmt.Sprintf("Input: %s\n", p.Input) + ret += fmt.Sprintf("Output: %s\n", p.Output) + return ret +} + +// Allows for easy printing of a parity trace for debugging +func (t ParityTrace) String() string { + var ret string + //ret += fmt.Sprintf("Action.SelfDestructed: %s\n", t.Action.SelfDestructed) + //ret += fmt.Sprintf("Action.Balance: %s\n", t.Action.Balance) + //ret += fmt.Sprintf("Action.CallType: %s\n", t.Action.CallType) + //ret += fmt.Sprintf("Action.From: %s\n", t.Action.From) + //ret += fmt.Sprintf("Action.Gas: %d\n", t.Action.Gas.ToInt()) + //ret += fmt.Sprintf("Action.Init: %s\n", t.Action.Init) + //ret += fmt.Sprintf("Action.Input: %s\n", t.Action.Input) + //ret += fmt.Sprintf("Action.RefundAddress: %s\n", t.Action.RefundAddress) + //ret += fmt.Sprintf("Action.To: %s\n", t.Action.To) + //ret += fmt.Sprintf("Action.Value: %s\n", t.Action.Value) + ret += fmt.Sprintf("BlockHash: %v\n", t.BlockHash) + ret += fmt.Sprintf("BlockNumber: %d\n", t.BlockNumber) + //ret += fmt.Sprintf("Result.Address: %s\n", t.Result.Address) + //ret += fmt.Sprintf("Result.Code: %s\n", t.Result.Code) + //ret += fmt.Sprintf("Result.GasUsed: %s\n", t.Result.GasUsed) + //ret += fmt.Sprintf("Result.Output: %s\n", t.Result.Output) + ret += fmt.Sprintf("Subtraces: %d\n", t.Subtraces) + ret += fmt.Sprintf("TraceAddress: %v\n", t.TraceAddress) + ret += fmt.Sprintf("TransactionHash: %v\n", t.TransactionHash) + ret += fmt.Sprintf("TransactionPosition: %d\n", t.TransactionPosition) + ret += fmt.Sprintf("Type: %s\n", t.Type) + return ret +} + +// Takes a hierarchical Geth trace with fields of different meaning stored in the same named fields depending on 'type'. 
Parity traces +// are flattened depth first and each field is put in its proper place +func (api *TraceAPIImpl) convertToParityTrace(gethTrace GethTrace, blockHash common.Hash, blockNumber uint64, tx types.Transaction, txIndex uint64, depth []int) ParityTraces { //nolint: unused + var traces ParityTraces // nolint prealloc + return traces +} diff --git a/cmd/rpcdaemon22/commands/tracing.go b/cmd/rpcdaemon22/commands/tracing.go new file mode 100644 index 00000000000..26312b8a78d --- /dev/null +++ b/cmd/rpcdaemon22/commands/tracing.go @@ -0,0 +1,241 @@ +package commands + +import ( + "context" + "fmt" + + "github.com/holiman/uint256" + jsoniter "github.com/json-iterator/go" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/consensus/ethash" + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/state" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/core/vm" + "github.com/ledgerwatch/erigon/eth/tracers" + "github.com/ledgerwatch/erigon/ethdb" + "github.com/ledgerwatch/erigon/internal/ethapi" + "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/turbo/rpchelper" + "github.com/ledgerwatch/erigon/turbo/transactions" + "github.com/ledgerwatch/log/v3" +) + +// TraceBlockByNumber implements debug_traceBlockByNumber. Returns Geth style block traces. +func (api *PrivateDebugAPIImpl) TraceBlockByNumber(ctx context.Context, blockNum rpc.BlockNumber, config *tracers.TraceConfig, stream *jsoniter.Stream) error { + return api.traceBlock(ctx, rpc.BlockNumberOrHashWithNumber(blockNum), config, stream) +} + +// TraceBlockByHash implements debug_traceBlockByHash. Returns Geth style block traces. +func (api *PrivateDebugAPIImpl) TraceBlockByHash(ctx context.Context, hash common.Hash, config *tracers.TraceConfig, stream *jsoniter.Stream) error { + return api.traceBlock(ctx, rpc.BlockNumberOrHashWithHash(hash, true), config, stream) +} + +func (api *PrivateDebugAPIImpl) traceBlock(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash, config *tracers.TraceConfig, stream *jsoniter.Stream) error { + tx, err := api.db.BeginRo(ctx) + if err != nil { + stream.WriteNil() + return err + } + defer tx.Rollback() + var block *types.Block + if number, ok := blockNrOrHash.Number(); ok { + block, err = api.blockByRPCNumber(number, tx) + } else if hash, ok := blockNrOrHash.Hash(); ok { + block, err = api.blockByHashWithSenders(tx, hash) + } else { + return fmt.Errorf("invalid arguments; neither block nor hash specified") + } + + if err != nil { + stream.WriteNil() + return err + } + + chainConfig, err := api.chainConfig(tx) + if err != nil { + stream.WriteNil() + return err + } + + contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } + if api.TevmEnabled { + contractHasTEVM = ethdb.GetHasTEVM(tx) + } + + getHeader := func(hash common.Hash, number uint64) *types.Header { + h, e := api._blockReader.Header(ctx, tx, hash, number) + if e != nil { + log.Error("getHeader error", "number", number, "hash", hash, "err", e) + } + return h + } + + _, blockCtx, _, ibs, reader, err := transactions.ComputeTxEnv(ctx, block, chainConfig, getHeader, contractHasTEVM, ethash.NewFaker(), tx, block.Hash(), 0) + if err != nil { + stream.WriteNil() + return err + } + + signer := types.MakeSigner(chainConfig, block.NumberU64()) + rules := chainConfig.Rules(block.NumberU64()) + stream.WriteArrayStart() + for idx, tx := range block.Transactions() { + select { + default: + case <-ctx.Done(): + stream.WriteNil() + return 
ctx.Err() + } + ibs.Prepare(tx.Hash(), block.Hash(), idx) + msg, _ := tx.AsMessage(*signer, block.BaseFee(), rules) + txCtx := vm.TxContext{ + TxHash: tx.Hash(), + Origin: msg.From(), + GasPrice: msg.GasPrice().ToBig(), + } + + transactions.TraceTx(ctx, msg, blockCtx, txCtx, ibs, config, chainConfig, stream) + _ = ibs.FinalizeTx(rules, reader) + if idx != len(block.Transactions())-1 { + stream.WriteMore() + } + stream.Flush() + } + stream.WriteArrayEnd() + stream.Flush() + return nil +} + +// TraceTransaction implements debug_traceTransaction. Returns Geth style transaction traces. +func (api *PrivateDebugAPIImpl) TraceTransaction(ctx context.Context, hash common.Hash, config *tracers.TraceConfig, stream *jsoniter.Stream) error { + tx, err := api.db.BeginRo(ctx) + if err != nil { + stream.WriteNil() + return err + } + defer tx.Rollback() + // Retrieve the transaction and assemble its EVM context + blockNum, ok, err := api.txnLookup(ctx, tx, hash) + if err != nil { + return err + } + if !ok { + return nil + } + block, err := api.blockByNumberWithSenders(tx, blockNum) + if err != nil { + return err + } + if block == nil { + return nil + } + blockHash := block.Hash() + var txnIndex uint64 + var txn types.Transaction + for i, transaction := range block.Transactions() { + if transaction.Hash() == hash { + txnIndex = uint64(i) + txn = transaction + break + } + } + if txn == nil { + var borTx *types.Transaction + borTx, _, _, _, err = rawdb.ReadBorTransaction(tx, hash) + + if err != nil { + return err + } + + if borTx != nil { + return nil + } + stream.WriteNil() + return fmt.Errorf("transaction %#x not found", hash) + } + chainConfig, err := api.chainConfig(tx) + if err != nil { + stream.WriteNil() + return err + } + + getHeader := func(hash common.Hash, number uint64) *types.Header { + return rawdb.ReadHeader(tx, hash, number) + } + contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } + if api.TevmEnabled { + contractHasTEVM = ethdb.GetHasTEVM(tx) + } + msg, blockCtx, txCtx, ibs, _, err := transactions.ComputeTxEnv(ctx, block, chainConfig, getHeader, contractHasTEVM, ethash.NewFaker(), tx, blockHash, txnIndex) + if err != nil { + stream.WriteNil() + return err + } + // Trace the transaction and return + return transactions.TraceTx(ctx, msg, blockCtx, txCtx, ibs, config, chainConfig, stream) +} + +func (api *PrivateDebugAPIImpl) TraceCall(ctx context.Context, args ethapi.CallArgs, blockNrOrHash rpc.BlockNumberOrHash, config *tracers.TraceConfig, stream *jsoniter.Stream) error { + dbtx, err := api.db.BeginRo(ctx) + if err != nil { + stream.WriteNil() + return err + } + defer dbtx.Rollback() + + chainConfig, err := api.chainConfig(dbtx) + if err != nil { + stream.WriteNil() + return err + } + + blockNumber, hash, latest, err := rpchelper.GetBlockNumber(blockNrOrHash, dbtx, api.filters) + if err != nil { + stream.WriteNil() + return err + } + var stateReader state.StateReader + if latest { + cacheView, err := api.stateCache.View(ctx, dbtx) + if err != nil { + return err + } + stateReader = state.NewCachedReader2(cacheView, dbtx) + } else { + stateReader = state.NewPlainState(dbtx, blockNumber) + } + header := rawdb.ReadHeader(dbtx, hash, blockNumber) + if header == nil { + stream.WriteNil() + return fmt.Errorf("block %d(%x) not found", blockNumber, hash) + } + ibs := state.New(stateReader) + + if config != nil && config.StateOverrides != nil { + if err := config.StateOverrides.Override(ibs); err != nil { + return err + } + } + + var baseFee *uint256.Int + if header 
!= nil && header.BaseFee != nil { + var overflow bool + baseFee, overflow = uint256.FromBig(header.BaseFee) + if overflow { + return fmt.Errorf("header.BaseFee uint256 overflow") + } + } + msg, err := args.ToMessage(api.GasCap, baseFee) + if err != nil { + return err + } + + contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } + if api.TevmEnabled { + contractHasTEVM = ethdb.GetHasTEVM(dbtx) + } + blockCtx, txCtx := transactions.GetEvmContext(msg, header, blockNrOrHash.RequireCanonical, dbtx, contractHasTEVM) + // Trace the transaction and return + return transactions.TraceTx(ctx, msg, blockCtx, txCtx, ibs, config, chainConfig, stream) +} diff --git a/cmd/rpcdaemon22/commands/txpool_api.go b/cmd/rpcdaemon22/commands/txpool_api.go new file mode 100644 index 00000000000..eccb66bc4f5 --- /dev/null +++ b/cmd/rpcdaemon22/commands/txpool_api.go @@ -0,0 +1,170 @@ +package commands + +import ( + "bytes" + "context" + "fmt" + + "github.com/ledgerwatch/erigon-lib/gointerfaces" + proto_txpool "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/hexutil" + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/rlp" +) + +// NetAPI the interface for the net_ RPC commands +type TxPoolAPI interface { + Content(ctx context.Context) (map[string]map[string]map[string]*RPCTransaction, error) +} + +// TxPoolAPIImpl data structure to store things needed for net_ commands +type TxPoolAPIImpl struct { + *BaseAPI + pool proto_txpool.TxpoolClient + db kv.RoDB +} + +// NewTxPoolAPI returns NetAPIImplImpl instance +func NewTxPoolAPI(base *BaseAPI, db kv.RoDB, pool proto_txpool.TxpoolClient) *TxPoolAPIImpl { + return &TxPoolAPIImpl{ + BaseAPI: base, + pool: pool, + db: db, + } +} + +func (api *TxPoolAPIImpl) Content(ctx context.Context) (map[string]map[string]map[string]*RPCTransaction, error) { + reply, err := api.pool.All(ctx, &proto_txpool.AllRequest{}) + if err != nil { + return nil, err + } + + content := map[string]map[string]map[string]*RPCTransaction{ + "pending": make(map[string]map[string]*RPCTransaction), + "baseFee": make(map[string]map[string]*RPCTransaction), + "queued": make(map[string]map[string]*RPCTransaction), + } + + pending := make(map[common.Address][]types.Transaction, 8) + baseFee := make(map[common.Address][]types.Transaction, 8) + queued := make(map[common.Address][]types.Transaction, 8) + for i := range reply.Txs { + stream := rlp.NewStream(bytes.NewReader(reply.Txs[i].RlpTx), 0) + txn, err := types.DecodeTransaction(stream) + if err != nil { + return nil, err + } + addr := gointerfaces.ConvertH160toAddress(reply.Txs[i].Sender) + switch reply.Txs[i].TxnType { + case proto_txpool.AllReply_PENDING: + if _, ok := pending[addr]; !ok { + pending[addr] = make([]types.Transaction, 0, 4) + } + pending[addr] = append(pending[addr], txn) + case proto_txpool.AllReply_BASE_FEE: + if _, ok := baseFee[addr]; !ok { + baseFee[addr] = make([]types.Transaction, 0, 4) + } + baseFee[addr] = append(baseFee[addr], txn) + case proto_txpool.AllReply_QUEUED: + if _, ok := queued[addr]; !ok { + queued[addr] = make([]types.Transaction, 0, 4) + } + queued[addr] = append(queued[addr], txn) + } + } + + tx, err := api.db.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + cc, err := api.chainConfig(tx) + if err != nil { + return nil, err + } + + curHeader := 
rawdb.ReadCurrentHeader(tx) + if curHeader == nil { + return nil, nil + } + // Flatten the pending transactions + for account, txs := range pending { + dump := make(map[string]*RPCTransaction) + for _, txn := range txs { + dump[fmt.Sprintf("%d", txn.GetNonce())] = newRPCPendingTransaction(txn, curHeader, cc) + } + content["pending"][account.Hex()] = dump + } + // Flatten the baseFee transactions + for account, txs := range baseFee { + dump := make(map[string]*RPCTransaction) + for _, txn := range txs { + dump[fmt.Sprintf("%d", txn.GetNonce())] = newRPCPendingTransaction(txn, curHeader, cc) + } + content["baseFee"][account.Hex()] = dump + } + // Flatten the queued transactions + for account, txs := range queued { + dump := make(map[string]*RPCTransaction) + for _, txn := range txs { + dump[fmt.Sprintf("%d", txn.GetNonce())] = newRPCPendingTransaction(txn, curHeader, cc) + } + content["queued"][account.Hex()] = dump + } + return content, nil +} + +// Status returns the number of pending and queued transaction in the pool. +func (api *TxPoolAPIImpl) Status(ctx context.Context) (map[string]hexutil.Uint, error) { + reply, err := api.pool.Status(ctx, &proto_txpool.StatusRequest{}) + if err != nil { + return nil, err + } + return map[string]hexutil.Uint{ + "pending": hexutil.Uint(reply.PendingCount), + "baseFee": hexutil.Uint(reply.BaseFeeCount), + "queued": hexutil.Uint(reply.QueuedCount), + }, nil +} + +/* + +// Inspect retrieves the content of the transaction pool and flattens it into an +// easily inspectable list. +func (s *PublicTxPoolAPI) Inspect() map[string]map[string]map[string]string { + content := map[string]map[string]map[string]string{ + "pending": make(map[string]map[string]string), + "queued": make(map[string]map[string]string), + } + pending, queue := s.b.TxPoolContent() + + // Define a formatter to flatten a transaction into a string + var format = func(tx *types.Transaction) string { + if to := tx.To(); to != nil { + return fmt.Sprintf("%s: %v wei + %v gas × %v wei", tx.To().Hex(), tx.Value(), tx.Gas(), tx.GasPrice()) + } + return fmt.Sprintf("contract creation: %v wei + %v gas × %v wei", tx.Value(), tx.Gas(), tx.GasPrice()) + } + // Flatten the pending transactions + for account, txs := range pending { + dump := make(map[string]string) + for _, tx := range txs { + dump[fmt.Sprintf("%d", tx.Nonce())] = format(tx) + } + content["pending"][account.Hex()] = dump + } + // Flatten the queued transactions + for account, txs := range queue { + dump := make(map[string]string) + for _, tx := range txs { + dump[fmt.Sprintf("%d", tx.Nonce())] = format(tx) + } + content["queued"][account.Hex()] = dump + } + return content +} +*/ diff --git a/cmd/rpcdaemon22/commands/txpool_api_test.go b/cmd/rpcdaemon22/commands/txpool_api_test.go new file mode 100644 index 00000000000..18c02ded7e8 --- /dev/null +++ b/cmd/rpcdaemon22/commands/txpool_api_test.go @@ -0,0 +1,64 @@ +package commands + +import ( + "bytes" + "fmt" + "testing" + + "github.com/holiman/uint256" + "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" + txPoolProto "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" + "github.com/ledgerwatch/erigon-lib/kv/kvcache" + "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/rpcdaemontest" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/hexutil" + "github.com/ledgerwatch/erigon/core" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/turbo/rpchelper" + 
"github.com/ledgerwatch/erigon/turbo/snapshotsync" + "github.com/ledgerwatch/erigon/turbo/stages" + "github.com/stretchr/testify/require" +) + +func TestTxPoolContent(t *testing.T) { + m, require := stages.MockWithTxPool(t), require.New(t) + chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 1, func(i int, b *core.BlockGen) { + b.SetCoinbase(common.Address{1}) + }, false /* intermediateHashes */) + require.NoError(err) + err = m.InsertChain(chain) + require.NoError(err) + + ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, m) + txPool := txpool.NewTxpoolClient(conn) + ff := rpchelper.New(ctx, nil, txPool, txpool.NewMiningClient(conn), func() {}) + api := NewTxPoolAPI(NewBaseApi(ff, kvcache.New(kvcache.DefaultCoherentConfig), snapshotsync.NewBlockReader(), false), m.DB, txPool) + + expectValue := uint64(1234) + txn, err := types.SignTx(types.NewTransaction(0, common.Address{1}, uint256.NewInt(expectValue), params.TxGas, uint256.NewInt(10*params.GWei), nil), *types.LatestSignerForChainID(m.ChainConfig.ChainID), m.Key) + require.NoError(err) + + buf := bytes.NewBuffer(nil) + err = txn.MarshalBinary(buf) + require.NoError(err) + + reply, err := txPool.Add(ctx, &txpool.AddRequest{RlpTxs: [][]byte{buf.Bytes()}}) + require.NoError(err) + for _, res := range reply.Imported { + require.Equal(res, txPoolProto.ImportResult_SUCCESS, fmt.Sprintf("%s", reply.Errors)) + } + + content, err := api.Content(ctx) + require.NoError(err) + + sender := m.Address.String() + require.Equal(1, len(content["pending"][sender])) + require.Equal(expectValue, content["pending"][sender]["0"].Value.ToInt().Uint64()) + + status, err := api.Status(ctx) + require.NoError(err) + require.Len(status, 3) + require.Equal(status["pending"], hexutil.Uint(1)) + require.Equal(status["queued"], hexutil.Uint(0)) +} diff --git a/cmd/rpcdaemon22/commands/validator_set.go b/cmd/rpcdaemon22/commands/validator_set.go new file mode 100644 index 00000000000..5ebbe35217a --- /dev/null +++ b/cmd/rpcdaemon22/commands/validator_set.go @@ -0,0 +1,702 @@ +package commands + +// Tendermint leader selection algorithm + +import ( + "bytes" + "fmt" + "math" + "math/big" + "sort" + "strings" + + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/consensus/bor" + "github.com/ledgerwatch/log/v3" +) + +// MaxTotalVotingPower - the maximum allowed total voting power. +// It needs to be sufficiently small to, in all cases: +// 1. prevent clipping in incrementProposerPriority() +// 2. let (diff+diffMax-1) not overflow in IncrementProposerPriority() +// (Proof of 1 is tricky, left to the reader). +// It could be higher, but this is sufficiently large for our purposes, +// and leaves room for defensive purposes. +// PriorityWindowSizeFactor - is a constant that when multiplied with the total voting power gives +// the maximum allowed distance between validator priorities. + +const ( + MaxTotalVotingPower = int64(math.MaxInt64) / 8 + PriorityWindowSizeFactor = 2 +) + +// ValidatorSet represent a set of *Validator at a given height. +// The validators can be fetched by address or index. +// The index is in order of .Address, so the indices are fixed +// for all rounds of a given blockchain height - ie. the validators +// are sorted by their address. +// On the other hand, the .ProposerPriority of each validator and +// the designated .GetProposer() of a set changes every round, +// upon calling .IncrementProposerPriority(). +// NOTE: Not goroutine-safe. 
+// NOTE: All get/set to validators should copy the value for safety. +type ValidatorSet struct { + // NOTE: persisted via reflect, must be exported. + Validators []*bor.Validator `json:"validators"` + Proposer *bor.Validator `json:"proposer"` + + // cached (unexported) + totalVotingPower int64 +} + +// NewValidatorSet initializes a ValidatorSet by copying over the +// values from `valz`, a list of Validators. If valz is nil or empty, +// the new ValidatorSet will have an empty list of Validators. +// The addresses of validators in `valz` must be unique otherwise the +// function panics. +func NewValidatorSet(valz []*bor.Validator) *ValidatorSet { + vals := &ValidatorSet{} + err := vals.updateWithChangeSet(valz, false) + if err != nil { + panic(fmt.Sprintf("cannot create validator set: %s", err)) + } + if len(valz) > 0 { + vals.IncrementProposerPriority(1) + } + return vals +} + +// Nil or empty validator sets are invalid. +func (vals *ValidatorSet) IsNilOrEmpty() bool { + return vals == nil || len(vals.Validators) == 0 +} + +// Increment ProposerPriority and update the proposer on a copy, and return it. +func (vals *ValidatorSet) CopyIncrementProposerPriority(times int) *ValidatorSet { + copy := vals.Copy() + copy.IncrementProposerPriority(times) + return copy +} + +// IncrementProposerPriority increments ProposerPriority of each validator and updates the +// proposer. Panics if validator set is empty. +// `times` must be positive. +func (vals *ValidatorSet) IncrementProposerPriority(times int) { + if vals.IsNilOrEmpty() { + panic("empty validator set") + } + if times <= 0 { + panic("Cannot call IncrementProposerPriority with non-positive times") + } + + // Cap the difference between priorities to be proportional to 2*totalPower by + // re-normalizing priorities, i.e., rescale all priorities by multiplying with: + // 2*totalVotingPower/(maxPriority - minPriority) + diffMax := PriorityWindowSizeFactor * vals.TotalVotingPower() + vals.RescalePriorities(diffMax) + vals.shiftByAvgProposerPriority() + + var proposer *bor.Validator + // Call IncrementProposerPriority(1) times times. + for i := 0; i < times; i++ { + proposer = vals.incrementProposerPriority() + } + + vals.Proposer = proposer +} + +func (vals *ValidatorSet) RescalePriorities(diffMax int64) { + if vals.IsNilOrEmpty() { + panic("empty validator set") + } + // NOTE: This check is merely a sanity check which could be + // removed if all tests would init. voting power appropriately; + // i.e. diffMax should always be > 0 + if diffMax <= 0 { + return + } + + // Calculating ceil(diff/diffMax): + // Re-normalization is performed by dividing by an integer for simplicity. + // NOTE: This may make debugging priority issues easier as well. + diff := computeMaxMinPriorityDiff(vals) + ratio := (diff + diffMax - 1) / diffMax + if diff > diffMax { + for _, val := range vals.Validators { + val.ProposerPriority = val.ProposerPriority / ratio + } + } +} + +func (vals *ValidatorSet) incrementProposerPriority() *bor.Validator { + for _, val := range vals.Validators { + // Check for overflow for sum. + newPrio := safeAddClip(val.ProposerPriority, val.VotingPower) + val.ProposerPriority = newPrio + } + // Decrement the validator with most ProposerPriority. + mostest := vals.getValWithMostPriority() + // Mind the underflow. + mostest.ProposerPriority = safeSubClip(mostest.ProposerPriority, vals.TotalVotingPower()) + + return mostest +} + +// Should not be called on an empty validator set. 
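+// It sums priorities in a big.Int to avoid intermediate overflow, then divides by the number of validators; e.g. priorities {10, -4, 0} average to 2, and shiftByAvgProposerPriority then subtracts that 2 from every validator.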
+func (vals *ValidatorSet) computeAvgProposerPriority() int64 { + n := int64(len(vals.Validators)) + sum := big.NewInt(0) + for _, val := range vals.Validators { + sum.Add(sum, big.NewInt(val.ProposerPriority)) + } + avg := sum.Div(sum, big.NewInt(n)) + if avg.IsInt64() { + return avg.Int64() + } + + // This should never happen: each val.ProposerPriority is in bounds of int64. + panic(fmt.Sprintf("Cannot represent avg ProposerPriority as an int64 %v", avg)) +} + +// Compute the difference between the max and min ProposerPriority of that set. +func computeMaxMinPriorityDiff(vals *ValidatorSet) int64 { + if vals.IsNilOrEmpty() { + panic("empty validator set") + } + max := int64(math.MinInt64) + min := int64(math.MaxInt64) + for _, v := range vals.Validators { + if v.ProposerPriority < min { + min = v.ProposerPriority + } + if v.ProposerPriority > max { + max = v.ProposerPriority + } + } + diff := max - min + if diff < 0 { + return -1 * diff + } else { + return diff + } +} + +func (vals *ValidatorSet) getValWithMostPriority() *bor.Validator { + var res *bor.Validator + for _, val := range vals.Validators { + res = res.Cmp(val) + } + return res +} + +func (vals *ValidatorSet) shiftByAvgProposerPriority() { + if vals.IsNilOrEmpty() { + panic("empty validator set") + } + avgProposerPriority := vals.computeAvgProposerPriority() + for _, val := range vals.Validators { + val.ProposerPriority = safeSubClip(val.ProposerPriority, avgProposerPriority) + } +} + +// Makes a copy of the validator list. +func validatorListCopy(valsList []*bor.Validator) []*bor.Validator { + if valsList == nil { + return nil + } + valsCopy := make([]*bor.Validator, len(valsList)) + for i, val := range valsList { + valsCopy[i] = val.Copy() + } + return valsCopy +} + +// Copy each validator into a new ValidatorSet. +func (vals *ValidatorSet) Copy() *ValidatorSet { + return &ValidatorSet{ + Validators: validatorListCopy(vals.Validators), + Proposer: vals.Proposer, + totalVotingPower: vals.totalVotingPower, + } +} + +// HasAddress returns true if address given is in the validator set, false - +// otherwise. +func (vals *ValidatorSet) HasAddress(address []byte) bool { + idx := sort.Search(len(vals.Validators), func(i int) bool { + return bytes.Compare(address, vals.Validators[i].Address.Bytes()) <= 0 + }) + return idx < len(vals.Validators) && bytes.Equal(vals.Validators[idx].Address.Bytes(), address) +} + +// GetByAddress returns an index of the validator with address and validator +// itself if found. Otherwise, -1 and nil are returned. +func (vals *ValidatorSet) GetByAddress(address common.Address) (index int, val *bor.Validator) { + idx := sort.Search(len(vals.Validators), func(i int) bool { + return bytes.Compare(address.Bytes(), vals.Validators[i].Address.Bytes()) <= 0 + }) + if idx < len(vals.Validators) && bytes.Equal(vals.Validators[idx].Address.Bytes(), address.Bytes()) { + return idx, vals.Validators[idx].Copy() + } + return -1, nil +} + +// GetByIndex returns the validator's address and validator itself by index. +// It returns nil values if index is less than 0 or greater or equal to +// len(ValidatorSet.Validators). +func (vals *ValidatorSet) GetByIndex(index int) (address []byte, val *bor.Validator) { + if index < 0 || index >= len(vals.Validators) { + return nil, nil + } + val = vals.Validators[index] + return val.Address.Bytes(), val.Copy() +} + +// Size returns the length of the validator set. 
+func (vals *ValidatorSet) Size() int { + return len(vals.Validators) +} + +// Force recalculation of the set's total voting power. +func (vals *ValidatorSet) updateTotalVotingPower() error { + + sum := int64(0) + for _, val := range vals.Validators { + // mind overflow + sum = safeAddClip(sum, val.VotingPower) + if sum > MaxTotalVotingPower { + return &bor.TotalVotingPowerExceededError{Sum: sum, Validators: vals.Validators} + } + } + vals.totalVotingPower = sum + return nil +} + +// TotalVotingPower returns the sum of the voting powers of all validators. +// It recomputes the total voting power if required. +func (vals *ValidatorSet) TotalVotingPower() int64 { + if vals.totalVotingPower == 0 { + log.Info("invoking updateTotalVotingPower before returning it") + if err := vals.updateTotalVotingPower(); err != nil { + // Can/should we do better? + panic(err) + } + } + return vals.totalVotingPower +} + +// GetProposer returns the current proposer. If the validator set is empty, nil +// is returned. +func (vals *ValidatorSet) GetProposer() (proposer *bor.Validator) { + if len(vals.Validators) == 0 { + return nil + } + if vals.Proposer == nil { + vals.Proposer = vals.findProposer() + } + return vals.Proposer.Copy() +} + +func (vals *ValidatorSet) findProposer() *bor.Validator { + var proposer *bor.Validator + for _, val := range vals.Validators { + if proposer == nil || !bytes.Equal(val.Address.Bytes(), proposer.Address.Bytes()) { + proposer = proposer.Cmp(val) + } + } + return proposer +} + +// Hash returns the Merkle root hash build using validators (as leaves) in the +// set. +// func (vals *ValidatorSet) Hash() []byte { +// if len(vals.Validators) == 0 { +// return nil +// } +// bzs := make([][]byte, len(vals.Validators)) +// for i, val := range vals.Validators { +// bzs[i] = val.Bytes() +// } +// return merkle.SimpleHashFromByteSlices(bzs) +// } + +// Iterate will run the given function over the set. +func (vals *ValidatorSet) Iterate(fn func(index int, val *bor.Validator) bool) { + for i, val := range vals.Validators { + stop := fn(i, val.Copy()) + if stop { + break + } + } +} + +// Checks changes against duplicates, splits the changes in updates and removals, sorts them by address. +// +// Returns: +// updates, removals - the sorted lists of updates and removals +// err - non-nil if duplicate entries or entries with negative voting power are seen +// +// No changes are made to 'origChanges'. +func processChanges(origChanges []*bor.Validator) (updates, removals []*bor.Validator, err error) { + // Make a deep copy of the changes and sort by address. + changes := validatorListCopy(origChanges) + sort.Sort(ValidatorsByAddress(changes)) + + removals = make([]*bor.Validator, 0, len(changes)) + updates = make([]*bor.Validator, 0, len(changes)) + var prevAddr common.Address + + // Scan changes by address and append valid validators to updates or removals lists. 
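+ // The checks below classify each entry: a duplicate address or a negative voting power is an error, a power above MaxTotalVotingPower is rejected to prevent clipping/overflow, a power of exactly 0 marks a removal, and any other positive power becomes an update.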
+ for _, valUpdate := range changes { + if bytes.Equal(valUpdate.Address.Bytes(), prevAddr.Bytes()) { + err = fmt.Errorf("duplicate entry %v in %v", valUpdate, changes) + return nil, nil, err + } + if valUpdate.VotingPower < 0 { + err = fmt.Errorf("voting power can't be negative: %v", valUpdate) + return nil, nil, err + } + if valUpdate.VotingPower > MaxTotalVotingPower { + err = fmt.Errorf("to prevent clipping/ overflow, voting power can't be higher than %v: %v ", + MaxTotalVotingPower, valUpdate) + return nil, nil, err + } + if valUpdate.VotingPower == 0 { + removals = append(removals, valUpdate) + } else { + updates = append(updates, valUpdate) + } + prevAddr = valUpdate.Address + } + return updates, removals, err +} + +// Verifies a list of updates against a validator set, making sure the allowed +// total voting power would not be exceeded if these updates would be applied to the set. +// +// Returns: +// updatedTotalVotingPower - the new total voting power if these updates would be applied +// numNewValidators - number of new validators +// err - non-nil if the maximum allowed total voting power would be exceeded +// +// 'updates' should be a list of proper validator changes, i.e. they have been verified +// by processChanges for duplicates and invalid values. +// No changes are made to the validator set 'vals'. +func verifyUpdates(updates []*bor.Validator, vals *ValidatorSet) (updatedTotalVotingPower int64, numNewValidators int, err error) { + + updatedTotalVotingPower = vals.TotalVotingPower() + + for _, valUpdate := range updates { + address := valUpdate.Address + _, val := vals.GetByAddress(address) + if val == nil { + // New validator, add its voting power the the total. + updatedTotalVotingPower += valUpdate.VotingPower + numNewValidators++ + } else { + // Updated validator, add the difference in power to the total. + updatedTotalVotingPower += valUpdate.VotingPower - val.VotingPower + } + overflow := updatedTotalVotingPower > MaxTotalVotingPower + if overflow { + err = fmt.Errorf( + "failed to add/update validator %v, total voting power would exceed the max allowed %v", + valUpdate, MaxTotalVotingPower) + return 0, 0, err + } + } + + return updatedTotalVotingPower, numNewValidators, nil +} + +// Computes the proposer priority for the validators not present in the set based on 'updatedTotalVotingPower'. +// Leaves unchanged the priorities of validators that are changed. +// +// 'updates' parameter must be a list of unique validators to be added or updated. +// No changes are made to the validator set 'vals'. +func computeNewPriorities(updates []*bor.Validator, vals *ValidatorSet, updatedTotalVotingPower int64) { + + for _, valUpdate := range updates { + address := valUpdate.Address + _, val := vals.GetByAddress(address) + if val == nil { + // add val + // Set ProposerPriority to -C*totalVotingPower (with C ~= 1.125) to make sure validators can't + // un-bond and then re-bond to reset their (potentially previously negative) ProposerPriority to zero. + // + // Contract: updatedVotingPower < MaxTotalVotingPower to ensure ProposerPriority does + // not exceed the bounds of int64. + // + // Compute ProposerPriority = -1.125*totalVotingPower == -(updatedVotingPower + (updatedVotingPower >> 3)). + valUpdate.ProposerPriority = -(updatedTotalVotingPower + (updatedTotalVotingPower >> 3)) + } else { + valUpdate.ProposerPriority = val.ProposerPriority + } + } + +} + +// Merges the vals' validator list with the updates list. 
+// When two elements with same address are seen, the one from updates is selected. +// Expects updates to be a list of updates sorted by address with no duplicates or errors, +// must have been validated with verifyUpdates() and priorities computed with computeNewPriorities(). +func (vals *ValidatorSet) applyUpdates(updates []*bor.Validator) { + + existing := vals.Validators + merged := make([]*bor.Validator, len(existing)+len(updates)) + i := 0 + + for len(existing) > 0 && len(updates) > 0 { + if bytes.Compare(existing[0].Address.Bytes(), updates[0].Address.Bytes()) < 0 { // unchanged validator + merged[i] = existing[0] + existing = existing[1:] + } else { + // Apply add or update. + merged[i] = updates[0] + if bytes.Equal(existing[0].Address.Bytes(), updates[0].Address.Bytes()) { + // bor.Validator is present in both, advance existing. + existing = existing[1:] + } + updates = updates[1:] + } + i++ + } + + // Add the elements which are left. + for j := 0; j < len(existing); j++ { + merged[i] = existing[j] + i++ + } + // OR add updates which are left. + for j := 0; j < len(updates); j++ { + merged[i] = updates[j] + i++ + } + + vals.Validators = merged[:i] +} + +// Checks that the validators to be removed are part of the validator set. +// No changes are made to the validator set 'vals'. +func verifyRemovals(deletes []*bor.Validator, vals *ValidatorSet) error { + + for _, valUpdate := range deletes { + address := valUpdate.Address + _, val := vals.GetByAddress(address) + if val == nil { + return fmt.Errorf("failed to find validator %X to remove", address) + } + } + if len(deletes) > len(vals.Validators) { + panic("more deletes than validators") + } + return nil +} + +// Removes the validators specified in 'deletes' from validator set 'vals'. +// Should not fail as verification has been done before. +func (vals *ValidatorSet) applyRemovals(deletes []*bor.Validator) { + + existing := vals.Validators + + merged := make([]*bor.Validator, len(existing)-len(deletes)) + i := 0 + + // Loop over deletes until we removed all of them. + for len(deletes) > 0 { + if bytes.Equal(existing[0].Address.Bytes(), deletes[0].Address.Bytes()) { + deletes = deletes[1:] + } else { // Leave it in the resulting slice. + merged[i] = existing[0] + i++ + } + existing = existing[1:] + } + + // Add the elements which are left. + for j := 0; j < len(existing); j++ { + merged[i] = existing[j] + i++ + } + + vals.Validators = merged[:i] +} + +// Main function used by UpdateWithChangeSet() and NewValidatorSet(). +// If 'allowDeletes' is false then delete operations (identified by validators with voting power 0) +// are not allowed and will trigger an error if present in 'changes'. +// The 'allowDeletes' flag is set to false by NewValidatorSet() and to true by UpdateWithChangeSet(). +func (vals *ValidatorSet) updateWithChangeSet(changes []*bor.Validator, allowDeletes bool) error { + + if len(changes) < 1 { + return nil + } + + // Check for duplicates within changes, split in 'updates' and 'deletes' lists (sorted). + updates, deletes, err := processChanges(changes) + if err != nil { + return err + } + + if !allowDeletes && len(deletes) != 0 { + return fmt.Errorf("cannot process validators with voting power 0: %v", deletes) + } + + // Verify that applying the 'deletes' against 'vals' will not result in error. + if err := verifyRemovals(deletes, vals); err != nil { + return err + } + + // Verify that applying the 'updates' against 'vals' will not result in error. 
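+ // verifyUpdates also returns the projected total voting power and the number of brand-new validators; both are used below for the empty-set check and by computeNewPriorities.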
+ updatedTotalVotingPower, numNewValidators, err := verifyUpdates(updates, vals) + if err != nil { + return err + } + + // Check that the resulting set will not be empty. + if numNewValidators == 0 && len(vals.Validators) == len(deletes) { + return fmt.Errorf("applying the validator changes would result in empty set") + } + + // Compute the priorities for updates. + computeNewPriorities(updates, vals, updatedTotalVotingPower) + + // Apply updates and removals. + vals.applyUpdates(updates) + vals.applyRemovals(deletes) + + if err := vals.updateTotalVotingPower(); err != nil { + return err + } + + // Scale and center. + vals.RescalePriorities(PriorityWindowSizeFactor * vals.TotalVotingPower()) + vals.shiftByAvgProposerPriority() + + return nil +} + +// UpdateWithChangeSet attempts to update the validator set with 'changes'. +// It performs the following steps: +// - validates the changes making sure there are no duplicates and splits them in updates and deletes +// - verifies that applying the changes will not result in errors +// - computes the total voting power BEFORE removals to ensure that in the next steps the priorities +// across old and newly added validators are fair +// - computes the priorities of new validators against the final set +// - applies the updates against the validator set +// - applies the removals against the validator set +// - performs scaling and centering of priority values +// If an error is detected during verification steps, it is returned and the validator set +// is not changed. +func (vals *ValidatorSet) UpdateWithChangeSet(changes []*bor.Validator) error { + return vals.updateWithChangeSet(changes, true) +} + +//----------------- +// ErrTooMuchChange + +func IsErrTooMuchChange(err error) bool { + switch err.(type) { + case errTooMuchChange: + return true + default: + return false + } +} + +type errTooMuchChange struct { + got int64 + needed int64 +} + +func (e errTooMuchChange) Error() string { + return fmt.Sprintf("Invalid commit -- insufficient old voting power: got %v, needed %v", e.got, e.needed) +} + +//---------------- + +func (vals *ValidatorSet) String() string { + return vals.StringIndented("") +} + +func (vals *ValidatorSet) StringIndented(indent string) string { + if vals == nil { + return "nil-ValidatorSet" + } + var valStrings []string + vals.Iterate(func(index int, val *bor.Validator) bool { + valStrings = append(valStrings, val.String()) + return false + }) + return fmt.Sprintf(`ValidatorSet{ +%s Proposer: %v +%s Validators: +%s %v +%s}`, + indent, vals.GetProposer().String(), + indent, + indent, strings.Join(valStrings, "\n"+indent+" "), + indent) + +} + +//------------------------------------- +// Implements sort for sorting validators by address. + +// Sort validators by address. 
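+// ValidatorsByAddress implements sort.Interface, so callers can write sort.Sort(ValidatorsByAddress(changes)), as processChanges does above.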
+type ValidatorsByAddress []*bor.Validator + +func (valz ValidatorsByAddress) Len() int { + return len(valz) +} + +func (valz ValidatorsByAddress) Less(i, j int) bool { + return bytes.Compare(valz[i].Address.Bytes(), valz[j].Address.Bytes()) == -1 +} + +func (valz ValidatorsByAddress) Swap(i, j int) { + valz[i], valz[j] = valz[j], valz[i] +} + +/////////////////////////////////////////////////////////////////////////////// +// safe addition/subtraction + +func safeAdd(a, b int64) (int64, bool) { + if b > 0 && a > math.MaxInt64-b { + return -1, true + } else if b < 0 && a < math.MinInt64-b { + return -1, true + } + return a + b, false +} + +func safeSub(a, b int64) (int64, bool) { + if b > 0 && a < math.MinInt64+b { + return -1, true + } else if b < 0 && a > math.MaxInt64+b { + return -1, true + } + return a - b, false +} + +func safeAddClip(a, b int64) int64 { + c, overflow := safeAdd(a, b) + if overflow { + if b < 0 { + return math.MinInt64 + } + return math.MaxInt64 + } + return c +} + +func safeSubClip(a, b int64) int64 { + c, overflow := safeSub(a, b) + if overflow { + if b > 0 { + return math.MinInt64 + } + return math.MaxInt64 + } + return c +} diff --git a/cmd/rpcdaemon22/commands/web3_api.go b/cmd/rpcdaemon22/commands/web3_api.go new file mode 100644 index 00000000000..c35f62c632e --- /dev/null +++ b/cmd/rpcdaemon22/commands/web3_api.go @@ -0,0 +1,38 @@ +package commands + +import ( + "context" + + "github.com/ledgerwatch/erigon/common/hexutil" + "github.com/ledgerwatch/erigon/crypto" + "github.com/ledgerwatch/erigon/turbo/rpchelper" +) + +// Web3API provides interfaces for the web3_ RPC commands +type Web3API interface { + ClientVersion(_ context.Context) (string, error) + Sha3(_ context.Context, input hexutil.Bytes) hexutil.Bytes +} + +type Web3APIImpl struct { + *BaseAPI + ethBackend rpchelper.ApiBackend +} + +// NewWeb3APIImpl returns Web3APIImpl instance +func NewWeb3APIImpl(ethBackend rpchelper.ApiBackend) *Web3APIImpl { + return &Web3APIImpl{ + BaseAPI: &BaseAPI{}, + ethBackend: ethBackend, + } +} + +// ClientVersion implements web3_clientVersion. Returns the current client version. +func (api *Web3APIImpl) ClientVersion(ctx context.Context) (string, error) { + return api.ethBackend.ClientVersion(ctx) +} + +// Sha3 implements web3_sha3. Returns Keccak-256 (not the standardized SHA3-256) of the given data. 
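+// For example, web3_sha3("0x68656c6c6f20776f726c64"), i.e. "hello world", returns 0x47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad, the value also expected by the Postman fixture later in this patch.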
+func (api *Web3APIImpl) Sha3(_ context.Context, input hexutil.Bytes) hexutil.Bytes { + return crypto.Keccak256(input) +} diff --git a/cmd/rpcdaemon22/health/check_block.go b/cmd/rpcdaemon22/health/check_block.go new file mode 100644 index 00000000000..8978b6ffc4e --- /dev/null +++ b/cmd/rpcdaemon22/health/check_block.go @@ -0,0 +1,23 @@ +package health + +import ( + "context" + "fmt" + + "github.com/ledgerwatch/erigon/rpc" +) + +func checkBlockNumber(blockNumber rpc.BlockNumber, api EthAPI) error { + if api == nil { + return fmt.Errorf("no connection to the Erigon server or `eth` namespace isn't enabled") + } + data, err := api.GetBlockByNumber(context.TODO(), blockNumber, false) + if err != nil { + return err + } + if len(data) == 0 { // block not found + return fmt.Errorf("no known block with number %v (%x hex)", blockNumber, blockNumber) + } + + return nil +} diff --git a/cmd/rpcdaemon22/health/check_peers.go b/cmd/rpcdaemon22/health/check_peers.go new file mode 100644 index 00000000000..818152b668b --- /dev/null +++ b/cmd/rpcdaemon22/health/check_peers.go @@ -0,0 +1,23 @@ +package health + +import ( + "context" + "fmt" +) + +func checkMinPeers(minPeerCount uint, api NetAPI) error { + if api == nil { + return fmt.Errorf("no connection to the Erigon server or `net` namespace isn't enabled") + } + + peerCount, err := api.PeerCount(context.TODO()) + if err != nil { + return err + } + + if uint64(peerCount) < uint64(minPeerCount) { + return fmt.Errorf("not enough peers: %d (minimum %d))", peerCount, minPeerCount) + } + + return nil +} diff --git a/cmd/rpcdaemon22/health/health.go b/cmd/rpcdaemon22/health/health.go new file mode 100644 index 00000000000..311af85c5d9 --- /dev/null +++ b/cmd/rpcdaemon22/health/health.go @@ -0,0 +1,131 @@ +package health + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "strings" + + "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/log/v3" +) + +type requestBody struct { + MinPeerCount *uint `json:"min_peer_count"` + BlockNumber *rpc.BlockNumber `json:"known_block"` +} + +const ( + urlPath = "/health" +) + +var ( + errCheckDisabled = errors.New("error check disabled") +) + +func ProcessHealthcheckIfNeeded( + w http.ResponseWriter, + r *http.Request, + rpcAPI []rpc.API, +) bool { + if !strings.EqualFold(r.URL.Path, urlPath) { + return false + } + + netAPI, ethAPI := parseAPI(rpcAPI) + + var errMinPeerCount = errCheckDisabled + var errCheckBlock = errCheckDisabled + + body, errParse := parseHealthCheckBody(r.Body) + defer r.Body.Close() + + if errParse != nil { + log.Root().Warn("unable to process healthcheck request", "err", errParse) + } else { + // 1. net_peerCount + if body.MinPeerCount != nil { + errMinPeerCount = checkMinPeers(*body.MinPeerCount, netAPI) + } + // 2. 
custom query (shouldn't fail) + if body.BlockNumber != nil { + errCheckBlock = checkBlockNumber(*body.BlockNumber, ethAPI) + } + // TODO add time from the last sync cycle + } + + err := reportHealth(errParse, errMinPeerCount, errCheckBlock, w) + if err != nil { + log.Root().Warn("unable to process healthcheck request", "err", err) + } + + return true +} + +func parseHealthCheckBody(reader io.Reader) (requestBody, error) { + var body requestBody + + bodyBytes, err := io.ReadAll(reader) + if err != nil { + return body, err + } + + err = json.Unmarshal(bodyBytes, &body) + if err != nil { + return body, err + } + + return body, nil +} + +func reportHealth(errParse, errMinPeerCount, errCheckBlock error, w http.ResponseWriter) error { + statusCode := http.StatusOK + errors := make(map[string]string) + + if shouldChangeStatusCode(errParse) { + statusCode = http.StatusInternalServerError + } + errors["healthcheck_query"] = errorStringOrOK(errParse) + + if shouldChangeStatusCode(errMinPeerCount) { + statusCode = http.StatusInternalServerError + } + errors["min_peer_count"] = errorStringOrOK(errMinPeerCount) + + if shouldChangeStatusCode(errCheckBlock) { + statusCode = http.StatusInternalServerError + } + errors["check_block"] = errorStringOrOK(errCheckBlock) + + w.WriteHeader(statusCode) + + bodyJson, err := json.Marshal(errors) + if err != nil { + return err + } + + _, err = w.Write(bodyJson) + if err != nil { + return err + } + + return nil +} + +func shouldChangeStatusCode(err error) bool { + return err != nil && !errors.Is(err, errCheckDisabled) +} + +func errorStringOrOK(err error) string { + if err == nil { + return "HEALTHY" + } + + if errors.Is(err, errCheckDisabled) { + return "DISABLED" + } + + return fmt.Sprintf("ERROR: %v", err) +} diff --git a/cmd/rpcdaemon22/health/interfaces.go b/cmd/rpcdaemon22/health/interfaces.go new file mode 100644 index 00000000000..4cf0fc6892b --- /dev/null +++ b/cmd/rpcdaemon22/health/interfaces.go @@ -0,0 +1,16 @@ +package health + +import ( + "context" + + "github.com/ledgerwatch/erigon/common/hexutil" + "github.com/ledgerwatch/erigon/rpc" +) + +type NetAPI interface { + PeerCount(_ context.Context) (hexutil.Uint, error) +} + +type EthAPI interface { + GetBlockByNumber(_ context.Context, number rpc.BlockNumber, fullTx bool) (map[string]interface{}, error) +} diff --git a/cmd/rpcdaemon22/health/parse_api.go b/cmd/rpcdaemon22/health/parse_api.go new file mode 100644 index 00000000000..21e003e5a59 --- /dev/null +++ b/cmd/rpcdaemon22/health/parse_api.go @@ -0,0 +1,22 @@ +package health + +import ( + "github.com/ledgerwatch/erigon/rpc" +) + +func parseAPI(api []rpc.API) (netAPI NetAPI, ethAPI EthAPI) { + for _, rpc := range api { + if rpc.Service == nil { + continue + } + + if netCandidate, ok := rpc.Service.(NetAPI); ok { + netAPI = netCandidate + } + + if ethCandidate, ok := rpc.Service.(EthAPI); ok { + ethAPI = ethCandidate + } + } + return netAPI, ethAPI +} diff --git a/cmd/rpcdaemon22/main.go b/cmd/rpcdaemon22/main.go new file mode 100644 index 00000000000..8d95c899568 --- /dev/null +++ b/cmd/rpcdaemon22/main.go @@ -0,0 +1,42 @@ +package main + +import ( + "os" + + "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/cli" + "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/commands" + "github.com/ledgerwatch/log/v3" + "github.com/spf13/cobra" +) + +func main() { + cmd, cfg := cli.RootCommand() + rootCtx, rootCancel := common.RootContext() + cmd.RunE = func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() 
+ logger := log.New() + db, borDb, backend, txPool, mining, starknet, stateCache, blockReader, ff, err := cli.RemoteServices(ctx, *cfg, logger, rootCancel) + if err != nil { + log.Error("Could not connect to DB", "err", err) + return nil + } + defer db.Close() + if borDb != nil { + defer borDb.Close() + } + + apiList := commands.APIList(db, borDb, backend, txPool, mining, starknet, ff, stateCache, blockReader, *cfg) + if err := cli.StartRpcServer(ctx, *cfg, apiList); err != nil { + log.Error(err.Error()) + return nil + } + + return nil + } + + if err := cmd.ExecuteContext(rootCtx); err != nil { + log.Error(err.Error()) + os.Exit(1) + } +} diff --git a/cmd/rpcdaemon22/postman/README.md b/cmd/rpcdaemon22/postman/README.md new file mode 100644 index 00000000000..0b9c2321838 --- /dev/null +++ b/cmd/rpcdaemon22/postman/README.md @@ -0,0 +1,18 @@ +# Postman testing + +There are two files here: + +- RPC_Testing.json +- Trace_Testing.json + +You can import them into Postman using these +instructions: https://github.com/ledgerwatch/erigon/wiki/Using-Postman-to-Test-TurboGeth-RPC + +The first one is used to generate help text and other documentation as well as running a sanity check against a new +release. There is basically one test for each of the 81 RPC endpoints. + +The second file contains 31 test cases specifically for the nine trace routines (five tests for five of the routines, +three for another, one each for the other three). + +Another collection of related tests can be found +here: https://github.com/Great-Hill-Corporation/trueblocks-core/tree/develop/src/other/trace_tests diff --git a/cmd/rpcdaemon22/postman/RPC_Testing.json b/cmd/rpcdaemon22/postman/RPC_Testing.json new file mode 100644 index 00000000000..0dce3725afc --- /dev/null +++ b/cmd/rpcdaemon22/postman/RPC_Testing.json @@ -0,0 +1,4235 @@ +{ + "info": { + "_postman_id": "72c52f91-c09d-4af6-abb4-162b9c5532b2", + "name": "RPC_Testing", + "description": "A collection holding all the Ethereum JSON RPC API calls", + "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json" + }, + "item": [ + { + "name": "web3", + "item": [ + { + "name": "clientVersion", + "event": [ + { + "listen": "test", + "script": { + "id": "6c4da7d1-aa83-40f8-bdad-b68cb42415a4", + "exec": [ + "pm.test('Has correct result', function() {", + " const jsonData = pm.response.json();", + " var messages = {", + " '{{NETHERMIND}}': 'Nethermind',", + " '{{ERIGON}}': 'Erigon',", + " '{{SILKRPC}}': 'Erigon',", + " '{{PARITY}}': 'Parity-Ethereum',", + " }", + " var parts = jsonData.result.split('/');", + " pm.expect(parts[0]).to.deep.equals(messages[pm.environment.get('HOST')]);", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"jsonrpc\": \"2.0\",\n \"method\": \"web3_clientVersion\",\n \"params\": [],\n \"id\": \"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns the current client version.\r\n\r\n**Parameters**\r\n\r\nNone\r\n\r\n**Returns**\r\n\r\nSTRING - The current client version string including node name and version" + }, + "response": [] + }, + { + "name": "sha3", + "event": [ + { + "listen": "test", + "script": { + "id": "d8ebbf3d-8ae7-460a-9808-4b4b8a08d289", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"id\": \"1\",", + " \"result\": 
\"0x47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad\"", + "}", + "", + "pm.test('Has correct result', function() {", + " pm.expect(pm.response.json()).to.be.deep.equal(expected);", + "})", + "" + ], + "type": "text/javascript" + } + }, + { + "listen": "prerequest", + "script": { + "id": "78f0ca53-f4fe-4396-a87a-e1c81899822a", + "exec": [""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"web3_sha3\",\n\t\"params\":[\"0x68656c6c6f20776f726c64\"],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns Keccak-256 (not the standardized SHA3-256) of the given data.\r\n\r\n**Parameters**\r\n\r\nDATA - The data to convert into a SHA3 hash\r\n\r\n**Returns**\r\n\r\nDATA - The SHA3 result of the given input string" + }, + "response": [] + } + ], + "protocolProfileBehavior": {} + }, + { + "name": "net", + "item": [ + { + "name": "listening", + "event": [ + { + "listen": "test", + "script": { + "id": "322f2289-938f-4cfb-adde-3d4f0c54455e", + "exec": [ + "expected = {", + " \"jsonrpc\": \"2.0\",", + " \"id\": \"1\",", + " \"result\": true", + "}", + "", + "pm.test('Returns true (hardcoded)', function() {", + " pm.expect(pm.response.json()).to.be.deep.equal(expected)", + "});" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"net_listening\",\n\t\"params\":[],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns true if client is actively listening for network connections.\r\n\r\n**TODO**\r\n\r\nThe code currently returns a hard coded true value. Remove hard coded value.\r\n\r\n**Parameters**\r\n\r\nNone\r\n\r\n**Returns**\r\n\r\nBoolean - true when listening, false otherwise" + }, + "response": [] + }, + { + "name": "version", + "event": [ + { + "listen": "test", + "script": { + "id": "a7d33b17-7d1d-49db-b30a-82d8c695c1d4", + "exec": [ + "expected = {", + " \"jsonrpc\": \"2.0\",", + " \"id\": \"1\",", + " \"result\": \"1\"", + "}", + "", + "pm.test('Returns true (hardcoded)', function() {", + " pm.expect(pm.response.json()).to.be.deep.equal(expected)", + "});" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"net_version\",\n\t\"params\":[],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns the current network id.\r\n\r\n**Parameters**\r\n\r\nNone\r\n\r\n**Returns**\r\n\r\nSTRING - The current network id. 
One of BR \"1\": Ethereum Mainnet BR \"2\": Morden Testnet (deprecated) BR \"3\": Ropsten Testnet BR \"4\": Rinkeby Testnet BR \"42\": Kovan Testnet BR" + }, + "response": [] + }, + { + "name": "peerCount", + "event": [ + { + "listen": "test", + "script": { + "id": "985b79fb-0c36-421d-8dcf-cc1d619a11e3", + "exec": [ + "expected = {", + " \"jsonrpc\": \"2.0\",", + " \"id\": \"1\",", + " \"result\": \"0x19\"", + "}", + "", + "pm.test('Returns true (hardcoded)', function() {", + " expected.result = pm.response.json().result;", + " pm.expect(pm.response.json()).to.be.deep.equal(expected)", + "});" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"net_peerCount\",\n\t\"params\":[],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns number of peers currently connected to the client.\r\n\r\n**TODO**\r\n\r\nThis routine currently returns a hard coded value of '25'\r\n\r\n**Parameters**\r\n\r\nNone\r\n\r\n**Returns**\r\n\r\nQUANTITY - Integer of the number of connected peers" + }, + "response": [] + } + ], + "protocolProfileBehavior": {} + }, + { + "name": "eth", + "item": [ + { + "name": "blocks", + "item": [ + { + "name": "getBlockByNumber", + "event": [ + { + "listen": "test", + "script": { + "id": "438e5e99-267a-4a47-92f3-d7a9e675f183", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"id\": \"1\",", + " \"result\": {", + " \"difficulty\": \"0xb5708d578a6\",", + " \"extraData\": \"0xd783010400844765746887676f312e352e31856c696e7578\",", + " \"gasLimit\": \"0x2fefd8\",", + " \"gasUsed\": \"0x14820\",", + " \"hash\": \"0x0b4c6fb75ded4b90218cf0346b0885e442878f104e1b60bf75d5b6860eeacd53\",", + " \"logsBloom\": \"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\",", + " \"miner\": \"0x0c729be7c39543c3d549282a40395299d987cec2\",", + " \"mixHash\": \"0x1530cda332d86d5d7462e3a0eb585e22c88348dd796d29e6ef18196a78cdce07\",", + " \"nonce\": \"0x938e5630b060b7d3\",", + " \"number\": \"0xf4629\",", + " \"parentHash\": \"0x96810a6076e621e311a232468bfd3dcfac08f4803b255af0f00300f47981c10f\",", + " \"receiptsRoot\": \"0x075608bec75d988c52ea6750f4c2204fd60082eb1df32cf8f4732e8a591eef62\",", + " \"sha3Uncles\": \"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347\",", + " \"size\": \"0x3e1\",", + " \"stateRoot\": \"0xb3f9408d80048b6f206951c4e387f8da37fb8510eccc18527865fa746c47bbc5\",", + " \"timestamp\": \"0x56bff9bb\",", + " \"totalDifficulty\": \"0x6332227c16fd7c67\",", + " \"transactions\": [", + " \"0x730724cb08a6eb17bf6b3296359d261570d343ea7944a17a9d7287d77900db08\",", + " \"0xef2ea39c20ba09553b2f3cf02380406ac766039ca56612937eed5e7f3503fb3a\",", + " \"0x5352c80aa2073e21ce6c4aa5488c38455f3519955ece7dca5af3e326797bcc63\",", + " \"0x060e4cf9fa8d34a8b423b5b3691b2541255ff7974ff16699e104edcfb63bd521\"", + " ],", + " \"transactionsRoot\": 
\"0xb779480508401ddd57f1f1e83a54715dcafc6ccec4e4d842c1b68cb418e6560d\",", + " \"uncles\": []", + " }", + "}", + "", + "pm.test('Has correct result', function() {", + " var isErigon = pm.environment.get('HOST') == \"{{ERIGON}}\";", + " var jsonData = pm.response.json();", + " if (!isErigon) {", + " delete jsonData.result.author;", + " delete jsonData.result.sealFields;", + " }", + " pm.expect(jsonData).to.be.deep.equal(expected);", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_getBlockByNumber\",\n\t\"params\":[\n\t\t\"0xf4629\", \n\t\tfalse\n\t],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns information about a block given the block's number.\r\n\r\n**Parameters**\r\n\r\nTAG - Integer block number or one of \"earliest\", \"latest\" or \"pending\"\r\n\r\nBoolean - If true it returns the full transaction objects, if false only the hashes of the transactions\r\n\r\n**Returns**\r\n\r\nObject - An object of type Block defined as:\r\n\r\nnumber: QUANTITY - The block number or null when pending\r\n\r\nhash: DATA, 32 Bytes - Hash of the block or null when pending\r\n\r\nparentHash: DATA, 32 Bytes - Hash of the parent block\r\n\r\nnonce: DATA, 8 bytes - Hash of the proof of work or null when pending\r\n\r\nsha3Uncles: DATA, 32 Bytes - SHA3 of the uncles data in the block\r\n\r\nlogsBloom: DATA, 256 Bytes - The bloom filter for the block's logs or null when pending\r\n\r\ntransactionsRoot: DATA, 32 Bytes - The root of the transaction trie of the block\r\n\r\nstateRoot: DATA, 32 Bytes - The root of the final state trie of the block\r\n\r\nreceiptsRoot: DATA, 32 Bytes - The root of the receipts trie of the block\r\n\r\nminer: DATA, 20 Bytes - The address of the beneficiary to whom the mining rewards were given\r\n\r\ndifficulty: QUANTITY - Integer of the difficulty for this block\r\n\r\ntotalDifficulty: QUANTITY - Integer of the total difficulty of the chain until this block\r\n\r\nextraData: DATA - The extra data field of this block\r\n\r\nsize: QUANTITY - Integer the size of this block in bytes\r\n\r\ngasLimit: QUANTITY - The maximum gas allowed in this block\r\n\r\ngasUsed: QUANTITY - The total used gas by all transactions in this block\r\n\r\ntimestamp: QUANTITY - The unix timestamp for when the block was collated\r\n\r\ntransactions: ARRAY - Array of transaction objects, or 32 Bytes transaction hashes depending on the last given parameter\r\n\r\nuncles: ARRAY - Array of uncle hashes\r\n\r\n" + }, + "response": [] + }, + { + "name": "getBlockByHash", + "event": [ + { + "listen": "test", + "script": { + "id": "7c760190-bb77-4b63-bba7-93c01a72bd2a", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"id\": \"1\",", + " \"result\": {", + " \"difficulty\": \"0xb5708d578a6\",", + " \"extraData\": \"0xd783010400844765746887676f312e352e31856c696e7578\",", + " \"gasLimit\": \"0x2fefd8\",", + " \"gasUsed\": \"0x14820\",", + " \"hash\": \"0x0b4c6fb75ded4b90218cf0346b0885e442878f104e1b60bf75d5b6860eeacd53\",", + " \"logsBloom\": 
\"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\",", + " \"miner\": \"0x0c729be7c39543c3d549282a40395299d987cec2\",", + " \"mixHash\": \"0x1530cda332d86d5d7462e3a0eb585e22c88348dd796d29e6ef18196a78cdce07\",", + " \"nonce\": \"0x938e5630b060b7d3\",", + " \"number\": \"0xf4629\",", + " \"parentHash\": \"0x96810a6076e621e311a232468bfd3dcfac08f4803b255af0f00300f47981c10f\",", + " \"receiptsRoot\": \"0x075608bec75d988c52ea6750f4c2204fd60082eb1df32cf8f4732e8a591eef62\",", + " \"sha3Uncles\": \"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347\",", + " \"size\": \"0x3e1\",", + " \"stateRoot\": \"0xb3f9408d80048b6f206951c4e387f8da37fb8510eccc18527865fa746c47bbc5\",", + " \"timestamp\": \"0x56bff9bb\",", + " \"totalDifficulty\": \"0x6332227c16fd7c67\",", + " \"transactions\": [", + " \"0x730724cb08a6eb17bf6b3296359d261570d343ea7944a17a9d7287d77900db08\",", + " \"0xef2ea39c20ba09553b2f3cf02380406ac766039ca56612937eed5e7f3503fb3a\",", + " \"0x5352c80aa2073e21ce6c4aa5488c38455f3519955ece7dca5af3e326797bcc63\",", + " \"0x060e4cf9fa8d34a8b423b5b3691b2541255ff7974ff16699e104edcfb63bd521\"", + " ],", + " \"transactionsRoot\": \"0xb779480508401ddd57f1f1e83a54715dcafc6ccec4e4d842c1b68cb418e6560d\",", + " \"uncles\": []", + " }", + "}", + "", + "pm.test('Has correct result', function() {", + " var isErigon = pm.environment.get('HOST') == \"{{ERIGON}}\";", + " var jsonData = pm.response.json();", + " if (!isErigon) {", + " delete jsonData.result.author;", + " delete jsonData.result.sealFields;", + " }", + " pm.expect(jsonData).to.be.deep.equal(expected);", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_getBlockByHash\",\n\t\"params\":[\n\t\t\"0x0b4c6fb75ded4b90218cf0346b0885e442878f104e1b60bf75d5b6860eeacd53\", \n\t\tfalse\n\t],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns information about a block given the block's hash.\r\n\r\n**Parameters**\r\n\r\nDATA, 32 Bytes - Hash of a block\r\n\r\nBoolean - If true it returns the full transaction objects, if false only the hashes of the transactions\r\n\r\n**Returns**\r\n\r\nObject - An object of type Block as described at eth_getBlockByNumber, or null when no block was found" + }, + "response": [] + }, + { + "name": "getBlockTransactionCountByNumber", + "event": [ + { + "listen": "test", + "script": { + "id": "341676cb-5915-48b7-a2b2-146feb7b80a6", + "exec": [ + "pm.test('Has correct result', function() {", + " const jsonData = pm.response.json();", + " pm.expect(jsonData.result).to.be.equals(\"0x4\");", + "});", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": 
"{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_getBlockTransactionCountByNumber\",\n\t\"params\":[\n\t\t\"0xf4629\"\n\t],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns the number of transactions in a block given the block's block number.\r\n\r\n**Parameters**\r\n\r\nTAG - Integer block number or one of \"earliest\", \"latest\" or \"pending\"\r\n\r\n**Returns**\r\n\r\nQUANTITY - Integer of the number of transactions in this block" + }, + "response": [] + }, + { + "name": "getBlockTransactionCountByHash", + "event": [ + { + "listen": "test", + "script": { + "id": "439ec2db-b271-4b15-8fc5-09e86aeed870", + "exec": [ + "pm.test('Has correct result', function() {", + " const jsonData = pm.response.json();", + " pm.expect(jsonData.result).to.be.equals('0x4');", + "});", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_getBlockTransactionCountByHash\",\n\t\"params\":[\n\t\t\"0x0b4c6fb75ded4b90218cf0346b0885e442878f104e1b60bf75d5b6860eeacd53\"\n\t],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns the number of transactions in a block given the block's block hash.\r\n\r\n**Parameters**\r\n\r\nDATA, 32 Bytes - hash of a block\r\n\r\n**Returns**\r\n\r\nQUANTITY - Integer of the number of transactions in this block" + }, + "response": [] + } + ], + "protocolProfileBehavior": {}, + "_postman_isSubFolder": true + }, + { + "name": "txs", + "item": [ + { + "name": "getTransactionByHash", + "event": [ + { + "listen": "test", + "script": { + "id": "68b084bd-9b84-4018-bc45-e947bcf07f95", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"id\": \"1\",", + " \"result\": {", + " \"blockHash\": \"0x785b221ec95c66579d5ae14eebe16284a769e948359615d580f02e646e93f1d5\",", + " \"blockNumber\": \"0x52a90b\",", + " \"from\": \"0x11b6a5fe2906f3354145613db0d99ceb51f604c9\",", + " \"gas\": \"0x6b6c\",", + " \"gasPrice\": \"0x11e1a300\",", + " \"hash\": \"0xb2fea9c4b24775af6990237aa90228e5e092c56bdaee74496992a53c208da1ee\",", + " \"input\": \"0x80dfa34a0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000002e516d556558334448416654747442464a42315454384a617a67765744776a727a7342686973693473547532613551000000000000000000000000000000000000\",", + " \"nonce\": \"0x10\",", + " \"r\": \"0xacdf839bdcb6653da60900f739076a00ecbe0059fa046933348e9b68a62a222\",", + " \"s\": \"0x132a0517a4c52916e0c6b0e74b0479326891df2a9afd711482c7f3919b335ff6\",", + " \"to\": \"0xfa28ec7198028438514b49a3cf353bca5541ce1d\",", + " \"transactionIndex\": \"0x25\",", + " \"v\": \"0x26\",", + " \"value\": \"0x0\"", + " }", + "}", + "", + "pm.test('Has correct result', function() {", + " const jsonData = pm.response.json();", + " var keys = Object.keys(jsonData.result);", + " keys.map(function (k) {", + " var value = jsonData.result[k] ? jsonData.result[k] : null;", + " var expect = expected.result[k] ? 
expected.result[k] : null;", + " if (expect && typeof expect === 'object') {", + " jsonData.result[k].map(function (value, index) {", + " var expect = expected.result[k][index];", + " pm.expect(value).to.be.equal(expect)", + " })", + " } else {", + " pm.expect(value).to.be.equal(expect)", + " }", + " });", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_getTransactionByHash\",\n\t\"params\":[\n\t\t\"0xb2fea9c4b24775af6990237aa90228e5e092c56bdaee74496992a53c208da1ee\"\n\t],\n\t\"id\":\"1\"\n}", + "options": { + "raw": { + "language": "json" + } + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns information about a transaction given the transaction's hash.\r\n\r\n**Parameters**\r\n\r\nDATA, 32 Bytes - hash of a transaction\r\n\r\n**Returns**\r\n\r\nObject - An object of type Transaction or null when no transaction was found\r\n\r\nhash: DATA, 32 Bytes - hash of the transaction\r\n\r\nnonce: QUANTITY - The number of transactions made by the sender prior to this one\r\n\r\nblockHash: DATA, 32 Bytes - hash of the block where this transaction was in. null when its pending\r\n\r\nblockNumber: QUANTITY - block number where this transaction was in. null when its pending\r\n\r\ntransactionIndex: QUANTITY - Integer of the transactions index position in the block. null when its pending\r\n\r\nfrom: DATA, 20 Bytes - address of the sender\r\n\r\nto: DATA, 20 Bytes - address of the receiver. null when its a contract creation transaction\r\n\r\nvalue: QUANTITY - value transferred in Wei\r\n\r\ngasPrice: QUANTITY - gas price provided by the sender in Wei\r\n\r\ngas: QUANTITY - gas provided by the sender\r\n\r\ninput: DATA - The data send along with the transaction" + }, + "response": [] + }, + { + "name": "getTransactionByBlockHashAndIndex", + "event": [ + { + "listen": "test", + "script": { + "id": "593d73f7-fea6-4fd0-bd02-ece07971cd58", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"id\": \"1\",", + " \"result\": {", + " \"blockHash\": \"0x785b221ec95c66579d5ae14eebe16284a769e948359615d580f02e646e93f1d5\",", + " \"blockNumber\": \"0x52a90b\",", + " \"from\": \"0x11b6a5fe2906f3354145613db0d99ceb51f604c9\",", + " \"gas\": \"0x6b6c\",", + " \"gasPrice\": \"0x11e1a300\",", + " \"hash\": \"0xb2fea9c4b24775af6990237aa90228e5e092c56bdaee74496992a53c208da1ee\",", + " \"input\": \"0x80dfa34a0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000002e516d556558334448416654747442464a42315454384a617a67765744776a727a7342686973693473547532613551000000000000000000000000000000000000\",", + " \"nonce\": \"0x10\",", + " \"r\": \"0xacdf839bdcb6653da60900f739076a00ecbe0059fa046933348e9b68a62a222\",", + " \"s\": \"0x132a0517a4c52916e0c6b0e74b0479326891df2a9afd711482c7f3919b335ff6\",", + " \"to\": \"0xfa28ec7198028438514b49a3cf353bca5541ce1d\",", + " \"transactionIndex\": \"0x25\",", + " \"v\": \"0x26\",", + " \"value\": \"0x0\"", + " }", + "}", + "", + "pm.test('Has correct result', function() {", + " const jsonData = pm.response.json();", + " var keys = Object.keys(jsonData.result);", + " keys.map(function (k) {", + " var value = jsonData.result[k] ? jsonData.result[k] : null;", + " var expect = expected.result[k] ? 
expected.result[k] : null;", + " if (expect && typeof expect === 'object') {", + " jsonData.result[k].map(function (value, index) {", + " var expect = expected.result[k][index];", + " pm.expect(value).to.be.equal(expect)", + " })", + " } else {", + " pm.expect(value).to.be.equal(expect)", + " }", + " });", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_getTransactionByBlockHashAndIndex\",\n\t\"params\":[\n\t\t\"0x785b221ec95c66579d5ae14eebe16284a769e948359615d580f02e646e93f1d5\", \n\t\t\"0x25\"\n\t],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns information about a transaction given the block's hash and a transaction index.\r\n\r\n**Parameters**\r\n\r\nDATA, 32 Bytes - hash of a block\r\n\r\nQUANTITY - Integer of the transaction index position\r\n\r\n**Returns**\r\n\r\nObject - An object of type Transaction or null when no transaction was found. See eth_getTransactionByHash" + }, + "response": [] + }, + { + "name": "getTransactionByBlockNumberAndIndex", + "event": [ + { + "listen": "test", + "script": { + "id": "530d1490-3007-499c-ae23-f9fd26f1787b", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"id\": \"1\",", + " \"result\": {", + " \"blockHash\": \"0x785b221ec95c66579d5ae14eebe16284a769e948359615d580f02e646e93f1d5\",", + " \"blockNumber\": \"0x52a90b\",", + " \"from\": \"0x11b6a5fe2906f3354145613db0d99ceb51f604c9\",", + " \"gas\": \"0x6b6c\",", + " \"gasPrice\": \"0x11e1a300\",", + " \"hash\": \"0xb2fea9c4b24775af6990237aa90228e5e092c56bdaee74496992a53c208da1ee\",", + " \"input\": \"0x80dfa34a0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000002e516d556558334448416654747442464a42315454384a617a67765744776a727a7342686973693473547532613551000000000000000000000000000000000000\",", + " \"nonce\": \"0x10\",", + " \"r\": \"0xacdf839bdcb6653da60900f739076a00ecbe0059fa046933348e9b68a62a222\",", + " \"s\": \"0x132a0517a4c52916e0c6b0e74b0479326891df2a9afd711482c7f3919b335ff6\",", + " \"to\": \"0xfa28ec7198028438514b49a3cf353bca5541ce1d\",", + " \"transactionIndex\": \"0x25\",", + " \"v\": \"0x26\",", + " \"value\": \"0x0\"", + " }", + "}", + "", + "pm.test('Has correct result', function() {", + " const jsonData = pm.response.json();", + " var keys = Object.keys(jsonData.result);", + " keys.map(function (k) {", + " var value = jsonData.result[k] ? jsonData.result[k] : null;", + " var expect = expected.result[k] ? 
expected.result[k] : null;", + " if (expect && typeof expect === 'object') {", + " jsonData.result[k].map(function (value, index) {", + " var expect = expected.result[k][index];", + " pm.expect(value).to.be.equal(expect)", + " })", + " } else {", + " pm.expect(value).to.be.equal(expect)", + " }", + " });", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_getTransactionByBlockNumberAndIndex\",\n\t\"params\":[\n\t\t\"0x52a90b\", \n\t\t\"0x25\"\n\t],\n\t\"id\":\"1\"\n}\n", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns information about a transaction given a block number and transaction index.\r\n\r\n**Parameters**\r\n\r\nTAG - Integer block number or one of \"earliest\", \"latest\" or \"pending\"\r\n\r\nQUANTITY - The transaction index position\r\n\r\n**Returns**\r\n\r\nObject - An object of type Transaction or null when no transaction was found. See eth_getTransactionByHash" + }, + "response": [] + }, + { + "name": "getTransactionReceipt", + "event": [ + { + "listen": "test", + "script": { + "id": "d49e47cb-cbdf-4cc1-83e2-e0ab6b860fd3", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"id\": \"1\",", + " \"result\": {", + " \"blockHash\": \"0xf6084155ff2022773b22df3217d16e9df53cbc42689b27ca4789e06b6339beb2\",", + " \"blockNumber\": \"0x52a975\",", + " \"contractAddress\": null,", + " \"cumulativeGasUsed\": \"0x797db0\",", + " \"from\": \"0xd907941c8b3b966546fc408b8c942eb10a4f98df\",", + " \"gasUsed\": \"0x1308c\",", + " \"logs\": [", + " {", + " \"address\": \"0xd6df5935cd03a768b7b9e92637a01b25e24cb709\",", + " \"topics\": [", + " \"0x8940c4b8e215f8822c5c8f0056c12652c746cbc57eedbd2a440b175971d47a77\",", + " \"0x000000000000000000000000d907941c8b3b966546fc408b8c942eb10a4f98df\"", + " ],", + " \"data\": \"0x0000000000000000000000000000000000000000000000000000008bb2c97000\",", + " \"blockNumber\": \"0x52a975\",", + " \"transactionHash\": \"0xa3ece39ae137617669c6933b7578b94e705e765683f260fcfe30eaa41932610f\",", + " \"transactionIndex\": \"0x29\",", + " \"blockHash\": \"0xf6084155ff2022773b22df3217d16e9df53cbc42689b27ca4789e06b6339beb2\",", + " \"logIndex\": \"0x119\",", + " \"removed\": false", + " },", + " {", + " \"address\": \"0xd6df5935cd03a768b7b9e92637a01b25e24cb709\",", + " \"topics\": [", + " \"0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef\",", + " \"0x0000000000000000000000000000000000000000000000000000000000000000\",", + " \"0x000000000000000000000000d907941c8b3b966546fc408b8c942eb10a4f98df\"", + " ],", + " \"data\": \"0x0000000000000000000000000000000000000000000000000000008bb2c97000\",", + " \"blockNumber\": \"0x52a975\",", + " \"transactionHash\": \"0xa3ece39ae137617669c6933b7578b94e705e765683f260fcfe30eaa41932610f\",", + " \"transactionIndex\": \"0x29\",", + " \"blockHash\": \"0xf6084155ff2022773b22df3217d16e9df53cbc42689b27ca4789e06b6339beb2\",", + " \"logIndex\": \"0x11a\",", + " \"removed\": false", + " }", + " ],", + " \"logsBloom\": 
\"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000020000000000000000000800000000000000004010000010100000000000000000000000000000000000000000000000000040000080000000000000080000000000000000000000000000000000000000000020000000000000000000000002000000000000000000000000000000000000000000000000000020000000010000000000000000000000000000000000000000000000000000000000\",", + " \"status\": \"0x1\",", + " \"to\": \"0xd6df5935cd03a768b7b9e92637a01b25e24cb709\",", + " \"transactionHash\": \"0xa3ece39ae137617669c6933b7578b94e705e765683f260fcfe30eaa41932610f\",", + " \"transactionIndex\": \"0x29\"", + " }", + "}", + "", + "pm.test('Has correct result', function() {", + " const jsonData = pm.response.json();", + " var keys = Object.keys(jsonData.result);", + " keys.map(function (k) {", + " var value = jsonData.result[k] ? jsonData.result[k] : null;", + " var expect = expected.result[k] ? expected.result[k] : null;", + " if (expect && typeof expect === 'object') {", + " if (k !== 'logs') {", + " jsonData.result[k].map(function (value, index) {", + " var expect = expected.result[k][index];", + " pm.expect(value).to.be.equal(expect)", + " })", + " }", + " } else {", + " pm.expect(value).to.be.equal(expect)", + " }", + " });", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_getTransactionReceipt\",\n\t\"params\":[\n\t\t\"0xa3ece39ae137617669c6933b7578b94e705e765683f260fcfe30eaa41932610f\"\n\t],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns the receipt of a transaction given the transaction's hash.\r\n\r\n**Note**\r\n\r\nReceipts are not available for pending transactions.\r\n\r\n**Parameters**\r\n\r\nDATA, 32 Bytes - hash of a transaction\r\n\r\n**Returns**\r\n\r\nObject - An object of type TransactionReceipt or null when no receipt was found\r\n\r\ntransactionHash: DATA, 32 Bytes - hash of the transaction\r\n\r\ntransactionIndex: QUANTITY - Integer of the transactions index position in the block\r\n\r\nblockHash: DATA, 32 Bytes - hash of the block where this transaction was in\r\n\r\nblockNumber: QUANTITY - block number where this transaction was in\r\n\r\ncumulativeGasUsed: QUANTITY - The total amount of gas used when this transaction was executed in the block\r\n\r\ngasUsed: QUANTITY - The amount of gas used by this specific transaction alone\r\n\r\ncontractAddress: DATA, 20 Bytes - The contract address created, if the transaction was a contract creation, null otherwise\r\n\r\nlogs: Array - Array of log objects, which this transaction generated\r\n\r\nlogsBloom: DATA, 256 Bytes - Bloom filter for light clients to quickly retrieve related logs.\r\n\r\nroot: DATA 32 bytes - post-transaction stateroot (if the block is pre-Byzantium)\r\n\r\nstatus: QUANTITY - either 1 = success or 0 = failure (if block is Byzatnium or later)" + }, + "response": [] + } + ], + "protocolProfileBehavior": {}, + "_postman_isSubFolder": true + }, + { + "name": "uncles", + "item": [ + { + "name": "getUncleByBlockNumberAndIndex", + "event": [ + { + "listen": "test", + "script": { + "id": "bb80848b-3b1d-4d5a-8317-fe623c0be114", + "exec": [ + "var expected = {", + " \"jsonrpc\": 
\"2.0\",", + " \"id\": \"1\",", + " \"result\": {", + " \"difficulty\": \"0x3ff800000\",", + " \"extraData\": \"0x59617465732052616e64616c6c202d2045746865724e696e6a61\",", + " \"gasLimit\": \"0x1388\",", + " \"gasUsed\": \"0x0\",", + " \"hash\": \"0x5cd50096dbb856a6d1befa6de8f9c20decb299f375154427d90761dc0b101109\",", + " \"logsBloom\": \"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\",", + " \"miner\": \"0xc8ebccc5f5689fa8659d83713341e5ad19349448\",", + " \"mixHash\": \"0xf8c94dfe61cf26dcdf8cffeda337cf6a903d65c449d7691a022837f6e2d99459\",", + " \"nonce\": \"0x68b769c5451a7aea\",", + " \"number\": \"0x1\",", + " \"parentHash\": \"0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3\",", + " \"receiptsRoot\": \"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421\",", + " \"sha3Uncles\": \"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347\",", + " \"size\": \"0x21a\",", + " \"stateRoot\": \"0x1e6e030581fd1873b4784280859cd3b3c04aa85520f08c304cf5ee63d3935add\",", + " \"timestamp\": \"0x55ba4242\",", + " \"totalDifficulty\": \"0xffd003ffe\",", + " \"transactionsRoot\": \"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421\",", + " \"uncles\": []", + " }", + "}", + "", + "pm.test('Has correct result', function() {", + " const jsonData = pm.response.json();", + " var keys = Object.keys(jsonData.result);", + " keys.map(function (k) {", + " var value = jsonData.result[k] ? jsonData.result[k] : null;", + " var expect = expected.result[k] ? expected.result[k] : null;", + " if (expect && typeof expect === 'object') {", + " jsonData.result[k].map(function (value, index) {", + " var expect = expected.result[k][index];", + " pm.expect(value).to.be.equal(expect)", + " })", + " } else {", + " pm.expect(value).to.be.equal(expect)", + " }", + " });", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_getUncleByBlockNumberAndIndex\",\n\t\"params\":[\n\t\t\"0x3\",\n\t\t\"0x0\"\n\t],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns information about an uncle given a block's number and the index of the uncle.\r\n\r\n**Parameters**\r\n\r\nTAG - Integer block number or one of \"earliest\", \"latest\" or \"pending\"\r\n\r\nQUANTITY - The uncle's index position\r\n\r\n**Returns**\r\n\r\nObject - An object of type Block (with zero transactions), or null when no uncle was found. 
See eth_getBlockByHash" + }, + "response": [] + }, + { + "name": "getUncleByBlockHashAndIndex", + "event": [ + { + "listen": "test", + "script": { + "id": "3ba8cc46-cd5d-4b26-a618-a54ddc3d86c4", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"id\": \"1\",", + " \"result\": {", + " \"difficulty\": \"0x3ff800000\",", + " \"extraData\": \"0x59617465732052616e64616c6c202d2045746865724e696e6a61\",", + " \"gasLimit\": \"0x1388\",", + " \"gasUsed\": \"0x0\",", + " \"hash\": \"0x5cd50096dbb856a6d1befa6de8f9c20decb299f375154427d90761dc0b101109\",", + " \"logsBloom\": \"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\",", + " \"miner\": \"0xc8ebccc5f5689fa8659d83713341e5ad19349448\",", + " \"mixHash\": \"0xf8c94dfe61cf26dcdf8cffeda337cf6a903d65c449d7691a022837f6e2d99459\",", + " \"nonce\": \"0x68b769c5451a7aea\",", + " \"number\": \"0x1\",", + " \"parentHash\": \"0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3\",", + " \"receiptsRoot\": \"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421\",", + " \"sha3Uncles\": \"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347\",", + " \"size\": \"0x21a\",", + " \"stateRoot\": \"0x1e6e030581fd1873b4784280859cd3b3c04aa85520f08c304cf5ee63d3935add\",", + " \"timestamp\": \"0x55ba4242\",", + " \"totalDifficulty\": \"0xffd003ffe\",", + " \"transactionsRoot\": \"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421\",", + " \"uncles\": []", + " }", + "}", + "", + "pm.test('Has correct result', function() {", + " const jsonData = pm.response.json();", + " var keys = Object.keys(jsonData.result);", + " keys.map(function (k) {", + " var value = jsonData.result[k] ? jsonData.result[k] : null;", + " var expect = expected.result[k] ? expected.result[k] : null;", + " if (expect && typeof expect === 'object') {", + " jsonData.result[k].map(function (value, index) {", + " var expect = expected.result[k][index];", + " pm.expect(value).to.be.equal(expect)", + " })", + " } else {", + " pm.expect(value).to.be.equal(expect)", + " }", + " });", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_getUncleByBlockHashAndIndex\",\n\t\"params\":[\n\t\t\"0x3d6122660cc824376f11ee842f83addc3525e2dd6756b9bcf0affa6aa88cf741\", \n\t\t\"0x0\"\n\t],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns information about an uncle given a block's hash and the index of the uncle.\r\n\r\n**Parameters**\r\n\r\nDATA, 32 Bytes - Hash of the block holding the uncle\r\n\r\nQUANTITY - The uncle's index position\r\n\r\n**Returns**\r\n\r\nObject - An object of type Block (with zero transactions), or null when no uncle was found. 
See eth_getBlockByHash" + }, + "response": [] + }, + { + "name": "getUncleCountByBlockNumber", + "event": [ + { + "listen": "test", + "script": { + "id": "790ef142-b864-4ad6-a90c-7bece105c3f8", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"result\": \"0x1\",", + " \"id\": \"1\"", + "}", + "", + "pm.test('Has correct result', function() {", + " var isErigon = pm.environment.get('HOST') == \"{{ERIGON}}\";", + " var jsonData = pm.response.json();", + " if (!isErigon) {", + " delete jsonData.result.author;", + " delete jsonData.result.sealFields;", + " }", + " pm.expect(jsonData).to.be.deep.equal(expected);", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_getUncleCountByBlockNumber\",\n\t\"params\":[\n\t\t\"0x3\"\n ],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns the number of uncles in the block, if any.\r\n\r\n**Parameters**\r\n\r\nTAG - Integer block number or one of \"earliest\", \"latest\" or \"pending\"\r\n\r\n**Returns**\r\n\r\nQUANTITY - The number of uncles in the block, if any" + }, + "response": [] + }, + { + "name": "getUncleCountByBlockHash", + "event": [ + { + "listen": "test", + "script": { + "id": "d3fba91c-ae8f-4ced-b563-51f9b7e36144", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"result\": \"0x1\",", + " \"id\": \"1\"", + "}", + "", + "pm.test('Has correct result', function() {", + " var isErigon = pm.environment.get('HOST') == \"{{ERIGON}}\";", + " var jsonData = pm.response.json();", + " if (!isErigon) {", + " delete jsonData.result.author;", + " delete jsonData.result.sealFields;", + " }", + " pm.expect(jsonData).to.be.deep.equal(expected);", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_getUncleCountByBlockHash\",\n\t\"params\":[\n\t\t\"0x3d6122660cc824376f11ee842f83addc3525e2dd6756b9bcf0affa6aa88cf741\"\n\t],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns the number of uncles in the block, if any.\r\n\r\n**Parameters**\r\n\r\nDATA, 32 Bytes - Hash of the block containing the uncle\r\n\r\n**Returns**\r\n\r\nQUANTITY - The number of uncles in the block, if any" + }, + "response": [] + } + ], + "protocolProfileBehavior": {}, + "_postman_isSubFolder": true + }, + { + "name": "filters", + "item": [ + { + "name": "newPendingTransactionFilter", + "event": [ + { + "listen": "test", + "script": { + "id": "2bdda0a7-7cf2-4e02-ae19-7a575f2588a2", + "exec": ["utils.notImplemented(\"eth_newPendingTransactionFilter\", pm.response.json())", ""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_newPendingTransactionFilter\",\n\t\"params\":[],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Creates a pending transaction filter in the node. 
To check if the state has changed, call eth_getFilterChanges.\r\n\r\n**Parameters**\r\n\r\nNone\r\n\r\n**Returns**\r\n\r\nQUANTITY - A filter id" + }, + "response": [] + }, + { + "name": "newBlockFilter", + "event": [ + { + "listen": "test", + "script": { + "id": "a627cf51-a966-4f25-9447-fb3da185a3e0", + "exec": ["utils.notImplemented(\"eth_newBlockFilter\", pm.response.json())", ""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_newBlockFilter\",\n\t\"params\":[],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Creates a block filter in the node, to notify when a new block arrives. To check if the state has changed, call eth_getFilterChanges.\r\n\r\n**Parameters**\r\n\r\nNone\r\n\r\n**Returns**\r\n\r\nQUANTITY - A filter id" + }, + "response": [] + }, + { + "name": "newFilter", + "event": [ + { + "listen": "test", + "script": { + "id": "44d72ef7-022a-4ebf-94b6-9778bd6925d1", + "exec": ["utils.notImplemented(\"eth_newFilter\", pm.response.json())", ""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_newFilter\",\n\t\"params\": [\n {\n \"fromBlock\": \"0x1\",\n \"toBlock\": \"0x2\",\n \"address\": \" 0x8888f1f195afa192cfee860698584c030f4c9db1\",\n \"topics\": [\n \"0x000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b\",\n null,\n [\"0x000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b\", \"0x0000000000000000000000000aff3454fce5edbc8cca8697c15331677e6ebccc\"]\n ]\n }\n ],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Creates an arbitrary filter object, based on filter options, to notify when the state changes (logs). To check if the state has changed, call eth_getFilterChanges.\r\n\r\n**Example**\r\n\r\nA note on specifying topic filters\r\n\r\nTopics are order-dependent. A transaction with a log with topics [A, B] will be matched by the following topic filters\r\n\r\n[] \"anything\"\r\n\r\n[A] \"A in first position (and anything after)\"\r\n\r\n[null, B] \"anything in first position AND B in second position (and anything after)\"\r\n\r\n[A, B] \"A in first position AND B in second position (and anything after)\"\r\n\r\n[[A, B], [A, B]] \"(A OR B) in first position AND (A OR B) in second position (and anything after)\"\r\n\r\n**Parameters**\r\n\r\nObject - An object of type Filter\r\n\r\nQUANTITY|TAG - (optional, default \"latest\") Integer block number, or \"earliest\", \"latest\" or \"pending\" for not yet mined transactions\r\n\r\nQUANTITY|TAG - (optional, default \"latest\") Integer block number, or \"earliest\", \"latest\" or \"pending\" for not yet mined transactions\r\n\r\nDATA|Array of DATA, 20 Bytes - (optional) Contract address or a list of addresses from which logs should originate\r\n\r\nArray of DATA, - (optional) Array of 32 Bytes DATA topics. Topics are order-dependent. 
Each topic can also be an array of DATA with \"or\" options\r\n\r\n**Returns**\r\n\r\nQUANTITY - A filter id" + }, + "response": [] + }, + { + "name": "uninstallFilter", + "event": [ + { + "listen": "test", + "script": { + "id": "11a48bf8-6320-45ae-989c-ad4b889b5f0d", + "exec": ["utils.notImplemented(\"eth_uninstallFilter\", pm.response.json())", ""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_uninstallFilter\",\n\t\"params\":[\n\t\t\"0xdeadbeef\"\n\t],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Uninstalls a previously-created filter given the filter's id. Always uninstall filters when no longer needed.\r\n\r\n**Note**\r\n\r\nFilters timeout when they are not requested with eth_getFilterChanges for a period of time.\r\n\r\n**Parameters**\r\n\r\nQUANTITY - The filter id\r\n\r\n**Returns**\r\n\r\nBoolean - true if the filter was successfully uninstalled, false otherwise" + }, + "response": [] + }, + { + "name": "getFilterChanges", + "event": [ + { + "listen": "test", + "script": { + "id": "6e68517c-5d19-4843-b1bb-39c7a594d4a5", + "exec": ["utils.notImplemented(\"eth_getFilterChanges\", pm.response.json())", ""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_getFilterChanges\",\n\t\"params\":[\n\t\t\"0xdeadbeef\"\n\t],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns an array of objects of type Log, an array of block hashes (for eth_newBlockFilter) or an array of transaction hashes (for eth_newPendingTransactionFilter) or an empty array if nothing has changed since the last poll.\r\n\r\n**Note**\r\n\r\nIn solidity: The first topic is the hash of the signature of the event (if you have not declared the event anonymous.\r\n\r\n**Parameters**\r\n\r\nQUANTITY - The filter id\r\n\r\n**Returns**\r\n\r\nObject - An object of type FilterLog is defined as\r\n\r\nremoved: BOOLEAN - true when the log was removed, due to a chain reorganization. false if its a valid log\r\n\r\nlogIndex: QUANTITY - Integer of the log index position in the block. null when its pending log\r\n\r\ntransactionIndex: QUANTITY - Integer of the transactions index position log was created from. null when its pending log\r\n\r\ntransactionHash: DATA, 32 Bytes - hash of the transactions this log was created from. null when its pending log\r\n\r\nblockHash: DATA, 32 Bytes - hash of the block where this log was in. null when its pending. null when its pending log\r\n\r\nblockNumber: QUANTITY - The block number where this log was in. null when its pending. null when its pending log\r\n\r\naddress: DATA, 20 Bytes - address from which this log originated\r\n\r\ndata: DATA - contains one or more 32 Bytes non-indexed arguments of the log\r\n\r\ntopics: Array of DATA - Array of 0 to 4 32 Bytes DATA of indexed log arguments." 
+ }, + "response": [] + }, + { + "name": "getLogs", + "event": [ + { + "listen": "test", + "script": { + "id": "3b0fee2d-9ef2-48d4-901b-7cab11dfbba2", + "exec": [ + "pm.test('Not tested', function() {", + " var tested = false;", + " pm.expect(tested).to.be.true", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_getLogs\",\n\t\"params\":[{\n\t\t\"topics\":[\n\t\t\t\"0x000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b\"\n\t\t]\n\t}],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns an array of logs matching a given filter object.\r\n\r\n**Parameters**\r\n\r\nObject - An object of type Filter, see eth_newFilter parameters\r\n\r\n**Returns**\r\n\r\nObject - An object of type LogArray or an empty array if nothing has changed since last poll. See eth_getFilterChanges" + }, + "response": [] + } + ], + "protocolProfileBehavior": {}, + "_postman_isSubFolder": true + }, + { + "name": "accounts", + "item": [ + { + "name": "getBalance", + "event": [ + { + "listen": "test", + "script": { + "id": "2527ac10-fa47-47c1-a422-ac54f2067e83", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"id\": \"1\",", + " \"result\": \"0x7a69\"", + "}", + "", + "pm.test('Has correct result', function() {", + " pm.expect(pm.response.json()).to.be.deep.equal(expected);", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_getBalance\",\n\t\"params\":[\n\t\t\"0x5df9b87991262f6ba471f09758cde1c0fc1de734\", \n\t\t\"0xb443\"\n\t],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns the balance of an account for a given address.\r\n\r\n**Parameters**\r\n\r\nDATA, 20 Bytes - Address to check for balance\r\n\r\nTAG - Integer block number or one of \"earliest\", \"latest\" or \"pending\"\r\n\r\n**Returns**\r\n\r\nQUANTITY - Integer of the current balance in wei" + }, + "response": [] + }, + { + "name": "getTransactionCount", + "event": [ + { + "listen": "test", + "script": { + "id": "bcfa7ced-fa30-4936-ad0d-28c99c7a39c5", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"id\": \"1\",", + " \"result\": \"0xa\"", + "}", + "", + "pm.test('Has correct result', function() {", + " pm.expect(pm.response.json()).to.be.deep.equal(expected);", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_getTransactionCount\",\n\t\"params\":[\n\t\t\"0xfd2605a2bf58fdbb90db1da55df61628b47f9e8c\", \n\t\t\"0xc443\"\n\t],\n\t\"id\":\"1\"\n}\n", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns the number of transactions sent from an address (the nonce).\r\n\r\n**Parameters**\r\n\r\nDATA, 20 Bytes - Address from which to retrieve nonce\r\n\r\nTAG - Integer block number or one of \"earliest\", \"latest\" or 
\"pending\"\r\n\r\n**Returns**\r\n\r\nQUANTITY - Integer of the number of transactions sent from this address" + }, + "response": [] + }, + { + "name": "getCode", + "event": [ + { + "listen": "test", + "script": { + "id": "b1435da2-cfbc-48fd-97ac-24612fb6ee6d", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"id\": \"1\",", + " \"result\": \"0x6060604052361561001f5760e060020a600035046372ea4b8c811461010c575b61011b3460008080670de0b6b3a764000084106101d557600180548101908190556003805433929081101561000257906000526020600020900160006101000a815481600160a060020a0302191690830217905550670de0b6b3a7640000840393508350670de0b6b3a76400006000600082828250540192505081905550600260016000505411151561011d5760038054829081101561000257906000526020600020900160009054906101000a9004600160a060020a0316600160a060020a03166000600060005054604051809050600060405180830381858888f150505080555060016002556101d5565b60018054016060908152602090f35b005b60018054600354910114156101d55760038054600254600101909102900392505b6003546002549003600119018310156101e357600380548490811015610002579082526040517fc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b9190910154600160a060020a03169082906706f05b59d3b200009082818181858883f1505090546706f05b59d3b1ffff1901835550506001929092019161013e565b505060028054600101905550505b600080548501905550505050565b506002548154919250600190810190910460001901905b60035460025490036001190183101561029a576003805484908110156100025760009182526040517fc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b9190910154600160a060020a03169190838504600019019082818181858883f1505081548486049003600190810190925550600290830183020460001901841415905061028e576001015b600192909201916101fa565b60038054600254810182018083559190829080158290116101c75760008390526101c7907fc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b9081019083015b808211156102fa57600081556001016102e6565b509056\"", + "}", + "", + "pm.test('Has correct result', function() {", + " pm.expect(pm.response.json()).to.be.deep.equal(expected);", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_getCode\",\n\t\"params\":[\n\t\t\"0x109c4f2ccc82c4d77bde15f306707320294aea3f\", \n\t\t\"0xc443\"\n\t],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns the byte code at a given address (if it's a smart contract).\r\n\r\n**Parameters**\r\n\r\nDATA, 20 Bytes - Address from which to retreive byte code\r\n\r\nTAG - Integer block number or one of \"earliest\", \"latest\" or \"pending\"\r\n\r\n**Returns**\r\n\r\nDATA - The byte code (if any) found at the given address" + }, + "response": [] + }, + { + "name": "getStorageAt", + "event": [ + { + "listen": "test", + "script": { + "id": "270e7931-1ec1-440a-a8e1-ba54f4f4e9a3", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"id\": \"1\",", + " \"result\": \"0x0000000000000000000000000000000000000000000000001bc16d674ec80000\"", + "}", + "", + "pm.test('Has correct result', function() {", + " pm.expect(pm.response.json()).to.be.deep.equal(expected);", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\", 
\n\t\"method\": \"eth_getStorageAt\", \n\t\"params\": [\n\t\t\"0x109c4f2ccc82c4d77bde15f306707320294aea3f\", \n\t\t\"0x0\",\n\t\t\"0xc443\"\n\t], \n\t\"id\": \"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns the value from a storage position at a given address.\r\n\r\n**Parameters**\r\n\r\nDATA, 20 Bytes - Address of the contract whose storage to retreive\r\n\r\nQUANTITY - Integer of the position in the storage\r\n\r\nTAG - Integer block number or one of \"earliest\", \"latest\" or \"pending\"\r\n\r\n**Returns**\r\n\r\nDATA - The value at this storage position" + }, + "response": [] + } + ], + "protocolProfileBehavior": {}, + "_postman_isSubFolder": true + }, + { + "name": "system", + "item": [ + { + "name": "blockNumber", + "event": [ + { + "listen": "test", + "script": { + "id": "5e569618-0584-4849-9571-689ef1a79248", + "exec": ["utils.cannotTest(\"eth_blockNumber\", pm.response.json())", ""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_blockNumber\",\n\t\"params\":[],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns the block number of most recent block.\r\n\r\n**Parameters**\r\n\r\nNone\r\n\r\n**Returns**\r\n\r\nQUANTITY - Integer of the current highest block number the client is on" + }, + "response": [] + }, + { + "name": "syncing", + "event": [ + { + "listen": "test", + "script": { + "id": "8b16926e-2282-492c-9d84-48dd950ac85b", + "exec": [ + "// There's nothing really to test here. 
The node is always syncing", + "pm.test('Endpoint not tested', function() {", + " pm.expect(true).to.be.true;", + "});" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_syncing\",\n\t\"params\":[],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns a data object detailing the status of the sync process or false if not syncing.\r\n\r\n**Parameters**\r\n\r\nNone\r\n\r\n**Returns**\r\n\r\nObject - An object of type Syncing or false if not syncing.\r\n\r\nstartingBlock: QUANTITY - The block at which the import started (will only be reset, after the sync reached his head)\r\n\r\ncurrentBlock: QUANTITY - The current block, same as eth_blockNumber\r\n\r\nhighestBlock: QUANTITY - The estimated highest block" + }, + "response": [] + }, + { + "name": "chainId", + "event": [ + { + "listen": "test", + "script": { + "id": "82448e71-a47e-4fee-9ba7-b6c0d211c075", + "exec": [ + "pm.test('Has correct result', function() {", + " const jsonData = pm.response.json();", + " pm.expect(jsonData.result).to.be.equals(\"0x1\")", + "});", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_chainId\",\n\t\"params\":[],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns the current ethereum chainId.\r\n\r\n**Parameters**\r\n\r\nNone\r\n\r\n**Returns**\r\n\r\nQUANTITY - The current chainId" + }, + "response": [] + }, + { + "name": "protocolVersion", + "event": [ + { + "listen": "test", + "script": { + "id": "42dfa289-098b-43b0-9395-9ed18209fa20", + "exec": [ + "pm.test('Has correct result', function() {", + " var isParity = pm.environment.get('HOST') == \"{{PARITY}}\";", + " const jsonData = pm.response.json();", + " ", + " if (isParity) {", + " pm.expect(jsonData.result).to.be.equals(\"63\")", + " } else {", + " pm.expect(jsonData.result).to.be.equals(\"0x41\")", + " }", + "});", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_protocolVersion\",\n\t\"params\":[],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns the current ethereum protocol version.\r\n\r\n**Parameters**\r\n\r\nNone\r\n\r\n**Returns**\r\n\r\nQUANTITY - The current ethereum protocol version" + }, + "response": [] + }, + { + "name": "gasPrice", + "event": [ + { + "listen": "test", + "script": { + "id": "50b5578b-4008-406c-a8f6-0459f258538d", + "exec": ["utils.cannotTest(\"eth_gasPrice\", pm.response.json())", ""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_gasPrice\",\n\t\"params\":[],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": 
["{{HOST}}"] + }, + "description": "Returns the current price per gas in wei.\r\n\r\n**Parameters**\r\n\r\nNone\r\n\r\n**Returns**\r\n\r\nQUANTITY - Integer of the current gas price in wei" + }, + "response": [] + } + ], + "protocolProfileBehavior": {}, + "_postman_isSubFolder": true + }, + { + "name": "call", + "item": [ + { + "name": "call", + "event": [ + { + "listen": "test", + "script": { + "id": "3dde2a48-3bad-43c2-97a6-f4339f368992", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"result\": \"0x0000000000000000000000000000000000000000000c685fa11e01ec6f000000\",", + " \"id\": \"1\"", + "}", + "", + "pm.test('Has correct result', function() {", + " var jsonData = pm.response.json();", + " pm.expect(jsonData).to.be.deep.equal(expected);", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_call\",\n\t\"params\":[\n {\n \"to\": \"0x08a2e41fb99a7599725190b9c970ad3893fa33cf\",\n \"data\": \"0x18160ddd\"\n },\n \"0xa2f2e0\"\n ],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Executes a new message call immediately without creating a transaction on the block chain.\r\n\r\n**Parameters**\r\n\r\nObject - An object of type Call\r\n\r\nDATA, 20 Bytes - (optional) The address the transaction is sent from\r\n\r\nDATA, 20 Bytes - The address the transaction is directed to\r\n\r\nQUANTITY - (optional) Integer of the gas provided for the transaction execution. eth_call consumes zero gas, but this parameter may be needed by some executions\r\n\r\nQUANTITY - (optional) Integer of the gasPrice used for each paid gas\r\n\r\nQUANTITY - (optional) Integer of the value sent with this transaction\r\n\r\nDATA - (optional) Hash of the method signature and encoded parameters. For details see Ethereum Contract ABI\r\n\r\nTAG - Integer block number or one of \"earliest\", \"latest\" or \"pending\"\r\n\r\n**Returns**\r\n\r\nDATA - The return value of executed contract" + }, + "response": [] + }, + { + "name": "estimateGas", + "event": [ + { + "listen": "test", + "script": { + "id": "61b5e2c2-b0c3-438c-a8cc-85bd6f058f75", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"id\": \"1\",", + " \"result\": \"0x5208\"", + "}", + "", + "pm.test('Has correct result', function() {", + " pm.expect(pm.response.json()).to.be.deep.equal(expected);", + "})", + "", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_estimateGas\",\n\t\"params\":[\n {\n \"to\": \"0x3d597789ea16054a084ac84ce87f50df9198f415\",\n \"from\": \"0x3d597789ea16054a084ac84ce87f50df9198f415\",\n \"value\": \"0x1\"\n }\n ],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns an estimate of how much gas is necessary to allow the transaction to complete. 
The transaction will not be added to the blockchain.\r\n\r\n**Note**\r\n\r\nThe estimate may be significantly more than the amount of gas actually used by the transaction for a variety of reasons including EVM mechanics and node performance.\r\n\r\n**Note**\r\n\r\nIf no gas limit is specified geth uses the block gas limit from the pending block as an upper bound. As a result the returned estimate might not be enough to executed the call/transaction when the amount of gas is higher than the pending block gas limit.\r\n\r\n**Parameters**\r\n\r\nObject - An object of type Call, see eth_call parameters, expect that all properties are optional\r\n\r\n**Returns**\r\n\r\nQUANTITY - The estimated amount of gas needed for the call" + }, + "response": [] + }, + { + "name": "sendTransaction", + "event": [ + { + "listen": "test", + "script": { + "id": "6099e6b6-bb38-45ed-8178-a2c148e4d2c5", + "exec": ["utils.notImplemented(\"eth_sendTransaction\", pm.response.json())", ""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_sendTransaction\",\n \"params\": [\n {\n \"from\": \" 0xb60e8dd61c5d32be8058bb8eb970870f07233155\",\n \"to\": \" 0xd46e8dd67c5d32be8058bb8eb970870f07244567\",\n \"gas\": \"0x76c0\",\n \"gasPrice\": \"0x9184e72a000\",\n \"value\": \"0x9184e72a\",\n \"data\": \"0xd46e8dd67c5d32be8d46e8dd67c5d32be8058bb8eb970870f072445675058bb8eb970870f072445675\"\n }\n ],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Creates new message call transaction or a contract creation if the data field contains code.\r\n\r\n**Note**\r\n\r\nUse eth_getTransactionReceipt to get the contract address, after the transaction was mined, when you created a contract\r\n\r\n**Parameters**\r\n\r\nObject - An object of type SendTransaction\r\n\r\nDATA, 20 Bytes - The address the transaction is send from\r\n\r\nDATA, 20 Bytes - (optional when creating new contract) The address the transaction is directed to\r\n\r\nQUANTITY - (optional, default 90000) Integer of the gas provided for the transaction execution. It will return unused gas\r\n\r\nQUANTITY - (optional, default To-Be-Determined) Integer of the gasPrice used for each paid gas\r\n\r\nQUANTITY - (optional) Integer of the value sent with this transaction\r\n\r\nDATA - The compiled code of a contract OR the hash of the invoked method signature and encoded parameters. For details see Ethereum Contract ABI\r\n\r\nQUANTITY - (optional) Integer of a nonce. 
This allows you to overwrite your own pending transactions that use the same nonce\r\n\r\n**Returns**\r\n\r\nDATA, 32 Bytes - The transaction hash, or the zero hash if the transaction is not yet available" +                    }, +                    "response": [] +                }, +                { +                    "name": "sendRawTransaction", +                    "event": [ +                        { +                            "listen": "test", +                            "script": { +                                "id": "3293bee1-893c-4d4c-bc5b-458235d2158b", +                                "exec": [ +                                    "pm.test('Not tested', function() {", +                                    "    var tested = false;", +                                    "    pm.expect(tested).to.be.true", +                                    "})", +                                    "" +                                ], +                                "type": "text/javascript" +                            } +                        } +                    ], +                    "request": { +                        "method": "POST", +                        "header": [ +                            { +                                "key": "Content-Type", +                                "value": "application/json" +                            } +                        ], +                        "body": { +                            "mode": "raw", +                            "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_sendRawTransaction\",\n\t\"params\":[\"0xd46e8dd67c5d32be8d46e8dd67c5d32be8058bb8eb970870f072445675058bb8eb970870f072445675\"],\n\t\"id\":\"1\"\n}", +                            "options": { +                                "raw": {} +                            } +                        }, +                        "url": { +                            "raw": "{{HOST}}", +                            "host": ["{{HOST}}"] +                        }, +                        "description": "Creates new message call transaction or a contract creation for previously-signed transactions.\r\n\r\n**Note**\r\n\r\nUse eth_getTransactionReceipt to get the contract address, after the transaction was mined, when you created a contract.\r\n\r\n**Parameters**\r\n\r\nDATA - The signed transaction data\r\n\r\n**Returns**\r\n\r\nDATA, 32 Bytes - The transaction hash, or the zero hash if the transaction is not yet available" +                    }, +                    "response": [] +                }, +                { +                    "name": "getProof", +                    "event": [ +                        { +                            "listen": "test", +                            "script": { +                                "id": "3d8697ee-e17d-419f-b66a-1017f8f7ad22", +                                "exec": ["utils.notImplemented(\"eth_getProof\", pm.response.json())", ""], +                                "type": "text/javascript" +                            } +                        } +                    ], +                    "request": { +                        "method": "POST", +                        "header": [ +                            { +                                "key": "Content-Type", +                                "value": "application/json" +                            } +                        ], +                        "body": { +                            "mode": "raw", +                            "raw": "{\n    \"id\": \"1\",\n    \"jsonrpc\": \"2.0\",\n    \"method\": \"eth_getProof\",\n    \"params\": [\n        \"0x7F0d15C7FAae65896648C8273B6d7E43f58Fa842\",\n        [ \"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421\" ],\n        \"latest\"\n    ]\n}", +                            "options": { +                                "raw": {} +                            } +                        }, +                        "url": { +                            "raw": "{{HOST}}", +                            "host": ["{{HOST}}"] +                        }, +                        "description": "See this EIP for more information: https://github.com/ethereum/EIPs/issues/1186\r\n\r\nPossible implementation: https://github.com/vocdoni/eth-storage-proof\r\n\r\n**Parameters**\r\n\r\nDATA, 20 Bytes - The address of the storage locations being proved\r\n\r\nDATAARRAY - one or more storage locations to prove\r\n\r\nTAG - Integer block number or one of \"earliest\", \"latest\" or \"pending\"\r\n\r\n**Returns**\r\n\r\nDATA - The Merkle proof of the storage locations" +                    }, +                    "response": [] +                } +            ], +            "event": [ +                { +                    "listen": "prerequest", +                    "script": { +                        "id": "952dad06-2f84-4226-98c8-696d0fc84db6", +                        "type": "text/javascript", +                        "exec": [""] +                    } +                }, +                { +                    "listen": "test", +                    "script": { +                        "id": "e4208e7b-1dbd-4b84-9735-94dbf509f2e4", +                        "type": "text/javascript", +                        "exec": [""] +                    } +                } +            ], +            "protocolProfileBehavior": {}, +            "_postman_isSubFolder": true +        }, +        { +            "name": "mining", +            "item": [ +                { +                    "name": "coinbase", +                    "event": [ +                        { +                            "listen": "test", +                            "script": { +                                "id": "6136a206-96bb-43f2-94bd-08f93303cf9a", +                                "exec": ["utils.notImplemented(\"eth_coinbase\", pm.response.json())", ""], +                                "type": "text/javascript" +                            } +                        } +                    ], +                    "request": { +                        "method": "POST", +                        "header": [ +                            { +                                "key": "Content-Type", +                                "value": "application/json" +                            } +                        ], +                        "body": { +                            "mode": "raw", +                            "raw": 
"{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_coinbase\",\n\t\"params\":[],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns the current client coinbase address.\r\n\r\n**Parameters**\r\n\r\nNone\r\n\r\n**Returns**\r\n\r\nDATA, 20 Bytes - The current coinbase address" + }, + "response": [] + }, + { + "name": "hashrate", + "event": [ + { + "listen": "test", + "script": { + "id": "9ac59f4f-7de3-4276-8e65-91cd0ad9c040", + "exec": ["utils.notImplemented(\"eth_hashrate\", pm.response.json())", ""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_hashrate\",\n\t\"params\":[],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns the number of hashes per second that the node is mining with.\r\n\r\n**Parameters**\r\n\r\nNone\r\n\r\n**Returns**\r\n\r\nQUANTITY - Number of hashes per second" + }, + "response": [] + }, + { + "name": "mining", + "event": [ + { + "listen": "test", + "script": { + "id": "8bdc9381-dbde-4419-b736-96e7914901e0", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"id\": \"1\",", + " \"result\": false", + "}", + "", + "pm.test('Has correct result', function() {", + " pm.expect(pm.response.json()).to.be.deep.equal(expected);", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_mining\",\n\t\"params\":[],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns true if client is actively mining new blocks.\r\n\r\n**Parameters**\r\n\r\nNone\r\n\r\n**Returns**\r\n\r\nBoolean - true if the client is mining, false otherwise" + }, + "response": [] + }, + { + "name": "getWork", + "event": [ + { + "listen": "test", + "script": { + "id": "99953248-ef11-4c01-92dc-26ce5ef38d9d", + "exec": ["utils.notImplemented(\"eth_getWork\", pm.response.json())", ""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_getWork\",\n\t\"params\":[],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns the hash of the current block, the seedHash, and the boundary condition to be met ('target').\r\n\r\n**Parameters**\r\n\r\nNone\r\n\r\n**Returns**\r\n\r\nObject - An object of type Work (an array of three hashes representing block header pow-hash, seed hash and boundary condition\r\n\r\ncurrent: DATA, 32 Bytes - current block header pow-hash\r\n\r\nseed: DATA, 32 Bytes - The seed hash used for the DAG\r\n\r\nboundary: DATA, 32 Bytes - The boundary condition ('target'), 2^256 / difficulty" + }, + "response": [] + }, + { + "name": "submitWork", + "event": [ + { + "listen": "test", + "script": { + "id": "db6d4657-a901-4ed5-9995-37f11ec9da6e", + "exec": ["utils.notImplemented(\"eth_submitWork\", pm.response.json())", ""], + "type": "text/javascript" + } + 
} + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\", \n\t\"method\":\"eth_submitWork\", \n\t\"params\":[\n\t\t\"0x1\", \n\t\t\"0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef\", \n\t\t\"0xD16E5700000000000000000000000000D16E5700000000000000000000000000\"\n\t],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Submits a proof-of-work solution to the blockchain.\r\n\r\n**Parameters**\r\n\r\nDATA, 8 Bytes - The nonce found (64 bits)\r\n\r\nDATA, 32 Bytes - The header's pow-hash (256 bits)\r\n\r\nDATA, 32 Bytes - The mix digest (256 bits)\r\n\r\n**Returns**\r\n\r\nBoolean - true if the provided solution is valid, false otherwise" + }, + "response": [] + }, + { + "name": "submitHashrate", + "event": [ + { + "listen": "test", + "script": { + "id": "394114d6-fffc-4cb8-a897-89e0bb6b0aa2", + "exec": ["utils.notImplemented(\"eth_submitHashrate\", pm.response.json())", ""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\", \n\t\"method\":\"eth_submitHashrate\", \n\t\"params\":[\n\t\t\"0x0000000000000000000000000000000000000000000000000000000000500000\", \n\t\t\"0x59daa26581d0acd1fce254fb7e85952f4c09d0915afd33d3886cd914bc7d283c\"\n\t],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Submit the mining hashrate to the blockchain.\r\n\r\n**Parameters**\r\n\r\nDATA, 32 Bytes - a hexadecimal string representation of the hash rate\r\n\r\nString - A random hexadecimal ID identifying the client\r\n\r\n**Returns**\r\n\r\nBoolean - true if submitting went through succesfully, false otherwise" + }, + "response": [] + } + ], + "protocolProfileBehavior": {}, + "_postman_isSubFolder": true + } + ], + "protocolProfileBehavior": {} + }, + { + "name": "trace", + "item": [ + { + "name": "call", + "event": [ + { + "listen": "test", + "script": { + "id": "2e6a9c1c-38f4-4061-ae83-8fcc8a7511be", + "exec": ["utils.notImplemented(\"trace_call\", pm.response.json())", ""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"jsonrpc\":\"2.0\",\n \"method\":\"trace_call\",\n \"params\":[\n {\n \"from\":\"0x407d73d8a49eeb85d32cf465507dd71d507100c1\",\n \"to\":\"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\",\n \"value\":\"0x186a0\"\n },\n [\"trace\",\"vmTrace\"],\n \"latest\"\n ],\n \"id\": \"1\"\n}\n", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Executes the given call and returns a number of possible traces for it.\r\n\r\n**Parameters**\r\n\r\nObject - An object of type TraceCall\r\n\r\nfrom: DATA, 20 Bytes - (optional) 20 Bytes - The address the transaction is send from.\r\n\r\nto: DATA, 20 Bytes - (optional when creating new contract) 20 Bytes - The address the transaction is directed to.\r\n\r\ngas: QUANTITY - (optional) Integer formatted as a hex string of the gas provided for the transaction execution. 
eth_call consumes zero gas, but this parameter may be needed by some executions.\r\n\r\ngasPrice: QUANTITY - (optional) Integer formatted as a hex string of the gas price used for each paid gas.\r\n\r\nvalue: QUANTITY - (optional) Integer formatted as a hex string of the value sent with this transaction.\r\n\r\ndata: DATA - (optional) 4 byte hash of the method signature followed by encoded parameters. For details see Ethereum Contract ABI.\r\n\r\nSTRINGARRAY - An array of strings, one or more of: \"vmTrace\", \"trace\", \"stateDiff\".\r\n\r\nTAG - (optional) Integer of a block number, or the string 'earliest', 'latest' or 'pending'.\r\n\r\n**Returns**\r\n\r\nObject - An object of type BlockTraceArray" + }, + "response": [] + }, + { + "name": "callMany", + "event": [ + { + "listen": "test", + "script": { + "id": "fbec6f83-1a35-43dd-839b-8dea5ea39cfb", + "exec": ["utils.notImplemented(\"trace_callMany\", pm.response.json())", ""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_callMany\",\n \"params\": [\n [\n [\n {\n \"from\":\"0x407d73d8a49eeb85d32cf465507dd71d507100c1\",\n \"to\":\"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\",\n \"value\":\"0x186a0\"\n },\n [\"trace\"]\n ],\n [\n {\n \"from\":\"0x407d73d8a49eeb85d32cf465507dd71d507100c1\",\n \"to\":\"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\",\n \"value\":\"0x186a0\"\n },\n [\"trace\"]\n ]\n ],\n \"latest\"\n ],\n\t\"id\":\"1\"\n}\n", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Performs multiple call traces on top of the same block. i.e. transaction n will be executed on top of a pending block with all n-1 transactions applied (traced) first. Allows to trace dependent transactions.\r\n\r\n**Parameters**\r\n\r\nCALLARRAY - An array of Call objects plus strings, one or more of: \"vmTrace\", \"trace\", \"stateDiff\".\r\n\r\nTAG - (optional) integer block number, or the string 'latest', 'earliest' or 'pending', see the default block parameter.\r\n\r\n**Returns**\r\n\r\nObject - An object of type BlockTraceArray" + }, + "response": [] + }, + { + "name": "rawTransaction", + "event": [ + { + "listen": "test", + "script": { + "id": "a2465974-9dba-4410-a7bd-67b493703d29", + "exec": ["utils.notImplemented(\"trace_rawTransaction\", pm.response.json())", ""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_rawTransaction\",\n\t\"params\":[\"0x17104ac9d3312d8c136b7f44d4b8b47852618065ebfa534bd2d3b5ef218ca1f3\", [\"vmTrace\"]],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Traces a call to eth_sendRawTransaction without making the call, returning the traces\r\n\r\n**Parameters**\r\nDATA - Raw transaction data.\r\n\r\nSTRINGARRAY - Type of trace, one or more of: \"vmTrace\", \"trace\", \"stateDiff\".\r\n\r\n**Returns**\r\nObject - An object of type BlockTrace." 
+ }, + "response": [] + }, + { + "name": "replayBlockTransactions", + "event": [ + { + "listen": "test", + "script": { + "id": "7ae64e81-7268-4743-ae25-98a2d53386c0", + "exec": ["utils.notImplemented(\"trace_replayBlockTransactions\", pm.response.json())", ""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_replayBlockTransactions\",\n\t\"params\":[\"0x2\",[\"trace\"]],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Replays all transactions in a block returning the requested traces for each transaction.\r\n\r\n**Parameters**\r\n\r\nTAG - Integer of a block number, or the string 'earliest', 'latest' or 'pending'.\r\n\r\nSTRINGARRAY - Type of trace, one or more of: \"vmTrace\", \"trace\", \"stateDiff\".\r\n\r\n**Returns**\r\n\r\nObject - An object of type BlockTraceArray." + }, + "response": [] + }, + { + "name": "replayTransaction", + "event": [ + { + "listen": "test", + "script": { + "id": "b60375bb-313f-47cc-9a7d-ff4abffebe99", + "exec": ["utils.notImplemented(\"trace_replayTransaction\", pm.response.json())", ""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_replayTransaction\",\n \"params\": [\n \"0x02d4a872e096445e80d05276ee756cefef7f3b376bcec14246469c0cd97dad8f\",\n [\"trace\"]\n ],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Replays a transaction, returning the traces.\r\n\r\n**Parameters**\r\nDATA, 32 Bytes - The transaction's hash.\r\n\r\nSTRINGARRAY - Type of trace, one or more of: \"vmTrace\", \"trace\", \"stateDiff\".\r\n\r\n**Returns**\r\nObject - An object of type BlockTrace." 
+ }, + "response": [] + }, + { + "name": "transaction", + "event": [ + { + "listen": "test", + "script": { + "id": "de0d1c16-7bd3-4d6c-ae80-1001f994f1ed", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"id\": \"1\",", + " \"result\": [", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0x83806d539d4ea1c140489a06660319c9a303f874\",", + " \"gas\": \"0x1a1f8\",", + " \"input\": \"0x\",", + " \"to\": \"0x1c39ba39e4735cb65978d4db400ddd70a72dc750\",", + " \"value\": \"0x7a16c911b4d00000\"", + " },", + " \"blockHash\": \"0x7eb25504e4c202cf3d62fd585d3e238f592c780cca82dacb2ed3cb5b38883add\",", + " \"blockNumber\": 3068185,", + " \"result\": {", + " \"gasUsed\": \"0x2982\",", + " \"output\": \"0x\"", + " },", + " \"subtraces\": 2,", + " \"traceAddress\": [],", + " \"transactionHash\": \"0x17104ac9d3312d8c136b7f44d4b8b47852618065ebfa534bd2d3b5ef218ca1f3\",", + " \"transactionPosition\": 2,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0x1c39ba39e4735cb65978d4db400ddd70a72dc750\",", + " \"gas\": \"0x13e99\",", + " \"input\": \"0x16c72721\",", + " \"to\": \"0x2bd2326c993dfaef84f696526064ff22eba5b362\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0x7eb25504e4c202cf3d62fd585d3e238f592c780cca82dacb2ed3cb5b38883add\",", + " \"blockNumber\": 3068185,", + " \"result\": {", + " \"gasUsed\": \"0x183\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 0", + " ],", + " \"transactionHash\": \"0x17104ac9d3312d8c136b7f44d4b8b47852618065ebfa534bd2d3b5ef218ca1f3\",", + " \"transactionPosition\": 2,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0x1c39ba39e4735cb65978d4db400ddd70a72dc750\",", + " \"gas\": \"0x8fc\",", + " \"input\": \"0x\",", + " \"to\": \"0x70faa28a6b8d6829a4b1e649d26ec9a2a39ba413\",", + " \"value\": \"0x7a16c911b4d00000\"", + " },", + " \"blockHash\": \"0x7eb25504e4c202cf3d62fd585d3e238f592c780cca82dacb2ed3cb5b38883add\",", + " \"blockNumber\": 3068185,", + " \"result\": {", + " \"gasUsed\": \"0x0\",", + " \"output\": \"0x\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 1", + " ],", + " \"transactionHash\": \"0x17104ac9d3312d8c136b7f44d4b8b47852618065ebfa534bd2d3b5ef218ca1f3\",", + " \"transactionPosition\": 2,", + " \"type\": \"call\"", + " }", + " ]", + "}", + "", + "pm.test('Has correct result', function() {", + " pm.expect(pm.response.json()).to.be.deep.equal(expected);", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_transaction\",\n \"params\":[\"0x17104ac9d3312d8c136b7f44d4b8b47852618065ebfa534bd2d3b5ef218ca1f3\"],\n\t\"id\":\"1\"\n}\n", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns traces for the given transaction\r\n\r\n**Parameters**\r\n\r\nDATA, 32 Bytes - The transaction's hash\r\n\r\n**Returns**\r\n\r\nObject - An object of type AdhocTraceArray, see trace_filter." 
+ }, + "response": [] + }, + { + "name": "get", + "event": [ + { + "listen": "test", + "script": { + "id": "c1d276a3-867a-43ba-8d82-629650317491", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"id\": \"1\",", + " \"result\": {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0x1c39ba39e4735cb65978d4db400ddd70a72dc750\",", + " \"gas\": \"0x13e99\",", + " \"input\": \"0x16c72721\",", + " \"to\": \"0x2bd2326c993dfaef84f696526064ff22eba5b362\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0x7eb25504e4c202cf3d62fd585d3e238f592c780cca82dacb2ed3cb5b38883add\",", + " \"blockNumber\": 3068185,", + " \"result\": {", + " \"gasUsed\": \"0x183\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 0", + " ],", + " \"transactionHash\": \"0x17104ac9d3312d8c136b7f44d4b8b47852618065ebfa534bd2d3b5ef218ca1f3\",", + " \"transactionPosition\": 2,", + " \"type\": \"call\"", + " }", + "}", + "", + "pm.test('Has correct result', function() {", + " pm.expect(pm.response.json()).to.be.deep.equal(expected);", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_get\",\n \"params\":[\n \"0x17104ac9d3312d8c136b7f44d4b8b47852618065ebfa534bd2d3b5ef218ca1f3\",\n [\"0x0\"]\n ],\n\t\"id\":\"1\"\n}\n", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns trace at given position.\r\n\r\n**Parameters**\r\n\r\nDATA, 32 Bytes - The transaction's hash.\r\n\r\nQUANTITYARRAY - The index position of the trace.\r\n\r\n**Returns**\r\n\r\nObject - An object of type AdhocTraceArray, see trace_filter." 
+ }, + "response": [] + }, + { + "name": "block", + "event": [ + { + "listen": "test", + "script": { + "id": "0ab5009a-3398-4d25-a894-862f86e10785", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"id\": \"1\",", + " \"result\": [", + " {", + " \"action\": {", + " \"author\": \"0x5088d623ba0fcf0131e0897a91734a4d83596aa0\",", + " \"rewardType\": \"block\",", + " \"value\": \"0x478eae0e571ba000\"", + " },", + " \"blockHash\": \"0x3d6122660cc824376f11ee842f83addc3525e2dd6756b9bcf0affa6aa88cf741\",", + " \"blockNumber\": 3,", + " \"result\": {},", + " \"subtraces\": 0,", + " \"traceAddress\": null,", + " \"transactionHash\": \"0x0000000000000000000000000000000000000000000000000000000000000000\",", + " \"transactionPosition\": 0,", + " \"type\": \"reward\"", + " },", + " {", + " \"action\": {", + " \"author\": \"0xc8ebccc5f5689fa8659d83713341e5ad19349448\",", + " \"rewardType\": \"uncle\",", + " \"value\": \"0x340aad21b3b70000\"", + " },", + " \"blockHash\": \"0x3d6122660cc824376f11ee842f83addc3525e2dd6756b9bcf0affa6aa88cf741\",", + " \"blockNumber\": 3,", + " \"result\": {},", + " \"subtraces\": 0,", + " \"traceAddress\": null,", + " \"transactionHash\": \"0x0000000000000000000000000000000000000000000000000000000000000000\",", + " \"transactionPosition\": 0,", + " \"type\": \"reward\"", + " }", + " ]", + "}", + "", + "pm.test('Has correct result', function() {", + " pm.expect(pm.response.json()).to.be.deep.equal(expected);", + "})" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_block\",\n\t\"params\":[\"0x3\"],\n\t\"id\":\"1\"\n}\n", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns traces created at given block.\r\n\r\n**Parameters**\r\n\r\nTAG - Integer of a block number, or the string 'earliest', 'latest' or 'pending'.\r\n\r\n**Returns**\r\n\r\nObject - An object of type AdhocTraceArray." 
+ }, + "response": [] + }, + { + "name": "filter", + "event": [ + { + "listen": "test", + "script": { + "id": "9b701d79-77b1-48fb-b8a7-4b38e6e63c5d", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"id\": \"1\",", + " \"result\": [", + " {", + " \"action\": {", + " \"author\": \"0x5088d623ba0fcf0131e0897a91734a4d83596aa0\",", + " \"rewardType\": \"block\",", + " \"value\": \"0x478eae0e571ba000\"", + " },", + " \"blockHash\": \"0x3d6122660cc824376f11ee842f83addc3525e2dd6756b9bcf0affa6aa88cf741\",", + " \"blockNumber\": 3,", + " \"result\": {},", + " \"subtraces\": 0,", + " \"traceAddress\": null,", + " \"transactionHash\": \"0x0000000000000000000000000000000000000000000000000000000000000000\",", + " \"transactionPosition\": 0,", + " \"type\": \"reward\"", + " },", + " {", + " \"action\": {", + " \"author\": \"0xc8ebccc5f5689fa8659d83713341e5ad19349448\",", + " \"rewardType\": \"uncle\",", + " \"value\": \"0x340aad21b3b70000\"", + " },", + " \"blockHash\": \"0x3d6122660cc824376f11ee842f83addc3525e2dd6756b9bcf0affa6aa88cf741\",", + " \"blockNumber\": 3,", + " \"result\": {},", + " \"subtraces\": 0,", + " \"traceAddress\": null,", + " \"transactionHash\": \"0x0000000000000000000000000000000000000000000000000000000000000000\",", + " \"transactionPosition\": 0,", + " \"type\": \"reward\"", + " }", + " ]", + "}", + "", + "pm.test('Has correct result', function() {", + " pm.expect(pm.response.json()).to.be.deep.equal(expected);", + "})" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_filter\",\n \"params\":[\n {\n \"fromBlock\":\"0x3\",\n \"toBlock\":\"0x3\"\n }\n ],\n\t\"id\":\"1\"\n}\n", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns traces matching given filter\r\n\r\n**Parameters**\r\n\r\nObject - An object of type TraceFilter\r\n\r\nfromBlock: TAG - (optional) From this block.\r\n\r\ntoBlock: TAG - (optional) To this block.\r\n\r\nfromAddress: DATA, 20 Bytes - (optional) Sent from these addresses.\r\n\r\ntoAddress: DATA, 20 Bytes - (optional) Sent to these addresses.\r\n\r\nafter: QUANTITY - (optional) The offset trace number\r\n\r\ncount: QUANTITY - (optional) Integer number of traces to display in a batch.\r\n\r\n**Returns**\r\nObject - An object of type AdHocTraceArray matching the given filter." 
+ }, + "response": [] + } + ], + "protocolProfileBehavior": {} + }, + { + "name": "erigon", + "item": [ + { + "name": "forks", + "event": [ + { + "listen": "test", + "script": { + "id": "331402b4-0302-4516-b601-b160484292b3", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"id\": \"1\",", + " \"result\": {", + " \"genesis\": \"0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3\",", + " \"forks\": [", + " 1150000,", + " 1920000,", + " 2463000,", + " 2675000,", + " 4370000,", + " 7280000,", + " 9069000,", + " 9200000", + " ]", + " }", + "}", + "", + "pm.test('Has correct result', function() {", + " pm.expect(pm.response.json()).to.be.deep.equal(expected);", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"erigon_forks\",\n\t\"params\":[],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns the genesis block hash and a sorted list of already passed fork block numbers as well as the next fork block (if applicable)\r\n\r\n**Parameters**\r\n\r\nNone\r\n\r\n**Returns**\r\n\r\nObject - An object of type Fork\r\n\r\ngenesis: DATA, 32 Bytes - The hash of the genesis block\r\n\r\npassed: ARRAY of QUANTITY - Array of block numbers passed by this client\r\n\r\nnext: QUANTITY - (optional) the next fork block" + }, + "response": [] + }, + { + "name": "getHeaderByNumber", + "event": [ + { + "listen": "test", + "script": { + "id": "8f7e9f2d-1508-4ce6-bb7e-ab697a69ce66", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"id\": \"1\",", + " \"result\": {", + " \"parentHash\": \"0xb495a1d7e6663152ae92708da4843337b958146015a2802f4193a410044698c9\",", + " \"sha3Uncles\": \"0x6b17b938c6e4ef18b26ad81b9ca3515f27fd9c4e82aac56a1fd8eab288785e41\",", + " \"miner\": \"0x5088d623ba0fcf0131e0897a91734a4d83596aa0\",", + " \"stateRoot\": \"0x76ab0b899e8387436ff2658e2988f83cbf1af1590b9fe9feca3714f8d1824940\",", + " \"transactionsRoot\": \"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421\",", + " \"receiptsRoot\": \"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421\",", + " \"logsBloom\": \"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\",", + " \"difficulty\": \"0x3fe802ffe\",", + " \"number\": \"0x3\",", + " \"gasLimit\": \"0x1388\",", + " \"gasUsed\": \"0x0\",", + " \"timestamp\": \"0x55ba4260\",", + " \"extraData\": \"0x476574682f76312e302e302d66633739643332642f6c696e75782f676f312e34\",", + " \"mixHash\": \"0x65e12eec23fe6555e6bcdb47aa25269ae106e5f16b54e1e92dcee25e1c8ad037\",", + " \"nonce\": \"0x2e9344e0cbde83ce\",", + " \"hash\": \"0x3d6122660cc824376f11ee842f83addc3525e2dd6756b9bcf0affa6aa88cf741\"", + " }", + "}", + "", + "pm.test('Has correct result', function() {", + " pm.expect(pm.response.json()).to.be.deep.equal(expected);", + "})", + "" + ], + "type": "text/javascript" + } 
+ } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"erigon_getHeaderByNumber\",\n\t\"params\":[\n\t\t\"0x3\"\n\t],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns a block's header given a block number ignoring the block's transaction and uncle list (may be faster).\r\n\r\n**Parameters**\r\n\r\nTAG - Integer block number or one of \"earliest\", \"latest\" or \"pending\"\r\n\r\n**Returns**\r\n\r\nObject - An object of type BlockHeader or null when no block was found. See eth_getBlockByHash" + }, + "response": [] + }, + { + "name": "getHeaderByHash", + "event": [ + { + "listen": "test", + "script": { + "id": "2ca80cf3-6a70-44ae-8741-3d8851096b65", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"id\": \"1\",", + " \"result\": {", + " \"parentHash\": \"0xb495a1d7e6663152ae92708da4843337b958146015a2802f4193a410044698c9\",", + " \"sha3Uncles\": \"0x6b17b938c6e4ef18b26ad81b9ca3515f27fd9c4e82aac56a1fd8eab288785e41\",", + " \"miner\": \"0x5088d623ba0fcf0131e0897a91734a4d83596aa0\",", + " \"stateRoot\": \"0x76ab0b899e8387436ff2658e2988f83cbf1af1590b9fe9feca3714f8d1824940\",", + " \"transactionsRoot\": \"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421\",", + " \"receiptsRoot\": \"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421\",", + " \"logsBloom\": \"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\",", + " \"difficulty\": \"0x3fe802ffe\",", + " \"number\": \"0x3\",", + " \"gasLimit\": \"0x1388\",", + " \"gasUsed\": \"0x0\",", + " \"timestamp\": \"0x55ba4260\",", + " \"extraData\": \"0x476574682f76312e302e302d66633739643332642f6c696e75782f676f312e34\",", + " \"mixHash\": \"0x65e12eec23fe6555e6bcdb47aa25269ae106e5f16b54e1e92dcee25e1c8ad037\",", + " \"nonce\": \"0x2e9344e0cbde83ce\",", + " \"hash\": \"0x3d6122660cc824376f11ee842f83addc3525e2dd6756b9bcf0affa6aa88cf741\"", + " }", + "}", + "", + "pm.test('Has correct result', function() {", + " pm.expect(pm.response.json()).to.be.deep.equal(expected);", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"erigon_getHeaderByHash\",\n\t\"params\":[\n\t\t\"0x3d6122660cc824376f11ee842f83addc3525e2dd6756b9bcf0affa6aa88cf741\"\n\t],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns a block's header given a block's hash.\r\n\r\n**Parameters**\r\n\r\nDATA, 32 Bytes - Hash of a block\r\n\r\n**Returns**\r\n\r\nObject - An object of type BlockHeader or null when no block was found. 
See eth_getBlockByHash" +                    }, +                    "response": [] +                }, +                { +                    "name": "getLogsByHash", +                    "event": [ +                        { +                            "listen": "test", +                            "script": { +                                "id": "6a55ab5e-fa04-4e14-b7f9-1b387ee51188", +                                "exec": [ +                                    "var expected = [", +                                    "    null,", +                                    "    [", +                                    "        {", +                                    "            \"address\": \"0xb8c77482e45f1f44de1745f52c74426c631bdd52\",", +                                    "            \"topics\": [", +                                    "                \"0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef\",", +                                    "                \"0x000000000000000000000000001866ae5b3de6caa5a51543fd9fb64f524f5478\",", +                                    "                \"0x00000000000000000000000016a9c11e229ce221578a9adb3e7c0a48482e8063\"", +                                    "            ],", +                                    "            \"data\": \"0x00000000000000000000000000000000000000000000021ea4a7ecbf3c280000\",", +                                    "            \"blockNumber\": \"0x3d0cec\",", +                                    "            \"transactionHash\": \"0x99f91752d50d0c2c92e681fda082843747e8284d846f8b623e4cd280fbd7bb65\",", +                                    "            \"transactionIndex\": \"0x2\",", +                                    "            \"blockHash\": \"0x2f244c154cbacb0305581295b80efa6dffb0224b60386a5fc6ae9585e2a140c4\",", +                                    "            \"logIndex\": \"0x0\",", +                                    "            \"removed\": false", +                                    "        }", +                                    "    ]", +                                    "]", +                                    "", +                                    "pm.test('Has correct result', function() {", +                                    "    // We test just two log entries to keep the test case small", +                                    "    var jsonData = pm.response.json();", +                                    "    pm.expect(jsonData.result[0]).to.be.deep.equal(expected[0]);", +                                    "    pm.expect(jsonData.result[2]).to.be.deep.equal(expected[1]);", +                                    "})", +                                    "" +                                ], +                                "type": "text/javascript" +                            } +                        } +                    ], +                    "request": { +                        "method": "POST", +                        "header": [ +                            { +                                "key": "Content-Type", +                                "value": "application/json" +                            } +                        ], +                        "body": { +                            "mode": "raw", +                            "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"erigon_getLogsByHash\",\n\t\"params\":[\n\t\t\"0x2f244c154cbacb0305581295b80efa6dffb0224b60386a5fc6ae9585e2a140c4\"\n\t],\n\t\"id\":\"1\"\n}", +                            "options": { +                                "raw": {} +                            } +                        }, +                        "url": { +                            "raw": "{{HOST}}", +                            "host": ["{{HOST}}"] +                        }, +                        "description": "Returns an array of arrays of logs generated by the transactions in the block given by the block's hash.\r\n\r\n**Note**\r\n\r\nThe returned value is an array of arrays of log entries. There is an entry for each transaction in the block.\r\n\r\nIf transaction X did not create any logs, the entry at result[X] will be null\r\n\r\nIf transaction X generated N logs, the entry at position result[X] will be an array of N log objects\r\n\r\n**Parameters**\r\n\r\nDATA, 32 Bytes - Hash of block at which to retrieve data\r\n\r\n**Returns**\r\n\r\nObject - An object of type LogArray some of which may be null found in the block. 
See eth_getFilterChanges" + }, + "response": [] + }, + { + "name": "issuance", + "event": [ + { + "listen": "test", + "script": { + "id": "b5a34317-4baa-4fb9-95a8-83f4f757c842", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"id\": \"1\",", + " \"result\": {", + " \"blockReward\": \"0x478eae0e571ba000\",", + " \"uncleReward\": \"0x340aad21b3b70000\",", + " \"issuance\": \"0x7b995b300ad2a000\"", + " }", + "}", + "", + "pm.test('Has correct result', function() {", + " pm.expect(pm.response.json()).to.be.deep.equal(expected);", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"erigon_issuance\",\n\t\"params\":[\n\t\t\"0x3\"\n\t],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns the total issuance (block reward plus uncle reward) for the given block.\r\n\r\n**Parameters**\r\n\r\nTAG - Integer block number or one of \"earliest\", \"latest\" or \"pending\"\r\n\r\n**Returns**\r\n\r\nObject - An object of type Issuance\r\n\r\nblockReward: QUANTITY - The issuance to the miner of the block (includes nephew reward but not transaction fees)\r\n\r\nuncleReward: QUANTITY - The issuance to miners of included uncle (if any)\r\n\r\nissuance: QUANTITY - The sum of blockReward and uncleReward" + }, + "response": [] + } + ], + "protocolProfileBehavior": {} + }, + { + "name": "debug", + "item": [ + { + "name": "storageRangeAt", + "event": [ + { + "listen": "test", + "script": { + "id": "c4bcaf47-dd81-42af-9bbd-9256ba908426", + "exec": [ + "var isSilk = pm.environment.get('HOST') == \"{{SILKRPC}}\";", + "if (isSilk) {", + " utils.notImplemented(\"debug_storageRangeAt\", pm.response.json())", + " return;", + "}", + "var isErigon = pm.environment.get('HOST') == \"{{ERIGON}}\";", + "if (isErigon) {", + " utils.cannotTest(\"debug_accountRange\", pm.response.json())", + " return;", + "}", + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"id\": \"1\",", + " \"result\": {", + " \"storage\": {", + " \"0x290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563\": {", + " \"key\": \"0x0000000000000000000000000000000000000000000000000000000000000000\",", + " \"value\": \"0x000000000000000000000000ed2f1401f8994d3ff2b2a923e743c24c2914ab4f\"", + " },", + " \"0xb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf6\": {", + " \"key\": \"0x0000000000000000000000000000000000000000000000000000000000000001\",", + " \"value\": \"0x000000000000000000000000739c71235a9669f6b900490ab1c95310c19abc71\"", + " }", + " },", + " \"nextKey\": \"0x0000000000000000000000000000000000000000000000000000000000000002\"", + " }", + "}", + "", + "pm.test('Has correct result', function() {", + " pm.expect(pm.response.json()).to.be.deep.equal(expected);", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"debug_storageRangeAt\",\n\t\"params\":[\n\t\t\"0xd3f1853788b02e31067f2c6e65cb0ae56729e23e3c92e2393af9396fa182701d\", \n 1,\n \"0xb734c74ff4087493373a27834074f80acbd32827\",\n\t\t\"0x00\",\n 2\n\t],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + 
}, +                        "description": "Returns information about a range of storage locations (if any) for the given address.\r\n\r\n**Parameters**\r\n\r\nDATA, 32 Bytes - Hash of block at which to retrieve data\r\n\r\nQUANTITY, 8 bytes - Transaction index in the given block\r\n\r\nDATA, 20 Bytes - Contract address from which to retrieve storage data\r\n\r\nDATA, 32 Bytes - Storage key to retrieve\r\n\r\nQUANTITY, 8 bytes - The number of values to retrieve\r\n\r\n**Returns**\r\n\r\nObject - An object of type StorageRangeResult which is defined as\r\n\r\npair: KEY/VALUE - A key value pair of the storage location\r\n\r\nnextKey: DATA, 32 Bytes - (optional) Hash pointing to next storage pair or empty" +                    }, +                    "response": [] +                }, +                { +                    "name": "accountRange", +                    "event": [ +                        { +                            "listen": "test", +                            "script": { +                                "id": "8fa3bd0b-1c56-4fd5-b46b-66d52a22d7fc", +                                "exec": [ +                                    "var expected = {", +                                    "    \"jsonrpc\": \"2.0\",", +                                    "    \"id\": \"1\",", +                                    "    \"result\": {", +                                    "        \"root\": \"0x8d8f6ffa5f2e55c0f8f0b88c3421d647e497f3ee0d66825f3f7433d7e244dde8\",", +                                    "        \"accounts\": {", +                                    "            \"0x0000000000000000000000000000000000000001\": {", +                                    "                \"balance\": \"0\",", +                                    "                \"nonce\": 0,", +                                    "                \"root\": \"0000000000000000000000000000000000000000000000000000000000000000\",", +                                    "                \"codeHash\": \"c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470\"", +                                    "            }", +                                    "        },", +                                    "        \"next\": \"AAAAAAAAAAAAAAAAAAAAAAAAAAI=\"", +                                    "    }", +                                    "}", +                                    "", +                                    "pm.test('Has correct result', function() {", +                                    "    pm.expect(pm.response.json()).to.be.deep.equal(expected);", +                                    "})", +                                    "" +                                ], +                                "type": "text/javascript" +                            } +                        } +                    ], +                    "request": { +                        "method": "POST", +                        "header": [ +                            { +                                "key": "Content-Type", +                                "value": "application/json" +                            } +                        ], +                        "body": { +                            "mode": "raw", +                            "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"debug_accountRange\",\n\t\"params\":[\n\t\t\"0xaaaaa\", \n\t\t[1],\n        1,\n        true,\n        true,\n        true\n\t],\n\t\"id\":\"1\"\n}", +                            "options": { +                                "raw": {} +                            } +                        }, +                        "url": { +                            "raw": "{{HOST}}", +                            "host": ["{{HOST}}"] +                        }, +                        "description": "Returns a range of accounts involved in the given block range\r\n\r\n**Parameters**\r\n\r\nTAG - Integer block number or one of \"earliest\", \"latest\" or \"pending\"\r\n\r\nDATAARRAY - an array of prefixes against which to match account addresses (report only on account addresses that begin with this prefix, default matches all accounts)\r\n\r\nQUANTITY, 8 bytes - the maximum number of accounts to retrieve\r\n\r\nBoolean - if true, do not return byte code from the address, if false return the byte code (if any)\r\n\r\nBoolean - if true, do not return storage from the address, if false return storage (if any)\r\n\r\nBoolean - if true, do not return missing preimages, if false do return them\r\n\r\n**Returns**\r\n\r\nObject - An object of type IteratorDump which is defined as\r\n\r\nroot: string - IteratorDump\r\n\r\naccounts: map[common.Address]DumpAccount - IteratorDump\r\n\r\nnext: []byte - IteratorDump\r\n\r\nbalance: string - DumpAccount\r\n\r\nnonce: uint64 - DumpAccount\r\n\r\nroot: string - DumpAccount\r\n\r\ncodeHash: string - DumpAccount\r\n\r\ncode: string - DumpAccount\r\n\r\nstorage: map[string]string - DumpAccount\r\n\r\naddress: common.Address - (optional) DumpAccount\r\n\r\nsecureKey: hexutil.Bytes - DumpAccount\r\n\r\n" +                    }, +                    "response": [] +                }, +                { +                    "name": "getModifiedAccountsByNumber", +                    "event": [ +                        { +                            "listen": "test", +                            "script": { +                                "id": "019465f3-a3d5-457c-bd86-4f50b02e518c", +                                "exec": [ +                                    "var isSilk = pm.environment.get('HOST') == \"{{SILKRPC}}\";", +                                    "if (isSilk) {", +                                    "    
utils.notImplemented(\"debug_getModifiedAccountsByNumber\", pm.response.json())", + " return;", + "}", + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"id\": \"1\",", + " \"result\": [", + " \"0x8764b360076809bba4635c4281c3f44c1677d013\",", + " \"0x1194e966965418c7d73a42cceeb254d875860356\",", + " \"0x42e6723a0c884e922240e56d7b618bec96f35800\",", + " \"0x52bc44d5378309ee2abf1539bf71de1b7d7be3b5\",", + " \"0xdf88d2cf450e1134e0cd794c3b89d648c3269ffc\",", + " \"0x2a65aca4d5fc5b5c859090a6c34d164135398226\",", + " \"0x68795c4aa09d6f4ed3e5deddf8c2ad3049a601da\",", + " \"0x8751355da8bb4854620e247904fc64c2dbff0484\"", + " ]", + "}", + "", + "pm.test('Has correct result', function() {", + " const jsonData = pm.response.json();", + " jsonData.result = jsonData.result.sort();", + " expected.result = expected.result.sort();", + " pm.expect(jsonData).to.be.deep.equal(expected);", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"debug_getModifiedAccountsByNumber\",\n\t\"params\":[\n\t\t\"0xccccd\",\n\t\t\"0xcccce\"\n\t],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns a list of accounts modified in the given block.\r\n\r\n**Parameters**\r\n\r\nTAG - Integer block number or one of \"earliest\", \"latest\" or \"pending\"\r\n\r\nTAG - Integer block number or one of \"earliest\", \"latest\" or \"pending\". Optional, defaults to startNum\r\n\r\n**Returns**\r\n\r\nArray of DATA, 20 Bytes - Array of addresses modifed in the given block range" + }, + "response": [] + }, + { + "name": "getModifiedAccountsByHash", + "event": [ + { + "listen": "test", + "script": { + "id": "bbbf909f-9ce2-4558-8e29-abc5ac1f5899", + "exec": [ + "var isSilk = pm.environment.get('HOST') == \"{{SILKRPC}}\";", + "if (isSilk) {", + " utils.notImplemented(\"debug_getModifiedAccountsByHash\", pm.response.json())", + " return;", + "}", + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"id\": \"1\",", + " \"result\": [", + " \"0x8764b360076809bba4635c4281c3f44c1677d013\",", + " \"0x1194e966965418c7d73a42cceeb254d875860356\",", + " \"0x42e6723a0c884e922240e56d7b618bec96f35800\",", + " \"0x52bc44d5378309ee2abf1539bf71de1b7d7be3b5\",", + " \"0xdf88d2cf450e1134e0cd794c3b89d648c3269ffc\",", + " \"0x2a65aca4d5fc5b5c859090a6c34d164135398226\",", + " \"0x68795c4aa09d6f4ed3e5deddf8c2ad3049a601da\",", + " \"0x8751355da8bb4854620e247904fc64c2dbff0484\"", + " ]", + "}", + "", + "pm.test('Has correct result', function() {", + " const jsonData = pm.response.json();", + " jsonData.result = jsonData.result.sort();", + " expected.result = expected.result.sort();", + " pm.expect(jsonData).to.be.deep.equal(expected);", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"debug_getModifiedAccountsByHash\",\n\t\"params\":[\n\t\t\"0x2a1af018e33bcbd5015c96a356117a5251fcccf94a9c7c8f0148e25fdee37aec\",\n\t\t\"0x4e3d3e7eee350df0ee6e94a44471ee2d22cfb174db89bbf8e6c5f6aef7b360c5\"\n\t],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns a list of accounts modified in the 
given block.\r\n\r\n**Parameters**\r\n\r\nDATA, 32 Bytes - the first hash of block at which to retreive data\r\n\r\nDATA, 32 Bytes - the last hash of block at which to retreive data. Optional, defaults to startHash\r\n\r\n**Returns**\r\n\r\nArray of DATA, 20 Bytes - Array of addresses modifed in the given block range" + }, + "response": [] + }, + { + "name": "traceTransaction", + "event": [ + { + "listen": "test", + "script": { + "id": "a2e80bc5-85c6-4415-8e06-22ebe0d310cd", + "exec": [ + "var isSilk = pm.environment.get('HOST') == \"{{SILKRPC}}\";", + "if (isSilk) {", + " utils.notImplemented(\"debug_traceTransaction\", pm.response.json())", + " return;", + "}", + "var expected = [", + " {", + " \"pc\": 0,", + " \"op\": \"PUSH1\",", + " \"gas\": 179000,", + " \"gasCost\": 3,", + " \"depth\": 1,", + " \"stack\": [],", + " \"memory\": [],", + " \"storage\": {}", + " },", + " {", + " \"pc\": 2,", + " \"op\": \"PUSH1\",", + " \"gas\": 178997,", + " \"gasCost\": 3,", + " \"depth\": 1,", + " \"stack\": [", + " \"0000000000000000000000000000000000000000000000000000000000000060\"", + " ],", + " \"memory\": [],", + " \"storage\": {}", + " },", + " {", + " \"pc\": 284,", + " \"op\": \"STOP\",", + " \"gas\": 81142,", + " \"gasCost\": 0,", + " \"depth\": 1,", + " \"stack\": [],", + " \"memory\": [", + " \"0000000000000000000000000000000000000000000000000000000000000003\",", + " \"0000000000000000000000000000000000000000000000000000000000000000\",", + " \"0000000000000000000000000000000000000000000000000000000000000060\"", + " ],", + " \"storage\": {", + " \"0000000000000000000000000000000000000000000000000000000000000000\": \"0000000000000000000000000000000000000000000000000000000000000000\",", + " \"0000000000000000000000000000000000000000000000000000000000000001\": \"0000000000000000000000000000000000000000000000000000000000000001\",", + " \"0000000000000000000000000000000000000000000000000000000000000002\": \"0000000000000000000000000000000000000000000000000000000000000001\",", + " \"0000000000000000000000000000000000000000000000000000000000000003\": \"0000000000000000000000000000000000000000000000000000000000000006\",", + " \"c2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b\": \"000000000000000000000000881b0a4e9c55d08e31d8d3c022144d75a454211c\",", + " \"c2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85c\": \"000000000000000000000000fd2605a2bf58fdbb90db1da55df61628b47f9e8c\"", + " }", + " }", + "]", + "", + "pm.test('Has correct result', function() {", + " // because the returned data is pretty large, we only test the first two value and the last", + " var jsonData = pm.response.json()", + " pm.expect(jsonData.result.structLogs[0]).to.be.deep.equal(expected[0]);", + " pm.expect(jsonData.result.structLogs[1]).to.be.deep.equal(expected[1]);", + " pm.expect(jsonData.result.structLogs[jsonData.result.structLogs.length-1]).to.be.deep.equal(expected[2]);", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"debug_traceTransaction\",\n\t\"params\":[\n\t\t\"0x893c428fed019404f704cf4d9be977ed9ca01050ed93dccdd6c169422155586f\"\n\t],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns Geth style transaction traces.\r\n\r\n**Parameters**\r\n\r\nDATA, 32 Bytes - hash of transaction to 
trace.\r\n\r\n**Returns**\r\n\r\nSTACK_TRACE - An array of stack traces as per Geth" + }, + "response": [] + } + ], + "protocolProfileBehavior": {} + }, + { + "name": "deprecated", + "item": [ + { + "name": "eth", + "item": [ + { + "name": "accounts (deprecated)", + "event": [ + { + "listen": "test", + "script": { + "id": "49fab8c4-6858-4475-89f9-2c06a0acaaa0", + "exec": ["utils.isDeprecated(\"eth_accounts\", pm.response.json())", ""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_accounts\",\n\t\"params\":[],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns a list of addresses owned by the client.\r\n\r\n**Deprecated** This function will be removed in the future.\r\n\r\n**Parameters**\r\n\r\nNone\r\n\r\n**Returns**\r\n\r\nArray of DATA, 20 Bytes - addresses owned by the client" + }, + "response": [] + }, + { + "name": "getCompilers (deprecated)", + "event": [ + { + "listen": "test", + "script": { + "id": "71e1fac7-5027-4ec7-8a6f-b7ebba79ebc7", + "exec": ["utils.isDeprecated(\"eth_getCompilers\", pm.response.json())", ""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_getCompilers\",\n\t\"params\":[],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns a list of available compilers in the client.\r\n\r\n**Deprecated** This function will be removed in the future.\r\n\r\n**Parameters**\r\n\r\nNone\r\n\r\n**Returns**\r\n\r\nObject - An object of type StringArray of available compilers" + }, + "response": [] + }, + { + "name": "compileLLL (deprecated)", + "event": [ + { + "listen": "test", + "script": { + "id": "c143eb2a-869c-4d61-b77a-f1d96e35867d", + "exec": ["utils.isDeprecated(\"eth_compileLLL\", pm.response.json())", ""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_compileLLL\",\n\t\"params\":[\n\t\t\"(returnlll (suicide (caller)))\"\n\t],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns compiled LLL code.\r\n\r\n**Deprecated** This function will be removed in the future.\r\n\r\n**Parameters**\r\n\r\nString - The source code\r\n\r\n**Returns**\r\n\r\nDATA - The compiled source code" + }, + "response": [] + }, + { + "name": "compileSolidity (deprecated)", + "event": [ + { + "listen": "test", + "script": { + "id": "a225f789-727a-45b7-8233-b83fa9710f0b", + "exec": ["utils.isDeprecated(\"eth_compileSolidity\", pm.response.json())", ""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_compileSolidity\",\n\t\"params\":[\n\t\t\"contract test { function multiply(uint a) returns(uint d) { return a * 7; } }\"\n\t],\n\t\"id\":\"1\"\n}", + "options": { + 
"raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns compiled solidity code.\r\n\r\n**Deprecated** This function will be removed in the future.\r\n\r\n**Parameters**\r\n\r\nString - The source code\r\n\r\n**Returns**\r\n\r\nDATA - The compiled source code" + }, + "response": [] + }, + { + "name": "compileSerpent (deprecated)", + "event": [ + { + "listen": "test", + "script": { + "id": "281f3795-1854-47a9-b256-2e14f32ebff6", + "exec": ["utils.isDeprecated(\"eth_compileSerpent\", pm.response.json())", ""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_compileSerpent\",\n\t\"params\":[\"/* some serpent */\"],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns compiled serpent code.\r\n\r\n**Deprecated** This function will be removed in the future.\r\n\r\n**Parameters**\r\n\r\nString - The source code\r\n\r\n**Returns**\r\n\r\nDATA - The compiled source code" + }, + "response": [] + }, + { + "name": "sign (deprecated)", + "event": [ + { + "listen": "test", + "script": { + "id": "f3a959a5-3f2a-417b-ab6f-101ca25235ab", + "exec": ["utils.isDeprecated(\"eth_sign\", pm.response.json())", ""], + "type": "text/javascript" + } + }, + { + "listen": "prerequest", + "script": { + "id": "ccbe7bce-2ee4-4872-884d-884de423d002", + "exec": [ + "var isParity = pm.environment.get('HOST') == \"{{PARITY}}\";", + "if (isParity) {", + " pm.test.skip('Skipping for parity')", + "}" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"eth_sign\",\n\t\"params\":[\n\t\t\"0x9b2055d370f73ec7d8a03e965129118dc8f5bf83\", \n\t\t\"0xdeadbeef\"\n\t],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Calculates an Ethereum specific signature with: sign(keccak256(\"\\x19Ethereum Signed Message:\\n\" + len(message) + message))).\r\n\r\n**Deprecated** This function will be removed in the future.\r\n\r\n**Parameters**\r\n\r\nDATA, 20 Bytes - address\r\n\r\nDATA - message to sign\r\n\r\n**Returns**\r\n\r\nDATA - The signature" + }, + "response": [] + } + ], + "protocolProfileBehavior": {}, + "_postman_isSubFolder": true + }, + { + "name": "db", + "item": [ + { + "name": "getString (deprecated)", + "event": [ + { + "listen": "test", + "script": { + "id": "adc610c7-58da-4b14-86eb-5ad2b7e1bb42", + "exec": ["utils.isDeprecated(\"db_getString\", pm.response.json())", ""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"db_getString\",\n\t\"params\":[\n\t\t\"testDB\",\n\t\t\"myKey\"\n\t],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns string from the local database.\r\n\r\n**Deprecated** This function will be removed in the future.\r\n\r\n**Parameters**\r\n\r\nString - Database name\r\n\r\nString - Key name\r\n\r\n**Returns**\r\n\r\nSTRING - The 
previously stored string" + }, + "response": [] + }, + { + "name": "putString (deprecated)", + "event": [ + { + "listen": "test", + "script": { + "id": "42952899-d220-432e-9c2f-5fd8b7f63a10", + "exec": ["utils.isDeprecated(\"db_putString\", pm.response.json())", ""], + "type": "text/javascript" + } + }, + { + "listen": "prerequest", + "script": { + "id": "765518a5-fcb0-4c40-bfd9-91a7dabaa24c", + "exec": [""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"db_putString\",\n\t\"params\":[\n\t\t\"testDB\",\n\t\t\"myKey\",\n\t\t\"myString\"\n\t],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Stores a string in the local database.\r\n\r\n**Deprecated** This function will be removed in the future.\r\n\r\n**Parameters**\r\n\r\nString - Database name\r\n\r\nString - Key name\r\n\r\nString - String to store\r\n\r\n**Returns**\r\n\r\nBoolean - true if the value was stored, false otherwise" + }, + "response": [] + }, + { + "name": "getHex (deprecated)", + "event": [ + { + "listen": "test", + "script": { + "id": "1877532c-58ef-49e6-9adc-298e68e8e519", + "exec": ["utils.isDeprecated(\"db_getHex\", pm.response.json());"], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"db_getHex\"\n\t,\"params\":[\n\t\t\"testDB\",\n\t\t\"myKey\"\n\t],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns binary data from the local database.\r\n\r\n**Deprecated** This function will be removed in the future.\r\n\r\n**Parameters**\r\n\r\nString - Database name\r\n\r\nString - Key name\r\n\r\n**Returns**\r\n\r\nDATA - The previously stored data" + }, + "response": [] + }, + { + "name": "putHex (deprecated)", + "event": [ + { + "listen": "test", + "script": { + "id": "eb8f901f-11f6-40f1-96ba-db322d1bc017", + "exec": ["utils.isDeprecated(\"db_putHex\", pm.response.json())", ""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"db_putHex\",\n\t\"params\":[\n\t\t\"testDB\",\n\t\t\"myKey\",\n\t\t\"0x68656c6c6f20776f726c64\"\n\t],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Stores binary data in the local database.\r\n\r\n**Deprecated** This function will be removed in the future.\r\n\r\n**Parameters**\r\n\r\nString - Database name\r\n\r\nString - Key name\r\n\r\nDATA - The data to store\r\n\r\n**Returns**\r\n\r\nBoolean - true if the value was stored, false otherwise" + }, + "response": [] + } + ], + "protocolProfileBehavior": {}, + "_postman_isSubFolder": true + }, + { + "name": "shh", + "item": [ + { + "name": "post (deprecated)", + "event": [ + { + "listen": "test", + "script": { + "id": "6f40e9ca-755e-42e3-9532-c629c98d7038", + "exec": ["utils.isDeprecated(\"shh_post\", pm.response.json());", ""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": 
"Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"shh_post\",\n\t\"params\":[{\n\t\t\"from\":\"0xc931d93e97ab07fe42d923478ba2465f2..\",\n\t\t\"topics\": [\n\t\t\t\"0x68656c6c6f20776f726c64\"\n\t\t],\n\t\t\"payload\":\"0x68656c6c6f20776f726c64\",\n\t\t\"ttl\":\"0x64\",\n\t\t\"priority\":\"0x64\"\n\t}],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Sends a whisper message.\r\n\r\n**Deprecated** This function will be removed in the future.\r\n\r\n**Parameters**\r\n\r\nObject - An object of type Post\r\n\r\n**Returns**\r\n\r\nBoolean - true if the message was send, false otherwise" + }, + "response": [] + }, + { + "name": "version (deprecated)", + "event": [ + { + "listen": "test", + "script": { + "id": "4d2835ac-ef75-4a3e-ac48-8e6afa2508cb", + "exec": ["utils.isDeprecated(\"shh_version\", pm.response.json());", ""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"shh_version\",\n\t\"params\":[],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Returns the current whisper protocol version.\r\n\r\n**Deprecated** This function will be removed in the future.\r\n\r\n**Parameters**\r\n\r\nNone\r\n\r\n**Returns**\r\n\r\nSTRING - The current whisper protocol version" + }, + "response": [] + }, + { + "name": "newIdentity (deprecated)", + "event": [ + { + "listen": "test", + "script": { + "id": "d5ca5bc1-1972-4479-a5cb-ea621c40c1f2", + "exec": ["utils.isDeprecated(\"shh_newIdentity\", pm.response.json());", ""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"shh_newIdentity\",\n\t\"params\":[],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Creates new whisper identity in the client.\r\n\r\n**Deprecated** This function will be removed in the future.\r\n\r\n**Parameters**\r\n\r\nNone\r\n\r\n**Returns**\r\n\r\nDATA, 60 Bytes - The address of the new identiy" + }, + "response": [] + }, + { + "name": "hasIdentity (deprecated)", + "event": [ + { + "listen": "test", + "script": { + "id": "237a0212-f467-4bc7-825d-ce8eb97d02e7", + "exec": ["utils.isDeprecated(\"shh_hasIdentity\", pm.response.json());", ""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"shh_hasIdentity\",\n\t\"params\":[\n\t\t\"0x04f96a5e25610293e42a73908e93ccc8c4d4dc0edcfa9fa872f50cb214e08ebf61a03e245533f97284d442460f2998cd41858798ddfd4d661997d3940272b717b1\"\n\t],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Checks if the client hold the private keys for a given identity.\r\n\r\n**Deprecated** This function will be removed in the future.\r\n\r\n**Parameters**\r\n\r\nDATA, 60 Bytes - The identity address to check\r\n\r\n**Returns**\r\n\r\nBoolean - 
true if the client holds the private key for that identity, false otherwise" + }, + "response": [] + }, + { + "name": "newGroup (deprecated)", + "event": [ + { + "listen": "test", + "script": { + "id": "498a4713-d7bf-4849-a794-bcb4ae1b13f6", + "exec": ["utils.isDeprecated(\"shh_newGroup\", pm.response.json());", ""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"shh_newGroup\",\n\t\"params\":[],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Creates a new group.\r\n\r\n**Deprecated** This function will be removed in the future.\r\n\r\n**Parameters**\r\n\r\nNone\r\n\r\n**Returns**\r\n\r\nDATA, 60 Bytes - The address of the new group" + }, + "response": [] + }, + { + "name": "addToGroup (deprecated)", + "event": [ + { + "listen": "test", + "script": { + "id": "59a5f8d0-6cb0-4948-9a94-a67494d56deb", + "exec": ["utils.isDeprecated(\"shh_addToGroup\", pm.response.json());", ""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"shh_addToGroup\",\n\t\"params\":[\n\t\t\"0x04f96a5e25610293e42a73908e93ccc8c4d4dc0edcfa9fa872f50cb214e08ebf61a03e245533f97284d442460f2998cd41858798ddfd4d661997d3940272b717b1\"\n\t],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Adds an identity to a group.\r\n\r\n**Deprecated** This function will be removed in the future.\r\n\r\n**Parameters**\r\n\r\nDATA, 60 Bytes - The identity address to add to a group\r\n\r\n**Returns**\r\n\r\nBoolean - true if the identity was successfully added to the group, false otherwise" + }, + "response": [] + }, + { + "name": "newFilter (deprecated)", + "event": [ + { + "listen": "test", + "script": { + "id": "a3c1325c-f738-473f-b981-7a8f271377bd", + "exec": ["utils.isDeprecated(\"shh_newFilter\", pm.response.json());", ""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"shh_newFilter\",\n\t\"params\":[{\n\t\t\"topics\": [\n\t\t\t\"0x12341234bf4b564f\"\n\t\t],\n\t\t\"to\": \"0x2341234bf4b2341234bf4b564f...\"\n\t}],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Creates a filter to notify when the client receives a whisper message matching the filter options.\r\n\r\n**Deprecated** This function will be removed in the future.\r\n\r\n**Parameters**\r\n\r\nObject - An object of type Filter\r\n\r\n**Returns**\r\n\r\nQUANTITY - The newly created filter id" + }, + "response": [] + }, + { + "name": "uninstallFilter (deprecated)", + "event": [ + { + "listen": "test", + "script": { + "id": "1f635382-7c93-456e-a4e0-6c9a31c3ff3e", + "exec": ["utils.isDeprecated(\"shh_uninstallFilter\", pm.response.json());", ""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw":
"{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"shh_uninstallFilter\",\n\t\"params\":[\n\t\t\"0x7\"\n\t],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Uninstalls a filter with given id.\r\n\r\n**Deprecated** This function will be removed in the future.\r\n\r\n**Parameters**\r\n\r\nQUANTITY - The filter id\r\n\r\n**Returns**\r\n\r\nBoolean - true if the filter was successfully uninstalled, false otherwise" + }, + "response": [] + }, + { + "name": "getFilterChanges (deprecated)", + "event": [ + { + "listen": "test", + "script": { + "id": "1b86fec4-c310-4ad4-b87d-d6bcaa3e707c", + "exec": ["utils.isDeprecated(\"shh_getFilterChanges\", pm.response.json());", ""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"shh_getFilterChanges\",\n\t\"params\":[\n\t\t\"0x7\"\n\t],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Polling method for whisper filters. Returns new messages since the last call of this method.\r\n\r\n**Deprecated** This function will be removed in the future.\r\n\r\n**Parameters**\r\n\r\nQUANTITY - The filter id\r\n\r\n**Returns**\r\n\r\nObject - An object of type MessageArray received since last poll" + }, + "response": [] + }, + { + "name": "getMessages (deprecated)", + "event": [ + { + "listen": "test", + "script": { + "id": "8cdf20b9-4b07-43ad-a96e-66d49cacb651", + "exec": ["utils.isDeprecated(\"shh_getMessages\", pm.response.json());", ""], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"shh_getMessages\",\n\t\"params\":[\n\t\t\"0x7\"\n\t],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": ["{{HOST}}"] + }, + "description": "Get all messages matching a filter. 
Unlike shh_getFilterChanges this returns all messages.\r\n\r\n**Deprecated** This function will be removed in the future.\r\n\r\n**Parameters**\r\n\r\nQUANTITY - The filter id\r\n\r\n**Returns**\r\n\r\nObject - An object of type MessageArray received since last poll" + }, + "response": [] + } + ], + "protocolProfileBehavior": {}, + "_postman_isSubFolder": true + } + ], + "description": "RPC commands in this group have been deprecated.", + "event": [ + { + "listen": "prerequest", + "script": { + "id": "f3715e8c-8219-4b4c-a797-283787c030da", + "type": "text/javascript", + "exec": [""] + } + }, + { + "listen": "test", + "script": { + "id": "b1b0fe57-01a2-480a-a5bf-fd11942fd43c", + "type": "text/javascript", + "exec": [""] + } + } + ], + "protocolProfileBehavior": {} + } + ], + "event": [ + { + "listen": "prerequest", + "script": { + "id": "28916081-d267-4803-b88f-38f0cfac83f3", + "type": "text/javascript", + "exec": [ + "utils = {", + " notImplemented: function(methodName, jsonData) {", + " var isErigon = pm.environment.get('HOST') == \"{{ERIGON}}\";", + " var isSilk = pm.environment.get('HOST') == \"{{SILKRPC}}\";", + " if (!isErigon && !isSilk) // only test erigon", + " return;", + "", + " var testNotImplemented = pm.globals.get('TEST_NOT_IMPLEMENTED') === 'true';", + " if (testNotImplemented) { // defaults to false, therefore don't test", + " pm.test('NOT IMPLEMENTED', function() {", + " pm.expect(false).to.be(true);", + " })", + " } else {", + " // pass unless user has explicitly told us to test not implemented", + " var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"id\": \"1\",", + " \"error\": {", + " \"code\": -32000,", + " \"message\": \"the method is currently not implemented: \" + methodName", + " }", + " }", + " if (jsonData.error)", + " delete jsonData.error.data;", + " pm.test('NOT IMPLEMENTED', function() {", + " pm.expect(jsonData).to.deep.equals(expected);", + " })", + " }", + " },", + "", + " isDeprecated: function(methodName, jsonData) {", + " var isErigon = pm.environment.get('HOST') == \"{{ERIGON}}\";", + " var isSilk = pm.environment.get('HOST') == \"{{SILKRPC}}\";", + " if (!isErigon && !isSilk) // only test erigon", + " return;", + "", + " var testDeprecated = pm.globals.get('TEST_DEPRECATED') === 'true';", + " if (testDeprecated) { // defaults to false, therefore don't test", + " pm.test('DEPRECATED', function() {", + " console.log(\"testDeprecated2: \", testDeprecated)", + " pm.expect(false).to.be(true);", + " })", + " } else {", + " // pass unless user has explicitly told us to fail deprecated", + " var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"id\": \"1\",", + " \"error\": {", + " \"code\": -32000,", + " \"message\": \"the method has been deprecated: \" + methodName", + " }", + " }", + " if (jsonData.error)", + " delete jsonData.error.data;", + " pm.test('DEPRECATED', function() {", + " pm.expect(jsonData).to.deep.equals(expected);", + " })", + " }", + " },", + "", + " cannotTest: function(methodName, jsonData) {", + " var isErigon = pm.environment.get('HOST') == \"{{ERIGON}}\";", + " var isSilk = pm.environment.get('HOST') == \"{{SILKRPC}}\";", + " if (!isErigon && !isSilk) // only test erigon", + " return;", + "", + " var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"id\": \"1\",", + " \"result\": \"Cannot test - value changes\"", + " }", + " pm.test('VALUE CHANGES, CANNOT TEST: ' + methodName, function() {", + " jsonData.result = \"Cannot test - value changes\";", + " pm.expect(jsonData).to.deep.equals(expected);", + " })", + " },", + "};" + ] + } + 
}, + { + "listen": "test", + "script": { + "id": "be6e47aa-dcea-4eaf-941f-889669172f43", + "type": "text/javascript", + "exec": [ + "pm.test('Base tests', function() {", + " const jsonData = pm.response.json();", + " pm.response.to.have.status(200);", + " pm.expect(jsonData !== null)", + " jsonData.errors == null || pm.expect(jsonData.errors).to.be.empty;", + "})", + "" + ] + } + } + ], + "protocolProfileBehavior": {} +} diff --git a/cmd/rpcdaemon22/postman/Trace_Testing.json b/cmd/rpcdaemon22/postman/Trace_Testing.json new file mode 100644 index 00000000000..74e7221beb9 --- /dev/null +++ b/cmd/rpcdaemon22/postman/Trace_Testing.json @@ -0,0 +1,7474 @@ +{ + "info": { + "_postman_id": "7b2a3a4b-0c75-4b99-8e8b-4237bcbd2494", + "name": "Trace Testing", + "description": "Tests related to tracing", + "schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json" + }, + "item": [ + { + "name": "trace_call", + "item": [ + { + "name": "trace_call - all", + "event": [ + { + "listen": "test", + "script": { + "id": "cad5e0e8-19aa-4c85-b322-fe4e9e40f0f7", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"result\": {", + " \"output\": \"0x\",", + " \"stateDiff\": {", + " \"0x407d73d8a49eeb85d32cf465507dd71d507100c1\": {", + " \"balance\": {", + " \"+\": \"0x0\"", + " },", + " \"code\": {", + " \"+\": \"0x\"", + " },", + " \"nonce\": {", + " \"+\": \"0x1\"", + " },", + " \"storage\": {}", + " },", + " \"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\": {", + " \"balance\": {", + " \"+\": \"0x186a0\"", + " },", + " \"code\": {", + " \"+\": \"0x\"", + " },", + " \"nonce\": {", + " \"+\": \"0x0\"", + " },", + " \"storage\": {}", + " }", + " },", + " \"trace\": [", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0x407d73d8a49eeb85d32cf465507dd71d507100c1\",", + " \"gas\": \"0x1dcd12f8\",", + " \"input\": \"0x\",", + " \"to\": \"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\",", + " \"value\": \"0x186a0\"", + " },", + " \"result\": {", + " \"gasUsed\": \"0x0\",", + " \"output\": \"0x\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [],", + " \"type\": \"call\"", + " }", + " ],", + " \"vmTrace\": {", + " \"code\": \"0x\",", + " \"ops\": []", + " }", + " },", + " \"id\": \"1\"", + "}", + "", + "pm.test('Has correct result', function() {", + " pm.expect(pm.response.json()).to.be.deep.equal(expected);", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"jsonrpc\":\"2.0\",\n \"method\":\"trace_call\",\n \"params\":[\n {\n \"from\":\"0x407d73d8a49eeb85d32cf465507dd71d507100c1\",\n \"to\":\"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\",\n \"value\":\"0x186a0\"\n },\n [\"trace\",\"vmTrace\",\"stateDiff\"],\n \"0x186a0\"\n ],\n \"id\": \"1\"\n}\n", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": [ + "{{HOST}}" + ] + } + }, + "response": [] + }, + { + "name": "trace_call - none", + "event": [ + { + "listen": "test", + "script": { + "id": "b5c127ba-f385-4ae5-a779-038281427a49", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"result\": {", + " \"output\": \"0x\",", + " \"stateDiff\": null,", + " \"trace\": [],", + " \"vmTrace\": null", + " },", + " \"id\": \"1\"", + "}", + "", + "pm.test('Has correct result', function() {", + " pm.expect(pm.response.json()).to.be.deep.equal(expected);", + "})", + "" + ], + "type": "text/javascript" + } + } + 
], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"jsonrpc\":\"2.0\",\n \"method\":\"trace_call\",\n \"params\":[\n {\n \"from\":\"0x407d73d8a49eeb85d32cf465507dd71d507100c1\",\n \"to\":\"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\",\n \"value\":\"0x186a0\"\n },\n [],\n \"0x186a0\"\n ],\n \"id\": \"1\"\n}\n", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": [ + "{{HOST}}" + ] + } + }, + "response": [] + }, + { + "name": "trace_call - trace only", + "event": [ + { + "listen": "test", + "script": { + "id": "719796d3-02f9-499d-b22d-c7f42f9fa80a", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"result\": {", + " \"output\": \"0x\",", + " \"stateDiff\": null,", + " \"trace\": [", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0x407d73d8a49eeb85d32cf465507dd71d507100c1\",", + " \"gas\": \"0x1dcd12f8\",", + " \"input\": \"0x\",", + " \"to\": \"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\",", + " \"value\": \"0x186a0\"", + " },", + " \"result\": {", + " \"gasUsed\": \"0x0\",", + " \"output\": \"0x\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [],", + " \"type\": \"call\"", + " }", + " ],", + " \"vmTrace\": null", + " },", + " \"id\": \"1\"", + "}", + "", + "pm.test('Has correct result', function() {", + " pm.expect(pm.response.json()).to.be.deep.equal(expected);", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"jsonrpc\":\"2.0\",\n \"method\":\"trace_call\",\n \"params\":[\n {\n \"from\":\"0x407d73d8a49eeb85d32cf465507dd71d507100c1\",\n \"to\":\"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\",\n \"value\":\"0x186a0\"\n },\n [\"trace\"],\n \"0x186a0\"\n ],\n \"id\": \"1\"\n}\n", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": [ + "{{HOST}}" + ] + } + }, + "response": [] + }, + { + "name": "trace_call - vmTrace only", + "event": [ + { + "listen": "test", + "script": { + "id": "bf873e95-ad20-42ef-b5da-71ef503f314c", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"result\": {", + " \"output\": \"0x\",", + " \"stateDiff\": null,", + " \"trace\": [],", + " \"vmTrace\": {", + " \"code\": \"0x\",", + " \"ops\": []", + " }", + " },", + " \"id\": \"1\"", + "}", + "", + "pm.test('Has correct result', function() {", + " pm.expect(pm.response.json()).to.be.deep.equal(expected);", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"jsonrpc\":\"2.0\",\n \"method\":\"trace_call\",\n \"params\":[\n {\n \"from\":\"0x407d73d8a49eeb85d32cf465507dd71d507100c1\",\n \"to\":\"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\",\n \"value\":\"0x186a0\"\n },\n [\"vmTrace\"],\n \"0x186a0\"\n ],\n \"id\": \"1\"\n}\n", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": [ + "{{HOST}}" + ] + } + }, + "response": [] + }, + { + "name": "trace_call - stateDiff only", + "event": [ + { + "listen": "test", + "script": { + "id": "eabf15c8-247b-4bfb-acfd-81c1851fa9d7", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"result\": {", + " \"output\": \"0x\",", + " \"stateDiff\": {", + " 
\"0x407d73d8a49eeb85d32cf465507dd71d507100c1\": {", + " \"balance\": {", + " \"+\": \"0x0\"", + " },", + " \"code\": {", + " \"+\": \"0x\"", + " },", + " \"nonce\": {", + " \"+\": \"0x1\"", + " },", + " \"storage\": {}", + " },", + " \"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\": {", + " \"balance\": {", + " \"+\": \"0x186a0\"", + " },", + " \"code\": {", + " \"+\": \"0x\"", + " },", + " \"nonce\": {", + " \"+\": \"0x0\"", + " },", + " \"storage\": {}", + " }", + " },", + " \"trace\": [],", + " \"vmTrace\": null", + " },", + " \"id\": \"1\"", + "}", + "", + "pm.test('Has correct result', function() {", + " pm.expect(pm.response.json()).to.be.deep.equal(expected);", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n \"jsonrpc\":\"2.0\",\n \"method\":\"trace_call\",\n \"params\":[\n {\n \"from\":\"0x407d73d8a49eeb85d32cf465507dd71d507100c1\",\n \"to\":\"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\",\n \"value\":\"0x186a0\"\n },\n [\"stateDiff\"],\n \"0x186a0\"\n ],\n \"id\": \"1\"\n}\n", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": [ + "{{HOST}}" + ] + } + }, + "response": [] + } + ], + "protocolProfileBehavior": {} + }, + { + "name": "trace_callMany", + "item": [ + { + "name": "trace_callMany - all", + "event": [ + { + "listen": "test", + "script": { + "id": "7949387e-4c36-4942-a5a7-1759d7c43975", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"result\": [", + " {", + " \"output\": \"0x\",", + " \"stateDiff\": {", + " \"0x407d73d8a49eeb85d32cf465507dd71d507100c1\": {", + " \"balance\": {", + " \"+\": \"0x0\"", + " },", + " \"code\": {", + " \"+\": \"0x\"", + " },", + " \"nonce\": {", + " \"+\": \"0x1\"", + " },", + " \"storage\": {}", + " },", + " \"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\": {", + " \"balance\": {", + " \"+\": \"0x186a0\"", + " },", + " \"code\": {", + " \"+\": \"0x\"", + " },", + " \"nonce\": {", + " \"+\": \"0x0\"", + " },", + " \"storage\": {}", + " }", + " },", + " \"trace\": [", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0x407d73d8a49eeb85d32cf465507dd71d507100c1\",", + " \"gas\": \"0x1dcd12f8\",", + " \"input\": \"0x\",", + " \"to\": \"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\",", + " \"value\": \"0x186a0\"", + " },", + " \"result\": {", + " \"gasUsed\": \"0x0\",", + " \"output\": \"0x\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [],", + " \"type\": \"call\"", + " }", + " ],", + " \"vmTrace\": {", + " \"code\": \"0x\",", + " \"ops\": []", + " }", + " },", + " {", + " \"output\": \"0x\",", + " \"stateDiff\": {", + " \"0x407d73d8a49eeb85d32cf465507dd71d507100c1\": {", + " \"balance\": \"=\",", + " \"code\": \"=\",", + " \"nonce\": {", + " \"*\": {", + " \"from\": \"0x1\",", + " \"to\": \"0x2\"", + " }", + " },", + " \"storage\": {}", + " },", + " \"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\": {", + " \"balance\": {", + " \"*\": {", + " \"from\": \"0x186a0\",", + " \"to\": \"0x30d40\"", + " }", + " },", + " \"code\": \"=\",", + " \"nonce\": \"=\",", + " \"storage\": {}", + " }", + " },", + " \"trace\": [", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0x407d73d8a49eeb85d32cf465507dd71d507100c1\",", + " \"gas\": \"0x1dcd12f8\",", + " \"input\": \"0x\",", + " \"to\": \"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\",", + " \"value\": \"0x186a0\"", + " },", + " \"result\": {", + " 
\"gasUsed\": \"0x0\",", + " \"output\": \"0x\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [],", + " \"type\": \"call\"", + " }", + " ],", + " \"vmTrace\": {", + " \"code\": \"0x\",", + " \"ops\": []", + " }", + " }", + " ],", + " \"id\": \"1\"", + "}", + "", + "pm.test('Has correct result', function() {", + " pm.expect(pm.response.json()).to.be.deep.equal(expected);", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_callMany\",\n \"params\": [\n [\n [\n {\n \"from\":\"0x407d73d8a49eeb85d32cf465507dd71d507100c1\",\n \"to\":\"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\",\n \"value\":\"0x186a0\"\n },\n [\"trace\",\"vmTrace\",\"stateDiff\"]\n ],\n [\n {\n \"from\":\"0x407d73d8a49eeb85d32cf465507dd71d507100c1\",\n \"to\":\"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\",\n \"value\":\"0x186a0\"\n },\n [\"trace\",\"vmTrace\",\"stateDiff\"]\n ]\n ],\n \"0x186a0\"\n ],\n\t\"id\":\"1\"\n}\n", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": [ + "{{HOST}}" + ] + } + }, + "response": [] + }, + { + "name": "trace_callMany - none", + "event": [ + { + "listen": "test", + "script": { + "id": "1670fbac-fbed-4c5d-9e4f-cacf151fab86", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"result\": [", + " {", + " \"output\": \"0x\",", + " \"stateDiff\": null,", + " \"trace\": [],", + " \"vmTrace\": null", + " },", + " {", + " \"output\": \"0x\",", + " \"stateDiff\": null,", + " \"trace\": [],", + " \"vmTrace\": null", + " }", + " ],", + " \"id\": \"1\"", + "}", + "", + "pm.test('Has correct result', function() {", + " pm.expect(pm.response.json()).to.be.deep.equal(expected);", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_callMany\",\n \"params\": [\n [\n [\n {\n \"from\":\"0x407d73d8a49eeb85d32cf465507dd71d507100c1\",\n \"to\":\"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\",\n \"value\":\"0x186a0\"\n },\n []\n ],\n [\n {\n \"from\":\"0x407d73d8a49eeb85d32cf465507dd71d507100c1\",\n \"to\":\"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\",\n \"value\":\"0x186a0\"\n },\n []\n ]\n ],\n \"0x186a0\"\n ],\n\t\"id\":\"1\"\n}\n", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": [ + "{{HOST}}" + ] + } + }, + "response": [] + }, + { + "name": "trace_callMany - trace only", + "event": [ + { + "listen": "test", + "script": { + "id": "1e12f8e6-f089-458c-9b8f-9cc17d1f2828", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"result\": [", + " {", + " \"output\": \"0x\",", + " \"stateDiff\": null,", + " \"trace\": [", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0x407d73d8a49eeb85d32cf465507dd71d507100c1\",", + " \"gas\": \"0x1dcd12f8\",", + " \"input\": \"0x\",", + " \"to\": \"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\",", + " \"value\": \"0x186a0\"", + " },", + " \"result\": {", + " \"gasUsed\": \"0x0\",", + " \"output\": \"0x\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [],", + " \"type\": \"call\"", + " }", + " ],", + " \"vmTrace\": null", + " },", + " {", + " \"output\": \"0x\",", + " \"stateDiff\": null,", + " \"trace\": [", + " {", + " \"action\": {", + " 
\"callType\": \"call\",", + " \"from\": \"0x407d73d8a49eeb85d32cf465507dd71d507100c1\",", + " \"gas\": \"0x1dcd12f8\",", + " \"input\": \"0x\",", + " \"to\": \"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\",", + " \"value\": \"0x186a0\"", + " },", + " \"result\": {", + " \"gasUsed\": \"0x0\",", + " \"output\": \"0x\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [],", + " \"type\": \"call\"", + " }", + " ],", + " \"vmTrace\": null", + " }", + " ],", + " \"id\": \"1\"", + "}", + "", + "pm.test('Has correct result', function() {", + " pm.expect(pm.response.json()).to.be.deep.equal(expected);", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_callMany\",\n \"params\": [\n [\n [\n {\n \"from\":\"0x407d73d8a49eeb85d32cf465507dd71d507100c1\",\n \"to\":\"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\",\n \"value\":\"0x186a0\"\n },\n [\"trace\"]\n ],\n [\n {\n \"from\":\"0x407d73d8a49eeb85d32cf465507dd71d507100c1\",\n \"to\":\"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\",\n \"value\":\"0x186a0\"\n },\n [\"trace\"]\n ]\n ],\n \"0x186a0\"\n ],\n\t\"id\":\"1\"\n}\n", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": [ + "{{HOST}}" + ] + } + }, + "response": [] + }, + { + "name": "trace_callMany - vmTrace only", + "event": [ + { + "listen": "test", + "script": { + "id": "27cc3046-2d7c-4b7a-ae7d-f1b11008fc4c", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"result\": [", + " {", + " \"output\": \"0x\",", + " \"stateDiff\": null,", + " \"trace\": [],", + " \"vmTrace\": {", + " \"code\": \"0x\",", + " \"ops\": []", + " }", + " },", + " {", + " \"output\": \"0x\",", + " \"stateDiff\": null,", + " \"trace\": [],", + " \"vmTrace\": {", + " \"code\": \"0x\",", + " \"ops\": []", + " }", + " }", + " ],", + " \"id\": \"1\"", + "}", + "", + "pm.test('Has correct result', function() {", + " pm.expect(pm.response.json()).to.be.deep.equal(expected);", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_callMany\",\n \"params\": [\n [\n [\n {\n \"from\":\"0x407d73d8a49eeb85d32cf465507dd71d507100c1\",\n \"to\":\"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\",\n \"value\":\"0x186a0\"\n },\n [\"vmTrace\"]\n ],\n [\n {\n \"from\":\"0x407d73d8a49eeb85d32cf465507dd71d507100c1\",\n \"to\":\"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\",\n \"value\":\"0x186a0\"\n },\n [\"vmTrace\"]\n ]\n ],\n \"0x186a0\"\n ],\n\t\"id\":\"1\"\n}\n", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": [ + "{{HOST}}" + ] + } + }, + "response": [] + }, + { + "name": "trace_callMany - stateDiff only", + "event": [ + { + "listen": "test", + "script": { + "id": "d944881a-3184-4b85-a047-a1ce1ec115cd", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"result\": [", + " {", + " \"output\": \"0x\",", + " \"stateDiff\": {", + " \"0x407d73d8a49eeb85d32cf465507dd71d507100c1\": {", + " \"balance\": {", + " \"+\": \"0x0\"", + " },", + " \"code\": {", + " \"+\": \"0x\"", + " },", + " \"nonce\": {", + " \"+\": \"0x1\"", + " },", + " \"storage\": {}", + " },", + " \"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\": {", + " \"balance\": {", + " \"+\": 
\"0x186a0\"", + " },", + " \"code\": {", + " \"+\": \"0x\"", + " },", + " \"nonce\": {", + " \"+\": \"0x0\"", + " },", + " \"storage\": {}", + " }", + " },", + " \"trace\": [],", + " \"vmTrace\": null", + " },", + " {", + " \"output\": \"0x\",", + " \"stateDiff\": {", + " \"0x407d73d8a49eeb85d32cf465507dd71d507100c1\": {", + " \"balance\": \"=\",", + " \"code\": \"=\",", + " \"nonce\": {", + " \"*\": {", + " \"from\": \"0x1\",", + " \"to\": \"0x2\"", + " }", + " },", + " \"storage\": {}", + " },", + " \"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\": {", + " \"balance\": {", + " \"*\": {", + " \"from\": \"0x186a0\",", + " \"to\": \"0x30d40\"", + " }", + " },", + " \"code\": \"=\",", + " \"nonce\": \"=\",", + " \"storage\": {}", + " }", + " },", + " \"trace\": [],", + " \"vmTrace\": null", + " }", + " ],", + " \"id\": \"1\"", + "}", + "", + "pm.test('Has correct result', function() {", + " pm.expect(pm.response.json()).to.be.deep.equal(expected);", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_callMany\",\n \"params\": [\n [\n [\n {\n \"from\":\"0x407d73d8a49eeb85d32cf465507dd71d507100c1\",\n \"to\":\"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\",\n \"value\":\"0x186a0\"\n },\n [\"stateDiff\"]\n ],\n [\n {\n \"from\":\"0x407d73d8a49eeb85d32cf465507dd71d507100c1\",\n \"to\":\"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\",\n \"value\":\"0x186a0\"\n },\n [\"stateDiff\"]\n ]\n ],\n \"0x1e8480\"\n ],\n\t\"id\":\"1\"\n}\n", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": [ + "{{HOST}}" + ] + } + }, + "response": [] + } + ], + "protocolProfileBehavior": {} + }, + { + "name": "trace_replayTransaction", + "item": [ + { + "name": "trace_replayTransaction - all", + "event": [ + { + "listen": "test", + "script": { + "id": "0df08365-de62-444f-a2a8-1585c6b2d9b1", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"result\": {", + " \"output\": \"0x\",", + " \"stateDiff\": {", + " \"0x00a63d34051602b2cb268ea344d4b8bc4767f2d4\": {", + " \"balance\": {", + " \"*\": {", + " \"from\": \"0x52b7cb1385ccf49b2b\",", + " \"to\": \"0x5236bafcfeb4e73b2b\"", + " }", + " },", + " \"code\": \"=\",", + " \"nonce\": {", + " \"*\": {", + " \"from\": \"0xc6f\",", + " \"to\": \"0xc70\"", + " }", + " },", + " \"storage\": {}", + " },", + " \"0x1a060b0604883a99809eb3f798df71bef6c358f1\": {", + " \"balance\": {", + " \"*\": {", + " \"from\": \"0x6f9b59db405cf2c70\",", + " \"to\": \"0x6f9b71bb0e49d6c70\"", + " }", + " },", + " \"code\": \"=\",", + " \"nonce\": \"=\",", + " \"storage\": {}", + " },", + " \"0x87cc0d78ee64a9f11b5affdd9ea523872eae14e4\": {", + " \"balance\": {", + " \"*\": {", + " \"from\": \"0x3afccb788fcd0e00\",", + " \"to\": \"0xbc0b6402c90c2e00\"", + " }", + " },", + " \"code\": \"=\",", + " \"nonce\": \"=\",", + " \"storage\": {}", + " }", + " },", + " \"trace\": [", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0x00a63d34051602b2cb268ea344d4b8bc4767f2d4\",", + " \"gas\": \"0x0\",", + " \"input\": \"0x\",", + " \"to\": \"0x87cc0d78ee64a9f11b5affdd9ea523872eae14e4\",", + " \"value\": \"0x810e988a393f2000\"", + " },", + " \"result\": {", + " \"gasUsed\": \"0x0\",", + " \"output\": \"0x\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [],", + " \"type\": \"call\"", + " }", + " ],", + " \"vmTrace\": {", + " \"code\": 
\"0x\",", + " \"ops\": []", + " }", + " },", + " \"id\": \"1\"", + "}", + "", + "pm.test('Has correct result', function() {", + " pm.expect(pm.response.json()).to.be.deep.equal(expected);", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_replayTransaction\",\n\t\"params\":[\n \"0x02d4a872e096445e80d05276ee756cefef7f3b376bcec14246469c0cd97dad8f\",\n [\"trace\",\"vmTrace\",\"stateDiff\"]\n ],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": [ + "{{HOST}}" + ] + } + }, + "response": [] + }, + { + "name": "trace_replayTransaction - none", + "event": [ + { + "listen": "test", + "script": { + "id": "5f84df2a-b6ab-45ca-9b18-f4f9534c1458", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"result\": {", + " \"output\": \"0x\",", + " \"stateDiff\": null,", + " \"trace\": [],", + " \"vmTrace\": null", + " },", + " \"id\": \"1\"", + "}", + "", + "pm.test('Has correct result', function() {", + " pm.expect(pm.response.json()).to.be.deep.equal(expected);", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_replayTransaction\",\n\t\"params\":[\n \"0x02d4a872e096445e80d05276ee756cefef7f3b376bcec14246469c0cd97dad8f\",\n []\n ],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": [ + "{{HOST}}" + ] + } + }, + "response": [] + }, + { + "name": "trace_replayTransaction - trace only", + "event": [ + { + "listen": "test", + "script": { + "id": "33278201-153b-490c-acb2-07b233baac25", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"result\": {", + " \"output\": \"0x\",", + " \"stateDiff\": null,", + " \"trace\": [", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0x00a63d34051602b2cb268ea344d4b8bc4767f2d4\",", + " \"gas\": \"0x0\",", + " \"input\": \"0x\",", + " \"to\": \"0x87cc0d78ee64a9f11b5affdd9ea523872eae14e4\",", + " \"value\": \"0x810e988a393f2000\"", + " },", + " \"result\": {", + " \"gasUsed\": \"0x0\",", + " \"output\": \"0x\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [],", + " \"type\": \"call\"", + " }", + " ],", + " \"vmTrace\": null", + " },", + " \"id\": \"1\"", + "}", + "", + "pm.test('Has correct result', function() {", + " pm.expect(pm.response.json()).to.be.deep.equal(expected);", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_replayTransaction\",\n\t\"params\":[\n \"0x02d4a872e096445e80d05276ee756cefef7f3b376bcec14246469c0cd97dad8f\",\n [\"trace\"]\n ],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": [ + "{{HOST}}" + ] + } + }, + "response": [] + }, + { + "name": "trace_replayTransaction - vmTrace only", + "event": [ + { + "listen": "test", + "script": { + "id": "a84b1a91-ee56-46ee-b4fa-231f7aad455e", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"result\": {", + " \"output\": \"0x\",", + " \"stateDiff\": null,", + " 
\"trace\": [],", + " \"vmTrace\": {", + " \"code\": \"0x\",", + " \"ops\": []", + " }", + " },", + " \"id\": \"1\"", + "}", + "", + "pm.test('Has correct result', function() {", + " pm.expect(pm.response.json()).to.be.deep.equal(expected);", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_replayTransaction\",\n\t\"params\":[\n \"0x02d4a872e096445e80d05276ee756cefef7f3b376bcec14246469c0cd97dad8f\",\n [\"vmTrace\"]\n ],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": [ + "{{HOST}}" + ] + } + }, + "response": [] + }, + { + "name": "trace_replayTransaction - stateDiff only", + "event": [ + { + "listen": "test", + "script": { + "id": "915e86b3-5172-44b3-87d9-9025c074ea5b", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"result\": {", + " \"output\": \"0x\",", + " \"stateDiff\": {", + " \"0x00a63d34051602b2cb268ea344d4b8bc4767f2d4\": {", + " \"balance\": {", + " \"*\": {", + " \"from\": \"0x52b7cb1385ccf49b2b\",", + " \"to\": \"0x5236bafcfeb4e73b2b\"", + " }", + " },", + " \"code\": \"=\",", + " \"nonce\": {", + " \"*\": {", + " \"from\": \"0xc6f\",", + " \"to\": \"0xc70\"", + " }", + " },", + " \"storage\": {}", + " },", + " \"0x1a060b0604883a99809eb3f798df71bef6c358f1\": {", + " \"balance\": {", + " \"*\": {", + " \"from\": \"0x6f9b59db405cf2c70\",", + " \"to\": \"0x6f9b71bb0e49d6c70\"", + " }", + " },", + " \"code\": \"=\",", + " \"nonce\": \"=\",", + " \"storage\": {}", + " },", + " \"0x87cc0d78ee64a9f11b5affdd9ea523872eae14e4\": {", + " \"balance\": {", + " \"*\": {", + " \"from\": \"0x3afccb788fcd0e00\",", + " \"to\": \"0xbc0b6402c90c2e00\"", + " }", + " },", + " \"code\": \"=\",", + " \"nonce\": \"=\",", + " \"storage\": {}", + " }", + " },", + " \"trace\": [],", + " \"vmTrace\": null", + " },", + " \"id\": \"1\"", + "}", + "", + "pm.test('Has correct result', function() {", + " pm.expect(pm.response.json()).to.be.deep.equal(expected);", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_replayTransaction\",\n\t\"params\":[\n \"0x02d4a872e096445e80d05276ee756cefef7f3b376bcec14246469c0cd97dad8f\",\n [\"stateDiff\"]\n ],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": [ + "{{HOST}}" + ] + } + }, + "response": [] + } + ], + "protocolProfileBehavior": {} + }, + { + "name": "trace_replayBlockTransactions", + "item": [ + { + "name": "trace_replayBlockTransactions - all", + "event": [ + { + "listen": "test", + "script": { + "id": "38dda4a1-afc6-4d61-ae10-d59496d10eb3", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"result\": [", + " {", + " \"output\": \"0x\",", + " \"stateDiff\": {", + " \"0x104994f45d9d697ca104e5704a7b77d7fec3537c\": {", + " \"balance\": {", + " \"+\": \"0x821878651a4d70000\"", + " },", + " \"code\": {", + " \"+\": \"0x\"", + " },", + " \"nonce\": {", + " \"+\": \"0x0\"", + " },", + " \"storage\": {}", + " },", + " \"0x32be343b94f860124dc4fee278fdcbd38c102d88\": {", + " \"balance\": {", + " \"*\": {", + " \"from\": \"0x29dd8f1fcd55eef7fe5c\",", + " \"to\": \"0x29d56d960a08fbeb9e5c\"", + " }", + " },", + " \"code\": 
\"=\",", + " \"nonce\": {", + " \"*\": {", + " \"from\": \"0x1efc5\",", + " \"to\": \"0x1efc6\"", + " }", + " },", + " \"storage\": {}", + " },", + " \"0x61c808d82a3ac53231750dadc13c777b59310bd9\": {", + " \"balance\": {", + " \"*\": {", + " \"from\": \"0x16d21cbe94fc6c3ebf7\",", + " \"to\": \"0x16d21ce264b14f94bf7\"", + " }", + " },", + " \"code\": \"=\",", + " \"nonce\": \"=\",", + " \"storage\": {}", + " }", + " },", + " \"trace\": [", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0x32be343b94f860124dc4fee278fdcbd38c102d88\",", + " \"gas\": \"0x4c40d\",", + " \"input\": \"0x\",", + " \"to\": \"0x104994f45d9d697ca104e5704a7b77d7fec3537c\",", + " \"value\": \"0x821878651a4d70000\"", + " },", + " \"result\": {", + " \"gasUsed\": \"0x0\",", + " \"output\": \"0x\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [],", + " \"type\": \"call\"", + " }", + " ],", + " \"transactionHash\": \"0xc55e2b90168af6972193c1f86fa4d7d7b31a29c156665d15b9cd48618b5177ef\",", + " \"vmTrace\": {", + " \"code\": \"0x\",", + " \"ops\": []", + " }", + " }", + " ],", + " \"id\": \"1\"", + "}", + "", + "pm.test('Has correct result', function() {", + " pm.expect(pm.response.json()).to.be.deep.equal(expected);", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_replayBlockTransactions\",\n\t\"params\":[\n \"0x1e8480\",\n [\"trace\",\"vmTrace\",\"stateDiff\"]\n ],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": [ + "{{HOST}}" + ] + } + }, + "response": [] + }, + { + "name": "trace_replayBlockTransactions - none", + "event": [ + { + "listen": "test", + "script": { + "id": "7ae24d9b-a87c-4e22-a604-9c20e6641ee5", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"result\": [", + " {", + " \"output\": \"0x\",", + " \"stateDiff\": null,", + " \"trace\": [],", + " \"transactionHash\": \"0xc55e2b90168af6972193c1f86fa4d7d7b31a29c156665d15b9cd48618b5177ef\",", + " \"vmTrace\": null", + " }", + " ],", + " \"id\": \"1\"", + "}", + "", + "pm.test('Has correct result', function() {", + " pm.expect(pm.response.json()).to.be.deep.equal(expected);", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_replayBlockTransactions\",\n\t\"params\":[\n \"0x1e8480\",\n []\n ],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": [ + "{{HOST}}" + ] + } + }, + "response": [] + }, + { + "name": "trace_replayBlockTransactions - trace only", + "event": [ + { + "listen": "test", + "script": { + "id": "ed4c941b-54ee-487d-8aff-7d0ecb750523", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"result\": [", + " {", + " \"output\": \"0x\",", + " \"stateDiff\": null,", + " \"trace\": [", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0x32be343b94f860124dc4fee278fdcbd38c102d88\",", + " \"gas\": \"0x4c40d\",", + " \"input\": \"0x\",", + " \"to\": \"0x104994f45d9d697ca104e5704a7b77d7fec3537c\",", + " \"value\": \"0x821878651a4d70000\"", + " },", + " \"result\": {", + " \"gasUsed\": \"0x0\",", + " \"output\": \"0x\"", + " },", + " \"subtraces\": 0,", + " 
\"traceAddress\": [],", + " \"type\": \"call\"", + " }", + " ],", + " \"transactionHash\": \"0xc55e2b90168af6972193c1f86fa4d7d7b31a29c156665d15b9cd48618b5177ef\",", + " \"vmTrace\": null", + " }", + " ],", + " \"id\": \"1\"", + "}", + "", + "pm.test('Has correct result', function() {", + " pm.expect(pm.response.json()).to.be.deep.equal(expected);", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_replayBlockTransactions\",\n\t\"params\":[\n \"0x1e8480\",\n [\"trace\"]\n ],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": [ + "{{HOST}}" + ] + } + }, + "response": [] + }, + { + "name": "trace_replayBlockTransactions - vmTrace only", + "event": [ + { + "listen": "test", + "script": { + "id": "b3703c29-7bb8-4d08-b757-efbb4de8243d", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"result\": [", + " {", + " \"output\": \"0x\",", + " \"stateDiff\": null,", + " \"trace\": [],", + " \"transactionHash\": \"0xc55e2b90168af6972193c1f86fa4d7d7b31a29c156665d15b9cd48618b5177ef\",", + " \"vmTrace\": {", + " \"code\": \"0x\",", + " \"ops\": []", + " }", + " }", + " ],", + " \"id\": \"1\"", + "}", + "", + "pm.test('Has correct result', function() {", + " pm.expect(pm.response.json()).to.be.deep.equal(expected);", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_replayBlockTransactions\",\n\t\"params\":[\n \"0x1e8480\",\n [\"vmTrace\"]\n ],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": [ + "{{HOST}}" + ] + } + }, + "response": [] + }, + { + "name": "trace_replayBlockTransactions - stateDiff only", + "event": [ + { + "listen": "test", + "script": { + "id": "ac907f46-3f6d-436e-b0fb-3ab294e6c33f", + "exec": [ + "utils.cannotTest(\"trace_rawTransaction - all\", pm.response.json())", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_replayBlockTransactions\",\n\t\"params\":[\n \"0x1e8480\",\n [\"stateDiff\"]\n ],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": [ + "{{HOST}}" + ] + } + }, + "response": [] + } + ], + "protocolProfileBehavior": {} + }, + { + "name": "trace_rawTransaction", + "item": [ + { + "name": "trace_rawTransaction - all", + "event": [ + { + "listen": "test", + "script": { + "id": "daca0279-5627-47e0-abb3-b1d0e0e3e1ef", + "exec": [ + "utils.cannotTest(\"trace_rawTransaction - all\", pm.response.json())", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_rawTransaction\",\n\t\"params\":[\n 
\"0xf86d09850cf032900f83030d4094109c4f2ccc82c4d77bde15f306707320294aea3f880de0b6b3a7640000801ca02da49aa24d7fa6fa876af59d77acfd60537eba478654934430b1b32893b65c85a02cdc152d81b71f25fd23e3e271c8c0b15a3a91ce104b6af35bd476d1e6d26fdf\",\n [\"trace\",\"vmTrace\",\"stateDiff\"]\n ],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": [ + "{{HOST}}" + ] + } + }, + "response": [] + }, + { + "name": "trace_rawTransaction - none", + "event": [ + { + "listen": "test", + "script": { + "id": "7a69f86d-f9db-4377-b650-e4fc0cf09253", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"result\": {", + " \"output\": \"0x\",", + " \"stateDiff\": null,", + " \"trace\": [],", + " \"vmTrace\": null", + " },", + " \"id\": \"1\"", + "}", + "", + "pm.test('Has correct result', function() {", + " pm.expect(pm.response.json()).to.be.deep.equal(expected);", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_rawTransaction\",\n\t\"params\":[\n \"0xf86d09850cf032900f83030d4094109c4f2ccc82c4d77bde15f306707320294aea3f880de0b6b3a7640000801ca02da49aa24d7fa6fa876af59d77acfd60537eba478654934430b1b32893b65c85a02cdc152d81b71f25fd23e3e271c8c0b15a3a91ce104b6af35bd476d1e6d26fdf\",\n []\n ],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": [ + "{{HOST}}" + ] + } + }, + "response": [] + }, + { + "name": "trace_rawTransaction - trace only", + "event": [ + { + "listen": "test", + "script": { + "id": "7ddd07c3-edfc-4a21-ba38-308efc7fb782", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"result\": {", + " \"output\": \"0x\",", + " \"stateDiff\": null,", + " \"trace\": [", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xfd2605a2bf58fdbb90db1da55df61628b47f9e8c\",", + " \"gas\": \"0x2bb38\",", + " \"input\": \"0x\",", + " \"to\": \"0x109c4f2ccc82c4d77bde15f306707320294aea3f\",", + " \"value\": \"0xde0b6b3a7640000\"", + " },", + " \"result\": {", + " \"gasUsed\": \"0x9325\",", + " \"output\": \"0x\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [],", + " \"type\": \"call\"", + " }", + " ],", + " \"vmTrace\": null", + " },", + " \"id\": \"1\"", + "}", + "", + "pm.test('Has correct result', function() {", + " pm.expect(pm.response.json()).to.be.deep.equal(expected);", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_rawTransaction\",\n\t\"params\":[\n \"0xf86d09850cf032900f83030d4094109c4f2ccc82c4d77bde15f306707320294aea3f880de0b6b3a7640000801ca02da49aa24d7fa6fa876af59d77acfd60537eba478654934430b1b32893b65c85a02cdc152d81b71f25fd23e3e271c8c0b15a3a91ce104b6af35bd476d1e6d26fdf\",\n [\"trace\"]\n ],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": [ + "{{HOST}}" + ] + } + }, + "response": [] + }, + { + "name": "trace_rawTransaction - vmTrace only", + "event": [ + { + "listen": "test", + "script": { + "id": "10371f18-4769-4d59-a46c-03d13a79a3c9", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"result\": {", + " \"output\": \"0x\",", + " \"stateDiff\": null,", + " \"trace\": [],", + " 
\"vmTrace\": {", + " \"code\": \"0x6060604052361561001f5760e060020a600035046372ea4b8c811461010c575b61011b3460008080670de0b6b3a764000084106101d557600180548101908190556003805433929081101561000257906000526020600020900160006101000a815481600160a060020a0302191690830217905550670de0b6b3a7640000840393508350670de0b6b3a76400006000600082828250540192505081905550600260016000505411151561011d5760038054829081101561000257906000526020600020900160009054906101000a9004600160a060020a0316600160a060020a03166000600060005054604051809050600060405180830381858888f150505080555060016002556101d5565b60018054016060908152602090f35b005b60018054600354910114156101d55760038054600254600101909102900392505b6003546002549003600119018310156101e357600380548490811015610002579082526040517fc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b9190910154600160a060020a03169082906706f05b59d3b200009082818181858883f1505090546706f05b59d3b1ffff1901835550506001929092019161013e565b505060028054600101905550505b600080548501905550505050565b506002548154919250600190810190910460001901905b60035460025490036001190183101561029a576003805484908110156100025760009182526040517fc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b9190910154600160a060020a03169190838504600019019082818181858883f1505081548486049003600190810190925550600290830183020460001901841415905061028e576001015b600192909201916101fa565b60038054600254810182018083559190829080158290116101c75760008390526101c7907fc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b9081019083015b808211156102fa57600081556001016102e6565b509056\",", + " \"ops\": [", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x60\"", + " ],", + " \"store\": null,", + " \"used\": 178997", + " },", + " \"pc\": 0,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x40\"", + " ],", + " \"store\": null,", + " \"used\": 178994", + " },", + " \"pc\": 2,", + " \"sub\": null", + " },", + " {", + " \"cost\": 12,", + " \"ex\": {", + " \"mem\": {", + " \"data\": \"0x0000000000000000000000000000000000000000000000000000000000000060\",", + " \"off\": 64", + " },", + " \"push\": [],", + " \"store\": null,", + " \"used\": 178982", + " },", + " \"pc\": 4,", + " \"sub\": null", + " },", + " {", + " \"cost\": 2,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x0\"", + " ],", + " \"store\": null,", + " \"used\": 178980", + " },", + " \"pc\": 5,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x1\"", + " ],", + " \"store\": null,", + " \"used\": 178977", + " },", + " \"pc\": 6,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x1f\"", + " ],", + " \"store\": null,", + " \"used\": 178974", + " },", + " \"pc\": 7,", + " \"sub\": null", + " },", + " {", + " \"cost\": 10,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [],", + " \"store\": null,", + " \"used\": 178964", + " },", + " \"pc\": 10,", + " \"sub\": null", + " },", + " {", + " \"cost\": 1,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [],", + " \"store\": null,", + " \"used\": 178963", + " },", + " \"pc\": 31,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x11b\"", + " ],", + " \"store\": null,", + " \"used\": 178960", + " },", + " \"pc\": 32,", + " \"sub\": null", + " },", + " {", + " \"cost\": 2,", + " \"ex\": {", + " 
\"mem\": null,", + " \"push\": [", + " \"0xde0b6b3a7640000\"", + " ],", + " \"store\": null,", + " \"used\": 178958", + " },", + " \"pc\": 35,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x0\"", + " ],", + " \"store\": null,", + " \"used\": 178955", + " },", + " \"pc\": 36,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x0\",", + " \"0x0\"", + " ],", + " \"store\": null,", + " \"used\": 178952", + " },", + " \"pc\": 38,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x0\",", + " \"0x0\"", + " ],", + " \"store\": null,", + " \"used\": 178949", + " },", + " \"pc\": 39,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0xde0b6b3a7640000\"", + " ],", + " \"store\": null,", + " \"used\": 178946", + " },", + " \"pc\": 40,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0xde0b6b3a7640000\",", + " \"0x0\",", + " \"0x0\",", + " \"0x0\",", + " \"0xde0b6b3a7640000\",", + " \"0xde0b6b3a7640000\"", + " ],", + " \"store\": null,", + " \"used\": 178943", + " },", + " \"pc\": 49,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x0\"", + " ],", + " \"store\": null,", + " \"used\": 178940", + " },", + " \"pc\": 50,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x1d5\"", + " ],", + " \"store\": null,", + " \"used\": 178937", + " },", + " \"pc\": 51,", + " \"sub\": null", + " },", + " {", + " \"cost\": 10,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [],", + " \"store\": null,", + " \"used\": 178927", + " },", + " \"pc\": 54,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x1\"", + " ],", + " \"store\": null,", + " \"used\": 178924", + " },", + " \"pc\": 55,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x1\",", + " \"0x1\"", + " ],", + " \"store\": null,", + " \"used\": 178921", + " },", + " \"pc\": 57,", + " \"sub\": null", + " },", + " {", + " \"cost\": 800,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x56\"", + " ],", + " \"store\": null,", + " \"used\": 178121", + " },", + " \"pc\": 58,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x1\",", + " \"0x56\",", + " \"0x1\"", + " ],", + " \"store\": null,", + " \"used\": 178118", + " },", + " \"pc\": 59,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x57\"", + " ],", + " \"store\": null,", + " \"used\": 178115", + " },", + " \"pc\": 60,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x57\",", + " \"0x1\"", + " ],", + " \"store\": null,", + " \"used\": 178112", + " },", + " \"pc\": 61,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x57\",", + " \"0x1\",", + " \"0x57\"", + " ],", + " \"store\": null,", + " \"used\": 178109", + " },", + " \"pc\": 62,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " 
\"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x57\",", + " \"0x1\"", + " ],", + " \"store\": null,", + " \"used\": 178106", + " },", + " \"pc\": 63,", + " \"sub\": null", + " },", + " {", + " \"cost\": 5000,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [],", + " \"store\": {", + " \"key\": \"0x1\",", + " \"val\": \"0x57\"", + " },", + " \"used\": 173106", + " },", + " \"pc\": 64,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x3\"", + " ],", + " \"store\": null,", + " \"used\": 173103", + " },", + " \"pc\": 65,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x3\",", + " \"0x3\"", + " ],", + " \"store\": null,", + " \"used\": 173100", + " },", + " \"pc\": 67,", + " \"sub\": null", + " },", + " {", + " \"cost\": 800,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x5b\"", + " ],", + " \"store\": null,", + " \"used\": 172300", + " },", + " \"pc\": 68,", + " \"sub\": null", + " },", + " {", + " \"cost\": 2,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0xfd2605a2bf58fdbb90db1da55df61628b47f9e8c\"", + " ],", + " \"store\": null,", + " \"used\": 172298", + " },", + " \"pc\": 69,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0xfd2605a2bf58fdbb90db1da55df61628b47f9e8c\",", + " \"0x3\",", + " \"0x5b\",", + " \"0x57\"", + " ],", + " \"store\": null,", + " \"used\": 172295", + " },", + " \"pc\": 70,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x57\",", + " \"0x5b\"", + " ],", + " \"store\": null,", + " \"used\": 172292", + " },", + " \"pc\": 71,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x57\",", + " \"0x5b\",", + " \"0x57\"", + " ],", + " \"store\": null,", + " \"used\": 172289", + " },", + " \"pc\": 72,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x1\"", + " ],", + " \"store\": null,", + " \"used\": 172286", + " },", + " \"pc\": 73,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x0\"", + " ],", + " \"store\": null,", + " \"used\": 172283", + " },", + " \"pc\": 74,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x2\"", + " ],", + " \"store\": null,", + " \"used\": 172280", + " },", + " \"pc\": 75,", + " \"sub\": null", + " },", + " {", + " \"cost\": 10,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [],", + " \"store\": null,", + " \"used\": 172270", + " },", + " \"pc\": 78,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x57\",", + " \"0x3\"", + " ],", + " \"store\": null,", + " \"used\": 172267", + " },", + " \"pc\": 79,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x0\"", + " ],", + " \"store\": null,", + " \"used\": 172264", + " },", + " \"pc\": 80,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": {", + " \"data\": \"0x0000000000000000000000000000000000000000000000000000000000000003\",", + " \"off\": 0", + " },", + " \"push\": [],", + " \"store\": null,", + " \"used\": 172261", 
+ " },", + " \"pc\": 82,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x20\"", + " ],", + " \"store\": null,", + " \"used\": 172258", + " },", + " \"pc\": 83,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x0\"", + " ],", + " \"store\": null,", + " \"used\": 172255", + " },", + " \"pc\": 85,", + " \"sub\": null", + " },", + " {", + " \"cost\": 36,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0xc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b\"", + " ],", + " \"store\": null,", + " \"used\": 172219", + " },", + " \"pc\": 87,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0xc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f85b\",", + " \"0x57\"", + " ],", + " \"store\": null,", + " \"used\": 172216", + " },", + " \"pc\": 88,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0xc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f8b2\"", + " ],", + " \"store\": null,", + " \"used\": 172213", + " },", + " \"pc\": 89,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x0\"", + " ],", + " \"store\": null,", + " \"used\": 172210", + " },", + " \"pc\": 90,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x100\"", + " ],", + " \"store\": null,", + " \"used\": 172207", + " },", + " \"pc\": 92,", + " \"sub\": null", + " },", + " {", + " \"cost\": 10,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x1\"", + " ],", + " \"store\": null,", + " \"used\": 172197", + " },", + " \"pc\": 95,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0xc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f8b2\",", + " \"0x1\",", + " \"0xc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f8b2\"", + " ],", + " \"store\": null,", + " \"used\": 172194", + " },", + " \"pc\": 96,", + " \"sub\": null", + " },", + " {", + " \"cost\": 800,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x0\"", + " ],", + " \"store\": null,", + " \"used\": 171394", + " },", + " \"pc\": 97,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x1\",", + " \"0x0\",", + " \"0x1\"", + " ],", + " \"store\": null,", + " \"used\": 171391", + " },", + " \"pc\": 98,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x1\"", + " ],", + " \"store\": null,", + " \"used\": 171388", + " },", + " \"pc\": 99,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0xa0\"", + " ],", + " \"store\": null,", + " \"used\": 171385", + " },", + " \"pc\": 101,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x2\"", + " ],", + " \"store\": null,", + " \"used\": 171382", + " },", + " \"pc\": 103,", + " \"sub\": null", + " },", + " {", + " \"cost\": 60,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x10000000000000000000000000000000000000000\"", + " ],", + " \"store\": null,", + " \"used\": 171322", + " 
},", + " \"pc\": 105,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0xffffffffffffffffffffffffffffffffffffffff\"", + " ],", + " \"store\": null,", + " \"used\": 171319", + " },", + " \"pc\": 106,", + " \"sub\": null", + " },", + " {", + " \"cost\": 5,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0xffffffffffffffffffffffffffffffffffffffff\"", + " ],", + " \"store\": null,", + " \"used\": 171314", + " },", + " \"pc\": 107,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0xffffffffffffffffffffffff0000000000000000000000000000000000000000\"", + " ],", + " \"store\": null,", + " \"used\": 171311", + " },", + " \"pc\": 108,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x0\"", + " ],", + " \"store\": null,", + " \"used\": 171308", + " },", + " \"pc\": 109,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x0\",", + " \"0x1\"", + " ],", + " \"store\": null,", + " \"used\": 171305", + " },", + " \"pc\": 110,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0xfd2605a2bf58fdbb90db1da55df61628b47f9e8c\",", + " \"0xc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f8b2\",", + " \"0x0\",", + " \"0x1\",", + " \"0xfd2605a2bf58fdbb90db1da55df61628b47f9e8c\"", + " ],", + " \"store\": null,", + " \"used\": 171302", + " },", + " \"pc\": 111,", + " \"sub\": null", + " },", + " {", + " \"cost\": 5,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0xfd2605a2bf58fdbb90db1da55df61628b47f9e8c\"", + " ],", + " \"store\": null,", + " \"used\": 171297", + " },", + " \"pc\": 112,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0xfd2605a2bf58fdbb90db1da55df61628b47f9e8c\"", + " ],", + " \"store\": null,", + " \"used\": 171294", + " },", + " \"pc\": 113,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0xfd2605a2bf58fdbb90db1da55df61628b47f9e8c\",", + " \"0xc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f8b2\"", + " ],", + " \"store\": null,", + " \"used\": 171291", + " },", + " \"pc\": 114,", + " \"sub\": null", + " },", + " {", + " \"cost\": 20000,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [],", + " \"store\": {", + " \"key\": \"0xc2575a0e9e593c00f959f8c92f12db2869c3395a3b0502d05e2516446f71f8b2\",", + " \"val\": \"0xfd2605a2bf58fdbb90db1da55df61628b47f9e8c\"", + " },", + " \"used\": 151291", + " },", + " \"pc\": 115,", + " \"sub\": null", + " },", + " {", + " \"cost\": 2,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [],", + " \"store\": null,", + " \"used\": 151289", + " },", + " \"pc\": 116,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0xde0b6b3a7640000\"", + " ],", + " \"store\": null,", + " \"used\": 151286", + " },", + " \"pc\": 117,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0xde0b6b3a7640000\",", + " \"0x0\",", + " \"0x0\",", + " \"0x0\",", + " \"0xde0b6b3a7640000\",", + " \"0xde0b6b3a7640000\"", + " ],", + " \"store\": null,", + " \"used\": 151283", + " },", + " \"pc\": 126,", + " \"sub\": null", + " 
},", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x0\"", + " ],", + " \"store\": null,", + " \"used\": 151280", + " },", + " \"pc\": 127,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x0\",", + " \"0x0\",", + " \"0x0\",", + " \"0x0\",", + " \"0xde0b6b3a7640000\"", + " ],", + " \"store\": null,", + " \"used\": 151277", + " },", + " \"pc\": 128,", + " \"sub\": null", + " },", + " {", + " \"cost\": 2,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [],", + " \"store\": null,", + " \"used\": 151275", + " },", + " \"pc\": 129,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x0\",", + " \"0x0\",", + " \"0x0\",", + " \"0x0\",", + " \"0x0\"", + " ],", + " \"store\": null,", + " \"used\": 151272", + " },", + " \"pc\": 130,", + " \"sub\": null", + " },", + " {", + " \"cost\": 2,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [],", + " \"store\": null,", + " \"used\": 151270", + " },", + " \"pc\": 131,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0xde0b6b3a7640000\"", + " ],", + " \"store\": null,", + " \"used\": 151267", + " },", + " \"pc\": 132,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x0\"", + " ],", + " \"store\": null,", + " \"used\": 151264", + " },", + " \"pc\": 141,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x0\"", + " ],", + " \"store\": null,", + " \"used\": 151261", + " },", + " \"pc\": 143,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0xde0b6b3a7640000\",", + " \"0x0\",", + " \"0x0\",", + " \"0xde0b6b3a7640000\"", + " ],", + " \"store\": null,", + " \"used\": 151258", + " },", + " \"pc\": 145,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x0\",", + " \"0x0\",", + " \"0xde0b6b3a7640000\",", + " \"0x0\"", + " ],", + " \"store\": null,", + " \"used\": 151255", + " },", + " \"pc\": 146,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x0\",", + " \"0xde0b6b3a7640000\",", + " \"0x0\",", + " \"0x0\"", + " ],", + " \"store\": null,", + " \"used\": 151252", + " },", + " \"pc\": 147,", + " \"sub\": null", + " },", + " {", + " \"cost\": 2,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [],", + " \"store\": null,", + " \"used\": 151250", + " },", + " \"pc\": 148,", + " \"sub\": null", + " },", + " {", + " \"cost\": 800,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x7ce66c50e91277e9\"", + " ],", + " \"store\": null,", + " \"used\": 150450", + " },", + " \"pc\": 149,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x8ac72304907677e9\"", + " ],", + " \"store\": null,", + " \"used\": 150447", + " },", + " \"pc\": 150,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x8ac72304907677e9\",", + " \"0x0\",", + " \"0x0\",", + " \"0xde0b6b3a7640000\"", + " ],", + " \"store\": null,", + " \"used\": 150444", + " },", + " \"pc\": 151,", + " \"sub\": null", + " },", + " {", + " \"cost\": 2,", + " \"ex\": {", + " 
\"mem\": null,", + " \"push\": [],", + " \"store\": null,", + " \"used\": 150442", + " },", + " \"pc\": 152,", + " \"sub\": null", + " },", + " {", + " \"cost\": 2,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [],", + " \"store\": null,", + " \"used\": 150440", + " },", + " \"pc\": 153,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x8ac72304907677e9\",", + " \"0x0\",", + " \"0x8ac72304907677e9\"", + " ],", + " \"store\": null,", + " \"used\": 150437", + " },", + " \"pc\": 154,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x8ac72304907677e9\",", + " \"0x0\"", + " ],", + " \"store\": null,", + " \"used\": 150434", + " },", + " \"pc\": 155,", + " \"sub\": null", + " },", + " {", + " \"cost\": 5000,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [],", + " \"store\": {", + " \"key\": \"0x0\",", + " \"val\": \"0x8ac72304907677e9\"", + " },", + " \"used\": 145434", + " },", + " \"pc\": 156,", + " \"sub\": null", + " },", + " {", + " \"cost\": 2,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [],", + " \"store\": null,", + " \"used\": 145432", + " },", + " \"pc\": 157,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x2\"", + " ],", + " \"store\": null,", + " \"used\": 145429", + " },", + " \"pc\": 158,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x1\"", + " ],", + " \"store\": null,", + " \"used\": 145426", + " },", + " \"pc\": 160,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x0\"", + " ],", + " \"store\": null,", + " \"used\": 145423", + " },", + " \"pc\": 162,", + " \"sub\": null", + " },", + " {", + " \"cost\": 2,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [],", + " \"store\": null,", + " \"used\": 145421", + " },", + " \"pc\": 164,", + " \"sub\": null", + " },", + " {", + " \"cost\": 800,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x57\"", + " ],", + " \"store\": null,", + " \"used\": 144621", + " },", + " \"pc\": 165,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x1\"", + " ],", + " \"store\": null,", + " \"used\": 144618", + " },", + " \"pc\": 166,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x0\"", + " ],", + " \"store\": null,", + " \"used\": 144615", + " },", + " \"pc\": 167,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x1\"", + " ],", + " \"store\": null,", + " \"used\": 144612", + " },", + " \"pc\": 168,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x11d\"", + " ],", + " \"store\": null,", + " \"used\": 144609", + " },", + " \"pc\": 169,", + " \"sub\": null", + " },", + " {", + " \"cost\": 10,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [],", + " \"store\": null,", + " \"used\": 144599", + " },", + " \"pc\": 172,", + " \"sub\": null", + " },", + " {", + " \"cost\": 1,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [],", + " \"store\": null,", + " \"used\": 144598", + " },", + " \"pc\": 285,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " 
\"mem\": null,", + " \"push\": [", + " \"0x1\"", + " ],", + " \"store\": null,", + " \"used\": 144595", + " },", + " \"pc\": 286,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x1\",", + " \"0x1\"", + " ],", + " \"store\": null,", + " \"used\": 144592", + " },", + " \"pc\": 288,", + " \"sub\": null", + " },", + " {", + " \"cost\": 800,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x57\"", + " ],", + " \"store\": null,", + " \"used\": 143792", + " },", + " \"pc\": 289,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x3\"", + " ],", + " \"store\": null,", + " \"used\": 143789", + " },", + " \"pc\": 290,", + " \"sub\": null", + " },", + " {", + " \"cost\": 800,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x5b\"", + " ],", + " \"store\": null,", + " \"used\": 142989", + " },", + " \"pc\": 292,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x5b\",", + " \"0x57\",", + " \"0x1\"", + " ],", + " \"store\": null,", + " \"used\": 142986", + " },", + " \"pc\": 293,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x58\"", + " ],", + " \"store\": null,", + " \"used\": 142983", + " },", + " \"pc\": 294,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x0\"", + " ],", + " \"store\": null,", + " \"used\": 142980", + " },", + " \"pc\": 295,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x1\"", + " ],", + " \"store\": null,", + " \"used\": 142977", + " },", + " \"pc\": 296,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x1d5\"", + " ],", + " \"store\": null,", + " \"used\": 142974", + " },", + " \"pc\": 297,", + " \"sub\": null", + " },", + " {", + " \"cost\": 10,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [],", + " \"store\": null,", + " \"used\": 142964", + " },", + " \"pc\": 300,", + " \"sub\": null", + " },", + " {", + " \"cost\": 1,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [],", + " \"store\": null,", + " \"used\": 142963", + " },", + " \"pc\": 469,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x0\"", + " ],", + " \"store\": null,", + " \"used\": 142960", + " },", + " \"pc\": 470,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x0\",", + " \"0x0\"", + " ],", + " \"store\": null,", + " \"used\": 142957", + " },", + " \"pc\": 472,", + " \"sub\": null", + " },", + " {", + " \"cost\": 800,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x8ac72304907677e9\"", + " ],", + " \"store\": null,", + " \"used\": 142157", + " },", + " \"pc\": 473,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x0\",", + " \"0x0\",", + " \"0x0\",", + " \"0x0\",", + " \"0x0\",", + " \"0x8ac72304907677e9\",", + " \"0x0\"", + " ],", + " \"store\": null,", + " \"used\": 142154", + " },", + " \"pc\": 474,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x8ac72304907677e9\"", + " ],", + 
" \"store\": null,", + " \"used\": 142151", + " },", + " \"pc\": 475,", + " \"sub\": null", + " },", + " {", + " \"cost\": 3,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [", + " \"0x8ac72304907677e9\",", + " \"0x0\"", + " ],", + " \"store\": null,", + " \"used\": 142148", + " },", + " \"pc\": 476,", + " \"sub\": null", + " },", + " {", + " \"cost\": 800,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [],", + " \"store\": {", + " \"key\": \"0x0\",", + " \"val\": \"0x8ac72304907677e9\"", + " },", + " \"used\": 141348", + " },", + " \"pc\": 477,", + " \"sub\": null", + " },", + " {", + " \"cost\": 2,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [],", + " \"store\": null,", + " \"used\": 141346", + " },", + " \"pc\": 478,", + " \"sub\": null", + " },", + " {", + " \"cost\": 2,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [],", + " \"store\": null,", + " \"used\": 141344", + " },", + " \"pc\": 479,", + " \"sub\": null", + " },", + " {", + " \"cost\": 2,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [],", + " \"store\": null,", + " \"used\": 141342", + " },", + " \"pc\": 480,", + " \"sub\": null", + " },", + " {", + " \"cost\": 2,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [],", + " \"store\": null,", + " \"used\": 141340", + " },", + " \"pc\": 481,", + " \"sub\": null", + " },", + " {", + " \"cost\": 8,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [],", + " \"store\": null,", + " \"used\": 141332", + " },", + " \"pc\": 482,", + " \"sub\": null", + " },", + " {", + " \"cost\": 1,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [],", + " \"store\": null,", + " \"used\": 141331", + " },", + " \"pc\": 283,", + " \"sub\": null", + " },", + " {", + " \"cost\": 0,", + " \"ex\": {", + " \"mem\": null,", + " \"push\": [],", + " \"store\": null,", + " \"used\": 141331", + " },", + " \"pc\": 284,", + " \"sub\": null", + " }", + " ]", + " }", + " },", + " \"id\": \"1\"", + "}", + "", + "pm.test('Has correct result', function() {", + " pm.expect(pm.response.json()).to.be.deep.equal(expected);", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_rawTransaction\",\n\t\"params\":[\n \"0xf86d09850cf032900f83030d4094109c4f2ccc82c4d77bde15f306707320294aea3f880de0b6b3a7640000801ca02da49aa24d7fa6fa876af59d77acfd60537eba478654934430b1b32893b65c85a02cdc152d81b71f25fd23e3e271c8c0b15a3a91ce104b6af35bd476d1e6d26fdf\",\n [\"vmTrace\"]\n ],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": [ + "{{HOST}}" + ] + } + }, + "response": [] + }, + { + "name": "trace_rawTransaction - stateDiff only", + "event": [ + { + "listen": "test", + "script": { + "id": "9fd4f1d1-1c82-4d39-b47f-feeb33836056", + "exec": [ + "utils.cannotTest(\"trace_rawTransaction - stateDiff only\", pm.response.json())", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_rawTransaction\",\n\t\"params\":[\n \"0xf86d09850cf032900f83030d4094109c4f2ccc82c4d77bde15f306707320294aea3f880de0b6b3a7640000801ca02da49aa24d7fa6fa876af59d77acfd60537eba478654934430b1b32893b65c85a02cdc152d81b71f25fd23e3e271c8c0b15a3a91ce104b6af35bd476d1e6d26fdf\",\n 
[\"stateDiff\"]\n ],\n\t\"id\":\"1\"\n}", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": [ + "{{HOST}}" + ] + } + }, + "response": [] + } + ], + "protocolProfileBehavior": {} + }, + { + "name": "trace_get", + "item": [ + { + "name": "trace_get - trace 0", + "event": [ + { + "listen": "test", + "script": { + "id": "b13645b7-48a0-480f-99e5-be52aacae86d", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"result\": {", + " \"action\": {", + " \"callType\": \"staticcall\",", + " \"from\": \"0x7a250d5630b4cf539739df2c5dacb4c659f2488d\",", + " \"gas\": \"0x40e8b\",", + " \"input\": \"0x0902f1ac\",", + " \"to\": \"0x6ebb1c40cd3789e6fc02f003b2416383ea5c96f4\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0x4d5e647b2d8d3a3c8c1561ebb88734bc5fc3c2941016f810cf218738c0ecd99e\",", + " \"blockNumber\": 11000000,", + " \"result\": {", + " \"gasUsed\": \"0x4b4\",", + " \"output\": \"0x00000000000000000000000000000000000000000000008f63f71a5f71f77323000000000000000000000000000000000000000000000008709d1f36bd0f2f83000000000000000000000000000000000000000000000000000000005f7befab\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 0", + " ],", + " \"transactionHash\": \"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\",", + " \"transactionPosition\": 26,", + " \"type\": \"call\"", + " },", + " \"id\": \"1\"", + "}", + "", + "pm.test('Has correct result', function() {", + " pm.expect(pm.response.json()).to.be.deep.equal(expected);", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_get\",\n \"params\":[\n \"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\",\n [\"0x0\"]\n ],\n\t\"id\":\"1\"\n}\n", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": [ + "{{HOST}}" + ] + } + }, + "response": [] + }, + { + "name": "trace_get - trace 3", + "event": [ + { + "listen": "test", + "script": { + "id": "0c7a662c-06d5-45f2-85e8-6d0d30e07d64", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"result\": {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0x7a250d5630b4cf539739df2c5dacb4c659f2488d\",", + " \"gas\": \"0x3d7ce\",", + " \"input\": \"0x23b872dd0000000000000000000000006795e7f4219a48e083157db6b52cf70002eced5f0000000000000000000000004a4354ffddb257671ac00cdcedef87503ac6b35200000000000000000000000000000000000000000000000001d1e7fc878ab04b\",", + " \"to\": \"0x5befbb272290dd5b8521d4a938f6c4757742c430\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0x4d5e647b2d8d3a3c8c1561ebb88734bc5fc3c2941016f810cf218738c0ecd99e\",", + " \"blockNumber\": 11000000,", + " \"result\": {", + " \"gasUsed\": \"0x5a30\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 3", + " ],", + " \"transactionHash\": \"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\",", + " \"transactionPosition\": 26,", + " \"type\": \"call\"", + " },", + " \"id\": \"1\"", + "}", + "", + "pm.test('Has correct result', function() {", + " pm.expect(pm.response.json()).to.be.deep.equal(expected);", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": 
"application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_get\",\n \"params\":[\n \"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\",\n [\"0x3\"]\n ],\n\t\"id\":\"1\"\n}\n", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": [ + "{{HOST}}" + ] + } + }, + "response": [] + }, + { + "name": "trace_get - trace 5", + "event": [ + { + "listen": "test", + "script": { + "id": "f5431b3d-4824-4fb7-9172-483588363adc", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"result\": {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0x7a250d5630b4cf539739df2c5dacb4c659f2488d\",", + " \"gas\": \"0x271a9\",", + " \"input\": \"0x022c0d9f00000000000000000000000000000000000000000000000003483b57f55165f500000000000000000000000000000000000000000000000000000000000000000000000000000000000000006ebb1c40cd3789e6fc02f003b2416383ea5c96f400000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000\",", + " \"to\": \"0x0d4a11d5eeaac28ec3f61d100daf4d40471f1852\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0x4d5e647b2d8d3a3c8c1561ebb88734bc5fc3c2941016f810cf218738c0ecd99e\",", + " \"blockNumber\": 11000000,", + " \"result\": {", + " \"gasUsed\": \"0xe1fa\",", + " \"output\": \"0x\"", + " },", + " \"subtraces\": 3,", + " \"traceAddress\": [", + " 5", + " ],", + " \"transactionHash\": \"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\",", + " \"transactionPosition\": 26,", + " \"type\": \"call\"", + " },", + " \"id\": \"1\"", + "}", + "", + "pm.test('Has correct result', function() {", + " pm.expect(pm.response.json()).to.be.deep.equal(expected);", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_get\",\n \"params\":[\n \"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\",\n [\"0x5\"]\n ],\n\t\"id\":\"1\"\n}\n", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": [ + "{{HOST}}" + ] + } + }, + "response": [] + }, + { + "name": "trace_get - trace 10 (non-existant)", + "event": [ + { + "listen": "test", + "script": { + "id": "77a95092-2e76-4457-b024-78b2e57d7457", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"result\": null,", + " \"id\": \"1\"", + "}", + "", + "pm.test('Has correct result', function() {", + " pm.expect(pm.response.json()).to.be.deep.equal(expected);", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_get\",\n \"params\":[\n \"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\",\n [\"0xa\"]\n ],\n\t\"id\":\"1\"\n}\n", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": [ + "{{HOST}}" + ] + } + }, + "response": [] + } + ], + "protocolProfileBehavior": {} + }, + { + "name": "trace_transaction", + "event": [ + { + "listen": "test", + "script": { + "id": "d326776f-bac6-4630-82eb-d698fea4a4bf", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"result\": [", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": 
\"0x6795e7f4219a48e083157db6b52cf70002eced5f\",", + " \"gas\": \"0x42c8c\",", + " \"input\": \"0x8803dbee0000000000000000000000000000000000000000000000003782dace9d90000000000000000000000000000000000000000000000000000001d690b82191f53800000000000000000000000000000000000000000000000000000000000000a00000000000000000000000006795e7f4219a48e083157db6b52cf70002eced5f000000000000000000000000000000000000000000000000000000005f7bf45c00000000000000000000000000000000000000000000000000000000000000040000000000000000000000005befbb272290dd5b8521d4a938f6c4757742c430000000000000000000000000dac17f958d2ee523a2206206994597c13d831ec7000000000000000000000000c02aaa39b223fe8d0a0e5c4f27ead9083c756cc20000000000000000000000001712aad2c773ee04bdc9114b32163c058321cd85\",", + " \"to\": \"0x7a250d5630b4cf539739df2c5dacb4c659f2488d\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0x4d5e647b2d8d3a3c8c1561ebb88734bc5fc3c2941016f810cf218738c0ecd99e\",", + " \"blockNumber\": 11000000,", + " \"result\": {", + " \"gasUsed\": \"0x3bafa\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000001d1e7fc878ab04b0000000000000000000000000000000000000000000000000000000004fe3dbb00000000000000000000000000000000000000000000000003483b57f55165f50000000000000000000000000000000000000000000000003782dace9d900000\"", + " },", + " \"subtraces\": 7,", + " \"traceAddress\": [],", + " \"transactionHash\": \"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\",", + " \"transactionPosition\": 26,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"staticcall\",", + " \"from\": \"0x7a250d5630b4cf539739df2c5dacb4c659f2488d\",", + " \"gas\": \"0x40e8b\",", + " \"input\": \"0x0902f1ac\",", + " \"to\": \"0x6ebb1c40cd3789e6fc02f003b2416383ea5c96f4\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0x4d5e647b2d8d3a3c8c1561ebb88734bc5fc3c2941016f810cf218738c0ecd99e\",", + " \"blockNumber\": 11000000,", + " \"result\": {", + " \"gasUsed\": \"0x4b4\",", + " \"output\": \"0x00000000000000000000000000000000000000000000008f63f71a5f71f77323000000000000000000000000000000000000000000000008709d1f36bd0f2f83000000000000000000000000000000000000000000000000000000005f7befab\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 0", + " ],", + " \"transactionHash\": \"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\",", + " \"transactionPosition\": 26,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"staticcall\",", + " \"from\": \"0x7a250d5630b4cf539739df2c5dacb4c659f2488d\",", + " \"gas\": \"0x3fc18\",", + " \"input\": \"0x0902f1ac\",", + " \"to\": \"0x0d4a11d5eeaac28ec3f61d100daf4d40471f1852\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0x4d5e647b2d8d3a3c8c1561ebb88734bc5fc3c2941016f810cf218738c0ecd99e\",", + " \"blockNumber\": 11000000,", + " \"result\": {", + " \"gasUsed\": \"0x4b4\",", + " \"output\": \"0x000000000000000000000000000000000000000000008c337fdddb8e693225210000000000000000000000000000000000000000000000000000d4a5c378ac6a000000000000000000000000000000000000000000000000000000005f7befad\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 1", + " ],", + " \"transactionHash\": \"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\",", + " \"transactionPosition\": 26,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": 
\"staticcall\",", + " \"from\": \"0x7a250d5630b4cf539739df2c5dacb4c659f2488d\",", + " \"gas\": \"0x3e9b8\",", + " \"input\": \"0x0902f1ac\",", + " \"to\": \"0x4a4354ffddb257671ac00cdcedef87503ac6b352\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0x4d5e647b2d8d3a3c8c1561ebb88734bc5fc3c2941016f810cf218738c0ecd99e\",", + " \"blockNumber\": 11000000,", + " \"result\": {", + " \"gasUsed\": \"0x4b4\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000076bfdff5e26f7d67500000000000000000000000000000000000000000000000000000014716b4531000000000000000000000000000000000000000000000000000000005f7beebe\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 2", + " ],", + " \"transactionHash\": \"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\",", + " \"transactionPosition\": 26,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0x7a250d5630b4cf539739df2c5dacb4c659f2488d\",", + " \"gas\": \"0x3d7ce\",", + " \"input\": \"0x23b872dd0000000000000000000000006795e7f4219a48e083157db6b52cf70002eced5f0000000000000000000000004a4354ffddb257671ac00cdcedef87503ac6b35200000000000000000000000000000000000000000000000001d1e7fc878ab04b\",", + " \"to\": \"0x5befbb272290dd5b8521d4a938f6c4757742c430\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0x4d5e647b2d8d3a3c8c1561ebb88734bc5fc3c2941016f810cf218738c0ecd99e\",", + " \"blockNumber\": 11000000,", + " \"result\": {", + " \"gasUsed\": \"0x5a30\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 3", + " ],", + " \"transactionHash\": \"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\",", + " \"transactionPosition\": 26,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0x7a250d5630b4cf539739df2c5dacb4c659f2488d\",", + " \"gas\": \"0x36f24\",", + " \"input\": \"0x022c0d9f00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004fe3dbb0000000000000000000000000d4a11d5eeaac28ec3f61d100daf4d40471f185200000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000\",", + " \"to\": \"0x4a4354ffddb257671ac00cdcedef87503ac6b352\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0x4d5e647b2d8d3a3c8c1561ebb88734bc5fc3c2941016f810cf218738c0ecd99e\",", + " \"blockNumber\": 11000000,", + " \"result\": {", + " \"gasUsed\": \"0xf2c2\",", + " \"output\": \"0x\"", + " },", + " \"subtraces\": 3,", + " \"traceAddress\": [", + " 4", + " ],", + " \"transactionHash\": \"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\",", + " \"transactionPosition\": 26,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0x4a4354ffddb257671ac00cdcedef87503ac6b352\",", + " \"gas\": \"0x33965\",", + " \"input\": \"0xa9059cbb0000000000000000000000000d4a11d5eeaac28ec3f61d100daf4d40471f18520000000000000000000000000000000000000000000000000000000004fe3dbb\",", + " \"to\": \"0xdac17f958d2ee523a2206206994597c13d831ec7\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0x4d5e647b2d8d3a3c8c1561ebb88734bc5fc3c2941016f810cf218738c0ecd99e\",", + " \"blockNumber\": 11000000,", + " \"result\": {", + " \"gasUsed\": \"0x4c91\",", + " \"output\": \"0x\"", + " },", + " \"subtraces\": 0,", + " 
\"traceAddress\": [", + " 4,", + " 0", + " ],", + " \"transactionHash\": \"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\",", + " \"transactionPosition\": 26,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"staticcall\",", + " \"from\": \"0x4a4354ffddb257671ac00cdcedef87503ac6b352\",", + " \"gas\": \"0x2e786\",", + " \"input\": \"0x70a082310000000000000000000000004a4354ffddb257671ac00cdcedef87503ac6b352\",", + " \"to\": \"0x5befbb272290dd5b8521d4a938f6c4757742c430\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0x4d5e647b2d8d3a3c8c1561ebb88734bc5fc3c2941016f810cf218738c0ecd99e\",", + " \"blockNumber\": 11000000,", + " \"result\": {", + " \"gasUsed\": \"0x4c2\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000076dcfe75aae8286c0\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 4,", + " 1", + " ],", + " \"transactionHash\": \"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\",", + " \"transactionPosition\": 26,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"staticcall\",", + " \"from\": \"0x4a4354ffddb257671ac00cdcedef87503ac6b352\",", + " \"gas\": \"0x2dca4\",", + " \"input\": \"0x70a082310000000000000000000000004a4354ffddb257671ac00cdcedef87503ac6b352\",", + " \"to\": \"0xdac17f958d2ee523a2206206994597c13d831ec7\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0x4d5e647b2d8d3a3c8c1561ebb88734bc5fc3c2941016f810cf218738c0ecd99e\",", + " \"blockNumber\": 11000000,", + " \"result\": {", + " \"gasUsed\": \"0x97f\",", + " \"output\": \"0x000000000000000000000000000000000000000000000000000000146c6d0776\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 4,", + " 2", + " ],", + " \"transactionHash\": \"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\",", + " \"transactionPosition\": 26,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0x7a250d5630b4cf539739df2c5dacb4c659f2488d\",", + " \"gas\": \"0x271a9\",", + " \"input\": \"0x022c0d9f00000000000000000000000000000000000000000000000003483b57f55165f500000000000000000000000000000000000000000000000000000000000000000000000000000000000000006ebb1c40cd3789e6fc02f003b2416383ea5c96f400000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000\",", + " \"to\": \"0x0d4a11d5eeaac28ec3f61d100daf4d40471f1852\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0x4d5e647b2d8d3a3c8c1561ebb88734bc5fc3c2941016f810cf218738c0ecd99e\",", + " \"blockNumber\": 11000000,", + " \"result\": {", + " \"gasUsed\": \"0xe1fa\",", + " \"output\": \"0x\"", + " },", + " \"subtraces\": 3,", + " \"traceAddress\": [", + " 5", + " ],", + " \"transactionHash\": \"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\",", + " \"transactionPosition\": 26,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0x0d4a11d5eeaac28ec3f61d100daf4d40471f1852\",", + " \"gas\": \"0x23ffe\",", + " \"input\": \"0xa9059cbb0000000000000000000000006ebb1c40cd3789e6fc02f003b2416383ea5c96f400000000000000000000000000000000000000000000000003483b57f55165f5\",", + " \"to\": \"0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0x4d5e647b2d8d3a3c8c1561ebb88734bc5fc3c2941016f810cf218738c0ecd99e\",", + " \"blockNumber\": 11000000,", + " \"result\": {", + " 
\"gasUsed\": \"0x3b3a\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 5,", + " 0", + " ],", + " \"transactionHash\": \"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\",", + " \"transactionPosition\": 26,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"staticcall\",", + " \"from\": \"0x0d4a11d5eeaac28ec3f61d100daf4d40471f1852\",", + " \"gas\": \"0x1fea6\",", + " \"input\": \"0x70a082310000000000000000000000000d4a11d5eeaac28ec3f61d100daf4d40471f1852\",", + " \"to\": \"0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0x4d5e647b2d8d3a3c8c1561ebb88734bc5fc3c2941016f810cf218738c0ecd99e\",", + " \"blockNumber\": 11000000,", + " \"result\": {", + " \"gasUsed\": \"0x4d2\",", + " \"output\": \"0x000000000000000000000000000000000000000000008c337c95a03673e0bf2c\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 5,", + " 1", + " ],", + " \"transactionHash\": \"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\",", + " \"transactionPosition\": 26,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"staticcall\",", + " \"from\": \"0x0d4a11d5eeaac28ec3f61d100daf4d40471f1852\",", + " \"gas\": \"0x1f3b5\",", + " \"input\": \"0x70a082310000000000000000000000000d4a11d5eeaac28ec3f61d100daf4d40471f1852\",", + " \"to\": \"0xdac17f958d2ee523a2206206994597c13d831ec7\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0x4d5e647b2d8d3a3c8c1561ebb88734bc5fc3c2941016f810cf218738c0ecd99e\",", + " \"blockNumber\": 11000000,", + " \"result\": {", + " \"gasUsed\": \"0x97f\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000000000d4a5c876ea25\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 5,", + " 2", + " ],", + " \"transactionHash\": \"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\",", + " \"transactionPosition\": 26,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0x7a250d5630b4cf539739df2c5dacb4c659f2488d\",", + " \"gas\": \"0x1875a\",", + " \"input\": \"0x022c0d9f0000000000000000000000000000000000000000000000003782dace9d90000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006795e7f4219a48e083157db6b52cf70002eced5f00000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000\",", + " \"to\": \"0x6ebb1c40cd3789e6fc02f003b2416383ea5c96f4\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0x4d5e647b2d8d3a3c8c1561ebb88734bc5fc3c2941016f810cf218738c0ecd99e\",", + " \"blockNumber\": 11000000,", + " \"result\": {", + " \"gasUsed\": \"0x119aa\",", + " \"output\": \"0x\"", + " },", + " \"subtraces\": 3,", + " \"traceAddress\": [", + " 6", + " ],", + " \"transactionHash\": \"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\",", + " \"transactionPosition\": 26,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0x6ebb1c40cd3789e6fc02f003b2416383ea5c96f4\",", + " \"gas\": \"0x15958\",", + " \"input\": \"0xa9059cbb0000000000000000000000006795e7f4219a48e083157db6b52cf70002eced5f0000000000000000000000000000000000000000000000003782dace9d900000\",", + " \"to\": \"0x1712aad2c773ee04bdc9114b32163c058321cd85\",", + " \"value\": \"0x0\"", + " 
},", + " \"blockHash\": \"0x4d5e647b2d8d3a3c8c1561ebb88734bc5fc3c2941016f810cf218738c0ecd99e\",", + " \"blockNumber\": 11000000,", + " \"result\": {", + " \"gasUsed\": \"0x776b\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 6,", + " 0", + " ],", + " \"transactionHash\": \"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\",", + " \"transactionPosition\": 26,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"staticcall\",", + " \"from\": \"0x6ebb1c40cd3789e6fc02f003b2416383ea5c96f4\",", + " \"gas\": \"0xdcc0\",", + " \"input\": \"0x70a082310000000000000000000000006ebb1c40cd3789e6fc02f003b2416383ea5c96f4\",", + " \"to\": \"0x1712aad2c773ee04bdc9114b32163c058321cd85\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0x4d5e647b2d8d3a3c8c1561ebb88734bc5fc3c2941016f810cf218738c0ecd99e\",", + " \"blockNumber\": 11000000,", + " \"result\": {", + " \"gasUsed\": \"0x4fe\",", + " \"output\": \"0x00000000000000000000000000000000000000000000008f2c743f90d4677323\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 6,", + " 1", + " ],", + " \"transactionHash\": \"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\",", + " \"transactionPosition\": 26,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"staticcall\",", + " \"from\": \"0x6ebb1c40cd3789e6fc02f003b2416383ea5c96f4\",", + " \"gas\": \"0xd1a4\",", + " \"input\": \"0x70a082310000000000000000000000006ebb1c40cd3789e6fc02f003b2416383ea5c96f4\",", + " \"to\": \"0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0x4d5e647b2d8d3a3c8c1561ebb88734bc5fc3c2941016f810cf218738c0ecd99e\",", + " \"blockNumber\": 11000000,", + " \"result\": {", + " \"gasUsed\": \"0x4d2\",", + " \"output\": \"0x00000000000000000000000000000000000000000000000873e55a8eb2609578\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 6,", + " 2", + " ],", + " \"transactionHash\": \"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\",", + " \"transactionPosition\": 26,", + " \"type\": \"call\"", + " }", + " ],", + " \"id\": \"1\"", + "}", + "", + "pm.test('Has correct result', function() {", + " pm.expect(pm.response.json()).to.be.deep.equal(expected);", + "})", + "" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_transaction\",\n \"params\":[\"0x88162c6bcbb040fd8f6676fe336f83d7a27a5765b18bfce524beea2a0e107159\"],\n\t\"id\":\"1\"\n}\n", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": [ + "{{HOST}}" + ] + } + }, + "response": [] + }, + { + "name": "trace_block", + "event": [ + { + "listen": "test", + "script": { + "id": "181dcd6d-e8cd-4ab4-aae3-db15ad220580", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"result\": [", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0x969837498944ae1dc0dcac2d0c65634c88729b2d\",", + " \"gas\": \"0x4782ec\",", + " \"input\": 
\"0xc4463c80000000000000000000000000000000000000000000000000000000000000003b000000000000000000000000b656b2a9c3b2416437a811e07466ca712f5a5b5a000000000000000000000000f835a0247b0063c04ef22006ebe57c5f11977cc40000000000000000000000000000000000000000000000000000000000000009000000000000000000000000f35e2cc8e6523d683ed44870f5b7cc785051a77d\",", + " \"to\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", + " \"value\": \"0x775ec7b96add6c8f0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x3ee428\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", + " },", + " \"subtraces\": 2,", + " \"traceAddress\": [],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", + " \"gas\": \"0x8fc\",", + " \"input\": \"0x\",", + " \"to\": \"0xf35e2cc8e6523d683ed44870f5b7cc785051a77d\",", + " \"value\": \"0x775ec7b96add6c8f0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x0\",", + " \"output\": \"0x\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 0", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", + " \"gas\": \"0x4567ba\",", + " \"input\": \"0x82661dc4000000000000000000000000000000000000000000000000000000000000003b000000000000000000000000b656b2a9c3b2416437a811e07466ca712f5a5b5a\",", + " \"to\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x3d248a\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", + " },", + " \"subtraces\": 5,", + " \"traceAddress\": [", + " 1", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"gas\": \"0x44f77f\",", + " \"input\": \"0xe2faf044000000000000000000000000b656b2a9c3b2416437a811e07466ca712f5a5b5a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000057870858\",", + " \"to\": \"0x4a574510c7014e4ae985403536074abe582adfc8\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x2b21c7\",", + " \"output\": \"0x000000000000000000000000304a554a310c7e546dfe434669c62820b7d83490\"", + " },", + " \"subtraces\": 1,", + " \"traceAddress\": [", + " 1,", + " 0", + " ],", + " \"transactionHash\": 
\"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"from\": \"0x4a574510c7014e4ae985403536074abe582adfc8\",", + " \"gas\": \"0x446e14\",", + " \"init\": \"0x606060405260405160c0806132c88339610120604052905160805160a051925160e0516101005193949293828282600f829055601083905560118054610100830261010060a860020a031990911617905560405130906001906101bc8061033e8339600160a060020a03909316908301526101408201526040519081900361016001906000f060128054600160a060020a031916919091179055505060038054600160a060020a03199081168917909155600e80549091168717905550600c84905560405130906000906101bc806104fa8339018083600160a060020a0316815260200182815260200192505050604051809103906000f0600760006101000a815481600160a060020a03021916908302179055503060006040516101bc806106b68339018083600160a060020a0316815260200182815260200192505050604051809103906000f060088054600160a060020a031916919091179055600754600160a060020a03166000141561017557610002565b600854600160a060020a03166000141561018e57610002565b426002556005600190815560008054828255829080158290116101ca57600e0281600e0283600052602060002091820191016101ca9190610245565b50505030600160a060020a03908116600090815260046020526040808220805460ff19908116600190811790925560035490941683529120805490921617905550505050505050612a56806108726000396000f35b5050600060098201819055600a820155600d81018054600160a060020a03191690556001015b8082111561033a578054600160a060020a03191681556000600182810182905560028381018054848255909281161561010002600019011604601f81901061030c57505b506000600383018190556004838101805461ffff19169055600584018290556006840182905560078401805460ff19169055600884018054838255908352602090922061021f929091028101905b8082111561033a5760008082556001820181815560028301919091556003919091018054600160a060020a03191690556102d7565b601f01602090049060005260206000209081019061028991905b8082111561033a5760008155600101610326565b50905660606040818152806101bc833960a090525160805160008054600160a060020a03191690921760a060020a60ff0219167401000000000000000000000000000000000000000090910217815561016290819061005a90396000f3606060405236156100405760e060020a60003504630221038a811461004d57806318bdc79a146100aa5780638da5cb5b146100be578063d2cc718f146100d0575b6100d96001805434019055565b6100db6004356024356000805433600160a060020a0390811691161415806100755750600034115b806100a05750805460a060020a900460ff1680156100a057508054600160a060020a03848116911614155b156100f757610002565b6100db60005460ff60a060020a9091041681565b6100ed600054600160a060020a031681565b6100db60015481565b005b60408051918252519081900360200190f35b6060908152602090f35b600160a060020a0383168260608381818185876185025a03f1925050501561015c57604080518381529051600160a060020a038516917f9735b0cb909f3d21d5c16bbcccd272d85fa11446f6d679f6ecb170d2dabfecfc919081900360200190a25060015b929150505660606040818152806101bc833960a090525160805160008054600160a060020a03191690921760a060020a60ff0219167401000000000000000000000000000000000000000090910217815561016290819061005a90396000f3606060405236156100405760e060020a60003504630221038a811461004d57806318bdc79a146100aa5780638da5cb5b146100be578063d2cc718f146100d0575b6100d96001805434019055565b6100db6004356024356000805433600160a060020a0390811691161415806100755750600034115b806100a05750805460a060020a900460ff1680156100a057508054600160a060020a03848116911614155b156100f757610002565b6100db60005460ff60a060020a9091041681565b6100ed600054600160a060020a031681565b6100db60015481565b005b60408051918252519081900360200190f35b6060908152602090f35b600160a060020a0383168260608381818185876185025a03f1
925050501561015c57604080518381529051600160a060020a038516917f9735b0cb909f3d21d5c16bbcccd272d85fa11446f6d679f6ecb170d2dabfecfc919081900360200190a25060015b929150505660606040818152806101bc833960a090525160805160008054600160a060020a03191690921760a060020a60ff0219167401000000000000000000000000000000000000000090910217815561016290819061005a90396000f3606060405236156100405760e060020a60003504630221038a811461004d57806318bdc79a146100aa5780638da5cb5b146100be578063d2cc718f146100d0575b6100d96001805434019055565b6100db6004356024356000805433600160a060020a0390811691161415806100755750600034115b806100a05750805460a060020a900460ff1680156100a057508054600160a060020a03848116911614155b156100f757610002565b6100db60005460ff60a060020a9091041681565b6100ed600054600160a060020a031681565b6100db60015481565b005b60408051918252519081900360200190f35b6060908152602090f35b600160a060020a0383168260608381818185876185025a03f1925050501561015c57604080518381529051600160a060020a038516917f9735b0cb909f3d21d5c16bbcccd272d85fa11446f6d679f6ecb170d2dabfecfc919081900360200190a25060015b92915050566060604052361561020e5760e060020a6000350463013cf08b8114610247578063095ea7b3146102d05780630c3b7b96146103455780630e7082031461034e578063149acf9a1461036057806318160ddd146103725780631f2dc5ef1461037b57806321b5b8dd1461039b578063237e9492146103ad57806323b872dd1461040e5780632632bf2014610441578063341458081461047257806339d1f9081461047b5780634b6753bc146104935780634df6d6cc1461049c5780634e10c3ee146104b7578063590e1ae3146104ca578063612e45a3146104db578063643f7cdd1461057a578063674ed066146105925780636837ff1e1461059b57806370a08231146105e5578063749f98891461060b57806378524b2e1461062457806381f03fcb1461067e57806382661dc41461069657806382bf6464146106b75780638b15a605146106c95780638d7af473146106d257806396d7f3f5146106e1578063a1da2fb9146106ea578063a3912ec814610704578063a9059cbb1461070f578063b7bc2c841461073f578063baac53001461074b578063be7c29c1146107b1578063c9d27afe14610817578063cc9ae3f61461082d578063cdef91d014610841578063dbde198814610859578063dd62ed3e1461087e578063e33734fd146108b2578063e5962195146108c6578063e66f53b7146108de578063eceb2945146108f0578063f8c80d261461094f575b610966600f546000906234bc000142108015610239575060125433600160a060020a03908116911614155b156109785761098033610752565b6109866004356000805482908110156100025750808052600e8202600080516020612a3683398151915201905060038101546004820154600683015460018401548454600786015460058701546009880154600a890154600d8a0154600160a060020a039586169b509599600201989760ff81811698610100909204811697949691951693168c565b61096660043560243533600160a060020a03908116600081815260156020908152604080832094871680845294825280832086905580518681529051929493927f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925929181900390910190a35060015b92915050565b61096660105481565b610a7d600754600160a060020a031681565b610a7d600e54600160a060020a031681565b61096660165481565b6109665b60004262127500600f60005054031115610de557506014610983565b610a7d601254600160a060020a031681565b60408051602060248035600481810135601f810185900485028601850190965285855261096695813595919460449492939092019181908401838280828437509496505050505050506000600060006000600060003411156116a857610002565b6109666004356024356044355b60115460009060ff1680156104315750600f5442115b80156124e957506124e78461044b565b6109666000610980335b600160a060020a0381166000908152600b602052604081205481908114156129cb57610b99565b61096660065481565b6109665b600d5430600160a060020a03163103610983565b610966600f5481565b61096660043560046020526000908152604090205460ff1681565b61096660043560243560006124cb610831565b610a9a6000341115610ba457610002565b60408051602060443
5600481810135601f8101849004840285018401909552848452610966948135946024803595939460649492939101918190840183828082843750506040805160209735808a0135601f81018a90048a0283018a01909352828252969897608497919650602491909101945090925082915084018382808284375094965050933593505060a435915050600060006110c1336105ec565b61096660043560096020526000908152604090205481565b61096660015481565b610a9a60043530600160a060020a031633600160a060020a03161415806105db5750600160a060020a03811660009081526004602052604090205460ff16155b156121cb576121c8565b6109666004355b600160a060020a0381166000908152601460205260409020545b919050565b6109666004356024356000600034111561259957610002565b610966600062e6b680420360026000505410806106505750600354600160a060020a0390811633909116145b80156106645750600254621274ff19420190105b156126145750426002908155600180549091028155610983565b610966600435600a6020526000908152604090205481565b610966600435602435600060006000600060006000341115611ba157610002565b610a7d600854600160a060020a031681565b610966600c5481565b61096660005460001901610983565b61096660025481565b61096660043560006000600060003411156121fc57610002565b6109665b6001610983565b6109666004356024355b60115460009060ff16801561072f5750600f5442115b801561248757506124853361044b565b61096660115460ff1681565b6109666004355b60006000600f600050544210801561076a5750600034115b80156107a457506011546101009004600160a060020a0316600014806107a457506011546101009004600160a060020a0390811633909116145b15610b9f57610a9c61037f565b610a7d600435600060006000508281548110156100025750508080527f290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e56b600e83020180548290811015610002575081526020902060030154600160a060020a0316610606565b61096660043560243560006000610e1b336105ec565b6109665b6000600034111561247c57610002565b61096660043560056020526000908152604090205481565b610966600435602435604435600061252f845b6000600060003411156127ac57610002565b610966600435602435600160a060020a0382811660009081526015602090815260408083209385168352929052205461033f565b610a9a600435600034111561254557610002565b610966600435600b6020526000908152604090205481565b610a7d600354600160a060020a031681565b604080516020606435600481810135601f81018490048402850184019095528484526109669481359460248035956044359560849492019190819084018382808284375094965050505050505060006000600034111561103257610002565b610a7d6011546101009004600160a060020a031681565b60408051918252519081900360200190f35b610980610708565b90505b90565b604051808d600160a060020a031681526020018c8152602001806020018b81526020018a815260200189815260200188815260200187815260200186815260200185815260200184815260200183600160a060020a0316815260200182810382528c818154600181600116156101000203166002900481526020019150805460018160011615610100020316600290048015610a635780601f10610a3857610100808354040283529160200191610a63565b820191906000526020600020905b815481529060010190602001808311610a4657829003601f168201915b50509d505050505050505050505050505060405180910390f35b60408051600160a060020a03929092168252519081900360200190f35b005b604051601254601434908102939093049350600160a060020a03169183900390600081818185876185025a03f150505050600160a060020a038316600081815260146020908152604080832080548601905560168054860190556013825291829020805434019055815184815291517fdbccb92686efceafb9bb7e0394df7f58f71b954061b81afb57109bf247d3d75a9281900390910190a260105460165410801590610b4c575060115460ff16155b15610b94576011805460ff1916600117905560165460408051918252517ff381a3e2428fdda36615919e8d9c35878d9eb0cf85ac6edf575088e80e4c147e9181900360200190a15b600191505b50919050565b610002565b600f5442118015610bb8575060115460ff16155b15610de357601260009054906101000a9004600160a060020a03166001
60a060020a031663d2cc718f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750506040516012549051600160a060020a039190911631109050610cc9576040805160125460e060020a63d2cc718f0282529151600160a060020a039290921691630221038a913091849163d2cc718f91600482810192602092919082900301816000876161da5a03f11561000257505060408051805160e160020a63011081c5028252600160a060020a039490941660048201526024810193909352516044838101936020935082900301816000876161da5a03f115610002575050505b33600160a060020a0316600081815260136020526040808220549051909181818185876185025a03f19250505015610de35733600160a060020a03167fbb28353e4598c3b9199101a66e0989549b659a59a54d2c27fbb183f1932c8e6d6013600050600033600160a060020a03168152602001908152602001600020600050546040518082815260200191505060405180910390a26014600050600033600160a060020a0316815260200190815260200160002060005054601660008282825054039250508190555060006014600050600033600160a060020a031681526020019081526020016000206000508190555060006013600050600033600160a060020a03168152602001908152602001600020600050819055505b565b4262054600600f60005054031115610e13576201518062127500600f60005054034203046014019050610983565b50601e610983565b60001415610e2857610002565b6000341115610e3657610002565b6000805485908110156100025750600160a060020a03331681527f290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e56e600e8602908101602052604090912054600080516020612a3683398151915291909101915060ff1680610eb05750600c810160205260406000205460ff165b80610ebf575060038101544210155b15610ec957610002565b8215610f0f5733600160a060020a03166000908152601460209081526040808320546009850180549091019055600b84019091529020805460ff19166001179055610f4b565b33600160a060020a0316600090815260146020908152604080832054600a850180549091019055600c84019091529020805460ff191660011790555b33600160a060020a03166000908152600b60205260408120541415610f77576040600020849055610feb565b33600160a060020a03166000908152600b60205260408120548154811015610002579080527f290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e566600e909102015460038201541115610feb5733600160a060020a03166000908152600b602052604090208490555b60408051848152905133600160a060020a03169186917f86abfce99b7dd908bec0169288797f85049ec73cbe046ed9de818fab3a497ae09181900360200190a35092915050565b6000805487908110156100025750808052600e8702600080516020612a3683398151915201905090508484846040518084600160a060020a0316606060020a0281526014018381526020018280519060200190808383829060006004602084601f0104600f02600301f15090500193505050506040518091039020816005016000505414915050949350505050565b600014156110ce57610002565b82801561111857508660001415806110e857508451600014155b806111005750600354600160a060020a038981169116145b8061110b5750600034115b80611118575062093a8084105b1561112257610002565b8215801561114257506111348861115c565b158061114257506212750084105b156111fe57610002565b83546118e590600160a060020a03165b600160a060020a03811660009081526004602052604081205460ff16806111f15750601254600160a060020a039081169083161480156111f15750601260009054906101000a9004600160a060020a0316600160a060020a031663d2cc718f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f115610002575050604051516006541190505b156129a157506001610606565b6249d40084111561120e57610002565b60115460ff1615806112215750600f5442105b806112365750600c5434108015611236575082155b1561124057610002565b42844201101561124f57610002565b30600160a060020a031633600160a060020a0316141561126e57610002565b60008054600181018083559091908280158290116112a557600e0281600e0283600052602060002091820191016112a5919061136a565b505060008054929450918491508110156100025750808052600e830260008051602
0612a368339815191520190508054600160a060020a031916891781556001818101899055875160028084018054600082815260209081902096975091959481161561010002600019011691909104601f908101829004840193918b019083901061146257805160ff19168380011785555b5061149292915061144a565b5050600060098201819055600a820155600d81018054600160a060020a03191690556001015b8082111561145e578054600160a060020a03191681556000600182810182905560028084018054848255909281161561010002600019011604601f81901061143057505b506000600383018190556004808401805461ffff19169055600584018290556006840182905560078401805460ff191690556008840180548382559083526020909220611344929091028101905b8082111561145e57600080825560018201818155600283019190915560039091018054600160a060020a03191690556113fc565b601f0160209004906000526020600020908101906113ae91905b8082111561145e576000815560010161144a565b5090565b82800160010185558215611338579182015b82811115611338578251826000505591602001919060010190611474565b50508787866040518084600160a060020a0316606060020a0281526014018381526020018280519060200190808383829060006004602084601f0104600f02600301f150905001935050505060405180910390208160050160005081905550834201816003016000508190555060018160040160006101000a81548160ff02191690830217905550828160070160006101000a81548160ff02191690830217905550821561157857600881018054600181018083559091908280158290116115735760040281600402836000526020600020918201910161157391906113fc565b505050505b600d8082018054600160a060020a031916331790553460068301819055815401905560408051600160a060020a038a16815260208181018a9052918101859052608060608201818152895191830191909152885185937f5790de2c279e58269b93b12828f56fd5f2bc8ad15e61ce08572585c81a38756f938d938d938a938e93929160a084019185810191908190849082908590600090600490601f850104600f02600301f150905090810190601f1680156116485780820380516001836020036101000a031916815260200191505b509550505050505060405180910390a2509695505050505050565b6040805186815260208101839052815189927fdfc78bdca8e3e0b18c16c5c99323c6cb9eb5e00afde190b4e7273f5158702b07928290030190a25b5050505092915050565b6000805488908110156100025750808052600e8802600080516020612a36833981519152019050600781015490945060ff166116e757620d2f006116ec565b622398805b600485015490935060ff16801561170857506003840154830142115b15611716576117b887611890565b600384015442108061172d5750600484015460ff16155b806117ae57508360000160009054906101000a9004600160a060020a03168460010160005054876040518084600160a060020a0316606060020a0281526014018381526020018280519060200190808383829060006004602084601f0104600f02600301f15090500193505050506040518091039020846005016000505414155b1561114c57610002565b61169e565b60048401805461ff001916610100179055835460019550600160a060020a03908116309091161480159061180157508354600754600160a060020a03908116911614155b801561181d57506008548454600160a060020a03908116911614155b801561183957508354601254600160a060020a03908116911614155b801561185557506003548454600160a060020a03908116911614155b1561188b5760018401805430600160a060020a031660009081526005602052604090208054919091019055546006805490910190555b611663875b6000600060005082815481101561000257908052600e02600080516020612a36833981519152018150600481015490915060ff16156118d757600d80546006830154900390555b600401805460ff1916905550565b15156118f45761190087611890565b6001915061193161047f565b604051600d8501546006860154600160a060020a0391909116916000919082818181858883f193505050505061169e565b6001850154111561194157600091505b50600a8301546009840154865191019060049010801590611986575085600081518110156100025790602001015160f860020a900460f860020a02606860f860020a02145b80156119b6575085600181518110156100025790602001015160f860020a900460f860020a02603760f860020a02145b8015
6119e6575085600281518110156100025790602001015160f860020a900460f860020a0260ff60f860020a02145b8015611a16575085600381518110156100025790602001015160f860020a900460f860020a02601e60f860020a02145b8015611a45575030600160a060020a0316600090815260056020526040902054611a4290611a5d61047f565b81105b15611a4f57600091505b6001840154611a8090611a5f565b015b30600160a060020a03166000908152600560205260408120546129a961047f565b8110611ad457604051600d8501546006860154600160a060020a0391909116916000919082818181858883f193505050501515611abc57610002565b4260025560165460059004811115611ad45760056001555b6001840154611ae290611a5f565b8110158015611af85750600a8401546009850154115b8015611b015750815b1561188b578360000160009054906101000a9004600160a060020a0316600160a060020a0316846001016000505487604051808280519060200190808383829060006004602084601f0104600f02600301f150905090810190601f168015611b7d5780820380516001836020036101000a031916815260200191505b5091505060006040518083038185876185025a03f19250505015156117bd57610002565b611baa336105ec565b60001415611bb757610002565b60008054889081101561000257508052600e87027f290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e566810154600080516020612a36833981519152919091019450421080611c1957506003840154622398800142115b80611c3257508354600160a060020a0390811690871614155b80611c425750600784015460ff16155b80611c68575033600160a060020a03166000908152600b8501602052604090205460ff16155b80611c9c575033600160a060020a03166000908152600b60205260409020548714801590611c9c5750604060009081205414155b15611ca657610002565b600884018054600090811015610002579081526020812060030154600160a060020a03161415611e1257611efc86604051600090600160a060020a038316907f9046fefd66f538ab35263248a44217dcb70e2eb2cd136629e141b8b8f9f03b60908390a260408051600e547fe2faf044000000000000000000000000000000000000000000000000000000008252600160a060020a03858116600484015260248301859052604483018590526223988042016064840152925192169163e2faf04491608480820192602092909190829003018187876161da5a03f1156100025750506040515191506106069050565b6008850180546000908110156100025781815260208082209390935530600160a060020a031681526005909252604082205481549092908110156100025790815260208120905060020155601654600885018054600090811015610002579081526020812090506001015560048401805461ff0019166101001790555b6008840180546000908110156100025781548282526020822060010154929190811015610002579081526020812090505433600160a060020a031660009081526014602052604081205460088801805493909102939093049550908110156100025790815260208120905060030154604080517fbaac530000000000000000000000000000000000000000000000000000000000815233600160a060020a0390811660048301529151929091169163baac53009186916024808301926020929190829003018185886185025a03f11561000257505060405151600014159150611f78905057610002565b60088501805460009081101561000257818152602081206003018054600160a060020a03191690931790925580549091908110156100025790815260208120905060030154600160a060020a031660001415611f5757610002565b600d5430600160a060020a0316311015611f7057610002565b611d9561047f565b6008840180546000908110156100025781548282526020822060010154929190811015610002579081526020812090506002015433600160a060020a0390811660009081526014602090815260408083205430909416835260058083528184205460099093529083205460088b018054969095029690960497509487020494508593929091908290811015610002575260208120815060030154600160a060020a0390811682526020828101939093526040918201600090812080549095019094553016835260059091529020548290101561205357610002565b30600160a060020a031660009081526005602052604081208054849003905560088501805483926009929091829081101561000257508152602080822060030154600160a060020a03908116835292905260408082208
0549094019093553090911681522054819010156120c657610002565b30600160a060020a0390811660009081526009602090815260408083208054869003905533909316808352601482528383205484519081529351929390927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef929181900390910190a36121383361086c565b5033600160a060020a03166000908152601460209081526040808320805460168054919091039055839055600a9091528120556001945061169e565b30600160a060020a0390811660008181526005602090815260408083208054958716808552828520805490970190965584845283905560099091528082208054948352908220805490940190935590815290555b50565b604051600160a060020a0382811691309091163190600081818185876185025a03f192505050151561217457610002565b33600160a060020a03818116600090815260096020908152604080832054815160065460085460e060020a63d2cc718f028352935197995091969195929092169363d2cc718f936004848101949193929183900301908290876161da5a03f11561000257505050604051805190602001506005600050600033600160a060020a03168152602001908152602001600020600050540204101561229d57610002565b600160a060020a03338116600090815260096020908152604080832054815160065460085460e060020a63d2cc718f02835293519296909593169363d2cc718f93600483810194929383900301908290876161da5a03f11561000257505050604051805190602001506005600050600033600160a060020a0316815260200190815260200160002060005054020403905083156123ec57600860009054906101000a9004600160a060020a0316600160a060020a0316630221038a83600160a060020a0316630e7082036040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e160020a63011081c5028252600160a060020a031660048201526024810186905290516044808301935060209282900301816000876161da5a03f115610002575050604051511515905061245457610002565b6040805160085460e160020a63011081c5028252600160a060020a038581166004840152602483018590529251921691630221038a9160448082019260209290919082900301816000876161da5a03f115610002575050604051511515905061245457610002565b600160a060020a03331660009081526009602052604090208054909101905550600192915050565b6109803361086c565b155b80156124a257506124a23384845b6000600061293a856105ec565b80156124be57506124be83836000600034111561261c57610002565b15610b9f5750600161033f565b15156124d657610002565b6124e08383610719565b905061033f565b155b80156124fb57506124fb848484612495565b80156125185750612518848484600060003411156126c157610002565b15610b9f57506001612528565b90505b9392505050565b151561253a57610002565b61252584848461041b565b30600160a060020a031633600160a060020a031614158061258a575030600160a060020a031660009081526005602052604090205460649061258561047f565b010481115b1561259457610002565b600c55565b600354600160a060020a0390811633909116146125b557610002565b600160a060020a038316600081815260046020908152604091829020805460ff191686179055815185815291517f73ad2a153c8b67991df9459024950b318a609782cee8c7eeda47b905f9baa91f9281900390910190a250600161033f565b506000610983565b33600160a060020a03166000908152601460205260409020548290108015906126455750600082115b156126b957600160a060020a03338116600081815260146020908152604080832080548890039055938716808352918490208054870190558351868152935191937fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef929081900390910190a350600161033f565b50600061033f565b600160a060020a03841660009081526014602052604090205482901080159061270a5750601560209081526040600081812033600160a060020a03168252909252902054829010155b80156127165750600082115b156127a457600160a060020a03838116600081815260146020908152604080832080548801905588851680845281842080548990039055601583528184203390961684529482529182902080548790039055815186815291519293927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef9281900390910190
a3506001612528565b506000612528565b600160a060020a038381166000908152600a6020908152604080832054601654600754835160e060020a63d2cc718f02815293519296919591169363d2cc718f9360048181019492939183900301908290876161da5a03f11561000257505060405151905061281a866105ec565b0204101561282757610002565b600160a060020a038381166000908152600a6020908152604080832054601654600754835160e060020a63d2cc718f02815293519296919591169363d2cc718f9360048181019492939183900301908290876161da5a03f115610002575050604051519050612895866105ec565b0204039050600760009054906101000a9004600160a060020a0316600160a060020a0316630221038a84836040518360e060020a0281526004018083600160a060020a03168152602001828152602001925050506020604051808303816000876161da5a03f115610002575050604051511515905061291357610002565b600160a060020a0383166000908152600a6020526040902080548201905560019150610b99565b600160a060020a0386166000908152600a602052604090205480850291909104915081111561296857610002565b600160a060020a038581166000908152600a60205260408082208054859003905591861681522080548201905560019150509392505050565b506000610606565b0160030260166000505483020460016000505460166000505404019050610606565b600160a060020a0383166000908152600b6020526040812054815481101561000257818052600e02600080516020612a368339815191520190506003810154909150421115610b9457600160a060020a0383166000908152600b602052604081208190559150610b9956290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563000000000000000000000000b656b2a9c3b2416437a811e07466ca712f5a5b5a0000000000000000000000004a574510c7014e4ae985403536074abe582adfc8000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000057870858000000000000000000000000bb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"address\": \"0x304a554a310c7e546dfe434669c62820b7d83490\",", + " \"code\": 
\"0x6060604052361561020e5760e060020a6000350463013cf08b8114610247578063095ea7b3146102d05780630c3b7b96146103455780630e7082031461034e578063149acf9a1461036057806318160ddd146103725780631f2dc5ef1461037b57806321b5b8dd1461039b578063237e9492146103ad57806323b872dd1461040e5780632632bf2014610441578063341458081461047257806339d1f9081461047b5780634b6753bc146104935780634df6d6cc1461049c5780634e10c3ee146104b7578063590e1ae3146104ca578063612e45a3146104db578063643f7cdd1461057a578063674ed066146105925780636837ff1e1461059b57806370a08231146105e5578063749f98891461060b57806378524b2e1461062457806381f03fcb1461067e57806382661dc41461069657806382bf6464146106b75780638b15a605146106c95780638d7af473146106d257806396d7f3f5146106e1578063a1da2fb9146106ea578063a3912ec814610704578063a9059cbb1461070f578063b7bc2c841461073f578063baac53001461074b578063be7c29c1146107b1578063c9d27afe14610817578063cc9ae3f61461082d578063cdef91d014610841578063dbde198814610859578063dd62ed3e1461087e578063e33734fd146108b2578063e5962195146108c6578063e66f53b7146108de578063eceb2945146108f0578063f8c80d261461094f575b610966600f546000906234bc000142108015610239575060125433600160a060020a03908116911614155b156109785761098033610752565b6109866004356000805482908110156100025750808052600e8202600080516020612a3683398151915201905060038101546004820154600683015460018401548454600786015460058701546009880154600a890154600d8a0154600160a060020a039586169b509599600201989760ff81811698610100909204811697949691951693168c565b61096660043560243533600160a060020a03908116600081815260156020908152604080832094871680845294825280832086905580518681529051929493927f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925929181900390910190a35060015b92915050565b61096660105481565b610a7d600754600160a060020a031681565b610a7d600e54600160a060020a031681565b61096660165481565b6109665b60004262127500600f60005054031115610de557506014610983565b610a7d601254600160a060020a031681565b60408051602060248035600481810135601f810185900485028601850190965285855261096695813595919460449492939092019181908401838280828437509496505050505050506000600060006000600060003411156116a857610002565b6109666004356024356044355b60115460009060ff1680156104315750600f5442115b80156124e957506124e78461044b565b6109666000610980335b600160a060020a0381166000908152600b602052604081205481908114156129cb57610b99565b61096660065481565b6109665b600d5430600160a060020a03163103610983565b610966600f5481565b61096660043560046020526000908152604090205460ff1681565b61096660043560243560006124cb610831565b610a9a6000341115610ba457610002565b604080516020604435600481810135601f8101849004840285018401909552848452610966948135946024803595939460649492939101918190840183828082843750506040805160209735808a0135601f81018a90048a0283018a01909352828252969897608497919650602491909101945090925082915084018382808284375094965050933593505060a435915050600060006110c1336105ec565b61096660043560096020526000908152604090205481565b61096660015481565b610a9a60043530600160a060020a031633600160a060020a03161415806105db5750600160a060020a03811660009081526004602052604090205460ff16155b156121cb576121c8565b6109666004355b600160a060020a0381166000908152601460205260409020545b919050565b6109666004356024356000600034111561259957610002565b610966600062e6b680420360026000505410806106505750600354600160a060020a0390811633909116145b80156106645750600254621274ff19420190105b156126145750426002908155600180549091028155610983565b610966600435600a6020526000908152604090205481565b610966600435602435600060006000600060006000341115611ba157610002565b610a7d600854600160a060020a031681565b610966600c5481565b61096660005460001901610983565b61096660025481565b610966600
43560006000600060003411156121fc57610002565b6109665b6001610983565b6109666004356024355b60115460009060ff16801561072f5750600f5442115b801561248757506124853361044b565b61096660115460ff1681565b6109666004355b60006000600f600050544210801561076a5750600034115b80156107a457506011546101009004600160a060020a0316600014806107a457506011546101009004600160a060020a0390811633909116145b15610b9f57610a9c61037f565b610a7d600435600060006000508281548110156100025750508080527f290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e56b600e83020180548290811015610002575081526020902060030154600160a060020a0316610606565b61096660043560243560006000610e1b336105ec565b6109665b6000600034111561247c57610002565b61096660043560056020526000908152604090205481565b610966600435602435604435600061252f845b6000600060003411156127ac57610002565b610966600435602435600160a060020a0382811660009081526015602090815260408083209385168352929052205461033f565b610a9a600435600034111561254557610002565b610966600435600b6020526000908152604090205481565b610a7d600354600160a060020a031681565b604080516020606435600481810135601f81018490048402850184019095528484526109669481359460248035956044359560849492019190819084018382808284375094965050505050505060006000600034111561103257610002565b610a7d6011546101009004600160a060020a031681565b60408051918252519081900360200190f35b610980610708565b90505b90565b604051808d600160a060020a031681526020018c8152602001806020018b81526020018a815260200189815260200188815260200187815260200186815260200185815260200184815260200183600160a060020a0316815260200182810382528c818154600181600116156101000203166002900481526020019150805460018160011615610100020316600290048015610a635780601f10610a3857610100808354040283529160200191610a63565b820191906000526020600020905b815481529060010190602001808311610a4657829003601f168201915b50509d505050505050505050505050505060405180910390f35b60408051600160a060020a03929092168252519081900360200190f35b005b604051601254601434908102939093049350600160a060020a03169183900390600081818185876185025a03f150505050600160a060020a038316600081815260146020908152604080832080548601905560168054860190556013825291829020805434019055815184815291517fdbccb92686efceafb9bb7e0394df7f58f71b954061b81afb57109bf247d3d75a9281900390910190a260105460165410801590610b4c575060115460ff16155b15610b94576011805460ff1916600117905560165460408051918252517ff381a3e2428fdda36615919e8d9c35878d9eb0cf85ac6edf575088e80e4c147e9181900360200190a15b600191505b50919050565b610002565b600f5442118015610bb8575060115460ff16155b15610de357601260009054906101000a9004600160a060020a0316600160a060020a031663d2cc718f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750506040516012549051600160a060020a039190911631109050610cc9576040805160125460e060020a63d2cc718f0282529151600160a060020a039290921691630221038a913091849163d2cc718f91600482810192602092919082900301816000876161da5a03f11561000257505060408051805160e160020a63011081c5028252600160a060020a039490941660048201526024810193909352516044838101936020935082900301816000876161da5a03f115610002575050505b33600160a060020a0316600081815260136020526040808220549051909181818185876185025a03f19250505015610de35733600160a060020a03167fbb28353e4598c3b9199101a66e0989549b659a59a54d2c27fbb183f1932c8e6d6013600050600033600160a060020a03168152602001908152602001600020600050546040518082815260200191505060405180910390a26014600050600033600160a060020a0316815260200190815260200160002060005054601660008282825054039250508190555060006014600050600033600160a060020a031681526020019081526020016000206000508190555060006013600050600033600160a060020a031681526020019081526020016000206000508190
55505b565b4262054600600f60005054031115610e13576201518062127500600f60005054034203046014019050610983565b50601e610983565b60001415610e2857610002565b6000341115610e3657610002565b6000805485908110156100025750600160a060020a03331681527f290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e56e600e8602908101602052604090912054600080516020612a3683398151915291909101915060ff1680610eb05750600c810160205260406000205460ff165b80610ebf575060038101544210155b15610ec957610002565b8215610f0f5733600160a060020a03166000908152601460209081526040808320546009850180549091019055600b84019091529020805460ff19166001179055610f4b565b33600160a060020a0316600090815260146020908152604080832054600a850180549091019055600c84019091529020805460ff191660011790555b33600160a060020a03166000908152600b60205260408120541415610f77576040600020849055610feb565b33600160a060020a03166000908152600b60205260408120548154811015610002579080527f290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e566600e909102015460038201541115610feb5733600160a060020a03166000908152600b602052604090208490555b60408051848152905133600160a060020a03169186917f86abfce99b7dd908bec0169288797f85049ec73cbe046ed9de818fab3a497ae09181900360200190a35092915050565b6000805487908110156100025750808052600e8702600080516020612a3683398151915201905090508484846040518084600160a060020a0316606060020a0281526014018381526020018280519060200190808383829060006004602084601f0104600f02600301f15090500193505050506040518091039020816005016000505414915050949350505050565b600014156110ce57610002565b82801561111857508660001415806110e857508451600014155b806111005750600354600160a060020a038981169116145b8061110b5750600034115b80611118575062093a8084105b1561112257610002565b8215801561114257506111348861115c565b158061114257506212750084105b156111fe57610002565b83546118e590600160a060020a03165b600160a060020a03811660009081526004602052604081205460ff16806111f15750601254600160a060020a039081169083161480156111f15750601260009054906101000a9004600160a060020a0316600160a060020a031663d2cc718f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f115610002575050604051516006541190505b156129a157506001610606565b6249d40084111561120e57610002565b60115460ff1615806112215750600f5442105b806112365750600c5434108015611236575082155b1561124057610002565b42844201101561124f57610002565b30600160a060020a031633600160a060020a0316141561126e57610002565b60008054600181018083559091908280158290116112a557600e0281600e0283600052602060002091820191016112a5919061136a565b505060008054929450918491508110156100025750808052600e8302600080516020612a368339815191520190508054600160a060020a031916891781556001818101899055875160028084018054600082815260209081902096975091959481161561010002600019011691909104601f908101829004840193918b019083901061146257805160ff19168380011785555b5061149292915061144a565b5050600060098201819055600a820155600d81018054600160a060020a03191690556001015b8082111561145e578054600160a060020a03191681556000600182810182905560028084018054848255909281161561010002600019011604601f81901061143057505b506000600383018190556004808401805461ffff19169055600584018290556006840182905560078401805460ff191690556008840180548382559083526020909220611344929091028101905b8082111561145e57600080825560018201818155600283019190915560039091018054600160a060020a03191690556113fc565b601f0160209004906000526020600020908101906113ae91905b8082111561145e576000815560010161144a565b5090565b82800160010185558215611338579182015b82811115611338578251826000505591602001919060010190611474565b50508787866040518084600160a060020a0316606060020a0281526014018381526020018280519060200190808383829060006004602084601f0104600f026
00301f150905001935050505060405180910390208160050160005081905550834201816003016000508190555060018160040160006101000a81548160ff02191690830217905550828160070160006101000a81548160ff02191690830217905550821561157857600881018054600181018083559091908280158290116115735760040281600402836000526020600020918201910161157391906113fc565b505050505b600d8082018054600160a060020a031916331790553460068301819055815401905560408051600160a060020a038a16815260208181018a9052918101859052608060608201818152895191830191909152885185937f5790de2c279e58269b93b12828f56fd5f2bc8ad15e61ce08572585c81a38756f938d938d938a938e93929160a084019185810191908190849082908590600090600490601f850104600f02600301f150905090810190601f1680156116485780820380516001836020036101000a031916815260200191505b509550505050505060405180910390a2509695505050505050565b6040805186815260208101839052815189927fdfc78bdca8e3e0b18c16c5c99323c6cb9eb5e00afde190b4e7273f5158702b07928290030190a25b5050505092915050565b6000805488908110156100025750808052600e8802600080516020612a36833981519152019050600781015490945060ff166116e757620d2f006116ec565b622398805b600485015490935060ff16801561170857506003840154830142115b15611716576117b887611890565b600384015442108061172d5750600484015460ff16155b806117ae57508360000160009054906101000a9004600160a060020a03168460010160005054876040518084600160a060020a0316606060020a0281526014018381526020018280519060200190808383829060006004602084601f0104600f02600301f15090500193505050506040518091039020846005016000505414155b1561114c57610002565b61169e565b60048401805461ff001916610100179055835460019550600160a060020a03908116309091161480159061180157508354600754600160a060020a03908116911614155b801561181d57506008548454600160a060020a03908116911614155b801561183957508354601254600160a060020a03908116911614155b801561185557506003548454600160a060020a03908116911614155b1561188b5760018401805430600160a060020a031660009081526005602052604090208054919091019055546006805490910190555b611663875b6000600060005082815481101561000257908052600e02600080516020612a36833981519152018150600481015490915060ff16156118d757600d80546006830154900390555b600401805460ff1916905550565b15156118f45761190087611890565b6001915061193161047f565b604051600d8501546006860154600160a060020a0391909116916000919082818181858883f193505050505061169e565b6001850154111561194157600091505b50600a8301546009840154865191019060049010801590611986575085600081518110156100025790602001015160f860020a900460f860020a02606860f860020a02145b80156119b6575085600181518110156100025790602001015160f860020a900460f860020a02603760f860020a02145b80156119e6575085600281518110156100025790602001015160f860020a900460f860020a0260ff60f860020a02145b8015611a16575085600381518110156100025790602001015160f860020a900460f860020a02601e60f860020a02145b8015611a45575030600160a060020a0316600090815260056020526040902054611a4290611a5d61047f565b81105b15611a4f57600091505b6001840154611a8090611a5f565b015b30600160a060020a03166000908152600560205260408120546129a961047f565b8110611ad457604051600d8501546006860154600160a060020a0391909116916000919082818181858883f193505050501515611abc57610002565b4260025560165460059004811115611ad45760056001555b6001840154611ae290611a5f565b8110158015611af85750600a8401546009850154115b8015611b015750815b1561188b578360000160009054906101000a9004600160a060020a0316600160a060020a0316846001016000505487604051808280519060200190808383829060006004602084601f0104600f02600301f150905090810190601f168015611b7d5780820380516001836020036101000a031916815260200191505b5091505060006040518083038185876185025a03f19250505015156117bd57610002565b611baa336105ec565b60001415611bb757610002565b600080548890811015610002
57508052600e87027f290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e566810154600080516020612a36833981519152919091019450421080611c1957506003840154622398800142115b80611c3257508354600160a060020a0390811690871614155b80611c425750600784015460ff16155b80611c68575033600160a060020a03166000908152600b8501602052604090205460ff16155b80611c9c575033600160a060020a03166000908152600b60205260409020548714801590611c9c5750604060009081205414155b15611ca657610002565b600884018054600090811015610002579081526020812060030154600160a060020a03161415611e1257611efc86604051600090600160a060020a038316907f9046fefd66f538ab35263248a44217dcb70e2eb2cd136629e141b8b8f9f03b60908390a260408051600e547fe2faf044000000000000000000000000000000000000000000000000000000008252600160a060020a03858116600484015260248301859052604483018590526223988042016064840152925192169163e2faf04491608480820192602092909190829003018187876161da5a03f1156100025750506040515191506106069050565b6008850180546000908110156100025781815260208082209390935530600160a060020a031681526005909252604082205481549092908110156100025790815260208120905060020155601654600885018054600090811015610002579081526020812090506001015560048401805461ff0019166101001790555b6008840180546000908110156100025781548282526020822060010154929190811015610002579081526020812090505433600160a060020a031660009081526014602052604081205460088801805493909102939093049550908110156100025790815260208120905060030154604080517fbaac530000000000000000000000000000000000000000000000000000000000815233600160a060020a0390811660048301529151929091169163baac53009186916024808301926020929190829003018185886185025a03f11561000257505060405151600014159150611f78905057610002565b60088501805460009081101561000257818152602081206003018054600160a060020a03191690931790925580549091908110156100025790815260208120905060030154600160a060020a031660001415611f5757610002565b600d5430600160a060020a0316311015611f7057610002565b611d9561047f565b6008840180546000908110156100025781548282526020822060010154929190811015610002579081526020812090506002015433600160a060020a0390811660009081526014602090815260408083205430909416835260058083528184205460099093529083205460088b018054969095029690960497509487020494508593929091908290811015610002575260208120815060030154600160a060020a0390811682526020828101939093526040918201600090812080549095019094553016835260059091529020548290101561205357610002565b30600160a060020a031660009081526005602052604081208054849003905560088501805483926009929091829081101561000257508152602080822060030154600160a060020a039081168352929052604080822080549094019093553090911681522054819010156120c657610002565b30600160a060020a0390811660009081526009602090815260408083208054869003905533909316808352601482528383205484519081529351929390927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef929181900390910190a36121383361086c565b5033600160a060020a03166000908152601460209081526040808320805460168054919091039055839055600a9091528120556001945061169e565b30600160a060020a0390811660008181526005602090815260408083208054958716808552828520805490970190965584845283905560099091528082208054948352908220805490940190935590815290555b50565b604051600160a060020a0382811691309091163190600081818185876185025a03f192505050151561217457610002565b33600160a060020a03818116600090815260096020908152604080832054815160065460085460e060020a63d2cc718f028352935197995091969195929092169363d2cc718f936004848101949193929183900301908290876161da5a03f11561000257505050604051805190602001506005600050600033600160a060020a03168152602001908152602001600020600050540204101561229d57610002565b600160a060020a033381166000908152600960209081526
04080832054815160065460085460e060020a63d2cc718f02835293519296909593169363d2cc718f93600483810194929383900301908290876161da5a03f11561000257505050604051805190602001506005600050600033600160a060020a0316815260200190815260200160002060005054020403905083156123ec57600860009054906101000a9004600160a060020a0316600160a060020a0316630221038a83600160a060020a0316630e7082036040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e160020a63011081c5028252600160a060020a031660048201526024810186905290516044808301935060209282900301816000876161da5a03f115610002575050604051511515905061245457610002565b6040805160085460e160020a63011081c5028252600160a060020a038581166004840152602483018590529251921691630221038a9160448082019260209290919082900301816000876161da5a03f115610002575050604051511515905061245457610002565b600160a060020a03331660009081526009602052604090208054909101905550600192915050565b6109803361086c565b155b80156124a257506124a23384845b6000600061293a856105ec565b80156124be57506124be83836000600034111561261c57610002565b15610b9f5750600161033f565b15156124d657610002565b6124e08383610719565b905061033f565b155b80156124fb57506124fb848484612495565b80156125185750612518848484600060003411156126c157610002565b15610b9f57506001612528565b90505b9392505050565b151561253a57610002565b61252584848461041b565b30600160a060020a031633600160a060020a031614158061258a575030600160a060020a031660009081526005602052604090205460649061258561047f565b010481115b1561259457610002565b600c55565b600354600160a060020a0390811633909116146125b557610002565b600160a060020a038316600081815260046020908152604091829020805460ff191686179055815185815291517f73ad2a153c8b67991df9459024950b318a609782cee8c7eeda47b905f9baa91f9281900390910190a250600161033f565b506000610983565b33600160a060020a03166000908152601460205260409020548290108015906126455750600082115b156126b957600160a060020a03338116600081815260146020908152604080832080548890039055938716808352918490208054870190558351868152935191937fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef929081900390910190a350600161033f565b50600061033f565b600160a060020a03841660009081526014602052604090205482901080159061270a5750601560209081526040600081812033600160a060020a03168252909252902054829010155b80156127165750600082115b156127a457600160a060020a03838116600081815260146020908152604080832080548801905588851680845281842080548990039055601583528184203390961684529482529182902080548790039055815186815291519293927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef9281900390910190a3506001612528565b506000612528565b600160a060020a038381166000908152600a6020908152604080832054601654600754835160e060020a63d2cc718f02815293519296919591169363d2cc718f9360048181019492939183900301908290876161da5a03f11561000257505060405151905061281a866105ec565b0204101561282757610002565b600160a060020a038381166000908152600a6020908152604080832054601654600754835160e060020a63d2cc718f02815293519296919591169363d2cc718f9360048181019492939183900301908290876161da5a03f115610002575050604051519050612895866105ec565b0204039050600760009054906101000a9004600160a060020a0316600160a060020a0316630221038a84836040518360e060020a0281526004018083600160a060020a03168152602001828152602001925050506020604051808303816000876161da5a03f115610002575050604051511515905061291357610002565b600160a060020a0383166000908152600a6020526040902080548201905560019150610b99565b600160a060020a0386166000908152600a602052604090205480850291909104915081111561296857610002565b600160a060020a038581166000908152600a60205260408082208054859003905591861681522080548201905560019150509392505050565b5060006106
06565b0160030260166000505483020460016000505460166000505404019050610606565b600160a060020a0383166000908152600b6020526040812054815481101561000257818052600e02600080516020612a368339815191520190506003810154909150421115610b9457600160a060020a0383166000908152600b602052604081208190559150610b9956290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563\",", + " \"gasUsed\": \"0x2a97ef\"", + " },", + " \"subtraces\": 3,", + " \"traceAddress\": [", + " 1,", + " 0,", + " 0", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"create\"", + " },", + " {", + " \"action\": {", + " \"from\": \"0x304a554a310c7e546dfe434669c62820b7d83490\",", + " \"gas\": \"0x433f5e\",", + " \"init\": \"0x60606040818152806101bc833960a090525160805160008054600160a060020a03191690921760a060020a60ff0219167401000000000000000000000000000000000000000090910217815561016290819061005a90396000f3606060405236156100405760e060020a60003504630221038a811461004d57806318bdc79a146100aa5780638da5cb5b146100be578063d2cc718f146100d0575b6100d96001805434019055565b6100db6004356024356000805433600160a060020a0390811691161415806100755750600034115b806100a05750805460a060020a900460ff1680156100a057508054600160a060020a03848116911614155b156100f757610002565b6100db60005460ff60a060020a9091041681565b6100ed600054600160a060020a031681565b6100db60015481565b005b60408051918252519081900360200190f35b6060908152602090f35b600160a060020a0383168260608381818185876185025a03f1925050501561015c57604080518381529051600160a060020a038516917f9735b0cb909f3d21d5c16bbcccd272d85fa11446f6d679f6ecb170d2dabfecfc919081900360200190a25060015b9291505056000000000000000000000000304a554a310c7e546dfe434669c62820b7d834900000000000000000000000000000000000000000000000000000000000000001\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"address\": \"0x914d1b8b43e92723e64fd0a06f5bdb8dd9b10c79\",", + " \"code\": \"0x606060405236156100405760e060020a60003504630221038a811461004d57806318bdc79a146100aa5780638da5cb5b146100be578063d2cc718f146100d0575b6100d96001805434019055565b6100db6004356024356000805433600160a060020a0390811691161415806100755750600034115b806100a05750805460a060020a900460ff1680156100a057508054600160a060020a03848116911614155b156100f757610002565b6100db60005460ff60a060020a9091041681565b6100ed600054600160a060020a031681565b6100db60015481565b005b60408051918252519081900360200190f35b6060908152602090f35b600160a060020a0383168260608381818185876185025a03f1925050501561015c57604080518381529051600160a060020a038516917f9735b0cb909f3d21d5c16bbcccd272d85fa11446f6d679f6ecb170d2dabfecfc919081900360200190a25060015b9291505056\",", + " \"gasUsed\": \"0x163e6\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 1,", + " 0,", + " 0,", + " 0", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"create\"", + " },", + " {", + " \"action\": {", + " \"from\": \"0x304a554a310c7e546dfe434669c62820b7d83490\",", + " \"gas\": \"0x405eab\",", + " \"init\": 
\"0x60606040818152806101bc833960a090525160805160008054600160a060020a03191690921760a060020a60ff0219167401000000000000000000000000000000000000000090910217815561016290819061005a90396000f3606060405236156100405760e060020a60003504630221038a811461004d57806318bdc79a146100aa5780638da5cb5b146100be578063d2cc718f146100d0575b6100d96001805434019055565b6100db6004356024356000805433600160a060020a0390811691161415806100755750600034115b806100a05750805460a060020a900460ff1680156100a057508054600160a060020a03848116911614155b156100f757610002565b6100db60005460ff60a060020a9091041681565b6100ed600054600160a060020a031681565b6100db60015481565b005b60408051918252519081900360200190f35b6060908152602090f35b600160a060020a0383168260608381818185876185025a03f1925050501561015c57604080518381529051600160a060020a038516917f9735b0cb909f3d21d5c16bbcccd272d85fa11446f6d679f6ecb170d2dabfecfc919081900360200190a25060015b9291505056000000000000000000000000304a554a310c7e546dfe434669c62820b7d834900000000000000000000000000000000000000000000000000000000000000000\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"address\": \"0xad3ecf23c0c8983b07163708be6d763b5f056193\",", + " \"code\": \"0x606060405236156100405760e060020a60003504630221038a811461004d57806318bdc79a146100aa5780638da5cb5b146100be578063d2cc718f146100d0575b6100d96001805434019055565b6100db6004356024356000805433600160a060020a0390811691161415806100755750600034115b806100a05750805460a060020a900460ff1680156100a057508054600160a060020a03848116911614155b156100f757610002565b6100db60005460ff60a060020a9091041681565b6100ed600054600160a060020a031681565b6100db60015481565b005b60408051918252519081900360200190f35b6060908152602090f35b600160a060020a0383168260608381818185876185025a03f1925050501561015c57604080518381529051600160a060020a038516917f9735b0cb909f3d21d5c16bbcccd272d85fa11446f6d679f6ecb170d2dabfecfc919081900360200190a25060015b9291505056\",", + " \"gasUsed\": \"0x163e6\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 1,", + " 0,", + " 0,", + " 1", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"create\"", + " },", + " {", + " \"action\": {", + " \"from\": \"0x304a554a310c7e546dfe434669c62820b7d83490\",", + " \"gas\": \"0x3e2e73\",", + " \"init\": \"0x60606040818152806101bc833960a090525160805160008054600160a060020a03191690921760a060020a60ff0219167401000000000000000000000000000000000000000090910217815561016290819061005a90396000f3606060405236156100405760e060020a60003504630221038a811461004d57806318bdc79a146100aa5780638da5cb5b146100be578063d2cc718f146100d0575b6100d96001805434019055565b6100db6004356024356000805433600160a060020a0390811691161415806100755750600034115b806100a05750805460a060020a900460ff1680156100a057508054600160a060020a03848116911614155b156100f757610002565b6100db60005460ff60a060020a9091041681565b6100ed600054600160a060020a031681565b6100db60015481565b005b60408051918252519081900360200190f35b6060908152602090f35b600160a060020a0383168260608381818185876185025a03f1925050501561015c57604080518381529051600160a060020a038516917f9735b0cb909f3d21d5c16bbcccd272d85fa11446f6d679f6ecb170d2dabfecfc919081900360200190a25060015b9291505056000000000000000000000000304a554a310c7e546dfe434669c62820b7d834900000000000000000000000000000000000000000000000000000000000000000\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": 
\"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"address\": \"0x1d29edb6997993a16c5086733cfd735d01df787c\",", + " \"code\": \"0x606060405236156100405760e060020a60003504630221038a811461004d57806318bdc79a146100aa5780638da5cb5b146100be578063d2cc718f146100d0575b6100d96001805434019055565b6100db6004356024356000805433600160a060020a0390811691161415806100755750600034115b806100a05750805460a060020a900460ff1680156100a057508054600160a060020a03848116911614155b156100f757610002565b6100db60005460ff60a060020a9091041681565b6100ed600054600160a060020a031681565b6100db60015481565b005b60408051918252519081900360200190f35b6060908152602090f35b600160a060020a0383168260608381818185876185025a03f1925050501561015c57604080518381529051600160a060020a038516917f9735b0cb909f3d21d5c16bbcccd272d85fa11446f6d679f6ecb170d2dabfecfc919081900360200190a25060015b9291505056\",", + " \"gasUsed\": \"0x163e6\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 1,", + " 0,", + " 0,", + " 2", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"create\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"gas\": \"0x18a010\",", + " \"input\": \"0xbaac5300000000000000000000000000c0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", + " \"to\": \"0x304a554a310c7e546dfe434669c62820b7d83490\",", + " \"value\": \"0xdfd4116684423b208\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x124c9\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", + " },", + " \"subtraces\": 1,", + " \"traceAddress\": [", + " 1,", + " 1", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0x304a554a310c7e546dfe434669c62820b7d83490\",", + " \"gas\": \"0x181512\",", + " \"input\": \"0x\",", + " \"to\": \"0x914d1b8b43e92723e64fd0a06f5bdb8dd9b10c79\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x13f9\",", + " \"output\": \"0x\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 1,", + " 1,", + " 0", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"gas\": \"0x171a62\",", + " \"input\": \"0xd2cc718f\",", + " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x113\",", + " \"output\": \"0x00000000000000000000000000000000000000000000000000038d7ea4c68000\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 1,", + " 2", + " ],", + " \"transactionHash\": 
\"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"gas\": \"0x17168c\",", + " \"input\": \"0xd2cc718f\",", + " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x113\",", + " \"output\": \"0x00000000000000000000000000000000000000000000000000038d7ea4c68000\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 1,", + " 3", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"gas\": \"0x171316\",", + " \"input\": \"0x0221038a000000000000000000000000c0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89000000000000000000000000000000000000000000000000000000052aa8b9ab\",", + " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0xea64e\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", + " },", + " \"subtraces\": 1,", + " \"traceAddress\": [", + " 1,", + " 4", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", + " \"gas\": \"0x169540\",", + " \"input\": \"0x\",", + " \"to\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", + " \"value\": \"0x52aa8b9ab\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0xe8407\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", + " },", + " \"subtraces\": 2,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", + " \"gas\": \"0x1632a8\",", + " \"input\": \"0x0e708203\",", + " \"to\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x15f\",", + " \"output\": \"0x000000000000000000000000d2e16a20dd7b1ae54fb0312209784478d069c7b0\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 0", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " 
\"from\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", + " \"gas\": \"0x161ae8\",", + " \"input\": \"0x82661dc4000000000000000000000000000000000000000000000000000000000000003b000000000000000000000000b656b2a9c3b2416437a811e07466ca712f5a5b5a\",", + " \"to\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0xe6afb\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", + " },", + " \"subtraces\": 4,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"gas\": \"0x159290\",", + " \"input\": \"0xbaac5300000000000000000000000000c0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", + " \"to\": \"0x304a554a310c7e546dfe434669c62820b7d83490\",", + " \"value\": \"0xdfd4116684423b208\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x5cdf\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", + " },", + " \"subtraces\": 1,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 0", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0x304a554a310c7e546dfe434669c62820b7d83490\",", + " \"gas\": \"0x150792\",", + " \"input\": \"0x\",", + " \"to\": \"0x914d1b8b43e92723e64fd0a06f5bdb8dd9b10c79\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x13f9\",", + " \"output\": \"0x\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 0,", + " 0", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"gas\": \"0x14d4cc\",", + " \"input\": \"0xd2cc718f\",", + " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x113\",", + " \"output\": \"0x00000000000000000000000000000000000000000000000000038d7ea4c68000\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 1", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"gas\": \"0x14d0f6\",", + " \"input\": \"0xd2cc718f\",", 
+ " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x113\",", + " \"output\": \"0x00000000000000000000000000000000000000000000000000038d7ea4c68000\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 2", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"gas\": \"0x14cd7d\",", + " \"input\": \"0x0221038a000000000000000000000000c0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89000000000000000000000000000000000000000000000000000000052aa8b9ab\",", + " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0xcf3f8\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", + " },", + " \"subtraces\": 1,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", + " \"gas\": \"0x144fa7\",", + " \"input\": \"0x\",", + " \"to\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", + " \"value\": \"0x52aa8b9ab\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0xcd1b1\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", + " },", + " \"subtraces\": 2,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", + " \"gas\": \"0x13ed0f\",", + " \"input\": \"0x0e708203\",", + " \"to\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x15f\",", + " \"output\": \"0x000000000000000000000000d2e16a20dd7b1ae54fb0312209784478d069c7b0\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 0", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", + " \"gas\": \"0x13d54f\",", + " \"input\": 
\"0x82661dc4000000000000000000000000000000000000000000000000000000000000003b000000000000000000000000b656b2a9c3b2416437a811e07466ca712f5a5b5a\",", + " \"to\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0xcb8a5\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", + " },", + " \"subtraces\": 4,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"gas\": \"0x134cf7\",", + " \"input\": \"0xbaac5300000000000000000000000000c0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", + " \"to\": \"0x304a554a310c7e546dfe434669c62820b7d83490\",", + " \"value\": \"0xdfd4116684423b208\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x5cdf\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", + " },", + " \"subtraces\": 1,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 0", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0x304a554a310c7e546dfe434669c62820b7d83490\",", + " \"gas\": \"0x12c1f9\",", + " \"input\": \"0x\",", + " \"to\": \"0x914d1b8b43e92723e64fd0a06f5bdb8dd9b10c79\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x13f9\",", + " \"output\": \"0x\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 0,", + " 0", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"gas\": \"0x128f33\",", + " \"input\": \"0xd2cc718f\",", + " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x113\",", + " \"output\": \"0x00000000000000000000000000000000000000000000000000038d7ea4c68000\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 1", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"gas\": \"0x128b5d\",", + " \"input\": 
\"0xd2cc718f\",", + " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x113\",", + " \"output\": \"0x00000000000000000000000000000000000000000000000000038d7ea4c68000\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 2", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"gas\": \"0x1287e4\",", + " \"input\": \"0x0221038a000000000000000000000000c0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89000000000000000000000000000000000000000000000000000000052aa8b9ab\",", + " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0xb41a2\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", + " },", + " \"subtraces\": 1,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", + " \"gas\": \"0x120a0e\",", + " \"input\": \"0x\",", + " \"to\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", + " \"value\": \"0x52aa8b9ab\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0xb1f5b\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", + " },", + " \"subtraces\": 2,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", + " \"gas\": \"0x11a776\",", + " \"input\": \"0x0e708203\",", + " \"to\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x15f\",", + " \"output\": \"0x000000000000000000000000d2e16a20dd7b1ae54fb0312209784478d069c7b0\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 0", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", + " \"gas\": \"0x118fb6\",", + " 
\"input\": \"0x82661dc4000000000000000000000000000000000000000000000000000000000000003b000000000000000000000000b656b2a9c3b2416437a811e07466ca712f5a5b5a\",", + " \"to\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0xb064f\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", + " },", + " \"subtraces\": 4,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"gas\": \"0x11075e\",", + " \"input\": \"0xbaac5300000000000000000000000000c0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", + " \"to\": \"0x304a554a310c7e546dfe434669c62820b7d83490\",", + " \"value\": \"0xdfd4116684423b208\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x5cdf\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", + " },", + " \"subtraces\": 1,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 0", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0x304a554a310c7e546dfe434669c62820b7d83490\",", + " \"gas\": \"0x107c60\",", + " \"input\": \"0x\",", + " \"to\": \"0x914d1b8b43e92723e64fd0a06f5bdb8dd9b10c79\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x13f9\",", + " \"output\": \"0x\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 0,", + " 0", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"gas\": \"0x10499a\",", + " \"input\": \"0xd2cc718f\",", + " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x113\",", + " \"output\": \"0x00000000000000000000000000000000000000000000000000038d7ea4c68000\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 1", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " 
\"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"gas\": \"0x1045c4\",", + " \"input\": \"0xd2cc718f\",", + " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x113\",", + " \"output\": \"0x00000000000000000000000000000000000000000000000000038d7ea4c68000\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 2", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"gas\": \"0x10424b\",", + " \"input\": \"0x0221038a000000000000000000000000c0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89000000000000000000000000000000000000000000000000000000052aa8b9ab\",", + " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x98f4c\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", + " },", + " \"subtraces\": 1,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", + " \"gas\": \"0xfc475\",", + " \"input\": \"0x\",", + " \"to\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", + " \"value\": \"0x52aa8b9ab\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x96d05\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", + " },", + " \"subtraces\": 2,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", + " \"gas\": \"0xf61dd\",", + " \"input\": \"0x0e708203\",", + " \"to\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x15f\",", + " \"output\": \"0x000000000000000000000000d2e16a20dd7b1ae54fb0312209784478d069c7b0\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 0", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " 
\"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", + " \"gas\": \"0xf4a1d\",", + " \"input\": \"0x82661dc4000000000000000000000000000000000000000000000000000000000000003b000000000000000000000000b656b2a9c3b2416437a811e07466ca712f5a5b5a\",", + " \"to\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x953f9\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", + " },", + " \"subtraces\": 4,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"gas\": \"0xec1c5\",", + " \"input\": \"0xbaac5300000000000000000000000000c0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", + " \"to\": \"0x304a554a310c7e546dfe434669c62820b7d83490\",", + " \"value\": \"0xdfd4116684423b208\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x5cdf\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", + " },", + " \"subtraces\": 1,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 0", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0x304a554a310c7e546dfe434669c62820b7d83490\",", + " \"gas\": \"0xe36c7\",", + " \"input\": \"0x\",", + " \"to\": \"0x914d1b8b43e92723e64fd0a06f5bdb8dd9b10c79\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x13f9\",", + " \"output\": \"0x\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 0,", + " 0", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"gas\": \"0xe0401\",", + " \"input\": \"0xd2cc718f\",", + " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x113\",", + " \"output\": \"0x00000000000000000000000000000000000000000000000000038d7ea4c68000\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 
3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 1", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"gas\": \"0xe002b\",", + " \"input\": \"0xd2cc718f\",", + " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x113\",", + " \"output\": \"0x00000000000000000000000000000000000000000000000000038d7ea4c68000\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 2", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"gas\": \"0xdfcb2\",", + " \"input\": \"0x0221038a000000000000000000000000c0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89000000000000000000000000000000000000000000000000000000052aa8b9ab\",", + " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x7dcf6\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", + " },", + " \"subtraces\": 1,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", + " \"gas\": \"0xd7edc\",", + " \"input\": \"0x\",", + " \"to\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", + " \"value\": \"0x52aa8b9ab\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x7baaf\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", + " },", + " \"subtraces\": 2,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", + " \"gas\": \"0xd1c44\",", + " \"input\": \"0x0e708203\",", + " \"to\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": 
\"0x15f\",", + " \"output\": \"0x000000000000000000000000d2e16a20dd7b1ae54fb0312209784478d069c7b0\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 0", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", + " \"gas\": \"0xd0484\",", + " \"input\": \"0x82661dc4000000000000000000000000000000000000000000000000000000000000003b000000000000000000000000b656b2a9c3b2416437a811e07466ca712f5a5b5a\",", + " \"to\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x7a1a3\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", + " },", + " \"subtraces\": 4,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"gas\": \"0xc7c2c\",", + " \"input\": \"0xbaac5300000000000000000000000000c0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", + " \"to\": \"0x304a554a310c7e546dfe434669c62820b7d83490\",", + " \"value\": \"0xdfd4116684423b208\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x5cdf\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", + " },", + " \"subtraces\": 1,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 0", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0x304a554a310c7e546dfe434669c62820b7d83490\",", + " \"gas\": \"0xbf12e\",", + " \"input\": \"0x\",", + " \"to\": \"0x914d1b8b43e92723e64fd0a06f5bdb8dd9b10c79\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x13f9\",", + " \"output\": \"0x\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 0,", + " 0", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"gas\": 
\"0xbbe68\",", + " \"input\": \"0xd2cc718f\",", + " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x113\",", + " \"output\": \"0x00000000000000000000000000000000000000000000000000038d7ea4c68000\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 1", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"gas\": \"0xbba92\",", + " \"input\": \"0xd2cc718f\",", + " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x113\",", + " \"output\": \"0x00000000000000000000000000000000000000000000000000038d7ea4c68000\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 2", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"gas\": \"0xbb719\",", + " \"input\": \"0x0221038a000000000000000000000000c0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89000000000000000000000000000000000000000000000000000000052aa8b9ab\",", + " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x62aa0\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", + " },", + " \"subtraces\": 1,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", + " \"gas\": \"0xb3943\",", + " \"input\": \"0x\",", + " \"to\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", + " \"value\": \"0x52aa8b9ab\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x60859\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", + " },", + " \"subtraces\": 2,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0", + " 
],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", + " \"gas\": \"0xad6ab\",", + " \"input\": \"0x0e708203\",", + " \"to\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x15f\",", + " \"output\": \"0x000000000000000000000000d2e16a20dd7b1ae54fb0312209784478d069c7b0\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 0", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", + " \"gas\": \"0xabeeb\",", + " \"input\": \"0x82661dc4000000000000000000000000000000000000000000000000000000000000003b000000000000000000000000b656b2a9c3b2416437a811e07466ca712f5a5b5a\",", + " \"to\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x5ef4d\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", + " },", + " \"subtraces\": 4,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"gas\": \"0xa3693\",", + " \"input\": \"0xbaac5300000000000000000000000000c0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", + " \"to\": \"0x304a554a310c7e546dfe434669c62820b7d83490\",", + " \"value\": \"0xdfd4116684423b208\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x5cdf\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", + " },", + " \"subtraces\": 1,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 0", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0x304a554a310c7e546dfe434669c62820b7d83490\",", + " \"gas\": \"0x9ab95\",", + " \"input\": \"0x\",", + " \"to\": \"0x914d1b8b43e92723e64fd0a06f5bdb8dd9b10c79\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": 
\"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x13f9\",", + " \"output\": \"0x\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 0,", + " 0", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"gas\": \"0x978cf\",", + " \"input\": \"0xd2cc718f\",", + " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x113\",", + " \"output\": \"0x00000000000000000000000000000000000000000000000000038d7ea4c68000\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 1", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"gas\": \"0x974f9\",", + " \"input\": \"0xd2cc718f\",", + " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x113\",", + " \"output\": \"0x00000000000000000000000000000000000000000000000000038d7ea4c68000\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 2", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"gas\": \"0x97180\",", + " \"input\": \"0x0221038a000000000000000000000000c0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89000000000000000000000000000000000000000000000000000000052aa8b9ab\",", + " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x4784a\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", + " },", + " \"subtraces\": 1,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " 
\"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", + " \"gas\": \"0x8f3aa\",", + " \"input\": \"0x\",", + " \"to\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", + " \"value\": \"0x52aa8b9ab\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x45603\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", + " },", + " \"subtraces\": 2,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", + " \"gas\": \"0x89112\",", + " \"input\": \"0x0e708203\",", + " \"to\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x15f\",", + " \"output\": \"0x000000000000000000000000d2e16a20dd7b1ae54fb0312209784478d069c7b0\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 0", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", + " \"gas\": \"0x87952\",", + " \"input\": \"0x82661dc4000000000000000000000000000000000000000000000000000000000000003b000000000000000000000000b656b2a9c3b2416437a811e07466ca712f5a5b5a\",", + " \"to\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x43cf7\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", + " },", + " \"subtraces\": 4,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"gas\": \"0x7f0fa\",", + " \"input\": \"0xbaac5300000000000000000000000000c0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", + " \"to\": \"0x304a554a310c7e546dfe434669c62820b7d83490\",", + " \"value\": \"0xdfd4116684423b208\"", + " },", + " \"blockHash\": 
\"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x5cdf\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", + " },", + " \"subtraces\": 1,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 0", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0x304a554a310c7e546dfe434669c62820b7d83490\",", + " \"gas\": \"0x765fc\",", + " \"input\": \"0x\",", + " \"to\": \"0x914d1b8b43e92723e64fd0a06f5bdb8dd9b10c79\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x13f9\",", + " \"output\": \"0x\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 0,", + " 0", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"gas\": \"0x73336\",", + " \"input\": \"0xd2cc718f\",", + " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x113\",", + " \"output\": \"0x00000000000000000000000000000000000000000000000000038d7ea4c68000\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 1", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"gas\": \"0x72f60\",", + " \"input\": \"0xd2cc718f\",", + " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x113\",", + " \"output\": \"0x00000000000000000000000000000000000000000000000000038d7ea4c68000\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 2", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " 
\"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"gas\": \"0x72be7\",", + " \"input\": \"0x0221038a000000000000000000000000c0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89000000000000000000000000000000000000000000000000000000052aa8b9ab\",", + " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x2c5f4\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", + " },", + " \"subtraces\": 1,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", + " \"gas\": \"0x6ae11\",", + " \"input\": \"0x\",", + " \"to\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", + " \"value\": \"0x52aa8b9ab\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x2a3ad\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", + " },", + " \"subtraces\": 2,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", + " \"gas\": \"0x64b79\",", + " \"input\": \"0x0e708203\",", + " \"to\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x15f\",", + " \"output\": \"0x000000000000000000000000d2e16a20dd7b1ae54fb0312209784478d069c7b0\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 0", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", + " \"gas\": \"0x633b9\",", + " \"input\": \"0x82661dc4000000000000000000000000000000000000000000000000000000000000003b000000000000000000000000b656b2a9c3b2416437a811e07466ca712f5a5b5a\",", + " \"to\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"value\": \"0x0\"", + " },", + " 
\"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x28aa1\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", + " },", + " \"subtraces\": 4,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"gas\": \"0x5ab61\",", + " \"input\": \"0xbaac5300000000000000000000000000c0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", + " \"to\": \"0x304a554a310c7e546dfe434669c62820b7d83490\",", + " \"value\": \"0xdfd4116684423b208\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x5cdf\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", + " },", + " \"subtraces\": 1,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 0", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0x304a554a310c7e546dfe434669c62820b7d83490\",", + " \"gas\": \"0x52063\",", + " \"input\": \"0x\",", + " \"to\": \"0x914d1b8b43e92723e64fd0a06f5bdb8dd9b10c79\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x13f9\",", + " \"output\": \"0x\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 0,", + " 0", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"gas\": \"0x4ed9d\",", + " \"input\": \"0xd2cc718f\",", + " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x113\",", + " \"output\": \"0x00000000000000000000000000000000000000000000000000038d7ea4c68000\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + 
" 1,", + " 3,", + " 0,", + " 1,", + " 1", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"gas\": \"0x4e9c7\",", + " \"input\": \"0xd2cc718f\",", + " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x113\",", + " \"output\": \"0x00000000000000000000000000000000000000000000000000038d7ea4c68000\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 2", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"gas\": \"0x4e64e\",", + " \"input\": \"0x0221038a000000000000000000000000c0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89000000000000000000000000000000000000000000000000000000052aa8b9ab\",", + " \"to\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x1139e\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", + " },", + " \"subtraces\": 1,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xd2e16a20dd7b1ae54fb0312209784478d069c7b0\",", + " \"gas\": \"0x46878\",", + " \"input\": \"0x\",", + " \"to\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", + " \"value\": \"0x52aa8b9ab\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0xf157\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", + " },", + " \"subtraces\": 3,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", + " \"gas\": \"0x405e0\",", + " 
\"input\": \"0x0e708203\",", + " \"to\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x15f\",", + " \"output\": \"0x000000000000000000000000d2e16a20dd7b1ae54fb0312209784478d069c7b0\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 0", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", + " \"gas\": \"0x4024b\",", + " \"input\": \"0x70a08231000000000000000000000000c0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", + " \"to\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x314\",", + " \"output\": \"0x00000000000000000000000000000000000000000000000dfd3f956d86e77600\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xc0ee9db1a9e07ca63e4ff0d5fb6f86bf68d47b89\",", + " \"gas\": \"0x3fe60\",", + " \"input\": \"0xa9059cbb000000000000000000000000f835a0247b0063c04ef22006ebe57c5f11977cc400000000000000000000000000000000000000000000000dfd3f956d86e77600\",", + " \"to\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0xd4fa\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 1,", + " 4,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 1,", + " 3,", + " 0,", + " 2", + " ],", + " \"transactionHash\": \"0x0ec3f2488a93839524add10ea229e773f6bc891b4eb4794c3337d4495263790b\",", + " \"transactionPosition\": 0,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0x2dd9d8e62af6e839b4ed0d869094198ee7e02bff\",", + " \"gas\": \"0x0\",", + " \"input\": \"0x\",", + " \"to\": \"0x46e943ad525b7fe18f8240d944028c7890da135c\",", + " \"value\": \"0xb1a2bc2ec50000\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " 
\"gasUsed\": \"0x0\",", + " \"output\": \"0x\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [],", + " \"transactionHash\": \"0x61db76fc3fe109ad548d91d321daf76dc2ef2f683dc3c4006377d1da8629e274\",", + " \"transactionPosition\": 1,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0x286635c294b61bf10f416bbb7b579a0035379d33\",", + " \"gas\": \"0x10d88\",", + " \"input\": \"0x\",", + " \"to\": \"0x81d246bf10386b5702193202b865b0e45bd97f1a\",", + " \"value\": \"0x393ef1a5127c80000\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x0\",", + " \"output\": \"0x\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [],", + " \"transactionHash\": \"0x877db25210ca8be928280112b8b3b4a3afced1d35eb8cd795ac730c7e89d88a3\",", + " \"transactionPosition\": 2,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0xfecd1b8e3a1e21b304bfba7e5fb5241169e1fa1d\",", + " \"gas\": \"0x0\",", + " \"input\": \"0x\",", + " \"to\": \"0x7ed1e469fcb3ee19c0366d829e291451be638e59\",", + " \"value\": \"0x9e4e3e07f0b2fc00\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x0\",", + " \"output\": \"0x\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [],", + " \"transactionHash\": \"0xe7b3edd1d02b869b4d0eac0be43a67f0ac2f5b190f5a49f37ac59a98b17f56a3\",", + " \"transactionPosition\": 3,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0x0a869d79a7052c7f1b55a8ebabbea3420f0d1e13\",", + " \"gas\": \"0x74148\",", + " \"input\": \"0xf5537ede000000000000000000000000bb9bc244d798123fde783fcc1c72d3bb8c1894130000000000000000000000000a869d79a7052c7f1b55a8ebabbea3420f0d1e1300000000000000000000000000000000000000000000000000b1a2bc2ec50000\",", + " \"to\": \"0x447f914fee54e1f9dc1fc5276ae2572b9369ae5d\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x68eb\",", + " \"output\": \"0x\"", + " },", + " \"subtraces\": 1,", + " \"traceAddress\": [],", + " \"transactionHash\": \"0xe11112b361cc2ffdc4815513dcb337beb83be014bcc89cd39a984f3d458e668d\",", + " \"transactionPosition\": 4,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " \"callType\": \"call\",", + " \"from\": \"0x447f914fee54e1f9dc1fc5276ae2572b9369ae5d\",", + " \"gas\": \"0x6ddd9\",", + " \"input\": \"0xa9059cbb0000000000000000000000000a869d79a7052c7f1b55a8ebabbea3420f0d1e1300000000000000000000000000000000000000000000000000b1a2bc2ec50000\",", + " \"to\": \"0xbb9bc244d798123fde783fcc1c72d3bb8c189413\",", + " \"value\": \"0x0\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": {", + " \"gasUsed\": \"0x5fca\",", + " \"output\": \"0x0000000000000000000000000000000000000000000000000000000000000001\"", + " },", + " \"subtraces\": 0,", + " \"traceAddress\": [", + " 0", + " ],", + " \"transactionHash\": \"0xe11112b361cc2ffdc4815513dcb337beb83be014bcc89cd39a984f3d458e668d\",", + " \"transactionPosition\": 4,", + " \"type\": \"call\"", + " },", + " {", + " \"action\": {", + " 
\"author\": \"0xbcdfc35b86bedf72f0cda046a3c16829a2ef41d1\",", + " \"rewardType\": \"block\",", + " \"value\": \"0x4563918244f40000\"", + " },", + " \"blockHash\": \"0xcaaa13ce099342d5e1342b04d588d7733093591666af8ef756ce20cf13d16475\",", + " \"blockNumber\": 1718497,", + " \"result\": null,", + " \"subtraces\": 0,", + " \"traceAddress\": [],", + " \"transactionHash\": null,", + " \"transactionPosition\": null,", + " \"type\": \"reward\"", + " }", + " ],", + " \"id\": \"1\"", + "}", + "", + "pm.test('Has correct result', function() {", + " pm.expect(pm.response.json()).to.be.deep.equal(expected);", + "})" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_block\",\n\t\"params\":[\"0x1a38e1\"],\n\t\"id\":\"1\"\n}\n", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": [ + "{{HOST}}" + ] + } + }, + "response": [] + }, + { + "name": "trace_filter", + "event": [ + { + "listen": "test", + "script": { + "id": "f524ed84-7851-4a5a-88f2-4d19a10e74cb", + "exec": [ + "var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"result\": [", + " {", + " \"action\": {", + " \"author\": \"0x5088d623ba0fcf0131e0897a91734a4d83596aa0\",", + " \"rewardType\": \"block\",", + " \"value\": \"0x478eae0e571ba000\"", + " },", + " \"blockHash\": \"0x3d6122660cc824376f11ee842f83addc3525e2dd6756b9bcf0affa6aa88cf741\",", + " \"blockNumber\": 3,", + " \"result\": null,", + " \"subtraces\": 0,", + " \"traceAddress\": [],", + " \"transactionHash\": null,", + " \"transactionPosition\": null,", + " \"type\": \"reward\"", + " },", + " {", + " \"action\": {", + " \"author\": \"0xc8ebccc5f5689fa8659d83713341e5ad19349448\",", + " \"rewardType\": \"uncle\",", + " \"value\": \"0x340aad21b3b70000\"", + " },", + " \"blockHash\": \"0x3d6122660cc824376f11ee842f83addc3525e2dd6756b9bcf0affa6aa88cf741\",", + " \"blockNumber\": 3,", + " \"result\": null,", + " \"subtraces\": 0,", + " \"traceAddress\": [],", + " \"transactionHash\": null,", + " \"transactionPosition\": null,", + " \"type\": \"reward\"", + " }", + " ],", + " \"id\": \"1\"", + "}", + "pm.test('Has correct result', function() {", + " pm.expect(pm.response.json()).to.be.deep.equal(expected);", + "})" + ], + "type": "text/javascript" + } + } + ], + "request": { + "method": "POST", + "header": [ + { + "key": "Content-Type", + "value": "application/json" + } + ], + "body": { + "mode": "raw", + "raw": "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"trace_filter\",\n \"params\":[\n {\n \"fromBlock\":\"0x3\",\n \"toBlock\":\"0x3\"\n }\n ],\n\t\"id\":\"1\"\n}\n", + "options": { + "raw": {} + } + }, + "url": { + "raw": "{{HOST}}", + "host": [ + "{{HOST}}" + ] + } + }, + "response": [] + } + ], + "event": [ + { + "listen": "prerequest", + "script": { + "id": "f660f521-60fc-4561-bacd-14ab00640a12", + "type": "text/javascript", + "exec": [ + "utils = {", + " notImplemented: function(methodName, jsonData) {", + " var isErigon = pm.environment.get('HOST') == \"{{ERIGON}}\";", + " var isSilk = pm.environment.get('HOST') == \"{{SILKRPC}}\";", + " if (!isErigon && !isSilk) // only test erigon", + " return;", + "", + " var testNotImplemented = pm.globals.get('TEST_NOT_IMPLEMENTED') === 'true';", + " if (testNotImplemented) { // defaults to false, therefore don't test", + " pm.test('NOT IMPLEMENTED', function() {", + " pm.expect(false).to.be(true);", + " })", + " } else {", + " // 
pass unless user has explicitly told us to test not implemented", + " var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"id\": \"1\",", + " \"error\": {", + " \"code\": -32000,", + " \"message\": \"the method is currently not implemented: \" + methodName", + " }", + " }", + " if (jsonData.error)", + " delete jsonData.error.data;", + " pm.test('NOT IMPLEMENTED', function() {", + " pm.expect(jsonData).to.deep.equals(expected);", + " })", + " }", + " },", + "", + " isDeprecated: function(methodName, jsonData) {", + " var isErigon = pm.environment.get('HOST') == \"{{ERIGON}}\";", + " var isSilk = pm.environment.get('HOST') == \"{{SILKRPC}}\";", + " if (!isErigon && !isSilk) // only test erigon", + " return;", + "", + " var testDeprecated = pm.globals.get('TEST_DEPRECATED') === 'true';", + " if (testDeprecated) { // defaults to false, therefore don't test", + " pm.test('DEPRECATED', function() {", + " console.log(\"testDeprecated2: \", testDeprecated)", + " pm.expect(false).to.be(true);", + " })", + " } else {", + " // pass unless user has explicitly told us to fail deprecated", + " var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"id\": \"1\",", + " \"error\": {", + " \"code\": -32000,", + " \"message\": \"the method has been deprecated: \" + methodName", + " }", + " }", + " if (jsonData.error)", + " delete jsonData.error.data;", + " pm.test('DEPRECATED', function() {", + " pm.expect(jsonData).to.deep.equals(expected);", + " })", + " }", + " },", + "", + " cannotTest: function(methodName, jsonData) {", + " var isErigon = pm.environment.get('HOST') == \"{{ERIGON}}\";", + " var isSilk = pm.environment.get('HOST') == \"{{SILKRPC}}\";", + " if (!isErigon && !isSilk) // only test erigon", + " return;", + "", + " var expected = {", + " \"jsonrpc\": \"2.0\",", + " \"id\": \"1\",", + " \"result\": \"Cannot test - value changes\"", + " }", + " pm.test('VALUE CHANGES, CANNOT TEST: ' + methodName, function() {", + " jsonData.result = \"Cannot test - value changes\";", + " pm.expect(jsonData).to.deep.equals(expected);", + " })", + " },", + "};" + ] + } + }, + { + "listen": "test", + "script": { + "id": "8e45cd97-14f5-42f7-9df7-fe5e2824be86", + "type": "text/javascript", + "exec": [ + "pm.test('Base tests', function() {", + " const jsonData = pm.response.json();", + " pm.response.to.have.status(200);", + " pm.expect(jsonData !== null)", + " jsonData.errors == null || pm.expect(jsonData.errors).to.be.empty;", + "})", + "" + ] + } + } + ], + "protocolProfileBehavior": {} +} \ No newline at end of file diff --git a/cmd/rpcdaemon22/rpcdaemontest/test_util.go b/cmd/rpcdaemon22/rpcdaemontest/test_util.go new file mode 100644 index 00000000000..15589682e34 --- /dev/null +++ b/cmd/rpcdaemon22/rpcdaemontest/test_util.go @@ -0,0 +1,321 @@ +package rpcdaemontest + +import ( + "context" + "crypto/ecdsa" + "encoding/binary" + "math/big" + "net" + "testing" + + "github.com/ledgerwatch/erigon/consensus" + + "github.com/holiman/uint256" + "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" + "github.com/ledgerwatch/erigon-lib/gointerfaces/starknet" + "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/accounts/abi/bind" + "github.com/ledgerwatch/erigon/accounts/abi/bind/backends" + "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/commands/contracts" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/consensus/ethash" + "github.com/ledgerwatch/erigon/core" + "github.com/ledgerwatch/erigon/core/types" + 
"github.com/ledgerwatch/erigon/crypto" + "github.com/ledgerwatch/erigon/ethdb/privateapi" + "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/turbo/snapshotsync" + "github.com/ledgerwatch/erigon/turbo/stages" + "google.golang.org/grpc" + "google.golang.org/grpc/test/bufconn" +) + +func CreateTestKV(t *testing.T) kv.RwDB { + s, _, _ := CreateTestSentry(t) + return s.DB +} + +type testAddresses struct { + key *ecdsa.PrivateKey + key1 *ecdsa.PrivateKey + key2 *ecdsa.PrivateKey + address common.Address + address1 common.Address + address2 common.Address +} + +func makeTestAddresses() testAddresses { + var ( + key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + key1, _ = crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee") + key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") + address = crypto.PubkeyToAddress(key.PublicKey) + address1 = crypto.PubkeyToAddress(key1.PublicKey) + address2 = crypto.PubkeyToAddress(key2.PublicKey) + ) + + return testAddresses{ + key: key, + key1: key1, + key2: key2, + address: address, + address1: address1, + address2: address2, + } +} + +func CreateTestSentry(t *testing.T) (*stages.MockSentry, *core.ChainPack, []*core.ChainPack) { + addresses := makeTestAddresses() + var ( + key = addresses.key + address = addresses.address + address1 = addresses.address1 + address2 = addresses.address2 + ) + + var ( + gspec = &core.Genesis{ + Config: params.AllEthashProtocolChanges, + Alloc: core.GenesisAlloc{ + address: {Balance: big.NewInt(9000000000000000000)}, + address1: {Balance: big.NewInt(200000000000000000)}, + address2: {Balance: big.NewInt(300000000000000000)}, + }, + GasLimit: 10000000, + } + ) + m := stages.MockWithGenesis(t, gspec, key) + + contractBackend := backends.NewSimulatedBackendWithConfig(gspec.Alloc, gspec.Config, gspec.GasLimit) + defer contractBackend.Close() + + // Generate empty chain to have some orphaned blocks for tests + orphanedChain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 5, func(i int, block *core.BlockGen) { + }, true) + if err != nil { + t.Fatal(err) + } + + chain, err := getChainInstance(&addresses, m.ChainConfig, m.Genesis, m.Engine, m.DB, contractBackend) + if err != nil { + t.Fatal(err) + } + + if err = m.InsertChain(orphanedChain); err != nil { + t.Fatal(err) + } + if err = m.InsertChain(chain); err != nil { + t.Fatal(err) + } + + return m, chain, []*core.ChainPack{orphanedChain} +} + +var chainInstance *core.ChainPack + +func getChainInstance( + addresses *testAddresses, + config *params.ChainConfig, + parent *types.Block, + engine consensus.Engine, + db kv.RwDB, + contractBackend *backends.SimulatedBackend, +) (*core.ChainPack, error) { + var err error + if chainInstance == nil { + chainInstance, err = generateChain(addresses, config, parent, engine, db, contractBackend) + } + return chainInstance.Copy(), err +} + +func generateChain( + addresses *testAddresses, + config *params.ChainConfig, + parent *types.Block, + engine consensus.Engine, + db kv.RwDB, + contractBackend *backends.SimulatedBackend, +) (*core.ChainPack, error) { + var ( + key = addresses.key + key1 = addresses.key1 + key2 = addresses.key2 + address = addresses.address + address1 = addresses.address1 + address2 = addresses.address2 + theAddr = common.Address{1} + chainId = big.NewInt(1337) + // this code generates a log + signer = types.LatestSignerForChainID(nil) + ) + + transactOpts, _ := 
bind.NewKeyedTransactorWithChainID(key, chainId) + transactOpts1, _ := bind.NewKeyedTransactorWithChainID(key1, chainId) + transactOpts2, _ := bind.NewKeyedTransactorWithChainID(key2, chainId) + var poly *contracts.Poly + var tokenContract *contracts.Token + + // We generate the blocks without plain state because it's not supported in core.GenerateChain + return core.GenerateChain(config, parent, engine, db, 10, func(i int, block *core.BlockGen) { + var ( + txn types.Transaction + txs []types.Transaction + err error + ) + + ctx := context.Background() + switch i { + case 0: + txn, err = types.SignTx(types.NewTransaction(0, theAddr, uint256.NewInt(1000000000000000), 21000, new(uint256.Int), nil), *signer, key) + if err != nil { + panic(err) + } + err = contractBackend.SendTransaction(ctx, txn) + if err != nil { + panic(err) + } + case 1: + txn, err = types.SignTx(types.NewTransaction(1, theAddr, uint256.NewInt(1000000000000000), 21000, new(uint256.Int), nil), *signer, key) + if err != nil { + panic(err) + } + err = contractBackend.SendTransaction(ctx, txn) + if err != nil { + panic(err) + } + case 2: + _, txn, tokenContract, err = contracts.DeployToken(transactOpts, contractBackend, address1) + case 3: + txn, err = tokenContract.Mint(transactOpts1, address2, big.NewInt(10)) + case 4: + txn, err = tokenContract.Transfer(transactOpts2, address, big.NewInt(3)) + case 5: + // Multiple transactions sending small amounts of ether to various accounts + var j uint64 + var toAddr common.Address + nonce := block.TxNonce(address) + for j = 1; j <= 32; j++ { + binary.BigEndian.PutUint64(toAddr[:], j) + txn, err = types.SignTx(types.NewTransaction(nonce, toAddr, uint256.NewInt(1_000_000_000_000_000), 21000, new(uint256.Int), nil), *signer, key) + if err != nil { + panic(err) + } + err = contractBackend.SendTransaction(ctx, txn) + if err != nil { + panic(err) + } + txs = append(txs, txn) + nonce++ + } + case 6: + _, txn, tokenContract, err = contracts.DeployToken(transactOpts, contractBackend, address1) + if err != nil { + panic(err) + } + txs = append(txs, txn) + txn, err = tokenContract.Mint(transactOpts1, address2, big.NewInt(100)) + if err != nil { + panic(err) + } + txs = append(txs, txn) + // Multiple transactions sending small amounts of ether to various accounts + var j uint64 + var toAddr common.Address + for j = 1; j <= 32; j++ { + binary.BigEndian.PutUint64(toAddr[:], j) + txn, err = tokenContract.Transfer(transactOpts2, toAddr, big.NewInt(1)) + if err != nil { + panic(err) + } + txs = append(txs, txn) + } + case 7: + var toAddr common.Address + nonce := block.TxNonce(address) + binary.BigEndian.PutUint64(toAddr[:], 4) + txn, err = types.SignTx(types.NewTransaction(nonce, toAddr, uint256.NewInt(1000000000000000), 21000, new(uint256.Int), nil), *signer, key) + if err != nil { + panic(err) + } + err = contractBackend.SendTransaction(ctx, txn) + if err != nil { + panic(err) + } + txs = append(txs, txn) + binary.BigEndian.PutUint64(toAddr[:], 12) + txn, err = tokenContract.Transfer(transactOpts2, toAddr, big.NewInt(1)) + if err != nil { + panic(err) + } + txs = append(txs, txn) + case 8: + _, txn, poly, err = contracts.DeployPoly(transactOpts, contractBackend) + if err != nil { + panic(err) + } + txs = append(txs, txn) + case 9: + txn, err = poly.DeployAndDestruct(transactOpts, big.NewInt(0)) + if err != nil { + panic(err) + } + txs = append(txs, txn) + } + + if err != nil { + panic(err) + } + if txs == nil && txn != nil { + txs = append(txs, txn) + } + + for _, txn := range txs { + 
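+			// Each transaction collected for this case is added to the block being
+			// generated; the simulated backend is committed once per block below.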
block.AddTx(txn) + } + contractBackend.Commit() + }, true) +} + +type IsMiningMock struct{} + +func (*IsMiningMock) IsMining() bool { return false } + +func CreateTestGrpcConn(t *testing.T, m *stages.MockSentry) (context.Context, *grpc.ClientConn) { //nolint + ctx, cancel := context.WithCancel(context.Background()) + + apis := m.Engine.APIs(nil) + if len(apis) < 1 { + t.Fatal("couldn't instantiate Engine api") + } + + ethashApi := apis[1].Service.(*ethash.API) + server := grpc.NewServer() + + remote.RegisterETHBACKENDServer(server, privateapi.NewEthBackendServer(ctx, nil, m.DB, m.Notifications.Events, snapshotsync.NewBlockReader(), nil, nil, nil, nil, false)) + txpool.RegisterTxpoolServer(server, m.TxPoolGrpcServer) + txpool.RegisterMiningServer(server, privateapi.NewMiningServer(ctx, &IsMiningMock{}, ethashApi)) + starknet.RegisterCAIROVMServer(server, &starknet.UnimplementedCAIROVMServer{}) + listener := bufconn.Listen(1024 * 1024) + + dialer := func() func(context.Context, string) (net.Conn, error) { + go func() { + if err := server.Serve(listener); err != nil { + panic(err) + } + }() + return func(context.Context, string) (net.Conn, error) { + return listener.Dial() + } + } + + conn, err := grpc.DialContext(ctx, "", grpc.WithInsecure(), grpc.WithContextDialer(dialer())) + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { + cancel() + conn.Close() + }) + return ctx, conn +} diff --git a/cmd/rpcdaemon22/rpcservices/eth_backend.go b/cmd/rpcdaemon22/rpcservices/eth_backend.go new file mode 100644 index 00000000000..f86aaa0515d --- /dev/null +++ b/cmd/rpcdaemon22/rpcservices/eth_backend.go @@ -0,0 +1,288 @@ +package rpcservices + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "sync/atomic" + + "github.com/ledgerwatch/erigon-lib/gointerfaces" + "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" + types2 "github.com/ledgerwatch/erigon-lib/gointerfaces/types" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/ethdb/privateapi" + "github.com/ledgerwatch/erigon/p2p" + "github.com/ledgerwatch/erigon/rlp" + "github.com/ledgerwatch/erigon/turbo/services" + "github.com/ledgerwatch/log/v3" + "google.golang.org/grpc" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/emptypb" +) + +type RemoteBackend struct { + remoteEthBackend remote.ETHBACKENDClient + log log.Logger + version gointerfaces.Version + db kv.RoDB + blockReader services.FullBlockReader +} + +func NewRemoteBackend(client remote.ETHBACKENDClient, db kv.RoDB, blockReader services.FullBlockReader) *RemoteBackend { + return &RemoteBackend{ + remoteEthBackend: client, + version: gointerfaces.VersionFromProto(privateapi.EthBackendAPIVersion), + log: log.New("remote_service", "eth_backend"), + db: db, + blockReader: blockReader, + } +} + +func (back *RemoteBackend) EnsureVersionCompatibility() bool { + versionReply, err := back.remoteEthBackend.Version(context.Background(), &emptypb.Empty{}, grpc.WaitForReady(true)) + if err != nil { + + back.log.Error("getting Version", "err", err) + return false + } + if !gointerfaces.EnsureVersion(back.version, versionReply) { + back.log.Error("incompatible interface versions", "client", back.version.String(), + "server", fmt.Sprintf("%d.%d.%d", versionReply.Major, versionReply.Minor, versionReply.Patch)) + return false + } + back.log.Info("interfaces compatible", "client", back.version.String(), + "server", fmt.Sprintf("%d.%d.%d", 
versionReply.Major, versionReply.Minor, versionReply.Patch)) + return true +} + +func (back *RemoteBackend) Etherbase(ctx context.Context) (common.Address, error) { + res, err := back.remoteEthBackend.Etherbase(ctx, &remote.EtherbaseRequest{}) + if err != nil { + if s, ok := status.FromError(err); ok { + return common.Address{}, errors.New(s.Message()) + } + return common.Address{}, err + } + + return gointerfaces.ConvertH160toAddress(res.Address), nil +} + +func (back *RemoteBackend) NetVersion(ctx context.Context) (uint64, error) { + res, err := back.remoteEthBackend.NetVersion(ctx, &remote.NetVersionRequest{}) + if err != nil { + if s, ok := status.FromError(err); ok { + return 0, errors.New(s.Message()) + } + return 0, err + } + + return res.Id, nil +} + +func (back *RemoteBackend) NetPeerCount(ctx context.Context) (uint64, error) { + res, err := back.remoteEthBackend.NetPeerCount(ctx, &remote.NetPeerCountRequest{}) + if err != nil { + if s, ok := status.FromError(err); ok { + return 0, errors.New(s.Message()) + } + return 0, err + } + + return res.Count, nil +} + +func (back *RemoteBackend) ProtocolVersion(ctx context.Context) (uint64, error) { + res, err := back.remoteEthBackend.ProtocolVersion(ctx, &remote.ProtocolVersionRequest{}) + if err != nil { + if s, ok := status.FromError(err); ok { + return 0, errors.New(s.Message()) + } + return 0, err + } + + return res.Id, nil +} + +func (back *RemoteBackend) ClientVersion(ctx context.Context) (string, error) { + res, err := back.remoteEthBackend.ClientVersion(ctx, &remote.ClientVersionRequest{}) + if err != nil { + if s, ok := status.FromError(err); ok { + return "", errors.New(s.Message()) + } + return "", err + } + + return res.NodeName, nil +} + +func (back *RemoteBackend) Subscribe(ctx context.Context, onNewEvent func(*remote.SubscribeReply)) error { + subscription, err := back.remoteEthBackend.Subscribe(ctx, &remote.SubscribeRequest{}, grpc.WaitForReady(true)) + if err != nil { + if s, ok := status.FromError(err); ok { + return errors.New(s.Message()) + } + return err + } + for { + event, err := subscription.Recv() + if errors.Is(err, io.EOF) { + log.Debug("rpcdaemon: the subscription channel was closed") + break + } + if err != nil { + return err + } + + onNewEvent(event) + } + return nil +} + +func (back *RemoteBackend) SubscribeLogs(ctx context.Context, onNewLogs func(reply *remote.SubscribeLogsReply), requestor *atomic.Value) error { + subscription, err := back.remoteEthBackend.SubscribeLogs(ctx, grpc.WaitForReady(true)) + if err != nil { + if s, ok := status.FromError(err); ok { + return errors.New(s.Message()) + } + return err + } + requestor.Store(subscription.Send) + for { + logs, err := subscription.Recv() + if errors.Is(err, io.EOF) { + log.Info("rpcdaemon: the logs subscription channel was closed") + break + } + if err != nil { + return err + } + onNewLogs(logs) + } + return nil +} + +func (back *RemoteBackend) TxnLookup(ctx context.Context, tx kv.Getter, txnHash common.Hash) (uint64, bool, error) { + return back.blockReader.TxnLookup(ctx, tx, txnHash) +} +func (back *RemoteBackend) BlockWithSenders(ctx context.Context, tx kv.Getter, hash common.Hash, blockHeight uint64) (block *types.Block, senders []common.Address, err error) { + return back.blockReader.BlockWithSenders(ctx, tx, hash, blockHeight) +} +func (back *RemoteBackend) BodyWithTransactions(ctx context.Context, tx kv.Getter, hash common.Hash, blockHeight uint64) (body *types.Body, err error) { + return back.blockReader.BodyWithTransactions(ctx, tx, hash, 
blockHeight) +} +func (back *RemoteBackend) BodyRlp(ctx context.Context, tx kv.Getter, hash common.Hash, blockHeight uint64) (bodyRlp rlp.RawValue, err error) { + return back.blockReader.BodyRlp(ctx, tx, hash, blockHeight) +} +func (back *RemoteBackend) Body(ctx context.Context, tx kv.Getter, hash common.Hash, blockHeight uint64) (body *types.Body, err error) { + return back.blockReader.Body(ctx, tx, hash, blockHeight) +} +func (back *RemoteBackend) Header(ctx context.Context, tx kv.Getter, hash common.Hash, blockHeight uint64) (*types.Header, error) { + return back.blockReader.Header(ctx, tx, hash, blockHeight) +} +func (back *RemoteBackend) HeaderByNumber(ctx context.Context, tx kv.Getter, blockHeight uint64) (*types.Header, error) { + return back.blockReader.HeaderByNumber(ctx, tx, blockHeight) +} +func (back *RemoteBackend) HeaderByHash(ctx context.Context, tx kv.Getter, hash common.Hash) (*types.Header, error) { + return back.blockReader.HeaderByHash(ctx, tx, hash) +} +func (back *RemoteBackend) CanonicalHash(ctx context.Context, tx kv.Getter, blockHeight uint64) (common.Hash, error) { + return back.blockReader.CanonicalHash(ctx, tx, blockHeight) +} + +func (back *RemoteBackend) EngineNewPayloadV1(ctx context.Context, payload *types2.ExecutionPayload) (res *remote.EnginePayloadStatus, err error) { + return back.remoteEthBackend.EngineNewPayloadV1(ctx, payload) +} + +func (back *RemoteBackend) EngineForkchoiceUpdatedV1(ctx context.Context, request *remote.EngineForkChoiceUpdatedRequest) (*remote.EngineForkChoiceUpdatedReply, error) { + return back.remoteEthBackend.EngineForkChoiceUpdatedV1(ctx, request) +} + +func (back *RemoteBackend) EngineGetPayloadV1(ctx context.Context, payloadId uint64) (res *types2.ExecutionPayload, err error) { + return back.remoteEthBackend.EngineGetPayloadV1(ctx, &remote.EngineGetPayloadRequest{ + PayloadId: payloadId, + }) +} + +func (back *RemoteBackend) NodeInfo(ctx context.Context, limit uint32) ([]p2p.NodeInfo, error) { + nodes, err := back.remoteEthBackend.NodeInfo(ctx, &remote.NodesInfoRequest{Limit: limit}) + if err != nil { + return nil, fmt.Errorf("nodes info request error: %w", err) + } + + if nodes == nil || len(nodes.NodesInfo) == 0 { + return nil, errors.New("empty nodesInfo response") + } + + ret := make([]p2p.NodeInfo, 0, len(nodes.NodesInfo)) + for _, node := range nodes.NodesInfo { + var rawProtocols map[string]json.RawMessage + if err = json.Unmarshal(node.Protocols, &rawProtocols); err != nil { + return nil, fmt.Errorf("cannot decode protocols metadata: %w", err) + } + + protocols := make(map[string]interface{}, len(rawProtocols)) + for k, v := range rawProtocols { + protocols[k] = v + } + + ret = append(ret, p2p.NodeInfo{ + Enode: node.Enode, + ID: node.Id, + IP: node.Enode, + ENR: node.Enr, + ListenAddr: node.ListenerAddr, + Name: node.Name, + Ports: struct { + Discovery int `json:"discovery"` + Listener int `json:"listener"` + }{ + Discovery: int(node.Ports.Discovery), + Listener: int(node.Ports.Listener), + }, + Protocols: protocols, + }) + } + + return ret, nil +} + +func (back *RemoteBackend) Peers(ctx context.Context) ([]*p2p.PeerInfo, error) { + rpcPeers, err := back.remoteEthBackend.Peers(ctx, &emptypb.Empty{}) + if err != nil { + return nil, fmt.Errorf("ETHBACKENDClient.Peers() error: %w", err) + } + + peers := make([]*p2p.PeerInfo, 0, len(rpcPeers.Peers)) + + for _, rpcPeer := range rpcPeers.Peers { + peer := p2p.PeerInfo{ + ENR: rpcPeer.Enr, + Enode: rpcPeer.Enode, + ID: rpcPeer.Id, + Name: rpcPeer.Name, + Caps: rpcPeer.Caps, + 
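+			// The anonymous Network struct mirrors p2p.PeerInfo's layout; connection
+			// attributes reported by the remote backend are copied into it field by field.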
Network: struct { + LocalAddress string `json:"localAddress"` + RemoteAddress string `json:"remoteAddress"` + Inbound bool `json:"inbound"` + Trusted bool `json:"trusted"` + Static bool `json:"static"` + }{ + LocalAddress: rpcPeer.ConnLocalAddr, + RemoteAddress: rpcPeer.ConnRemoteAddr, + Inbound: rpcPeer.ConnIsInbound, + Trusted: rpcPeer.ConnIsTrusted, + Static: rpcPeer.ConnIsStatic, + }, + Protocols: nil, + } + + peers = append(peers, &peer) + } + + return peers, nil +} diff --git a/cmd/rpcdaemon22/rpcservices/eth_mining.go b/cmd/rpcdaemon22/rpcservices/eth_mining.go new file mode 100644 index 00000000000..889b24d62e3 --- /dev/null +++ b/cmd/rpcdaemon22/rpcservices/eth_mining.go @@ -0,0 +1,43 @@ +package rpcservices + +import ( + "context" + "fmt" + + "github.com/ledgerwatch/erigon-lib/gointerfaces" + "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" + "github.com/ledgerwatch/erigon/ethdb/privateapi" + "github.com/ledgerwatch/log/v3" + "google.golang.org/grpc" + "google.golang.org/protobuf/types/known/emptypb" +) + +type MiningService struct { + txpool.MiningClient + log log.Logger + version gointerfaces.Version +} + +func NewMiningService(client txpool.MiningClient) *MiningService { + return &MiningService{ + MiningClient: client, + version: gointerfaces.VersionFromProto(privateapi.MiningAPIVersion), + log: log.New("remote_service", "mining"), + } +} + +func (s *MiningService) EnsureVersionCompatibility() bool { + versionReply, err := s.Version(context.Background(), &emptypb.Empty{}, grpc.WaitForReady(true)) + if err != nil { + s.log.Error("getting Version", "err", err) + return false + } + if !gointerfaces.EnsureVersion(s.version, versionReply) { + s.log.Error("incompatible interface versions", "client", s.version.String(), + "server", fmt.Sprintf("%d.%d.%d", versionReply.Major, versionReply.Minor, versionReply.Patch)) + return false + } + s.log.Info("interfaces compatible", "client", s.version.String(), + "server", fmt.Sprintf("%d.%d.%d", versionReply.Major, versionReply.Minor, versionReply.Patch)) + return true +} diff --git a/cmd/rpcdaemon22/rpcservices/eth_starknet.go b/cmd/rpcdaemon22/rpcservices/eth_starknet.go new file mode 100644 index 00000000000..6dcc02d448d --- /dev/null +++ b/cmd/rpcdaemon22/rpcservices/eth_starknet.go @@ -0,0 +1,31 @@ +package rpcservices + +import ( + "github.com/ledgerwatch/erigon-lib/gointerfaces" + "github.com/ledgerwatch/erigon-lib/gointerfaces/starknet" + types2 "github.com/ledgerwatch/erigon-lib/gointerfaces/types" + "github.com/ledgerwatch/log/v3" + "google.golang.org/grpc" +) + +// StarknetAPIVersion +var StarknetAPIVersion = &types2.VersionReply{Major: 1, Minor: 0, Patch: 0} + +type StarknetService struct { + starknet.CAIROVMClient + log log.Logger + version gointerfaces.Version +} + +func NewStarknetService(cc grpc.ClientConnInterface) *StarknetService { + return &StarknetService{ + CAIROVMClient: starknet.NewCAIROVMClient(cc), + version: gointerfaces.VersionFromProto(StarknetAPIVersion), + log: log.New("remote_service", "starknet"), + } +} + +func (s *StarknetService) EnsureVersionCompatibility() bool { + //TODO: add version check + return true +} diff --git a/cmd/rpcdaemon22/rpcservices/eth_txpool.go b/cmd/rpcdaemon22/rpcservices/eth_txpool.go new file mode 100644 index 00000000000..670a77b538b --- /dev/null +++ b/cmd/rpcdaemon22/rpcservices/eth_txpool.go @@ -0,0 +1,50 @@ +package rpcservices + +import ( + "context" + "fmt" + "time" + + "github.com/ledgerwatch/erigon-lib/gointerfaces" + 
"github.com/ledgerwatch/erigon-lib/gointerfaces/grpcutil" + txpooproto "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" + txpool2 "github.com/ledgerwatch/erigon-lib/txpool" + "github.com/ledgerwatch/log/v3" + "google.golang.org/grpc" + "google.golang.org/protobuf/types/known/emptypb" +) + +type TxPoolService struct { + txpooproto.TxpoolClient + log log.Logger + version gointerfaces.Version +} + +func NewTxPoolService(client txpooproto.TxpoolClient) *TxPoolService { + return &TxPoolService{ + TxpoolClient: client, + version: gointerfaces.VersionFromProto(txpool2.TxPoolAPIVersion), + log: log.New("remote_service", "tx_pool"), + } +} + +func (s *TxPoolService) EnsureVersionCompatibility() bool { +Start: + versionReply, err := s.Version(context.Background(), &emptypb.Empty{}, grpc.WaitForReady(true)) + if err != nil { + if grpcutil.ErrIs(err, txpool2.ErrPoolDisabled) { + time.Sleep(3 * time.Second) + goto Start + } + s.log.Error("ensure version", "err", err) + return false + } + if !gointerfaces.EnsureVersion(s.version, versionReply) { + s.log.Error("incompatible interface versions", "client", s.version.String(), + "server", fmt.Sprintf("%d.%d.%d", versionReply.Major, versionReply.Minor, versionReply.Patch)) + return false + } + s.log.Info("interfaces compatible", "client", s.version.String(), + "server", fmt.Sprintf("%d.%d.%d", versionReply.Major, versionReply.Minor, versionReply.Patch)) + return true +} diff --git a/cmd/rpcdaemon22/test.http b/cmd/rpcdaemon22/test.http new file mode 100644 index 00000000000..cf6205cf37c --- /dev/null +++ b/cmd/rpcdaemon22/test.http @@ -0,0 +1,222 @@ + +### + +POST localhost:8545 +Content-Type: application/json + +{ + "jsonrpc": "2.0", + "method": "eth_syncing", + "params": [], + "id": 1 +} + +### + +POST localhost:8545 +Content-Type: application/json + +{ + "jsonrpc": "2.0", + "method": "eth_getBalance", + "params": [ + "0xfffa4763f94f7ad191b366a343092a5d1a47ed08", + "0xde84" + ], + "id": 1 +} + +### + +POST localhost:8545 +Content-Type: application/json + +{ + "jsonrpc": "2.0", + "method": "debug_accountRange", + "params": [ + "0x1e8480", + "", + 256, + false, + false, + false + ], + "id": 1 +} + +### + +# curl -X POST -H "Content-Type: application/json" --data '{"jsonrpc":"2.0","method":"eth_getTransactionByHash", "params": ["0x1302cc71b89c1482b18a97a6fa2c9c375f4bf7548122363b6e91528440272fde"], "id":1}' localhost:8545 +POST localhost:8545 +Content-Type: application/json + +{ + "jsonrpc": "2.0", + "method": "eth_getTransactionByHash", + "params": [ + "0x1302cc71b89c1482b18a97a6fa2c9c375f4bf7548122363b6e91528440272fde" + ], + "id": 1 +} + +### + + + +# curl -X POST -H "Content-Type: application/json" --data '{"jsonrpc":"2.0","method":"eth_getTransactionByHash", "params": ["0x1302cc71b89c1482b18a97a6fa2c9c375f4bf7548122363b6e91528440272fde"], "id":1}' localhost:8545 +POST localhost:8545 +Content-Type: application/json + +{ + "jsonrpc": "2.0", + "method": "eth_getBlockByNumber", + "params": [ + "0x4C4B40", + true + ], + "id": 1 +} + +### + +# curl -X POST -H "Content-Type: application/json" --data '{"jsonrpc":"2.0","method":"eth_getBlockByNumber", "params": ["0x1b4", true], "id":1}' localhost:8545 +POST localhost:8545 +Content-Type: application/json + +{ + "jsonrpc": "2.0", + "method": "eth_newHeader", + "params": [], + "id": 1 +} + +### + +POST localhost:8545 +Content-Type: application/json + +{ + "jsonrpc": "2.0", + "method": "eth_getBlockByNumber", + "params": [ + "0xf4240", + true + ], + "id": 2 +} + +### + +POST localhost:8545 +Content-Type: 
application/json + +{ + "jsonrpc": "2.0", + "method": "debug_storageRangeAt", + "params": [ + "0x4ced0bc30041f7f4e11ba9f341b54404770c7695dfdba6bb64b6ffeee2074177", + 99, + "0x33990122638b9132ca29c723bdf037f1a891a70c", + "0x0000000000000000000000000000000000000000000000000000000000000000", + 1024 + ], + "id": 537758 +} + +### > 60 + +### >20 +###{"jsonrpc":"2.0","method":"debug_storageRangeAt","params":["0x6e6ec30ba20b263d1bdf6d87a0b1b037ea595929ac10ad74f6b7e1890fdad744", 19,"0x793ae8c1b1a160bfc07bfb0d04f85eab1a71f4f2","0x0000000000000000000000000000000000000000000000000000000000000000",1024],"id":113911} + + +### {"jsonrpc":"2.0","mesthod":"debug_storageRangeAt","params":["0xbcb55dcb321899291d10818dd06eaaf939ff87a717ac40850b54c6b56e8936ff", 2,"0xca7c390f8f843a8c3036841fde755e5d0acb97da","0x0000000000000000000000000000000000000000000000000000000000000000",1024],"id":3836} + +###{"jsonrpc":"2.0","method":"debug_storageRangeAt","params":["0xf212a7655339852bf58f7e1d66f82256d22d13ccba3068a9c47a635738698c84", 0,"0xb278e4cb20dfbf97e78f27001f6b15288302f4d7","0x0000000000000000000000000000000000000000000000000000000000000000",1024],"id":8970} + +### + +POST 192.168.255.138:8545 +Content-Type: application/json + +{ + "jsonrpc": "2.0", + "method": "eth_getTransactionReceipt", + "params": [ + "0xc05ce241bec59900356ede868d170bc01d743c3cd5ecb129ca99596593022771" + ], + "id": 537758 +} + + +### + +#POST 192.168.255.138:8545 +POST localhost:8545 +Content-Type: application/json + +{ + "jsonrpc": "2.0", + "method": "erigon_getLogsByHash", + "params": [ + "0x343f85f13356e138152d77287fda5ae0818c514119119ad439f81d69c59fc2f6" + ], + "id": 537758 +} + + +### + +#POST 192.168.255.138:8545 +POST localhost:8545 +Content-Type: application/json + +{ + "jsonrpc": "2.0", + "method": "eth_getLogs", + "params": [ + { + "address": "0x6090a6e47849629b7245dfa1ca21d94cd15878ef", + "fromBlock": "0x3d0000", + "toBlock": "0x3d2600", + "topics": [ + null, + "0x374f3a049e006f36f6cf91b02a3b0ee16c858af2f75858733eb0e927b5b7126c" + ] + } + ], + "id": 537758 +} + +### + +#POST 192.168.255.138:8545 +POST localhost:8545 +Content-Type: application/json + +{ + "jsonrpc": "2.0", + "method": "eth_getWork", + "params": [], + "id": 537758 +} + + + +### + +POST localhost:8545 +Content-Type: application/json + +{ + "id": 1, + "method": "eth_estimateGas", + "params": [ + { + "to": "0x5fda30bb72b8dfe20e48a00dfc108d0915be9bb0", + "value": "0x1234" + }, + "latest" + ] +} + diff --git a/cmd/rpcdaemon22/testdata/.gitignore b/cmd/rpcdaemon22/testdata/.gitignore new file mode 100644 index 00000000000..6ad27168f7c --- /dev/null +++ b/cmd/rpcdaemon22/testdata/.gitignore @@ -0,0 +1,5 @@ +geth +parity +nethermind +turbogeth +erigon \ No newline at end of file diff --git a/cmd/rpcdaemon22/testdata/sed_file b/cmd/rpcdaemon22/testdata/sed_file new file mode 100644 index 00000000000..777338ab63f --- /dev/null +++ b/cmd/rpcdaemon22/testdata/sed_file @@ -0,0 +1,22 @@ +s/,\"id\":\"1\"//g +s/\"result\":null,/\"result\":\{\},/g +s/suicide/selfdestruct/g +s/\"gasUsed\":\"0x0\",//g +s/,\"value\":\"0x0\"//g + +s/invalid argument 1: json: cannot unmarshal hex string \\\"0x\\\" into Go value of type hexutil.Uint64/Invalid params: Invalid index: cannot parse integer from empty string./ +s/invalid argument 1: json: cannot unmarshal number into Go value of type \[\]hexutil.Uint64/Invalid params: invalid type: integer `0`, expected a sequence./ +s/missing value for required argument 1/Invalid params: invalid length 1, expected a tuple of size 2./ +s/Invalid params: invalid 
type: string \\\"0x0\\\", expected a sequence./invalid argument 1: json: cannot unmarshal string into Go value of type \[\]hexutil.Uint64/ +s/Invalid params\: Invalid block number\: number too large to fit in target type./invalid argument 0: hex number > 64 bits/ +s/the method trace_junk12 does not exist\/is not available/Method not found/ + +s/,\"traceAddress\":null/,\"traceAddress\":[]/g +s/\"0x0000000000000000000000000000000000000000000000000000000000000000\"/\"0x\"/g +s/\"transactionHash\":\"0x\",\"transactionPosition\":0/\"transactionHash\":null,\"transactionPosition\":null/g +s/\"result\":null/\"result\":[]/g + +s/\"error\":{\"code\":-32000,\"message\":\"function trace_replayBlockTransactions not implemented\"}/\"result\":\[\]/ +s/\"error\":{\"code\":-32000,\"message\":\"function trace_replayTransaction not implemented\"}/\"result\":{\"output\":\"0x\",\"stateDiff\":null,\"trace\":\[\],\"vmTrace\":null}/ +s/\"error\":{\"code\":-32602,\"message\":\"invalid argument 0: json: cannot unmarshal array into Go value of type commands.CallParam\"}/\"result\":\[{\"output\":\"0x\",\"stateDiff\":null,\"trace\":\[\],\"vmTrace\":null},{\"output\":\"0x\",\"stateDiff\":null,\"trace\":\[\],\"vmTrace\":null}]/ +s/\"error\":{\"code\":-32602,\"message\":\"invalid argument 0: hex string has length 82, want 64 for common.Hash\"}/\"error\":{\"code\":-32602,\"data\":\"RlpIncorrectListLen\",\"message\":\"Couldn't parse parameters: Transaction is not valid RLP\"}/ diff --git a/cmd/rpcdaemon22/testdata/trace_tests b/cmd/rpcdaemon22/testdata/trace_tests new file mode 100644 index 00000000000..0e89a8da6aa --- /dev/null +++ b/cmd/rpcdaemon22/testdata/trace_tests @@ -0,0 +1,76 @@ +005 trace_get fail ["0x17104ac9d3312d8c136b7f44d4b8b47852618065ebfa534bd2d3b5ef218ca1f3",0] +010 trace_get fail ["0x17104ac9d3312d8c136b7f44d4b8b47852618065ebfa534bd2d3b5ef218ca1f3","0x0"] +015 trace_get zero ["0x17104ac9d3312d8c136b7f44d4b8b47852618065ebfa534bd2d3b5ef218ca1f3",["0x0"]] +020 trace_get one ["0x17104ac9d3312d8c136b7f44d4b8b47852618065ebfa534bd2d3b5ef218ca1f3",["0x1"]] +025 trace_get both ["0x17104ac9d3312d8c136b7f44d4b8b47852618065ebfa534bd2d3b5ef218ca1f3",["0x0","0x1"]] +030 trace_get fail ["0x17104ac9d3312d8c136b7f44d4b8b47852618065ebfa534bd2d3b5ef218ca1f3"] +035 trace_get two ["0x5c504ed432cb51138bcf09aa5e8a410dd4a1e204ef84bfed1be16dfba1b22060",["0x2"]] +040 trace_get fail ["0x975994512b958b31608f5692a6dbacba359349533dfb4ba0facfb7291fbec48d",["0x"]] + +050 trace_transaction one ["0x17104ac9d3312d8c136b7f44d4b8b47852618065ebfa534bd2d3b5ef218ca1f3"] +055 trace_transaction two ["0x5c504ed432cb51138bcf09aa5e8a410dd4a1e204ef84bfed1be16dfba1b22060"] +060 trace_transaction three ["0x6afbe0f0ea3613edd6b84b71260836c03bddce81604f05c81a070cd671d3d765"] +065 trace_transaction four ["0x80926bb17ecdd526a2d901835482615eec87c4ca7fc30b96d8c6d6ab17bc721e"] +070 trace_transaction five ["0xb8ae0ab093fe1882249187b8f40dbe6e9285b419d096bd8028172d55b47ff3ce"] +075 trace_transaction six ["0xc2b831c051582f13dfaff6df648972e7e94aeeed1e85d23bd968a55b59f3cb5b"] +080 trace_transaction seven ["0xf9d426284bd20415a53991a004122b3a3a619b295ea98d1d88a5fd3a4125408b"] +085 trace_transaction cr_de ["0x343ba476313771d4431018d7d2e935eba2bfe26d5be3e6cb84af6817fd0e4309"] + +105 trace_block 0x23 ["0x2328"] +110 trace_block 0x10 ["0x100"] +115 trace_block 0x12 ["0x12"] +120 trace_block 0x12 ["0x121212"] +125 trace_block 0x2e ["0x2ed119"] +130 trace_block 0xa1 ["0xa18dcfbc639be11c353420ede9224d772c56eb9ff327eb73771f798cf42d0027"] +#135 trace_block 0xa6 ["0xa60f34"] 
+#140 trace_block 0xf4 ["0xf4629"] +#145 trace_block slow ["0x895441"] + +150 trace_filter good_1 [{"fromBlock":"0x2328","toBlock":"0x2328"}] +155 trace_filter range_1 [{"fromBlock":"0x2dcaa9","toBlock":"0x2dcaaa"}] +160 trace_filter block_3 [{"fromBlock":"0x3","toBlock":"0x3"}] +165 trace_filter first_tx [{"fromBlock":"0xb443","toBlock":"0xb443"}] +170 trace_filter from_doc [{"fromBlock":"0x2ed0c4","toBlock":"0x2ed128","toAddress":["0x8bbb73bcb5d553b5a556358d27625323fd781d37"],"after":1000,"count":100}] +175 trace_filter rem_a_o [{"fromBlock":"0x2ed0c4","toBlock":"0x2ed128","toAddress":["0x8bbb73bcb5d553b5a556358d27625323fd781d37"]}] +180 trace_filter count_1 [{"fromBlock":"0x2ed0c4","toBlock":"0x2ed128","toAddress":["0x8bbb73bcb5d553b5a556358d27625323fd781d37"],"count":1}] +185 trace_filter after_1 [{"fromBlock":"0x2ed0c4","toBlock":"0x2ed128","toAddress":["0x8bbb73bcb5d553b5a556358d27625323fd781d37"],"after":1,"count":4}] +190 trace_filter to_0xc02 [{"fromBlock":"0xa344e0","toBlock":"0xa344e0","toAddress":["0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2"]}] +195 trace_filter fr_0xc3c [{"fromBlock":"0xa344e0","toBlock":"0xa344e0","fromAddress":["0xc3ca90684fd7b8c7e4be88c329269fc32111c4bd"]}] +200 trace_filter both [{"fromBlock":"0xa344e0","toBlock":"0xa344e0","fromAddress":["0xc3ca90684fd7b8c7e4be88c329269fc32111c4bd"],"toAddress":["0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2"]}] +205 trace_filter fail_2 [{"fromBlock":"0xa606ba","toBlock":"0x2dcaa9"}] +210 trace_filter bad_1 [{"fromBlock":"0x2328","toBlock":"0x2327"}] +#215 trace_filter slow_2 [{"fromBlock":"0xa606ba","toBlock":"0xa606ba"}] +#220 trace_filter 10700000 [{"fromBlock":"0xa344e0","toBlock":"0xa344e0"}] + +250 trace_replayBlockTransactions fail ["0x3", ["stateDiff"]] +300 trace_replayTransaction fail ["0x02d4a872e096445e80d05276ee756cefef7f3b376bcec14246469c0cd97dad8f", ["fail"]] +320_erigon trace_call fail [{"input":"0x0","nonce":"0x0","from":"0x02fcf30912b6fe2b6452ee19721c6068fe4c7b61","gas":"0xf4240","to":"0x37a9679c41e99db270bda88de8ff50c0cd23f326","gasPrice":"0x4a817c800","value":"0x0"},["fail"],"latest"] +340 trace_callMany fail [[[{"from":"0x407d73d8a49eeb85d32cf465507dd71d507100c1","to":"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b","value":"0x186a0"},["fail"]],[{"from":"0x407d73d8a49eeb85d32cf465507dd71d507100c1","to":"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b","value":"0x186a0"},["fail"]]],"latest"] +360 trace_rawTransaction fail ["0xd46e8dd67c5d32be8d46e8dd67c5d32be8058bb8eb970870f072445675058bb8eb970870f072445675",["fail"]] +#255 trace_replayBlockTransactions ["0x1",["trace"]] +#250 trace_replayBlockTransactions ["0x1"] +#265 trace_replayBlockTransactions ["0x100"] +#260 trace_replayBlockTransactions ["0x895441",["trace"]] +#275 trace_replayBlockTransactions ["0x895441",["vmTrace"]] +#270 trace_replayBlockTransactions ["0xCF9BF",["trace"]] +#285 trace_replayBlockTransactions ["0xDBBA1",["trace"]] +#280 trace_replayBlockTransactions ["0xDBBA1",["vmTrace"]] +#285 trace_replayBlockTransactions ["CF9BF",["trace"]] +#290 trace_replayTransactions ["CF9BF",["trace"]] +#295trace_replayTransactions ["CF9BF",["trace"]] + +305 trace_junk12 no_rpc [] + +# custom, experimental stuff +405_erigon trace_blockReward rew_0 ["0x0"] +410_erigon trace_blockReward rew_1 ["0x1"] +415_erigon trace_blockReward rew_2 ["0x2"] +420_erigon trace_blockReward rew_3 ["0x3"] +425_erigon trace_uncleReward unc_0 ["0x0"] +430_erigon trace_uncleReward unc_1 ["0x1"] +435_erigon trace_uncleReward unc_2 ["0x2"] +440_erigon trace_uncleReward unc_3 ["0x3"] 
+445_erigon trace_issuance iss_0 ["0x0"] +450_erigon trace_issuance iss_1 ["0x1"] +455_erigon trace_issuance iss_2 ["0x2"] +460_erigon trace_issuance iss_3 ["0x3"] diff --git a/cmd/state/commands/calltracer22.go b/cmd/state/commands/calltracer22.go new file mode 100644 index 00000000000..cf679518d8d --- /dev/null +++ b/cmd/state/commands/calltracer22.go @@ -0,0 +1,57 @@ +package commands + +import ( + "math/big" + "time" + + libstate "github.com/ledgerwatch/erigon-lib/state" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/core/vm" +) + +type CallTracer struct { + froms map[common.Address]struct{} + tos map[common.Address]struct{} +} + +func NewCallTracer() *CallTracer { + return &CallTracer{ + froms: map[common.Address]struct{}{}, + tos: map[common.Address]struct{}{}, + } +} + +func (ct *CallTracer) CaptureStart(evm *vm.EVM, depth int, from common.Address, to common.Address, precompile bool, create bool, calltype vm.CallType, input []byte, gas uint64, value *big.Int, code []byte) { + ct.froms[from] = struct{}{} + ct.tos[to] = struct{}{} +} +func (ct *CallTracer) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) { +} +func (ct *CallTracer) CaptureFault(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, depth int, err error) { +} +func (ct *CallTracer) CaptureEnd(depth int, output []byte, startGas, endGas uint64, t time.Duration, err error) { +} +func (ct *CallTracer) CaptureSelfDestruct(from common.Address, to common.Address, value *big.Int) { + ct.froms[from] = struct{}{} + ct.tos[to] = struct{}{} +} +func (ct *CallTracer) CaptureAccountRead(account common.Address) error { + return nil +} +func (ct *CallTracer) CaptureAccountWrite(account common.Address) error { + return nil +} + +func (ct *CallTracer) AddToAggregator(a *libstate.Aggregator) error { + for from := range ct.froms { + if err := a.AddTraceFrom(from[:]); err != nil { + return err + } + } + for to := range ct.tos { + if err := a.AddTraceTo(to[:]); err != nil { + return err + } + } + return nil +} diff --git a/cmd/state/commands/check_change_sets.go b/cmd/state/commands/check_change_sets.go index 7b014e21078..dc8c938aac4 100644 --- a/cmd/state/commands/check_change_sets.go +++ b/cmd/state/commands/check_change_sets.go @@ -148,7 +148,7 @@ func CheckChangeSets(genesis *core.Genesis, logger log.Logger, blockNum uint64, break } reader := state.NewPlainState(historyTx, blockNum) - //reader.SetTrace(blockNum == uint64(block)) + reader.SetTrace(blockNum == uint64(block)) intraBlockState := state.New(reader) csw := state.NewChangeSetWriterPlain(nil /* db */, blockNum) var blockWriter state.StateWriter @@ -166,7 +166,7 @@ func CheckChangeSets(genesis *core.Genesis, logger log.Logger, blockNum uint64, return h } contractHasTEVM := ethdb.GetHasTEVM(rwtx) - receipts, err1 := runBlock(engine, intraBlockState, noOpWriter, blockWriter, chainConfig, getHeader, contractHasTEVM, b, vmConfig) + receipts, err1 := runBlock(engine, intraBlockState, noOpWriter, blockWriter, chainConfig, getHeader, contractHasTEVM, b, vmConfig, blockNum == uint64(block)) if err1 != nil { return err1 } diff --git a/cmd/state/commands/erigon22.go b/cmd/state/commands/erigon22.go new file mode 100644 index 00000000000..69e960b4c04 --- /dev/null +++ b/cmd/state/commands/erigon22.go @@ -0,0 +1,534 @@ +package commands + +import ( + "context" + "errors" + "fmt" + "math/bits" + "os" + "os/signal" + "path" + "path/filepath" + "runtime" + 
"syscall" + "time" + + "github.com/holiman/uint256" + libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/mdbx" + kv2 "github.com/ledgerwatch/erigon-lib/kv/mdbx" + libstate "github.com/ledgerwatch/erigon-lib/state" + "github.com/ledgerwatch/erigon/turbo/services" + "github.com/ledgerwatch/log/v3" + "github.com/spf13/cobra" + + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/consensus" + "github.com/ledgerwatch/erigon/consensus/misc" + "github.com/ledgerwatch/erigon/core" + "github.com/ledgerwatch/erigon/core/state" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/core/types/accounts" + "github.com/ledgerwatch/erigon/core/vm" + "github.com/ledgerwatch/erigon/eth/ethconfig" + "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/turbo/snapshotsync" +) + +const ( + AggregationStep = 3_125_000 /* number of transactions in smallest static file */ +) + +func init() { + withBlock(erigon22Cmd) + withDataDir(erigon22Cmd) + withChain(erigon22Cmd) + + rootCmd.AddCommand(erigon22Cmd) +} + +var erigon22Cmd = &cobra.Command{ + Use: "erigon22", + Short: "Exerimental command to re-execute blocks from beginning using erigon2 state representation and histoty (ugrade 2)", + RunE: func(cmd *cobra.Command, args []string) error { + logger := log.New() + return Erigon22(genesis, chainConfig, logger) + }, +} + +func Erigon22(genesis *core.Genesis, chainConfig *params.ChainConfig, logger log.Logger) error { + sigs := make(chan os.Signal, 1) + interruptCh := make(chan bool, 1) + signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) + + go func() { + <-sigs + interruptCh <- true + }() + + historyDb, err := kv2.NewMDBX(logger).Path(path.Join(datadir, "chaindata")).Open() + if err != nil { + return fmt.Errorf("opening chaindata as read only: %v", err) + } + defer historyDb.Close() + + ctx := context.Background() + historyTx, err1 := historyDb.BeginRo(ctx) + if err1 != nil { + return err1 + } + defer historyTx.Rollback() + stateDbPath := path.Join(datadir, "statedb") + if block == 0 { + if _, err = os.Stat(stateDbPath); err != nil { + if !errors.Is(err, os.ErrNotExist) { + return err + } + } else if err = os.RemoveAll(stateDbPath); err != nil { + return err + } + } + db, err2 := kv2.NewMDBX(logger).Path(stateDbPath).WriteMap().Open() + if err2 != nil { + return err2 + } + defer db.Close() + + aggPath := filepath.Join(datadir, "erigon22") + if block == 0 { + if _, err = os.Stat(aggPath); err != nil { + if !errors.Is(err, os.ErrNotExist) { + return err + } + } else if err = os.RemoveAll(aggPath); err != nil { + return err + } + if err = os.Mkdir(aggPath, os.ModePerm); err != nil { + return err + } + } + + var rwTx kv.RwTx + defer func() { + if rwTx != nil { + rwTx.Rollback() + } + }() + if rwTx, err = db.BeginRw(ctx); err != nil { + return err + } + + agg, err3 := libstate.NewAggregator(aggPath, AggregationStep) + if err3 != nil { + return fmt.Errorf("create aggregator: %w", err3) + } + defer agg.Close() + + interrupt := false + if block == 0 { + _, genesisIbs, err := genesis.ToBlock() + if err != nil { + return err + } + agg.SetTx(rwTx) + agg.SetTxNum(0) + if err = genesisIbs.CommitBlock(¶ms.Rules{}, &WriterWrapper22{w: agg}); err != nil { + return fmt.Errorf("cannot write state: %w", err) + } + if err = agg.FinishTx(); err != nil { + return err + } + } + + logger.Info("Initialised chain configuration", "config", chainConfig) + + var ( + blockNum uint64 + trace bool + 
vmConfig vm.Config + + txNum uint64 = 2 // Consider that each block contains at least first system tx and enclosing transactions, except for Clique consensus engine + ) + + logEvery := time.NewTicker(logInterval) + defer logEvery.Stop() + + statx := &stat22{ + prevBlock: blockNum, + prevTime: time.Now(), + } + + go func() { + for range logEvery.C { + aStats := agg.Stats() + statx.delta(aStats, blockNum).print(aStats, logger) + } + }() + + var blockReader services.FullBlockReader + var allSnapshots *snapshotsync.RoSnapshots + allSnapshots = snapshotsync.NewRoSnapshots(ethconfig.NewSnapCfg(true, false, true), path.Join(datadir, "snapshots")) + defer allSnapshots.Close() + if err := allSnapshots.Reopen(); err != nil { + return fmt.Errorf("reopen snapshot segments: %w", err) + } + blockReader = snapshotsync.NewBlockReaderWithSnapshots(allSnapshots) + engine := initConsensusEngine(chainConfig, logger, allSnapshots) + + for !interrupt { + blockNum++ + trace = traceBlock > 0 && blockNum == uint64(traceBlock) + blockHash, err := blockReader.CanonicalHash(ctx, historyTx, blockNum) + if err != nil { + return err + } + + b, _, err := blockReader.BlockWithSenders(ctx, historyTx, blockHash, blockNum) + if err != nil { + return err + } + if b == nil { + log.Info("history: block is nil", "block", blockNum) + break + } + if blockNum <= block { + // Skip that block, but increase txNum + txNum += uint64(len(b.Transactions())) + 2 // Pre and Post block transaction + continue + } + agg.SetTx(rwTx) + agg.SetTxNum(txNum) + + readWrapper := &ReaderWrapper22{r: agg, roTx: rwTx, blockNum: blockNum} + writeWrapper := &WriterWrapper22{w: agg, blockNum: blockNum} + getHeader := func(hash common.Hash, number uint64) *types.Header { + h, err := blockReader.Header(ctx, historyTx, hash, number) + if err != nil { + panic(err) + } + return h + } + + txNum++ // Pre-block transaction + agg.SetTxNum(txNum) + + if txNum, _, err = processBlock22(trace, txNum, readWrapper, writeWrapper, chainConfig, engine, getHeader, b, vmConfig); err != nil { + return fmt.Errorf("processing block %d: %w", blockNum, err) + } + agg.SetTxNum(txNum) + if err := agg.FinishTx(); err != nil { + return fmt.Errorf("failed to finish tx: %w", err) + } + if trace { + fmt.Printf("FinishTx called for %d block %d\n", txNum, blockNum) + } + + txNum++ // Post-block transaction + agg.SetTxNum(txNum) + + // Check for interrupts + select { + case interrupt = <-interruptCh: + log.Info(fmt.Sprintf("interrupted, please wait for cleanup, next time start with --block %d", blockNum)) + default: + } + // Commit transaction only when interrupted or just before computing commitment (so it can be re-done) + commit := interrupt + if !commit && (blockNum+1)%uint64(commitmentFrequency) == 0 { + var spaceDirty uint64 + if spaceDirty, _, err = rwTx.(*mdbx.MdbxTx).SpaceDirty(); err != nil { + return fmt.Errorf("retrieving spaceDirty: %w", err) + } + if spaceDirty >= dirtySpaceThreshold { + log.Info("Initiated tx commit", "block", blockNum, "space dirty", libcommon.ByteCount(spaceDirty)) + commit = true + } + } + if commit { + if err = rwTx.Commit(); err != nil { + return err + } + if !interrupt { + if rwTx, err = db.BeginRw(ctx); err != nil { + return err + } + } + } + } + + return nil +} + +type stat22 struct { + blockNum uint64 + hits uint64 + misses uint64 + prevBlock uint64 + prevMisses uint64 + prevHits uint64 + hitMissRatio float64 + speed float64 + prevTime time.Time + mem runtime.MemStats +} + +func (s *stat22) print(aStats libstate.FilesStats, logger log.Logger) { + 
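+	// Log replay progress: block number, blocks/s, aggregator file totals
+	// (the total* counters below are currently initialised to zero rather than
+	// derived from aStats), hit/miss ratio and memory usage.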
totalFiles := 0 + totalDatSize := 0 + totalIdxSize := 0 + + logger.Info("Progress", "block", s.blockNum, "blk/s", s.speed, "state files", totalFiles, + "total dat", libcommon.ByteCount(uint64(totalDatSize)), "total idx", libcommon.ByteCount(uint64(totalIdxSize)), + "hit ratio", s.hitMissRatio, "hits+misses", s.hits+s.misses, + "alloc", libcommon.ByteCount(s.mem.Alloc), "sys", libcommon.ByteCount(s.mem.Sys), + ) +} + +func (s *stat22) delta(aStats libstate.FilesStats, blockNum uint64) *stat22 { + currentTime := time.Now() + libcommon.ReadMemStats(&s.mem) + + interval := currentTime.Sub(s.prevTime).Seconds() + s.blockNum = blockNum + s.speed = float64(s.blockNum-s.prevBlock) / interval + s.prevBlock = blockNum + s.prevTime = currentTime + + total := s.hits + s.misses + if total > 0 { + s.hitMissRatio = float64(s.hits) / float64(total) + } + return s +} + +func processBlock22(trace bool, txNumStart uint64, rw *ReaderWrapper22, ww *WriterWrapper22, chainConfig *params.ChainConfig, engine consensus.Engine, getHeader func(hash common.Hash, number uint64) *types.Header, block *types.Block, vmConfig vm.Config) (uint64, types.Receipts, error) { + defer blockExecutionTimer.UpdateDuration(time.Now()) + + header := block.Header() + vmConfig.Debug = true + gp := new(core.GasPool).AddGas(block.GasLimit()) + usedGas := new(uint64) + var receipts types.Receipts + daoBlock := chainConfig.DAOForkSupport && chainConfig.DAOForkBlock != nil && chainConfig.DAOForkBlock.Cmp(block.Number()) == 0 + rules := chainConfig.Rules(block.NumberU64()) + txNum := txNumStart + ww.w.SetTxNum(txNum) + + for i, tx := range block.Transactions() { + ibs := state.New(rw) + if daoBlock { + misc.ApplyDAOHardFork(ibs) + daoBlock = false + } + ibs.Prepare(tx.Hash(), block.Hash(), i) + ct := NewCallTracer() + vmConfig.Tracer = ct + receipt, _, err := core.ApplyTransaction(chainConfig, getHeader, engine, nil, gp, ibs, ww, header, tx, usedGas, vmConfig, nil) + if err != nil { + return 0, nil, fmt.Errorf("could not apply tx %d [%x] failed: %w", i, tx.Hash(), err) + } + if err = ct.AddToAggregator(ww.w); err != nil { + return 0, nil, fmt.Errorf("adding traces to aggregator: %w", err) + } + receipts = append(receipts, receipt) + for _, log := range receipt.Logs { + if err = ww.w.AddLogAddr(log.Address[:]); err != nil { + return 0, nil, fmt.Errorf("adding event log for addr %x: %w", log.Address, err) + } + for _, topic := range log.Topics { + if err = ww.w.AddLogTopic(topic[:]); err != nil { + return 0, nil, fmt.Errorf("adding event log for topic %x: %w", topic, err) + } + } + } + if err = ww.w.FinishTx(); err != nil { + return 0, nil, fmt.Errorf("finish tx %d [%x] failed: %w", i, tx.Hash(), err) + } + if trace { + fmt.Printf("FinishTx called for %d [%x]\n", txNum, tx.Hash()) + } + txNum++ + ww.w.SetTxNum(txNum) + } + + ibs := state.New(rw) + + // Finalize the block, applying any consensus engine specific extras (e.g. 
block rewards) + if _, _, _, err := engine.FinalizeAndAssemble(chainConfig, header, ibs, block.Transactions(), block.Uncles(), receipts, nil, nil, nil, nil); err != nil { + return 0, nil, fmt.Errorf("finalize of block %d failed: %w", block.NumberU64(), err) + } + + if err := ibs.CommitBlock(rules, ww); err != nil { + return 0, nil, fmt.Errorf("committing block %d failed: %w", block.NumberU64(), err) + } + + return txNum, receipts, nil +} + +// Implements StateReader and StateWriter +type ReaderWrapper22 struct { + blockNum uint64 + roTx kv.Tx + r *libstate.Aggregator +} + +type WriterWrapper22 struct { + blockNum uint64 + w *libstate.Aggregator +} + +func (rw *ReaderWrapper22) ReadAccountData(address common.Address) (*accounts.Account, error) { + enc, err := rw.r.ReadAccountData(address.Bytes(), rw.roTx) + if err != nil { + return nil, err + } + if len(enc) == 0 { + return nil, nil + } + + var a accounts.Account + a.Reset() + pos := 0 + nonceBytes := int(enc[pos]) + pos++ + if nonceBytes > 0 { + a.Nonce = bytesToUint64(enc[pos : pos+nonceBytes]) + pos += nonceBytes + } + balanceBytes := int(enc[pos]) + pos++ + if balanceBytes > 0 { + a.Balance.SetBytes(enc[pos : pos+balanceBytes]) + pos += balanceBytes + } + codeHashBytes := int(enc[pos]) + pos++ + if codeHashBytes > 0 { + copy(a.CodeHash[:], enc[pos:pos+codeHashBytes]) + pos += codeHashBytes + } + incBytes := int(enc[pos]) + pos++ + if incBytes > 0 { + a.Incarnation = bytesToUint64(enc[pos : pos+incBytes]) + } + return &a, nil +} + +func (rw *ReaderWrapper22) ReadAccountStorage(address common.Address, incarnation uint64, key *common.Hash) ([]byte, error) { + enc, err := rw.r.ReadAccountStorage(address.Bytes(), key.Bytes(), rw.roTx) + if err != nil { + return nil, err + } + if enc == nil { + return nil, nil + } + return enc, nil +} + +func (rw *ReaderWrapper22) ReadAccountCode(address common.Address, incarnation uint64, codeHash common.Hash) ([]byte, error) { + return rw.r.ReadAccountCode(address.Bytes(), rw.roTx) +} + +func (rw *ReaderWrapper22) ReadAccountCodeSize(address common.Address, incarnation uint64, codeHash common.Hash) (int, error) { + return rw.r.ReadAccountCodeSize(address.Bytes(), rw.roTx) +} + +func (rw *ReaderWrapper22) ReadAccountIncarnation(address common.Address) (uint64, error) { + return 0, nil +} + +func (ww *WriterWrapper22) UpdateAccountData(address common.Address, original, account *accounts.Account) error { + var l int + l++ + if account.Nonce > 0 { + l += (bits.Len64(account.Nonce) + 7) / 8 + } + l++ + if !account.Balance.IsZero() { + l += account.Balance.ByteLen() + } + l++ + if !account.IsEmptyCodeHash() { + l += 32 + } + l++ + if account.Incarnation > 0 { + l += (bits.Len64(account.Incarnation) + 7) / 8 + } + value := make([]byte, l) + pos := 0 + if account.Nonce == 0 { + value[pos] = 0 + pos++ + } else { + nonceBytes := (bits.Len64(account.Nonce) + 7) / 8 + value[pos] = byte(nonceBytes) + var nonce = account.Nonce + for i := nonceBytes; i > 0; i-- { + value[pos+i] = byte(nonce) + nonce >>= 8 + } + pos += nonceBytes + 1 + } + if account.Balance.IsZero() { + value[pos] = 0 + pos++ + } else { + balanceBytes := account.Balance.ByteLen() + value[pos] = byte(balanceBytes) + pos++ + account.Balance.WriteToSlice(value[pos : pos+balanceBytes]) + pos += balanceBytes + } + if account.IsEmptyCodeHash() { + value[pos] = 0 + pos++ + } else { + value[pos] = 32 + pos++ + copy(value[pos:pos+32], account.CodeHash[:]) + pos += 32 + } + if account.Incarnation == 0 { + value[pos] = 0 + } else { + incBytes := 
(bits.Len64(account.Incarnation) + 7) / 8
+		value[pos] = byte(incBytes)
+		var inc = account.Incarnation
+		for i := incBytes; i > 0; i-- {
+			value[pos+i] = byte(inc)
+			inc >>= 8
+		}
+	}
+	if err := ww.w.UpdateAccountData(address.Bytes(), value); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (ww *WriterWrapper22) UpdateAccountCode(address common.Address, incarnation uint64, codeHash common.Hash, code []byte) error {
+	if err := ww.w.UpdateAccountCode(address.Bytes(), code); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (ww *WriterWrapper22) DeleteAccount(address common.Address, original *accounts.Account) error {
+	if err := ww.w.DeleteAccount(address.Bytes()); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (ww *WriterWrapper22) WriteAccountStorage(address common.Address, incarnation uint64, key *common.Hash, original, value *uint256.Int) error {
+	if err := ww.w.WriteAccountStorage(address.Bytes(), key.Bytes(), value.Bytes()); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (ww *WriterWrapper22) CreateContract(address common.Address) error {
+	return nil
+}
diff --git a/cmd/state/commands/history22.go b/cmd/state/commands/history22.go
new file mode 100644
index 00000000000..beb55a55c7c
--- /dev/null
+++ b/cmd/state/commands/history22.go
@@ -0,0 +1,286 @@
+package commands
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"os/signal"
+	"path"
+	"path/filepath"
+	"syscall"
+	"time"
+
+	kv2 "github.com/ledgerwatch/erigon-lib/kv/mdbx"
+	libstate "github.com/ledgerwatch/erigon-lib/state"
+	"github.com/ledgerwatch/erigon/common"
+	"github.com/ledgerwatch/erigon/consensus/ethash"
+	"github.com/ledgerwatch/erigon/consensus/misc"
+	"github.com/ledgerwatch/erigon/core"
+	"github.com/ledgerwatch/erigon/core/rawdb"
+	"github.com/ledgerwatch/erigon/core/state"
+	"github.com/ledgerwatch/erigon/core/types"
+	"github.com/ledgerwatch/erigon/core/types/accounts"
+	"github.com/ledgerwatch/erigon/core/vm"
+	"github.com/ledgerwatch/erigon/eth/ethconfig"
+	"github.com/ledgerwatch/erigon/params"
+	"github.com/ledgerwatch/erigon/turbo/services"
+	"github.com/ledgerwatch/erigon/turbo/snapshotsync"
+	"github.com/ledgerwatch/log/v3"
+	"github.com/spf13/cobra"
+)
+
+func init() {
+	withBlock(history22Cmd)
+	withDataDir(history22Cmd)
+	history22Cmd.Flags().IntVar(&traceBlock, "traceblock", 0, "block number at which to turn on tracing")
+	history22Cmd.Flags().IntVar(&blockTo, "blockto", 0, "block number to stop replay of history at")
+	rootCmd.AddCommand(history22Cmd)
+}
+
+var history22Cmd = &cobra.Command{
+	Use:   "history22",
+	Short: "Experimental command to re-execute historical transactions in erigon2 format",
+	RunE: func(cmd *cobra.Command, args []string) error {
+		logger := log.New()
+		return History22(genesis, logger)
+	},
+}
+
+func History22(genesis *core.Genesis, logger log.Logger) error {
+	sigs := make(chan os.Signal, 1)
+	interruptCh := make(chan bool, 1)
+	signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
+
+	go func() {
+		<-sigs
+		interruptCh <- true
+	}()
+	historyDb, err := kv2.NewMDBX(logger).Path(path.Join(datadir, "chaindata")).Open()
+	if err != nil {
+		return fmt.Errorf("opening chaindata as read only: %v", err)
+	}
+	defer historyDb.Close()
+	ctx := context.Background()
+	historyTx, err1 := historyDb.BeginRo(ctx)
+	if err1 != nil {
+		return err1
+	}
+	defer historyTx.Rollback()
+	aggPath := filepath.Join(datadir, "erigon22")
+	h, err3 := libstate.NewAggregator(aggPath, AggregationStep)
+	//h, err3 := aggregator.NewHistory(aggPath, uint64(blockTo), aggregationStep)
+	if err3
!= nil { + return fmt.Errorf("create history: %w", err3) + } + defer h.Close() + chainConfig := genesis.Config + vmConfig := vm.Config{} + + interrupt := false + blockNum := uint64(0) + var txNum uint64 = 2 + trace := false + logEvery := time.NewTicker(logInterval) + defer logEvery.Stop() + prevBlock := blockNum + prevTime := time.Now() + + var blockReader services.FullBlockReader + var allSnapshots *snapshotsync.RoSnapshots + allSnapshots = snapshotsync.NewRoSnapshots(ethconfig.NewSnapCfg(true, false, true), path.Join(datadir, "snapshots")) + defer allSnapshots.Close() + if err := allSnapshots.Reopen(); err != nil { + return fmt.Errorf("reopen snapshot segments: %w", err) + } + blockReader = snapshotsync.NewBlockReaderWithSnapshots(allSnapshots) + + for !interrupt { + select { + default: + case <-logEvery.C: + currentTime := time.Now() + interval := currentTime.Sub(prevTime) + speed := float64(blockNum-prevBlock) / (float64(interval) / float64(time.Second)) + prevBlock = blockNum + prevTime = currentTime + log.Info("Progress", "block", blockNum, "blk/s", speed) + } + blockNum++ + if blockNum > uint64(blockTo) { + break + } + blockHash, err := rawdb.ReadCanonicalHash(historyTx, blockNum) + if err != nil { + return err + } + b, _, err := blockReader.BlockWithSenders(ctx, historyTx, blockHash, blockNum) + if err != nil { + return err + } + if b == nil { + log.Info("history: block is nil", "block", blockNum) + break + } + if blockNum <= block { + // Skip that block, but increase txNum + txNum += uint64(len(b.Transactions())) + 2 // Pre and Post block transaction + continue + } + readWrapper := &HistoryWrapper22{r: h} + if traceBlock != 0 { + readWrapper.trace = blockNum == uint64(traceBlock) + } + writeWrapper := state.NewNoopWriter() + txNum++ // Pre block transaction + getHeader := func(hash common.Hash, number uint64) *types.Header { + h, err := blockReader.Header(ctx, historyTx, hash, number) + if err != nil { + panic(err) + } + return h + } + if txNum, _, err = runHistory22(trace, blockNum, txNum, readWrapper, writeWrapper, chainConfig, getHeader, b, vmConfig); err != nil { + return fmt.Errorf("block %d: %w", blockNum, err) + } + txNum++ // Post block transaction + // Check for interrupts + select { + case interrupt = <-interruptCh: + log.Info(fmt.Sprintf("interrupted, please wait for cleanup, next time start with --block %d", blockNum)) + default: + } + } + return nil +} + +func runHistory22(trace bool, blockNum, txNumStart uint64, hw *HistoryWrapper22, ww state.StateWriter, chainConfig *params.ChainConfig, getHeader func(hash common.Hash, number uint64) *types.Header, block *types.Block, vmConfig vm.Config) (uint64, types.Receipts, error) { + header := block.Header() + vmConfig.TraceJumpDest = true + engine := ethash.NewFullFaker() + gp := new(core.GasPool).AddGas(block.GasLimit()) + usedGas := new(uint64) + var receipts types.Receipts + daoBlock := chainConfig.DAOForkSupport && chainConfig.DAOForkBlock != nil && chainConfig.DAOForkBlock.Cmp(block.Number()) == 0 + txNum := txNumStart + for i, tx := range block.Transactions() { + hw.r.SetTxNum(txNum) + hw.txNum = txNum + ibs := state.New(hw) + if daoBlock { + misc.ApplyDAOHardFork(ibs) + daoBlock = false + } + ibs.Prepare(tx.Hash(), block.Hash(), i) + receipt, _, err := core.ApplyTransaction(chainConfig, getHeader, engine, nil, gp, ibs, ww, header, tx, usedGas, vmConfig, nil) + if err != nil { + return 0, nil, fmt.Errorf("could not apply tx %d [%x] failed: %w", i, tx.Hash(), err) + } + if traceBlock != 0 && blockNum == 
uint64(traceBlock) { + fmt.Printf("tx idx %d, num %d, gas used %d\n", i, txNum, receipt.GasUsed) + } + receipts = append(receipts, receipt) + txNum++ + hw.r.SetTxNum(txNum) + hw.txNum = txNum + } + + return txNum, receipts, nil +} + +// Implements StateReader and StateWriter +type HistoryWrapper22 struct { + r *libstate.Aggregator + txNum uint64 + trace bool +} + +func (hw *HistoryWrapper22) ReadAccountData(address common.Address) (*accounts.Account, error) { + enc, err := hw.r.ReadAccountDataBeforeTxNum(address.Bytes(), hw.txNum, nil /* roTx */) + if err != nil { + return nil, err + } + if len(enc) == 0 { + if hw.trace { + fmt.Printf("ReadAccountData [%x] => []\n", address) + } + return nil, nil + } + var a accounts.Account + a.Reset() + pos := 0 + nonceBytes := int(enc[pos]) + pos++ + if nonceBytes > 0 { + a.Nonce = bytesToUint64(enc[pos : pos+nonceBytes]) + pos += nonceBytes + } + balanceBytes := int(enc[pos]) + pos++ + if balanceBytes > 0 { + a.Balance.SetBytes(enc[pos : pos+balanceBytes]) + pos += balanceBytes + } + codeHashBytes := int(enc[pos]) + pos++ + if codeHashBytes > 0 { + copy(a.CodeHash[:], enc[pos:pos+codeHashBytes]) + pos += codeHashBytes + } + if pos >= len(enc) { + fmt.Printf("panic ReadAccountData(%x)=>[%x]\n", address, enc) + } + incBytes := int(enc[pos]) + pos++ + if incBytes > 0 { + a.Incarnation = bytesToUint64(enc[pos : pos+incBytes]) + } + if hw.trace { + fmt.Printf("ReadAccountData [%x] => [nonce: %d, balance: %d, codeHash: %x]\n", address, a.Nonce, &a.Balance, a.CodeHash) + } + return &a, nil +} + +func (hw *HistoryWrapper22) ReadAccountStorage(address common.Address, incarnation uint64, key *common.Hash) ([]byte, error) { + enc, err := hw.r.ReadAccountStorageBeforeTxNum(address.Bytes(), key.Bytes(), hw.txNum, nil /* roTx */) + if err != nil { + fmt.Printf("%v\n", err) + return nil, err + } + if hw.trace { + if enc == nil { + fmt.Printf("ReadAccountStorage [%x] [%x] => []\n", address, key.Bytes()) + } else { + fmt.Printf("ReadAccountStorage [%x] [%x] => [%x]\n", address, key.Bytes(), enc) + } + } + if enc == nil { + return nil, nil + } + return enc, nil +} + +func (hw *HistoryWrapper22) ReadAccountCode(address common.Address, incarnation uint64, codeHash common.Hash) ([]byte, error) { + enc, err := hw.r.ReadAccountCodeBeforeTxNum(address.Bytes(), hw.txNum, nil /* roTx */) + if err != nil { + return nil, err + } + if hw.trace { + fmt.Printf("ReadAccountCode [%x] => [%x]\n", address, enc) + } + return enc, nil +} + +func (hw *HistoryWrapper22) ReadAccountCodeSize(address common.Address, incarnation uint64, codeHash common.Hash) (int, error) { + size, err := hw.r.ReadAccountCodeSizeBeforeTxNum(address.Bytes(), hw.txNum, nil /* roTx */) + if err != nil { + return 0, err + } + if hw.trace { + fmt.Printf("ReadAccountCodeSize [%x] => [%d]\n", address, size) + } + return size, nil +} + +func (hw *HistoryWrapper22) ReadAccountIncarnation(address common.Address) (uint64, error) { + return 0, nil +} diff --git a/cmd/state/commands/opcode_tracer.go b/cmd/state/commands/opcode_tracer.go index 522f74c60be..fb6e9777180 100644 --- a/cmd/state/commands/opcode_tracer.go +++ b/cmd/state/commands/opcode_tracer.go @@ -560,7 +560,7 @@ func OpcodeTracer(genesis *core.Genesis, blockNum uint64, chaindata string, numB getHeader := func(hash common.Hash, number uint64) *types.Header { return rawdb.ReadHeader(historyTx, hash, number) } contractHasTEVM := ethdb.GetHasTEVM(historyTx) - receipts, err1 := runBlock(ethash.NewFullFaker(), intraBlockState, noOpWriter, noOpWriter, chainConfig, 
getHeader, contractHasTEVM, block, vmConfig) + receipts, err1 := runBlock(ethash.NewFullFaker(), intraBlockState, noOpWriter, noOpWriter, chainConfig, getHeader, contractHasTEVM, block, vmConfig, false) if err1 != nil { return err1 } @@ -671,7 +671,7 @@ func OpcodeTracer(genesis *core.Genesis, blockNum uint64, chaindata string, numB } func runBlock(engine consensus.Engine, ibs *state.IntraBlockState, txnWriter state.StateWriter, blockWriter state.StateWriter, - chainConfig *params.ChainConfig, getHeader func(hash common.Hash, number uint64) *types.Header, contractHasTEVM func(common.Hash) (bool, error), block *types.Block, vmConfig vm.Config) (types.Receipts, error) { + chainConfig *params.ChainConfig, getHeader func(hash common.Hash, number uint64) *types.Header, contractHasTEVM func(common.Hash) (bool, error), block *types.Block, vmConfig vm.Config, trace bool) (types.Receipts, error) { header := block.Header() vmConfig.TraceJumpDest = true gp := new(core.GasPool).AddGas(block.GasLimit()) @@ -688,6 +688,9 @@ func runBlock(engine consensus.Engine, ibs *state.IntraBlockState, txnWriter sta if err != nil { return nil, fmt.Errorf("could not apply tx %d [%x] failed: %w", i, tx.Hash(), err) } + if trace { + fmt.Printf("tx idx %d, gas used %d\n", i, receipt.GasUsed) + } receipts = append(receipts, receipt) } diff --git a/cmd/state/commands/state_root.go b/cmd/state/commands/state_root.go index 22dca7de421..7d00121beea 100644 --- a/cmd/state/commands/state_root.go +++ b/cmd/state/commands/state_root.go @@ -131,7 +131,7 @@ func StateRoot(genesis *core.Genesis, logger log.Logger, blockNum uint64, datadi r := state.NewPlainStateReader(tx) intraBlockState := state.New(r) getHeader := func(hash common.Hash, number uint64) *types.Header { return rawdb.ReadHeader(historyTx, hash, number) } - if _, err = runBlock(ethash.NewFullFaker(), intraBlockState, noOpWriter, w, chainConfig, getHeader, nil, b, vmConfig); err != nil { + if _, err = runBlock(ethash.NewFullFaker(), intraBlockState, noOpWriter, w, chainConfig, getHeader, nil, b, vmConfig, false); err != nil { return fmt.Errorf("block %d: %w", block, err) } if block+1 == blockNum { diff --git a/core/state/plain_readonly.go b/core/state/plain_readonly.go index 08809bb2a60..ac9dea345e3 100644 --- a/core/state/plain_readonly.go +++ b/core/state/plain_readonly.go @@ -157,6 +157,9 @@ func (s *PlainState) ReadAccountData(address common.Address) (*accounts.Account, return nil, err } if len(enc) == 0 { + if s.trace { + fmt.Printf("ReadAccountData [%x] => []\n", address) + } return nil, nil } var a accounts.Account @@ -173,6 +176,9 @@ func (s *PlainState) ReadAccountData(address common.Address) (*accounts.Account, return nil, err1 } } + if s.trace { + fmt.Printf("ReadAccountData [%x] => [nonce: %d, balance: %d, codeHash: %x]\n", address, a.Nonce, &a.Balance, a.CodeHash) + } return &a, nil } diff --git a/go.mod b/go.mod index e00a9458795..c9da44aeca3 100644 --- a/go.mod +++ b/go.mod @@ -34,7 +34,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible - github.com/ledgerwatch/erigon-lib v0.0.0-20220609100618-fec29e42265c + github.com/ledgerwatch/erigon-lib v0.0.0-20220610055100-7ce8bd589f79 github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 diff --git a/go.sum b/go.sum index dee89d24b8e..99f0c0641c3 100644 --- a/go.sum +++ b/go.sum @@ -384,8 +384,8 @@ github.com/kylelemons/godebug 
v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3P github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220609100618-fec29e42265c h1:qlt59N/PVVmdUXzXi6IhAlUvoFM1hDZoiqIWWNPJOIk= -github.com/ledgerwatch/erigon-lib v0.0.0-20220609100618-fec29e42265c/go.mod h1:jNDE6PRPIA8wUdikJs8BvKtrFv101qOijIXA3HnDW8E= +github.com/ledgerwatch/erigon-lib v0.0.0-20220610055100-7ce8bd589f79 h1:c97OsvCaCuDBpCkjMX/+uMbeA1OxfLQuvUniZQ/bnSM= +github.com/ledgerwatch/erigon-lib v0.0.0-20220610055100-7ce8bd589f79/go.mod h1:jNDE6PRPIA8wUdikJs8BvKtrFv101qOijIXA3HnDW8E= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= diff --git a/cmd/rpcdaemon/rpcservices/filters.go b/turbo/rpchelper/filters.go similarity index 98% rename from cmd/rpcdaemon/rpcservices/filters.go rename to turbo/rpchelper/filters.go index 32bce150a8c..c279744e0c5 100644 --- a/cmd/rpcdaemon/rpcservices/filters.go +++ b/turbo/rpchelper/filters.go @@ -1,4 +1,4 @@ -package rpcservices +package rpchelper import ( "bytes" @@ -17,7 +17,6 @@ import ( "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" txpool2 "github.com/ledgerwatch/erigon-lib/txpool" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon/rpcservices/rpcinterfaces" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/eth/filters" @@ -54,7 +53,7 @@ type Filters struct { pendingTxsStores map[PendingTxsSubID][][]types.Transaction } -func New(ctx context.Context, ethBackend rpcinterfaces.ApiBackend, txPool txpool.TxpoolClient, mining txpool.MiningClient, onNewSnapshot func()) *Filters { +func New(ctx context.Context, ethBackend ApiBackend, txPool txpool.TxpoolClient, mining txpool.MiningClient, onNewSnapshot func()) *Filters { log.Info("rpc filters: subscribing to Erigon events") ff := &Filters{ diff --git a/turbo/rpchelper/helper.go b/turbo/rpchelper/helper.go index 0227d156056..ec307e4fc1b 100644 --- a/turbo/rpchelper/helper.go +++ b/turbo/rpchelper/helper.go @@ -6,7 +6,6 @@ import ( "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/kvcache" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon/rpcservices" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/state" @@ -25,15 +24,15 @@ func (e nonCanonocalHashError) Error() string { return fmt.Sprintf("hash %x is not currently canonical", e.hash) } -func GetBlockNumber(blockNrOrHash rpc.BlockNumberOrHash, tx kv.Tx, filters *rpcservices.Filters) (uint64, common.Hash, bool, error) { +func GetBlockNumber(blockNrOrHash rpc.BlockNumberOrHash, tx kv.Tx, filters *Filters) (uint64, common.Hash, bool, error) { return _GetBlockNumber(blockNrOrHash.RequireCanonical, blockNrOrHash, tx, filters) } -func GetCanonicalBlockNumber(blockNrOrHash rpc.BlockNumberOrHash, tx kv.Tx, filters *rpcservices.Filters) (uint64, common.Hash, bool, error) { +func GetCanonicalBlockNumber(blockNrOrHash rpc.BlockNumberOrHash, tx kv.Tx, filters *Filters) (uint64, common.Hash, bool, error) { return 
_GetBlockNumber(true, blockNrOrHash, tx, filters) } -func _GetBlockNumber(requireCanonical bool, blockNrOrHash rpc.BlockNumberOrHash, tx kv.Tx, filters *rpcservices.Filters) (blockNumber uint64, hash common.Hash, latest bool, err error) { +func _GetBlockNumber(requireCanonical bool, blockNrOrHash rpc.BlockNumberOrHash, tx kv.Tx, filters *Filters) (blockNumber uint64, hash common.Hash, latest bool, err error) { var latestBlockNumber uint64 if latestBlockNumber, err = stages.GetStageProgress(tx, stages.Execution); err != nil { return 0, common.Hash{}, false, fmt.Errorf("getting latest block number: %w", err) @@ -86,7 +85,7 @@ func GetAccount(tx kv.Tx, blockNumber uint64, address common.Address) (*accounts return reader.ReadAccountData(address) } -func CreateStateReader(ctx context.Context, tx kv.Tx, blockNrOrHash rpc.BlockNumberOrHash, filters *rpcservices.Filters, stateCache kvcache.Cache) (state.StateReader, error) { +func CreateStateReader(ctx context.Context, tx kv.Tx, blockNrOrHash rpc.BlockNumberOrHash, filters *Filters, stateCache kvcache.Cache) (state.StateReader, error) { blockNumber, _, latest, err := _GetBlockNumber(true, blockNrOrHash, tx, filters) if err != nil { return nil, err diff --git a/cmd/rpcdaemon/rpcservices/rpcinterfaces/interface.go b/turbo/rpchelper/interface.go similarity index 98% rename from cmd/rpcdaemon/rpcservices/rpcinterfaces/interface.go rename to turbo/rpchelper/interface.go index 35c39b3f962..1fde0d8e515 100644 --- a/cmd/rpcdaemon/rpcservices/rpcinterfaces/interface.go +++ b/turbo/rpchelper/interface.go @@ -1,4 +1,4 @@ -package rpcinterfaces +package rpchelper import ( "context" diff --git a/cmd/rpcdaemon/rpcservices/logsfilter.go b/turbo/rpchelper/logsfilter.go similarity index 99% rename from cmd/rpcdaemon/rpcservices/logsfilter.go rename to turbo/rpchelper/logsfilter.go index b3c6c90c2f1..62a4d45fd4a 100644 --- a/cmd/rpcdaemon/rpcservices/logsfilter.go +++ b/turbo/rpchelper/logsfilter.go @@ -1,4 +1,4 @@ -package rpcservices +package rpchelper import ( "sync" diff --git a/turbo/transactions/call.go b/turbo/transactions/call.go index 82fe1fd85a2..9df067e7f54 100644 --- a/turbo/transactions/call.go +++ b/turbo/transactions/call.go @@ -9,7 +9,6 @@ import ( "github.com/holiman/uint256" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/kvcache" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon/rpcservices" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/rawdb" @@ -32,7 +31,7 @@ func DoCall( block *types.Block, overrides *ethapi.StateOverrides, gasCap uint64, chainConfig *params.ChainConfig, - filters *rpcservices.Filters, + filters *rpchelper.Filters, stateCache kvcache.Cache, contractHasTEVM func(hash common.Hash) (bool, error), ) (*core.ExecutionResult, error) { From 1cc5b92dd8a6f3ca646636c891fe3b096fa82233 Mon Sep 17 00:00:00 2001 From: ledgerwatch Date: Sat, 11 Jun 2022 08:13:40 +0100 Subject: [PATCH 043/136] Another fix for stuck header download (#4433) Co-authored-by: Alex Sharp --- cmd/sentry/sentry/sentry_multi_client.go | 5 ++--- eth/stagedsync/stage_headers.go | 13 +++++-------- 2 files changed, 7 insertions(+), 11 deletions(-) diff --git a/cmd/sentry/sentry/sentry_multi_client.go b/cmd/sentry/sentry/sentry_multi_client.go index 70721dae3e6..efa5f9c871b 100644 --- a/cmd/sentry/sentry/sentry_multi_client.go +++ b/cmd/sentry/sentry/sentry_multi_client.go @@ -418,11 +418,10 @@ func (cs *MultiClient) blockHeaders(ctx context.Context, pkt eth.BlockHeadersPac req, 
penalties := cs.Hd.RequestMoreHeaders(currentTime) if req != nil { if _, sentToPeer := cs.SendHeaderRequest(ctx, req); sentToPeer { - // If request was actually sent to a peer, we update retry time to be 5 seconds in the future - cs.Hd.UpdateRetryTime(req, currentTime, 5*time.Second /* timeout */) - log.Trace("Sent request", "height", req.Number) cs.Hd.UpdateStats(req, false /* skeleton */) } + // Regardless of whether request was actually sent to a peer, we update retry time to be 5 seconds in the future + cs.Hd.UpdateRetryTime(req, currentTime, 5*time.Second /* timeout */) } if len(penalties) > 0 { cs.Penalize(ctx, penalties) diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 337934a25da..3ef91ed5894 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -738,10 +738,10 @@ Loop: if req != nil { _, sentToPeer = cfg.headerReqSend(ctx, req) if sentToPeer { - // If request was actually sent to a peer, we update retry time to be 5 seconds in the future - cfg.hd.UpdateRetryTime(req, currentTime, 5*time.Second /* timeout */) - log.Trace("Sent request", "height", req.Number) + cfg.hd.UpdateStats(req, false /* skeleton */) } + // Regardless of whether request was actually sent to a peer, we update retry time to be 5 seconds in the future + cfg.hd.UpdateRetryTime(req, currentTime, 5*time.Second /* timeout */) } if len(penalties) > 0 { cfg.penalize(ctx, penalties) @@ -752,12 +752,10 @@ Loop: if req != nil { _, sentToPeer = cfg.headerReqSend(ctx, req) if sentToPeer { - // If request was actually sent to a peer, we update retry time to be 5 seconds in the future - cfg.hd.UpdateRetryTime(req, currentTime, 5*time.Second /*timeout */) - log.Trace("Sent request", "height", req.Number) cfg.hd.UpdateStats(req, false /* skeleton */) - } + // Regardless of whether request was actually sent to a peer, we update retry time to be 5 seconds in the future + cfg.hd.UpdateRetryTime(req, currentTime, 5*time.Second /* timeout */) } if len(penalties) > 0 { cfg.penalize(ctx, penalties) @@ -771,7 +769,6 @@ Loop: if req != nil { _, sentToPeer = cfg.headerReqSend(ctx, req) if sentToPeer { - log.Trace("Sent skeleton request", "height", req.Number) cfg.hd.UpdateStats(req, true /* skeleton */) lastSkeletonTime = time.Now() } From 3f1e9948fac43e165989b390cf04eed37bfd9a29 Mon Sep 17 00:00:00 2001 From: ledgerwatch Date: Sat, 11 Jun 2022 08:53:02 +0100 Subject: [PATCH 044/136] Update skip_analysis.go (#4435) --- core/skip_analysis.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/skip_analysis.go b/core/skip_analysis.go index cffeee06079..1dafa999759 100644 --- a/core/skip_analysis.go +++ b/core/skip_analysis.go @@ -24,9 +24,9 @@ import ( const MainnetNotCheckedFrom uint64 = 14_909_200 // MainnetNotCheckedFrom is the first block number not yet checked for invalid jumps -const BSCNotCheckedFrom uint64 = 18_492_482 +const BSCNotCheckedFrom uint64 = 18_589_376 -const BorMainnetNotCheckedFrom uint64 = 21_128_788 +const BorMainnetNotCheckedFrom uint64 = 24_673_536 const RopstenNotCheckedFrom uint64 = 12_331_664 From 41c0a47485291d7149b18af0c905e2789cb00471 Mon Sep 17 00:00:00 2001 From: iFA Date: Sat, 11 Jun 2022 13:05:26 +0200 Subject: [PATCH 045/136] Add REVERT result to trace (#4434) * Add REVERT result to trace * topTrace error should be changed too --- cmd/rpcdaemon/commands/trace_adhoc.go | 37 +++++++++++++++------------ 1 file changed, 21 insertions(+), 16 deletions(-) diff --git a/cmd/rpcdaemon/commands/trace_adhoc.go 
b/cmd/rpcdaemon/commands/trace_adhoc.go index 5426b869530..aac18010a9c 100644 --- a/cmd/rpcdaemon/commands/trace_adhoc.go +++ b/cmd/rpcdaemon/commands/trace_adhoc.go @@ -365,26 +365,31 @@ func (ot *OeTracer) CaptureEnd(depth int, output []byte, startGas, endGas uint64 ignoreError = depth == 0 && topTrace.Type == CREATE } if err != nil && !ignoreError { - switch err { - case vm.ErrInvalidJump: - topTrace.Error = "Bad jump destination" - case vm.ErrContractAddressCollision, vm.ErrCodeStoreOutOfGas, vm.ErrOutOfGas, vm.ErrGasUintOverflow: - topTrace.Error = "Out of gas" - case vm.ErrExecutionReverted: + if err == vm.ErrExecutionReverted { topTrace.Error = "Reverted" - case vm.ErrWriteProtection: - topTrace.Error = "Mutable Call In Static Context" - default: - switch err.(type) { - case *vm.ErrStackUnderflow: - topTrace.Error = "Stack underflow" - case *vm.ErrInvalidOpCode: - topTrace.Error = "Bad instruction" + topTrace.Result.(*TraceResult).GasUsed = new(hexutil.Big) + topTrace.Result.(*TraceResult).GasUsed.ToInt().SetUint64(startGas - endGas) + topTrace.Result.(*TraceResult).Output = common.CopyBytes(output) + } else { + topTrace.Result = nil + switch err { + case vm.ErrInvalidJump: + topTrace.Error = "Bad jump destination" + case vm.ErrContractAddressCollision, vm.ErrCodeStoreOutOfGas, vm.ErrOutOfGas, vm.ErrGasUintOverflow: + topTrace.Error = "Out of gas" + case vm.ErrWriteProtection: + topTrace.Error = "Mutable Call In Static Context" default: - topTrace.Error = err.Error() + switch err.(type) { + case *vm.ErrStackUnderflow: + topTrace.Error = "Stack underflow" + case *vm.ErrInvalidOpCode: + topTrace.Error = "Bad instruction" + default: + topTrace.Error = err.Error() + } } } - topTrace.Result = nil } else { if len(output) > 0 { switch topTrace.Type { From fd8adddce8ee7f4c7a859dffcc65569c934fc991 Mon Sep 17 00:00:00 2001 From: ledgerwatch Date: Sun, 12 Jun 2022 12:44:01 +0100 Subject: [PATCH 046/136] trace_filter implementation based on erigon 2 update 2 data (#4431) * trace_filter implementation based on erigon 2 update 2 data * Fix test compile, extract txNums * Update to erigon-lib, add generic heap * Fix getHeaderHash for RPC methods * Missing files * Skip tests * Add reward traces * Filter trace lint * Reinstate filtering * Print * Print * Fix * Print * Print txNums * Fix * Fix * Fix * Fix * Fix * Fix * Fix * Remove prints * Fix nil dereference * Inclusive toBlock bound * Print * Print * Hack * remove some deps * Fix lint * Update erigon-lib Co-authored-by: Alexey Sharp Co-authored-by: Alex Sharp --- cmd/hack/hack.go | 1463 +---------------- cmd/rpcdaemon/commands/eth_block.go | 2 +- cmd/rpcdaemon/commands/eth_call.go | 6 +- cmd/rpcdaemon/commands/trace_adhoc.go | 4 +- cmd/rpcdaemon/commands/tracing.go | 2 +- cmd/rpcdaemon/rpcservices/eth_backend.go | 3 + cmd/rpcdaemon22/cli/config.go | 50 +- cmd/rpcdaemon22/commands/call_traces_test.go | 12 +- .../commands/corner_cases_support_test.go | 2 +- cmd/rpcdaemon22/commands/daemon.go | 5 +- cmd/rpcdaemon22/commands/debug_api_test.go | 8 +- cmd/rpcdaemon22/commands/eth_api.go | 7 +- cmd/rpcdaemon22/commands/eth_api_test.go | 22 +- cmd/rpcdaemon22/commands/eth_block.go | 2 +- cmd/rpcdaemon22/commands/eth_call.go | 6 +- cmd/rpcdaemon22/commands/eth_call_test.go | 14 +- cmd/rpcdaemon22/commands/eth_ming_test.go | 2 +- .../commands/send_transaction_test.go | 2 +- .../starknet_send_transaction_test.go | 2 +- cmd/rpcdaemon22/commands/trace_adhoc.go | 4 +- cmd/rpcdaemon22/commands/trace_adhoc_test.go | 8 +- cmd/rpcdaemon22/commands/trace_filtering.go 
| 314 ++-- cmd/rpcdaemon22/commands/tracing.go | 2 +- cmd/rpcdaemon22/commands/txpool_api_test.go | 2 +- cmd/rpcdaemon22/main.go | 4 +- cmd/rpcdaemon22/rpcservices/eth_backend.go | 3 + cmd/state/commands/erigon22.go | 23 +- cmd/state/commands/history22.go | 111 +- core/state/HistoryReader22.go | 129 ++ go.mod | 5 +- go.sum | 10 +- turbo/services/interfaces.go | 2 +- turbo/snapshotsync/block_snapshots.go | 16 + turbo/transactions/call.go | 18 +- 34 files changed, 509 insertions(+), 1756 deletions(-) create mode 100644 core/state/HistoryReader22.go diff --git a/cmd/hack/hack.go b/cmd/hack/hack.go index f6806984986..211159e64a5 100644 --- a/cmd/hack/hack.go +++ b/cmd/hack/hack.go @@ -6,32 +6,26 @@ import ( "context" "encoding/binary" "encoding/json" - "errors" "flag" "fmt" - "io" "math/big" "net/http" _ "net/http/pprof" //nolint:gosec "os" - "os/signal" "path/filepath" "regexp" - "runtime" "runtime/pprof" "strconv" "strings" - "syscall" "time" "github.com/RoaringBitmap/roaring/roaring64" "github.com/holiman/uint256" - libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/compress" - "github.com/ledgerwatch/erigon-lib/etl" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/mdbx" "github.com/ledgerwatch/erigon-lib/recsplit" + "github.com/ledgerwatch/erigon-lib/recsplit/eliasfano32" "golang.org/x/exp/slices" hackdb "github.com/ledgerwatch/erigon/cmd/hack/db" @@ -45,20 +39,14 @@ import ( "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/core/types/accounts" "github.com/ledgerwatch/erigon/crypto" - "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/ethdb" "github.com/ledgerwatch/erigon/ethdb/cbor" "github.com/ledgerwatch/erigon/internal/debug" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rlp" - "github.com/ledgerwatch/erigon/turbo/snapshotsync" - "github.com/ledgerwatch/erigon/turbo/snapshotsync/parallelcompress" - "github.com/ledgerwatch/erigon/turbo/trie" "github.com/ledgerwatch/log/v3" - "github.com/wcharczuk/go-chart/v2" ) const ASSERT = false @@ -67,7 +55,6 @@ var ( verbosity = flag.Uint("verbosity", 3, "Logging verbosity: 0=silent, 1=error, 2=warn, 3=info, 4=debug, 5=detail (default 3)") action = flag.String("action", "", "action to execute") cpuprofile = flag.String("cpuprofile", "", "write cpu profile `file`") - rewind = flag.Int("rewind", 1, "rewind to given number of blocks") block = flag.Int("block", 1, "specifies a block number for operation") blockTotal = flag.Int("blocktotal", 1, "specifies a total amount of blocks to process (will offset from head block if <= 0)") account = flag.String("account", "0x", "specifies account to investigate") @@ -77,487 +64,6 @@ var ( hash = flag.String("hash", "0x00", "image for preimage or state root for testBlockHashes action") ) -func readData(filename string) (blocks []float64, hours []float64, dbsize []float64, trienodes []float64, heap []float64) { - err := chart.ReadLines(filename, func(line string) error { - parts := strings.Split(line, ",") - blocks = append(blocks, tool.ParseFloat64(strings.Trim(parts[0], " "))) - hours = append(hours, tool.ParseFloat64(strings.Trim(parts[1], " "))) - dbsize = append(dbsize, tool.ParseFloat64(strings.Trim(parts[2], " "))) - trienodes = append(trienodes, tool.ParseFloat64(strings.Trim(parts[3], " "))) - heap = append(heap, 
tool.ParseFloat64(strings.Trim(parts[4], " "))) - return nil - }) - if err != nil { - fmt.Println(err.Error()) - } - return -} - -func notables() []chart.GridLine { - return []chart.GridLine{ - {Value: 1.0}, - {Value: 2.0}, - {Value: 3.0}, - {Value: 4.0}, - {Value: 5.0}, - {Value: 6.0}, - } -} - -func days() []chart.GridLine { - return []chart.GridLine{ - {Value: 24.0}, - {Value: 48.0}, - {Value: 72.0}, - {Value: 96.0}, - {Value: 120.0}, - {Value: 144.0}, - {Value: 168.0}, - {Value: 192.0}, - {Value: 216.0}, - {Value: 240.0}, - {Value: 264.0}, - {Value: 288.0}, - } -} - -func mychart() { - blocks, hours, dbsize, trienodes, heap := readData("bolt.csv") - blocks0, hours0, dbsize0, _, _ := readData("badger.csv") - mainSeries := &chart.ContinuousSeries{ - Name: "Cumulative sync time (bolt)", - Style: chart.Style{ - StrokeColor: chart.ColorBlue, - FillColor: chart.ColorBlue.WithAlpha(100), - }, - XValues: blocks, - YValues: hours, - } - badgerSeries := &chart.ContinuousSeries{ - Name: "Cumulative sync time (badger)", - Style: chart.Style{ - StrokeColor: chart.ColorRed, - FillColor: chart.ColorRed.WithAlpha(100), - }, - XValues: blocks0, - YValues: hours0, - } - dbsizeSeries := &chart.ContinuousSeries{ - Name: "Database size (bolt)", - Style: chart.Style{ - - StrokeColor: chart.ColorBlack, - }, - YAxis: chart.YAxisSecondary, - XValues: blocks, - YValues: dbsize, - } - dbsizeSeries0 := &chart.ContinuousSeries{ - Name: "Database size (badger)", - Style: chart.Style{ - - StrokeColor: chart.ColorOrange, - }, - YAxis: chart.YAxisSecondary, - XValues: blocks, - YValues: dbsize0, - } - - graph1 := chart.Chart{ - Width: 1280, - Height: 720, - Background: chart.Style{ - Padding: chart.Box{ - Top: 50, - }, - }, - YAxis: chart.YAxis{ - Name: "Elapsed time", - NameStyle: chart.Shown(), - Style: chart.Shown(), - TickStyle: chart.Style{ - TextRotationDegrees: 45.0, - }, - ValueFormatter: func(v interface{}) string { - return fmt.Sprintf("%d h", int(v.(float64))) - }, - GridMajorStyle: chart.Style{ - - StrokeColor: chart.ColorBlue, - StrokeWidth: 1.0, - }, - GridLines: days(), - }, - YAxisSecondary: chart.YAxis{ - NameStyle: chart.Shown(), - Style: chart.Shown(), - TickStyle: chart.Style{ - TextRotationDegrees: 45.0, - }, - ValueFormatter: func(v interface{}) string { - return fmt.Sprintf("%d G", int(v.(float64))) - }, - }, - XAxis: chart.XAxis{ - Name: "Blocks, million", - Style: chart.Style{}, - ValueFormatter: func(v interface{}) string { - return fmt.Sprintf("%.3fm", v.(float64)) - }, - GridMajorStyle: chart.Style{ - - StrokeColor: chart.ColorAlternateGray, - StrokeWidth: 1.0, - }, - GridLines: notables(), - }, - Series: []chart.Series{ - mainSeries, - badgerSeries, - dbsizeSeries, - dbsizeSeries0, - }, - } - - graph1.Elements = []chart.Renderable{chart.LegendThin(&graph1)} - - buffer := bytes.NewBuffer([]byte{}) - err := graph1.Render(chart.PNG, buffer) - tool.Check(err) - err = os.WriteFile("chart1.png", buffer.Bytes(), 0644) - tool.Check(err) - - heapSeries := &chart.ContinuousSeries{ - Name: "Allocated heap", - Style: chart.Style{ - - StrokeColor: chart.ColorYellow, - FillColor: chart.ColorYellow.WithAlpha(100), - }, - XValues: blocks, - YValues: heap, - } - trienodesSeries := &chart.ContinuousSeries{ - Name: "Trie nodes", - Style: chart.Style{ - - StrokeColor: chart.ColorGreen, - }, - YAxis: chart.YAxisSecondary, - XValues: blocks, - YValues: trienodes, - } - graph2 := chart.Chart{ - Width: 1280, - Height: 720, - Background: chart.Style{ - Padding: chart.Box{ - Top: 50, - }, - }, - YAxis: chart.YAxis{ 
- Name: "Allocated heap", - NameStyle: chart.Shown(), - Style: chart.Shown(), - TickStyle: chart.Style{ - TextRotationDegrees: 45.0, - }, - ValueFormatter: func(v interface{}) string { - return fmt.Sprintf("%.1f G", v.(float64)) - }, - GridMajorStyle: chart.Style{ - - StrokeColor: chart.ColorYellow, - StrokeWidth: 1.0, - }, - GridLines: days(), - }, - YAxisSecondary: chart.YAxis{ - NameStyle: chart.Shown(), - Style: chart.Shown(), - TickStyle: chart.Style{ - TextRotationDegrees: 45.0, - }, - ValueFormatter: func(v interface{}) string { - return fmt.Sprintf("%.1f m", v.(float64)) - }, - }, - XAxis: chart.XAxis{ - Name: "Blocks, million", - Style: chart.Style{}, - ValueFormatter: func(v interface{}) string { - return fmt.Sprintf("%.3fm", v.(float64)) - }, - GridMajorStyle: chart.Style{ - - StrokeColor: chart.ColorAlternateGray, - StrokeWidth: 1.0, - }, - GridLines: notables(), - }, - Series: []chart.Series{ - heapSeries, - trienodesSeries, - }, - } - - graph2.Elements = []chart.Renderable{chart.LegendThin(&graph2)} - buffer.Reset() - err = graph2.Render(chart.PNG, buffer) - tool.Check(err) - err = os.WriteFile("chart2.png", buffer.Bytes(), 0644) - tool.Check(err) -} - -func bucketStats(chaindata string) error { - /* - ethDb := mdbx.MustOpen(chaindata) - defer ethDb.Close() - - var bucketList []string - if err1 := ethDb.View(context.Background(), func(txa kv.Tx) error { - if bl, err := txa.(kv.BucketMigrator).ListBuckets(); err == nil { - bucketList = bl - } else { - return err - } - return nil - }); err1 != nil { - ethDb.Close() - return err1 - } - fmt.Printf(",BranchPageN,LeafPageN,OverflowN,Entries\n") - switch db := ethDb.(type) { - case *mdbx.MdbxKV: - type MdbxStat interface { - BucketStat(name string) (*mdbx.Stat, error) - } - - if err := db.View(context.Background(), func(tx kv.Tx) error { - for _, bucket := range bucketList { - bs, statErr := tx.(MdbxStat).BucketStat(bucket) - tool.Check(statErr) - fmt.Printf("%s,%d,%d,%d,%d\n", bucket, - bs.BranchPages, bs.LeafPages, bs.OverflowPages, bs.Entries) - } - bs, statErr := tx.(MdbxStat).BucketStat("freelist") - tool.Check(statErr) - fmt.Printf("%s,%d,%d,%d,%d\n", "freelist", bs.BranchPages, bs.LeafPages, bs.OverflowPages, bs.Entries) - return nil - }); err != nil { - panic(err) - } - } - */ - return nil -} - -func readTrieLog() ([]float64, map[int][]float64, []float64) { - data, err := os.ReadFile("dust/hack.log") - tool.Check(err) - thresholds := []float64{} - counts := map[int][]float64{} - for i := 2; i <= 16; i++ { - counts[i] = []float64{} - } - shorts := []float64{} - lines := bytes.Split(data, []byte("\n")) - for _, line := range lines { - if bytes.HasPrefix(line, []byte("Threshold:")) { - tokens := bytes.Split(line, []byte(" ")) - if len(tokens) == 23 { - wei := tool.ParseFloat64(string(tokens[1])) - thresholds = append(thresholds, wei) - for i := 2; i <= 16; i++ { - pair := bytes.Split(tokens[i+3], []byte(":")) - counts[i] = append(counts[i], tool.ParseFloat64(string(pair[1]))) - } - pair := bytes.Split(tokens[21], []byte(":")) - shorts = append(shorts, tool.ParseFloat64(string(pair[1]))) - } - } - } - return thresholds, counts, shorts -} - -func trieChart() { - thresholds, counts, shorts := readTrieLog() - fmt.Printf("%d %d %d\n", len(thresholds), len(counts), len(shorts)) - shortsSeries := &chart.ContinuousSeries{ - Name: "Short nodes", - Style: chart.Style{ - - StrokeColor: chart.ColorBlue, - FillColor: chart.ColorBlue.WithAlpha(100), - }, - XValues: thresholds, - YValues: shorts, - } - countSeries := 
make(map[int]*chart.ContinuousSeries) - for i := 2; i <= 16; i++ { - countSeries[i] = &chart.ContinuousSeries{ - Name: fmt.Sprintf("%d-nodes", i), - Style: chart.Style{ - - StrokeColor: chart.GetAlternateColor(i), - }, - XValues: thresholds, - YValues: counts[i], - } - } - xaxis := &chart.XAxis{ - Name: "Dust theshold", - Style: chart.Style{}, - ValueFormatter: func(v interface{}) string { - return fmt.Sprintf("%d wei", int(v.(float64))) - }, - GridMajorStyle: chart.Style{ - - StrokeColor: chart.DefaultStrokeColor, - StrokeWidth: 1.0, - }, - Range: &chart.ContinuousRange{ - Min: thresholds[0], - Max: thresholds[len(thresholds)-1], - }, - Ticks: []chart.Tick{ - {Value: 0.0, Label: "0"}, - {Value: 1.0, Label: "wei"}, - {Value: 10.0, Label: "10"}, - {Value: 100.0, Label: "100"}, - {Value: 1e3, Label: "1e3"}, - {Value: 1e4, Label: "1e4"}, - {Value: 1e5, Label: "1e5"}, - {Value: 1e6, Label: "1e6"}, - {Value: 1e7, Label: "1e7"}, - {Value: 1e8, Label: "1e8"}, - {Value: 1e9, Label: "1e9"}, - {Value: 1e10, Label: "1e10"}, - //{1e15, "finney"}, - //{1e18, "ether"}, - }, - } - - graph3 := chart.Chart{ - Width: 1280, - Height: 720, - Background: chart.Style{ - Padding: chart.Box{ - Top: 50, - }, - }, - XAxis: *xaxis, - YAxis: chart.YAxis{ - Name: "Node count", - NameStyle: chart.Shown(), - Style: chart.Shown(), - TickStyle: chart.Style{ - TextRotationDegrees: 45.0, - }, - ValueFormatter: func(v interface{}) string { - return fmt.Sprintf("%dm", int(v.(float64)/1e6)) - }, - GridMajorStyle: chart.Style{ - - StrokeColor: chart.DefaultStrokeColor, - StrokeWidth: 1.0, - }, - }, - Series: []chart.Series{ - shortsSeries, - }, - } - graph3.Elements = []chart.Renderable{chart.LegendThin(&graph3)} - buffer := bytes.NewBuffer([]byte{}) - err := graph3.Render(chart.PNG, buffer) - tool.Check(err) - err = os.WriteFile("chart3.png", buffer.Bytes(), 0644) - tool.Check(err) - graph4 := chart.Chart{ - Width: 1280, - Height: 720, - Background: chart.Style{ - Padding: chart.Box{ - Top: 50, - }, - }, - XAxis: *xaxis, - YAxis: chart.YAxis{ - Name: "Node count", - NameStyle: chart.Shown(), - Style: chart.Shown(), - TickStyle: chart.Style{ - TextRotationDegrees: 45.0, - }, - ValueFormatter: func(v interface{}) string { - return fmt.Sprintf("%.2fm", v.(float64)/1e6) - }, - GridMajorStyle: chart.Style{ - - StrokeColor: chart.DefaultStrokeColor, - StrokeWidth: 1.0, - }, - }, - Series: []chart.Series{ - countSeries[2], - countSeries[3], - }, - } - graph4.Elements = []chart.Renderable{chart.LegendThin(&graph4)} - buffer = bytes.NewBuffer([]byte{}) - err = graph4.Render(chart.PNG, buffer) - tool.Check(err) - err = os.WriteFile("chart4.png", buffer.Bytes(), 0644) - tool.Check(err) - graph5 := chart.Chart{ - Width: 1280, - Height: 720, - Background: chart.Style{ - Padding: chart.Box{ - Top: 50, - }, - }, - XAxis: *xaxis, - YAxis: chart.YAxis{ - Name: "Node count", - NameStyle: chart.Shown(), - Style: chart.Shown(), - TickStyle: chart.Style{ - TextRotationDegrees: 45.0, - }, - ValueFormatter: func(v interface{}) string { - return fmt.Sprintf("%.2fk", v.(float64)/1e3) - }, - GridMajorStyle: chart.Style{ - - StrokeColor: chart.DefaultStrokeColor, - StrokeWidth: 1.0, - }, - }, - Series: []chart.Series{ - countSeries[4], - countSeries[5], - countSeries[6], - countSeries[7], - countSeries[8], - countSeries[9], - countSeries[10], - countSeries[11], - countSeries[12], - countSeries[13], - countSeries[14], - countSeries[15], - countSeries[16], - }, - } - graph5.Elements = []chart.Renderable{chart.LegendThin(&graph5)} - buffer = 
bytes.NewBuffer([]byte{}) - err = graph5.Render(chart.PNG, buffer) - tool.Check(err) - err = os.WriteFile("chart5.png", buffer.Bytes(), 0644) - tool.Check(err) -} - func dbSlice(chaindata string, bucket string, prefix []byte) { db := mdbx.MustOpen(chaindata) defer db.Close() @@ -578,55 +84,6 @@ func dbSlice(chaindata string, bucket string, prefix []byte) { } } -func hashFile() { - f, err := os.Open("/Users/alexeyakhunov/mygit/go-ethereum/geth.log") - tool.Check(err) - defer f.Close() - w, err := os.Create("/Users/alexeyakhunov/mygit/go-ethereum/geth_read.log") - tool.Check(err) - defer w.Close() - scanner := bufio.NewScanner(f) - count := 0 - for scanner.Scan() { - line := scanner.Text() - if strings.HasPrefix(line, "ResolveWithDb") || strings.HasPrefix(line, "Error") || - strings.HasPrefix(line, "0000000000000000000000000000000000000000000000000000000000000000") || - strings.HasPrefix(line, "ERROR") || strings.HasPrefix(line, "tc{") { - fmt.Printf("%d %s\n", count, line) - count++ - } else if count == 66 { - w.WriteString(line) - w.WriteString("\n") - } - } - fmt.Printf("%d lines scanned\n", count) -} - -func rlpIndices() { - keybuf := new(bytes.Buffer) - for i := 0; i < 512; i++ { - keybuf.Reset() - rlp.Encode(keybuf, uint(i)) - fmt.Printf("Encoding of %d is %x\n", i, keybuf.Bytes()) - } -} - -func printFullNodeRLPs() { - trie.FullNode1() - trie.FullNode2() - trie.FullNode3() - trie.FullNode4() - trie.ShortNode1() - trie.ShortNode2() - trie.Hash1() - trie.Hash2() - trie.Hash3() - trie.Hash4() - trie.Hash5() - trie.Hash6() - trie.Hash7() -} - // Searches 1000 blocks from the given one to try to find the one with the given state root hash func testBlockHashes(chaindata string, block int, stateRoot common.Hash) { ethDb := mdbx.MustOpen(chaindata) @@ -695,28 +152,6 @@ func printTxHashes(chaindata string, block uint64) error { return nil } -func readTrie(filename string) *trie.Trie { - f, err := os.Open(filename) - tool.Check(err) - defer f.Close() - t, err := trie.Load(f) - tool.Check(err) - return t -} - -func invTree(wrong, right, diff string, name string) { - fmt.Printf("Reading trie...\n") - t1 := readTrie(fmt.Sprintf("%s_%s.txt", wrong, name)) - fmt.Printf("Root hash: %x\n", t1.Hash()) - fmt.Printf("Reading trie 2...\n") - t2 := readTrie(fmt.Sprintf("%s_%s.txt", right, name)) - fmt.Printf("Root hash: %x\n", t2.Hash()) - c, err := os.Create(fmt.Sprintf("%s_%s.txt", diff, name)) - tool.Check(err) - defer c.Close() - t1.PrintDiff(t2, c) -} - func readAccount(chaindata string, account common.Address) error { db := mdbx.MustOpen(chaindata) defer db.Close() @@ -854,663 +289,6 @@ func printBucket(chaindata string) { } } -func ValidateTxLookups2(chaindata string) { - db := mdbx.MustOpen(chaindata) - defer db.Close() - startTime := time.Now() - sigs := make(chan os.Signal, 1) - interruptCh := make(chan bool, 1) - signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) - go func() { - <-sigs - interruptCh <- true - }() - var blockNum uint64 = 1 - validateTxLookups2(db, blockNum, interruptCh) - log.Info("All done", "duration", time.Since(startTime)) -} - -func validateTxLookups2(db kv.RwDB, startBlock uint64, interruptCh chan bool) { - tx, err := db.BeginRo(context.Background()) - if err != nil { - panic(err) - } - defer tx.Rollback() - blockNum := startBlock - iterations := 0 - var interrupt bool - // Validation Process - blockBytes := big.NewInt(0) - for !interrupt { - blockHash, err := rawdb.ReadCanonicalHash(tx, blockNum) - tool.Check(err) - body := rawdb.ReadCanonicalBodyWithTransactions(tx, 
blockHash, blockNum) - - if body == nil { - break - } - - select { - case interrupt = <-interruptCh: - log.Info("interrupted, please wait for cleanup...") - default: - } - blockBytes.SetUint64(blockNum) - bn := blockBytes.Bytes() - - for _, txn := range body.Transactions { - val, err := tx.GetOne(kv.TxLookup, txn.Hash().Bytes()) - iterations++ - if iterations%100000 == 0 { - log.Info("Validated", "entries", iterations, "number", blockNum) - } - if !bytes.Equal(val, bn) { - tool.Check(err) - panic(fmt.Sprintf("Validation process failed(%d). Expected %b, got %b", iterations, bn, val)) - } - } - blockNum++ - } -} - -type Receiver struct { - defaultReceiver *trie.RootHashAggregator - accountMap map[string]*accounts.Account - storageMap map[string][]byte - unfurlList []string - currentIdx int -} - -func (r *Receiver) Root() common.Hash { panic("don't call me") } -func (r *Receiver) Receive( - itemType trie.StreamItem, - accountKey []byte, - storageKey []byte, - accountValue *accounts.Account, - storageValue []byte, - hash []byte, - hasTree bool, - cutoff int, -) error { - for r.currentIdx < len(r.unfurlList) { - ks := r.unfurlList[r.currentIdx] - k := []byte(ks) - var c int - switch itemType { - case trie.StorageStreamItem, trie.SHashStreamItem: - c = bytes.Compare(k, storageKey) - case trie.AccountStreamItem, trie.AHashStreamItem: - c = bytes.Compare(k, accountKey) - case trie.CutoffStreamItem: - c = -1 - } - if c > 0 { - return r.defaultReceiver.Receive(itemType, accountKey, storageKey, accountValue, storageValue, hash, hasTree, cutoff) - } - if len(k) > common.HashLength { - v := r.storageMap[ks] - if len(v) > 0 { - if err := r.defaultReceiver.Receive(trie.StorageStreamItem, nil, k, nil, v, nil, hasTree, 0); err != nil { - return err - } - } - } else { - v := r.accountMap[ks] - if v != nil { - if err := r.defaultReceiver.Receive(trie.AccountStreamItem, k, nil, v, nil, nil, hasTree, 0); err != nil { - return err - } - } - } - r.currentIdx++ - if c == 0 { - return nil - } - } - // We ran out of modifications, simply pass through - return r.defaultReceiver.Receive(itemType, accountKey, storageKey, accountValue, storageValue, hash, hasTree, cutoff) -} - -func (r *Receiver) Result() trie.SubTries { - return r.defaultReceiver.Result() -} - -func regenerate(chaindata string) error { - db := mdbx.MustOpen(chaindata) - defer db.Close() - tx, err := db.BeginRw(context.Background()) - if err != nil { - return err - } - defer tx.Rollback() - - tool.Check(stagedsync.ResetIH(tx)) - to, err := stages.GetStageProgress(tx, stages.HashState) - if err != nil { - return err - } - hash, err := rawdb.ReadCanonicalHash(tx, to) - if err != nil { - return err - } - syncHeadHeader := rawdb.ReadHeader(tx, hash, to) - expectedRootHash := syncHeadHeader.Root - blockReader := snapshotsync.NewBlockReader() - _, err = stagedsync.RegenerateIntermediateHashes("", tx, stagedsync.StageTrieCfg(db, true, true, "", blockReader), expectedRootHash, nil) - tool.Check(err) - log.Info("Regeneration ended") - return tx.Commit() -} - -func testGetProof(chaindata string, address common.Address, rewind int, regen bool) error { - if regen { - if err := regenerate(chaindata); err != nil { - return err - } - } - storageKeys := []string{} - var m runtime.MemStats - libcommon.ReadMemStats(&m) - db := mdbx.MustOpen(chaindata) - defer db.Close() - tx, err1 := db.BeginRo(context.Background()) - if err1 != nil { - return err1 - } - defer tx.Rollback() - - headHash := rawdb.ReadHeadBlockHash(tx) - headNumber := rawdb.ReadHeaderNumber(tx, headHash) 
- block := *headNumber - uint64(rewind) - log.Info("GetProof", "address", address, "storage keys", len(storageKeys), "head", *headNumber, "block", block, - "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys)) - - accountMap := make(map[string]*accounts.Account) - - if err := changeset.ForRange(tx, kv.AccountChangeSet, block+1, *headNumber+1, func(blockN uint64, address, v []byte) error { - var addrHash, err = common.HashData(address) - if err != nil { - return err - } - k := addrHash[:] - - if _, ok := accountMap[string(k)]; !ok { - if len(v) > 0 { - var a accounts.Account - if innerErr := a.DecodeForStorage(v); innerErr != nil { - return innerErr - } - accountMap[string(k)] = &a - } else { - accountMap[string(k)] = nil - } - } - return nil - }); err != nil { - return err - } - libcommon.ReadMemStats(&m) - log.Info("Constructed account map", "size", len(accountMap), - "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys)) - storageMap := make(map[string][]byte) - if err := changeset.ForRange(tx, kv.StorageChangeSet, block+1, *headNumber+1, func(blockN uint64, address, v []byte) error { - var addrHash, err = common.HashData(address) - if err != nil { - return err - } - k := addrHash[:] - if _, ok := storageMap[string(k)]; !ok { - storageMap[string(k)] = v - } - return nil - }); err != nil { - return err - } - libcommon.ReadMemStats(&m) - log.Info("Constructed storage map", "size", len(storageMap), - "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys)) - var unfurlList = make([]string, len(accountMap)+len(storageMap)) - unfurl := trie.NewRetainList(0) - i := 0 - for ks, acc := range accountMap { - unfurlList[i] = ks - i++ - unfurl.AddKey([]byte(ks)) - if acc != nil { - // Fill the code hashes - if acc.Incarnation > 0 && acc.IsEmptyCodeHash() { - if codeHash, err1 := tx.GetOne(kv.ContractCode, dbutils.GenerateStoragePrefix([]byte(ks), acc.Incarnation)); err1 == nil { - copy(acc.CodeHash[:], codeHash) - } else { - return err1 - } - } - } - } - for ks := range storageMap { - unfurlList[i] = ks - i++ - unfurl.AddKey([]byte(ks)) - } - rl := trie.NewRetainList(0) - addrHash, err := common.HashData(address[:]) - if err != nil { - return err - } - rl.AddKey(addrHash[:]) - unfurl.AddKey(addrHash[:]) - for _, key := range storageKeys { - keyAsHash := common.HexToHash(key) - if keyHash, err1 := common.HashData(keyAsHash[:]); err1 == nil { - //TODO Add incarnation in the middle of this - trieKey := append(addrHash[:], keyHash[:]...) 
- rl.AddKey(trieKey) - unfurl.AddKey(trieKey) - } else { - return err1 - } - } - slices.Sort(unfurlList) - libcommon.ReadMemStats(&m) - log.Info("Constructed account unfurl lists", - "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys)) - - loader := trie.NewFlatDBTrieLoader("checkRoots") - if err = loader.Reset(unfurl, nil, nil, false); err != nil { - panic(err) - } - _, err = loader.CalcTrieRoot(tx, nil, nil) - if err != nil { - return err - } - r := &Receiver{defaultReceiver: trie.NewRootHashAggregator(), unfurlList: unfurlList, accountMap: accountMap, storageMap: storageMap} - r.defaultReceiver.Reset(nil, nil /* HashCollector */, false) - loader.SetStreamReceiver(r) - root, err := loader.CalcTrieRoot(tx, nil, nil) - if err != nil { - return err - } - libcommon.ReadMemStats(&m) - log.Info("Loaded subtries", - "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys)) - hash, err := rawdb.ReadCanonicalHash(tx, block) - tool.Check(err) - header := rawdb.ReadHeader(tx, hash, block) - libcommon.ReadMemStats(&m) - log.Info("Constructed trie", - "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys)) - fmt.Printf("Resulting root: %x, expected root: %x\n", root, header.Root) - return nil -} - -// dumpState writes the content of current state into a file with given name -func dumpState(chaindata string, block int, name string) error { - db := mdbx.MustOpen(chaindata) - defer db.Close() - fa, err := os.Create(name + ".accounts.dat") - if err != nil { - return err - } - defer fa.Close() - wa := bufio.NewWriterSize(fa, etl.BufIOSize) - // Write out number of key/value pairs first - var countBytes [8]byte - binary.BigEndian.PutUint64(countBytes[:], 0) // TODO: Write correct number or remove - if _, err = wa.Write(countBytes[:]); err != nil { - return err - } - defer wa.Flush() - var fs, fc *os.File - if fs, err = os.Create(name + ".storage.dat"); err != nil { - return err - } - defer fs.Close() - ws := bufio.NewWriterSize(fs, etl.BufIOSize) - binary.BigEndian.PutUint64(countBytes[:], 0) // TODO: Write correct number or remove - if _, err = ws.Write(countBytes[:]); err != nil { - return err - } - defer ws.Flush() - if fc, err = os.Create(name + ".code.dat"); err != nil { - return err - } - defer fc.Close() - wc := bufio.NewWriterSize(fc, etl.BufIOSize) - binary.BigEndian.PutUint64(countBytes[:], 0) // TODO: Write correct number or remove - if _, err = wc.Write(countBytes[:]); err != nil { - return err - } - defer wc.Flush() - tx, err := db.BeginRo(context.Background()) - if err != nil { - return err - } - defer tx.Rollback() - var sc kv.Cursor - if sc, err = tx.Cursor(kv.PlainState); err != nil { - return err - } - defer sc.Close() - var cc kv.Cursor - if cc, err = tx.Cursor(kv.PlainContractCode); err != nil { - return err - } - defer cc.Close() - i := 0 - numBuf := make([]byte, binary.MaxVarintLen64) - k, v, e := sc.First() - var a accounts.Account - var addr common.Address - var ks [20 + 32]byte - for ; k != nil && e == nil; k, v, e = sc.Next() { - if len(k) == 20 { - n := binary.PutUvarint(numBuf, uint64(len(k))) - if _, err = wa.Write(numBuf[:n]); err != nil { - return err - } - if _, err = wa.Write(k); err != nil { - return err - } - n = binary.PutUvarint(numBuf, uint64(len(v))) - if _, err = wa.Write(numBuf[:n]); err != nil { - return err - } - if len(v) > 0 { - if _, err = wa.Write(v); err != nil { - return err - } - } - if err = a.DecodeForStorage(v); err != nil { - return err - } - if a.CodeHash != trie.EmptyCodeHash { - code, err := 
tx.GetOne(kv.Code, a.CodeHash[:]) - if err != nil { - return err - } - if len(code) != 0 { - n = binary.PutUvarint(numBuf, uint64(len(k))) - if _, err = wc.Write(numBuf[:n]); err != nil { - return err - } - if _, err = wc.Write(k); err != nil { - return err - } - n = binary.PutUvarint(numBuf, uint64(len(code))) - if _, err = wc.Write(numBuf[:n]); err != nil { - return err - } - if len(code) > 0 { - if _, err = wc.Write(code); err != nil { - return err - } - } - i += 2 - if i%10_000_000 == 0 { - log.Info("Written into file", "millions", i/1_000_000) - } - } - } - copy(addr[:], k) - i += 2 - if i%10_000_000 == 0 { - log.Info("Written into file", "millions", i/1_000_000) - } - } - if len(k) == 60 { - inc := binary.BigEndian.Uint64(k[20:]) - if bytes.Equal(k[:20], addr[:]) && inc == a.Incarnation { - copy(ks[:], k[:20]) - copy(ks[20:], k[20+8:]) - n := binary.PutUvarint(numBuf, uint64(len(ks))) - if _, err = ws.Write(numBuf[:n]); err != nil { - return err - } - if _, err = ws.Write(ks[:]); err != nil { - return err - } - n = binary.PutUvarint(numBuf, uint64(len(v))) - if _, err = ws.Write(numBuf[:n]); err != nil { - return err - } - if len(v) > 0 { - if _, err = ws.Write(v); err != nil { - return err - } - } - i += 2 - if i%10_000_000 == 0 { - log.Info("Written into file", "millions", i/1_000_000) - } - } - } - } - if e != nil { - return e - } - return nil -} - -func mphf(chaindata string, block int) error { - // Create a file to compress if it does not exist already - statefile := "statedump.dat" - if _, err := os.Stat(statefile); err != nil { - if !os.IsNotExist(err) { - return fmt.Errorf("not sure if statedump.dat exists: %w", err) - } - if err = dumpState(chaindata, int(block), "statefile"); err != nil { - return err - } - } - var rs *recsplit.RecSplit - f, err := os.Open(statefile) - if err != nil { - return err - } - r := bufio.NewReaderSize(f, etl.BufIOSize) - defer f.Close() - var countBuf [8]byte - if _, err = io.ReadFull(r, countBuf[:]); err != nil { - return err - } - count := binary.BigEndian.Uint64(countBuf[:]) - if rs, err = recsplit.NewRecSplit(recsplit.RecSplitArgs{ - KeyCount: int(count), - BucketSize: 2000, - Salt: 1, - LeafSize: 8, - TmpDir: "", - StartSeed: []uint64{0x106393c187cae21a, 0x6453cec3f7376937, 0x643e521ddbd2be98, 0x3740c6412f6572cb, 0x717d47562f1ce470, 0x4cd6eb4c63befb7c, 0x9bfd8c5e18c8da73, - 0x082f20e10092a9a3, 0x2ada2ce68d21defc, 0xe33cb4f3e7c6466b, 0x3980be458c509c59, 0xc466fd9584828e8c, 0x45f0aabe1a61ede6, 0xf6e7b8b33ad9b98d, - 0x4ef95e25f4b4983d, 0x81175195173b92d3, 0x4e50927d8dd15978, 0x1ea2099d1fafae7f, 0x425c8a06fbaaa815, 0xcd4216006c74052a}, - IndexFile: "state.idx", - }); err != nil { - return err - } - var buf [256]byte - l, e := r.ReadByte() - i := 0 - for ; e == nil; l, e = r.ReadByte() { - if _, e = io.ReadFull(r, buf[:l]); e != nil { - return e - } - if i%2 == 0 { - // It is key, we skip the values here - if err := rs.AddKey(buf[:l], uint64(i/2)); err != nil { - return err - } - } - i++ - if i == int(count*2) { - break - } - } - if e != nil && !errors.Is(e, io.EOF) { - return e - } - start := time.Now() - log.Info("Building recsplit...") - if err = rs.Build(); err != nil { - return err - } - s1, s2 := rs.Stats() - log.Info("Done", "time", time.Since(start), "s1", s1, "s2", s2) - idx := recsplit.MustOpen("state.idx") - defer idx.Close() - log.Info("Testing bijection") - bitCount := (count + 63) / 64 - bits := make([]uint64, bitCount) - if _, err = f.Seek(8, 0); err != nil { - return err - } - r = bufio.NewReaderSize(f, etl.BufIOSize) - l, e = 
r.ReadByte() - i = 0 - var lookupTime time.Duration - idxReader := recsplit.NewIndexReader(idx) - for ; e == nil; l, e = r.ReadByte() { - if _, e = io.ReadFull(r, buf[:l]); e != nil { - return e - } - if i%2 == 0 { - // It is key, we skip the values here - start := time.Now() - offset := idxReader.Lookup(buf[:l]) - lookupTime += time.Since(start) - if offset >= count { - return fmt.Errorf("idx %d >= count %d", offset, count) - } - mask := uint64(1) << (offset & 63) - if bits[offset>>6]&mask != 0 { - return fmt.Errorf("no bijection key idx=%d, lookup up idx = %d", i, offset) - } - bits[offset>>6] |= mask - } - i++ - if i == int(count*2) { - break - } - } - if e != nil && !errors.Is(e, io.EOF) { - return e - } - log.Info("Average lookup time", "per key", time.Duration(uint64(lookupTime)/count)) - return nil -} - -// genstate generates statedump.dat file for testing -func genstate() error { - f, err := os.Create("statedump.dat") - if err != nil { - return err - } - defer f.Close() - w := bufio.NewWriterSize(f, etl.BufIOSize) - defer w.Flush() - var count uint64 = 25 - var countBuf [8]byte - binary.BigEndian.PutUint64(countBuf[:], count) - if _, err = w.Write(countBuf[:]); err != nil { - return err - } - for i := 0; i < 5; i++ { - for j := 0; j < 5; j++ { - key := fmt.Sprintf("addr%dxlocation%d", i, j) - val := "value" - if err = w.WriteByte(byte(len(key))); err != nil { - return err - } - if _, err = w.Write([]byte(key)); err != nil { - return err - } - if err = w.WriteByte(byte(len(val))); err != nil { - return err - } - if _, err = w.Write([]byte(val)); err != nil { - return err - } - } - } - return nil -} - -func compress1(fileName, segmentFileName string) error { - compressor, err := compress.NewCompressor(context.Background(), "", segmentFileName, "", compress.MinPatternScore, runtime.GOMAXPROCS(-1), log.LvlDebug) - if err != nil { - return err - } - defer compressor.Close() - if err := compress.ReadSimpleFile(fileName, func(v []byte) error { - return compressor.AddWord(v) - }); err != nil { - return err - } - return compressor.Compress() -} -func decompress(name string) error { - return parallelcompress.Decompress("hack", name+".seg", name+".decompressed.dat") -} - -func changeSetStats(chaindata string, block1, block2 uint64) error { - db := mdbx.MustOpen(chaindata) - defer db.Close() - - fmt.Printf("State stats\n") - stAccounts := 0 - stStorage := 0 - if err := db.View(context.Background(), func(tx kv.Tx) error { - c, err := tx.Cursor(kv.PlainState) - if err != nil { - return err - } - k, _, e := c.First() - for ; k != nil && e == nil; k, _, e = c.Next() { - if len(k) > 28 { - stStorage++ - } else { - stAccounts++ - } - if (stStorage+stAccounts)%100000 == 0 { - fmt.Printf("State records: %d\n", stStorage+stAccounts) - } - } - return e - }); err != nil { - return err - } - fmt.Printf("stAccounts = %d, stStorage = %d\n", stAccounts, stStorage) - fmt.Printf("Changeset stats from %d to %d\n", block1, block2) - accounts := make(map[string]struct{}) - tx, err1 := db.BeginRw(context.Background()) - if err1 != nil { - return err1 - } - defer tx.Rollback() - if err := changeset.ForRange(tx, kv.AccountChangeSet, block1, block2, func(blockN uint64, k, v []byte) error { - if (blockN-block1)%100000 == 0 { - fmt.Printf("at the block %d for accounts, booster size: %d\n", blockN, len(accounts)) - } - accounts[string(common.CopyBytes(k))] = struct{}{} - return nil - }); err != nil { - return err - } - - storage := make(map[string]struct{}) - if err := changeset.ForRange(tx, kv.StorageChangeSet, block1, 
block2, func(blockN uint64, k, v []byte) error { - if (blockN-block1)%100000 == 0 { - fmt.Printf("at the block %d for accounts, booster size: %d\n", blockN, len(accounts)) - } - storage[string(common.CopyBytes(k))] = struct{}{} - return nil - }); err != nil { - return err - } - - fmt.Printf("accounts changed: %d, storage changed: %d\n", len(accounts), len(storage)) - return nil -} - func searchChangeSet(chaindata string, key []byte, block uint64) error { fmt.Printf("Searching changesets\n") db := mdbx.MustOpen(chaindata) @@ -1553,42 +331,6 @@ func searchStorageChangeSet(chaindata string, key []byte, block uint64) error { return nil } -func supply(chaindata string) error { - startTime := time.Now() - db := mdbx.MustOpen(chaindata) - defer db.Close() - count := 0 - supply := uint256.NewInt(0) - var a accounts.Account - if err := db.View(context.Background(), func(tx kv.Tx) error { - c, err := tx.Cursor(kv.PlainState) - if err != nil { - return err - } - for k, v, err := c.First(); k != nil; k, v, err = c.Next() { - if err != nil { - return err - } - if len(k) != 20 { - continue - } - if err1 := a.DecodeForStorage(v); err1 != nil { - return err1 - } - count++ - supply.Add(supply, &a.Balance) - if count%100000 == 0 { - fmt.Printf("Processed %dK account records\n", count/1000) - } - } - return nil - }); err != nil { - return err - } - fmt.Printf("Total accounts: %d, supply: %d, took: %s\n", count, supply, time.Since(startTime)) - return nil -} - func extractCode(chaindata string) error { db := mdbx.MustOpen(chaindata) defer db.Close() @@ -1646,97 +388,6 @@ func iterateOverCode(chaindata string) error { return nil } -func mint(chaindata string, block uint64) error { - f, err := os.Create("mint.csv") - if err != nil { - return err - } - defer f.Close() - w := bufio.NewWriter(f) - defer w.Flush() - db := mdbx.MustOpen(chaindata) - defer db.Close() - tx, err := db.BeginRw(context.Background()) - if err != nil { - return err - } - defer tx.Rollback() - //chiTokenAddr = common.HexToAddress("0x0000000000004946c0e9F43F4Dee607b0eF1fA1c") - //mintFuncPrefix = common.FromHex("0xa0712d68") - var gwei uint256.Int - gwei.SetUint64(1000000000) - blockEncoded := dbutils.EncodeBlockNumber(block) - canonical := make(map[common.Hash]struct{}) - c, err := tx.Cursor(kv.HeaderCanonical) - if err != nil { - return err - } - - // This is a mapping of contractAddress + incarnation => CodeHash - for k, v, err := c.Seek(blockEncoded); k != nil; k, v, err = c.Next() { - if err != nil { - return err - } - // Skip non relevant records - canonical[common.BytesToHash(v)] = struct{}{} - if len(canonical)%100_000 == 0 { - log.Info("Read canonical hashes", "count", len(canonical)) - } - } - log.Info("Read canonical hashes", "count", len(canonical)) - c, err = tx.Cursor(kv.BlockBody) - if err != nil { - return err - } - var prevBlock uint64 - var burntGas uint64 - for k, _, err := c.Seek(blockEncoded); k != nil; k, _, err = c.Next() { - if err != nil { - return err - } - blockNumber := binary.BigEndian.Uint64(k[:8]) - blockHash := common.BytesToHash(k[8:]) - if _, isCanonical := canonical[blockHash]; !isCanonical { - continue - } - if blockNumber != prevBlock && blockNumber != prevBlock+1 { - fmt.Printf("Gap [%d-%d]\n", prevBlock, blockNumber-1) - } - prevBlock = blockNumber - body := rawdb.ReadCanonicalBodyWithTransactions(tx, blockHash, blockNumber) - header := rawdb.ReadHeader(tx, blockHash, blockNumber) - senders, errSenders := rawdb.ReadSenders(tx, blockHash, blockNumber) - if errSenders != nil { - return errSenders - } - 
var ethSpent uint256.Int - var ethSpentTotal uint256.Int - var totalGas uint256.Int - count := 0 - for i, tx := range body.Transactions { - ethSpent.SetUint64(tx.GetGas()) - totalGas.Add(&totalGas, ðSpent) - if senders[i] == header.Coinbase { - continue // Mining pool sending payout potentially with abnormally low fee, skip - } - ethSpent.Mul(ðSpent, tx.GetPrice()) - ethSpentTotal.Add(ðSpentTotal, ðSpent) - count++ - } - if count > 0 { - ethSpentTotal.Div(ðSpentTotal, &totalGas) - ethSpentTotal.Div(ðSpentTotal, &gwei) - gasPrice := ethSpentTotal.Uint64() - burntGas += header.GasUsed - fmt.Fprintf(w, "%d, %d\n", burntGas, gasPrice) - } - if blockNumber%100_000 == 0 { - log.Info("Processed", "blocks", blockNumber) - } - } - return tx.Commit() -} - func getBlockTotal(tx kv.Tx, blockFrom uint64, blockTotalOrOffset int64) uint64 { if blockTotalOrOffset > 0 { return uint64(blockTotalOrOffset) @@ -1856,29 +507,6 @@ func extractBodies(chaindata string, block uint64) error { return nil } -func fixUnwind(chaindata string) error { - contractAddr := common.HexToAddress("0x577a32aa9c40cf4266e49fc1e44c749c356309bd") - db := mdbx.MustOpen(chaindata) - defer db.Close() - tool.Check(db.Update(context.Background(), func(tx kv.RwTx) error { - i, err := tx.GetOne(kv.IncarnationMap, contractAddr[:]) - if err != nil { - return err - } else if i == nil { - fmt.Print("Not found\n") - var b [8]byte - binary.BigEndian.PutUint64(b[:], 1) - if err = tx.Put(kv.IncarnationMap, contractAddr[:], b[:]); err != nil { - return err - } - } else { - fmt.Printf("Inc: %x\n", i) - } - return nil - })) - return nil -} - func snapSizes(chaindata string) error { db := mdbx.MustOpen(chaindata) defer db.Close() @@ -2641,6 +1269,40 @@ func findPrefix(chaindata string) error { return nil } +func readEf(file string, addr []byte) error { + datPath := file + ".dat" + idxPath := file + ".idx" + index, err := recsplit.OpenIndex(idxPath) + if err != nil { + return err + } + defer index.Close() + decomp, err := compress.NewDecompressor(datPath) + if err != nil { + return err + } + defer decomp.Close() + indexReader := recsplit.NewIndexReader(index) + offset := indexReader.Lookup(addr) + g := decomp.MakeGetter() + g.Reset(offset) + word, _ := g.Next(nil) + fmt.Printf("%x\n", word) + word, _ = g.NextUncompressed() + ef, _ := eliasfano32.ReadEliasFano(word) + it := ef.Iterator() + line := 0 + for it.HasNext() { + fmt.Printf("%d ", it.Next()) + line++ + if line%20 == 0 { + fmt.Printf("\n") + } + } + fmt.Printf("\n") + return nil +} + func main() { debug.RaiseFdLimit() flag.Parse() @@ -2669,18 +1331,9 @@ func main() { case "cfg": flow.TestGenCfg() - case "bucketStats": - err = bucketStats(*chaindata) - - case "syncChart": - mychart() - case "testBlockHashes": testBlockHashes(*chaindata, *block, common.HexToHash(*hash)) - case "invTree": - invTree("root", "right", "diff", *name) - case "readAccount": if err := readAccount(*chaindata, common.HexToAddress(*account)); err != nil { fmt.Printf("Error: %v\n", err) @@ -2698,39 +1351,21 @@ func main() { case "bucket": printBucket(*chaindata) - case "val-tx-lookup-2": - ValidateTxLookups2(*chaindata) - case "slice": dbSlice(*chaindata, *bucket, common.FromHex(*hash)) - case "getProof": - err = testGetProof(*chaindata, common.HexToAddress(*account), *rewind, false) - - case "regenerateIH": - err = regenerate(*chaindata) - case "searchChangeSet": err = searchChangeSet(*chaindata, common.FromHex(*hash), uint64(*block)) case "searchStorageChangeSet": err = searchStorageChangeSet(*chaindata, 
common.FromHex(*hash), uint64(*block)) - case "changeSetStats": - err = changeSetStats(*chaindata, uint64(*block), uint64(*block)+uint64(*rewind)) - - case "supply": - err = supply(*chaindata) - case "extractCode": err = extractCode(*chaindata) case "iterateOverCode": err = iterateOverCode(*chaindata) - case "mint": - err = mint(*chaindata, uint64(*block)) - case "extractHeaders": err = extractHeaders(*chaindata, uint64(*block), int64(*blockTotal)) @@ -2746,33 +1381,15 @@ func main() { case "extractBodies": err = extractBodies(*chaindata, uint64(*block)) - case "fixUnwind": - err = fixUnwind(*chaindata) - case "repairCurrent": repairCurrent() - case "printFullNodeRLPs": - printFullNodeRLPs() - - case "rlpIndices": - rlpIndices() - - case "hashFile": - hashFile() - - case "trieChart": - trieChart() - case "printTxHashes": printTxHashes(*chaindata, uint64(*block)) case "snapSizes": err = snapSizes(*chaindata) - case "mphf": - err = mphf(*chaindata, *block) - case "readCallTraces": err = readCallTraces(*chaindata, uint64(*block)) @@ -2802,14 +1419,6 @@ func main() { case "devTx": err = devTx(*chaindata) - case "dumpState": - err = dumpState(*chaindata, int(*block), *name) - case "compress": - err = compress1(*name, *name) - case "decompress": - err = decompress(*name) - case "genstate": - err = genstate() case "mainnetGenesis": err = mainnetGenesis() case "junkdb": @@ -2822,6 +1431,8 @@ func main() { err = chainConfig(*name) case "findPrefix": err = findPrefix(*chaindata) + case "readEf": + err = readEf(*chaindata, common.FromHex(*account)) } if err != nil { diff --git a/cmd/rpcdaemon/commands/eth_block.go b/cmd/rpcdaemon/commands/eth_block.go index 985f22256ff..811291c9cbf 100644 --- a/cmd/rpcdaemon/commands/eth_block.go +++ b/cmd/rpcdaemon/commands/eth_block.go @@ -119,7 +119,7 @@ func (api *APIImpl) CallBundle(ctx context.Context, txHashes []common.Hash, stat contractHasTEVM = ethdb.GetHasTEVM(tx) } - blockCtx, txCtx := transactions.GetEvmContext(firstMsg, header, stateBlockNumberOrHash.RequireCanonical, tx, contractHasTEVM) + blockCtx, txCtx := transactions.GetEvmContext(firstMsg, header, stateBlockNumberOrHash.RequireCanonical, tx, contractHasTEVM, api._blockReader) evm := vm.NewEVM(blockCtx, txCtx, st, chainConfig, vm.Config{Debug: false}) timeoutMilliSeconds := int64(5000) diff --git a/cmd/rpcdaemon/commands/eth_call.go b/cmd/rpcdaemon/commands/eth_call.go index ce09c837479..a015b1e94b2 100644 --- a/cmd/rpcdaemon/commands/eth_call.go +++ b/cmd/rpcdaemon/commands/eth_call.go @@ -63,7 +63,7 @@ func (api *APIImpl) Call(ctx context.Context, args ethapi.CallArgs, blockNrOrHas return nil, nil } - result, err := transactions.DoCall(ctx, args, tx, blockNrOrHash, block, overrides, api.GasCap, chainConfig, api.filters, api.stateCache, contractHasTEVM) + result, err := transactions.DoCall(ctx, args, tx, blockNrOrHash, block, overrides, api.GasCap, chainConfig, api.filters, api.stateCache, contractHasTEVM, api._blockReader) if err != nil { return nil, err } @@ -234,7 +234,7 @@ func (api *APIImpl) EstimateGas(ctx context.Context, argsOrNil *ethapi.CallArgs, } result, err := transactions.DoCall(ctx, args, dbtx, numOrHash, block, nil, - api.GasCap, chainConfig, api.filters, api.stateCache, contractHasTEVM) + api.GasCap, chainConfig, api.filters, api.stateCache, contractHasTEVM, api._blockReader) if err != nil { if errors.Is(err, core.ErrIntrinsicGas) { // Special case, raise gas limit @@ -400,7 +400,7 @@ func (api *APIImpl) CreateAccessList(ctx context.Context, args ethapi.CallArgs, // Apply the 
transaction with the access list tracer tracer := logger.NewAccessListTracer(accessList, *args.From, to, precompiles) config := vm.Config{Tracer: tracer, Debug: true, NoBaseFee: true} - blockCtx, txCtx := transactions.GetEvmContext(msg, header, bNrOrHash.RequireCanonical, tx, contractHasTEVM) + blockCtx, txCtx := transactions.GetEvmContext(msg, header, bNrOrHash.RequireCanonical, tx, contractHasTEVM, api._blockReader) evm := vm.NewEVM(blockCtx, txCtx, state, chainConfig, config) gp := new(core.GasPool).AddGas(msg.Gas()) diff --git a/cmd/rpcdaemon/commands/trace_adhoc.go b/cmd/rpcdaemon/commands/trace_adhoc.go index aac18010a9c..6888b0a17e1 100644 --- a/cmd/rpcdaemon/commands/trace_adhoc.go +++ b/cmd/rpcdaemon/commands/trace_adhoc.go @@ -940,7 +940,7 @@ func (api *TraceAPIImpl) Call(ctx context.Context, args TraceCallParam, traceTyp if api.TevmEnabled { contractHasTEVM = ethdb.GetHasTEVM(tx) } - blockCtx, txCtx := transactions.GetEvmContext(msg, header, blockNrOrHash.RequireCanonical, tx, contractHasTEVM) + blockCtx, txCtx := transactions.GetEvmContext(msg, header, blockNrOrHash.RequireCanonical, tx, contractHasTEVM, api._blockReader) blockCtx.GasLimit = math.MaxUint64 blockCtx.MaxGasLimit = true @@ -1166,7 +1166,7 @@ func (api *TraceAPIImpl) doCallMany(ctx context.Context, dbtx kv.Tx, msgs []type } // Get a new instance of the EVM. - blockCtx, txCtx := transactions.GetEvmContext(msg, header, parentNrOrHash.RequireCanonical, dbtx, contractHasTEVM) + blockCtx, txCtx := transactions.GetEvmContext(msg, header, parentNrOrHash.RequireCanonical, dbtx, contractHasTEVM, api._blockReader) if useParent { blockCtx.GasLimit = math.MaxUint64 blockCtx.MaxGasLimit = true diff --git a/cmd/rpcdaemon/commands/tracing.go b/cmd/rpcdaemon/commands/tracing.go index 26312b8a78d..31ddd4522cc 100644 --- a/cmd/rpcdaemon/commands/tracing.go +++ b/cmd/rpcdaemon/commands/tracing.go @@ -235,7 +235,7 @@ func (api *PrivateDebugAPIImpl) TraceCall(ctx context.Context, args ethapi.CallA if api.TevmEnabled { contractHasTEVM = ethdb.GetHasTEVM(dbtx) } - blockCtx, txCtx := transactions.GetEvmContext(msg, header, blockNrOrHash.RequireCanonical, dbtx, contractHasTEVM) + blockCtx, txCtx := transactions.GetEvmContext(msg, header, blockNrOrHash.RequireCanonical, dbtx, contractHasTEVM, api._blockReader) // Trace the transaction and return return transactions.TraceTx(ctx, msg, blockCtx, txCtx, ibs, config, chainConfig, stream) } diff --git a/cmd/rpcdaemon/rpcservices/eth_backend.go b/cmd/rpcdaemon/rpcservices/eth_backend.go index f86aaa0515d..248345a003c 100644 --- a/cmd/rpcdaemon/rpcservices/eth_backend.go +++ b/cmd/rpcdaemon/rpcservices/eth_backend.go @@ -192,6 +192,9 @@ func (back *RemoteBackend) HeaderByHash(ctx context.Context, tx kv.Getter, hash func (back *RemoteBackend) CanonicalHash(ctx context.Context, tx kv.Getter, blockHeight uint64) (common.Hash, error) { return back.blockReader.CanonicalHash(ctx, tx, blockHeight) } +func (back *RemoteBackend) TxnByIdxInBlock(ctx context.Context, tx kv.Getter, blockNum uint64, i int) (types.Transaction, error) { + return back.blockReader.TxnByIdxInBlock(ctx, tx, blockNum, i) +} func (back *RemoteBackend) EngineNewPayloadV1(ctx context.Context, payload *types2.ExecutionPayload) (res *remote.EnginePayloadStatus, err error) { return back.remoteEthBackend.EngineNewPayloadV1(ctx, payload) diff --git a/cmd/rpcdaemon22/cli/config.go b/cmd/rpcdaemon22/cli/config.go index f82aed55956..5e5830e870d 100644 --- a/cmd/rpcdaemon22/cli/config.go +++ b/cmd/rpcdaemon22/cli/config.go @@ -28,6 +28,7 @@ 
import ( kv2 "github.com/ledgerwatch/erigon-lib/kv/mdbx" "github.com/ledgerwatch/erigon-lib/kv/remotedb" "github.com/ledgerwatch/erigon-lib/kv/remotedbserver" + libstate "github.com/ledgerwatch/erigon-lib/state" "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/cli/httpcfg" "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/health" "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/rpcservices" @@ -237,9 +238,12 @@ func RemoteServices(ctx context.Context, cfg httpcfg.HttpCfg, logger log.Logger, eth rpchelper.ApiBackend, txPool txpool.TxpoolClient, mining txpool.MiningClient, starknet *rpcservices.StarknetService, stateCache kvcache.Cache, blockReader services.FullBlockReader, - ff *rpchelper.Filters, err error) { + ff *rpchelper.Filters, + agg *libstate.Aggregator, + txNums []uint64, + err error) { if !cfg.WithDatadir && cfg.PrivateApiAddr == "" { - return nil, nil, nil, nil, nil, nil, nil, nil, ff, fmt.Errorf("either remote db or local db must be specified") + return nil, nil, nil, nil, nil, nil, nil, nil, ff, nil, nil, fmt.Errorf("either remote db or local db must be specified") } // Do not change the order of these checks. Chaindata needs to be checked first, because PrivateApiAddr has default value which is not "" @@ -250,10 +254,10 @@ func RemoteServices(ctx context.Context, cfg httpcfg.HttpCfg, logger log.Logger, limiter := make(chan struct{}, cfg.DBReadConcurrency) rwKv, err = kv2.NewMDBX(logger).RoTxsLimiter(limiter).Path(cfg.Dirs.Chaindata).Readonly().Open() if err != nil { - return nil, nil, nil, nil, nil, nil, nil, nil, ff, err + return nil, nil, nil, nil, nil, nil, nil, nil, ff, nil, nil, err } if compatErr := checkDbCompatibility(ctx, rwKv); compatErr != nil { - return nil, nil, nil, nil, nil, nil, nil, nil, ff, compatErr + return nil, nil, nil, nil, nil, nil, nil, nil, ff, nil, nil, compatErr } db = rwKv stateCache = kvcache.NewDummy() @@ -266,14 +270,14 @@ func RemoteServices(ctx context.Context, cfg httpcfg.HttpCfg, logger log.Logger, // ensure db exist tmpDb, err := kv2.NewMDBX(logger).Path(borDbPath).Label(kv.ConsensusDB).Open() if err != nil { - return nil, nil, nil, nil, nil, nil, nil, nil, ff, err + return nil, nil, nil, nil, nil, nil, nil, nil, ff, nil, nil, err } tmpDb.Close() } log.Trace("Creating consensus db", "path", borDbPath) borKv, err = kv2.NewMDBX(logger).Path(borDbPath).Label(kv.ConsensusDB).Readonly().Open() if err != nil { - return nil, nil, nil, nil, nil, nil, nil, nil, ff, err + return nil, nil, nil, nil, nil, nil, nil, nil, ff, nil, nil, err } // Skip the compatibility check, until we have a schema in erigon-lib borDb = borKv @@ -306,10 +310,10 @@ func RemoteServices(ctx context.Context, cfg httpcfg.HttpCfg, logger log.Logger, } return nil }); err != nil { - return nil, nil, nil, nil, nil, nil, nil, nil, ff, err + return nil, nil, nil, nil, nil, nil, nil, nil, ff, nil, nil, err } if cc == nil { - return nil, nil, nil, nil, nil, nil, nil, nil, ff, fmt.Errorf("chain config not found in db. Need start erigon at least once on this db") + return nil, nil, nil, nil, nil, nil, nil, nil, ff, nil, nil, fmt.Errorf("chain config not found in db. 
Need start erigon at least once on this db") } cfg.Snap.Enabled = cfg.Snap.Enabled || cfg.Sync.UseSnapshots @@ -343,6 +347,19 @@ func RemoteServices(ctx context.Context, cfg httpcfg.HttpCfg, logger log.Logger, allSnapshots := snapshotsync.NewRoSnapshots(cfg.Snap, cfg.Dirs.Snap) allSnapshots.OptimisticReopen() log.Info("[Snapshots] see new", "blocks", allSnapshots.BlocksAvailable()) + txNums = make([]uint64, allSnapshots.BlocksAvailable()+1) + if err = allSnapshots.Bodies.View(func(bs []*snapshotsync.BodySegment) error { + for _, b := range bs { + if err = b.Iterate(func(blockNum, baseTxNum, txAmount uint64) { + txNums[blockNum] = baseTxNum + txAmount + }); err != nil { + return err + } + } + return nil + }); err != nil { + return nil, nil, nil, nil, nil, nil, nil, nil, ff, nil, nil, fmt.Errorf("build txNum => blockNum mapping: %w", err) + } // don't reopen it right here, because snapshots may be not ready yet onNewSnapshot = func() { if err := allSnapshots.Reopen(); err != nil { @@ -359,17 +376,17 @@ func RemoteServices(ctx context.Context, cfg httpcfg.HttpCfg, logger log.Logger, creds, err := grpcutil.TLS(cfg.TLSCACert, cfg.TLSCertfile, cfg.TLSKeyFile) if err != nil { - return nil, nil, nil, nil, nil, nil, nil, nil, ff, fmt.Errorf("open tls cert: %w", err) + return nil, nil, nil, nil, nil, nil, nil, nil, ff, nil, nil, fmt.Errorf("open tls cert: %w", err) } conn, err := grpcutil.Connect(creds, cfg.PrivateApiAddr) if err != nil { - return nil, nil, nil, nil, nil, nil, nil, nil, ff, fmt.Errorf("could not connect to execution service privateApi: %w", err) + return nil, nil, nil, nil, nil, nil, nil, nil, ff, nil, nil, fmt.Errorf("could not connect to execution service privateApi: %w", err) } kvClient := remote.NewKVClient(conn) remoteKv, err := remotedb.NewRemote(gointerfaces.VersionFromProto(remotedbserver.KvServiceAPIVersion), logger, kvClient).Open() if err != nil { - return nil, nil, nil, nil, nil, nil, nil, nil, ff, fmt.Errorf("could not connect to remoteKv: %w", err) + return nil, nil, nil, nil, nil, nil, nil, nil, ff, nil, nil, fmt.Errorf("could not connect to remoteKv: %w", err) } subscribeToStateChangesLoop(ctx, kvClient, stateCache) @@ -384,7 +401,7 @@ func RemoteServices(ctx context.Context, cfg httpcfg.HttpCfg, logger log.Logger, if cfg.TxPoolApiAddr != cfg.PrivateApiAddr { txpoolConn, err = grpcutil.Connect(creds, cfg.TxPoolApiAddr) if err != nil { - return nil, nil, nil, nil, nil, nil, nil, nil, ff, fmt.Errorf("could not connect to txpool api: %w", err) + return nil, nil, nil, nil, nil, nil, nil, nil, ff, nil, nil, fmt.Errorf("could not connect to txpool api: %w", err) } } @@ -414,14 +431,19 @@ func RemoteServices(ctx context.Context, cfg httpcfg.HttpCfg, logger log.Logger, if cfg.StarknetGRPCAddress != "" { starknetConn, err := grpcutil.Connect(creds, cfg.StarknetGRPCAddress) if err != nil { - return nil, nil, nil, nil, nil, nil, nil, nil, ff, fmt.Errorf("could not connect to starknet api: %w", err) + return nil, nil, nil, nil, nil, nil, nil, nil, ff, nil, nil, fmt.Errorf("could not connect to starknet api: %w", err) } starknet = rpcservices.NewStarknetService(starknetConn) } ff = rpchelper.New(ctx, eth, txPool, mining, onNewSnapshot) - return db, borDb, eth, txPool, mining, starknet, stateCache, blockReader, ff, err + if cfg.WithDatadir { + if agg, err = libstate.NewAggregator(filepath.Join(cfg.DataDir, "erigon22"), 3_125_000); err != nil { + return nil, nil, nil, nil, nil, nil, nil, nil, ff, nil, nil, fmt.Errorf("create aggregator: %w", err) + } + } + return db, borDb, eth, 
txPool, mining, starknet, stateCache, blockReader, ff, agg, txNums, err } func StartRpcServer(ctx context.Context, cfg httpcfg.HttpCfg, rpcAPI []rpc.API) error { diff --git a/cmd/rpcdaemon22/commands/call_traces_test.go b/cmd/rpcdaemon22/commands/call_traces_test.go index 013e3272899..7f884f5f0c8 100644 --- a/cmd/rpcdaemon22/commands/call_traces_test.go +++ b/cmd/rpcdaemon22/commands/call_traces_test.go @@ -43,6 +43,7 @@ func blockNumbersFromTraces(t *testing.T, b []byte) []int { } func TestCallTraceOneByOne(t *testing.T) { + t.Skip() m := stages.Mock(t) defer m.DB.Close() chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 10, func(i int, gen *core.BlockGen) { @@ -52,7 +53,7 @@ func TestCallTraceOneByOne(t *testing.T) { t.Fatalf("generate chain: %v", err) } api := NewTraceAPI( - NewBaseApi(nil, kvcache.New(kvcache.DefaultCoherentConfig), snapshotsync.NewBlockReader(), false), + NewBaseApi(nil, kvcache.New(kvcache.DefaultCoherentConfig), snapshotsync.NewBlockReader(), nil, nil, false), m.DB, &httpcfg.HttpCfg{}) // Insert blocks 1 by 1, to tirgget possible "off by one" errors for i := 0; i < chain.Length; i++ { @@ -78,6 +79,7 @@ func TestCallTraceOneByOne(t *testing.T) { } func TestCallTraceUnwind(t *testing.T) { + t.Skip() m := stages.Mock(t) defer m.DB.Close() var chainA, chainB *core.ChainPack @@ -98,7 +100,7 @@ func TestCallTraceUnwind(t *testing.T) { if err != nil { t.Fatalf("generate chainB: %v", err) } - api := NewTraceAPI(NewBaseApi(nil, kvcache.New(kvcache.DefaultCoherentConfig), snapshotsync.NewBlockReader(), false), m.DB, &httpcfg.HttpCfg{}) + api := NewTraceAPI(NewBaseApi(nil, kvcache.New(kvcache.DefaultCoherentConfig), snapshotsync.NewBlockReader(), nil, nil, false), m.DB, &httpcfg.HttpCfg{}) if err = m.InsertChain(chainA); err != nil { t.Fatalf("inserting chainA: %v", err) } @@ -150,6 +152,7 @@ func TestCallTraceUnwind(t *testing.T) { } func TestFilterNoAddresses(t *testing.T) { + t.Skip() m := stages.Mock(t) defer m.DB.Close() chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 10, func(i int, gen *core.BlockGen) { @@ -158,7 +161,7 @@ func TestFilterNoAddresses(t *testing.T) { if err != nil { t.Fatalf("generate chain: %v", err) } - api := NewTraceAPI(NewBaseApi(nil, kvcache.New(kvcache.DefaultCoherentConfig), snapshotsync.NewBlockReader(), false), m.DB, &httpcfg.HttpCfg{}) + api := NewTraceAPI(NewBaseApi(nil, kvcache.New(kvcache.DefaultCoherentConfig), snapshotsync.NewBlockReader(), nil, nil, false), m.DB, &httpcfg.HttpCfg{}) // Insert blocks 1 by 1, to tirgget possible "off by one" errors for i := 0; i < chain.Length; i++ { if err = m.InsertChain(chain.Slice(i, i+1)); err != nil { @@ -181,10 +184,11 @@ func TestFilterNoAddresses(t *testing.T) { } func TestFilterAddressIntersection(t *testing.T) { + t.Skip() m := stages.Mock(t) defer m.DB.Close() - api := NewTraceAPI(NewBaseApi(nil, kvcache.New(kvcache.DefaultCoherentConfig), snapshotsync.NewBlockReader(), false), m.DB, &httpcfg.HttpCfg{}) + api := NewTraceAPI(NewBaseApi(nil, kvcache.New(kvcache.DefaultCoherentConfig), snapshotsync.NewBlockReader(), nil, nil, false), m.DB, &httpcfg.HttpCfg{}) toAddress1, toAddress2, other := common.Address{1}, common.Address{2}, common.Address{3} diff --git a/cmd/rpcdaemon22/commands/corner_cases_support_test.go b/cmd/rpcdaemon22/commands/corner_cases_support_test.go index 762560c7cb9..0b95ba2c657 100644 --- a/cmd/rpcdaemon22/commands/corner_cases_support_test.go +++ b/cmd/rpcdaemon22/commands/corner_cases_support_test.go @@ -20,7 +20,7 @@ func 
TestNotFoundMustReturnNil(t *testing.T) { defer db.Close() stateCache := kvcache.New(kvcache.DefaultCoherentConfig) api := NewEthAPI( - NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), + NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), db, nil, nil, nil, 5000000) ctx := context.Background() diff --git a/cmd/rpcdaemon22/commands/daemon.go b/cmd/rpcdaemon22/commands/daemon.go index 1fe8e9e7f55..ca2029f3fe4 100644 --- a/cmd/rpcdaemon22/commands/daemon.go +++ b/cmd/rpcdaemon22/commands/daemon.go @@ -5,6 +5,7 @@ import ( "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/kvcache" + libstate "github.com/ledgerwatch/erigon-lib/state" "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/cli/httpcfg" "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/turbo/rpchelper" @@ -14,9 +15,9 @@ import ( // APIList describes the list of available RPC apis func APIList(db kv.RoDB, borDb kv.RoDB, eth rpchelper.ApiBackend, txPool txpool.TxpoolClient, mining txpool.MiningClient, starknet starknet.CAIROVMClient, filters *rpchelper.Filters, stateCache kvcache.Cache, - blockReader services.FullBlockReader, cfg httpcfg.HttpCfg) (list []rpc.API) { + blockReader services.FullBlockReader, agg *libstate.Aggregator, txNums []uint64, cfg httpcfg.HttpCfg) (list []rpc.API) { - base := NewBaseApi(filters, stateCache, blockReader, cfg.WithDatadir) + base := NewBaseApi(filters, stateCache, blockReader, agg, txNums, cfg.WithDatadir) if cfg.TevmEnabled { base.EnableTevmExperiment() } diff --git a/cmd/rpcdaemon22/commands/debug_api_test.go b/cmd/rpcdaemon22/commands/debug_api_test.go index 25c4f2ccd26..87a24a3eef5 100644 --- a/cmd/rpcdaemon22/commands/debug_api_test.go +++ b/cmd/rpcdaemon22/commands/debug_api_test.go @@ -41,7 +41,7 @@ var debugTraceTransactionNoRefundTests = []struct { func TestTraceBlockByNumber(t *testing.T) { db := rpcdaemontest.CreateTestKV(t) stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - baseApi := NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false) + baseApi := NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false) ethApi := NewEthAPI(baseApi, db, nil, nil, nil, 5000000) api := NewPrivateDebugAPI(baseApi, db, 0) for _, tt := range debugTraceTransactionTests { @@ -88,7 +88,7 @@ func TestTraceBlockByNumber(t *testing.T) { func TestTraceBlockByHash(t *testing.T) { db := rpcdaemontest.CreateTestKV(t) stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - baseApi := NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false) + baseApi := NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false) ethApi := NewEthAPI(baseApi, db, nil, nil, nil, 5000000) api := NewPrivateDebugAPI(baseApi, db, 0) for _, tt := range debugTraceTransactionTests { @@ -123,7 +123,7 @@ func TestTraceTransaction(t *testing.T) { db := rpcdaemontest.CreateTestKV(t) stateCache := kvcache.New(kvcache.DefaultCoherentConfig) api := NewPrivateDebugAPI( - NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), + NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), db, 0) for _, tt := range debugTraceTransactionTests { var buf bytes.Buffer @@ -155,7 +155,7 @@ func TestTraceTransactionNoRefund(t *testing.T) { db := rpcdaemontest.CreateTestKV(t) stateCache := kvcache.New(kvcache.DefaultCoherentConfig) api := NewPrivateDebugAPI( - NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), + 
NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), db, 0) for _, tt := range debugTraceTransactionNoRefundTests { var buf bytes.Buffer diff --git a/cmd/rpcdaemon22/commands/eth_api.go b/cmd/rpcdaemon22/commands/eth_api.go index ab06d94ce5a..982b79a86de 100644 --- a/cmd/rpcdaemon22/commands/eth_api.go +++ b/cmd/rpcdaemon22/commands/eth_api.go @@ -11,6 +11,7 @@ import ( "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/kvcache" + libstate "github.com/ledgerwatch/erigon-lib/state" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/hexutil" "github.com/ledgerwatch/erigon/common/math" @@ -102,10 +103,12 @@ type BaseAPI struct { _blockReader services.FullBlockReader _txnReader services.TxnReader + _agg *libstate.Aggregator + _txNums []uint64 TevmEnabled bool // experiment } -func NewBaseApi(f *rpchelper.Filters, stateCache kvcache.Cache, blockReader services.FullBlockReader, singleNodeMode bool) *BaseAPI { +func NewBaseApi(f *rpchelper.Filters, stateCache kvcache.Cache, blockReader services.FullBlockReader, agg *libstate.Aggregator, txNums []uint64, singleNodeMode bool) *BaseAPI { blocksLRUSize := 128 // ~32Mb if !singleNodeMode { blocksLRUSize = 512 @@ -115,7 +118,7 @@ func NewBaseApi(f *rpchelper.Filters, stateCache kvcache.Cache, blockReader serv panic(err) } - return &BaseAPI{filters: f, stateCache: stateCache, blocksLRU: blocksLRU, _blockReader: blockReader, _txnReader: blockReader} + return &BaseAPI{filters: f, stateCache: stateCache, blocksLRU: blocksLRU, _blockReader: blockReader, _txnReader: blockReader, _agg: agg, _txNums: txNums} } func (api *BaseAPI) chainConfig(tx kv.Tx) (*params.ChainConfig, error) { diff --git a/cmd/rpcdaemon22/commands/eth_api_test.go b/cmd/rpcdaemon22/commands/eth_api_test.go index f271b7cf824..ffee9fe095c 100644 --- a/cmd/rpcdaemon22/commands/eth_api_test.go +++ b/cmd/rpcdaemon22/commands/eth_api_test.go @@ -19,7 +19,7 @@ import ( func TestGetTransactionReceipt(t *testing.T) { db := rpcdaemontest.CreateTestKV(t) stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), db, nil, nil, nil, 5000000) // Call GetTransactionReceipt for transaction which is not in the database if _, err := api.GetTransactionReceipt(context.Background(), common.Hash{}); err != nil { t.Errorf("calling GetTransactionReceipt with empty hash: %v", err) @@ -29,7 +29,7 @@ func TestGetTransactionReceipt(t *testing.T) { func TestGetTransactionReceiptUnprotected(t *testing.T) { db := rpcdaemontest.CreateTestKV(t) stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), db, nil, nil, nil, 5000000) // Call GetTransactionReceipt for un-protected transaction if _, err := api.GetTransactionReceipt(context.Background(), common.HexToHash("0x3f3cb8a0e13ed2481f97f53f7095b9cbc78b6ffb779f2d3e565146371a8830ea")); err != nil { t.Errorf("calling GetTransactionReceipt for unprotected tx: %v", err) @@ -42,7 +42,7 @@ func TestGetStorageAt_ByBlockNumber_WithRequireCanonicalDefault(t *testing.T) { assert := assert.New(t) db := rpcdaemontest.CreateTestKV(t) stateCache := 
kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), db, nil, nil, nil, 5000000) addr := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") result, err := api.GetStorageAt(context.Background(), addr, "0x0", rpc.BlockNumberOrHashWithNumber(0)) @@ -58,7 +58,7 @@ func TestGetStorageAt_ByBlockHash_WithRequireCanonicalDefault(t *testing.T) { m, _, _ := rpcdaemontest.CreateTestSentry(t) db := m.DB stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), db, nil, nil, nil, 5000000) addr := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") result, err := api.GetStorageAt(context.Background(), addr, "0x0", rpc.BlockNumberOrHashWithHash(m.Genesis.Hash(), false)) @@ -74,7 +74,7 @@ func TestGetStorageAt_ByBlockHash_WithRequireCanonicalTrue(t *testing.T) { m, _, _ := rpcdaemontest.CreateTestSentry(t) db := m.DB stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), db, nil, nil, nil, 5000000) addr := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") result, err := api.GetStorageAt(context.Background(), addr, "0x0", rpc.BlockNumberOrHashWithHash(m.Genesis.Hash(), true)) @@ -89,7 +89,7 @@ func TestGetStorageAt_ByBlockHash_WithRequireCanonicalDefault_BlockNotFoundError m, _, _ := rpcdaemontest.CreateTestSentry(t) db := m.DB stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), db, nil, nil, nil, 5000000) addr := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") offChain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 1, func(i int, block *core.BlockGen) { @@ -112,7 +112,7 @@ func TestGetStorageAt_ByBlockHash_WithRequireCanonicalTrue_BlockNotFoundError(t m, _, _ := rpcdaemontest.CreateTestSentry(t) db := m.DB stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), db, nil, nil, nil, 5000000) addr := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") offChain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 1, func(i int, block *core.BlockGen) { @@ -136,7 +136,7 @@ func TestGetStorageAt_ByBlockHash_WithRequireCanonicalDefault_NonCanonicalBlock( m, _, orphanedChain := rpcdaemontest.CreateTestSentry(t) db := m.DB stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), db, nil, nil, nil, 5000000) addr := 
common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") orphanedBlock := orphanedChain[0].Blocks[0] @@ -157,7 +157,7 @@ func TestGetStorageAt_ByBlockHash_WithRequireCanonicalTrue_NonCanonicalBlock(t * m, _, orphanedChain := rpcdaemontest.CreateTestSentry(t) db := m.DB stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), db, nil, nil, nil, 5000000) addr := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") orphanedBlock := orphanedChain[0].Blocks[0] @@ -175,7 +175,7 @@ func TestCall_ByBlockHash_WithRequireCanonicalDefault_NonCanonicalBlock(t *testi m, _, orphanedChain := rpcdaemontest.CreateTestSentry(t) db := m.DB stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), db, nil, nil, nil, 5000000) from := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") to := common.HexToAddress("0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e") @@ -200,7 +200,7 @@ func TestCall_ByBlockHash_WithRequireCanonicalTrue_NonCanonicalBlock(t *testing. m, _, orphanedChain := rpcdaemontest.CreateTestSentry(t) db := m.DB stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), db, nil, nil, nil, 5000000) from := common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") to := common.HexToAddress("0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e") diff --git a/cmd/rpcdaemon22/commands/eth_block.go b/cmd/rpcdaemon22/commands/eth_block.go index 985f22256ff..811291c9cbf 100644 --- a/cmd/rpcdaemon22/commands/eth_block.go +++ b/cmd/rpcdaemon22/commands/eth_block.go @@ -119,7 +119,7 @@ func (api *APIImpl) CallBundle(ctx context.Context, txHashes []common.Hash, stat contractHasTEVM = ethdb.GetHasTEVM(tx) } - blockCtx, txCtx := transactions.GetEvmContext(firstMsg, header, stateBlockNumberOrHash.RequireCanonical, tx, contractHasTEVM) + blockCtx, txCtx := transactions.GetEvmContext(firstMsg, header, stateBlockNumberOrHash.RequireCanonical, tx, contractHasTEVM, api._blockReader) evm := vm.NewEVM(blockCtx, txCtx, st, chainConfig, vm.Config{Debug: false}) timeoutMilliSeconds := int64(5000) diff --git a/cmd/rpcdaemon22/commands/eth_call.go b/cmd/rpcdaemon22/commands/eth_call.go index ce09c837479..a015b1e94b2 100644 --- a/cmd/rpcdaemon22/commands/eth_call.go +++ b/cmd/rpcdaemon22/commands/eth_call.go @@ -63,7 +63,7 @@ func (api *APIImpl) Call(ctx context.Context, args ethapi.CallArgs, blockNrOrHas return nil, nil } - result, err := transactions.DoCall(ctx, args, tx, blockNrOrHash, block, overrides, api.GasCap, chainConfig, api.filters, api.stateCache, contractHasTEVM) + result, err := transactions.DoCall(ctx, args, tx, blockNrOrHash, block, overrides, api.GasCap, chainConfig, api.filters, api.stateCache, contractHasTEVM, api._blockReader) if err != nil { return nil, err } @@ -234,7 +234,7 @@ func (api *APIImpl) EstimateGas(ctx context.Context, argsOrNil *ethapi.CallArgs, } result, err := transactions.DoCall(ctx, args, dbtx, numOrHash, block, nil, - api.GasCap, chainConfig, 
api.filters, api.stateCache, contractHasTEVM) + api.GasCap, chainConfig, api.filters, api.stateCache, contractHasTEVM, api._blockReader) if err != nil { if errors.Is(err, core.ErrIntrinsicGas) { // Special case, raise gas limit @@ -400,7 +400,7 @@ func (api *APIImpl) CreateAccessList(ctx context.Context, args ethapi.CallArgs, // Apply the transaction with the access list tracer tracer := logger.NewAccessListTracer(accessList, *args.From, to, precompiles) config := vm.Config{Tracer: tracer, Debug: true, NoBaseFee: true} - blockCtx, txCtx := transactions.GetEvmContext(msg, header, bNrOrHash.RequireCanonical, tx, contractHasTEVM) + blockCtx, txCtx := transactions.GetEvmContext(msg, header, bNrOrHash.RequireCanonical, tx, contractHasTEVM, api._blockReader) evm := vm.NewEVM(blockCtx, txCtx, state, chainConfig, config) gp := new(core.GasPool).AddGas(msg.Gas()) diff --git a/cmd/rpcdaemon22/commands/eth_call_test.go b/cmd/rpcdaemon22/commands/eth_call_test.go index a51841d7121..6958651e4f5 100644 --- a/cmd/rpcdaemon22/commands/eth_call_test.go +++ b/cmd/rpcdaemon22/commands/eth_call_test.go @@ -17,7 +17,7 @@ import ( func TestEstimateGas(t *testing.T) { db := rpcdaemontest.CreateTestKV(t) stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), db, nil, nil, nil, 5000000) var from = common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") var to = common.HexToAddress("0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e") if _, err := api.EstimateGas(context.Background(), ðapi.CallArgs{ @@ -31,7 +31,7 @@ func TestEstimateGas(t *testing.T) { func TestEthCallNonCanonical(t *testing.T) { db := rpcdaemontest.CreateTestKV(t) stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), db, nil, nil, nil, 5000000) var from = common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") var to = common.HexToAddress("0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e") if _, err := api.Call(context.Background(), ethapi.CallArgs{ @@ -55,7 +55,7 @@ func TestGetBlockByTimestampLatestTime(t *testing.T) { defer tx.Rollback() stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewErigonAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil) + api := NewErigonAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), db, nil) latestBlock := rawdb.ReadCurrentBlock(tx) response, err := ethapi.RPCMarshalBlock(latestBlock, true, false) @@ -92,7 +92,7 @@ func TestGetBlockByTimestampOldestTime(t *testing.T) { defer tx.Rollback() stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewErigonAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil) + api := NewErigonAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), db, nil) oldestBlock, err := rawdb.ReadBlockByNumber(tx, 0) if err != nil { @@ -133,7 +133,7 @@ func TestGetBlockByTimeHigherThanLatestBlock(t *testing.T) { defer tx.Rollback() stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewErigonAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil) + api := NewErigonAPI(NewBaseApi(nil, stateCache, 
snapshotsync.NewBlockReader(), nil, nil, false), db, nil) latestBlock := rawdb.ReadCurrentBlock(tx) @@ -171,7 +171,7 @@ func TestGetBlockByTimeMiddle(t *testing.T) { defer tx.Rollback() stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewErigonAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil) + api := NewErigonAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), db, nil) currentHeader := rawdb.ReadCurrentHeader(tx) oldestHeader := rawdb.ReadHeaderByNumber(tx, 0) @@ -216,7 +216,7 @@ func TestGetBlockByTimestamp(t *testing.T) { defer tx.Rollback() stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewErigonAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil) + api := NewErigonAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), db, nil) highestBlockNumber := rawdb.ReadCurrentHeader(tx).Number pickedBlock, err := rawdb.ReadBlockByNumber(tx, highestBlockNumber.Uint64()/3) diff --git a/cmd/rpcdaemon22/commands/eth_ming_test.go b/cmd/rpcdaemon22/commands/eth_ming_test.go index 835578e7032..a3d39e9e101 100644 --- a/cmd/rpcdaemon22/commands/eth_ming_test.go +++ b/cmd/rpcdaemon22/commands/eth_ming_test.go @@ -21,7 +21,7 @@ func TestPendingBlock(t *testing.T) { mining := txpool.NewMiningClient(conn) ff := rpchelper.New(ctx, nil, nil, mining, func() {}) stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI(NewBaseApi(ff, stateCache, snapshotsync.NewBlockReader(), false), nil, nil, nil, mining, 5000000) + api := NewEthAPI(NewBaseApi(ff, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), nil, nil, nil, mining, 5000000) expect := uint64(12345) b, err := rlp.EncodeToBytes(types.NewBlockWithHeader(&types.Header{Number: big.NewInt(int64(expect))})) require.NoError(t, err) diff --git a/cmd/rpcdaemon22/commands/send_transaction_test.go b/cmd/rpcdaemon22/commands/send_transaction_test.go index 8fe9ffa3613..87c6ef411b3 100644 --- a/cmd/rpcdaemon22/commands/send_transaction_test.go +++ b/cmd/rpcdaemon22/commands/send_transaction_test.go @@ -72,7 +72,7 @@ func TestSendRawTransaction(t *testing.T) { txPool := txpool.NewTxpoolClient(conn) ff := rpchelper.New(ctx, nil, txPool, txpool.NewMiningClient(conn), func() {}) stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := commands.NewEthAPI(commands.NewBaseApi(ff, stateCache, snapshotsync.NewBlockReader(), false), m.DB, nil, txPool, nil, 5000000) + api := commands.NewEthAPI(commands.NewBaseApi(ff, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), m.DB, nil, txPool, nil, 5000000) buf := bytes.NewBuffer(nil) err = txn.MarshalBinary(buf) diff --git a/cmd/rpcdaemon22/commands/starknet_send_transaction_test.go b/cmd/rpcdaemon22/commands/starknet_send_transaction_test.go index d62f6374ddd..4e8eb3979e5 100644 --- a/cmd/rpcdaemon22/commands/starknet_send_transaction_test.go +++ b/cmd/rpcdaemon22/commands/starknet_send_transaction_test.go @@ -37,7 +37,7 @@ func TestErrorStarknetSendRawTransaction(t *testing.T) { stateCache := kvcache.New(kvcache.DefaultCoherentConfig) for _, tt := range cases { - api := commands.NewStarknetAPI(commands.NewBaseApi(ff, stateCache, snapshotsync.NewBlockReader(), false), m.DB, starknetClient, txPool) + api := commands.NewStarknetAPI(commands.NewBaseApi(ff, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), m.DB, starknetClient, txPool) t.Run(tt.name, func(t *testing.T) { hex, _ := hexutil.Decode(tt.tx) diff --git 
a/cmd/rpcdaemon22/commands/trace_adhoc.go b/cmd/rpcdaemon22/commands/trace_adhoc.go index 5426b869530..2f3c2bb743e 100644 --- a/cmd/rpcdaemon22/commands/trace_adhoc.go +++ b/cmd/rpcdaemon22/commands/trace_adhoc.go @@ -935,7 +935,7 @@ func (api *TraceAPIImpl) Call(ctx context.Context, args TraceCallParam, traceTyp if api.TevmEnabled { contractHasTEVM = ethdb.GetHasTEVM(tx) } - blockCtx, txCtx := transactions.GetEvmContext(msg, header, blockNrOrHash.RequireCanonical, tx, contractHasTEVM) + blockCtx, txCtx := transactions.GetEvmContext(msg, header, blockNrOrHash.RequireCanonical, tx, contractHasTEVM, api._blockReader) blockCtx.GasLimit = math.MaxUint64 blockCtx.MaxGasLimit = true @@ -1161,7 +1161,7 @@ func (api *TraceAPIImpl) doCallMany(ctx context.Context, dbtx kv.Tx, msgs []type } // Get a new instance of the EVM. - blockCtx, txCtx := transactions.GetEvmContext(msg, header, parentNrOrHash.RequireCanonical, dbtx, contractHasTEVM) + blockCtx, txCtx := transactions.GetEvmContext(msg, header, parentNrOrHash.RequireCanonical, dbtx, contractHasTEVM, api._blockReader) if useParent { blockCtx.GasLimit = math.MaxUint64 blockCtx.MaxGasLimit = true diff --git a/cmd/rpcdaemon22/commands/trace_adhoc_test.go b/cmd/rpcdaemon22/commands/trace_adhoc_test.go index 84805dd8422..abd446c1a78 100644 --- a/cmd/rpcdaemon22/commands/trace_adhoc_test.go +++ b/cmd/rpcdaemon22/commands/trace_adhoc_test.go @@ -20,7 +20,7 @@ import ( func TestEmptyQuery(t *testing.T) { db := rpcdaemontest.CreateTestKV(t) stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewTraceAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, &httpcfg.HttpCfg{}) + api := NewTraceAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), db, &httpcfg.HttpCfg{}) // Call GetTransactionReceipt for transaction which is not in the database var latest = rpc.LatestBlockNumber results, err := api.CallMany(context.Background(), json.RawMessage("[]"), &rpc.BlockNumberOrHash{BlockNumber: &latest}) @@ -37,7 +37,7 @@ func TestEmptyQuery(t *testing.T) { func TestCoinbaseBalance(t *testing.T) { db := rpcdaemontest.CreateTestKV(t) stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewTraceAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, &httpcfg.HttpCfg{}) + api := NewTraceAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), db, &httpcfg.HttpCfg{}) // Call GetTransactionReceipt for transaction which is not in the database var latest = rpc.LatestBlockNumber results, err := api.CallMany(context.Background(), json.RawMessage(` @@ -64,7 +64,7 @@ func TestCoinbaseBalance(t *testing.T) { func TestReplayTransaction(t *testing.T) { db := rpcdaemontest.CreateTestKV(t) stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewTraceAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, &httpcfg.HttpCfg{}) + api := NewTraceAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), db, &httpcfg.HttpCfg{}) var txnHash common.Hash if err := db.View(context.Background(), func(tx kv.Tx) error { b, err := rawdb.ReadBlockByNumber(tx, 6) @@ -92,7 +92,7 @@ func TestReplayTransaction(t *testing.T) { func TestReplayBlockTransactions(t *testing.T) { db := rpcdaemontest.CreateTestKV(t) stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewTraceAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, &httpcfg.HttpCfg{}) + api := NewTraceAPI(NewBaseApi(nil, stateCache, 
snapshotsync.NewBlockReader(), nil, nil, false), db, &httpcfg.HttpCfg{}) // Call GetTransactionReceipt for transaction which is not in the database n := rpc.BlockNumber(6) diff --git a/cmd/rpcdaemon22/commands/trace_filtering.go b/cmd/rpcdaemon22/commands/trace_filtering.go index fdae1610393..64e307883d8 100644 --- a/cmd/rpcdaemon22/commands/trace_filtering.go +++ b/cmd/rpcdaemon22/commands/trace_filtering.go @@ -2,8 +2,8 @@ package commands import ( "context" - "errors" "fmt" + "sort" "github.com/RoaringBitmap/roaring/roaring64" jsoniter "github.com/json-iterator/go" @@ -12,12 +12,15 @@ import ( "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/hexutil" "github.com/ledgerwatch/erigon/consensus/ethash" + "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/ethdb" - "github.com/ledgerwatch/erigon/ethdb/bitmapdb" + "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/turbo/shards" + "github.com/ledgerwatch/erigon/turbo/transactions" ) // Transaction implements trace_transaction @@ -232,6 +235,12 @@ func (api *TraceAPIImpl) Filter(ctx context.Context, req TraceFilterRequest, str toBlock = uint64(*req.ToBlock) } + var fromTxNum, toTxNum uint64 + if fromBlock > 0 { + fromTxNum = api._txNums[fromBlock-1] + } + toTxNum = api._txNums[toBlock] // toBlock is an inclusive bound + if fromBlock > toBlock { stream.WriteNil() return fmt.Errorf("invalid parameters: fromBlock cannot be greater than toBlock") @@ -241,55 +250,45 @@ func (api *TraceAPIImpl) Filter(ctx context.Context, req TraceFilterRequest, str toAddresses := make(map[common.Address]struct{}, len(req.ToAddress)) var ( - allBlocks roaring64.Bitmap - blocksTo roaring64.Bitmap + allTxs roaring64.Bitmap + txsTo roaring64.Bitmap ) for _, addr := range req.FromAddress { if addr != nil { - b, err := bitmapdb.Get64(dbtx, kv.CallFromIndex, addr.Bytes(), fromBlock, toBlock) - if err != nil { - if errors.Is(err, ethdb.ErrKeyNotFound) { - continue - } - stream.WriteNil() - return err + it := api._agg.TraceFromIterator(addr.Bytes(), fromTxNum, toTxNum, nil) + for it.HasNext() { + allTxs.Add(it.Next()) } - allBlocks.Or(b) fromAddresses[*addr] = struct{}{} } } for _, addr := range req.ToAddress { if addr != nil { - b, err := bitmapdb.Get64(dbtx, kv.CallToIndex, addr.Bytes(), fromBlock, toBlock) - if err != nil { - if errors.Is(err, ethdb.ErrKeyNotFound) { - continue - } - stream.WriteNil() - return err + it := api._agg.TraceToIterator(addr.Bytes(), fromTxNum, toTxNum, nil) + for it.HasNext() { + txsTo.Add(it.Next()) } - blocksTo.Or(b) toAddresses[*addr] = struct{}{} } } switch req.Mode { case TraceFilterModeIntersection: - allBlocks.And(&blocksTo) + allTxs.And(&txsTo) case TraceFilterModeUnion: fallthrough default: - allBlocks.Or(&blocksTo) + allTxs.Or(&txsTo) } // Special case - if no addresses specified, take all traces if len(req.FromAddress) == 0 && len(req.ToAddress) == 0 { - allBlocks.AddRange(fromBlock, toBlock+1) + allTxs.AddRange(fromTxNum, toTxNum+1) } else { - allBlocks.RemoveRange(0, fromBlock) - allBlocks.RemoveRange(toBlock+1, uint64(0x100000000)) + allTxs.RemoveRange(0, fromTxNum) + allTxs.RemoveRange(toTxNum+1, uint64(0x1000000000000)) } chainConfig, err := api.chainConfig(dbtx) @@ -313,124 +312,175 @@ func (api *TraceAPIImpl) Filter(ctx context.Context, req 
TraceFilterRequest, str } nSeen := uint64(0) nExported := uint64(0) - - it := allBlocks.Iterator() + includeAll := len(fromAddresses) == 0 && len(toAddresses) == 0 + it := allTxs.Iterator() + var lastBlockNum uint64 + var lastBlockHash common.Hash + var lastHeader *types.Header + var lastSigner *types.Signer + var lastRules *params.Rules + stateReader := state.NewHistoryReader22(api._agg) + noop := state.NewNoopWriter() for it.HasNext() { - b := uint64(it.Next()) - // Extract transactions from block - hash, hashErr := rawdb.ReadCanonicalHash(dbtx, b) - if hashErr != nil { + txNum := uint64(it.Next()) + // Find block number + blockNum := uint64(sort.Search(len(api._txNums), func(i int) bool { + return api._txNums[i] > txNum + })) + if blockNum > lastBlockNum { + if lastHeader, err = api._blockReader.HeaderByNumber(ctx, nil, blockNum); err != nil { + stream.WriteNil() + return err + } + lastBlockNum = blockNum + lastBlockHash = lastHeader.Hash() + lastSigner = types.MakeSigner(chainConfig, blockNum) + lastRules = chainConfig.Rules(blockNum) + } + if txNum+1 == api._txNums[blockNum] { + body, err := api._blockReader.Body(ctx, nil, lastBlockHash, blockNum) + if err != nil { + stream.WriteNil() + return err + } + // Block reward section, handle specially + minerReward, uncleRewards := ethash.AccumulateRewards(chainConfig, lastHeader, body.Uncles) + if _, ok := toAddresses[lastHeader.Coinbase]; ok || includeAll { + nSeen++ + var tr ParityTrace + var rewardAction = &RewardTraceAction{} + rewardAction.Author = lastHeader.Coinbase + rewardAction.RewardType = "block" // nolint: goconst + rewardAction.Value.ToInt().Set(minerReward.ToBig()) + tr.Action = rewardAction + tr.BlockHash = &common.Hash{} + copy(tr.BlockHash[:], lastBlockHash.Bytes()) + tr.BlockNumber = new(uint64) + *tr.BlockNumber = blockNum + tr.Type = "reward" // nolint: goconst + tr.TraceAddress = []int{} + b, err := json.Marshal(tr) + if err != nil { + stream.WriteNil() + return err + } + if nSeen > after && nExported < count { + if first { + first = false + } else { + stream.WriteMore() + } + stream.Write(b) + nExported++ + } + } + for i, uncle := range body.Uncles { + if _, ok := toAddresses[uncle.Coinbase]; ok || includeAll { + if i < len(uncleRewards) { + nSeen++ + var tr ParityTrace + rewardAction := &RewardTraceAction{} + rewardAction.Author = uncle.Coinbase + rewardAction.RewardType = "uncle" // nolint: goconst + rewardAction.Value.ToInt().Set(uncleRewards[i].ToBig()) + tr.Action = rewardAction + tr.BlockHash = &common.Hash{} + copy(tr.BlockHash[:], lastBlockHash[:]) + tr.BlockNumber = new(uint64) + *tr.BlockNumber = blockNum + tr.Type = "reward" // nolint: goconst + tr.TraceAddress = []int{} + b, err := json.Marshal(tr) + if err != nil { + stream.WriteNil() + return err + } + if nSeen > after && nExported < count { + if first { + first = false + } else { + stream.WriteMore() + } + stream.Write(b) + nExported++ + } + } + } + } + continue + } + var startTxNum uint64 + if blockNum > 0 { + startTxNum = api._txNums[blockNum-1] + } + txIndex := txNum - startTxNum - 1 + fmt.Printf("txNum=%d, blockNum=%d, txIndex=%d\n", txNum, blockNum, txIndex) + txn, err := api._txnReader.TxnByIdxInBlock(ctx, nil, blockNum, int(txIndex)) + if err != nil { stream.WriteNil() - return hashErr + return err } - - block, bErr := api.blockWithSenders(dbtx, hash, b) - if bErr != nil { + txHash := txn.Hash() + msg, err := txn.AsMessage(*lastSigner, lastHeader.BaseFee, lastRules) + if err != nil { stream.WriteNil() - return bErr + return err } - if block == 
nil { + contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } + blockCtx, txCtx := transactions.GetEvmContext(msg, lastHeader, true /* requireCanonical */, dbtx, contractHasTEVM, api._blockReader) + stateReader.SetTxNum(txNum) + stateCache := shards.NewStateCache(32, 0 /* no limit */) // this cache living only during current RPC call, but required to store state writes + cachedReader := state.NewCachedReader(stateReader, stateCache) + cachedWriter := state.NewCachedWriter(noop, stateCache) + vmConfig := vm.Config{} + traceResult := &TraceCallResult{Trace: []*ParityTrace{}} + var ot OeTracer + ot.compat = api.compatibility + ot.r = traceResult + ot.idx = []string{fmt.Sprintf("%d-", txIndex)} + ot.traceAddr = []int{} + vmConfig.Debug = true + vmConfig.Tracer = &ot + ibs := state.New(cachedReader) + evm := vm.NewEVM(blockCtx, txCtx, ibs, chainConfig, vmConfig) + + gp := new(core.GasPool).AddGas(msg.Gas()) + ibs.Prepare(txHash, lastBlockHash, int(txIndex)) + var execResult *core.ExecutionResult + execResult, err = core.ApplyMessage(evm, msg, gp, true /* refunds */, false /* gasBailout */) + if err != nil { stream.WriteNil() - return fmt.Errorf("could not find block %x %d", hash, b) + return err } - - blockHash := block.Hash() - blockNumber := block.NumberU64() - txs := block.Transactions() - t, tErr := api.callManyTransactions(ctx, dbtx, txs, []string{TraceTypeTrace}, block.ParentHash(), rpc.BlockNumber(block.NumberU64()-1), block.Header(), -1 /* all tx indices */, types.MakeSigner(chainConfig, b), chainConfig.Rules(b)) - if tErr != nil { + traceResult.Output = common.CopyBytes(execResult.ReturnData) + if err = ibs.FinalizeTx(evm.ChainRules(), noop); err != nil { stream.WriteNil() - return tErr + return err } - includeAll := len(fromAddresses) == 0 && len(toAddresses) == 0 - for i, trace := range t { - txPosition := uint64(i) - txHash := txs[i].Hash() - // Check if transaction concerns any of the addresses we wanted - for _, pt := range trace.Trace { - if includeAll || filter_trace(pt, fromAddresses, toAddresses) { - nSeen++ - pt.BlockHash = &blockHash - pt.BlockNumber = &blockNumber - pt.TransactionHash = &txHash - pt.TransactionPosition = &txPosition - b, err := json.Marshal(pt) - if err != nil { - stream.WriteNil() - return err - } - if nSeen > after && nExported < count { - if first { - first = false - } else { - stream.WriteMore() - } - stream.Write(b) - nExported++ - } - } - } + if err = ibs.CommitBlock(evm.ChainRules(), cachedWriter); err != nil { + stream.WriteNil() + return err } - minerReward, uncleRewards := ethash.AccumulateRewards(chainConfig, block.Header(), block.Uncles()) - if _, ok := toAddresses[block.Coinbase()]; ok || includeAll { - nSeen++ - var tr ParityTrace - var rewardAction = &RewardTraceAction{} - rewardAction.Author = block.Coinbase() - rewardAction.RewardType = "block" // nolint: goconst - rewardAction.Value.ToInt().Set(minerReward.ToBig()) - tr.Action = rewardAction - tr.BlockHash = &common.Hash{} - copy(tr.BlockHash[:], block.Hash().Bytes()) - tr.BlockNumber = new(uint64) - *tr.BlockNumber = block.NumberU64() - tr.Type = "reward" // nolint: goconst - tr.TraceAddress = []int{} - b, err := json.Marshal(tr) - if err != nil { - stream.WriteNil() - return err - } - if nSeen > after && nExported < count { - if first { - first = false - } else { - stream.WriteMore() + for _, pt := range traceResult.Trace { + if includeAll || filter_trace(pt, fromAddresses, toAddresses) { + nSeen++ + pt.BlockHash = &lastBlockHash + pt.BlockNumber = 
&blockNum + pt.TransactionHash = &txHash + pt.TransactionPosition = &txIndex + b, err := json.Marshal(pt) + if err != nil { + stream.WriteNil() + return err } - stream.Write(b) - nExported++ - } - } - for i, uncle := range block.Uncles() { - if _, ok := toAddresses[uncle.Coinbase]; ok || includeAll { - if i < len(uncleRewards) { - nSeen++ - var tr ParityTrace - rewardAction := &RewardTraceAction{} - rewardAction.Author = uncle.Coinbase - rewardAction.RewardType = "uncle" // nolint: goconst - rewardAction.Value.ToInt().Set(uncleRewards[i].ToBig()) - tr.Action = rewardAction - tr.BlockHash = &common.Hash{} - copy(tr.BlockHash[:], block.Hash().Bytes()) - tr.BlockNumber = new(uint64) - *tr.BlockNumber = block.NumberU64() - tr.Type = "reward" // nolint: goconst - tr.TraceAddress = []int{} - b, err := json.Marshal(tr) - if err != nil { - stream.WriteNil() - return err - } - if nSeen > after && nExported < count { - if first { - first = false - } else { - stream.WriteMore() - } - stream.Write(b) - nExported++ + if nSeen > after && nExported < count { + if first { + first = false + } else { + stream.WriteMore() } + stream.Write(b) + nExported++ } } } diff --git a/cmd/rpcdaemon22/commands/tracing.go b/cmd/rpcdaemon22/commands/tracing.go index 26312b8a78d..31ddd4522cc 100644 --- a/cmd/rpcdaemon22/commands/tracing.go +++ b/cmd/rpcdaemon22/commands/tracing.go @@ -235,7 +235,7 @@ func (api *PrivateDebugAPIImpl) TraceCall(ctx context.Context, args ethapi.CallA if api.TevmEnabled { contractHasTEVM = ethdb.GetHasTEVM(dbtx) } - blockCtx, txCtx := transactions.GetEvmContext(msg, header, blockNrOrHash.RequireCanonical, dbtx, contractHasTEVM) + blockCtx, txCtx := transactions.GetEvmContext(msg, header, blockNrOrHash.RequireCanonical, dbtx, contractHasTEVM, api._blockReader) // Trace the transaction and return return transactions.TraceTx(ctx, msg, blockCtx, txCtx, ibs, config, chainConfig, stream) } diff --git a/cmd/rpcdaemon22/commands/txpool_api_test.go b/cmd/rpcdaemon22/commands/txpool_api_test.go index 18c02ded7e8..5cd85335692 100644 --- a/cmd/rpcdaemon22/commands/txpool_api_test.go +++ b/cmd/rpcdaemon22/commands/txpool_api_test.go @@ -33,7 +33,7 @@ func TestTxPoolContent(t *testing.T) { ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, m) txPool := txpool.NewTxpoolClient(conn) ff := rpchelper.New(ctx, nil, txPool, txpool.NewMiningClient(conn), func() {}) - api := NewTxPoolAPI(NewBaseApi(ff, kvcache.New(kvcache.DefaultCoherentConfig), snapshotsync.NewBlockReader(), false), m.DB, txPool) + api := NewTxPoolAPI(NewBaseApi(ff, kvcache.New(kvcache.DefaultCoherentConfig), snapshotsync.NewBlockReader(), nil, nil, false), m.DB, txPool) expectValue := uint64(1234) txn, err := types.SignTx(types.NewTransaction(0, common.Address{1}, uint256.NewInt(expectValue), params.TxGas, uint256.NewInt(10*params.GWei), nil), *types.LatestSignerForChainID(m.ChainConfig.ChainID), m.Key) diff --git a/cmd/rpcdaemon22/main.go b/cmd/rpcdaemon22/main.go index 8d95c899568..9d8ad5463ce 100644 --- a/cmd/rpcdaemon22/main.go +++ b/cmd/rpcdaemon22/main.go @@ -16,7 +16,7 @@ func main() { cmd.RunE = func(cmd *cobra.Command, args []string) error { ctx := cmd.Context() logger := log.New() - db, borDb, backend, txPool, mining, starknet, stateCache, blockReader, ff, err := cli.RemoteServices(ctx, *cfg, logger, rootCancel) + db, borDb, backend, txPool, mining, starknet, stateCache, blockReader, ff, agg, txNums, err := cli.RemoteServices(ctx, *cfg, logger, rootCancel) if err != nil { log.Error("Could not connect to DB", "err", err) return nil @@ -26,7 
+26,7 @@ func main() { defer borDb.Close() } - apiList := commands.APIList(db, borDb, backend, txPool, mining, starknet, ff, stateCache, blockReader, *cfg) + apiList := commands.APIList(db, borDb, backend, txPool, mining, starknet, ff, stateCache, blockReader, agg, txNums, *cfg) if err := cli.StartRpcServer(ctx, *cfg, apiList); err != nil { log.Error(err.Error()) return nil diff --git a/cmd/rpcdaemon22/rpcservices/eth_backend.go b/cmd/rpcdaemon22/rpcservices/eth_backend.go index f86aaa0515d..248345a003c 100644 --- a/cmd/rpcdaemon22/rpcservices/eth_backend.go +++ b/cmd/rpcdaemon22/rpcservices/eth_backend.go @@ -192,6 +192,9 @@ func (back *RemoteBackend) HeaderByHash(ctx context.Context, tx kv.Getter, hash func (back *RemoteBackend) CanonicalHash(ctx context.Context, tx kv.Getter, blockHeight uint64) (common.Hash, error) { return back.blockReader.CanonicalHash(ctx, tx, blockHeight) } +func (back *RemoteBackend) TxnByIdxInBlock(ctx context.Context, tx kv.Getter, blockNum uint64, i int) (types.Transaction, error) { + return back.blockReader.TxnByIdxInBlock(ctx, tx, blockNum, i) +} func (back *RemoteBackend) EngineNewPayloadV1(ctx context.Context, payload *types2.ExecutionPayload) (res *remote.EnginePayloadStatus, err error) { return back.remoteEthBackend.EngineNewPayloadV1(ctx, payload) diff --git a/cmd/state/commands/erigon22.go b/cmd/state/commands/erigon22.go index 69e960b4c04..2bf001921c9 100644 --- a/cmd/state/commands/erigon22.go +++ b/cmd/state/commands/erigon22.go @@ -314,6 +314,7 @@ func processBlock22(trace bool, txNumStart uint64, rw *ReaderWrapper22, ww *Writ rules := chainConfig.Rules(block.NumberU64()) txNum := txNumStart ww.w.SetTxNum(txNum) + trace = block.NumberU64() == 1700059 for i, tx := range block.Transactions() { ibs := state.New(rw) @@ -328,8 +329,18 @@ func processBlock22(trace bool, txNumStart uint64, rw *ReaderWrapper22, ww *Writ if err != nil { return 0, nil, fmt.Errorf("could not apply tx %d [%x] failed: %w", i, tx.Hash(), err) } - if err = ct.AddToAggregator(ww.w); err != nil { - return 0, nil, fmt.Errorf("adding traces to aggregator: %w", err) + for from := range ct.froms { + if err := ww.w.AddTraceFrom(from[:]); err != nil { + return 0, nil, err + } + } + for to := range ct.tos { + if trace { + fmt.Printf("TraceTo [%x]\n", to[:]) + } + if err := ww.w.AddTraceTo(to[:]); err != nil { + return 0, nil, err + } } receipts = append(receipts, receipt) for _, log := range receipt.Logs { @@ -353,6 +364,14 @@ func processBlock22(trace bool, txNumStart uint64, rw *ReaderWrapper22, ww *Writ } ibs := state.New(rw) + if err := ww.w.AddTraceTo(block.Coinbase().Bytes()); err != nil { + return 0, nil, fmt.Errorf("adding coinbase trace: %w", err) + } + for _, uncle := range block.Uncles() { + if err := ww.w.AddTraceTo(uncle.Coinbase.Bytes()); err != nil { + return 0, nil, fmt.Errorf("adding uncle trace: %w", err) + } + } // Finalize the block, applying any consensus engine specific extras (e.g. 
block rewards) if _, _, _, err := engine.FinalizeAndAssemble(chainConfig, header, ibs, block.Transactions(), block.Uncles(), receipts, nil, nil, nil, nil); err != nil { diff --git a/cmd/state/commands/history22.go b/cmd/state/commands/history22.go index beb55a55c7c..c9efe1ccf7c 100644 --- a/cmd/state/commands/history22.go +++ b/cmd/state/commands/history22.go @@ -19,7 +19,6 @@ import ( "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/core/types/accounts" "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/params" @@ -126,9 +125,9 @@ func History22(genesis *core.Genesis, logger log.Logger) error { txNum += uint64(len(b.Transactions())) + 2 // Pre and Post block transaction continue } - readWrapper := &HistoryWrapper22{r: h} + readWrapper := state.NewHistoryReader22(h) if traceBlock != 0 { - readWrapper.trace = blockNum == uint64(traceBlock) + readWrapper.SetTrace(blockNum == uint64(traceBlock)) } writeWrapper := state.NewNoopWriter() txNum++ // Pre block transaction @@ -153,7 +152,7 @@ func History22(genesis *core.Genesis, logger log.Logger) error { return nil } -func runHistory22(trace bool, blockNum, txNumStart uint64, hw *HistoryWrapper22, ww state.StateWriter, chainConfig *params.ChainConfig, getHeader func(hash common.Hash, number uint64) *types.Header, block *types.Block, vmConfig vm.Config) (uint64, types.Receipts, error) { +func runHistory22(trace bool, blockNum, txNumStart uint64, hw *state.HistoryReader22, ww state.StateWriter, chainConfig *params.ChainConfig, getHeader func(hash common.Hash, number uint64) *types.Header, block *types.Block, vmConfig vm.Config) (uint64, types.Receipts, error) { header := block.Header() vmConfig.TraceJumpDest = true engine := ethash.NewFullFaker() @@ -163,8 +162,7 @@ func runHistory22(trace bool, blockNum, txNumStart uint64, hw *HistoryWrapper22, daoBlock := chainConfig.DAOForkSupport && chainConfig.DAOForkBlock != nil && chainConfig.DAOForkBlock.Cmp(block.Number()) == 0 txNum := txNumStart for i, tx := range block.Transactions() { - hw.r.SetTxNum(txNum) - hw.txNum = txNum + hw.SetTxNum(txNum) ibs := state.New(hw) if daoBlock { misc.ApplyDAOHardFork(ibs) @@ -180,107 +178,8 @@ func runHistory22(trace bool, blockNum, txNumStart uint64, hw *HistoryWrapper22, } receipts = append(receipts, receipt) txNum++ - hw.r.SetTxNum(txNum) - hw.txNum = txNum + hw.SetTxNum(txNum) } return txNum, receipts, nil } - -// Implements StateReader and StateWriter -type HistoryWrapper22 struct { - r *libstate.Aggregator - txNum uint64 - trace bool -} - -func (hw *HistoryWrapper22) ReadAccountData(address common.Address) (*accounts.Account, error) { - enc, err := hw.r.ReadAccountDataBeforeTxNum(address.Bytes(), hw.txNum, nil /* roTx */) - if err != nil { - return nil, err - } - if len(enc) == 0 { - if hw.trace { - fmt.Printf("ReadAccountData [%x] => []\n", address) - } - return nil, nil - } - var a accounts.Account - a.Reset() - pos := 0 - nonceBytes := int(enc[pos]) - pos++ - if nonceBytes > 0 { - a.Nonce = bytesToUint64(enc[pos : pos+nonceBytes]) - pos += nonceBytes - } - balanceBytes := int(enc[pos]) - pos++ - if balanceBytes > 0 { - a.Balance.SetBytes(enc[pos : pos+balanceBytes]) - pos += balanceBytes - } - codeHashBytes := int(enc[pos]) - pos++ - if codeHashBytes > 0 { - copy(a.CodeHash[:], enc[pos:pos+codeHashBytes]) - pos += codeHashBytes - } - if pos >= len(enc) { - fmt.Printf("panic 
ReadAccountData(%x)=>[%x]\n", address, enc) - } - incBytes := int(enc[pos]) - pos++ - if incBytes > 0 { - a.Incarnation = bytesToUint64(enc[pos : pos+incBytes]) - } - if hw.trace { - fmt.Printf("ReadAccountData [%x] => [nonce: %d, balance: %d, codeHash: %x]\n", address, a.Nonce, &a.Balance, a.CodeHash) - } - return &a, nil -} - -func (hw *HistoryWrapper22) ReadAccountStorage(address common.Address, incarnation uint64, key *common.Hash) ([]byte, error) { - enc, err := hw.r.ReadAccountStorageBeforeTxNum(address.Bytes(), key.Bytes(), hw.txNum, nil /* roTx */) - if err != nil { - fmt.Printf("%v\n", err) - return nil, err - } - if hw.trace { - if enc == nil { - fmt.Printf("ReadAccountStorage [%x] [%x] => []\n", address, key.Bytes()) - } else { - fmt.Printf("ReadAccountStorage [%x] [%x] => [%x]\n", address, key.Bytes(), enc) - } - } - if enc == nil { - return nil, nil - } - return enc, nil -} - -func (hw *HistoryWrapper22) ReadAccountCode(address common.Address, incarnation uint64, codeHash common.Hash) ([]byte, error) { - enc, err := hw.r.ReadAccountCodeBeforeTxNum(address.Bytes(), hw.txNum, nil /* roTx */) - if err != nil { - return nil, err - } - if hw.trace { - fmt.Printf("ReadAccountCode [%x] => [%x]\n", address, enc) - } - return enc, nil -} - -func (hw *HistoryWrapper22) ReadAccountCodeSize(address common.Address, incarnation uint64, codeHash common.Hash) (int, error) { - size, err := hw.r.ReadAccountCodeSizeBeforeTxNum(address.Bytes(), hw.txNum, nil /* roTx */) - if err != nil { - return 0, err - } - if hw.trace { - fmt.Printf("ReadAccountCodeSize [%x] => [%d]\n", address, size) - } - return size, nil -} - -func (hw *HistoryWrapper22) ReadAccountIncarnation(address common.Address) (uint64, error) { - return 0, nil -} diff --git a/core/state/HistoryReader22.go b/core/state/HistoryReader22.go new file mode 100644 index 00000000000..e2d6ae826c8 --- /dev/null +++ b/core/state/HistoryReader22.go @@ -0,0 +1,129 @@ +package state + +import ( + "fmt" + + libstate "github.com/ledgerwatch/erigon-lib/state" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/core/types/accounts" +) + +func bytesToUint64(buf []byte) (x uint64) { + for i, b := range buf { + x = x<<8 + uint64(b) + if i == 7 { + return + } + } + return +} + +// Implements StateReader and StateWriter +type HistoryReader22 struct { + a *libstate.Aggregator + txNum uint64 + trace bool +} + +func NewHistoryReader22(a *libstate.Aggregator) *HistoryReader22 { + return &HistoryReader22{a: a} +} + +func (hr *HistoryReader22) SetTxNum(txNum uint64) { + hr.txNum = txNum + hr.a.SetTxNum(txNum) +} + +func (hr *HistoryReader22) SetTrace(trace bool) { + hr.trace = trace +} + +func (hr *HistoryReader22) ReadAccountData(address common.Address) (*accounts.Account, error) { + enc, err := hr.a.ReadAccountDataBeforeTxNum(address.Bytes(), hr.txNum, nil /* roTx */) + if err != nil { + return nil, err + } + if len(enc) == 0 { + if hr.trace { + fmt.Printf("ReadAccountData [%x] => []\n", address) + } + return nil, nil + } + var a accounts.Account + a.Reset() + pos := 0 + nonceBytes := int(enc[pos]) + pos++ + if nonceBytes > 0 { + a.Nonce = bytesToUint64(enc[pos : pos+nonceBytes]) + pos += nonceBytes + } + balanceBytes := int(enc[pos]) + pos++ + if balanceBytes > 0 { + a.Balance.SetBytes(enc[pos : pos+balanceBytes]) + pos += balanceBytes + } + codeHashBytes := int(enc[pos]) + pos++ + if codeHashBytes > 0 { + copy(a.CodeHash[:], enc[pos:pos+codeHashBytes]) + pos += codeHashBytes + } + if pos >= len(enc) { + fmt.Printf("panic 
ReadAccountData(%x)=>[%x]\n", address, enc) + } + incBytes := int(enc[pos]) + pos++ + if incBytes > 0 { + a.Incarnation = bytesToUint64(enc[pos : pos+incBytes]) + } + if hr.trace { + fmt.Printf("ReadAccountData [%x] => [nonce: %d, balance: %d, codeHash: %x]\n", address, a.Nonce, &a.Balance, a.CodeHash) + } + return &a, nil +} + +func (hr *HistoryReader22) ReadAccountStorage(address common.Address, incarnation uint64, key *common.Hash) ([]byte, error) { + enc, err := hr.a.ReadAccountStorageBeforeTxNum(address.Bytes(), key.Bytes(), hr.txNum, nil /* roTx */) + if err != nil { + return nil, err + } + if hr.trace { + if enc == nil { + fmt.Printf("ReadAccountStorage [%x] [%x] => []\n", address, key.Bytes()) + } else { + fmt.Printf("ReadAccountStorage [%x] [%x] => [%x]\n", address, key.Bytes(), enc) + } + } + if enc == nil { + return nil, nil + } + return enc, nil +} + +func (hr *HistoryReader22) ReadAccountCode(address common.Address, incarnation uint64, codeHash common.Hash) ([]byte, error) { + enc, err := hr.a.ReadAccountCodeBeforeTxNum(address.Bytes(), hr.txNum, nil /* roTx */) + if err != nil { + return nil, err + } + if hr.trace { + fmt.Printf("ReadAccountCode [%x] => [%x]\n", address, enc) + } + return enc, nil +} + +func (hr *HistoryReader22) ReadAccountCodeSize(address common.Address, incarnation uint64, codeHash common.Hash) (int, error) { + size, err := hr.a.ReadAccountCodeSizeBeforeTxNum(address.Bytes(), hr.txNum, nil /* roTx */) + if err != nil { + return 0, err + } + if hr.trace { + fmt.Printf("ReadAccountCodeSize [%x] => [%d]\n", address, size) + } + return size, nil +} + +func (hr *HistoryReader22) ReadAccountIncarnation(address common.Address) (uint64, error) { + return 0, nil +} diff --git a/go.mod b/go.mod index c9da44aeca3..c2211ce5018 100644 --- a/go.mod +++ b/go.mod @@ -34,7 +34,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible - github.com/ledgerwatch/erigon-lib v0.0.0-20220610055100-7ce8bd589f79 + github.com/ledgerwatch/erigon-lib v0.0.0-20220612091418-6cad65e62b27 github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 @@ -53,7 +53,6 @@ require ( github.com/ugorji/go/codec/codecgen v1.1.13 github.com/urfave/cli v1.22.8 github.com/valyala/fastjson v1.6.3 - github.com/wcharczuk/go-chart/v2 v2.1.0 github.com/xsleonard/go-merkle v1.1.0 go.uber.org/atomic v1.9.0 golang.org/x/crypto v0.0.0-20220507011949-2cf3adece122 @@ -101,7 +100,6 @@ require ( github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect github.com/go-stack/stack v1.8.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 // indirect github.com/golang/protobuf v1.5.2 // indirect github.com/google/uuid v1.3.0 // indirect github.com/huandu/xstrings v1.3.2 // indirect @@ -143,7 +141,6 @@ require ( github.com/valyala/fastrand v1.1.0 // indirect github.com/valyala/histogram v1.2.0 // indirect go.etcd.io/bbolt v1.3.6 // indirect - golang.org/x/image v0.0.0-20200927104501-e162460cd6b5 // indirect golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4 // indirect golang.org/x/text v0.3.7 // indirect diff --git a/go.sum b/go.sum index 99f0c0641c3..ecd901f1cb4 100644 --- a/go.sum +++ b/go.sum @@ -239,8 +239,6 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= 
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.4.1 h1:pC5DB52sCeK48Wlb9oPcdhnjkz1TKt1D/P7WKJ0kUcQ= github.com/golang-jwt/jwt/v4 v4.4.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 h1:DACJavvAHhabrF08vX0COfcOBJRhZ8lUbR+ZWIs0Y5g= -github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -384,8 +382,8 @@ github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3P github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220610055100-7ce8bd589f79 h1:c97OsvCaCuDBpCkjMX/+uMbeA1OxfLQuvUniZQ/bnSM= -github.com/ledgerwatch/erigon-lib v0.0.0-20220610055100-7ce8bd589f79/go.mod h1:jNDE6PRPIA8wUdikJs8BvKtrFv101qOijIXA3HnDW8E= +github.com/ledgerwatch/erigon-lib v0.0.0-20220612091418-6cad65e62b27 h1:Yc4RSLapsihfiCA3CnUsixJpmLQPnMHRH8gqgN+GrKs= +github.com/ledgerwatch/erigon-lib v0.0.0-20220612091418-6cad65e62b27/go.mod h1:jNDE6PRPIA8wUdikJs8BvKtrFv101qOijIXA3HnDW8E= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= @@ -627,8 +625,6 @@ github.com/valyala/fastrand v1.1.0 h1:f+5HkLW4rsgzdNoleUOB69hyT9IlD2ZQh9GyDMfb5G github.com/valyala/fastrand v1.1.0/go.mod h1:HWqCzkrkg6QXT8V2EXWvXCoow7vLwOFN002oeRzjapQ= github.com/valyala/histogram v1.2.0 h1:wyYGAZZt3CpwUiIb9AU/Zbllg1llXyrtApRS815OLoQ= github.com/valyala/histogram v1.2.0/go.mod h1:Hb4kBwb4UxsaNbbbh+RRz8ZR6pdodR57tzWUS3BUzXY= -github.com/wcharczuk/go-chart/v2 v2.1.0 h1:tY2slqVQ6bN+yHSnDYwZebLQFkphK4WNrVwnt7CJZ2I= -github.com/wcharczuk/go-chart/v2 v2.1.0/go.mod h1:yx7MvAVNcP/kN9lKXM/NTce4au4DFN99j6i1OwDclNA= github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= @@ -673,8 +669,6 @@ golang.org/x/crypto v0.0.0-20220507011949-2cf3adece122/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20220428152302-39d4317da171 h1:TfdoLivD44QwvssI9Sv1xwa5DcL5XQr4au4sZ2F2NV4= golang.org/x/exp v0.0.0-20220428152302-39d4317da171/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE= -golang.org/x/image v0.0.0-20200927104501-e162460cd6b5 h1:QelT11PB4FXiDEXucrfNckHoFxwt8USGY1ajP1ZF5lM= -golang.org/x/image v0.0.0-20200927104501-e162460cd6b5/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod 
h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= diff --git a/turbo/services/interfaces.go b/turbo/services/interfaces.go index 8bbfacd2b86..8ed3056d902 100644 --- a/turbo/services/interfaces.go +++ b/turbo/services/interfaces.go @@ -35,7 +35,7 @@ type BodyReader interface { type TxnReader interface { TxnLookup(ctx context.Context, tx kv.Getter, txnHash common.Hash) (uint64, bool, error) - //TxnByIdxInBlock(ctx context.Context, tx kv.Getter, blockNum uint64, i int) (txn types.Transaction, err error) + TxnByIdxInBlock(ctx context.Context, tx kv.Getter, blockNum uint64, i int) (txn types.Transaction, err error) } type HeaderAndCanonicalReader interface { HeaderReader diff --git a/turbo/snapshotsync/block_snapshots.go b/turbo/snapshotsync/block_snapshots.go index 48b8d8217b9..373079f3b6a 100644 --- a/turbo/snapshotsync/block_snapshots.go +++ b/turbo/snapshotsync/block_snapshots.go @@ -109,6 +109,22 @@ func (sn *BodySegment) reopen(dir string) (err error) { return nil } +func (sn *BodySegment) Iterate(f func(blockNum, baseTxNum, txAmout uint64)) error { + var buf []byte + g := sn.seg.MakeGetter() + blockNum := sn.idxBodyNumber.BaseDataID() + var b types.BodyForStorage + for g.HasNext() { + buf, _ = g.Next(buf[:0]) + if err := rlp.DecodeBytes(buf, &b); err != nil { + return err + } + f(blockNum, b.BaseTxId, uint64(b.TxAmount)) + blockNum++ + } + return nil +} + func (sn *TxnSegment) close() { if sn.Seg != nil { sn.Seg.Close() diff --git a/turbo/transactions/call.go b/turbo/transactions/call.go index 9df067e7f54..c034ccb6d12 100644 --- a/turbo/transactions/call.go +++ b/turbo/transactions/call.go @@ -11,7 +11,6 @@ import ( "github.com/ledgerwatch/erigon-lib/kv/kvcache" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/core" - "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" @@ -19,6 +18,7 @@ import ( "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/turbo/rpchelper" + "github.com/ledgerwatch/erigon/turbo/services" "github.com/ledgerwatch/log/v3" ) @@ -34,6 +34,7 @@ func DoCall( filters *rpchelper.Filters, stateCache kvcache.Cache, contractHasTEVM func(hash common.Hash) (bool, error), + headerReader services.HeaderReader, ) (*core.ExecutionResult, error) { // todo: Pending state is only known by the miner /* @@ -84,7 +85,7 @@ func DoCall( if err != nil { return nil, err } - blockCtx, txCtx := GetEvmContext(msg, header, blockNrOrHash.RequireCanonical, tx, contractHasTEVM) + blockCtx, txCtx := GetEvmContext(msg, header, blockNrOrHash.RequireCanonical, tx, contractHasTEVM, headerReader) evm := vm.NewEVM(blockCtx, txCtx, state, chainConfig, vm.Config{NoBaseFee: true}) @@ -108,7 +109,7 @@ func DoCall( return result, nil } -func GetEvmContext(msg core.Message, header *types.Header, requireCanonical bool, tx kv.Tx, contractHasTEVM func(address common.Hash) (bool, error)) (vm.BlockContext, vm.TxContext) { +func GetEvmContext(msg core.Message, header *types.Header, requireCanonical bool, tx kv.Tx, contractHasTEVM func(address common.Hash) (bool, error), headerReader services.HeaderReader) (vm.BlockContext, vm.TxContext) { var baseFee uint256.Int if header.Eip1559 { overflow := 
baseFee.SetFromBig(header.BaseFee) @@ -119,7 +120,7 @@ func GetEvmContext(msg core.Message, header *types.Header, requireCanonical bool return vm.BlockContext{ CanTransfer: core.CanTransfer, Transfer: core.Transfer, - GetHash: getHashGetter(requireCanonical, tx), + GetHash: getHashGetter(requireCanonical, tx, headerReader), ContractHasTEVM: contractHasTEVM, Coinbase: header.Coinbase, BlockNumber: header.Number.Uint64(), @@ -134,12 +135,13 @@ func GetEvmContext(msg core.Message, header *types.Header, requireCanonical bool } } -func getHashGetter(requireCanonical bool, tx kv.Tx) func(uint64) common.Hash { +func getHashGetter(requireCanonical bool, tx kv.Tx, headerReader services.HeaderReader) func(uint64) common.Hash { return func(n uint64) common.Hash { - hash, err := rawdb.ReadCanonicalHash(tx, n) + h, err := headerReader.HeaderByNumber(context.Background(), tx, n) if err != nil { - log.Debug("Can't get block hash by number", "number", n, "only-canonical", requireCanonical) + log.Error("Can't get block hash by number", "number", n, "only-canonical", requireCanonical) + return common.Hash{} } - return hash + return h.Hash() } } From eb497372aebcc60aabe6119db101523f02e216c9 Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Mon, 13 Jun 2022 15:43:09 +0200 Subject: [PATCH 047/136] Interruptible PoS block building (#4438) * Fix a typo * BlockBuilder dummy * BlockProposerParametersPOS -> BlockBuilderParameters * Pass tx to MiningStep * BlockBuilderFunc * Interrupt in MiningExec Stage * Draft implementation of BlockBuilder * Fail back to empty header * Add a comment * cosmetic change * Cosmetic change again * It's not safe to pass transactions between goroutines --- cmd/integration/commands/stages.go | 2 +- cmd/rpcdaemon/rpcdaemontest/test_util.go | 2 +- ...roposer.go => block_builder_parameters.go} | 5 +- eth/backend.go | 12 +- eth/stagedsync/stage_headers.go | 2 +- eth/stagedsync/stage_mining_create_block.go | 44 ++--- eth/stagedsync/stage_mining_exec.go | 34 ++-- ethdb/privateapi/all.go | 4 +- ethdb/privateapi/ethbackend.go | 156 +++++------------- turbo/builder/block_builder.go | 56 +++++++ turbo/stages/mock_sentry.go | 2 +- 11 files changed, 139 insertions(+), 180 deletions(-) rename core/{block_proposer.go => block_builder_parameters.go} (50%) create mode 100644 turbo/builder/block_builder.go diff --git a/cmd/integration/commands/stages.go b/cmd/integration/commands/stages.go index 43e970a147b..89d0bf39076 100644 --- a/cmd/integration/commands/stages.go +++ b/cmd/integration/commands/stages.go @@ -1205,7 +1205,7 @@ func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig) miningSync := stagedsync.New( stagedsync.MiningStages(ctx, stagedsync.StageMiningCreateBlockCfg(db, miner, *chainConfig, engine, nil, nil, nil, tmpdir), - stagedsync.StageMiningExecCfg(db, miner, events, *chainConfig, engine, &vm.Config{}, tmpdir), + stagedsync.StageMiningExecCfg(db, miner, events, *chainConfig, engine, &vm.Config{}, tmpdir, nil), stagedsync.StageHashStateCfg(db, tmpdir), stagedsync.StageTrieCfg(db, false, true, tmpdir, br), stagedsync.StageMiningFinishCfg(db, *chainConfig, engine, miner, ctx.Done()), diff --git a/cmd/rpcdaemon/rpcdaemontest/test_util.go b/cmd/rpcdaemon/rpcdaemontest/test_util.go index 4df38906345..b8b3bac9a81 100644 --- a/cmd/rpcdaemon/rpcdaemontest/test_util.go +++ b/cmd/rpcdaemon/rpcdaemontest/test_util.go @@ -4,7 +4,6 @@ import ( "context" "crypto/ecdsa" "encoding/binary" - 
"github.com/ledgerwatch/erigon/consensus" "math/big" "net" "testing" @@ -18,6 +17,7 @@ import ( "github.com/ledgerwatch/erigon/accounts/abi/bind/backends" "github.com/ledgerwatch/erigon/cmd/rpcdaemon/commands/contracts" "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/consensus/ethash" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/types" diff --git a/core/block_proposer.go b/core/block_builder_parameters.go similarity index 50% rename from core/block_proposer.go rename to core/block_builder_parameters.go index b8f81b8e537..ee64c1578ce 100644 --- a/core/block_proposer.go +++ b/core/block_builder_parameters.go @@ -2,8 +2,9 @@ package core import "github.com/ledgerwatch/erigon/common" -// See https://github.com/ethereum/execution-apis/blob/main/src/engine/specification.md#payloadattributesv1 -type BlockProposerParametersPOS struct { +// Parameters for PoS block building +// See also https://github.com/ethereum/execution-apis/blob/v1.0.0-alpha.9/src/engine/specification.md#payloadattributesv1 +type BlockBuilderParameters struct { ParentHash common.Hash Timestamp uint64 PrevRandao common.Hash diff --git a/eth/backend.go b/eth/backend.go index e604a1e3df2..b96e0a836d9 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -365,7 +365,7 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere mining := stagedsync.New( stagedsync.MiningStages(backend.sentryCtx, stagedsync.StageMiningCreateBlockCfg(backend.chainDB, miner, *backend.chainConfig, backend.engine, backend.txPool2, backend.txPool2DB, nil, tmpdir), - stagedsync.StageMiningExecCfg(backend.chainDB, miner, backend.notifications.Events, *backend.chainConfig, backend.engine, &vm.Config{}, tmpdir), + stagedsync.StageMiningExecCfg(backend.chainDB, miner, backend.notifications.Events, *backend.chainConfig, backend.engine, &vm.Config{}, tmpdir, nil), stagedsync.StageHashStateCfg(backend.chainDB, tmpdir), stagedsync.StageTrieCfg(backend.chainDB, false, true, tmpdir, blockReader), stagedsync.StageMiningFinishCfg(backend.chainDB, *backend.chainConfig, backend.engine, miner, backend.miningSealingQuit), @@ -375,14 +375,15 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere if casted, ok := backend.engine.(*ethash.Ethash); ok { ethashApi = casted.APIs(nil)[1].Service.(*ethash.API) } + // proof-of-stake mining - assembleBlockPOS := func(param *core.BlockProposerParametersPOS) (*types.Block, error) { + assembleBlockPOS := func(param *core.BlockBuilderParameters, interrupt *int32) (*types.Block, error) { miningStatePos := stagedsync.NewProposingState(&config.Miner) miningStatePos.MiningConfig.Etherbase = param.SuggestedFeeRecipient proposingSync := stagedsync.New( stagedsync.MiningStages(backend.sentryCtx, stagedsync.StageMiningCreateBlockCfg(backend.chainDB, miningStatePos, *backend.chainConfig, backend.engine, backend.txPool2, backend.txPool2DB, param, tmpdir), - stagedsync.StageMiningExecCfg(backend.chainDB, miningStatePos, backend.notifications.Events, *backend.chainConfig, backend.engine, &vm.Config{}, tmpdir), + stagedsync.StageMiningExecCfg(backend.chainDB, miningStatePos, backend.notifications.Events, *backend.chainConfig, backend.engine, &vm.Config{}, tmpdir, interrupt), stagedsync.StageHashStateCfg(backend.chainDB, tmpdir), stagedsync.StageTrieCfg(backend.chainDB, false, true, tmpdir, blockReader), stagedsync.StageMiningFinishCfg(backend.chainDB, *backend.chainConfig, backend.engine, miningStatePos, 
backend.miningSealingQuit), @@ -400,10 +401,7 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere blockReader, chainConfig, backend.sentriesClient.Hd.BeaconRequestList, backend.sentriesClient.Hd.PayloadStatusCh, assembleBlockPOS, config.Miner.EnabledPOS) miningRPC = privateapi.NewMiningServer(ctx, backend, ethashApi) - // If we enabled the proposer flag we initiates the block proposing thread - if config.Miner.EnabledPOS && chainConfig.TerminalTotalDifficulty != nil { - ethBackendRPC.StartProposer() - } + if stack.Config().PrivateApiAddr != "" { var creds credentials.TransportCredentials if stack.Config().TLSConnection { diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 3ef91ed5894..284fd14a03a 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -574,7 +574,7 @@ func verifyAndSaveNewPoSHeader( } } else { // Side chain or something weird - // TODO(yperbasis): considered non-canonical because some missing headers were donloaded but not canonized + // TODO(yperbasis): considered non-canonical because some missing headers were downloaded but not canonized // Or it's not a problem because forkChoice is updated frequently? if requestStatus == engineapi.New { cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{Status: remote.EngineStatus_ACCEPTED} diff --git a/eth/stagedsync/stage_mining_create_block.go b/eth/stagedsync/stage_mining_create_block.go index 9bb1bea5170..a5d1dde905c 100644 --- a/eth/stagedsync/stage_mining_create_block.go +++ b/eth/stagedsync/stage_mining_create_block.go @@ -65,26 +65,26 @@ func NewProposingState(cfg *params.MiningConfig) MiningState { } type MiningCreateBlockCfg struct { - db kv.RwDB - miner MiningState - chainConfig params.ChainConfig - engine consensus.Engine - txPool2 *txpool.TxPool - txPool2DB kv.RoDB - tmpdir string - blockProposerParameters *core.BlockProposerParametersPOS + db kv.RwDB + miner MiningState + chainConfig params.ChainConfig + engine consensus.Engine + txPool2 *txpool.TxPool + txPool2DB kv.RoDB + tmpdir string + blockBuilderParameters *core.BlockBuilderParameters } -func StageMiningCreateBlockCfg(db kv.RwDB, miner MiningState, chainConfig params.ChainConfig, engine consensus.Engine, txPool2 *txpool.TxPool, txPool2DB kv.RoDB, blockProposerParameters *core.BlockProposerParametersPOS, tmpdir string) MiningCreateBlockCfg { +func StageMiningCreateBlockCfg(db kv.RwDB, miner MiningState, chainConfig params.ChainConfig, engine consensus.Engine, txPool2 *txpool.TxPool, txPool2DB kv.RoDB, blockBuilderParameters *core.BlockBuilderParameters, tmpdir string) MiningCreateBlockCfg { return MiningCreateBlockCfg{ - db: db, - miner: miner, - chainConfig: chainConfig, - engine: engine, - txPool2: txPool2, - txPool2DB: txPool2DB, - tmpdir: tmpdir, - blockProposerParameters: blockProposerParameters, + db: db, + miner: miner, + chainConfig: chainConfig, + engine: engine, + txPool2: txPool2, + txPool2DB: txPool2DB, + tmpdir: tmpdir, + blockBuilderParameters: blockBuilderParameters, } } @@ -111,8 +111,8 @@ func SpawnMiningCreateBlockStage(s *StageState, tx kv.RwTx, cfg MiningCreateBloc return fmt.Errorf("empty block %d", executionAt) } - if cfg.blockProposerParameters != nil && cfg.blockProposerParameters.ParentHash != parent.Hash() { - return fmt.Errorf("wrong head block: %x (current) vs %x (requested)", parent.Hash(), cfg.blockProposerParameters.ParentHash) + if cfg.blockBuilderParameters != nil && cfg.blockBuilderParameters.ParentHash != parent.Hash() { + return 
fmt.Errorf("wrong head block: %x (current) vs %x (requested)", parent.Hash(), cfg.blockBuilderParameters.ParentHash) } isTrans, err := rawdb.Transitioned(tx, executionAt, cfg.chainConfig.TerminalTotalDifficulty) @@ -125,7 +125,7 @@ func SpawnMiningCreateBlockStage(s *StageState, tx kv.RwTx, cfg MiningCreateBloc return fmt.Errorf("refusing to mine without etherbase") } // If we do not have an etherbase, let's use the suggested one - coinbase = cfg.blockProposerParameters.SuggestedFeeRecipient + coinbase = cfg.blockBuilderParameters.SuggestedFeeRecipient } blockNum := executionAt + 1 @@ -206,7 +206,7 @@ func SpawnMiningCreateBlockStage(s *StageState, tx kv.RwTx, cfg MiningCreateBloc } } else { // If we are on proof-of-stake timestamp should be already set for us - timestamp = cfg.blockProposerParameters.Timestamp + timestamp = cfg.blockBuilderParameters.Timestamp } header := core.MakeEmptyHeader(parent, &cfg.chainConfig, timestamp, &cfg.miner.MiningConfig.GasLimit) @@ -231,7 +231,7 @@ func SpawnMiningCreateBlockStage(s *StageState, tx kv.RwTx, cfg MiningCreateBloc } if isTrans { - header.MixDigest = cfg.blockProposerParameters.PrevRandao + header.MixDigest = cfg.blockBuilderParameters.PrevRandao current.Header = header current.Uncles = nil diff --git a/eth/stagedsync/stage_mining_exec.go b/eth/stagedsync/stage_mining_exec.go index 96e1a4e610c..3afa9922767 100644 --- a/eth/stagedsync/stage_mining_exec.go +++ b/eth/stagedsync/stage_mining_exec.go @@ -2,6 +2,7 @@ package stagedsync import ( "fmt" + "sync/atomic" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" @@ -32,6 +33,7 @@ type MiningExecCfg struct { blockReader services.FullBlockReader vmConfig *vm.Config tmpdir string + interrupt *int32 } func StageMiningExecCfg( @@ -42,6 +44,7 @@ func StageMiningExecCfg( engine consensus.Engine, vmConfig *vm.Config, tmpdir string, + interrupt *int32, ) MiningExecCfg { return MiningExecCfg{ db: db, @@ -52,6 +55,7 @@ func StageMiningExecCfg( blockReader: snapshotsync.NewBlockReader(), vmConfig: vmConfig, tmpdir: tmpdir, + interrupt: interrupt, } } @@ -89,7 +93,7 @@ func SpawnMiningExecStage(s *StageState, tx kv.RwTx, cfg MiningExecCfg, quit <-c // empty block is necessary to keep the liveness of the network. 
if noempty { if !localTxs.Empty() { - logs, err := addTransactionsToMiningBlock(logPrefix, current, cfg.chainConfig, cfg.vmConfig, getHeader, contractHasTEVM, cfg.engine, localTxs, cfg.miningState.MiningConfig.Etherbase, ibs, quit) + logs, err := addTransactionsToMiningBlock(logPrefix, current, cfg.chainConfig, cfg.vmConfig, getHeader, contractHasTEVM, cfg.engine, localTxs, cfg.miningState.MiningConfig.Etherbase, ibs, quit, cfg.interrupt) if err != nil { return err } @@ -101,7 +105,7 @@ func SpawnMiningExecStage(s *StageState, tx kv.RwTx, cfg MiningExecCfg, quit <-c //} } if !remoteTxs.Empty() { - logs, err := addTransactionsToMiningBlock(logPrefix, current, cfg.chainConfig, cfg.vmConfig, getHeader, contractHasTEVM, cfg.engine, remoteTxs, cfg.miningState.MiningConfig.Etherbase, ibs, quit) + logs, err := addTransactionsToMiningBlock(logPrefix, current, cfg.chainConfig, cfg.vmConfig, getHeader, contractHasTEVM, cfg.engine, remoteTxs, cfg.miningState.MiningConfig.Etherbase, ibs, quit, cfg.interrupt) if err != nil { return err } @@ -165,7 +169,7 @@ func SpawnMiningExecStage(s *StageState, tx kv.RwTx, cfg MiningExecCfg, quit <-c return nil } -func addTransactionsToMiningBlock(logPrefix string, current *MiningBlock, chainConfig params.ChainConfig, vmConfig *vm.Config, getHeader func(hash common.Hash, number uint64) *types.Header, contractHasTEVM func(common.Hash) (bool, error), engine consensus.Engine, txs types.TransactionsStream, coinbase common.Address, ibs *state.IntraBlockState, quit <-chan struct{}) (types.Logs, error) { +func addTransactionsToMiningBlock(logPrefix string, current *MiningBlock, chainConfig params.ChainConfig, vmConfig *vm.Config, getHeader func(hash common.Hash, number uint64) *types.Header, contractHasTEVM func(common.Hash) (bool, error), engine consensus.Engine, txs types.TransactionsStream, coinbase common.Address, ibs *state.IntraBlockState, quit <-chan struct{}, interrupt *int32) (types.Logs, error) { header := current.Header tcount := 0 gasPool := new(core.GasPool).AddGas(current.Header.GasLimit) @@ -197,26 +201,10 @@ func addTransactionsToMiningBlock(logPrefix string, current *MiningBlock, chainC return nil, err } - // In the following three cases, we will interrupt the execution of the transaction. - // (1) new head block event arrival, the interrupt signal is 1 - // (2) worker start or restart, the interrupt signal is 1 - // (3) worker recreate the mining block with any newly arrived transactions, the interrupt signal is 2. - // For the first two cases, the semi-finished work will be discarded. - // For the third case, the semi-finished work will be submitted to the consensus engine. - //if interrupt != nil && atomic.LoadInt32(interrupt) != commitInterruptNone { - // // Notify resubmit loop to increase resubmitting interval due to too frequent commits. 
- // if atomic.LoadInt32(interrupt) == commitInterruptResubmit { - // ratio := float64(header.GasLimit-w.env.gasPool.Gas()) / float64(header.GasLimit) - // if ratio < 0.1 { - // ratio = 0.1 - // } - // w.resubmitAdjustCh <- &intervalAdjust{ - // ratio: ratio, - // inc: true, - // } - // } - // return atomic.LoadInt32(interrupt) == commitInterruptNewHead - //} + if interrupt != nil && atomic.LoadInt32(interrupt) != 0 { + log.Debug("Transaction adding was interrupted") + break + } // If we don't have enough gas for any further transactions then we're done if gasPool.Gas() < params.TxGas { log.Debug(fmt.Sprintf("[%s] Not enough gas for further transactions", logPrefix), "have", gasPool, "want", params.TxGas) diff --git a/ethdb/privateapi/all.go b/ethdb/privateapi/all.go index f3720c508be..d374074732f 100644 --- a/ethdb/privateapi/all.go +++ b/ethdb/privateapi/all.go @@ -5,10 +5,9 @@ import ( "net" "github.com/ledgerwatch/erigon-lib/gointerfaces/grpcutil" - "github.com/ledgerwatch/erigon-lib/kv/remotedbserver" - //grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" txpool_proto "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" + "github.com/ledgerwatch/erigon-lib/kv/remotedbserver" "github.com/ledgerwatch/log/v3" "google.golang.org/grpc" "google.golang.org/grpc/credentials" @@ -43,7 +42,6 @@ func StartGrpc(kv *remotedbserver.KvServer, ethBackendSrv *EthBackendServer, txP if healthCheck { defer healthServer.Shutdown() } - defer ethBackendSrv.StopProposer() if err := grpcServer.Serve(lis); err != nil { log.Error("private RPC server fail", "err", err) } diff --git a/ethdb/privateapi/ethbackend.go b/ethdb/privateapi/ethbackend.go index 47128009e9f..1adcf3eb524 100644 --- a/ethdb/privateapi/ethbackend.go +++ b/ethdb/privateapi/ethbackend.go @@ -21,6 +21,7 @@ import ( "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/turbo/builder" "github.com/ledgerwatch/erigon/turbo/engineapi" "github.com/ledgerwatch/erigon/turbo/services" "github.com/ledgerwatch/log/v3" @@ -28,8 +29,6 @@ import ( "google.golang.org/protobuf/types/known/emptypb" ) -type assemblePayloadPOSFunc func(param *core.BlockProposerParametersPOS) (*types.Block, error) - // EthBackendAPIVersion // 2.0.0 - move all mining-related methods to 'txpool/mining' server // 2.1.0 - add NetPeerCount function @@ -38,7 +37,7 @@ type assemblePayloadPOSFunc func(param *core.BlockProposerParametersPOS) (*types // 3.1.0 - add Subscribe to logs var EthBackendAPIVersion = &types2.VersionReply{Major: 3, Minor: 1, Patch: 0} -const MaxPendingPayloads = 128 +const MaxBuilders = 128 var UnknownPayloadErr = rpc.CustomError{Code: -38001, Message: "Unknown payload"} var InvalidForkchoiceStateErr = rpc.CustomError{Code: -38002, Message: "Invalid forkchoice state"} @@ -54,17 +53,16 @@ type EthBackendServer struct { blockReader services.BlockAndTxnReader config *params.ChainConfig // Block proposing for proof-of-stake - payloadId uint64 - pendingPayloads map[uint64]*pendingPayload + payloadId uint64 + builders map[uint64]*builder.BlockBuilder // Send Beacon Chain requests to staged sync requestList *engineapi.RequestList // Replies to newPayload & forkchoice requests - statusCh <-chan PayloadStatus - assemblePayloadPOS assemblePayloadPOSFunc - proposing bool - syncCond *sync.Cond // Engine API is asynchronous, we want to avoid CL to call different APIs at the same time - shutdown bool - logsFilter 
*LogsFilterAggregator + statusCh <-chan PayloadStatus + builderFunc builder.BlockBuilderFunc + proposing bool + lock sync.Mutex // Engine API is asynchronous, we want to avoid CL to call different APIs at the same time + logsFilter *LogsFilterAggregator } type EthBackend interface { @@ -85,19 +83,13 @@ type PayloadStatus struct { CriticalError error } -type pendingPayload struct { - block *types.Block - built bool -} - func NewEthBackendServer(ctx context.Context, eth EthBackend, db kv.RwDB, events *Events, blockReader services.BlockAndTxnReader, config *params.ChainConfig, requestList *engineapi.RequestList, statusCh <-chan PayloadStatus, - assemblePayloadPOS assemblePayloadPOSFunc, proposing bool, + builderFunc builder.BlockBuilderFunc, proposing bool, ) *EthBackendServer { s := &EthBackendServer{ctx: ctx, eth: eth, events: events, db: db, blockReader: blockReader, config: config, - requestList: requestList, statusCh: statusCh, pendingPayloads: make(map[uint64]*pendingPayload), - assemblePayloadPOS: assemblePayloadPOS, proposing: proposing, syncCond: sync.NewCond(&sync.Mutex{}), - logsFilter: NewLogsFilterAggregator(events), + requestList: requestList, statusCh: statusCh, builders: make(map[uint64]*builder.BlockBuilder), + builderFunc: builderFunc, proposing: proposing, logsFilter: NewLogsFilterAggregator(events), } ch, clean := s.events.AddLogsSubscription() @@ -283,8 +275,8 @@ func (s *EthBackendServer) stageLoopIsBusy() bool { // EngineNewPayloadV1 validates and possibly executes payload func (s *EthBackendServer) EngineNewPayloadV1(ctx context.Context, req *types2.ExecutionPayload) (*remote.EnginePayloadStatus, error) { log.Trace("[NewPayload] acquiring lock") - s.syncCond.L.Lock() - defer s.syncCond.L.Unlock() + s.lock.Lock() + defer s.lock.Unlock() log.Trace("[NewPayload] lock acquired") if s.config.TerminalTotalDifficulty == nil { @@ -380,23 +372,18 @@ func (s *EthBackendServer) EngineGetPayloadV1(ctx context.Context, req *remote.E return nil, fmt.Errorf("not a proof-of-stake chain") } - // TODO(yperbasis): getPayload should stop block assembly if that's currently in fly log.Trace("[GetPayload] acquiring lock") - s.syncCond.L.Lock() - defer s.syncCond.L.Unlock() + s.lock.Lock() + defer s.lock.Unlock() log.Trace("[GetPayload] lock acquired") - payload, ok := s.pendingPayloads[req.PayloadId] + builder, ok := s.builders[req.PayloadId] if !ok { log.Warn("Payload not stored", "payloadId", req.PayloadId) return nil, &UnknownPayloadErr } - // getPayload should stop the build process - // https://github.com/ethereum/execution-apis/blob/v1.0.0-alpha.7/src/engine/specification.md#payload-building - payload.built = true - - block := payload.block + block := builder.Stop() var baseFeeReply *types2.H256 if block.Header().BaseFee != nil { @@ -432,8 +419,8 @@ func (s *EthBackendServer) EngineGetPayloadV1(ctx context.Context, req *remote.E // EngineForkChoiceUpdatedV1 either states new block head or request the assembling of a new block func (s *EthBackendServer) EngineForkChoiceUpdatedV1(ctx context.Context, req *remote.EngineForkChoiceUpdatedRequest) (*remote.EngineForkChoiceUpdatedReply, error) { log.Trace("[ForkChoiceUpdated] acquiring lock") - s.syncCond.L.Lock() - defer s.syncCond.L.Unlock() + s.lock.Lock() + defer s.lock.Unlock() log.Trace("[ForkChoiceUpdated] lock acquired") if s.config.TerminalTotalDifficulty == nil { @@ -462,9 +449,9 @@ func (s *EthBackendServer) EngineForkChoiceUpdatedV1(ctx context.Context, req *r } // TODO(yperbasis): Client software MAY skip an update of the 
forkchoice state and - // MUST NOT begin a payload build process if forkchoiceState.headBlockHash doesn't reference a leaf of the block tree - // (i.e. it references an old block). - // https://github.com/ethereum/execution-apis/blob/v1.0.0-alpha.6/src/engine/specification.md#specification-1 + // MUST NOT begin a payload build process if forkchoiceState.headBlockHash doesn't reference a leaf of the block tree. + // That is, the block referenced by forkchoiceState.headBlockHash is neither the head of the canonical chain nor a block at the tip of any other chain. + // https://github.com/ethereum/execution-apis/blob/v1.0.0-alpha.9/src/engine/specification.md#specification-1 tx1.Rollback() if s.stageLoopIsBusy() { @@ -493,7 +480,7 @@ func (s *EthBackendServer) EngineForkChoiceUpdatedV1(ctx context.Context, req *r return nil, fmt.Errorf("execution layer not running as a proposer. enable proposer by taking out the --proposer.disable flag on startup") } - s.evictOldPendingPayloads() + s.evictOldBuilders() // payload IDs start from 1 (0 signifies null) s.payloadId++ @@ -519,12 +506,15 @@ func (s *EthBackendServer) EngineForkChoiceUpdatedV1(ctx context.Context, req *r emptyHeader.Coinbase = gointerfaces.ConvertH160toAddress(req.PayloadAttributes.SuggestedFeeRecipient) emptyHeader.MixDigest = gointerfaces.ConvertH256ToHash(req.PayloadAttributes.PrevRandao) - s.pendingPayloads[s.payloadId] = &pendingPayload{block: types.NewBlock(emptyHeader, nil, nil, nil)} + param := core.BlockBuilderParameters{ + ParentHash: forkChoice.HeadBlockHash, + Timestamp: req.PayloadAttributes.Timestamp, + PrevRandao: emptyHeader.MixDigest, + SuggestedFeeRecipient: emptyHeader.Coinbase, + } - log.Trace("[ForkChoiceUpdated] unpause assemble process") - s.syncCond.Broadcast() + s.builders[s.payloadId] = builder.NewBlockBuilder(s.builderFunc, ¶m, emptyHeader) - // successfully assembled the payload and assigned the correct id return &remote.EngineForkChoiceUpdatedReply{ PayloadStatus: &remote.EnginePayloadStatus{ Status: remote.EngineStatus_VALID, @@ -534,92 +524,20 @@ func (s *EthBackendServer) EngineForkChoiceUpdatedV1(ctx context.Context, req *r }, nil } -func (s *EthBackendServer) evictOldPendingPayloads() { +func (s *EthBackendServer) evictOldBuilders() { // sort payload IDs in ascending order - ids := make([]uint64, 0, len(s.pendingPayloads)) - for id := range s.pendingPayloads { + ids := make([]uint64, 0, len(s.builders)) + for id := range s.builders { ids = append(ids, id) } slices.Sort(ids) - // remove old payloads so that at most MaxPendingPayloads - 1 remain - for i := 0; i <= len(s.pendingPayloads)-MaxPendingPayloads; i++ { - delete(s.pendingPayloads, ids[i]) + // remove old builders so that at most MaxBuilders - 1 remain + for i := 0; i <= len(s.builders)-MaxBuilders; i++ { + delete(s.builders, ids[i]) } } -func (s *EthBackendServer) StartProposer() { - go func() { - log.Trace("[Proposer] acquiring lock") - s.syncCond.L.Lock() - defer s.syncCond.L.Unlock() - log.Trace("[Proposer] lock acquired") - - for { - var blockToBuild *types.Block - var payloadId uint64 - - FindPayloadToBuild: - for { - if s.shutdown { - return - } - - tx, err := s.db.BeginRo(s.ctx) - if err != nil { - log.Error("Error while opening txn in block proposer", "err", err.Error()) - return - } - headHash := rawdb.ReadHeadBlockHash(tx) - tx.Rollback() - - for id, payload := range s.pendingPayloads { - if !payload.built && payload.block.ParentHash() == headHash { - blockToBuild = payload.block - payloadId = id - break FindPayloadToBuild - } - } - - 
log.Trace("[Proposer] Wait until we have to process new payloads") - s.syncCond.Wait() - log.Trace("[Proposer] Wait finished") - } - - param := core.BlockProposerParametersPOS{ - ParentHash: blockToBuild.ParentHash(), - Timestamp: blockToBuild.Header().Time, - PrevRandao: blockToBuild.MixDigest(), - SuggestedFeeRecipient: blockToBuild.Header().Coinbase, - } - - log.Trace("[Proposer] starting assembling...") - block, err := s.assemblePayloadPOS(¶m) - log.Trace("[Proposer] payload assembled") - - if err != nil { - log.Warn("Error during block assembling", "err", err.Error()) - } else { - payload, ok := s.pendingPayloads[payloadId] - if ok && !payload.built { // don't update after engine_getPayload was called - payload.block = block - payload.built = true - } - } - } - }() -} - -func (s *EthBackendServer) StopProposer() { - log.Trace("[StopProposer] acquiring lock") - s.syncCond.L.Lock() - defer s.syncCond.L.Unlock() - log.Trace("[StopProposer] lock acquired") - - s.shutdown = true - s.syncCond.Broadcast() -} - func (s *EthBackendServer) NodeInfo(_ context.Context, r *remote.NodesInfoRequest) (*remote.NodesInfoReply, error) { nodesInfo, err := s.eth.NodesInfo(int(r.Limit)) if err != nil { diff --git a/turbo/builder/block_builder.go b/turbo/builder/block_builder.go new file mode 100644 index 00000000000..5fc57554171 --- /dev/null +++ b/turbo/builder/block_builder.go @@ -0,0 +1,56 @@ +package builder + +import ( + "sync" + "sync/atomic" + + "github.com/ledgerwatch/erigon/core" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/log/v3" +) + +type BlockBuilderFunc func(param *core.BlockBuilderParameters, interrupt *int32) (*types.Block, error) + +// BlockBuilder wraps a goroutine that builds Proof-of-Stake payloads (PoS "mining") +type BlockBuilder struct { + emptyHeader *types.Header + interrupt int32 + syncCond *sync.Cond + block *types.Block + err error +} + +func NewBlockBuilder(build BlockBuilderFunc, param *core.BlockBuilderParameters, emptyHeader *types.Header) *BlockBuilder { + b := new(BlockBuilder) + b.emptyHeader = emptyHeader + b.syncCond = sync.NewCond(new(sync.Mutex)) + + go func() { + block, err := build(param, &b.interrupt) + + b.syncCond.L.Lock() + defer b.syncCond.L.Unlock() + b.block = block + b.err = err + b.syncCond.Broadcast() + }() + + return b +} + +func (b *BlockBuilder) Stop() *types.Block { + atomic.StoreInt32(&b.interrupt, 1) + + b.syncCond.L.Lock() + defer b.syncCond.L.Unlock() + for b.block == nil && b.err == nil { + b.syncCond.Wait() + } + + if b.err != nil { + log.Error("BlockBuilder", "err", b.err) + return types.NewBlock(b.emptyHeader, nil, nil, nil) + } + + return b.block +} diff --git a/turbo/stages/mock_sentry.go b/turbo/stages/mock_sentry.go index 9e7978647d7..2de0eea076b 100644 --- a/turbo/stages/mock_sentry.go +++ b/turbo/stages/mock_sentry.go @@ -365,7 +365,7 @@ func MockWithEverything(t *testing.T, gspec *core.Genesis, key *ecdsa.PrivateKey mock.MiningSync = stagedsync.New( stagedsync.MiningStages(mock.Ctx, stagedsync.StageMiningCreateBlockCfg(mock.DB, miner, *mock.ChainConfig, mock.Engine, mock.TxPool, nil, nil, mock.tmpdir), - stagedsync.StageMiningExecCfg(mock.DB, miner, nil, *mock.ChainConfig, mock.Engine, &vm.Config{}, mock.tmpdir), + stagedsync.StageMiningExecCfg(mock.DB, miner, nil, *mock.ChainConfig, mock.Engine, &vm.Config{}, mock.tmpdir, nil), stagedsync.StageHashStateCfg(mock.DB, mock.tmpdir), stagedsync.StageTrieCfg(mock.DB, false, true, mock.tmpdir, blockReader), stagedsync.StageMiningFinishCfg(mock.DB, *mock.ChainConfig, 
mock.Engine, miner, mock.Ctx.Done()), From 1f36d76e0900a5782bb75a7e7a87f795b0bd4b2a Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Mon, 13 Jun 2022 18:20:18 +0200 Subject: [PATCH 048/136] Added Flush function to memory mutation (#4439) --- .../{miningmutation.go => memorymutation.go} | 136 ++++++++++++------ ...utation_test.go => memorymutation_test.go} | 27 +++- ...ationcursor.go => memorymutationcursor.go} | 58 ++++---- turbo/stages/stageloop.go | 2 +- 4 files changed, 145 insertions(+), 78 deletions(-) rename ethdb/olddb/{miningmutation.go => memorymutation.go} (63%) rename ethdb/olddb/{miningmutation_test.go => memorymutation_test.go} (80%) rename ethdb/olddb/{miningmutationcursor.go => memorymutationcursor.go} (83%) diff --git a/ethdb/olddb/miningmutation.go b/ethdb/olddb/memorymutation.go similarity index 63% rename from ethdb/olddb/miningmutation.go rename to ethdb/olddb/memorymutation.go index 22f7cc77976..6636b9e9b5a 100644 --- a/ethdb/olddb/miningmutation.go +++ b/ethdb/olddb/memorymutation.go @@ -24,7 +24,7 @@ import ( "github.com/ledgerwatch/erigon/ethdb" ) -type miningmutation struct { +type memorymutation struct { // Bucket => Key => Value memTx kv.RwTx memDb kv.RwDB @@ -42,13 +42,13 @@ type miningmutation struct { // defer batch.Rollback() // ... some calculations on `batch` // batch.Commit() -func NewMiningBatch(tx kv.Tx) *miningmutation { +func NewMemoryBatch(tx kv.Tx) *memorymutation { tmpDB := mdbx.NewMDBX(log.New()).InMem().MustOpen() memTx, err := tmpDB.BeginRw(context.Background()) if err != nil { panic(err) } - return &miningmutation{ + return &memorymutation{ db: tx, memDb: tmpDB, memTx: memTx, @@ -62,19 +62,19 @@ func NewMiningBatch(tx kv.Tx) *miningmutation { } } -func (m *miningmutation) RwKV() kv.RwDB { +func (m *memorymutation) RwKV() kv.RwDB { if casted, ok := m.db.(ethdb.HasRwKV); ok { return casted.RwKV() } return nil } -func (m *miningmutation) isTableCleared(table string) bool { +func (m *memorymutation) isTableCleared(table string) bool { _, ok := m.clearedTables[table] return ok } -func (m *miningmutation) isEntryDeleted(table string, key []byte) bool { +func (m *memorymutation) isEntryDeleted(table string, key []byte) bool { _, ok := m.deletedEntries[table] if !ok { return ok @@ -84,7 +84,7 @@ func (m *miningmutation) isEntryDeleted(table string, key []byte) bool { } // getMem Retrieve database entry from memory (hashed storage will be left out for now because it is the only non auto-DupSorted table) -func (m *miningmutation) getMem(table string, key []byte) ([]byte, bool) { +func (m *memorymutation) getMem(table string, key []byte) ([]byte, bool) { val, err := m.memTx.GetOne(table, key) if err != nil { panic(err) @@ -92,10 +92,10 @@ func (m *miningmutation) getMem(table string, key []byte) ([]byte, bool) { return val, val != nil } -func (m *miningmutation) DBSize() (uint64, error) { panic("not implemented") } -func (m *miningmutation) PageSize() uint64 { panic("not implemented") } +func (m *memorymutation) DBSize() (uint64, error) { panic("not implemented") } +func (m *memorymutation) PageSize() uint64 { panic("not implemented") } -func (m *miningmutation) IncrementSequence(bucket string, amount uint64) (res uint64, err error) { +func (m *memorymutation) IncrementSequence(bucket string, amount uint64) (res uint64, err error) { v, ok := m.getMem(kv.Sequence, []byte(bucket)) if !ok && m.db != nil { v, err = m.db.GetOne(kv.Sequence, []byte(bucket)) @@ -118,7 +118,7 @@ func (m *miningmutation) IncrementSequence(bucket string, amount uint64) (res ui 
return currentV, nil } -func (m *miningmutation) ReadSequence(bucket string) (res uint64, err error) { +func (m *memorymutation) ReadSequence(bucket string) (res uint64, err error) { v, ok := m.getMem(kv.Sequence, []byte(bucket)) if !ok && m.db != nil { v, err = m.db.GetOne(kv.Sequence, []byte(bucket)) @@ -135,7 +135,7 @@ func (m *miningmutation) ReadSequence(bucket string) (res uint64, err error) { } // Can only be called from the worker thread -func (m *miningmutation) GetOne(table string, key []byte) ([]byte, error) { +func (m *memorymutation) GetOne(table string, key []byte) ([]byte, error) { if value, ok := m.getMem(table, key); ok { if value == nil { return nil, nil @@ -154,7 +154,7 @@ func (m *miningmutation) GetOne(table string, key []byte) ([]byte, error) { } // Can only be called from the worker thread -func (m *miningmutation) Get(table string, key []byte) ([]byte, error) { +func (m *memorymutation) Get(table string, key []byte) ([]byte, error) { value, err := m.GetOne(table, key) if err != nil { return nil, err @@ -167,12 +167,12 @@ func (m *miningmutation) Get(table string, key []byte) ([]byte, error) { return value, nil } -func (m *miningmutation) Last(table string) ([]byte, []byte, error) { - panic("not implemented. (miningmutation.Last)") +func (m *memorymutation) Last(table string) ([]byte, []byte, error) { + panic("not implemented. (memorymutation.Last)") } // Has return whether a key is present in a certain table. -func (m *miningmutation) Has(table string, key []byte) (bool, error) { +func (m *memorymutation) Has(table string, key []byte) (bool, error) { if _, ok := m.getMem(table, key); ok { return ok, nil } @@ -183,38 +183,38 @@ func (m *miningmutation) Has(table string, key []byte) (bool, error) { } // Put insert a new entry in the database, if it is hashed storage it will add it to a slice instead of a map. 
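// The read path above resolves keys against the in-memory MDBX overlay first and only then
// against the wrapped kv.Tx. A minimal sketch of how a caller drives it, assuming code sitting
// in package olddb with an existing read transaction (the real call sites are the tests and the
// turbo/stages/stageloop.go hunk further down in this patch):
func exampleOverlayRoundTrip(tx kv.Tx) ([]byte, error) {
	batch := NewMemoryBatch(tx)
	defer batch.Rollback()
	// Staged in the in-memory tx only; the underlying tx is never written to here.
	if err := batch.Put(kv.HashedAccounts, []byte("AAAA"), []byte("v1")); err != nil {
		return nil, err
	}
	// Served from memory because the key was just written; unknown keys fall through to tx.
	return batch.GetOne(kv.HashedAccounts, []byte("AAAA"))
}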
-func (m *miningmutation) Put(table string, key []byte, value []byte) error { +func (m *memorymutation) Put(table string, key []byte, value []byte) error { return m.memTx.Put(table, key, value) } -func (m *miningmutation) Append(table string, key []byte, value []byte) error { +func (m *memorymutation) Append(table string, key []byte, value []byte) error { return m.Put(table, key, value) } -func (m *miningmutation) AppendDup(table string, key []byte, value []byte) error { +func (m *memorymutation) AppendDup(table string, key []byte, value []byte) error { return m.Put(table, key, value) } -func (m *miningmutation) BatchSize() int { +func (m *memorymutation) BatchSize() int { return 0 } -func (m *miningmutation) ForEach(bucket string, fromPrefix []byte, walker func(k, v []byte) error) error { +func (m *memorymutation) ForEach(bucket string, fromPrefix []byte, walker func(k, v []byte) error) error { m.panicOnEmptyDB() return m.db.ForEach(bucket, fromPrefix, walker) } -func (m *miningmutation) ForPrefix(bucket string, prefix []byte, walker func(k, v []byte) error) error { +func (m *memorymutation) ForPrefix(bucket string, prefix []byte, walker func(k, v []byte) error) error { m.panicOnEmptyDB() return m.db.ForPrefix(bucket, prefix, walker) } -func (m *miningmutation) ForAmount(bucket string, prefix []byte, amount uint32, walker func(k, v []byte) error) error { +func (m *memorymutation) ForAmount(bucket string, prefix []byte, amount uint32, walker func(k, v []byte) error) error { m.panicOnEmptyDB() return m.db.ForAmount(bucket, prefix, amount, walker) } -func (m *miningmutation) Delete(table string, k, v []byte) error { +func (m *memorymutation) Delete(table string, k, v []byte) error { if _, ok := m.deletedEntries[table]; !ok { m.deletedEntries[table] = make(map[string]struct{}) } @@ -222,70 +222,116 @@ func (m *miningmutation) Delete(table string, k, v []byte) error { return m.memTx.Delete(table, k, v) } -func (m *miningmutation) Commit() error { +func (m *memorymutation) Commit() error { return nil } -func (m *miningmutation) Rollback() { +func (m *memorymutation) Rollback() { m.memTx.Rollback() m.memDb.Close() return } -func (m *miningmutation) Close() { +func (m *memorymutation) Close() { m.Rollback() } -func (m *miningmutation) Begin(ctx context.Context, flags ethdb.TxFlags) (ethdb.DbWithPendingMutations, error) { +func (m *memorymutation) Begin(ctx context.Context, flags ethdb.TxFlags) (ethdb.DbWithPendingMutations, error) { panic("mutation can't start transaction, because doesn't own it") } -func (m *miningmutation) panicOnEmptyDB() { +func (m *memorymutation) panicOnEmptyDB() { if m.db == nil { panic("Not implemented") } } -func (m *miningmutation) SetRwKV(kv kv.RwDB) { +func (m *memorymutation) SetRwKV(kv kv.RwDB) { m.db.(ethdb.HasRwKV).SetRwKV(kv) } -func (m *miningmutation) BucketSize(bucket string) (uint64, error) { +func (m *memorymutation) BucketSize(bucket string) (uint64, error) { return 0, nil } -func (m *miningmutation) DropBucket(bucket string) error { +func (m *memorymutation) DropBucket(bucket string) error { panic("Not implemented") } -func (m *miningmutation) ExistsBucket(bucket string) (bool, error) { +func (m *memorymutation) ExistsBucket(bucket string) (bool, error) { panic("Not implemented") } -func (m *miningmutation) ListBuckets() ([]string, error) { +func (m *memorymutation) ListBuckets() ([]string, error) { panic("Not implemented") } -func (m *miningmutation) ClearBucket(bucket string) error { +func (m *memorymutation) ClearBucket(bucket string) error { 
m.clearedTables[bucket] = struct{}{} return m.memTx.ClearBucket(bucket) } -func (m *miningmutation) isBucketCleared(bucket string) bool { +func (m *memorymutation) isBucketCleared(bucket string) bool { _, ok := m.clearedTables[bucket] return ok } -func (m *miningmutation) CollectMetrics() { +func (m *memorymutation) CollectMetrics() { } -func (m *miningmutation) CreateBucket(bucket string) error { +func (m *memorymutation) CreateBucket(bucket string) error { panic("Not implemented") } +func (m *memorymutation) Flush(tx kv.RwTx) error { + // Obtain buckets touched. + buckets, err := m.memTx.ListBuckets() + if err != nil { + return err + } + // Iterate over each bucket and apply changes accordingly. + for _, bucket := range buckets { + if _, ok := m.dupsortTables[bucket]; ok && bucket != kv.HashedStorage { + cbucket, err := m.memTx.CursorDupSort(bucket) + if err != nil { + return err + } + defer cbucket.Close() + dbCursor, err := tx.RwCursorDupSort(bucket) + if err != nil { + return err + } + defer dbCursor.Close() + for k, v, err := cbucket.First(); k != nil; k, v, err = cbucket.Next() { + if err != nil { + return err + } + if err := dbCursor.AppendDup(k, v); err != nil { + return err + } + } + } else { + cbucket, err := m.memTx.Cursor(bucket) + if err != nil { + return err + } + defer cbucket.Close() + for k, v, err := cbucket.First(); k != nil; k, v, err = cbucket.Next() { + if err != nil { + return err + } + if err := tx.Put(bucket, k, v); err != nil { + return err + } + } + } + } + return nil +} + // Cursor creates a new cursor (the real fun begins here) -func (m *miningmutation) makeCursor(bucket string) (kv.RwCursorDupSort, error) { - c := &miningmutationcursor{} +func (m *memorymutation) makeCursor(bucket string) (kv.RwCursorDupSort, error) { + c := &memorymutationcursor{} // We can filter duplicates in dup sorted table c.table = bucket @@ -309,26 +355,26 @@ func (m *miningmutation) makeCursor(bucket string) (kv.RwCursorDupSort, error) { } // Cursor creates a new cursor (the real fun begins here) -func (m *miningmutation) RwCursorDupSort(bucket string) (kv.RwCursorDupSort, error) { +func (m *memorymutation) RwCursorDupSort(bucket string) (kv.RwCursorDupSort, error) { return m.makeCursor(bucket) } // Cursor creates a new cursor (the real fun begins here) -func (m *miningmutation) RwCursor(bucket string) (kv.RwCursor, error) { +func (m *memorymutation) RwCursor(bucket string) (kv.RwCursor, error) { return m.makeCursor(bucket) } // Cursor creates a new cursor (the real fun begins here) -func (m *miningmutation) CursorDupSort(bucket string) (kv.CursorDupSort, error) { +func (m *memorymutation) CursorDupSort(bucket string) (kv.CursorDupSort, error) { return m.makeCursor(bucket) } // Cursor creates a new cursor (the real fun begins here) -func (m *miningmutation) Cursor(bucket string) (kv.Cursor, error) { +func (m *memorymutation) Cursor(bucket string) (kv.Cursor, error) { return m.makeCursor(bucket) } // ViewID creates a new cursor (the real fun begins here) -func (m *miningmutation) ViewID() uint64 { +func (m *memorymutation) ViewID() uint64 { panic("ViewID Not implemented") } diff --git a/ethdb/olddb/miningmutation_test.go b/ethdb/olddb/memorymutation_test.go similarity index 80% rename from ethdb/olddb/miningmutation_test.go rename to ethdb/olddb/memorymutation_test.go index 2f046f03809..3f96479a2ce 100644 --- a/ethdb/olddb/miningmutation_test.go +++ b/ethdb/olddb/memorymutation_test.go @@ -35,7 +35,7 @@ func TestLastMiningDB(t *testing.T) { initializeDB(rwTx) - batch := 
NewMiningBatch(rwTx) + batch := NewMemoryBatch(rwTx) batch.Put(kv.HashedAccounts, []byte("BAAA"), []byte("value4")) batch.Put(kv.HashedAccounts, []byte("BCAA"), []byte("value5")) @@ -60,7 +60,7 @@ func TestLastMiningMem(t *testing.T) { initializeDB(rwTx) - batch := NewMiningBatch(rwTx) + batch := NewMemoryBatch(rwTx) batch.Put(kv.HashedAccounts, []byte("BAAA"), []byte("value4")) batch.Put(kv.HashedAccounts, []byte("DCAA"), []byte("value5")) @@ -84,7 +84,7 @@ func TestDeleteMining(t *testing.T) { require.NoError(t, err) initializeDB(rwTx) - batch := NewMiningBatch(rwTx) + batch := NewMemoryBatch(rwTx) batch.Put(kv.HashedAccounts, []byte("BAAA"), []byte("value4")) batch.Put(kv.HashedAccounts, []byte("DCAA"), []byte("value5")) batch.Put(kv.HashedAccounts, []byte("FCAA"), []byte("value5")) @@ -105,3 +105,24 @@ func TestDeleteMining(t *testing.T) { require.Equal(t, key, []byte(nil)) require.Equal(t, value, []byte(nil)) } + +func TestFlush(t *testing.T) { + rwTx, err := memdb.New().BeginRw(context.Background()) + require.NoError(t, err) + + initializeDB(rwTx) + batch := NewMemoryBatch(rwTx) + batch.Put(kv.HashedAccounts, []byte("BAAA"), []byte("value4")) + batch.Put(kv.HashedAccounts, []byte("AAAA"), []byte("value5")) + batch.Put(kv.HashedAccounts, []byte("FCAA"), []byte("value5")) + + require.NoError(t, batch.Flush(rwTx)) + + value, err := rwTx.GetOne(kv.HashedAccounts, []byte("BAAA")) + require.NoError(t, err) + require.Equal(t, value, []byte("value4")) + + value, err = rwTx.GetOne(kv.HashedAccounts, []byte("AAAA")) + require.NoError(t, err) + require.Equal(t, value, []byte("value5")) +} diff --git a/ethdb/olddb/miningmutationcursor.go b/ethdb/olddb/memorymutationcursor.go similarity index 83% rename from ethdb/olddb/miningmutationcursor.go rename to ethdb/olddb/memorymutationcursor.go index ed5765e1bd9..9c0f3aa9481 100644 --- a/ethdb/olddb/miningmutationcursor.go +++ b/ethdb/olddb/memorymutationcursor.go @@ -27,7 +27,7 @@ type cursorentry struct { } // cursor -type miningmutationcursor struct { +type memorymutationcursor struct { // we can keep one cursor type if we store 2 of each kind. cursor kv.Cursor dupCursor kv.CursorDupSort @@ -43,12 +43,12 @@ type miningmutationcursor struct { currentDbEntry cursorentry currentMemEntry cursorentry // we keep the mining mutation so that we can insert new elements in db - mutation *miningmutation + mutation *memorymutation table string } // First move cursor to first position and return key and value accordingly. 
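// The cursor type declared above keeps one cursor per source — the underlying db and the
// in-memory overlay — and always surfaces whichever side holds the smaller key, letting the
// overlay shadow the db on equal keys. A stripped-down sketch of that merge decision
// (illustrative only; assumes "bytes" is imported and a nil key means that side is exhausted):
func pickNext(memK, memV, dbK, dbV []byte) (k, v []byte, fromMem bool) {
	switch {
	case memK == nil: // memory exhausted, keep draining the db cursor
		return dbK, dbV, false
	case dbK == nil: // db exhausted, keep draining the memory cursor
		return memK, memV, true
	case bytes.Compare(memK, dbK) <= 0: // ties go to the overlay, shadowing the db entry
		return memK, memV, true
	default:
		return dbK, dbV, false
	}
}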
-func (m *miningmutationcursor) First() ([]byte, []byte, error) { +func (m *memorymutationcursor) First() ([]byte, []byte, error) { memKey, memValue, err := m.memCursor.First() if err != nil { return nil, nil, err @@ -68,7 +68,7 @@ func (m *miningmutationcursor) First() ([]byte, []byte, error) { return m.goForward(memKey, memValue, dbKey, dbValue) } -func (m *miningmutationcursor) getNextOnDb(dup bool) (key []byte, value []byte, err error) { +func (m *memorymutationcursor) getNextOnDb(dup bool) (key []byte, value []byte, err error) { if dup { key, value, err = m.dupCursor.NextDup() if err != nil { @@ -97,7 +97,7 @@ func (m *miningmutationcursor) getNextOnDb(dup bool) (key []byte, value []byte, return } -func (m *miningmutationcursor) convertAutoDupsort(key []byte, value []byte) []byte { +func (m *memorymutationcursor) convertAutoDupsort(key []byte, value []byte) []byte { // The only dupsorted table we are interested is HashedStorage if m.table != kv.HashedStorage { return key @@ -106,11 +106,11 @@ func (m *miningmutationcursor) convertAutoDupsort(key []byte, value []byte) []by } // Current return the current key and values the cursor is on. -func (m *miningmutationcursor) Current() ([]byte, []byte, error) { +func (m *memorymutationcursor) Current() ([]byte, []byte, error) { return common.CopyBytes(m.currentPair.key), common.CopyBytes(m.currentPair.value), nil } -func (m *miningmutationcursor) skipIntersection(memKey, memValue, dbKey, dbValue []byte) (newDbKey []byte, newDbValue []byte, err error) { +func (m *memorymutationcursor) skipIntersection(memKey, memValue, dbKey, dbValue []byte) (newDbKey []byte, newDbValue []byte, err error) { newDbKey = dbKey newDbValue = dbValue // Check for duplicates @@ -132,7 +132,7 @@ func (m *miningmutationcursor) skipIntersection(memKey, memValue, dbKey, dbValue return } -func (m *miningmutationcursor) goForward(memKey, memValue, dbKey, dbValue []byte) ([]byte, []byte, error) { +func (m *memorymutationcursor) goForward(memKey, memValue, dbKey, dbValue []byte) ([]byte, []byte, error) { var err error if memValue == nil && dbValue == nil { return nil, nil, nil @@ -167,7 +167,7 @@ func (m *miningmutationcursor) goForward(memKey, memValue, dbKey, dbValue []byte } // Next returns the next element of the mutation. -func (m *miningmutationcursor) Next() ([]byte, []byte, error) { +func (m *memorymutationcursor) Next() ([]byte, []byte, error) { if m.isPrevFromDb { k, v, err := m.getNextOnDb(false) if err != nil { @@ -185,7 +185,7 @@ func (m *miningmutationcursor) Next() ([]byte, []byte, error) { } // NextDup returns the next element of the mutation. -func (m *miningmutationcursor) NextDup() ([]byte, []byte, error) { +func (m *memorymutationcursor) NextDup() ([]byte, []byte, error) { if m.isPrevFromDb { k, v, err := m.getNextOnDb(true) @@ -204,7 +204,7 @@ func (m *miningmutationcursor) NextDup() ([]byte, []byte, error) { } // Seek move pointer to a key at a certain position. -func (m *miningmutationcursor) Seek(seek []byte) ([]byte, []byte, error) { +func (m *memorymutationcursor) Seek(seek []byte) ([]byte, []byte, error) { dbKey, dbValue, err := m.cursor.Seek(seek) if err != nil { return nil, nil, err @@ -226,7 +226,7 @@ func (m *miningmutationcursor) Seek(seek []byte) ([]byte, []byte, error) { } // Seek move pointer to a key at a certain position. 
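// convertAutoDupsort above special-cases kv.HashedStorage, the one auto-dupsorted table this
// mutation handles: the effective lookup key is the plain key with the first 32 bytes of the
// value appended. A small sketch of that composite key, assuming the value is laid out with the
// 32-byte storage-key hash first (illustrative only):
func compositeStorageKey(plainKey, value []byte) []byte {
	k := make([]byte, 0, len(plainKey)+32)
	k = append(k, plainKey...)
	return append(k, value[:32]...) // mirrors append(key, value[:32]...) in convertAutoDupsort
}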
-func (m *miningmutationcursor) SeekExact(seek []byte) ([]byte, []byte, error) { +func (m *memorymutationcursor) SeekExact(seek []byte) ([]byte, []byte, error) { memKey, memValue, err := m.memCursor.SeekExact(seek) if err != nil { return nil, nil, err @@ -257,37 +257,37 @@ func (m *miningmutationcursor) SeekExact(seek []byte) ([]byte, []byte, error) { return nil, nil, nil } -func (m *miningmutationcursor) Put(k, v []byte) error { +func (m *memorymutationcursor) Put(k, v []byte) error { return m.mutation.Put(m.table, common.CopyBytes(k), common.CopyBytes(v)) } -func (m *miningmutationcursor) Append(k []byte, v []byte) error { +func (m *memorymutationcursor) Append(k []byte, v []byte) error { return m.mutation.Put(m.table, common.CopyBytes(k), common.CopyBytes(v)) } -func (m *miningmutationcursor) AppendDup(k []byte, v []byte) error { +func (m *memorymutationcursor) AppendDup(k []byte, v []byte) error { return m.memDupCursor.AppendDup(common.CopyBytes(k), common.CopyBytes(v)) } -func (m *miningmutationcursor) PutNoDupData(key, value []byte) error { +func (m *memorymutationcursor) PutNoDupData(key, value []byte) error { panic("DeleteCurrentDuplicates Not implemented") } -func (m *miningmutationcursor) Delete(k, v []byte) error { +func (m *memorymutationcursor) Delete(k, v []byte) error { return m.mutation.Delete(m.table, k, v) } -func (m *miningmutationcursor) DeleteCurrent() error { +func (m *memorymutationcursor) DeleteCurrent() error { panic("DeleteCurrent Not implemented") } -func (m *miningmutationcursor) DeleteCurrentDuplicates() error { +func (m *memorymutationcursor) DeleteCurrentDuplicates() error { panic("DeleteCurrentDuplicates Not implemented") } // Seek move pointer to a key at a certain position. -func (m *miningmutationcursor) SeekBothRange(key, value []byte) ([]byte, error) { +func (m *memorymutationcursor) SeekBothRange(key, value []byte) ([]byte, error) { if value == nil { _, v, err := m.SeekExact(key) return v, err @@ -313,7 +313,7 @@ func (m *miningmutationcursor) SeekBothRange(key, value []byte) ([]byte, error) return retValue, err } -func (m *miningmutationcursor) Last() ([]byte, []byte, error) { +func (m *memorymutationcursor) Last() ([]byte, []byte, error) { // TODO(Giulio2002): make fixes. 
memKey, memValue, err := m.memCursor.Last() if err != nil { @@ -373,11 +373,11 @@ func (m *miningmutationcursor) Last() ([]byte, []byte, error) { return dbKey, dbValue, nil } -func (m *miningmutationcursor) Prev() ([]byte, []byte, error) { +func (m *memorymutationcursor) Prev() ([]byte, []byte, error) { panic("Prev is not implemented!") } -func (m *miningmutationcursor) Close() { +func (m *memorymutationcursor) Close() { if m.cursor != nil { m.cursor.Close() } @@ -387,26 +387,26 @@ func (m *miningmutationcursor) Close() { return } -func (m *miningmutationcursor) Count() (uint64, error) { +func (m *memorymutationcursor) Count() (uint64, error) { panic("Not implemented") } -func (m *miningmutationcursor) FirstDup() ([]byte, error) { +func (m *memorymutationcursor) FirstDup() ([]byte, error) { panic("Not implemented") } -func (m *miningmutationcursor) NextNoDup() ([]byte, []byte, error) { +func (m *memorymutationcursor) NextNoDup() ([]byte, []byte, error) { panic("Not implemented") } -func (m *miningmutationcursor) LastDup() ([]byte, error) { +func (m *memorymutationcursor) LastDup() ([]byte, error) { panic("Not implemented") } -func (m *miningmutationcursor) CountDuplicates() (uint64, error) { +func (m *memorymutationcursor) CountDuplicates() (uint64, error) { panic("Not implemented") } -func (m *miningmutationcursor) SeekBothExact(key, value []byte) ([]byte, []byte, error) { +func (m *memorymutationcursor) SeekBothExact(key, value []byte) ([]byte, []byte, error) { panic("SeekBothExact Not implemented") } diff --git a/turbo/stages/stageloop.go b/turbo/stages/stageloop.go index dacd8a2a2ca..50d442baf54 100644 --- a/turbo/stages/stageloop.go +++ b/turbo/stages/stageloop.go @@ -225,7 +225,7 @@ func MiningStep(ctx context.Context, kv kv.RwDB, mining *stagedsync.Sync) (err e } defer tx.Rollback() - miningBatch := olddb.NewMiningBatch(tx) + miningBatch := olddb.NewMemoryBatch(tx) defer miningBatch.Rollback() if err = mining.Run(nil, miningBatch, false); err != nil { From e47dd15e682e51238982a40909c12140feedeac5 Mon Sep 17 00:00:00 2001 From: ledgerwatch Date: Mon, 13 Jun 2022 21:25:17 +0100 Subject: [PATCH 049/136] [erigon2.2] Prototype of eth_getLogs (#4437) * Initial work * Fix compile errors * Update * Debug * Cleanup * Temp changes * Temp * Remove temp * Remove print * Upgate to erigon-lib main * go mod tidy Co-authored-by: Alex Sharp Co-authored-by: Alexey Sharp --- cmd/hack/hack.go | 12 ++ cmd/rpcdaemon22/commands/eth_receipts.go | 159 +++++++++++--------- cmd/rpcdaemon22/commands/trace_filtering.go | 4 +- cmd/rpctest/rpctest/bench_tracefilter.go | 64 ++++---- cmd/state/commands/calltracer22.go | 15 -- cmd/state/commands/erigon22.go | 11 +- go.mod | 2 +- go.sum | 4 +- 8 files changed, 139 insertions(+), 132 deletions(-) diff --git a/cmd/hack/hack.go b/cmd/hack/hack.go index 211159e64a5..596f45c4448 100644 --- a/cmd/hack/hack.go +++ b/cmd/hack/hack.go @@ -1303,6 +1303,16 @@ func readEf(file string, addr []byte) error { return nil } +func readBodies(file string) error { + datPath := file + ".dat" + decomp, err := compress.NewDecompressor(datPath) + if err != nil { + return err + } + defer decomp.Close() + return nil +} + func main() { debug.RaiseFdLimit() flag.Parse() @@ -1433,6 +1443,8 @@ func main() { err = findPrefix(*chaindata) case "readEf": err = readEf(*chaindata, common.FromHex(*account)) + case "readBodies": + err = readBodies(*chaindata) } if err != nil { diff --git a/cmd/rpcdaemon22/commands/eth_receipts.go b/cmd/rpcdaemon22/commands/eth_receipts.go index 988dd3389eb..6f76899da7b 
100644 --- a/cmd/rpcdaemon22/commands/eth_receipts.go +++ b/cmd/rpcdaemon22/commands/eth_receipts.go @@ -1,19 +1,18 @@ package commands import ( - "bytes" "context" - "encoding/binary" "fmt" "math/big" + "sort" "github.com/holiman/uint256" "github.com/ledgerwatch/erigon-lib/kv" + libstate "github.com/ledgerwatch/erigon-lib/state" "github.com/ledgerwatch/log/v3" - "github.com/RoaringBitmap/roaring" + "github.com/RoaringBitmap/roaring/roaring64" "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/common/hexutil" "github.com/ledgerwatch/erigon/consensus/ethash" "github.com/ledgerwatch/erigon/core" @@ -23,8 +22,6 @@ import ( "github.com/ledgerwatch/erigon/core/vm" "github.com/ledgerwatch/erigon/eth/filters" "github.com/ledgerwatch/erigon/ethdb" - "github.com/ledgerwatch/erigon/ethdb/bitmapdb" - "github.com/ledgerwatch/erigon/ethdb/cbor" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/turbo/transactions" @@ -114,90 +111,107 @@ func (api *APIImpl) GetLogs(ctx context.Context, crit filters.FilterCriteria) ([ if end < begin { return nil, fmt.Errorf("end (%d) < begin (%d)", end, begin) } + chainConfig, err := api.chainConfig(tx) + if err != nil { + return nil, err + } - blockNumbers := roaring.New() - blockNumbers.AddRange(begin, end+1) // [min,max) + var fromTxNum, toTxNum uint64 + if begin > 0 { + fromTxNum = api._txNums[begin-1] + } + toTxNum = api._txNums[end] // end is an inclusive bound + + txNumbers := roaring64.New() + txNumbers.AddRange(fromTxNum, toTxNum) // [min,max) - topicsBitmap, err := getTopicsBitmap(tx, crit.Topics, uint32(begin), uint32(end)) + topicsBitmap, err := getTopicsBitmap(api._agg, tx, crit.Topics, fromTxNum, toTxNum) if err != nil { return nil, err } if topicsBitmap != nil { - blockNumbers.And(topicsBitmap) + txNumbers.And(topicsBitmap) } - var addrBitmap *roaring.Bitmap + var addrBitmap *roaring64.Bitmap for _, addr := range crit.Addresses { - m, err := bitmapdb.Get(tx, kv.LogAddressIndex, addr[:], uint32(begin), uint32(end)) - if err != nil { - return nil, err + var bitmapForORing roaring64.Bitmap + it := api._agg.LogAddrIterator(addr.Bytes(), fromTxNum, toTxNum, nil) + for it.HasNext() { + bitmapForORing.Add(it.Next()) } if addrBitmap == nil { - addrBitmap = m + addrBitmap = &bitmapForORing continue } - addrBitmap = roaring.Or(addrBitmap, m) + addrBitmap = roaring64.Or(addrBitmap, &bitmapForORing) } if addrBitmap != nil { - blockNumbers.And(addrBitmap) + txNumbers.And(addrBitmap) } - if blockNumbers.GetCardinality() == 0 { + if txNumbers.GetCardinality() == 0 { return logs, nil } - - iter := blockNumbers.Iterator() + var lastBlockNum uint64 + var lastBlockHash common.Hash + var lastHeader *types.Header + var lastSigner *types.Signer + var lastRules *params.Rules + stateReader := state.NewHistoryReader22(api._agg) + iter := txNumbers.Iterator() for iter.HasNext() { - if err = ctx.Err(); err != nil { - return nil, err - } - - block := uint64(iter.Next()) - var logIndex uint - var blockLogs []*types.Log - err := tx.ForPrefix(kv.Log, dbutils.EncodeBlockNumber(block), func(k, v []byte) error { - var logs types.Logs - if err := cbor.Unmarshal(&logs, bytes.NewReader(v)); err != nil { - return fmt.Errorf("receipt unmarshal failed: %w", err) - } - for _, log := range logs { - log.Index = logIndex - logIndex++ - } - filtered := filterLogs(logs, crit.Addresses, crit.Topics) - if len(filtered) == 0 { - return nil - } - txIndex := 
uint(binary.BigEndian.Uint32(k[8:])) - for _, log := range filtered { - log.TxIndex = txIndex + txNum := iter.Next() + // Find block number + blockNum := uint64(sort.Search(len(api._txNums), func(i int) bool { + return api._txNums[i] > txNum + })) + if blockNum > lastBlockNum { + if lastHeader, err = api._blockReader.HeaderByNumber(ctx, nil, blockNum); err != nil { + return nil, err } - blockLogs = append(blockLogs, filtered...) - - return nil - }) - if err != nil { - return logs, err + lastBlockNum = blockNum + lastBlockHash = lastHeader.Hash() + lastSigner = types.MakeSigner(chainConfig, blockNum) + lastRules = chainConfig.Rules(blockNum) } - if len(blockLogs) == 0 { - continue + var startTxNum uint64 + if blockNum > 0 { + startTxNum = api._txNums[blockNum-1] } - - b, err := api.blockByNumberWithSenders(tx, block) + txIndex := txNum - startTxNum - 1 + //fmt.Printf("txNum=%d, blockNum=%d, txIndex=%d\n", txNum, blockNum, txIndex) + txn, err := api._txnReader.TxnByIdxInBlock(ctx, nil, blockNum, int(txIndex)) + if err != nil { + return nil, err + } + txHash := txn.Hash() + msg, err := txn.AsMessage(*lastSigner, lastHeader.BaseFee, lastRules) if err != nil { return nil, err } - if b == nil { - return nil, fmt.Errorf("block not found %d", block) + contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } + blockCtx, txCtx := transactions.GetEvmContext(msg, lastHeader, true /* requireCanonical */, tx, contractHasTEVM, api._blockReader) + stateReader.SetTxNum(txNum) + vmConfig := vm.Config{} + ibs := state.New(stateReader) + evm := vm.NewEVM(blockCtx, txCtx, ibs, chainConfig, vmConfig) + + gp := new(core.GasPool).AddGas(msg.Gas()) + ibs.Prepare(txHash, lastBlockHash, int(txIndex)) + _, err = core.ApplyMessage(evm, msg, gp, true /* refunds */, false /* gasBailout */) + if err != nil { + return nil, err } - blockHash := b.Hash() - for _, log := range blockLogs { - log.BlockNumber = block - log.BlockHash = blockHash - log.TxHash = b.Transactions()[log.TxIndex].Hash() + filtered := filterLogs(ibs.GetLogs(txHash), crit.Addresses, crit.Topics) + for _, log := range filtered { + log.BlockNumber = blockNum + log.BlockHash = lastBlockHash + log.TxHash = txHash + log.Index = 0 } - logs = append(logs, blockLogs...) + logs = append(logs, filtered...) 
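// The rewritten filter above is bitmap algebra over transaction numbers rather than a walk of
// per-block log indices: topics are OR-ed within a position and AND-ed across positions,
// addresses are OR-ed together, and both results are AND-ed into the [fromTxNum, toTxNum)
// range bitmap. A compact sketch of that composition, using only the roaring64 calls that
// already appear in this change (illustrative only; names are placeholders):
func combineFilters(rangeBM, addrUnion *roaring64.Bitmap, topicPositions []*roaring64.Bitmap) *roaring64.Bitmap {
	out := rangeBM // narrowed in place, like txNumbers above
	for _, pos := range topicPositions {
		out.And(pos) // each entry is already the OR of the topics at one position
	}
	if addrUnion != nil && addrUnion.GetCardinality() > 0 {
		out.And(addrUnion)
	}
	return out
}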
} return logs, nil @@ -214,30 +228,25 @@ func (api *APIImpl) GetLogs(ctx context.Context, crit filters.FilterCriteria) ([ // {{}, {B}} matches any topic in first position AND B in second position // {{A}, {B}} matches topic A in first position AND B in second position // {{A, B}, {C, D}} matches topic (A OR B) in first position AND (C OR D) in second position -func getTopicsBitmap(c kv.Tx, topics [][]common.Hash, from, to uint32) (*roaring.Bitmap, error) { - var result *roaring.Bitmap +func getTopicsBitmap(a *libstate.Aggregator, c kv.Tx, topics [][]common.Hash, from, to uint64) (*roaring64.Bitmap, error) { + var result *roaring64.Bitmap for _, sub := range topics { - var bitmapForORing *roaring.Bitmap + var bitmapForORing roaring64.Bitmap for _, topic := range sub { - m, err := bitmapdb.Get(c, kv.LogTopicIndex, topic[:], from, to) - if err != nil { - return nil, err - } - if bitmapForORing == nil { - bitmapForORing = m - continue + it := a.LogTopicIterator(topic.Bytes(), from, to, nil) + for it.HasNext() { + bitmapForORing.Add(it.Next()) } - bitmapForORing.Or(m) } - if bitmapForORing == nil { + if bitmapForORing.GetCardinality() == 0 { continue } if result == nil { - result = bitmapForORing + result = &bitmapForORing continue } - result = roaring.And(bitmapForORing, result) + result = roaring64.And(&bitmapForORing, result) } return result, nil } diff --git a/cmd/rpcdaemon22/commands/trace_filtering.go b/cmd/rpcdaemon22/commands/trace_filtering.go index 64e307883d8..0e1b0a22874 100644 --- a/cmd/rpcdaemon22/commands/trace_filtering.go +++ b/cmd/rpcdaemon22/commands/trace_filtering.go @@ -288,7 +288,7 @@ func (api *TraceAPIImpl) Filter(ctx context.Context, req TraceFilterRequest, str allTxs.AddRange(fromTxNum, toTxNum+1) } else { allTxs.RemoveRange(0, fromTxNum) - allTxs.RemoveRange(toTxNum+1, uint64(0x1000000000000)) + allTxs.RemoveRange(toTxNum, uint64(0x1000000000000)) } chainConfig, err := api.chainConfig(dbtx) @@ -414,7 +414,7 @@ func (api *TraceAPIImpl) Filter(ctx context.Context, req TraceFilterRequest, str startTxNum = api._txNums[blockNum-1] } txIndex := txNum - startTxNum - 1 - fmt.Printf("txNum=%d, blockNum=%d, txIndex=%d\n", txNum, blockNum, txIndex) + //fmt.Printf("txNum=%d, blockNum=%d, txIndex=%d\n", txNum, blockNum, txIndex) txn, err := api._txnReader.TxnByIdxInBlock(ctx, nil, blockNum, int(txIndex)) if err != nil { stream.WriteNil() diff --git a/cmd/rpctest/rpctest/bench_tracefilter.go b/cmd/rpctest/rpctest/bench_tracefilter.go index 17af54977d8..0d16b9c3ad2 100644 --- a/cmd/rpctest/rpctest/bench_tracefilter.go +++ b/cmd/rpctest/rpctest/bench_tracefilter.go @@ -107,38 +107,40 @@ func BenchTraceFilter(erigonURL, oeURL string, needCompare bool, blockFrom uint6 return } } - if len(accounts) > 1 { - from := accounts[0] - to := accounts[1] - reqGen.reqID++ - request := reqGen.traceFilterUnion(prevBn, bn, from, to) - errCtx := fmt.Sprintf("traceFilterUnion fromBlock %d, toBlock %d, fromAddress %x, toAddress %x", prevBn, bn, from, to) - if err := requestAndCompare(request, "trace_filter", errCtx, reqGen, needCompare, rec, errs, nil); err != nil { - fmt.Println(err) - return - } - reqGen.reqID++ - request = reqGen.traceFilterAfter(prevBn, bn, 1) - errCtx = fmt.Sprintf("traceFilterAfter fromBlock %d, toBlock %d, after %x", prevBn, bn, 1) - if err := requestAndCompare(request, "trace_filter", errCtx, reqGen, needCompare, rec, errs, nil); err != nil { - fmt.Println(err) - return - } - reqGen.reqID++ - request = reqGen.traceFilterCount(prevBn, bn, 1) - errCtx = 
fmt.Sprintf("traceFilterCount fromBlock %d, toBlock %d, count %x", prevBn, bn, 1) - if err := requestAndCompare(request, "trace_filter", errCtx, reqGen, needCompare, rec, errs, nil); err != nil { - fmt.Println(err) - return - } - reqGen.reqID++ - request = reqGen.traceFilterCountAfter(prevBn, bn, 1, 1) - errCtx = fmt.Sprintf("traceFilterCountAfter fromBlock %d, toBlock %d, count %x, after %x", prevBn, bn, 1, 1) - if err := requestAndCompare(request, "trace_filter", errCtx, reqGen, needCompare, rec, errs, nil); err != nil { - fmt.Println(err) - return + /* + if len(accounts) > 1 { + from := accounts[0] + to := accounts[1] + reqGen.reqID++ + request := reqGen.traceFilterUnion(prevBn, bn, from, to) + errCtx := fmt.Sprintf("traceFilterUnion fromBlock %d, toBlock %d, fromAddress %x, toAddress %x", prevBn, bn, from, to) + if err := requestAndCompare(request, "trace_filter", errCtx, reqGen, needCompare, rec, errs, nil); err != nil { + fmt.Println(err) + return + } + reqGen.reqID++ + request = reqGen.traceFilterAfter(prevBn, bn, 1) + errCtx = fmt.Sprintf("traceFilterAfter fromBlock %d, toBlock %d, after %x", prevBn, bn, 1) + if err := requestAndCompare(request, "trace_filter", errCtx, reqGen, needCompare, rec, errs, nil); err != nil { + fmt.Println(err) + return + } + reqGen.reqID++ + request = reqGen.traceFilterCount(prevBn, bn, 1) + errCtx = fmt.Sprintf("traceFilterCount fromBlock %d, toBlock %d, count %x", prevBn, bn, 1) + if err := requestAndCompare(request, "trace_filter", errCtx, reqGen, needCompare, rec, errs, nil); err != nil { + fmt.Println(err) + return + } + reqGen.reqID++ + request = reqGen.traceFilterCountAfter(prevBn, bn, 1, 1) + errCtx = fmt.Sprintf("traceFilterCountAfter fromBlock %d, toBlock %d, count %x, after %x", prevBn, bn, 1, 1) + if err := requestAndCompare(request, "trace_filter", errCtx, reqGen, needCompare, rec, errs, nil); err != nil { + fmt.Println(err) + return + } } - } + */ } fmt.Printf("Done blocks %d-%d, modified accounts: %d\n", prevBn, bn, len(mag.Result)) prevBn = bn diff --git a/cmd/state/commands/calltracer22.go b/cmd/state/commands/calltracer22.go index cf679518d8d..09e58b3dbc2 100644 --- a/cmd/state/commands/calltracer22.go +++ b/cmd/state/commands/calltracer22.go @@ -4,7 +4,6 @@ import ( "math/big" "time" - libstate "github.com/ledgerwatch/erigon-lib/state" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/core/vm" ) @@ -41,17 +40,3 @@ func (ct *CallTracer) CaptureAccountRead(account common.Address) error { func (ct *CallTracer) CaptureAccountWrite(account common.Address) error { return nil } - -func (ct *CallTracer) AddToAggregator(a *libstate.Aggregator) error { - for from := range ct.froms { - if err := a.AddTraceFrom(from[:]); err != nil { - return err - } - } - for to := range ct.tos { - if err := a.AddTraceTo(to[:]); err != nil { - return err - } - } - return nil -} diff --git a/cmd/state/commands/erigon22.go b/cmd/state/commands/erigon22.go index 2bf001921c9..9fd7f7bb7a1 100644 --- a/cmd/state/commands/erigon22.go +++ b/cmd/state/commands/erigon22.go @@ -254,6 +254,7 @@ func Erigon22(genesis *core.Genesis, chainConfig *params.ChainConfig, logger log return err } } + agg.SetTx(rwTx) } } @@ -302,7 +303,9 @@ func (s *stat22) delta(aStats libstate.FilesStats, blockNum uint64) *stat22 { return s } -func processBlock22(trace bool, txNumStart uint64, rw *ReaderWrapper22, ww *WriterWrapper22, chainConfig *params.ChainConfig, engine consensus.Engine, getHeader func(hash common.Hash, number uint64) *types.Header, block *types.Block, 
vmConfig vm.Config) (uint64, types.Receipts, error) { +func processBlock22(trace bool, txNumStart uint64, rw *ReaderWrapper22, ww *WriterWrapper22, chainConfig *params.ChainConfig, + engine consensus.Engine, getHeader func(hash common.Hash, number uint64) *types.Header, block *types.Block, vmConfig vm.Config, +) (uint64, types.Receipts, error) { defer blockExecutionTimer.UpdateDuration(time.Now()) header := block.Header() @@ -314,7 +317,6 @@ func processBlock22(trace bool, txNumStart uint64, rw *ReaderWrapper22, ww *Writ rules := chainConfig.Rules(block.NumberU64()) txNum := txNumStart ww.w.SetTxNum(txNum) - trace = block.NumberU64() == 1700059 for i, tx := range block.Transactions() { ibs := state.New(rw) @@ -335,9 +337,6 @@ func processBlock22(trace bool, txNumStart uint64, rw *ReaderWrapper22, ww *Writ } } for to := range ct.tos { - if trace { - fmt.Printf("TraceTo [%x]\n", to[:]) - } if err := ww.w.AddTraceTo(to[:]); err != nil { return 0, nil, err } @@ -357,7 +356,7 @@ func processBlock22(trace bool, txNumStart uint64, rw *ReaderWrapper22, ww *Writ return 0, nil, fmt.Errorf("finish tx %d [%x] failed: %w", i, tx.Hash(), err) } if trace { - fmt.Printf("FinishTx called for %d [%x]\n", txNum, tx.Hash()) + fmt.Printf("FinishTx called for blockNum=%d, txIndex=%d, txNum=%d txHash=[%x]\n", block.NumberU64(), i, txNum, tx.Hash()) } txNum++ ww.w.SetTxNum(txNum) diff --git a/go.mod b/go.mod index c2211ce5018..7a98e22e518 100644 --- a/go.mod +++ b/go.mod @@ -34,7 +34,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible - github.com/ledgerwatch/erigon-lib v0.0.0-20220612091418-6cad65e62b27 + github.com/ledgerwatch/erigon-lib v0.0.0-20220613183213-e2c6ef00585e github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 diff --git a/go.sum b/go.sum index ecd901f1cb4..32a838f9018 100644 --- a/go.sum +++ b/go.sum @@ -382,8 +382,8 @@ github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3P github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220612091418-6cad65e62b27 h1:Yc4RSLapsihfiCA3CnUsixJpmLQPnMHRH8gqgN+GrKs= -github.com/ledgerwatch/erigon-lib v0.0.0-20220612091418-6cad65e62b27/go.mod h1:jNDE6PRPIA8wUdikJs8BvKtrFv101qOijIXA3HnDW8E= +github.com/ledgerwatch/erigon-lib v0.0.0-20220613183213-e2c6ef00585e h1:wYFBZKp/jaWNpdATZK+e2nRYZDd4gejMYvtp7Ksdflc= +github.com/ledgerwatch/erigon-lib v0.0.0-20220613183213-e2c6ef00585e/go.mod h1:jNDE6PRPIA8wUdikJs8BvKtrFv101qOijIXA3HnDW8E= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= From f376d0a7a0cfcbca5aa3c9b54c3bef36f1ac87f0 Mon Sep 17 00:00:00 2001 From: laughship <107451833+laughship@users.noreply.github.com> Date: Mon, 13 Jun 2022 23:31:42 -0500 Subject: [PATCH 050/136] Remove debug output (#4442) --- cmd/downloader/downloader/downloader.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/downloader/downloader/downloader.go 
b/cmd/downloader/downloader/downloader.go index ca085ca0f09..e828fd80be9 100644 --- a/cmd/downloader/downloader/downloader.go +++ b/cmd/downloader/downloader/downloader.go @@ -205,7 +205,7 @@ func (d *Downloader) onComplete() { panic(err) } d.cfg.DataDir = snapDir - fmt.Printf("alex1: %s\n", d.cfg.DataDir) + // fmt.Printf("alex1: %s\n", d.cfg.DataDir) db, c, m, torrentClient, err := openClient(d.cfg.ClientConfig) if err != nil { From a5cb53d6908ae3315394bd36a7a79e4495144662 Mon Sep 17 00:00:00 2001 From: Enrique Jose Avila Asapche Date: Tue, 14 Jun 2022 11:07:46 +0300 Subject: [PATCH 051/136] safe and finalized blocks from eth_getBlockByNumber (#4436) * added getFinalzed and getSafe block num * added rpc finalized and safe block num * getting nums * returning nil * returning nil * added to helper.go * removed repeated code * added functions into rpchelper * returning err * simplified * using previous latest getter * getting pending block with filter/ * Fix plain state block number * Fix test Co-authored-by: Alexey Sharp Co-authored-by: Alex Sharp --- cmd/rpcdaemon/commands/bor_helper.go | 2 +- cmd/rpcdaemon/commands/erigon_block.go | 2 +- cmd/rpcdaemon/commands/eth_api.go | 2 +- cmd/rpcdaemon/commands/eth_block.go | 2 +- cmd/rpcdaemon/commands/eth_call.go | 6 +-- cmd/rpcdaemon/commands/eth_call_test.go | 8 ++- cmd/rpcdaemon/commands/eth_receipts.go | 5 +- cmd/rpcdaemon/commands/eth_system.go | 3 +- cmd/rpcdaemon/commands/eth_txs.go | 2 +- cmd/rpcdaemon/commands/eth_uncles.go | 4 +- cmd/rpcdaemon/commands/rpc_block.go | 45 ---------------- cmd/rpcdaemon/commands/rpc_getBlockNumber.go | 41 ++++++++++++++ cmd/rpcdaemon/commands/trace_filtering.go | 2 +- rpc/types.go | 2 + turbo/rpchelper/helper.go | 38 ++++++++----- turbo/rpchelper/rpc_block.go | 56 ++++++++++++++++++++ 16 files changed, 147 insertions(+), 73 deletions(-) delete mode 100644 cmd/rpcdaemon/commands/rpc_block.go create mode 100644 cmd/rpcdaemon/commands/rpc_getBlockNumber.go create mode 100644 turbo/rpchelper/rpc_block.go diff --git a/cmd/rpcdaemon/commands/bor_helper.go b/cmd/rpcdaemon/commands/bor_helper.go index 49d074307da..4056832f8a9 100644 --- a/cmd/rpcdaemon/commands/bor_helper.go +++ b/cmd/rpcdaemon/commands/bor_helper.go @@ -54,7 +54,7 @@ func getHeaderByNumber(number rpc.BlockNumber, api *BorImpl, tx kv.Tx) (*types.H return block.Header(), nil } - blockNum, err := getBlockNumber(number, tx) + blockNum, err := getBlockNumber(number, tx, api.filters) if err != nil { return nil, err } diff --git a/cmd/rpcdaemon/commands/erigon_block.go b/cmd/rpcdaemon/commands/erigon_block.go index d32557024e4..229a9c5c0f0 100644 --- a/cmd/rpcdaemon/commands/erigon_block.go +++ b/cmd/rpcdaemon/commands/erigon_block.go @@ -30,7 +30,7 @@ func (api *ErigonImpl) GetHeaderByNumber(ctx context.Context, blockNumber rpc.Bl } defer tx.Rollback() - blockNum, err := getBlockNumber(blockNumber, tx) + blockNum, err := getBlockNumber(blockNumber, tx, api.filters) if err != nil { return nil, err } diff --git a/cmd/rpcdaemon/commands/eth_api.go b/cmd/rpcdaemon/commands/eth_api.go index ab06d94ce5a..bb8e3f223a7 100644 --- a/cmd/rpcdaemon/commands/eth_api.go +++ b/cmd/rpcdaemon/commands/eth_api.go @@ -217,7 +217,7 @@ func (api *BaseAPI) blockByRPCNumber(number rpc.BlockNumber, tx kv.Tx) (*types.B return api.pendingBlock(), nil } - n, err := getBlockNumber(number, tx) + n, err := getBlockNumber(number, tx, api.filters) if err != nil { return nil, err } diff --git a/cmd/rpcdaemon/commands/eth_block.go b/cmd/rpcdaemon/commands/eth_block.go index 
811291c9cbf..fb5826b580b 100644 --- a/cmd/rpcdaemon/commands/eth_block.go +++ b/cmd/rpcdaemon/commands/eth_block.go @@ -284,7 +284,7 @@ func (api *APIImpl) GetBlockTransactionCountByNumber(ctx context.Context, blockN n := hexutil.Uint(len(b.Transactions())) return &n, nil } - blockNum, err := getBlockNumber(blockNr, tx) + blockNum, err := getBlockNumber(blockNr, tx, api.filters) if err != nil { return nil, err } diff --git a/cmd/rpcdaemon/commands/eth_call.go b/cmd/rpcdaemon/commands/eth_call.go index a015b1e94b2..b0ae482f266 100644 --- a/cmd/rpcdaemon/commands/eth_call.go +++ b/cmd/rpcdaemon/commands/eth_call.go @@ -76,9 +76,9 @@ func (api *APIImpl) Call(ctx context.Context, args ethapi.CallArgs, blockNrOrHas return result.Return(), result.Err } -func HeaderByNumberOrHash(ctx context.Context, tx kv.Tx, blockNrOrHash rpc.BlockNumberOrHash) (*types.Header, error) { +func HeaderByNumberOrHash(ctx context.Context, tx kv.Tx, blockNrOrHash rpc.BlockNumberOrHash, filters *rpchelper.Filters) (*types.Header, error) { if blockLabel, ok := blockNrOrHash.Number(); ok { - blockNum, err := getBlockNumber(blockLabel, tx) + blockNum, err := getBlockNumber(blockLabel, tx, filters) if err != nil { return nil, err } @@ -147,7 +147,7 @@ func (api *APIImpl) EstimateGas(ctx context.Context, argsOrNil *ethapi.CallArgs, hi = uint64(*args.Gas) } else { // Retrieve the block to act as the gas ceiling - h, err := HeaderByNumberOrHash(ctx, dbtx, bNrOrHash) + h, err := HeaderByNumberOrHash(ctx, dbtx, bNrOrHash, api.filters) if err != nil { return 0, err } diff --git a/cmd/rpcdaemon/commands/eth_call_test.go b/cmd/rpcdaemon/commands/eth_call_test.go index b3ee394e82d..a73a0b53342 100644 --- a/cmd/rpcdaemon/commands/eth_call_test.go +++ b/cmd/rpcdaemon/commands/eth_call_test.go @@ -5,19 +5,25 @@ import ( "fmt" "testing" + "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" "github.com/ledgerwatch/erigon-lib/kv/kvcache" "github.com/ledgerwatch/erigon/cmd/rpcdaemon/rpcdaemontest" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/internal/ethapi" "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/turbo/rpchelper" "github.com/ledgerwatch/erigon/turbo/snapshotsync" + "github.com/ledgerwatch/erigon/turbo/stages" ) func TestEstimateGas(t *testing.T) { db := rpcdaemontest.CreateTestKV(t) stateCache := kvcache.New(kvcache.DefaultCoherentConfig) - api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, stages.Mock(t)) + mining := txpool.NewMiningClient(conn) + ff := rpchelper.New(ctx, nil, nil, mining, func() {}) + api := NewEthAPI(NewBaseApi(ff, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) var from = common.HexToAddress("0x71562b71999873db5b286df957af199ec94617f7") var to = common.HexToAddress("0x0d3ab14bbad3d99f4203bd7a11acb94882050e7e") if _, err := api.EstimateGas(context.Background(), ðapi.CallArgs{ diff --git a/cmd/rpcdaemon/commands/eth_receipts.go b/cmd/rpcdaemon/commands/eth_receipts.go index 988dd3389eb..c986e3d1dad 100644 --- a/cmd/rpcdaemon/commands/eth_receipts.go +++ b/cmd/rpcdaemon/commands/eth_receipts.go @@ -27,6 +27,7 @@ import ( "github.com/ledgerwatch/erigon/ethdb/cbor" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/turbo/rpchelper" "github.com/ledgerwatch/erigon/turbo/transactions" ) @@ -89,7 +90,7 @@ func (api *APIImpl) 
GetLogs(ctx context.Context, crit filters.FilterCriteria) ([ end = *number } else { // Convert the RPC block numbers into internal representations - latest, err := getLatestBlockNumber(tx) + latest, err := rpchelper.GetLatestBlockNumber(tx) if err != nil { return nil, err } @@ -334,7 +335,7 @@ func (api *APIImpl) GetBlockReceipts(ctx context.Context, number rpc.BlockNumber } defer tx.Rollback() - blockNum, err := getBlockNumber(number, tx) + blockNum, err := getBlockNumber(number, tx, api.filters) if err != nil { return nil, err } diff --git a/cmd/rpcdaemon/commands/eth_system.go b/cmd/rpcdaemon/commands/eth_system.go index 1095cdd82a1..654c4fc2879 100644 --- a/cmd/rpcdaemon/commands/eth_system.go +++ b/cmd/rpcdaemon/commands/eth_system.go @@ -13,6 +13,7 @@ import ( "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/turbo/rpchelper" ) // BlockNumber implements eth_blockNumber. Returns the block number of most recent block. @@ -22,7 +23,7 @@ func (api *APIImpl) BlockNumber(ctx context.Context) (hexutil.Uint64, error) { return 0, err } defer tx.Rollback() - blockNum, err := getLatestBlockNumber(tx) + blockNum, err := rpchelper.GetLatestBlockNumber(tx) if err != nil { return 0, err } diff --git a/cmd/rpcdaemon/commands/eth_txs.go b/cmd/rpcdaemon/commands/eth_txs.go index 2cf26862d70..7383aee5c1f 100644 --- a/cmd/rpcdaemon/commands/eth_txs.go +++ b/cmd/rpcdaemon/commands/eth_txs.go @@ -198,7 +198,7 @@ func (api *APIImpl) GetTransactionByBlockNumberAndIndex(ctx context.Context, blo defer tx.Rollback() // https://infura.io/docs/ethereum/json-rpc/eth-getTransactionByBlockNumberAndIndex - blockNum, err := getBlockNumber(blockNr, tx) + blockNum, err := getBlockNumber(blockNr, tx, api.filters) if err != nil { return nil, err } diff --git a/cmd/rpcdaemon/commands/eth_uncles.go b/cmd/rpcdaemon/commands/eth_uncles.go index 4d21345ffc0..e4b2947bd18 100644 --- a/cmd/rpcdaemon/commands/eth_uncles.go +++ b/cmd/rpcdaemon/commands/eth_uncles.go @@ -20,7 +20,7 @@ func (api *APIImpl) GetUncleByBlockNumberAndIndex(ctx context.Context, number rp } defer tx.Rollback() - blockNum, err := getBlockNumber(number, tx) + blockNum, err := getBlockNumber(number, tx, api.filters) if err != nil { return nil, err } @@ -91,7 +91,7 @@ func (api *APIImpl) GetUncleCountByBlockNumber(ctx context.Context, number rpc.B } defer tx.Rollback() - blockNum, err := getBlockNumber(number, tx) + blockNum, err := getBlockNumber(number, tx, api.filters) if err != nil { return &n, err } diff --git a/cmd/rpcdaemon/commands/rpc_block.go b/cmd/rpcdaemon/commands/rpc_block.go deleted file mode 100644 index 9c001ba8ac7..00000000000 --- a/cmd/rpcdaemon/commands/rpc_block.go +++ /dev/null @@ -1,45 +0,0 @@ -package commands - -import ( - "fmt" - - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/core/rawdb" - "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/rpc" -) - -func getBlockNumber(number rpc.BlockNumber, tx kv.Tx) (uint64, error) { - var blockNum uint64 - var err error - if number == rpc.LatestBlockNumber || number == rpc.PendingBlockNumber { - blockNum, err = getLatestBlockNumber(tx) - if err != nil { - return 0, err - } - } else if number == rpc.EarliestBlockNumber { - blockNum = 0 - } else { - blockNum = uint64(number.Int64()) - } - - return blockNum, nil -} - -func getLatestBlockNumber(tx kv.Tx) (uint64, error) { - 
forkchoiceHeadHash := rawdb.ReadForkchoiceHead(tx) - if forkchoiceHeadHash != (common.Hash{}) { - forkchoiceHeadNum := rawdb.ReadHeaderNumber(tx, forkchoiceHeadHash) - if forkchoiceHeadNum != nil { - return *forkchoiceHeadNum, nil - } - } - - blockNum, err := stages.GetStageProgress(tx, stages.Execution) - if err != nil { - return 0, fmt.Errorf("getting latest block number: %w", err) - } - - return blockNum, nil -} diff --git a/cmd/rpcdaemon/commands/rpc_getBlockNumber.go b/cmd/rpcdaemon/commands/rpc_getBlockNumber.go new file mode 100644 index 00000000000..aa4c9f26b7e --- /dev/null +++ b/cmd/rpcdaemon/commands/rpc_getBlockNumber.go @@ -0,0 +1,41 @@ +package commands + +import ( + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/turbo/rpchelper" +) + +func getBlockNumber(number rpc.BlockNumber, tx kv.Tx, filters *rpchelper.Filters) (uint64, error) { + var blockNum uint64 + latest, err := rpchelper.GetLatestBlockNumber(tx) + if err != nil { + return 0, err + } + + switch number { + case rpc.LatestBlockNumber: + return latest, nil + + case rpc.PendingBlockNumber: + pendingBlock := filters.LastPendingBlock() + if pendingBlock == nil { + return latest, nil + } + return pendingBlock.NumberU64(), nil + + case rpc.EarliestBlockNumber: + blockNum = 0 + + case rpc.FinalizeBlockNumber: + return rpchelper.GetFinalizedBlockNumber(tx) + + case rpc.SafeBlockNumber: + return rpchelper.GetSafeBlockNumber(tx) + + default: + blockNum = uint64(number.Int64()) + } + + return blockNum, nil +} diff --git a/cmd/rpcdaemon/commands/trace_filtering.go b/cmd/rpcdaemon/commands/trace_filtering.go index fdae1610393..c8582b976ba 100644 --- a/cmd/rpcdaemon/commands/trace_filtering.go +++ b/cmd/rpcdaemon/commands/trace_filtering.go @@ -125,7 +125,7 @@ func (api *TraceAPIImpl) Block(ctx context.Context, blockNr rpc.BlockNumber) (Pa return nil, err } defer tx.Rollback() - blockNum, err := getBlockNumber(blockNr, tx) + blockNum, err := getBlockNumber(blockNr, tx, api.filters) if err != nil { return nil, err } diff --git a/rpc/types.go b/rpc/types.go index d6ddd664c36..1f7ea53c2e6 100644 --- a/rpc/types.go +++ b/rpc/types.go @@ -71,6 +71,8 @@ type BlockNumber int64 type Timestamp uint64 const ( + FinalizeBlockNumber = BlockNumber(-4) + SafeBlockNumber = BlockNumber(-3) PendingBlockNumber = BlockNumber(-2) LatestBlockNumber = BlockNumber(-1) EarliestBlockNumber = BlockNumber(0) diff --git a/turbo/rpchelper/helper.go b/turbo/rpchelper/helper.go index ec307e4fc1b..b87a61a68d9 100644 --- a/turbo/rpchelper/helper.go +++ b/turbo/rpchelper/helper.go @@ -33,29 +33,41 @@ func GetCanonicalBlockNumber(blockNrOrHash rpc.BlockNumberOrHash, tx kv.Tx, filt } func _GetBlockNumber(requireCanonical bool, blockNrOrHash rpc.BlockNumberOrHash, tx kv.Tx, filters *Filters) (blockNumber uint64, hash common.Hash, latest bool, err error) { - var latestBlockNumber uint64 - if latestBlockNumber, err = stages.GetStageProgress(tx, stages.Execution); err != nil { - return 0, common.Hash{}, false, fmt.Errorf("getting latest block number: %w", err) + // Due to changed semantics of `lastest` block in RPC request, it is now distinct + // from the block block number corresponding to the plain state + var plainStateBlockNumber uint64 + if plainStateBlockNumber, err = stages.GetStageProgress(tx, stages.Execution); err != nil { + return 0, common.Hash{}, false, fmt.Errorf("getting plain state block number: %w", err) } var ok bool hash, ok = blockNrOrHash.Hash() if !ok { number := 
*blockNrOrHash.BlockNumber - if number == rpc.LatestBlockNumber { - blockNumber = latestBlockNumber - } else if number == rpc.EarliestBlockNumber { + switch number { + case rpc.LatestBlockNumber: + if blockNumber, err = GetLatestBlockNumber(tx); err != nil { + return 0, common.Hash{}, false, err + } + case rpc.EarliestBlockNumber: blockNumber = 0 - } else if number == rpc.PendingBlockNumber { + case rpc.FinalizeBlockNumber: + blockNumber, err = GetFinalizedBlockNumber(tx) + if err != nil { + return 0, common.Hash{}, false, err + } + case rpc.SafeBlockNumber: + blockNumber, err = GetSafeBlockNumber(tx) + if err != nil { + return 0, common.Hash{}, false, err + } + case rpc.PendingBlockNumber: pendingBlock := filters.LastPendingBlock() if pendingBlock == nil { - blockNumber, err = stages.GetStageProgress(tx, stages.Execution) - if err != nil { - return 0, common.Hash{}, false, fmt.Errorf("getting latest block number: %w", err) - } + blockNumber = plainStateBlockNumber } else { return pendingBlock.NumberU64(), pendingBlock.Hash(), false, nil } - } else { + default: blockNumber = uint64(number.Int64()) } hash, err = rawdb.ReadCanonicalHash(tx, blockNumber) @@ -77,7 +89,7 @@ func _GetBlockNumber(requireCanonical bool, blockNrOrHash rpc.BlockNumberOrHash, return 0, common.Hash{}, false, nonCanonocalHashError{hash} } } - return blockNumber, hash, blockNumber == latestBlockNumber, nil + return blockNumber, hash, blockNumber == plainStateBlockNumber, nil } func GetAccount(tx kv.Tx, blockNumber uint64, address common.Address) (*accounts.Account, error) { diff --git a/turbo/rpchelper/rpc_block.go b/turbo/rpchelper/rpc_block.go new file mode 100644 index 00000000000..4d54b41e3e8 --- /dev/null +++ b/turbo/rpchelper/rpc_block.go @@ -0,0 +1,56 @@ +package rpchelper + +import ( + "fmt" + + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/eth/stagedsync/stages" + "github.com/ledgerwatch/erigon/rpc" +) + +var UnknownBlockError = &rpc.CustomError{ + Code: -39001, + Message: "Unknown block", +} + +func GetLatestBlockNumber(tx kv.Tx) (uint64, error) { + forkchoiceHeadHash := rawdb.ReadForkchoiceHead(tx) + if forkchoiceHeadHash != (common.Hash{}) { + forkchoiceHeadNum := rawdb.ReadHeaderNumber(tx, forkchoiceHeadHash) + if forkchoiceHeadNum != nil { + return *forkchoiceHeadNum, nil + } + } + + blockNum, err := stages.GetStageProgress(tx, stages.Execution) + if err != nil { + return 0, fmt.Errorf("getting latest block number: %w", err) + } + + return blockNum, nil +} + +func GetFinalizedBlockNumber(tx kv.Tx) (uint64, error) { + forkchoiceFinalizedHash := rawdb.ReadForkchoiceFinalized(tx) + if forkchoiceFinalizedHash != (common.Hash{}) { + forkchoiceFinalizedNum := rawdb.ReadHeaderNumber(tx, forkchoiceFinalizedHash) + if forkchoiceFinalizedNum != nil { + return *forkchoiceFinalizedNum, nil + } + } + + return 0, UnknownBlockError +} + +func GetSafeBlockNumber(tx kv.Tx) (uint64, error) { + forkchoiceSafeHash := rawdb.ReadForkchoiceSafe(tx) + if forkchoiceSafeHash != (common.Hash{}) { + forkchoiceSafeNum := rawdb.ReadHeaderNumber(tx, forkchoiceSafeHash) + if forkchoiceSafeNum != nil { + return *forkchoiceSafeNum, nil + } + } + return 0, UnknownBlockError +} From 92b1f06932559d5aad856ca39a33c3f86dad83e6 Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Tue, 14 Jun 2022 11:48:40 +0200 Subject: [PATCH 052/136] Gray Glacier bomb delay (#4443) --- 
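For illustration only (not part of the patches): the block-label helpers added in turbo/rpchelper/rpc_block.go in the preceding patch can be combined as below. The wrapper function name is hypothetical; the rpchelper calls and their signatures are taken from the new file.

package example // hypothetical package for the sketch

import (
	"github.com/ledgerwatch/erigon-lib/kv"
	"github.com/ledgerwatch/erigon/turbo/rpchelper"
)

// resolveLabels resolves the "latest", "finalized" and "safe" labels using the
// helpers shown above. GetFinalizedBlockNumber and GetSafeBlockNumber return
// rpchelper.UnknownBlockError until a forkchoice update has recorded the
// corresponding hash.
func resolveLabels(tx kv.Tx) (latest, finalized, safe uint64, err error) {
	if latest, err = rpchelper.GetLatestBlockNumber(tx); err != nil {
		return
	}
	if finalized, err = rpchelper.GetFinalizedBlockNumber(tx); err != nil {
		return
	}
	safe, err = rpchelper.GetSafeBlockNumber(tx)
	return
}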
consensus/ethash/consensus.go | 7 +++++ consensus/misc/eip1559_test.go | 48 ++++++++++++++++++++++------------ core/forkid/forkid_test.go | 14 +++++----- core/vm/runtime/runtime.go | 1 + params/chainspecs/mainnet.json | 1 + params/config.go | 13 ++++++++- 6 files changed, 60 insertions(+), 24 deletions(-) diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go index cf0a6f8e7c2..905bd329c7e 100644 --- a/consensus/ethash/consensus.go +++ b/consensus/ethash/consensus.go @@ -47,6 +47,11 @@ var ( maxUncles = 2 // Maximum number of uncles allowed in a single block allowedFutureBlockTimeSeconds = int64(15) // Max seconds from current time allowed for blocks, before they're considered future blocks + // calcDifficultyEip5133 is the difficulty adjustment algorithm as specified by EIP 5133. + // It offsets the bomb a total of 11.4M blocks. + // Specification EIP-5133: https://eips.ethereum.org/EIPS/eip-5133 + calcDifficultyEip5133 = makeDifficultyCalculator(11400000) + // calcDifficultyEip4345 is the difficulty adjustment algorithm as specified by EIP 4345. // It offsets the bomb a total of 10.7M blocks. // Specification EIP-4345: https://eips.ethereum.org/EIPS/eip-4345 @@ -277,6 +282,8 @@ func (ethash *Ethash) CalcDifficulty(chain consensus.ChainHeaderReader, time, pa func CalcDifficulty(config *params.ChainConfig, time, parentTime uint64, parentDifficulty *big.Int, parentNumber uint64, parentUncleHash common.Hash) *big.Int { next := parentNumber + 1 switch { + case config.IsGrayGlacier(next): + return calcDifficultyEip5133(time, parentTime, parentDifficulty, parentNumber, parentUncleHash) case config.IsArrowGlacier(next): return calcDifficultyEip4345(time, parentTime, parentDifficulty, parentNumber, parentUncleHash) case config.IsLondon(next): diff --git a/consensus/misc/eip1559_test.go b/consensus/misc/eip1559_test.go index 392a3a7067f..e1c7c96d530 100644 --- a/consensus/misc/eip1559_test.go +++ b/consensus/misc/eip1559_test.go @@ -29,23 +29,37 @@ import ( // do not use e.g. SetInt() on the numbers. 
For testing only func copyConfig(original *params.ChainConfig) *params.ChainConfig { return ¶ms.ChainConfig{ - ChainID: original.ChainID, - HomesteadBlock: original.HomesteadBlock, - DAOForkBlock: original.DAOForkBlock, - DAOForkSupport: original.DAOForkSupport, - TangerineWhistleBlock: original.TangerineWhistleBlock, - TangerineWhistleHash: original.TangerineWhistleHash, - SpuriousDragonBlock: original.SpuriousDragonBlock, - ByzantiumBlock: original.ByzantiumBlock, - ConstantinopleBlock: original.ConstantinopleBlock, - PetersburgBlock: original.PetersburgBlock, - IstanbulBlock: original.IstanbulBlock, - MuirGlacierBlock: original.MuirGlacierBlock, - BerlinBlock: original.BerlinBlock, - LondonBlock: original.LondonBlock, - ArrowGlacierBlock: original.ArrowGlacierBlock, - Ethash: original.Ethash, - Clique: original.Clique, + ChainName: original.ChainName, + ChainID: original.ChainID, + Consensus: original.Consensus, + HomesteadBlock: original.HomesteadBlock, + DAOForkBlock: original.DAOForkBlock, + DAOForkSupport: original.DAOForkSupport, + TangerineWhistleBlock: original.TangerineWhistleBlock, + TangerineWhistleHash: original.TangerineWhistleHash, + SpuriousDragonBlock: original.SpuriousDragonBlock, + ByzantiumBlock: original.ByzantiumBlock, + ConstantinopleBlock: original.ConstantinopleBlock, + PetersburgBlock: original.PetersburgBlock, + IstanbulBlock: original.IstanbulBlock, + MuirGlacierBlock: original.MuirGlacierBlock, + BerlinBlock: original.BerlinBlock, + LondonBlock: original.LondonBlock, + ArrowGlacierBlock: original.ArrowGlacierBlock, + GrayGlacierBlock: original.GrayGlacierBlock, + RamanujanBlock: original.RamanujanBlock, + NielsBlock: original.NielsBlock, + MirrorSyncBlock: original.MirrorSyncBlock, + BrunoBlock: original.BrunoBlock, + TerminalTotalDifficulty: original.TerminalTotalDifficulty, + TerminalBlockNumber: original.TerminalBlockNumber, + TerminalBlockHash: original.TerminalBlockHash, + MergeNetsplitBlock: original.MergeNetsplitBlock, + Ethash: original.Ethash, + Clique: original.Clique, + Aura: original.Aura, + Parlia: original.Parlia, + Bor: original.Bor, } } diff --git a/core/forkid/forkid_test.go b/core/forkid/forkid_test.go index 21f6f84f1bd..f29b109985c 100644 --- a/core/forkid/forkid_test.go +++ b/core/forkid/forkid_test.go @@ -57,16 +57,18 @@ func TestCreation(t *testing.T) { {7279999, ID{Hash: checksumToBytes(0xa00bc324), Next: 7280000}}, // Last Byzantium block {7280000, ID{Hash: checksumToBytes(0x668db0af), Next: 9069000}}, // First and last Constantinople, first Petersburg block {9068999, ID{Hash: checksumToBytes(0x668db0af), Next: 9069000}}, // Last Petersburg block - {9069000, ID{Hash: checksumToBytes(0x879d6e30), Next: 9200000}}, // First Istanbul and first Muir Glacier block - {9199999, ID{Hash: checksumToBytes(0x879d6e30), Next: 9200000}}, // Last Istanbul and first Muir Glacier block + {9069000, ID{Hash: checksumToBytes(0x879d6e30), Next: 9200000}}, // First Istanbul block + {9199999, ID{Hash: checksumToBytes(0x879d6e30), Next: 9200000}}, // Last Istanbul block {9200000, ID{Hash: checksumToBytes(0xe029e991), Next: 12244000}}, // First Muir Glacier block {12243999, ID{Hash: checksumToBytes(0xe029e991), Next: 12244000}}, // Last Muir Glacier block {12244000, ID{Hash: checksumToBytes(0x0eb440f6), Next: 12965000}}, // First Berlin block {12964999, ID{Hash: checksumToBytes(0x0eb440f6), Next: 12965000}}, // Last Berlin block {12965000, ID{Hash: checksumToBytes(0xb715077d), Next: 13773000}}, // First London block {13772999, ID{Hash: 
checksumToBytes(0xb715077d), Next: 13773000}}, // Last London block - {13773000, ID{Hash: checksumToBytes(0x20c327fc), Next: 0}}, // First Arrow Glacier block - {20000000, ID{Hash: checksumToBytes(0x20c327fc), Next: 0}}, // Future Arrow Glacier block + {13773000, ID{Hash: checksumToBytes(0x20c327fc), Next: 15050000}}, // First Arrow Glacier block + {15049999, ID{Hash: checksumToBytes(0x20c327fc), Next: 15050000}}, // Last Arrow Glacier block + {15050000, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 0}}, // First Gray Glacier block + {20000000, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 0}}, // Future Gray Glacier block }, }, // Ropsten test cases @@ -203,11 +205,11 @@ func TestValidation(t *testing.T) { // Local is mainnet Petersburg, remote is Rinkeby Petersburg. {7987396, ID{Hash: checksumToBytes(0xafec6b27), Next: 0}, ErrLocalIncompatibleOrStale}, - // Local is mainnet Arrow Glacier, far in the future. Remote announces Gopherium (non existing fork) + // Local is mainnet Gray Glacier, far in the future. Remote announces Gopherium (non existing fork) // at some future block 88888888, for itself, but past block for local. Local is incompatible. // // This case detects non-upgraded nodes with majority hash power (typical Ropsten mess). - {88888888, ID{Hash: checksumToBytes(0x20c327fc), Next: 88888888}, ErrLocalIncompatibleOrStale}, + {88888888, ID{Hash: checksumToBytes(0xf0afd0e3), Next: 88888888}, ErrLocalIncompatibleOrStale}, // Local is mainnet Byzantium. Remote is also in Byzantium, but announces Gopherium (non existing // fork) at block 7279999, before Petersburg. Local is incompatible. diff --git a/core/vm/runtime/runtime.go b/core/vm/runtime/runtime.go index 75deda37f5d..3d6a8f217ff 100644 --- a/core/vm/runtime/runtime.go +++ b/core/vm/runtime/runtime.go @@ -75,6 +75,7 @@ func setDefaults(cfg *Config) { BerlinBlock: new(big.Int), LondonBlock: new(big.Int), ArrowGlacierBlock: new(big.Int), + GrayGlacierBlock: new(big.Int), } } diff --git a/params/chainspecs/mainnet.json b/params/chainspecs/mainnet.json index c3ecaa123bb..74e4e2eb363 100644 --- a/params/chainspecs/mainnet.json +++ b/params/chainspecs/mainnet.json @@ -16,6 +16,7 @@ "berlinBlock": 12244000, "londonBlock": 12965000, "arrowGlacierBlock": 13773000, + "grayGlacierBlock": 15050000, "terminalBlockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", "ethash": {} } diff --git a/params/config.go b/params/config.go index ebd98ae772c..20007f128b6 100644 --- a/params/config.go +++ b/params/config.go @@ -241,6 +241,7 @@ type ChainConfig struct { BerlinBlock *big.Int `json:"berlinBlock,omitempty"` // Berlin switch block (nil = no fork, 0 = already on berlin) LondonBlock *big.Int `json:"londonBlock,omitempty"` // London switch block (nil = no fork, 0 = already on london) ArrowGlacierBlock *big.Int `json:"arrowGlacierBlock,omitempty"` // EIP-4345 (bomb delay) switch block (nil = no fork, 0 = already activated) + GrayGlacierBlock *big.Int `json:"grayGlacierBlock,omitempty"` // EIP-5133 (bomb delay) switch block (nil = no fork, 0 = already activated) RamanujanBlock *big.Int `json:"ramanujanBlock,omitempty" toml:",omitempty"` // ramanujanBlock switch block (nil = no fork, 0 = already activated) NielsBlock *big.Int `json:"nielsBlock,omitempty" toml:",omitempty"` // nielsBlock switch block (nil = no fork, 0 = already activated) @@ -391,7 +392,7 @@ func (c *ChainConfig) String() string { ) } - return fmt.Sprintf("{ChainID: %v, Homestead: %v, DAO: %v, DAO Support: %v, Tangerine Whistle: %v, Spurious Dragon: %v, 
Byzantium: %v, Constantinople: %v, Petersburg: %v, Istanbul: %v, Muir Glacier: %v, Berlin: %v, London: %v, Arrow Glacier: %v, Terminal Total Difficulty: %v, Engine: %v}", + return fmt.Sprintf("{ChainID: %v, Homestead: %v, DAO: %v, DAO Support: %v, Tangerine Whistle: %v, Spurious Dragon: %v, Byzantium: %v, Constantinople: %v, Petersburg: %v, Istanbul: %v, Muir Glacier: %v, Berlin: %v, London: %v, Arrow Glacier: %v, Gray Glacier: %v, Terminal Total Difficulty: %v, Engine: %v}", c.ChainID, c.HomesteadBlock, c.DAOForkBlock, @@ -406,6 +407,7 @@ func (c *ChainConfig) String() string { c.BerlinBlock, c.LondonBlock, c.ArrowGlacierBlock, + c.GrayGlacierBlock, c.TerminalTotalDifficulty, engine, ) @@ -541,6 +543,11 @@ func (c *ChainConfig) IsArrowGlacier(num uint64) bool { return isForked(c.ArrowGlacierBlock, num) } +// IsGrayGlacier returns whether num is either equal to the Gray Glacier (EIP-5133) fork block or greater. +func (c *ChainConfig) IsGrayGlacier(num uint64) bool { + return isForked(c.GrayGlacierBlock, num) +} + // CheckCompatible checks whether scheduled fork transitions have been imported // with a mismatching chain configuration. func (c *ChainConfig) CheckCompatible(newcfg *ChainConfig, height uint64) *ConfigCompatError { @@ -584,6 +591,7 @@ func (c *ChainConfig) CheckConfigForkOrder() error { {name: "berlinBlock", block: c.BerlinBlock}, {name: "londonBlock", block: c.LondonBlock}, {name: "arrowGlacierBlock", block: c.ArrowGlacierBlock, optional: true}, + {name: "grayGlacierBlock", block: c.GrayGlacierBlock, optional: true}, } { if lastFork.name != "" { // Next one must be higher number @@ -653,6 +661,9 @@ func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, head uint64) *ConfigC if isForkIncompatible(c.ArrowGlacierBlock, newcfg.ArrowGlacierBlock, head) { return newCompatError("Arrow Glacier fork block", c.ArrowGlacierBlock, newcfg.ArrowGlacierBlock) } + if isForkIncompatible(c.GrayGlacierBlock, newcfg.GrayGlacierBlock, head) { + return newCompatError("Gray Glacier fork block", c.GrayGlacierBlock, newcfg.GrayGlacierBlock) + } return nil } From 34d823c15a84c83404019c8a576d25cb37c2f5ed Mon Sep 17 00:00:00 2001 From: ledgerwatch Date: Tue, 14 Jun 2022 12:31:26 +0100 Subject: [PATCH 053/136] txid patch to try to reduce GC work during commit (#4441) * txid patch to try to reduce GC work during commit * Switch to erigon-lib main Co-authored-by: Alexey Sharp --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 7a98e22e518..4e53c7ae8c1 100644 --- a/go.mod +++ b/go.mod @@ -34,7 +34,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible - github.com/ledgerwatch/erigon-lib v0.0.0-20220613183213-e2c6ef00585e + github.com/ledgerwatch/erigon-lib v0.0.0-20220614104214-a450f5c34197 github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 @@ -48,7 +48,7 @@ require ( github.com/stretchr/testify v1.7.2 github.com/tendermint/go-amino v0.14.1 github.com/tendermint/tendermint v0.31.11 - github.com/torquem-ch/mdbx-go v0.24.2 + github.com/torquem-ch/mdbx-go v0.24.3-0.20220614090901-342411560dde github.com/ugorji/go/codec v1.1.13 github.com/ugorji/go/codec/codecgen v1.1.13 github.com/urfave/cli v1.22.8 diff --git a/go.sum b/go.sum index 32a838f9018..dbad0a8c14f 100644 --- a/go.sum +++ b/go.sum @@ -382,8 +382,8 @@ github.com/kylelemons/godebug 
v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3P github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220613183213-e2c6ef00585e h1:wYFBZKp/jaWNpdATZK+e2nRYZDd4gejMYvtp7Ksdflc= -github.com/ledgerwatch/erigon-lib v0.0.0-20220613183213-e2c6ef00585e/go.mod h1:jNDE6PRPIA8wUdikJs8BvKtrFv101qOijIXA3HnDW8E= +github.com/ledgerwatch/erigon-lib v0.0.0-20220614104214-a450f5c34197 h1:XZi/OsuXfAGMxoh60OiCUaAWTWLs8IhmCM6vVAu4Xao= +github.com/ledgerwatch/erigon-lib v0.0.0-20220614104214-a450f5c34197/go.mod h1:SOwq7m9Wm7ckQ+kxUwDYRchwuwO8lXhp1lhbLTUhMk8= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= @@ -608,8 +608,8 @@ github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDW github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tinylib/msgp v1.1.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/torquem-ch/mdbx-go v0.24.2 h1:FS1feDGgt1fmhiDXkaqOyOSWMt8X+d2wVvOTuc37PtI= -github.com/torquem-ch/mdbx-go v0.24.2/go.mod h1:T2fsoJDVppxfAPTLd1svUgH1kpPmeXdPESmroSHcL1E= +github.com/torquem-ch/mdbx-go v0.24.3-0.20220614090901-342411560dde h1:1nzKGldWC9T0ApRfV0jzH28DaBy1Yg5+rmjSiJ/G0dI= +github.com/torquem-ch/mdbx-go v0.24.3-0.20220614090901-342411560dde/go.mod h1:T2fsoJDVppxfAPTLd1svUgH1kpPmeXdPESmroSHcL1E= github.com/ugorji/go v1.1.13/go.mod h1:jxau1n+/wyTGLQoCkjok9r5zFa/FxT6eI5HiHKQszjc= github.com/ugorji/go/codec v1.1.13 h1:013LbFhocBoIqgHeIHKlV4JWYhqogATYWZhIcH0WHn4= github.com/ugorji/go/codec v1.1.13/go.mod h1:oNVt3Dq+FO91WNQ/9JnHKQP2QJxTzoN7wCBFCq1OeuU= From 352e5b3d2abdc6423a60ab752e19c3082eeeddca Mon Sep 17 00:00:00 2001 From: Enrique Jose Avila Asapche Date: Tue, 14 Jun 2022 16:29:49 +0300 Subject: [PATCH 054/136] Clean up (#4445) * changed to rpchelper * got rid of getBlockNumber file --- cmd/rpcdaemon/commands/bor_helper.go | 3 +- cmd/rpcdaemon/commands/erigon_block.go | 3 +- cmd/rpcdaemon/commands/eth_api.go | 2 +- cmd/rpcdaemon/commands/eth_block.go | 2 +- cmd/rpcdaemon/commands/eth_call.go | 46 +++++++++----------- cmd/rpcdaemon/commands/eth_receipts.go | 2 +- cmd/rpcdaemon/commands/eth_txs.go | 3 +- cmd/rpcdaemon/commands/eth_uncles.go | 6 +-- cmd/rpcdaemon/commands/rpc_getBlockNumber.go | 41 ----------------- cmd/rpcdaemon/commands/trace_filtering.go | 4 +- 10 files changed, 34 insertions(+), 78 deletions(-) delete mode 100644 cmd/rpcdaemon/commands/rpc_getBlockNumber.go diff --git a/cmd/rpcdaemon/commands/bor_helper.go b/cmd/rpcdaemon/commands/bor_helper.go index 4056832f8a9..bb054a8054b 100644 --- a/cmd/rpcdaemon/commands/bor_helper.go +++ b/cmd/rpcdaemon/commands/bor_helper.go @@ -13,6 +13,7 @@ import ( "github.com/ledgerwatch/erigon/crypto" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/turbo/rpchelper" ) const ( @@ -54,7 +55,7 @@ func getHeaderByNumber(number rpc.BlockNumber, api *BorImpl, tx kv.Tx) (*types.H return block.Header(), 
nil } - blockNum, err := getBlockNumber(number, tx, api.filters) + blockNum, _, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(number), tx, api.filters) if err != nil { return nil, err } diff --git a/cmd/rpcdaemon/commands/erigon_block.go b/cmd/rpcdaemon/commands/erigon_block.go index 229a9c5c0f0..f094311a6be 100644 --- a/cmd/rpcdaemon/commands/erigon_block.go +++ b/cmd/rpcdaemon/commands/erigon_block.go @@ -11,6 +11,7 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/internal/ethapi" "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/turbo/rpchelper" ) // GetHeaderByNumber implements erigon_getHeaderByNumber. Returns a block's header given a block number ignoring the block's transaction and uncle list (may be faster). @@ -30,7 +31,7 @@ func (api *ErigonImpl) GetHeaderByNumber(ctx context.Context, blockNumber rpc.Bl } defer tx.Rollback() - blockNum, err := getBlockNumber(blockNumber, tx, api.filters) + blockNum, _, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(blockNumber), tx, api.filters) if err != nil { return nil, err } diff --git a/cmd/rpcdaemon/commands/eth_api.go b/cmd/rpcdaemon/commands/eth_api.go index bb8e3f223a7..98a469ac143 100644 --- a/cmd/rpcdaemon/commands/eth_api.go +++ b/cmd/rpcdaemon/commands/eth_api.go @@ -217,7 +217,7 @@ func (api *BaseAPI) blockByRPCNumber(number rpc.BlockNumber, tx kv.Tx) (*types.B return api.pendingBlock(), nil } - n, err := getBlockNumber(number, tx, api.filters) + n, _, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(number), tx, api.filters) if err != nil { return nil, err } diff --git a/cmd/rpcdaemon/commands/eth_block.go b/cmd/rpcdaemon/commands/eth_block.go index fb5826b580b..1da5b7b0293 100644 --- a/cmd/rpcdaemon/commands/eth_block.go +++ b/cmd/rpcdaemon/commands/eth_block.go @@ -284,7 +284,7 @@ func (api *APIImpl) GetBlockTransactionCountByNumber(ctx context.Context, blockN n := hexutil.Uint(len(b.Transactions())) return &n, nil } - blockNum, err := getBlockNumber(blockNr, tx, api.filters) + blockNum, _, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(blockNr), tx, api.filters) if err != nil { return nil, err } diff --git a/cmd/rpcdaemon/commands/eth_call.go b/cmd/rpcdaemon/commands/eth_call.go index b0ae482f266..331c4e4ec02 100644 --- a/cmd/rpcdaemon/commands/eth_call.go +++ b/cmd/rpcdaemon/commands/eth_call.go @@ -77,39 +77,33 @@ func (api *APIImpl) Call(ctx context.Context, args ethapi.CallArgs, blockNrOrHas } func HeaderByNumberOrHash(ctx context.Context, tx kv.Tx, blockNrOrHash rpc.BlockNumberOrHash, filters *rpchelper.Filters) (*types.Header, error) { - if blockLabel, ok := blockNrOrHash.Number(); ok { - blockNum, err := getBlockNumber(blockLabel, tx, filters) - if err != nil { - return nil, err - } - return rawdb.ReadHeaderByNumber(tx, blockNum), nil + _, hash, _, err := rpchelper.GetBlockNumber(blockNrOrHash, tx, filters) + if err != nil { + return nil, err + } + header, err := rawdb.ReadHeaderByHash(tx, hash) + if err != nil { + return nil, err + } + if header == nil { + return nil, errors.New("header for hash not found") } - if hash, ok := blockNrOrHash.Hash(); ok { - header, err := rawdb.ReadHeaderByHash(tx, hash) + + if blockNrOrHash.RequireCanonical { + can, err := rawdb.ReadCanonicalHash(tx, header.Number.Uint64()) if err != nil { return nil, err } - if header == nil { - return nil, errors.New("header for hash not found") - } - - if blockNrOrHash.RequireCanonical { - can, err := 
rawdb.ReadCanonicalHash(tx, header.Number.Uint64()) - if err != nil { - return nil, err - } - if can != hash { - return nil, errors.New("hash is not currently canonical") - } + if can != hash { + return nil, errors.New("hash is not currently canonical") } + } - h := rawdb.ReadHeader(tx, hash, header.Number.Uint64()) - if h == nil { - return nil, errors.New("header found, but block body is missing") - } - return h, nil + h := rawdb.ReadHeader(tx, hash, header.Number.Uint64()) + if h == nil { + return nil, errors.New("header found, but block body is missing") } - return nil, errors.New("invalid arguments; neither block nor hash specified") + return h, nil } // EstimateGas implements eth_estimateGas. Returns an estimate of how much gas is necessary to allow the transaction to complete. The transaction will not be added to the blockchain. diff --git a/cmd/rpcdaemon/commands/eth_receipts.go b/cmd/rpcdaemon/commands/eth_receipts.go index c986e3d1dad..bfd481ec08d 100644 --- a/cmd/rpcdaemon/commands/eth_receipts.go +++ b/cmd/rpcdaemon/commands/eth_receipts.go @@ -335,7 +335,7 @@ func (api *APIImpl) GetBlockReceipts(ctx context.Context, number rpc.BlockNumber } defer tx.Rollback() - blockNum, err := getBlockNumber(number, tx, api.filters) + blockNum, _, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(number), tx, api.filters) if err != nil { return nil, err } diff --git a/cmd/rpcdaemon/commands/eth_txs.go b/cmd/rpcdaemon/commands/eth_txs.go index 7383aee5c1f..507c52225b2 100644 --- a/cmd/rpcdaemon/commands/eth_txs.go +++ b/cmd/rpcdaemon/commands/eth_txs.go @@ -14,6 +14,7 @@ import ( types2 "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/turbo/rpchelper" ) // GetTransactionByHash implements eth_getTransactionByHash. Returns information about a transaction given the transaction's hash. 
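For illustration only (not part of the patch): every call site in this cleanup follows the same pattern. rpchelper.GetBlockNumber resolves a block label or hash in one call and returns the block number, the canonical hash, and a flag that is true when the resolved block coincides with the plain-state (Execution stage) progress. A hypothetical wrapper mirroring those call sites, assuming the imports already used in the files above:

// hypothetical helper; not part of the commit
func resolveBlockNr(blockNr rpc.BlockNumber, tx kv.Tx, filters *rpchelper.Filters) (uint64, common.Hash, bool, error) {
	// same call as in the handlers above, with all four return values kept
	return rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(blockNr), tx, filters)
}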
@@ -198,7 +199,7 @@ func (api *APIImpl) GetTransactionByBlockNumberAndIndex(ctx context.Context, blo defer tx.Rollback() // https://infura.io/docs/ethereum/json-rpc/eth-getTransactionByBlockNumberAndIndex - blockNum, err := getBlockNumber(blockNr, tx, api.filters) + blockNum, _, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(blockNr), tx, api.filters) if err != nil { return nil, err } diff --git a/cmd/rpcdaemon/commands/eth_uncles.go b/cmd/rpcdaemon/commands/eth_uncles.go index e4b2947bd18..5073e2f795d 100644 --- a/cmd/rpcdaemon/commands/eth_uncles.go +++ b/cmd/rpcdaemon/commands/eth_uncles.go @@ -9,6 +9,7 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/turbo/adapter/ethapi" + "github.com/ledgerwatch/erigon/turbo/rpchelper" "github.com/ledgerwatch/log/v3" ) @@ -20,7 +21,7 @@ func (api *APIImpl) GetUncleByBlockNumberAndIndex(ctx context.Context, number rp } defer tx.Rollback() - blockNum, err := getBlockNumber(number, tx, api.filters) + blockNum, hash, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(number), tx, api.filters) if err != nil { return nil, err } @@ -31,7 +32,6 @@ func (api *APIImpl) GetUncleByBlockNumberAndIndex(ctx context.Context, number rp if block == nil { return nil, nil // not error, see https://github.com/ledgerwatch/erigon/issues/1645 } - hash := block.Hash() additionalFields := make(map[string]interface{}) td, err := rawdb.ReadTd(tx, block.Hash(), blockNum) if err != nil { @@ -91,7 +91,7 @@ func (api *APIImpl) GetUncleCountByBlockNumber(ctx context.Context, number rpc.B } defer tx.Rollback() - blockNum, err := getBlockNumber(number, tx, api.filters) + blockNum, _, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(number), tx, api.filters) if err != nil { return &n, err } diff --git a/cmd/rpcdaemon/commands/rpc_getBlockNumber.go b/cmd/rpcdaemon/commands/rpc_getBlockNumber.go deleted file mode 100644 index aa4c9f26b7e..00000000000 --- a/cmd/rpcdaemon/commands/rpc_getBlockNumber.go +++ /dev/null @@ -1,41 +0,0 @@ -package commands - -import ( - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/rpc" - "github.com/ledgerwatch/erigon/turbo/rpchelper" -) - -func getBlockNumber(number rpc.BlockNumber, tx kv.Tx, filters *rpchelper.Filters) (uint64, error) { - var blockNum uint64 - latest, err := rpchelper.GetLatestBlockNumber(tx) - if err != nil { - return 0, err - } - - switch number { - case rpc.LatestBlockNumber: - return latest, nil - - case rpc.PendingBlockNumber: - pendingBlock := filters.LastPendingBlock() - if pendingBlock == nil { - return latest, nil - } - return pendingBlock.NumberU64(), nil - - case rpc.EarliestBlockNumber: - blockNum = 0 - - case rpc.FinalizeBlockNumber: - return rpchelper.GetFinalizedBlockNumber(tx) - - case rpc.SafeBlockNumber: - return rpchelper.GetSafeBlockNumber(tx) - - default: - blockNum = uint64(number.Int64()) - } - - return blockNum, nil -} diff --git a/cmd/rpcdaemon/commands/trace_filtering.go b/cmd/rpcdaemon/commands/trace_filtering.go index c8582b976ba..74cec78e1d3 100644 --- a/cmd/rpcdaemon/commands/trace_filtering.go +++ b/cmd/rpcdaemon/commands/trace_filtering.go @@ -18,6 +18,7 @@ import ( "github.com/ledgerwatch/erigon/ethdb/bitmapdb" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/turbo/rpchelper" ) // Transaction implements trace_transaction @@ -125,7 +126,7 @@ func (api *TraceAPIImpl) Block(ctx context.Context, blockNr 
rpc.BlockNumber) (Pa return nil, err } defer tx.Rollback() - blockNum, err := getBlockNumber(blockNr, tx, api.filters) + blockNum, hash, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(blockNr), tx, api.filters) if err != nil { return nil, err } @@ -142,7 +143,6 @@ func (api *TraceAPIImpl) Block(ctx context.Context, blockNr rpc.BlockNumber) (Pa if block == nil { return nil, fmt.Errorf("could not find block %d", uint64(bn)) } - hash := block.Hash() parentNr := bn if parentNr > 0 { From 21e3ebdab5cd12ae7181b83e044886e0f9c4851b Mon Sep 17 00:00:00 2001 From: ledgerwatch Date: Tue, 14 Jun 2022 16:06:42 +0100 Subject: [PATCH 055/136] Update skip analysis, add more BSC snapshot hashes (#4448) * Update skip analysis, add more BSC snapshot hashes * Syntax fix Co-authored-by: Alexey Sharp --- core/skip_analysis.go | 6 +++--- turbo/snapshotsync/snapshothashes/erigon-snapshots | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/core/skip_analysis.go b/core/skip_analysis.go index 1dafa999759..cc787cc1e93 100644 --- a/core/skip_analysis.go +++ b/core/skip_analysis.go @@ -21,12 +21,12 @@ import ( ) // MainnetNotCheckedFrom is the first block number not yet checked for invalid jumps -const MainnetNotCheckedFrom uint64 = 14_909_200 +const MainnetNotCheckedFrom uint64 = 14_961_400 // MainnetNotCheckedFrom is the first block number not yet checked for invalid jumps -const BSCNotCheckedFrom uint64 = 18_589_376 +const BSCNotCheckedFrom uint64 = 18_682_505 -const BorMainnetNotCheckedFrom uint64 = 24_673_536 +const BorMainnetNotCheckedFrom uint64 = 29_447_463 const RopstenNotCheckedFrom uint64 = 12_331_664 diff --git a/turbo/snapshotsync/snapshothashes/erigon-snapshots b/turbo/snapshotsync/snapshothashes/erigon-snapshots index b900e6362cc..7e85e4d0028 160000 --- a/turbo/snapshotsync/snapshothashes/erigon-snapshots +++ b/turbo/snapshotsync/snapshothashes/erigon-snapshots @@ -1 +1 @@ -Subproject commit b900e6362ccb20b31d3f29fe2fe1efd73e8f6183 +Subproject commit 7e85e4d0028c27f747d97f65ac0b8c252a050b39 From 7f81e0dddebf5b98693bc8f4a5d2e391bc373a67 Mon Sep 17 00:00:00 2001 From: sudeep Date: Wed, 15 Jun 2022 01:03:30 +0530 Subject: [PATCH 056/136] in transaction execution, subtract from account balance only after enough gaspool is ensured (#4450) - noticed the difference when executing testdata#10 in go-ethereum and erigon --- core/state_transition.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/core/state_transition.go b/core/state_transition.go index c8b8248b485..43a17bab06d 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -257,12 +257,13 @@ func (st *StateTransition) buyGas(gasBailout bool) error { return fmt.Errorf("%w: address %v", ErrInsufficientFunds, st.msg.From().Hex()) } } + var subBalance bool = false if have, want := st.state.GetBalance(st.msg.From()), balanceCheck; have.Cmp(want) < 0 { if !gasBailout { return fmt.Errorf("%w: address %v have %v want %v", ErrInsufficientFunds, st.msg.From().Hex(), have, want) } } else { - st.state.SubBalance(st.msg.From(), mgval) + subBalance = true } if err := st.gp.SubGas(st.msg.Gas()); err != nil { if !gasBailout { @@ -272,6 +273,9 @@ func (st *StateTransition) buyGas(gasBailout bool) error { st.gas += st.msg.Gas() st.initialGas = st.msg.Gas() + if subBalance { + st.state.SubBalance(st.msg.From(), mgval) + } return nil } From cad32ee1cf664c6943a374302275bcbfbdfb70d3 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Tue, 14 Jun 2022 21:43:44 +0200 Subject: [PATCH 057/136] added 
deletion support (#4451) --- ethdb/olddb/memorymutation.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/ethdb/olddb/memorymutation.go b/ethdb/olddb/memorymutation.go index 6636b9e9b5a..1154212c98d 100644 --- a/ethdb/olddb/memorymutation.go +++ b/ethdb/olddb/memorymutation.go @@ -289,6 +289,20 @@ func (m *memorymutation) Flush(tx kv.RwTx) error { if err != nil { return err } + // Obliterate buckets who are to be deleted + for bucket := range m.clearedTables { + if err := tx.ClearBucket(bucket); err != nil { + return err + } + } + // Obliterate entries who are to be deleted + for bucket, keys := range m.deletedEntries { + for key := range keys { + if err := tx.Delete(bucket, []byte(key), nil); err != nil { + return err + } + } + } // Iterate over each bucket and apply changes accordingly. for _, bucket := range buckets { if _, ok := m.dupsortTables[bucket]; ok && bucket != kv.HashedStorage { From ff5cbcb75dd9156e0917406fdf2fb126de629583 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Wed, 15 Jun 2022 01:10:41 +0200 Subject: [PATCH 058/136] Made in memory mutation compatible with all buckets (#4454) * progress #1 * progress #2 * proper file naming * more mature memory mutation --- .../{memorymutation.go => memory_mutation.go} | 20 ++++---- ...ioncursor.go => memory_mutation_cursor.go} | 49 +++++++++++-------- ...tation_test.go => memory_mutation_test.go} | 0 3 files changed, 39 insertions(+), 30 deletions(-) rename ethdb/olddb/{memorymutation.go => memory_mutation.go} (96%) rename ethdb/olddb/{memorymutationcursor.go => memory_mutation_cursor.go} (88%) rename ethdb/olddb/{memorymutation_test.go => memory_mutation_test.go} (100%) diff --git a/ethdb/olddb/memorymutation.go b/ethdb/olddb/memory_mutation.go similarity index 96% rename from ethdb/olddb/memorymutation.go rename to ethdb/olddb/memory_mutation.go index 1154212c98d..96bcb72cffc 100644 --- a/ethdb/olddb/memorymutation.go +++ b/ethdb/olddb/memory_mutation.go @@ -30,7 +30,6 @@ type memorymutation struct { memDb kv.RwDB deletedEntries map[string]map[string]struct{} clearedTables map[string]struct{} - dupsortTables map[string]struct{} db kv.Tx } @@ -54,11 +53,6 @@ func NewMemoryBatch(tx kv.Tx) *memorymutation { memTx: memTx, deletedEntries: make(map[string]map[string]struct{}), clearedTables: make(map[string]struct{}), - dupsortTables: map[string]struct{}{ - kv.AccountChangeSet: {}, - kv.StorageChangeSet: {}, - kv.HashedStorage: {}, - }, } } @@ -305,7 +299,7 @@ func (m *memorymutation) Flush(tx kv.RwTx) error { } // Iterate over each bucket and apply changes accordingly. 
for _, bucket := range buckets { - if _, ok := m.dupsortTables[bucket]; ok && bucket != kv.HashedStorage { + if isTablePurelyDupsort(bucket) { cbucket, err := m.memTx.CursorDupSort(bucket) if err != nil { return err @@ -343,6 +337,16 @@ func (m *memorymutation) Flush(tx kv.RwTx) error { return nil } +// Check if a bucket is dupsorted and has dupsort conversion off +func isTablePurelyDupsort(bucket string) bool { + config, ok := kv.ChaindataTablesCfg[bucket] + // If we do not have the configuration we assume it is not dupsorted + if !ok { + return false + } + return !config.AutoDupSortKeysConversion && config.Flags == kv.DupSort +} + // Cursor creates a new cursor (the real fun begins here) func (m *memorymutation) makeCursor(bucket string) (kv.RwCursorDupSort, error) { c := &memorymutationcursor{} @@ -361,8 +365,6 @@ func (m *memorymutation) makeCursor(bucket string) (kv.RwCursorDupSort, error) { if err != nil { return nil, err } - _, isDupsort := m.dupsortTables[bucket] - c.isDupsort = isDupsort c.memCursor = c.memDupCursor c.mutation = m return c, err diff --git a/ethdb/olddb/memorymutationcursor.go b/ethdb/olddb/memory_mutation_cursor.go similarity index 88% rename from ethdb/olddb/memorymutationcursor.go rename to ethdb/olddb/memory_mutation_cursor.go index 9c0f3aa9481..6da39ecd123 100644 --- a/ethdb/olddb/memorymutationcursor.go +++ b/ethdb/olddb/memory_mutation_cursor.go @@ -36,8 +36,6 @@ type memorymutationcursor struct { memDupCursor kv.RwCursorDupSort // we keep the index in the slice of pairs we are at. isPrevFromDb bool - // Flag for dupsort mode - isDupsort bool // entry history currentPair cursorentry currentDbEntry cursorentry @@ -65,7 +63,7 @@ func (m *memorymutationcursor) First() ([]byte, []byte, error) { } } - return m.goForward(memKey, memValue, dbKey, dbValue) + return m.goForward(memKey, memValue, dbKey, dbValue, false) } func (m *memorymutationcursor) getNextOnDb(dup bool) (key []byte, value []byte, err error) { @@ -98,11 +96,15 @@ func (m *memorymutationcursor) getNextOnDb(dup bool) (key []byte, value []byte, } func (m *memorymutationcursor) convertAutoDupsort(key []byte, value []byte) []byte { - // The only dupsorted table we are interested is HashedStorage - if m.table != kv.HashedStorage { + config, ok := kv.ChaindataTablesCfg[m.table] + // If we do not have the configuration we assume it is not dupsorted + if !ok || !config.AutoDupSortKeysConversion { return key } - return append(key, value[:32]...) + if len(key) != config.DupToLen { + return key + } + return append(key, value[:config.DupFromLen-config.DupToLen]...) } // Current return the current key and values the cursor is on. 
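For illustration only (not part of the patch): convertAutoDupsort above rebuilds the full key of an auto-dupsort table by re-attaching the sub-key that such tables keep at the front of the value. Assuming the HashedStorage layout (DupToLen=40, DupFromLen=72; these numbers come from the table configuration, not from this diff), the conversion is:

// key as the cursor sees it:  addrHash(32) || incarnation(8)       -> 40 bytes (DupToLen)
// value as stored:            storageSlotHash(32) || storageValue  -> first 32 bytes belong to the key
// rebuilt full key:           key || value[:DupFromLen-DupToLen]   -> 40 + 32 = 72 bytes (DupFromLen)
fullKey := append(key, value[:72-40]...) // equivalent to value[:32] under the assumed layout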
@@ -110,21 +112,26 @@ func (m *memorymutationcursor) Current() ([]byte, []byte, error) { return common.CopyBytes(m.currentPair.key), common.CopyBytes(m.currentPair.value), nil } -func (m *memorymutationcursor) skipIntersection(memKey, memValue, dbKey, dbValue []byte) (newDbKey []byte, newDbValue []byte, err error) { +func (m *memorymutationcursor) skipIntersection(memKey, memValue, dbKey, dbValue []byte, dup bool) (newDbKey []byte, newDbValue []byte, err error) { newDbKey = dbKey newDbValue = dbValue + config, ok := kv.ChaindataTablesCfg[m.table] + dupsortOffset := 0 + if ok && config.AutoDupSortKeysConversion { + dupsortOffset = config.DupFromLen - config.DupToLen + } // Check for duplicates if bytes.Compare(memKey, dbKey) == 0 { - if !m.isDupsort { - if newDbKey, newDbValue, err = m.getNextOnDb(false); err != nil { + if !dup { + if newDbKey, newDbValue, err = m.getNextOnDb(dup); err != nil { return } } else if bytes.Compare(memValue, dbValue) == 0 { - if newDbKey, newDbValue, err = m.getNextOnDb(true); err != nil { + if newDbKey, newDbValue, err = m.getNextOnDb(dup); err != nil { return } - } else if len(memValue) >= 32 && len(dbValue) >= 32 && m.table == kv.HashedStorage && bytes.Compare(memValue[:32], dbValue[:32]) == 0 { - if newDbKey, newDbValue, err = m.getNextOnDb(true); err != nil { + } else if dupsortOffset != 0 && len(memValue) >= dupsortOffset && len(dbValue) >= dupsortOffset && bytes.Compare(memValue[:dupsortOffset], dbValue[:dupsortOffset]) == 0 { + if newDbKey, newDbValue, err = m.getNextOnDb(dup); err != nil { return } } @@ -132,13 +139,13 @@ func (m *memorymutationcursor) skipIntersection(memKey, memValue, dbKey, dbValue return } -func (m *memorymutationcursor) goForward(memKey, memValue, dbKey, dbValue []byte) ([]byte, []byte, error) { +func (m *memorymutationcursor) goForward(memKey, memValue, dbKey, dbValue []byte, dup bool) ([]byte, []byte, error) { var err error if memValue == nil && dbValue == nil { return nil, nil, nil } - dbKey, dbValue, err = m.skipIntersection(memKey, memValue, dbKey, dbValue) + dbKey, dbValue, err = m.skipIntersection(memKey, memValue, dbKey, dbValue, dup) if err != nil { return nil, nil, err } @@ -173,7 +180,7 @@ func (m *memorymutationcursor) Next() ([]byte, []byte, error) { if err != nil { return nil, nil, err } - return m.goForward(m.currentMemEntry.key, m.currentMemEntry.value, k, v) + return m.goForward(m.currentMemEntry.key, m.currentMemEntry.value, k, v, false) } memK, memV, err := m.memCursor.Next() @@ -181,7 +188,7 @@ func (m *memorymutationcursor) Next() ([]byte, []byte, error) { return nil, nil, err } - return m.goForward(memK, memV, m.currentDbEntry.key, m.currentDbEntry.value) + return m.goForward(memK, memV, m.currentDbEntry.key, m.currentDbEntry.value, false) } // NextDup returns the next element of the mutation. @@ -192,7 +199,7 @@ func (m *memorymutationcursor) NextDup() ([]byte, []byte, error) { if err != nil { return nil, nil, err } - return m.goForward(m.currentMemEntry.key, m.currentMemEntry.value, k, v) + return m.goForward(m.currentMemEntry.key, m.currentMemEntry.value, k, v, true) } memK, memV, err := m.memDupCursor.NextDup() @@ -200,7 +207,7 @@ func (m *memorymutationcursor) NextDup() ([]byte, []byte, error) { return nil, nil, err } - return m.goForward(memK, memV, m.currentDbEntry.key, m.currentDbEntry.value) + return m.goForward(memK, memV, m.currentDbEntry.key, m.currentDbEntry.value, true) } // Seek move pointer to a key at a certain position. 
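A simplified sketch (illustration only, not the patch's exact code) of the merge rule that goForward and skipIntersection implement: when the overlay cursor and the database cursor sit on the same key, or on the same key plus sub-key prefix for auto-dupsort tables, the database entry is skipped so the in-memory write shadows it; otherwise whichever key sorts first is returned.

// memKey/memValue come from the in-memory cursor, dbKey/dbValue from the database cursor
var err error
if memKey != nil && bytes.Equal(memKey, dbKey) {
	dbKey, dbValue, err = m.getNextOnDb(dup) // skip the shadowed database entry
}
if memValue == nil || (dbKey != nil && bytes.Compare(dbKey, memKey) < 0) {
	return dbKey, dbValue, err // database entry sorts first (or there is no overlay entry)
}
return memKey, memValue, err // overlay entry wins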
@@ -222,7 +229,7 @@ func (m *memorymutationcursor) Seek(seek []byte) ([]byte, []byte, error) { if err != nil { return nil, nil, err } - return m.goForward(memKey, memValue, dbKey, dbValue) + return m.goForward(memKey, memValue, dbKey, dbValue, false) } // Seek move pointer to a key at a certain position. @@ -309,7 +316,7 @@ func (m *memorymutationcursor) SeekBothRange(key, value []byte) ([]byte, error) if err != nil { return nil, err } - _, retValue, err := m.goForward(key, memValue, key, dbValue) + _, retValue, err := m.goForward(key, memValue, key, dbValue, true) return retValue, err } @@ -325,7 +332,7 @@ func (m *memorymutationcursor) Last() ([]byte, []byte, error) { return nil, nil, err } - dbKey, dbValue, err = m.skipIntersection(memKey, memValue, dbKey, dbValue) + dbKey, dbValue, err = m.skipIntersection(memKey, memValue, dbKey, dbValue, false) if err != nil { return nil, nil, err } diff --git a/ethdb/olddb/memorymutation_test.go b/ethdb/olddb/memory_mutation_test.go similarity index 100% rename from ethdb/olddb/memorymutation_test.go rename to ethdb/olddb/memory_mutation_test.go From 2e4e7bd4de61e42b00db27ba345de1f2533f624d Mon Sep 17 00:00:00 2001 From: ledgerwatch Date: Wed, 15 Jun 2022 07:10:37 +0100 Subject: [PATCH 059/136] [erigon2.2] Log stats tool (#4453) Co-authored-by: Alexey Sharp --- cmd/hack/hack.go | 109 ++++++++++++++++++++++++++++++++++++++++++++++- go.mod | 2 +- go.sum | 4 +- 3 files changed, 110 insertions(+), 5 deletions(-) diff --git a/cmd/hack/hack.go b/cmd/hack/hack.go index 596f45c4448..a54e19b47ee 100644 --- a/cmd/hack/hack.go +++ b/cmd/hack/hack.go @@ -15,6 +15,7 @@ import ( "path/filepath" "regexp" "runtime/pprof" + "sort" "strconv" "strings" "time" @@ -1304,12 +1305,114 @@ func readEf(file string, addr []byte) error { } func readBodies(file string) error { - datPath := file + ".dat" - decomp, err := compress.NewDecompressor(datPath) + decomp, err := compress.NewDecompressor(file) if err != nil { return err } defer decomp.Close() + gg := decomp.MakeGetter() + buf, _ := gg.Next(nil) + firstBody := &types.BodyForStorage{} + if err = rlp.DecodeBytes(buf, firstBody); err != nil { + return err + } + //var blockFrom uint64 = 12300000 + //var blockTo uint64 = 12400000 + firstTxID := firstBody.BaseTxId + + lastBody := new(types.BodyForStorage) + i := uint64(0) + for gg.HasNext() { + i++ + //if i == blockTo-blockFrom-1 { + //fmt.Printf("lastBody\n") + buf, _ = gg.Next(buf[:0]) + if err = rlp.DecodeBytes(buf, lastBody); err != nil { + return err + } + //if gg.HasNext() { + // panic(1) + //} + //} else { + if gg.HasNext() { + gg.Skip() + } + //} + } + expectedCount := lastBody.BaseTxId + uint64(lastBody.TxAmount) - firstBody.BaseTxId + fmt.Printf("i=%d, firstBody=%v, lastBody=%v, firstTxID=%d, expectedCount=%d\n", i, firstBody, lastBody, firstTxID, expectedCount) + + return nil +} + +func findLogs(chaindata string, block uint64, blockTotal uint64) error { + db := mdbx.MustOpen(chaindata) + defer db.Close() + + tx, txErr := db.BeginRo(context.Background()) + if txErr != nil { + return txErr + } + defer tx.Rollback() + logs, err := tx.Cursor(kv.Log) + if err != nil { + return err + } + defer logs.Close() + + reader := bytes.NewReader(nil) + addrs := map[common.Address]int{} + topics := map[string]int{} + + for k, v, err := logs.Seek(dbutils.LogKey(block, 0)); k != nil; k, v, err = logs.Next() { + if err != nil { + return err + } + + blockNum := binary.BigEndian.Uint64(k[:8]) + if blockNum >= block+blockTotal { + break + } + + var ll types.Logs + reader.Reset(v) + if err 
:= cbor.Unmarshal(&ll, reader); err != nil { + return fmt.Errorf("receipt unmarshal failed: %w, blocl=%d", err, blockNum) + } + + for _, l := range ll { + addrs[l.Address]++ + for _, topic := range l.Topics { + topics[fmt.Sprintf("%x | %x", l.Address, topic)]++ + } + } + } + addrsInv := map[int][]common.Address{} + topicsInv := map[int][]string{} + for a, c := range addrs { + addrsInv[c] = append(addrsInv[c], a) + } + counts := make([]int, 0, len(addrsInv)) + for c := range addrsInv { + counts = append(counts, -c) + } + sort.Ints(counts) + for i := 0; i < 10 && i < len(counts); i++ { + as := addrsInv[-counts[i]] + fmt.Printf("%d=%x\n", -counts[i], as) + } + for t, c := range topics { + topicsInv[c] = append(topicsInv[c], t) + } + counts = make([]int, 0, len(topicsInv)) + for c := range topicsInv { + counts = append(counts, -c) + } + sort.Ints(counts) + for i := 0; i < 10 && i < len(counts); i++ { + as := topicsInv[-counts[i]] + fmt.Printf("%d=%s\n", -counts[i], as) + } return nil } @@ -1445,6 +1548,8 @@ func main() { err = readEf(*chaindata, common.FromHex(*account)) case "readBodies": err = readBodies(*chaindata) + case "findLogs": + err = findLogs(*chaindata, uint64(*block), uint64(*blockTotal)) } if err != nil { diff --git a/go.mod b/go.mod index 4e53c7ae8c1..6c7105be1a6 100644 --- a/go.mod +++ b/go.mod @@ -34,7 +34,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible - github.com/ledgerwatch/erigon-lib v0.0.0-20220614104214-a450f5c34197 + github.com/ledgerwatch/erigon-lib v0.0.0-20220614213818-bbf96d05808e github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 diff --git a/go.sum b/go.sum index dbad0a8c14f..9e777215db2 100644 --- a/go.sum +++ b/go.sum @@ -382,8 +382,8 @@ github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3P github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220614104214-a450f5c34197 h1:XZi/OsuXfAGMxoh60OiCUaAWTWLs8IhmCM6vVAu4Xao= -github.com/ledgerwatch/erigon-lib v0.0.0-20220614104214-a450f5c34197/go.mod h1:SOwq7m9Wm7ckQ+kxUwDYRchwuwO8lXhp1lhbLTUhMk8= +github.com/ledgerwatch/erigon-lib v0.0.0-20220614213818-bbf96d05808e h1:A4ozqGgOHe4D67icNKt721OFm6ZR1q2MjRp/dFBsDms= +github.com/ledgerwatch/erigon-lib v0.0.0-20220614213818-bbf96d05808e/go.mod h1:SOwq7m9Wm7ckQ+kxUwDYRchwuwO8lXhp1lhbLTUhMk8= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= From 663ccfd4dccadb9dc98e21258a43e7fdd98f5d33 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Wed, 15 Jun 2022 18:45:05 +0200 Subject: [PATCH 060/136] better flush (#4459) --- ethdb/olddb/memory_mutation.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ethdb/olddb/memory_mutation.go b/ethdb/olddb/memory_mutation.go index 96bcb72cffc..8b304559371 100644 --- a/ethdb/olddb/memory_mutation.go +++ b/ethdb/olddb/memory_mutation.go @@ -314,7 +314,7 @@ func (m *memorymutation) Flush(tx kv.RwTx) error { if err != nil { return 
err } - if err := dbCursor.AppendDup(k, v); err != nil { + if err := dbCursor.Put(k, v); err != nil { return err } } From 631d4854766512a18f611dbfa612bb044e4dd554 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Wed, 15 Jun 2022 19:59:20 +0200 Subject: [PATCH 061/136] Renamed mutations types to be exported (#4463) * renamed * ops --- ethdb/olddb/memory_mutation.go | 92 +++++++++++++-------------- ethdb/olddb/memory_mutation_cursor.go | 58 ++++++++--------- 2 files changed, 75 insertions(+), 75 deletions(-) diff --git a/ethdb/olddb/memory_mutation.go b/ethdb/olddb/memory_mutation.go index 8b304559371..4782d877c38 100644 --- a/ethdb/olddb/memory_mutation.go +++ b/ethdb/olddb/memory_mutation.go @@ -24,7 +24,7 @@ import ( "github.com/ledgerwatch/erigon/ethdb" ) -type memorymutation struct { +type MemoryMutation struct { // Bucket => Key => Value memTx kv.RwTx memDb kv.RwDB @@ -41,13 +41,13 @@ type memorymutation struct { // defer batch.Rollback() // ... some calculations on `batch` // batch.Commit() -func NewMemoryBatch(tx kv.Tx) *memorymutation { +func NewMemoryBatch(tx kv.Tx) *MemoryMutation { tmpDB := mdbx.NewMDBX(log.New()).InMem().MustOpen() memTx, err := tmpDB.BeginRw(context.Background()) if err != nil { panic(err) } - return &memorymutation{ + return &MemoryMutation{ db: tx, memDb: tmpDB, memTx: memTx, @@ -56,19 +56,19 @@ func NewMemoryBatch(tx kv.Tx) *memorymutation { } } -func (m *memorymutation) RwKV() kv.RwDB { +func (m *MemoryMutation) RwKV() kv.RwDB { if casted, ok := m.db.(ethdb.HasRwKV); ok { return casted.RwKV() } return nil } -func (m *memorymutation) isTableCleared(table string) bool { +func (m *MemoryMutation) isTableCleared(table string) bool { _, ok := m.clearedTables[table] return ok } -func (m *memorymutation) isEntryDeleted(table string, key []byte) bool { +func (m *MemoryMutation) isEntryDeleted(table string, key []byte) bool { _, ok := m.deletedEntries[table] if !ok { return ok @@ -78,7 +78,7 @@ func (m *memorymutation) isEntryDeleted(table string, key []byte) bool { } // getMem Retrieve database entry from memory (hashed storage will be left out for now because it is the only non auto-DupSorted table) -func (m *memorymutation) getMem(table string, key []byte) ([]byte, bool) { +func (m *MemoryMutation) getMem(table string, key []byte) ([]byte, bool) { val, err := m.memTx.GetOne(table, key) if err != nil { panic(err) @@ -86,10 +86,10 @@ func (m *memorymutation) getMem(table string, key []byte) ([]byte, bool) { return val, val != nil } -func (m *memorymutation) DBSize() (uint64, error) { panic("not implemented") } -func (m *memorymutation) PageSize() uint64 { panic("not implemented") } +func (m *MemoryMutation) DBSize() (uint64, error) { panic("not implemented") } +func (m *MemoryMutation) PageSize() uint64 { panic("not implemented") } -func (m *memorymutation) IncrementSequence(bucket string, amount uint64) (res uint64, err error) { +func (m *MemoryMutation) IncrementSequence(bucket string, amount uint64) (res uint64, err error) { v, ok := m.getMem(kv.Sequence, []byte(bucket)) if !ok && m.db != nil { v, err = m.db.GetOne(kv.Sequence, []byte(bucket)) @@ -112,7 +112,7 @@ func (m *memorymutation) IncrementSequence(bucket string, amount uint64) (res ui return currentV, nil } -func (m *memorymutation) ReadSequence(bucket string) (res uint64, err error) { +func (m *MemoryMutation) ReadSequence(bucket string) (res uint64, err error) { v, ok := m.getMem(kv.Sequence, []byte(bucket)) if !ok && m.db != nil { v, err = m.db.GetOne(kv.Sequence, []byte(bucket)) @@ -129,7 +129,7 
@@ func (m *memorymutation) ReadSequence(bucket string) (res uint64, err error) { } // Can only be called from the worker thread -func (m *memorymutation) GetOne(table string, key []byte) ([]byte, error) { +func (m *MemoryMutation) GetOne(table string, key []byte) ([]byte, error) { if value, ok := m.getMem(table, key); ok { if value == nil { return nil, nil @@ -148,7 +148,7 @@ func (m *memorymutation) GetOne(table string, key []byte) ([]byte, error) { } // Can only be called from the worker thread -func (m *memorymutation) Get(table string, key []byte) ([]byte, error) { +func (m *MemoryMutation) Get(table string, key []byte) ([]byte, error) { value, err := m.GetOne(table, key) if err != nil { return nil, err @@ -161,12 +161,12 @@ func (m *memorymutation) Get(table string, key []byte) ([]byte, error) { return value, nil } -func (m *memorymutation) Last(table string) ([]byte, []byte, error) { - panic("not implemented. (memorymutation.Last)") +func (m *MemoryMutation) Last(table string) ([]byte, []byte, error) { + panic("not implemented. (MemoryMutation.Last)") } // Has return whether a key is present in a certain table. -func (m *memorymutation) Has(table string, key []byte) (bool, error) { +func (m *MemoryMutation) Has(table string, key []byte) (bool, error) { if _, ok := m.getMem(table, key); ok { return ok, nil } @@ -177,38 +177,38 @@ func (m *memorymutation) Has(table string, key []byte) (bool, error) { } // Put insert a new entry in the database, if it is hashed storage it will add it to a slice instead of a map. -func (m *memorymutation) Put(table string, key []byte, value []byte) error { +func (m *MemoryMutation) Put(table string, key []byte, value []byte) error { return m.memTx.Put(table, key, value) } -func (m *memorymutation) Append(table string, key []byte, value []byte) error { +func (m *MemoryMutation) Append(table string, key []byte, value []byte) error { return m.Put(table, key, value) } -func (m *memorymutation) AppendDup(table string, key []byte, value []byte) error { +func (m *MemoryMutation) AppendDup(table string, key []byte, value []byte) error { return m.Put(table, key, value) } -func (m *memorymutation) BatchSize() int { +func (m *MemoryMutation) BatchSize() int { return 0 } -func (m *memorymutation) ForEach(bucket string, fromPrefix []byte, walker func(k, v []byte) error) error { +func (m *MemoryMutation) ForEach(bucket string, fromPrefix []byte, walker func(k, v []byte) error) error { m.panicOnEmptyDB() return m.db.ForEach(bucket, fromPrefix, walker) } -func (m *memorymutation) ForPrefix(bucket string, prefix []byte, walker func(k, v []byte) error) error { +func (m *MemoryMutation) ForPrefix(bucket string, prefix []byte, walker func(k, v []byte) error) error { m.panicOnEmptyDB() return m.db.ForPrefix(bucket, prefix, walker) } -func (m *memorymutation) ForAmount(bucket string, prefix []byte, amount uint32, walker func(k, v []byte) error) error { +func (m *MemoryMutation) ForAmount(bucket string, prefix []byte, amount uint32, walker func(k, v []byte) error) error { m.panicOnEmptyDB() return m.db.ForAmount(bucket, prefix, amount, walker) } -func (m *memorymutation) Delete(table string, k, v []byte) error { +func (m *MemoryMutation) Delete(table string, k, v []byte) error { if _, ok := m.deletedEntries[table]; !ok { m.deletedEntries[table] = make(map[string]struct{}) } @@ -216,68 +216,68 @@ func (m *memorymutation) Delete(table string, k, v []byte) error { return m.memTx.Delete(table, k, v) } -func (m *memorymutation) Commit() error { +func (m *MemoryMutation) Commit() 
error { return nil } -func (m *memorymutation) Rollback() { +func (m *MemoryMutation) Rollback() { m.memTx.Rollback() m.memDb.Close() return } -func (m *memorymutation) Close() { +func (m *MemoryMutation) Close() { m.Rollback() } -func (m *memorymutation) Begin(ctx context.Context, flags ethdb.TxFlags) (ethdb.DbWithPendingMutations, error) { +func (m *MemoryMutation) Begin(ctx context.Context, flags ethdb.TxFlags) (ethdb.DbWithPendingMutations, error) { panic("mutation can't start transaction, because doesn't own it") } -func (m *memorymutation) panicOnEmptyDB() { +func (m *MemoryMutation) panicOnEmptyDB() { if m.db == nil { panic("Not implemented") } } -func (m *memorymutation) SetRwKV(kv kv.RwDB) { +func (m *MemoryMutation) SetRwKV(kv kv.RwDB) { m.db.(ethdb.HasRwKV).SetRwKV(kv) } -func (m *memorymutation) BucketSize(bucket string) (uint64, error) { +func (m *MemoryMutation) BucketSize(bucket string) (uint64, error) { return 0, nil } -func (m *memorymutation) DropBucket(bucket string) error { +func (m *MemoryMutation) DropBucket(bucket string) error { panic("Not implemented") } -func (m *memorymutation) ExistsBucket(bucket string) (bool, error) { +func (m *MemoryMutation) ExistsBucket(bucket string) (bool, error) { panic("Not implemented") } -func (m *memorymutation) ListBuckets() ([]string, error) { +func (m *MemoryMutation) ListBuckets() ([]string, error) { panic("Not implemented") } -func (m *memorymutation) ClearBucket(bucket string) error { +func (m *MemoryMutation) ClearBucket(bucket string) error { m.clearedTables[bucket] = struct{}{} return m.memTx.ClearBucket(bucket) } -func (m *memorymutation) isBucketCleared(bucket string) bool { +func (m *MemoryMutation) isBucketCleared(bucket string) bool { _, ok := m.clearedTables[bucket] return ok } -func (m *memorymutation) CollectMetrics() { +func (m *MemoryMutation) CollectMetrics() { } -func (m *memorymutation) CreateBucket(bucket string) error { +func (m *MemoryMutation) CreateBucket(bucket string) error { panic("Not implemented") } -func (m *memorymutation) Flush(tx kv.RwTx) error { +func (m *MemoryMutation) Flush(tx kv.RwTx) error { // Obtain buckets touched. 
buckets, err := m.memTx.ListBuckets() if err != nil { @@ -348,8 +348,8 @@ func isTablePurelyDupsort(bucket string) bool { } // Cursor creates a new cursor (the real fun begins here) -func (m *memorymutation) makeCursor(bucket string) (kv.RwCursorDupSort, error) { - c := &memorymutationcursor{} +func (m *MemoryMutation) makeCursor(bucket string) (kv.RwCursorDupSort, error) { + c := &memoryMutationCursor{} // We can filter duplicates in dup sorted table c.table = bucket @@ -371,26 +371,26 @@ func (m *memorymutation) makeCursor(bucket string) (kv.RwCursorDupSort, error) { } // Cursor creates a new cursor (the real fun begins here) -func (m *memorymutation) RwCursorDupSort(bucket string) (kv.RwCursorDupSort, error) { +func (m *MemoryMutation) RwCursorDupSort(bucket string) (kv.RwCursorDupSort, error) { return m.makeCursor(bucket) } // Cursor creates a new cursor (the real fun begins here) -func (m *memorymutation) RwCursor(bucket string) (kv.RwCursor, error) { +func (m *MemoryMutation) RwCursor(bucket string) (kv.RwCursor, error) { return m.makeCursor(bucket) } // Cursor creates a new cursor (the real fun begins here) -func (m *memorymutation) CursorDupSort(bucket string) (kv.CursorDupSort, error) { +func (m *MemoryMutation) CursorDupSort(bucket string) (kv.CursorDupSort, error) { return m.makeCursor(bucket) } // Cursor creates a new cursor (the real fun begins here) -func (m *memorymutation) Cursor(bucket string) (kv.Cursor, error) { +func (m *MemoryMutation) Cursor(bucket string) (kv.Cursor, error) { return m.makeCursor(bucket) } // ViewID creates a new cursor (the real fun begins here) -func (m *memorymutation) ViewID() uint64 { +func (m *MemoryMutation) ViewID() uint64 { panic("ViewID Not implemented") } diff --git a/ethdb/olddb/memory_mutation_cursor.go b/ethdb/olddb/memory_mutation_cursor.go index 6da39ecd123..f1bd19251c5 100644 --- a/ethdb/olddb/memory_mutation_cursor.go +++ b/ethdb/olddb/memory_mutation_cursor.go @@ -27,7 +27,7 @@ type cursorentry struct { } // cursor -type memorymutationcursor struct { +type memoryMutationCursor struct { // we can keep one cursor type if we store 2 of each kind. cursor kv.Cursor dupCursor kv.CursorDupSort @@ -41,12 +41,12 @@ type memorymutationcursor struct { currentDbEntry cursorentry currentMemEntry cursorentry // we keep the mining mutation so that we can insert new elements in db - mutation *memorymutation + mutation *MemoryMutation table string } // First move cursor to first position and return key and value accordingly. 
-func (m *memorymutationcursor) First() ([]byte, []byte, error) { +func (m *memoryMutationCursor) First() ([]byte, []byte, error) { memKey, memValue, err := m.memCursor.First() if err != nil { return nil, nil, err @@ -66,7 +66,7 @@ func (m *memorymutationcursor) First() ([]byte, []byte, error) { return m.goForward(memKey, memValue, dbKey, dbValue, false) } -func (m *memorymutationcursor) getNextOnDb(dup bool) (key []byte, value []byte, err error) { +func (m *memoryMutationCursor) getNextOnDb(dup bool) (key []byte, value []byte, err error) { if dup { key, value, err = m.dupCursor.NextDup() if err != nil { @@ -95,7 +95,7 @@ func (m *memorymutationcursor) getNextOnDb(dup bool) (key []byte, value []byte, return } -func (m *memorymutationcursor) convertAutoDupsort(key []byte, value []byte) []byte { +func (m *memoryMutationCursor) convertAutoDupsort(key []byte, value []byte) []byte { config, ok := kv.ChaindataTablesCfg[m.table] // If we do not have the configuration we assume it is not dupsorted if !ok || !config.AutoDupSortKeysConversion { @@ -108,11 +108,11 @@ func (m *memorymutationcursor) convertAutoDupsort(key []byte, value []byte) []by } // Current return the current key and values the cursor is on. -func (m *memorymutationcursor) Current() ([]byte, []byte, error) { +func (m *memoryMutationCursor) Current() ([]byte, []byte, error) { return common.CopyBytes(m.currentPair.key), common.CopyBytes(m.currentPair.value), nil } -func (m *memorymutationcursor) skipIntersection(memKey, memValue, dbKey, dbValue []byte, dup bool) (newDbKey []byte, newDbValue []byte, err error) { +func (m *memoryMutationCursor) skipIntersection(memKey, memValue, dbKey, dbValue []byte, dup bool) (newDbKey []byte, newDbValue []byte, err error) { newDbKey = dbKey newDbValue = dbValue config, ok := kv.ChaindataTablesCfg[m.table] @@ -139,7 +139,7 @@ func (m *memorymutationcursor) skipIntersection(memKey, memValue, dbKey, dbValue return } -func (m *memorymutationcursor) goForward(memKey, memValue, dbKey, dbValue []byte, dup bool) ([]byte, []byte, error) { +func (m *memoryMutationCursor) goForward(memKey, memValue, dbKey, dbValue []byte, dup bool) ([]byte, []byte, error) { var err error if memValue == nil && dbValue == nil { return nil, nil, nil @@ -174,7 +174,7 @@ func (m *memorymutationcursor) goForward(memKey, memValue, dbKey, dbValue []byte } // Next returns the next element of the mutation. -func (m *memorymutationcursor) Next() ([]byte, []byte, error) { +func (m *memoryMutationCursor) Next() ([]byte, []byte, error) { if m.isPrevFromDb { k, v, err := m.getNextOnDb(false) if err != nil { @@ -192,7 +192,7 @@ func (m *memorymutationcursor) Next() ([]byte, []byte, error) { } // NextDup returns the next element of the mutation. -func (m *memorymutationcursor) NextDup() ([]byte, []byte, error) { +func (m *memoryMutationCursor) NextDup() ([]byte, []byte, error) { if m.isPrevFromDb { k, v, err := m.getNextOnDb(true) @@ -211,7 +211,7 @@ func (m *memorymutationcursor) NextDup() ([]byte, []byte, error) { } // Seek move pointer to a key at a certain position. -func (m *memorymutationcursor) Seek(seek []byte) ([]byte, []byte, error) { +func (m *memoryMutationCursor) Seek(seek []byte) ([]byte, []byte, error) { dbKey, dbValue, err := m.cursor.Seek(seek) if err != nil { return nil, nil, err @@ -233,7 +233,7 @@ func (m *memorymutationcursor) Seek(seek []byte) ([]byte, []byte, error) { } // Seek move pointer to a key at a certain position. 
-func (m *memorymutationcursor) SeekExact(seek []byte) ([]byte, []byte, error) { +func (m *memoryMutationCursor) SeekExact(seek []byte) ([]byte, []byte, error) { memKey, memValue, err := m.memCursor.SeekExact(seek) if err != nil { return nil, nil, err @@ -264,37 +264,37 @@ func (m *memorymutationcursor) SeekExact(seek []byte) ([]byte, []byte, error) { return nil, nil, nil } -func (m *memorymutationcursor) Put(k, v []byte) error { +func (m *memoryMutationCursor) Put(k, v []byte) error { return m.mutation.Put(m.table, common.CopyBytes(k), common.CopyBytes(v)) } -func (m *memorymutationcursor) Append(k []byte, v []byte) error { +func (m *memoryMutationCursor) Append(k []byte, v []byte) error { return m.mutation.Put(m.table, common.CopyBytes(k), common.CopyBytes(v)) } -func (m *memorymutationcursor) AppendDup(k []byte, v []byte) error { +func (m *memoryMutationCursor) AppendDup(k []byte, v []byte) error { return m.memDupCursor.AppendDup(common.CopyBytes(k), common.CopyBytes(v)) } -func (m *memorymutationcursor) PutNoDupData(key, value []byte) error { +func (m *memoryMutationCursor) PutNoDupData(key, value []byte) error { panic("DeleteCurrentDuplicates Not implemented") } -func (m *memorymutationcursor) Delete(k, v []byte) error { +func (m *memoryMutationCursor) Delete(k, v []byte) error { return m.mutation.Delete(m.table, k, v) } -func (m *memorymutationcursor) DeleteCurrent() error { +func (m *memoryMutationCursor) DeleteCurrent() error { panic("DeleteCurrent Not implemented") } -func (m *memorymutationcursor) DeleteCurrentDuplicates() error { +func (m *memoryMutationCursor) DeleteCurrentDuplicates() error { panic("DeleteCurrentDuplicates Not implemented") } // Seek move pointer to a key at a certain position. -func (m *memorymutationcursor) SeekBothRange(key, value []byte) ([]byte, error) { +func (m *memoryMutationCursor) SeekBothRange(key, value []byte) ([]byte, error) { if value == nil { _, v, err := m.SeekExact(key) return v, err @@ -320,7 +320,7 @@ func (m *memorymutationcursor) SeekBothRange(key, value []byte) ([]byte, error) return retValue, err } -func (m *memorymutationcursor) Last() ([]byte, []byte, error) { +func (m *memoryMutationCursor) Last() ([]byte, []byte, error) { // TODO(Giulio2002): make fixes. 
memKey, memValue, err := m.memCursor.Last() if err != nil { @@ -380,11 +380,11 @@ func (m *memorymutationcursor) Last() ([]byte, []byte, error) { return dbKey, dbValue, nil } -func (m *memorymutationcursor) Prev() ([]byte, []byte, error) { +func (m *memoryMutationCursor) Prev() ([]byte, []byte, error) { panic("Prev is not implemented!") } -func (m *memorymutationcursor) Close() { +func (m *memoryMutationCursor) Close() { if m.cursor != nil { m.cursor.Close() } @@ -394,26 +394,26 @@ func (m *memorymutationcursor) Close() { return } -func (m *memorymutationcursor) Count() (uint64, error) { +func (m *memoryMutationCursor) Count() (uint64, error) { panic("Not implemented") } -func (m *memorymutationcursor) FirstDup() ([]byte, error) { +func (m *memoryMutationCursor) FirstDup() ([]byte, error) { panic("Not implemented") } -func (m *memorymutationcursor) NextNoDup() ([]byte, []byte, error) { +func (m *memoryMutationCursor) NextNoDup() ([]byte, []byte, error) { panic("Not implemented") } -func (m *memorymutationcursor) LastDup() ([]byte, error) { +func (m *memoryMutationCursor) LastDup() ([]byte, error) { panic("Not implemented") } -func (m *memorymutationcursor) CountDuplicates() (uint64, error) { +func (m *memoryMutationCursor) CountDuplicates() (uint64, error) { panic("Not implemented") } -func (m *memorymutationcursor) SeekBothExact(key, value []byte) ([]byte, []byte, error) { +func (m *memoryMutationCursor) SeekBothExact(key, value []byte) ([]byte, []byte, error) { panic("SeekBothExact Not implemented") } From bca563fd0f0c461d00e721c84ba958da1cb7c7d4 Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Wed, 15 Jun 2022 22:17:16 +0200 Subject: [PATCH 062/136] Fix genesis storage collision state tests (#4462) * Enable a couple of tests that work now * Fix genesis storage collision state tests * IncarnationMap in t8ntool MakePreState --- cmd/evm/internal/t8ntool/execution.go | 7 ++++++- core/genesis.go | 4 ++-- tests/state_test.go | 9 --------- tests/state_test_util.go | 10 +++++++++- 4 files changed, 17 insertions(+), 13 deletions(-) diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go index 06988c0ed78..1ef1457c2c7 100644 --- a/cmd/evm/internal/t8ntool/execution.go +++ b/cmd/evm/internal/t8ntool/execution.go @@ -18,6 +18,7 @@ package t8ntool import ( "context" + "encoding/binary" "fmt" "math/big" @@ -297,7 +298,11 @@ func MakePreState(chainRules *params.Rules, tx kv.RwTx, accounts core.GenesisAll } if len(a.Code) > 0 || len(a.Storage) > 0 { - statedb.SetIncarnation(addr, 1) + statedb.SetIncarnation(addr, state.FirstContractIncarnation) + + var b [8]byte + binary.BigEndian.PutUint64(b[:], state.FirstContractIncarnation) + tx.Put(kv.IncarnationMap, addr[:], b[:]) } } // Commit and re-open to start with a clean state. 
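
To make the effect of the MakePreState hunk above concrete: for every genesis account that carries code or storage, the patch now records the contract incarnation in the kv.IncarnationMap table as an 8-byte big-endian value keyed by the account address. Below is a minimal standalone sketch of that encoding (standard library only; the local constant firstContractIncarnation and its value of 1 are assumptions for illustration, not Erigon identifiers).

package main

import (
	"encoding/binary"
	"fmt"
)

// firstContractIncarnation mirrors the incarnation assigned above to genesis
// accounts that have code or storage (assumed to be 1 in this sketch).
const firstContractIncarnation uint64 = 1

// encodeIncarnation builds the 8-byte big-endian value that would be stored
// under the account address in the incarnation table.
func encodeIncarnation(inc uint64) []byte {
	var b [8]byte
	binary.BigEndian.PutUint64(b[:], inc)
	return b[:]
}

func main() {
	enc := encodeIncarnation(firstContractIncarnation)
	fmt.Printf("stored value: %x\n", enc)                      // 0000000000000001
	fmt.Println("decoded back:", binary.BigEndian.Uint64(enc)) // 1
}

Big-endian encoding is consistent with how other uint64 values are written to the database (for example block numbers via dbutils.EncodeBlockNumber), so the read side can recover the incarnation with binary.BigEndian.Uint64.
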
diff --git a/core/genesis.go b/core/genesis.go index 6935bf9e4a1..89a3ca80e18 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -338,7 +338,7 @@ func (g *Genesis) ToBlock() (*types.Block, *state.IntraBlockState, error) { } if len(account.Code) > 0 || len(account.Storage) > 0 { - statedb.SetIncarnation(addr, 1) + statedb.SetIncarnation(addr, state.FirstContractIncarnation) } } if err := statedb.FinalizeTx(¶ms.Rules{}, w); err != nil { @@ -401,7 +401,7 @@ func (g *Genesis) WriteGenesisState(tx kv.RwTx) (*types.Block, *state.IntraBlock return nil, nil, err } for addr, account := range g.Alloc { - if len(account.Code) == 0 && len(account.Storage) > 0 { + if len(account.Code) > 0 || len(account.Storage) > 0 { // Special case for weird tests - inaccessible storage var b [8]byte binary.BigEndian.PutUint64(b[:], state.FirstContractIncarnation) diff --git a/tests/state_test.go b/tests/state_test.go index 955f1714a70..15766aa1b5a 100644 --- a/tests/state_test.go +++ b/tests/state_test.go @@ -46,15 +46,6 @@ func TestState(t *testing.T) { st.skipLoad(`^stTimeConsuming/`) st.skipLoad(`.*vmPerformance/loop.*`) - // Broken tests: - st.skipLoad(`^stCreate2/create2collisionStorage.json`) - st.skipLoad(`^stExtCodeHash/dynamicAccountOverwriteEmpty.json`) - st.skipLoad(`^stSStoreTest/InitCollision.json`) - st.skipLoad(`^stEIP1559/typeTwoBerlin.json`) - - // value exceeding 256 bit is not supported - st.skipLoad(`^stTransactionTest/ValueOverflow.json`) - st.walk(t, stateTestDir, func(t *testing.T, name string, test *StateTest) { db := memdb.NewTestDB(t) for _, subtest := range test.Subtests() { diff --git a/tests/state_test_util.go b/tests/state_test_util.go index 0200753e588..4989ab2af97 100644 --- a/tests/state_test_util.go +++ b/tests/state_test_util.go @@ -17,6 +17,7 @@ package tests import ( + "encoding/binary" "encoding/json" "fmt" "math/big" @@ -274,9 +275,16 @@ func MakePreState(rules *params.Rules, tx kv.RwTx, accounts core.GenesisAlloc, b } if len(a.Code) > 0 || len(a.Storage) > 0 { - statedb.SetIncarnation(addr, 1) + statedb.SetIncarnation(addr, state.FirstContractIncarnation) + + var b [8]byte + binary.BigEndian.PutUint64(b[:], state.FirstContractIncarnation) + if err := tx.Put(kv.IncarnationMap, addr[:], b[:]); err != nil { + return nil, err + } } } + // Commit and re-open to start with a clean state. 
if err := statedb.FinalizeTx(rules, state.NewPlainStateWriter(tx, nil, blockNr+1)); err != nil { return nil, err From df3eea6414a6b888a6b3b5d8eb5a33ceeca801fc Mon Sep 17 00:00:00 2001 From: ledgerwatch Date: Thu, 16 Jun 2022 11:17:53 +0100 Subject: [PATCH 063/136] Not send sentry task to closed tasks channel (#4467) * Not send sentry task to closed tasks channel * Add comments Co-authored-by: Alexey Sharp --- cmd/sentry/sentry/sentry_grpc_server.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/cmd/sentry/sentry/sentry_grpc_server.go b/cmd/sentry/sentry/sentry_grpc_server.go index d9cff87b712..ab6583a2ce5 100644 --- a/cmd/sentry/sentry/sentry_grpc_server.go +++ b/cmd/sentry/sentry/sentry_grpc_server.go @@ -83,6 +83,8 @@ func (pi *PeerInfo) Close() { defer pi.lock.Unlock() if pi.tasks != nil { close(pi.tasks) + // Setting this to nil because other functions detect the closure of the channel by checking pi.tasks == nil + pi.tasks = nil } } @@ -144,11 +146,16 @@ func (pi *PeerInfo) Remove() { func (pi *PeerInfo) Async(f func()) { pi.lock.Lock() defer pi.lock.Unlock() + if pi.tasks == nil { + // Too late, the task channel has been closed + return + } select { case <-pi.removed: // noop if peer removed case <-pi.ctx.Done(): if pi.tasks != nil { close(pi.tasks) + // Setting this to nil because other functions detect the closure of the channel by checking pi.tasks == nil pi.tasks = nil } case pi.tasks <- f: From 963fb7a7f5b5f97b4e40e82eb49ea9114b4d0258 Mon Sep 17 00:00:00 2001 From: Enrique Jose Avila Asapche Date: Thu, 16 Jun 2022 17:32:32 +0300 Subject: [PATCH 064/136] BSC main net Euler fork (#4461) * added euler into chainConfig * parlia abi update * updated upgrade.go * ops/ * added euler gas usage * added eulerBlock for bsc * changed block positioning * comment --- consensus/parlia/abi.go | 1035 ++++++++++++++++++++++++++++--- consensus/parlia/parlia.go | 7 +- core/state_processor.go | 8 +- core/systemcontracts/upgrade.go | 66 +- params/chainspecs/bsc.json | 1 + params/config.go | 19 +- 6 files changed, 1033 insertions(+), 103 deletions(-) diff --git a/consensus/parlia/abi.go b/consensus/parlia/abi.go index a0b49800c49..e841eb5cd5c 100644 --- a/consensus/parlia/abi.go +++ b/consensus/parlia/abi.go @@ -6,7 +6,7 @@ const validatorSetABI = ` "anonymous": false, "inputs": [ { - "indexed": true, + "indexed": false, "internalType": "uint256", "name": "amount", "type": "uint256" @@ -15,6 +15,44 @@ const validatorSetABI = ` "name": "batchTransfer", "type": "event" }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "uint256", + "name": "amount", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "string", + "name": "reason", + "type": "string" + } + ], + "name": "batchTransferFailed", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "uint256", + "name": "amount", + "type": "uint256" + }, + { + "indexed": false, + "internalType": "bytes", + "name": "reason", + "type": "bytes" + } + ], + "name": "batchTransferLowerFailed", + "type": "event" + }, { "anonymous": false, "inputs": [ @@ -25,7 +63,7 @@ const validatorSetABI = ` "type": "address" }, { - "indexed": true, + "indexed": false, "internalType": "uint256", "name": "amount", "type": "uint256" @@ -44,7 +82,7 @@ const validatorSetABI = ` "type": "address" }, { - "indexed": true, + "indexed": false, "internalType": "uint256", "name": "amount", "type": "uint256" @@ -58,6 +96,70 @@ const validatorSetABI = ` "inputs": [ { "indexed": true, + 
"internalType": "address payable", + "name": "validator", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "amount", + "type": "uint256" + } + ], + "name": "directTransferFail", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "string", + "name": "message", + "type": "string" + } + ], + "name": "failReasonWithStr", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "uint256", + "name": "amount", + "type": "uint256" + } + ], + "name": "feeBurned", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "string", + "name": "key", + "type": "string" + }, + { + "indexed": false, + "internalType": "bytes", + "name": "value", + "type": "bytes" + } + ], + "name": "paramChange", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, "internalType": "uint256", "name": "amount", "type": "uint256" @@ -66,6 +168,25 @@ const validatorSetABI = ` "name": "systemTransfer", "type": "event" }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "uint8", + "name": "channelId", + "type": "uint8" + }, + { + "indexed": false, + "internalType": "bytes", + "name": "msgBytes", + "type": "bytes" + } + ], + "name": "unexpectedPackage", + "type": "event" + }, { "anonymous": false, "inputs": [ @@ -76,7 +197,7 @@ const validatorSetABI = ` "type": "address" }, { - "indexed": true, + "indexed": false, "internalType": "uint256", "name": "amount", "type": "uint256" @@ -93,9 +214,48 @@ const validatorSetABI = ` "internalType": "address", "name": "validator", "type": "address" - }, + } + ], + "name": "validatorEmptyJailed", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "validator", + "type": "address" + } + ], + "name": "validatorEnterMaintenance", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "validator", + "type": "address" + } + ], + "name": "validatorExitMaintenance", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ { "indexed": true, + "internalType": "address", + "name": "validator", + "type": "address" + }, + { + "indexed": false, "internalType": "uint256", "name": "amount", "type": "uint256" @@ -127,7 +287,7 @@ const validatorSetABI = ` "type": "address" }, { - "indexed": true, + "indexed": false, "internalType": "uint256", "name": "amount", "type": "uint256" @@ -144,7 +304,7 @@ const validatorSetABI = ` }, { "inputs": [], - "name": "CHANNEL_ID", + "name": "BIND_CHANNELID", "outputs": [ { "internalType": "uint8", @@ -157,12 +317,12 @@ const validatorSetABI = ` }, { "inputs": [], - "name": "DUSTY_INCOMING", + "name": "BURN_ADDRESS", "outputs": [ { - "internalType": "uint256", + "internalType": "address", "name": "", - "type": "uint256" + "type": "address" } ], "stateMutability": "view", @@ -170,7 +330,7 @@ const validatorSetABI = ` }, { "inputs": [], - "name": "EXTRA_FEE", + "name": "BURN_RATIO_SCALE", "outputs": [ { "internalType": "uint256", @@ -183,12 +343,25 @@ const validatorSetABI = ` }, { "inputs": [], - "name": "JAIL_MESSAGE_TYPE", + "name": "CODE_OK", "outputs": [ { - "internalType": "uint8", + "internalType": "uint32", "name": "", - "type": "uint8" + "type": "uint32" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": 
"CROSS_CHAIN_CONTRACT_ADDR", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" } ], "stateMutability": "view", @@ -196,7 +369,7 @@ const validatorSetABI = ` }, { "inputs": [], - "name": "RELAYER_REWARD", + "name": "DUSTY_INCOMING", "outputs": [ { "internalType": "uint256", @@ -209,12 +382,12 @@ const validatorSetABI = ` }, { "inputs": [], - "name": "SYSTEM_ADDRESS", + "name": "EPOCH", "outputs": [ { - "internalType": "address", + "internalType": "uint256", "name": "", - "type": "address" + "type": "uint256" } ], "stateMutability": "view", @@ -222,12 +395,12 @@ const validatorSetABI = ` }, { "inputs": [], - "name": "VALIDATORS_UPDATE_MESSAGE_TYPE", + "name": "ERROR_FAIL_CHECK_VALIDATORS", "outputs": [ { - "internalType": "uint8", + "internalType": "uint32", "name": "", - "type": "uint8" + "type": "uint32" } ], "stateMutability": "view", @@ -235,55 +408,63 @@ const validatorSetABI = ` }, { "inputs": [], - "name": "alreadyInit", + "name": "ERROR_FAIL_DECODE", "outputs": [ { - "internalType": "bool", + "internalType": "uint32", "name": "", - "type": "bool" + "type": "uint32" } ], "stateMutability": "view", "type": "function" }, { - "inputs": [ + "inputs": [], + "name": "ERROR_LEN_OF_VAL_MISMATCH", + "outputs": [ { - "internalType": "uint256", + "internalType": "uint32", "name": "", - "type": "uint256" + "type": "uint32" } ], - "name": "currentValidatorSet", + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "ERROR_RELAYFEE_TOO_LARGE", "outputs": [ { - "internalType": "address", - "name": "consensusAddress", - "type": "address" - }, - { - "internalType": "address payable", - "name": "feeAddress", - "type": "address" - }, - { - "internalType": "address", - "name": "BBCFeeAddress", - "type": "address" - }, - { - "internalType": "uint64", - "name": "votingPower", - "type": "uint64" - }, + "internalType": "uint32", + "name": "", + "type": "uint32" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "ERROR_UNKNOWN_PACKAGE_TYPE", + "outputs": [ { - "internalType": "bool", - "name": "jailed", - "type": "bool" - }, + "internalType": "uint32", + "name": "", + "type": "uint32" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "EXPIRE_TIME_SECOND_GAP", + "outputs": [ { "internalType": "uint256", - "name": "incoming", + "name": "", "type": "uint256" } ], @@ -292,12 +473,12 @@ const validatorSetABI = ` }, { "inputs": [], - "name": "fromChainId", + "name": "GOV_CHANNELID", "outputs": [ { - "internalType": "uint16", + "internalType": "uint8", "name": "", - "type": "uint16" + "type": "uint8" } ], "stateMutability": "view", @@ -305,7 +486,7 @@ const validatorSetABI = ` }, { "inputs": [], - "name": "initLightClientAddr", + "name": "GOV_HUB_ADDR", "outputs": [ { "internalType": "address", @@ -318,7 +499,7 @@ const validatorSetABI = ` }, { "inputs": [], - "name": "initSlashContract", + "name": "INCENTIVIZE_ADDR", "outputs": [ { "internalType": "address", @@ -331,12 +512,12 @@ const validatorSetABI = ` }, { "inputs": [], - "name": "initSystemRewardAddr", + "name": "INIT_BURN_RATIO", "outputs": [ { - "internalType": "address payable", + "internalType": "uint256", "name": "", - "type": "address" + "type": "uint256" } ], "stateMutability": "view", @@ -344,12 +525,12 @@ const validatorSetABI = ` }, { "inputs": [], - "name": "initTokenHubAddr", + "name": "INIT_MAINTAIN_SLASH_SCALE", "outputs": [ { - "internalType": "address", + "internalType": "uint256", "name": "", - 
"type": "address" + "type": "uint256" } ], "stateMutability": "view", @@ -357,12 +538,12 @@ const validatorSetABI = ` }, { "inputs": [], - "name": "initValidatorSetBytes", + "name": "INIT_MAX_NUM_OF_MAINTAINING", "outputs": [ { - "internalType": "bytes", + "internalType": "uint256", "name": "", - "type": "bytes" + "type": "uint256" } ], "stateMutability": "view", @@ -370,12 +551,12 @@ const validatorSetABI = ` }, { "inputs": [], - "name": "keyPrefix", + "name": "INIT_NUM_OF_CABINETS", "outputs": [ { - "internalType": "bytes", + "internalType": "uint256", "name": "", - "type": "bytes" + "type": "uint256" } ], "stateMutability": "view", @@ -383,12 +564,12 @@ const validatorSetABI = ` }, { "inputs": [], - "name": "previousDepositHeight", + "name": "INIT_VALIDATORSET_BYTES", "outputs": [ { - "internalType": "uint64", + "internalType": "bytes", "name": "", - "type": "uint64" + "type": "bytes" } ], "stateMutability": "view", @@ -396,12 +577,12 @@ const validatorSetABI = ` }, { "inputs": [], - "name": "sequence", + "name": "JAIL_MESSAGE_TYPE", "outputs": [ { - "internalType": "uint64", + "internalType": "uint8", "name": "", - "type": "uint64" + "type": "uint8" } ], "stateMutability": "view", @@ -409,12 +590,12 @@ const validatorSetABI = ` }, { "inputs": [], - "name": "toChainId", + "name": "LIGHT_CLIENT_ADDR", "outputs": [ { - "internalType": "uint16", + "internalType": "address", "name": "", - "type": "uint16" + "type": "address" } ], "stateMutability": "view", @@ -422,7 +603,7 @@ const validatorSetABI = ` }, { "inputs": [], - "name": "totalInComing", + "name": "MAX_NUM_OF_VALIDATORS", "outputs": [ { "internalType": "uint256", @@ -435,9 +616,403 @@ const validatorSetABI = ` }, { "inputs": [], - "name": "init", - "outputs": [], - "stateMutability": "nonpayable", + "name": "PRECISION", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "RELAYERHUB_CONTRACT_ADDR", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "SLASH_CHANNELID", + "outputs": [ + { + "internalType": "uint8", + "name": "", + "type": "uint8" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "SLASH_CONTRACT_ADDR", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "STAKING_CHANNELID", + "outputs": [ + { + "internalType": "uint8", + "name": "", + "type": "uint8" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "SYSTEM_ADDRESS", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "SYSTEM_REWARD_ADDR", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "TOKEN_HUB_ADDR", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "TOKEN_MANAGER_ADDR", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + 
"name": "TRANSFER_IN_CHANNELID", + "outputs": [ + { + "internalType": "uint8", + "name": "", + "type": "uint8" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "TRANSFER_OUT_CHANNELID", + "outputs": [ + { + "internalType": "uint8", + "name": "", + "type": "uint8" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "VALIDATORS_UPDATE_MESSAGE_TYPE", + "outputs": [ + { + "internalType": "uint8", + "name": "", + "type": "uint8" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "VALIDATOR_CONTRACT_ADDR", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "alreadyInit", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "bscChainID", + "outputs": [ + { + "internalType": "uint16", + "name": "", + "type": "uint16" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "burnRatio", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "burnRatioInitialized", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "name": "currentValidatorSet", + "outputs": [ + { + "internalType": "address", + "name": "consensusAddress", + "type": "address" + }, + { + "internalType": "address payable", + "name": "feeAddress", + "type": "address" + }, + { + "internalType": "address", + "name": "BBCFeeAddress", + "type": "address" + }, + { + "internalType": "uint64", + "name": "votingPower", + "type": "uint64" + }, + { + "internalType": "bool", + "name": "jailed", + "type": "bool" + }, + { + "internalType": "uint256", + "name": "incoming", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "name": "currentValidatorSetMap", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "expireTimeSecondGap", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "maintainSlashScale", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "maxNumOfCandidates", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "maxNumOfMaintaining", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "maxNumOfWorkingCandidates", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": 
"numOfCabinets", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "numOfJailed", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "numOfMaintaining", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "totalInComing", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", "type": "function" }, { @@ -446,41 +1021,175 @@ const validatorSetABI = ` "internalType": "address", "name": "valAddr", "type": "address" + }, + { + "internalType": "address", + "name": "slashAddr", + "type": "address" + }, + { + "internalType": "address", + "name": "rewardAddr", + "type": "address" + }, + { + "internalType": "address", + "name": "lightAddr", + "type": "address" + }, + { + "internalType": "address", + "name": "tokenHubAddr", + "type": "address" + }, + { + "internalType": "address", + "name": "incentivizeAddr", + "type": "address" + }, + { + "internalType": "address", + "name": "relayerHubAddr", + "type": "address" + }, + { + "internalType": "address", + "name": "govHub", + "type": "address" + }, + { + "internalType": "address", + "name": "tokenManagerAddr", + "type": "address" + }, + { + "internalType": "address", + "name": "crossChain", + "type": "address" } ], - "name": "deposit", + "name": "updateContractAddr", "outputs": [], - "stateMutability": "payable", + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "name": "validatorExtraSet", + "outputs": [ + { + "internalType": "uint256", + "name": "enterMaintenanceHeight", + "type": "uint256" + }, + { + "internalType": "bool", + "name": "isMaintaining", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "init", + "outputs": [], + "stateMutability": "nonpayable", "type": "function" }, { "inputs": [ + { + "internalType": "uint8", + "name": "", + "type": "uint8" + }, { "internalType": "bytes", "name": "msgBytes", "type": "bytes" - }, + } + ], + "name": "handleSynPackage", + "outputs": [ { "internalType": "bytes", - "name": "proof", + "name": "responsePayload", "type": "bytes" + } + ], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint8", + "name": "channelId", + "type": "uint8" }, { - "internalType": "uint64", - "name": "height", - "type": "uint64" + "internalType": "bytes", + "name": "msgBytes", + "type": "bytes" + } + ], + "name": "handleAckPackage", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint8", + "name": "channelId", + "type": "uint8" }, { - "internalType": "uint64", - "name": "packageSequence", - "type": "uint64" + "internalType": "bytes", + "name": "msgBytes", + "type": "bytes" } ], - "name": "update", + "name": "handleFailAckPackage", "outputs": [], "stateMutability": "nonpayable", "type": "function" }, + { + "inputs": [ + { + "internalType": "address", + "name": "valAddr", + "type": "address" + } + ], + "name": "deposit", + "outputs": [], + "stateMutability": "payable", + "type": "function" + }, + { + "inputs": [], + 
"name": "getMiningValidators", + "outputs": [ + { + "internalType": "address[]", + "name": "", + "type": "address[]" + } + ], + "stateMutability": "view", + "type": "function" + }, { "inputs": [], "name": "getValidators", @@ -494,6 +1203,25 @@ const validatorSetABI = ` "stateMutability": "view", "type": "function" }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "index", + "type": "uint256" + } + ], + "name": "isWorkingValidator", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, { "inputs": [ { @@ -513,6 +1241,25 @@ const validatorSetABI = ` "stateMutability": "view", "type": "function" }, + { + "inputs": [ + { + "internalType": "address", + "name": "validator", + "type": "address" + } + ], + "name": "isCurrentValidator", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, { "inputs": [ { @@ -538,6 +1285,108 @@ const validatorSetABI = ` "outputs": [], "stateMutability": "nonpayable", "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_validator", + "type": "address" + } + ], + "name": "getCurrentValidatorIndex", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "index", + "type": "uint256" + } + ], + "name": "canEnterMaintenance", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "enterMaintenance", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "exitMaintenance", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "string", + "name": "key", + "type": "string" + }, + { + "internalType": "bytes", + "name": "value", + "type": "bytes" + } + ], + "name": "updateParam", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "validator", + "type": "address" + } + ], + "name": "isValidatorExist", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "getMaintainingValidators", + "outputs": [ + { + "internalType": "address[]", + "name": "maintainingValidators", + "type": "address[]" + } + ], + "stateMutability": "view", + "type": "function" } ] ` diff --git a/consensus/parlia/parlia.go b/consensus/parlia/parlia.go index c60e20f2ab2..30fd185239a 100644 --- a/consensus/parlia/parlia.go +++ b/consensus/parlia/parlia.go @@ -982,7 +982,12 @@ func (p *Parlia) Close() error { // getCurrentValidators get current validators func (p *Parlia) getCurrentValidators(header *types.Header, ibs *state.IntraBlockState) ([]common.Address, error) { // method - method := "getValidators" + var method string + if p.chainConfig.IsEuler(header.Number) { + method = "getMiningValidators" + } else { + method = "getValidators" + } data, err := p.validatorSetABI.Pack(method) if err != nil { log.Error("Unable to pack tx for getValidators", "err", err) diff --git a/core/state_processor.go b/core/state_processor.go index 56608c94dfc..96d409256af 100644 --- a/core/state_processor.go +++ b/core/state_processor.go 
@@ -107,8 +107,12 @@ func applyTransaction(config *params.ChainConfig, gp *GasPool, statedb *state.In if err = statedb.FinalizeTx(rules, stateWriter); err != nil { return nil, nil, err } - - *usedGas += result.UsedGas + // checks if current header is an Euler block or not (returns false for all the chains except BSC) + if config.IsEuler(header.Number) { + *usedGas += result.UsedGas * 3 + } else { + *usedGas += result.UsedGas + } // Set the receipt logs and create the bloom filter. // based on the eip phase, we're passing whether the root touch-delete accounts. diff --git a/core/systemcontracts/upgrade.go b/core/systemcontracts/upgrade.go index abbc618552a..e95b6eb1799 100644 --- a/core/systemcontracts/upgrade.go +++ b/core/systemcontracts/upgrade.go @@ -10,7 +10,6 @@ import ( "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/params" - "github.com/ledgerwatch/erigon/params/networkname" ) type UpgradeConfig struct { @@ -36,6 +35,7 @@ const ( ) var ( + GenesisHash common.Hash //upgrade config ramanujanUpgrade = make(map[string]*Upgrade) @@ -44,6 +44,8 @@ var ( mirrorUpgrade = make(map[string]*Upgrade) brunoUpgrade = make(map[string]*Upgrade) + + eulerUpgrade = make(map[string]*Upgrade) ) func init() { @@ -310,6 +312,55 @@ func init() { }, }, } + + eulerUpgrade[mainNet] = &Upgrade{ + UpgradeName: "euler", + Configs: []*UpgradeConfig{ + { + ContractAddr: ValidatorContract, + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/db8bb560ac5a1265c685b719c7e976dced162310", + Code: "6080604052600436106104055760003560e01c80638d19a41011610213578063c81b166211610123578063eb57e202116100ab578063f9a2bbc71161007a578063f9a2bbc714610d55578063fc3e590814610d6a578063fccc281314610d7f578063fd4ad81f14610d94578063fd6a687914610dd757610405565b8063eb57e20214610cd2578063eda5868c14610d05578063f340fa0114610d1a578063f92eb86b14610d4057610405565b8063daacdb66116100f2578063daacdb6614610c69578063dc927faf14610c7e578063e086c7b114610c93578063e1c7392a14610ca8578063e40716a114610cbd57610405565b8063c81b166214610c2a578063c8509d8114610939578063d68fb56a14610c3f578063d86222d514610c5457610405565b8063a78abc16116101a6578063ad3c9da611610175578063ad3c9da614610bb8578063b7ab4db514610beb578063b8cf4ef114610c00578063bf9f49951461065f578063c6d3394514610c1557610405565b8063a78abc1614610aae578063aaf5eb6814610ac3578063ab51bb9614610ad8578063ac43175114610aed57610405565b80639fe0f816116101e25780639fe0f81614610a5a578063a0dc275814610a6f578063a1a11bf514610a84578063a5422d5c14610a9957610405565b80638d19a410146109e85780639369d7de14610a1b57806396713da914610a305780639dc0926214610a4557610405565b80635192c82c1161031957806375d47a0a116102a157806381650b621161027057806381650b6214610924578063831d65d114610939578063853230aa146108e557806386249882146109be5780638b5ad0c9146109d357610405565b806375d47a0a146108d057806378dfed4a146108e55780637942fd05146108fa5780637a84ca2a1461090f57610405565b80635667515a116102e85780635667515a146108065780635d77156c1461081b5780636969a25c146108305780636e47b482146108a657806370fd5bad146108bb57610405565b80635192c82c1461077657806351e806721461078b57806355614fcc146107a0578063565c56b3146107d357610405565b80633365af3a1161039c57806343756e5c1161036b57806343756e5c1461068a57806345cf9daf146106bb578063493279b1146106d05780634bf6c882146106fc5780634df6e0c31461071157610405565b80633365af3a146105ed57806335409f7f146106175780633de0f0d81461064a5780633dffc3871461065f57610405565b8063152ad3b8116103d8578063152ad3b8146105705780631ff1806914610599578063219f22d5146105ae578063321d398a146105c357610405565b806304c4
fec61461040a57806307a56847146104215780630bee7a67146104485780631182b87514610476575b600080fd5b34801561041657600080fd5b5061041f610dec565b005b34801561042d57600080fd5b50610436610e7f565b60408051918252519081900360200190f35b34801561045457600080fd5b5061045d610e85565b6040805163ffffffff9092168252519081900360200190f35b34801561048257600080fd5b506104fb6004803603604081101561049957600080fd5b60ff8235169190810190604081016020820135600160201b8111156104bd57600080fd5b8201836020820111156104cf57600080fd5b803590602001918460018302840111600160201b831117156104f057600080fd5b509092509050610e8a565b6040805160208082528351818301528351919283929083019185019080838360005b8381101561053557818101518382015260200161051d565b50505050905090810190601f1680156105625780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561057c57600080fd5b5061058561111c565b604080519115158252519081900360200190f35b3480156105a557600080fd5b50610436611125565b3480156105ba57600080fd5b5061045d61112b565b3480156105cf57600080fd5b50610585600480360360208110156105e657600080fd5b5035611130565b3480156105f957600080fd5b506105856004803603602081101561061057600080fd5b50356111ff565b34801561062357600080fd5b5061041f6004803603602081101561063a57600080fd5b50356001600160a01b03166112b0565b34801561065657600080fd5b5061043661140f565b34801561066b57600080fd5b50610674611415565b6040805160ff9092168252519081900360200190f35b34801561069657600080fd5b5061069f61141a565b604080516001600160a01b039092168252519081900360200190f35b3480156106c757600080fd5b50610436611420565b3480156106dc57600080fd5b506106e5611426565b6040805161ffff9092168252519081900360200190f35b34801561070857600080fd5b5061067461142b565b34801561071d57600080fd5b50610726611430565b60408051602080825283518183015283519192839290830191858101910280838360005b8381101561076257818101518382015260200161074a565b505050509050019250505060405180910390f35b34801561078257600080fd5b5061043661152b565b34801561079757600080fd5b5061069f611531565b3480156107ac57600080fd5b50610585600480360360208110156107c357600080fd5b50356001600160a01b0316611537565b3480156107df57600080fd5b50610436600480360360208110156107f657600080fd5b50356001600160a01b031661156c565b34801561081257600080fd5b506106746115bd565b34801561082757600080fd5b5061045d6115c2565b34801561083c57600080fd5b5061085a6004803603602081101561085357600080fd5b50356115c7565b604080516001600160a01b039788168152958716602087015293909516848401526001600160401b0390911660608401521515608083015260a082019290925290519081900360c00190f35b3480156108b257600080fd5b5061069f61162b565b3480156108c757600080fd5b50610674611631565b3480156108dc57600080fd5b5061069f611636565b3480156108f157600080fd5b5061043661163c565b34801561090657600080fd5b50610674611642565b34801561091b57600080fd5b50610436611647565b34801561093057600080fd5b5061045d61164d565b34801561094557600080fd5b5061041f6004803603604081101561095c57600080fd5b60ff8235169190810190604081016020820135600160201b81111561098057600080fd5b82018360208201111561099257600080fd5b803590602001918460018302840111600160201b831117156109b357600080fd5b509092509050611652565b3480156109ca57600080fd5b50610436611705565b3480156109df57600080fd5b5061043661170b565b3480156109f457600080fd5b5061043660048036036020811015610a0b57600080fd5b50356001600160a01b0316611711565b348015610a2757600080fd5b5061041f611786565b348015610a3c57600080fd5b506106746118a0565b348015610a5157600080fd5b5061069f6118a5565b348015610a6657600080fd5b506104366118ab565b348015610a7b57600080fd5b506104366118b0565b348015610a9057600080fd5b5061069f6118b5565b348015610aa557600080fd5b506104fb6118bb565b348015610aba57600080fd5b506105856118da565b348015610acf57600080f
d5b506104366118e3565b348015610ae457600080fd5b5061045d6115bd565b348015610af957600080fd5b5061041f60048036036040811015610b1057600080fd5b810190602081018135600160201b811115610b2a57600080fd5b820183602082011115610b3c57600080fd5b803590602001918460018302840111600160201b83111715610b5d57600080fd5b919390929091602081019035600160201b811115610b7a57600080fd5b820183602082011115610b8c57600080fd5b803590602001918460018302840111600160201b83111715610bad57600080fd5b5090925090506118ec565b348015610bc457600080fd5b5061043660048036036020811015610bdb57600080fd5b50356001600160a01b0316612329565b348015610bf757600080fd5b5061072661233b565b348015610c0c57600080fd5b5061043661241e565b348015610c2157600080fd5b50610436611631565b348015610c3657600080fd5b5061069f612423565b348015610c4b57600080fd5b50610436612429565b348015610c6057600080fd5b50610436612468565b348015610c7557600080fd5b50610436612474565b348015610c8a57600080fd5b5061069f61247a565b348015610c9f57600080fd5b50610436612480565b348015610cb457600080fd5b5061041f612485565b348015610cc957600080fd5b50610436612688565b348015610cde57600080fd5b5061041f60048036036020811015610cf557600080fd5b50356001600160a01b031661268e565b348015610d1157600080fd5b5061045d61279c565b61041f60048036036020811015610d3057600080fd5b50356001600160a01b03166127a1565b348015610d4c57600080fd5b50610436612aa6565b348015610d6157600080fd5b5061069f612aac565b348015610d7657600080fd5b506106746118ab565b348015610d8b57600080fd5b5061069f612ab2565b348015610da057600080fd5b50610dbe60048036036020811015610db757600080fd5b5035612ab8565b6040805192835290151560208301528051918290030190f35b348015610de357600080fd5b5061069f612ae6565b6000610df733611711565b9050600b8181548110610e0657fe5b600091825260209091206001601690920201015460ff16610e63576040805162461bcd60e51b81526020600482015260126024820152716e6f7420696e206d61696e74656e616e636560701b604482015290519081900360640190fd5b6000610e6d612429565b9050610e7a338383612aec565b505050565b60095481565b606481565b60005460609060ff16610ee0576040805162461bcd60e51b81526020600482015260196024820152781d1a194818dbdb9d1c9858dd081b9bdd081a5b9a5d081e595d603a1b604482015290519081900360640190fd5b3361200014610f205760405162461bcd60e51b815260040180806020018281038252602f81526020018061648b602f913960400191505060405180910390fd5b600b54610fc557610f2f6159e0565b60015460005b81811015610fc157600b80546001810182556000919091528351600080516020615e15833981519152601690920291820190815560208501516000805160206165148339815191528301805460ff191691151591909117905560408501518592610fb391600080516020615e5b833981519152909101906014615a04565b505050806001019050610f35565b5050505b610fcd615a3e565b600061100e85858080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250612ce092505050565b915091508061102a576110216064612e39565b92505050611115565b815160009060ff1661104a576110438360200151612e9a565b90506110e1565b825160ff16600114156110dd578260200151516001146110b7577f70e72399380dcfb0338abc03dc8d47f9f470ada8e769c9a78d644ea97385ecb2604051808060200182810382526025815260200180615d5b6025913960400191505060405180910390a15060676110d8565b61104383602001516000815181106110cb57fe5b6020026020010151613caa565b6110e1565b5060655b63ffffffff811661110657505060408051600081526020810190915291506111159050565b61110f81612e39565b93505050505b9392505050565b60075460ff1681565b60035481565b606881565b6001546000908210611144575060006111fa565b60006001600160a01b03166001838154811061115c57fe5b60009182526020909120600490910201546001600160a01b0316148061118c5750600854158061118c5750600a54155b8061119b575060085460095410155b806111ac57506111aa826111ff565b155b806111d557506000600b83815481106111c157
fe5b906000526020600020906016020160000154115b806111e9575060016111e561233b565b5111155b156111f6575060006111fa565b5060015b919050565b6001546000908210611213575060006111fa565b600b548210611250576001828154811061122957fe5b9060005260206000209060040201600201601c9054906101000a900460ff161590506111fa565b6001828154811061125d57fe5b9060005260206000209060040201600201601c9054906101000a900460ff161580156112aa5750600b828154811061129157fe5b600091825260209091206001601690920201015460ff16155b92915050565b33611001146112f05760405162461bcd60e51b81526004018080602001828103825260298152602001806165346029913960400191505060405180910390fd5b600b54611395576112ff6159e0565b60015460005b8181101561139157600b80546001810182556000919091528351600080516020615e15833981519152601690920291820190815560208501516000805160206165148339815191528301805460ff19169115159190911790556040850151859261138391600080516020615e5b833981519152909101906014615a04565b505050806001019050611305565b5050505b6001600160a01b038116600090815260046020526040902054806113b9575061140c565b6001810390506000600b82815481106113ce57fe5b600091825260209091206001601690920201015460ff1690506113f18383613e21565b80156113fa5750805b15610e7a576009805460001901905550505b50565b61271081565b600181565b61100181565b60085481565b603881565b600881565b600e54600c546060919080611443575060155b606061144d61233b565b905081815111611461579250611528915050565b82828251031015611473578181510392505b82156114a75760c8430461148e82828686036000888861419d565b6114a582828686038787038889898951030161419d565b505b6060826040519080825280602002602001820160405280156114d3578160200160208202803683370190505b50905060005b83811015611521578281815181106114ed57fe5b602002602001015182828151811061150157fe5b6001600160a01b03909216602092830291909101909101526001016114d9565b5093505050505b90565b60065481565b61200081565b6001600160a01b0381166000908152600460205260408120548061155f5760009150506111fa565b60001901611115816111ff565b6001600160a01b038116600090815260046020526040812054806115945760009150506111fa565b6001808203815481106115a357fe5b906000526020600020906004020160030154915050919050565b600081565b606781565b600181815481106115d457fe5b600091825260209091206004909102018054600182015460028301546003909301546001600160a01b0392831694509082169291821691600160a01b81046001600160401b031691600160e01b90910460ff169086565b61100581565b600281565b61100881565b6103e881565b600b81565b600c5481565b606681565b33612000146116925760405162461bcd60e51b815260040180806020018281038252602f81526020018061648b602f913960400191505060405180910390fd5b7f41ce201247b6ceb957dcdb217d0b8acb50b9ea0e12af9af4f5e7f38902101605838383604051808460ff1660ff168152602001806020018281038252848482818152602001925080828437600083820152604051601f909101601f1916909201829003965090945050505050a1505050565b60025481565b600a5481565b6001600160a01b0381166000908152600460205260408120548061177c576040805162461bcd60e51b815260206004820152601760248201527f6f6e6c792063757272656e742076616c696461746f7273000000000000000000604482015290519081900360640190fd5b6000190192915050565b600b5461182b576117956159e0565b60015460005b8181101561182757600b80546001810182556000919091528351600080516020615e15833981519152601690920291820190815560208501516000805160206165148339815191528301805460ff19169115159190911790556040850151859261181991600080516020615e5b833981519152909101906014615a04565b50505080600101905061179b565b5050505b6008546118385760036008555b600a54611845576002600a555b600061185033611711565b905061185b81611130565b6118965760405162461bcd60e51b8152600401808060200182810382526023815260200180615d386023913960400191505060405180910390fd5b61140c338261428c565b600981565b61100781565b600381565b6
0c881565b61100681565b6040518061062001604052806105ef8152602001615e9c6105ef913981565b60005460ff1681565b6402540be40081565b60005460ff1661193f576040805162461bcd60e51b81526020600482015260196024820152781d1a194818dbdb9d1c9858dd081b9bdd081a5b9a5d081e595d603a1b604482015290519081900360640190fd5b336110071461197f5760405162461bcd60e51b815260040180806020018281038252602e815260200180615da1602e913960400191505060405180910390fd5b6119e984848080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525050604080518082019091526013815272065787069726554696d655365636f6e6447617606c1b602082015291506143249050565b15611ac45760208114611a2d5760405162461bcd60e51b8152600401808060200182810382526026815260200180615e356026913960400191505060405180910390fd5b604080516020601f8401819004810282018101909252828152600091611a6b9185858083850183828082843760009201919091525061440b92505050565b905060648110158015611a815750620186a08111155b611abc5760405162461bcd60e51b8152600401808060200182810382526027815260200180615cc86027913960400191505060405180910390fd5b600255612297565b611b2484848080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250506040805180820190915260098152686275726e526174696f60b81b602082015291506143249050565b15611c145760208114611b7e576040805162461bcd60e51b815260206004820152601c60248201527f6c656e677468206f66206275726e526174696f206d69736d6174636800000000604482015290519081900360640190fd5b604080516020601f8401819004810282018101909252828152600091611bbc9185858083850183828082843760009201919091525061440b92505050565b9050612710811115611bff5760405162461bcd60e51b815260040180806020018281038252602b815260200180615c72602b913960400191505060405180910390fd5b6006556007805460ff19166001179055612297565b611c7e84848080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250506040805180820190915260138152726d61784e756d4f664d61696e7461696e696e6760681b602082015291506143249050565b15611d565760208114611cc25760405162461bcd60e51b8152600401808060200182810382526026815260200180615b396026913960400191505060405180910390fd5b604080516020601f8401819004810282018101909252828152600091611d009185858083850183828082843760009201919091525061440b92505050565b600c5490915080611d0f575060155b808210611d4d5760405162461bcd60e51b8152600401808060200182810382526038815260200180615bb06038913960400191505060405180910390fd5b50600855612297565b611dbf84848080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250506040805180820190915260128152716d61696e7461696e536c6173685363616c6560701b602082015291506143249050565b15611e8a5760208114611e035760405162461bcd60e51b8152600401808060200182810382526025815260200180615b8b6025913960400191505060405180910390fd5b604080516020601f8401819004810282018101909252828152600091611e419185858083850183828082843760009201919091525061440b92505050565b905060008111611e825760405162461bcd60e51b815260040180806020018281038252602d8152602001806164e7602d913960400191505060405180910390fd5b600a55612297565b611efe84848080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152505060408051808201909152601981527f6d61784e756d4f66576f726b696e6743616e6469646174657300000000000000602082015291506143249050565b15611fcb5760208114611f425760405162461bcd60e51b815260040180806020018281038252602c815260200180615b5f602c913960400191505060405180910390fd5b604080516020601f8401819004810282018101909252828152600091611f809185858083850183828082843760009201919091525061440b92505050565b9050600d54811115611fc35760405162461bcd60e51b815260040180806020018281
0382526049815260200180615cef6049913960600191505060405180910390fd5b600e55612297565b61203484848080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250506040805180820190915260128152716d61784e756d4f6643616e6469646174657360701b602082015291506143249050565b156120d557602081146120785760405162461bcd60e51b8152600401808060200182810382526025815260200180615dcf6025913960400191505060405180910390fd5b604080516020601f84018190048102820181019092528281526000916120b69185858083850183828082843760009201919091525061440b92505050565b600d819055600e549091508110156120cf57600d54600e555b50612297565b61213984848080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152505060408051808201909152600d81526c6e756d4f66436162696e65747360981b602082015291506143249050565b1561225a5760208114612193576040805162461bcd60e51b815260206004820181905260248201527f6c656e677468206f66206e756d4f66436162696e657473206d69736d61746368604482015290519081900360640190fd5b604080516020601f84018190048102820181019092528281526000916121d19185858083850183828082843760009201919091525061440b92505050565b9050600081116122125760405162461bcd60e51b8152600401808060200182810382526028815260200180615be86028913960400191505060405180910390fd5b60298111156122525760405162461bcd60e51b8152600401808060200182810382526039815260200180615c106039913960400191505060405180910390fd5b600c55612297565b6040805162461bcd60e51b815260206004820152600d60248201526c756e6b6e6f776e20706172616d60981b604482015290519081900360640190fd5b7f6cdb0ac70ab7f2e2d035cca5be60d89906f2dede7648ddbd7402189c1eeed17a848484846040518080602001806020018381038352878782818152602001925080828437600083820152601f01601f191690910184810383528581526020019050858580828437600083820152604051601f909101601f19169092018290039850909650505050505050a150505050565b60046020526000908152604090205481565b6001546060906000805b8281101561236a57612356816111ff565b15612362576001909101905b600101612345565b50606081604051908082528060200260200182016040528015612397578160200160208202803683370190505b5090506000915060005b83811015612416576123b2816111ff565b1561240e57600181815481106123c457fe5b600091825260209091206004909102015482516001600160a01b03909116908390859081106123ef57fe5b6001600160a01b03909216602092830291909101909101526001909201915b6001016123a1565b509250505090565b601581565b61100281565b600061243361233b565b519050600080600c541161244857601561244c565b600c545b90508082111561245a578091505b8161246457600191505b5090565b67016345785d8a000081565b60055481565b61100381565b602981565b60005460ff16156124dd576040805162461bcd60e51b815260206004820152601960248201527f74686520636f6e747261637420616c726561647920696e697400000000000000604482015290519081900360640190fd5b6124e5615a3e565b600061250b6040518061062001604052806105ef8152602001615e9c6105ef9139612ce0565b915091508061254b5760405162461bcd60e51b8152600401808060200182810382526021815260200180615e7b6021913960400191505060405180910390fd5b60005b8260200151518110156126705760018360200151828151811061256d57fe5b60209081029190910181015182546001818101855560009485528385208351600493840290910180546001600160a01b039283166001600160a01b03199182161782558587015182850180549185169183169190911790556040860151600283018054606089015160808a01511515600160e01b0260ff60e01b196001600160401b03909216600160a01b0267ffffffffffffffff60a01b199590981692909516919091179290921694909417161790915560a09093015160039093019290925591860151805191850193918590811061264357fe5b602090810291909101810151516001600160a01b031682528101919091526040016000205560010161254e565b50506103e8600255506000805460ff19166001179055565b600d5481565b33611001146
126ce5760405162461bcd60e51b81526004018080602001828103825260298152602001806165346029913960400191505060405180910390fd5b600b54612773576126dd6159e0565b60015460005b8181101561276f57600b80546001810182556000919091528351600080516020615e15833981519152601690920291820190815560208501516000805160206165148339815191528301805460ff19169115159190911790556040850151859261276191600080516020615e5b833981519152909101906014615a04565b5050508060010190506126e3565b5050505b600061277e82614410565b905061278981611130565b1561279857612798828261428c565b5050565b606581565b3341146127df5760405162461bcd60e51b815260040180806020018281038252602d8152602001806164ba602d913960400191505060405180910390fd5b60005460ff16612832576040805162461bcd60e51b81526020600482015260196024820152781d1a194818dbdb9d1c9858dd081b9bdd081a5b9a5d081e595d603a1b604482015290519081900360640190fd5b6000341161287f576040805162461bcd60e51b81526020600482015260156024820152746465706f7369742076616c7565206973207a65726f60581b604482015290519081900360640190fd5b6001600160a01b0381166000908152600460205260409020546007543491906103e89060ff16156128af57506006545b6000831180156128bf5750600081115b156129685760006128e86127106128dc868563ffffffff6145b416565b9063ffffffff61460d16565b905080156129665760405161dead9082156108fc029083906000818181858888f1935050505015801561291f573d6000803e3d6000fd5b506040805182815290517f627059660ea01c4733a328effb2294d2f86905bf806da763a89cee254de8bee59181900360200190a1612963848263ffffffff61464f16565b93505b505b8115612a6057600060018084038154811061297f57fe5b9060005260206000209060040201905080600201601c9054906101000a900460ff16156129ea576040805185815290516001600160a01b038716917ff177e5d6c5764d79c32883ed824111d9b13f5668cf6ab1cc12dd36791dd955b4919081900360200190a2612a5a565b6003546129fd908563ffffffff61469116565b6003908155810154612a15908563ffffffff61469116565b60038201556040805185815290516001600160a01b038716917f93a090ecc682c002995fad3c85b30c5651d7fd29b0be5da9d784a3302aedc055919081900360200190a25b50612aa0565b6040805184815290516001600160a01b038616917ff177e5d6c5764d79c32883ed824111d9b13f5668cf6ab1cc12dd36791dd955b4919081900360200190a25b50505050565b600e5481565b61100081565b61dead81565b600b8181548110612ac557fe5b60009182526020909120601690910201805460019091015490915060ff1682565b61100481565b6000600a5460001480612afd575081155b80612b085750600954155b15612b1557506000611115565b600960008154809291906001900391905055506000612b62600a546128dc856128dc600b8981548110612b4457fe5b6000918252602090912060169091020154439063ffffffff61464f16565b90506000600b8581548110612b7357fe5b906000526020600020906016020160010160006101000a81548160ff0219169083151502179055506000806110016001600160a01b0316638256ace66040518163ffffffff1660e01b8152600401604080518083038186803b158015612bd857600080fd5b505afa158015612bec573d6000803e3d6000fd5b505050506040513d6040811015612c0257600080fd5b508051602090910151600095509092509050808310612c9057612c258787613e21565b50604080516305bfb49960e41b81526001600160a01b0389166004820152905161100191635bfb499091602480830192600092919082900301818387803b158015612c6f57600080fd5b505af1158015612c83573d6000803e3d6000fd5b5050505060019350612ca2565b818310612ca257612ca087614410565b505b6040516001600160a01b038816907fb9d38178dc641ff1817967a63c9078cbcd955a9f1fcd75e0e3636de615d44d3b90600090a25050509392505050565b612ce8615a3e565b6000612cf2615a3e565b612cfa615a56565b612d0b612d06866146eb565b614710565b90506000805b612d1a8361475a565b15612e2b5780612d3f57612d35612d308461477b565b6147c9565b60ff168452612e23565b8060011415612e1e576060612d5b612d568561477b565b614880565b90508051604051908082528060200260200182016040528015612d9857816020015b612d85615a76565b
815260200190600190039081612d7d5790505b50602086015260005b8151811015612e1357612db2615a76565b6000612dd0848481518110612dc357fe5b6020026020010151614951565b9150915080612ded57876000995099505050505050505050612e34565b8188602001518481518110612dfe57fe5b60209081029190910101525050600101612da1565b506001925050612e23565b612e2b565b600101612d11565b50919350909150505b915091565b604080516001808252818301909252606091829190816020015b6060815260200190600190039081612e53579050509050612e798363ffffffff16614a2e565b81600081518110612e8657fe5b602002602001018190525061111581614a41565b6000806060612ea884614acb565b9150915081612f55577f70e72399380dcfb0338abc03dc8d47f9f470ada8e769c9a78d644ea97385ecb2816040518080602001828103825283818151815260200191508051906020019080838360005b83811015612f10578181015183820152602001612ef8565b50505050905090810190601f168015612f3d5780820380516001836020036101000a031916815260200191505b509250505060405180910390a16066925050506111fa565b50506060612f6283614bad565b6001549091506000908190815b81811015612fe55767016345785d8a000060018281548110612f8d57fe5b90600052602060002090600402016003015410612faf57600190930192612fdd565b600060018281548110612fbe57fe5b9060005260206000209060040201600301541115612fdd576001909201915b600101612f6f565b50606083604051908082528060200260200182016040528015613012578160200160208202803683370190505b509050606084604051908082528060200260200182016040528015613041578160200160208202803683370190505b509050606085604051908082528060200260200182016040528015613070578160200160208202803683370190505b50905060608660405190808252806020026020018201604052801561309f578160200160208202803683370190505b50905060006060876040519080825280602002602001820160405280156130d0578160200160208202803683370190505b5090506060886040519080825280602002602001820160405280156130ff578160200160208202803683370190505b509050600099506000985060006110046001600160a01b031663149d14d96040518163ffffffff1660e01b815260040160206040518083038186803b15801561314757600080fd5b505afa15801561315b573d6000803e3d6000fd5b505050506040513d602081101561317157600080fd5b5051905067016345785d8a00008111156131e5577f70e72399380dcfb0338abc03dc8d47f9f470ada8e769c9a78d644ea97385ecb2604051808060200182810382526021815260200180615df46021913960400191505060405180910390a160689c505050505050505050505050506111fa565b60005b898110156134565767016345785d8a00006001828154811061320657fe5b9060005260206000209060040201600301541061338c576001818154811061322a57fe5b906000526020600020906004020160020160009054906101000a90046001600160a01b0316898d8151811061325b57fe5b60200260200101906001600160a01b031690816001600160a01b03168152505060006402540be4006001838154811061329057fe5b906000526020600020906004020160030154816132a957fe5b06600183815481106132b757fe5b9060005260206000209060040201600301540390506132df838261464f90919063ffffffff16565b898e815181106132eb57fe5b6020026020010181815250506001828154811061330457fe5b906000526020600020906004020160020160009054906101000a90046001600160a01b0316878e8151811061333557fe5b60200260200101906001600160a01b031690816001600160a01b03168152505081888e8151811061336257fe5b602090810291909101015261337d868263ffffffff61469116565b6001909d019c955061344e9050565b60006001828154811061339b57fe5b906000526020600020906004020160030154111561344e57600181815481106133c057fe5b906000526020600020906004020160010160009054906101000a90046001600160a01b0316848c815181106133f157fe5b60200260200101906001600160a01b031690816001600160a01b0316815250506001818154811061341e57fe5b906000526020600020906004020160030154838c8151811061343c57fe5b60209081029190910101526001909a01995b6001016131e8565b5060008415613894576110046001600160a01b0316636e056520868b8b8a60025
442016040518663ffffffff1660e01b815260040180806020018060200180602001856001600160401b03166001600160401b03168152602001848103845288818151815260200191508051906020019060200280838360005b838110156134e85781810151838201526020016134d0565b50505050905001848103835287818151815260200191508051906020019060200280838360005b8381101561352757818101518382015260200161350f565b50505050905001848103825286818151815260200191508051906020019060200280838360005b8381101561356657818101518382015260200161354e565b505050509050019750505050505050506020604051808303818588803b15801561358f57600080fd5b505af1935050505080156135b557506040513d60208110156135b057600080fd5b505160015b6137f0576040516000815260443d10156135d15750600061366c565b60046000803e60005160e01c6308c379a081146135f257600091505061366c565b60043d036004833e81513d60248201116001600160401b038211171561361d5760009250505061366c565b80830180516001600160401b0381111561363e57600094505050505061366c565b8060208301013d860181111561365c5760009550505050505061366c565b601f01601f191660405250925050505b80613677575061371b565b60019150857fa7cdeed7d0db45e3219a6e5d60838824c16f1d39991fcfe3f963029c844bf280826040518080602001828103825283818151815260200191508051906020019080838360005b838110156136db5781810151838201526020016136c3565b50505050905090810190601f1680156137085780820380516001836020036101000a031916815260200191505b509250505060405180910390a2506137eb565b3d808015613745576040519150601f19603f3d011682016040523d82523d6000602084013e61374a565b606091505b5060019150857fbfa884552dd8921b6ce90bfe906952ae5b3b29be0cc1a951d4f62697635a3a45826040518080602001828103825283818151815260200191508051906020019080838360005b838110156137af578181015183820152602001613797565b50505050905090810190601f1680156137dc5780820380516001836020036101000a031916815260200191505b509250505060405180910390a2505b613894565b801561382e576040805187815290517fa217d08e65f80c73121cd9db834d81652d544bfbf452f6d04922b16c90a37b709181900360200190a1613892565b604080516020808252601b908201527f6261746368207472616e736665722072657475726e2066616c7365000000000081830152905187917fa7cdeed7d0db45e3219a6e5d60838824c16f1d39991fcfe3f963029c844bf280919081900360600190a25b505b8015613a4a5760005b8751811015613a485760008882815181106138b457fe5b602002602001015190506000600182815481106138cd57fe5b60009182526020909120600160049092020181015481546001600160a01b03909116916108fc91859081106138fe57fe5b9060005260206000209060040201600301549081150290604051600060405180830381858888f19350505050905080156139ba576001828154811061393f57fe5b60009182526020909120600160049092020181015481546001600160a01b03909116917f6c61d60f69a7beb3e1c80db7f39f37b208537cbb19da3174511b477812b2fc7d918590811061398e57fe5b9060005260206000209060040201600301546040518082815260200191505060405180910390a2613a3e565b600182815481106139c757fe5b60009182526020909120600160049092020181015481546001600160a01b03909116917f25d0ce7d2f0cec669a8c17efe49d195c13455bb8872b65fa610ac7f53fe4ca7d9185908110613a1657fe5b9060005260206000209060040201600301546040518082815260200191505060405180910390a25b505060010161389d565b505b835115613b945760005b8451811015613b92576000858281518110613a6b57fe5b60200260200101516001600160a01b03166108fc868481518110613a8b57fe5b60200260200101519081150290604051600060405180830381858888f1935050505090508015613b2157858281518110613ac157fe5b60200260200101516001600160a01b03167f6c61d60f69a7beb3e1c80db7f39f37b208537cbb19da3174511b477812b2fc7d868481518110613aff57fe5b60200260200101516040518082815260200191505060405180910390a2613b89565b858281518110613b2d57fe5b60200260200101516001600160a01b03167f25d0ce7d2f0cec669a8c17efe49d195c13455bb8872b65fa610ac7f53fe4ca7d868481518110613b6b
57fe5b60200260200101516040518082815260200191505060405180910390a25b50600101613a54565b505b4715613bfd576040805147815290517f6ecc855f9440a9282c90913bbc91619fd44f5ec0b462af28d127b116f130aa4d9181900360200190a1604051611002904780156108fc02916000818181858888f19350505050158015613bfb573d6000803e3d6000fd5b505b600060038190556005558c5115613c1757613c178d614d7d565b6110016001600160a01b031663fc4333cd6040518163ffffffff1660e01b8152600401600060405180830381600087803b158015613c5457600080fd5b505af1158015613c68573d6000803e3d6000fd5b50506040517fedd8d7296956dd970ab4de3f2fc03be2b0ffc615d20cd4c72c6e44f928630ebf925060009150a15060009e9d5050505050505050505050505050565b80516001600160a01b0316600090815260046020526040812054801580613cfb5750600180820381548110613cdb57fe5b9060005260206000209060040201600201601c9054906101000a900460ff165b15613d415782516040516001600160a01b03909116907fe209c46bebf57cf265d5d9009a00870e256d9150f3ed5281ab9d9eb3cec6e4be90600090a260009150506111fa565b600154600554600019820111801590613d975784516040516001600160a01b03909116907fe209c46bebf57cf265d5d9009a00870e256d9150f3ed5281ab9d9eb3cec6e4be90600090a2600093505050506111fa565b600580546001908101909155805481906000198601908110613db557fe5b6000918252602082206002600490920201018054921515600160e01b0260ff60e01b199093169290921790915585516040516001600160a01b03909116917ff226e7d8f547ff903d9d419cf5f54e0d7d07efa9584135a53a057c5f1f27f49a91a2506000949350505050565b60008060018381548110613e3157fe5b90600052602060002090600402016003015490506000600180805490500390506001613e5b61233b565b5111613e9057600060018581548110613e7057fe5b9060005260206000209060040201600301819055506000925050506112aa565b6040805183815290516001600160a01b038716917f3b6f9ef90462b512a1293ecec018670bf7b7f1876fb727590a8a6d7643130a70919081900360200190a26001600160a01b038516600090815260046020526040812055835b6001546000190181101561408d5760018160010181548110613f0857fe5b906000526020600020906004020160018281548110613f2357fe5b60009182526020909120825460049092020180546001600160a01b03199081166001600160a01b0393841617825560018085015481840180548416918616919091179055600280860180549185018054909416919095161780835584546001600160401b03600160a01b91829004160267ffffffffffffffff60a01b1990911617808355935460ff600160e01b918290041615150260ff60e01b19909416939093179055600392830154920191909155600b805490918301908110613fe457fe5b9060005260206000209060160201600b8281548110613fff57fe5b600091825260209091208254601690920201908155600180830154908201805460ff191660ff909216151591909117905561404260028083019084016014615aab565b5090505080600101600460006001848154811061405b57fe5b600091825260208083206004909202909101546001600160a01b03168352820192909252604001902055600101613eea565b50600180548061409957fe5b60008281526020812060046000199093019283020180546001600160a01b0319908116825560018201805490911690556002810180546001600160e81b0319169055600301559055600b8054806140ec57fe5b60008281526020812060166000199093019283020181815560018101805460ff191690559061411e6002830182615ad6565b50509055600081838161412d57fe5b04905080156141915760015460005b8181101561418e57826001828154811061415257fe5b906000526020600020906004020160030154016001828154811061417257fe5b600091825260209091206003600490920201015560010161413c565b50505b50600195945050505050565b60005b828110156142835760408051602080820189905287840182840152825180830384018152606090920190925280519101206000908390816141dd57fe5b0690508085018287011461427a57600088838801815181106141fb57fe5b60200260200101519050888287018151811061421357fe5b6020026020010151898489018151811061422957fe5b60200260200101906001600160a01b031690816001600160a01b03168152505080898388018151811061425857fe5b602
00260200101906001600160a01b031690816001600160a01b031681525050505b506001016141a0565b50505050505050565b600980546001908101909155600b8054839081106142a657fe5b906000526020600020906016020160010160006101000a81548160ff02191690831515021790555043600b82815481106142dc57fe5b600091825260208220601690910201919091556040516001600160a01b038416917ff62981a567ec3cec866c6fa93c55bcdf841d6292d18b8d522ececa769375d82d91a25050565b6000816040516020018082805190602001908083835b602083106143595780518252601f19909201916020918201910161433a565b6001836020036101000a03801982511681845116808217855250505050505090500191505060405160208183030381529060405280519060200120836040516020018082805190602001908083835b602083106143c75780518252601f1990920191602091820191016143a8565b6001836020036101000a0380198251168184511680821785525050505050509050019150506040516020818303038152906040528051906020012014905092915050565b015190565b6001600160a01b03811660009081526004602052604081205480614439575060001990506111fa565b60018103905060006001828154811061444e57fe5b906000526020600020906004020160030154905060006001838154811061447157fe5b906000526020600020906004020160030181905550600060018080549050039050846001600160a01b03167f8cd4e147d8af98a9e3b6724021b8bf6aed2e5dac71c38f2dce8161b82585b25d836040518082815260200191505060405180910390a2806144e3578293505050506111fa565b60008183816144ee57fe5b04905080156145aa5760005b8481101561454c57816001828154811061451057fe5b906000526020600020906004020160030154016001828154811061453057fe5b60009182526020909120600360049092020101556001016144fa565b50600180549085015b818110156145a757826001828154811061456b57fe5b906000526020600020906004020160030154016001828154811061458b57fe5b6000918252602090912060036004909202010155600101614555565b50505b5091949350505050565b6000826145c3575060006112aa565b828202828482816145d057fe5b04146111155760405162461bcd60e51b8152600401808060200182810382526021815260200180615d806021913960400191505060405180910390fd5b600061111583836040518060400160405280601a81526020017f536166654d6174683a206469766973696f6e206279207a65726f000000000000815250615370565b600061111583836040518060400160405280601e81526020017f536166654d6174683a207375627472616374696f6e206f766572666c6f770000815250615412565b600082820183811015611115576040805162461bcd60e51b815260206004820152601b60248201527f536166654d6174683a206164646974696f6e206f766572666c6f770000000000604482015290519081900360640190fd5b6146f3615ae5565b506040805180820190915281518152602082810190820152919050565b614718615a56565b6147218261546c565b61472a57600080fd5b600061473983602001516154a6565b60208085015160408051808201909152868152920190820152915050919050565b6000614764615ae5565b505080518051602091820151919092015191011190565b614783615ae5565b61478c8261475a565b61479557600080fd5b602082015160006147a582615509565b80830160209586015260408051808201909152908152938401919091525090919050565b8051600090158015906147de57508151602110155b6147e757600080fd5b60006147f683602001516154a6565b90508083600001511015614851576040805162461bcd60e51b815260206004820152601a60248201527f6c656e677468206973206c657373207468616e206f6666736574000000000000604482015290519081900360640190fd5b82516020808501518301805192849003929183101561487757826020036101000a820491505b50949350505050565b606061488b8261546c565b61489457600080fd5b600061489f8361563c565b90506060816040519080825280602002602001820160405280156148dd57816020015b6148ca615ae5565b8152602001906001900390816148c25790505b50905060006148ef85602001516154a6565b60208601510190506000805b848110156149465761490c83615509565b915060405180604001604052808381526020018481525084828151811061492f57fe5b6020908102919091010152918101916001016148fb565b5091959450
50505050565b614959615a76565b6000614963615a76565b61496b615a56565b61497485614710565b90506000805b6149838361475a565b15612e2b57806149ae5761499e6149998461477b565b615698565b6001600160a01b03168452614a26565b80600114156149d6576149c36149998461477b565b6001600160a01b03166020850152614a26565b80600214156149fe576149eb6149998461477b565b6001600160a01b03166040850152614a26565b8060031415612e1e57614a13612d308461477b565b6001600160401b03166060850152600191505b60010161497a565b60606112aa614a3c836156b2565b615798565b6060815160001415614a6257506040805160008152602081019091526111fa565b606082600081518110614a7157fe5b602002602001015190506000600190505b8351811015614ab257614aa882858381518110614a9b57fe5b60200260200101516157ea565b9150600101614a82565b50611115614ac5825160c060ff16615867565b826157ea565b60006060602983511115614afd576000604051806060016040528060298152602001615c496029913991509150612e34565b60005b8351811015614b935760005b81811015614b8a57848181518110614b2057fe5b6020026020010151600001516001600160a01b0316858381518110614b4157fe5b6020026020010151600001516001600160a01b03161415614b825760006040518060600160405280602b8152602001615c9d602b9139935093505050612e34565b600101614b0c565b50600101614b00565b505060408051602081019091526000815260019150915091565b6060600080808080614bbd612429565b6001549091505b8015614ccb57600181039250600b8381548110614bdd57fe5b600091825260209091206001601690920201015460ff16614bfd57614cc2565b60018381548110614c0a57fe5b60009182526020909120600490910201546001600160a01b03169450614c31858484612aec565b9350831580614c44575060018851038610155b15614c4e57614cc2565b60005b8851811015614cc057856001600160a01b0316898281518110614c7057fe5b6020026020010151600001516001600160a01b03161415614cb8576001898281518110614c9957fe5b6020908102919091010151901515608090910152600190960195614cc0565b600101614c51565b505b60001901614bc4565b5084875103604051908082528060200260200182016040528015614d0957816020015b614cf6615a76565b815260200190600190039081614cee5790505b5095506000915060005b8751811015614d7257878181518110614d2857fe5b602002602001015160800151614d6a57878181518110614d4457fe5b6020026020010151878481518110614d5857fe5b60209081029190910101526001909201915b600101614d13565b505050505050919050565b600154815160005b82811015614e9a576001614d97615a76565b60018381548110614da457fe5b600091825260208083206040805160c08101825260049490940290910180546001600160a01b0390811685526001820154811693850193909352600281015492831691840191909152600160a01b82046001600160401b03166060840152600160e01b90910460ff16151560808301526003015460a082015291505b84811015614e6e57868181518110614e3457fe5b6020026020010151600001516001600160a01b031682600001516001600160a01b03161415614e665760009250614e6e565b600101614e20565b508115614e905780516001600160a01b03166000908152600460205260408120555b5050600101614d85565b5080821115614f4b57805b82811015614f49576001805480614eb857fe5b60008281526020812060046000199093019283020180546001600160a01b0319908116825560018201805490911690556002810180546001600160e81b0319169055600301559055600b805480614f0b57fe5b60008281526020812060166000199093019283020181815560018101805460ff1916905590614f3d6002830182615ad6565b50509055600101614ea5565b505b6000818310614f5a5781614f5c565b825b905060005b818110156151565761500e858281518110614f7857fe5b602002602001015160018381548110614f8d57fe5b60009182526020918290206040805160c08101825260049390930290910180546001600160a01b0390811684526001820154811694840194909452600281015493841691830191909152600160a01b83046001600160401b03166060830152600160e01b90920460ff161515608082015260039091015460a082015261595f565b61512957806001016004600087848151811061502657fe5b6020026020010151600001516001600160a01b0316600
1600160a01b031681526020019081526020016000208190555084818151811061506257fe5b60200260200101516001828154811061507757fe5b6000918252602091829020835160049092020180546001600160a01b039283166001600160a01b0319918216178255928401516001820180549184169185169190911790556040840151600282018054606087015160808801511515600160e01b0260ff60e01b196001600160401b03909216600160a01b0267ffffffffffffffff60a01b1995909716929097169190911792909216939093171692909217905560a09091015160039091015561514e565b60006001828154811061513857fe5b9060005260206000209060040201600301819055505b600101614f61565b50828211156152fb576151676159e0565b835b838110156152f857600186828151811061517f57fe5b6020908102919091018101518254600181810185556000948552838520835160049093020180546001600160a01b039384166001600160a01b0319918216178255848601518284018054918616918316919091179055604080860151600284018054606089015160808a01511515600160e01b0260ff60e01b196001600160401b03909216600160a01b0267ffffffffffffffff60a01b1995909a1692909616919091179290921696909617169190911790935560a090930151600390930192909255600b8054928301815590935284516016909102600080516020615e158339815191528101918255918501516000805160206165148339815191528301805491151560ff19909216919091179055918401518492916152b491600080516020615e5b833981519152909101906014615a04565b50505080600101600460008884815181106152cb57fe5b602090810291909101810151516001600160a01b0316825281019190915260400160002055600101615169565b50505b6000600981905560015493505b83811015615369576000600b828154811061531f57fe5b60009182526020822060169190910201600101805460ff191692151592909217909155600b80548390811061535057fe5b6000918252602090912060169091020155600101615308565b5050505050565b600081836153fc5760405162461bcd60e51b81526004018080602001828103825283818151815260200191508051906020019080838360005b838110156153c15781810151838201526020016153a9565b50505050905090810190601f1680156153ee5780820380516001836020036101000a031916815260200191505b509250505060405180910390fd5b50600083858161540857fe5b0495945050505050565b600081848411156154645760405162461bcd60e51b81526020600482018181528351602484015283519092839260449091019190850190808383600083156153c15781810151838201526020016153a9565b505050900390565b805160009061547d575060006111fa565b6020820151805160001a9060c082101561549c576000925050506111fa565b5060019392505050565b8051600090811a60808110156154c05760009150506111fa565b60b88110806154db575060c081108015906154db575060f881105b156154ea5760019150506111fa565b60c08110156154fe5760b5190190506111fa565b60f5190190506111fa565b80516000908190811a60808110156155245760019150615635565b60b881101561553957607e1981019150615635565b60c08110156155b357600060b78203600186019550806020036101000a8651049150600181018201935050808310156155ad576040805162461bcd60e51b81526020600482015260116024820152706164646974696f6e206f766572666c6f7760781b604482015290519081900360640190fd5b50615635565b60f88110156155c85760be1981019150615635565b600060f78203600186019550806020036101000a865104915060018101820193505080831015615633576040805162461bcd60e51b81526020600482015260116024820152706164646974696f6e206f766572666c6f7760781b604482015290519081900360640190fd5b505b5092915050565b805160009061564d575060006111fa565b6000809050600061566184602001516154a6565b602085015185519181019250015b8082101561568f5761568082615509565b6001909301929091019061566f565b50909392505050565b80516000906015146156a957600080fd5b6112aa826147c9565b604080516020808252818301909252606091829190602082018180368337505050602081018490529050600067ffffffffffffffff1984166156f65750601861571a565b6fffffffffffffffffffffffffffffffff1984166157165750601061571a565b5060005b60208110156157505781818151811061572f57fe
5b01602001516001600160f81b0319161561574857615750565b60010161571a565b60008160200390506060816040519080825280601f01601f191660200182016040528015615785576020820181803683370190505b5080830196909652508452509192915050565b6060815160011480156157ca5750607f60f81b826000815181106157b857fe5b01602001516001600160f81b03191611155b156157d65750806111fa565b6112aa6157e88351608060ff16615867565b835b6060806040519050835180825260208201818101602087015b8183101561581b578051835260209283019201615803565b50855184518101855292509050808201602086015b81831015615848578051835260209283019201615830565b508651929092011591909101601f01601f191660405250905092915050565b60606801000000000000000083106158b7576040805162461bcd60e51b815260206004820152600e60248201526d696e70757420746f6f206c6f6e6760901b604482015290519081900360640190fd5b604080516001808252818301909252606091602082018180368337019050509050603784116159115782840160f81b816000815181106158f357fe5b60200101906001600160f81b031916908160001a90535090506112aa565b606061591c856156b2565b90508381510160370160f81b8260008151811061593557fe5b60200101906001600160f81b031916908160001a90535061595682826157ea565b95945050505050565b805182516000916001600160a01b039182169116148015615999575081602001516001600160a01b031683602001516001600160a01b0316145b80156159be575081604001516001600160a01b031683604001516001600160a01b0316145b80156111155750506060908101519101516001600160401b0390811691161490565b60408051606081018252600080825260208201529081016159ff615aff565b905290565b8260148101928215615a32579160200282015b82811115615a32578251825591602001919060010190615a17565b50612464929150615b1e565b60408051808201909152600081526060602082015290565b6040518060400160405280615a69615ae5565b8152602001600081525090565b6040805160c081018252600080825260208201819052918101829052606081018290526080810182905260a081019190915290565b8260148101928215615a32579182015b82811115615a32578254825591600101919060010190615abb565b5061140c906014810190615b1e565b604051806040016040528060008152602001600081525090565b6040518061028001604052806014906020820280368337509192915050565b61152891905b808211156124645760008155600101615b2456fe6c656e677468206f66206d61784e756d4f664d61696e7461696e696e67206d69736d617463686c656e677468206f66206d61784e756d4f66576f726b696e6743616e64696461746573206d69736d617463686c656e677468206f66206d61696e7461696e536c6173685363616c65206d69736d61746368746865206d61784e756d4f664d61696e7461696e696e67206d757374206265206c657373207468616e206e756d4f6643616e696e61746573746865206e756d4f66436162696e657473206d7573742062652067726561746572207468616e2030746865206e756d4f66436162696e657473206d757374206265206c657373207468616e204d41585f4e554d5f4f465f56414c494441544f5253746865206e756d626572206f662076616c696461746f72732065786365656420746865206c696d6974746865206275726e526174696f206d757374206265206e6f2067726561746572207468616e2031303030306475706c696361746520636f6e73656e7375732061646472657373206f662076616c696461746f725365747468652065787069726554696d655365636f6e64476170206973206f7574206f662072616e6765746865206d61784e756d4f66576f726b696e6743616e64696461746573206d757374206265206e6f742067726561746572207468616e206d61784e756d4f6643616e6469646174657363616e206e6f7420656e7465722054656d706f72617279204d61696e74656e616e63656c656e677468206f66206a61696c2076616c696461746f7273206d757374206265206f6e65536166654d6174683a206d756c7469706c69636174696f6e206f766572666c6f77746865206d6573736167652073656e646572206d75737420626520676f7665726e616e636520636f6e74726163746c656e677468206f66206d61784e756d4f6643616e64696461746573206d69736d61746368666565206973206c6172676572207468616e2044555354595f494e434f4d494e470175b7a638427703f0dbe7b
b9bbf987a2551717b34e79f33b5b1008d1fa01db96c656e677468206f662065787069726554696d655365636f6e64476170206d69736d617463680175b7a638427703f0dbe7bb9bbf987a2551717b34e79f33b5b1008d1fa01dbb6661696c656420746f20706172736520696e69742076616c696461746f72536574f905ec80f905e8f846942a7cdd959bfe8d9487b2a43b33565295a698f7e294b6a7edd747c0554875d3fc531d19ba1497992c5e941ff80f3f7f110ffd8920a3ac38fdef318fe94a3f86048c27395000f846946488aa4d1955ee33403f8ccb1d4de5fb97c7ade294220f003d8bdfaadf52aa1e55ae4cc485e6794875941a87e90e440a39c99aa9cb5cea0ad6a3f0b2407b86048c27395000f846949ef9f4360c606c7ab4db26b016007d3ad0ab86a0946103af86a874b705854033438383c82575f25bc29418e2db06cbff3e3c5f856410a1838649e760175786048c27395000f84694ee01c3b1283aa067c58eab4709f85e99d46de5fe94ee4b9bfb1871c64e2bcabb1dc382dc8b7c4218a29415904ab26ab0e99d70b51c220ccdcccabee6e29786048c27395000f84694685b1ded8013785d6623cc18d214320b6bb6475994a20ef4e5e4e7e36258dbf51f4d905114cb1b34bc9413e39085dc88704f4394d35209a02b1a9520320c86048c27395000f8469478f3adfc719c99674c072166708589033e2d9afe9448a30d5eaa7b64492a160f139e2da2800ec3834e94055838358c29edf4dcc1ba1985ad58aedbb6be2b86048c27395000f84694c2be4ec20253b8642161bc3f444f53679c1f3d479466f50c616d737e60d7ca6311ff0d9c434197898a94d1d678a2506eeaa365056fe565df8bc8659f28b086048c27395000f846942f7be8361c80a4c1e7e9aaf001d0877f1cfde218945f93992ac37f3e61db2ef8a587a436a161fd210b94ecbc4fb1a97861344dad0867ca3cba2b860411f086048c27395000f84694ce2fd7544e0b2cc94692d4a704debef7bcb613289444abc67b4b2fba283c582387f54c9cba7c34bafa948acc2ab395ded08bb75ce85bf0f95ad2abc51ad586048c27395000f84694b8f7166496996a7da21cf1f1b04d9b3e26a3d077946770572763289aac606e4f327c2f6cc1aa3b3e3b94882d745ed97d4422ca8da1c22ec49d880c4c097286048c27395000f846942d4c407bbe49438ed859fe965b140dcf1aab71a9943ad0939e120f33518fbba04631afe7a3ed6327b194b2bbb170ca4e499a2b0f3cc85ebfa6e8c4dfcbea86048c27395000f846946bbad7cf34b5fa511d8e963dbba288b1960e75d694853b0f6c324d1f4e76c8266942337ac1b0af1a229442498946a51ca5924552ead6fc2af08b94fcba648601d1a94a2000f846944430b3230294d12c6ab2aac5c2cd68e80b16b581947b107f4976a252a6939b771202c28e64e03f52d694795811a7f214084116949fc4f53cedbf189eeab28601d1a94a2000f84694ea0a6e3c511bbd10f4519ece37dc24887e11b55d946811ca77acfb221a49393c193f3a22db829fcc8e9464feb7c04830dd9ace164fc5c52b3f5a29e5018a8601d1a94a2000f846947ae2f5b9e386cd1b50a4550696d957cb4900f03a94e83bcc5077e6b873995c24bac871b5ad856047e19464e48d4057a90b233e026c1041e6012ada897fe88601d1a94a2000f8469482012708dafc9e1b880fd083b32182b869be8e09948e5adc73a2d233a1b496ed3115464dd6c7b887509428b383d324bc9a37f4e276190796ba5a8947f5ed8601d1a94a2000f8469422b81f8e175ffde54d797fe11eb03f9e3bf75f1d94a1c3ef7ca38d8ba80cce3bfc53ebd2903ed21658942767f7447f7b9b70313d4147b795414aecea54718601d1a94a2000f8469468bf0b8b6fb4e317a0f9d6f03eaf8ce6675bc60d94675cfe570b7902623f47e7f59c9664b5f5065dcf94d84f0d2e50bcf00f2fc476e1c57f5ca2d57f625b8601d1a94a2000f846948c4d90829ce8f72d0163c1d5cf348a862d5506309485c42a7b34309bee2ed6a235f86d16f059deec5894cc2cedc53f0fa6d376336efb67e43d167169f3b78601d1a94a2000f8469435e7a025f4da968de7e4d7e4004197917f4070f194b1182abaeeb3b4d8eba7e6a4162eac7ace23d57394c4fd0d870da52e73de2dd8ded19fe3d26f43a1138601d1a94a2000f84694d6caa02bbebaebb5d7e581e4b66559e635f805ff94c07335cf083c1c46a487f0325769d88e163b653694efaff03b42e41f953a925fc43720e45fb61a19938601d1a94a2000746865206d6573736167652073656e646572206d7573742062652063726f737320636861696e20636f6e7472616374746865206d6573736167652073656e646572206d7573742062652074686520626c6f636b2070726f6475636572746865206d61696e7461696e536c6173685363616c65206d7573742062652067726561746572207468616e
20300175b7a638427703f0dbe7bb9bbf987a2551717b34e79f33b5b1008d1fa01dba746865206d6573736167652073656e646572206d75737420626520736c61736820636f6e7472616374a264697066735822122091dd90b1dd27923c787e9f55bd1bebe31e8416465e6ce432022dfeb252e6701e64736f6c63430006040033", + }, + { + ContractAddr: SlashContract, + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/db8bb560ac5a1265c685b719c7e976dced162310", + Code: "608060405234801561001057600080fd5b506004361061023d5760003560e01c80638256ace61161013b578063c80d4b8f116100b8578063e1c7392a1161007c578063e1c7392a1461071d578063f9a2bbc714610725578063fc3e59081461072d578063fc4333cd14610735578063fd6a68791461073d5761023d565b8063c80d4b8f14610667578063c81b16621461066f578063c8509d8114610677578063c96be4cb146106ef578063dc927faf146107155761023d565b8063a1a11bf5116100ff578063a1a11bf514610575578063a78abc161461057d578063ab51bb9614610599578063ac0af629146105a1578063ac431751146105a95761023d565b80638256ace6146104dd578063831d65d1146104e557806396713da91461055d5780639bc8e4f2146105655780639dc092621461056d5761023d565b80634bf6c882116101c95780636e47b4821161018d5780636e47b482146104b557806370fd5bad146104bd57806375d47a0a146104c55780637912a65d146104cd5780637942fd05146104d55761023d565b80634bf6c8821461046d57806351e8067214610475578063567a372d1461047d5780635bfb49901461048557806362b72cf5146104ad5761023d565b806337c8dab91161021057806337c8dab9146103cf578063389f4f711461040e5780633dffc3871461042857806343756e5c14610446578063493279b11461044e5761023d565b80630bee7a67146102425780631182b8751461026357806323bac5a21461035057806335aa2e4414610396575b600080fd5b61024a610745565b6040805163ffffffff9092168252519081900360200190f35b6102db6004803603604081101561027957600080fd5b60ff8235169190810190604081016020820135600160201b81111561029d57600080fd5b8201836020820111156102af57600080fd5b803590602001918460018302840111600160201b831117156102d057600080fd5b50909250905061074a565b6040805160208082528351818301528351919283929083019185019080838360005b838110156103155781810151838201526020016102fd565b50505050905090810190601f1680156103425780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b6103766004803603602081101561036657600080fd5b50356001600160a01b031661081e565b604080519384526020840192909252151582820152519081900360600190f35b6103b3600480360360208110156103ac57600080fd5b5035610841565b604080516001600160a01b039092168252519081900360200190f35b6103f5600480360360208110156103e557600080fd5b50356001600160a01b0316610868565b6040805192835260208301919091528051918290030190f35b6104166108bf565b60408051918252519081900360200190f35b6104306108c5565b6040805160ff9092168252519081900360200190f35b6103b36108ca565b6104566108d0565b6040805161ffff9092168252519081900360200190f35b6104306108d5565b6103b36108da565b6104166108e0565b6104ab6004803603602081101561049b57600080fd5b50356001600160a01b03166108e6565b005b610416610a47565b6103b3610a4d565b610430610a53565b6103b3610a58565b610416610a5e565b610430610a63565b6103f5610a68565b6104ab600480360360408110156104fb57600080fd5b60ff8235169190810190604081016020820135600160201b81111561051f57600080fd5b82018360208201111561053157600080fd5b803590602001918460018302840111600160201b8311171561055257600080fd5b509092509050610a72565b610430610bcc565b610416610bd1565b6103b3610bdc565b6103b3610be2565b610585610be8565b604080519115158252519081900360200190f35b61024a610bf1565b610416610bf6565b6104ab600480360360408110156105bf57600080fd5b810190602081018135600160201b8111156105d957600080fd5b8201836020820111156105eb57600080fd5b803590602001918460018302840111600160201b8311171561060c57600080fd5b919390929091602081019035600160201b8
1111561062957600080fd5b82018360208201111561063b57600080fd5b803590602001918460018302840111600160201b8311171561065c57600080fd5b509092509050610bfb565b610416610fe9565b6103b3610fee565b6104ab6004803603604081101561068d57600080fd5b60ff8235169190810190604081016020820135600160201b8111156106b157600080fd5b8201836020820111156106c357600080fd5b803590602001918460018302840111600160201b831117156106e457600080fd5b509092509050610ff4565b6104ab6004803603602081101561070557600080fd5b50356001600160a01b03166110a7565b6103b361154a565b6104ab611550565b6103b36115c1565b6104306115c7565b6104ab6115cc565b6103b3611a57565b606481565b6060336120001461078c5760405162461bcd60e51b815260040180806020018281038252602f8152602001806124ae602f913960400191505060405180910390fd5b60005460ff166107d1576040805162461bcd60e51b8152602060048201526019602482015260008051602061250a833981519152604482015290519081900360640190fd5b6040805162461bcd60e51b815260206004820152601e60248201527f7265636569766520756e65787065637465642073796e207061636b6167650000604482015290519081900360640190fd5b600260208190526000918252604090912080546001820154919092015460ff1683565b6001818154811061084e57fe5b6000918252602090912001546001600160a01b0316905081565b600080610873612372565b5050506001600160a01b0316600090815260026020818152604092839020835160608101855281548082526001830154938201849052919093015460ff16151592909301919091529091565b60055481565b600181565b61100181565b603881565b600881565b61200081565b60045481565b33611000146109265760405162461bcd60e51b81526004018080602001828103825260308152602001806124096030913960400191505060405180910390fd5b60005460ff1661096b576040805162461bcd60e51b8152602060048201526019602482015260008051602061250a833981519152604482015290519081900360640190fd5b61200063f7a251d7600b61097e84611a5d565b60006040518463ffffffff1660e01b8152600401808460ff1660ff16815260200180602001838152602001828103825284818151815260200191508051906020019080838360005b838110156109de5781810151838201526020016109c6565b50505050905090810190601f168015610a0b5780820380516001836020036101000a031916815260200191505b50945050505050600060405180830381600087803b158015610a2c57600080fd5b505af1158015610a40573d6000803e3d6000fd5b5050505050565b60035481565b61100581565b600281565b61100881565b603281565b600b81565b6004546005549091565b3361200014610ab25760405162461bcd60e51b815260040180806020018281038252602f8152602001806124ae602f913960400191505060405180910390fd5b60005460ff16610af7576040805162461bcd60e51b8152602060048201526019602482015260008051602061250a833981519152604482015290519081900360640190fd5b610aff612395565b6000610b4084848080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250611b2f92505050565b915091508015610b8a5781516040805163ffffffff9092168252517f7f0956d47419b9525356e7111652b653b530ec6f5096dccc04589bc38e6299679181900360200190a1610a40565b81516040805163ffffffff9092168252517f7d45f62d17443dd4547bca8a8112c60e2385669318dc300ec61a5d2492f262e79181900360200190a15050505050565b600981565b662386f26fc1000081565b61100781565b61100681565b60005460ff1681565b600081565b600481565b60005460ff16610c40576040805162461bcd60e51b8152602060048201526019602482015260008051602061250a833981519152604482015290519081900360640190fd5b3361100714610c805760405162461bcd60e51b815260040180806020018281038252602e815260200180612439602e913960400191505060405180910390fd5b610ceb84848080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250506040805180820190915260148152731b5a5cd9195b59585b9bdc951a1c995cda1bdb1960621b60208201529150611baf9050565b15610dc45760208114610d2f5760405162461bcd60e51b81526004018080602001828103
825260278152602001806123e26027913960400191505060405180910390fd5b604080516020601f8401819004810282018101909252828152600091610d6d91858580838501838280828437600092019190915250611c9792505050565b905060018110158015610d81575060055481105b610dbc5760405162461bcd60e51b81526004018080602001828103825260258152602001806124896025913960400191505060405180910390fd5b600455610f57565b610e2a84848080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152505060408051808201909152600f81526e19995b1bdb9e551a1c995cda1bdb19608a1b60208201529150611baf9050565b15610f1a5760208114610e6e5760405162461bcd60e51b81526004018080602001828103825260228152602001806124676022913960400191505060405180910390fd5b604080516020601f8401819004810282018101909252828152600091610eac91858580838501838280828437600092019190915250611c9792505050565b90506103e88111158015610ec1575060045481115b610f12576040805162461bcd60e51b815260206004820181905260248201527f7468652066656c6f6e795468726573686f6c64206f7574206f662072616e6765604482015290519081900360640190fd5b600555610f57565b6040805162461bcd60e51b815260206004820152600d60248201526c756e6b6e6f776e20706172616d60981b604482015290519081900360640190fd5b7f6cdb0ac70ab7f2e2d035cca5be60d89906f2dede7648ddbd7402189c1eeed17a848484846040518080602001806020018381038352878782818152602001925080828437600083820152601f01601f191690910184810383528581526020019050858580828437600083820152604051601f909101601f19169092018290039850909650505050505050a150505050565b609681565b61100281565b33612000146110345760405162461bcd60e51b815260040180806020018281038252602f8152602001806124ae602f913960400191505060405180910390fd5b60005460ff16611079576040805162461bcd60e51b8152602060048201526019602482015260008051602061250a833981519152604482015290519081900360640190fd5b6040517f07db600eebe2ac176be8dcebad61858c245a4961bb32ca2aa3d159b09aa0810e90600090a1505050565b3341146110e55760405162461bcd60e51b815260040180806020018281038252602d8152602001806124dd602d913960400191505060405180910390fd5b60005460ff1661112a576040805162461bcd60e51b8152602060048201526019602482015260008051602061250a833981519152604482015290519081900360640190fd5b6003544311611180576040805162461bcd60e51b815260206004820181905260248201527f63616e206e6f7420736c61736820747769636520696e206f6e6520626c6f636b604482015290519081900360640190fd5b3a156111ca576040805162461bcd60e51b81526020600482015260146024820152736761737072696365206973206e6f74207a65726f60601b604482015290519081900360640190fd5b6040805163155853f360e21b81526001600160a01b03831660048201529051611000916355614fcc916024808301926020929190829003018186803b15801561121257600080fd5b505afa158015611226573d6000803e3d6000fd5b505050506040513d602081101561123c57600080fd5b505161124757611543565b61124f612372565b506001600160a01b0381166000908152600260208181526040928390208351606081018552815481526001820154928101929092529091015460ff1615801592820192909252906112aa576020810180516001019052611303565b60016040820181905260208201819052805480820182556000919091527fb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf60180546001600160a01b0319166001600160a01b0384161790555b43815260055460208201518161131557fe5b0661146757600060208201819052604080516335409f7f60e01b81526001600160a01b03851660048201529051611000926335409f7f926024808201939182900301818387803b15801561136857600080fd5b505af115801561137c573d6000803e3d6000fd5b505050506120006001600160a01b031663f7a251d7600b61139c85611a5d565b60006040518463ffffffff1660e01b8152600401808460ff1660ff16815260200180602001838152602001828103825284818151815260200191508051906020019080838360005b838110156113fc5781810151838201526020016113e4565b505050509
05090810190601f1680156114295780820380516001836020036101000a031916815260200191505b50945050505050600060405180830381600087803b15801561144a57600080fd5b505af115801561145e573d6000803e3d6000fd5b505050506114dd565b60045481602001518161147657fe5b066114dd57604080516375abf10160e11b81526001600160a01b038416600482015290516110009163eb57e20291602480830192600092919082900301818387803b1580156114c457600080fd5b505af11580156114d8573d6000803e3d6000fd5b505050505b6001600160a01b0382166000818152600260208181526040808420865181559186015160018301558581015191909201805460ff1916911515919091179055517fddb6012116e51abf5436d956a4f0ebd927e92c576ff96d7918290c8782291e3e9190a2505b5043600355565b61100381565b60005460ff16156115a8576040805162461bcd60e51b815260206004820152601960248201527f74686520636f6e747261637420616c726561647920696e697400000000000000604482015290519081900360640190fd5b603260045560966005556000805460ff19166001179055565b61100081565b600381565b336110001461160c5760405162461bcd60e51b81526004018080602001828103825260308152602001806124096030913960400191505060405180910390fd5b60005460ff16611651576040805162461bcd60e51b8152602060048201526019602482015260008051602061250a833981519152604482015290519081900360640190fd5b60015461165d57611a55565b600154600090600019015b808211611a29576000805b8284101561178c57611683612372565b600260006001878154811061169457fe5b60009182526020808320909101546001600160a01b0316835282810193909352604091820190208151606081018352815481526001820154938101939093526002015460ff1615159082015260055490915060049004816020015111156117765760046005548161170157fe5b0481602001510381602001818152505080600260006001888154811061172357fe5b6000918252602080832091909101546001600160a01b0316835282810193909352604091820190208351815591830151600183015591909101516002909101805460ff1916911515919091179055611780565b600192505061178c565b50600190930192611673565b8284116119235761179b612372565b60026000600186815481106117ac57fe5b60009182526020808320909101546001600160a01b0316835282810193909352604091820190208151606081018352815481526001820154938101939093526002015460ff1615159082015260055490915060049004816020015111156118945760046005548161181957fe5b0481602001510381602001818152505080600260006001878154811061183b57fe5b6000918252602080832091909101546001600160a01b03168352828101939093526040918201902083518155918301516001808401919091559201516002909101805460ff191691151591909117905591506119239050565b60026000600186815481106118a557fe5b60009182526020808320909101546001600160a01b031683528201929092526040018120818155600181810192909255600201805460ff191690558054806118e957fe5b600082815260209020810160001990810180546001600160a01b0319169055019055836119165750611923565b506000199092019161178c565b81801561192d5750805b15611a0c57600260006001868154811061194357fe5b60009182526020808320909101546001600160a01b031683528201929092526040018120818155600181810192909255600201805460ff1916905580548490811061198a57fe5b600091825260209091200154600180546001600160a01b0390921691869081106119b057fe5b9060005260206000200160006101000a8154816001600160a01b0302191690836001600160a01b0316021790555060018054806119e957fe5b600082815260209020810160001990810180546001600160a01b03191690550190555b82611a18575050611a29565b505060019091019060001901611668565b6040517fcfdb3b6ccaeccbdc68be3c59c840e3b3c90f0a7c491f5fff1cf56cfda200dd9c90600090a150505b565b61100481565b60408051600480825260a08201909252606091829190816020015b6060815260200190600190039081611a78579050509050611aa1836001600160a01b0316611c9c565b81600081518110611aae57fe5b6020026020010181905250611ac243611cbf565b81600181518110611acf57fe5b6020908102919091010152611ae46038611cbf565b81600281518110611af157fe5b60
20026020010181905250611b0542611cbf565b81600381518110611b1257fe5b6020026020010181905250611b2681611cd2565b9150505b919050565b611b37612395565b6000611b41612395565b611b496123a7565b611b5a611b5586611d5c565b611d81565b90506000805b611b6983611dcb565b15611ba25780611b9557611b84611b7f84611dec565b611e3a565b63ffffffff16845260019150611b9a565b611ba2565b600101611b60565b5091935090915050915091565b6000816040516020018082805190602001908083835b60208310611be45780518252601f199092019160209182019101611bc5565b6001836020036101000a03801982511681845116808217855250505050505090500191505060405160208183030381529060405280519060200120836040516020018082805190602001908083835b60208310611c525780518252601f199092019160209182019101611c33565b6001836020036101000a038019825116818451168082178552505050505050905001915050604051602081830303815290604052805190602001201490505b92915050565b015190565b60408051600560a21b8318601482015260348101909152606090611b2681611ef1565b6060611c91611ccd83611f47565b611ef1565b6060815160001415611cf35750604080516000815260208101909152611b2a565b606082600081518110611d0257fe5b602002602001015190506000600190505b8351811015611d4357611d3982858381518110611d2c57fe5b602002602001015161202d565b9150600101611d13565b50611b26611d56825160c060ff166120aa565b8261202d565b611d646123c7565b506040805180820190915281518152602082810190820152919050565b611d896123a7565b611d92826121a2565b611d9b57600080fd5b6000611daa83602001516121dc565b60208085015160408051808201909152868152920190820152915050919050565b6000611dd56123c7565b505080518051602091820151919092015191011190565b611df46123c7565b611dfd82611dcb565b611e0657600080fd5b60208201516000611e168261223f565b80830160209586015260408051808201909152908152938401919091525090919050565b805160009015801590611e4f57508151602110155b611e5857600080fd5b6000611e6783602001516121dc565b90508083600001511015611ec2576040805162461bcd60e51b815260206004820152601a60248201527f6c656e677468206973206c657373207468616e206f6666736574000000000000604482015290519081900360640190fd5b825160208085015183018051928490039291831015611ee857826020036101000a820491505b50949350505050565b606081516001148015611f235750607f60f81b82600081518110611f1157fe5b01602001516001600160f81b03191611155b15611f2f575080611b2a565b611c91611f418351608060ff166120aa565b8361202d565b604080516020808252818301909252606091829190602082018180368337505050602081018490529050600067ffffffffffffffff198416611f8b57506018611faf565b6fffffffffffffffffffffffffffffffff198416611fab57506010611faf565b5060005b6020811015611fe557818181518110611fc457fe5b01602001516001600160f81b03191615611fdd57611fe5565b600101611faf565b60008160200390506060816040519080825280601f01601f19166020018201604052801561201a576020820181803683370190505b5080830196909652508452509192915050565b6060806040519050835180825260208201818101602087015b8183101561205e578051835260209283019201612046565b50855184518101855292509050808201602086015b8183101561208b578051835260209283019201612073565b508651929092011591909101601f01601f191660405250905092915050565b60606801000000000000000083106120fa576040805162461bcd60e51b815260206004820152600e60248201526d696e70757420746f6f206c6f6e6760901b604482015290519081900360640190fd5b604080516001808252818301909252606091602082018180368337019050509050603784116121545782840160f81b8160008151811061213657fe5b60200101906001600160f81b031916908160001a9053509050611c91565b606061215f85611f47565b90508381510160370160f81b8260008151811061217857fe5b60200101906001600160f81b031916908160001a905350612199828261202d565b95945050505050565b80516000906121b357506000611b2a565b6020820151805160001a9060c08210156121d257600092505050611b2a565b5060019392505050565b8051600090811a6
0808110156121f6576000915050611b2a565b60b8811080612211575060c08110801590612211575060f881105b15612220576001915050611b2a565b60c08110156122345760b519019050611b2a565b60f519019050611b2a565b80516000908190811a608081101561225a576001915061236b565b60b881101561226f57607e198101915061236b565b60c08110156122e957600060b78203600186019550806020036101000a8651049150600181018201935050808310156122e3576040805162461bcd60e51b81526020600482015260116024820152706164646974696f6e206f766572666c6f7760781b604482015290519081900360640190fd5b5061236b565b60f88110156122fe5760be198101915061236b565b600060f78203600186019550806020036101000a865104915060018101820193505080831015612369576040805162461bcd60e51b81526020600482015260116024820152706164646974696f6e206f766572666c6f7760781b604482015290519081900360640190fd5b505b5092915050565b604051806060016040528060008152602001600081526020016000151581525090565b60408051602081019091526000815290565b60405180604001604052806123ba6123c7565b8152602001600081525090565b60405180604001604052806000815260200160008152509056fe6c656e677468206f66206d697364656d65616e6f725468726573686f6c64206d69736d61746368746865206d6573736167652073656e646572206d7573742062652076616c696461746f7253657420636f6e7472616374746865206d6573736167652073656e646572206d75737420626520676f7665726e616e636520636f6e74726163746c656e677468206f662066656c6f6e795468726573686f6c64206d69736d61746368746865206d697364656d65616e6f725468726573686f6c64206f7574206f662072616e6765746865206d6573736167652073656e646572206d7573742062652063726f737320636861696e20636f6e7472616374746865206d6573736167652073656e646572206d7573742062652074686520626c6f636b2070726f647563657274686520636f6e7472616374206e6f7420696e69742079657400000000000000a264697066735822122018b8714d9e81a204e6cb56a28d2122ac3178937ed053b34fef2460106855598d64736f6c63430006040033", + }, + }, + } + + eulerUpgrade[chapelNet] = &Upgrade{ + UpgradeName: "euler", + Configs: []*UpgradeConfig{ + { + ContractAddr: ValidatorContract, + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/db8bb560ac5a1265c685b719c7e976dced162310", + Code: 
"6080604052600436106104055760003560e01c80638d19a41011610213578063c81b166211610123578063eb57e202116100ab578063f9a2bbc71161007a578063f9a2bbc714610d55578063fc3e590814610d6a578063fccc281314610d7f578063fd4ad81f14610d94578063fd6a687914610dd757610405565b8063eb57e20214610cd2578063eda5868c14610d05578063f340fa0114610d1a578063f92eb86b14610d4057610405565b8063daacdb66116100f2578063daacdb6614610c69578063dc927faf14610c7e578063e086c7b114610c93578063e1c7392a14610ca8578063e40716a114610cbd57610405565b8063c81b166214610c2a578063c8509d8114610939578063d68fb56a14610c3f578063d86222d514610c5457610405565b8063a78abc16116101a6578063ad3c9da611610175578063ad3c9da614610bb8578063b7ab4db514610beb578063b8cf4ef114610c00578063bf9f49951461065f578063c6d3394514610c1557610405565b8063a78abc1614610aae578063aaf5eb6814610ac3578063ab51bb9614610ad8578063ac43175114610aed57610405565b80639fe0f816116101e25780639fe0f81614610a5a578063a0dc275814610a6f578063a1a11bf514610a84578063a5422d5c14610a9957610405565b80638d19a410146109e85780639369d7de14610a1b57806396713da914610a305780639dc0926214610a4557610405565b80635192c82c1161031957806375d47a0a116102a157806381650b621161027057806381650b6214610924578063831d65d114610939578063853230aa146108e557806386249882146109be5780638b5ad0c9146109d357610405565b806375d47a0a146108d057806378dfed4a146108e55780637942fd05146108fa5780637a84ca2a1461090f57610405565b80635667515a116102e85780635667515a146108065780635d77156c1461081b5780636969a25c146108305780636e47b482146108a657806370fd5bad146108bb57610405565b80635192c82c1461077657806351e806721461078b57806355614fcc146107a0578063565c56b3146107d357610405565b80633365af3a1161039c57806343756e5c1161036b57806343756e5c1461068a57806345cf9daf146106bb578063493279b1146106d05780634bf6c882146106fc5780634df6e0c31461071157610405565b80633365af3a146105ed57806335409f7f146106175780633de0f0d81461064a5780633dffc3871461065f57610405565b8063152ad3b8116103d8578063152ad3b8146105705780631ff1806914610599578063219f22d5146105ae578063321d398a146105c357610405565b806304c4fec61461040a57806307a56847146104215780630bee7a67146104485780631182b87514610476575b600080fd5b34801561041657600080fd5b5061041f610dec565b005b34801561042d57600080fd5b50610436610e7f565b60408051918252519081900360200190f35b34801561045457600080fd5b5061045d610e85565b6040805163ffffffff9092168252519081900360200190f35b34801561048257600080fd5b506104fb6004803603604081101561049957600080fd5b60ff8235169190810190604081016020820135600160201b8111156104bd57600080fd5b8201836020820111156104cf57600080fd5b803590602001918460018302840111600160201b831117156104f057600080fd5b509092509050610e8a565b6040805160208082528351818301528351919283929083019185019080838360005b8381101561053557818101518382015260200161051d565b50505050905090810190601f1680156105625780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561057c57600080fd5b5061058561111c565b604080519115158252519081900360200190f35b3480156105a557600080fd5b50610436611125565b3480156105ba57600080fd5b5061045d61112b565b3480156105cf57600080fd5b50610585600480360360208110156105e657600080fd5b5035611130565b3480156105f957600080fd5b506105856004803603602081101561061057600080fd5b50356111ff565b34801561062357600080fd5b5061041f6004803603602081101561063a57600080fd5b50356001600160a01b03166112b0565b34801561065657600080fd5b5061043661140f565b34801561066b57600080fd5b50610674611415565b6040805160ff9092168252519081900360200190f35b34801561069657600080fd5b5061069f61141a565b604080516001600160a01b039092168252519081900360200190f35b3480156106c757600080fd5b50610436611420565b3480156106dc57600080fd5b506106e5611426565b6040805161ffff90921682
52519081900360200190f35b34801561070857600080fd5b5061067461142b565b34801561071d57600080fd5b50610726611430565b60408051602080825283518183015283519192839290830191858101910280838360005b8381101561076257818101518382015260200161074a565b505050509050019250505060405180910390f35b34801561078257600080fd5b5061043661152b565b34801561079757600080fd5b5061069f611531565b3480156107ac57600080fd5b50610585600480360360208110156107c357600080fd5b50356001600160a01b0316611537565b3480156107df57600080fd5b50610436600480360360208110156107f657600080fd5b50356001600160a01b031661156c565b34801561081257600080fd5b506106746115bd565b34801561082757600080fd5b5061045d6115c2565b34801561083c57600080fd5b5061085a6004803603602081101561085357600080fd5b50356115c7565b604080516001600160a01b039788168152958716602087015293909516848401526001600160401b0390911660608401521515608083015260a082019290925290519081900360c00190f35b3480156108b257600080fd5b5061069f61162b565b3480156108c757600080fd5b50610674611631565b3480156108dc57600080fd5b5061069f611636565b3480156108f157600080fd5b5061043661163c565b34801561090657600080fd5b50610674611642565b34801561091b57600080fd5b50610436611647565b34801561093057600080fd5b5061045d61164d565b34801561094557600080fd5b5061041f6004803603604081101561095c57600080fd5b60ff8235169190810190604081016020820135600160201b81111561098057600080fd5b82018360208201111561099257600080fd5b803590602001918460018302840111600160201b831117156109b357600080fd5b509092509050611652565b3480156109ca57600080fd5b50610436611705565b3480156109df57600080fd5b5061043661170b565b3480156109f457600080fd5b5061043660048036036020811015610a0b57600080fd5b50356001600160a01b0316611711565b348015610a2757600080fd5b5061041f611786565b348015610a3c57600080fd5b506106746118a0565b348015610a5157600080fd5b5061069f6118a5565b348015610a6657600080fd5b506104366118ab565b348015610a7b57600080fd5b506104366118b0565b348015610a9057600080fd5b5061069f6118b5565b348015610aa557600080fd5b506104fb6118bb565b348015610aba57600080fd5b506105856118da565b348015610acf57600080fd5b506104366118e3565b348015610ae457600080fd5b5061045d6115bd565b348015610af957600080fd5b5061041f60048036036040811015610b1057600080fd5b810190602081018135600160201b811115610b2a57600080fd5b820183602082011115610b3c57600080fd5b803590602001918460018302840111600160201b83111715610b5d57600080fd5b919390929091602081019035600160201b811115610b7a57600080fd5b820183602082011115610b8c57600080fd5b803590602001918460018302840111600160201b83111715610bad57600080fd5b5090925090506118ec565b348015610bc457600080fd5b5061043660048036036020811015610bdb57600080fd5b50356001600160a01b0316612329565b348015610bf757600080fd5b5061072661233b565b348015610c0c57600080fd5b5061043661241e565b348015610c2157600080fd5b50610436611631565b348015610c3657600080fd5b5061069f612423565b348015610c4b57600080fd5b50610436612429565b348015610c6057600080fd5b50610436612468565b348015610c7557600080fd5b50610436612474565b348015610c8a57600080fd5b5061069f61247a565b348015610c9f57600080fd5b50610436612480565b348015610cb457600080fd5b5061041f612485565b348015610cc957600080fd5b50610436612688565b348015610cde57600080fd5b5061041f60048036036020811015610cf557600080fd5b50356001600160a01b031661268e565b348015610d1157600080fd5b5061045d61279c565b61041f60048036036020811015610d3057600080fd5b50356001600160a01b03166127a1565b348015610d4c57600080fd5b50610436612aa6565b348015610d6157600080fd5b5061069f612aac565b348015610d7657600080fd5b506106746118ab565b348015610d8b57600080fd5b5061069f612ab2565b348015610da057600080fd5b50610dbe60048036036020811015610db757600080fd5b5035612ab8565b6040805192835290151560208301528051918290030190f35b348015610de357600080f
d5b5061069f612ae6565b6000610df733611711565b9050600b8181548110610e0657fe5b600091825260209091206001601690920201015460ff16610e63576040805162461bcd60e51b81526020600482015260126024820152716e6f7420696e206d61696e74656e616e636560701b604482015290519081900360640190fd5b6000610e6d612429565b9050610e7a338383612aec565b505050565b60095481565b606481565b60005460609060ff16610ee0576040805162461bcd60e51b81526020600482015260196024820152781d1a194818dbdb9d1c9858dd081b9bdd081a5b9a5d081e595d603a1b604482015290519081900360640190fd5b3361200014610f205760405162461bcd60e51b815260040180806020018281038252602f815260200180616047602f913960400191505060405180910390fd5b600b54610fc557610f2f6159e0565b60015460005b81811015610fc157600b80546001810182556000919091528351600080516020615fc0833981519152601690920291820190815560208501516000805160206160d08339815191528301805460ff191691151591909117905560408501518592610fb391600080516020616006833981519152909101906014615a04565b505050806001019050610f35565b5050505b610fcd615a3e565b600061100e85858080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250612ce092505050565b915091508061102a576110216064612e39565b92505050611115565b815160009060ff1661104a576110438360200151612e9a565b90506110e1565b825160ff16600114156110dd578260200151516001146110b7577f70e72399380dcfb0338abc03dc8d47f9f470ada8e769c9a78d644ea97385ecb2604051808060200182810382526025815260200180615f066025913960400191505060405180910390a15060676110d8565b61104383602001516000815181106110cb57fe5b6020026020010151613caa565b6110e1565b5060655b63ffffffff811661110657505060408051600081526020810190915291506111159050565b61110f81612e39565b93505050505b9392505050565b60075460ff1681565b60035481565b606881565b6001546000908210611144575060006111fa565b60006001600160a01b03166001838154811061115c57fe5b60009182526020909120600490910201546001600160a01b0316148061118c5750600854158061118c5750600a54155b8061119b575060085460095410155b806111ac57506111aa826111ff565b155b806111d557506000600b83815481106111c157fe5b906000526020600020906016020160000154115b806111e9575060016111e561233b565b5111155b156111f6575060006111fa565b5060015b919050565b6001546000908210611213575060006111fa565b600b548210611250576001828154811061122957fe5b9060005260206000209060040201600201601c9054906101000a900460ff161590506111fa565b6001828154811061125d57fe5b9060005260206000209060040201600201601c9054906101000a900460ff161580156112aa5750600b828154811061129157fe5b600091825260209091206001601690920201015460ff16155b92915050565b33611001146112f05760405162461bcd60e51b81526004018080602001828103825260298152602001806160f06029913960400191505060405180910390fd5b600b54611395576112ff6159e0565b60015460005b8181101561139157600b80546001810182556000919091528351600080516020615fc0833981519152601690920291820190815560208501516000805160206160d08339815191528301805460ff19169115159190911790556040850151859261138391600080516020616006833981519152909101906014615a04565b505050806001019050611305565b5050505b6001600160a01b038116600090815260046020526040902054806113b9575061140c565b6001810390506000600b82815481106113ce57fe5b600091825260209091206001601690920201015460ff1690506113f18383613e21565b80156113fa5750805b15610e7a576009805460001901905550505b50565b61271081565b600181565b61100181565b60085481565b606181565b600881565b600e54600c546060919080611443575060155b606061144d61233b565b905081815111611461579250611528915050565b82828251031015611473578181510392505b82156114a75760c8430461148e82828686036000888861419d565b6114a582828686038787038889898951030161419d565b505b6060826040519080825280602002602001820160405280156114d3578160200160208202803683370190505b
50905060005b83811015611521578281815181106114ed57fe5b602002602001015182828151811061150157fe5b6001600160a01b03909216602092830291909101909101526001016114d9565b5093505050505b90565b60065481565b61200081565b6001600160a01b0381166000908152600460205260408120548061155f5760009150506111fa565b60001901611115816111ff565b6001600160a01b038116600090815260046020526040812054806115945760009150506111fa565b6001808203815481106115a357fe5b906000526020600020906004020160030154915050919050565b600081565b606781565b600181815481106115d457fe5b600091825260209091206004909102018054600182015460028301546003909301546001600160a01b0392831694509082169291821691600160a01b81046001600160401b031691600160e01b90910460ff169086565b61100581565b600281565b61100881565b6103e881565b600b81565b600c5481565b606681565b33612000146116925760405162461bcd60e51b815260040180806020018281038252602f815260200180616047602f913960400191505060405180910390fd5b7f41ce201247b6ceb957dcdb217d0b8acb50b9ea0e12af9af4f5e7f38902101605838383604051808460ff1660ff168152602001806020018281038252848482818152602001925080828437600083820152604051601f909101601f1916909201829003965090945050505050a1505050565b60025481565b600a5481565b6001600160a01b0381166000908152600460205260408120548061177c576040805162461bcd60e51b815260206004820152601760248201527f6f6e6c792063757272656e742076616c696461746f7273000000000000000000604482015290519081900360640190fd5b6000190192915050565b600b5461182b576117956159e0565b60015460005b8181101561182757600b80546001810182556000919091528351600080516020615fc0833981519152601690920291820190815560208501516000805160206160d08339815191528301805460ff19169115159190911790556040850151859261181991600080516020616006833981519152909101906014615a04565b50505080600101905061179b565b5050505b6008546118385760036008555b600a54611845576002600a555b600061185033611711565b905061185b81611130565b6118965760405162461bcd60e51b8152600401808060200182810382526023815260200180615ee36023913960400191505060405180910390fd5b61140c338261428c565b600981565b61100781565b600381565b60c881565b61100681565b604051806101e001604052806101ab8152602001615b396101ab913981565b60005460ff1681565b6402540be40081565b60005460ff1661193f576040805162461bcd60e51b81526020600482015260196024820152781d1a194818dbdb9d1c9858dd081b9bdd081a5b9a5d081e595d603a1b604482015290519081900360640190fd5b336110071461197f5760405162461bcd60e51b815260040180806020018281038252602e815260200180615f4c602e913960400191505060405180910390fd5b6119e984848080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525050604080518082019091526013815272065787069726554696d655365636f6e6447617606c1b602082015291506143249050565b15611ac45760208114611a2d5760405162461bcd60e51b8152600401808060200182810382526026815260200180615fe06026913960400191505060405180910390fd5b604080516020601f8401819004810282018101909252828152600091611a6b9185858083850183828082843760009201919091525061440b92505050565b905060648110158015611a815750620186a08111155b611abc5760405162461bcd60e51b8152600401808060200182810382526027815260200180615e736027913960400191505060405180910390fd5b600255612297565b611b2484848080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250506040805180820190915260098152686275726e526174696f60b81b602082015291506143249050565b15611c145760208114611b7e576040805162461bcd60e51b815260206004820152601c60248201527f6c656e677468206f66206275726e526174696f206d69736d6174636800000000604482015290519081900360640190fd5b604080516020601f8401819004810282018101909252828152600091611bbc9185858083850183828082843760009201919091525061440b92505050565b90506127108111156
11bff5760405162461bcd60e51b815260040180806020018281038252602b815260200180615e1d602b913960400191505060405180910390fd5b6006556007805460ff19166001179055612297565b611c7e84848080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250506040805180820190915260138152726d61784e756d4f664d61696e7461696e696e6760681b602082015291506143249050565b15611d565760208114611cc25760405162461bcd60e51b8152600401808060200182810382526026815260200180615ce46026913960400191505060405180910390fd5b604080516020601f8401819004810282018101909252828152600091611d009185858083850183828082843760009201919091525061440b92505050565b600c5490915080611d0f575060155b808210611d4d5760405162461bcd60e51b8152600401808060200182810382526038815260200180615d5b6038913960400191505060405180910390fd5b50600855612297565b611dbf84848080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250506040805180820190915260128152716d61696e7461696e536c6173685363616c6560701b602082015291506143249050565b15611e8a5760208114611e035760405162461bcd60e51b8152600401808060200182810382526025815260200180615d366025913960400191505060405180910390fd5b604080516020601f8401819004810282018101909252828152600091611e419185858083850183828082843760009201919091525061440b92505050565b905060008111611e825760405162461bcd60e51b815260040180806020018281038252602d8152602001806160a3602d913960400191505060405180910390fd5b600a55612297565b611efe84848080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152505060408051808201909152601981527f6d61784e756d4f66576f726b696e6743616e6469646174657300000000000000602082015291506143249050565b15611fcb5760208114611f425760405162461bcd60e51b815260040180806020018281038252602c815260200180615d0a602c913960400191505060405180910390fd5b604080516020601f8401819004810282018101909252828152600091611f809185858083850183828082843760009201919091525061440b92505050565b9050600d54811115611fc35760405162461bcd60e51b8152600401808060200182810382526049815260200180615e9a6049913960600191505060405180910390fd5b600e55612297565b61203484848080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250506040805180820190915260128152716d61784e756d4f6643616e6469646174657360701b602082015291506143249050565b156120d557602081146120785760405162461bcd60e51b8152600401808060200182810382526025815260200180615f7a6025913960400191505060405180910390fd5b604080516020601f84018190048102820181019092528281526000916120b69185858083850183828082843760009201919091525061440b92505050565b600d819055600e549091508110156120cf57600d54600e555b50612297565b61213984848080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152505060408051808201909152600d81526c6e756d4f66436162696e65747360981b602082015291506143249050565b1561225a5760208114612193576040805162461bcd60e51b815260206004820181905260248201527f6c656e677468206f66206e756d4f66436162696e657473206d69736d61746368604482015290519081900360640190fd5b604080516020601f84018190048102820181019092528281526000916121d19185858083850183828082843760009201919091525061440b92505050565b9050600081116122125760405162461bcd60e51b8152600401808060200182810382526028815260200180615d936028913960400191505060405180910390fd5b60298111156122525760405162461bcd60e51b8152600401808060200182810382526039815260200180615dbb6039913960400191505060405180910390fd5b600c55612297565b6040805162461bcd60e51b815260206004820152600d60248201526c756e6b6e6f776e20706172616d60981b604482015290519081900360640190fd5b7f6cdb0ac70ab7f2e2d035cca5be60d89906f2dede7648ddbd7402189c1eee
d17a848484846040518080602001806020018381038352878782818152602001925080828437600083820152601f01601f191690910184810383528581526020019050858580828437600083820152604051601f909101601f19169092018290039850909650505050505050a150505050565b60046020526000908152604090205481565b6001546060906000805b8281101561236a57612356816111ff565b15612362576001909101905b600101612345565b50606081604051908082528060200260200182016040528015612397578160200160208202803683370190505b5090506000915060005b83811015612416576123b2816111ff565b1561240e57600181815481106123c457fe5b600091825260209091206004909102015482516001600160a01b03909116908390859081106123ef57fe5b6001600160a01b03909216602092830291909101909101526001909201915b6001016123a1565b509250505090565b601581565b61100281565b600061243361233b565b519050600080600c541161244857601561244c565b600c545b90508082111561245a578091505b8161246457600191505b5090565b67016345785d8a000081565b60055481565b61100381565b602981565b60005460ff16156124dd576040805162461bcd60e51b815260206004820152601960248201527f74686520636f6e747261637420616c726561647920696e697400000000000000604482015290519081900360640190fd5b6124e5615a3e565b600061250b604051806101e001604052806101ab8152602001615b396101ab9139612ce0565b915091508061254b5760405162461bcd60e51b81526004018080602001828103825260218152602001806160266021913960400191505060405180910390fd5b60005b8260200151518110156126705760018360200151828151811061256d57fe5b60209081029190910181015182546001818101855560009485528385208351600493840290910180546001600160a01b039283166001600160a01b03199182161782558587015182850180549185169183169190911790556040860151600283018054606089015160808a01511515600160e01b0260ff60e01b196001600160401b03909216600160a01b0267ffffffffffffffff60a01b199590981692909516919091179290921694909417161790915560a09093015160039093019290925591860151805191850193918590811061264357fe5b602090810291909101810151516001600160a01b031682528101919091526040016000205560010161254e565b50506103e8600255506000805460ff19166001179055565b600d5481565b33611001146126ce5760405162461bcd60e51b81526004018080602001828103825260298152602001806160f06029913960400191505060405180910390fd5b600b54612773576126dd6159e0565b60015460005b8181101561276f57600b80546001810182556000919091528351600080516020615fc0833981519152601690920291820190815560208501516000805160206160d08339815191528301805460ff19169115159190911790556040850151859261276191600080516020616006833981519152909101906014615a04565b5050508060010190506126e3565b5050505b600061277e82614410565b905061278981611130565b1561279857612798828261428c565b5050565b606581565b3341146127df5760405162461bcd60e51b815260040180806020018281038252602d815260200180616076602d913960400191505060405180910390fd5b60005460ff16612832576040805162461bcd60e51b81526020600482015260196024820152781d1a194818dbdb9d1c9858dd081b9bdd081a5b9a5d081e595d603a1b604482015290519081900360640190fd5b6000341161287f576040805162461bcd60e51b81526020600482015260156024820152746465706f7369742076616c7565206973207a65726f60581b604482015290519081900360640190fd5b6001600160a01b0381166000908152600460205260409020546007543491906103e89060ff16156128af57506006545b6000831180156128bf5750600081115b156129685760006128e86127106128dc868563ffffffff6145b416565b9063ffffffff61460d16565b905080156129665760405161dead9082156108fc029083906000818181858888f1935050505015801561291f573d6000803e3d6000fd5b506040805182815290517f627059660ea01c4733a328effb2294d2f86905bf806da763a89cee254de8bee59181900360200190a1612963848263ffffffff61464f16565b93505b505b8115612a6057600060018084038154811061297f57fe5b9060005260206000209060040201905080600201601c9054906101000a900460ff16156129ea57604
0805185815290516001600160a01b038716917ff177e5d6c5764d79c32883ed824111d9b13f5668cf6ab1cc12dd36791dd955b4919081900360200190a2612a5a565b6003546129fd908563ffffffff61469116565b6003908155810154612a15908563ffffffff61469116565b60038201556040805185815290516001600160a01b038716917f93a090ecc682c002995fad3c85b30c5651d7fd29b0be5da9d784a3302aedc055919081900360200190a25b50612aa0565b6040805184815290516001600160a01b038616917ff177e5d6c5764d79c32883ed824111d9b13f5668cf6ab1cc12dd36791dd955b4919081900360200190a25b50505050565b600e5481565b61100081565b61dead81565b600b8181548110612ac557fe5b60009182526020909120601690910201805460019091015490915060ff1682565b61100481565b6000600a5460001480612afd575081155b80612b085750600954155b15612b1557506000611115565b600960008154809291906001900391905055506000612b62600a546128dc856128dc600b8981548110612b4457fe5b6000918252602090912060169091020154439063ffffffff61464f16565b90506000600b8581548110612b7357fe5b906000526020600020906016020160010160006101000a81548160ff0219169083151502179055506000806110016001600160a01b0316638256ace66040518163ffffffff1660e01b8152600401604080518083038186803b158015612bd857600080fd5b505afa158015612bec573d6000803e3d6000fd5b505050506040513d6040811015612c0257600080fd5b508051602090910151600095509092509050808310612c9057612c258787613e21565b50604080516305bfb49960e41b81526001600160a01b0389166004820152905161100191635bfb499091602480830192600092919082900301818387803b158015612c6f57600080fd5b505af1158015612c83573d6000803e3d6000fd5b5050505060019350612ca2565b818310612ca257612ca087614410565b505b6040516001600160a01b038816907fb9d38178dc641ff1817967a63c9078cbcd955a9f1fcd75e0e3636de615d44d3b90600090a25050509392505050565b612ce8615a3e565b6000612cf2615a3e565b612cfa615a56565b612d0b612d06866146eb565b614710565b90506000805b612d1a8361475a565b15612e2b5780612d3f57612d35612d308461477b565b6147c9565b60ff168452612e23565b8060011415612e1e576060612d5b612d568561477b565b614880565b90508051604051908082528060200260200182016040528015612d9857816020015b612d85615a76565b815260200190600190039081612d7d5790505b50602086015260005b8151811015612e1357612db2615a76565b6000612dd0848481518110612dc357fe5b6020026020010151614951565b9150915080612ded57876000995099505050505050505050612e34565b8188602001518481518110612dfe57fe5b60209081029190910101525050600101612da1565b506001925050612e23565b612e2b565b600101612d11565b50919350909150505b915091565b604080516001808252818301909252606091829190816020015b6060815260200190600190039081612e53579050509050612e798363ffffffff16614a2e565b81600081518110612e8657fe5b602002602001018190525061111581614a41565b6000806060612ea884614acb565b9150915081612f55577f70e72399380dcfb0338abc03dc8d47f9f470ada8e769c9a78d644ea97385ecb2816040518080602001828103825283818151815260200191508051906020019080838360005b83811015612f10578181015183820152602001612ef8565b50505050905090810190601f168015612f3d5780820380516001836020036101000a031916815260200191505b509250505060405180910390a16066925050506111fa565b50506060612f6283614bad565b6001549091506000908190815b81811015612fe55767016345785d8a000060018281548110612f8d57fe5b90600052602060002090600402016003015410612faf57600190930192612fdd565b600060018281548110612fbe57fe5b9060005260206000209060040201600301541115612fdd576001909201915b600101612f6f565b50606083604051908082528060200260200182016040528015613012578160200160208202803683370190505b509050606084604051908082528060200260200182016040528015613041578160200160208202803683370190505b509050606085604051908082528060200260200182016040528015613070578160200160208202803683370190505b50905060608660405190808252806020026020018201604052801561309f5781602001602082028036
83370190505b50905060006060876040519080825280602002602001820160405280156130d0578160200160208202803683370190505b5090506060886040519080825280602002602001820160405280156130ff578160200160208202803683370190505b509050600099506000985060006110046001600160a01b031663149d14d96040518163ffffffff1660e01b815260040160206040518083038186803b15801561314757600080fd5b505afa15801561315b573d6000803e3d6000fd5b505050506040513d602081101561317157600080fd5b5051905067016345785d8a00008111156131e5577f70e72399380dcfb0338abc03dc8d47f9f470ada8e769c9a78d644ea97385ecb2604051808060200182810382526021815260200180615f9f6021913960400191505060405180910390a160689c505050505050505050505050506111fa565b60005b898110156134565767016345785d8a00006001828154811061320657fe5b9060005260206000209060040201600301541061338c576001818154811061322a57fe5b906000526020600020906004020160020160009054906101000a90046001600160a01b0316898d8151811061325b57fe5b60200260200101906001600160a01b031690816001600160a01b03168152505060006402540be4006001838154811061329057fe5b906000526020600020906004020160030154816132a957fe5b06600183815481106132b757fe5b9060005260206000209060040201600301540390506132df838261464f90919063ffffffff16565b898e815181106132eb57fe5b6020026020010181815250506001828154811061330457fe5b906000526020600020906004020160020160009054906101000a90046001600160a01b0316878e8151811061333557fe5b60200260200101906001600160a01b031690816001600160a01b03168152505081888e8151811061336257fe5b602090810291909101015261337d868263ffffffff61469116565b6001909d019c955061344e9050565b60006001828154811061339b57fe5b906000526020600020906004020160030154111561344e57600181815481106133c057fe5b906000526020600020906004020160010160009054906101000a90046001600160a01b0316848c815181106133f157fe5b60200260200101906001600160a01b031690816001600160a01b0316815250506001818154811061341e57fe5b906000526020600020906004020160030154838c8151811061343c57fe5b60209081029190910101526001909a01995b6001016131e8565b5060008415613894576110046001600160a01b0316636e056520868b8b8a60025442016040518663ffffffff1660e01b815260040180806020018060200180602001856001600160401b03166001600160401b03168152602001848103845288818151815260200191508051906020019060200280838360005b838110156134e85781810151838201526020016134d0565b50505050905001848103835287818151815260200191508051906020019060200280838360005b8381101561352757818101518382015260200161350f565b50505050905001848103825286818151815260200191508051906020019060200280838360005b8381101561356657818101518382015260200161354e565b505050509050019750505050505050506020604051808303818588803b15801561358f57600080fd5b505af1935050505080156135b557506040513d60208110156135b057600080fd5b505160015b6137f0576040516000815260443d10156135d15750600061366c565b60046000803e60005160e01c6308c379a081146135f257600091505061366c565b60043d036004833e81513d60248201116001600160401b038211171561361d5760009250505061366c565b80830180516001600160401b0381111561363e57600094505050505061366c565b8060208301013d860181111561365c5760009550505050505061366c565b601f01601f191660405250925050505b80613677575061371b565b60019150857fa7cdeed7d0db45e3219a6e5d60838824c16f1d39991fcfe3f963029c844bf280826040518080602001828103825283818151815260200191508051906020019080838360005b838110156136db5781810151838201526020016136c3565b50505050905090810190601f1680156137085780820380516001836020036101000a031916815260200191505b509250505060405180910390a2506137eb565b3d808015613745576040519150601f19603f3d011682016040523d82523d6000602084013e61374a565b606091505b5060019150857fbfa884552dd8921b6ce90bfe906952ae5b3b29be0cc1a951d4f62697635a3a45826040518080602001828103825283818151815260200191508
051906020019080838360005b838110156137af578181015183820152602001613797565b50505050905090810190601f1680156137dc5780820380516001836020036101000a031916815260200191505b509250505060405180910390a2505b613894565b801561382e576040805187815290517fa217d08e65f80c73121cd9db834d81652d544bfbf452f6d04922b16c90a37b709181900360200190a1613892565b604080516020808252601b908201527f6261746368207472616e736665722072657475726e2066616c7365000000000081830152905187917fa7cdeed7d0db45e3219a6e5d60838824c16f1d39991fcfe3f963029c844bf280919081900360600190a25b505b8015613a4a5760005b8751811015613a485760008882815181106138b457fe5b602002602001015190506000600182815481106138cd57fe5b60009182526020909120600160049092020181015481546001600160a01b03909116916108fc91859081106138fe57fe5b9060005260206000209060040201600301549081150290604051600060405180830381858888f19350505050905080156139ba576001828154811061393f57fe5b60009182526020909120600160049092020181015481546001600160a01b03909116917f6c61d60f69a7beb3e1c80db7f39f37b208537cbb19da3174511b477812b2fc7d918590811061398e57fe5b9060005260206000209060040201600301546040518082815260200191505060405180910390a2613a3e565b600182815481106139c757fe5b60009182526020909120600160049092020181015481546001600160a01b03909116917f25d0ce7d2f0cec669a8c17efe49d195c13455bb8872b65fa610ac7f53fe4ca7d9185908110613a1657fe5b9060005260206000209060040201600301546040518082815260200191505060405180910390a25b505060010161389d565b505b835115613b945760005b8451811015613b92576000858281518110613a6b57fe5b60200260200101516001600160a01b03166108fc868481518110613a8b57fe5b60200260200101519081150290604051600060405180830381858888f1935050505090508015613b2157858281518110613ac157fe5b60200260200101516001600160a01b03167f6c61d60f69a7beb3e1c80db7f39f37b208537cbb19da3174511b477812b2fc7d868481518110613aff57fe5b60200260200101516040518082815260200191505060405180910390a2613b89565b858281518110613b2d57fe5b60200260200101516001600160a01b03167f25d0ce7d2f0cec669a8c17efe49d195c13455bb8872b65fa610ac7f53fe4ca7d868481518110613b6b57fe5b60200260200101516040518082815260200191505060405180910390a25b50600101613a54565b505b4715613bfd576040805147815290517f6ecc855f9440a9282c90913bbc91619fd44f5ec0b462af28d127b116f130aa4d9181900360200190a1604051611002904780156108fc02916000818181858888f19350505050158015613bfb573d6000803e3d6000fd5b505b600060038190556005558c5115613c1757613c178d614d7d565b6110016001600160a01b031663fc4333cd6040518163ffffffff1660e01b8152600401600060405180830381600087803b158015613c5457600080fd5b505af1158015613c68573d6000803e3d6000fd5b50506040517fedd8d7296956dd970ab4de3f2fc03be2b0ffc615d20cd4c72c6e44f928630ebf925060009150a15060009e9d5050505050505050505050505050565b80516001600160a01b0316600090815260046020526040812054801580613cfb5750600180820381548110613cdb57fe5b9060005260206000209060040201600201601c9054906101000a900460ff165b15613d415782516040516001600160a01b03909116907fe209c46bebf57cf265d5d9009a00870e256d9150f3ed5281ab9d9eb3cec6e4be90600090a260009150506111fa565b600154600554600019820111801590613d975784516040516001600160a01b03909116907fe209c46bebf57cf265d5d9009a00870e256d9150f3ed5281ab9d9eb3cec6e4be90600090a2600093505050506111fa565b600580546001908101909155805481906000198601908110613db557fe5b6000918252602082206002600490920201018054921515600160e01b0260ff60e01b199093169290921790915585516040516001600160a01b03909116917ff226e7d8f547ff903d9d419cf5f54e0d7d07efa9584135a53a057c5f1f27f49a91a2506000949350505050565b60008060018381548110613e3157fe5b90600052602060002090600402016003015490506000600180805490500390506001613e5b61233b565b5111613e9057600060018581548110613e7057fe5b906000526020600020906004
0201600301819055506000925050506112aa565b6040805183815290516001600160a01b038716917f3b6f9ef90462b512a1293ecec018670bf7b7f1876fb727590a8a6d7643130a70919081900360200190a26001600160a01b038516600090815260046020526040812055835b6001546000190181101561408d5760018160010181548110613f0857fe5b906000526020600020906004020160018281548110613f2357fe5b60009182526020909120825460049092020180546001600160a01b03199081166001600160a01b0393841617825560018085015481840180548416918616919091179055600280860180549185018054909416919095161780835584546001600160401b03600160a01b91829004160267ffffffffffffffff60a01b1990911617808355935460ff600160e01b918290041615150260ff60e01b19909416939093179055600392830154920191909155600b805490918301908110613fe457fe5b9060005260206000209060160201600b8281548110613fff57fe5b600091825260209091208254601690920201908155600180830154908201805460ff191660ff909216151591909117905561404260028083019084016014615aab565b5090505080600101600460006001848154811061405b57fe5b600091825260208083206004909202909101546001600160a01b03168352820192909252604001902055600101613eea565b50600180548061409957fe5b60008281526020812060046000199093019283020180546001600160a01b0319908116825560018201805490911690556002810180546001600160e81b0319169055600301559055600b8054806140ec57fe5b60008281526020812060166000199093019283020181815560018101805460ff191690559061411e6002830182615ad6565b50509055600081838161412d57fe5b04905080156141915760015460005b8181101561418e57826001828154811061415257fe5b906000526020600020906004020160030154016001828154811061417257fe5b600091825260209091206003600490920201015560010161413c565b50505b50600195945050505050565b60005b828110156142835760408051602080820189905287840182840152825180830384018152606090920190925280519101206000908390816141dd57fe5b0690508085018287011461427a57600088838801815181106141fb57fe5b60200260200101519050888287018151811061421357fe5b6020026020010151898489018151811061422957fe5b60200260200101906001600160a01b031690816001600160a01b03168152505080898388018151811061425857fe5b60200260200101906001600160a01b031690816001600160a01b031681525050505b506001016141a0565b50505050505050565b600980546001908101909155600b8054839081106142a657fe5b906000526020600020906016020160010160006101000a81548160ff02191690831515021790555043600b82815481106142dc57fe5b600091825260208220601690910201919091556040516001600160a01b038416917ff62981a567ec3cec866c6fa93c55bcdf841d6292d18b8d522ececa769375d82d91a25050565b6000816040516020018082805190602001908083835b602083106143595780518252601f19909201916020918201910161433a565b6001836020036101000a03801982511681845116808217855250505050505090500191505060405160208183030381529060405280519060200120836040516020018082805190602001908083835b602083106143c75780518252601f1990920191602091820191016143a8565b6001836020036101000a0380198251168184511680821785525050505050509050019150506040516020818303038152906040528051906020012014905092915050565b015190565b6001600160a01b03811660009081526004602052604081205480614439575060001990506111fa565b60018103905060006001828154811061444e57fe5b906000526020600020906004020160030154905060006001838154811061447157fe5b906000526020600020906004020160030181905550600060018080549050039050846001600160a01b03167f8cd4e147d8af98a9e3b6724021b8bf6aed2e5dac71c38f2dce8161b82585b25d836040518082815260200191505060405180910390a2806144e3578293505050506111fa565b60008183816144ee57fe5b04905080156145aa5760005b8481101561454c57816001828154811061451057fe5b906000526020600020906004020160030154016001828154811061453057fe5b60009182526020909120600360049092020101556001016144fa565b50600180549085015b818110156145a757826001828154811061456b57fe5b90600
0526020600020906004020160030154016001828154811061458b57fe5b6000918252602090912060036004909202010155600101614555565b50505b5091949350505050565b6000826145c3575060006112aa565b828202828482816145d057fe5b04146111155760405162461bcd60e51b8152600401808060200182810382526021815260200180615f2b6021913960400191505060405180910390fd5b600061111583836040518060400160405280601a81526020017f536166654d6174683a206469766973696f6e206279207a65726f000000000000815250615370565b600061111583836040518060400160405280601e81526020017f536166654d6174683a207375627472616374696f6e206f766572666c6f770000815250615412565b600082820183811015611115576040805162461bcd60e51b815260206004820152601b60248201527f536166654d6174683a206164646974696f6e206f766572666c6f770000000000604482015290519081900360640190fd5b6146f3615ae5565b506040805180820190915281518152602082810190820152919050565b614718615a56565b6147218261546c565b61472a57600080fd5b600061473983602001516154a6565b60208085015160408051808201909152868152920190820152915050919050565b6000614764615ae5565b505080518051602091820151919092015191011190565b614783615ae5565b61478c8261475a565b61479557600080fd5b602082015160006147a582615509565b80830160209586015260408051808201909152908152938401919091525090919050565b8051600090158015906147de57508151602110155b6147e757600080fd5b60006147f683602001516154a6565b90508083600001511015614851576040805162461bcd60e51b815260206004820152601a60248201527f6c656e677468206973206c657373207468616e206f6666736574000000000000604482015290519081900360640190fd5b82516020808501518301805192849003929183101561487757826020036101000a820491505b50949350505050565b606061488b8261546c565b61489457600080fd5b600061489f8361563c565b90506060816040519080825280602002602001820160405280156148dd57816020015b6148ca615ae5565b8152602001906001900390816148c25790505b50905060006148ef85602001516154a6565b60208601510190506000805b848110156149465761490c83615509565b915060405180604001604052808381526020018481525084828151811061492f57fe5b6020908102919091010152918101916001016148fb565b509195945050505050565b614959615a76565b6000614963615a76565b61496b615a56565b61497485614710565b90506000805b6149838361475a565b15612e2b57806149ae5761499e6149998461477b565b615698565b6001600160a01b03168452614a26565b80600114156149d6576149c36149998461477b565b6001600160a01b03166020850152614a26565b80600214156149fe576149eb6149998461477b565b6001600160a01b03166040850152614a26565b8060031415612e1e57614a13612d308461477b565b6001600160401b03166060850152600191505b60010161497a565b60606112aa614a3c836156b2565b615798565b6060815160001415614a6257506040805160008152602081019091526111fa565b606082600081518110614a7157fe5b602002602001015190506000600190505b8351811015614ab257614aa882858381518110614a9b57fe5b60200260200101516157ea565b9150600101614a82565b50611115614ac5825160c060ff16615867565b826157ea565b60006060602983511115614afd576000604051806060016040528060298152602001615df46029913991509150612e34565b60005b8351811015614b935760005b81811015614b8a57848181518110614b2057fe5b6020026020010151600001516001600160a01b0316858381518110614b4157fe5b6020026020010151600001516001600160a01b03161415614b825760006040518060600160405280602b8152602001615e48602b9139935093505050612e34565b600101614b0c565b50600101614b00565b505060408051602081019091526000815260019150915091565b6060600080808080614bbd612429565b6001549091505b8015614ccb57600181039250600b8381548110614bdd57fe5b600091825260209091206001601690920201015460ff16614bfd57614cc2565b60018381548110614c0a57fe5b60009182526020909120600490910201546001600160a01b03169450614c31858484612aec565b9350831580614c44575060018851038610155b15614c4e57614cc2565b60005b8851811015614cc0578560016001
60a01b0316898281518110614c7057fe5b6020026020010151600001516001600160a01b03161415614cb8576001898281518110614c9957fe5b6020908102919091010151901515608090910152600190960195614cc0565b600101614c51565b505b60001901614bc4565b5084875103604051908082528060200260200182016040528015614d0957816020015b614cf6615a76565b815260200190600190039081614cee5790505b5095506000915060005b8751811015614d7257878181518110614d2857fe5b602002602001015160800151614d6a57878181518110614d4457fe5b6020026020010151878481518110614d5857fe5b60209081029190910101526001909201915b600101614d13565b505050505050919050565b600154815160005b82811015614e9a576001614d97615a76565b60018381548110614da457fe5b600091825260208083206040805160c08101825260049490940290910180546001600160a01b0390811685526001820154811693850193909352600281015492831691840191909152600160a01b82046001600160401b03166060840152600160e01b90910460ff16151560808301526003015460a082015291505b84811015614e6e57868181518110614e3457fe5b6020026020010151600001516001600160a01b031682600001516001600160a01b03161415614e665760009250614e6e565b600101614e20565b508115614e905780516001600160a01b03166000908152600460205260408120555b5050600101614d85565b5080821115614f4b57805b82811015614f49576001805480614eb857fe5b60008281526020812060046000199093019283020180546001600160a01b0319908116825560018201805490911690556002810180546001600160e81b0319169055600301559055600b805480614f0b57fe5b60008281526020812060166000199093019283020181815560018101805460ff1916905590614f3d6002830182615ad6565b50509055600101614ea5565b505b6000818310614f5a5781614f5c565b825b905060005b818110156151565761500e858281518110614f7857fe5b602002602001015160018381548110614f8d57fe5b60009182526020918290206040805160c08101825260049390930290910180546001600160a01b0390811684526001820154811694840194909452600281015493841691830191909152600160a01b83046001600160401b03166060830152600160e01b90920460ff161515608082015260039091015460a082015261595f565b61512957806001016004600087848151811061502657fe5b6020026020010151600001516001600160a01b03166001600160a01b031681526020019081526020016000208190555084818151811061506257fe5b60200260200101516001828154811061507757fe5b6000918252602091829020835160049092020180546001600160a01b039283166001600160a01b0319918216178255928401516001820180549184169185169190911790556040840151600282018054606087015160808801511515600160e01b0260ff60e01b196001600160401b03909216600160a01b0267ffffffffffffffff60a01b1995909716929097169190911792909216939093171692909217905560a09091015160039091015561514e565b60006001828154811061513857fe5b9060005260206000209060040201600301819055505b600101614f61565b50828211156152fb576151676159e0565b835b838110156152f857600186828151811061517f57fe5b6020908102919091018101518254600181810185556000948552838520835160049093020180546001600160a01b039384166001600160a01b0319918216178255848601518284018054918616918316919091179055604080860151600284018054606089015160808a01511515600160e01b0260ff60e01b196001600160401b03909216600160a01b0267ffffffffffffffff60a01b1995909a1692909616919091179290921696909617169190911790935560a090930151600390930192909255600b8054928301815590935284516016909102600080516020615fc08339815191528101918255918501516000805160206160d08339815191528301805491151560ff19909216919091179055918401518492916152b491600080516020616006833981519152909101906014615a04565b50505080600101600460008884815181106152cb57fe5b602090810291909101810151516001600160a01b0316825281019190915260400160002055600101615169565b50505b6000600981905560015493505b83811015615369576000600b828154811061531f57fe5b60009182526020822060169190910201600101805460ff191692151592909217909155600b80548390811061535057fe5b6
000918252602090912060169091020155600101615308565b5050505050565b600081836153fc5760405162461bcd60e51b81526004018080602001828103825283818151815260200191508051906020019080838360005b838110156153c15781810151838201526020016153a9565b50505050905090810190601f1680156153ee5780820380516001836020036101000a031916815260200191505b509250505060405180910390fd5b50600083858161540857fe5b0495945050505050565b600081848411156154645760405162461bcd60e51b81526020600482018181528351602484015283519092839260449091019190850190808383600083156153c15781810151838201526020016153a9565b505050900390565b805160009061547d575060006111fa565b6020820151805160001a9060c082101561549c576000925050506111fa565b5060019392505050565b8051600090811a60808110156154c05760009150506111fa565b60b88110806154db575060c081108015906154db575060f881105b156154ea5760019150506111fa565b60c08110156154fe5760b5190190506111fa565b60f5190190506111fa565b80516000908190811a60808110156155245760019150615635565b60b881101561553957607e1981019150615635565b60c08110156155b357600060b78203600186019550806020036101000a8651049150600181018201935050808310156155ad576040805162461bcd60e51b81526020600482015260116024820152706164646974696f6e206f766572666c6f7760781b604482015290519081900360640190fd5b50615635565b60f88110156155c85760be1981019150615635565b600060f78203600186019550806020036101000a865104915060018101820193505080831015615633576040805162461bcd60e51b81526020600482015260116024820152706164646974696f6e206f766572666c6f7760781b604482015290519081900360640190fd5b505b5092915050565b805160009061564d575060006111fa565b6000809050600061566184602001516154a6565b602085015185519181019250015b8082101561568f5761568082615509565b6001909301929091019061566f565b50909392505050565b80516000906015146156a957600080fd5b6112aa826147c9565b604080516020808252818301909252606091829190602082018180368337505050602081018490529050600067ffffffffffffffff1984166156f65750601861571a565b6fffffffffffffffffffffffffffffffff1984166157165750601061571a565b5060005b60208110156157505781818151811061572f57fe5b01602001516001600160f81b0319161561574857615750565b60010161571a565b60008160200390506060816040519080825280601f01601f191660200182016040528015615785576020820181803683370190505b5080830196909652508452509192915050565b6060815160011480156157ca5750607f60f81b826000815181106157b857fe5b01602001516001600160f81b03191611155b156157d65750806111fa565b6112aa6157e88351608060ff16615867565b835b6060806040519050835180825260208201818101602087015b8183101561581b578051835260209283019201615803565b50855184518101855292509050808201602086015b81831015615848578051835260209283019201615830565b508651929092011591909101601f01601f191660405250905092915050565b60606801000000000000000083106158b7576040805162461bcd60e51b815260206004820152600e60248201526d696e70757420746f6f206c6f6e6760901b604482015290519081900360640190fd5b604080516001808252818301909252606091602082018180368337019050509050603784116159115782840160f81b816000815181106158f357fe5b60200101906001600160f81b031916908160001a90535090506112aa565b606061591c856156b2565b90508381510160370160f81b8260008151811061593557fe5b60200101906001600160f81b031916908160001a90535061595682826157ea565b95945050505050565b805182516000916001600160a01b039182169116148015615999575081602001516001600160a01b031683602001516001600160a01b0316145b80156159be575081604001516001600160a01b031683604001516001600160a01b0316145b80156111155750506060908101519101516001600160401b0390811691161490565b60408051606081018252600080825260208201529081016159ff615aff565b905290565b8260148101928215615a32579160200282015b82811115615a32578251825591602001919060010190615a17565b50612464929150615b1e565b60408051
808201909152600081526060602082015290565b6040518060400160405280615a69615ae5565b8152602001600081525090565b6040805160c081018252600080825260208201819052918101829052606081018290526080810182905260a081019190915290565b8260148101928215615a32579182015b82811115615a32578254825591600101919060010190615abb565b5061140c906014810190615b1e565b604051806040016040528060008152602001600081525090565b6040518061028001604052806014906020820280368337509192915050565b61152891905b808211156124645760008155600101615b2456fef901a880f901a4f844941284214b9b9c85549ab3d2b972df0deef66ac2c9946ddf42a51534fc98d0c0a3b42c963cace8441ddf946ddf42a51534fc98d0c0a3b42c963cace8441ddf8410000000f84494a2959d3f95eae5dc7d70144ce1b73b403b7eb6e0948081ef03f1d9e0bb4a5bf38f16285c879299f07f948081ef03f1d9e0bb4a5bf38f16285c879299f07f8410000000f8449435552c16704d214347f29fa77f77da6d75d7c75294dc4973e838e3949c77aced16ac2315dc2d7ab11194dc4973e838e3949c77aced16ac2315dc2d7ab1118410000000f84494980a75ecd1309ea12fa2ed87a8744fbfc9b863d594cc6ac05c95a99c1f7b5f88de0e3486c82293b27094cc6ac05c95a99c1f7b5f88de0e3486c82293b2708410000000f84494f474cf03cceff28abc65c9cbae594f725c80e12d94e61a183325a18a173319dd8e19c8d069459e217594e61a183325a18a173319dd8e19c8d069459e21758410000000f84494b71b214cb885500844365e95cd9942c7276e7fd894d22ca3ba2141d23adab65ce4940eb7665ea2b6a794d22ca3ba2141d23adab65ce4940eb7665ea2b6a784100000006c656e677468206f66206d61784e756d4f664d61696e7461696e696e67206d69736d617463686c656e677468206f66206d61784e756d4f66576f726b696e6743616e64696461746573206d69736d617463686c656e677468206f66206d61696e7461696e536c6173685363616c65206d69736d61746368746865206d61784e756d4f664d61696e7461696e696e67206d757374206265206c657373207468616e206e756d4f6643616e696e61746573746865206e756d4f66436162696e657473206d7573742062652067726561746572207468616e2030746865206e756d4f66436162696e657473206d757374206265206c657373207468616e204d41585f4e554d5f4f465f56414c494441544f5253746865206e756d626572206f662076616c696461746f72732065786365656420746865206c696d6974746865206275726e526174696f206d757374206265206e6f2067726561746572207468616e2031303030306475706c696361746520636f6e73656e7375732061646472657373206f662076616c696461746f725365747468652065787069726554696d655365636f6e64476170206973206f7574206f662072616e6765746865206d61784e756d4f66576f726b696e6743616e64696461746573206d757374206265206e6f742067726561746572207468616e206d61784e756d4f6643616e6469646174657363616e206e6f7420656e7465722054656d706f72617279204d61696e74656e616e63656c656e677468206f66206a61696c2076616c696461746f7273206d757374206265206f6e65536166654d6174683a206d756c7469706c69636174696f6e206f766572666c6f77746865206d6573736167652073656e646572206d75737420626520676f7665726e616e636520636f6e74726163746c656e677468206f66206d61784e756d4f6643616e64696461746573206d69736d61746368666565206973206c6172676572207468616e2044555354595f494e434f4d494e470175b7a638427703f0dbe7bb9bbf987a2551717b34e79f33b5b1008d1fa01db96c656e677468206f662065787069726554696d655365636f6e64476170206d69736d617463680175b7a638427703f0dbe7bb9bbf987a2551717b34e79f33b5b1008d1fa01dbb6661696c656420746f20706172736520696e69742076616c696461746f72536574746865206d6573736167652073656e646572206d7573742062652063726f737320636861696e20636f6e7472616374746865206d6573736167652073656e646572206d7573742062652074686520626c6f636b2070726f6475636572746865206d61696e7461696e536c6173685363616c65206d7573742062652067726561746572207468616e20300175b7a638427703f0dbe7bb9bbf987a2551717b34e79f33b5b1008d1fa01dba746865206d6573736167652073656e646572206d75737420626520736c61736820636f6e7472616374a2646970667358221220c0dc878fd08b0a4bf6659ead68604aefff2
01930a1931eca7eb88ebdf3b0a03e64736f6c63430006040033", + }, + { + ContractAddr: SlashContract, + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/db8bb560ac5a1265c685b719c7e976dced162310", + Code: "608060405234801561001057600080fd5b506004361061023d5760003560e01c80638256ace61161013b578063c80d4b8f116100b8578063e1c7392a1161007c578063e1c7392a1461071d578063f9a2bbc714610725578063fc3e59081461072d578063fc4333cd14610735578063fd6a68791461073d5761023d565b8063c80d4b8f14610667578063c81b16621461066f578063c8509d8114610677578063c96be4cb146106ef578063dc927faf146107155761023d565b8063a1a11bf5116100ff578063a1a11bf514610575578063a78abc161461057d578063ab51bb9614610599578063ac0af629146105a1578063ac431751146105a95761023d565b80638256ace6146104dd578063831d65d1146104e557806396713da91461055d5780639bc8e4f2146105655780639dc092621461056d5761023d565b80634bf6c882116101c95780636e47b4821161018d5780636e47b482146104b557806370fd5bad146104bd57806375d47a0a146104c55780637912a65d146104cd5780637942fd05146104d55761023d565b80634bf6c8821461046d57806351e8067214610475578063567a372d1461047d5780635bfb49901461048557806362b72cf5146104ad5761023d565b806337c8dab91161021057806337c8dab9146103cf578063389f4f711461040e5780633dffc3871461042857806343756e5c14610446578063493279b11461044e5761023d565b80630bee7a67146102425780631182b8751461026357806323bac5a21461035057806335aa2e4414610396575b600080fd5b61024a610745565b6040805163ffffffff9092168252519081900360200190f35b6102db6004803603604081101561027957600080fd5b60ff8235169190810190604081016020820135600160201b81111561029d57600080fd5b8201836020820111156102af57600080fd5b803590602001918460018302840111600160201b831117156102d057600080fd5b50909250905061074a565b6040805160208082528351818301528351919283929083019185019080838360005b838110156103155781810151838201526020016102fd565b50505050905090810190601f1680156103425780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b6103766004803603602081101561036657600080fd5b50356001600160a01b031661081e565b604080519384526020840192909252151582820152519081900360600190f35b6103b3600480360360208110156103ac57600080fd5b5035610841565b604080516001600160a01b039092168252519081900360200190f35b6103f5600480360360208110156103e557600080fd5b50356001600160a01b0316610868565b6040805192835260208301919091528051918290030190f35b6104166108bf565b60408051918252519081900360200190f35b6104306108c5565b6040805160ff9092168252519081900360200190f35b6103b36108ca565b6104566108d0565b6040805161ffff9092168252519081900360200190f35b6104306108d5565b6103b36108da565b6104166108e0565b6104ab6004803603602081101561049b57600080fd5b50356001600160a01b03166108e6565b005b610416610a47565b6103b3610a4d565b610430610a53565b6103b3610a58565b610416610a5e565b610430610a63565b6103f5610a68565b6104ab600480360360408110156104fb57600080fd5b60ff8235169190810190604081016020820135600160201b81111561051f57600080fd5b82018360208201111561053157600080fd5b803590602001918460018302840111600160201b8311171561055257600080fd5b509092509050610a72565b610430610bcc565b610416610bd1565b6103b3610bdc565b6103b3610be2565b610585610be8565b604080519115158252519081900360200190f35b61024a610bf1565b610416610bf6565b6104ab600480360360408110156105bf57600080fd5b810190602081018135600160201b8111156105d957600080fd5b8201836020820111156105eb57600080fd5b803590602001918460018302840111600160201b8311171561060c57600080fd5b919390929091602081019035600160201b81111561062957600080fd5b82018360208201111561063b57600080fd5b803590602001918460018302840111600160201b8311171561065c57600080fd5b509092509050610bfb565b610416610fe9565b6103b3610fee565b6104ab60048036036040811015
61068d57600080fd5b60ff8235169190810190604081016020820135600160201b8111156106b157600080fd5b8201836020820111156106c357600080fd5b803590602001918460018302840111600160201b831117156106e457600080fd5b509092509050610ff4565b6104ab6004803603602081101561070557600080fd5b50356001600160a01b03166110a7565b6103b361154a565b6104ab611550565b6103b36115c1565b6104306115c7565b6104ab6115cc565b6103b3611a57565b606481565b6060336120001461078c5760405162461bcd60e51b815260040180806020018281038252602f8152602001806124ae602f913960400191505060405180910390fd5b60005460ff166107d1576040805162461bcd60e51b8152602060048201526019602482015260008051602061250a833981519152604482015290519081900360640190fd5b6040805162461bcd60e51b815260206004820152601e60248201527f7265636569766520756e65787065637465642073796e207061636b6167650000604482015290519081900360640190fd5b600260208190526000918252604090912080546001820154919092015460ff1683565b6001818154811061084e57fe5b6000918252602090912001546001600160a01b0316905081565b600080610873612372565b5050506001600160a01b0316600090815260026020818152604092839020835160608101855281548082526001830154938201849052919093015460ff16151592909301919091529091565b60055481565b600181565b61100181565b606181565b600881565b61200081565b60045481565b33611000146109265760405162461bcd60e51b81526004018080602001828103825260308152602001806124096030913960400191505060405180910390fd5b60005460ff1661096b576040805162461bcd60e51b8152602060048201526019602482015260008051602061250a833981519152604482015290519081900360640190fd5b61200063f7a251d7600b61097e84611a5d565b60006040518463ffffffff1660e01b8152600401808460ff1660ff16815260200180602001838152602001828103825284818151815260200191508051906020019080838360005b838110156109de5781810151838201526020016109c6565b50505050905090810190601f168015610a0b5780820380516001836020036101000a031916815260200191505b50945050505050600060405180830381600087803b158015610a2c57600080fd5b505af1158015610a40573d6000803e3d6000fd5b5050505050565b60035481565b61100581565b600281565b61100881565b603281565b600b81565b6004546005549091565b3361200014610ab25760405162461bcd60e51b815260040180806020018281038252602f8152602001806124ae602f913960400191505060405180910390fd5b60005460ff16610af7576040805162461bcd60e51b8152602060048201526019602482015260008051602061250a833981519152604482015290519081900360640190fd5b610aff612395565b6000610b4084848080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250611b2f92505050565b915091508015610b8a5781516040805163ffffffff9092168252517f7f0956d47419b9525356e7111652b653b530ec6f5096dccc04589bc38e6299679181900360200190a1610a40565b81516040805163ffffffff9092168252517f7d45f62d17443dd4547bca8a8112c60e2385669318dc300ec61a5d2492f262e79181900360200190a15050505050565b600981565b662386f26fc1000081565b61100781565b61100681565b60005460ff1681565b600081565b600481565b60005460ff16610c40576040805162461bcd60e51b8152602060048201526019602482015260008051602061250a833981519152604482015290519081900360640190fd5b3361100714610c805760405162461bcd60e51b815260040180806020018281038252602e815260200180612439602e913960400191505060405180910390fd5b610ceb84848080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250506040805180820190915260148152731b5a5cd9195b59585b9bdc951a1c995cda1bdb1960621b60208201529150611baf9050565b15610dc45760208114610d2f5760405162461bcd60e51b81526004018080602001828103825260278152602001806123e26027913960400191505060405180910390fd5b604080516020601f8401819004810282018101909252828152600091610d6d91858580838501838280828437600092019190915250611c9792505050565b90506001811015801
5610d81575060055481105b610dbc5760405162461bcd60e51b81526004018080602001828103825260258152602001806124896025913960400191505060405180910390fd5b600455610f57565b610e2a84848080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152505060408051808201909152600f81526e19995b1bdb9e551a1c995cda1bdb19608a1b60208201529150611baf9050565b15610f1a5760208114610e6e5760405162461bcd60e51b81526004018080602001828103825260228152602001806124676022913960400191505060405180910390fd5b604080516020601f8401819004810282018101909252828152600091610eac91858580838501838280828437600092019190915250611c9792505050565b90506103e88111158015610ec1575060045481115b610f12576040805162461bcd60e51b815260206004820181905260248201527f7468652066656c6f6e795468726573686f6c64206f7574206f662072616e6765604482015290519081900360640190fd5b600555610f57565b6040805162461bcd60e51b815260206004820152600d60248201526c756e6b6e6f776e20706172616d60981b604482015290519081900360640190fd5b7f6cdb0ac70ab7f2e2d035cca5be60d89906f2dede7648ddbd7402189c1eeed17a848484846040518080602001806020018381038352878782818152602001925080828437600083820152601f01601f191690910184810383528581526020019050858580828437600083820152604051601f909101601f19169092018290039850909650505050505050a150505050565b609681565b61100281565b33612000146110345760405162461bcd60e51b815260040180806020018281038252602f8152602001806124ae602f913960400191505060405180910390fd5b60005460ff16611079576040805162461bcd60e51b8152602060048201526019602482015260008051602061250a833981519152604482015290519081900360640190fd5b6040517f07db600eebe2ac176be8dcebad61858c245a4961bb32ca2aa3d159b09aa0810e90600090a1505050565b3341146110e55760405162461bcd60e51b815260040180806020018281038252602d8152602001806124dd602d913960400191505060405180910390fd5b60005460ff1661112a576040805162461bcd60e51b8152602060048201526019602482015260008051602061250a833981519152604482015290519081900360640190fd5b6003544311611180576040805162461bcd60e51b815260206004820181905260248201527f63616e206e6f7420736c61736820747769636520696e206f6e6520626c6f636b604482015290519081900360640190fd5b3a156111ca576040805162461bcd60e51b81526020600482015260146024820152736761737072696365206973206e6f74207a65726f60601b604482015290519081900360640190fd5b6040805163155853f360e21b81526001600160a01b03831660048201529051611000916355614fcc916024808301926020929190829003018186803b15801561121257600080fd5b505afa158015611226573d6000803e3d6000fd5b505050506040513d602081101561123c57600080fd5b505161124757611543565b61124f612372565b506001600160a01b0381166000908152600260208181526040928390208351606081018552815481526001820154928101929092529091015460ff1615801592820192909252906112aa576020810180516001019052611303565b60016040820181905260208201819052805480820182556000919091527fb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf60180546001600160a01b0319166001600160a01b0384161790555b43815260055460208201518161131557fe5b0661146757600060208201819052604080516335409f7f60e01b81526001600160a01b03851660048201529051611000926335409f7f926024808201939182900301818387803b15801561136857600080fd5b505af115801561137c573d6000803e3d6000fd5b505050506120006001600160a01b031663f7a251d7600b61139c85611a5d565b60006040518463ffffffff1660e01b8152600401808460ff1660ff16815260200180602001838152602001828103825284818151815260200191508051906020019080838360005b838110156113fc5781810151838201526020016113e4565b50505050905090810190601f1680156114295780820380516001836020036101000a031916815260200191505b50945050505050600060405180830381600087803b15801561144a57600080fd5b505af115801561145e573d6000803e3d6000fd5b505050506114dd565b
60045481602001518161147657fe5b066114dd57604080516375abf10160e11b81526001600160a01b038416600482015290516110009163eb57e20291602480830192600092919082900301818387803b1580156114c457600080fd5b505af11580156114d8573d6000803e3d6000fd5b505050505b6001600160a01b0382166000818152600260208181526040808420865181559186015160018301558581015191909201805460ff1916911515919091179055517fddb6012116e51abf5436d956a4f0ebd927e92c576ff96d7918290c8782291e3e9190a2505b5043600355565b61100381565b60005460ff16156115a8576040805162461bcd60e51b815260206004820152601960248201527f74686520636f6e747261637420616c726561647920696e697400000000000000604482015290519081900360640190fd5b603260045560966005556000805460ff19166001179055565b61100081565b600381565b336110001461160c5760405162461bcd60e51b81526004018080602001828103825260308152602001806124096030913960400191505060405180910390fd5b60005460ff16611651576040805162461bcd60e51b8152602060048201526019602482015260008051602061250a833981519152604482015290519081900360640190fd5b60015461165d57611a55565b600154600090600019015b808211611a29576000805b8284101561178c57611683612372565b600260006001878154811061169457fe5b60009182526020808320909101546001600160a01b0316835282810193909352604091820190208151606081018352815481526001820154938101939093526002015460ff1615159082015260055490915060049004816020015111156117765760046005548161170157fe5b0481602001510381602001818152505080600260006001888154811061172357fe5b6000918252602080832091909101546001600160a01b0316835282810193909352604091820190208351815591830151600183015591909101516002909101805460ff1916911515919091179055611780565b600192505061178c565b50600190930192611673565b8284116119235761179b612372565b60026000600186815481106117ac57fe5b60009182526020808320909101546001600160a01b0316835282810193909352604091820190208151606081018352815481526001820154938101939093526002015460ff1615159082015260055490915060049004816020015111156118945760046005548161181957fe5b0481602001510381602001818152505080600260006001878154811061183b57fe5b6000918252602080832091909101546001600160a01b03168352828101939093526040918201902083518155918301516001808401919091559201516002909101805460ff191691151591909117905591506119239050565b60026000600186815481106118a557fe5b60009182526020808320909101546001600160a01b031683528201929092526040018120818155600181810192909255600201805460ff191690558054806118e957fe5b600082815260209020810160001990810180546001600160a01b0319169055019055836119165750611923565b506000199092019161178c565b81801561192d5750805b15611a0c57600260006001868154811061194357fe5b60009182526020808320909101546001600160a01b031683528201929092526040018120818155600181810192909255600201805460ff1916905580548490811061198a57fe5b600091825260209091200154600180546001600160a01b0390921691869081106119b057fe5b9060005260206000200160006101000a8154816001600160a01b0302191690836001600160a01b0316021790555060018054806119e957fe5b600082815260209020810160001990810180546001600160a01b03191690550190555b82611a18575050611a29565b505060019091019060001901611668565b6040517fcfdb3b6ccaeccbdc68be3c59c840e3b3c90f0a7c491f5fff1cf56cfda200dd9c90600090a150505b565b61100481565b60408051600480825260a08201909252606091829190816020015b6060815260200190600190039081611a78579050509050611aa1836001600160a01b0316611c9c565b81600081518110611aae57fe5b6020026020010181905250611ac243611cbf565b81600181518110611acf57fe5b6020908102919091010152611ae46061611cbf565b81600281518110611af157fe5b6020026020010181905250611b0542611cbf565b81600381518110611b1257fe5b6020026020010181905250611b2681611cd2565b9150505b919050565b611b37612395565b6000611b41612395565b611b496123a7565b611b5a611b5586611d5c565b611d815
65b90506000805b611b6983611dcb565b15611ba25780611b9557611b84611b7f84611dec565b611e3a565b63ffffffff16845260019150611b9a565b611ba2565b600101611b60565b5091935090915050915091565b6000816040516020018082805190602001908083835b60208310611be45780518252601f199092019160209182019101611bc5565b6001836020036101000a03801982511681845116808217855250505050505090500191505060405160208183030381529060405280519060200120836040516020018082805190602001908083835b60208310611c525780518252601f199092019160209182019101611c33565b6001836020036101000a038019825116818451168082178552505050505050905001915050604051602081830303815290604052805190602001201490505b92915050565b015190565b60408051600560a21b8318601482015260348101909152606090611b2681611ef1565b6060611c91611ccd83611f47565b611ef1565b6060815160001415611cf35750604080516000815260208101909152611b2a565b606082600081518110611d0257fe5b602002602001015190506000600190505b8351811015611d4357611d3982858381518110611d2c57fe5b602002602001015161202d565b9150600101611d13565b50611b26611d56825160c060ff166120aa565b8261202d565b611d646123c7565b506040805180820190915281518152602082810190820152919050565b611d896123a7565b611d92826121a2565b611d9b57600080fd5b6000611daa83602001516121dc565b60208085015160408051808201909152868152920190820152915050919050565b6000611dd56123c7565b505080518051602091820151919092015191011190565b611df46123c7565b611dfd82611dcb565b611e0657600080fd5b60208201516000611e168261223f565b80830160209586015260408051808201909152908152938401919091525090919050565b805160009015801590611e4f57508151602110155b611e5857600080fd5b6000611e6783602001516121dc565b90508083600001511015611ec2576040805162461bcd60e51b815260206004820152601a60248201527f6c656e677468206973206c657373207468616e206f6666736574000000000000604482015290519081900360640190fd5b825160208085015183018051928490039291831015611ee857826020036101000a820491505b50949350505050565b606081516001148015611f235750607f60f81b82600081518110611f1157fe5b01602001516001600160f81b03191611155b15611f2f575080611b2a565b611c91611f418351608060ff166120aa565b8361202d565b604080516020808252818301909252606091829190602082018180368337505050602081018490529050600067ffffffffffffffff198416611f8b57506018611faf565b6fffffffffffffffffffffffffffffffff198416611fab57506010611faf565b5060005b6020811015611fe557818181518110611fc457fe5b01602001516001600160f81b03191615611fdd57611fe5565b600101611faf565b60008160200390506060816040519080825280601f01601f19166020018201604052801561201a576020820181803683370190505b5080830196909652508452509192915050565b6060806040519050835180825260208201818101602087015b8183101561205e578051835260209283019201612046565b50855184518101855292509050808201602086015b8183101561208b578051835260209283019201612073565b508651929092011591909101601f01601f191660405250905092915050565b60606801000000000000000083106120fa576040805162461bcd60e51b815260206004820152600e60248201526d696e70757420746f6f206c6f6e6760901b604482015290519081900360640190fd5b604080516001808252818301909252606091602082018180368337019050509050603784116121545782840160f81b8160008151811061213657fe5b60200101906001600160f81b031916908160001a9053509050611c91565b606061215f85611f47565b90508381510160370160f81b8260008151811061217857fe5b60200101906001600160f81b031916908160001a905350612199828261202d565b95945050505050565b80516000906121b357506000611b2a565b6020820151805160001a9060c08210156121d257600092505050611b2a565b5060019392505050565b8051600090811a60808110156121f6576000915050611b2a565b60b8811080612211575060c08110801590612211575060f881105b15612220576001915050611b2a565b60c08110156122345760b519019050611b2a565b60f519019050611b2a565b80516000908190811a6080
81101561225a576001915061236b565b60b881101561226f57607e198101915061236b565b60c08110156122e957600060b78203600186019550806020036101000a8651049150600181018201935050808310156122e3576040805162461bcd60e51b81526020600482015260116024820152706164646974696f6e206f766572666c6f7760781b604482015290519081900360640190fd5b5061236b565b60f88110156122fe5760be198101915061236b565b600060f78203600186019550806020036101000a865104915060018101820193505080831015612369576040805162461bcd60e51b81526020600482015260116024820152706164646974696f6e206f766572666c6f7760781b604482015290519081900360640190fd5b505b5092915050565b604051806060016040528060008152602001600081526020016000151581525090565b60408051602081019091526000815290565b60405180604001604052806123ba6123c7565b8152602001600081525090565b60405180604001604052806000815260200160008152509056fe6c656e677468206f66206d697364656d65616e6f725468726573686f6c64206d69736d61746368746865206d6573736167652073656e646572206d7573742062652076616c696461746f7253657420636f6e7472616374746865206d6573736167652073656e646572206d75737420626520676f7665726e616e636520636f6e74726163746c656e677468206f662066656c6f6e795468726573686f6c64206d69736d61746368746865206d697364656d65616e6f725468726573686f6c64206f7574206f662072616e6765746865206d6573736167652073656e646572206d7573742062652063726f737320636861696e20636f6e7472616374746865206d6573736167652073656e646572206d7573742062652074686520626c6f636b2070726f647563657274686520636f6e7472616374206e6f7420696e69742079657400000000000000a2646970667358221220a288eb0fb37b1f80ed70929944f149856fbe848d775f3fe9d6a2f46faa67754564736f6c63430006040033", + }, + }, + } + + eulerUpgrade[rialtoNet] = &Upgrade{ + UpgradeName: "euler", + Configs: []*UpgradeConfig{ + { + ContractAddr: ValidatorContract, + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/db8bb560ac5a1265c685b719c7e976dced162310", + Code: 
"6080604052600436106104055760003560e01c80638d19a41011610213578063c81b166211610123578063eb57e202116100ab578063f9a2bbc71161007a578063f9a2bbc714610d55578063fc3e590814610d6a578063fccc281314610d7f578063fd4ad81f14610d94578063fd6a687914610dd757610405565b8063eb57e20214610cd2578063eda5868c14610d05578063f340fa0114610d1a578063f92eb86b14610d4057610405565b8063daacdb66116100f2578063daacdb6614610c69578063dc927faf14610c7e578063e086c7b114610c93578063e1c7392a14610ca8578063e40716a114610cbd57610405565b8063c81b166214610c2a578063c8509d8114610939578063d68fb56a14610c3f578063d86222d514610c5457610405565b8063a78abc16116101a6578063ad3c9da611610175578063ad3c9da614610bb8578063b7ab4db514610beb578063b8cf4ef114610c00578063bf9f49951461065f578063c6d3394514610c1557610405565b8063a78abc1614610aae578063aaf5eb6814610ac3578063ab51bb9614610ad8578063ac43175114610aed57610405565b80639fe0f816116101e25780639fe0f81614610a5a578063a0dc275814610a6f578063a1a11bf514610a84578063a5422d5c14610a9957610405565b80638d19a410146109e85780639369d7de14610a1b57806396713da914610a305780639dc0926214610a4557610405565b80635192c82c1161031957806375d47a0a116102a157806381650b621161027057806381650b6214610924578063831d65d114610939578063853230aa146108e557806386249882146109be5780638b5ad0c9146109d357610405565b806375d47a0a146108d057806378dfed4a146108e55780637942fd05146108fa5780637a84ca2a1461090f57610405565b80635667515a116102e85780635667515a146108065780635d77156c1461081b5780636969a25c146108305780636e47b482146108a657806370fd5bad146108bb57610405565b80635192c82c1461077657806351e806721461078b57806355614fcc146107a0578063565c56b3146107d357610405565b80633365af3a1161039c57806343756e5c1161036b57806343756e5c1461068a57806345cf9daf146106bb578063493279b1146106d05780634bf6c882146106fc5780634df6e0c31461071157610405565b80633365af3a146105ed57806335409f7f146106175780633de0f0d81461064a5780633dffc3871461065f57610405565b8063152ad3b8116103d8578063152ad3b8146105705780631ff1806914610599578063219f22d5146105ae578063321d398a146105c357610405565b806304c4fec61461040a57806307a56847146104215780630bee7a67146104485780631182b87514610476575b600080fd5b34801561041657600080fd5b5061041f610dec565b005b34801561042d57600080fd5b50610436610e7f565b60408051918252519081900360200190f35b34801561045457600080fd5b5061045d610e85565b6040805163ffffffff9092168252519081900360200190f35b34801561048257600080fd5b506104fb6004803603604081101561049957600080fd5b60ff8235169190810190604081016020820135600160201b8111156104bd57600080fd5b8201836020820111156104cf57600080fd5b803590602001918460018302840111600160201b831117156104f057600080fd5b509092509050610e8a565b6040805160208082528351818301528351919283929083019185019080838360005b8381101561053557818101518382015260200161051d565b50505050905090810190601f1680156105625780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34801561057c57600080fd5b5061058561111c565b604080519115158252519081900360200190f35b3480156105a557600080fd5b50610436611125565b3480156105ba57600080fd5b5061045d61112b565b3480156105cf57600080fd5b50610585600480360360208110156105e657600080fd5b5035611130565b3480156105f957600080fd5b506105856004803603602081101561061057600080fd5b50356111ff565b34801561062357600080fd5b5061041f6004803603602081101561063a57600080fd5b50356001600160a01b03166112b0565b34801561065657600080fd5b5061043661140f565b34801561066b57600080fd5b50610674611415565b6040805160ff9092168252519081900360200190f35b34801561069657600080fd5b5061069f61141a565b604080516001600160a01b039092168252519081900360200190f35b3480156106c757600080fd5b50610436611420565b3480156106dc57600080fd5b506106e5611426565b6040805161ffff90921682
52519081900360200190f35b34801561070857600080fd5b5061067461142c565b34801561071d57600080fd5b50610726611431565b60408051602080825283518183015283519192839290830191858101910280838360005b8381101561076257818101518382015260200161074a565b505050509050019250505060405180910390f35b34801561078257600080fd5b5061043661152c565b34801561079757600080fd5b5061069f611532565b3480156107ac57600080fd5b50610585600480360360208110156107c357600080fd5b50356001600160a01b0316611538565b3480156107df57600080fd5b50610436600480360360208110156107f657600080fd5b50356001600160a01b031661156d565b34801561081257600080fd5b506106746115be565b34801561082757600080fd5b5061045d6115c3565b34801561083c57600080fd5b5061085a6004803603602081101561085357600080fd5b50356115c8565b604080516001600160a01b039788168152958716602087015293909516848401526001600160401b0390911660608401521515608083015260a082019290925290519081900360c00190f35b3480156108b257600080fd5b5061069f61162c565b3480156108c757600080fd5b50610674611632565b3480156108dc57600080fd5b5061069f611637565b3480156108f157600080fd5b5061043661163d565b34801561090657600080fd5b50610674611643565b34801561091b57600080fd5b50610436611648565b34801561093057600080fd5b5061045d61164e565b34801561094557600080fd5b5061041f6004803603604081101561095c57600080fd5b60ff8235169190810190604081016020820135600160201b81111561098057600080fd5b82018360208201111561099257600080fd5b803590602001918460018302840111600160201b831117156109b357600080fd5b509092509050611653565b3480156109ca57600080fd5b50610436611706565b3480156109df57600080fd5b5061043661170c565b3480156109f457600080fd5b5061043660048036036020811015610a0b57600080fd5b50356001600160a01b0316611712565b348015610a2757600080fd5b5061041f611787565b348015610a3c57600080fd5b506106746118a1565b348015610a5157600080fd5b5061069f6118a6565b348015610a6657600080fd5b506104366118ac565b348015610a7b57600080fd5b506104366118b1565b348015610a9057600080fd5b5061069f6118b6565b348015610aa557600080fd5b506104fb6118bc565b348015610aba57600080fd5b506105856118db565b348015610acf57600080fd5b506104366118e4565b348015610ae457600080fd5b5061045d6115be565b348015610af957600080fd5b5061041f60048036036040811015610b1057600080fd5b810190602081018135600160201b811115610b2a57600080fd5b820183602082011115610b3c57600080fd5b803590602001918460018302840111600160201b83111715610b5d57600080fd5b919390929091602081019035600160201b811115610b7a57600080fd5b820183602082011115610b8c57600080fd5b803590602001918460018302840111600160201b83111715610bad57600080fd5b5090925090506118ed565b348015610bc457600080fd5b5061043660048036036020811015610bdb57600080fd5b50356001600160a01b031661232a565b348015610bf757600080fd5b5061072661233c565b348015610c0c57600080fd5b5061043661241f565b348015610c2157600080fd5b50610436611632565b348015610c3657600080fd5b5061069f612424565b348015610c4b57600080fd5b5061043661242a565b348015610c6057600080fd5b50610436612469565b348015610c7557600080fd5b50610436612475565b348015610c8a57600080fd5b5061069f61247b565b348015610c9f57600080fd5b50610436612481565b348015610cb457600080fd5b5061041f612486565b348015610cc957600080fd5b50610436612689565b348015610cde57600080fd5b5061041f60048036036020811015610cf557600080fd5b50356001600160a01b031661268f565b348015610d1157600080fd5b5061045d61279d565b61041f60048036036020811015610d3057600080fd5b50356001600160a01b03166127a2565b348015610d4c57600080fd5b50610436612aa7565b348015610d6157600080fd5b5061069f612aad565b348015610d7657600080fd5b506106746118ac565b348015610d8b57600080fd5b5061069f612ab3565b348015610da057600080fd5b50610dbe60048036036020811015610db757600080fd5b5035612ab9565b6040805192835290151560208301528051918290030190f35b348015610de357600080f
d5b5061069f612ae7565b6000610df733611712565b9050600b8181548110610e0657fe5b600091825260209091206001601690920201015460ff16610e63576040805162461bcd60e51b81526020600482015260126024820152716e6f7420696e206d61696e74656e616e636560701b604482015290519081900360640190fd5b6000610e6d61242a565b9050610e7a338383612aed565b505050565b60095481565b606481565b60005460609060ff16610ee0576040805162461bcd60e51b81526020600482015260196024820152781d1a194818dbdb9d1c9858dd081b9bdd081a5b9a5d081e595d603a1b604482015290519081900360640190fd5b3361200014610f205760405162461bcd60e51b815260040180806020018281038252602f815260200180616048602f913960400191505060405180910390fd5b600b54610fc557610f2f6159e1565b60015460005b81811015610fc157600b80546001810182556000919091528351600080516020615fc1833981519152601690920291820190815560208501516000805160206160d18339815191528301805460ff191691151591909117905560408501518592610fb391600080516020616007833981519152909101906014615a05565b505050806001019050610f35565b5050505b610fcd615a3f565b600061100e85858080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250612ce192505050565b915091508061102a576110216064612e3a565b92505050611115565b815160009060ff1661104a576110438360200151612e9b565b90506110e1565b825160ff16600114156110dd578260200151516001146110b7577f70e72399380dcfb0338abc03dc8d47f9f470ada8e769c9a78d644ea97385ecb2604051808060200182810382526025815260200180615f076025913960400191505060405180910390a15060676110d8565b61104383602001516000815181106110cb57fe5b6020026020010151613cab565b6110e1565b5060655b63ffffffff811661110657505060408051600081526020810190915291506111159050565b61110f81612e3a565b93505050505b9392505050565b60075460ff1681565b60035481565b606881565b6001546000908210611144575060006111fa565b60006001600160a01b03166001838154811061115c57fe5b60009182526020909120600490910201546001600160a01b0316148061118c5750600854158061118c5750600a54155b8061119b575060085460095410155b806111ac57506111aa826111ff565b155b806111d557506000600b83815481106111c157fe5b906000526020600020906016020160000154115b806111e9575060016111e561233c565b5111155b156111f6575060006111fa565b5060015b919050565b6001546000908210611213575060006111fa565b600b548210611250576001828154811061122957fe5b9060005260206000209060040201600201601c9054906101000a900460ff161590506111fa565b6001828154811061125d57fe5b9060005260206000209060040201600201601c9054906101000a900460ff161580156112aa5750600b828154811061129157fe5b600091825260209091206001601690920201015460ff16155b92915050565b33611001146112f05760405162461bcd60e51b81526004018080602001828103825260298152602001806160f16029913960400191505060405180910390fd5b600b54611395576112ff6159e1565b60015460005b8181101561139157600b80546001810182556000919091528351600080516020615fc1833981519152601690920291820190815560208501516000805160206160d18339815191528301805460ff19169115159190911790556040850151859261138391600080516020616007833981519152909101906014615a05565b505050806001019050611305565b5050505b6001600160a01b038116600090815260046020526040902054806113b9575061140c565b6001810390506000600b82815481106113ce57fe5b600091825260209091206001601690920201015460ff1690506113f18383613e22565b80156113fa5750805b15610e7a576009805460001901905550505b50565b61271081565b600181565b61100181565b60085481565b6102ca81565b600881565b600e54600c546060919080611444575060155b606061144e61233c565b905081815111611462579250611529915050565b82828251031015611474578181510392505b82156114a85760c8430461148f82828686036000888861419e565b6114a682828686038787038889898951030161419e565b505b6060826040519080825280602002602001820160405280156114d457816020016020820280368337019050
5b50905060005b83811015611522578281815181106114ee57fe5b602002602001015182828151811061150257fe5b6001600160a01b03909216602092830291909101909101526001016114da565b5093505050505b90565b60065481565b61200081565b6001600160a01b038116600090815260046020526040812054806115605760009150506111fa565b60001901611115816111ff565b6001600160a01b038116600090815260046020526040812054806115955760009150506111fa565b6001808203815481106115a457fe5b906000526020600020906004020160030154915050919050565b600081565b606781565b600181815481106115d557fe5b600091825260209091206004909102018054600182015460028301546003909301546001600160a01b0392831694509082169291821691600160a01b81046001600160401b031691600160e01b90910460ff169086565b61100581565b600281565b61100881565b6103e881565b600b81565b600c5481565b606681565b33612000146116935760405162461bcd60e51b815260040180806020018281038252602f815260200180616048602f913960400191505060405180910390fd5b7f41ce201247b6ceb957dcdb217d0b8acb50b9ea0e12af9af4f5e7f38902101605838383604051808460ff1660ff168152602001806020018281038252848482818152602001925080828437600083820152604051601f909101601f1916909201829003965090945050505050a1505050565b60025481565b600a5481565b6001600160a01b0381166000908152600460205260408120548061177d576040805162461bcd60e51b815260206004820152601760248201527f6f6e6c792063757272656e742076616c696461746f7273000000000000000000604482015290519081900360640190fd5b6000190192915050565b600b5461182c576117966159e1565b60015460005b8181101561182857600b80546001810182556000919091528351600080516020615fc1833981519152601690920291820190815560208501516000805160206160d18339815191528301805460ff19169115159190911790556040850151859261181a91600080516020616007833981519152909101906014615a05565b50505080600101905061179c565b5050505b6008546118395760036008555b600a54611846576002600a555b600061185133611712565b905061185c81611130565b6118975760405162461bcd60e51b8152600401808060200182810382526023815260200180615ee46023913960400191505060405180910390fd5b61140c338261428d565b600981565b61100781565b600381565b60c881565b61100681565b604051806101e001604052806101ab8152602001615b3a6101ab913981565b60005460ff1681565b6402540be40081565b60005460ff16611940576040805162461bcd60e51b81526020600482015260196024820152781d1a194818dbdb9d1c9858dd081b9bdd081a5b9a5d081e595d603a1b604482015290519081900360640190fd5b33611007146119805760405162461bcd60e51b815260040180806020018281038252602e815260200180615f4d602e913960400191505060405180910390fd5b6119ea84848080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525050604080518082019091526013815272065787069726554696d655365636f6e6447617606c1b602082015291506143259050565b15611ac55760208114611a2e5760405162461bcd60e51b8152600401808060200182810382526026815260200180615fe16026913960400191505060405180910390fd5b604080516020601f8401819004810282018101909252828152600091611a6c9185858083850183828082843760009201919091525061440c92505050565b905060648110158015611a825750620186a08111155b611abd5760405162461bcd60e51b8152600401808060200182810382526027815260200180615e746027913960400191505060405180910390fd5b600255612298565b611b2584848080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250506040805180820190915260098152686275726e526174696f60b81b602082015291506143259050565b15611c155760208114611b7f576040805162461bcd60e51b815260206004820152601c60248201527f6c656e677468206f66206275726e526174696f206d69736d6174636800000000604482015290519081900360640190fd5b604080516020601f8401819004810282018101909252828152600091611bbd9185858083850183828082843760009201919091525061440c92505050565b905061271081111
5611c005760405162461bcd60e51b815260040180806020018281038252602b815260200180615e1e602b913960400191505060405180910390fd5b6006556007805460ff19166001179055612298565b611c7f84848080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250506040805180820190915260138152726d61784e756d4f664d61696e7461696e696e6760681b602082015291506143259050565b15611d575760208114611cc35760405162461bcd60e51b8152600401808060200182810382526026815260200180615ce56026913960400191505060405180910390fd5b604080516020601f8401819004810282018101909252828152600091611d019185858083850183828082843760009201919091525061440c92505050565b600c5490915080611d10575060155b808210611d4e5760405162461bcd60e51b8152600401808060200182810382526038815260200180615d5c6038913960400191505060405180910390fd5b50600855612298565b611dc084848080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250506040805180820190915260128152716d61696e7461696e536c6173685363616c6560701b602082015291506143259050565b15611e8b5760208114611e045760405162461bcd60e51b8152600401808060200182810382526025815260200180615d376025913960400191505060405180910390fd5b604080516020601f8401819004810282018101909252828152600091611e429185858083850183828082843760009201919091525061440c92505050565b905060008111611e835760405162461bcd60e51b815260040180806020018281038252602d8152602001806160a4602d913960400191505060405180910390fd5b600a55612298565b611eff84848080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152505060408051808201909152601981527f6d61784e756d4f66576f726b696e6743616e6469646174657300000000000000602082015291506143259050565b15611fcc5760208114611f435760405162461bcd60e51b815260040180806020018281038252602c815260200180615d0b602c913960400191505060405180910390fd5b604080516020601f8401819004810282018101909252828152600091611f819185858083850183828082843760009201919091525061440c92505050565b9050600d54811115611fc45760405162461bcd60e51b8152600401808060200182810382526049815260200180615e9b6049913960600191505060405180910390fd5b600e55612298565b61203584848080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250506040805180820190915260128152716d61784e756d4f6643616e6469646174657360701b602082015291506143259050565b156120d657602081146120795760405162461bcd60e51b8152600401808060200182810382526025815260200180615f7b6025913960400191505060405180910390fd5b604080516020601f84018190048102820181019092528281526000916120b79185858083850183828082843760009201919091525061440c92505050565b600d819055600e549091508110156120d057600d54600e555b50612298565b61213a84848080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152505060408051808201909152600d81526c6e756d4f66436162696e65747360981b602082015291506143259050565b1561225b5760208114612194576040805162461bcd60e51b815260206004820181905260248201527f6c656e677468206f66206e756d4f66436162696e657473206d69736d61746368604482015290519081900360640190fd5b604080516020601f84018190048102820181019092528281526000916121d29185858083850183828082843760009201919091525061440c92505050565b9050600081116122135760405162461bcd60e51b8152600401808060200182810382526028815260200180615d946028913960400191505060405180910390fd5b60298111156122535760405162461bcd60e51b8152600401808060200182810382526039815260200180615dbc6039913960400191505060405180910390fd5b600c55612298565b6040805162461bcd60e51b815260206004820152600d60248201526c756e6b6e6f776e20706172616d60981b604482015290519081900360640190fd5b7f6cdb0ac70ab7f2e2d035cca5be60d89906f2dede7648ddbd7402189c1e
eed17a848484846040518080602001806020018381038352878782818152602001925080828437600083820152601f01601f191690910184810383528581526020019050858580828437600083820152604051601f909101601f19169092018290039850909650505050505050a150505050565b60046020526000908152604090205481565b6001546060906000805b8281101561236b57612357816111ff565b15612363576001909101905b600101612346565b50606081604051908082528060200260200182016040528015612398578160200160208202803683370190505b5090506000915060005b83811015612417576123b3816111ff565b1561240f57600181815481106123c557fe5b600091825260209091206004909102015482516001600160a01b03909116908390859081106123f057fe5b6001600160a01b03909216602092830291909101909101526001909201915b6001016123a2565b509250505090565b601581565b61100281565b600061243461233c565b519050600080600c541161244957601561244d565b600c545b90508082111561245b578091505b8161246557600191505b5090565b67016345785d8a000081565b60055481565b61100381565b602981565b60005460ff16156124de576040805162461bcd60e51b815260206004820152601960248201527f74686520636f6e747261637420616c726561647920696e697400000000000000604482015290519081900360640190fd5b6124e6615a3f565b600061250c604051806101e001604052806101ab8152602001615b3a6101ab9139612ce1565b915091508061254c5760405162461bcd60e51b81526004018080602001828103825260218152602001806160276021913960400191505060405180910390fd5b60005b8260200151518110156126715760018360200151828151811061256e57fe5b60209081029190910181015182546001818101855560009485528385208351600493840290910180546001600160a01b039283166001600160a01b03199182161782558587015182850180549185169183169190911790556040860151600283018054606089015160808a01511515600160e01b0260ff60e01b196001600160401b03909216600160a01b0267ffffffffffffffff60a01b199590981692909516919091179290921694909417161790915560a09093015160039093019290925591860151805191850193918590811061264457fe5b602090810291909101810151516001600160a01b031682528101919091526040016000205560010161254f565b50506103e8600255506000805460ff19166001179055565b600d5481565b33611001146126cf5760405162461bcd60e51b81526004018080602001828103825260298152602001806160f16029913960400191505060405180910390fd5b600b54612774576126de6159e1565b60015460005b8181101561277057600b80546001810182556000919091528351600080516020615fc1833981519152601690920291820190815560208501516000805160206160d18339815191528301805460ff19169115159190911790556040850151859261276291600080516020616007833981519152909101906014615a05565b5050508060010190506126e4565b5050505b600061277f82614411565b905061278a81611130565b1561279957612799828261428d565b5050565b606581565b3341146127e05760405162461bcd60e51b815260040180806020018281038252602d815260200180616077602d913960400191505060405180910390fd5b60005460ff16612833576040805162461bcd60e51b81526020600482015260196024820152781d1a194818dbdb9d1c9858dd081b9bdd081a5b9a5d081e595d603a1b604482015290519081900360640190fd5b60003411612880576040805162461bcd60e51b81526020600482015260156024820152746465706f7369742076616c7565206973207a65726f60581b604482015290519081900360640190fd5b6001600160a01b0381166000908152600460205260409020546007543491906103e89060ff16156128b057506006545b6000831180156128c05750600081115b156129695760006128e96127106128dd868563ffffffff6145b516565b9063ffffffff61460e16565b905080156129675760405161dead9082156108fc029083906000818181858888f19350505050158015612920573d6000803e3d6000fd5b506040805182815290517f627059660ea01c4733a328effb2294d2f86905bf806da763a89cee254de8bee59181900360200190a1612964848263ffffffff61465016565b93505b505b8115612a6157600060018084038154811061298057fe5b9060005260206000209060040201905080600201601c9054906101000a900460ff16156129eb576
040805185815290516001600160a01b038716917ff177e5d6c5764d79c32883ed824111d9b13f5668cf6ab1cc12dd36791dd955b4919081900360200190a2612a5b565b6003546129fe908563ffffffff61469216565b6003908155810154612a16908563ffffffff61469216565b60038201556040805185815290516001600160a01b038716917f93a090ecc682c002995fad3c85b30c5651d7fd29b0be5da9d784a3302aedc055919081900360200190a25b50612aa1565b6040805184815290516001600160a01b038616917ff177e5d6c5764d79c32883ed824111d9b13f5668cf6ab1cc12dd36791dd955b4919081900360200190a25b50505050565b600e5481565b61100081565b61dead81565b600b8181548110612ac657fe5b60009182526020909120601690910201805460019091015490915060ff1682565b61100481565b6000600a5460001480612afe575081155b80612b095750600954155b15612b1657506000611115565b600960008154809291906001900391905055506000612b63600a546128dd856128dd600b8981548110612b4557fe5b6000918252602090912060169091020154439063ffffffff61465016565b90506000600b8581548110612b7457fe5b906000526020600020906016020160010160006101000a81548160ff0219169083151502179055506000806110016001600160a01b0316638256ace66040518163ffffffff1660e01b8152600401604080518083038186803b158015612bd957600080fd5b505afa158015612bed573d6000803e3d6000fd5b505050506040513d6040811015612c0357600080fd5b508051602090910151600095509092509050808310612c9157612c268787613e22565b50604080516305bfb49960e41b81526001600160a01b0389166004820152905161100191635bfb499091602480830192600092919082900301818387803b158015612c7057600080fd5b505af1158015612c84573d6000803e3d6000fd5b5050505060019350612ca3565b818310612ca357612ca187614411565b505b6040516001600160a01b038816907fb9d38178dc641ff1817967a63c9078cbcd955a9f1fcd75e0e3636de615d44d3b90600090a25050509392505050565b612ce9615a3f565b6000612cf3615a3f565b612cfb615a57565b612d0c612d07866146ec565b614711565b90506000805b612d1b8361475b565b15612e2c5780612d4057612d36612d318461477c565b6147ca565b60ff168452612e24565b8060011415612e1f576060612d5c612d578561477c565b614881565b90508051604051908082528060200260200182016040528015612d9957816020015b612d86615a77565b815260200190600190039081612d7e5790505b50602086015260005b8151811015612e1457612db3615a77565b6000612dd1848481518110612dc457fe5b6020026020010151614952565b9150915080612dee57876000995099505050505050505050612e35565b8188602001518481518110612dff57fe5b60209081029190910101525050600101612da2565b506001925050612e24565b612e2c565b600101612d12565b50919350909150505b915091565b604080516001808252818301909252606091829190816020015b6060815260200190600190039081612e54579050509050612e7a8363ffffffff16614a2f565b81600081518110612e8757fe5b602002602001018190525061111581614a42565b6000806060612ea984614acc565b9150915081612f56577f70e72399380dcfb0338abc03dc8d47f9f470ada8e769c9a78d644ea97385ecb2816040518080602001828103825283818151815260200191508051906020019080838360005b83811015612f11578181015183820152602001612ef9565b50505050905090810190601f168015612f3e5780820380516001836020036101000a031916815260200191505b509250505060405180910390a16066925050506111fa565b50506060612f6383614bae565b6001549091506000908190815b81811015612fe65767016345785d8a000060018281548110612f8e57fe5b90600052602060002090600402016003015410612fb057600190930192612fde565b600060018281548110612fbf57fe5b9060005260206000209060040201600301541115612fde576001909201915b600101612f70565b50606083604051908082528060200260200182016040528015613013578160200160208202803683370190505b509050606084604051908082528060200260200182016040528015613042578160200160208202803683370190505b509050606085604051908082528060200260200182016040528015613071578160200160208202803683370190505b5090506060866040519080825280602002602001820160405280156130a057816020016020820280
3683370190505b50905060006060876040519080825280602002602001820160405280156130d1578160200160208202803683370190505b509050606088604051908082528060200260200182016040528015613100578160200160208202803683370190505b509050600099506000985060006110046001600160a01b031663149d14d96040518163ffffffff1660e01b815260040160206040518083038186803b15801561314857600080fd5b505afa15801561315c573d6000803e3d6000fd5b505050506040513d602081101561317257600080fd5b5051905067016345785d8a00008111156131e6577f70e72399380dcfb0338abc03dc8d47f9f470ada8e769c9a78d644ea97385ecb2604051808060200182810382526021815260200180615fa06021913960400191505060405180910390a160689c505050505050505050505050506111fa565b60005b898110156134575767016345785d8a00006001828154811061320757fe5b9060005260206000209060040201600301541061338d576001818154811061322b57fe5b906000526020600020906004020160020160009054906101000a90046001600160a01b0316898d8151811061325c57fe5b60200260200101906001600160a01b031690816001600160a01b03168152505060006402540be4006001838154811061329157fe5b906000526020600020906004020160030154816132aa57fe5b06600183815481106132b857fe5b9060005260206000209060040201600301540390506132e0838261465090919063ffffffff16565b898e815181106132ec57fe5b6020026020010181815250506001828154811061330557fe5b906000526020600020906004020160020160009054906101000a90046001600160a01b0316878e8151811061333657fe5b60200260200101906001600160a01b031690816001600160a01b03168152505081888e8151811061336357fe5b602090810291909101015261337e868263ffffffff61469216565b6001909d019c955061344f9050565b60006001828154811061339c57fe5b906000526020600020906004020160030154111561344f57600181815481106133c157fe5b906000526020600020906004020160010160009054906101000a90046001600160a01b0316848c815181106133f257fe5b60200260200101906001600160a01b031690816001600160a01b0316815250506001818154811061341f57fe5b906000526020600020906004020160030154838c8151811061343d57fe5b60209081029190910101526001909a01995b6001016131e9565b5060008415613895576110046001600160a01b0316636e056520868b8b8a60025442016040518663ffffffff1660e01b815260040180806020018060200180602001856001600160401b03166001600160401b03168152602001848103845288818151815260200191508051906020019060200280838360005b838110156134e95781810151838201526020016134d1565b50505050905001848103835287818151815260200191508051906020019060200280838360005b83811015613528578181015183820152602001613510565b50505050905001848103825286818151815260200191508051906020019060200280838360005b8381101561356757818101518382015260200161354f565b505050509050019750505050505050506020604051808303818588803b15801561359057600080fd5b505af1935050505080156135b657506040513d60208110156135b157600080fd5b505160015b6137f1576040516000815260443d10156135d25750600061366d565b60046000803e60005160e01c6308c379a081146135f357600091505061366d565b60043d036004833e81513d60248201116001600160401b038211171561361e5760009250505061366d565b80830180516001600160401b0381111561363f57600094505050505061366d565b8060208301013d860181111561365d5760009550505050505061366d565b601f01601f191660405250925050505b80613678575061371c565b60019150857fa7cdeed7d0db45e3219a6e5d60838824c16f1d39991fcfe3f963029c844bf280826040518080602001828103825283818151815260200191508051906020019080838360005b838110156136dc5781810151838201526020016136c4565b50505050905090810190601f1680156137095780820380516001836020036101000a031916815260200191505b509250505060405180910390a2506137ec565b3d808015613746576040519150601f19603f3d011682016040523d82523d6000602084013e61374b565b606091505b5060019150857fbfa884552dd8921b6ce90bfe906952ae5b3b29be0cc1a951d4f62697635a3a458260405180806020018281038252838181518152602001915
08051906020019080838360005b838110156137b0578181015183820152602001613798565b50505050905090810190601f1680156137dd5780820380516001836020036101000a031916815260200191505b509250505060405180910390a2505b613895565b801561382f576040805187815290517fa217d08e65f80c73121cd9db834d81652d544bfbf452f6d04922b16c90a37b709181900360200190a1613893565b604080516020808252601b908201527f6261746368207472616e736665722072657475726e2066616c7365000000000081830152905187917fa7cdeed7d0db45e3219a6e5d60838824c16f1d39991fcfe3f963029c844bf280919081900360600190a25b505b8015613a4b5760005b8751811015613a495760008882815181106138b557fe5b602002602001015190506000600182815481106138ce57fe5b60009182526020909120600160049092020181015481546001600160a01b03909116916108fc91859081106138ff57fe5b9060005260206000209060040201600301549081150290604051600060405180830381858888f19350505050905080156139bb576001828154811061394057fe5b60009182526020909120600160049092020181015481546001600160a01b03909116917f6c61d60f69a7beb3e1c80db7f39f37b208537cbb19da3174511b477812b2fc7d918590811061398f57fe5b9060005260206000209060040201600301546040518082815260200191505060405180910390a2613a3f565b600182815481106139c857fe5b60009182526020909120600160049092020181015481546001600160a01b03909116917f25d0ce7d2f0cec669a8c17efe49d195c13455bb8872b65fa610ac7f53fe4ca7d9185908110613a1757fe5b9060005260206000209060040201600301546040518082815260200191505060405180910390a25b505060010161389e565b505b835115613b955760005b8451811015613b93576000858281518110613a6c57fe5b60200260200101516001600160a01b03166108fc868481518110613a8c57fe5b60200260200101519081150290604051600060405180830381858888f1935050505090508015613b2257858281518110613ac257fe5b60200260200101516001600160a01b03167f6c61d60f69a7beb3e1c80db7f39f37b208537cbb19da3174511b477812b2fc7d868481518110613b0057fe5b60200260200101516040518082815260200191505060405180910390a2613b8a565b858281518110613b2e57fe5b60200260200101516001600160a01b03167f25d0ce7d2f0cec669a8c17efe49d195c13455bb8872b65fa610ac7f53fe4ca7d868481518110613b6c57fe5b60200260200101516040518082815260200191505060405180910390a25b50600101613a55565b505b4715613bfe576040805147815290517f6ecc855f9440a9282c90913bbc91619fd44f5ec0b462af28d127b116f130aa4d9181900360200190a1604051611002904780156108fc02916000818181858888f19350505050158015613bfc573d6000803e3d6000fd5b505b600060038190556005558c5115613c1857613c188d614d7e565b6110016001600160a01b031663fc4333cd6040518163ffffffff1660e01b8152600401600060405180830381600087803b158015613c5557600080fd5b505af1158015613c69573d6000803e3d6000fd5b50506040517fedd8d7296956dd970ab4de3f2fc03be2b0ffc615d20cd4c72c6e44f928630ebf925060009150a15060009e9d5050505050505050505050505050565b80516001600160a01b0316600090815260046020526040812054801580613cfc5750600180820381548110613cdc57fe5b9060005260206000209060040201600201601c9054906101000a900460ff165b15613d425782516040516001600160a01b03909116907fe209c46bebf57cf265d5d9009a00870e256d9150f3ed5281ab9d9eb3cec6e4be90600090a260009150506111fa565b600154600554600019820111801590613d985784516040516001600160a01b03909116907fe209c46bebf57cf265d5d9009a00870e256d9150f3ed5281ab9d9eb3cec6e4be90600090a2600093505050506111fa565b600580546001908101909155805481906000198601908110613db657fe5b6000918252602082206002600490920201018054921515600160e01b0260ff60e01b199093169290921790915585516040516001600160a01b03909116917ff226e7d8f547ff903d9d419cf5f54e0d7d07efa9584135a53a057c5f1f27f49a91a2506000949350505050565b60008060018381548110613e3257fe5b90600052602060002090600402016003015490506000600180805490500390506001613e5c61233c565b5111613e9157600060018581548110613e7157fe5b9060005260206000209060
040201600301819055506000925050506112aa565b6040805183815290516001600160a01b038716917f3b6f9ef90462b512a1293ecec018670bf7b7f1876fb727590a8a6d7643130a70919081900360200190a26001600160a01b038516600090815260046020526040812055835b6001546000190181101561408e5760018160010181548110613f0957fe5b906000526020600020906004020160018281548110613f2457fe5b60009182526020909120825460049092020180546001600160a01b03199081166001600160a01b0393841617825560018085015481840180548416918616919091179055600280860180549185018054909416919095161780835584546001600160401b03600160a01b91829004160267ffffffffffffffff60a01b1990911617808355935460ff600160e01b918290041615150260ff60e01b19909416939093179055600392830154920191909155600b805490918301908110613fe557fe5b9060005260206000209060160201600b828154811061400057fe5b600091825260209091208254601690920201908155600180830154908201805460ff191660ff909216151591909117905561404360028083019084016014615aac565b5090505080600101600460006001848154811061405c57fe5b600091825260208083206004909202909101546001600160a01b03168352820192909252604001902055600101613eeb565b50600180548061409a57fe5b60008281526020812060046000199093019283020180546001600160a01b0319908116825560018201805490911690556002810180546001600160e81b0319169055600301559055600b8054806140ed57fe5b60008281526020812060166000199093019283020181815560018101805460ff191690559061411f6002830182615ad7565b50509055600081838161412e57fe5b04905080156141925760015460005b8181101561418f57826001828154811061415357fe5b906000526020600020906004020160030154016001828154811061417357fe5b600091825260209091206003600490920201015560010161413d565b50505b50600195945050505050565b60005b828110156142845760408051602080820189905287840182840152825180830384018152606090920190925280519101206000908390816141de57fe5b0690508085018287011461427b57600088838801815181106141fc57fe5b60200260200101519050888287018151811061421457fe5b6020026020010151898489018151811061422a57fe5b60200260200101906001600160a01b031690816001600160a01b03168152505080898388018151811061425957fe5b60200260200101906001600160a01b031690816001600160a01b031681525050505b506001016141a1565b50505050505050565b600980546001908101909155600b8054839081106142a757fe5b906000526020600020906016020160010160006101000a81548160ff02191690831515021790555043600b82815481106142dd57fe5b600091825260208220601690910201919091556040516001600160a01b038416917ff62981a567ec3cec866c6fa93c55bcdf841d6292d18b8d522ececa769375d82d91a25050565b6000816040516020018082805190602001908083835b6020831061435a5780518252601f19909201916020918201910161433b565b6001836020036101000a03801982511681845116808217855250505050505090500191505060405160208183030381529060405280519060200120836040516020018082805190602001908083835b602083106143c85780518252601f1990920191602091820191016143a9565b6001836020036101000a0380198251168184511680821785525050505050509050019150506040516020818303038152906040528051906020012014905092915050565b015190565b6001600160a01b0381166000908152600460205260408120548061443a575060001990506111fa565b60018103905060006001828154811061444f57fe5b906000526020600020906004020160030154905060006001838154811061447257fe5b906000526020600020906004020160030181905550600060018080549050039050846001600160a01b03167f8cd4e147d8af98a9e3b6724021b8bf6aed2e5dac71c38f2dce8161b82585b25d836040518082815260200191505060405180910390a2806144e4578293505050506111fa565b60008183816144ef57fe5b04905080156145ab5760005b8481101561454d57816001828154811061451157fe5b906000526020600020906004020160030154016001828154811061453157fe5b60009182526020909120600360049092020101556001016144fb565b50600180549085015b818110156145a857826001828154811061456c57fe5b906
000526020600020906004020160030154016001828154811061458c57fe5b6000918252602090912060036004909202010155600101614556565b50505b5091949350505050565b6000826145c4575060006112aa565b828202828482816145d157fe5b04146111155760405162461bcd60e51b8152600401808060200182810382526021815260200180615f2c6021913960400191505060405180910390fd5b600061111583836040518060400160405280601a81526020017f536166654d6174683a206469766973696f6e206279207a65726f000000000000815250615371565b600061111583836040518060400160405280601e81526020017f536166654d6174683a207375627472616374696f6e206f766572666c6f770000815250615413565b600082820183811015611115576040805162461bcd60e51b815260206004820152601b60248201527f536166654d6174683a206164646974696f6e206f766572666c6f770000000000604482015290519081900360640190fd5b6146f4615ae6565b506040805180820190915281518152602082810190820152919050565b614719615a57565b6147228261546d565b61472b57600080fd5b600061473a83602001516154a7565b60208085015160408051808201909152868152920190820152915050919050565b6000614765615ae6565b505080518051602091820151919092015191011190565b614784615ae6565b61478d8261475b565b61479657600080fd5b602082015160006147a68261550a565b80830160209586015260408051808201909152908152938401919091525090919050565b8051600090158015906147df57508151602110155b6147e857600080fd5b60006147f783602001516154a7565b90508083600001511015614852576040805162461bcd60e51b815260206004820152601a60248201527f6c656e677468206973206c657373207468616e206f6666736574000000000000604482015290519081900360640190fd5b82516020808501518301805192849003929183101561487857826020036101000a820491505b50949350505050565b606061488c8261546d565b61489557600080fd5b60006148a08361563d565b90506060816040519080825280602002602001820160405280156148de57816020015b6148cb615ae6565b8152602001906001900390816148c35790505b50905060006148f085602001516154a7565b60208601510190506000805b848110156149475761490d8361550a565b915060405180604001604052808381526020018481525084828151811061493057fe5b6020908102919091010152918101916001016148fc565b509195945050505050565b61495a615a77565b6000614964615a77565b61496c615a57565b61497585614711565b90506000805b6149848361475b565b15612e2c57806149af5761499f61499a8461477c565b615699565b6001600160a01b03168452614a27565b80600114156149d7576149c461499a8461477c565b6001600160a01b03166020850152614a27565b80600214156149ff576149ec61499a8461477c565b6001600160a01b03166040850152614a27565b8060031415612e1f57614a14612d318461477c565b6001600160401b03166060850152600191505b60010161497b565b60606112aa614a3d836156b3565b615799565b6060815160001415614a6357506040805160008152602081019091526111fa565b606082600081518110614a7257fe5b602002602001015190506000600190505b8351811015614ab357614aa982858381518110614a9c57fe5b60200260200101516157eb565b9150600101614a83565b50611115614ac6825160c060ff16615868565b826157eb565b60006060602983511115614afe576000604051806060016040528060298152602001615df56029913991509150612e35565b60005b8351811015614b945760005b81811015614b8b57848181518110614b2157fe5b6020026020010151600001516001600160a01b0316858381518110614b4257fe5b6020026020010151600001516001600160a01b03161415614b835760006040518060600160405280602b8152602001615e49602b9139935093505050612e35565b600101614b0d565b50600101614b01565b505060408051602081019091526000815260019150915091565b6060600080808080614bbe61242a565b6001549091505b8015614ccc57600181039250600b8381548110614bde57fe5b600091825260209091206001601690920201015460ff16614bfe57614cc3565b60018381548110614c0b57fe5b60009182526020909120600490910201546001600160a01b03169450614c32858484612aed565b9350831580614c45575060018851038610155b15614c4f57614cc3565b60005b8851811015614cc15785600160
0160a01b0316898281518110614c7157fe5b6020026020010151600001516001600160a01b03161415614cb9576001898281518110614c9a57fe5b6020908102919091010151901515608090910152600190960195614cc1565b600101614c52565b505b60001901614bc5565b5084875103604051908082528060200260200182016040528015614d0a57816020015b614cf7615a77565b815260200190600190039081614cef5790505b5095506000915060005b8751811015614d7357878181518110614d2957fe5b602002602001015160800151614d6b57878181518110614d4557fe5b6020026020010151878481518110614d5957fe5b60209081029190910101526001909201915b600101614d14565b505050505050919050565b600154815160005b82811015614e9b576001614d98615a77565b60018381548110614da557fe5b600091825260208083206040805160c08101825260049490940290910180546001600160a01b0390811685526001820154811693850193909352600281015492831691840191909152600160a01b82046001600160401b03166060840152600160e01b90910460ff16151560808301526003015460a082015291505b84811015614e6f57868181518110614e3557fe5b6020026020010151600001516001600160a01b031682600001516001600160a01b03161415614e675760009250614e6f565b600101614e21565b508115614e915780516001600160a01b03166000908152600460205260408120555b5050600101614d86565b5080821115614f4c57805b82811015614f4a576001805480614eb957fe5b60008281526020812060046000199093019283020180546001600160a01b0319908116825560018201805490911690556002810180546001600160e81b0319169055600301559055600b805480614f0c57fe5b60008281526020812060166000199093019283020181815560018101805460ff1916905590614f3e6002830182615ad7565b50509055600101614ea6565b505b6000818310614f5b5781614f5d565b825b905060005b818110156151575761500f858281518110614f7957fe5b602002602001015160018381548110614f8e57fe5b60009182526020918290206040805160c08101825260049390930290910180546001600160a01b0390811684526001820154811694840194909452600281015493841691830191909152600160a01b83046001600160401b03166060830152600160e01b90920460ff161515608082015260039091015460a0820152615960565b61512a57806001016004600087848151811061502757fe5b6020026020010151600001516001600160a01b03166001600160a01b031681526020019081526020016000208190555084818151811061506357fe5b60200260200101516001828154811061507857fe5b6000918252602091829020835160049092020180546001600160a01b039283166001600160a01b0319918216178255928401516001820180549184169185169190911790556040840151600282018054606087015160808801511515600160e01b0260ff60e01b196001600160401b03909216600160a01b0267ffffffffffffffff60a01b1995909716929097169190911792909216939093171692909217905560a09091015160039091015561514f565b60006001828154811061513957fe5b9060005260206000209060040201600301819055505b600101614f62565b50828211156152fc576151686159e1565b835b838110156152f957600186828151811061518057fe5b6020908102919091018101518254600181810185556000948552838520835160049093020180546001600160a01b039384166001600160a01b0319918216178255848601518284018054918616918316919091179055604080860151600284018054606089015160808a01511515600160e01b0260ff60e01b196001600160401b03909216600160a01b0267ffffffffffffffff60a01b1995909a1692909616919091179290921696909617169190911790935560a090930151600390930192909255600b8054928301815590935284516016909102600080516020615fc18339815191528101918255918501516000805160206160d18339815191528301805491151560ff19909216919091179055918401518492916152b591600080516020616007833981519152909101906014615a05565b50505080600101600460008884815181106152cc57fe5b602090810291909101810151516001600160a01b031682528101919091526040016000205560010161516a565b50505b6000600981905560015493505b8381101561536a576000600b828154811061532057fe5b60009182526020822060169190910201600101805460ff191692151592909217909155600b80548390811061535157fe5
b6000918252602090912060169091020155600101615309565b5050505050565b600081836153fd5760405162461bcd60e51b81526004018080602001828103825283818151815260200191508051906020019080838360005b838110156153c25781810151838201526020016153aa565b50505050905090810190601f1680156153ef5780820380516001836020036101000a031916815260200191505b509250505060405180910390fd5b50600083858161540957fe5b0495945050505050565b600081848411156154655760405162461bcd60e51b81526020600482018181528351602484015283519092839260449091019190850190808383600083156153c25781810151838201526020016153aa565b505050900390565b805160009061547e575060006111fa565b6020820151805160001a9060c082101561549d576000925050506111fa565b5060019392505050565b8051600090811a60808110156154c15760009150506111fa565b60b88110806154dc575060c081108015906154dc575060f881105b156154eb5760019150506111fa565b60c08110156154ff5760b5190190506111fa565b60f5190190506111fa565b80516000908190811a60808110156155255760019150615636565b60b881101561553a57607e1981019150615636565b60c08110156155b457600060b78203600186019550806020036101000a8651049150600181018201935050808310156155ae576040805162461bcd60e51b81526020600482015260116024820152706164646974696f6e206f766572666c6f7760781b604482015290519081900360640190fd5b50615636565b60f88110156155c95760be1981019150615636565b600060f78203600186019550806020036101000a865104915060018101820193505080831015615634576040805162461bcd60e51b81526020600482015260116024820152706164646974696f6e206f766572666c6f7760781b604482015290519081900360640190fd5b505b5092915050565b805160009061564e575060006111fa565b6000809050600061566284602001516154a7565b602085015185519181019250015b80821015615690576156818261550a565b60019093019290910190615670565b50909392505050565b80516000906015146156aa57600080fd5b6112aa826147ca565b604080516020808252818301909252606091829190602082018180368337505050602081018490529050600067ffffffffffffffff1984166156f75750601861571b565b6fffffffffffffffffffffffffffffffff1984166157175750601061571b565b5060005b60208110156157515781818151811061573057fe5b01602001516001600160f81b0319161561574957615751565b60010161571b565b60008160200390506060816040519080825280601f01601f191660200182016040528015615786576020820181803683370190505b5080830196909652508452509192915050565b6060815160011480156157cb5750607f60f81b826000815181106157b957fe5b01602001516001600160f81b03191611155b156157d75750806111fa565b6112aa6157e98351608060ff16615868565b835b6060806040519050835180825260208201818101602087015b8183101561581c578051835260209283019201615804565b50855184518101855292509050808201602086015b81831015615849578051835260209283019201615831565b508651929092011591909101601f01601f191660405250905092915050565b60606801000000000000000083106158b8576040805162461bcd60e51b815260206004820152600e60248201526d696e70757420746f6f206c6f6e6760901b604482015290519081900360640190fd5b604080516001808252818301909252606091602082018180368337019050509050603784116159125782840160f81b816000815181106158f457fe5b60200101906001600160f81b031916908160001a90535090506112aa565b606061591d856156b3565b90508381510160370160f81b8260008151811061593657fe5b60200101906001600160f81b031916908160001a90535061595782826157eb565b95945050505050565b805182516000916001600160a01b03918216911614801561599a575081602001516001600160a01b031683602001516001600160a01b0316145b80156159bf575081604001516001600160a01b031683604001516001600160a01b0316145b80156111155750506060908101519101516001600160401b0390811691161490565b6040805160608101825260008082526020820152908101615a00615b00565b905290565b8260148101928215615a33579160200282015b82811115615a33578251825591602001919060010190615a18565b50612465929150615b1f565b604080
51808201909152600081526060602082015290565b6040518060400160405280615a6a615ae6565b8152602001600081525090565b6040805160c081018252600080825260208201819052918101829052606081018290526080810182905260a081019190915290565b8260148101928215615a33579182015b82811115615a33578254825591600101919060010190615abc565b5061140c906014810190615b1f565b604051806040016040528060008152602001600081525090565b6040518061028001604052806014906020820280368337509192915050565b61152991905b808211156124655760008155600101615b2556fef901a880f901a4f844941284214b9b9c85549ab3d2b972df0deef66ac2c9946ddf42a51534fc98d0c0a3b42c963cace8441ddf946ddf42a51534fc98d0c0a3b42c963cace8441ddf8410000000f84494a2959d3f95eae5dc7d70144ce1b73b403b7eb6e0948081ef03f1d9e0bb4a5bf38f16285c879299f07f948081ef03f1d9e0bb4a5bf38f16285c879299f07f8410000000f8449435552c16704d214347f29fa77f77da6d75d7c75294dc4973e838e3949c77aced16ac2315dc2d7ab11194dc4973e838e3949c77aced16ac2315dc2d7ab1118410000000f84494980a75ecd1309ea12fa2ed87a8744fbfc9b863d594cc6ac05c95a99c1f7b5f88de0e3486c82293b27094cc6ac05c95a99c1f7b5f88de0e3486c82293b2708410000000f84494f474cf03cceff28abc65c9cbae594f725c80e12d94e61a183325a18a173319dd8e19c8d069459e217594e61a183325a18a173319dd8e19c8d069459e21758410000000f84494b71b214cb885500844365e95cd9942c7276e7fd894d22ca3ba2141d23adab65ce4940eb7665ea2b6a794d22ca3ba2141d23adab65ce4940eb7665ea2b6a784100000006c656e677468206f66206d61784e756d4f664d61696e7461696e696e67206d69736d617463686c656e677468206f66206d61784e756d4f66576f726b696e6743616e64696461746573206d69736d617463686c656e677468206f66206d61696e7461696e536c6173685363616c65206d69736d61746368746865206d61784e756d4f664d61696e7461696e696e67206d757374206265206c657373207468616e206e756d4f6643616e696e61746573746865206e756d4f66436162696e657473206d7573742062652067726561746572207468616e2030746865206e756d4f66436162696e657473206d757374206265206c657373207468616e204d41585f4e554d5f4f465f56414c494441544f5253746865206e756d626572206f662076616c696461746f72732065786365656420746865206c696d6974746865206275726e526174696f206d757374206265206e6f2067726561746572207468616e2031303030306475706c696361746520636f6e73656e7375732061646472657373206f662076616c696461746f725365747468652065787069726554696d655365636f6e64476170206973206f7574206f662072616e6765746865206d61784e756d4f66576f726b696e6743616e64696461746573206d757374206265206e6f742067726561746572207468616e206d61784e756d4f6643616e6469646174657363616e206e6f7420656e7465722054656d706f72617279204d61696e74656e616e63656c656e677468206f66206a61696c2076616c696461746f7273206d757374206265206f6e65536166654d6174683a206d756c7469706c69636174696f6e206f766572666c6f77746865206d6573736167652073656e646572206d75737420626520676f7665726e616e636520636f6e74726163746c656e677468206f66206d61784e756d4f6643616e64696461746573206d69736d61746368666565206973206c6172676572207468616e2044555354595f494e434f4d494e470175b7a638427703f0dbe7bb9bbf987a2551717b34e79f33b5b1008d1fa01db96c656e677468206f662065787069726554696d655365636f6e64476170206d69736d617463680175b7a638427703f0dbe7bb9bbf987a2551717b34e79f33b5b1008d1fa01dbb6661696c656420746f20706172736520696e69742076616c696461746f72536574746865206d6573736167652073656e646572206d7573742062652063726f737320636861696e20636f6e7472616374746865206d6573736167652073656e646572206d7573742062652074686520626c6f636b2070726f6475636572746865206d61696e7461696e536c6173685363616c65206d7573742062652067726561746572207468616e20300175b7a638427703f0dbe7bb9bbf987a2551717b34e79f33b5b1008d1fa01dba746865206d6573736167652073656e646572206d75737420626520736c61736820636f6e7472616374a26469706673582212207d06d87a731c4f784da075c7514dda45d
8e95322210e57cff56c67307efa97d164736f6c63430006040033", + }, + { + ContractAddr: SlashContract, + CommitUrl: "https://github.com/bnb-chain/bsc-genesis-contract/commit/db8bb560ac5a1265c685b719c7e976dced162310", + Code: "608060405234801561001057600080fd5b506004361061023d5760003560e01c80638256ace61161013b578063c80d4b8f116100b8578063e1c7392a1161007c578063e1c7392a1461071d578063f9a2bbc714610725578063fc3e59081461072d578063fc4333cd14610735578063fd6a68791461073d5761023d565b8063c80d4b8f14610667578063c81b16621461066f578063c8509d8114610677578063c96be4cb146106ef578063dc927faf146107155761023d565b8063a1a11bf5116100ff578063a1a11bf514610575578063a78abc161461057d578063ab51bb9614610599578063ac0af629146105a1578063ac431751146105a95761023d565b80638256ace6146104dd578063831d65d1146104e557806396713da91461055d5780639bc8e4f2146105655780639dc092621461056d5761023d565b80634bf6c882116101c95780636e47b4821161018d5780636e47b482146104b557806370fd5bad146104bd57806375d47a0a146104c55780637912a65d146104cd5780637942fd05146104d55761023d565b80634bf6c8821461046d57806351e8067214610475578063567a372d1461047d5780635bfb49901461048557806362b72cf5146104ad5761023d565b806337c8dab91161021057806337c8dab9146103cf578063389f4f711461040e5780633dffc3871461042857806343756e5c14610446578063493279b11461044e5761023d565b80630bee7a67146102425780631182b8751461026357806323bac5a21461035057806335aa2e4414610396575b600080fd5b61024a610745565b6040805163ffffffff9092168252519081900360200190f35b6102db6004803603604081101561027957600080fd5b60ff8235169190810190604081016020820135600160201b81111561029d57600080fd5b8201836020820111156102af57600080fd5b803590602001918460018302840111600160201b831117156102d057600080fd5b50909250905061074a565b6040805160208082528351818301528351919283929083019185019080838360005b838110156103155781810151838201526020016102fd565b50505050905090810190601f1680156103425780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b6103766004803603602081101561036657600080fd5b50356001600160a01b031661081e565b604080519384526020840192909252151582820152519081900360600190f35b6103b3600480360360208110156103ac57600080fd5b5035610841565b604080516001600160a01b039092168252519081900360200190f35b6103f5600480360360208110156103e557600080fd5b50356001600160a01b0316610868565b6040805192835260208301919091528051918290030190f35b6104166108bf565b60408051918252519081900360200190f35b6104306108c5565b6040805160ff9092168252519081900360200190f35b6103b36108ca565b6104566108d0565b6040805161ffff9092168252519081900360200190f35b6104306108d6565b6103b36108db565b6104166108e1565b6104ab6004803603602081101561049b57600080fd5b50356001600160a01b03166108e7565b005b610416610a48565b6103b3610a4e565b610430610a54565b6103b3610a59565b610416610a5f565b610430610a64565b6103f5610a69565b6104ab600480360360408110156104fb57600080fd5b60ff8235169190810190604081016020820135600160201b81111561051f57600080fd5b82018360208201111561053157600080fd5b803590602001918460018302840111600160201b8311171561055257600080fd5b509092509050610a73565b610430610bcd565b610416610bd2565b6103b3610bdd565b6103b3610be3565b610585610be9565b604080519115158252519081900360200190f35b61024a610bf2565b610416610bf7565b6104ab600480360360408110156105bf57600080fd5b810190602081018135600160201b8111156105d957600080fd5b8201836020820111156105eb57600080fd5b803590602001918460018302840111600160201b8311171561060c57600080fd5b919390929091602081019035600160201b81111561062957600080fd5b82018360208201111561063b57600080fd5b803590602001918460018302840111600160201b8311171561065c57600080fd5b509092509050610bfc565b610416610fea565b6103b3610fef565b6104ab600480360360408110
1561068d57600080fd5b60ff8235169190810190604081016020820135600160201b8111156106b157600080fd5b8201836020820111156106c357600080fd5b803590602001918460018302840111600160201b831117156106e457600080fd5b509092509050610ff5565b6104ab6004803603602081101561070557600080fd5b50356001600160a01b03166110a8565b6103b361154b565b6104ab611551565b6103b36115c2565b6104306115c8565b6104ab6115cd565b6103b3611a58565b606481565b6060336120001461078c5760405162461bcd60e51b815260040180806020018281038252602f8152602001806124b0602f913960400191505060405180910390fd5b60005460ff166107d1576040805162461bcd60e51b8152602060048201526019602482015260008051602061250c833981519152604482015290519081900360640190fd5b6040805162461bcd60e51b815260206004820152601e60248201527f7265636569766520756e65787065637465642073796e207061636b6167650000604482015290519081900360640190fd5b600260208190526000918252604090912080546001820154919092015460ff1683565b6001818154811061084e57fe5b6000918252602090912001546001600160a01b0316905081565b600080610873612374565b5050506001600160a01b0316600090815260026020818152604092839020835160608101855281548082526001830154938201849052919093015460ff16151592909301919091529091565b60055481565b600181565b61100181565b6102ca81565b600881565b61200081565b60045481565b33611000146109275760405162461bcd60e51b815260040180806020018281038252603081526020018061240b6030913960400191505060405180910390fd5b60005460ff1661096c576040805162461bcd60e51b8152602060048201526019602482015260008051602061250c833981519152604482015290519081900360640190fd5b61200063f7a251d7600b61097f84611a5e565b60006040518463ffffffff1660e01b8152600401808460ff1660ff16815260200180602001838152602001828103825284818151815260200191508051906020019080838360005b838110156109df5781810151838201526020016109c7565b50505050905090810190601f168015610a0c5780820380516001836020036101000a031916815260200191505b50945050505050600060405180830381600087803b158015610a2d57600080fd5b505af1158015610a41573d6000803e3d6000fd5b5050505050565b60035481565b61100581565b600281565b61100881565b603281565b600b81565b6004546005549091565b3361200014610ab35760405162461bcd60e51b815260040180806020018281038252602f8152602001806124b0602f913960400191505060405180910390fd5b60005460ff16610af8576040805162461bcd60e51b8152602060048201526019602482015260008051602061250c833981519152604482015290519081900360640190fd5b610b00612397565b6000610b4184848080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250611b3192505050565b915091508015610b8b5781516040805163ffffffff9092168252517f7f0956d47419b9525356e7111652b653b530ec6f5096dccc04589bc38e6299679181900360200190a1610a41565b81516040805163ffffffff9092168252517f7d45f62d17443dd4547bca8a8112c60e2385669318dc300ec61a5d2492f262e79181900360200190a15050505050565b600981565b662386f26fc1000081565b61100781565b61100681565b60005460ff1681565b600081565b600481565b60005460ff16610c41576040805162461bcd60e51b8152602060048201526019602482015260008051602061250c833981519152604482015290519081900360640190fd5b3361100714610c815760405162461bcd60e51b815260040180806020018281038252602e81526020018061243b602e913960400191505060405180910390fd5b610cec84848080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250506040805180820190915260148152731b5a5cd9195b59585b9bdc951a1c995cda1bdb1960621b60208201529150611bb19050565b15610dc55760208114610d305760405162461bcd60e51b81526004018080602001828103825260278152602001806123e46027913960400191505060405180910390fd5b604080516020601f8401819004810282018101909252828152600091610d6e91858580838501838280828437600092019190915250611c9992505050565b9050600181101
58015610d82575060055481105b610dbd5760405162461bcd60e51b815260040180806020018281038252602581526020018061248b6025913960400191505060405180910390fd5b600455610f58565b610e2b84848080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152505060408051808201909152600f81526e19995b1bdb9e551a1c995cda1bdb19608a1b60208201529150611bb19050565b15610f1b5760208114610e6f5760405162461bcd60e51b81526004018080602001828103825260228152602001806124696022913960400191505060405180910390fd5b604080516020601f8401819004810282018101909252828152600091610ead91858580838501838280828437600092019190915250611c9992505050565b90506103e88111158015610ec2575060045481115b610f13576040805162461bcd60e51b815260206004820181905260248201527f7468652066656c6f6e795468726573686f6c64206f7574206f662072616e6765604482015290519081900360640190fd5b600555610f58565b6040805162461bcd60e51b815260206004820152600d60248201526c756e6b6e6f776e20706172616d60981b604482015290519081900360640190fd5b7f6cdb0ac70ab7f2e2d035cca5be60d89906f2dede7648ddbd7402189c1eeed17a848484846040518080602001806020018381038352878782818152602001925080828437600083820152601f01601f191690910184810383528581526020019050858580828437600083820152604051601f909101601f19169092018290039850909650505050505050a150505050565b609681565b61100281565b33612000146110355760405162461bcd60e51b815260040180806020018281038252602f8152602001806124b0602f913960400191505060405180910390fd5b60005460ff1661107a576040805162461bcd60e51b8152602060048201526019602482015260008051602061250c833981519152604482015290519081900360640190fd5b6040517f07db600eebe2ac176be8dcebad61858c245a4961bb32ca2aa3d159b09aa0810e90600090a1505050565b3341146110e65760405162461bcd60e51b815260040180806020018281038252602d8152602001806124df602d913960400191505060405180910390fd5b60005460ff1661112b576040805162461bcd60e51b8152602060048201526019602482015260008051602061250c833981519152604482015290519081900360640190fd5b6003544311611181576040805162461bcd60e51b815260206004820181905260248201527f63616e206e6f7420736c61736820747769636520696e206f6e6520626c6f636b604482015290519081900360640190fd5b3a156111cb576040805162461bcd60e51b81526020600482015260146024820152736761737072696365206973206e6f74207a65726f60601b604482015290519081900360640190fd5b6040805163155853f360e21b81526001600160a01b03831660048201529051611000916355614fcc916024808301926020929190829003018186803b15801561121357600080fd5b505afa158015611227573d6000803e3d6000fd5b505050506040513d602081101561123d57600080fd5b505161124857611544565b611250612374565b506001600160a01b0381166000908152600260208181526040928390208351606081018552815481526001820154928101929092529091015460ff1615801592820192909252906112ab576020810180516001019052611304565b60016040820181905260208201819052805480820182556000919091527fb10e2d527612073b26eecdfd717e6a320cf44b4afac2b0732d9fcbe2b7fa0cf60180546001600160a01b0319166001600160a01b0384161790555b43815260055460208201518161131657fe5b0661146857600060208201819052604080516335409f7f60e01b81526001600160a01b03851660048201529051611000926335409f7f926024808201939182900301818387803b15801561136957600080fd5b505af115801561137d573d6000803e3d6000fd5b505050506120006001600160a01b031663f7a251d7600b61139d85611a5e565b60006040518463ffffffff1660e01b8152600401808460ff1660ff16815260200180602001838152602001828103825284818151815260200191508051906020019080838360005b838110156113fd5781810151838201526020016113e5565b50505050905090810190601f16801561142a5780820380516001836020036101000a031916815260200191505b50945050505050600060405180830381600087803b15801561144b57600080fd5b505af115801561145f573d6000803e3d6000fd5b505050506114de
565b60045481602001518161147757fe5b066114de57604080516375abf10160e11b81526001600160a01b038416600482015290516110009163eb57e20291602480830192600092919082900301818387803b1580156114c557600080fd5b505af11580156114d9573d6000803e3d6000fd5b505050505b6001600160a01b0382166000818152600260208181526040808420865181559186015160018301558581015191909201805460ff1916911515919091179055517fddb6012116e51abf5436d956a4f0ebd927e92c576ff96d7918290c8782291e3e9190a2505b5043600355565b61100381565b60005460ff16156115a9576040805162461bcd60e51b815260206004820152601960248201527f74686520636f6e747261637420616c726561647920696e697400000000000000604482015290519081900360640190fd5b603260045560966005556000805460ff19166001179055565b61100081565b600381565b336110001461160d5760405162461bcd60e51b815260040180806020018281038252603081526020018061240b6030913960400191505060405180910390fd5b60005460ff16611652576040805162461bcd60e51b8152602060048201526019602482015260008051602061250c833981519152604482015290519081900360640190fd5b60015461165e57611a56565b600154600090600019015b808211611a2a576000805b8284101561178d57611684612374565b600260006001878154811061169557fe5b60009182526020808320909101546001600160a01b0316835282810193909352604091820190208151606081018352815481526001820154938101939093526002015460ff1615159082015260055490915060049004816020015111156117775760046005548161170257fe5b0481602001510381602001818152505080600260006001888154811061172457fe5b6000918252602080832091909101546001600160a01b0316835282810193909352604091820190208351815591830151600183015591909101516002909101805460ff1916911515919091179055611781565b600192505061178d565b50600190930192611674565b8284116119245761179c612374565b60026000600186815481106117ad57fe5b60009182526020808320909101546001600160a01b0316835282810193909352604091820190208151606081018352815481526001820154938101939093526002015460ff1615159082015260055490915060049004816020015111156118955760046005548161181a57fe5b0481602001510381602001818152505080600260006001878154811061183c57fe5b6000918252602080832091909101546001600160a01b03168352828101939093526040918201902083518155918301516001808401919091559201516002909101805460ff191691151591909117905591506119249050565b60026000600186815481106118a657fe5b60009182526020808320909101546001600160a01b031683528201929092526040018120818155600181810192909255600201805460ff191690558054806118ea57fe5b600082815260209020810160001990810180546001600160a01b0319169055019055836119175750611924565b506000199092019161178d565b81801561192e5750805b15611a0d57600260006001868154811061194457fe5b60009182526020808320909101546001600160a01b031683528201929092526040018120818155600181810192909255600201805460ff1916905580548490811061198b57fe5b600091825260209091200154600180546001600160a01b0390921691869081106119b157fe5b9060005260206000200160006101000a8154816001600160a01b0302191690836001600160a01b0316021790555060018054806119ea57fe5b600082815260209020810160001990810180546001600160a01b03191690550190555b82611a19575050611a2a565b505060019091019060001901611669565b6040517fcfdb3b6ccaeccbdc68be3c59c840e3b3c90f0a7c491f5fff1cf56cfda200dd9c90600090a150505b565b61100481565b60408051600480825260a08201909252606091829190816020015b6060815260200190600190039081611a79579050509050611aa2836001600160a01b0316611c9e565b81600081518110611aaf57fe5b6020026020010181905250611ac343611cc1565b81600181518110611ad057fe5b6020908102919091010152611ae66102ca611cc1565b81600281518110611af357fe5b6020026020010181905250611b0742611cc1565b81600381518110611b1457fe5b6020026020010181905250611b2881611cd4565b9150505b919050565b611b39612397565b6000611b43612397565b611b4b6123a9565b611b5c611b5786611d5e565b6
11d83565b90506000805b611b6b83611dcd565b15611ba45780611b9757611b86611b8184611dee565b611e3c565b63ffffffff16845260019150611b9c565b611ba4565b600101611b62565b5091935090915050915091565b6000816040516020018082805190602001908083835b60208310611be65780518252601f199092019160209182019101611bc7565b6001836020036101000a03801982511681845116808217855250505050505090500191505060405160208183030381529060405280519060200120836040516020018082805190602001908083835b60208310611c545780518252601f199092019160209182019101611c35565b6001836020036101000a038019825116818451168082178552505050505050905001915050604051602081830303815290604052805190602001201490505b92915050565b015190565b60408051600560a21b8318601482015260348101909152606090611b2881611ef3565b6060611c93611ccf83611f49565b611ef3565b6060815160001415611cf55750604080516000815260208101909152611b2c565b606082600081518110611d0457fe5b602002602001015190506000600190505b8351811015611d4557611d3b82858381518110611d2e57fe5b602002602001015161202f565b9150600101611d15565b50611b28611d58825160c060ff166120ac565b8261202f565b611d666123c9565b506040805180820190915281518152602082810190820152919050565b611d8b6123a9565b611d94826121a4565b611d9d57600080fd5b6000611dac83602001516121de565b60208085015160408051808201909152868152920190820152915050919050565b6000611dd76123c9565b505080518051602091820151919092015191011190565b611df66123c9565b611dff82611dcd565b611e0857600080fd5b60208201516000611e1882612241565b80830160209586015260408051808201909152908152938401919091525090919050565b805160009015801590611e5157508151602110155b611e5a57600080fd5b6000611e6983602001516121de565b90508083600001511015611ec4576040805162461bcd60e51b815260206004820152601a60248201527f6c656e677468206973206c657373207468616e206f6666736574000000000000604482015290519081900360640190fd5b825160208085015183018051928490039291831015611eea57826020036101000a820491505b50949350505050565b606081516001148015611f255750607f60f81b82600081518110611f1357fe5b01602001516001600160f81b03191611155b15611f31575080611b2c565b611c93611f438351608060ff166120ac565b8361202f565b604080516020808252818301909252606091829190602082018180368337505050602081018490529050600067ffffffffffffffff198416611f8d57506018611fb1565b6fffffffffffffffffffffffffffffffff198416611fad57506010611fb1565b5060005b6020811015611fe757818181518110611fc657fe5b01602001516001600160f81b03191615611fdf57611fe7565b600101611fb1565b60008160200390506060816040519080825280601f01601f19166020018201604052801561201c576020820181803683370190505b5080830196909652508452509192915050565b6060806040519050835180825260208201818101602087015b81831015612060578051835260209283019201612048565b50855184518101855292509050808201602086015b8183101561208d578051835260209283019201612075565b508651929092011591909101601f01601f191660405250905092915050565b60606801000000000000000083106120fc576040805162461bcd60e51b815260206004820152600e60248201526d696e70757420746f6f206c6f6e6760901b604482015290519081900360640190fd5b604080516001808252818301909252606091602082018180368337019050509050603784116121565782840160f81b8160008151811061213857fe5b60200101906001600160f81b031916908160001a9053509050611c93565b606061216185611f49565b90508381510160370160f81b8260008151811061217a57fe5b60200101906001600160f81b031916908160001a90535061219b828261202f565b95945050505050565b80516000906121b557506000611b2c565b6020820151805160001a9060c08210156121d457600092505050611b2c565b5060019392505050565b8051600090811a60808110156121f8576000915050611b2c565b60b8811080612213575060c08110801590612213575060f881105b15612222576001915050611b2c565b60c08110156122365760b519019050611b2c565b60f519019050611b2c565b8051600090819081
1a608081101561225c576001915061236d565b60b881101561227157607e198101915061236d565b60c08110156122eb57600060b78203600186019550806020036101000a8651049150600181018201935050808310156122e5576040805162461bcd60e51b81526020600482015260116024820152706164646974696f6e206f766572666c6f7760781b604482015290519081900360640190fd5b5061236d565b60f88110156123005760be198101915061236d565b600060f78203600186019550806020036101000a86510491506001810182019350508083101561236b576040805162461bcd60e51b81526020600482015260116024820152706164646974696f6e206f766572666c6f7760781b604482015290519081900360640190fd5b505b5092915050565b604051806060016040528060008152602001600081526020016000151581525090565b60408051602081019091526000815290565b60405180604001604052806123bc6123c9565b8152602001600081525090565b60405180604001604052806000815260200160008152509056fe6c656e677468206f66206d697364656d65616e6f725468726573686f6c64206d69736d61746368746865206d6573736167652073656e646572206d7573742062652076616c696461746f7253657420636f6e7472616374746865206d6573736167652073656e646572206d75737420626520676f7665726e616e636520636f6e74726163746c656e677468206f662066656c6f6e795468726573686f6c64206d69736d61746368746865206d697364656d65616e6f725468726573686f6c64206f7574206f662072616e6765746865206d6573736167652073656e646572206d7573742062652063726f737320636861696e20636f6e7472616374746865206d6573736167652073656e646572206d7573742062652074686520626c6f636b2070726f647563657274686520636f6e7472616374206e6f7420696e69742079657400000000000000a2646970667358221220cc16be27360652de46deaec4be6263ad1d90d5a454d0f34c9ca0f4c36e67819264736f6c63430006040033", + }, + }, + } + } func UpgradeBuildInSystemContract(config *params.ChainConfig, blockNumber *big.Int, statedb *state.IntraBlockState) { @@ -317,12 +368,13 @@ func UpgradeBuildInSystemContract(config *params.ChainConfig, blockNumber *big.I return } var network string - switch config.ChainName { - case networkname.BSCChainName: + switch GenesisHash { + /* Add mainnet genesis hash */ + case params.BSCGenesisHash: network = mainNet - case networkname.ChapelChainName: + case params.ChapelGenesisHash: network = chapelNet - case networkname.RialtoChainName: + case params.RialtoGenesisHash: network = rialtoNet default: network = defaultNet @@ -345,6 +397,10 @@ func UpgradeBuildInSystemContract(config *params.ChainConfig, blockNumber *big.I applySystemContractUpgrade(brunoUpgrade[network], blockNumber, statedb, logger) } + if config.IsOnEuler(blockNumber) { + applySystemContractUpgrade(eulerUpgrade[network], blockNumber, statedb, logger) + } + /* apply other upgrades */ diff --git a/params/chainspecs/bsc.json b/params/chainspecs/bsc.json index 7e8945cabce..96edfaf4d00 100644 --- a/params/chainspecs/bsc.json +++ b/params/chainspecs/bsc.json @@ -15,6 +15,7 @@ "nielsBlock": 0, "mirrorSyncBlock": 5184000, "brunoBlock": 13082000, + "eulerBlock": 18907621, "terminalBlockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", "parlia": { "DBPath": "", diff --git a/params/config.go b/params/config.go index 20007f128b6..6446c63eb19 100644 --- a/params/config.go +++ b/params/config.go @@ -70,7 +70,7 @@ var ( FermionGenesisHash = common.HexToHash("0x0658360d8680ead416900a552b67b84e6d575c7f0ecab3dbe42406f9f8c34c35") BSCGenesisHash = common.HexToHash("0x0d21840abff46b96c84b2ac9e10e4f5cdaeb5693cb665db62a2f3b02d2d57b5b") ChapelGenesisHash = common.HexToHash("0x6d3c66c5357ec91d5c43af47e234a939b22557cbb552dc45bebbceeed90fbe34") - RialtoGenesisHash = common.HexToHash("0xaabe549bfa85c84f7aee9da7010b97453ad686f2c2d8ce00503d1a00c72cad54") + 
RialtoGenesisHash     = common.HexToHash("0xee835a629f9cf5510b48b6ba41d69e0ff7d6ef10f977166ef939db41f59f5501")
 	MumbaiGenesisHash     = common.HexToHash("0x7b66506a9ebdbf30d32b43c5f15a3b1216269a1ec3a75aa3182b86176a2b1ca7")
 	BorMainnetGenesisHash = common.HexToHash("0xa9c28ce2141b56c474f1dc504bee9b01eb1bd7d1a507580d5519d4437a97de1b")
 	BorDevnetGenesisHash  = common.HexToHash("0x5a06b25b0c6530708ea0b98a3409290e39dce6be7f558493aeb6e4b99a172a87")
@@ -247,6 +247,7 @@ type ChainConfig struct {
 	NielsBlock      *big.Int `json:"nielsBlock,omitempty" toml:",omitempty"`      // nielsBlock switch block (nil = no fork, 0 = already activated)
 	MirrorSyncBlock *big.Int `json:"mirrorSyncBlock,omitempty" toml:",omitempty"` // mirrorSyncBlock switch block (nil = no fork, 0 = already activated)
 	BrunoBlock      *big.Int `json:"brunoBlock,omitempty" toml:",omitempty"`      // brunoBlock switch block (nil = no fork, 0 = already activated)
+	EulerBlock      *big.Int `json:"eulerBlock,omitempty" toml:",omitempty"`      // eulerBlock switch block (nil = no fork, 0 = already activated)
 
 	// EIP-3675: Upgrade consensus to Proof-of-Stake
 	TerminalTotalDifficulty *big.Int `json:"terminalTotalDifficulty,omitempty"` // The merge happens when terminal total difficulty is reached
@@ -382,12 +383,13 @@ func (c *ChainConfig) String() string {
 
 	// TODO Covalent: Refactor to more generic approach and potentially introduce tag for "ecosystem" field (Ethereum, BSC, etc.)
 	if c.Consensus == ParliaConsensus {
-		return fmt.Sprintf("{ChainID: %v Ramanujan: %v, Niels: %v, MirrorSync: %v, Bruno: %v, Engine: %v}",
+		return fmt.Sprintf("{ChainID: %v Ramanujan: %v, Niels: %v, MirrorSync: %v, Bruno: %v, Euler: %v, Engine: %v}",
 			c.ChainID,
 			c.RamanujanBlock,
 			c.NielsBlock,
 			c.MirrorSyncBlock,
 			c.BrunoBlock,
+			c.EulerBlock,
 			engine,
 		)
 	}
@@ -511,6 +513,15 @@ func (c *ChainConfig) IsOnBruno(num *big.Int) bool {
 	return configNumEqual(c.BrunoBlock, num)
 }
 
+// IsEuler returns whether num is either equal to the euler fork block or greater.
+func (c *ChainConfig) IsEuler(num *big.Int) bool {
+	return isForked(c.EulerBlock, num.Uint64())
+}
+
+func (c *ChainConfig) IsOnEuler(num *big.Int) bool {
+	return configNumEqual(c.EulerBlock, num)
+}
+
 // IsMuirGlacier returns whether num is either equal to the Muir Glacier (EIP-2384) fork block or greater.
func (c *ChainConfig) IsMuirGlacier(num uint64) bool { return isForked(c.MuirGlacierBlock, num) @@ -588,6 +599,7 @@ func (c *ChainConfig) CheckConfigForkOrder() error { {name: "petersburgBlock", block: c.PetersburgBlock}, {name: "istanbulBlock", block: c.IstanbulBlock}, {name: "muirGlacierBlock", block: c.MuirGlacierBlock, optional: true}, + {name: "eulerBlock", block: c.EulerBlock, optional: true}, {name: "berlinBlock", block: c.BerlinBlock}, {name: "londonBlock", block: c.LondonBlock}, {name: "arrowGlacierBlock", block: c.ArrowGlacierBlock, optional: true}, @@ -664,6 +676,9 @@ func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, head uint64) *ConfigC if isForkIncompatible(c.GrayGlacierBlock, newcfg.GrayGlacierBlock, head) { return newCompatError("Gray Glacier fork block", c.GrayGlacierBlock, newcfg.GrayGlacierBlock) } + if isForkIncompatible(c.EulerBlock, newcfg.EulerBlock, head) { + return newCompatError("Euler fork block", c.EulerBlock, newcfg.EulerBlock) + } return nil } From 2e600316e3ead945456aff0f209eb1edcf3f7018 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Thu, 16 Jun 2022 16:39:56 +0200 Subject: [PATCH 065/136] Used memory batches from Erigon (#4469) * modded summed * deleted * ops --- ethdb/olddb/memory_mutation.go | 396 ------------------------ ethdb/olddb/memory_mutation_cursor.go | 419 -------------------------- ethdb/olddb/memory_mutation_test.go | 128 -------- go.mod | 2 +- go.sum | 4 +- turbo/stages/stageloop.go | 4 +- 6 files changed, 5 insertions(+), 948 deletions(-) delete mode 100644 ethdb/olddb/memory_mutation.go delete mode 100644 ethdb/olddb/memory_mutation_cursor.go delete mode 100644 ethdb/olddb/memory_mutation_test.go diff --git a/ethdb/olddb/memory_mutation.go b/ethdb/olddb/memory_mutation.go deleted file mode 100644 index 4782d877c38..00000000000 --- a/ethdb/olddb/memory_mutation.go +++ /dev/null @@ -1,396 +0,0 @@ -/* - Copyright 2022 Erigon contributors - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package olddb - -import ( - "context" - "encoding/binary" - - "github.com/ledgerwatch/log/v3" - - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/mdbx" - "github.com/ledgerwatch/erigon/ethdb" -) - -type MemoryMutation struct { - // Bucket => Key => Value - memTx kv.RwTx - memDb kv.RwDB - deletedEntries map[string]map[string]struct{} - clearedTables map[string]struct{} - db kv.Tx -} - -// NewBatch - starts in-mem batch -// -// Common pattern: -// -// batch := db.NewBatch() -// defer batch.Rollback() -// ... 
some calculations on `batch` -// batch.Commit() -func NewMemoryBatch(tx kv.Tx) *MemoryMutation { - tmpDB := mdbx.NewMDBX(log.New()).InMem().MustOpen() - memTx, err := tmpDB.BeginRw(context.Background()) - if err != nil { - panic(err) - } - return &MemoryMutation{ - db: tx, - memDb: tmpDB, - memTx: memTx, - deletedEntries: make(map[string]map[string]struct{}), - clearedTables: make(map[string]struct{}), - } -} - -func (m *MemoryMutation) RwKV() kv.RwDB { - if casted, ok := m.db.(ethdb.HasRwKV); ok { - return casted.RwKV() - } - return nil -} - -func (m *MemoryMutation) isTableCleared(table string) bool { - _, ok := m.clearedTables[table] - return ok -} - -func (m *MemoryMutation) isEntryDeleted(table string, key []byte) bool { - _, ok := m.deletedEntries[table] - if !ok { - return ok - } - _, ok = m.deletedEntries[table][string(key)] - return ok -} - -// getMem Retrieve database entry from memory (hashed storage will be left out for now because it is the only non auto-DupSorted table) -func (m *MemoryMutation) getMem(table string, key []byte) ([]byte, bool) { - val, err := m.memTx.GetOne(table, key) - if err != nil { - panic(err) - } - return val, val != nil -} - -func (m *MemoryMutation) DBSize() (uint64, error) { panic("not implemented") } -func (m *MemoryMutation) PageSize() uint64 { panic("not implemented") } - -func (m *MemoryMutation) IncrementSequence(bucket string, amount uint64) (res uint64, err error) { - v, ok := m.getMem(kv.Sequence, []byte(bucket)) - if !ok && m.db != nil { - v, err = m.db.GetOne(kv.Sequence, []byte(bucket)) - if err != nil { - return 0, err - } - } - - var currentV uint64 = 0 - if len(v) > 0 { - currentV = binary.BigEndian.Uint64(v) - } - - newVBytes := make([]byte, 8) - binary.BigEndian.PutUint64(newVBytes, currentV+amount) - if err = m.Put(kv.Sequence, []byte(bucket), newVBytes); err != nil { - return 0, err - } - - return currentV, nil -} - -func (m *MemoryMutation) ReadSequence(bucket string) (res uint64, err error) { - v, ok := m.getMem(kv.Sequence, []byte(bucket)) - if !ok && m.db != nil { - v, err = m.db.GetOne(kv.Sequence, []byte(bucket)) - if err != nil { - return 0, err - } - } - var currentV uint64 = 0 - if len(v) > 0 { - currentV = binary.BigEndian.Uint64(v) - } - - return currentV, nil -} - -// Can only be called from the worker thread -func (m *MemoryMutation) GetOne(table string, key []byte) ([]byte, error) { - if value, ok := m.getMem(table, key); ok { - if value == nil { - return nil, nil - } - return value, nil - } - if m.db != nil && !m.isTableCleared(table) && !m.isEntryDeleted(table, key) { - // TODO: simplify when tx can no longer be parent of mutation - value, err := m.db.GetOne(table, key) - if err != nil { - return nil, err - } - return value, nil - } - return nil, nil -} - -// Can only be called from the worker thread -func (m *MemoryMutation) Get(table string, key []byte) ([]byte, error) { - value, err := m.GetOne(table, key) - if err != nil { - return nil, err - } - - if value == nil { - return nil, ethdb.ErrKeyNotFound - } - - return value, nil -} - -func (m *MemoryMutation) Last(table string) ([]byte, []byte, error) { - panic("not implemented. (MemoryMutation.Last)") -} - -// Has return whether a key is present in a certain table. 
-func (m *MemoryMutation) Has(table string, key []byte) (bool, error) { - if _, ok := m.getMem(table, key); ok { - return ok, nil - } - if m.db != nil { - return m.db.Has(table, key) - } - return false, nil -} - -// Put insert a new entry in the database, if it is hashed storage it will add it to a slice instead of a map. -func (m *MemoryMutation) Put(table string, key []byte, value []byte) error { - return m.memTx.Put(table, key, value) -} - -func (m *MemoryMutation) Append(table string, key []byte, value []byte) error { - return m.Put(table, key, value) -} - -func (m *MemoryMutation) AppendDup(table string, key []byte, value []byte) error { - return m.Put(table, key, value) -} - -func (m *MemoryMutation) BatchSize() int { - return 0 -} - -func (m *MemoryMutation) ForEach(bucket string, fromPrefix []byte, walker func(k, v []byte) error) error { - m.panicOnEmptyDB() - return m.db.ForEach(bucket, fromPrefix, walker) -} - -func (m *MemoryMutation) ForPrefix(bucket string, prefix []byte, walker func(k, v []byte) error) error { - m.panicOnEmptyDB() - return m.db.ForPrefix(bucket, prefix, walker) -} - -func (m *MemoryMutation) ForAmount(bucket string, prefix []byte, amount uint32, walker func(k, v []byte) error) error { - m.panicOnEmptyDB() - return m.db.ForAmount(bucket, prefix, amount, walker) -} - -func (m *MemoryMutation) Delete(table string, k, v []byte) error { - if _, ok := m.deletedEntries[table]; !ok { - m.deletedEntries[table] = make(map[string]struct{}) - } - m.deletedEntries[table][string(k)] = struct{}{} - return m.memTx.Delete(table, k, v) -} - -func (m *MemoryMutation) Commit() error { - return nil -} - -func (m *MemoryMutation) Rollback() { - m.memTx.Rollback() - m.memDb.Close() - return -} - -func (m *MemoryMutation) Close() { - m.Rollback() -} - -func (m *MemoryMutation) Begin(ctx context.Context, flags ethdb.TxFlags) (ethdb.DbWithPendingMutations, error) { - panic("mutation can't start transaction, because doesn't own it") -} - -func (m *MemoryMutation) panicOnEmptyDB() { - if m.db == nil { - panic("Not implemented") - } -} - -func (m *MemoryMutation) SetRwKV(kv kv.RwDB) { - m.db.(ethdb.HasRwKV).SetRwKV(kv) -} - -func (m *MemoryMutation) BucketSize(bucket string) (uint64, error) { - return 0, nil -} - -func (m *MemoryMutation) DropBucket(bucket string) error { - panic("Not implemented") -} - -func (m *MemoryMutation) ExistsBucket(bucket string) (bool, error) { - panic("Not implemented") -} - -func (m *MemoryMutation) ListBuckets() ([]string, error) { - panic("Not implemented") -} - -func (m *MemoryMutation) ClearBucket(bucket string) error { - m.clearedTables[bucket] = struct{}{} - return m.memTx.ClearBucket(bucket) -} - -func (m *MemoryMutation) isBucketCleared(bucket string) bool { - _, ok := m.clearedTables[bucket] - return ok -} - -func (m *MemoryMutation) CollectMetrics() { -} - -func (m *MemoryMutation) CreateBucket(bucket string) error { - panic("Not implemented") -} - -func (m *MemoryMutation) Flush(tx kv.RwTx) error { - // Obtain buckets touched. - buckets, err := m.memTx.ListBuckets() - if err != nil { - return err - } - // Obliterate buckets who are to be deleted - for bucket := range m.clearedTables { - if err := tx.ClearBucket(bucket); err != nil { - return err - } - } - // Obliterate entries who are to be deleted - for bucket, keys := range m.deletedEntries { - for key := range keys { - if err := tx.Delete(bucket, []byte(key), nil); err != nil { - return err - } - } - } - // Iterate over each bucket and apply changes accordingly. 
- for _, bucket := range buckets { - if isTablePurelyDupsort(bucket) { - cbucket, err := m.memTx.CursorDupSort(bucket) - if err != nil { - return err - } - defer cbucket.Close() - dbCursor, err := tx.RwCursorDupSort(bucket) - if err != nil { - return err - } - defer dbCursor.Close() - for k, v, err := cbucket.First(); k != nil; k, v, err = cbucket.Next() { - if err != nil { - return err - } - if err := dbCursor.Put(k, v); err != nil { - return err - } - } - } else { - cbucket, err := m.memTx.Cursor(bucket) - if err != nil { - return err - } - defer cbucket.Close() - for k, v, err := cbucket.First(); k != nil; k, v, err = cbucket.Next() { - if err != nil { - return err - } - if err := tx.Put(bucket, k, v); err != nil { - return err - } - } - } - } - return nil -} - -// Check if a bucket is dupsorted and has dupsort conversion off -func isTablePurelyDupsort(bucket string) bool { - config, ok := kv.ChaindataTablesCfg[bucket] - // If we do not have the configuration we assume it is not dupsorted - if !ok { - return false - } - return !config.AutoDupSortKeysConversion && config.Flags == kv.DupSort -} - -// Cursor creates a new cursor (the real fun begins here) -func (m *MemoryMutation) makeCursor(bucket string) (kv.RwCursorDupSort, error) { - c := &memoryMutationCursor{} - // We can filter duplicates in dup sorted table - c.table = bucket - - var err error - // Initialize db cursors - c.dupCursor, err = m.db.CursorDupSort(bucket) - if err != nil { - return nil, err - } - c.cursor = c.dupCursor - // Initialize memory cursors - c.memDupCursor, err = m.memTx.RwCursorDupSort(bucket) - if err != nil { - return nil, err - } - c.memCursor = c.memDupCursor - c.mutation = m - return c, err -} - -// Cursor creates a new cursor (the real fun begins here) -func (m *MemoryMutation) RwCursorDupSort(bucket string) (kv.RwCursorDupSort, error) { - return m.makeCursor(bucket) -} - -// Cursor creates a new cursor (the real fun begins here) -func (m *MemoryMutation) RwCursor(bucket string) (kv.RwCursor, error) { - return m.makeCursor(bucket) -} - -// Cursor creates a new cursor (the real fun begins here) -func (m *MemoryMutation) CursorDupSort(bucket string) (kv.CursorDupSort, error) { - return m.makeCursor(bucket) -} - -// Cursor creates a new cursor (the real fun begins here) -func (m *MemoryMutation) Cursor(bucket string) (kv.Cursor, error) { - return m.makeCursor(bucket) -} - -// ViewID creates a new cursor (the real fun begins here) -func (m *MemoryMutation) ViewID() uint64 { - panic("ViewID Not implemented") -} diff --git a/ethdb/olddb/memory_mutation_cursor.go b/ethdb/olddb/memory_mutation_cursor.go deleted file mode 100644 index f1bd19251c5..00000000000 --- a/ethdb/olddb/memory_mutation_cursor.go +++ /dev/null @@ -1,419 +0,0 @@ -/* - Copyright 2022 Erigon contributors - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package olddb - -import ( - "bytes" - - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/common" -) - -// entry for the cursor -type cursorentry struct { - key []byte - value []byte -} - -// cursor -type memoryMutationCursor struct { - // we can keep one cursor type if we store 2 of each kind. - cursor kv.Cursor - dupCursor kv.CursorDupSort - // Mem cursors - memCursor kv.RwCursor - memDupCursor kv.RwCursorDupSort - // we keep the index in the slice of pairs we are at. - isPrevFromDb bool - // entry history - currentPair cursorentry - currentDbEntry cursorentry - currentMemEntry cursorentry - // we keep the mining mutation so that we can insert new elements in db - mutation *MemoryMutation - table string -} - -// First move cursor to first position and return key and value accordingly. -func (m *memoryMutationCursor) First() ([]byte, []byte, error) { - memKey, memValue, err := m.memCursor.First() - if err != nil { - return nil, nil, err - } - - dbKey, dbValue, err := m.cursor.First() - if err != nil { - return nil, nil, err - } - - if dbKey != nil && m.mutation.isEntryDeleted(m.table, dbKey) { - if dbKey, dbValue, err = m.getNextOnDb(false); err != nil { - return nil, nil, err - } - } - - return m.goForward(memKey, memValue, dbKey, dbValue, false) -} - -func (m *memoryMutationCursor) getNextOnDb(dup bool) (key []byte, value []byte, err error) { - if dup { - key, value, err = m.dupCursor.NextDup() - if err != nil { - return - } - } else { - key, value, err = m.cursor.Next() - if err != nil { - return - } - } - - for key != nil && value != nil && m.mutation.isEntryDeleted(m.table, m.convertAutoDupsort(key, value)) { - if dup { - key, value, err = m.dupCursor.NextDup() - if err != nil { - return - } - } else { - key, value, err = m.cursor.Next() - if err != nil { - return - } - } - } - return -} - -func (m *memoryMutationCursor) convertAutoDupsort(key []byte, value []byte) []byte { - config, ok := kv.ChaindataTablesCfg[m.table] - // If we do not have the configuration we assume it is not dupsorted - if !ok || !config.AutoDupSortKeysConversion { - return key - } - if len(key) != config.DupToLen { - return key - } - return append(key, value[:config.DupFromLen-config.DupToLen]...) -} - -// Current return the current key and values the cursor is on. 
-func (m *memoryMutationCursor) Current() ([]byte, []byte, error) { - return common.CopyBytes(m.currentPair.key), common.CopyBytes(m.currentPair.value), nil -} - -func (m *memoryMutationCursor) skipIntersection(memKey, memValue, dbKey, dbValue []byte, dup bool) (newDbKey []byte, newDbValue []byte, err error) { - newDbKey = dbKey - newDbValue = dbValue - config, ok := kv.ChaindataTablesCfg[m.table] - dupsortOffset := 0 - if ok && config.AutoDupSortKeysConversion { - dupsortOffset = config.DupFromLen - config.DupToLen - } - // Check for duplicates - if bytes.Compare(memKey, dbKey) == 0 { - if !dup { - if newDbKey, newDbValue, err = m.getNextOnDb(dup); err != nil { - return - } - } else if bytes.Compare(memValue, dbValue) == 0 { - if newDbKey, newDbValue, err = m.getNextOnDb(dup); err != nil { - return - } - } else if dupsortOffset != 0 && len(memValue) >= dupsortOffset && len(dbValue) >= dupsortOffset && bytes.Compare(memValue[:dupsortOffset], dbValue[:dupsortOffset]) == 0 { - if newDbKey, newDbValue, err = m.getNextOnDb(dup); err != nil { - return - } - } - } - return -} - -func (m *memoryMutationCursor) goForward(memKey, memValue, dbKey, dbValue []byte, dup bool) ([]byte, []byte, error) { - var err error - if memValue == nil && dbValue == nil { - return nil, nil, nil - } - - dbKey, dbValue, err = m.skipIntersection(memKey, memValue, dbKey, dbValue, dup) - if err != nil { - return nil, nil, err - } - - m.currentDbEntry = cursorentry{dbKey, dbValue} - m.currentMemEntry = cursorentry{memKey, memValue} - // compare entries - if bytes.Compare(memKey, dbKey) == 0 { - m.isPrevFromDb = dbValue != nil && (memValue == nil || bytes.Compare(memValue, dbValue) > 0) - } else { - m.isPrevFromDb = dbValue != nil && (memKey == nil || bytes.Compare(memKey, dbKey) > 0) - } - if dbValue == nil { - m.currentDbEntry = cursorentry{} - } - if memValue == nil { - m.currentMemEntry = cursorentry{} - } - if m.isPrevFromDb { - m.currentPair = cursorentry{dbKey, dbValue} - return dbKey, dbValue, nil - } - - m.currentPair = cursorentry{memKey, memValue} - return memKey, memValue, nil -} - -// Next returns the next element of the mutation. -func (m *memoryMutationCursor) Next() ([]byte, []byte, error) { - if m.isPrevFromDb { - k, v, err := m.getNextOnDb(false) - if err != nil { - return nil, nil, err - } - return m.goForward(m.currentMemEntry.key, m.currentMemEntry.value, k, v, false) - } - - memK, memV, err := m.memCursor.Next() - if err != nil { - return nil, nil, err - } - - return m.goForward(memK, memV, m.currentDbEntry.key, m.currentDbEntry.value, false) -} - -// NextDup returns the next element of the mutation. -func (m *memoryMutationCursor) NextDup() ([]byte, []byte, error) { - if m.isPrevFromDb { - k, v, err := m.getNextOnDb(true) - - if err != nil { - return nil, nil, err - } - return m.goForward(m.currentMemEntry.key, m.currentMemEntry.value, k, v, true) - } - - memK, memV, err := m.memDupCursor.NextDup() - if err != nil { - return nil, nil, err - } - - return m.goForward(memK, memV, m.currentDbEntry.key, m.currentDbEntry.value, true) -} - -// Seek move pointer to a key at a certain position. 
-func (m *memoryMutationCursor) Seek(seek []byte) ([]byte, []byte, error) { - dbKey, dbValue, err := m.cursor.Seek(seek) - if err != nil { - return nil, nil, err - } - - // If the entry is marked as DB find one that is not - if dbKey != nil && m.mutation.isEntryDeleted(m.table, dbKey) { - dbKey, dbValue, err = m.getNextOnDb(false) - if err != nil { - return nil, nil, err - } - } - - memKey, memValue, err := m.memCursor.Seek(seek) - if err != nil { - return nil, nil, err - } - return m.goForward(memKey, memValue, dbKey, dbValue, false) -} - -// Seek move pointer to a key at a certain position. -func (m *memoryMutationCursor) SeekExact(seek []byte) ([]byte, []byte, error) { - memKey, memValue, err := m.memCursor.SeekExact(seek) - if err != nil { - return nil, nil, err - } - - if memKey != nil { - m.currentMemEntry.key = memKey - m.currentMemEntry.value = memValue - m.currentDbEntry.key, m.currentDbEntry.value, err = m.cursor.Seek(seek) - m.isPrevFromDb = false - m.currentPair = cursorentry{memKey, memValue} - return memKey, memValue, err - } - - dbKey, dbValue, err := m.cursor.SeekExact(seek) - if err != nil { - return nil, nil, err - } - - if dbKey != nil && !m.mutation.isEntryDeleted(m.table, seek) { - m.currentDbEntry.key = dbKey - m.currentDbEntry.value = dbValue - m.currentMemEntry.key, m.currentMemEntry.value, err = m.memCursor.Seek(seek) - m.isPrevFromDb = true - m.currentPair = cursorentry{dbKey, dbValue} - return dbKey, dbValue, err - } - return nil, nil, nil -} - -func (m *memoryMutationCursor) Put(k, v []byte) error { - return m.mutation.Put(m.table, common.CopyBytes(k), common.CopyBytes(v)) -} - -func (m *memoryMutationCursor) Append(k []byte, v []byte) error { - return m.mutation.Put(m.table, common.CopyBytes(k), common.CopyBytes(v)) - -} - -func (m *memoryMutationCursor) AppendDup(k []byte, v []byte) error { - return m.memDupCursor.AppendDup(common.CopyBytes(k), common.CopyBytes(v)) -} - -func (m *memoryMutationCursor) PutNoDupData(key, value []byte) error { - panic("DeleteCurrentDuplicates Not implemented") -} - -func (m *memoryMutationCursor) Delete(k, v []byte) error { - return m.mutation.Delete(m.table, k, v) -} - -func (m *memoryMutationCursor) DeleteCurrent() error { - panic("DeleteCurrent Not implemented") -} - -func (m *memoryMutationCursor) DeleteCurrentDuplicates() error { - panic("DeleteCurrentDuplicates Not implemented") -} - -// Seek move pointer to a key at a certain position. -func (m *memoryMutationCursor) SeekBothRange(key, value []byte) ([]byte, error) { - if value == nil { - _, v, err := m.SeekExact(key) - return v, err - } - - dbValue, err := m.dupCursor.SeekBothRange(key, value) - if err != nil { - return nil, err - } - - if dbValue != nil && m.mutation.isEntryDeleted(m.table, m.convertAutoDupsort(key, dbValue)) { - _, dbValue, err = m.getNextOnDb(true) - if err != nil { - return nil, err - } - } - - memValue, err := m.memDupCursor.SeekBothRange(key, value) - if err != nil { - return nil, err - } - _, retValue, err := m.goForward(key, memValue, key, dbValue, true) - return retValue, err -} - -func (m *memoryMutationCursor) Last() ([]byte, []byte, error) { - // TODO(Giulio2002): make fixes. 
- memKey, memValue, err := m.memCursor.Last() - if err != nil { - return nil, nil, err - } - - dbKey, dbValue, err := m.cursor.Last() - if err != nil { - return nil, nil, err - } - - dbKey, dbValue, err = m.skipIntersection(memKey, memValue, dbKey, dbValue, false) - if err != nil { - return nil, nil, err - } - - m.currentDbEntry = cursorentry{dbKey, dbValue} - m.currentMemEntry = cursorentry{memKey, memValue} - - // Basic checks - if dbKey != nil && m.mutation.isEntryDeleted(m.table, dbKey) { - m.currentDbEntry = cursorentry{} - m.isPrevFromDb = false - return memKey, memValue, nil - } - - if dbValue == nil { - m.isPrevFromDb = false - return memKey, memValue, nil - } - - if memValue == nil { - m.isPrevFromDb = true - return dbKey, dbValue, nil - } - // Check which one is last and return it - keyCompare := bytes.Compare(memKey, dbKey) - if keyCompare == 0 { - if bytes.Compare(memValue, dbValue) > 0 { - m.currentDbEntry = cursorentry{} - m.isPrevFromDb = false - return memKey, memValue, nil - } - m.currentMemEntry = cursorentry{} - m.isPrevFromDb = true - return dbKey, dbValue, nil - } - - if keyCompare > 0 { - m.currentDbEntry = cursorentry{} - m.isPrevFromDb = false - return memKey, memValue, nil - } - - m.currentMemEntry = cursorentry{} - m.isPrevFromDb = true - return dbKey, dbValue, nil -} - -func (m *memoryMutationCursor) Prev() ([]byte, []byte, error) { - panic("Prev is not implemented!") -} - -func (m *memoryMutationCursor) Close() { - if m.cursor != nil { - m.cursor.Close() - } - if m.memCursor != nil { - m.memCursor.Close() - } - return -} - -func (m *memoryMutationCursor) Count() (uint64, error) { - panic("Not implemented") -} - -func (m *memoryMutationCursor) FirstDup() ([]byte, error) { - panic("Not implemented") -} - -func (m *memoryMutationCursor) NextNoDup() ([]byte, []byte, error) { - panic("Not implemented") -} - -func (m *memoryMutationCursor) LastDup() ([]byte, error) { - panic("Not implemented") -} - -func (m *memoryMutationCursor) CountDuplicates() (uint64, error) { - panic("Not implemented") -} - -func (m *memoryMutationCursor) SeekBothExact(key, value []byte) ([]byte, []byte, error) { - panic("SeekBothExact Not implemented") -} diff --git a/ethdb/olddb/memory_mutation_test.go b/ethdb/olddb/memory_mutation_test.go deleted file mode 100644 index 3f96479a2ce..00000000000 --- a/ethdb/olddb/memory_mutation_test.go +++ /dev/null @@ -1,128 +0,0 @@ -/* - Copyright 2022 Erigon contributors - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package olddb - -import ( - "context" - "testing" - - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/memdb" - "github.com/stretchr/testify/require" -) - -func initializeDB(rwTx kv.RwTx) { - rwTx.Put(kv.HashedAccounts, []byte("AAAA"), []byte("value")) - rwTx.Put(kv.HashedAccounts, []byte("CAAA"), []byte("value1")) - rwTx.Put(kv.HashedAccounts, []byte("CBAA"), []byte("value2")) - rwTx.Put(kv.HashedAccounts, []byte("CCAA"), []byte("value3")) -} - -func TestLastMiningDB(t *testing.T) { - rwTx, err := memdb.New().BeginRw(context.Background()) - require.NoError(t, err) - - initializeDB(rwTx) - - batch := NewMemoryBatch(rwTx) - batch.Put(kv.HashedAccounts, []byte("BAAA"), []byte("value4")) - batch.Put(kv.HashedAccounts, []byte("BCAA"), []byte("value5")) - - cursor, err := batch.Cursor(kv.HashedAccounts) - require.NoError(t, err) - - key, value, err := cursor.Last() - require.NoError(t, err) - - require.Equal(t, key, []byte("CCAA")) - require.Equal(t, value, []byte("value3")) - - key, value, err = cursor.Next() - require.NoError(t, err) - require.Equal(t, key, []byte(nil)) - require.Equal(t, value, []byte(nil)) -} - -func TestLastMiningMem(t *testing.T) { - rwTx, err := memdb.New().BeginRw(context.Background()) - require.NoError(t, err) - - initializeDB(rwTx) - - batch := NewMemoryBatch(rwTx) - batch.Put(kv.HashedAccounts, []byte("BAAA"), []byte("value4")) - batch.Put(kv.HashedAccounts, []byte("DCAA"), []byte("value5")) - - cursor, err := batch.Cursor(kv.HashedAccounts) - require.NoError(t, err) - - key, value, err := cursor.Last() - require.NoError(t, err) - - require.Equal(t, key, []byte("DCAA")) - require.Equal(t, value, []byte("value5")) - - key, value, err = cursor.Next() - require.NoError(t, err) - require.Equal(t, key, []byte(nil)) - require.Equal(t, value, []byte(nil)) -} - -func TestDeleteMining(t *testing.T) { - rwTx, err := memdb.New().BeginRw(context.Background()) - require.NoError(t, err) - - initializeDB(rwTx) - batch := NewMemoryBatch(rwTx) - batch.Put(kv.HashedAccounts, []byte("BAAA"), []byte("value4")) - batch.Put(kv.HashedAccounts, []byte("DCAA"), []byte("value5")) - batch.Put(kv.HashedAccounts, []byte("FCAA"), []byte("value5")) - - batch.Delete(kv.HashedAccounts, []byte("BAAA"), nil) - batch.Delete(kv.HashedAccounts, []byte("CBAA"), nil) - - cursor, err := batch.Cursor(kv.HashedAccounts) - require.NoError(t, err) - - key, value, err := cursor.SeekExact([]byte("BAAA")) - require.NoError(t, err) - require.Equal(t, key, []byte(nil)) - require.Equal(t, value, []byte(nil)) - - key, value, err = cursor.SeekExact([]byte("CBAA")) - require.NoError(t, err) - require.Equal(t, key, []byte(nil)) - require.Equal(t, value, []byte(nil)) -} - -func TestFlush(t *testing.T) { - rwTx, err := memdb.New().BeginRw(context.Background()) - require.NoError(t, err) - - initializeDB(rwTx) - batch := NewMemoryBatch(rwTx) - batch.Put(kv.HashedAccounts, []byte("BAAA"), []byte("value4")) - batch.Put(kv.HashedAccounts, []byte("AAAA"), []byte("value5")) - batch.Put(kv.HashedAccounts, []byte("FCAA"), []byte("value5")) - - require.NoError(t, batch.Flush(rwTx)) - - value, err := rwTx.GetOne(kv.HashedAccounts, []byte("BAAA")) - require.NoError(t, err) - require.Equal(t, value, []byte("value4")) - - value, err = rwTx.GetOne(kv.HashedAccounts, []byte("AAAA")) - require.NoError(t, err) - require.Equal(t, value, []byte("value5")) -} diff --git a/go.mod b/go.mod index 6c7105be1a6..dabeb4f6210 100644 --- a/go.mod +++ b/go.mod @@ -34,7 +34,7 @@ require ( github.com/json-iterator/go 
v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible - github.com/ledgerwatch/erigon-lib v0.0.0-20220614213818-bbf96d05808e + github.com/ledgerwatch/erigon-lib v0.0.0-20220616070148-280c5e9dcc82 github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 diff --git a/go.sum b/go.sum index 9e777215db2..db9d39793e9 100644 --- a/go.sum +++ b/go.sum @@ -382,8 +382,8 @@ github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3P github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220614213818-bbf96d05808e h1:A4ozqGgOHe4D67icNKt721OFm6ZR1q2MjRp/dFBsDms= -github.com/ledgerwatch/erigon-lib v0.0.0-20220614213818-bbf96d05808e/go.mod h1:SOwq7m9Wm7ckQ+kxUwDYRchwuwO8lXhp1lhbLTUhMk8= +github.com/ledgerwatch/erigon-lib v0.0.0-20220616070148-280c5e9dcc82 h1:8MOS6AJudtu+RKx1FtCNdsHDsGfb1e1lpepEgCRSIu4= +github.com/ledgerwatch/erigon-lib v0.0.0-20220616070148-280c5e9dcc82/go.mod h1:SOwq7m9Wm7ckQ+kxUwDYRchwuwO8lXhp1lhbLTUhMk8= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= diff --git a/turbo/stages/stageloop.go b/turbo/stages/stageloop.go index 50d442baf54..05915ca70c6 100644 --- a/turbo/stages/stageloop.go +++ b/turbo/stages/stageloop.go @@ -13,6 +13,7 @@ import ( proto_downloader "github.com/ledgerwatch/erigon-lib/gointerfaces/downloader" "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/memdb" "github.com/ledgerwatch/erigon/cmd/sentry/sentry" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/consensus/misc" @@ -22,7 +23,6 @@ import ( "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/ethdb/olddb" "github.com/ledgerwatch/erigon/ethdb/privateapi" "github.com/ledgerwatch/erigon/p2p" "github.com/ledgerwatch/erigon/turbo/services" @@ -225,7 +225,7 @@ func MiningStep(ctx context.Context, kv kv.RwDB, mining *stagedsync.Sync) (err e } defer tx.Rollback() - miningBatch := olddb.NewMemoryBatch(tx) + miningBatch := memdb.NewMemoryBatch(tx) defer miningBatch.Rollback() if err = mining.Run(nil, miningBatch, false); err != nil { From a738a0dd5102c3ded2d4c8087e0755eed1bfa8ca Mon Sep 17 00:00:00 2001 From: 3nprob <74199244+3nprob@users.noreply.github.com> Date: Thu, 16 Jun 2022 14:50:59 +0000 Subject: [PATCH 066/136] makefile: Add DOCKER_{UID,GID,TAG} make params (#4457) Co-authored-by: 3np <3np@example.com> --- Dockerfile | 4 +++- Makefile | 7 ++++++- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 1f913496e97..cd79ce62baa 100644 --- a/Dockerfile +++ b/Dockerfile @@ -16,7 +16,9 @@ FROM docker.io/library/alpine:3.15 RUN apk add --no-cache ca-certificates libstdc++ tzdata COPY --from=builder /app/build/bin/* /usr/local/bin/ -RUN adduser -H -u 1000 -g 1000 -D erigon +ARG PUID=1000 +ARG PGID=1000 +RUN adduser -H 
-u ${PUID} -g ${PGID} -D erigon RUN mkdir -p /home/erigon RUN mkdir -p /home/erigon/.local/share/erigon RUN chown -R erigon:erigon /home/erigon diff --git a/Makefile b/Makefile index 51104b84947..7a0dd3d99d5 100644 --- a/Makefile +++ b/Makefile @@ -4,6 +4,9 @@ GOBIN = $(CURDIR)/build/bin GIT_COMMIT ?= $(shell git rev-list -1 HEAD) GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD) GIT_TAG ?= $(shell git describe --tags '--match=v*' --dirty) +DOCKER_UID ?= 1000 +DOCKER_PID ?= 1000 +DOCKER_TAG ?= thorax/erigon:latest CGO_CFLAGS := $(shell $(GO) env CGO_CFLAGS) # don't loose default CGO_CFLAGS += -DMDBX_FORCE_ASSERTIONS=1 # Enable MDBX's asserts by default in 'devel' branch and disable in 'stable' @@ -30,10 +33,12 @@ go-version: fi docker: git-submodules - DOCKER_BUILDKIT=1 docker build -t thorax/erigon:latest \ + DOCKER_BUILDKIT=1 docker build -t ${DOCKER_TAG} \ --build-arg "BUILD_DATE=$(shell date -Iseconds)" \ --build-arg VCS_REF=${GIT_COMMIT} \ --build-arg VERSION=${GIT_TAG} \ + --build-arg PUID=${DOCKER_UID} \ + --build-arg PGID=${DOCKER_PID} \ ${DOCKER_FLAGS} \ . From b1572a15d5892351fe8f5de5d878921d4bb6d1f0 Mon Sep 17 00:00:00 2001 From: ledgerwatch Date: Thu, 16 Jun 2022 16:07:09 +0100 Subject: [PATCH 067/136] No reorgs when fork choice head points to a canonical header (#4466) * No reorgs when fork choice head points to a canonical header * Return immediately * Fix sentry Co-authored-by: Alexey Sharp --- cmd/sentry/sentry/sentry_grpc_server.go | 20 +++++++----- eth/stagedsync/stage_bodies.go | 2 +- eth/stagedsync/stage_headers.go | 42 ++++++++++++++++++++++--- turbo/stages/stageloop.go | 16 +++++++++- 4 files changed, 67 insertions(+), 13 deletions(-) diff --git a/cmd/sentry/sentry/sentry_grpc_server.go b/cmd/sentry/sentry/sentry_grpc_server.go index ab6583a2ce5..098c77693b6 100644 --- a/cmd/sentry/sentry/sentry_grpc_server.go +++ b/cmd/sentry/sentry/sentry_grpc_server.go @@ -703,16 +703,22 @@ func (ss *GrpcServer) SendMessageByMinBlock(_ context.Context, inreq *proto_sent return reply, fmt.Errorf("sendMessageByMinBlock not implemented for message Id: %s", inreq.Data.Id) } - var lastErr error - for retry := 0; retry < 16 && len(reply.Peers) == 0; retry++ { // limit number of retries - peerInfo, found := ss.findPeer(inreq.MinBlock) - if !found { - break - } + peerInfo, found := ss.findPeer(inreq.MinBlock) + if found { ss.writePeer("sendMessageByMinBlock", peerInfo, msgcode, inreq.Data.Data, 30*time.Second) reply.Peers = []*proto_types.H512{gointerfaces.ConvertHashToH512(peerInfo.ID())} + } else { + // If peer with specified minBlock is not found, send to 2 random peers + i := 0 + sendToAmount := 2 + ss.rangePeers(func(peerInfo *PeerInfo) bool { + ss.writePeer("sendMessageByMinBlock", peerInfo, msgcode, inreq.Data.Data, 0) + reply.Peers = append(reply.Peers, gointerfaces.ConvertHashToH512(peerInfo.ID())) + i++ + return i < sendToAmount + }) } - return reply, lastErr + return reply, nil } func (ss *GrpcServer) SendMessageById(_ context.Context, inreq *proto_sentry.SendMessageByIdRequest) (*proto_sentry.SentPeers, error) { diff --git a/eth/stagedsync/stage_bodies.go b/eth/stagedsync/stage_bodies.go index 247e0b04f7a..86412f711a3 100644 --- a/eth/stagedsync/stage_bodies.go +++ b/eth/stagedsync/stage_bodies.go @@ -93,7 +93,7 @@ func BodiesForward( return err } bodyProgress = s.BlockNumber - if bodyProgress == headerProgress { + if bodyProgress >= headerProgress { return nil } diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 284fd14a03a..e06703d8e90 
100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -293,9 +293,13 @@ func startHandlingForkChoice( return nil } - header, err := rawdb.ReadHeaderByHash(tx, headerHash) + // Header itself may already be in the snapshots, if CL starts off at much ealier state than Erigon + header, err := headerReader.HeaderByHash(ctx, tx, headerHash) if err != nil { - log.Warn(fmt.Sprintf("[%s] Fork choice err", s.LogPrefix()), "err", err) + return err + } + if err != nil { + log.Warn(fmt.Sprintf("[%s] Fork choice err (reading header by hash %x)", s.LogPrefix(), headerHash), "err", err) cfg.hd.BeaconRequestList.Remove(requestId) if requestStatus == engineapi.New { cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{CriticalError: err} @@ -304,7 +308,7 @@ func startHandlingForkChoice( } if header == nil { - log.Info(fmt.Sprintf("[%s] Fork choice missing header", s.LogPrefix())) + log.Info(fmt.Sprintf("[%s] Fork choice missing header with hash %x", s.LogPrefix(), headerHash)) hashToDownload := headerHash cfg.hd.SetPoSDownloaderTip(headerHash) schedulePoSDownload(requestStatus, requestId, hashToDownload, 0 /* header height is unknown, setting to 0 */, s, cfg) @@ -314,6 +318,36 @@ func startHandlingForkChoice( cfg.hd.BeaconRequestList.Remove(requestId) headerNumber := header.Number.Uint64() + // If header is canononical, then no reorgs are required + canonicalHash, err := rawdb.ReadCanonicalHash(tx, headerNumber) + if err != nil { + log.Warn(fmt.Sprintf("[%s] Fork choice err (reading canonical hash of %d)", s.LogPrefix(), headerNumber), "err", err) + cfg.hd.BeaconRequestList.Remove(requestId) + if requestStatus == engineapi.New { + cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{CriticalError: err} + } + return err + } + if headerHash == canonicalHash { + log.Info(fmt.Sprintf("[%s] Fork choice on previously known block", s.LogPrefix())) + cfg.hd.BeaconRequestList.Remove(requestId) + rawdb.WriteForkchoiceHead(tx, forkChoice.HeadBlockHash) + canonical, err := safeAndFinalizedBlocksAreCanonical(forkChoice, s, tx, cfg, requestStatus == engineapi.New) + if err != nil { + log.Warn(fmt.Sprintf("[%s] Fork choice err", s.LogPrefix()), "err", err) + if requestStatus == engineapi.New { + cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{CriticalError: err} + } + return err + } + if canonical && requestStatus == engineapi.New { + cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{ + Status: remote.EngineStatus_VALID, + LatestValidHash: headerHash, + } + } + return nil + } cfg.hd.UpdateTopSeenHeightPoS(headerNumber) forkingPoint := uint64(0) @@ -425,7 +459,7 @@ func handleNewPayload( } if existingCanonicalHash != (common.Hash{}) && headerHash == existingCanonicalHash { - log.Info(fmt.Sprintf("[%s] New payload: previously received valid header", s.LogPrefix())) + log.Info(fmt.Sprintf("[%s] New payload: previously received valid header %d", s.LogPrefix(), headerNumber)) cfg.hd.BeaconRequestList.Remove(requestId) if requestStatus == engineapi.New { cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{ diff --git a/turbo/stages/stageloop.go b/turbo/stages/stageloop.go index 05915ca70c6..376ee88dd2b 100644 --- a/turbo/stages/stageloop.go +++ b/turbo/stages/stageloop.go @@ -262,7 +262,21 @@ func NewStagedSync( isBor := controlServer.ChainConfig.Bor != nil return stagedsync.New( stagedsync.DefaultStages(ctx, cfg.Prune, - stagedsync.StageHeadersCfg(db, controlServer.Hd, controlServer.Bd, *controlServer.ChainConfig, controlServer.SendHeaderRequest, controlServer.PropagateNewBlockHashes, 
controlServer.Penalize, cfg.BatchSize, p2pCfg.NoDiscovery, snapshots, snapshotDownloader, blockReader, tmpdir, notifications.Events), + stagedsync.StageHeadersCfg( + db, + controlServer.Hd, + controlServer.Bd, + *controlServer.ChainConfig, + controlServer.SendHeaderRequest, + controlServer.PropagateNewBlockHashes, + controlServer.Penalize, + cfg.BatchSize, + p2pCfg.NoDiscovery, + snapshots, + snapshotDownloader, + blockReader, + tmpdir, + notifications.Events), stagedsync.StageCumulativeIndexCfg(db), stagedsync.StageBlockHashesCfg(db, tmpdir, controlServer.ChainConfig), stagedsync.StageBodiesCfg( From 45b374a6d8858cab9b0d784fa2a3539a320dbb04 Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Thu, 16 Jun 2022 18:39:36 +0200 Subject: [PATCH 068/136] Enable --http by default (#4470) --- cmd/utils/flags.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 77abd5aac0c..7d109496e70 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -306,9 +306,9 @@ var ( Name: "ipcpath", Usage: "Filename for IPC socket/pipe within the datadir (explicit paths escape it)", } - HTTPEnabledFlag = cli.BoolFlag{ + HTTPEnabledFlag = cli.BoolTFlag{ Name: "http", - Usage: "Disabled by default. Use --http to enable the HTTP-RPC server", + Usage: "HTTP-RPC server (enabled by default). Use --http false to disable it", } HTTPListenAddrFlag = cli.StringFlag{ Name: "http.addr", From 70b41f5dc5bab916ef5067182ba76ce436f0a17d Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Fri, 17 Jun 2022 13:47:17 +0200 Subject: [PATCH 069/136] Small readjustment of FCU logs (#4473) --- eth/stagedsync/stage_headers.go | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index e06703d8e90..09d608ee22b 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -254,11 +254,11 @@ func startHandlingForkChoice( headerReader services.HeaderReader, ) error { headerHash := forkChoice.HeadBlockHash - log.Info(fmt.Sprintf("[%s] Handling fork choice", s.LogPrefix()), "headerHash", headerHash) + log.Debug(fmt.Sprintf("[%s] Handling fork choice", s.LogPrefix()), "headerHash", headerHash) currentHeadHash := rawdb.ReadHeadHeaderHash(tx) if currentHeadHash == headerHash { // no-op - log.Info(fmt.Sprintf("[%s] Fork choice no-op", s.LogPrefix())) + log.Debug(fmt.Sprintf("[%s] Fork choice no-op", s.LogPrefix())) cfg.hd.BeaconRequestList.Remove(requestId) rawdb.WriteForkchoiceHead(tx, forkChoice.HeadBlockHash) canonical, err := safeAndFinalizedBlocksAreCanonical(forkChoice, s, tx, cfg, requestStatus == engineapi.New) @@ -280,7 +280,7 @@ func startHandlingForkChoice( bad, lastValidHash := cfg.hd.IsBadHeaderPoS(headerHash) if bad { - log.Info(fmt.Sprintf("[%s] Fork choice bad head block", s.LogPrefix()), "headerHash", headerHash) + log.Warn(fmt.Sprintf("[%s] Fork choice bad head block", s.LogPrefix()), "headerHash", headerHash) cfg.hd.BeaconRequestList.Remove(requestId) if requestStatus == engineapi.New { cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{ @@ -293,7 +293,7 @@ func startHandlingForkChoice( return nil } - // Header itself may already be in the snapshots, if CL starts off at much ealier state than Erigon + // Header itself may already be in the snapshots, if CL starts off at much earlier state than Erigon header, err := headerReader.HeaderByHash(ctx, tx, 
headerHash) if err != nil { return err @@ -318,7 +318,7 @@ func startHandlingForkChoice( cfg.hd.BeaconRequestList.Remove(requestId) headerNumber := header.Number.Uint64() - // If header is canononical, then no reorgs are required + // If header is canonical, then no reorgs are required canonicalHash, err := rawdb.ReadCanonicalHash(tx, headerNumber) if err != nil { log.Warn(fmt.Sprintf("[%s] Fork choice err (reading canonical hash of %d)", s.LogPrefix(), headerNumber), "err", err) @@ -348,6 +348,7 @@ func startHandlingForkChoice( } return nil } + cfg.hd.UpdateTopSeenHeightPoS(headerNumber) forkingPoint := uint64(0) @@ -365,20 +366,18 @@ func startHandlingForkChoice( } } + log.Info(fmt.Sprintf("[%s] Fork choice re-org", s.LogPrefix()), "headerNumber", headerNumber, "forkingPoint", forkingPoint) + if requestStatus == engineapi.New { if headerNumber-forkingPoint <= ShortPoSReorgThresholdBlocks { - log.Info(fmt.Sprintf("[%s] Short range re-org", s.LogPrefix()), "headerNumber", headerNumber, "forkingPoint", forkingPoint) // TODO(yperbasis): what if some bodies are missing and we have to download them? cfg.hd.SetPendingPayloadStatus(headerHash) } else { - log.Info(fmt.Sprintf("[%s] Long range re-org", s.LogPrefix()), "headerNumber", headerNumber, "forkingPoint", forkingPoint) cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{Status: remote.EngineStatus_SYNCING} } } - log.Trace(fmt.Sprintf("[%s] Fork choice beginning unwind", s.LogPrefix())) u.UnwindTo(forkingPoint, common.Hash{}) - log.Trace(fmt.Sprintf("[%s] Fork choice unwind finished", s.LogPrefix())) cfg.hd.SetUnsettledForkChoice(forkChoice, headerNumber) From 93151f0ae6298bd94dd037646aac1ac6a37b497a Mon Sep 17 00:00:00 2001 From: ledgerwatch Date: Fri, 17 Jun 2022 13:40:49 +0100 Subject: [PATCH 070/136] [erigon2.2] Reduce allocations when replaying historical txs (#4460) * [erigon2.2] Reduce allocations when replaying historical txs * import new API, fix code * Add hack decompress function * update erigon-lib * Fix hack * Update to latest erigon-lib * Fix reindexing * Enable skip analysis for tracing calls too * Enable for eth_getLogs * Fix skip analysis * Optimise * Stop grpc server in the tests * Print * No panic on server stop * Update to latest erigon-lib Co-authored-by: Alexey Sharp --- cmd/hack/hack.go | 106 +++++++++++++++++++ cmd/rpcdaemon/commands/eth_subscribe_test.go | 2 + cmd/rpcdaemon/rpcdaemontest/test_util.go | 4 +- cmd/rpcdaemon22/commands/eth_receipts.go | 1 + core/skip_analysis.go | 56 ++++------ go.mod | 2 +- go.sum | 4 +- turbo/snapshotsync/block_reader.go | 12 +-- 8 files changed, 144 insertions(+), 43 deletions(-) diff --git a/cmd/hack/hack.go b/cmd/hack/hack.go index a54e19b47ee..ce4fbf4ae3b 100644 --- a/cmd/hack/hack.go +++ b/cmd/hack/hack.go @@ -1416,6 +1416,110 @@ func findLogs(chaindata string, block uint64, blockTotal uint64) error { return nil } +func decompress(chaindata string) error { + dir := filepath.Join(chaindata, "erigon22") + files, err := os.ReadDir(dir) + if err != nil { + return err + } + for _, f := range files { + name := f.Name() + if !strings.HasSuffix(name, ".dat") { + continue + } + if err = decompressAll(dir, filepath.Join(dir, name), strings.Contains(name, "code")); err != nil { + return err + } + } + // Re-read directory + files, err = os.ReadDir(dir) + if err != nil { + return err + } + for _, f := range files { + name := f.Name() + if !strings.HasSuffix(name, ".d") { + continue + } + if err = os.Rename(filepath.Join(dir, name), filepath.Join(dir, name[:len(name)-2])); err != nil { + return 
err + } + } + return nil +} + +func decompressAll(dir string, filename string, onlyKeys bool) error { + fmt.Printf("decompress file %s, onlyKeys=%t\n", filename, onlyKeys) + d, err := compress.NewDecompressor(filename) + if err != nil { + return err + } + defer d.Close() + newDatPath := filename + ".d" + comp, err := compress.NewCompressor(context.Background(), "comp", newDatPath, dir, compress.MinPatternScore, 1, log.LvlInfo) + if err != nil { + return err + } + defer comp.Close() + idxPath := filename[:len(filename)-3] + "idx" + idx, err := recsplit.OpenIndex(idxPath) + if err != nil { + return err + } + defer idx.Close() + g := d.MakeGetter() + var isKey bool + var word []byte + for g.HasNext() { + word, _ = g.Next(word[:0]) + if onlyKeys && !isKey { + if err := comp.AddWord(word); err != nil { + return err + } + } else { + if err := comp.AddUncompressedWord(word); err != nil { + return err + } + } + isKey = !isKey + } + if err = comp.Compress(); err != nil { + return err + } + comp.Close() + offsets := idx.ExtractOffsets() + newD, err := compress.NewDecompressor(newDatPath) + if err != nil { + return err + } + defer newD.Close() + newG := newD.MakeGetter() + g.Reset(0) + offset := uint64(0) + newOffset := uint64(0) + for g.HasNext() { + offsets[offset] = newOffset + offset = g.Skip() + newOffset = newG.Skip() + } + newIdxPath := idxPath + ".d" + f, err := os.Create(newIdxPath) + if err != nil { + return err + } + w := bufio.NewWriter(f) + if err = idx.RewriteWithOffsets(w, offsets); err != nil { + return err + } + if err = w.Flush(); err != nil { + return err + } + if err = f.Close(); err != nil { + return err + } + return nil +} + func main() { debug.RaiseFdLimit() flag.Parse() @@ -1550,6 +1654,8 @@ func main() { err = readBodies(*chaindata) case "findLogs": err = findLogs(*chaindata, uint64(*block), uint64(*blockTotal)) + case "decompress": + err = decompress(*chaindata) } if err != nil { diff --git a/cmd/rpcdaemon/commands/eth_subscribe_test.go b/cmd/rpcdaemon/commands/eth_subscribe_test.go index 25007ffeb4b..db689e44771 100644 --- a/cmd/rpcdaemon/commands/eth_subscribe_test.go +++ b/cmd/rpcdaemon/commands/eth_subscribe_test.go @@ -1,6 +1,7 @@ package commands import ( + "fmt" "testing" "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" @@ -54,6 +55,7 @@ func TestEthSubscribe(t *testing.T) { for i := uint64(1); i <= highestSeenHeader; i++ { header := <-newHeads + fmt.Printf("Got header %d\n", header.Number.Uint64()) require.Equal(i, header.Number.Uint64()) } } diff --git a/cmd/rpcdaemon/rpcdaemontest/test_util.go b/cmd/rpcdaemon/rpcdaemontest/test_util.go index b8b3bac9a81..ee53c28a891 100644 --- a/cmd/rpcdaemon/rpcdaemontest/test_util.go +++ b/cmd/rpcdaemon/rpcdaemontest/test_util.go @@ -4,6 +4,7 @@ import ( "context" "crypto/ecdsa" "encoding/binary" + "fmt" "math/big" "net" "testing" @@ -300,7 +301,7 @@ func CreateTestGrpcConn(t *testing.T, m *stages.MockSentry) (context.Context, *g dialer := func() func(context.Context, string) (net.Conn, error) { go func() { if err := server.Serve(listener); err != nil { - panic(err) + fmt.Printf("%v\n", err) } }() return func(context.Context, string) (net.Conn, error) { @@ -315,6 +316,7 @@ func CreateTestGrpcConn(t *testing.T, m *stages.MockSentry) (context.Context, *g t.Cleanup(func() { cancel() conn.Close() + server.Stop() }) return ctx, conn } diff --git a/cmd/rpcdaemon22/commands/eth_receipts.go b/cmd/rpcdaemon22/commands/eth_receipts.go index 6f76899da7b..774f5c5f2be 100644 --- a/cmd/rpcdaemon22/commands/eth_receipts.go +++ 
b/cmd/rpcdaemon22/commands/eth_receipts.go @@ -195,6 +195,7 @@ func (api *APIImpl) GetLogs(ctx context.Context, crit filters.FilterCriteria) ([ blockCtx, txCtx := transactions.GetEvmContext(msg, lastHeader, true /* requireCanonical */, tx, contractHasTEVM, api._blockReader) stateReader.SetTxNum(txNum) vmConfig := vm.Config{} + vmConfig.SkipAnalysis = core.SkipAnalysis(chainConfig, blockNum) ibs := state.New(stateReader) evm := vm.NewEVM(blockCtx, txCtx, ibs, chainConfig, vmConfig) diff --git a/core/skip_analysis.go b/core/skip_analysis.go index cc787cc1e93..844aa394a0a 100644 --- a/core/skip_analysis.go +++ b/core/skip_analysis.go @@ -17,19 +17,12 @@ package core import ( + "sort" + "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/params/networkname" ) -// MainnetNotCheckedFrom is the first block number not yet checked for invalid jumps -const MainnetNotCheckedFrom uint64 = 14_961_400 - -// MainnetNotCheckedFrom is the first block number not yet checked for invalid jumps -const BSCNotCheckedFrom uint64 = 18_682_505 - -const BorMainnetNotCheckedFrom uint64 = 29_447_463 - -const RopstenNotCheckedFrom uint64 = 12_331_664 - // SkipAnalysis function tells us whether we can skip performing jumpdest analysis // for the historical blocks (on mainnet now but perhaps on the testsnets // in the future), because we have verified that there were only a few blocks @@ -42,29 +35,26 @@ const RopstenNotCheckedFrom uint64 = 12_331_664 // 0xcdb5bf0b4b51093e1c994f471921f88623c9d3e1b6aa2782049f53a0048f2b32 (block 11079912) // 0x21ab7bf7245a87eae265124aaf180d91133377e47db2b1a4866493ec4b371150 (block 13119520) +var analysisBlocks map[string][]uint64 = map[string][]uint64{ + networkname.MainnetChainName: {6_426_298, 6_426_432, 5_800_596, 11_079_912, 13_119_520, 14_961_400}, + networkname.BSCChainName: {18_682_505}, + networkname.BorMainnetChainName: {29_447_463}, + networkname.RopstenChainName: {2_534_105, 2_534_116, 3_028_887, 3_028_940, 3_028_956, 3_450_102, 5_294_626, 5_752_787, 10_801_303, 10_925_062, 11_440_683, 11_897_655, 11_898_288, 12_291_199, 12_331_664}, +} + func SkipAnalysis(config *params.ChainConfig, blockNumber uint64) bool { - if config == params.MainnetChainConfig { - if blockNumber >= MainnetNotCheckedFrom { // We have not checked beyond that block - return false - } - if blockNumber == 6426298 || blockNumber == 6426432 || blockNumber == 5800596 || blockNumber == 11079912 || blockNumber == 13119520 { - return false - } - return true - } else if config == params.BSCChainConfig { - return blockNumber < BSCNotCheckedFrom - } else if config == params.BorMainnetChainConfig { - return blockNumber < BorMainnetNotCheckedFrom - } else if config == params.RopstenChainConfig { - if blockNumber >= RopstenNotCheckedFrom { - return false - } - if blockNumber == 2534105 || blockNumber == 2534116 || blockNumber == 3028887 || blockNumber == 3028940 || blockNumber == 3028956 || - blockNumber == 3450102 || blockNumber == 5294626 || blockNumber == 5752787 || blockNumber == 10801303 || blockNumber == 10925062 || - blockNumber == 11440683 || blockNumber == 11897655 || blockNumber == 11898288 || blockNumber == 12291199 { - return false - } - return true + blockNums, ok := analysisBlocks[config.ChainName] + if !ok { + return false + } + // blockNums is ordered, and the last element is the first block number which has not been checked + p := sort.Search(len(blockNums), func(i int) bool { + return blockNums[i] >= blockNumber + }) + if p == len(blockNums) { + // blockNum is beyond the last element, 
no optimisation + return false } - return false + // If the blockNumber is in the list, no optimisation + return blockNumber != blockNums[p] } diff --git a/go.mod b/go.mod index dabeb4f6210..45152768f88 100644 --- a/go.mod +++ b/go.mod @@ -34,7 +34,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible - github.com/ledgerwatch/erigon-lib v0.0.0-20220616070148-280c5e9dcc82 + github.com/ledgerwatch/erigon-lib v0.0.0-20220617113949-df49481ddcea github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 diff --git a/go.sum b/go.sum index db9d39793e9..efe9255a665 100644 --- a/go.sum +++ b/go.sum @@ -382,8 +382,8 @@ github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3P github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220616070148-280c5e9dcc82 h1:8MOS6AJudtu+RKx1FtCNdsHDsGfb1e1lpepEgCRSIu4= -github.com/ledgerwatch/erigon-lib v0.0.0-20220616070148-280c5e9dcc82/go.mod h1:SOwq7m9Wm7ckQ+kxUwDYRchwuwO8lXhp1lhbLTUhMk8= +github.com/ledgerwatch/erigon-lib v0.0.0-20220617113949-df49481ddcea h1:vTQtPDVZyzY3ijQcjC7NWxRP9lzVULsDspoivasHXF8= +github.com/ledgerwatch/erigon-lib v0.0.0-20220617113949-df49481ddcea/go.mod h1:SOwq7m9Wm7ckQ+kxUwDYRchwuwO8lXhp1lhbLTUhMk8= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= diff --git a/turbo/snapshotsync/block_reader.go b/turbo/snapshotsync/block_reader.go index d70a960fd5f..f467a34520a 100644 --- a/turbo/snapshotsync/block_reader.go +++ b/turbo/snapshotsync/block_reader.go @@ -487,7 +487,7 @@ func (back *BlockReaderWithSnapshots) headerFromSnapshot(blockHeight uint64, sn if sn.idxHeaderHash == nil { return nil, buf, nil } - headerOffset := sn.idxHeaderHash.Lookup2(blockHeight - sn.idxHeaderHash.BaseDataID()) + headerOffset := sn.idxHeaderHash.OrdinalLookup(blockHeight - sn.idxHeaderHash.BaseDataID()) gg := sn.seg.MakeGetter() gg.Reset(headerOffset) buf, _ = gg.Next(buf[:0]) @@ -511,7 +511,7 @@ func (back *BlockReaderWithSnapshots) headerFromSnapshotByHash(hash common.Hash, } reader := recsplit.NewIndexReader(sn.idxHeaderHash) localID := reader.Lookup(hash[:]) - headerOffset := sn.idxHeaderHash.Lookup2(localID) + headerOffset := sn.idxHeaderHash.OrdinalLookup(localID) gg := sn.seg.MakeGetter() gg.Reset(headerOffset) buf, _ = gg.Next(buf[:0]) @@ -548,7 +548,7 @@ func (back *BlockReaderWithSnapshots) bodyForStorageFromSnapshot(blockHeight uin if sn.idxBodyNumber == nil { return nil, buf, nil } - bodyOffset := sn.idxBodyNumber.Lookup2(blockHeight - sn.idxBodyNumber.BaseDataID()) + bodyOffset := sn.idxBodyNumber.OrdinalLookup(blockHeight - sn.idxBodyNumber.BaseDataID()) gg := sn.seg.MakeGetter() gg.Reset(bodyOffset) @@ -582,7 +582,7 @@ func (back *BlockReaderWithSnapshots) txsFromSnapshot(baseTxnID uint64, txsAmoun if txsAmount == 0 { return txs, senders, nil } - txnOffset := txsSeg.IdxTxnHash.Lookup2(baseTxnID - txsSeg.IdxTxnHash.BaseDataID()) + txnOffset := 
txsSeg.IdxTxnHash.OrdinalLookup(baseTxnID - txsSeg.IdxTxnHash.BaseDataID()) gg := txsSeg.Seg.MakeGetter() gg.Reset(txnOffset) stream := rlp.NewStream(reader, 0) @@ -606,7 +606,7 @@ func (back *BlockReaderWithSnapshots) txsFromSnapshot(baseTxnID uint64, txsAmoun } func (back *BlockReaderWithSnapshots) txnByID(txnID uint64, sn *TxnSegment, buf []byte) (txn types.Transaction, err error) { - offset := sn.IdxTxnHash.Lookup2(txnID - sn.IdxTxnHash.BaseDataID()) + offset := sn.IdxTxnHash.OrdinalLookup(txnID - sn.IdxTxnHash.BaseDataID()) gg := sn.Seg.MakeGetter() gg.Reset(offset) buf, _ = gg.Next(buf[:0]) @@ -629,7 +629,7 @@ func (back *BlockReaderWithSnapshots) txnByHash(txnHash common.Hash, segments [] reader := recsplit.NewIndexReader(sn.IdxTxnHash) txnId := reader.Lookup(txnHash[:]) - offset := sn.IdxTxnHash.Lookup2(txnId) + offset := sn.IdxTxnHash.OrdinalLookup(txnId) gg := sn.Seg.MakeGetter() gg.Reset(offset) // first byte txnHash check - reducing false-positives 256 times. Allows don't store and don't calculate full hash of entity - when checking many snapshots. From 027faa1de8ee9de7a0cb2fe5011e1a749945b1aa Mon Sep 17 00:00:00 2001 From: ledgerwatch Date: Fri, 17 Jun 2022 14:14:32 +0100 Subject: [PATCH 071/136] Update state_processor.go (#4475) --- core/state_processor.go | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/core/state_processor.go b/core/state_processor.go index 96d409256af..066378ab5d9 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -107,12 +107,7 @@ func applyTransaction(config *params.ChainConfig, gp *GasPool, statedb *state.In if err = statedb.FinalizeTx(rules, stateWriter); err != nil { return nil, nil, err } - // checks if current header is an Euler block or not (returns false for all the chains except BSC) - if config.IsEuler(header.Number) { - *usedGas += result.UsedGas * 3 - } else { - *usedGas += result.UsedGas - } + *usedGas += result.UsedGas // Set the receipt logs and create the bloom filter. // based on the eip phase, we're passing whether the root touch-delete accounts. 
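[Editor's note: the following aside is not part of the patch series.] PATCH 070 above replaces the per-chain "NotCheckedFrom" constants in core/skip_analysis.go with a per-chain block list plus a binary search, and PATCH 080 later in this series re-sorts the mainnet list. The standalone Go sketch below is only an illustration of the lookup semantics: the block values are copied from the mainnet entry in those patches, while the package, function and variable names are invented for the example. The last element of the list acts as the first block that has not been checked yet, and jumpdest analysis is skipped only for blocks below that bound which are not themselves listed exceptions.

package main

import (
	"fmt"
	"sort"
)

// Mainnet block list in the sorted order used after PATCH 080; the last
// element (14_961_400) is the first block number not yet checked.
var mainnetAnalysisBlocks = []uint64{5_800_596, 6_426_298, 6_426_432, 11_079_912, 13_119_520, 14_961_400}

// skipAnalysis mirrors the logic of core.SkipAnalysis for a single chain:
// analysis may be skipped unless the block is a known exception or lies at
// or beyond the first unchecked block.
func skipAnalysis(blockNums []uint64, blockNumber uint64) bool {
	// smallest index whose value is >= blockNumber, or len(blockNums) if none
	p := sort.Search(len(blockNums), func(i int) bool { return blockNums[i] >= blockNumber })
	if p == len(blockNums) {
		return false // beyond the verified range: no optimisation
	}
	return blockNumber != blockNums[p] // listed blocks still need full analysis
}

func main() {
	for _, n := range []uint64{1_000_000, 6_426_432, 14_961_399, 14_961_400, 20_000_000} {
		fmt.Printf("block %d: skip=%t\n", n, skipAnalysis(mainnetAnalysisBlocks, n))
	}
	// Prints skip=true for 1_000_000 and 14_961_399, and skip=false for
	// 6_426_432 (known invalid-jump block), 14_961_400 (first unchecked
	// block) and 20_000_000 (beyond the verified range).
}

Since sort.Search assumes an ordered slice, this is presumably why the ordering of the mainnet list matters and is corrected in PATCH 080 below.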
From ecf528d56a059805754337c98156b9056ad34409 Mon Sep 17 00:00:00 2001 From: ledgerwatch Date: Fri, 17 Jun 2022 16:10:35 +0100 Subject: [PATCH 072/136] Prevent clogging up pending subpool with transactions that were already mined but notification missed (#4476) * Debug tx pool Best * Print results of Best * Print * Update to erigon-lib * Fix * Update to latest erigon-lib Co-authored-by: Alexey Sharp --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 45152768f88..531bf8306d6 100644 --- a/go.mod +++ b/go.mod @@ -34,7 +34,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible - github.com/ledgerwatch/erigon-lib v0.0.0-20220617113949-df49481ddcea + github.com/ledgerwatch/erigon-lib v0.0.0-20220617144601-d7693ce09400 github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 diff --git a/go.sum b/go.sum index efe9255a665..6b094e59cac 100644 --- a/go.sum +++ b/go.sum @@ -382,8 +382,8 @@ github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3P github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220617113949-df49481ddcea h1:vTQtPDVZyzY3ijQcjC7NWxRP9lzVULsDspoivasHXF8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220617113949-df49481ddcea/go.mod h1:SOwq7m9Wm7ckQ+kxUwDYRchwuwO8lXhp1lhbLTUhMk8= +github.com/ledgerwatch/erigon-lib v0.0.0-20220617144601-d7693ce09400 h1:HqoDhLo0H7bNkYJ8hQqueyhWnalM2uO29PiRlQqKAP8= +github.com/ledgerwatch/erigon-lib v0.0.0-20220617144601-d7693ce09400/go.mod h1:SOwq7m9Wm7ckQ+kxUwDYRchwuwO8lXhp1lhbLTUhMk8= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= From e44d62db1ab485ecc6f037baef40ba5acb108ef1 Mon Sep 17 00:00:00 2001 From: ledgerwatch Date: Fri, 17 Jun 2022 19:14:52 +0100 Subject: [PATCH 073/136] Fix trace block (#4480) * Fix trace block * Fix compile Co-authored-by: Alexey Sharp --- cmd/rpcdaemon/commands/trace_adhoc.go | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/cmd/rpcdaemon/commands/trace_adhoc.go b/cmd/rpcdaemon/commands/trace_adhoc.go index 6888b0a17e1..2493edf0daa 100644 --- a/cmd/rpcdaemon/commands/trace_adhoc.go +++ b/cmd/rpcdaemon/commands/trace_adhoc.go @@ -367,9 +367,16 @@ func (ot *OeTracer) CaptureEnd(depth int, output []byte, startGas, endGas uint64 if err != nil && !ignoreError { if err == vm.ErrExecutionReverted { topTrace.Error = "Reverted" - topTrace.Result.(*TraceResult).GasUsed = new(hexutil.Big) - topTrace.Result.(*TraceResult).GasUsed.ToInt().SetUint64(startGas - endGas) - topTrace.Result.(*TraceResult).Output = common.CopyBytes(output) + switch topTrace.Type { + case CALL: + topTrace.Result.(*TraceResult).GasUsed = new(hexutil.Big) + topTrace.Result.(*TraceResult).GasUsed.ToInt().SetUint64(startGas - endGas) + topTrace.Result.(*TraceResult).Output = common.CopyBytes(output) + case CREATE: + topTrace.Result.(*CreateTraceResult).GasUsed = new(hexutil.Big) + 
topTrace.Result.(*CreateTraceResult).GasUsed.ToInt().SetUint64(startGas - endGas) + topTrace.Result.(*CreateTraceResult).Code = common.CopyBytes(output) + } } else { topTrace.Result = nil switch err { From d1aab4e59b3ae09288a31dd419f4ecfcec658e44 Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Fri, 17 Jun 2022 20:24:36 +0200 Subject: [PATCH 074/136] Bring a corner case in line with the Engine API spec (#4477) --- ethdb/privateapi/ethbackend.go | 66 ++++++++++++++++++---------------- 1 file changed, 36 insertions(+), 30 deletions(-) diff --git a/ethdb/privateapi/ethbackend.go b/ethdb/privateapi/ethbackend.go index 1adcf3eb524..03eb2079031 100644 --- a/ethdb/privateapi/ethbackend.go +++ b/ethdb/privateapi/ethbackend.go @@ -152,7 +152,7 @@ func (s *EthBackendServer) NetPeerCount(_ context.Context, _ *remote.NetPeerCoun } func (s *EthBackendServer) Subscribe(r *remote.SubscribeRequest, subscribeServer remote.ETHBACKEND_SubscribeServer) (err error) { - log.Trace("Establishing event subscription channel with the RPC daemon ...") + log.Debug("Establishing event subscription channel with the RPC daemon ...") ch, clean := s.events.AddHeaderSubscription() defer clean() newSnCh, newSnClean := s.events.AddNewSnapshotSubscription() @@ -274,10 +274,10 @@ func (s *EthBackendServer) stageLoopIsBusy() bool { // EngineNewPayloadV1 validates and possibly executes payload func (s *EthBackendServer) EngineNewPayloadV1(ctx context.Context, req *types2.ExecutionPayload) (*remote.EnginePayloadStatus, error) { - log.Trace("[NewPayload] acquiring lock") + log.Debug("[NewPayload] acquiring lock") s.lock.Lock() defer s.lock.Unlock() - log.Trace("[NewPayload] lock acquired") + log.Debug("[NewPayload] lock acquired") if s.config.TerminalTotalDifficulty == nil { log.Error("[NewPayload] not a proof-of-stake chain") @@ -339,11 +339,11 @@ func (s *EthBackendServer) EngineNewPayloadV1(ctx context.Context, req *types2.E // The process of validating a payload on the canonical chain MUST NOT be affected by an active sync process on a side branch of the block tree. // For example, if side branch B is SYNCING but the requisite data for validating a payload from canonical branch A is available, client software MUST initiate the validation process. 
// https://github.com/ethereum/execution-apis/blob/v1.0.0-alpha.6/src/engine/specification.md#payload-validation - log.Trace("[NewPayload] stage loop is busy") + log.Debug("[NewPayload] stage loop is busy") return &remote.EnginePayloadStatus{Status: remote.EngineStatus_SYNCING}, nil } - log.Trace("[NewPayload] sending block", "height", header.Number, "hash", common.Hash(blockHash)) + log.Debug("[NewPayload] sending block", "height", header.Number, "hash", common.Hash(blockHash)) s.requestList.AddPayloadRequest(&engineapi.PayloadMessage{ Header: &header, Body: &types.RawBody{ @@ -353,7 +353,7 @@ func (s *EthBackendServer) EngineNewPayloadV1(ctx context.Context, req *types2.E }) payloadStatus := <-s.statusCh - log.Trace("[NewPayload] got reply", "payloadStatus", payloadStatus) + log.Debug("[NewPayload] got reply", "payloadStatus", payloadStatus) if payloadStatus.CriticalError != nil { return nil, payloadStatus.CriticalError @@ -372,10 +372,10 @@ func (s *EthBackendServer) EngineGetPayloadV1(ctx context.Context, req *remote.E return nil, fmt.Errorf("not a proof-of-stake chain") } - log.Trace("[GetPayload] acquiring lock") + log.Debug("[GetPayload] acquiring lock") s.lock.Lock() defer s.lock.Unlock() - log.Trace("[GetPayload] lock acquired") + log.Debug("[GetPayload] lock acquired") builder, ok := s.builders[req.PayloadId] if !ok { @@ -418,10 +418,10 @@ func (s *EthBackendServer) EngineGetPayloadV1(ctx context.Context, req *remote.E // EngineForkChoiceUpdatedV1 either states new block head or request the assembling of a new block func (s *EthBackendServer) EngineForkChoiceUpdatedV1(ctx context.Context, req *remote.EngineForkChoiceUpdatedRequest) (*remote.EngineForkChoiceUpdatedReply, error) { - log.Trace("[ForkChoiceUpdated] acquiring lock") + log.Debug("[ForkChoiceUpdated] acquiring lock") s.lock.Lock() defer s.lock.Unlock() - log.Trace("[ForkChoiceUpdated] lock acquired") + log.Debug("[ForkChoiceUpdated] lock acquired") if s.config.TerminalTotalDifficulty == nil { return nil, fmt.Errorf("not a proof-of-stake chain") @@ -438,6 +438,7 @@ func (s *EthBackendServer) EngineForkChoiceUpdatedV1(ctx context.Context, req *r return nil, err } td, err := rawdb.ReadTdByHash(tx1, forkChoice.HeadBlockHash) + tx1.Rollback() if err != nil { return nil, err } @@ -448,43 +449,32 @@ func (s *EthBackendServer) EngineForkChoiceUpdatedV1(ctx context.Context, req *r }, nil } - // TODO(yperbasis): Client software MAY skip an update of the forkchoice state and - // MUST NOT begin a payload build process if forkchoiceState.headBlockHash doesn't reference a leaf of the block tree. - // That is, the block referenced by forkchoiceState.headBlockHash is neither the head of the canonical chain nor a block at the tip of any other chain. 
- // https://github.com/ethereum/execution-apis/blob/v1.0.0-alpha.9/src/engine/specification.md#specification-1 - tx1.Rollback() - if s.stageLoopIsBusy() { - log.Trace("[ForkChoiceUpdated] stage loop is busy") + log.Debug("[ForkChoiceUpdated] stage loop is busy") return &remote.EngineForkChoiceUpdatedReply{ PayloadStatus: &remote.EnginePayloadStatus{Status: remote.EngineStatus_SYNCING}, }, nil } - log.Trace("[ForkChoiceUpdated] sending forkChoiceMessage", "head", forkChoice.HeadBlockHash) + log.Debug("[ForkChoiceUpdated] sending forkChoiceMessage", "head", forkChoice.HeadBlockHash) s.requestList.AddForkChoiceRequest(&forkChoice) - payloadStatus := <-s.statusCh - log.Trace("[ForkChoiceUpdated] got reply", "payloadStatus", payloadStatus) + status := <-s.statusCh + log.Debug("[ForkChoiceUpdated] got reply", "payloadStatus", status) - if payloadStatus.CriticalError != nil { - return nil, payloadStatus.CriticalError + if status.CriticalError != nil { + return nil, status.CriticalError } // No need for payload building - if req.PayloadAttributes == nil || payloadStatus.Status != remote.EngineStatus_VALID { - return &remote.EngineForkChoiceUpdatedReply{PayloadStatus: convertPayloadStatus(&payloadStatus)}, nil + if req.PayloadAttributes == nil || status.Status != remote.EngineStatus_VALID { + return &remote.EngineForkChoiceUpdatedReply{PayloadStatus: convertPayloadStatus(&status)}, nil } if !s.proposing { return nil, fmt.Errorf("execution layer not running as a proposer. enable proposer by taking out the --proposer.disable flag on startup") } - s.evictOldBuilders() - - // payload IDs start from 1 (0 signifies null) - s.payloadId++ - tx2, err := s.db.BeginRo(ctx) if err != nil { return nil, err @@ -495,13 +485,29 @@ func (s *EthBackendServer) EngineForkChoiceUpdatedV1(ctx context.Context, req *r tx2.Rollback() if headHeader.Hash() != forkChoice.HeadBlockHash { - return nil, fmt.Errorf("unexpected head hash: %x vs %x", headHeader.Hash(), forkChoice.HeadBlockHash) + // Per https://github.com/ethereum/execution-apis/blob/v1.0.0-alpha.9/src/engine/specification.md#specification-1: + // Client software MAY skip an update of the forkchoice state and + // MUST NOT begin a payload build process if forkchoiceState.headBlockHash doesn't reference a leaf of the block tree. + // That is, the block referenced by forkchoiceState.headBlockHash is neither the head of the canonical chain nor a block at the tip of any other chain. + // In the case of such an event, client software MUST return + // {payloadStatus: {status: VALID, latestValidHash: forkchoiceState.headBlockHash, validationError: null}, payloadId: null}. 
+ + log.Warn("Skipping payload building because forkchoiceState.headBlockHash is not the head of the canonical chain", + "forkChoice.HeadBlockHash", forkChoice.HeadBlockHash, "headHeader.Hash", headHeader.Hash()) + return &remote.EngineForkChoiceUpdatedReply{PayloadStatus: convertPayloadStatus(&status)}, nil } if headHeader.Time >= req.PayloadAttributes.Timestamp { return nil, &InvalidPayloadAttributesErr } + // Initiate payload building + + s.evictOldBuilders() + + // payload IDs start from 1 (0 signifies null) + s.payloadId++ + emptyHeader := core.MakeEmptyHeader(headHeader, s.config, req.PayloadAttributes.Timestamp, nil) emptyHeader.Coinbase = gointerfaces.ConvertH160toAddress(req.PayloadAttributes.SuggestedFeeRecipient) emptyHeader.MixDigest = gointerfaces.ConvertH256ToHash(req.PayloadAttributes.PrevRandao) From 50873a5b1d163924ec7826886c9a9e8b7275e7df Mon Sep 17 00:00:00 2001 From: ledgerwatch Date: Fri, 17 Jun 2022 19:46:27 +0100 Subject: [PATCH 075/136] [erigon2.2] Fix for code merge (#4478) * [erigon2.2 Fix for code merge * Update to latest erigon-lib Co-authored-by: Alexey Sharp --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 531bf8306d6..770d22c0e8c 100644 --- a/go.mod +++ b/go.mod @@ -34,7 +34,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible - github.com/ledgerwatch/erigon-lib v0.0.0-20220617144601-d7693ce09400 + github.com/ledgerwatch/erigon-lib v0.0.0-20220617182456-945b0e9e0f61 github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 diff --git a/go.sum b/go.sum index 6b094e59cac..d4dd40f0c39 100644 --- a/go.sum +++ b/go.sum @@ -382,8 +382,8 @@ github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3P github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220617144601-d7693ce09400 h1:HqoDhLo0H7bNkYJ8hQqueyhWnalM2uO29PiRlQqKAP8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220617144601-d7693ce09400/go.mod h1:SOwq7m9Wm7ckQ+kxUwDYRchwuwO8lXhp1lhbLTUhMk8= +github.com/ledgerwatch/erigon-lib v0.0.0-20220617182456-945b0e9e0f61 h1:eQQEmAO6jtdJL3woBhWhKhl7W7CzeWAeDWUoSG8WIk0= +github.com/ledgerwatch/erigon-lib v0.0.0-20220617182456-945b0e9e0f61/go.mod h1:SOwq7m9Wm7ckQ+kxUwDYRchwuwO8lXhp1lhbLTUhMk8= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= From c2518ad6a336207e9b284e51a54b6256a31c8e1b Mon Sep 17 00:00:00 2001 From: Zachinquarantine Date: Sat, 18 Jun 2022 03:35:43 -0400 Subject: [PATCH 076/136] Removes StorageBlock type (#4483) --- core/types/block.go | 25 ------------------------- 1 file changed, 25 deletions(-) diff --git a/core/types/block.go b/core/types/block.go index cc131f286e9..ec90dbe236f 100644 --- a/core/types/block.go +++ b/core/types/block.go @@ -659,21 +659,6 @@ type Block struct { ReceivedFrom interface{} } -// [deprecated by eth/63] -// StorageBlock defines the RLP encoding of a Block stored in the -// state 
database. The StorageBlock encoding contains fields that -// would otherwise need to be recomputed. -type StorageBlock Block - -// [deprecated by eth/63] -// "storage" block encoding. used for database. -type storageblock struct { - Header *Header - Txs []Transaction - Uncles []*Header - TD *big.Int -} - // Copy transaction senders from body into the transactions func (b *Body) SendersToTxs(senders []common.Address) { if senders == nil { @@ -1157,16 +1142,6 @@ func (bb Block) EncodeRLP(w io.Writer) error { return nil } -// [deprecated by eth/63] -func (b *StorageBlock) DecodeRLP(s *rlp.Stream) error { - var sb storageblock - if err := s.Decode(&sb); err != nil { - return err - } - b.header, b.uncles, b.transactions = sb.Header, sb.Uncles, sb.Txs - return nil -} - func (b *Block) Uncles() []*Header { return b.uncles } func (b *Block) Transactions() Transactions { return b.transactions } From dfae218a6f8fe913651783a9076932c6cb2fdee1 Mon Sep 17 00:00:00 2001 From: Uwe Voelker Date: Sat, 18 Jun 2022 21:17:38 +0200 Subject: [PATCH 077/136] fix typo (#4486) --- cmd/rpcdaemon/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/rpcdaemon/README.md b/cmd/rpcdaemon/README.md index 50ad1581831..307d3c5e186 100644 --- a/cmd/rpcdaemon/README.md +++ b/cmd/rpcdaemon/README.md @@ -474,7 +474,7 @@ Currently batch requests are spawn multiple goroutines and process all sub-reque huge batch to other users - added flag `--rpc.batch.concurrency` (default: 2). Increase it to process large batches faster. -Known Issue: if at least 1 request is "stremable" (has parameter of type *jsoniter.Stream) - then whole batch will +Known Issue: if at least 1 request is "streamable" (has parameter of type *jsoniter.Stream) - then whole batch will processed sequentially (on 1 goroutine). ## For Developers From dc8a3fc56573ed4b94b5af89ead1606970df94b4 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Sun, 19 Jun 2022 09:33:28 +0600 Subject: [PATCH 078/136] fix docs(#4491) --- cmd/utils/flags.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 7d109496e70..31f35b8082a 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -308,7 +308,7 @@ var ( } HTTPEnabledFlag = cli.BoolTFlag{ Name: "http", - Usage: "HTTP-RPC server (enabled by default). Use --http false to disable it", + Usage: "HTTP-RPC server (enabled by default). 
Use --http=false to disable it", } HTTPListenAddrFlag = cli.StringFlag{ Name: "http.addr", From ef749e39c38771047677578e7bea2666eed75c2e Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Sun, 19 Jun 2022 11:27:14 +0600 Subject: [PATCH 079/136] Roaring version up (#4492) * save * save --- go.mod | 6 +++--- go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index 770d22c0e8c..d2bb4ffc0ec 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/ledgerwatch/erigon go 1.18 require ( - github.com/RoaringBitmap/roaring v1.2.0 + github.com/RoaringBitmap/roaring v1.2.1 github.com/VictoriaMetrics/fastcache v1.10.0 github.com/VictoriaMetrics/metrics v1.18.1 github.com/anacrolix/go-libutp v1.2.0 @@ -34,7 +34,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible - github.com/ledgerwatch/erigon-lib v0.0.0-20220617182456-945b0e9e0f61 + github.com/ledgerwatch/erigon-lib v0.0.0-20220619033440-0fb5347c1b57 github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 @@ -51,7 +51,7 @@ require ( github.com/torquem-ch/mdbx-go v0.24.3-0.20220614090901-342411560dde github.com/ugorji/go/codec v1.1.13 github.com/ugorji/go/codec/codecgen v1.1.13 - github.com/urfave/cli v1.22.8 + github.com/urfave/cli v1.22.9 github.com/valyala/fastjson v1.6.3 github.com/xsleonard/go-merkle v1.1.0 go.uber.org/atomic v1.9.0 diff --git a/go.sum b/go.sum index d4dd40f0c39..09e40e81385 100644 --- a/go.sum +++ b/go.sum @@ -11,8 +11,8 @@ github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go github.com/RoaringBitmap/roaring v0.4.7/go.mod h1:8khRDP4HmeXns4xIj9oGrKSz7XTQiJx2zgh7AcNke4w= github.com/RoaringBitmap/roaring v0.4.17/go.mod h1:D3qVegWTmfCaX4Bl5CrBE9hfrSrrXIr8KVNvRsDi1NI= github.com/RoaringBitmap/roaring v0.4.23/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo= -github.com/RoaringBitmap/roaring v1.2.0 h1:qayex3YgtOmzev8slia4A0jPGsn2o2bnqKDcRpyRUiI= -github.com/RoaringBitmap/roaring v1.2.0/go.mod h1:icnadbWcNyfEHlYdr+tDlOTih1Bf/h+rzPpv4sbomAA= +github.com/RoaringBitmap/roaring v1.2.1 h1:58/LJlg/81wfEHd5L9qsHduznOIhyv4qb1yWcSvVq9A= +github.com/RoaringBitmap/roaring v1.2.1/go.mod h1:icnadbWcNyfEHlYdr+tDlOTih1Bf/h+rzPpv4sbomAA= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/VictoriaMetrics/fastcache v1.10.0 h1:5hDJnLsKLpnUEToub7ETuRu8RCkb40woBZAUiKonXzY= @@ -382,8 +382,8 @@ github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3P github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220617182456-945b0e9e0f61 h1:eQQEmAO6jtdJL3woBhWhKhl7W7CzeWAeDWUoSG8WIk0= -github.com/ledgerwatch/erigon-lib v0.0.0-20220617182456-945b0e9e0f61/go.mod h1:SOwq7m9Wm7ckQ+kxUwDYRchwuwO8lXhp1lhbLTUhMk8= +github.com/ledgerwatch/erigon-lib v0.0.0-20220619033440-0fb5347c1b57 h1:A3jtfJKJrinu88Vb+57QYcElDWa+JrXRF/yPNaZpaPo= +github.com/ledgerwatch/erigon-lib v0.0.0-20220619033440-0fb5347c1b57/go.mod h1:7sQ5B5m54zoo7RVRVukH3YZCYVrCC+BmwDBD+9KyTrE= 
github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= @@ -617,8 +617,8 @@ github.com/ugorji/go/codec/codecgen v1.1.13 h1:rGpZ4Q63VcWA3DMBbIHvg+SQweUkfXBBa github.com/ugorji/go/codec/codecgen v1.1.13/go.mod h1:EhCxlc7Crov+HLygD4+hBCitXNrrGKRrRWj+pRsyJGg= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/cli v1.22.8 h1:9ic0a+f2TCJ5tSbVRX/FSSCIHJacFLYxcuNexNMJF8Q= -github.com/urfave/cli v1.22.8/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.9 h1:cv3/KhXGBGjEXLC4bH0sLuJ9BewaAbpk5oyMOveu4pw= +github.com/urfave/cli v1.22.9/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/valyala/fastjson v1.6.3 h1:tAKFnnwmeMGPbwJ7IwxcTPCNr3uIzoIj3/Fh90ra4xc= github.com/valyala/fastjson v1.6.3/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY= github.com/valyala/fastrand v1.1.0 h1:f+5HkLW4rsgzdNoleUOB69hyT9IlD2ZQh9GyDMfb5G8= From f88fa89bda826458add31274a60cf6800ac47f0f Mon Sep 17 00:00:00 2001 From: ledgerwatch Date: Sun, 19 Jun 2022 06:51:05 +0100 Subject: [PATCH 080/136] Fix in skip_analysis (#4493) * Update skip_analysis.go * Update skip_analysis.go --- core/skip_analysis.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/skip_analysis.go b/core/skip_analysis.go index 844aa394a0a..231a3150750 100644 --- a/core/skip_analysis.go +++ b/core/skip_analysis.go @@ -29,14 +29,14 @@ import ( // where codeBitmap was useful. Invalid jumps either did not occur, or were // prevented simply by checking whether the jump destination has JUMPDEST opcode // Mainnet transactions that use jumpdest analysis are: +// 0x3666640316df11865abd1352f4c0b4c5126f8ac1d858ef2a0c6e744a4865bca2 (block 5800596) // 0x88a1f2a9f048a21fd944b28ad9962f533ab5d3c40e17b1bc3f99ae999a4021b2 (block 6426432) // 0x86e55d1818b5355424975de9633a57c40789ca08552297b726333a9433949c92 (block 6426298) -// 0x3666640316df11865abd1352f4c0b4c5126f8ac1d858ef2a0c6e744a4865bca2 (block 5800596) // 0xcdb5bf0b4b51093e1c994f471921f88623c9d3e1b6aa2782049f53a0048f2b32 (block 11079912) // 0x21ab7bf7245a87eae265124aaf180d91133377e47db2b1a4866493ec4b371150 (block 13119520) var analysisBlocks map[string][]uint64 = map[string][]uint64{ - networkname.MainnetChainName: {6_426_298, 6_426_432, 5_800_596, 11_079_912, 13_119_520, 14_961_400}, + networkname.MainnetChainName: {5_800_596, 6_426_298, 6_426_432, 11_079_912, 13_119_520, 14_961_400}, networkname.BSCChainName: {18_682_505}, networkname.BorMainnetChainName: {29_447_463}, networkname.RopstenChainName: {2_534_105, 2_534_116, 3_028_887, 3_028_940, 3_028_956, 3_450_102, 5_294_626, 5_752_787, 10_801_303, 10_925_062, 11_440_683, 11_897_655, 11_898_288, 12_291_199, 12_331_664}, From 2e3a75f2b1713df9be9e8672685fa3085cec6ac8 Mon Sep 17 00:00:00 2001 From: ledgerwatch Date: Sun, 19 Jun 2022 07:01:00 +0100 Subject: [PATCH 081/136] [erigon2.2] Add skip analysis to trace_filter (#4487) * [erigon2.2] Add skip analysis to trace_filter * Optimisation * Update to latest erigon-lib * Fix test * Update to latest erigon-lib Co-authored-by: Alexey Sharp --- cmd/rpcdaemon22/commands/trace_filtering.go | 1 + cmd/rpcdaemon22/rpcdaemontest/test_util.go | 4 +++- go.mod | 2 +- go.sum | 4 ++-- 4 files changed, 7 insertions(+), 4 
deletions(-) diff --git a/cmd/rpcdaemon22/commands/trace_filtering.go b/cmd/rpcdaemon22/commands/trace_filtering.go index 0e1b0a22874..a71a890e9af 100644 --- a/cmd/rpcdaemon22/commands/trace_filtering.go +++ b/cmd/rpcdaemon22/commands/trace_filtering.go @@ -433,6 +433,7 @@ func (api *TraceAPIImpl) Filter(ctx context.Context, req TraceFilterRequest, str cachedReader := state.NewCachedReader(stateReader, stateCache) cachedWriter := state.NewCachedWriter(noop, stateCache) vmConfig := vm.Config{} + vmConfig.SkipAnalysis = core.SkipAnalysis(chainConfig, blockNum) traceResult := &TraceCallResult{Trace: []*ParityTrace{}} var ot OeTracer ot.compat = api.compatibility diff --git a/cmd/rpcdaemon22/rpcdaemontest/test_util.go b/cmd/rpcdaemon22/rpcdaemontest/test_util.go index 15589682e34..ad73c3faad4 100644 --- a/cmd/rpcdaemon22/rpcdaemontest/test_util.go +++ b/cmd/rpcdaemon22/rpcdaemontest/test_util.go @@ -4,6 +4,7 @@ import ( "context" "crypto/ecdsa" "encoding/binary" + "fmt" "math/big" "net" "testing" @@ -301,7 +302,7 @@ func CreateTestGrpcConn(t *testing.T, m *stages.MockSentry) (context.Context, *g dialer := func() func(context.Context, string) (net.Conn, error) { go func() { if err := server.Serve(listener); err != nil { - panic(err) + fmt.Printf("%v\n", err) } }() return func(context.Context, string) (net.Conn, error) { @@ -316,6 +317,7 @@ func CreateTestGrpcConn(t *testing.T, m *stages.MockSentry) (context.Context, *g t.Cleanup(func() { cancel() conn.Close() + server.Stop() }) return ctx, conn } diff --git a/go.mod b/go.mod index d2bb4ffc0ec..e86fb37bb83 100644 --- a/go.mod +++ b/go.mod @@ -34,7 +34,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible - github.com/ledgerwatch/erigon-lib v0.0.0-20220619033440-0fb5347c1b57 + github.com/ledgerwatch/erigon-lib v0.0.0-20220619053529-5574d68a87ee github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 diff --git a/go.sum b/go.sum index 09e40e81385..be76a4a279d 100644 --- a/go.sum +++ b/go.sum @@ -382,8 +382,8 @@ github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3P github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220619033440-0fb5347c1b57 h1:A3jtfJKJrinu88Vb+57QYcElDWa+JrXRF/yPNaZpaPo= -github.com/ledgerwatch/erigon-lib v0.0.0-20220619033440-0fb5347c1b57/go.mod h1:7sQ5B5m54zoo7RVRVukH3YZCYVrCC+BmwDBD+9KyTrE= +github.com/ledgerwatch/erigon-lib v0.0.0-20220619053529-5574d68a87ee h1:AoE5ESeSj/KUE/Je9KdNlEYyRA20KNkrV0nNrLmnYOY= +github.com/ledgerwatch/erigon-lib v0.0.0-20220619053529-5574d68a87ee/go.mod h1:7sQ5B5m54zoo7RVRVukH3YZCYVrCC+BmwDBD+9KyTrE= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= From 64067a2b779e7aaf997a792d700c30d4880d7138 Mon Sep 17 00:00:00 2001 From: ledgerwatch Date: Sun, 19 Jun 2022 13:40:28 +0100 Subject: [PATCH 082/136] Debugging Engine API (#4488) * defer tx rollback * Add http.trace flag * Fixed http.trace flag * Fix lint * Fix 
lint * Delete unused tests * Fix lint * Fix lint * Fix lint * Fix lint Co-authored-by: Alexey Sharp --- cmd/rpcdaemon/cli/config.go | 6 +- cmd/rpcdaemon/cli/httpcfg/http_cfg.go | 1 + cmd/rpcdaemon22/cli/config.go | 5 +- cmd/rpcdaemon22/cli/httpcfg/http_cfg.go | 1 + cmd/utils/flags.go | 4 + eth/backend.go | 1 - ethdb/privateapi/ethbackend.go | 3 + node/api.go | 331 --------- node/api_test.go | 357 ---------- node/node.go | 206 +----- node/node_test.go | 269 ------- node/rpcstack.go | 21 +- node/utils_test.go | 21 - p2p/simulations/adapters/inproc.go | 358 ---------- p2p/simulations/connect_test.go | 178 ----- p2p/simulations/examples/ping-pong.go | 163 ----- p2p/simulations/examples/ping-pong.sh | 40 -- p2p/simulations/http_test.go | 878 ----------------------- p2p/simulations/mocker_test.go | 176 ----- p2p/simulations/network_test.go | 886 ------------------------ rpc/client.go | 2 +- rpc/handler.go | 16 +- rpc/http_test.go | 2 +- rpc/server.go | 7 +- rpc/server_test.go | 2 +- rpc/subscription_test.go | 2 +- rpc/testservice_test.go | 2 +- rpc/websocket_test.go | 2 +- turbo/cli/default_flags.go | 1 + turbo/cli/flags.go | 1 + 30 files changed, 50 insertions(+), 3892 deletions(-) delete mode 100644 node/api.go delete mode 100644 node/api_test.go delete mode 100644 p2p/simulations/adapters/inproc.go delete mode 100644 p2p/simulations/connect_test.go delete mode 100644 p2p/simulations/examples/ping-pong.go delete mode 100755 p2p/simulations/examples/ping-pong.sh delete mode 100644 p2p/simulations/http_test.go delete mode 100644 p2p/simulations/mocker_test.go delete mode 100644 p2p/simulations/network_test.go diff --git a/cmd/rpcdaemon/cli/config.go b/cmd/rpcdaemon/cli/config.go index a8f5c1cbb9e..5ecedf225bd 100644 --- a/cmd/rpcdaemon/cli/config.go +++ b/cmd/rpcdaemon/cli/config.go @@ -93,6 +93,7 @@ func RootCommand() (*cobra.Command, *httpcfg.HttpCfg) { rootCmd.PersistentFlags().BoolVar(&cfg.GRPCHealthCheckEnabled, "grpc.healthcheck", false, "Enable GRPC health check") rootCmd.PersistentFlags().StringVar(&cfg.StarknetGRPCAddress, "starknet.grpc.address", "127.0.0.1:6066", "Starknet GRPC address") rootCmd.PersistentFlags().StringVar(&cfg.JWTSecretPath, utils.JWTSecretPath.Name, utils.JWTSecretPath.Value, "Token to ensure safe connection between CL and EL") + rootCmd.PersistentFlags().BoolVar(&cfg.TraceRequests, utils.HTTPTraceFlag.Name, false, "Trace HTTP requests with INFO level") if err := rootCmd.MarkPersistentFlagFilename("rpc.accessList", "json"); err != nil { panic(err) @@ -432,7 +433,8 @@ func StartRpcServer(ctx context.Context, cfg httpcfg.HttpCfg, rpcAPI []rpc.API) // register apis and create handler stack httpEndpoint := fmt.Sprintf("%s:%d", cfg.HttpListenAddress, cfg.HttpPort) - srv := rpc.NewServer(cfg.RpcBatchConcurrency) + fmt.Printf("TraceRequests = %t\n", cfg.TraceRequests) + srv := rpc.NewServer(cfg.RpcBatchConcurrency, cfg.TraceRequests) allowListForRPC, err := parseAllowListForRPC(cfg.RpcAllowListFilePath) if err != nil { @@ -606,7 +608,7 @@ func createHandler(cfg httpcfg.HttpCfg, apiList []rpc.API, httpHandler http.Hand func createEngineListener(cfg httpcfg.HttpCfg, engineApi []rpc.API) (*http.Server, *rpc.Server, string, error) { engineHttpEndpoint := fmt.Sprintf("%s:%d", cfg.EngineHTTPListenAddress, cfg.EnginePort) - engineSrv := rpc.NewServer(cfg.RpcBatchConcurrency) + engineSrv := rpc.NewServer(cfg.RpcBatchConcurrency, cfg.TraceRequests) allowListForRPC, err := parseAllowListForRPC(cfg.RpcAllowListFilePath) if err != nil { diff --git 
a/cmd/rpcdaemon/cli/httpcfg/http_cfg.go b/cmd/rpcdaemon/cli/httpcfg/http_cfg.go index 6a7d3505110..42e15eb17d5 100644 --- a/cmd/rpcdaemon/cli/httpcfg/http_cfg.go +++ b/cmd/rpcdaemon/cli/httpcfg/http_cfg.go @@ -42,4 +42,5 @@ type HttpCfg struct { GRPCHealthCheckEnabled bool StarknetGRPCAddress string JWTSecretPath string // Engine API Authentication + TraceRequests bool // Always trace requests in INFO level } diff --git a/cmd/rpcdaemon22/cli/config.go b/cmd/rpcdaemon22/cli/config.go index 5e5830e870d..5bce87f69f6 100644 --- a/cmd/rpcdaemon22/cli/config.go +++ b/cmd/rpcdaemon22/cli/config.go @@ -94,6 +94,7 @@ func RootCommand() (*cobra.Command, *httpcfg.HttpCfg) { rootCmd.PersistentFlags().BoolVar(&cfg.GRPCHealthCheckEnabled, "grpc.healthcheck", false, "Enable GRPC health check") rootCmd.PersistentFlags().StringVar(&cfg.StarknetGRPCAddress, "starknet.grpc.address", "127.0.0.1:6066", "Starknet GRPC address") rootCmd.PersistentFlags().StringVar(&cfg.JWTSecretPath, utils.JWTSecretPath.Name, utils.JWTSecretPath.Value, "Token to ensure safe connection between CL and EL") + rootCmd.PersistentFlags().BoolVar(&cfg.TraceRequests, utils.HTTPTraceFlag.Name, false, "Trace HTTP requests with INFO level") if err := rootCmd.MarkPersistentFlagFilename("rpc.accessList", "json"); err != nil { panic(err) @@ -454,7 +455,7 @@ func StartRpcServer(ctx context.Context, cfg httpcfg.HttpCfg, rpcAPI []rpc.API) // register apis and create handler stack httpEndpoint := fmt.Sprintf("%s:%d", cfg.HttpListenAddress, cfg.HttpPort) - srv := rpc.NewServer(cfg.RpcBatchConcurrency) + srv := rpc.NewServer(cfg.RpcBatchConcurrency, cfg.TraceRequests) allowListForRPC, err := parseAllowListForRPC(cfg.RpcAllowListFilePath) if err != nil { @@ -628,7 +629,7 @@ func createHandler(cfg httpcfg.HttpCfg, apiList []rpc.API, httpHandler http.Hand func createEngineListener(cfg httpcfg.HttpCfg, engineApi []rpc.API) (*http.Server, *rpc.Server, string, error) { engineHttpEndpoint := fmt.Sprintf("%s:%d", cfg.EngineHTTPListenAddress, cfg.EnginePort) - engineSrv := rpc.NewServer(cfg.RpcBatchConcurrency) + engineSrv := rpc.NewServer(cfg.RpcBatchConcurrency, cfg.TraceRequests) allowListForRPC, err := parseAllowListForRPC(cfg.RpcAllowListFilePath) if err != nil { diff --git a/cmd/rpcdaemon22/cli/httpcfg/http_cfg.go b/cmd/rpcdaemon22/cli/httpcfg/http_cfg.go index 6a7d3505110..42e15eb17d5 100644 --- a/cmd/rpcdaemon22/cli/httpcfg/http_cfg.go +++ b/cmd/rpcdaemon22/cli/httpcfg/http_cfg.go @@ -42,4 +42,5 @@ type HttpCfg struct { GRPCHealthCheckEnabled bool StarknetGRPCAddress string JWTSecretPath string // Engine API Authentication + TraceRequests bool // Always trace requests in INFO level } diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 31f35b8082a..4a991bd6f60 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -365,6 +365,10 @@ var ( Usage: "Does limit amount of goroutines to process 1 batch request. Means 1 bach request can't overload server. 1 batch still can have unlimited amount of request", Value: 2, } + HTTPTraceFlag = cli.BoolFlag{ + Name: "http.trace", + Usage: "Trace HTTP requests with INFO level", + } DBReadConcurrencyFlag = cli.IntFlag{ Name: "db.read.concurrency", Usage: "Does limit amount of parallel db reads. 
Default: equal to GOMAXPROCS (or number of CPU)", diff --git a/eth/backend.go b/eth/backend.go index b96e0a836d9..291fc943c92 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -540,7 +540,6 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere } // Register the backend on the node - stack.RegisterAPIs(backend.APIs()) stack.RegisterLifecycle(backend) return backend, nil } diff --git a/ethdb/privateapi/ethbackend.go b/ethdb/privateapi/ethbackend.go index 03eb2079031..4719d6c78e6 100644 --- a/ethdb/privateapi/ethbackend.go +++ b/ethdb/privateapi/ethbackend.go @@ -322,6 +322,7 @@ func (s *EthBackendServer) EngineNewPayloadV1(ctx context.Context, req *types2.E if err != nil { return nil, err } + defer tx.Rollback() parentTd, err := rawdb.ReadTd(tx, header.ParentHash, req.BlockNumber-1) if err != nil { return nil, err @@ -437,6 +438,7 @@ func (s *EthBackendServer) EngineForkChoiceUpdatedV1(ctx context.Context, req *r if err != nil { return nil, err } + defer tx1.Rollback() td, err := rawdb.ReadTdByHash(tx1, forkChoice.HeadBlockHash) tx1.Rollback() if err != nil { @@ -479,6 +481,7 @@ func (s *EthBackendServer) EngineForkChoiceUpdatedV1(ctx context.Context, req *r if err != nil { return nil, err } + defer tx2.Rollback() headHash := rawdb.ReadHeadBlockHash(tx2) headNumber := rawdb.ReadHeaderNumber(tx2, headHash) headHeader := rawdb.ReadHeader(tx2, headHash, *headNumber) diff --git a/node/api.go b/node/api.go deleted file mode 100644 index cefa9f59963..00000000000 --- a/node/api.go +++ /dev/null @@ -1,331 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package node - -import ( - "context" - "fmt" - "strings" - - _debug "github.com/ledgerwatch/erigon/common/debug" - "github.com/ledgerwatch/erigon/common/hexutil" - "github.com/ledgerwatch/erigon/crypto" - "github.com/ledgerwatch/erigon/internal/debug" - "github.com/ledgerwatch/erigon/node/nodecfg" - "github.com/ledgerwatch/erigon/p2p" - "github.com/ledgerwatch/erigon/p2p/enode" - "github.com/ledgerwatch/erigon/rpc" -) - -// apis returns the collection of built-in RPC APIs. -func (n *Node) apis() []rpc.API { - return []rpc.API{ - { - Namespace: "admin", - Version: "1.0", - Service: &privateAdminAPI{n}, - }, { - Namespace: "admin", - Version: "1.0", - Service: &publicAdminAPI{n}, - Public: true, - }, { - Namespace: "debug", - Version: "1.0", - Service: debug.Handler, - }, { - Namespace: "web3", - Version: "1.0", - Service: &publicWeb3API{n}, - Public: true, - }, - } -} - -// privateAdminAPI is the collection of administrative API methods exposed only -// over a secure RPC channel. 
-type privateAdminAPI struct { - node *Node // Node interfaced by this API -} - -// AddPeer requests connecting to a remote node, and also maintaining the new -// connection at all times, even reconnecting if it is lost. -func (api *privateAdminAPI) AddPeer(url string) (bool, error) { - // Make sure the server is running, fail otherwise - server := api.node.Server() - if server == nil { - return false, ErrNodeStopped - } - // Try to add the url as a static peer and return - node, err := enode.Parse(enode.ValidSchemes, url) - if err != nil { - return false, fmt.Errorf("invalid enode: %w", err) - } - server.AddPeer(node) - return true, nil -} - -// RemovePeer disconnects from a remote node if the connection exists -func (api *privateAdminAPI) RemovePeer(url string) (bool, error) { - // Make sure the server is running, fail otherwise - server := api.node.Server() - if server == nil { - return false, ErrNodeStopped - } - // Try to remove the url as a static peer and return - node, err := enode.Parse(enode.ValidSchemes, url) - if err != nil { - return false, fmt.Errorf("invalid enode: %w", err) - } - server.RemovePeer(node) - return true, nil -} - -// AddTrustedPeer allows a remote node to always connect, even if slots are full -func (api *privateAdminAPI) AddTrustedPeer(url string) (bool, error) { - // Make sure the server is running, fail otherwise - server := api.node.Server() - if server == nil { - return false, ErrNodeStopped - } - node, err := enode.Parse(enode.ValidSchemes, url) - if err != nil { - return false, fmt.Errorf("invalid enode: %w", err) - } - server.AddTrustedPeer(node) - return true, nil -} - -// RemoveTrustedPeer removes a remote node from the trusted peer set, but it -// does not disconnect it automatically. -func (api *privateAdminAPI) RemoveTrustedPeer(url string) (bool, error) { - // Make sure the server is running, fail otherwise - server := api.node.Server() - if server == nil { - return false, ErrNodeStopped - } - node, err := enode.Parse(enode.ValidSchemes, url) - if err != nil { - return false, fmt.Errorf("invalid enode: %w", err) - } - server.RemoveTrustedPeer(node) - return true, nil -} - -// PeerEvents creates an RPC subscription which receives peer events from the -// node's p2p.Server -func (api *privateAdminAPI) PeerEvents(ctx context.Context) (*rpc.Subscription, error) { - // Make sure the server is running, fail otherwise - server := api.node.Server() - if server == nil { - return nil, ErrNodeStopped - } - - // Create the subscription - notifier, supported := rpc.NotifierFromContext(ctx) - if !supported { - return nil, rpc.ErrNotificationsUnsupported - } - rpcSub := notifier.CreateSubscription() - - go func() { - defer _debug.LogPanic() - events := make(chan *p2p.PeerEvent) - sub := server.SubscribeEvents(events) - defer sub.Unsubscribe() - - for { - select { - case event := <-events: - notifier.Notify(rpcSub.ID, event) - case <-sub.Err(): - return - case <-rpcSub.Err(): - return - case <-notifier.Closed(): - return - } - } - }() - - return rpcSub, nil -} - -// StartRPC starts the HTTP RPC API server. -func (api *privateAdminAPI) StartRPC(host *string, port *int, cors *string, apis *string, vhosts *string) (bool, error) { - api.node.lock.Lock() - defer api.node.lock.Unlock() - - // Determine host and port. - if host == nil { - h := nodecfg.DefaultHTTPHost - if api.node.config.HTTPHost != "" { - h = api.node.config.HTTPHost - } - host = &h - } - if port == nil { - port = &api.node.config.HTTPPort - } - - // Determine config. 
- config := httpConfig{ - CorsAllowedOrigins: api.node.config.HTTPCors, - Vhosts: api.node.config.HTTPVirtualHosts, - Modules: api.node.config.HTTPModules, - } - if cors != nil { - config.CorsAllowedOrigins = nil - for _, origin := range strings.Split(*cors, ",") { - config.CorsAllowedOrigins = append(config.CorsAllowedOrigins, strings.TrimSpace(origin)) - } - } - if vhosts != nil { - config.Vhosts = nil - for _, vhost := range strings.Split(*host, ",") { - config.Vhosts = append(config.Vhosts, strings.TrimSpace(vhost)) - } - } - if apis != nil { - config.Modules = nil - for _, m := range strings.Split(*apis, ",") { - config.Modules = append(config.Modules, strings.TrimSpace(m)) - } - } - - if err := api.node.http.setListenAddr(*host, *port); err != nil { - return false, err - } - if err := api.node.http.enableRPC(api.node.rpcAPIs, config, nil); err != nil { - return false, err - } - if err := api.node.http.start(); err != nil { - return false, err - } - return true, nil -} - -// StopRPC shuts down the HTTP server. -func (api *privateAdminAPI) StopRPC() (bool, error) { - api.node.http.stop() - return true, nil -} - -// StartWS starts the websocket RPC API server. -func (api *privateAdminAPI) StartWS(host *string, port *int, allowedOrigins *string, apis *string) (bool, error) { - api.node.lock.Lock() - defer api.node.lock.Unlock() - - // Determine host and port. - if host == nil { - h := nodecfg.DefaultWSHost - if api.node.config.WSHost != "" { - h = api.node.config.WSHost - } - host = &h - } - if port == nil { - port = &api.node.config.WSPort - } - - // Determine config. - config := wsConfig{ - Modules: api.node.config.WSModules, - Origins: api.node.config.WSOrigins, - // ExposeAll: api.node.config.WSExposeAll, - } - if apis != nil { - config.Modules = nil - for _, m := range strings.Split(*apis, ",") { - config.Modules = append(config.Modules, strings.TrimSpace(m)) - } - } - if allowedOrigins != nil { - config.Origins = nil - for _, origin := range strings.Split(*allowedOrigins, ",") { - config.Origins = append(config.Origins, strings.TrimSpace(origin)) - } - } - - // Enable WebSocket on the server. - server := api.node.wsServerForPort(*port) - if err := server.setListenAddr(*host, *port); err != nil { - return false, err - } - if err := server.enableWS(api.node.rpcAPIs, config, nil); err != nil { - return false, err - } - if err := server.start(); err != nil { - return false, err - } - api.node.http.log.Info("WebSocket endpoint opened", "url", api.node.WSEndpoint()) - return true, nil -} - -// StopWS terminates all WebSocket servers. -func (api *privateAdminAPI) StopWS() (bool, error) { - api.node.http.stopWS() - api.node.ws.stop() - return true, nil -} - -// publicAdminAPI is the collection of administrative API methods exposed over -// both secure and unsecure RPC channels. -type publicAdminAPI struct { - node *Node // Node interfaced by this API -} - -// Peers retrieves all the information we know about each individual peer at the -// protocol granularity. -func (api *publicAdminAPI) Peers() ([]*p2p.PeerInfo, error) { - server := api.node.Server() - if server == nil { - return nil, ErrNodeStopped - } - return server.PeersInfo(), nil -} - -// NodeInfo retrieves all the information we know about the host node at the -// protocol granularity. 
-func (api *publicAdminAPI) NodeInfo() (*p2p.NodeInfo, error) { - server := api.node.Server() - if server == nil { - return nil, ErrNodeStopped - } - return server.NodeInfo(), nil -} - -// Datadir retrieves the current data directory the node is using. -func (api *publicAdminAPI) Datadir() string { - return api.node.DataDir() -} - -// publicWeb3API offers helper utils -type publicWeb3API struct { - stack *Node -} - -// ClientVersion returns the node name -func (s *publicWeb3API) ClientVersion() string { - return s.stack.Server().Name -} - -// Sha3 applies the ethereum sha3 implementation on the input. -// It assumes the input is hex encoded. -func (s *publicWeb3API) Sha3(input hexutil.Bytes) hexutil.Bytes { - return crypto.Keccak256(input) -} diff --git a/node/api_test.go b/node/api_test.go deleted file mode 100644 index 99ccc2e9fa9..00000000000 --- a/node/api_test.go +++ /dev/null @@ -1,357 +0,0 @@ -// Copyright 2020 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package node - -import ( - "bytes" - "io" - "net" - "net/http" - "net/url" - "runtime" - "strings" - "testing" - - "github.com/ledgerwatch/erigon/node/nodecfg" - "github.com/ledgerwatch/erigon/rpc" - "github.com/stretchr/testify/assert" -) - -// This test uses the admin_startRPC and admin_startWS APIs, -// checking whether the HTTP server is started correctly. -func TestStartRPC(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("fix me on win please") - } - type test struct { - name string - cfg nodecfg.Config - fn func(*testing.T, *Node, *privateAdminAPI) - - // Checks. These run after the node is configured and all API calls have been made. - wantReachable bool // whether the HTTP server should be reachable at all - wantHandlers bool // whether RegisterHandler handlers should be accessible - wantRPC bool // whether JSON-RPC/HTTP should be accessible - wantWS bool // whether JSON-RPC/WS should be accessible - } - - tests := []test{ - { - name: "all off", - cfg: nodecfg.Config{}, - fn: func(t *testing.T, n *Node, api *privateAdminAPI) { - }, - wantReachable: false, - wantHandlers: false, - wantRPC: false, - wantWS: false, - }, - { - name: "rpc enabled through config", - cfg: nodecfg.Config{HTTPHost: "127.0.0.1"}, - fn: func(t *testing.T, n *Node, api *privateAdminAPI) { - }, - wantReachable: true, - wantHandlers: true, - wantRPC: true, - wantWS: false, - }, - { - name: "rpc enabled through API", - cfg: nodecfg.Config{}, - fn: func(t *testing.T, n *Node, api *privateAdminAPI) { - _, err := api.StartRPC(sp("127.0.0.1"), ip(0), nil, nil, nil) - assert.NoError(t, err) - }, - wantReachable: true, - wantHandlers: true, - wantRPC: true, - wantWS: false, - }, - { - name: "rpc start again after failure", - cfg: nodecfg.Config{}, - fn: func(t *testing.T, n *Node, api *privateAdminAPI) { - // Listen on a random port. 
- listener, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatal("can't listen:", err) - } - defer listener.Close() - port := listener.Addr().(*net.TCPAddr).Port - - // Now try to start RPC on that port. This should fail. - _, err = api.StartRPC(sp("127.0.0.1"), ip(port), nil, nil, nil) - if err == nil { - t.Fatal("StartRPC should have failed on port", port) - } - - // Try again after unblocking the port. It should work this time. - listener.Close() - _, err = api.StartRPC(sp("127.0.0.1"), ip(port), nil, nil, nil) - assert.NoError(t, err) - }, - wantReachable: true, - wantHandlers: true, - wantRPC: true, - wantWS: false, - }, - { - name: "rpc stopped through API", - cfg: nodecfg.Config{HTTPHost: "127.0.0.1"}, - fn: func(t *testing.T, n *Node, api *privateAdminAPI) { - _, err := api.StopRPC() - assert.NoError(t, err) - }, - wantReachable: false, - wantHandlers: false, - wantRPC: false, - wantWS: false, - }, - { - name: "rpc stopped twice", - cfg: nodecfg.Config{HTTPHost: "127.0.0.1"}, - fn: func(t *testing.T, n *Node, api *privateAdminAPI) { - _, err := api.StopRPC() - assert.NoError(t, err) - - _, err = api.StopRPC() - assert.NoError(t, err) - }, - wantReachable: false, - wantHandlers: false, - wantRPC: false, - wantWS: false, - }, - { - name: "ws enabled through config", - cfg: nodecfg.Config{WSHost: "127.0.0.1"}, - wantReachable: true, - wantHandlers: false, - wantRPC: false, - wantWS: true, - }, - { - name: "ws enabled through API", - cfg: nodecfg.Config{}, - fn: func(t *testing.T, n *Node, api *privateAdminAPI) { - _, err := api.StartWS(sp("127.0.0.1"), ip(0), nil, nil) - assert.NoError(t, err) - }, - wantReachable: true, - wantHandlers: false, - wantRPC: false, - wantWS: true, - }, - { - name: "ws stopped through API", - cfg: nodecfg.Config{WSHost: "127.0.0.1"}, - fn: func(t *testing.T, n *Node, api *privateAdminAPI) { - _, err := api.StopWS() - assert.NoError(t, err) - }, - wantReachable: false, - wantHandlers: false, - wantRPC: false, - wantWS: false, - }, - { - name: "ws stopped twice", - cfg: nodecfg.Config{WSHost: "127.0.0.1"}, - fn: func(t *testing.T, n *Node, api *privateAdminAPI) { - _, err := api.StopWS() - assert.NoError(t, err) - - _, err = api.StopWS() - assert.NoError(t, err) - }, - wantReachable: false, - wantHandlers: false, - wantRPC: false, - wantWS: false, - }, - { - name: "ws enabled after RPC", - cfg: nodecfg.Config{HTTPHost: "127.0.0.1"}, - fn: func(t *testing.T, n *Node, api *privateAdminAPI) { - wsport := n.http.port - _, err := api.StartWS(sp("127.0.0.1"), ip(wsport), nil, nil) - assert.NoError(t, err) - }, - wantReachable: true, - wantHandlers: true, - wantRPC: true, - wantWS: true, - }, - { - name: "ws enabled after RPC then stopped", - cfg: nodecfg.Config{HTTPHost: "127.0.0.1"}, - fn: func(t *testing.T, n *Node, api *privateAdminAPI) { - wsport := n.http.port - _, err := api.StartWS(sp("127.0.0.1"), ip(wsport), nil, nil) - assert.NoError(t, err) - - _, err = api.StopWS() - assert.NoError(t, err) - }, - wantReachable: true, - wantHandlers: true, - wantRPC: true, - wantWS: false, - }, - { - name: "rpc stopped with ws enabled", - fn: func(t *testing.T, n *Node, api *privateAdminAPI) { - _, err := api.StartRPC(sp("127.0.0.1"), ip(0), nil, nil, nil) - assert.NoError(t, err) - - wsport := n.http.port - _, err = api.StartWS(sp("127.0.0.1"), ip(wsport), nil, nil) - assert.NoError(t, err) - - _, err = api.StopRPC() - assert.NoError(t, err) - }, - wantReachable: false, - wantHandlers: false, - wantRPC: false, - wantWS: false, - }, - { - name: "rpc 
enabled after ws", - fn: func(t *testing.T, n *Node, api *privateAdminAPI) { - _, err := api.StartWS(sp("127.0.0.1"), ip(0), nil, nil) - assert.NoError(t, err) - - wsport := n.http.port - _, err = api.StartRPC(sp("127.0.0.1"), ip(wsport), nil, nil, nil) - assert.NoError(t, err) - }, - wantReachable: true, - wantHandlers: true, - wantRPC: true, - wantWS: true, - }, - } - - for _, test := range tests { - test := test - t.Run(test.name, func(t *testing.T) { - t.Parallel() - - // Apply some sane defaults. - config := test.cfg //nolint:scopelint - // config.Log = testlog.Log(t, log.LvlDebug) - config.P2P.NoDiscovery = true - - // Create Node. - stack, err := New(&config) - if err != nil { - t.Fatal("can't create node:", err) - } - defer stack.Close() - - // Register the test handler. - stack.RegisterHandler("test", "/test", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte("OK")) //nolint:errcheck - })) - - if err := stack.Start(); err != nil { - t.Fatal("can't start node:", err) - } - - // Run the API call hook. - if test.fn != nil { //nolint:scopelint - test.fn(t, stack, &privateAdminAPI{stack}) //nolint:scopelint - } - - // Check if the HTTP endpoints are available. - baseURL := stack.HTTPEndpoint() - reachable := checkReachable(baseURL) - handlersAvailable := checkBodyOK(baseURL + "/test") - rpcAvailable := checkRPC(baseURL) - wsAvailable := checkRPC(strings.Replace(baseURL, "http://", "ws://", 1)) - if reachable != test.wantReachable { //nolint:scopelint - t.Errorf("HTTP server is %sreachable, want it %sreachable", not(reachable), not(test.wantReachable)) //nolint:scopelint - } - if handlersAvailable != test.wantHandlers { //nolint:scopelint - t.Errorf("RegisterHandler handlers %savailable, want them %savailable", not(handlersAvailable), not(test.wantHandlers)) //nolint:scopelint - } - if rpcAvailable != test.wantRPC { //nolint:scopelint - t.Errorf("HTTP RPC %savailable, want it %savailable", not(rpcAvailable), not(test.wantRPC)) //nolint:scopelint - } - if wsAvailable != test.wantWS { //nolint:scopelint - t.Errorf("WS RPC %savailable, want it %savailable", not(wsAvailable), not(test.wantWS)) //nolint:scopelint - } - }) - } -} - -// checkReachable checks if the TCP endpoint in rawurl is open. -func checkReachable(rawurl string) bool { - u, err := url.Parse(rawurl) - if err != nil { - panic(err) - } - conn, err := net.Dial("tcp", u.Host) - if err != nil { - return false - } - conn.Close() - return true -} - -// checkBodyOK checks whether the given HTTP URL responds with 200 OK and body "OK". -func checkBodyOK(url string) bool { - resp, err := http.Get(url) - if err != nil { - return false - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - return false - } - buf := make([]byte, 2) - if _, err = io.ReadFull(resp.Body, buf); err != nil { - return false - } - return bytes.Equal(buf, []byte("OK")) -} - -// checkRPC checks whether JSON-RPC works against the given URL. -func checkRPC(url string) bool { - c, err := rpc.Dial(url) - if err != nil { - return false - } - defer c.Close() - - _, err = c.SupportedModules() - return err == nil -} - -// string/int pointer helpers. 
-func sp(s string) *string { return &s } -func ip(i int) *int { return &i } - -func not(ok bool) string { - if ok { - return "" - } - return "not " -} diff --git a/node/node.go b/node/node.go index e0f07f43c4b..f43713acc31 100644 --- a/node/node.go +++ b/node/node.go @@ -21,7 +21,6 @@ import ( "errors" "fmt" "net" - "net/http" "os" "path/filepath" "reflect" @@ -31,7 +30,6 @@ import ( "github.com/c2h5oh/datasize" "github.com/ledgerwatch/erigon/node/nodecfg" "github.com/ledgerwatch/erigon/params" - "github.com/ledgerwatch/erigon/rpc/rpccfg" "github.com/gofrs/flock" "github.com/ledgerwatch/erigon-lib/kv" @@ -39,7 +37,6 @@ import ( "github.com/ledgerwatch/erigon-lib/kv/memdb" "github.com/ledgerwatch/erigon/migrations" "github.com/ledgerwatch/erigon/p2p" - "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/log/v3" ) @@ -53,14 +50,8 @@ type Node struct { startStopLock sync.Mutex // Start/Stop are protected by an additional lock state int // Tracks state of node lifecycle - lock sync.Mutex - lifecycles []Lifecycle // All registered backends, services, and auxiliary services that have a lifecycle - rpcAPIs []rpc.API // List of APIs currently provided by the node - http *httpServer // - ws *httpServer // - inprocHandler *rpc.Server // In-process RPC request handler to process the API requests - - rpcAllowList rpc.AllowList // list of RPC methods explicitly allowed for this RPC node + lock sync.Mutex + lifecycles []Lifecycle // All registered backends, services, and auxiliary services that have a lifecycle databases []kv.Closer } @@ -91,38 +82,17 @@ func New(conf *nodecfg.Config) (*Node, error) { } node := &Node{ - config: conf, - inprocHandler: rpc.NewServer(50), - log: conf.Log, - stop: make(chan struct{}), - databases: make([]kv.Closer, 0), + config: conf, + log: conf.Log, + stop: make(chan struct{}), + databases: make([]kv.Closer, 0), } - // Register built-in APIs. - node.rpcAPIs = append(node.rpcAPIs, node.apis()...) // Acquire the instance directory lock. if err := node.openDataDir(); err != nil { return nil, err } - var err error - // Initialize the p2p server. This creates the node key and discovery databases. - - // Check HTTP/WS prefixes are valid. - if err = validatePrefix("HTTP", conf.HTTPPathPrefix); err != nil { - return nil, err - } - if err = validatePrefix("WebSocket", conf.WSPathPrefix); err != nil { - return nil, err - } - - // Configure RPC servers. - node.http = newHTTPServer(node.log, conf.HTTPTimeouts) - node.ws = newHTTPServer(node.log, rpccfg.DefaultHTTPTimeouts) - // Check for uncaught crashes from the previous boot and notify the user if - // there are any - //debug.CheckForCrashes(conf.DataDir) - return node, nil } @@ -145,20 +115,14 @@ func (n *Node) Start() error { return ErrNodeStopped } n.state = runningState - // open networking and RPC endpoints - err := n.openEndpoints() lifecycles := make([]Lifecycle, len(n.lifecycles)) copy(lifecycles, n.lifecycles) n.lock.Unlock() - // Check if endpoint startup failed. - if err != nil { - n.doClose(nil) - return err - } // Start all registered lifecycles. // preallocation leads to bugs here var started []Lifecycle //nolint:prealloc + var err error for _, lifecycle := range lifecycles { if err = lifecycle.Start(); err != nil { break @@ -228,16 +192,6 @@ func (n *Node) doClose(errs []error) error { } } -// openEndpoints starts all network and RPC endpoints. 
-func (n *Node) openEndpoints() error { - // start RPC endpoints - err := n.startRPC() - if err != nil { - n.stopRPC() - } - return err -} - // containsLifecycle checks if 'lfs' contains 'l'. func containsLifecycle(lfs []Lifecycle, l Lifecycle) bool { for _, obj := range lfs { @@ -300,85 +254,6 @@ func (n *Node) closeDataDir() { } } -// SetAllowListForRPC sets granular allow list for exposed RPC methods -func (n *Node) SetAllowListForRPC(allowList rpc.AllowList) { - n.rpcAllowList = allowList -} - -// configureRPC is a helper method to configure all the various RPC endpoints during node -// startup. It's not meant to be called at any time afterwards as it makes certain -// assumptions about the state of the node. -func (n *Node) startRPC() error { - if err := n.startInProc(); err != nil { - return err - } - - // Configure HTTP. - if n.config.HTTPHost != "" { - config := httpConfig{ - CorsAllowedOrigins: n.config.HTTPCors, - Vhosts: n.config.HTTPVirtualHosts, - Modules: n.config.HTTPModules, - prefix: n.config.HTTPPathPrefix, - } - if err := n.http.setListenAddr(n.config.HTTPHost, n.config.HTTPPort); err != nil { - return err - } - if err := n.http.enableRPC(n.rpcAPIs, config, n.rpcAllowList); err != nil { - return err - } - } - - // Configure WebSocket. - if n.config.WSHost != "" { - server := n.wsServerForPort(n.config.WSPort) - config := wsConfig{ - Modules: n.config.WSModules, - Origins: n.config.WSOrigins, - prefix: n.config.WSPathPrefix, - } - if err := server.setListenAddr(n.config.WSHost, n.config.WSPort); err != nil { - return err - } - if err := server.enableWS(n.rpcAPIs, config, n.rpcAllowList); err != nil { - return err - } - } - - if err := n.http.start(); err != nil { - return err - } - return n.ws.start() -} - -func (n *Node) wsServerForPort(port int) *httpServer { - if n.config.HTTPHost == "" || n.http.port == port { - return n.http - } - return n.ws -} - -func (n *Node) stopRPC() { - n.http.stop() - n.ws.stop() - n.stopInProc() -} - -// startInProc registers all RPC APIs on the inproc server. -func (n *Node) startInProc() error { - for _, api := range n.rpcAPIs { - if err := n.inprocHandler.RegisterName(api.Namespace, api.Service); err != nil { - return err - } - } - return nil -} - -// stopInProc terminates the in-process RPC endpoint. -func (n *Node) stopInProc() { - n.inprocHandler.Stop() -} - // Wait blocks until the node is closed. func (n *Node) Wait() { <-n.stop @@ -408,83 +283,16 @@ func (n *Node) RegisterProtocols(protocols []p2p.Protocol) { } } -// RegisterAPIs registers the APIs a service provides on the node. -func (n *Node) RegisterAPIs(apis []rpc.API) { - n.lock.Lock() - defer n.lock.Unlock() - - if n.state != initializingState { - panic("can't register APIs on running/stopped node") - } - n.rpcAPIs = append(n.rpcAPIs, apis...) -} - -// RegisterHandler mounts a handler on the given path on the canonical HTTP server. -// -// The name of the handler is shown in a log message when the HTTP server starts -// and should be a descriptive term for the service provided by the handler. -func (n *Node) RegisterHandler(name, path string, handler http.Handler) { - n.lock.Lock() - defer n.lock.Unlock() - - if n.state != initializingState { - panic("can't register HTTP handler on running/stopped node") - } - - n.http.mux.Handle(path, handler) - n.http.handlerNames[path] = name -} - -// Attach creates an RPC client attached to an in-process API handler. 
-func (n *Node) Attach() (*rpc.Client, error) { - return rpc.DialInProc(n.inprocHandler), nil -} - -// RPCHandler returns the in-process RPC request handler. -func (n *Node) RPCHandler() (*rpc.Server, error) { - n.lock.Lock() - defer n.lock.Unlock() - - if n.state == closedState { - return nil, ErrNodeStopped - } - return n.inprocHandler, nil -} - // Config returns the configuration of node. func (n *Node) Config() *nodecfg.Config { return n.config } -// Server retrieves the currently running P2P network layer. This method is meant -// only to inspect fields of the currently running server. Callers should not -// start or stop the returned server. -func (n *Node) Server() *p2p.Server { - n.lock.Lock() - defer n.lock.Unlock() - - return n.server -} - // DataDir retrieves the current datadir used by the protocol stack. func (n *Node) DataDir() string { return n.config.Dirs.DataDir } -// HTTPEndpoint returns the URL of the HTTP server. Note that this URL does not -// contain the JSON-RPC path prefix set by HTTPPathPrefix. -func (n *Node) HTTPEndpoint() string { - return "http://" + n.http.listenAddr() -} - -// WSEndpoint returns the current JSON-RPC over WebSocket endpoint. -func (n *Node) WSEndpoint() string { - if n.http.wsAllowed() { - return "ws://" + n.http.listenAddr() + n.http.wsConfig.prefix - } - return "ws://" + n.ws.listenAddr() + n.ws.wsConfig.prefix -} - func OpenDatabase(config *nodecfg.Config, logger log.Logger, label kv.Label) (kv.RwDB, error) { var name string switch label { diff --git a/node/node_test.go b/node/node_test.go index 891a11abf5b..d27d5c82113 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -19,13 +19,8 @@ package node import ( "context" "errors" - "fmt" - "io" - "net" - "net/http" "reflect" "runtime" - "strings" "testing" "github.com/ledgerwatch/erigon-lib/kv" @@ -33,11 +28,8 @@ import ( "github.com/ledgerwatch/erigon/node/nodecfg" "github.com/ledgerwatch/erigon/node/nodecfg/datadir" "github.com/ledgerwatch/erigon/p2p" - "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/log/v3" "github.com/stretchr/testify/require" - - "github.com/stretchr/testify/assert" ) var ( @@ -159,11 +151,6 @@ func TestRegisterProtocols(t *testing.T) { } } - for _, api := range fs.APIs() { - if !containsAPI(stack.rpcAPIs, api) { - t.Fatalf("api %v was not successfully registered", api) - } - } } // This test checks that open databases are closed with node. 
@@ -422,253 +409,6 @@ func TestLifecycleTerminationGuarantee(t *testing.T) { stack.server.PrivateKey = testNodeKey } -// Tests whether a handler can be successfully mounted on the canonical HTTP server -// on the given prefix -func TestRegisterHandler_Successful(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("fix me on win please") - } - - node := createNode(t, 7878, 7979) - - // create and mount handler - handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte("success")) //nolint:errcheck - }) - node.RegisterHandler("test", "/test", handler) - - // start node - if err := node.Start(); err != nil { - t.Fatalf("could not start node: %v", err) - } - - // create HTTP request - httpReq, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:7878/test", nil) - if err != nil { - t.Error("could not issue new http request ", err) - } - - // check response - resp := doHTTPRequest(t, httpReq) - defer resp.Body.Close() - buf := make([]byte, 7) - _, err = io.ReadFull(resp.Body, buf) - if err != nil { - t.Fatalf("could not read response: %v", err) - } - assert.Equal(t, "success", string(buf)) -} - -// Tests that the given handler will not be successfully mounted since no HTTP server -// is enabled for RPC -func TestRegisterHandler_Unsuccessful(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("fix me on win please") - } - - node, err := New(&nodecfg.DefaultConfig) - if err != nil { - t.Fatalf("could not create new node: %v", err) - } - - // create and mount handler - handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte("success")) //nolint:errcheck - }) - node.RegisterHandler("test", "/test", handler) -} - -// Tests whether websocket requests can be handled on the same port as a regular http server. -func TestWebsocketHTTPOnSamePort_WebsocketRequest(t *testing.T) { - node := startHTTP(t, 0, 0) - defer node.Close() - - ws := strings.Replace(node.HTTPEndpoint(), "http://", "ws://", 1) - - if node.WSEndpoint() != ws { - t.Fatalf("endpoints should be the same") - } - if !checkRPC(ws) { - t.Fatalf("ws request failed") - } - if !checkRPC(node.HTTPEndpoint()) { - t.Fatalf("http request failed") - } -} - -func TestWebsocketHTTPOnSeparatePort_WSRequest(t *testing.T) { - // try and get a free port - listener, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatal("can't listen:", err) - } - port := listener.Addr().(*net.TCPAddr).Port - listener.Close() - - node := startHTTP(t, 0, port) - defer node.Close() - - wsOnHTTP := strings.Replace(node.HTTPEndpoint(), "http://", "ws://", 1) - ws := fmt.Sprintf("ws://127.0.0.1:%d", port) - - if node.WSEndpoint() == wsOnHTTP { - t.Fatalf("endpoints should not be the same") - } - // ensure ws endpoint matches the expected endpoint - if node.WSEndpoint() != ws { - t.Fatalf("ws endpoint is incorrect: expected %s, got %s", ws, node.WSEndpoint()) - } - - if !checkRPC(ws) { - t.Fatalf("ws request failed") - } - if !checkRPC(node.HTTPEndpoint()) { - t.Fatalf("http request failed") - } -} - -type rpcPrefixTest struct { - httpPrefix, wsPrefix string - // These lists paths on which JSON-RPC should be served / not served. 
- wantHTTP []string - wantNoHTTP []string - wantWS []string - wantNoWS []string -} - -func TestNodeRPCPrefix(t *testing.T) { - t.Parallel() - - tests := []rpcPrefixTest{ - // both off - { - httpPrefix: "", wsPrefix: "", - wantHTTP: []string{"/", "/?p=1"}, - wantNoHTTP: []string{"/test", "/test?p=1"}, - wantWS: []string{"/", "/?p=1"}, - wantNoWS: []string{"/test", "/test?p=1"}, - }, - // only http prefix - { - httpPrefix: "/testprefix", wsPrefix: "", - wantHTTP: []string{"/testprefix", "/testprefix?p=1", "/testprefix/x", "/testprefix/x?p=1"}, - wantNoHTTP: []string{"/", "/?p=1", "/test", "/test?p=1"}, - wantWS: []string{"/", "/?p=1"}, - wantNoWS: []string{"/testprefix", "/testprefix?p=1", "/test", "/test?p=1"}, - }, - // only ws prefix - { - httpPrefix: "", wsPrefix: "/testprefix", - wantHTTP: []string{"/", "/?p=1"}, - wantNoHTTP: []string{"/testprefix", "/testprefix?p=1", "/test", "/test?p=1"}, - wantWS: []string{"/testprefix", "/testprefix?p=1", "/testprefix/x", "/testprefix/x?p=1"}, - wantNoWS: []string{"/", "/?p=1", "/test", "/test?p=1"}, - }, - // both set - { - httpPrefix: "/testprefix", wsPrefix: "/testprefix", - wantHTTP: []string{"/testprefix", "/testprefix?p=1", "/testprefix/x", "/testprefix/x?p=1"}, - wantNoHTTP: []string{"/", "/?p=1", "/test", "/test?p=1"}, - wantWS: []string{"/testprefix", "/testprefix?p=1", "/testprefix/x", "/testprefix/x?p=1"}, - wantNoWS: []string{"/", "/?p=1", "/test", "/test?p=1"}, - }, - } - - for _, test := range tests { - test := test - name := fmt.Sprintf("http=%s ws=%s", test.httpPrefix, test.wsPrefix) - t.Run(name, func(t *testing.T) { - cfg := &nodecfg.Config{ - HTTPHost: "127.0.0.1", - HTTPPathPrefix: test.httpPrefix, - WSHost: "127.0.0.1", - WSPathPrefix: test.wsPrefix, - } - node, err := New(cfg) - if err != nil { - t.Fatal("can't create node:", err) - } - defer node.Close() - if err := node.Start(); err != nil { - t.Fatal("can't start node:", err) - } - test.check(t, node) - }) - } -} - -func (test rpcPrefixTest) check(t *testing.T, node *Node) { - t.Helper() - httpBase := "http://" + node.http.listenAddr() - wsBase := "ws://" + node.http.listenAddr() - - if node.WSEndpoint() != wsBase+test.wsPrefix { - t.Errorf("Error: node has wrong WSEndpoint %q", node.WSEndpoint()) - } - - for _, path := range test.wantHTTP { - resp := rpcRequest(t, httpBase+path) - if resp.StatusCode != 200 { - t.Errorf("Error: %s: bad status code %d, want 200", path, resp.StatusCode) - } - resp.Body.Close() - } - for _, path := range test.wantNoHTTP { - resp := rpcRequest(t, httpBase+path) - if resp.StatusCode != 404 { - t.Errorf("Error: %s: bad status code %d, want 404", path, resp.StatusCode) - } - resp.Body.Close() - } - for _, path := range test.wantWS { - err := wsRequest(t, wsBase+path, "") - if err != nil { - t.Errorf("Error: %s: WebSocket connection failed: %v", path, err) - } - } - for _, path := range test.wantNoWS { - err := wsRequest(t, wsBase+path, "") - if err == nil { - t.Errorf("Error: %s: WebSocket connection succeeded for path in wantNoWS", path) - } - - } -} - -func createNode(t *testing.T, httpPort, wsPort int) *Node { - conf := &nodecfg.Config{ - HTTPHost: "127.0.0.1", - HTTPPort: httpPort, - WSHost: "127.0.0.1", - WSPort: wsPort, - } - node, err := New(conf) - if err != nil { - t.Fatalf("could not create a new node: %v", err) - } - return node -} - -func startHTTP(t *testing.T, httpPort, wsPort int) *Node { - node := createNode(t, httpPort, wsPort) - err := node.Start() - if err != nil { - t.Fatalf("could not start http service on node: %v", err) - } 
- - return node -} - -func doHTTPRequest(t *testing.T, req *http.Request) *http.Response { - client := http.DefaultClient - resp, err := client.Do(req) - if err != nil { - t.Fatalf("could not issue a GET request to the given endpoint: %v", err) - - } - return resp -} - func containsProtocol(stackProtocols []p2p.Protocol, protocol p2p.Protocol) bool { for _, a := range stackProtocols { if reflect.DeepEqual(a, protocol) { @@ -677,12 +417,3 @@ func containsProtocol(stackProtocols []p2p.Protocol, protocol p2p.Protocol) bool } return false } - -func containsAPI(stackAPIs []rpc.API, api rpc.API) bool { - for _, a := range stackAPIs { - if reflect.DeepEqual(a, api) { - return true - } - } - return false -} diff --git a/node/rpcstack.go b/node/rpcstack.go index 90865448c04..42da30d1847 100644 --- a/node/rpcstack.go +++ b/node/rpcstack.go @@ -223,23 +223,6 @@ func checkPath(r *http.Request, path string) bool { return len(r.URL.Path) >= len(path) && r.URL.Path[:len(path)] == path } -// validatePrefix checks if 'path' is a valid configuration value for the RPC prefix option. -func validatePrefix(what, path string) error { - if path == "" { - return nil - } - if path[0] != '/' { - return fmt.Errorf(`%s RPC path prefix %q does not contain leading "/"`, what, path) - } - if strings.ContainsAny(path, "?#") { - // This is just to avoid confusion. While these would match correctly (i.e. they'd - // match if URL-escaped into path), it's not easy to understand for users when - // setting that on the command line. - return fmt.Errorf("%s RPC path prefix %q contains URL meta-characters", what, path) - } - return nil -} - // stop shuts down the HTTP server. func (h *httpServer) stop() { h.mu.Lock() @@ -282,7 +265,7 @@ func (h *httpServer) enableRPC(apis []rpc.API, config httpConfig, allowList rpc. } // Create RPC server and handler. - srv := rpc.NewServer(50) + srv := rpc.NewServer(50, false /* traceRequests */) srv.SetAllowList(allowList) if err := RegisterApisFromWhitelist(apis, config.Modules, srv, false); err != nil { return err @@ -315,7 +298,7 @@ func (h *httpServer) enableWS(apis []rpc.API, config wsConfig, allowList rpc.All } // Create RPC server and handler. - srv := rpc.NewServer(50) + srv := rpc.NewServer(50, false /* traceRequests */) srv.SetAllowList(allowList) if err := RegisterApisFromWhitelist(apis, config.Modules, srv, false); err != nil { return err diff --git a/node/utils_test.go b/node/utils_test.go index b7a82777cf0..0a347674326 100644 --- a/node/utils_test.go +++ b/node/utils_test.go @@ -21,7 +21,6 @@ package node import ( "github.com/ledgerwatch/erigon/p2p" - "github.com/ledgerwatch/erigon/rpc" ) // NoopLifecycle is a trivial implementation of the Service interface. 
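The rpcstack.go hunks above (and the config.go changes earlier in this patch) extend rpc.NewServer with a traceRequests boolean so servers can log every request at INFO level. The sketch below is only a rough illustration of how such a flag can gate per-request logging; the Server type, constructor shape, and handle method are hypothetical stand-ins, not Erigon's actual rpc package internals.

package main

import (
	"log"
	"time"
)

// Server is a hypothetical RPC server that can optionally trace every request.
type Server struct {
	batchConcurrency uint
	traceRequests    bool
}

// NewServer mirrors the two-argument constructor shape used in the patch:
// a concurrency limit plus a trace switch.
func NewServer(batchConcurrency uint, traceRequests bool) *Server {
	return &Server{batchConcurrency: batchConcurrency, traceRequests: traceRequests}
}

// handle runs one request and, when tracing is enabled, logs method and latency.
func (s *Server) handle(method string, fn func() error) error {
	start := time.Now()
	err := fn()
	if s.traceRequests {
		log.Printf("INFO served rpc request method=%s took=%s err=%v", method, time.Since(start), err)
	}
	return err
}

func main() {
	srv := NewServer(50, true) // tracing on, e.g. driven by an --http.trace style flag
	_ = srv.handle("eth_blockNumber", func() error { return nil })
}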
@@ -69,7 +68,6 @@ func NewFullService(stack *Node) (*FullService, error) { fs := new(FullService) stack.RegisterProtocols(fs.Protocols()) - stack.RegisterAPIs(fs.APIs()) stack.RegisterLifecycle(fs) return fs, nil } @@ -90,22 +88,3 @@ func (f *FullService) Protocols() []p2p.Protocol { }, } } - -func (f *FullService) APIs() []rpc.API { - return []rpc.API{ - { - Namespace: "admin", - Version: "1.0", - }, - { - Namespace: "debug", - Version: "1.0", - Public: true, - }, - { - Namespace: "net", - Version: "1.0", - Public: true, - }, - } -} diff --git a/p2p/simulations/adapters/inproc.go b/p2p/simulations/adapters/inproc.go deleted file mode 100644 index 9b539701e5b..00000000000 --- a/p2p/simulations/adapters/inproc.go +++ /dev/null @@ -1,358 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package adapters - -import ( - "context" - "errors" - "fmt" - "math" - "net" - "sync" - - "github.com/ledgerwatch/erigon/event" - "github.com/ledgerwatch/erigon/node" - "github.com/ledgerwatch/erigon/node/nodecfg" - "github.com/ledgerwatch/erigon/p2p" - "github.com/ledgerwatch/erigon/p2p/enode" - "github.com/ledgerwatch/erigon/p2p/simulations/pipes" - "github.com/ledgerwatch/erigon/rpc" - "github.com/ledgerwatch/log/v3" - - "github.com/gorilla/websocket" -) - -// SimAdapter is a NodeAdapter which creates in-memory simulation nodes and -// connects them using net.Pipe -type SimAdapter struct { - pipe func() (net.Conn, net.Conn, error) - mtx sync.RWMutex - nodes map[enode.ID]*SimNode - lifecycles LifecycleConstructors -} - -// NewSimAdapter creates a SimAdapter which is capable of running in-memory -// simulation nodes running any of the given services (the services to run on a -// particular node are passed to the NewNode function in the NodeConfig) -// the adapter uses a net.Pipe for in-memory simulated network connections -func NewSimAdapter(services LifecycleConstructors) *SimAdapter { - return &SimAdapter{ - pipe: pipes.NetPipe, - nodes: make(map[enode.ID]*SimNode), - lifecycles: services, - } -} - -// Name returns the name of the adapter for logging purposes -func (s *SimAdapter) Name() string { - return "sim-adapter" -} - -// NewNode returns a new SimNode using the given config -func (s *SimAdapter) NewNode(config *NodeConfig) (Node, error) { - s.mtx.Lock() - defer s.mtx.Unlock() - - id := config.ID - // verify that the node has a private key in the config - if config.PrivateKey == nil { - return nil, fmt.Errorf("node is missing private key: %s", id) - } - - // check a node with the ID doesn't already exist - if _, exists := s.nodes[id]; exists { - return nil, fmt.Errorf("node already exists: %s", id) - } - - // check the services are valid - if len(config.Lifecycles) == 0 { - return nil, errors.New("node must have at least one service") - } - for _, service := range 
config.Lifecycles { - if _, exists := s.lifecycles[service]; !exists { - return nil, fmt.Errorf("unknown node service %q", service) - } - } - - err := config.initDummyEnode() - if err != nil { - return nil, err - } - - n, err := node.New(&nodecfg.Config{ - P2P: p2p.Config{ - PrivateKey: config.PrivateKey, - MaxPeers: math.MaxInt32, - MaxPendingPeers: 50, - NoDiscovery: true, - Dialer: s, - EnableMsgEvents: config.EnableMsgEvents, - }, - // Convert node ID to string once, rather than for every log line - Log: log.New("node.id", id.String()), - }) - if err != nil { - return nil, err - } - - simNode := &SimNode{ - ID: id, - config: config, - node: n, - adapter: s, - running: make(map[string]node.Lifecycle), - } - s.nodes[id] = simNode - return simNode, nil -} - -// Dial implements the p2p.NodeDialer interface by connecting to the node using -// an in-memory net.Pipe -func (s *SimAdapter) Dial(ctx context.Context, dest *enode.Node) (conn net.Conn, err error) { - node, ok := s.GetNode(dest.ID()) - if !ok { - return nil, fmt.Errorf("unknown node: %s", dest.ID()) - } - srv := node.Server() - if srv == nil { - return nil, fmt.Errorf("node not running: %s", dest.ID()) - } - // SimAdapter.pipe is net.Pipe (NewSimAdapter) - pipe1, pipe2, err := s.pipe() - if err != nil { - return nil, err - } - // this is simulated 'listening' - // asynchronously call the dialed destination node's p2p server - // to set up connection on the 'listening' side - go func() { - _ = srv.SetupConn(pipe1, 0, nil) - }() - return pipe2, nil -} - -// DialRPC implements the RPCDialer interface by creating an in-memory RPC -// client of the given node -func (s *SimAdapter) DialRPC(id enode.ID) (*rpc.Client, error) { - node, ok := s.GetNode(id) - if !ok { - return nil, fmt.Errorf("unknown node: %s", id) - } - return node.node.Attach() -} - -// GetNode returns the node with the given ID if it exists -func (s *SimAdapter) GetNode(id enode.ID) (*SimNode, bool) { - s.mtx.RLock() - defer s.mtx.RUnlock() - node, ok := s.nodes[id] - return node, ok -} - -// SimNode is an in-memory simulation node which connects to other nodes using -// net.Pipe (see SimAdapter.Dial), running devp2p protocols directly over that -// pipe -type SimNode struct { - lock sync.RWMutex - ID enode.ID - config *NodeConfig - adapter *SimAdapter - node *node.Node - running map[string]node.Lifecycle - client *rpc.Client - registerOnce sync.Once -} - -// Close closes the underlaying node.Node to release -// acquired resources. -func (sn *SimNode) Close() error { - return sn.node.Close() -} - -// Addr returns the node's discovery address -func (sn *SimNode) Addr() []byte { - return []byte(sn.Node().String()) -} - -// Node returns a node descriptor representing the SimNode -func (sn *SimNode) Node() *enode.Node { - return sn.config.Node() -} - -// Client returns an rpc.Client which can be used to communicate with the -// underlying services (it is set once the node has started) -func (sn *SimNode) Client() (*rpc.Client, error) { - sn.lock.RLock() - defer sn.lock.RUnlock() - if sn.client == nil { - return nil, errors.New("node not started") - } - return sn.client, nil -} - -// ServeRPC serves RPC requests over the given connection by creating an -// in-memory client to the node's RPC server. 
-func (sn *SimNode) ServeRPC(conn *websocket.Conn) error { - handler, err := sn.node.RPCHandler() - if err != nil { - return err - } - codec := rpc.NewFuncCodec(conn, conn.WriteJSON, conn.ReadJSON) - handler.ServeCodec(codec, 0) - return nil -} - -// Snapshots creates snapshots of the services by calling the -// simulation_snapshot RPC method -func (sn *SimNode) Snapshots() (map[string][]byte, error) { - sn.lock.RLock() - services := make(map[string]node.Lifecycle, len(sn.running)) - for name, service := range sn.running { - services[name] = service - } - sn.lock.RUnlock() - if len(services) == 0 { - return nil, errors.New("no running services") - } - snapshots := make(map[string][]byte) - for name, service := range services { - if s, ok := service.(interface { - Snapshot() ([]byte, error) - }); ok { - snap, err := s.Snapshot() - if err != nil { - return nil, err - } - snapshots[name] = snap - } - } - return snapshots, nil -} - -// Start registers the services and starts the underlying devp2p node -func (sn *SimNode) Start(snapshots map[string][]byte) error { - // ensure we only register the services once in the case of the node - // being stopped and then started again - var regErr error - sn.registerOnce.Do(func() { - for _, name := range sn.config.Lifecycles { - ctx := &ServiceContext{ - RPCDialer: sn.adapter, - Config: sn.config, - } - if snapshots != nil { - ctx.Snapshot = snapshots[name] - } - serviceFunc := sn.adapter.lifecycles[name] - service, err := serviceFunc(ctx, sn.node) - if err != nil { - regErr = err - break - } - // if the service has already been registered, don't register it again. - if _, ok := sn.running[name]; ok { - continue - } - sn.running[name] = service - } - }) - if regErr != nil { - return regErr - } - - if err := sn.node.Start(); err != nil { - return err - } - - // create an in-process RPC client - client, err := sn.node.Attach() - if err != nil { - return err - } - sn.lock.Lock() - sn.client = client - sn.lock.Unlock() - - return nil -} - -// Stop closes the RPC client and stops the underlying devp2p node -func (sn *SimNode) Stop() error { - sn.lock.Lock() - if sn.client != nil { - sn.client.Close() - sn.client = nil - } - sn.lock.Unlock() - return sn.node.Close() -} - -// Service returns a running service by name -func (sn *SimNode) Service(name string) node.Lifecycle { - sn.lock.RLock() - defer sn.lock.RUnlock() - return sn.running[name] -} - -// Services returns a copy of the underlying services -func (sn *SimNode) Services() []node.Lifecycle { - sn.lock.RLock() - defer sn.lock.RUnlock() - services := make([]node.Lifecycle, 0, len(sn.running)) - for _, service := range sn.running { - services = append(services, service) - } - return services -} - -// ServiceMap returns a map by names of the underlying services -func (sn *SimNode) ServiceMap() map[string]node.Lifecycle { - sn.lock.RLock() - defer sn.lock.RUnlock() - services := make(map[string]node.Lifecycle, len(sn.running)) - for name, service := range sn.running { - services[name] = service - } - return services -} - -// Server returns the underlying p2p.Server -func (sn *SimNode) Server() *p2p.Server { - return sn.node.Server() -} - -// SubscribeEvents subscribes the given channel to peer events from the -// underlying p2p.Server -func (sn *SimNode) SubscribeEvents(ch chan *p2p.PeerEvent) event.Subscription { - srv := sn.Server() - if srv == nil { - panic("node not running") - } - return srv.SubscribeEvents(ch) -} - -// NodeInfo returns information about the node -func (sn *SimNode) NodeInfo() 
*p2p.NodeInfo { - server := sn.Server() - if server == nil { - return &p2p.NodeInfo{ - ID: sn.ID.String(), - Enode: sn.Node().String(), - } - } - return server.NodeInfo() -} diff --git a/p2p/simulations/connect_test.go b/p2p/simulations/connect_test.go deleted file mode 100644 index bacb6def46f..00000000000 --- a/p2p/simulations/connect_test.go +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright 2018 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package simulations - -import ( - "testing" - - "github.com/ledgerwatch/erigon/node" - "github.com/ledgerwatch/erigon/p2p/enode" - "github.com/ledgerwatch/erigon/p2p/simulations/adapters" -) - -func newTestNetwork(t *testing.T, nodeCount int) (*Network, []enode.ID) { - t.Helper() - adapter := adapters.NewSimAdapter(adapters.LifecycleConstructors{ - "noopwoop": func(ctx *adapters.ServiceContext, stack *node.Node) (node.Lifecycle, error) { - return NewNoopService(nil), nil - }, - }) - - // create network - network := NewNetwork(adapter, &NetworkConfig{ - DefaultService: "noopwoop", - }) - - // create and start nodes - ids := make([]enode.ID, nodeCount) - for i := range ids { - conf := adapters.RandomNodeConfig() - node, err := network.NewNodeWithConfig(conf) - if err != nil { - t.Fatalf("error creating node: %s", err) - } - if err := network.Start(node.ID()); err != nil { - t.Fatalf("error starting node: %s", err) - } - ids[i] = node.ID() - } - - if len(network.Conns) > 0 { - t.Fatal("no connections should exist after just adding nodes") - } - - return network, ids -} - -func TestConnectToLastNode(t *testing.T) { - t.Skip("need test for p2p sentry") - net, ids := newTestNetwork(t, 10) - defer net.Shutdown() - - first := ids[0] - if err := net.ConnectToLastNode(first); err != nil { - t.Fatal(err) - } - - last := ids[len(ids)-1] - for i, id := range ids { - if id == first || id == last { - continue - } - - if net.GetConn(first, id) != nil { - t.Errorf("connection must not exist with node(ind: %v, id: %v)", i, id) - } - } - - if net.GetConn(first, last) == nil { - t.Error("first and last node must be connected") - } -} - -func TestConnectToRandomNode(t *testing.T) { - t.Skip("need test for p2p sentry") - net, ids := newTestNetwork(t, 10) - defer net.Shutdown() - - err := net.ConnectToRandomNode(ids[0]) - if err != nil { - t.Fatal(err) - } - - var cc int - for i, a := range ids { - for _, b := range ids[i:] { - if net.GetConn(a, b) != nil { - cc++ - } - } - } - - if cc != 1 { - t.Errorf("expected one connection, got %v", cc) - } -} - -func TestConnectNodesFull(t *testing.T) { - t.Skip("need test for p2p sentry") - tests := []struct { - name string - nodeCount int - }{ - {name: "no node", nodeCount: 0}, - {name: "single node", nodeCount: 1}, - {name: "2 nodes", nodeCount: 2}, - {name: "3 nodes", nodeCount: 3}, - {name: "even number of nodes", nodeCount: 12}, - 
{name: "odd number of nodes", nodeCount: 13}, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - net, ids := newTestNetwork(t, test.nodeCount) - defer net.Shutdown() - - err := net.ConnectNodesFull(ids) - if err != nil { - t.Fatal(err) - } - - VerifyFull(t, net, ids) - }) - } -} - -func TestConnectNodesChain(t *testing.T) { - t.Skip("need test for p2p sentry") - net, ids := newTestNetwork(t, 10) - defer net.Shutdown() - - err := net.ConnectNodesChain(ids) - if err != nil { - t.Fatal(err) - } - - VerifyChain(t, net, ids) -} - -func TestConnectNodesRing(t *testing.T) { - t.Skip("need test for p2p sentry") - net, ids := newTestNetwork(t, 10) - defer net.Shutdown() - - err := net.ConnectNodesRing(ids) - if err != nil { - t.Fatal(err) - } - - VerifyRing(t, net, ids) -} - -func TestConnectNodesStar(t *testing.T) { - t.Skip("need test for p2p sentry") - net, ids := newTestNetwork(t, 10) - defer net.Shutdown() - - pivotIndex := 2 - - err := net.ConnectNodesStar(ids, ids[pivotIndex]) - if err != nil { - t.Fatal(err) - } - - VerifyStar(t, net, ids, pivotIndex) -} diff --git a/p2p/simulations/examples/ping-pong.go b/p2p/simulations/examples/ping-pong.go deleted file mode 100644 index 5210ec76d5b..00000000000 --- a/p2p/simulations/examples/ping-pong.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package main - -import ( - "flag" - "fmt" - "io" - "net/http" - "sync/atomic" - "time" - - "github.com/ledgerwatch/erigon/node" - "github.com/ledgerwatch/erigon/p2p" - "github.com/ledgerwatch/erigon/p2p/enode" - "github.com/ledgerwatch/erigon/p2p/simulations" - "github.com/ledgerwatch/erigon/p2p/simulations/adapters" - "github.com/ledgerwatch/log/v3" -) - -var adapterType = flag.String("adapter", "sim", `node adapter to use (one of "sim", "exec" or "docker")`) - -// main() starts a simulation network which contains nodes running a simple -// ping-pong protocol -func main() { - flag.Parse() - - // set the log level to Trace - log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StderrHandler)) - - // register a single ping-pong service - services := map[string]adapters.LifecycleConstructor{ - "ping-pong": func(ctx *adapters.ServiceContext, stack *node.Node) (node.Lifecycle, error) { - pps := newPingPongService(ctx.Config.ID) - stack.RegisterProtocols(pps.Protocols()) - return pps, nil - }, - } - adapters.RegisterLifecycles(services) - - // create the NodeAdapter - var adapter adapters.NodeAdapter - - switch *adapterType { - - case "sim": - log.Info("using sim adapter") - adapter = adapters.NewSimAdapter(services) - - default: - log.Crit(fmt.Sprintf("unknown node adapter %q", *adapterType)) - } - - // start the HTTP API - log.Info("starting simulation server on 0.0.0.0:8888...") - network := simulations.NewNetwork(adapter, &simulations.NetworkConfig{ - DefaultService: "ping-pong", - }) - if err := http.ListenAndServe(":8888", simulations.NewServer(network)); err != nil { - log.Crit("error starting simulation server", "err", err) - } -} - -// pingPongService runs a ping-pong protocol between nodes where each node -// sends a ping to all its connected peers every 10s and receives a pong in -// return -type pingPongService struct { - id enode.ID - log log.Logger - received int64 -} - -func newPingPongService(id enode.ID) *pingPongService { - return &pingPongService{ - id: id, - log: log.New("node.id", id), - } -} - -func (p *pingPongService) Protocols() []p2p.Protocol { - return []p2p.Protocol{{ - Name: "ping-pong", - Version: 1, - Length: 2, - Run: p.Run, - NodeInfo: p.Info, - }} -} - -func (p *pingPongService) Start() error { - p.log.Info("ping-pong service starting") - return nil -} - -func (p *pingPongService) Stop() error { - p.log.Info("ping-pong service stopping") - return nil -} - -func (p *pingPongService) Info() interface{} { - return struct { - Received int64 `json:"received"` - }{ - atomic.LoadInt64(&p.received), - } -} - -const ( - pingMsgCode = iota - pongMsgCode -) - -// Run implements the ping-pong protocol which sends ping messages to the peer -// at 10s intervals, and responds to pings with pong messages. 
-func (p *pingPongService) Run(peer *p2p.Peer, rw p2p.MsgReadWriter) error { - log := p.log.New("peer.id", peer.ID()) - - errC := make(chan error) - go func() { - for range time.Tick(10 * time.Second) { - log.Info("sending ping") - if err := p2p.Send(rw, pingMsgCode, "PING"); err != nil { - errC <- err - return - } - } - }() - go func() { - for { - msg, err := rw.ReadMsg() - if err != nil { - errC <- err - return - } - payload, err := io.ReadAll(msg.Payload) - if err != nil { - errC <- err - return - } - log.Info("received message", "msg.code", msg.Code, "msg.payload", string(payload)) - atomic.AddInt64(&p.received, 1) - if msg.Code == pingMsgCode { - log.Info("sending pong") - go p2p.Send(rw, pongMsgCode, "PONG") - } - } - }() - return <-errC -} diff --git a/p2p/simulations/examples/ping-pong.sh b/p2p/simulations/examples/ping-pong.sh deleted file mode 100755 index 47936bd9a07..00000000000 --- a/p2p/simulations/examples/ping-pong.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash -# -# Boot a ping-pong network simulation using the HTTP API started by ping-pong.go - -set -e - -main() { - if ! which p2psim &>/dev/null; then - fail "missing p2psim binary (you need to build cmd/p2psim and put it in \$PATH)" - fi - - info "creating 10 nodes" - for i in $(seq 1 10); do - p2psim node create --name "$(node_name $i)" - p2psim node start "$(node_name $i)" - done - - info "connecting node01 to all other nodes" - for i in $(seq 2 10); do - p2psim node connect "node01" "$(node_name $i)" - done - - info "done" -} - -node_name() { - local num=$1 - echo "node$(printf '%02d' $num)" -} - -info() { - echo -e "\033[1;32m---> $(date +%H:%M:%S) ${@}\033[0m" -} - -fail() { - echo -e "\033[1;31mERROR: ${@}\033[0m" >&2 - exit 1 -} - -main "$@" diff --git a/p2p/simulations/http_test.go b/p2p/simulations/http_test.go deleted file mode 100644 index 354235845bf..00000000000 --- a/p2p/simulations/http_test.go +++ /dev/null @@ -1,878 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package simulations - -import ( - "context" - "flag" - "fmt" - "math/rand" - "net/http/httptest" - "os" - "reflect" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/ledgerwatch/erigon/event" - "github.com/ledgerwatch/erigon/node" - "github.com/ledgerwatch/erigon/p2p" - "github.com/ledgerwatch/erigon/p2p/enode" - "github.com/ledgerwatch/erigon/p2p/simulations/adapters" - "github.com/ledgerwatch/erigon/rpc" - "github.com/ledgerwatch/log/v3" -) - -func TestMain(m *testing.M) { - loglevel := flag.Int("loglevel", 2, "verbosity of logs") - flag.Parse() - //log.PrintOrigins(true) - log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StderrHandler)) - os.Exit(m.Run()) -} - -// testService implements the node.Service interface and provides protocols -// and APIs which are useful for testing nodes in a simulation network -type testService struct { - id enode.ID - - // peerCount is incremented once a peer handshake has been performed - peerCount int64 - - peers map[enode.ID]*testPeer - peersMtx sync.Mutex - - // state stores []byte which is used to test creating and loading - // snapshots - state atomic.Value -} - -func newTestService(ctx *adapters.ServiceContext, stack *node.Node) (node.Lifecycle, error) { - svc := &testService{ - id: ctx.Config.ID, - peers: make(map[enode.ID]*testPeer), - } - svc.state.Store(ctx.Snapshot) - - stack.RegisterProtocols(svc.Protocols()) - stack.RegisterAPIs(svc.APIs()) - return svc, nil -} - -type testPeer struct { - testReady chan struct{} - dumReady chan struct{} -} - -func (t *testService) peer(id enode.ID) *testPeer { - t.peersMtx.Lock() - defer t.peersMtx.Unlock() - if peer, ok := t.peers[id]; ok { - return peer - } - peer := &testPeer{ - testReady: make(chan struct{}), - dumReady: make(chan struct{}), - } - t.peers[id] = peer - return peer -} - -func (t *testService) Protocols() []p2p.Protocol { - return []p2p.Protocol{ - { - Name: "test", - Version: 1, - Length: 3, - Run: t.RunTest, - }, - { - Name: "dum", - Version: 1, - Length: 1, - Run: t.RunDum, - }, - { - Name: "prb", - Version: 1, - Length: 1, - Run: t.RunPrb, - }, - } -} - -func (t *testService) APIs() []rpc.API { - return []rpc.API{{ - Namespace: "test", - Version: "1.0", - Service: &TestAPI{ - state: &t.state, - peerCount: &t.peerCount, - }, - }} -} - -func (t *testService) Start() error { - return nil -} - -func (t *testService) Stop() error { - return nil -} - -// handshake performs a peer handshake by sending and expecting an empty -// message with the given code -func (t *testService) handshake(rw p2p.MsgReadWriter, code uint64) error { - errc := make(chan error, 2) - go func() { errc <- p2p.Send(rw, code, struct{}{}) }() - go func() { errc <- p2p.ExpectMsg(rw, code, struct{}{}) }() - for i := 0; i < 2; i++ { - if err := <-errc; err != nil { - return err - } - } - return nil -} - -func (t *testService) RunTest(p *p2p.Peer, rw p2p.MsgReadWriter) error { - peer := t.peer(p.ID()) - - // perform three handshakes with three different message codes, - // used to test message sending and filtering - if err := t.handshake(rw, 2); err != nil { - return err - } - if err := t.handshake(rw, 1); err != nil { - return err - } - if err := t.handshake(rw, 0); err != nil { - return err - } - - // close the testReady channel so that other protocols can run - close(peer.testReady) - - // track the peer - atomic.AddInt64(&t.peerCount, 1) - defer atomic.AddInt64(&t.peerCount, -1) - - // block until the peer is dropped - for { - _, err := rw.ReadMsg() - if err != nil { - return err - } - } 
-} - -func (t *testService) RunDum(p *p2p.Peer, rw p2p.MsgReadWriter) error { - peer := t.peer(p.ID()) - - // wait for the test protocol to perform its handshake - <-peer.testReady - - // perform a handshake - if err := t.handshake(rw, 0); err != nil { - return err - } - - // close the dumReady channel so that other protocols can run - close(peer.dumReady) - - // block until the peer is dropped - for { - _, err := rw.ReadMsg() - if err != nil { - return err - } - } -} -func (t *testService) RunPrb(p *p2p.Peer, rw p2p.MsgReadWriter) error { - peer := t.peer(p.ID()) - - // wait for the dum protocol to perform its handshake - <-peer.dumReady - - // perform a handshake - if err := t.handshake(rw, 0); err != nil { - return err - } - - // block until the peer is dropped - for { - _, err := rw.ReadMsg() - if err != nil { - return err - } - } -} - -func (t *testService) Snapshot() ([]byte, error) { - return t.state.Load().([]byte), nil -} - -// TestAPI provides a test API to: -// * get the peer count -// * get and set an arbitrary state byte slice -// * get and increment a counter -// * subscribe to counter increment events -type TestAPI struct { - state *atomic.Value - peerCount *int64 - counter int64 - feed event.Feed -} - -func (t *TestAPI) PeerCount() int64 { - return atomic.LoadInt64(t.peerCount) -} - -func (t *TestAPI) Get() int64 { - return atomic.LoadInt64(&t.counter) -} - -func (t *TestAPI) Add(delta int64) { - atomic.AddInt64(&t.counter, delta) - t.feed.Send(delta) -} - -func (t *TestAPI) GetState() []byte { - return t.state.Load().([]byte) -} - -func (t *TestAPI) SetState(state []byte) { - t.state.Store(state) -} - -func (t *TestAPI) Events(ctx context.Context) (*rpc.Subscription, error) { - notifier, supported := rpc.NotifierFromContext(ctx) - if !supported { - return nil, rpc.ErrNotificationsUnsupported - } - - rpcSub := notifier.CreateSubscription() - - go func() { - events := make(chan int64) - sub := t.feed.Subscribe(events) - defer sub.Unsubscribe() - - for { - select { - case event := <-events: - notifier.Notify(rpcSub.ID, event) - case <-sub.Err(): - return - case <-rpcSub.Err(): - return - case <-notifier.Closed(): - return - } - } - }() - - return rpcSub, nil -} - -var testServices = adapters.LifecycleConstructors{ - "test": newTestService, -} - -func testHTTPServer(t *testing.T) (*Network, *httptest.Server) { - t.Helper() - adapter := adapters.NewSimAdapter(testServices) - network := NewNetwork(adapter, &NetworkConfig{ - DefaultService: "test", - }) - return network, httptest.NewServer(NewServer(network)) -} - -// TestHTTPNetwork tests interacting with a simulation network using the HTTP -// API -func TestHTTPNetwork(t *testing.T) { - t.Skip("need test for p2p sentry") - // start the server - network, s := testHTTPServer(t) - defer s.Close() - - // subscribe to events so we can check them later - client := NewClient(s.URL) - events := make(chan *Event, 100) - var opts SubscribeOpts - sub, err := client.SubscribeNetwork(events, opts) - if err != nil { - t.Fatalf("error subscribing to network events: %s", err) - } - defer sub.Unsubscribe() - - // check we can retrieve details about the network - gotNetwork, err := client.GetNetwork() - if err != nil { - t.Fatalf("error getting network: %s", err) - } - if gotNetwork.ID != network.ID { - t.Fatalf("expected network to have ID %q, got %q", network.ID, gotNetwork.ID) - } - - // start a simulation network - nodeIDs := startTestNetwork(t, client) - - // check we got all the events - x := &expectEvents{t, events, sub} - x.expect( - 
x.nodeEvent(nodeIDs[0], false), - x.nodeEvent(nodeIDs[1], false), - x.nodeEvent(nodeIDs[0], true), - x.nodeEvent(nodeIDs[1], true), - x.connEvent(nodeIDs[0], nodeIDs[1], false), - x.connEvent(nodeIDs[0], nodeIDs[1], true), - ) - - // reconnect the stream and check we get the current nodes and conns - events = make(chan *Event, 100) - opts.Current = true - sub, err = client.SubscribeNetwork(events, opts) - if err != nil { - t.Fatalf("error subscribing to network events: %s", err) - } - defer sub.Unsubscribe() - x = &expectEvents{t, events, sub} - x.expect( - x.nodeEvent(nodeIDs[0], true), - x.nodeEvent(nodeIDs[1], true), - x.connEvent(nodeIDs[0], nodeIDs[1], true), - ) -} - -func startTestNetwork(t *testing.T, client *Client) []string { - // create two nodes - nodeCount := 2 - nodeIDs := make([]string, nodeCount) - for i := 0; i < nodeCount; i++ { - config := adapters.RandomNodeConfig() - node, err := client.CreateNode(config) - if err != nil { - t.Fatalf("error creating node: %s", err) - } - nodeIDs[i] = node.ID - } - - // check both nodes exist - nodes, err := client.GetNodes() - if err != nil { - t.Fatalf("error getting nodes: %s", err) - } - if len(nodes) != nodeCount { - t.Fatalf("expected %d nodes, got %d", nodeCount, len(nodes)) - } - for i, nodeID := range nodeIDs { - if nodes[i].ID != nodeID { - t.Fatalf("expected node %d to have ID %q, got %q", i, nodeID, nodes[i].ID) - } - node, err := client.GetNode(nodeID) - if err != nil { - t.Fatalf("error getting node %d: %s", i, err) - } - if node.ID != nodeID { - t.Fatalf("expected node %d to have ID %q, got %q", i, nodeID, node.ID) - } - } - - // start both nodes - for _, nodeID := range nodeIDs { - if err := client.StartNode(nodeID); err != nil { - t.Fatalf("error starting node %q: %s", nodeID, err) - } - } - - // connect the nodes - for i := 0; i < nodeCount-1; i++ { - peerId := i + 1 - if i == nodeCount-1 { - peerId = 0 - } - if err := client.ConnectNode(nodeIDs[i], nodeIDs[peerId]); err != nil { - t.Fatalf("error connecting nodes: %s", err) - } - } - - return nodeIDs -} - -type expectEvents struct { - *testing.T - - events chan *Event - sub event.Subscription -} - -func (t *expectEvents) nodeEvent(id string, up bool) *Event { - config := &adapters.NodeConfig{ID: enode.HexID(id)} - return &Event{Type: EventTypeNode, Node: newNode(nil, config, up)} -} - -func (t *expectEvents) connEvent(one, other string, up bool) *Event { - return &Event{ - Type: EventTypeConn, - Conn: &Conn{ - One: enode.HexID(one), - Other: enode.HexID(other), - Up: up, - }, - } -} - -func (t *expectEvents) expectMsgs(expected map[MsgFilter]int) { - actual := make(map[MsgFilter]int) - timeout := time.After(10 * time.Second) -loop: - for { - select { - case event := <-t.events: - t.Logf("received %s event: %v", event.Type, event) - - if event.Type != EventTypeMsg || event.Msg.Received { - continue loop - } - if event.Msg == nil { - t.Fatal("expected event.Msg to be set") - } - filter := MsgFilter{ - Proto: event.Msg.Protocol, - Code: int64(event.Msg.Code), - } - actual[filter]++ - if actual[filter] > expected[filter] { - t.Fatalf("received too many msgs for filter: %v", filter) - } - if reflect.DeepEqual(actual, expected) { - return - } - - case err := <-t.sub.Err(): - t.Fatalf("network stream closed unexpectedly: %s", err) - - case <-timeout: - t.Fatal("timed out waiting for expected events") - } - } -} - -func (t *expectEvents) expect(events ...*Event) { - t.Helper() - timeout := time.After(10 * time.Second) - i := 0 - for { - select { - case event := <-t.events: - 
t.Logf("received %s event: %v", event.Type, event) - - expected := events[i] - if event.Type != expected.Type { - t.Fatalf("expected event %d to have type %q, got %q", i, expected.Type, event.Type) - } - - switch expected.Type { - - case EventTypeNode: - if event.Node == nil { - t.Fatal("expected event.Node to be set") - } - if event.Node.ID() != expected.Node.ID() { - t.Fatalf("expected node event %d to have id %q, got %q", i, expected.Node.ID().TerminalString(), event.Node.ID().TerminalString()) - } - if event.Node.Up() != expected.Node.Up() { - t.Fatalf("expected node event %d to have up=%t, got up=%t", i, expected.Node.Up(), event.Node.Up()) - } - - case EventTypeConn: - if event.Conn == nil { - t.Fatal("expected event.Conn to be set") - } - if event.Conn.One != expected.Conn.One { - t.Fatalf("expected conn event %d to have one=%q, got one=%q", i, expected.Conn.One.TerminalString(), event.Conn.One.TerminalString()) - } - if event.Conn.Other != expected.Conn.Other { - t.Fatalf("expected conn event %d to have other=%q, got other=%q", i, expected.Conn.Other.TerminalString(), event.Conn.Other.TerminalString()) - } - if event.Conn.Up != expected.Conn.Up { - t.Fatalf("expected conn event %d to have up=%t, got up=%t", i, expected.Conn.Up, event.Conn.Up) - } - - } - - i++ - if i == len(events) { - return - } - - case err := <-t.sub.Err(): - t.Fatalf("network stream closed unexpectedly: %s", err) - - case <-timeout: - t.Fatal("timed out waiting for expected events") - } - } -} - -// TestHTTPNodeRPC tests calling RPC methods on nodes via the HTTP API -func TestHTTPNodeRPC(t *testing.T) { - t.Skip("need test for p2p sentry") - // start the server - _, s := testHTTPServer(t) - defer s.Close() - - // start a node in the network - client := NewClient(s.URL) - - config := adapters.RandomNodeConfig() - node, err := client.CreateNode(config) - if err != nil { - t.Fatalf("error creating node: %s", err) - } - if err := client.StartNode(node.ID); err != nil { - t.Fatalf("error starting node: %s", err) - } - - // create two RPC clients - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - rpcClient1, err := client.RPCClient(ctx, node.ID) - if err != nil { - t.Fatalf("error getting node RPC client: %s", err) - } - rpcClient2, err := client.RPCClient(ctx, node.ID) - if err != nil { - t.Fatalf("error getting node RPC client: %s", err) - } - - // subscribe to events using client 1 - events := make(chan int64, 1) - sub, err := rpcClient1.Subscribe(ctx, "test", events, "events") - if err != nil { - t.Fatalf("error subscribing to events: %s", err) - } - defer sub.Unsubscribe() - - // call some RPC methods using client 2 - if err := rpcClient2.CallContext(ctx, nil, "test_add", 10); err != nil { - t.Fatalf("error calling RPC method: %s", err) - } - var result int64 - if err := rpcClient2.CallContext(ctx, &result, "test_get"); err != nil { - t.Fatalf("error calling RPC method: %s", err) - } - if result != 10 { - t.Fatalf("expected result to be 10, got %d", result) - } - - // check we got an event from client 1 - select { - case event := <-events: - if event != 10 { - t.Fatalf("expected event to be 10, got %d", event) - } - case <-ctx.Done(): - t.Fatal(ctx.Err()) - } -} - -// TestHTTPSnapshot tests creating and loading network snapshots -func TestHTTPSnapshot(t *testing.T) { - t.Skip("need test for p2p sentry") - // start the server - network, s := testHTTPServer(t) - defer s.Close() - - var eventsDone = make(chan struct{}) - count := 1 - eventsDoneChan := make(chan *Event) - 
eventSub := network.Events().Subscribe(eventsDoneChan) - go func() { - defer eventSub.Unsubscribe() - for event := range eventsDoneChan { - if event.Type == EventTypeConn && !event.Control { - count-- - if count == 0 { - eventsDone <- struct{}{} - return - } - } - } - }() - - // create a two-node network - client := NewClient(s.URL) - nodeCount := 2 - nodes := make([]*p2p.NodeInfo, nodeCount) - for i := 0; i < nodeCount; i++ { - config := adapters.RandomNodeConfig() - node, err := client.CreateNode(config) - if err != nil { - t.Fatalf("error creating node: %s", err) - } - if err := client.StartNode(node.ID); err != nil { - t.Fatalf("error starting node: %s", err) - } - nodes[i] = node - } - if err := client.ConnectNode(nodes[0].ID, nodes[1].ID); err != nil { - t.Fatalf("error connecting nodes: %s", err) - } - - // store some state in the test services - states := make([]string, nodeCount) - for i, node := range nodes { - rpc, err := client.RPCClient(context.Background(), node.ID) - if err != nil { - t.Fatalf("error getting RPC client: %s", err) - } - defer rpc.Close() - state := fmt.Sprintf("%x", rand.Int()) - if err := rpc.Call(nil, "test_setState", []byte(state)); err != nil { - t.Fatalf("error setting service state: %s", err) - } - states[i] = state - } - <-eventsDone - // create a snapshot - snap, snapErr := client.CreateSnapshot() - if snapErr != nil { - t.Fatalf("error creating snapshot: %s", snapErr) - } - for i, state := range states { - gotState := snap.Nodes[i].Snapshots["test"] - if string(gotState) != state { - t.Fatalf("expected snapshot state %q, got %q", state, gotState) - } - } - - // create another network - network2, s := testHTTPServer(t) - defer s.Close() - client = NewClient(s.URL) - count = 1 - eventSub = network2.Events().Subscribe(eventsDoneChan) - go func() { - defer eventSub.Unsubscribe() - for event := range eventsDoneChan { - if event.Type == EventTypeConn && !event.Control { - count-- - if count == 0 { - eventsDone <- struct{}{} - return - } - } - } - }() - - // subscribe to events so we can check them later - events := make(chan *Event, 100) - var opts SubscribeOpts - sub, subErr := client.SubscribeNetwork(events, opts) - if subErr != nil { - t.Fatalf("error subscribing to network events: %s", subErr) - } - defer sub.Unsubscribe() - - // load the snapshot - if err := client.LoadSnapshot(snap); err != nil { - t.Fatalf("error loading snapshot: %s", err) - } - <-eventsDone - - // check the nodes and connection exists - net, err := client.GetNetwork() - if err != nil { - t.Fatalf("error getting network: %s", err) - } - if len(net.Nodes) != nodeCount { - t.Fatalf("expected network to have %d nodes, got %d", nodeCount, len(net.Nodes)) - } - for i, node := range nodes { - id := net.Nodes[i].ID().String() - if id != node.ID { - t.Fatalf("expected node %d to have ID %s, got %s", i, node.ID, id) - } - } - if len(net.Conns) != 1 { - t.Fatalf("expected network to have 1 connection, got %d", len(net.Conns)) - } - conn := net.Conns[0] - if conn.One.String() != nodes[0].ID { - t.Fatalf("expected connection to have one=%q, got one=%q", nodes[0].ID, conn.One) - } - if conn.Other.String() != nodes[1].ID { - t.Fatalf("expected connection to have other=%q, got other=%q", nodes[1].ID, conn.Other) - } - if !conn.Up { - t.Fatal("should be up") - } - - // check the node states were restored - for i, node := range nodes { - rpc, err := client.RPCClient(context.Background(), node.ID) - if err != nil { - t.Fatalf("error getting RPC client: %s", err) - } - defer rpc.Close() - var state 
[]byte - if err := rpc.Call(&state, "test_getState"); err != nil { - t.Fatalf("error getting service state: %s", err) - } - if string(state) != states[i] { - t.Fatalf("expected snapshot state %q, got %q", states[i], state) - } - } - - // check we got all the events - x := &expectEvents{t, events, sub} - x.expect( - x.nodeEvent(nodes[0].ID, false), - x.nodeEvent(nodes[0].ID, true), - x.nodeEvent(nodes[1].ID, false), - x.nodeEvent(nodes[1].ID, true), - x.connEvent(nodes[0].ID, nodes[1].ID, false), - x.connEvent(nodes[0].ID, nodes[1].ID, true), - ) -} - -// TestMsgFilterPassMultiple tests streaming message events using a filter -// with multiple protocols -func TestMsgFilterPassMultiple(t *testing.T) { - t.Skip("need test for p2p sentry") - // start the server - _, s := testHTTPServer(t) - defer s.Close() - - // subscribe to events with a message filter - client := NewClient(s.URL) - events := make(chan *Event, 10) - opts := SubscribeOpts{ - Filter: "prb:0-test:0", - } - sub, err := client.SubscribeNetwork(events, opts) - if err != nil { - t.Fatalf("error subscribing to network events: %s", err) - } - defer sub.Unsubscribe() - - // start a simulation network - startTestNetwork(t, client) - - // check we got the expected events - x := &expectEvents{t, events, sub} - x.expectMsgs(map[MsgFilter]int{ - {"test", 0}: 2, - {"prb", 0}: 2, - }) -} - -// TestMsgFilterPassWildcard tests streaming message events using a filter -// with a code wildcard -func TestMsgFilterPassWildcard(t *testing.T) { - t.Skip("need test for p2p sentry") - // start the server - _, s := testHTTPServer(t) - defer s.Close() - - // subscribe to events with a message filter - client := NewClient(s.URL) - events := make(chan *Event, 10) - opts := SubscribeOpts{ - Filter: "prb:0,2-test:*", - } - sub, err := client.SubscribeNetwork(events, opts) - if err != nil { - t.Fatalf("error subscribing to network events: %s", err) - } - defer sub.Unsubscribe() - - // start a simulation network - startTestNetwork(t, client) - - // check we got the expected events - x := &expectEvents{t, events, sub} - x.expectMsgs(map[MsgFilter]int{ - {"test", 2}: 2, - {"test", 1}: 2, - {"test", 0}: 2, - {"prb", 0}: 2, - }) -} - -// TestMsgFilterPassSingle tests streaming message events using a filter -// with a single protocol and code -func TestMsgFilterPassSingle(t *testing.T) { - t.Skip("need test for p2p sentry") - // start the server - _, s := testHTTPServer(t) - defer s.Close() - - // subscribe to events with a message filter - client := NewClient(s.URL) - events := make(chan *Event, 10) - opts := SubscribeOpts{ - Filter: "dum:0", - } - sub, err := client.SubscribeNetwork(events, opts) - if err != nil { - t.Fatalf("error subscribing to network events: %s", err) - } - defer sub.Unsubscribe() - - // start a simulation network - startTestNetwork(t, client) - - // check we got the expected events - x := &expectEvents{t, events, sub} - x.expectMsgs(map[MsgFilter]int{ - {"dum", 0}: 2, - }) -} - -// TestMsgFilterPassSingle tests streaming message events using an invalid -// filter -func TestMsgFilterFailBadParams(t *testing.T) { - t.Skip("need test for p2p sentry") - // start the server - _, s := testHTTPServer(t) - defer s.Close() - - client := NewClient(s.URL) - events := make(chan *Event, 10) - opts := SubscribeOpts{ - Filter: "foo:", - } - _, err := client.SubscribeNetwork(events, opts) - if err == nil { - t.Fatalf("expected event subscription to fail but succeeded!") - } - - opts.Filter = "bzz:aa" - _, err = client.SubscribeNetwork(events, opts) - if err 
== nil { - t.Fatalf("expected event subscription to fail but succeeded!") - } - - opts.Filter = "invalid" - _, err = client.SubscribeNetwork(events, opts) - if err == nil { - t.Fatalf("expected event subscription to fail but succeeded!") - } -} diff --git a/p2p/simulations/mocker_test.go b/p2p/simulations/mocker_test.go deleted file mode 100644 index 34b02223a69..00000000000 --- a/p2p/simulations/mocker_test.go +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// Package simulations simulates p2p networks. -// A mocker simulates starting and stopping real nodes in a network. -package simulations - -import ( - "encoding/json" - "net/http" - "net/url" - "strconv" - "sync" - "testing" - "time" - - "github.com/ledgerwatch/erigon/p2p/enode" -) - -func TestMocker(t *testing.T) { - t.Skip("need test for p2p sentry") - //start the simulation HTTP server - _, s := testHTTPServer(t) - defer s.Close() - - //create a client - client := NewClient(s.URL) - - //start the network - err := client.StartNetwork() - if err != nil { - t.Fatalf("Could not start test network: %s", err) - } - //stop the network to terminate - defer func() { - err = client.StopNetwork() - if err != nil { - t.Fatalf("Could not stop test network: %s", err) - } - }() - - //get the list of available mocker types - resp, err := http.Get(s.URL + "/mocker") - if err != nil { - t.Fatalf("Could not get mocker list: %s", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - t.Fatalf("Invalid Status Code received, expected 200, got %d", resp.StatusCode) - } - - //check the list is at least 1 in size - var mockerlist []string - err = json.NewDecoder(resp.Body).Decode(&mockerlist) - if err != nil { - t.Fatalf("Error decoding JSON mockerlist: %s", err) - } - - if len(mockerlist) < 1 { - t.Fatalf("No mockers available") - } - - nodeCount := 10 - var wg sync.WaitGroup - - events := make(chan *Event, 10) - var opts SubscribeOpts - sub, err := client.SubscribeNetwork(events, opts) - defer sub.Unsubscribe() - - // wait until all nodes are started and connected - // store every node up event in a map (value is irrelevant, mimic Set datatype) - nodemap := make(map[enode.ID]bool) - nodesComplete := false - connCount := 0 - wg.Add(1) - go func() { - defer wg.Done() - - for connCount < (nodeCount-1)*2 { - select { - case event := <-events: - if isNodeUp(event) { - //add the correspondent node ID to the map - nodemap[event.Node.Config.ID] = true - //this means all nodes got a nodeUp event, so we can continue the test - if len(nodemap) == nodeCount { - nodesComplete = true - } - } else if event.Conn != nil && nodesComplete { - connCount += 1 - } - case <-time.After(30 * time.Second): - t.Errorf("Timeout waiting for nodes being started up!") - return - } - } - }() - - //take the last element of 
the mockerlist as the default mocker-type to ensure one is enabled - mockertype := mockerlist[len(mockerlist)-1] - //still, use hardcoded "probabilistic" one if available ;) - for _, m := range mockerlist { - if m == "probabilistic" { - mockertype = m - break - } - } - //start the mocker with nodeCount number of nodes - resp, err = http.PostForm(s.URL+"/mocker/start", url.Values{"mocker-type": {mockertype}, "node-count": {strconv.Itoa(nodeCount)}}) - if err != nil { - t.Fatalf("Could not start mocker: %s", err) - } - defer resp.Body.Close() - - if resp.StatusCode != 200 { - t.Fatalf("Invalid Status Code received for starting mocker, expected 200, got %d", resp.StatusCode) - } - - wg.Wait() - - //check there are nodeCount number of nodes in the network - nodesInfo, err := client.GetNodes() - if err != nil { - t.Fatalf("Could not get nodes list: %s", err) - } - - if len(nodesInfo) != nodeCount { - t.Fatalf("Expected %d number of nodes, got: %d", nodeCount, len(nodesInfo)) - } - - //stop the mocker - resp, err = http.Post(s.URL+"/mocker/stop", "", nil) - if err != nil { - t.Fatalf("Could not stop mocker: %s", err) - } - defer resp.Body.Close() - if resp.StatusCode != 200 { - t.Fatalf("Invalid Status Code received for stopping mocker, expected 200, got %d", resp.StatusCode) - } - - //reset the network - r, err := http.Post(s.URL+"/reset", "", nil) - if err != nil { - t.Fatalf("Could not reset network: %s", err) - } - r.Body.Close() - - //now the number of nodes in the network should be zero - nodesInfo, err = client.GetNodes() - if err != nil { - t.Fatalf("Could not get nodes list: %s", err) - } - - if len(nodesInfo) != 0 { - t.Fatalf("Expected empty list of nodes, got: %d", len(nodesInfo)) - } -} - -func isNodeUp(event *Event) bool { - return event.Node != nil && event.Node.Up() -} diff --git a/p2p/simulations/network_test.go b/p2p/simulations/network_test.go deleted file mode 100644 index f2cb9eab2b1..00000000000 --- a/p2p/simulations/network_test.go +++ /dev/null @@ -1,886 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package simulations - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "reflect" - "strconv" - "strings" - "testing" - "time" - - "github.com/ledgerwatch/erigon/common/fdlimit" - "github.com/ledgerwatch/erigon/node" - "github.com/ledgerwatch/erigon/p2p/enode" - "github.com/ledgerwatch/erigon/p2p/simulations/adapters" - "github.com/ledgerwatch/log/v3" -) - -// Tests that a created snapshot with a minimal service only contains the expected connections -// and that a network when loaded with this snapshot only contains those same connections -func TestSnapshot(t *testing.T) { - t.Skip("need test for p2p sentry") - if _, err := fdlimit.Raise(2048); err != nil { - panic(err) - } - - // PART I - // create snapshot from ring network - - // this is a minimal service, whose protocol will take exactly one message OR close of connection before quitting - adapter := adapters.NewSimAdapter(adapters.LifecycleConstructors{ - "noopwoop": func(ctx *adapters.ServiceContext, stack *node.Node) (node.Lifecycle, error) { - return NewNoopService(nil), nil - }, - }) - - // create network - network := NewNetwork(adapter, &NetworkConfig{ - DefaultService: "noopwoop", - }) - // \todo consider making a member of network, set to true threadsafe when shutdown - runningOne := true - defer func() { - if runningOne { - network.Shutdown() - } - }() - - // create and start nodes - nodeCount := 20 - ids := make([]enode.ID, nodeCount) - for i := 0; i < nodeCount; i++ { - conf := adapters.RandomNodeConfig() - node, err := network.NewNodeWithConfig(conf) - if err != nil { - t.Fatalf("error creating node: %s", err) - } - if err := network.Start(node.ID()); err != nil { - t.Fatalf("error starting node: %s", err) - } - ids[i] = node.ID() - } - - // subscribe to peer events - evC := make(chan *Event) - sub := network.Events().Subscribe(evC) - defer sub.Unsubscribe() - - // connect nodes in a ring - // spawn separate thread to avoid deadlock in the event listeners - connectErr := make(chan error, 1) - go func() { - for i, id := range ids { - peerID := ids[(i+1)%len(ids)] - if err := network.Connect(id, peerID); err != nil { - connectErr <- err - return - } - } - }() - - // collect connection events up to expected number - ctx, cancel := context.WithTimeout(context.TODO(), time.Second) - defer cancel() - checkIds := make(map[enode.ID][]enode.ID) - connEventCount := nodeCount -OUTER: - for { - select { - case <-ctx.Done(): - t.Fatal(ctx.Err()) - case err := <-connectErr: - t.Fatal(err) - case ev := <-evC: - if ev.Type == EventTypeConn && !ev.Control { - // fail on any disconnect - if !ev.Conn.Up { - t.Fatalf("unexpected disconnect: %v -> %v", ev.Conn.One, ev.Conn.Other) - } - checkIds[ev.Conn.One] = append(checkIds[ev.Conn.One], ev.Conn.Other) - checkIds[ev.Conn.Other] = append(checkIds[ev.Conn.Other], ev.Conn.One) - connEventCount-- - log.Trace("ev", "count", connEventCount) - if connEventCount == 0 { - break OUTER - } - } - } - } - - // create snapshot of current network - snap, err := network.Snapshot() - if err != nil { - t.Fatal(err) - } - j, err := json.Marshal(snap) - if err != nil { - t.Fatal(err) - } - log.Trace("snapshot taken", "nodes", len(snap.Nodes), "conns", len(snap.Conns), "json", string(j)) - - // verify that the snap element numbers check out - if len(checkIds) != len(snap.Conns) || len(checkIds) != len(snap.Nodes) { - t.Fatalf("snapshot wrong node,conn counts %d,%d != %d", len(snap.Nodes), len(snap.Conns), len(checkIds)) - } - - // shut down sim network - runningOne = false - sub.Unsubscribe() - 
network.Shutdown() - - // check that we have all the expected connections in the snapshot - for nodid, nodConns := range checkIds { - for _, nodConn := range nodConns { - var match bool - for _, snapConn := range snap.Conns { - if snapConn.One == nodid && snapConn.Other == nodConn { - match = true - break - } else if snapConn.Other == nodid && snapConn.One == nodConn { - match = true - break - } - } - if !match { - t.Fatalf("snapshot missing conn %v -> %v", nodid, nodConn) - } - } - } - log.Info("snapshot checked") - - // PART II - // load snapshot and verify that exactly same connections are formed - - adapter = adapters.NewSimAdapter(adapters.LifecycleConstructors{ - "noopwoop": func(ctx *adapters.ServiceContext, stack *node.Node) (node.Lifecycle, error) { - return NewNoopService(nil), nil - }, - }) - network = NewNetwork(adapter, &NetworkConfig{ - DefaultService: "noopwoop", - }) - defer func() { - network.Shutdown() - }() - - // subscribe to peer events - // every node up and conn up event will generate one additional control event - // therefore multiply the count by two - evC = make(chan *Event, (len(snap.Conns)*2)+(len(snap.Nodes)*2)) - sub = network.Events().Subscribe(evC) - defer sub.Unsubscribe() - - // load the snapshot - // spawn separate thread to avoid deadlock in the event listeners - err = network.Load(snap) - if err != nil { - t.Fatal(err) - } - - // collect connection events up to expected number - ctx, cancel = context.WithTimeout(context.TODO(), time.Second*3) - defer cancel() - - connEventCount = nodeCount - -OuterTwo: - for { - select { - case <-ctx.Done(): - t.Fatal(ctx.Err()) - case ev := <-evC: - if ev.Type == EventTypeConn && !ev.Control { - - // fail on any disconnect - if !ev.Conn.Up { - t.Fatalf("unexpected disconnect: %v -> %v", ev.Conn.One, ev.Conn.Other) - } - log.Trace("conn", "on", ev.Conn.One, "other", ev.Conn.Other) - checkIds[ev.Conn.One] = append(checkIds[ev.Conn.One], ev.Conn.Other) - checkIds[ev.Conn.Other] = append(checkIds[ev.Conn.Other], ev.Conn.One) - connEventCount-- - log.Trace("ev", "count", connEventCount) - if connEventCount == 0 { - break OuterTwo - } - } - } - } - - // check that we have all expected connections in the network - for _, snapConn := range snap.Conns { - var match bool - for nodid, nodConns := range checkIds { - for _, nodConn := range nodConns { - if snapConn.One == nodid && snapConn.Other == nodConn { - match = true - break - } else if snapConn.Other == nodid && snapConn.One == nodConn { - match = true - break - } - } - } - if !match { - t.Fatalf("network missing conn %v -> %v", snapConn.One, snapConn.Other) - } - } - - // verify that network didn't generate any other additional connection events after the ones we have collected within a reasonable period of time - ctx, cancel = context.WithTimeout(context.TODO(), time.Second) - defer cancel() - select { - case <-ctx.Done(): - case ev := <-evC: - if ev.Type == EventTypeConn { - t.Fatalf("Superfluous conn found %v -> %v", ev.Conn.One, ev.Conn.Other) - } - } - - // This test validates if all connections from the snapshot - // are created in the network. - t.Run("conns after load", func(t *testing.T) { - // Create new network. - n := NewNetwork( - adapters.NewSimAdapter(adapters.LifecycleConstructors{ - "noopwoop": func(ctx *adapters.ServiceContext, stack *node.Node) (node.Lifecycle, error) { - return NewNoopService(nil), nil - }, - }), - &NetworkConfig{ - DefaultService: "noopwoop", - }, - ) - defer n.Shutdown() - - // Load the same snapshot. 
- err := n.Load(snap) - if err != nil { - t.Fatal(err) - } - - // Check every connection from the snapshot - // if it is in the network, too. - for _, c := range snap.Conns { - if n.GetConn(c.One, c.Other) == nil { - t.Errorf("missing connection: %s -> %s", c.One, c.Other) - } - } - }) -} - -// TestNetworkSimulation creates a multi-node simulation network with each node -// connected in a ring topology, checks that all nodes successfully handshake -// with each other and that a snapshot fully represents the desired topology -func TestNetworkSimulation(t *testing.T) { - t.Skip("need test for p2p sentry") - // create simulation network with 20 testService nodes - adapter := adapters.NewSimAdapter(adapters.LifecycleConstructors{ - "test": newTestService, - }) - network := NewNetwork(adapter, &NetworkConfig{ - DefaultService: "test", - }) - defer network.Shutdown() - nodeCount := 20 - ids := make([]enode.ID, nodeCount) - for i := 0; i < nodeCount; i++ { - conf := adapters.RandomNodeConfig() - node, err := network.NewNodeWithConfig(conf) - if err != nil { - t.Fatalf("error creating node: %s", err) - } - if err := network.Start(node.ID()); err != nil { - t.Fatalf("error starting node: %s", err) - } - ids[i] = node.ID() - } - - // perform a check which connects the nodes in a ring (so each node is - // connected to exactly two peers) and then checks that all nodes - // performed two handshakes by checking their peerCount - action := func(_ context.Context) error { - for i, id := range ids { - peerID := ids[(i+1)%len(ids)] - if err := network.Connect(id, peerID); err != nil { - return err - } - } - return nil - } - check := func(ctx context.Context, id enode.ID) (bool, error) { - // check we haven't run out of time - select { - case <-ctx.Done(): - return false, ctx.Err() - default: - } - - // get the node - node := network.GetNode(id) - if node == nil { - return false, fmt.Errorf("unknown node: %s", id) - } - - // check it has exactly two peers - client, err := node.Client() - if err != nil { - return false, err - } - var peerCount int64 - if err := client.CallContext(ctx, &peerCount, "test_peerCount"); err != nil { - return false, err - } - switch { - case peerCount < 2: - return false, nil - case peerCount == 2: - return true, nil - default: - return false, fmt.Errorf("unexpected peerCount: %d", peerCount) - } - } - - timeout := 30 * time.Second - ctx, cancel := context.WithTimeout(context.Background(), timeout) - defer cancel() - - // trigger a check every 100ms - trigger := make(chan enode.ID) - go triggerChecks(ctx, ids, trigger, 100*time.Millisecond) - - result := NewSimulation(network).Run(ctx, &Step{ - Action: action, - Trigger: trigger, - Expect: &Expectation{ - Nodes: ids, - Check: check, - }, - }) - if result.Error != nil { - t.Fatalf("simulation failed: %s", result.Error) - } - - // take a network snapshot and check it contains the correct topology - snap, err := network.Snapshot() - if err != nil { - t.Fatal(err) - } - if len(snap.Nodes) != nodeCount { - t.Fatalf("expected snapshot to contain %d nodes, got %d", nodeCount, len(snap.Nodes)) - } - if len(snap.Conns) != nodeCount { - t.Fatalf("expected snapshot to contain %d connections, got %d", nodeCount, len(snap.Conns)) - } - for i, id := range ids { - conn := snap.Conns[i] - if conn.One != id { - t.Fatalf("expected conn[%d].One to be %s, got %s", i, id, conn.One) - } - peerID := ids[(i+1)%len(ids)] - if conn.Other != peerID { - t.Fatalf("expected conn[%d].Other to be %s, got %s", i, peerID, conn.Other) - } - } -} - -func 
createTestNodes(count int, network *Network) (nodes []*Node, err error) { - for i := 0; i < count; i++ { - nodeConf := adapters.RandomNodeConfig() - node, err := network.NewNodeWithConfig(nodeConf) - if err != nil { - return nil, err - } - if err := network.Start(node.ID()); err != nil { - return nil, err - } - - nodes = append(nodes, node) - } - - return nodes, nil -} - -func createTestNodesWithProperty(property string, count int, network *Network) (propertyNodes []*Node, err error) { - for i := 0; i < count; i++ { - nodeConf := adapters.RandomNodeConfig() - nodeConf.Properties = append(nodeConf.Properties, property) - - node, err := network.NewNodeWithConfig(nodeConf) - if err != nil { - return nil, err - } - if err := network.Start(node.ID()); err != nil { - return nil, err - } - - propertyNodes = append(propertyNodes, node) - } - - return propertyNodes, nil -} - -// TestGetNodeIDs creates a set of nodes and attempts to retrieve their IDs,. -// It then tests again whilst excluding a node ID from being returned. -// If a node ID is not returned, or more node IDs than expected are returned, the test fails. -func TestGetNodeIDs(t *testing.T) { - t.Skip("need test for p2p sentry") - adapter := adapters.NewSimAdapter(adapters.LifecycleConstructors{ - "test": newTestService, - }) - network := NewNetwork(adapter, &NetworkConfig{ - DefaultService: "test", - }) - defer network.Shutdown() - - numNodes := 5 - nodes, err := createTestNodes(numNodes, network) - if err != nil { - t.Fatalf("Could not creat test nodes %v", err) - } - - gotNodeIDs := network.GetNodeIDs() - if len(gotNodeIDs) != numNodes { - t.Fatalf("Expected %d nodes, got %d", numNodes, len(gotNodeIDs)) - } - - for _, node1 := range nodes { - match := false - for _, node2ID := range gotNodeIDs { - if bytes.Equal(node1.ID().Bytes(), node2ID.Bytes()) { - match = true - break - } - } - - if !match { - t.Fatalf("A created node was not returned by GetNodes(), ID: %s", node1.ID()) - } - } - - excludeNodeID := nodes[3].ID() - gotNodeIDsExcl := network.GetNodeIDs(excludeNodeID) - if len(gotNodeIDsExcl) != numNodes-1 { - t.Fatalf("Expected one less node ID to be returned") - } - for _, nodeID := range gotNodeIDsExcl { - if bytes.Equal(excludeNodeID.Bytes(), nodeID.Bytes()) { - t.Fatalf("GetNodeIDs returned the node ID we excluded, ID: %s", nodeID) - } - } -} - -// TestGetNodes creates a set of nodes and attempts to retrieve them again. -// It then tests again whilst excluding a node from being returned. -// If a node is not returned, or more nodes than expected are returned, the test fails. 
-func TestGetNodes(t *testing.T) { - t.Skip("need test for p2p sentry") - adapter := adapters.NewSimAdapter(adapters.LifecycleConstructors{ - "test": newTestService, - }) - network := NewNetwork(adapter, &NetworkConfig{ - DefaultService: "test", - }) - defer network.Shutdown() - - numNodes := 5 - nodes, err := createTestNodes(numNodes, network) - if err != nil { - t.Fatalf("Could not creat test nodes %v", err) - } - - gotNodes := network.GetNodes() - if len(gotNodes) != numNodes { - t.Fatalf("Expected %d nodes, got %d", numNodes, len(gotNodes)) - } - - for _, node1 := range nodes { - match := false - for _, node2 := range gotNodes { - if bytes.Equal(node1.ID().Bytes(), node2.ID().Bytes()) { - match = true - break - } - } - - if !match { - t.Fatalf("A created node was not returned by GetNodes(), ID: %s", node1.ID()) - } - } - - excludeNodeID := nodes[3].ID() - gotNodesExcl := network.GetNodes(excludeNodeID) - if len(gotNodesExcl) != numNodes-1 { - t.Fatalf("Expected one less node to be returned") - } - for _, node := range gotNodesExcl { - if bytes.Equal(excludeNodeID.Bytes(), node.ID().Bytes()) { - t.Fatalf("GetNodes returned the node we excluded, ID: %s", node.ID()) - } - } -} - -// TestGetNodesByID creates a set of nodes and attempts to retrieve a subset of them by ID -// If a node is not returned, or more nodes than expected are returned, the test fails. -func TestGetNodesByID(t *testing.T) { - t.Skip("need test for p2p sentry") - adapter := adapters.NewSimAdapter(adapters.LifecycleConstructors{ - "test": newTestService, - }) - network := NewNetwork(adapter, &NetworkConfig{ - DefaultService: "test", - }) - defer network.Shutdown() - - numNodes := 5 - nodes, err := createTestNodes(numNodes, network) - if err != nil { - t.Fatalf("Could not create test nodes: %v", err) - } - - numSubsetNodes := 2 - subsetNodes := nodes[0:numSubsetNodes] - var subsetNodeIDs []enode.ID - for _, node := range subsetNodes { - subsetNodeIDs = append(subsetNodeIDs, node.ID()) - } - - gotNodesByID := network.GetNodesByID(subsetNodeIDs) - if len(gotNodesByID) != numSubsetNodes { - t.Fatalf("Expected %d nodes, got %d", numSubsetNodes, len(gotNodesByID)) - } - - for _, node1 := range subsetNodes { - match := false - for _, node2 := range gotNodesByID { - if bytes.Equal(node1.ID().Bytes(), node2.ID().Bytes()) { - match = true - break - } - } - - if !match { - t.Fatalf("A created node was not returned by GetNodesByID(), ID: %s", node1.ID()) - } - } -} - -// TestGetNodesByProperty creates a subset of nodes with a property assigned. -// GetNodesByProperty is then checked for correctness by comparing the nodes returned to those initially created. -// If a node with a property is not found, or more nodes than expected are returned, the test fails. 
-func TestGetNodesByProperty(t *testing.T) { - t.Skip("need test for p2p sentry") - adapter := adapters.NewSimAdapter(adapters.LifecycleConstructors{ - "test": newTestService, - }) - network := NewNetwork(adapter, &NetworkConfig{ - DefaultService: "test", - }) - defer network.Shutdown() - - numNodes := 3 - _, err := createTestNodes(numNodes, network) - if err != nil { - t.Fatalf("Failed to create nodes: %v", err) - } - - numPropertyNodes := 3 - propertyTest := "test" - propertyNodes, err := createTestNodesWithProperty(propertyTest, numPropertyNodes, network) - if err != nil { - t.Fatalf("Failed to create nodes with property: %v", err) - } - - gotNodesByProperty := network.GetNodesByProperty(propertyTest) - if len(gotNodesByProperty) != numPropertyNodes { - t.Fatalf("Expected %d nodes with a property, got %d", numPropertyNodes, len(gotNodesByProperty)) - } - - for _, node1 := range propertyNodes { - match := false - for _, node2 := range gotNodesByProperty { - if bytes.Equal(node1.ID().Bytes(), node2.ID().Bytes()) { - match = true - break - } - } - - if !match { - t.Fatalf("A created node with property was not returned by GetNodesByProperty(), ID: %s", node1.ID()) - } - } -} - -// TestGetNodeIDsByProperty creates a subset of nodes with a property assigned. -// GetNodeIDsByProperty is then checked for correctness by comparing the node IDs returned to those initially created. -// If a node ID with a property is not found, or more nodes IDs than expected are returned, the test fails. -func TestGetNodeIDsByProperty(t *testing.T) { - t.Skip("need test for p2p sentry") - adapter := adapters.NewSimAdapter(adapters.LifecycleConstructors{ - "test": newTestService, - }) - network := NewNetwork(adapter, &NetworkConfig{ - DefaultService: "test", - }) - defer network.Shutdown() - - numNodes := 3 - _, err := createTestNodes(numNodes, network) - if err != nil { - t.Fatalf("Failed to create nodes: %v", err) - } - - numPropertyNodes := 3 - propertyTest := "test" - propertyNodes, err := createTestNodesWithProperty(propertyTest, numPropertyNodes, network) - if err != nil { - t.Fatalf("Failed to created nodes with property: %v", err) - } - - gotNodeIDsByProperty := network.GetNodeIDsByProperty(propertyTest) - if len(gotNodeIDsByProperty) != numPropertyNodes { - t.Fatalf("Expected %d nodes with a property, got %d", numPropertyNodes, len(gotNodeIDsByProperty)) - } - - for _, node1 := range propertyNodes { - match := false - id1 := node1.ID() - for _, id2 := range gotNodeIDsByProperty { - if bytes.Equal(id1.Bytes(), id2.Bytes()) { - match = true - break - } - } - - if !match { - t.Fatalf("Not all nodes IDs were returned by GetNodeIDsByProperty(), ID: %s", id1) - } - } -} - -func triggerChecks(ctx context.Context, ids []enode.ID, trigger chan enode.ID, interval time.Duration) { - tick := time.NewTicker(interval) - defer tick.Stop() - for { - select { - case <-tick.C: - for _, id := range ids { - select { - case trigger <- id: - case <-ctx.Done(): - return - } - } - case <-ctx.Done(): - return - } - } -} - -// \todo: refactor to implement shapshots -// and connect configuration methods once these are moved from -// swarm/network/simulations/connect.go -func BenchmarkMinimalService(b *testing.B) { - b.Run("ring/32", benchmarkMinimalServiceTmp) -} - -func benchmarkMinimalServiceTmp(b *testing.B) { - - // stop timer to discard setup time pollution - args := strings.Split(b.Name(), "/") - nodeCount, err := strconv.ParseInt(args[2], 10, 16) - if err != nil { - b.Fatal(err) - } - - for i := 0; i < b.N; i++ { - // this is 
a minimal service, whose protocol will close a channel upon run of protocol - // making it possible to bench the time it takes for the service to start and protocol actually to be run - protoCMap := make(map[enode.ID]map[enode.ID]chan struct{}) - adapter := adapters.NewSimAdapter(adapters.LifecycleConstructors{ - "noopwoop": func(ctx *adapters.ServiceContext, stack *node.Node) (node.Lifecycle, error) { - protoCMap[ctx.Config.ID] = make(map[enode.ID]chan struct{}) - svc := NewNoopService(protoCMap[ctx.Config.ID]) - return svc, nil - }, - }) - - // create network - network := NewNetwork(adapter, &NetworkConfig{ - DefaultService: "noopwoop", - }) - defer network.Shutdown() - - // create and start nodes - ids := make([]enode.ID, nodeCount) - for i := 0; i < int(nodeCount); i++ { - conf := adapters.RandomNodeConfig() - node, err := network.NewNodeWithConfig(conf) - if err != nil { - b.Fatalf("error creating node: %s", err) - } - if err := network.Start(node.ID()); err != nil { - b.Fatalf("error starting node: %s", err) - } - ids[i] = node.ID() - } - - // ready, set, go - b.ResetTimer() - - // connect nodes in a ring - for i, id := range ids { - peerID := ids[(i+1)%len(ids)] - if err := network.Connect(id, peerID); err != nil { - b.Fatal(err) - } - } - - // wait for all protocols to signal to close down - ctx, cancel := context.WithTimeout(context.TODO(), time.Second) - defer cancel() - for nodid, peers := range protoCMap { - for peerid, peerC := range peers { - log.Trace("getting ", "node", nodid, "peer", peerid) - select { - case <-ctx.Done(): - b.Fatal(ctx.Err()) - case <-peerC: - } - } - } - } -} - -func TestNode_UnmarshalJSON(t *testing.T) { - t.Run("up_field", func(t *testing.T) { - runNodeUnmarshalJSON(t, casesNodeUnmarshalJSONUpField()) - }) - t.Run("config_field", func(t *testing.T) { - runNodeUnmarshalJSON(t, casesNodeUnmarshalJSONConfigField()) - }) -} - -func runNodeUnmarshalJSON(t *testing.T, tests []nodeUnmarshalTestCase) { - t.Helper() - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - var got *Node - if err := json.Unmarshal([]byte(tt.marshaled), &got); err != nil { - expectErrorMessageToContain(t, err, tt.wantErr) - got = nil - } - expectNodeEquality(t, got, tt.want) - }) - } -} - -type nodeUnmarshalTestCase struct { - name string - marshaled string - want *Node - wantErr string -} - -func expectErrorMessageToContain(t *testing.T, got error, want string) { - t.Helper() - if got == nil && want == "" { - return - } - - if got == nil && want != "" { - t.Errorf("error was expected, got: nil, want: %v", want) - return - } - - if !strings.Contains(got.Error(), want) { - t.Errorf( - "unexpected error message, got %v, want: %v", - want, - got, - ) - } -} - -func expectNodeEquality(t *testing.T, got, want *Node) { - t.Helper() - if !reflect.DeepEqual(got, want) { - t.Errorf("Node.UnmarshalJSON() = %v, want %v", got, want) - } -} - -func casesNodeUnmarshalJSONUpField() []nodeUnmarshalTestCase { - return []nodeUnmarshalTestCase{ - { - name: "empty json", - marshaled: "{}", - want: newNode(nil, nil, false), - }, - { - name: "a stopped node", - marshaled: "{\"up\": false}", - want: newNode(nil, nil, false), - }, - { - name: "a running node", - marshaled: "{\"up\": true}", - want: newNode(nil, nil, true), - }, - { - name: "invalid JSON value on valid key", - marshaled: "{\"up\": foo}", - wantErr: "invalid character", - }, - { - name: "invalid JSON key and value", - marshaled: "{foo: bar}", - wantErr: "invalid character", - }, - { - name: "bool value expected but got 
something else (string)", - marshaled: "{\"up\": \"true\"}", - wantErr: "cannot unmarshal string into Go struct", - }, - } -} - -func casesNodeUnmarshalJSONConfigField() []nodeUnmarshalTestCase { - // Don't do a big fuss around testing, as adapters.NodeConfig should - // handle it's own serialization. Just do a sanity check. - return []nodeUnmarshalTestCase{ - { - name: "Config field is omitted", - marshaled: "{}", - want: newNode(nil, nil, false), - }, - { - name: "Config field is nil", - marshaled: "{\"config\": null}", - want: newNode(nil, nil, false), - }, - { - name: "a non default Config field", - marshaled: "{\"config\":{\"name\":\"node_ecdd0\",\"port\":44665}}", - want: newNode(nil, &adapters.NodeConfig{Name: "node_ecdd0", Port: 44665}, false), - }, - } -} diff --git a/rpc/client.go b/rpc/client.go index 172b2d31696..7d5b27b1254 100644 --- a/rpc/client.go +++ b/rpc/client.go @@ -112,7 +112,7 @@ type clientConn struct { func (c *Client) newClientConn(conn ServerCodec) *clientConn { ctx := context.WithValue(context.Background(), clientContextKey{}, c) - handler := newHandler(ctx, conn, c.idgen, c.services, c.methodAllowList, 50) + handler := newHandler(ctx, conn, c.idgen, c.services, c.methodAllowList, 50, false /* traceRequests */) return &clientConn{conn, handler} } diff --git a/rpc/handler.go b/rpc/handler.go index f35c8226b1e..60994b590ad 100644 --- a/rpc/handler.go +++ b/rpc/handler.go @@ -70,6 +70,7 @@ type handler struct { subLock sync.Mutex serverSubs map[ID]*Subscription maxBatchConcurrency uint + traceRequests bool } type callProc struct { @@ -77,7 +78,7 @@ type callProc struct { notifiers []*Notifier } -func newHandler(connCtx context.Context, conn jsonWriter, idgen func() ID, reg *serviceRegistry, allowList AllowList, maxBatchConcurrency uint) *handler { +func newHandler(connCtx context.Context, conn jsonWriter, idgen func() ID, reg *serviceRegistry, allowList AllowList, maxBatchConcurrency uint, traceRequests bool) *handler { rootCtx, cancelRoot := context.WithCancel(connCtx) forbiddenList := newForbiddenList() h := &handler{ @@ -95,6 +96,7 @@ func newHandler(connCtx context.Context, conn jsonWriter, idgen func() ID, reg * forbiddenList: forbiddenList, maxBatchConcurrency: maxBatchConcurrency, + traceRequests: traceRequests, } if conn.remoteAddr() != "" { @@ -341,7 +343,11 @@ func (h *handler) handleCallMsg(ctx *callProc, msg *jsonrpcMessage, stream *json switch { case msg.isNotification(): h.handleCall(ctx, msg, stream) - h.log.Trace("Served", "t", time.Since(start), "method", msg.Method, "params", string(msg.Params)) + if h.traceRequests { + h.log.Info("Served", "t", time.Since(start), "method", msg.Method, "params", string(msg.Params)) + } else { + h.log.Trace("Served", "t", time.Since(start), "method", msg.Method, "params", string(msg.Params)) + } return nil case msg.isCall(): resp := h.handleCall(ctx, msg, stream) @@ -354,7 +360,11 @@ func (h *handler) handleCallMsg(ctx *callProc, msg *jsonrpcMessage, stream *json "err", resp.Error.Message) } } - h.log.Trace("Served", "t", time.Since(start), "method", msg.Method, "reqid", idForLog{msg.ID}, "params", string(msg.Params)) + if h.traceRequests { + h.log.Info("Served", "t", time.Since(start), "method", msg.Method, "reqid", idForLog{msg.ID}, "params", string(msg.Params)) + } else { + h.log.Trace("Served", "t", time.Since(start), "method", msg.Method, "reqid", idForLog{msg.ID}, "params", string(msg.Params)) + } return resp case msg.hasValidID(): return msg.errorResponse(&invalidRequestError{"invalid request"}) diff --git 
a/rpc/http_test.go b/rpc/http_test.go index f9aae87ed44..602b84a3a88 100644 --- a/rpc/http_test.go +++ b/rpc/http_test.go @@ -104,7 +104,7 @@ func TestHTTPResponseWithEmptyGet(t *testing.T) { func TestHTTPRespBodyUnlimited(t *testing.T) { const respLength = maxRequestContentLength * 3 - s := NewServer(50) + s := NewServer(50, false /* traceRequests */) defer s.Stop() if err := s.RegisterName("test", largeRespService{respLength}); err != nil { t.Fatal(err) diff --git a/rpc/server.go b/rpc/server.go index b92487bb1d7..1b42549cdbf 100644 --- a/rpc/server.go +++ b/rpc/server.go @@ -49,11 +49,12 @@ type Server struct { codecs mapset.Set batchConcurrency uint + traceRequests bool // Whether to print requests at INFO level } // NewServer creates a new server instance with no registered handlers. -func NewServer(batchConcurrency uint) *Server { - server := &Server{idgen: randomIDGenerator(), codecs: mapset.NewSet(), run: 1, batchConcurrency: batchConcurrency} +func NewServer(batchConcurrency uint, traceRequests bool) *Server { + server := &Server{idgen: randomIDGenerator(), codecs: mapset.NewSet(), run: 1, batchConcurrency: batchConcurrency, traceRequests: traceRequests} // Register the default service providing meta information about the RPC service such // as the services and methods it offers. rpcService := &RPCService{server: server} @@ -105,7 +106,7 @@ func (s *Server) serveSingleRequest(ctx context.Context, codec ServerCodec) { return } - h := newHandler(ctx, codec, s.idgen, &s.services, s.methodAllowList, s.batchConcurrency) + h := newHandler(ctx, codec, s.idgen, &s.services, s.methodAllowList, s.batchConcurrency, s.traceRequests) h.allowSubscribe = false defer h.close(io.EOF, nil) diff --git a/rpc/server_test.go b/rpc/server_test.go index 82c11bd7349..c58b1168213 100644 --- a/rpc/server_test.go +++ b/rpc/server_test.go @@ -31,7 +31,7 @@ import ( ) func TestServerRegisterName(t *testing.T) { - server := NewServer(50) + server := NewServer(50, false /* traceRequests */) service := new(testService) if err := server.RegisterName("test", service); err != nil { diff --git a/rpc/subscription_test.go b/rpc/subscription_test.go index f2474414a35..6b4d5d1cca7 100644 --- a/rpc/subscription_test.go +++ b/rpc/subscription_test.go @@ -53,7 +53,7 @@ func TestSubscriptions(t *testing.T) { subCount = len(namespaces) notificationCount = 3 - server = NewServer(50) + server = NewServer(50, false /* traceRequests */) clientConn, serverConn = net.Pipe() out = json.NewEncoder(clientConn) in = json.NewDecoder(clientConn) diff --git a/rpc/testservice_test.go b/rpc/testservice_test.go index e9e16f4202f..1378620586d 100644 --- a/rpc/testservice_test.go +++ b/rpc/testservice_test.go @@ -26,7 +26,7 @@ import ( ) func newTestServer() *Server { - server := NewServer(50) + server := NewServer(50, false /* traceRequests */) server.idgen = sequentialIDGenerator() if err := server.RegisterName("test", new(testService)); err != nil { panic(err) diff --git a/rpc/websocket_test.go b/rpc/websocket_test.go index 5e370cea8b4..373687efa09 100644 --- a/rpc/websocket_test.go +++ b/rpc/websocket_test.go @@ -163,7 +163,7 @@ func TestClientWebsocketPing(t *testing.T) { // This checks that the websocket transport can deal with large messages. 
func TestClientWebsocketLargeMessage(t *testing.T) { var ( - srv = NewServer(50) + srv = NewServer(50, false /* traceRequests */) httpsrv = httptest.NewServer(srv.WebsocketHandler(nil, nil, false)) wsURL = "ws:" + strings.TrimPrefix(httpsrv.URL, "http:") ) diff --git a/turbo/cli/default_flags.go b/turbo/cli/default_flags.go index b127691e1c2..a066171f4c8 100644 --- a/turbo/cli/default_flags.go +++ b/turbo/cli/default_flags.go @@ -58,6 +58,7 @@ var DefaultFlags = []cli.Flag{ utils.HTTPApiFlag, utils.WSEnabledFlag, utils.WsCompressionFlag, + utils.HTTPTraceFlag, utils.StateCacheFlag, utils.RpcBatchConcurrencyFlag, utils.DBReadConcurrencyFlag, diff --git a/turbo/cli/flags.go b/turbo/cli/flags.go index 8eaf765ca4e..bdcbdd1983b 100644 --- a/turbo/cli/flags.go +++ b/turbo/cli/flags.go @@ -294,6 +294,7 @@ func setEmbeddedRpcDaemon(ctx *cli.Context, cfg *nodecfg.Config) { EngineHTTPListenAddress: ctx.GlobalString(utils.EngineAddr.Name), EnginePort: ctx.GlobalInt(utils.EnginePort.Name), JWTSecretPath: jwtSecretPath, + TraceRequests: ctx.GlobalBool(utils.HTTPTraceFlag.Name), HttpCORSDomain: strings.Split(ctx.GlobalString(utils.HTTPCORSDomainFlag.Name), ","), HttpVirtualHost: strings.Split(ctx.GlobalString(utils.HTTPVirtualHostsFlag.Name), ","), API: strings.Split(ctx.GlobalString(utils.HTTPApiFlag.Name), ","), From 29760d142549bef92729154cd46110e84b220789 Mon Sep 17 00:00:00 2001 From: Enrique Jose Avila Asapche Date: Sun, 19 Jun 2022 21:39:43 +0300 Subject: [PATCH 083/136] Getting rid of unnecessary cache (#4494) * whitelistedTable used * added descriptions * no cache * lint --- eth/stagedsync/stage_execute.go | 14 ++------ ethdb/olddb/mapmutation.go | 60 +++++++++------------------------ 2 files changed, 18 insertions(+), 56 deletions(-) diff --git a/eth/stagedsync/stage_execute.go b/eth/stagedsync/stage_execute.go index aeae1dc7725..cb2c34a1a1f 100644 --- a/eth/stagedsync/stage_execute.go +++ b/eth/stagedsync/stage_execute.go @@ -9,7 +9,6 @@ import ( "time" "github.com/c2h5oh/datasize" - lru "github.com/hashicorp/golang-lru" "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/cmp" "github.com/ledgerwatch/erigon-lib/common/length" @@ -38,8 +37,7 @@ import ( ) const ( - logInterval = 20 * time.Second - lruDefaultSize = 1_000_000 // 56 MB + logInterval = 20 * time.Second ) type HasChangeSetWriter interface { @@ -222,15 +220,9 @@ func SpawnExecuteBlocksStage(s *StageState, u Unwinder, tx kv.RwTx, toBlock uint startTime := time.Now() - whitelistedTables := []string{kv.Code, kv.ContractCode} var batch ethdb.DbWithPendingMutations - // Contract code is unlikely to change too much, so let's keep it cached - contractCodeCache, err := lru.New(lruDefaultSize) - if err != nil { - return err - } // state is stored through ethdb batches - batch = olddb.NewHashBatch(tx, quit, cfg.tmpdir, whitelistedTables, contractCodeCache) + batch = olddb.NewHashBatch(tx, quit, cfg.tmpdir) defer batch.Rollback() // changes are stored through memory buffer @@ -313,7 +305,7 @@ Loop: // TODO: This creates stacked up deferrals defer tx.Rollback() } - batch = olddb.NewHashBatch(tx, quit, cfg.tmpdir, whitelistedTables, contractCodeCache) + batch = olddb.NewHashBatch(tx, quit, cfg.tmpdir) // TODO: This creates stacked up deferrals defer batch.Rollback() } diff --git a/ethdb/olddb/mapmutation.go b/ethdb/olddb/mapmutation.go index f7bd4c19ae2..d1425522004 100644 --- a/ethdb/olddb/mapmutation.go +++ b/ethdb/olddb/mapmutation.go @@ -8,7 +8,6 @@ import ( "time" "unsafe" - lru 
"github.com/hashicorp/golang-lru" "github.com/ledgerwatch/erigon-lib/etl" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/ethdb" @@ -16,16 +15,14 @@ import ( ) type mapmutation struct { - puts map[string]map[string][]byte - whitelistedTables map[string]byte - whitelistCache *lru.Cache - db kv.RwTx - quit <-chan struct{} - clean func() - mu sync.RWMutex - size int - count uint64 - tmpdir string + puts map[string]map[string][]byte // table -> key -> value ie. blocks -> hash -> blockBod + db kv.RwTx + quit <-chan struct{} + clean func() + mu sync.RWMutex + size int + count uint64 + tmpdir string } // NewBatch - starts in-mem batch @@ -36,7 +33,7 @@ type mapmutation struct { // defer batch.Rollback() // ... some calculations on `batch` // batch.Commit() -func NewHashBatch(tx kv.RwTx, quit <-chan struct{}, tmpdir string, whitelistedTables []string, whitelistCache *lru.Cache) *mapmutation { +func NewHashBatch(tx kv.RwTx, quit <-chan struct{}, tmpdir string) *mapmutation { clean := func() {} if quit == nil { ch := make(chan struct{}) @@ -44,25 +41,15 @@ func NewHashBatch(tx kv.RwTx, quit <-chan struct{}, tmpdir string, whitelistedTa quit = ch } - whitelistedTablesMap := make(map[string]byte) - for idx, table := range whitelistedTables { - whitelistedTablesMap[table] = byte(idx) - } return &mapmutation{ - db: tx, - puts: make(map[string]map[string][]byte), - whitelistCache: whitelistCache, - quit: quit, - clean: clean, - tmpdir: tmpdir, - whitelistedTables: make(map[string]byte), + db: tx, + puts: make(map[string]map[string][]byte), + quit: quit, + clean: clean, + tmpdir: tmpdir, } } -func (m *mapmutation) makeCacheKey(table string, key []byte) string { - return string(append(key, m.whitelistedTables[table])) -} - func (m *mapmutation) RwKV() kv.RwDB { if casted, ok := m.db.(ethdb.HasRwKV); ok { return casted.RwKV() @@ -70,11 +57,6 @@ func (m *mapmutation) RwKV() kv.RwDB { return nil } -func (m *mapmutation) isWhitelisted(table string) bool { - _, ok := m.whitelistedTables[table] - return ok -} - func (m *mapmutation) getMem(table string, key []byte) ([]byte, bool) { m.mu.RLock() defer m.mu.RUnlock() @@ -85,11 +67,6 @@ func (m *mapmutation) getMem(table string, key []byte) ([]byte, bool) { return value, ok } - if m.whitelistCache != nil && m.isWhitelisted(table) { - if value, ok := m.whitelistCache.Get(m.makeCacheKey(table, key)); ok { - return value.([]byte), ok - } - } return nil, false } @@ -145,10 +122,6 @@ func (m *mapmutation) GetOne(table string, key []byte) ([]byte, error) { if err != nil { return nil, err } - if m.whitelistCache != nil && m.isWhitelisted(table) { - m.whitelistCache.Add(m.makeCacheKey(table, key), value) - } - return value, nil } return nil, nil @@ -187,6 +160,7 @@ func (m *mapmutation) Has(table string, key []byte) (bool, error) { return false, nil } +// puts a table key with a value and if the table is not found then it appends a table func (m *mapmutation) Put(table string, key []byte, value []byte) error { m.mu.Lock() defer m.mu.Unlock() @@ -254,10 +228,6 @@ func (m *mapmutation) doCommit(tx kv.RwTx) error { defer collector.Close() for key, value := range bucket { collector.Collect([]byte(key), value) - // Update cache on commits - if m.isWhitelisted(table) { - m.whitelistCache.Add(m.makeCacheKey(table, []byte(key)), value) - } count++ select { default: From 7a2b575e56d30eda4264247e2b1b55909c43c587 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Sun, 19 Jun 2022 21:45:36 +0200 Subject: [PATCH 084/136] Added memory execution (#4446) * add * 
added in memory execution draft * func to state * added functionality * backend function for memory execution * simplified stage state * haltable stage execution * added mod sum * hd * sl * LOL * LOL again * need trace * more logs * added cleanup on fcu * fcu * mod * bunch of prtln * feed * ops * headers notify * revert * slightly more commented * head * nil block retire * comments * prevent clean * corrected occasional panics * fixed lint --- cmd/integration/commands/stages.go | 6 +- cmd/integration/commands/state_stages.go | 6 +- cmd/utils/flags.go | 5 + eth/backend.go | 14 +- eth/ethconfig/config.go | 2 + eth/stagedsync/default_stages.go | 47 +++++++ eth/stagedsync/stage.go | 3 + eth/stagedsync/stage_execute.go | 6 + eth/stagedsync/stage_headers.go | 40 +++++- eth/stagedsync/sync.go | 4 +- turbo/cli/default_flags.go | 1 + turbo/stages/headerdownload/header_algos.go | 38 +++++- .../headerdownload/header_data_struct.go | 3 + turbo/stages/mock_sentry.go | 3 +- turbo/stages/stageloop.go | 127 +++++++++++++++++- 15 files changed, 290 insertions(+), 15 deletions(-) diff --git a/cmd/integration/commands/stages.go b/cmd/integration/commands/stages.go index 89d0bf39076..bbb8a617ce4 100644 --- a/cmd/integration/commands/stages.go +++ b/cmd/integration/commands/stages.go @@ -663,7 +663,9 @@ func stageExec(db kv.RwDB, ctx context.Context) error { pm.TxIndex = prune.Distance(s.BlockNumber - pruneTo) } - cfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, nil, chainConfig, engine, vmConfig, nil, false, tmpdir, getBlockReader(chainConfig, db)) + cfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, nil, chainConfig, engine, vmConfig, nil, + /*stateStream=*/ false, + /*badBlockHalt=*/ false, tmpdir, getBlockReader(chainConfig, db)) if unwind > 0 { u := sync.NewUnwindState(stages.Execution, s.BlockNumber-unwind, s.BlockNumber) err := stagedsync.UnwindExecutionStage(u, s, nil, ctx, cfg, false) @@ -1196,7 +1198,7 @@ func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig) panic(err) } - sync, err := stages2.NewStagedSync(context.Background(), logger, db, p2p.Config{}, cfg, sentryControlServer, tmpdir, &stagedsync.Notifications{}, nil, allSn, nil) + sync, err := stages2.NewStagedSync(context.Background(), logger, db, p2p.Config{}, cfg, sentryControlServer, tmpdir, &stagedsync.Notifications{}, nil, allSn, nil, nil) if err != nil { panic(err) } diff --git a/cmd/integration/commands/state_stages.go b/cmd/integration/commands/state_stages.go index a8c3f184b1b..a41e0db2f9d 100644 --- a/cmd/integration/commands/state_stages.go +++ b/cmd/integration/commands/state_stages.go @@ -181,7 +181,7 @@ func syncBySmallSteps(db kv.RwDB, miningConfig params.MiningConfig, ctx context. 
stateStages.DisableStages(stages.Headers, stages.BlockHashes, stages.Bodies, stages.Senders) - execCfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, changeSetHook, chainConfig, engine, vmConfig, nil, false, dirs.Tmp, getBlockReader(chainConfig, db)) + execCfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, changeSetHook, chainConfig, engine, vmConfig, nil, false, false, dirs.Tmp, getBlockReader(chainConfig, db)) execUntilFunc := func(execToBlock uint64) func(firstCycle bool, badBlockUnwind bool, stageState *stagedsync.StageState, unwinder stagedsync.Unwinder, tx kv.RwTx) error { return func(firstCycle bool, badBlockUnwind bool, s *stagedsync.StageState, unwinder stagedsync.Unwinder, tx kv.RwTx) error { @@ -491,7 +491,9 @@ func loopExec(db kv.RwDB, ctx context.Context, unwind uint64) error { from := progress(tx, stages.Execution) to := from + unwind - cfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, nil, chainConfig, engine, vmConfig, nil, false, dirs.Tmp, getBlockReader(chainConfig, db)) + cfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, nil, chainConfig, engine, vmConfig, nil, + /*stateStream=*/ false, + /*badBlockHalt=*/ false, dirs.Tmp, getBlockReader(chainConfig, db)) // set block limit of execute stage sync.MockExecFunc(stages.Execution, func(firstCycle bool, badBlockUnwind bool, stageState *stagedsync.StageState, unwinder stagedsync.Unwinder, tx kv.RwTx) error { diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 4a991bd6f60..d876ef330a3 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -399,6 +399,10 @@ var ( Name: "experimental.tevm", Usage: "Enables Transpiled EVM experiment", } + MemoryOverlayFlag = cli.BoolFlag{ + Name: "experimental.overlay", + Usage: "Enables In-Memory Overlay for PoS", + } TxpoolApiAddrFlag = cli.StringFlag{ Name: "txpool.api.addr", Usage: "txpool api network address, for example: 127.0.0.1:9090 (default: use value of --private.api.addr)", @@ -1379,6 +1383,7 @@ func CheckExclusive(ctx *cli.Context, args ...interface{}) { func SetEthConfig(ctx *cli.Context, nodeConfig *nodecfg.Config, cfg *ethconfig.Config) { cfg.Sync.UseSnapshots = ctx.GlobalBoolT(SnapshotFlag.Name) cfg.Dirs = nodeConfig.Dirs + cfg.MemoryOverlay = ctx.GlobalBool(MemoryOverlayFlag.Name) cfg.Snapshot.KeepBlocks = ctx.GlobalBool(SnapKeepBlocksFlag.Name) cfg.Snapshot.Produce = !ctx.GlobalBool(SnapStopFlag.Name) if !ctx.GlobalIsSet(DownloaderAddrFlag.Name) { diff --git a/eth/backend.go b/eth/backend.go index 291fc943c92..2be452d24ff 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -396,6 +396,18 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere return block, nil } + inMemoryExecution := func(batch kv.RwTx, header *types.Header, body *types.RawBody) error { + stateSync, err := stages2.NewInMemoryExecution(backend.sentryCtx, backend.log, backend.chainDB, stack.Config().P2P, *config, backend.sentriesClient, tmpdir, backend.notifications, backend.downloaderClient, allSnapshots, nil) + if err != nil { + return err + } + // We start the mining step + if err := stages2.StateStep(ctx, batch, stateSync, blockReader, header, body); err != nil { + return err + } + return nil + } + // Initialize ethbackend ethBackendRPC := privateapi.NewEthBackendServer(ctx, backend, backend.chainDB, backend.notifications.Events, blockReader, chainConfig, backend.sentriesClient.Hd.BeaconRequestList, backend.sentriesClient.Hd.PayloadStatusCh, @@ -478,7 +490,7 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) 
(*Ethere headCh = make(chan *types.Block, 1) } - backend.stagedSync, err = stages2.NewStagedSync(backend.sentryCtx, backend.log, backend.chainDB, stack.Config().P2P, *config, backend.sentriesClient, tmpdir, backend.notifications, backend.downloaderClient, allSnapshots, headCh) + backend.stagedSync, err = stages2.NewStagedSync(backend.sentryCtx, backend.log, backend.chainDB, stack.Config().P2P, *config, backend.sentriesClient, tmpdir, backend.notifications, backend.downloaderClient, allSnapshots, headCh, inMemoryExecution) if err != nil { return nil, err } diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index 65806d3864a..714f8dc07f4 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -212,6 +212,8 @@ type Config struct { StateStream bool + MemoryOverlay bool + // Enable WatchTheBurn stage EnabledIssuance bool diff --git a/eth/stagedsync/default_stages.go b/eth/stagedsync/default_stages.go index 9abfb7e36d0..3317d056a2e 100644 --- a/eth/stagedsync/default_stages.go +++ b/eth/stagedsync/default_stages.go @@ -228,6 +228,53 @@ func DefaultStages(ctx context.Context, sm prune.Mode, headers HeadersCfg, cumul } } +func StateStages(ctx context.Context, blockHashCfg BlockHashesCfg, senders SendersCfg, exec ExecuteBlockCfg, hashState HashStateCfg, trieCfg TrieCfg) []*Stage { + return []*Stage{ + { + ID: stages.BlockHashes, + Description: "Write block hashes", + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + return SpawnBlockHashStage(s, tx, blockHashCfg, ctx) + }, + }, + { + ID: stages.Senders, + Description: "Recover senders from tx signatures", + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + return SpawnRecoverSendersStage(senders, s, u, tx, 0, ctx) + }, + }, + { + ID: stages.Execution, + Description: "Execute blocks w/o hash checks", + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + return SpawnExecuteBlocksStage(s, u, tx, 0, ctx, exec, firstCycle) + }, + }, + { + ID: stages.HashState, + Description: "Hash the key in the state", + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + return SpawnHashStateStage(s, tx, hashState, ctx) + }, + }, + { + ID: stages.IntermediateHashes, + Description: "Generate intermediate hashes and computing state root", + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + _, err := SpawnIntermediateHashesStage(s, u, tx, trieCfg, ctx) + return err + }, + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { + return nil + }, + Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx) error { + return nil + }, + }, + } +} + var DefaultForwardOrder = UnwindOrder{ stages.Headers, stages.BlockHashes, diff --git a/eth/stagedsync/stage.go b/eth/stagedsync/stage.go index 5224e3c80c5..d483c776720 100644 --- a/eth/stagedsync/stage.go +++ b/eth/stagedsync/stage.go @@ -9,10 +9,13 @@ import ( libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/log/v3" ) +type ExecutePayloadFunc func(batch kv.RwTx, header *types.Header, body *types.RawBody) error + // ExecFunc is the execution function for the stage to move forward. 
// * state - is the current state of the stage and contains stage data. // * unwinder - if the stage needs to cause unwinding, `unwinder` methods can be used. diff --git a/eth/stagedsync/stage_execute.go b/eth/stagedsync/stage_execute.go index cb2c34a1a1f..128ade1be52 100644 --- a/eth/stagedsync/stage_execute.go +++ b/eth/stagedsync/stage_execute.go @@ -54,6 +54,7 @@ type ExecuteBlockCfg struct { chainConfig *params.ChainConfig engine consensus.Engine vmConfig *vm.Config + badBlockHalt bool tmpdir string stateStream bool accumulator *shards.Accumulator @@ -70,6 +71,7 @@ func StageExecuteBlocksCfg( vmConfig *vm.Config, accumulator *shards.Accumulator, stateStream bool, + badBlockHalt bool, tmpdir string, blockReader services.FullBlockReader, ) ExecuteBlockCfg { @@ -84,6 +86,7 @@ func StageExecuteBlocksCfg( tmpdir: tmpdir, accumulator: accumulator, stateStream: stateStream, + badBlockHalt: badBlockHalt, blockReader: blockReader, } } @@ -280,6 +283,9 @@ Loop: writeCallTraces := nextStagesExpectData || blockNum > cfg.prune.CallTraces.PruneTo(to) if err = executeBlock(block, tx, batch, cfg, *cfg.vmConfig, writeChangeSets, writeReceipts, writeCallTraces, contractHasTEVM, initialCycle); err != nil { log.Warn(fmt.Sprintf("[%s] Execution failed", logPrefix), "block", blockNum, "hash", block.Hash().String(), "err", err) + if cfg.badBlockHalt { + return err + } u.UnwindTo(blockNum-1, block.Hash()) break Loop } diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 09d608ee22b..1fa2f4f9a17 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -48,12 +48,14 @@ type HeadersCfg struct { penalize func(context.Context, []headerdownload.PenaltyItem) batchSize datasize.ByteSize noP2PDiscovery bool + memoryOverlay bool tmpdir string snapshots *snapshotsync.RoSnapshots snapshotDownloader proto_downloader.DownloaderClient blockReader services.FullBlockReader dbEventNotifier snapshotsync.DBEventNotifier + execPayload ExecutePayloadFunc } func StageHeadersCfg( @@ -66,11 +68,13 @@ func StageHeadersCfg( penalize func(context.Context, []headerdownload.PenaltyItem), batchSize datasize.ByteSize, noP2PDiscovery bool, + memoryOverlay bool, snapshots *snapshotsync.RoSnapshots, snapshotDownloader proto_downloader.DownloaderClient, blockReader services.FullBlockReader, tmpdir string, - dbEventNotifier snapshotsync.DBEventNotifier) HeadersCfg { + dbEventNotifier snapshotsync.DBEventNotifier, + execPayload ExecutePayloadFunc) HeadersCfg { return HeadersCfg{ db: db, hd: headerDownload, @@ -86,6 +90,8 @@ func StageHeadersCfg( snapshotDownloader: snapshotDownloader, blockReader: blockReader, dbEventNotifier: dbEventNotifier, + execPayload: execPayload, + memoryOverlay: memoryOverlay, } } @@ -255,7 +261,9 @@ func startHandlingForkChoice( ) error { headerHash := forkChoice.HeadBlockHash log.Debug(fmt.Sprintf("[%s] Handling fork choice", s.LogPrefix()), "headerHash", headerHash) - + if cfg.memoryOverlay { + defer cfg.hd.CleanNextForkState() + } currentHeadHash := rawdb.ReadHeadHeaderHash(tx) if currentHeadHash == headerHash { // no-op log.Debug(fmt.Sprintf("[%s] Fork choice no-op", s.LogPrefix())) @@ -328,6 +336,7 @@ func startHandlingForkChoice( } return err } + if headerHash == canonicalHash { log.Info(fmt.Sprintf("[%s] Fork choice on previously known block", s.LogPrefix())) cfg.hd.BeaconRequestList.Remove(requestId) @@ -350,7 +359,6 @@ func startHandlingForkChoice( } cfg.hd.UpdateTopSeenHeightPoS(headerNumber) - forkingPoint := uint64(0) if headerNumber > 0 { parent, 
err := headerReader.Header(ctx, tx, header.ParentHash, headerNumber-1) @@ -366,6 +374,15 @@ func startHandlingForkChoice( } } + if cfg.memoryOverlay && headerHash == cfg.hd.GetNextForkHash() { + log.Info("Flushing in-memory state") + if err := cfg.hd.FlushNextForkState(tx); err != nil { + return err + } + cfg.hd.SetPendingPayloadStatus(headerHash) + + return nil + } log.Info(fmt.Sprintf("[%s] Fork choice re-org", s.LogPrefix()), "headerNumber", headerNumber, "forkingPoint", forkingPoint) if requestStatus == engineapi.New { @@ -444,7 +461,6 @@ func handleNewPayload( headerHash := header.Hash() log.Trace(fmt.Sprintf("[%s] Handling new payload", s.LogPrefix()), "height", headerNumber, "hash", headerHash) - cfg.hd.UpdateTopSeenHeightPoS(headerNumber) existingCanonicalHash, err := rawdb.ReadCanonicalHash(tx, headerNumber) @@ -533,7 +549,7 @@ func handleNewPayload( } log.Trace(fmt.Sprintf("[%s] New payload begin verification", s.LogPrefix())) - success, err := verifyAndSaveNewPoSHeader(requestStatus, s, tx, cfg, header, headerInserter) + success, err := verifyAndSaveNewPoSHeader(requestStatus, s, tx, cfg, header, payloadMessage.Body, headerInserter) log.Trace(fmt.Sprintf("[%s] New payload verification ended", s.LogPrefix()), "success", success, "err", err) if err != nil || !success { return err @@ -553,6 +569,7 @@ func verifyAndSaveNewPoSHeader( tx kv.RwTx, cfg HeadersCfg, header *types.Header, + body *types.RawBody, headerInserter *headerdownload.HeaderInserter, ) (success bool, err error) { headerNumber := header.Number.Uint64() @@ -582,6 +599,19 @@ func verifyAndSaveNewPoSHeader( currentHeadHash := rawdb.ReadHeadHeaderHash(tx) if currentHeadHash == header.ParentHash { + if cfg.memoryOverlay && (cfg.hd.GetNextForkHash() == (common.Hash{}) || header.ParentHash == cfg.hd.GetNextForkHash()) { + if err = cfg.hd.ValidatePayload(tx, header, body, cfg.execPayload); err != nil { + cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{Status: remote.EngineStatus_INVALID} + return + } + cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{ + Status: remote.EngineStatus_VALID, + LatestValidHash: headerHash, + } + success = true + return + } + // OK, we're on the canonical chain if requestStatus == engineapi.New { cfg.hd.SetPendingPayloadStatus(headerHash) diff --git a/eth/stagedsync/sync.go b/eth/stagedsync/sync.go index 3b64f1023de..5b26d418a39 100644 --- a/eth/stagedsync/sync.go +++ b/eth/stagedsync/sync.go @@ -308,7 +308,9 @@ func printLogs(db kv.RoDB, tx kv.RwTx, timings []Timing) error { } bucketSizes = append(bucketSizes, "FreeList", libcommon.ByteCount(sz)) amountOfFreePagesInDb := sz / 4 // page_id encoded as bigEndian_u32 - bucketSizes = append(bucketSizes, "ReclaimableSpace", libcommon.ByteCount(amountOfFreePagesInDb*db.PageSize())) + if db != nil { + bucketSizes = append(bucketSizes, "ReclaimableSpace", libcommon.ByteCount(amountOfFreePagesInDb*db.PageSize())) + } log.Info("Tables", bucketSizes...) 
} tx.CollectMetrics() diff --git a/turbo/cli/default_flags.go b/turbo/cli/default_flags.go index a066171f4c8..db1dc66e233 100644 --- a/turbo/cli/default_flags.go +++ b/turbo/cli/default_flags.go @@ -67,6 +67,7 @@ var DefaultFlags = []cli.Flag{ utils.RpcGasCapFlag, utils.StarknetGrpcAddressFlag, utils.TevmFlag, + utils.MemoryOverlayFlag, utils.TxpoolApiAddrFlag, utils.TraceMaxtracesFlag, diff --git a/turbo/stages/headerdownload/header_algos.go b/turbo/stages/headerdownload/header_algos.go index 47457aafcf3..5dc5cbd315e 100644 --- a/turbo/stages/headerdownload/header_algos.go +++ b/turbo/stages/headerdownload/header_algos.go @@ -16,6 +16,7 @@ import ( "github.com/ledgerwatch/erigon-lib/etl" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/memdb" "github.com/ledgerwatch/erigon/turbo/services" "github.com/ledgerwatch/log/v3" "golang.org/x/exp/slices" @@ -894,7 +895,6 @@ func (hi *HeaderInserter) FeedHeaderPoS(db kv.GetPut, header *types.Header, hash if err = rawdb.WriteTd(db, hash, blockHeight, td); err != nil { return fmt.Errorf("[%s] failed to WriteTd: %w", hi.logPrefix, err) } - rawdb.WriteHeader(db, header) hi.highest = blockHeight @@ -1084,6 +1084,42 @@ func (hd *HeaderDownload) SetHeadersCollector(collector *etl.Collector) { hd.headersCollector = collector } +func (hd *HeaderDownload) ValidatePayload(tx kv.RwTx, header *types.Header, body *types.RawBody, execPayload func(batch kv.RwTx, header *types.Header, body *types.RawBody) error) error { + hd.lock.Lock() + defer hd.lock.Unlock() + if hd.nextForkState == nil { + hd.nextForkState = memdb.NewMemoryBatch(tx) + } else { + hd.nextForkState.UpdateTxn(tx) + } + hd.nextForkHash = header.Hash() + return execPayload(hd.nextForkState, header, body) +} + +func (hd *HeaderDownload) FlushNextForkState(tx kv.RwTx) error { + hd.lock.Lock() + defer hd.lock.Unlock() + if err := hd.nextForkState.Flush(tx); err != nil { + return err + } + hd.nextForkHash = common.Hash{} + hd.nextForkState = nil + return nil +} + +func (hd *HeaderDownload) CleanNextForkState() { + hd.lock.Lock() + defer hd.lock.Unlock() + hd.nextForkHash = common.Hash{} + hd.nextForkState = nil +} + +func (hd *HeaderDownload) GetNextForkHash() common.Hash { + hd.lock.Lock() + defer hd.lock.Unlock() + return hd.nextForkHash +} + func (hd *HeaderDownload) SetPOSSync(posSync bool) { hd.lock.Lock() defer hd.lock.Unlock() diff --git a/turbo/stages/headerdownload/header_data_struct.go b/turbo/stages/headerdownload/header_data_struct.go index 7041122f7aa..4fdf3dc1398 100644 --- a/turbo/stages/headerdownload/header_data_struct.go +++ b/turbo/stages/headerdownload/header_data_struct.go @@ -9,6 +9,7 @@ import ( lru "github.com/hashicorp/golang-lru" "github.com/ledgerwatch/erigon-lib/etl" + "github.com/ledgerwatch/erigon-lib/kv/memdb" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/core/types" @@ -313,6 +314,8 @@ type HeaderDownload struct { unsettledHeadHeight uint64 // Height of unsettledForkChoice.headBlockHash posDownloaderTip common.Hash // See https://hackmd.io/GDc0maGsQeKfP8o2C7L52w badPoSHeaders map[common.Hash]common.Hash // Invalid Tip -> Last Valid Ancestor + nextForkState *memdb.MemoryMutation // The db state of the next fork. 
+ nextForkHash common.Hash // Hash of the next fork } // HeaderRecord encapsulates two forms of the same header - raw RLP encoding (to avoid duplicated decodings and encodings), and parsed value types.Header diff --git a/turbo/stages/mock_sentry.go b/turbo/stages/mock_sentry.go index 2de0eea076b..57731216e8f 100644 --- a/turbo/stages/mock_sentry.go +++ b/turbo/stages/mock_sentry.go @@ -309,7 +309,7 @@ func MockWithEverything(t *testing.T, gspec *core.Genesis, key *ecdsa.PrivateKey mock.Sync = stagedsync.New( stagedsync.DefaultStages(mock.Ctx, prune, - stagedsync.StageHeadersCfg(mock.DB, mock.sentriesClient.Hd, mock.sentriesClient.Bd, *mock.ChainConfig, sendHeaderRequest, propagateNewBlockHashes, penalize, cfg.BatchSize, false, allSnapshots, snapshotsDownloader, blockReader, mock.tmpdir, mock.Notifications.Events), + stagedsync.StageHeadersCfg(mock.DB, mock.sentriesClient.Hd, mock.sentriesClient.Bd, *mock.ChainConfig, sendHeaderRequest, propagateNewBlockHashes, penalize, cfg.BatchSize, false, false, allSnapshots, snapshotsDownloader, blockReader, mock.tmpdir, mock.Notifications.Events, nil), stagedsync.StageCumulativeIndexCfg(mock.DB), stagedsync.StageBlockHashesCfg(mock.DB, mock.tmpdir, mock.ChainConfig), stagedsync.StageBodiesCfg( @@ -336,6 +336,7 @@ func MockWithEverything(t *testing.T, gspec *core.Genesis, key *ecdsa.PrivateKey &vm.Config{}, mock.Notifications.Accumulator, cfg.StateStream, + /*stateStream=*/ false, mock.tmpdir, blockReader, ), diff --git a/turbo/stages/stageloop.go b/turbo/stages/stageloop.go index 376ee88dd2b..96bfe6d5903 100644 --- a/turbo/stages/stageloop.go +++ b/turbo/stages/stageloop.go @@ -195,7 +195,7 @@ func StageLoopStep( if notifications != nil && notifications.Accumulator != nil { header := rawdb.ReadCurrentHeader(rotx) - if header != nil { + if header != nil && header.Number.Uint64() != finishProgressBefore { pendingBaseFee := misc.CalcBaseFee(notifications.Accumulator.ChainConfig(), header) if header.Number.Uint64() == 0 { @@ -235,6 +235,80 @@ func MiningStep(ctx context.Context, kv kv.RwDB, mining *stagedsync.Sync) (err e return nil } +func StateStep(ctx context.Context, batch kv.RwTx, stateSync *stagedsync.Sync, headerReader services.FullBlockReader, header *types.Header, body *types.RawBody) (err error) { + // Setup + height := header.Number.Uint64() + hash := header.Hash() + + defer func() { + if rec := recover(); rec != nil { + err = fmt.Errorf("%+v, trace: %s", rec, dbg.Stack()) + } + }() // avoid crash because Erigon's core does many things + + // Prepare memory state for block execution + if err = rawdb.WriteRawBodyIfNotExists(batch, hash, height, body); err != nil { + return err + } + + rawdb.WriteHeader(batch, header) + if err = rawdb.WriteHeaderNumber(batch, hash, height); err != nil { + return err + } + + if err = rawdb.WriteCanonicalHash(batch, hash, height); err != nil { + return err + } + + if err := rawdb.WriteHeadHeaderHash(batch, hash); err != nil { + return err + } + + if err = stages.SaveStageProgress(batch, stages.Headers, height); err != nil { + return err + } + + if err = stages.SaveStageProgress(batch, stages.Bodies, height); err != nil { + return err + } + + if height == 0 { + return nil + } + ancestorHash := hash + ancestorHeight := height + + var ch common.Hash + for ch, err = headerReader.CanonicalHash(context.Background(), batch, ancestorHeight); err == nil && ch != ancestorHash; ch, err = headerReader.CanonicalHash(context.Background(), batch, ancestorHeight) { + if err = rawdb.WriteCanonicalHash(batch, ancestorHash, 
ancestorHeight); err != nil { + return fmt.Errorf("marking canonical header %d %x: %w", ancestorHeight, ancestorHash, err) + } + + ancestor, err := headerReader.Header(context.Background(), batch, ancestorHash, ancestorHeight) + if err != nil { + return err + } + if ancestor == nil { + return fmt.Errorf("ancestor is nil. height %d, hash %x", ancestorHeight, ancestorHash) + } + + select { + default: + } + ancestorHash = ancestor.ParentHash + ancestorHeight-- + } + if err != nil { + return fmt.Errorf("reading canonical hash for %d: %w", ancestorHeight, err) + } + + // Run state sync + if err = stateSync.Run(nil, batch, false); err != nil { + return err + } + return nil +} + func NewStagedSync( ctx context.Context, logger log.Logger, @@ -247,6 +321,7 @@ func NewStagedSync( snapshotDownloader proto_downloader.DownloaderClient, snapshots *snapshotsync.RoSnapshots, headCh chan *types.Block, + execPayload stagedsync.ExecutePayloadFunc, ) (*stagedsync.Sync, error) { var blockReader services.FullBlockReader if cfg.Snapshot.Enabled { @@ -272,11 +347,13 @@ func NewStagedSync( controlServer.Penalize, cfg.BatchSize, p2pCfg.NoDiscovery, + cfg.MemoryOverlay, snapshots, snapshotDownloader, blockReader, tmpdir, - notifications.Events), + notifications.Events, + execPayload), stagedsync.StageCumulativeIndexCfg(db), stagedsync.StageBlockHashesCfg(db, tmpdir, controlServer.ChainConfig), stagedsync.StageBodiesCfg( @@ -303,6 +380,7 @@ func NewStagedSync( &vm.Config{EnableTEMV: cfg.Prune.Experiments.TEVM}, notifications.Accumulator, cfg.StateStream, + /*stateStream=*/ false, tmpdir, blockReader, ), @@ -318,3 +396,48 @@ func NewStagedSync( stagedsync.DefaultPruneOrder, ), nil } + +func NewInMemoryExecution( + ctx context.Context, + logger log.Logger, + db kv.RwDB, + p2pCfg p2p.Config, + cfg ethconfig.Config, + controlServer *sentry.MultiClient, + tmpdir string, + notifications *stagedsync.Notifications, + snapshotDownloader proto_downloader.DownloaderClient, + snapshots *snapshotsync.RoSnapshots, + headCh chan *types.Block, +) (*stagedsync.Sync, error) { + var blockReader services.FullBlockReader + if cfg.Snapshot.Enabled { + blockReader = snapshotsync.NewBlockReaderWithSnapshots(snapshots) + } else { + blockReader = snapshotsync.NewBlockReader() + } + + return stagedsync.New( + stagedsync.StateStages(ctx, + stagedsync.StageBlockHashesCfg(db, tmpdir, controlServer.ChainConfig), + stagedsync.StageSendersCfg(db, controlServer.ChainConfig, tmpdir, cfg.Prune, nil), + stagedsync.StageExecuteBlocksCfg( + db, + cfg.Prune, + cfg.BatchSize, + nil, + controlServer.ChainConfig, + controlServer.Engine, + &vm.Config{EnableTEMV: cfg.Prune.Experiments.TEVM}, + notifications.Accumulator, + cfg.StateStream, + true, + tmpdir, + blockReader, + ), + stagedsync.StageHashStateCfg(db, tmpdir), + stagedsync.StageTrieCfg(db, true, true, tmpdir, blockReader)), + nil, + nil, + ), nil +} From 811eef5a7b0ba3e3b39c8e0eaa495a829295d748 Mon Sep 17 00:00:00 2001 From: fenghaojiang Date: Mon, 20 Jun 2022 15:48:25 +0800 Subject: [PATCH 085/136] add_abigen_error_handle (#4498) * add_abigen_error_handle * add abigen error type test code --- accounts/abi/abi.go | 4 +++ accounts/abi/abi_test.go | 14 +++++++++ accounts/abi/error.go | 67 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 85 insertions(+) diff --git a/accounts/abi/abi.go b/accounts/abi/abi.go index fd7d249c18f..cb3dcc83d56 100644 --- a/accounts/abi/abi.go +++ b/accounts/abi/abi.go @@ -35,6 +35,7 @@ type ABI struct { Constructor Method Methods map[string]Method Events 
map[string]Event + Errors map[string]Error // Additional "special" functions introduced in solidity v0.6.0. // It's separated from the original default fallback. Each contract @@ -158,6 +159,7 @@ func (abi *ABI) UnmarshalJSON(data []byte) error { } abi.Methods = make(map[string]Method) abi.Events = make(map[string]Event) + abi.Errors = make(map[string]Error) for _, field := range fields { switch field.Type { case "constructor": @@ -185,6 +187,8 @@ func (abi *ABI) UnmarshalJSON(data []byte) error { case "event": name := abi.overloadedEventName(field.Name) abi.Events[name] = NewEvent(name, field.Name, field.Anonymous, field.Inputs) + case "error": + abi.Errors[field.Name] = NewError(field.Name, field.Inputs) default: return fmt.Errorf("abi: could not recognize type %v of field %v", field.Type, field.Name) } diff --git a/accounts/abi/abi_test.go b/accounts/abi/abi_test.go index 5557a67ff22..ce1f35c6517 100644 --- a/accounts/abi/abi_test.go +++ b/accounts/abi/abi_test.go @@ -1144,3 +1144,17 @@ func TestUnpackRevert(t *testing.T) { }) } } + +func TestCustomErrors(t *testing.T) { + json := `[{ "inputs": [ { "internalType": "uint256", "name": "", "type": "uint256" } ],"name": "MyError", "type": "error"} ]` + abi, err := JSON(strings.NewReader(json)) + if err != nil { + t.Fatal(err) + } + check := func(name string, expect string) { + if abi.Errors[name].Sig != expect { + t.Fatalf("The signature of overloaded method mismatch, want %s, have %s", expect, abi.Methods[name].Sig) + } + } + check("MyError", "MyError(uint256)") +} diff --git a/accounts/abi/error.go b/accounts/abi/error.go index f0f71b6c916..d26e2984a2e 100644 --- a/accounts/abi/error.go +++ b/accounts/abi/error.go @@ -17,11 +17,78 @@ package abi import ( + "bytes" "errors" "fmt" "reflect" + "strings" + + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/crypto" ) +type Error struct { + Name string + Inputs Arguments + str string + // Sig contains the string signature according to the ABI spec. + // e.g. event foo(uint32 a, int b) = "foo(uint32,int256)" + // Please note that "int" is substitute for its canonical representation "int256" + Sig string + // ID returns the canonical representation of the event's signature used by the + // abi definition to identify event names and types. 
+ ID common.Hash +} + +func NewError(name string, inputs Arguments) Error { + names := make([]string, len(inputs)) + types := make([]string, len(inputs)) + for i, input := range inputs { + if input.Name == "" { + inputs[i] = Argument{ + Name: fmt.Sprintf("arg%d", i), + Indexed: input.Indexed, + Type: input.Type, + } + } else { + inputs[i] = input + } + // string representation + names[i] = fmt.Sprintf("%v %v", input.Type, inputs[i].Name) + if input.Indexed { + names[i] = fmt.Sprintf("%v indexed %v", input.Type, inputs[i].Name) + } + // sig representation + types[i] = input.Type.String() + } + + str := fmt.Sprintf("error %v(%v)", name, strings.Join(names, ", ")) + sig := fmt.Sprintf("%v(%v)", name, strings.Join(types, ",")) + id := common.BytesToHash(crypto.Keccak256([]byte(sig))) + + return Error{ + Name: name, + Inputs: inputs, + str: str, + Sig: sig, + ID: id, + } +} + +func (e *Error) String() string { + return e.str +} + +func (e *Error) Unpack(data []byte) (interface{}, error) { + if len(data) < 4 { + return "", errors.New("invalid data for unpacking") + } + if !bytes.Equal(data[:4], e.ID[:4]) { + return "", errors.New("invalid data for unpacking") + } + return e.Inputs.Unpack(data[4:]) +} + var ( errBadBool = errors.New("abi: improperly encoded boolean value") ) From 820014734f9c6d2e2e75b09f46c4ee97c3d393e5 Mon Sep 17 00:00:00 2001 From: ledgerwatch Date: Mon, 20 Jun 2022 08:53:01 +0100 Subject: [PATCH 086/136] [BSC] fix for the Euler fork (#4500) Co-authored-by: Alexey Sharp --- core/systemcontracts/upgrade.go | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/core/systemcontracts/upgrade.go b/core/systemcontracts/upgrade.go index e95b6eb1799..a3c77bb662d 100644 --- a/core/systemcontracts/upgrade.go +++ b/core/systemcontracts/upgrade.go @@ -10,6 +10,7 @@ import ( "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/params/networkname" ) type UpgradeConfig struct { @@ -35,7 +36,6 @@ const ( ) var ( - GenesisHash common.Hash //upgrade config ramanujanUpgrade = make(map[string]*Upgrade) @@ -368,13 +368,12 @@ func UpgradeBuildInSystemContract(config *params.ChainConfig, blockNumber *big.I return } var network string - switch GenesisHash { - /* Add mainnet genesis hash */ - case params.BSCGenesisHash: + switch config.ChainName { + case networkname.BSCChainName: network = mainNet - case params.ChapelGenesisHash: + case networkname.ChapelChainName: network = chapelNet - case params.RialtoGenesisHash: + case networkname.RialtoChainName: network = rialtoNet default: network = defaultNet From 79830adefa2fdf1af617228b96173f1563530a72 Mon Sep 17 00:00:00 2001 From: ledgerwatch Date: Mon, 20 Jun 2022 09:00:45 +0100 Subject: [PATCH 087/136] [erigon2.2] collecting read indices (#4499) * [erigon2.2] collecting read indices * Fix compile issue * Update to latest erigon-lib Co-authored-by: Alexey Sharp --- cmd/rpcdaemon22/commands/eth_receipts.go | 2 +- cmd/rpcdaemon22/commands/trace_filtering.go | 2 +- cmd/state/commands/history22.go | 83 +++++++++++++++++++-- core/state/HistoryReader22.go | 35 ++++++++- go.mod | 2 +- go.sum | 4 +- 6 files changed, 115 insertions(+), 13 deletions(-) diff --git a/cmd/rpcdaemon22/commands/eth_receipts.go b/cmd/rpcdaemon22/commands/eth_receipts.go index 774f5c5f2be..8813f52051d 100644 --- a/cmd/rpcdaemon22/commands/eth_receipts.go +++ b/cmd/rpcdaemon22/commands/eth_receipts.go @@ -159,7 +159,7 @@ func (api *APIImpl) GetLogs(ctx 
context.Context, crit filters.FilterCriteria) ([ var lastHeader *types.Header var lastSigner *types.Signer var lastRules *params.Rules - stateReader := state.NewHistoryReader22(api._agg) + stateReader := state.NewHistoryReader22(api._agg, nil /* ReadIndices */) iter := txNumbers.Iterator() for iter.HasNext() { txNum := iter.Next() diff --git a/cmd/rpcdaemon22/commands/trace_filtering.go b/cmd/rpcdaemon22/commands/trace_filtering.go index a71a890e9af..7dd35347407 100644 --- a/cmd/rpcdaemon22/commands/trace_filtering.go +++ b/cmd/rpcdaemon22/commands/trace_filtering.go @@ -319,7 +319,7 @@ func (api *TraceAPIImpl) Filter(ctx context.Context, req TraceFilterRequest, str var lastHeader *types.Header var lastSigner *types.Signer var lastRules *params.Rules - stateReader := state.NewHistoryReader22(api._agg) + stateReader := state.NewHistoryReader22(api._agg, nil /* ReadIndices */) noop := state.NewNoopWriter() for it.HasNext() { txNum := uint64(it.Next()) diff --git a/cmd/state/commands/history22.go b/cmd/state/commands/history22.go index c9efe1ccf7c..a3fb98bb066 100644 --- a/cmd/state/commands/history22.go +++ b/cmd/state/commands/history22.go @@ -2,6 +2,7 @@ package commands import ( "context" + "errors" "fmt" "os" "os/signal" @@ -10,6 +11,9 @@ import ( "syscall" "time" + libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/mdbx" kv2 "github.com/ledgerwatch/erigon-lib/kv/mdbx" libstate "github.com/ledgerwatch/erigon-lib/state" "github.com/ledgerwatch/erigon/common" @@ -65,13 +69,54 @@ func History22(genesis *core.Genesis, logger log.Logger) error { return err1 } defer historyTx.Rollback() - aggPath := filepath.Join(datadir, "erigon22") - h, err3 := libstate.NewAggregator(aggPath, AggregationStep) - //h, err3 := aggregator.NewHistory(aggPath, uint64(blockTo), aggregationStep) - if err3 != nil { - return fmt.Errorf("create history: %w", err3) + aggPath := filepath.Join(datadir, "erigon23") + h, err := libstate.NewAggregator(aggPath, AggregationStep) + if err != nil { + return fmt.Errorf("create history: %w", err) } defer h.Close() + readDbPath := path.Join(datadir, "readdb") + if block == 0 { + if _, err = os.Stat(readDbPath); err != nil { + if !errors.Is(err, os.ErrNotExist) { + return err + } + } else if err = os.RemoveAll(readDbPath); err != nil { + return err + } + } + db, err := kv2.NewMDBX(logger).Path(readDbPath).WriteMap().Open() + if err != nil { + return err + } + defer db.Close() + readPath := filepath.Join(datadir, "reads") + if block == 0 { + if _, err = os.Stat(readPath); err != nil { + if !errors.Is(err, os.ErrNotExist) { + return err + } + } else if err = os.RemoveAll(readPath); err != nil { + return err + } + if err = os.Mkdir(readPath, os.ModePerm); err != nil { + return err + } + } + ri, err := libstate.NewReadIndices(readPath, AggregationStep) + if err != nil { + return fmt.Errorf("create read indices: %w", err) + } + var rwTx kv.RwTx + defer func() { + if rwTx != nil { + rwTx.Rollback() + } + }() + if rwTx, err = db.BeginRw(ctx); err != nil { + return err + } + ri.SetTx(rwTx) chainConfig := genesis.Config vmConfig := vm.Config{} @@ -125,7 +170,7 @@ func History22(genesis *core.Genesis, logger log.Logger) error { txNum += uint64(len(b.Transactions())) + 2 // Pre and Post block transaction continue } - readWrapper := state.NewHistoryReader22(h) + readWrapper := state.NewHistoryReader22(h, ri) if traceBlock != 0 { readWrapper.SetTrace(blockNum == uint64(traceBlock)) } @@ -148,6 +193,29 @@ func 
History22(genesis *core.Genesis, logger log.Logger) error { log.Info(fmt.Sprintf("interrupted, please wait for cleanup, next time start with --block %d", blockNum)) default: } + // Commit transaction only when interrupted or just before computing commitment (so it can be re-done) + commit := interrupt + if !commit && (blockNum+1)%uint64(commitmentFrequency) == 0 { + var spaceDirty uint64 + if spaceDirty, _, err = rwTx.(*mdbx.MdbxTx).SpaceDirty(); err != nil { + return fmt.Errorf("retrieving spaceDirty: %w", err) + } + if spaceDirty >= dirtySpaceThreshold { + log.Info("Initiated tx commit", "block", blockNum, "space dirty", libcommon.ByteCount(spaceDirty)) + commit = true + } + } + if commit { + if err = rwTx.Commit(); err != nil { + return err + } + if !interrupt { + if rwTx, err = db.BeginRw(ctx); err != nil { + return err + } + } + ri.SetTx(rwTx) + } } return nil } @@ -177,6 +245,9 @@ func runHistory22(trace bool, blockNum, txNumStart uint64, hw *state.HistoryRead fmt.Printf("tx idx %d, num %d, gas used %d\n", i, txNum, receipt.GasUsed) } receipts = append(receipts, receipt) + if err = hw.FinishTx(); err != nil { + return 0, nil, fmt.Errorf("finish tx %d [%x] failed: %w", i, tx.Hash(), err) + } txNum++ hw.SetTxNum(txNum) } diff --git a/core/state/HistoryReader22.go b/core/state/HistoryReader22.go index e2d6ae826c8..647dfe25886 100644 --- a/core/state/HistoryReader22.go +++ b/core/state/HistoryReader22.go @@ -3,6 +3,7 @@ package state import ( "fmt" + "github.com/ledgerwatch/erigon-lib/kv" libstate "github.com/ledgerwatch/erigon-lib/state" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/core/types/accounts" @@ -21,17 +22,29 @@ func bytesToUint64(buf []byte) (x uint64) { // Implements StateReader and StateWriter type HistoryReader22 struct { a *libstate.Aggregator + ri *libstate.ReadIndices txNum uint64 trace bool } -func NewHistoryReader22(a *libstate.Aggregator) *HistoryReader22 { - return &HistoryReader22{a: a} +func NewHistoryReader22(a *libstate.Aggregator, ri *libstate.ReadIndices) *HistoryReader22 { + return &HistoryReader22{a: a, ri: ri} +} + +func (hr *HistoryReader22) SetTx(tx kv.RwTx) { + hr.ri.SetTx(tx) } func (hr *HistoryReader22) SetTxNum(txNum uint64) { hr.txNum = txNum hr.a.SetTxNum(txNum) + if hr.ri != nil { + hr.ri.SetTxNum(txNum) + } +} + +func (hr *HistoryReader22) FinishTx() error { + return hr.ri.FinishTx() } func (hr *HistoryReader22) SetTrace(trace bool) { @@ -39,6 +52,11 @@ func (hr *HistoryReader22) SetTrace(trace bool) { } func (hr *HistoryReader22) ReadAccountData(address common.Address) (*accounts.Account, error) { + if hr.ri != nil { + if err := hr.ri.ReadAccountData(address.Bytes()); err != nil { + return nil, err + } + } enc, err := hr.a.ReadAccountDataBeforeTxNum(address.Bytes(), hr.txNum, nil /* roTx */) if err != nil { return nil, err @@ -85,6 +103,11 @@ func (hr *HistoryReader22) ReadAccountData(address common.Address) (*accounts.Ac } func (hr *HistoryReader22) ReadAccountStorage(address common.Address, incarnation uint64, key *common.Hash) ([]byte, error) { + if hr.ri != nil { + if err := hr.ri.ReadAccountStorage(address.Bytes(), key.Bytes()); err != nil { + return nil, err + } + } enc, err := hr.a.ReadAccountStorageBeforeTxNum(address.Bytes(), key.Bytes(), hr.txNum, nil /* roTx */) if err != nil { return nil, err @@ -103,6 +126,9 @@ func (hr *HistoryReader22) ReadAccountStorage(address common.Address, incarnatio } func (hr *HistoryReader22) ReadAccountCode(address common.Address, incarnation uint64, codeHash common.Hash) ([]byte, 
error) { + if err := hr.ri.ReadAccountCode(address.Bytes()); err != nil { + return nil, err + } enc, err := hr.a.ReadAccountCodeBeforeTxNum(address.Bytes(), hr.txNum, nil /* roTx */) if err != nil { return nil, err @@ -114,6 +140,11 @@ func (hr *HistoryReader22) ReadAccountCode(address common.Address, incarnation u } func (hr *HistoryReader22) ReadAccountCodeSize(address common.Address, incarnation uint64, codeHash common.Hash) (int, error) { + if hr.ri != nil { + if err := hr.ri.ReadAccountCodeSize(address.Bytes()); err != nil { + return 0, err + } + } size, err := hr.a.ReadAccountCodeSizeBeforeTxNum(address.Bytes(), hr.txNum, nil /* roTx */) if err != nil { return 0, err diff --git a/go.mod b/go.mod index e86fb37bb83..9f03f74c565 100644 --- a/go.mod +++ b/go.mod @@ -34,7 +34,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible - github.com/ledgerwatch/erigon-lib v0.0.0-20220619053529-5574d68a87ee + github.com/ledgerwatch/erigon-lib v0.0.0-20220620073929-46bebb3317d9 github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 diff --git a/go.sum b/go.sum index be76a4a279d..f98c63e1ec9 100644 --- a/go.sum +++ b/go.sum @@ -382,8 +382,8 @@ github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3P github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220619053529-5574d68a87ee h1:AoE5ESeSj/KUE/Je9KdNlEYyRA20KNkrV0nNrLmnYOY= -github.com/ledgerwatch/erigon-lib v0.0.0-20220619053529-5574d68a87ee/go.mod h1:7sQ5B5m54zoo7RVRVukH3YZCYVrCC+BmwDBD+9KyTrE= +github.com/ledgerwatch/erigon-lib v0.0.0-20220620073929-46bebb3317d9 h1:FsdxNVS9xgxjeMOeVx5cuvtb5704KSBIeoL5dZqe4N8= +github.com/ledgerwatch/erigon-lib v0.0.0-20220620073929-46bebb3317d9/go.mod h1:7sQ5B5m54zoo7RVRVukH3YZCYVrCC+BmwDBD+9KyTrE= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= From 6612cf42d691ae9d404d02f32485a5a8560684f0 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Mon, 20 Jun 2022 14:31:57 +0600 Subject: [PATCH 088/136] "torrent_hashes --verify" to detect "snapshots/tmp" dir (#4501) --- cmd/downloader/downloader/util.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/cmd/downloader/downloader/util.go b/cmd/downloader/downloader/util.go index 4056380d83d..9ef784910c9 100644 --- a/cmd/downloader/downloader/util.go +++ b/cmd/downloader/downloader/util.go @@ -24,6 +24,7 @@ import ( "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/cmd/downloader/downloader/torrentcfg" "github.com/ledgerwatch/erigon/cmd/downloader/trackers" + "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/turbo/snapshotsync/snap" "github.com/ledgerwatch/log/v3" "golang.org/x/sync/semaphore" @@ -352,6 +353,11 @@ var ErrSkip = fmt.Errorf("skip") func VerifyDtaFiles(ctx context.Context, snapDir string) error { logEvery := time.NewTicker(5 * time.Second) defer logEvery.Stop() + + tmpSnapDir := filepath.Join(snapDir, "tmp") // snapshots are in 
sub-dir "tmp", if not fully downloaded + if !common.FileExist(tmpSnapDir) { + snapDir = tmpSnapDir + } files, err := AllTorrentPaths(snapDir) if err != nil { return err From acd69a63ec6e523678b05ef63bb8f27efbde9f6f Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Mon, 20 Jun 2022 15:16:56 +0600 Subject: [PATCH 089/136] "-tags debug" to allow pprof show profiling from C code (#4502) --- common/debug/pprof_cgo.go | 8 ++++++++ go.mod | 2 ++ go.sum | 4 ++++ 3 files changed, 14 insertions(+) create mode 100644 common/debug/pprof_cgo.go diff --git a/common/debug/pprof_cgo.go b/common/debug/pprof_cgo.go new file mode 100644 index 00000000000..7a3c219387a --- /dev/null +++ b/common/debug/pprof_cgo.go @@ -0,0 +1,8 @@ +//go:build debug +// +build debug + +package debug + +import ( + _ "github.com/benesch/cgosymbolizer" +) diff --git a/go.mod b/go.mod index 9f03f74c565..dd9b5149c3c 100644 --- a/go.mod +++ b/go.mod @@ -9,6 +9,7 @@ require ( github.com/anacrolix/go-libutp v1.2.0 github.com/anacrolix/log v0.13.1 github.com/anacrolix/torrent v1.44.0 + github.com/benesch/cgosymbolizer v0.0.0-20190515212042-bec6fe6e597b github.com/btcsuite/btcd v0.22.0-beta github.com/c2h5oh/datasize v0.0.0-20200825124411-48ed595a09d2 github.com/consensys/gnark-crypto v0.4.0 @@ -103,6 +104,7 @@ require ( github.com/golang/protobuf v1.5.2 // indirect github.com/google/uuid v1.3.0 // indirect github.com/huandu/xstrings v1.3.2 // indirect + github.com/ianlancetaylor/cgosymbolizer v0.0.0-20220405231054-a1ae3e4bba26 // indirect github.com/inconshreveable/mousetrap v1.0.0 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect github.com/kr/pretty v0.3.0 // indirect diff --git a/go.sum b/go.sum index f98c63e1ec9..6bb49e41719 100644 --- a/go.sum +++ b/go.sum @@ -100,6 +100,8 @@ github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xW github.com/benbjohnson/immutable v0.2.0/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI= github.com/benbjohnson/immutable v0.3.0 h1:TVRhuZx2wG9SZ0LRdqlbs9S5BZ6Y24hJEHTCgWHZEIw= github.com/benbjohnson/immutable v0.3.0/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI= +github.com/benesch/cgosymbolizer v0.0.0-20190515212042-bec6fe6e597b h1:5JgaFtHFRnOPReItxvhMDXbvuBkjSWE+9glJyF466yw= +github.com/benesch/cgosymbolizer v0.0.0-20190515212042-bec6fe6e597b/go.mod h1:eMD2XUcPsHYbakFEocKrWZp47G0MRJYoC60qFblGjpA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= @@ -337,6 +339,8 @@ github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmK github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ= github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y= github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= +github.com/ianlancetaylor/cgosymbolizer v0.0.0-20220405231054-a1ae3e4bba26 h1:UT3hQ6+5hwqUT83cKhKlY5I0W/kqsl6lpn3iFb3Gtqs= +github.com/ianlancetaylor/cgosymbolizer v0.0.0-20220405231054-a1ae3e4bba26/go.mod h1:DvXTE/K/RtHehxU8/GtDs4vFtfw64jJ3PaCnFri8CRg= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/influxdb1-client 
v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= From 834f0f4deadd179f612e9e3e4e2441685323a9f5 Mon Sep 17 00:00:00 2001 From: dylanhuang Date: Mon, 20 Jun 2022 19:45:24 +0800 Subject: [PATCH 090/136] add eulerBlock in chapel.json (#4503) --- params/chainspecs/chapel.json | 1 + 1 file changed, 1 insertion(+) diff --git a/params/chainspecs/chapel.json b/params/chainspecs/chapel.json index ee755b44a17..0a1e8fbcf94 100644 --- a/params/chainspecs/chapel.json +++ b/params/chainspecs/chapel.json @@ -15,6 +15,7 @@ "nielsBlock": 1014369, "mirrorSyncBlock": 5582500, "brunoBlock": 13837000, + "eulerBlock": 19203503, "terminalBlockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", "parlia": { "DBPath": "", From f38ab485e0111e0e13f436c91cf86931c1affb09 Mon Sep 17 00:00:00 2001 From: ledgerwatch Date: Mon, 20 Jun 2022 16:51:19 +0100 Subject: [PATCH 091/136] Update chainConfig in the database (#4505) * Update genesis.go * Remove the test * Remove unnecessary code Co-authored-by: Alex Sharp --- core/genesis.go | 6 ------ params/config.go | 4 ---- turbo/stages/genesis_test.go | 9 --------- 3 files changed, 19 deletions(-) diff --git a/core/genesis.go b/core/genesis.go index 89a3ca80e18..0d9de90a61f 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -271,12 +271,6 @@ func WriteGenesisBlock(db kv.RwTx, genesis *Genesis, overrideMergeNetsplitBlock, } return newCfg, storedBlock, nil } - // Special case: don't change the existing config of a non-mainnet chain if no new - // config is supplied. These chains would get AllProtocolChanges (and a compatibility error) - // if we just continued here. - if genesis == nil && storedHash != params.MainnetGenesisHash && overrideMergeNetsplitBlock == nil && overrideTerminalTotalDifficulty == nil { - return storedCfg, storedBlock, nil - } // Check config compatibility and write the config. Compatibility errors // are returned to the caller unless we're already at block zero. 
height := rawdb.ReadHeaderNumber(db, rawdb.ReadHeadHeaderHash(db)) diff --git a/params/config.go b/params/config.go index 6446c63eb19..14d7b65d3e0 100644 --- a/params/config.go +++ b/params/config.go @@ -599,7 +599,6 @@ func (c *ChainConfig) CheckConfigForkOrder() error { {name: "petersburgBlock", block: c.PetersburgBlock}, {name: "istanbulBlock", block: c.IstanbulBlock}, {name: "muirGlacierBlock", block: c.MuirGlacierBlock, optional: true}, - {name: "eulerBlock", block: c.EulerBlock, optional: true}, {name: "berlinBlock", block: c.BerlinBlock}, {name: "londonBlock", block: c.LondonBlock}, {name: "arrowGlacierBlock", block: c.ArrowGlacierBlock, optional: true}, @@ -676,9 +675,6 @@ func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, head uint64) *ConfigC if isForkIncompatible(c.GrayGlacierBlock, newcfg.GrayGlacierBlock, head) { return newCompatError("Gray Glacier fork block", c.GrayGlacierBlock, newcfg.GrayGlacierBlock) } - if isForkIncompatible(c.EulerBlock, newcfg.EulerBlock, head) { - return newCompatError("Euler fork block", c.EulerBlock, newcfg.EulerBlock) - } return nil } diff --git a/turbo/stages/genesis_test.go b/turbo/stages/genesis_test.go index 5c6877b6549..a4ba81675a6 100644 --- a/turbo/stages/genesis_test.go +++ b/turbo/stages/genesis_test.go @@ -166,15 +166,6 @@ func TestSetupGenesis(t *testing.T) { wantHash: params.MainnetGenesisHash, wantConfig: params.MainnetChainConfig, }, - { - name: "custom block in DB, genesis == nil", - fn: func(db kv.RwDB) (*params.ChainConfig, *types.Block, error) { - customg.MustCommit(db) - return core.CommitGenesisBlock(db, nil) - }, - wantHash: customghash, - wantConfig: customg.Config, - }, { name: "custom block in DB, genesis == ropsten", fn: func(db kv.RwDB) (*params.ChainConfig, *types.Block, error) { From daa84922d61e3e40692c4e89c2bc6017dcb8cb0f Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Mon, 20 Jun 2022 23:03:17 +0200 Subject: [PATCH 092/136] Commit transaction before responding on Engine API (#4506) * Remove an unused interrupt value * Remove sendErrResponse from safeAndFinalizedBlocksAreCanonical * Clean up err handling * startHandlingForkChoice returns response * handleNewPayload returns response * Commit transaction before responding on Engine API * small fix * Reply with SYNCING before long unwind * schedulePoSDownload doesn't require requestStatus anymore --- eth/stagedsync/stage_headers.go | 313 ++++++++++++++------------------ turbo/engineapi/request_list.go | 3 +- 2 files changed, 135 insertions(+), 181 deletions(-) diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 1fa2f4f9a17..9adcf1ee321 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -185,19 +185,31 @@ func HeadersPOS( cfg.hd.ClearPendingPayloadStatus() + var response *privateapi.PayloadStatus + var err error if forkChoiceInsteadOfNewPayload { - if err := startHandlingForkChoice(forkChoiceMessage, status, requestId, s, u, ctx, tx, cfg, headerInserter, cfg.blockReader); err != nil { - return err - } + response, err = startHandlingForkChoice(forkChoiceMessage, status, requestId, s, u, ctx, tx, cfg, headerInserter, cfg.blockReader) } else { - if err := handleNewPayload(payloadMessage, status, requestId, s, ctx, tx, cfg, headerInserter); err != nil { - return err + response, err = handleNewPayload(payloadMessage, status, requestId, s, ctx, tx, cfg, headerInserter) + } + + if err != nil { + if status == engineapi.New { + cfg.hd.PayloadStatusCh <- 
privateapi.PayloadStatus{CriticalError: err} } + return err } if !useExternalTx { - return tx.Commit() + if err = tx.Commit(); err != nil { + return err + } } + + if response != nil && status == engineapi.New { + cfg.hd.PayloadStatusCh <- *response + } + return nil } @@ -206,7 +218,6 @@ func safeAndFinalizedBlocksAreCanonical( s *StageState, tx kv.RwTx, cfg HeadersCfg, - sendErrResponse bool, ) (bool, error) { if forkChoice.SafeBlockHash != (common.Hash{}) { safeIsCanonical, err := rawdb.IsCanonicalHash(tx, forkChoice.SafeBlockHash) @@ -217,11 +228,6 @@ func safeAndFinalizedBlocksAreCanonical( rawdb.WriteForkchoiceSafe(tx, forkChoice.SafeBlockHash) } else { log.Warn(fmt.Sprintf("[%s] Non-canonical SafeBlockHash", s.LogPrefix()), "forkChoice", forkChoice) - if sendErrResponse { - cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{ - CriticalError: &privateapi.InvalidForkchoiceStateErr, - } - } return false, nil } } @@ -235,11 +241,6 @@ func safeAndFinalizedBlocksAreCanonical( rawdb.WriteForkchoiceFinalized(tx, forkChoice.FinalizedBlockHash) } else { log.Warn(fmt.Sprintf("[%s] Non-canonical FinalizedBlockHash", s.LogPrefix()), "forkChoice", forkChoice) - if sendErrResponse { - cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{ - CriticalError: &privateapi.InvalidForkchoiceStateErr, - } - } return false, nil } } @@ -258,7 +259,7 @@ func startHandlingForkChoice( cfg HeadersCfg, headerInserter *headerdownload.HeaderInserter, headerReader services.HeaderReader, -) error { +) (*privateapi.PayloadStatus, error) { headerHash := forkChoice.HeadBlockHash log.Debug(fmt.Sprintf("[%s] Handling fork choice", s.LogPrefix()), "headerHash", headerHash) if cfg.memoryOverlay { @@ -269,58 +270,47 @@ func startHandlingForkChoice( log.Debug(fmt.Sprintf("[%s] Fork choice no-op", s.LogPrefix())) cfg.hd.BeaconRequestList.Remove(requestId) rawdb.WriteForkchoiceHead(tx, forkChoice.HeadBlockHash) - canonical, err := safeAndFinalizedBlocksAreCanonical(forkChoice, s, tx, cfg, requestStatus == engineapi.New) + canonical, err := safeAndFinalizedBlocksAreCanonical(forkChoice, s, tx, cfg) if err != nil { log.Warn(fmt.Sprintf("[%s] Fork choice err", s.LogPrefix()), "err", err) - if requestStatus == engineapi.New { - cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{CriticalError: err} - } - return err + return nil, err } - if canonical && requestStatus == engineapi.New { - cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{ + if canonical { + return &privateapi.PayloadStatus{ Status: remote.EngineStatus_VALID, LatestValidHash: currentHeadHash, - } + }, nil + } else { + return &privateapi.PayloadStatus{ + CriticalError: &privateapi.InvalidForkchoiceStateErr, + }, nil } - return nil } bad, lastValidHash := cfg.hd.IsBadHeaderPoS(headerHash) if bad { log.Warn(fmt.Sprintf("[%s] Fork choice bad head block", s.LogPrefix()), "headerHash", headerHash) cfg.hd.BeaconRequestList.Remove(requestId) - if requestStatus == engineapi.New { - cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{ - Status: remote.EngineStatus_INVALID, - LatestValidHash: lastValidHash, - } - } else { - cfg.hd.ReportBadHeaderPoS(headerHash, lastValidHash) - } - return nil + return &privateapi.PayloadStatus{ + Status: remote.EngineStatus_INVALID, + LatestValidHash: lastValidHash, + }, nil } // Header itself may already be in the snapshots, if CL starts off at much earlier state than Erigon header, err := headerReader.HeaderByHash(ctx, tx, headerHash) - if err != nil { - return err - } if err != nil { log.Warn(fmt.Sprintf("[%s] Fork choice err (reading header by 
hash %x)", s.LogPrefix(), headerHash), "err", err) cfg.hd.BeaconRequestList.Remove(requestId) - if requestStatus == engineapi.New { - cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{CriticalError: err} - } - return err + return nil, err } if header == nil { log.Info(fmt.Sprintf("[%s] Fork choice missing header with hash %x", s.LogPrefix(), headerHash)) hashToDownload := headerHash cfg.hd.SetPoSDownloaderTip(headerHash) - schedulePoSDownload(requestStatus, requestId, hashToDownload, 0 /* header height is unknown, setting to 0 */, s, cfg) - return nil + schedulePoSDownload(requestId, hashToDownload, 0 /* header height is unknown, setting to 0 */, s, cfg) + return &privateapi.PayloadStatus{Status: remote.EngineStatus_SYNCING}, nil } cfg.hd.BeaconRequestList.Remove(requestId) @@ -331,31 +321,28 @@ func startHandlingForkChoice( if err != nil { log.Warn(fmt.Sprintf("[%s] Fork choice err (reading canonical hash of %d)", s.LogPrefix(), headerNumber), "err", err) cfg.hd.BeaconRequestList.Remove(requestId) - if requestStatus == engineapi.New { - cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{CriticalError: err} - } - return err + return nil, err } if headerHash == canonicalHash { log.Info(fmt.Sprintf("[%s] Fork choice on previously known block", s.LogPrefix())) cfg.hd.BeaconRequestList.Remove(requestId) rawdb.WriteForkchoiceHead(tx, forkChoice.HeadBlockHash) - canonical, err := safeAndFinalizedBlocksAreCanonical(forkChoice, s, tx, cfg, requestStatus == engineapi.New) + canonical, err := safeAndFinalizedBlocksAreCanonical(forkChoice, s, tx, cfg) if err != nil { log.Warn(fmt.Sprintf("[%s] Fork choice err", s.LogPrefix()), "err", err) - if requestStatus == engineapi.New { - cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{CriticalError: err} - } - return err + return nil, err } - if canonical && requestStatus == engineapi.New { - cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{ + if canonical { + return &privateapi.PayloadStatus{ Status: remote.EngineStatus_VALID, LatestValidHash: headerHash, - } + }, nil + } else { + return &privateapi.PayloadStatus{ + CriticalError: &privateapi.InvalidForkchoiceStateErr, + }, nil } - return nil } cfg.hd.UpdateTopSeenHeightPoS(headerNumber) @@ -363,26 +350,23 @@ func startHandlingForkChoice( if headerNumber > 0 { parent, err := headerReader.Header(ctx, tx, header.ParentHash, headerNumber-1) if err != nil { - return err + return nil, err } forkingPoint, err = headerInserter.ForkingPoint(tx, header, parent) if err != nil { - if requestStatus == engineapi.New { - cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{CriticalError: err} - } - return err + return nil, err } } if cfg.memoryOverlay && headerHash == cfg.hd.GetNextForkHash() { log.Info("Flushing in-memory state") if err := cfg.hd.FlushNextForkState(tx); err != nil { - return err + return nil, err } cfg.hd.SetPendingPayloadStatus(headerHash) - - return nil + return nil, nil } + log.Info(fmt.Sprintf("[%s] Fork choice re-org", s.LogPrefix()), "headerNumber", headerNumber, "forkingPoint", forkingPoint) if requestStatus == engineapi.New { @@ -398,7 +382,7 @@ func startHandlingForkChoice( cfg.hd.SetUnsettledForkChoice(forkChoice, headerNumber) - return nil + return nil, nil } func finishHandlingForkChoice( @@ -423,14 +407,10 @@ func finishHandlingForkChoice( } rawdb.WriteForkchoiceHead(tx, forkChoice.HeadBlockHash) - sendErrResponse := cfg.hd.GetPendingPayloadStatus() != (common.Hash{}) - canonical, err := safeAndFinalizedBlocksAreCanonical(forkChoice, s, tx, cfg, sendErrResponse) + canonical, err := 
safeAndFinalizedBlocksAreCanonical(forkChoice, s, tx, cfg) if err != nil { return err } - if !canonical { - cfg.hd.ClearPendingPayloadStatus() - } if err := s.Update(tx, headHeight); err != nil { return err @@ -442,6 +422,15 @@ func finishHandlingForkChoice( } } + if !canonical { + if cfg.hd.GetPendingPayloadStatus() != (common.Hash{}) { + cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{ + CriticalError: &privateapi.InvalidForkchoiceStateErr, + } + } + cfg.hd.ClearPendingPayloadStatus() + } + cfg.hd.ClearUnsettledForkChoice() return nil } @@ -455,7 +444,7 @@ func handleNewPayload( tx kv.RwTx, cfg HeadersCfg, headerInserter *headerdownload.HeaderInserter, -) error { +) (*privateapi.PayloadStatus, error) { header := payloadMessage.Header headerNumber := header.Number.Uint64() headerHash := header.Hash() @@ -467,22 +456,16 @@ func handleNewPayload( if err != nil { log.Warn(fmt.Sprintf("[%s] New payload err", s.LogPrefix()), "err", err) cfg.hd.BeaconRequestList.Remove(requestId) - if requestStatus == engineapi.New { - cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{CriticalError: err} - } - return err + return nil, err } if existingCanonicalHash != (common.Hash{}) && headerHash == existingCanonicalHash { log.Info(fmt.Sprintf("[%s] New payload: previously received valid header %d", s.LogPrefix(), headerNumber)) cfg.hd.BeaconRequestList.Remove(requestId) - if requestStatus == engineapi.New { - cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{ - Status: remote.EngineStatus_VALID, - LatestValidHash: headerHash, - } - } - return nil + return &privateapi.PayloadStatus{ + Status: remote.EngineStatus_VALID, + LatestValidHash: headerHash, + }, nil } bad, lastValidHash := cfg.hd.IsBadHeaderPoS(headerHash) @@ -492,67 +475,55 @@ func handleNewPayload( if bad { log.Info(fmt.Sprintf("[%s] Previously known bad block", s.LogPrefix()), "height", headerNumber, "hash", headerHash) cfg.hd.BeaconRequestList.Remove(requestId) - if requestStatus == engineapi.New { - cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{ - Status: remote.EngineStatus_INVALID, - LatestValidHash: lastValidHash, - } - } else { - cfg.hd.ReportBadHeaderPoS(headerHash, lastValidHash) - } - return nil + cfg.hd.ReportBadHeaderPoS(headerHash, lastValidHash) + return &privateapi.PayloadStatus{ + Status: remote.EngineStatus_INVALID, + LatestValidHash: lastValidHash, + }, nil } parent, err := cfg.blockReader.Header(ctx, tx, header.ParentHash, headerNumber-1) if err != nil { - return err + return nil, err } if parent == nil { log.Info(fmt.Sprintf("[%s] New payload missing parent", s.LogPrefix())) hashToDownload := header.ParentHash heightToDownload := headerNumber - 1 cfg.hd.SetPoSDownloaderTip(headerHash) - schedulePoSDownload(requestStatus, requestId, hashToDownload, heightToDownload, s, cfg) - return nil + schedulePoSDownload(requestId, hashToDownload, heightToDownload, s, cfg) + return &privateapi.PayloadStatus{Status: remote.EngineStatus_SYNCING}, nil } cfg.hd.BeaconRequestList.Remove(requestId) for _, tx := range payloadMessage.Body.Transactions { if types.TypedTransactionMarshalledAsRlpString(tx) { - if requestStatus == engineapi.New { - cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{ - Status: remote.EngineStatus_INVALID, - LatestValidHash: header.ParentHash, - ValidationError: errors.New("typed txn marshalled as RLP string"), - } - } else { - cfg.hd.ReportBadHeaderPoS(headerHash, header.ParentHash) - } - return nil + cfg.hd.ReportBadHeaderPoS(headerHash, header.ParentHash) + return &privateapi.PayloadStatus{ + Status: 
remote.EngineStatus_INVALID, + LatestValidHash: header.ParentHash, + ValidationError: errors.New("typed txn marshalled as RLP string"), + }, nil } } transactions, err := types.DecodeTransactions(payloadMessage.Body.Transactions) if err != nil { log.Warn("Error during Beacon transaction decoding", "err", err.Error()) - if requestStatus == engineapi.New { - cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{ - Status: remote.EngineStatus_INVALID, - LatestValidHash: header.ParentHash, - ValidationError: err, - } - } else { - cfg.hd.ReportBadHeaderPoS(headerHash, header.ParentHash) - } - return nil + cfg.hd.ReportBadHeaderPoS(headerHash, header.ParentHash) + return &privateapi.PayloadStatus{ + Status: remote.EngineStatus_INVALID, + LatestValidHash: header.ParentHash, + ValidationError: err, + }, nil } log.Trace(fmt.Sprintf("[%s] New payload begin verification", s.LogPrefix())) - success, err := verifyAndSaveNewPoSHeader(requestStatus, s, tx, cfg, header, payloadMessage.Body, headerInserter) + response, success, err := verifyAndSaveNewPoSHeader(requestStatus, s, tx, cfg, header, payloadMessage.Body, headerInserter) log.Trace(fmt.Sprintf("[%s] New payload verification ended", s.LogPrefix()), "success", success, "err", err) if err != nil || !success { - return err + return response, err } if cfg.bodyDownload != nil { @@ -560,7 +531,7 @@ func handleNewPayload( cfg.bodyDownload.AddToPrefetch(block) } - return nil + return response, nil } func verifyAndSaveNewPoSHeader( @@ -571,95 +542,79 @@ func verifyAndSaveNewPoSHeader( header *types.Header, body *types.RawBody, headerInserter *headerdownload.HeaderInserter, -) (success bool, err error) { +) (response *privateapi.PayloadStatus, success bool, err error) { headerNumber := header.Number.Uint64() headerHash := header.Hash() if verificationErr := cfg.hd.VerifyHeader(header); verificationErr != nil { log.Warn("Verification failed for header", "hash", headerHash, "height", headerNumber, "err", verificationErr) - if requestStatus == engineapi.New { - cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{ - Status: remote.EngineStatus_INVALID, - LatestValidHash: header.ParentHash, - ValidationError: verificationErr, - } - } else { - cfg.hd.ReportBadHeaderPoS(headerHash, header.ParentHash) - } - return + cfg.hd.ReportBadHeaderPoS(headerHash, header.ParentHash) + return &privateapi.PayloadStatus{ + Status: remote.EngineStatus_INVALID, + LatestValidHash: header.ParentHash, + ValidationError: verificationErr, + }, false, nil } err = headerInserter.FeedHeaderPoS(tx, header, headerHash) if err != nil { - if requestStatus == engineapi.New { - cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{CriticalError: err} - } - return + return nil, false, err } currentHeadHash := rawdb.ReadHeadHeaderHash(tx) - if currentHeadHash == header.ParentHash { - if cfg.memoryOverlay && (cfg.hd.GetNextForkHash() == (common.Hash{}) || header.ParentHash == cfg.hd.GetNextForkHash()) { - if err = cfg.hd.ValidatePayload(tx, header, body, cfg.execPayload); err != nil { - cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{Status: remote.EngineStatus_INVALID} - return - } - cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{ - Status: remote.EngineStatus_VALID, - LatestValidHash: headerHash, - } - success = true - return - } + if currentHeadHash != header.ParentHash { + // Side chain or something weird + // TODO(yperbasis): considered non-canonical because some missing headers were downloaded but not canonized + // Or it's not a problem because forkChoice is updated frequently? 
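// Illustrative aside, not part of the patch: with this refactor the handlers stop writing to
// cfg.hd.PayloadStatusCh directly. verifyAndSaveNewPoSHeader (and handleNewPayload above it)
// now return a *privateapi.PayloadStatus, and HeadersPOS sends it on the channel only after
// committing the stage's own transaction. A rough summary of the returned triple, based on the
// hunks in this file:
//
//	response, success, err := verifyAndSaveNewPoSHeader(requestStatus, s, tx, cfg, header, payloadMessage.Body, headerInserter)
//	// err != nil               -> the caller replies with PayloadStatus{CriticalError: err}
//	// !success (err == nil)    -> response carries EngineStatus_INVALID
//	// success, response != nil -> EngineStatus_VALID (validated in-memory) or ACCEPTED (side chain)
//	// success, response == nil -> canonical extension; the reply is resolved later in the
//	//                             stage loop from the pending payload hash recorded here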
- // OK, we're on the canonical chain - if requestStatus == engineapi.New { - cfg.hd.SetPendingPayloadStatus(headerHash) + // No canonization, HeadHeaderHash & StageProgress are not updated + return &privateapi.PayloadStatus{Status: remote.EngineStatus_ACCEPTED}, true, nil + } + + if cfg.memoryOverlay && (cfg.hd.GetNextForkHash() == (common.Hash{}) || header.ParentHash == cfg.hd.GetNextForkHash()) { + if err = cfg.hd.ValidatePayload(tx, header, body, cfg.execPayload); err != nil { + return &privateapi.PayloadStatus{Status: remote.EngineStatus_INVALID}, false, nil } + return &privateapi.PayloadStatus{ + Status: remote.EngineStatus_VALID, + LatestValidHash: headerHash, + }, true, nil + } - logEvery := time.NewTicker(logInterval) - defer logEvery.Stop() + // OK, we're on the canonical chain + if requestStatus == engineapi.New { + cfg.hd.SetPendingPayloadStatus(headerHash) + } - // Extend canonical chain by the new header - err = fixCanonicalChain(s.LogPrefix(), logEvery, headerInserter.GetHighest(), headerInserter.GetHighestHash(), tx, cfg.blockReader) - if err != nil { - return - } + logEvery := time.NewTicker(logInterval) + defer logEvery.Stop() - err = rawdb.WriteHeadHeaderHash(tx, headerHash) - if err != nil { - return - } + // Extend canonical chain by the new header + err = fixCanonicalChain(s.LogPrefix(), logEvery, headerInserter.GetHighest(), headerInserter.GetHighestHash(), tx, cfg.blockReader) + if err != nil { + return nil, false, err + } - err = s.Update(tx, headerNumber) - if err != nil { - return - } - } else { - // Side chain or something weird - // TODO(yperbasis): considered non-canonical because some missing headers were downloaded but not canonized - // Or it's not a problem because forkChoice is updated frequently? - if requestStatus == engineapi.New { - cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{Status: remote.EngineStatus_ACCEPTED} - } - // No canonization, HeadHeaderHash & StageProgress are not updated + err = rawdb.WriteHeadHeaderHash(tx, headerHash) + if err != nil { + return nil, false, err } - success = true - return + err = s.Update(tx, headerNumber) + if err != nil { + return nil, false, err + } + + return nil, true, nil } func schedulePoSDownload( - requestStatus engineapi.RequestStatus, requestId int, hashToDownload common.Hash, heightToDownload uint64, s *StageState, cfg HeadersCfg, ) { - if requestStatus == engineapi.New { - cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{Status: remote.EngineStatus_SYNCING} - } cfg.hd.BeaconRequestList.SetStatus(requestId, engineapi.DataWasMissing) if cfg.hd.PosStatus() != headerdownload.Idle { diff --git a/turbo/engineapi/request_list.go b/turbo/engineapi/request_list.go index 9e36560e1e7..e66084c5fc9 100644 --- a/turbo/engineapi/request_list.go +++ b/turbo/engineapi/request_list.go @@ -38,8 +38,7 @@ type RequestWithStatus struct { type Interrupt int const ( // Interrupt values - None = iota - Yield // e.g. 
yield RW transaction to block building + None = iota Synced Stopping ) From a11dadd01266122464306beb0e26796e0ccef0bd Mon Sep 17 00:00:00 2001 From: Luke Montgomery <63685748+LMonty-1@users.noreply.github.com> Date: Tue, 21 Jun 2022 00:58:55 -0700 Subject: [PATCH 093/136] fixed typo in db_faq.md (#4509) --- docs/programmers_guide/db_faq.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/programmers_guide/db_faq.md b/docs/programmers_guide/db_faq.md index 8a9d79fce4e..e411996b049 100644 --- a/docs/programmers_guide/db_faq.md +++ b/docs/programmers_guide/db_faq.md @@ -33,7 +33,7 @@ and [mdbx.h](https://github.com/torquem-ch/libmdbx/blob/master/mdbx.h) ### How RAM used -Erigon will use all available RAM, but this RAM will not belong to Eroigon’s process. OS will own all this +Erigon will use all available RAM, but this RAM will not belong to Erigon’s process. OS will own all this memory. And OS will maintain hot part of DB in RAM. If OS will need RAM for other programs or for second Erigon instance OS will manage all the work. This called PageCache. Erigon itself using under 2Gb. So, Erigon will benefit from more RAM and will use all RAM without re-configuration. Same PageCache can be used by other processes if they run on same From 18e5bf3bbff69ee1c7c36b43002c561e03639ff6 Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Tue, 21 Jun 2022 10:17:54 +0200 Subject: [PATCH 094/136] Rework PR 4505 (#4511) * Revert "Update chainConfig in the database (#4505)" This reverts commit f38ab485e0111e0e13f436c91cf86931c1affb09. * Only preserve config of unknown chains --- core/genesis.go | 5 +++++ params/config.go | 4 ++++ turbo/stages/genesis_test.go | 9 +++++++++ 3 files changed, 18 insertions(+) diff --git a/core/genesis.go b/core/genesis.go index 0d9de90a61f..ee16a2d26be 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -271,6 +271,11 @@ func WriteGenesisBlock(db kv.RwTx, genesis *Genesis, overrideMergeNetsplitBlock, } return newCfg, storedBlock, nil } + // Special case: don't change the existing config of an unknown chain if no new + // config is supplied. This is useful, for example, to preserve DB config created by erigon init. + if genesis == nil && params.ChainConfigByGenesisHash(storedHash) == nil && overrideMergeNetsplitBlock == nil && overrideTerminalTotalDifficulty == nil { + return storedCfg, storedBlock, nil + } // Check config compatibility and write the config. Compatibility errors // are returned to the caller unless we're already at block zero. 
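// Illustrative aside, not part of the patch: the special case re-added above is what keeps a
// private chain initialised with `erigon init genesis.json` on its own config instead of falling
// back to AllProtocolChanges and tripping the compatibility check. A minimal sketch, assuming a
// custom spec `customg` whose genesis hash params.ChainConfigByGenesisHash does not recognise
// (this mirrors the "custom block in DB, genesis == nil" test restored further below):
//
//	customg.MustCommit(db)                          // roughly what `erigon init` does
//	cfg, _, err := core.CommitGenesisBlock(db, nil) // later start-up with no genesis spec
//	// expected: err == nil and cfg is customg.Config, i.e. the stored config is preserved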
height := rawdb.ReadHeaderNumber(db, rawdb.ReadHeadHeaderHash(db)) diff --git a/params/config.go b/params/config.go index 14d7b65d3e0..6446c63eb19 100644 --- a/params/config.go +++ b/params/config.go @@ -599,6 +599,7 @@ func (c *ChainConfig) CheckConfigForkOrder() error { {name: "petersburgBlock", block: c.PetersburgBlock}, {name: "istanbulBlock", block: c.IstanbulBlock}, {name: "muirGlacierBlock", block: c.MuirGlacierBlock, optional: true}, + {name: "eulerBlock", block: c.EulerBlock, optional: true}, {name: "berlinBlock", block: c.BerlinBlock}, {name: "londonBlock", block: c.LondonBlock}, {name: "arrowGlacierBlock", block: c.ArrowGlacierBlock, optional: true}, @@ -675,6 +676,9 @@ func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, head uint64) *ConfigC if isForkIncompatible(c.GrayGlacierBlock, newcfg.GrayGlacierBlock, head) { return newCompatError("Gray Glacier fork block", c.GrayGlacierBlock, newcfg.GrayGlacierBlock) } + if isForkIncompatible(c.EulerBlock, newcfg.EulerBlock, head) { + return newCompatError("Euler fork block", c.EulerBlock, newcfg.EulerBlock) + } return nil } diff --git a/turbo/stages/genesis_test.go b/turbo/stages/genesis_test.go index a4ba81675a6..5c6877b6549 100644 --- a/turbo/stages/genesis_test.go +++ b/turbo/stages/genesis_test.go @@ -166,6 +166,15 @@ func TestSetupGenesis(t *testing.T) { wantHash: params.MainnetGenesisHash, wantConfig: params.MainnetChainConfig, }, + { + name: "custom block in DB, genesis == nil", + fn: func(db kv.RwDB) (*params.ChainConfig, *types.Block, error) { + customg.MustCommit(db) + return core.CommitGenesisBlock(db, nil) + }, + wantHash: customghash, + wantConfig: customg.Config, + }, { name: "custom block in DB, genesis == ropsten", fn: func(db kv.RwDB) (*params.ChainConfig, *types.Block, error) { From 0a527feddbeeab99027cd61d974d19bf5ed3f242 Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Wed, 22 Jun 2022 14:54:23 +0200 Subject: [PATCH 095/136] Send Engine API response after tx.Commit() even when useExternalTx (#4516) * pendingPayloadStatus -> pendingPayloadHash * Ensure that Engine API response is sent after tx.Commit() even when useExternalTx * ProcessEngineApiResponse -> SendEngineApiResponse * PayloadResponse -> PayloadStatus --- eth/stagedsync/stage_headers.go | 27 ++++++----- turbo/stages/headerdownload/header_algos.go | 25 +++++++--- .../headerdownload/header_data_struct.go | 3 +- turbo/stages/mock_sentry.go | 4 ++ turbo/stages/sentry_mock_test.go | 45 +++++++++++------- turbo/stages/stageloop.go | 47 +++++++++++-------- 6 files changed, 94 insertions(+), 57 deletions(-) diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 9adcf1ee321..8dbf64d82fe 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -174,7 +174,7 @@ func HeadersPOS( } request := requestWithStatus.Message - status := requestWithStatus.Status + requestStatus := requestWithStatus.Status // Decide what kind of action we need to take place var payloadMessage *engineapi.PayloadMessage @@ -183,18 +183,19 @@ func HeadersPOS( payloadMessage = request.(*engineapi.PayloadMessage) } - cfg.hd.ClearPendingPayloadStatus() + cfg.hd.ClearPendingPayloadHash() + cfg.hd.SetPendingPayloadStatus(nil) - var response *privateapi.PayloadStatus + var payloadStatus *privateapi.PayloadStatus var err error if forkChoiceInsteadOfNewPayload { - response, err = startHandlingForkChoice(forkChoiceMessage, status, requestId, s, u, ctx, tx, cfg, headerInserter, cfg.blockReader) 
+ payloadStatus, err = startHandlingForkChoice(forkChoiceMessage, requestStatus, requestId, s, u, ctx, tx, cfg, headerInserter, cfg.blockReader) } else { - response, err = handleNewPayload(payloadMessage, status, requestId, s, ctx, tx, cfg, headerInserter) + payloadStatus, err = handleNewPayload(payloadMessage, requestStatus, requestId, s, ctx, tx, cfg, headerInserter) } if err != nil { - if status == engineapi.New { + if requestStatus == engineapi.New { cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{CriticalError: err} } return err @@ -206,8 +207,8 @@ func HeadersPOS( } } - if response != nil && status == engineapi.New { - cfg.hd.PayloadStatusCh <- *response + if requestStatus == engineapi.New { + cfg.hd.SetPendingPayloadStatus(payloadStatus) } return nil @@ -363,7 +364,7 @@ func startHandlingForkChoice( if err := cfg.hd.FlushNextForkState(tx); err != nil { return nil, err } - cfg.hd.SetPendingPayloadStatus(headerHash) + cfg.hd.SetPendingPayloadHash(headerHash) return nil, nil } @@ -372,7 +373,7 @@ func startHandlingForkChoice( if requestStatus == engineapi.New { if headerNumber-forkingPoint <= ShortPoSReorgThresholdBlocks { // TODO(yperbasis): what if some bodies are missing and we have to download them? - cfg.hd.SetPendingPayloadStatus(headerHash) + cfg.hd.SetPendingPayloadHash(headerHash) } else { cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{Status: remote.EngineStatus_SYNCING} } @@ -423,12 +424,12 @@ func finishHandlingForkChoice( } if !canonical { - if cfg.hd.GetPendingPayloadStatus() != (common.Hash{}) { + if cfg.hd.GetPendingPayloadHash() != (common.Hash{}) { cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{ CriticalError: &privateapi.InvalidForkchoiceStateErr, } } - cfg.hd.ClearPendingPayloadStatus() + cfg.hd.ClearPendingPayloadHash() } cfg.hd.ClearUnsettledForkChoice() @@ -583,7 +584,7 @@ func verifyAndSaveNewPoSHeader( // OK, we're on the canonical chain if requestStatus == engineapi.New { - cfg.hd.SetPendingPayloadStatus(headerHash) + cfg.hd.SetPendingPayloadHash(headerHash) } logEvery := time.NewTicker(logInterval) diff --git a/turbo/stages/headerdownload/header_algos.go b/turbo/stages/headerdownload/header_algos.go index 5dc5cbd315e..c4731b698cb 100644 --- a/turbo/stages/headerdownload/header_algos.go +++ b/turbo/stages/headerdownload/header_algos.go @@ -27,6 +27,7 @@ import ( "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" + "github.com/ledgerwatch/erigon/ethdb/privateapi" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/turbo/engineapi" @@ -1150,22 +1151,34 @@ func (hd *HeaderDownload) FetchingNew() bool { return hd.fetchingNew } -func (hd *HeaderDownload) GetPendingPayloadStatus() common.Hash { +func (hd *HeaderDownload) GetPendingPayloadHash() common.Hash { hd.lock.RLock() defer hd.lock.RUnlock() - return hd.pendingPayloadStatus + return hd.pendingPayloadHash +} + +func (hd *HeaderDownload) SetPendingPayloadHash(header common.Hash) { + hd.lock.Lock() + defer hd.lock.Unlock() + hd.pendingPayloadHash = header } -func (hd *HeaderDownload) SetPendingPayloadStatus(header common.Hash) { +func (hd *HeaderDownload) ClearPendingPayloadHash() { hd.lock.Lock() defer hd.lock.Unlock() - hd.pendingPayloadStatus = header + hd.pendingPayloadHash = common.Hash{} +} + +func (hd *HeaderDownload) GetPendingPayloadStatus() *privateapi.PayloadStatus { + hd.lock.RLock() + defer hd.lock.RUnlock() + return hd.pendingPayloadStatus } -func 
(hd *HeaderDownload) ClearPendingPayloadStatus() { +func (hd *HeaderDownload) SetPendingPayloadStatus(response *privateapi.PayloadStatus) { hd.lock.Lock() defer hd.lock.Unlock() - hd.pendingPayloadStatus = common.Hash{} + hd.pendingPayloadStatus = response } func (hd *HeaderDownload) GetUnsettledForkChoice() (*engineapi.ForkChoiceMessage, uint64) { diff --git a/turbo/stages/headerdownload/header_data_struct.go b/turbo/stages/headerdownload/header_data_struct.go index 4fdf3dc1398..54b3bee1913 100644 --- a/turbo/stages/headerdownload/header_data_struct.go +++ b/turbo/stages/headerdownload/header_data_struct.go @@ -309,7 +309,8 @@ type HeaderDownload struct { headersCollector *etl.Collector // ETL collector for headers BeaconRequestList *engineapi.RequestList // Requests from ethbackend to staged sync PayloadStatusCh chan privateapi.PayloadStatus // Responses (validation/execution status) - pendingPayloadStatus common.Hash // Header whose status we still should send to PayloadStatusCh + pendingPayloadHash common.Hash // Header whose status we still should send to PayloadStatusCh + pendingPayloadStatus *privateapi.PayloadStatus // Alternatively, there can be an already prepared response to send to PayloadStatusCh unsettledForkChoice *engineapi.ForkChoiceMessage // Forkchoice to process after unwind unsettledHeadHeight uint64 // Height of unsettledForkChoice.headBlockHash posDownloaderTip common.Hash // See https://hackmd.io/GDc0maGsQeKfP8o2C7L52w diff --git a/turbo/stages/mock_sentry.go b/turbo/stages/mock_sentry.go index 57731216e8f..4fd664c83ae 100644 --- a/turbo/stages/mock_sentry.go +++ b/turbo/stages/mock_sentry.go @@ -532,3 +532,7 @@ func (ms *MockSentry) SendForkChoiceRequest(message *engineapi.ForkChoiceMessage func (ms *MockSentry) ReceivePayloadStatus() privateapi.PayloadStatus { return <-ms.sentriesClient.Hd.PayloadStatusCh } + +func (ms *MockSentry) HeaderDownload() *headerdownload.HeaderDownload { + return ms.sentriesClient.Hd +} diff --git a/turbo/stages/sentry_mock_test.go b/turbo/stages/sentry_mock_test.go index f3d4096fb88..f0a9c136efa 100644 --- a/turbo/stages/sentry_mock_test.go +++ b/turbo/stages/sentry_mock_test.go @@ -520,9 +520,8 @@ func TestForkchoiceToGenesis(t *testing.T) { m.SendForkChoiceRequest(&forkChoiceMessage) headBlockHash, err := stages.StageLoopStep(m.Ctx, m.DB, m.Sync, 0, m.Notifications, true, m.UpdateHead, nil) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) + stages.SendPayloadStatus(m.HeaderDownload(), headBlockHash, err) assert.Equal(t, m.Genesis.Hash(), headBlockHash) @@ -541,10 +540,9 @@ func TestBogusForkchoice(t *testing.T) { } m.SendForkChoiceRequest(&forkChoiceMessage) - _, err := stages.StageLoopStep(m.Ctx, m.DB, m.Sync, 0, m.Notifications, true, m.UpdateHead, nil) - if err != nil { - t.Fatal(err) - } + headBlockHash, err := stages.StageLoopStep(m.Ctx, m.DB, m.Sync, 0, m.Notifications, true, m.UpdateHead, nil) + require.NoError(t, err) + stages.SendPayloadStatus(m.HeaderDownload(), headBlockHash, err) payloadStatus := m.ReceivePayloadStatus() assert.Equal(t, remote.EngineStatus_SYNCING, payloadStatus.Status) @@ -557,10 +555,9 @@ func TestBogusForkchoice(t *testing.T) { } m.SendForkChoiceRequest(&forkChoiceMessage) - _, err = stages.StageLoopStep(m.Ctx, m.DB, m.Sync, 0, m.Notifications, false, m.UpdateHead, nil) - if err != nil { - t.Fatal(err) - } + headBlockHash, err = stages.StageLoopStep(m.Ctx, m.DB, m.Sync, 0, m.Notifications, false, m.UpdateHead, nil) + require.NoError(t, err) + stages.SendPayloadStatus(m.HeaderDownload(), 
headBlockHash, err) payloadStatus = m.ReceivePayloadStatus() assert.Equal(t, remote.EngineStatus_VALID, payloadStatus.Status) @@ -580,8 +577,10 @@ func TestPoSDownloader(t *testing.T) { Body: chain.TopBlock.RawBody(), } m.SendPayloadRequest(&payloadMessage) - _, err = stages.StageLoopStep(m.Ctx, m.DB, m.Sync, 0, m.Notifications, true, m.UpdateHead, nil) + headBlockHash, err := stages.StageLoopStep(m.Ctx, m.DB, m.Sync, 0, m.Notifications, true, m.UpdateHead, nil) require.NoError(t, err) + stages.SendPayloadStatus(m.HeaderDownload(), headBlockHash, err) + payloadStatus := m.ReceivePayloadStatus() assert.Equal(t, remote.EngineStatus_SYNCING, payloadStatus.Status) @@ -598,11 +597,14 @@ func TestPoSDownloader(t *testing.T) { m.ReceiveWg.Wait() // First cycle: save the downloaded header - _, err = stages.StageLoopStep(m.Ctx, m.DB, m.Sync, 0, m.Notifications, false, m.UpdateHead, nil) + headBlockHash, err = stages.StageLoopStep(m.Ctx, m.DB, m.Sync, 0, m.Notifications, false, m.UpdateHead, nil) require.NoError(t, err) + stages.SendPayloadStatus(m.HeaderDownload(), headBlockHash, err) + // Second cycle: process the previous beacon request - _, err = stages.StageLoopStep(m.Ctx, m.DB, m.Sync, 0, m.Notifications, false, m.UpdateHead, nil) + headBlockHash, err = stages.StageLoopStep(m.Ctx, m.DB, m.Sync, 0, m.Notifications, false, m.UpdateHead, nil) require.NoError(t, err) + stages.SendPayloadStatus(m.HeaderDownload(), headBlockHash, err) // Point forkChoice to the head forkChoiceMessage := engineapi.ForkChoiceMessage{ @@ -611,8 +613,10 @@ func TestPoSDownloader(t *testing.T) { FinalizedBlockHash: chain.TopBlock.Hash(), } m.SendForkChoiceRequest(&forkChoiceMessage) - headBlockHash, err := stages.StageLoopStep(m.Ctx, m.DB, m.Sync, 0, m.Notifications, false, m.UpdateHead, nil) + headBlockHash, err = stages.StageLoopStep(m.Ctx, m.DB, m.Sync, 0, m.Notifications, false, m.UpdateHead, nil) require.NoError(t, err) + stages.SendPayloadStatus(m.HeaderDownload(), headBlockHash, err) + assert.Equal(t, chain.TopBlock.Hash(), headBlockHash) } @@ -639,8 +643,10 @@ func TestPoSSyncWithInvalidHeader(t *testing.T) { Body: chain.TopBlock.RawBody(), } m.SendPayloadRequest(&payloadMessage) - _, err = stages.StageLoopStep(m.Ctx, m.DB, m.Sync, 0, m.Notifications, true, m.UpdateHead, nil) + headBlockHash, err := stages.StageLoopStep(m.Ctx, m.DB, m.Sync, 0, m.Notifications, true, m.UpdateHead, nil) require.NoError(t, err) + stages.SendPayloadStatus(m.HeaderDownload(), headBlockHash, err) + payloadStatus1 := m.ReceivePayloadStatus() assert.Equal(t, remote.EngineStatus_SYNCING, payloadStatus1.Status) @@ -656,8 +662,9 @@ func TestPoSSyncWithInvalidHeader(t *testing.T) { } m.ReceiveWg.Wait() - _, err = stages.StageLoopStep(m.Ctx, m.DB, m.Sync, 0, m.Notifications, false, m.UpdateHead, nil) + headBlockHash, err = stages.StageLoopStep(m.Ctx, m.DB, m.Sync, 0, m.Notifications, false, m.UpdateHead, nil) require.NoError(t, err) + stages.SendPayloadStatus(m.HeaderDownload(), headBlockHash, err) // Point forkChoice to the invalid tip forkChoiceMessage := engineapi.ForkChoiceMessage{ @@ -666,8 +673,10 @@ func TestPoSSyncWithInvalidHeader(t *testing.T) { FinalizedBlockHash: invalidTip.Hash(), } m.SendForkChoiceRequest(&forkChoiceMessage) - _, err = stages.StageLoopStep(m.Ctx, m.DB, m.Sync, 0, m.Notifications, false, m.UpdateHead, nil) + headBlockHash, err = stages.StageLoopStep(m.Ctx, m.DB, m.Sync, 0, m.Notifications, false, m.UpdateHead, nil) require.NoError(t, err) + stages.SendPayloadStatus(m.HeaderDownload(), headBlockHash, err) + 
payloadStatus2 := m.ReceivePayloadStatus() require.Equal(t, remote.EngineStatus_INVALID, payloadStatus2.Status) assert.Equal(t, lastValidHeader.Hash(), payloadStatus2.LatestValidHash) diff --git a/turbo/stages/stageloop.go b/turbo/stages/stageloop.go index 96bfe6d5903..5e6cbaf3277 100644 --- a/turbo/stages/stageloop.go +++ b/turbo/stages/stageloop.go @@ -31,6 +31,33 @@ import ( "github.com/ledgerwatch/log/v3" ) +func SendPayloadStatus(hd *headerdownload.HeaderDownload, headBlockHash common.Hash, err error) { + if pendingPayloadStatus := hd.GetPendingPayloadStatus(); pendingPayloadStatus != nil { + if err != nil { + hd.PayloadStatusCh <- privateapi.PayloadStatus{CriticalError: err} + } else { + hd.PayloadStatusCh <- *pendingPayloadStatus + } + } else if pendingPayloadHash := hd.GetPendingPayloadHash(); pendingPayloadHash != (common.Hash{}) { + if err != nil { + hd.PayloadStatusCh <- privateapi.PayloadStatus{CriticalError: err} + } else { + var status remote.EngineStatus + if headBlockHash == pendingPayloadHash { + status = remote.EngineStatus_VALID + } else { + status = remote.EngineStatus_INVALID + } + hd.PayloadStatusCh <- privateapi.PayloadStatus{ + Status: status, + LatestValidHash: headBlockHash, + } + } + } + hd.ClearPendingPayloadHash() + hd.SetPendingPayloadStatus(nil) +} + // StageLoop runs the continuous loop of staged sync func StageLoop( ctx context.Context, @@ -52,25 +79,7 @@ func StageLoop( height := hd.TopSeenHeight() headBlockHash, err := StageLoopStep(ctx, db, sync, height, notifications, initialCycle, updateHead, nil) - pendingPayloadStatus := hd.GetPendingPayloadStatus() - if pendingPayloadStatus != (common.Hash{}) { - if err != nil { - hd.PayloadStatusCh <- privateapi.PayloadStatus{CriticalError: err} - } else { - var status remote.EngineStatus - if headBlockHash == pendingPayloadStatus { - status = remote.EngineStatus_VALID - } else { - status = remote.EngineStatus_INVALID - } - hd.PayloadStatusCh <- privateapi.PayloadStatus{ - Status: status, - LatestValidHash: headBlockHash, - } - } - - hd.ClearPendingPayloadStatus() - } + SendPayloadStatus(hd, headBlockHash, err) if err != nil { if errors.Is(err, libcommon.ErrStopped) || errors.Is(err, context.Canceled) { From 1c4584b43864d5a60772bf5f268bc1ba716c3188 Mon Sep 17 00:00:00 2001 From: Chase Wright Date: Wed, 22 Jun 2022 20:13:32 -0500 Subject: [PATCH 096/136] Include admin methods in README (#4519) --- cmd/rpcdaemon/README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cmd/rpcdaemon/README.md b/cmd/rpcdaemon/README.md index 307d3c5e186..a889b7af456 100644 --- a/cmd/rpcdaemon/README.md +++ b/cmd/rpcdaemon/README.md @@ -157,6 +157,9 @@ The following table shows the current implementation status of Erigon's RPC daem | Command | Avail | Notes | | ------------------------------------------ | ------- | ------------------------------------------ | +| admin_nodeInfo | Yes | | +| admin_peers | Yes | | +| | | | | web3_clientVersion | Yes | | | web3_sha3 | Yes | | | | | | From 70bd93c5c3c4339e7247b9daae4a4c3e8226db11 Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Thu, 23 Jun 2022 10:41:15 +0200 Subject: [PATCH 097/136] Only apply overrides to stored config of a private chain (#4521) --- core/genesis.go | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/core/genesis.go b/core/genesis.go index ee16a2d26be..047fe37e56d 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -271,10 +271,17 @@ func WriteGenesisBlock(db kv.RwTx, genesis *Genesis, 
overrideMergeNetsplitBlock, } return newCfg, storedBlock, nil } - // Special case: don't change the existing config of an unknown chain if no new + // Special case: don't change the existing config of a private chain if no new // config is supplied. This is useful, for example, to preserve DB config created by erigon init. - if genesis == nil && params.ChainConfigByGenesisHash(storedHash) == nil && overrideMergeNetsplitBlock == nil && overrideTerminalTotalDifficulty == nil { - return storedCfg, storedBlock, nil + // In that case, only apply the overrides. + if genesis == nil && params.ChainConfigByGenesisHash(storedHash) == nil { + newCfg = storedCfg + if overrideMergeNetsplitBlock != nil { + newCfg.MergeNetsplitBlock = overrideMergeNetsplitBlock + } + if overrideTerminalTotalDifficulty != nil { + newCfg.TerminalTotalDifficulty = overrideTerminalTotalDifficulty + } } // Check config compatibility and write the config. Compatibility errors // are returned to the caller unless we're already at block zero. From 7896fc60b63fc5e61680f8d6dfb52bb29b3284d7 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Thu, 23 Jun 2022 17:23:35 +0200 Subject: [PATCH 098/136] fixed notifications (#4520) --- eth/stagedsync/stage_headers.go | 6 ++++++ turbo/stages/mock_sentry.go | 2 +- turbo/stages/stageloop.go | 1 + 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 8dbf64d82fe..d96767a3e13 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -20,6 +20,7 @@ import ( "github.com/ledgerwatch/erigon/cmd/downloader/downloadergrpc" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/dbutils" + "github.com/ledgerwatch/erigon/consensus/misc" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/ethdb/privateapi" @@ -56,6 +57,7 @@ type HeadersCfg struct { blockReader services.FullBlockReader dbEventNotifier snapshotsync.DBEventNotifier execPayload ExecutePayloadFunc + notifications *Notifications } func StageHeadersCfg( @@ -74,6 +76,7 @@ func StageHeadersCfg( blockReader services.FullBlockReader, tmpdir string, dbEventNotifier snapshotsync.DBEventNotifier, + notifications *Notifications, execPayload ExecutePayloadFunc) HeadersCfg { return HeadersCfg{ db: db, @@ -91,6 +94,7 @@ func StageHeadersCfg( blockReader: blockReader, dbEventNotifier: dbEventNotifier, execPayload: execPayload, + notifications: notifications, memoryOverlay: memoryOverlay, } } @@ -576,6 +580,8 @@ func verifyAndSaveNewPoSHeader( if err = cfg.hd.ValidatePayload(tx, header, body, cfg.execPayload); err != nil { return &privateapi.PayloadStatus{Status: remote.EngineStatus_INVALID}, false, nil } + pendingBaseFee := misc.CalcBaseFee(cfg.notifications.Accumulator.ChainConfig(), header) + cfg.notifications.Accumulator.SendAndReset(context.Background(), cfg.notifications.StateChangesConsumer, pendingBaseFee.Uint64(), header.GasLimit) return &privateapi.PayloadStatus{ Status: remote.EngineStatus_VALID, LatestValidHash: headerHash, diff --git a/turbo/stages/mock_sentry.go b/turbo/stages/mock_sentry.go index 4fd664c83ae..5ba56a2fad5 100644 --- a/turbo/stages/mock_sentry.go +++ b/turbo/stages/mock_sentry.go @@ -309,7 +309,7 @@ func MockWithEverything(t *testing.T, gspec *core.Genesis, key *ecdsa.PrivateKey mock.Sync = stagedsync.New( stagedsync.DefaultStages(mock.Ctx, prune, - stagedsync.StageHeadersCfg(mock.DB, mock.sentriesClient.Hd, mock.sentriesClient.Bd, 
*mock.ChainConfig, sendHeaderRequest, propagateNewBlockHashes, penalize, cfg.BatchSize, false, false, allSnapshots, snapshotsDownloader, blockReader, mock.tmpdir, mock.Notifications.Events, nil), + stagedsync.StageHeadersCfg(mock.DB, mock.sentriesClient.Hd, mock.sentriesClient.Bd, *mock.ChainConfig, sendHeaderRequest, propagateNewBlockHashes, penalize, cfg.BatchSize, false, false, allSnapshots, snapshotsDownloader, blockReader, mock.tmpdir, mock.Notifications.Events, mock.Notifications, nil), stagedsync.StageCumulativeIndexCfg(mock.DB), stagedsync.StageBlockHashesCfg(mock.DB, mock.tmpdir, mock.ChainConfig), stagedsync.StageBodiesCfg( diff --git a/turbo/stages/stageloop.go b/turbo/stages/stageloop.go index 5e6cbaf3277..5530f581610 100644 --- a/turbo/stages/stageloop.go +++ b/turbo/stages/stageloop.go @@ -362,6 +362,7 @@ func NewStagedSync( blockReader, tmpdir, notifications.Events, + notifications, execPayload), stagedsync.StageCumulativeIndexCfg(db), stagedsync.StageBlockHashesCfg(db, tmpdir, controlServer.ChainConfig), From 8de7c5e41cc034384321364eabb116247a401609 Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Thu, 23 Jun 2022 19:37:39 +0200 Subject: [PATCH 099/136] JSON parsing of safe & finalized (#4524) --- rpc/types.go | 26 ++++++++++++++++++++------ turbo/rpchelper/helper.go | 2 +- 2 files changed, 21 insertions(+), 7 deletions(-) diff --git a/rpc/types.go b/rpc/types.go index 1f7ea53c2e6..bf61b327c91 100644 --- a/rpc/types.go +++ b/rpc/types.go @@ -71,15 +71,15 @@ type BlockNumber int64 type Timestamp uint64 const ( - FinalizeBlockNumber = BlockNumber(-4) - SafeBlockNumber = BlockNumber(-3) - PendingBlockNumber = BlockNumber(-2) - LatestBlockNumber = BlockNumber(-1) - EarliestBlockNumber = BlockNumber(0) + FinalizedBlockNumber = BlockNumber(-4) + SafeBlockNumber = BlockNumber(-3) + PendingBlockNumber = BlockNumber(-2) + LatestBlockNumber = BlockNumber(-1) + EarliestBlockNumber = BlockNumber(0) ) // UnmarshalJSON parses the given JSON fragment into a BlockNumber. 
It supports: -// - "latest", "earliest" or "pending" as string arguments +// - "latest", "earliest", "pending", "safe", or "finalized" as string arguments // - the block number // Returned errors: // - an invalid block number error when the given argument isn't a known strings @@ -100,6 +100,12 @@ func (bn *BlockNumber) UnmarshalJSON(data []byte) error { case "pending": *bn = PendingBlockNumber return nil + case "safe": + *bn = SafeBlockNumber + return nil + case "finalized": + *bn = FinalizedBlockNumber + return nil case "null": *bn = LatestBlockNumber return nil @@ -173,6 +179,14 @@ func (bnh *BlockNumberOrHash) UnmarshalJSON(data []byte) error { bn := PendingBlockNumber bnh.BlockNumber = &bn return nil + case "safe": + bn := SafeBlockNumber + bnh.BlockNumber = &bn + return nil + case "finalized": + bn := FinalizedBlockNumber + bnh.BlockNumber = &bn + return nil default: if len(input) == 66 { hash := common.Hash{} diff --git a/turbo/rpchelper/helper.go b/turbo/rpchelper/helper.go index b87a61a68d9..6acc313f92e 100644 --- a/turbo/rpchelper/helper.go +++ b/turbo/rpchelper/helper.go @@ -50,7 +50,7 @@ func _GetBlockNumber(requireCanonical bool, blockNrOrHash rpc.BlockNumberOrHash, } case rpc.EarliestBlockNumber: blockNumber = 0 - case rpc.FinalizeBlockNumber: + case rpc.FinalizedBlockNumber: blockNumber, err = GetFinalizedBlockNumber(tx) if err != nil { return 0, common.Hash{}, false, err From 5e2f6bb2dbd4aff2af392a5489cd6e4261e0fae6 Mon Sep 17 00:00:00 2001 From: primal_concrete_sledge Date: Fri, 24 Jun 2022 12:11:38 +0400 Subject: [PATCH 100/136] Fix/new eth filters (#4504) * Add fixes to eth_newPendingTxFilter and others * Fix tests * Add test * Add goroutines return on closed chans --- cmd/rpcdaemon/commands/eth_api.go | 6 +- cmd/rpcdaemon/commands/eth_filters.go | 138 ++++++++++-------- cmd/rpcdaemon/commands/eth_filters_test.go | 45 ++++++ cmd/rpcdaemon/commands/eth_subscribe_test.go | 1 - cmd/rpcdaemon22/commands/eth_api.go | 6 +- cmd/rpcdaemon22/commands/eth_filters.go | 101 ++++++------- .../commands/eth_subscribe_test.go | 1 - turbo/rpchelper/filters.go | 45 +++--- turbo/rpchelper/logsfilter.go | 6 - 9 files changed, 201 insertions(+), 148 deletions(-) create mode 100644 cmd/rpcdaemon/commands/eth_filters_test.go diff --git a/cmd/rpcdaemon/commands/eth_api.go b/cmd/rpcdaemon/commands/eth_api.go index 98a469ac143..c9eac584e76 100644 --- a/cmd/rpcdaemon/commands/eth_api.go +++ b/cmd/rpcdaemon/commands/eth_api.go @@ -53,9 +53,9 @@ type EthAPI interface { GetUncleCountByBlockHash(ctx context.Context, hash common.Hash) (*hexutil.Uint, error) // Filter related (see ./eth_filters.go) - NewPendingTransactionFilter(_ context.Context) (common.Hash, error) - NewBlockFilter(_ context.Context) (common.Hash, error) - NewFilter(_ context.Context, crit ethFilters.FilterCriteria) (common.Hash, error) + NewPendingTransactionFilter(_ context.Context) (string, error) + NewBlockFilter(_ context.Context) (string, error) + NewFilter(_ context.Context, crit ethFilters.FilterCriteria) (string, error) UninstallFilter(_ context.Context, index string) (bool, error) GetFilterChanges(_ context.Context, index string) ([]interface{}, error) diff --git a/cmd/rpcdaemon/commands/eth_filters.go b/cmd/rpcdaemon/commands/eth_filters.go index f99ced97f1c..6596075af3b 100644 --- a/cmd/rpcdaemon/commands/eth_filters.go +++ b/cmd/rpcdaemon/commands/eth_filters.go @@ -2,9 +2,7 @@ package commands import ( "context" - "fmt" - "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/debug" 
"github.com/ledgerwatch/erigon/common/hexutil" "github.com/ledgerwatch/erigon/core/types" @@ -15,9 +13,9 @@ import ( ) // NewPendingTransactionFilter new transaction filter -func (api *APIImpl) NewPendingTransactionFilter(_ context.Context) (common.Hash, error) { +func (api *APIImpl) NewPendingTransactionFilter(_ context.Context) (string, error) { if api.filters == nil { - return common.Hash{}, rpc.ErrNotificationsUnsupported + return "", rpc.ErrNotificationsUnsupported } txsCh := make(chan []types.Transaction, 1) id := api.filters.SubscribePendingTxs(txsCh) @@ -29,20 +27,19 @@ func (api *APIImpl) NewPendingTransactionFilter(_ context.Context) (common.Hash, return } api.filters.AddPendingTxs(id, txs) - default: } } }() - return common.HexToHash(string(id)), nil + return "0x" + string(id), nil } // NewBlockFilter implements eth_newBlockFilter. Creates a filter in the node, to notify when a new block arrives. -func (api *APIImpl) NewBlockFilter(_ context.Context) (common.Hash, error) { +func (api *APIImpl) NewBlockFilter(_ context.Context) (string, error) { if api.filters == nil { - return common.Hash{}, rpc.ErrNotificationsUnsupported + return "", rpc.ErrNotificationsUnsupported } - ch := make(chan *types.Block, 1) - id := api.filters.SubscribePendingBlock(ch) + ch := make(chan *types.Header, 1) + id := api.filters.SubscribeNewHeads(ch) go func() { for { select { @@ -51,17 +48,16 @@ func (api *APIImpl) NewBlockFilter(_ context.Context) (common.Hash, error) { return } api.filters.AddPendingBlock(id, block) - default: } } }() - return common.HexToHash(string(id)), nil + return "0x" + string(id), nil } // NewFilter implements eth_newFilter. Creates an arbitrary filter object, based on filter options, to notify when the state changes (logs). -func (api *APIImpl) NewFilter(_ context.Context, crit filters.FilterCriteria) (common.Hash, error) { +func (api *APIImpl) NewFilter(_ context.Context, crit filters.FilterCriteria) (string, error) { if api.filters == nil { - return common.Hash{}, rpc.ErrNotificationsUnsupported + return "", rpc.ErrNotificationsUnsupported } logs := make(chan *types.Log, 1) id := api.filters.SubscribeLogs(logs, crit) @@ -73,11 +69,10 @@ func (api *APIImpl) NewFilter(_ context.Context, crit filters.FilterCriteria) (c return } api.filters.AddLogs(id, lg) - default: } } }() - return common.HexToHash(hexutil.EncodeUint64(uint64(id))), nil + return hexutil.EncodeUint64(uint64(id)), nil } // UninstallFilter new transaction filter @@ -85,20 +80,20 @@ func (api *APIImpl) UninstallFilter(_ context.Context, index string) (bool, erro if api.filters == nil { return false, rpc.ErrNotificationsUnsupported } - if common.IsHexAddress32(index) { - // remove 0x - if len(index) >= 2 && index[0] == '0' && (index[1] == 'x' || index[1] == 'X') { - index = index[2:] - } - isDeleted := api.filters.UnsubscribePendingBlock(rpchelper.PendingBlockSubID(index)) || - api.filters.UnsubscribePendingTxs(rpchelper.PendingTxsSubID(index)) - id, err := hexutil.DecodeUint64(index) - if err == nil { - return isDeleted || api.filters.UnsubscribeLogs(rpchelper.LogsSubID(id)), nil - } + var isDeleted bool + // remove 0x + cutIndex := index + if len(index) >= 2 && index[0] == '0' && (index[1] == 'x' || index[1] == 'X') { + cutIndex = index[2:] + } + isDeleted = api.filters.UnsubscribeHeads(rpchelper.HeadsSubID(cutIndex)) || + api.filters.UnsubscribePendingTxs(rpchelper.PendingTxsSubID(cutIndex)) + id, err := hexutil.DecodeUint64(index) + if err == nil { + return isDeleted || 
api.filters.UnsubscribeLogs(rpchelper.LogsSubID(id)), nil } - return false, nil + return isDeleted, nil } // GetFilterChanges implements eth_getFilterChanges. Polling method for a previously-created filter, which returns an array of logs which occurred since last poll. @@ -107,35 +102,36 @@ func (api *APIImpl) GetFilterChanges(_ context.Context, index string) ([]interfa return nil, rpc.ErrNotificationsUnsupported } stub := make([]interface{}, 0) - if common.IsHexAddress32(index) { - // remove 0x - if len(index) >= 2 && index[0] == '0' && (index[1] == 'x' || index[1] == 'X') { - index = index[2:] - } - if blocks, ok := api.filters.ReadPendingBlocks(rpchelper.PendingBlockSubID(index)); ok { - for _, v := range blocks { - stub = append(stub, v.Hash()) - } - return stub, nil + + // remove 0x + cutIndex := index + if len(index) >= 2 && index[0] == '0' && (index[1] == 'x' || index[1] == 'X') { + cutIndex = index[2:] + } + if blocks, ok := api.filters.ReadPendingBlocks(rpchelper.HeadsSubID(cutIndex)); ok { + for _, v := range blocks { + stub = append(stub, v.Hash()) } - if txs, ok := api.filters.ReadPendingTxs(rpchelper.PendingTxsSubID(index)); ok { - for _, v := range txs { - for _, tx := range v { - stub = append(stub, tx.Hash()) - } + return stub, nil + } + if txs, ok := api.filters.ReadPendingTxs(rpchelper.PendingTxsSubID(cutIndex)); ok { + for _, v := range txs { + for _, tx := range v { + stub = append(stub, tx.Hash()) } return stub, nil } - id, err := hexutil.DecodeUint64(index) - if err != nil { - return stub, fmt.Errorf("eth_getFilterChanges, wrong index: %w", err) - } - if logs, ok := api.filters.ReadLogs(rpchelper.LogsSubID(id)); ok { - for _, v := range logs { - stub = append(stub, v) - } - return stub, nil + return stub, nil + } + id, err := hexutil.DecodeUint64(index) + if err != nil { + return stub, nil + } + if logs, ok := api.filters.ReadLogs(rpchelper.LogsSubID(id)); ok { + for _, v := range logs { + stub = append(stub, v) } + return stub, nil } return stub, nil } @@ -161,10 +157,17 @@ func (api *APIImpl) NewHeads(ctx context.Context) (*rpc.Subscription, error) { for { select { - case h := <-headers: - err := notifier.Notify(rpcSub.ID, h) - if err != nil { - log.Warn("error while notifying subscription", "err", err) + case h, ok := <-headers: + if h != nil { + err := notifier.Notify(rpcSub.ID, h) + if err != nil { + log.Warn("error while notifying subscription", "err", err) + return + } + } + if !ok { + log.Warn("new heads channel was closed") + return } case <-rpcSub.Err(): return @@ -195,15 +198,20 @@ func (api *APIImpl) NewPendingTransactions(ctx context.Context) (*rpc.Subscripti for { select { - case txs := <-txsCh: + case txs, ok := <-txsCh: for _, t := range txs { if t != nil { err := notifier.Notify(rpcSub.ID, t.Hash()) if err != nil { log.Warn("error while notifying subscription", "err", err) + return } } } + if !ok { + log.Warn("new pending transactions channel was closed") + return + } case <-rpcSub.Err(): return } @@ -230,13 +238,19 @@ func (api *APIImpl) Logs(ctx context.Context, crit filters.FilterCriteria) (*rpc logs := make(chan *types.Log, 1) id := api.filters.SubscribeLogs(logs, crit) defer api.filters.UnsubscribeLogs(id) - for { select { - case h := <-logs: - err := notifier.Notify(rpcSub.ID, h) - if err != nil { - log.Warn("error while notifying subscription", "err", err) + case h, ok := <-logs: + if h != nil { + err := notifier.Notify(rpcSub.ID, h) + if err != nil { + log.Warn("error while notifying subscription", "err", err) + return + } + } + if !ok { + 
log.Warn("log channel was closed") + return } case <-rpcSub.Err(): return diff --git a/cmd/rpcdaemon/commands/eth_filters_test.go b/cmd/rpcdaemon/commands/eth_filters_test.go new file mode 100644 index 00000000000..ca4366bb99f --- /dev/null +++ b/cmd/rpcdaemon/commands/eth_filters_test.go @@ -0,0 +1,45 @@ +package commands + +import ( + "testing" + + "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" + "github.com/ledgerwatch/erigon-lib/kv/kvcache" + "github.com/ledgerwatch/erigon/cmd/rpcdaemon/rpcdaemontest" + "github.com/ledgerwatch/erigon/eth/filters" + "github.com/ledgerwatch/erigon/turbo/rpchelper" + "github.com/ledgerwatch/erigon/turbo/snapshotsync" + "github.com/ledgerwatch/erigon/turbo/stages" + "github.com/stretchr/testify/assert" +) + +func TestNewFilters(t *testing.T) { + assert := assert.New(t) + db := rpcdaemontest.CreateTestKV(t) + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, stages.Mock(t)) + mining := txpool.NewMiningClient(conn) + ff := rpchelper.New(ctx, nil, nil, mining, func() {}) + api := NewEthAPI(NewBaseApi(ff, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + + ptf, err := api.NewPendingTransactionFilter(ctx) + assert.Nil(err) + + nf, err := api.NewFilter(ctx, filters.FilterCriteria{}) + assert.Nil(err) + + bf, err := api.NewBlockFilter(ctx) + assert.Nil(err) + + ok, err := api.UninstallFilter(ctx, nf) + assert.Nil(err) + assert.Equal(ok, true) + + ok, err = api.UninstallFilter(ctx, bf) + assert.Nil(err) + assert.Equal(ok, true) + + ok, err = api.UninstallFilter(ctx, ptf) + assert.Nil(err) + assert.Equal(ok, true) +} diff --git a/cmd/rpcdaemon/commands/eth_subscribe_test.go b/cmd/rpcdaemon/commands/eth_subscribe_test.go index db689e44771..a997495bd99 100644 --- a/cmd/rpcdaemon/commands/eth_subscribe_test.go +++ b/cmd/rpcdaemon/commands/eth_subscribe_test.go @@ -43,7 +43,6 @@ func TestEthSubscribe(t *testing.T) { ff := rpchelper.New(ctx, backend, nil, nil, func() {}) newHeads := make(chan *types.Header) - defer close(newHeads) id := ff.SubscribeNewHeads(newHeads) defer ff.UnsubscribeHeads(id) diff --git a/cmd/rpcdaemon22/commands/eth_api.go b/cmd/rpcdaemon22/commands/eth_api.go index 982b79a86de..0ad9730cb4f 100644 --- a/cmd/rpcdaemon22/commands/eth_api.go +++ b/cmd/rpcdaemon22/commands/eth_api.go @@ -54,9 +54,9 @@ type EthAPI interface { GetUncleCountByBlockHash(ctx context.Context, hash common.Hash) (*hexutil.Uint, error) // Filter related (see ./eth_filters.go) - NewPendingTransactionFilter(_ context.Context) (common.Hash, error) - NewBlockFilter(_ context.Context) (common.Hash, error) - NewFilter(_ context.Context, crit ethFilters.FilterCriteria) (common.Hash, error) + NewPendingTransactionFilter(_ context.Context) (string, error) + NewBlockFilter(_ context.Context) (string, error) + NewFilter(_ context.Context, crit ethFilters.FilterCriteria) (string, error) UninstallFilter(_ context.Context, index string) (bool, error) GetFilterChanges(_ context.Context, index string) ([]interface{}, error) diff --git a/cmd/rpcdaemon22/commands/eth_filters.go b/cmd/rpcdaemon22/commands/eth_filters.go index f99ced97f1c..05c69ff1898 100644 --- a/cmd/rpcdaemon22/commands/eth_filters.go +++ b/cmd/rpcdaemon22/commands/eth_filters.go @@ -2,9 +2,8 @@ package commands import ( "context" - "fmt" + "time" - "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/debug" "github.com/ledgerwatch/erigon/common/hexutil" "github.com/ledgerwatch/erigon/core/types" @@ -15,9 
+14,9 @@ import ( ) // NewPendingTransactionFilter new transaction filter -func (api *APIImpl) NewPendingTransactionFilter(_ context.Context) (common.Hash, error) { +func (api *APIImpl) NewPendingTransactionFilter(_ context.Context) (string, error) { if api.filters == nil { - return common.Hash{}, rpc.ErrNotificationsUnsupported + return "", rpc.ErrNotificationsUnsupported } txsCh := make(chan []types.Transaction, 1) id := api.filters.SubscribePendingTxs(txsCh) @@ -30,19 +29,20 @@ func (api *APIImpl) NewPendingTransactionFilter(_ context.Context) (common.Hash, } api.filters.AddPendingTxs(id, txs) default: + time.Sleep(time.Second) } } }() - return common.HexToHash(string(id)), nil + return "0x" + string(id), nil } // NewBlockFilter implements eth_newBlockFilter. Creates a filter in the node, to notify when a new block arrives. -func (api *APIImpl) NewBlockFilter(_ context.Context) (common.Hash, error) { +func (api *APIImpl) NewBlockFilter(_ context.Context) (string, error) { if api.filters == nil { - return common.Hash{}, rpc.ErrNotificationsUnsupported + return "", rpc.ErrNotificationsUnsupported } - ch := make(chan *types.Block, 1) - id := api.filters.SubscribePendingBlock(ch) + ch := make(chan *types.Header, 1) + id := api.filters.SubscribeNewHeads(ch) go func() { for { select { @@ -52,16 +52,17 @@ func (api *APIImpl) NewBlockFilter(_ context.Context) (common.Hash, error) { } api.filters.AddPendingBlock(id, block) default: + time.Sleep(time.Second) } } }() - return common.HexToHash(string(id)), nil + return "0x" + string(id), nil } // NewFilter implements eth_newFilter. Creates an arbitrary filter object, based on filter options, to notify when the state changes (logs). -func (api *APIImpl) NewFilter(_ context.Context, crit filters.FilterCriteria) (common.Hash, error) { +func (api *APIImpl) NewFilter(_ context.Context, crit filters.FilterCriteria) (string, error) { if api.filters == nil { - return common.Hash{}, rpc.ErrNotificationsUnsupported + return "", rpc.ErrNotificationsUnsupported } logs := make(chan *types.Log, 1) id := api.filters.SubscribeLogs(logs, crit) @@ -74,10 +75,11 @@ func (api *APIImpl) NewFilter(_ context.Context, crit filters.FilterCriteria) (c } api.filters.AddLogs(id, lg) default: + time.Sleep(time.Second) } } }() - return common.HexToHash(hexutil.EncodeUint64(uint64(id))), nil + return hexutil.EncodeUint64(uint64(id)), nil } // UninstallFilter new transaction filter @@ -85,20 +87,20 @@ func (api *APIImpl) UninstallFilter(_ context.Context, index string) (bool, erro if api.filters == nil { return false, rpc.ErrNotificationsUnsupported } - if common.IsHexAddress32(index) { - // remove 0x - if len(index) >= 2 && index[0] == '0' && (index[1] == 'x' || index[1] == 'X') { - index = index[2:] - } - isDeleted := api.filters.UnsubscribePendingBlock(rpchelper.PendingBlockSubID(index)) || - api.filters.UnsubscribePendingTxs(rpchelper.PendingTxsSubID(index)) - id, err := hexutil.DecodeUint64(index) - if err == nil { - return isDeleted || api.filters.UnsubscribeLogs(rpchelper.LogsSubID(id)), nil - } + var isDeleted bool + // remove 0x + cutIndex := index + if len(index) >= 2 && index[0] == '0' && (index[1] == 'x' || index[1] == 'X') { + cutIndex = index[2:] + } + isDeleted = api.filters.UnsubscribeHeads(rpchelper.HeadsSubID(cutIndex)) || + api.filters.UnsubscribePendingTxs(rpchelper.PendingTxsSubID(cutIndex)) + id, err := hexutil.DecodeUint64(index) + if err == nil { + return isDeleted || api.filters.UnsubscribeLogs(rpchelper.LogsSubID(id)), nil } - return false, nil + return 
isDeleted, nil } // GetFilterChanges implements eth_getFilterChanges. Polling method for a previously-created filter, which returns an array of logs which occurred since last poll. @@ -107,35 +109,36 @@ func (api *APIImpl) GetFilterChanges(_ context.Context, index string) ([]interfa return nil, rpc.ErrNotificationsUnsupported } stub := make([]interface{}, 0) - if common.IsHexAddress32(index) { - // remove 0x - if len(index) >= 2 && index[0] == '0' && (index[1] == 'x' || index[1] == 'X') { - index = index[2:] - } - if blocks, ok := api.filters.ReadPendingBlocks(rpchelper.PendingBlockSubID(index)); ok { - for _, v := range blocks { - stub = append(stub, v.Hash()) - } - return stub, nil + + // remove 0x + cutIndex := index + if len(index) >= 2 && index[0] == '0' && (index[1] == 'x' || index[1] == 'X') { + cutIndex = index[2:] + } + if blocks, ok := api.filters.ReadPendingBlocks(rpchelper.HeadsSubID(cutIndex)); ok { + for _, v := range blocks { + stub = append(stub, v.Hash()) } - if txs, ok := api.filters.ReadPendingTxs(rpchelper.PendingTxsSubID(index)); ok { - for _, v := range txs { - for _, tx := range v { - stub = append(stub, tx.Hash()) - } + return stub, nil + } + if txs, ok := api.filters.ReadPendingTxs(rpchelper.PendingTxsSubID(cutIndex)); ok { + for _, v := range txs { + for _, tx := range v { + stub = append(stub, tx.Hash()) } return stub, nil } - id, err := hexutil.DecodeUint64(index) - if err != nil { - return stub, fmt.Errorf("eth_getFilterChanges, wrong index: %w", err) - } - if logs, ok := api.filters.ReadLogs(rpchelper.LogsSubID(id)); ok { - for _, v := range logs { - stub = append(stub, v) - } - return stub, nil + return stub, nil + } + id, err := hexutil.DecodeUint64(index) + if err != nil { + return stub, nil + } + if logs, ok := api.filters.ReadLogs(rpchelper.LogsSubID(id)); ok { + for _, v := range logs { + stub = append(stub, v) } + return stub, nil } return stub, nil } diff --git a/cmd/rpcdaemon22/commands/eth_subscribe_test.go b/cmd/rpcdaemon22/commands/eth_subscribe_test.go index 9b29d53c5f4..875b7a2456a 100644 --- a/cmd/rpcdaemon22/commands/eth_subscribe_test.go +++ b/cmd/rpcdaemon22/commands/eth_subscribe_test.go @@ -42,7 +42,6 @@ func TestEthSubscribe(t *testing.T) { ff := rpchelper.New(ctx, backend, nil, nil, func() {}) newHeads := make(chan *types.Header) - defer close(newHeads) id := ff.SubscribeNewHeads(newHeads) defer ff.UnsubscribeHeads(id) diff --git a/turbo/rpchelper/filters.go b/turbo/rpchelper/filters.go index c279744e0c5..1a90ee3754a 100644 --- a/turbo/rpchelper/filters.go +++ b/turbo/rpchelper/filters.go @@ -49,7 +49,7 @@ type Filters struct { storeMu sync.Mutex logsStores map[LogsSubID][]*types.Log - pendingBlockStores map[PendingBlockSubID][]*types.Block + pendingHeadsStores map[HeadsSubID][]*types.Header pendingTxsStores map[PendingTxsSubID][][]types.Transaction } @@ -64,7 +64,7 @@ func New(ctx context.Context, ethBackend ApiBackend, txPool txpool.TxpoolClient, logsSubs: NewLogsFilterAggregator(), onNewSnapshot: onNewSnapshot, logsStores: make(map[LogsSubID][]*types.Log), - pendingBlockStores: make(map[PendingBlockSubID][]*types.Block), + pendingHeadsStores: make(map[HeadsSubID][]*types.Header), pendingTxsStores: make(map[PendingTxsSubID][][]types.Transaction), } @@ -310,10 +310,17 @@ func (ff *Filters) SubscribeNewHeads(out chan *types.Header) HeadsSubID { return id } -func (ff *Filters) UnsubscribeHeads(id HeadsSubID) { +func (ff *Filters) UnsubscribeHeads(id HeadsSubID) bool { ff.mu.Lock() defer ff.mu.Unlock() - delete(ff.headsSubs, id) + if ch, ok 
:= ff.headsSubs[id]; ok { + close(ch) + ff.storeMu.Lock() + defer ff.storeMu.Unlock() + delete(ff.pendingHeadsStores, id) + return true + } + return false } func (ff *Filters) SubscribePendingLogs(c chan types.Logs) PendingLogsSubID { @@ -338,18 +345,10 @@ func (ff *Filters) SubscribePendingBlock(f chan *types.Block) PendingBlockSubID return id } -func (ff *Filters) UnsubscribePendingBlock(id PendingBlockSubID) bool { +func (ff *Filters) UnsubscribePendingBlock(id PendingBlockSubID) { ff.mu.Lock() defer ff.mu.Unlock() - if ch, ok := ff.pendingBlockSubs[id]; ok { - close(ch) - delete(ff.pendingBlockSubs, id) - ff.storeMu.Lock() - defer ff.storeMu.Unlock() - delete(ff.pendingBlockStores, id) - return true - } - return false + delete(ff.pendingBlockSubs, id) } func (ff *Filters) SubscribePendingTxs(out chan []types.Transaction) PendingTxsSubID { @@ -576,28 +575,28 @@ func (ff *Filters) ReadLogs(id LogsSubID) ([]*types.Log, bool) { return res, true } -func (ff *Filters) AddPendingBlock(id PendingBlockSubID, block *types.Block) { +func (ff *Filters) AddPendingBlock(id HeadsSubID, block *types.Header) { ff.storeMu.Lock() defer ff.storeMu.Unlock() - st, ok := ff.pendingBlockStores[id] + st, ok := ff.pendingHeadsStores[id] if !ok { - st = make([]*types.Block, 0) + st = make([]*types.Header, 0) } st = append(st, block) - ff.pendingBlockStores[id] = st + ff.pendingHeadsStores[id] = st } -func (ff *Filters) ReadPendingBlocks(id PendingBlockSubID) ([]*types.Block, bool) { +func (ff *Filters) ReadPendingBlocks(id HeadsSubID) ([]*types.Header, bool) { ff.storeMu.Lock() defer ff.storeMu.Unlock() - res := make([]*types.Block, 0) - st, ok := ff.pendingBlockStores[id] + res := make([]*types.Header, 0) + st, ok := ff.pendingHeadsStores[id] if !ok { return res, false } res = append(res, st...) 
- st = make([]*types.Block, 0) - ff.pendingBlockStores[id] = st + st = make([]*types.Header, 0) + ff.pendingHeadsStores[id] = st return res, true } diff --git a/turbo/rpchelper/logsfilter.go b/turbo/rpchelper/logsfilter.go index 62a4d45fd4a..621bbaa72bb 100644 --- a/turbo/rpchelper/logsfilter.go +++ b/turbo/rpchelper/logsfilter.go @@ -94,7 +94,6 @@ func (a *LogsFilterAggregator) addLogsFilters(f *LogsFilter) { func (a *LogsFilterAggregator) distributeLog(eventLog *remote.SubscribeLogsReply) error { a.logsFilterLock.Lock() defer a.logsFilterLock.Unlock() - filtersToDelete := make(map[LogsSubID]*LogsFilter) for _, filter := range a.logsFilters { if filter.allAddrs == 0 { _, addrOk := filter.addrs[gointerfaces.ConvertH160toAddress(eventLog.Address)] @@ -124,11 +123,6 @@ func (a *LogsFilterAggregator) distributeLog(eventLog *remote.SubscribeLogsReply } filter.sender <- lg } - // remove malfunctioned filters - for filterId, filter := range filtersToDelete { - a.subtractLogFilters(filter) - delete(a.logsFilters, filterId) - } return nil } From 389af4fc067478f4e2731a4cd055104ec3e2793d Mon Sep 17 00:00:00 2001 From: Enrique Jose Avila Asapche Date: Fri, 24 Jun 2022 16:48:40 +0300 Subject: [PATCH 101/136] check if block is nil (#4528) * check if block is nil * added to rpcdaemon22 --- cmd/rpcdaemon/commands/eth_system.go | 3 +++ cmd/rpcdaemon22/commands/eth_system.go | 3 +++ eth/gasprice/gasprice.go | 3 +++ 3 files changed, 9 insertions(+) diff --git a/cmd/rpcdaemon/commands/eth_system.go b/cmd/rpcdaemon/commands/eth_system.go index 654c4fc2879..3a4bea0e5bf 100644 --- a/cmd/rpcdaemon/commands/eth_system.go +++ b/cmd/rpcdaemon/commands/eth_system.go @@ -203,6 +203,9 @@ func (b *GasPriceOracleBackend) HeaderByNumber(ctx context.Context, number rpc.B if err != nil { return nil, err } + if block == nil { + return nil, nil + } return block.Header(), nil } func (b *GasPriceOracleBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) { diff --git a/cmd/rpcdaemon22/commands/eth_system.go b/cmd/rpcdaemon22/commands/eth_system.go index 1095cdd82a1..32cc015c821 100644 --- a/cmd/rpcdaemon22/commands/eth_system.go +++ b/cmd/rpcdaemon22/commands/eth_system.go @@ -202,6 +202,9 @@ func (b *GasPriceOracleBackend) HeaderByNumber(ctx context.Context, number rpc.B if err != nil { return nil, err } + if block == nil { + return nil, nil + } return block.Header(), nil } func (b *GasPriceOracleBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) { diff --git a/eth/gasprice/gasprice.go b/eth/gasprice/gasprice.go index 168a4846629..63a57aa1dab 100644 --- a/eth/gasprice/gasprice.go +++ b/eth/gasprice/gasprice.go @@ -118,6 +118,9 @@ func NewOracle(backend OracleBackend, params Config) *Oracle { // baseFee to the returned bigInt func (gpo *Oracle) SuggestTipCap(ctx context.Context) (*big.Int, error) { head, _ := gpo.backend.HeaderByNumber(ctx, rpc.LatestBlockNumber) + if head == nil { + return gpo.lastPrice, nil + } headHash := head.Hash() // If the latest gasprice is still available, return it. 
From 9b8888d797f5bc64a74a23b7ce631a45cc7bac1e Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Fri, 24 Jun 2022 15:52:04 +0200 Subject: [PATCH 102/136] Delete bad blocks on unwind (#4529) * Delete bad headers * Delete bad bodies --- cmd/integration/commands/stages.go | 2 +- core/rawdb/accessors_chain.go | 47 +++++++++++++++++---------- core/rawdb/accessors_chain_test.go | 2 +- core/rawdb/rawdbreset/reset_stages.go | 4 +-- eth/stagedsync/stage_bodies.go | 4 ++- eth/stagedsync/stage_bodies_test.go | 4 +-- eth/stagedsync/stage_headers.go | 7 +++- migrations/txs_begin_end_test.go | 2 +- 8 files changed, 46 insertions(+), 26 deletions(-) diff --git a/cmd/integration/commands/stages.go b/cmd/integration/commands/stages.go index bbb8a617ce4..54b18bcbfec 100644 --- a/cmd/integration/commands/stages.go +++ b/cmd/integration/commands/stages.go @@ -486,7 +486,7 @@ func stageHeaders(db kv.RwDB, ctx context.Context) error { } } // remove all canonical markers from this point - if err = rawdb.TruncateCanonicalHash(tx, progress+1); err != nil { + if err = rawdb.TruncateCanonicalHash(tx, progress+1, false); err != nil { return err } if err = rawdb.TruncateTd(tx, progress+1); err != nil { diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go index 7da67b4a896..5393535b61f 100644 --- a/core/rawdb/accessors_chain.go +++ b/core/rawdb/accessors_chain.go @@ -59,8 +59,11 @@ func WriteCanonicalHash(db kv.Putter, hash common.Hash, number uint64) error { } // TruncateCanonicalHash removes all the number to hash canonical mapping from block number N -func TruncateCanonicalHash(tx kv.RwTx, blockFrom uint64) error { - if err := tx.ForEach(kv.HeaderCanonical, dbutils.EncodeBlockNumber(blockFrom), func(k, _ []byte) error { +func TruncateCanonicalHash(tx kv.RwTx, blockFrom uint64, deleteHeaders bool) error { + if err := tx.ForEach(kv.HeaderCanonical, dbutils.EncodeBlockNumber(blockFrom), func(k, v []byte) error { + if deleteHeaders { + deleteHeader(tx, common.BytesToHash(v), blockFrom) + } return tx.Delete(kv.HeaderCanonical, k, nil) }); err != nil { return fmt.Errorf("TruncateCanonicalHash: %w", err) @@ -768,7 +771,7 @@ func MakeBodiesCanonical(tx kv.RwTx, from uint64, ctx context.Context, logPrefix } // MakeBodiesNonCanonical - move all txs of canonical blocks to NonCanonicalTxs bucket -func MakeBodiesNonCanonical(tx kv.RwTx, from uint64, ctx context.Context, logPrefix string, logEvery *time.Ticker) error { +func MakeBodiesNonCanonical(tx kv.RwTx, from uint64, deleteBodies bool, ctx context.Context, logPrefix string, logEvery *time.Ticker) error { var firstMovedTxnID uint64 var firstMovedTxnIDIsSet bool for blockNum := from; ; blockNum++ { @@ -793,17 +796,22 @@ func MakeBodiesNonCanonical(tx kv.RwTx, from uint64, ctx context.Context, logPre firstMovedTxnID = bodyForStorage.BaseTxId } - // move txs to NonCanonical bucket, it has own sequence - newBaseId, err := tx.IncrementSequence(kv.NonCanonicalTxs, uint64(bodyForStorage.TxAmount)) - if err != nil { - return err + newBaseId := uint64(0) + if !deleteBodies { + // move txs to NonCanonical bucket, it has own sequence + newBaseId, err = tx.IncrementSequence(kv.NonCanonicalTxs, uint64(bodyForStorage.TxAmount)) + if err != nil { + return err + } } // next loop does move only non-system txs. 
need move system-txs manually (because they may not exist) i := uint64(0) if err := tx.ForAmount(kv.EthTx, dbutils.EncodeBlockNumber(bodyForStorage.BaseTxId+1), bodyForStorage.TxAmount-2, func(k, v []byte) error { - id := newBaseId + 1 + i - if err := tx.Put(kv.NonCanonicalTxs, dbutils.EncodeBlockNumber(id), v); err != nil { - return err + if !deleteBodies { + id := newBaseId + 1 + i + if err := tx.Put(kv.NonCanonicalTxs, dbutils.EncodeBlockNumber(id), v); err != nil { + return err + } } if err := tx.Delete(kv.EthTx, k, nil); err != nil { return err @@ -813,9 +821,14 @@ func MakeBodiesNonCanonical(tx kv.RwTx, from uint64, ctx context.Context, logPre }); err != nil { return err } - bodyForStorage.BaseTxId = newBaseId - if err := WriteBodyForStorage(tx, h, blockNum, bodyForStorage); err != nil { - return err + + if deleteBodies { + deleteBody(tx, h, blockNum) + } else { + bodyForStorage.BaseTxId = newBaseId + if err := WriteBodyForStorage(tx, h, blockNum, bodyForStorage); err != nil { + return err + } } select { @@ -1158,8 +1171,8 @@ func WriteBlock(db kv.RwTx, block *types.Block) error { // DeleteAncientBlocks - delete [1, to) old blocks after moving it to snapshots. // keeps genesis in db: [1, to) -// doesn't change sequnces of kv.EthTx and kv.NonCanonicalTxs -// doesn't delete Reciepts, Senders, Canonical markers, TotalDifficulty +// doesn't change sequences of kv.EthTx and kv.NonCanonicalTxs +// doesn't delete Receipts, Senders, Canonical markers, TotalDifficulty // returns [deletedFrom, deletedTo) func DeleteAncientBlocks(tx kv.RwTx, blockTo uint64, blocksDeleteLimit int) (deletedFrom, deletedTo uint64, err error) { c, err := tx.Cursor(kv.Headers) @@ -1283,8 +1296,8 @@ func SecondKey(tx kv.Tx, table string) ([]byte, error) { } // TruncateBlocks - delete block >= blockFrom -// does decrement sequnces of kv.EthTx and kv.NonCanonicalTxs -// doesn't delete Reciepts, Senders, Canonical markers, TotalDifficulty +// does decrement sequences of kv.EthTx and kv.NonCanonicalTxs +// doesn't delete Receipts, Senders, Canonical markers, TotalDifficulty func TruncateBlocks(ctx context.Context, tx kv.RwTx, blockFrom uint64) error { logEvery := time.NewTicker(20 * time.Second) defer logEvery.Stop() diff --git a/core/rawdb/accessors_chain_test.go b/core/rawdb/accessors_chain_test.go index 7239ffafde0..efb89dcfa3c 100644 --- a/core/rawdb/accessors_chain_test.go +++ b/core/rawdb/accessors_chain_test.go @@ -302,7 +302,7 @@ func TestCanonicalMappingStorage(t *testing.T) { t.Fatalf("Retrieved canonical mapping mismatch: have %v, want %v", entry, hash) } // Delete the TD and verify the execution - err = TruncateCanonicalHash(tx, number) + err = TruncateCanonicalHash(tx, number, false) if err != nil { t.Fatalf("DeleteCanonicalHash failed: %v", err) } diff --git a/core/rawdb/rawdbreset/reset_stages.go b/core/rawdb/rawdbreset/reset_stages.go index c8842a104ca..2bf60380e1b 100644 --- a/core/rawdb/rawdbreset/reset_stages.go +++ b/core/rawdb/rawdbreset/reset_stages.go @@ -55,7 +55,7 @@ func ResetBlocks(tx kv.RwTx) error { } // remove all canonical markers from this point - if err := rawdb.TruncateCanonicalHash(tx, 1); err != nil { + if err := rawdb.TruncateCanonicalHash(tx, 1, false); err != nil { return err } if err := rawdb.TruncateTd(tx, 1); err != nil { @@ -69,7 +69,7 @@ func ResetBlocks(tx kv.RwTx) error { return err } - // ensure no grabage records left (it may happen if db is inconsistent) + // ensure no garbage records left (it may happen if db is inconsistent) if err := tx.ForEach(kv.BlockBody, 
dbutils.EncodeBlockNumber(2), func(k, _ []byte) error { return tx.Delete(kv.BlockBody, k, nil) }); err != nil { return err } diff --git a/eth/stagedsync/stage_bodies.go b/eth/stagedsync/stage_bodies.go index 86412f711a3..79109eb12f4 100644 --- a/eth/stagedsync/stage_bodies.go +++ b/eth/stagedsync/stage_bodies.go @@ -9,6 +9,7 @@ import ( "github.com/c2h5oh/datasize" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/params" @@ -281,7 +282,8 @@ func UnwindBodiesStage(u *UnwindState, tx kv.RwTx, cfg BodiesCfg, ctx context.Co logEvery := time.NewTicker(logInterval) defer logEvery.Stop() - if err := rawdb.MakeBodiesNonCanonical(tx, u.UnwindPoint+1, ctx, u.LogPrefix(), logEvery); err != nil { + badBlock := u.BadBlock != (common.Hash{}) + if err := rawdb.MakeBodiesNonCanonical(tx, u.UnwindPoint+1, badBlock /* deleteBodies */, ctx, u.LogPrefix(), logEvery); err != nil { return err } diff --git a/eth/stagedsync/stage_bodies_test.go b/eth/stagedsync/stage_bodies_test.go index 9580d322dad..01e16c13794 100644 --- a/eth/stagedsync/stage_bodies_test.go +++ b/eth/stagedsync/stage_bodies_test.go @@ -35,7 +35,7 @@ func TestBodiesUnwind(t *testing.T) { require.NoError(err) } { - err = rawdb.MakeBodiesNonCanonical(tx, 5+1, ctx, "test", logEvery) // block 5 already canonical, start from next one + err = rawdb.MakeBodiesNonCanonical(tx, 5+1, false, ctx, "test", logEvery) // block 5 already canonical, start from next one require.NoError(err) n, err := tx.ReadSequence(kv.EthTx) @@ -61,7 +61,7 @@ func TestBodiesUnwind(t *testing.T) { { // unwind to block 5, means mark blocks >= 6 as non-canonical - err = rawdb.MakeBodiesNonCanonical(tx, 5+1, ctx, "test", logEvery) + err = rawdb.MakeBodiesNonCanonical(tx, 5+1, false, ctx, "test", logEvery) require.NoError(err) n, err := tx.ReadSequence(kv.EthTx) diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index d96767a3e13..1d8b554f310 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -953,7 +953,7 @@ func HeadersUnwind(u *UnwindState, s *StageState, tx kv.RwTx, cfg HeadersCfg, te return fmt.Errorf("iterate over headers to mark bad headers: %w", err) } } - if err := rawdb.TruncateCanonicalHash(tx, u.UnwindPoint+1); err != nil { + if err := rawdb.TruncateCanonicalHash(tx, u.UnwindPoint+1, badBlock /* deleteHeaders */); err != nil { return err } if badBlock { @@ -996,6 +996,11 @@ func HeadersUnwind(u *UnwindState, s *StageState, tx kv.RwTx, cfg HeadersCfg, te return err } } + /* TODO(yperbasis): Is it safe? 
+ if err := rawdb.TruncateTd(tx, u.UnwindPoint+1); err != nil { + return err + } + */ if maxNum == 0 { maxNum = u.UnwindPoint if maxHash, err = rawdb.ReadCanonicalHash(tx, maxNum); err != nil { diff --git a/migrations/txs_begin_end_test.go b/migrations/txs_begin_end_test.go index 148c4bcc4ae..46f8b1b2eeb 100644 --- a/migrations/txs_begin_end_test.go +++ b/migrations/txs_begin_end_test.go @@ -41,7 +41,7 @@ func TestTxsBeginEnd(t *testing.T) { return err } - err = rawdb.TruncateCanonicalHash(tx, 7) + err = rawdb.TruncateCanonicalHash(tx, 7, false) for i := uint64(7); i < 10; i++ { require.NoError(err) hash := common.Hash{0xa, byte(i)} From 0982edda380e9c7ea0a700bdc934f62c5c6436d4 Mon Sep 17 00:00:00 2001 From: Andrea Lanfranchi Date: Sat, 25 Jun 2022 09:50:36 +0200 Subject: [PATCH 103/136] Wmake.ps1 changes (#4532) * wmake.ps1 changes * Polish knobs * wnoSubmoduleUpdate -> WnoSubmoduleUpdate * Remove leftover * Remove pollution of GODEBUG --- wmake.ps1 | 215 +++++++++++++++++++++++++++++++++--------------------- 1 file changed, 131 insertions(+), 84 deletions(-) diff --git a/wmake.ps1 b/wmake.ps1 index 7202f105d30..0771cbbe230 100644 --- a/wmake.ps1 +++ b/wmake.ps1 @@ -39,7 +39,8 @@ Param( "txpool", "all" )] - [string[]]$BuildTargets=@("erigon", "rpcdaemon", "sentry", "downloader", "integration") + [string[]]$BuildTargets=@("erigon", "rpcdaemon", "sentry", "downloader", "integration"), + [switch]$WnoSubmoduleUpdate ) # Sanity checks on $BuildTargets @@ -98,31 +99,6 @@ $headerText = @" "@ -$gitErrorText = @" - - Requirement Error. - You need to have Git installed - Please visit https://git-scm.com/downloads and download the appropriate - installer. - -"@ - -$goMinMinorVersion = 18 -$goMinVersion = "1.$goMinMinorVersion" - -$goErrorText = @" - - Requirement Error. - You need to have Go Programming Language (aka golang) installed. - Minimum required version is $goMinVersion - Please visit https://golang.org/dl/ and download the appropriate - installer. - Ensure that go.exe installation - directory is properly inserted into your PATH - environment variable. - -"@ - $chocolateyErrorText = @" Requirement Error. @@ -210,14 +186,53 @@ function Get-Uninstall-Item { # Returns : $true / $false # ----------------------------------------------------------------------------- function Test-GO-Installed { - $versionStr = go.exe version - if (!($?)) { - return $false - } - - $minorVersionStr = $versionStr.Substring(15, 2) - $minorVersion = [int]$minorVersionStr - return ($minorVersion -ge $goMinMinorVersion) + param ([string]$MinVersion = "" ) + + $Private:GOcmd = (Get-Command -CommandType Application -ErrorAction SilentlyContinue "go.exe") + if ($Private:GOcmd -eq $null) { + Write-Host @" + + Error ! + Could not locate GO language binary executable go.exe + Either you don't have GO installed or GO binary directory (usually C:\Program Files\Go\bin\) is not + properly listed in your PATH environment variable. + If the first please visit https://golang.org/dl/ and download the appropriate installer. + If the latter please edit your PATH environment variable ad add the Go binary directory. 
+ +"@ + return $false + + } + + # Go version is not detected by Get-Command hence we need to query for it + $Private:tmpstr = [string]@(go.exe version) + if ($Private:tmpstr -match '\d{1,}\.\d{1,}(.\d{1,})?') { + $Private:GOversion = [Version]::Parse($matches[0]) + Write-Host " Found GO version $Private:GOversion" + if ($MinVersion -ne "") { + + $Private:GOMinversion = [Version]::Parse($MinVersion) + if ($Private:GOversion -lt $Private:GOMinversion) { + Write-Host @" + + Error ! + Minimum GO version required is $Private:GOMinversion + +"@ + return $false + } + } + return $true + } + + Write-Host @" + + Error ! + Could not detect GO version installed + +"@ + return $false + } # ----------------------------------------------------------------------------- @@ -227,15 +242,54 @@ function Test-GO-Installed { # Returns : $true / $false # ----------------------------------------------------------------------------- function Test-Git-Installed { - $Private:item = Get-Uninstall-Item "^Git version [0-9\.]{1,}|^Git$" - $Private:result = $false + param ([string]$MinVersion = "" ) + + $Private:GITcmd = (Get-Command -CommandType Application -ErrorAction SilentlyContinue "git.exe") + if ($Private:GITcmd -eq $null) { + Write-Host @" - if ($Private:item) { - Write-Host " Found Git version $($Private:item.DisplayVersion)" - $Private:result = $true + Error ! + Could not locate git command utility git.exe + Either you don't have GIT installed or GIT binary directory (usually C:\Program Files\Git\cmd\) is not + properly listed in your PATH environment variable. + If the first please visit https://git-scm.com/downloads and download the appropriate installer. + If the latter please edit your PATH environment variable ad add the Git binary directory. + +"@ + return $false + + } + + # Go version is not detected by Get-Command hence we need to query for it + $Private:tmpstr = [string]@(git.exe --version) + if ($Private:tmpstr -match '\d{1,}\.\d{1,}(.\d{1,})?') { + $Private:GITversion = [Version]::Parse($matches[0]) + Write-Host " Found GIT version $Private:GITversion" + if ($MinVersion -ne "") { + + $Private:GITMinversion = [Version]::Parse($MinVersion) + if ($Private:GITversion -lt $Private:GITMinversion) { + Write-Host @" + + Error ! + Minimum GIT version required is $Private:GITMinversion + +"@ + return $false + } + } + return $true } - Write-Output $Private:result + Write-Host @" + + Error ! + Could not detect GIT version installed + +"@ + return $false + + } # ----------------------------------------------------------------------------- @@ -334,30 +388,26 @@ $MyContext.PSVer = [int]$PSVersionTable.PSVersion.Major # ==================================================================== # ## Test requirements # ==================================================================== -## Test Git is installed -if(!(Test-Git-Installed)) { - Write-Host $gitErrorText - exit 1 -} -Get-Command git.exe | Out-Null -if (!($?)) { +Set-Location $MyContext.Directory + +## Test we're a git cloned repo +if (!Test-Path -Path [string](Join-Path $MyContext.Directory "\.git") -PathType Directory) { Write-Host @" - - Error ! - Though Git installation is found I could not get - the Git binary executable. Ensure Git installation - directory is properly inserted into your PATH - environment variable. + + Error ! 
+ Directory $MyContext.Directory does not seem to be a properly cloned Erigon repository + Please clone it using + git clone --recurse-submodules -j8 https://github.com/ledgerwatch/erigon.git "@ exit 1 } -## GO language is installed -if(!(Test-GO-Installed)) { - Write-Host $goErrorText - exit 1 -} +## Test Git is installed +if(!(Test-Git-Installed)) { exit 1 } + +## Test GO language is installed AND min version +if(!(Test-GO-Installed "1.18")) { exit 1 } # Build erigon binaries Set-Variable -Name "Erigon" -Value ([hashtable]::Synchronized(@{})) -Scope Script @@ -368,11 +418,10 @@ $Erigon.Tag = [string]@(git.exe describe --tags) $Erigon.BuildTags = "nosqlite,noboltdb" $Erigon.Package = "github.com/ledgerwatch/erigon" -$Erigon.BuildFlags = "-trimpath -tags $($Erigon.BuildTags) -buildvcs=false" +$Erigon.BuildFlags = "-trimpath -tags $($Erigon.BuildTags) -buildvcs=false -v" $Erigon.BuildFlags += " -ldflags ""-X $($Erigon.Package)/params.GitCommit=$($Erigon.Commit) -X $($Erigon.Package)/params.GitBranch=$($Erigon.Branch) -X $($Erigon.Package)/params.GitTag=$($Erigon.Tag)""" $Erigon.BinPath = [string](Join-Path $MyContext.StartDir "\build\bin") -$Erigon.Submodules = $false $env:GO111MODULE = "on" New-Item -Path $Erigon.BinPath -ItemType Directory -Force | Out-Null @@ -394,6 +443,15 @@ Write-Host @" "@ +if (!$WnoSubmoduleUpdate -and $BuildTargets[0] -ne "clean") { + Write-Host " Updating git submodules ..." + Invoke-Expression -Command "git.exe submodule update --init --recursive --force --quiet" + if (!($?)) { + Write-Host " ERROR : Update submodules failed" + exit 1 + } +} + foreach($BuildTarget in $BuildTargets) { ## Choco components for building db-tools @@ -412,13 +470,6 @@ if ($BuildTarget -eq "db-tools") { exit 1 } - if (!Test-Path -Path [string](Join-Path $Erigon.MDBXSourcePath "\.git") -PathType Directory) { - git.exe submodule update --init --recursive - if($LASTEXITCODE) { - Write-Host "An error has occurred while updating libmdbx submodule" - exit $LASTEXITCODE - } - } # Create build directory for mdbx and enter it $Erigon.MDBXBuildPath = [string](Join-Path $Erigon.BinPath "\mdbx") @@ -456,9 +507,8 @@ if ($BuildTarget -eq "db-tools") { Set-Location $MyContext.Directory # Eventually move all mdbx_*.exe to ./build/bin directory Move-Item -Path "$($Erigon.MDBXBuildPath)/mdbx_*.exe" -Destination $Erigon.BinPath -Force -} - -if ($BuildTarget -eq "clean") { + +} elseif ($BuildTarget -eq "clean") { Write-Host " Cleaning ..." # Remove ./build/bin directory @@ -474,9 +524,11 @@ if ($BuildTarget -eq "clean") { Invoke-Expression -Command $TestCommand | Out-Host if (!($?)) { Write-Host " ERROR : Tests failed" + Remove-Item Env:\GODEBUG exit 1 } else { Write-Host "`n Tests completed" + Remove-Item Env:\GODEBUG } } elseif ($BuildTarget -eq "test-integration") { @@ -492,28 +544,23 @@ if ($BuildTarget -eq "clean") { } } else { - if (!($Erigon.Submodules)) { - Write-Host " Updating git submodules ..." 
- Invoke-Expression -Command "git.exe submodule update --init --recursive --force" | Out-Host - if (!($?)) { - Write-Host " ERROR : Update submodules failed" - exit 1 - } - $Erigon.Submodules = $true - } + + # This has a naive assumption every target has a compilation unit wih same name Write-Host "`n Building $BuildTarget" $outExecutable = [string](Join-Path $Erigon.BinPath "$BuildTarget.exe") $BuildCommand = "go build $($Erigon.BuildFlags) -o ""$($outExecutable)"" ./cmd/$BuildTarget" - Invoke-Expression -Command $BuildCommand | Out-Host - if (!($?)) { - Write-Host " ERROR : Could not build $BuildTarget" + $BuildCommand += ';$?' + $success = Invoke-Expression -Command $BuildCommand + if (-not $success) { + Write-Host " ERROR : Could not build target $($BuildTarget)" exit 1 } else { Write-Host "`n Built $($BuildTarget). Run $($outExecutable) to launch" } + } } -# Return to source folder -Set-Location $MyContext.Directory +# Return to origin folder +Set-Location $MyContext.StartDir From 0d29c3d47d708a527a1d84d02f1e171aa036ed74 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Sat, 25 Jun 2022 19:39:39 +0600 Subject: [PATCH 104/136] Compress: reduce etl buffers to save RAM (#4536) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index dd9b5149c3c..ecea7d44e68 100644 --- a/go.mod +++ b/go.mod @@ -35,7 +35,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible - github.com/ledgerwatch/erigon-lib v0.0.0-20220620073929-46bebb3317d9 + github.com/ledgerwatch/erigon-lib v0.0.0-20220625091153-e7b09db04531 github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 diff --git a/go.sum b/go.sum index 6bb49e41719..b7a39052902 100644 --- a/go.sum +++ b/go.sum @@ -386,8 +386,8 @@ github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3P github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220620073929-46bebb3317d9 h1:FsdxNVS9xgxjeMOeVx5cuvtb5704KSBIeoL5dZqe4N8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220620073929-46bebb3317d9/go.mod h1:7sQ5B5m54zoo7RVRVukH3YZCYVrCC+BmwDBD+9KyTrE= +github.com/ledgerwatch/erigon-lib v0.0.0-20220625091153-e7b09db04531 h1:UKQC0chFY2s0wXOMDOyPEuUTwymsQRUpNHm7/5isnUo= +github.com/ledgerwatch/erigon-lib v0.0.0-20220625091153-e7b09db04531/go.mod h1:7sQ5B5m54zoo7RVRVukH3YZCYVrCC+BmwDBD+9KyTrE= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= From 1f4f850b89d6e0f3c02731bd4ad1f7c30b3bfeb8 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Sat, 25 Jun 2022 20:34:42 +0600 Subject: [PATCH 105/136] pass context around hased state stage (#4537) * save * save * save --- cmd/state/commands/state_root.go | 2 +- eth/stagedsync/stage_hashstate.go | 24 ++++++++++++------------ eth/stagedsync/stage_hashstate_test.go | 16 ++++++++++------ 3 files changed, 23 insertions(+), 19 deletions(-) diff --git a/cmd/state/commands/state_root.go 
b/cmd/state/commands/state_root.go index 7d00121beea..dc94011b8cd 100644 --- a/cmd/state/commands/state_root.go +++ b/cmd/state/commands/state_root.go @@ -141,7 +141,7 @@ func StateRoot(genesis *core.Genesis, logger log.Logger, blockNum uint64, datadi if err = rwTx.ClearBucket(kv.HashedStorage); err != nil { return err } - if err = stagedsync.PromoteHashedStateCleanly("hashedstate", rwTx, stagedsync.StageHashStateCfg(nil, stateDbPath), make(chan struct{})); err != nil { + if err = stagedsync.PromoteHashedStateCleanly("hashedstate", rwTx, stagedsync.StageHashStateCfg(nil, stateDbPath), ctx); err != nil { return err } var root common.Hash diff --git a/eth/stagedsync/stage_hashstate.go b/eth/stagedsync/stage_hashstate.go index 3a6d7f1f6c2..cb8c0a35a06 100644 --- a/eth/stagedsync/stage_hashstate.go +++ b/eth/stagedsync/stage_hashstate.go @@ -62,7 +62,7 @@ func SpawnHashStateStage(s *StageState, tx kv.RwTx, cfg HashStateCfg, ctx contex log.Info(fmt.Sprintf("[%s] Promoting plain state", logPrefix), "from", s.BlockNumber, "to", to) } if s.BlockNumber == 0 { // Initial hashing of the state is performed at the previous stage - if err := PromoteHashedStateCleanly(logPrefix, tx, cfg, ctx.Done()); err != nil { + if err := PromoteHashedStateCleanly(logPrefix, tx, cfg, ctx); err != nil { return err } } else { @@ -125,34 +125,34 @@ func unwindHashStateStageImpl(logPrefix string, u *UnwindState, s *StageState, t return nil } -func PromoteHashedStateCleanly(logPrefix string, db kv.RwTx, cfg HashStateCfg, quit <-chan struct{}) error { - if err := readPlainStateOnce( +func PromoteHashedStateCleanly(logPrefix string, tx kv.RwTx, cfg HashStateCfg, ctx context.Context) error { + if err := promotePlainState( logPrefix, - db, + tx, cfg.tmpDir, etl.IdentityLoadFunc, - quit, + ctx.Done(), ); err != nil { return err } return etl.Transform( logPrefix, - db, + tx, kv.PlainContractCode, kv.ContractCode, cfg.tmpDir, keyTransformExtractFunc(transformContractCodeKey), etl.IdentityLoadFunc, etl.TransformArgs{ - Quit: quit, + Quit: ctx.Done(), }, ) } -func readPlainStateOnce( +func promotePlainState( logPrefix string, - db kv.RwTx, + tx kv.RwTx, tmpdir string, loadFunc etl.LoadFunc, quit <-chan struct{}, @@ -169,7 +169,7 @@ func readPlainStateOnce( defer logEvery.Stop() var m runtime.MemStats - c, err := db.Cursor(kv.PlainState) + c, err := tx.Cursor(kv.PlainState) if err != nil { return err } @@ -240,11 +240,11 @@ func readPlainStateOnce( Quit: quit, } - if err := accCollector.Load(db, kv.HashedAccounts, loadFunc, args); err != nil { + if err := accCollector.Load(tx, kv.HashedAccounts, loadFunc, args); err != nil { return err } - if err := storageCollector.Load(db, kv.HashedStorage, loadFunc, args); err != nil { + if err := storageCollector.Load(tx, kv.HashedStorage, loadFunc, args); err != nil { return err } diff --git a/eth/stagedsync/stage_hashstate_test.go b/eth/stagedsync/stage_hashstate_test.go index 8c0534c9332..a0486c1db98 100644 --- a/eth/stagedsync/stage_hashstate_test.go +++ b/eth/stagedsync/stage_hashstate_test.go @@ -19,7 +19,7 @@ func TestPromoteHashedStateClearState(t *testing.T) { generateBlocks(t, 1, 50, hashedWriterGen(tx1), changeCodeWithIncarnations) generateBlocks(t, 1, 50, plainWriterGen(tx2), changeCodeWithIncarnations) - err := PromoteHashedStateCleanly("logPrefix", tx2, StageHashStateCfg(db2, t.TempDir()), nil) + err := PromoteHashedStateCleanly("logPrefix", tx2, StageHashStateCfg(db2, t.TempDir()), context.Background()) if err != nil { t.Errorf("error while promoting state: %v", err) } @@ -35,7 
+35,7 @@ func TestPromoteHashedStateIncremental(t *testing.T) { generateBlocks(t, 1, 50, plainWriterGen(tx2), changeCodeWithIncarnations) cfg := StageHashStateCfg(db2, t.TempDir()) - err := PromoteHashedStateCleanly("logPrefix", tx2, cfg, nil) + err := PromoteHashedStateCleanly("logPrefix", tx2, cfg, context.Background()) if err != nil { t.Errorf("error while promoting state: %v", err) } @@ -73,7 +73,7 @@ func TestUnwindHashed(t *testing.T) { generateBlocks(t, 1, 50, hashedWriterGen(tx1), changeCodeWithIncarnations) generateBlocks(t, 1, 50, plainWriterGen(tx2), changeCodeWithIncarnations) - err := PromoteHashedStateCleanly("logPrefix", tx2, StageHashStateCfg(db2, t.TempDir()), nil) + err := PromoteHashedStateCleanly("logPrefix", tx2, StageHashStateCfg(db2, t.TempDir()), context.Background()) if err != nil { t.Errorf("error while promoting state: %v", err) } @@ -142,7 +142,7 @@ func TestPromoteHashedStateCleanlyShutdown(t *testing.T) { generateBlocks(t, 1, 10, plainWriterGen(tx), changeCodeWithIncarnations) - if err := PromoteHashedStateCleanly("logPrefix", tx, StageHashStateCfg(db, t.TempDir()), ctx.Done()); !errors.Is(err, tc.errExp) { + if err := PromoteHashedStateCleanly("logPrefix", tx, StageHashStateCfg(db, t.TempDir()), ctx); !errors.Is(err, tc.errExp) { t.Errorf("error does not match expected error while shutdown promoteHashedStateCleanly , got: %v, expected: %v", err, tc.errExp) } @@ -176,8 +176,12 @@ func TestUnwindHashStateShutdown(t *testing.T) { generateBlocks(t, 1, 10, plainWriterGen(tx), changeCodeWithIncarnations) cfg := StageHashStateCfg(db, t.TempDir()) - err := PromoteHashedStateCleanly("logPrefix", tx, cfg, nil) - require.NoError(t, err) + err := PromoteHashedStateCleanly("logPrefix", tx, cfg, ctx) + if tc.cancelFuncExec { + require.ErrorIs(t, err, libcommon.ErrStopped) + } else { + require.NoError(t, err) + } u := &UnwindState{UnwindPoint: 5} s := &StageState{BlockNumber: 10} From 529682aa9deaa650c740b231207daf7319dc15ec Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Sun, 26 Jun 2022 17:13:07 +0600 Subject: [PATCH 106/136] New goerli snapshot (#4544) --- turbo/snapshotsync/snapshothashes/erigon-snapshots | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/turbo/snapshotsync/snapshothashes/erigon-snapshots b/turbo/snapshotsync/snapshothashes/erigon-snapshots index 7e85e4d0028..879c9801be6 160000 --- a/turbo/snapshotsync/snapshothashes/erigon-snapshots +++ b/turbo/snapshotsync/snapshothashes/erigon-snapshots @@ -1 +1 @@ -Subproject commit 7e85e4d0028c27f747d97f65ac0b8c252a050b39 +Subproject commit 879c9801be6bbe8b7863e2a6f2afae0140bff09b From afd07e5dee01dbe52436412ece8b53955eb1f257 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Sun, 26 Jun 2022 17:13:32 +0600 Subject: [PATCH 107/136] --no-downloader flag support (#4545) --- cmd/downloader/downloader/downloader.go | 6 +-- .../downloadercfg.go} | 20 ++++++++-- .../{torrentcfg => downloadercfg}/logger.go | 2 +- cmd/downloader/downloader/util.go | 6 +-- cmd/downloader/main.go | 6 +-- cmd/utils/flags.go | 16 +++++--- common/bytes.go | 14 ------- core/blockchain.go | 38 ------------------ eth/backend.go | 40 ++++++++++--------- eth/ethconfig/config.go | 14 ++++--- eth/stagedsync/stage_headers.go | 4 ++ node/nodecfg/config.go | 2 - turbo/cli/default_flags.go | 1 + turbo/stages/stageloop.go | 20 ++-------- 14 files changed, 76 insertions(+), 113 deletions(-) rename cmd/downloader/downloader/{torrentcfg/torrentcfg.go => downloadercfg/downloadercfg.go} (88%) rename cmd/downloader/downloader/{torrentcfg => 
downloadercfg}/logger.go (98%) diff --git a/cmd/downloader/downloader/downloader.go b/cmd/downloader/downloader/downloader.go index e828fd80be9..39ea6b980a4 100644 --- a/cmd/downloader/downloader/downloader.go +++ b/cmd/downloader/downloader/downloader.go @@ -15,7 +15,7 @@ import ( common2 "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/mdbx" - "github.com/ledgerwatch/erigon/cmd/downloader/downloader/torrentcfg" + "github.com/ledgerwatch/erigon/cmd/downloader/downloader/downloadercfg" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/log/v3" mdbx2 "github.com/torquem-ch/mdbx-go/mdbx" @@ -29,7 +29,7 @@ type Downloader struct { torrentClient *torrent.Client clientLock *sync.RWMutex - cfg *torrentcfg.Cfg + cfg *downloadercfg.Cfg statsLock *sync.RWMutex stats AggStats @@ -51,7 +51,7 @@ type AggStats struct { UploadRate, DownloadRate uint64 } -func New(cfg *torrentcfg.Cfg) (*Downloader, error) { +func New(cfg *downloadercfg.Cfg) (*Downloader, error) { if err := portMustBeTCPAndUDPOpen(cfg.ListenPort); err != nil { return nil, err } diff --git a/cmd/downloader/downloader/torrentcfg/torrentcfg.go b/cmd/downloader/downloader/downloadercfg/downloadercfg.go similarity index 88% rename from cmd/downloader/downloader/torrentcfg/torrentcfg.go rename to cmd/downloader/downloader/downloadercfg/downloadercfg.go index 9ca2bd48a84..15816927ba1 100644 --- a/cmd/downloader/downloader/torrentcfg/torrentcfg.go +++ b/cmd/downloader/downloader/downloadercfg/downloadercfg.go @@ -1,4 +1,4 @@ -package torrentcfg +package downloadercfg import ( "fmt" @@ -26,6 +26,7 @@ const DefaultNetworkChunkSize = DefaultPieceSize type Cfg struct { *torrent.ClientConfig DownloadSlots int + Disabled bool } func Default() *torrent.ClientConfig { @@ -54,7 +55,7 @@ func Default() *torrent.ClientConfig { return torrentConfig } -func New(snapDir string, verbosity lg.Level, natif nat.Interface, downloadRate, uploadRate datasize.ByteSize, port, connsPerFile int, downloadSlots int) (*Cfg, error) { +func New(snapDir string, verbosity lg.Level, natif nat.Interface, downloadRate, uploadRate datasize.ByteSize, port, connsPerFile int, downloadSlots int, disable bool) (*Cfg, error) { torrentConfig := Default() // We would-like to reduce amount of goroutines in Erigon, so reducing next params torrentConfig.EstablishedConnsPerTorrent = connsPerFile // default: 50 @@ -120,5 +121,18 @@ func New(snapDir string, verbosity lg.Level, natif nat.Interface, downloadRate, torrentConfig.Logger = lg.Default.FilterLevel(verbosity) torrentConfig.Logger.Handlers = []lg.Handler{adapterHandler{}} - return &Cfg{ClientConfig: torrentConfig, DownloadSlots: downloadSlots}, nil + if disable { + torrentConfig.DisableIPv4 = true + torrentConfig.DisableIPv6 = true + torrentConfig.DisableTCP = true + torrentConfig.DisableUTP = true + torrentConfig.NoDHT = true + torrentConfig.DisablePEX = true + torrentConfig.DisableTrackers = true + torrentConfig.DisableWebseeds = true + torrentConfig.DisableWebtorrent = true + + torrentConfig.Seed = false + } + return &Cfg{ClientConfig: torrentConfig, DownloadSlots: downloadSlots, Disabled: disable}, nil } diff --git a/cmd/downloader/downloader/torrentcfg/logger.go b/cmd/downloader/downloader/downloadercfg/logger.go similarity index 98% rename from cmd/downloader/downloader/torrentcfg/logger.go rename to cmd/downloader/downloader/downloadercfg/logger.go index e5a8a31881c..bf1da9bfb63 100644 --- a/cmd/downloader/downloader/torrentcfg/logger.go +++ 
b/cmd/downloader/downloader/downloadercfg/logger.go @@ -1,4 +1,4 @@ -package torrentcfg +package downloadercfg import ( "strings" diff --git a/cmd/downloader/downloader/util.go b/cmd/downloader/downloader/util.go index 9ef784910c9..0c96aff15cb 100644 --- a/cmd/downloader/downloader/util.go +++ b/cmd/downloader/downloader/util.go @@ -22,7 +22,7 @@ import ( common2 "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/cmp" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/cmd/downloader/downloader/torrentcfg" + "github.com/ledgerwatch/erigon/cmd/downloader/downloader/downloadercfg" "github.com/ledgerwatch/erigon/cmd/downloader/trackers" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/turbo/snapshotsync/snap" @@ -114,7 +114,7 @@ func BuildTorrentFileIfNeed(ctx context.Context, originalFileName, root string) if !errors.Is(err, os.ErrNotExist) { return false, fmt.Errorf("os.Stat: %w", err) } - info := &metainfo.Info{PieceLength: torrentcfg.DefaultPieceSize} + info := &metainfo.Info{PieceLength: downloadercfg.DefaultPieceSize} if err := info.BuildFromFilePath(filepath.Join(root, originalFileName)); err != nil { return false, fmt.Errorf("BuildFromFilePath: %w", err) } @@ -334,7 +334,7 @@ func AddTorrentFile(ctx context.Context, torrentFilePath string, torrentClient * } if _, ok := torrentClient.Torrent(ts.InfoHash); !ok { // can set ChunkSize only for new torrents - ts.ChunkSize = torrentcfg.DefaultNetworkChunkSize + ts.ChunkSize = downloadercfg.DefaultNetworkChunkSize } else { ts.ChunkSize = 0 } diff --git a/cmd/downloader/main.go b/cmd/downloader/main.go index 6cf8644ce6b..61827d83886 100644 --- a/cmd/downloader/main.go +++ b/cmd/downloader/main.go @@ -15,7 +15,7 @@ import ( "github.com/ledgerwatch/erigon-lib/common" proto_downloader "github.com/ledgerwatch/erigon-lib/gointerfaces/downloader" "github.com/ledgerwatch/erigon/cmd/downloader/downloader" - "github.com/ledgerwatch/erigon/cmd/downloader/downloader/torrentcfg" + "github.com/ledgerwatch/erigon/cmd/downloader/downloader/downloadercfg" "github.com/ledgerwatch/erigon/cmd/utils" "github.com/ledgerwatch/erigon/common/paths" "github.com/ledgerwatch/erigon/internal/debug" @@ -114,7 +114,7 @@ var rootCmd = &cobra.Command{ func Downloader(ctx context.Context) error { dirs := datadir.New(datadirCli) - torrentLogLevel, err := torrentcfg.Str2LogLevel(torrentVerbosity) + torrentLogLevel, err := downloadercfg.Str2LogLevel(torrentVerbosity) if err != nil { return err } @@ -133,7 +133,7 @@ func Downloader(ctx context.Context) error { return fmt.Errorf("invalid nat option %s: %w", natSetting, err) } - cfg, err := torrentcfg.New(dirs.Snap, torrentLogLevel, natif, downloadRate, uploadRate, torrentPort, torrentConnsPerFile, torrentDownloadSlots) + cfg, err := downloadercfg.New(dirs.Snap, torrentLogLevel, natif, downloadRate, uploadRate, torrentPort, torrentConnsPerFile, torrentDownloadSlots, false) if err != nil { return err } diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index d876ef330a3..56bcf981638 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -33,7 +33,7 @@ import ( "github.com/c2h5oh/datasize" "github.com/ledgerwatch/erigon-lib/kv/kvcache" "github.com/ledgerwatch/erigon-lib/txpool" - "github.com/ledgerwatch/erigon/cmd/downloader/downloader/torrentcfg" + "github.com/ledgerwatch/erigon/cmd/downloader/downloader/downloadercfg" "github.com/ledgerwatch/erigon/node/nodecfg" "github.com/ledgerwatch/erigon/node/nodecfg/datadir" "github.com/ledgerwatch/log/v3" @@ 
-665,6 +665,10 @@ var ( Value: 3, Usage: "amount of files to download in parallel. If network has enough seeders 1-3 slot enough, if network has lack of seeders increase to 5-7 (too big value will slow down everything).", } + NoDownloaderFlag = cli.BoolFlag{ + Name: "no-downloader", + Usage: "to disable downloader component", + } TorrentPortFlag = cli.IntFlag{ Name: "torrent.port", Value: 42069, @@ -1029,7 +1033,6 @@ func SetNodeConfig(ctx *cli.Context, cfg *nodecfg.Config) { setNodeUserIdent(ctx, cfg) SetP2PConfig(ctx, &cfg.P2P, cfg.NodeName(), cfg.Dirs.DataDir) - cfg.DownloaderAddr = strings.TrimSpace(ctx.GlobalString(DownloaderAddrFlag.Name)) cfg.SentryLogPeerInfo = ctx.GlobalIsSet(SentryLogPeerInfoFlag.Name) } @@ -1386,7 +1389,9 @@ func SetEthConfig(ctx *cli.Context, nodeConfig *nodecfg.Config, cfg *ethconfig.C cfg.MemoryOverlay = ctx.GlobalBool(MemoryOverlayFlag.Name) cfg.Snapshot.KeepBlocks = ctx.GlobalBool(SnapKeepBlocksFlag.Name) cfg.Snapshot.Produce = !ctx.GlobalBool(SnapStopFlag.Name) - if !ctx.GlobalIsSet(DownloaderAddrFlag.Name) { + cfg.Snapshot.NoDownloader = ctx.GlobalBool(NoDownloaderFlag.Name) + cfg.Snapshot.DownloaderAddr = strings.TrimSpace(ctx.GlobalString(DownloaderAddrFlag.Name)) + if cfg.Snapshot.DownloaderAddr == "" { downloadRateStr := ctx.GlobalString(TorrentDownloadRateFlag.Name) uploadRateStr := ctx.GlobalString(TorrentUploadRateFlag.Name) var downloadRate, uploadRate datasize.ByteSize @@ -1397,17 +1402,18 @@ func SetEthConfig(ctx *cli.Context, nodeConfig *nodecfg.Config, cfg *ethconfig.C panic(err) } - lvl, err := torrentcfg.Str2LogLevel(ctx.GlobalString(TorrentVerbosityFlag.Name)) + lvl, err := downloadercfg.Str2LogLevel(ctx.GlobalString(TorrentVerbosityFlag.Name)) if err != nil { panic(err) } - cfg.Torrent, err = torrentcfg.New(cfg.Dirs.Snap, + cfg.Downloader, err = downloadercfg.New(cfg.Dirs.Snap, lvl, nodeConfig.P2P.NAT, downloadRate, uploadRate, ctx.GlobalInt(TorrentPortFlag.Name), ctx.GlobalInt(TorrentConnsPerFileFlag.Name), ctx.GlobalInt(TorrentDownloadSlotsFlag.Name), + ctx.GlobalBool(NoDownloaderFlag.Name), ) if err != nil { panic(err) diff --git a/common/bytes.go b/common/bytes.go index 33291f4a89c..73e9b5097f1 100644 --- a/common/bytes.go +++ b/common/bytes.go @@ -79,20 +79,6 @@ func Hex2Bytes(str string) []byte { return h } -// Hex2BytesFixed returns bytes of a specified fixed length flen. -func Hex2BytesFixed(str string, flen int) []byte { - h, _ := hex.DecodeString(str) - if len(h) == flen { - return h - } - if len(h) > flen { - return h[len(h)-flen:] - } - hh := make([]byte, flen) - copy(hh[flen-len(h):flen], h) - return hh -} - // RightPadBytes zero-pads slice to the right up to length l. func RightPadBytes(slice []byte, l int) []byte { if l <= len(slice) { diff --git a/core/blockchain.go b/core/blockchain.go index ef830c54388..e32d8de9aa8 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -27,10 +27,7 @@ import ( "golang.org/x/exp/slices" metrics2 "github.com/VictoriaMetrics/metrics" - "github.com/ledgerwatch/log/v3" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/mclock" "github.com/ledgerwatch/erigon/common/u256" "github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/consensus/misc" @@ -55,41 +52,6 @@ const ( // always print out progress. This avoids the user wondering what's going on. const statsReportLimit = 8 * time.Second -// report prints statistics if some number of blocks have been processed -// or more than a few seconds have passed since the last message. 
-func (st *InsertStats) Report(logPrefix string, chain []*types.Block, index int, toCommit bool) { - // Fetch the timings for the batch - var ( - now = mclock.Now() - elapsed = time.Duration(now) - time.Duration(st.StartTime) - ) - // If we're at the last block of the batch or report period reached, log - if index == len(chain)-1 || elapsed >= statsReportLimit || toCommit { - // Count the number of transactions in this segment - var txs int - for _, block := range chain[st.lastIndex : index+1] { - txs += len(block.Transactions()) - } - end := chain[index] - context := []interface{}{ - "blocks", st.Processed, "txs", txs, - "elapsed", common.PrettyDuration(elapsed), - "number", end.Number(), "hash", end.Hash(), - } - if timestamp := time.Unix(int64(end.Time()), 0); time.Since(timestamp) > time.Minute { - context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...) - } - if st.queued > 0 { - context = append(context, []interface{}{"queued", st.queued}...) - } - if st.ignored > 0 { - context = append(context, []interface{}{"ignored", st.ignored}...) - } - log.Info(fmt.Sprintf("[%s] Imported new chain segment", logPrefix), context...) - *st = InsertStats{StartTime: now, lastIndex: index + 1} - } -} - // ExecuteBlockEphemerally runs a block from provided stateReader and // writes the result to the provided stateWriter func ExecuteBlockEphemerallyForBSC( diff --git a/eth/backend.go b/eth/backend.go index 2be452d24ff..7118a832691 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -311,7 +311,7 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere return nil, err } - blockReader, allSnapshots, err := backend.setUpBlockReader(ctx, config.Snapshot.Enabled, config, stack) + blockReader, allSnapshots, err := backend.setUpBlockReader(ctx, config.Snapshot.Enabled, config) if err != nil { return nil, err } @@ -397,7 +397,7 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere } inMemoryExecution := func(batch kv.RwTx, header *types.Header, body *types.RawBody) error { - stateSync, err := stages2.NewInMemoryExecution(backend.sentryCtx, backend.log, backend.chainDB, stack.Config().P2P, *config, backend.sentriesClient, tmpdir, backend.notifications, backend.downloaderClient, allSnapshots, nil) + stateSync, err := stages2.NewInMemoryExecution(backend.sentryCtx, backend.log, backend.chainDB, *config, backend.sentriesClient, tmpdir, backend.notifications, allSnapshots) if err != nil { return err } @@ -788,7 +788,7 @@ func (s *Ethereum) NodesInfo(limit int) (*remote.NodesInfoReply, error) { } // sets up blockReader and client downloader -func (s *Ethereum) setUpBlockReader(ctx context.Context, isSnapshotEnabled bool, cfg *ethconfig.Config, stack *node.Node) (services.FullBlockReader, *snapshotsync.RoSnapshots, error) { +func (s *Ethereum) setUpBlockReader(ctx context.Context, isSnapshotEnabled bool, cfg *ethconfig.Config) (services.FullBlockReader, *snapshotsync.RoSnapshots, error) { var err error if isSnapshotEnabled { @@ -796,25 +796,27 @@ func (s *Ethereum) setUpBlockReader(ctx context.Context, isSnapshotEnabled bool, allSnapshots.OptimisticReopen() blockReader := snapshotsync.NewBlockReaderWithSnapshots(allSnapshots) - if len(stack.Config().DownloaderAddr) > 0 { - // connect to external Downloader - s.downloaderClient, err = downloadergrpc.NewClient(ctx, stack.Config().DownloaderAddr) - } else { - // start embedded Downloader - s.downloader, err = downloader.New(cfg.Torrent) - if err != nil { - return nil, nil, err + if 
!cfg.Snapshot.NoDownloader { + if cfg.Snapshot.DownloaderAddr != "" { + // connect to external Downloader + s.downloaderClient, err = downloadergrpc.NewClient(ctx, cfg.Snapshot.DownloaderAddr) + } else { + // start embedded Downloader + s.downloader, err = downloader.New(cfg.Downloader) + if err != nil { + return nil, nil, err + } + go downloader.MainLoop(ctx, s.downloader, true) + bittorrentServer, err := downloader.NewGrpcServer(s.downloader) + if err != nil { + return nil, nil, fmt.Errorf("new server: %w", err) + } + + s.downloaderClient = direct.NewDownloaderClient(bittorrentServer) } - go downloader.MainLoop(ctx, s.downloader, true) - bittorrentServer, err := downloader.NewGrpcServer(s.downloader) if err != nil { - return nil, nil, fmt.Errorf("new server: %w", err) + return nil, nil, err } - - s.downloaderClient = direct.NewDownloaderClient(bittorrentServer) - } - if err != nil { - return nil, nil, err } return blockReader, allSnapshots, nil } else { diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index 714f8dc07f4..0e4e745f577 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -28,7 +28,7 @@ import ( "github.com/c2h5oh/datasize" txpool2 "github.com/ledgerwatch/erigon-lib/txpool" - "github.com/ledgerwatch/erigon/cmd/downloader/downloader/torrentcfg" + "github.com/ledgerwatch/erigon/cmd/downloader/downloader/downloadercfg" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/consensus/ethash" "github.com/ledgerwatch/erigon/core" @@ -121,9 +121,11 @@ func init() { //go:generate gencodec -dir . -type Config -formats toml -out gen_config.go type Snapshot struct { - Enabled bool - KeepBlocks bool - Produce bool // produce new snapshots + Enabled bool + KeepBlocks bool // produce new snapshots of blocks but don't remove blocks from DB + Produce bool // produce new snapshots + NoDownloader bool // possible to use snapshots without calling Downloader + DownloaderAddr string } func (s Snapshot) String() string { @@ -173,8 +175,8 @@ type Config struct { BadBlockHash common.Hash // hash of the block marked as bad - Snapshot Snapshot - Torrent *torrentcfg.Cfg + Snapshot Snapshot + Downloader *downloadercfg.Cfg Dirs datadir.Dirs diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 1d8b554f310..79ea6ff4028 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -1251,6 +1251,10 @@ func DownloadAndIndexSnapshotsIfNeed(s *StageState, ctx context.Context, tx kv.R // WaitForDownloader - wait for Downloader service to download all expected snapshots // for MVP we sync with Downloader only once, in future will send new snapshots also func WaitForDownloader(ctx context.Context, cfg HeadersCfg) error { + if cfg.snapshots.Cfg().NoDownloader { + return nil + } + // send all hashes to the Downloader service preverified := snapshothashes.KnownConfig(cfg.chainConfig.ChainName).Preverified req := &proto_downloader.DownloadRequest{Items: make([]*proto_downloader.DownloadItem, 0, len(preverified))} diff --git a/node/nodecfg/config.go b/node/nodecfg/config.go index ff8742b1fa1..da6d2dc8ebf 100644 --- a/node/nodecfg/config.go +++ b/node/nodecfg/config.go @@ -68,8 +68,6 @@ type Config struct { // Configuration of peer-to-peer networking. P2P p2p.Config - DownloaderAddr string - // IPCPath is the requested location to place the IPC endpoint. 
If the path is // a simple file name, it is placed inside the data directory (or on the root // pipe path on Windows), whereas if it's a resolvable path name (absolute or diff --git a/turbo/cli/default_flags.go b/turbo/cli/default_flags.go index db1dc66e233..af923646c0e 100644 --- a/turbo/cli/default_flags.go +++ b/turbo/cli/default_flags.go @@ -122,6 +122,7 @@ var DefaultFlags = []cli.Flag{ utils.SentryAddrFlag, utils.SentryLogPeerInfoFlag, utils.DownloaderAddrFlag, + utils.NoDownloaderFlag, HealthCheckFlag, utils.HeimdallURLFlag, utils.WithoutHeimdallFlag, diff --git a/turbo/stages/stageloop.go b/turbo/stages/stageloop.go index 5530f581610..b40cbc39dae 100644 --- a/turbo/stages/stageloop.go +++ b/turbo/stages/stageloop.go @@ -327,7 +327,7 @@ func NewStagedSync( controlServer *sentry.MultiClient, tmpdir string, notifications *stagedsync.Notifications, - snapshotDownloader proto_downloader.DownloaderClient, + snapDownloader proto_downloader.DownloaderClient, snapshots *snapshotsync.RoSnapshots, headCh chan *types.Block, execPayload stagedsync.ExecutePayloadFunc, @@ -338,7 +338,7 @@ func NewStagedSync( } else { blockReader = snapshotsync.NewBlockReader() } - blockRetire := snapshotsync.NewBlockRetire(1, tmpdir, snapshots, db, snapshotDownloader, notifications.Events) + blockRetire := snapshotsync.NewBlockRetire(1, tmpdir, snapshots, db, snapDownloader, notifications.Events) // During Import we don't want other services like header requests, body requests etc. to be running. // Hence we run it in the test mode. @@ -358,7 +358,7 @@ func NewStagedSync( p2pCfg.NoDiscovery, cfg.MemoryOverlay, snapshots, - snapshotDownloader, + snapDownloader, blockReader, tmpdir, notifications.Events, @@ -407,19 +407,7 @@ func NewStagedSync( ), nil } -func NewInMemoryExecution( - ctx context.Context, - logger log.Logger, - db kv.RwDB, - p2pCfg p2p.Config, - cfg ethconfig.Config, - controlServer *sentry.MultiClient, - tmpdir string, - notifications *stagedsync.Notifications, - snapshotDownloader proto_downloader.DownloaderClient, - snapshots *snapshotsync.RoSnapshots, - headCh chan *types.Block, -) (*stagedsync.Sync, error) { +func NewInMemoryExecution(ctx context.Context, logger log.Logger, db kv.RwDB, cfg ethconfig.Config, controlServer *sentry.MultiClient, tmpdir string, notifications *stagedsync.Notifications, snapshots *snapshotsync.RoSnapshots) (*stagedsync.Sync, error) { var blockReader services.FullBlockReader if cfg.Snapshot.Enabled { blockReader = snapshotsync.NewBlockReaderWithSnapshots(snapshots) From b9cb6d953e7886af1087c945816a0d5c06743110 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Sun, 26 Jun 2022 17:27:14 +0600 Subject: [PATCH 108/136] Fix nil td (#4546) * save * save --- .../downloader/downloadercfg/downloadercfg.go | 18 ++---------------- cmd/downloader/main.go | 2 +- cmd/utils/flags.go | 10 +--------- turbo/stages/stageloop.go | 11 ++++++----- 4 files changed, 10 insertions(+), 31 deletions(-) diff --git a/cmd/downloader/downloader/downloadercfg/downloadercfg.go b/cmd/downloader/downloader/downloadercfg/downloadercfg.go index 15816927ba1..51fb7a752a0 100644 --- a/cmd/downloader/downloader/downloadercfg/downloadercfg.go +++ b/cmd/downloader/downloader/downloadercfg/downloadercfg.go @@ -26,7 +26,6 @@ const DefaultNetworkChunkSize = DefaultPieceSize type Cfg struct { *torrent.ClientConfig DownloadSlots int - Disabled bool } func Default() *torrent.ClientConfig { @@ -55,7 +54,7 @@ func Default() *torrent.ClientConfig { return torrentConfig } -func New(snapDir string, verbosity lg.Level, natif 
nat.Interface, downloadRate, uploadRate datasize.ByteSize, port, connsPerFile int, downloadSlots int, disable bool) (*Cfg, error) { +func New(snapDir string, verbosity lg.Level, natif nat.Interface, downloadRate, uploadRate datasize.ByteSize, port, connsPerFile, downloadSlots int) (*Cfg, error) { torrentConfig := Default() // We would-like to reduce amount of goroutines in Erigon, so reducing next params torrentConfig.EstablishedConnsPerTorrent = connsPerFile // default: 50 @@ -121,18 +120,5 @@ func New(snapDir string, verbosity lg.Level, natif nat.Interface, downloadRate, torrentConfig.Logger = lg.Default.FilterLevel(verbosity) torrentConfig.Logger.Handlers = []lg.Handler{adapterHandler{}} - if disable { - torrentConfig.DisableIPv4 = true - torrentConfig.DisableIPv6 = true - torrentConfig.DisableTCP = true - torrentConfig.DisableUTP = true - torrentConfig.NoDHT = true - torrentConfig.DisablePEX = true - torrentConfig.DisableTrackers = true - torrentConfig.DisableWebseeds = true - torrentConfig.DisableWebtorrent = true - - torrentConfig.Seed = false - } - return &Cfg{ClientConfig: torrentConfig, DownloadSlots: downloadSlots, Disabled: disable}, nil + return &Cfg{ClientConfig: torrentConfig, DownloadSlots: downloadSlots}, nil } diff --git a/cmd/downloader/main.go b/cmd/downloader/main.go index 61827d83886..acd3d856a75 100644 --- a/cmd/downloader/main.go +++ b/cmd/downloader/main.go @@ -133,7 +133,7 @@ func Downloader(ctx context.Context) error { return fmt.Errorf("invalid nat option %s: %w", natSetting, err) } - cfg, err := downloadercfg.New(dirs.Snap, torrentLogLevel, natif, downloadRate, uploadRate, torrentPort, torrentConnsPerFile, torrentDownloadSlots, false) + cfg, err := downloadercfg.New(dirs.Snap, torrentLogLevel, natif, downloadRate, uploadRate, torrentPort, torrentConnsPerFile, torrentDownloadSlots) if err != nil { return err } diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 56bcf981638..ff39c1e4367 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -1406,15 +1406,7 @@ func SetEthConfig(ctx *cli.Context, nodeConfig *nodecfg.Config, cfg *ethconfig.C if err != nil { panic(err) } - cfg.Downloader, err = downloadercfg.New(cfg.Dirs.Snap, - lvl, - nodeConfig.P2P.NAT, - downloadRate, uploadRate, - ctx.GlobalInt(TorrentPortFlag.Name), - ctx.GlobalInt(TorrentConnsPerFileFlag.Name), - ctx.GlobalInt(TorrentDownloadSlotsFlag.Name), - ctx.GlobalBool(NoDownloaderFlag.Name), - ) + cfg.Downloader, err = downloadercfg.New(cfg.Dirs.Snap, lvl, nodeConfig.P2P.NAT, downloadRate, uploadRate, ctx.GlobalInt(TorrentPortFlag.Name), ctx.GlobalInt(TorrentConnsPerFileFlag.Name), ctx.GlobalInt(TorrentDownloadSlotsFlag.Name)) if err != nil { panic(err) } diff --git a/turbo/stages/stageloop.go b/turbo/stages/stageloop.go index b40cbc39dae..81d3732a407 100644 --- a/turbo/stages/stageloop.go +++ b/turbo/stages/stageloop.go @@ -196,12 +196,13 @@ func StageLoopStep( } } - headTd256, overflow := uint256.FromBig(headTd) - if overflow { - return headBlockHash, fmt.Errorf("headTds higher than 2^256-1") + if headTd != nil { + headTd256, overflow := uint256.FromBig(headTd) + if overflow { + return headBlockHash, fmt.Errorf("headTds higher than 2^256-1") + } + updateHead(ctx, head, headHash, headTd256) } - updateHead(ctx, head, headHash, headTd256) - if notifications != nil && notifications.Accumulator != nil { header := rawdb.ReadCurrentHeader(rotx) if header != nil && header.Number.Uint64() != finishProgressBefore { From f0fde269026774688dc4612feb3aeb3d7ac60135 Mon Sep 17 00:00:00 2001 From: Alex 
Sharov Date: Mon, 27 Jun 2022 09:45:19 +0600 Subject: [PATCH 109/136] document rpc filter methods (#4548) --- cmd/rpcdaemon/README.md | 258 ++++++++++++++++++++-------------------- 1 file changed, 129 insertions(+), 129 deletions(-) diff --git a/cmd/rpcdaemon/README.md b/cmd/rpcdaemon/README.md index a889b7af456..8caf193d6ca 100644 --- a/cmd/rpcdaemon/README.md +++ b/cmd/rpcdaemon/README.md @@ -155,135 +155,135 @@ Label "remote" means: `--private.api.addr` flag is required. The following table shows the current implementation status of Erigon's RPC daemon. -| Command | Avail | Notes | -| ------------------------------------------ | ------- | ------------------------------------------ | -| admin_nodeInfo | Yes | | -| admin_peers | Yes | | -| | | | -| web3_clientVersion | Yes | | -| web3_sha3 | Yes | | -| | | | -| net_listening | HC | (`remote` hard coded returns true) | -| net_peerCount | Limited | internal sentries only | -| net_version | Yes | `remote`. | -| | | | -| eth_blockNumber | Yes | | -| eth_chainID/eth_chainId | Yes | | -| eth_protocolVersion | Yes | | -| eth_syncing | Yes | | -| eth_gasPrice | Yes | | -| eth_maxPriorityFeePerGas | Yes | | -| eth_feeHistory | Yes | | -| | | | -| eth_getBlockByHash | Yes | | -| eth_getBlockByNumber | Yes | | -| eth_getBlockTransactionCountByHash | Yes | | -| eth_getBlockTransactionCountByNumber | Yes | | -| eth_getUncleByBlockHashAndIndex | Yes | | -| eth_getUncleByBlockNumberAndIndex | Yes | | -| eth_getUncleCountByBlockHash | Yes | | -| eth_getUncleCountByBlockNumber | Yes | | -| | | | -| eth_getTransactionByHash | Yes | | -| eth_getRawTransactionByHash | Yes | | -| eth_getTransactionByBlockHashAndIndex | Yes | | -| eth_retRawTransactionByBlockHashAndIndex | Yes | | -| eth_getTransactionByBlockNumberAndIndex | Yes | | -| eth_retRawTransactionByBlockNumberAndIndex | Yes | | -| eth_getTransactionReceipt | Yes | | -| eth_getBlockReceipts | Yes | | -| | | | -| eth_estimateGas | Yes | | -| eth_getBalance | Yes | | -| eth_getCode | Yes | | -| eth_getTransactionCount | Yes | | -| eth_getStorageAt | Yes | | -| eth_call | Yes | | -| eth_callBundle | Yes | | -| eth_createAccessList | Yes | -| | | | -| eth_newFilter | - | not yet implemented | -| eth_newBlockFilter | - | not yet implemented | -| eth_newPendingTransactionFilter | - | not yet implemented | -| eth_getFilterChanges | - | not yet implemented | -| eth_uninstallFilter | - | not yet implemented | -| eth_getLogs | Yes | | -| | | | -| eth_accounts | No | deprecated | -| eth_sendRawTransaction | Yes | `remote`. | -| eth_sendTransaction | - | not yet implemented | -| eth_sign | No | deprecated | -| eth_signTransaction | - | not yet implemented | -| eth_signTypedData | - | ???? 
| -| | | | -| eth_getProof | - | not yet implemented | -| | | | -| eth_mining | Yes | returns true if --mine flag provided | -| eth_coinbase | Yes | | -| eth_hashrate | Yes | | -| eth_submitHashrate | Yes | | -| eth_getWork | Yes | | -| eth_submitWork | Yes | | -| | | | -| eth_subscribe | Limited | Websock Only - newHeads, | -| | | newPendingTransactions | -| eth_unsubscribe | Yes | Websock Only | -| | | | -| engine_newPayloadV1 | Yes | | -| engine_forkchoiceUpdatedV1 | Yes | | -| engine_getPayloadV1 | Yes | | -| engine_exchangeTransitionConfigurationV1 | Yes | | -| | | | -| debug_accountRange | Yes | Private Erigon debug module | -| debug_accountAt | Yes | Private Erigon debug module | -| debug_getModifiedAccountsByNumber | Yes | | -| debug_getModifiedAccountsByHash | Yes | | -| debug_storageRangeAt | Yes | | -| debug_traceBlockByHash | Yes | Streaming (can handle huge results) | -| debug_traceBlockByNumber | Yes | Streaming (can handle huge results) | -| debug_traceTransaction | Yes | Streaming (can handle huge results) | -| debug_traceCall | Yes | Streaming (can handle huge results) | -| | | | -| trace_call | Yes | | -| trace_callMany | Yes | | -| trace_rawTransaction | - | not yet implemented (come help!) | -| trace_replayBlockTransactions | yes | stateDiff only (come help!) | -| trace_replayTransaction | yes | stateDiff only (come help!) | -| trace_block | Yes | | -| trace_filter | Yes | no pagination, but streaming | -| trace_get | Yes | | -| trace_transaction | Yes | | -| | | | -| txpool_content | Yes | `remote` | -| txpool_status | Yes | `remote` | -| | | | -| eth_getCompilers | No | deprecated | -| eth_compileLLL | No | deprecated | -| eth_compileSolidity | No | deprecated | -| eth_compileSerpent | No | deprecated | -| | | | -| db_putString | No | deprecated | -| db_getString | No | deprecated | -| db_putHex | No | deprecated | -| db_getHex | No | deprecated | -| | | | -| erigon_getHeaderByHash | Yes | Erigon only | -| erigon_getHeaderByNumber | Yes | Erigon only | -| erigon_getLogsByHash | Yes | Erigon only | -| erigon_forks | Yes | Erigon only | -| erigon_issuance | Yes | Erigon only | -| erigon_GetBlockByTimestamp | Yes | Erigon only | -| | | | -| starknet_call | Yes | Starknet only | -| | | | -| bor_getSnapshot | Yes | Bor only | -| bor_getAuthor | Yes | Bor only | -| bor_getSnapshotAtHash | Yes | Bor only | -| bor_getSigners | Yes | Bor only | -| bor_getSignersAtHash | Yes | Bor only | -| bor_getCurrentProposer | Yes | Bor only | -| bor_getCurrentValidators | Yes | Bor only | -| bor_getRootHash | Yes | Bor only | +| Command | Avail | Notes | +| ------------------------------------------ |---------|--------------------------------------| +| admin_nodeInfo | Yes | | +| admin_peers | Yes | | +| | | | +| web3_clientVersion | Yes | | +| web3_sha3 | Yes | | +| | | | +| net_listening | HC | (`remote` hard coded returns true) | +| net_peerCount | Limited | internal sentries only | +| net_version | Yes | `remote`. 
| +| | | | +| eth_blockNumber | Yes | | +| eth_chainID/eth_chainId | Yes | | +| eth_protocolVersion | Yes | | +| eth_syncing | Yes | | +| eth_gasPrice | Yes | | +| eth_maxPriorityFeePerGas | Yes | | +| eth_feeHistory | Yes | | +| | | | +| eth_getBlockByHash | Yes | | +| eth_getBlockByNumber | Yes | | +| eth_getBlockTransactionCountByHash | Yes | | +| eth_getBlockTransactionCountByNumber | Yes | | +| eth_getUncleByBlockHashAndIndex | Yes | | +| eth_getUncleByBlockNumberAndIndex | Yes | | +| eth_getUncleCountByBlockHash | Yes | | +| eth_getUncleCountByBlockNumber | Yes | | +| | | | +| eth_getTransactionByHash | Yes | | +| eth_getRawTransactionByHash | Yes | | +| eth_getTransactionByBlockHashAndIndex | Yes | | +| eth_retRawTransactionByBlockHashAndIndex | Yes | | +| eth_getTransactionByBlockNumberAndIndex | Yes | | +| eth_retRawTransactionByBlockNumberAndIndex | Yes | | +| eth_getTransactionReceipt | Yes | | +| eth_getBlockReceipts | Yes | | +| | | | +| eth_estimateGas | Yes | | +| eth_getBalance | Yes | | +| eth_getCode | Yes | | +| eth_getTransactionCount | Yes | | +| eth_getStorageAt | Yes | | +| eth_call | Yes | | +| eth_callBundle | Yes | | +| eth_createAccessList | Yes | | +| | | | +| eth_newFilter | Yes | Added by PR#4253 | +| eth_newBlockFilter | Yes | | +| eth_newPendingTransactionFilter | Yes | | +| eth_getFilterChanges | Yes | | +| eth_uninstallFilter | Yes | | +| eth_getLogs | Yes | | +| | | | +| eth_accounts | No | deprecated | +| eth_sendRawTransaction | Yes | `remote`. | +| eth_sendTransaction | - | not yet implemented | +| eth_sign | No | deprecated | +| eth_signTransaction | - | not yet implemented | +| eth_signTypedData | - | ???? | +| | | | +| eth_getProof | - | not yet implemented | +| | | | +| eth_mining | Yes | returns true if --mine flag provided | +| eth_coinbase | Yes | | +| eth_hashrate | Yes | | +| eth_submitHashrate | Yes | | +| eth_getWork | Yes | | +| eth_submitWork | Yes | | +| | | | +| eth_subscribe | Limited | Websock Only - newHeads, | +| | | newPendingTransactions | +| eth_unsubscribe | Yes | Websock Only | +| | | | +| engine_newPayloadV1 | Yes | | +| engine_forkchoiceUpdatedV1 | Yes | | +| engine_getPayloadV1 | Yes | | +| engine_exchangeTransitionConfigurationV1 | Yes | | +| | | | +| debug_accountRange | Yes | Private Erigon debug module | +| debug_accountAt | Yes | Private Erigon debug module | +| debug_getModifiedAccountsByNumber | Yes | | +| debug_getModifiedAccountsByHash | Yes | | +| debug_storageRangeAt | Yes | | +| debug_traceBlockByHash | Yes | Streaming (can handle huge results) | +| debug_traceBlockByNumber | Yes | Streaming (can handle huge results) | +| debug_traceTransaction | Yes | Streaming (can handle huge results) | +| debug_traceCall | Yes | Streaming (can handle huge results) | +| | | | +| trace_call | Yes | | +| trace_callMany | Yes | | +| trace_rawTransaction | - | not yet implemented (come help!) | +| trace_replayBlockTransactions | yes | stateDiff only (come help!) | +| trace_replayTransaction | yes | stateDiff only (come help!) 
| +| trace_block | Yes | | +| trace_filter | Yes | no pagination, but streaming | +| trace_get | Yes | | +| trace_transaction | Yes | | +| | | | +| txpool_content | Yes | `remote` | +| txpool_status | Yes | `remote` | +| | | | +| eth_getCompilers | No | deprecated | +| eth_compileLLL | No | deprecated | +| eth_compileSolidity | No | deprecated | +| eth_compileSerpent | No | deprecated | +| | | | +| db_putString | No | deprecated | +| db_getString | No | deprecated | +| db_putHex | No | deprecated | +| db_getHex | No | deprecated | +| | | | +| erigon_getHeaderByHash | Yes | Erigon only | +| erigon_getHeaderByNumber | Yes | Erigon only | +| erigon_getLogsByHash | Yes | Erigon only | +| erigon_forks | Yes | Erigon only | +| erigon_issuance | Yes | Erigon only | +| erigon_GetBlockByTimestamp | Yes | Erigon only | +| | | | +| starknet_call | Yes | Starknet only | +| | | | +| bor_getSnapshot | Yes | Bor only | +| bor_getAuthor | Yes | Bor only | +| bor_getSnapshotAtHash | Yes | Bor only | +| bor_getSigners | Yes | Bor only | +| bor_getSignersAtHash | Yes | Bor only | +| bor_getCurrentProposer | Yes | Bor only | +| bor_getCurrentValidators | Yes | Bor only | +| bor_getRootHash | Yes | Bor only | This table is constantly updated. Please visit again. From 4897f03cbde4e87106ec0374340ca8b9e42532d7 Mon Sep 17 00:00:00 2001 From: ValValu <98066337+ValValu@users.noreply.github.com> Date: Mon, 27 Jun 2022 23:10:45 +1200 Subject: [PATCH 110/136] Update Readme re http=false (#4550) if you wish to separate node from rpcdaemon need to launch with -http=false or rpcdaemon will not launch as port occupied by erigon --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 08acb532ea3..83de5de31b1 100644 --- a/README.md +++ b/README.md @@ -274,7 +274,7 @@ socket connection to pass data between them. To use this mode, run Erigon in one ```sh make erigon -./build/bin/erigon --private.api.addr=localhost:9090 +./build/bin/erigon --private.api.addr=localhost:9090 --http=false make rpcdaemon ./build/bin/rpcdaemon --private.api.addr=localhost:9090 --http.api=eth,erigon,web3,net,debug,trace,txpool ``` From 588c2d4e36b68a59580154fc20f4c3e75ad36623 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Mon, 27 Jun 2022 15:59:54 +0200 Subject: [PATCH 111/136] Implemented side forks support (up to depth of 128). (#4514) * side forks * fixed stuff * lint * update go.mod and go.sum * cmd * added comment * better validatePayload * fixed empty payload sometimes * support future side forks * added crit error handle * fix compile err * lint --- core/blockchain.go | 4 - eth/backend.go | 5 +- eth/stagedsync/default_stages.go | 51 +++++++++- eth/stagedsync/stage.go | 2 +- eth/stagedsync/stage_headers.go | 29 ++++-- turbo/stages/headerdownload/header_algos.go | 85 +++++++++++++++-- .../headerdownload/header_data_struct.go | 7 ++ turbo/stages/stageloop.go | 93 ++++++++++++------- 8 files changed, 214 insertions(+), 62 deletions(-) diff --git a/core/blockchain.go b/core/blockchain.go index e32d8de9aa8..b102ae22c15 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -48,10 +48,6 @@ const ( TriesInMemory = 128 ) -// statsReportLimit is the time limit during import and export after which we -// always print out progress. This avoids the user wondering what's going on. 
-const statsReportLimit = 8 * time.Second - // ExecuteBlockEphemerally runs a block from provided stateReader and // writes the result to the provided stateWriter func ExecuteBlockEphemerallyForBSC( diff --git a/eth/backend.go b/eth/backend.go index 7118a832691..d85647cde94 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -396,13 +396,14 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere return block, nil } - inMemoryExecution := func(batch kv.RwTx, header *types.Header, body *types.RawBody) error { + inMemoryExecution := func(batch kv.RwTx, header *types.Header, body *types.RawBody, unwindPoint uint64, headersChain []*types.Header, bodiesChain []*types.RawBody) error { stateSync, err := stages2.NewInMemoryExecution(backend.sentryCtx, backend.log, backend.chainDB, *config, backend.sentriesClient, tmpdir, backend.notifications, allSnapshots) if err != nil { return err } // We start the mining step - if err := stages2.StateStep(ctx, batch, stateSync, blockReader, header, body); err != nil { + if err := stages2.StateStep(ctx, batch, stateSync, blockReader, header, body, unwindPoint, headersChain, bodiesChain); err != nil { + log.Warn("Could not validate block", "err", err) return err } return nil diff --git a/eth/stagedsync/default_stages.go b/eth/stagedsync/default_stages.go index 3317d056a2e..359f7aed023 100644 --- a/eth/stagedsync/default_stages.go +++ b/eth/stagedsync/default_stages.go @@ -228,14 +228,38 @@ func DefaultStages(ctx context.Context, sm prune.Mode, headers HeadersCfg, cumul } } -func StateStages(ctx context.Context, blockHashCfg BlockHashesCfg, senders SendersCfg, exec ExecuteBlockCfg, hashState HashStateCfg, trieCfg TrieCfg) []*Stage { +// StateStages are all stages necessary for basic unwind and stage computation, it is primarly used to process side forks and memory execution. 
+func StateStages(ctx context.Context, headers HeadersCfg, bodies BodiesCfg, blockHashCfg BlockHashesCfg, senders SendersCfg, exec ExecuteBlockCfg, hashState HashStateCfg, trieCfg TrieCfg) []*Stage { return []*Stage{ + { + ID: stages.Headers, + Description: "Download headers", + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + return nil + }, + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { + return HeadersUnwind(u, s, tx, headers, false) + }, + }, { ID: stages.BlockHashes, Description: "Write block hashes", Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { return SpawnBlockHashStage(s, tx, blockHashCfg, ctx) }, + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { + return UnwindBlockHashStage(u, tx, blockHashCfg, ctx) + }, + }, + { + ID: stages.Bodies, + Description: "Download block bodies", + Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { + return nil + }, + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { + return UnwindBodiesStage(u, tx, bodies, ctx) + }, }, { ID: stages.Senders, @@ -243,6 +267,9 @@ func StateStages(ctx context.Context, blockHashCfg BlockHashesCfg, senders Sende Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { return SpawnRecoverSendersStage(senders, s, u, tx, 0, ctx) }, + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { + return UnwindSendersStage(u, tx, senders, ctx) + }, }, { ID: stages.Execution, @@ -250,6 +277,9 @@ func StateStages(ctx context.Context, blockHashCfg BlockHashesCfg, senders Sende Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { return SpawnExecuteBlocksStage(s, u, tx, 0, ctx, exec, firstCycle) }, + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { + return UnwindExecutionStage(u, s, tx, ctx, exec, firstCycle) + }, }, { ID: stages.HashState, @@ -257,6 +287,9 @@ func StateStages(ctx context.Context, blockHashCfg BlockHashesCfg, senders Sende Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { return SpawnHashStateStage(s, tx, hashState, ctx) }, + Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { + return UnwindHashStateStage(u, s, tx, hashState, ctx) + }, }, { ID: stages.IntermediateHashes, @@ -266,10 +299,7 @@ func StateStages(ctx context.Context, blockHashCfg BlockHashesCfg, senders Sende return err }, Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { - return nil - }, - Prune: func(firstCycle bool, p *PruneState, tx kv.RwTx) error { - return nil + return UnwindIntermediateHashesStage(u, s, tx, trieCfg, ctx) }, }, } @@ -322,6 +352,17 @@ var DefaultUnwindOrder = UnwindOrder{ stages.Headers, } +var StateUnwindOrder = UnwindOrder{ + // Unwinding of IHashes needs to happen after unwinding HashState + stages.HashState, + stages.IntermediateHashes, + stages.Execution, + stages.Senders, + stages.Bodies, + stages.BlockHashes, + stages.Headers, +} + var DefaultPruneOrder = PruneOrder{ stages.Finish, stages.TxLookup, diff --git a/eth/stagedsync/stage.go b/eth/stagedsync/stage.go index d483c776720..9ed5212aa52 100644 --- a/eth/stagedsync/stage.go +++ b/eth/stagedsync/stage.go @@ -14,7 +14,7 @@ import ( "github.com/ledgerwatch/log/v3" ) -type 
ExecutePayloadFunc func(batch kv.RwTx, header *types.Header, body *types.RawBody) error +type ExecutePayloadFunc func(batch kv.RwTx, header *types.Header, body *types.RawBody, unwindPoint uint64, headersChain []*types.Header, bodiesChain []*types.RawBody) error // ExecFunc is the execution function for the stage to move forward. // * state - is the current state of the stage and contains stage data. diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 79ea6ff4028..ad0f8f3dd47 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -20,7 +20,6 @@ import ( "github.com/ledgerwatch/erigon/cmd/downloader/downloadergrpc" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/dbutils" - "github.com/ledgerwatch/erigon/consensus/misc" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/ethdb/privateapi" @@ -571,21 +570,33 @@ func verifyAndSaveNewPoSHeader( // Side chain or something weird // TODO(yperbasis): considered non-canonical because some missing headers were downloaded but not canonized // Or it's not a problem because forkChoice is updated frequently? - + if cfg.memoryOverlay { + status, validationError, criticalError := cfg.hd.ValidatePayload(tx, header, body, false, cfg.execPayload) + if criticalError != nil { + return &privateapi.PayloadStatus{CriticalError: criticalError}, false, criticalError + } + success = status == remote.EngineStatus_VALID || status == remote.EngineStatus_ACCEPTED + return &privateapi.PayloadStatus{ + Status: status, + LatestValidHash: currentHeadHash, + ValidationError: validationError, + }, success, nil + } // No canonization, HeadHeaderHash & StageProgress are not updated return &privateapi.PayloadStatus{Status: remote.EngineStatus_ACCEPTED}, true, nil } if cfg.memoryOverlay && (cfg.hd.GetNextForkHash() == (common.Hash{}) || header.ParentHash == cfg.hd.GetNextForkHash()) { - if err = cfg.hd.ValidatePayload(tx, header, body, cfg.execPayload); err != nil { - return &privateapi.PayloadStatus{Status: remote.EngineStatus_INVALID}, false, nil + status, validationError, criticalError := cfg.hd.ValidatePayload(tx, header, body, true, cfg.execPayload) + if criticalError != nil { + return &privateapi.PayloadStatus{CriticalError: criticalError}, false, criticalError } - pendingBaseFee := misc.CalcBaseFee(cfg.notifications.Accumulator.ChainConfig(), header) - cfg.notifications.Accumulator.SendAndReset(context.Background(), cfg.notifications.StateChangesConsumer, pendingBaseFee.Uint64(), header.GasLimit) + success = status == remote.EngineStatus_VALID || status == remote.EngineStatus_ACCEPTED return &privateapi.PayloadStatus{ - Status: remote.EngineStatus_VALID, - LatestValidHash: headerHash, - }, true, nil + Status: status, + LatestValidHash: currentHeadHash, + ValidationError: validationError, + }, success, nil } // OK, we're on the canonical chain diff --git a/turbo/stages/headerdownload/header_algos.go b/turbo/stages/headerdownload/header_algos.go index c4731b698cb..2fe3b984551 100644 --- a/turbo/stages/headerdownload/header_algos.go +++ b/turbo/stages/headerdownload/header_algos.go @@ -15,6 +15,7 @@ import ( "time" "github.com/ledgerwatch/erigon-lib/etl" + "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/memdb" "github.com/ledgerwatch/erigon/turbo/services" @@ -1085,16 +1086,86 @@ func (hd *HeaderDownload) SetHeadersCollector(collector 
*etl.Collector) { hd.headersCollector = collector } -func (hd *HeaderDownload) ValidatePayload(tx kv.RwTx, header *types.Header, body *types.RawBody, execPayload func(batch kv.RwTx, header *types.Header, body *types.RawBody) error) error { +func abs64(n int64) uint64 { + if n < 0 { + return uint64(-n) + } + return uint64(n) +} + +func (hd *HeaderDownload) ValidatePayload(tx kv.RwTx, header *types.Header, body *types.RawBody, store bool, execPayload func(kv.RwTx, *types.Header, *types.RawBody, uint64, []*types.Header, []*types.RawBody) error) (status remote.EngineStatus, validationError error, criticalError error) { hd.lock.Lock() defer hd.lock.Unlock() - if hd.nextForkState == nil { - hd.nextForkState = memdb.NewMemoryBatch(tx) - } else { - hd.nextForkState.UpdateTxn(tx) + maxDepth := uint64(16) + if store { + // If it is a continuation of the canonical chain we can stack it up. + if hd.nextForkState == nil { + hd.nextForkState = memdb.NewMemoryBatch(tx) + } else { + hd.nextForkState.UpdateTxn(tx) + } + hd.nextForkHash = header.Hash() + status = remote.EngineStatus_VALID + // Let's assemble the side fork chain if we have others building. + validationError = execPayload(hd.nextForkState, header, body, 0, nil, nil) + if validationError != nil { + status = remote.EngineStatus_INVALID + } + return } - hd.nextForkHash = header.Hash() - return execPayload(hd.nextForkState, header, body) + currentHeight := rawdb.ReadCurrentBlockNumber(tx) + if currentHeight == nil { + criticalError = fmt.Errorf("could not read block number.") + return + } + // if the block is not in range of MAX_DEPTH from head then we do not validate it. + if abs64(int64(*currentHeight)-header.Number.Int64()) > maxDepth { + status = remote.EngineStatus_ACCEPTED + return + } + // if it is not canonical we validate it as a side fork. + batch := memdb.NewMemoryBatch(tx) + // Let's assemble the side fork backwards + var foundCanonical bool + currentHash := header.ParentHash + foundCanonical, criticalError = rawdb.IsCanonicalHash(tx, currentHash) + if criticalError != nil { + return + } + + var bodiesChain []*types.RawBody + var headersChain []*types.Header + unwindPoint := header.Number.Uint64() - 1 + for !foundCanonical { + var sb sideForkBlock + var ok bool + if sb, ok = hd.sideForksBlock[currentHash]; !ok { + // We miss some components so we did not check validity. 
+ status = remote.EngineStatus_ACCEPTED + return + } + headersChain = append(headersChain, sb.header) + bodiesChain = append(bodiesChain, sb.body) + currentHash = sb.header.ParentHash + foundCanonical, criticalError = rawdb.IsCanonicalHash(tx, currentHash) + if criticalError != nil { + return + } + unwindPoint = sb.header.Number.Uint64() - 1 + } + hd.sideForksBlock[header.Hash()] = sideForkBlock{header, body} + status = remote.EngineStatus_VALID + validationError = execPayload(batch, header, body, unwindPoint, headersChain, bodiesChain) + if validationError != nil { + status = remote.EngineStatus_INVALID + } + // After the we finished executing, we clean up old forks + for hash, sb := range hd.sideForksBlock { + if abs64(int64(*currentHeight)-sb.header.Number.Int64()) > maxDepth { + delete(hd.sideForksBlock, hash) + } + } + return } func (hd *HeaderDownload) FlushNextForkState(tx kv.RwTx) error { diff --git a/turbo/stages/headerdownload/header_data_struct.go b/turbo/stages/headerdownload/header_data_struct.go index 54b3bee1913..f3609309a62 100644 --- a/turbo/stages/headerdownload/header_data_struct.go +++ b/turbo/stages/headerdownload/header_data_struct.go @@ -272,10 +272,16 @@ type Stats struct { RespMaxBlock uint64 } +type sideForkBlock struct { + header *types.Header + body *types.RawBody +} + type HeaderDownload struct { badHeaders map[common.Hash]struct{} anchors map[common.Hash]*Anchor // Mapping from parentHash to collection of anchors links map[common.Hash]*Link // Links by header hash + sideForksBlock map[common.Hash]sideForkBlock engine consensus.Engine insertQueue InsertQueue // Priority queue of non-persisted links that need to be verified and can be inserted seenAnnounces *SeenAnnounces // External announcement hashes, after header verification if hash is in this set - will broadcast it further @@ -347,6 +353,7 @@ func NewHeaderDownload( PayloadStatusCh: make(chan privateapi.PayloadStatus, 1), headerReader: headerReader, badPoSHeaders: make(map[common.Hash]common.Hash), + sideForksBlock: make(map[common.Hash]sideForkBlock), } heap.Init(&hd.persistedLinkQueue) heap.Init(&hd.linkQueue) diff --git a/turbo/stages/stageloop.go b/turbo/stages/stageloop.go index 81d3732a407..027bde40fbb 100644 --- a/turbo/stages/stageloop.go +++ b/turbo/stages/stageloop.go @@ -126,6 +126,7 @@ func StageLoopStep( } }() // avoid crash because Erigon's core does many things + var prevHeadBlockHash common.Hash var origin, finishProgressBefore uint64 if err := db.View(ctx, func(tx kv.Tx) error { origin, err = stages.GetStageProgress(tx, stages.Headers) @@ -136,6 +137,7 @@ func StageLoopStep( if err != nil { return err } + prevHeadBlockHash = rawdb.ReadHeadBlockHash(tx) return nil }); err != nil { return headBlockHash, err @@ -205,7 +207,7 @@ func StageLoopStep( } if notifications != nil && notifications.Accumulator != nil { header := rawdb.ReadCurrentHeader(rotx) - if header != nil && header.Number.Uint64() != finishProgressBefore { + if header != nil && headBlockHash != prevHeadBlockHash { pendingBaseFee := misc.CalcBaseFee(notifications.Accumulator.ChainConfig(), header) if header.Number.Uint64() == 0 { @@ -245,7 +247,7 @@ func MiningStep(ctx context.Context, kv kv.RwDB, mining *stagedsync.Sync) (err e return nil } -func StateStep(ctx context.Context, batch kv.RwTx, stateSync *stagedsync.Sync, headerReader services.FullBlockReader, header *types.Header, body *types.RawBody) (err error) { +func StateStep(ctx context.Context, batch kv.RwTx, stateSync *stagedsync.Sync, headerReader 
services.FullBlockReader, header *types.Header, body *types.RawBody, unwindPoint uint64, headersChain []*types.Header, bodiesChain []*types.RawBody) (err error) { // Setup height := header.Number.Uint64() hash := header.Hash() @@ -256,6 +258,32 @@ func StateStep(ctx context.Context, batch kv.RwTx, stateSync *stagedsync.Sync, h } }() // avoid crash because Erigon's core does many things + // Construct side fork if we have one + if unwindPoint > 0 { + // Run it through the unwind + stateSync.UnwindTo(unwindPoint, common.Hash{}) + if err = stateSync.Run(nil, batch, false); err != nil { + return err + } + // Once we unwond we can start constructing the chain (assumption: len(headersChain) == len(bodiesChain)) + for i := range headersChain { + currentHeader := headersChain[i] + currentBody := bodiesChain[i] + currentHeight := headersChain[i].Number.Uint64() + currentHash := headersChain[i].Hash() + // Prepare memory state for block execution + if err = rawdb.WriteRawBodyIfNotExists(batch, currentHash, currentHeight, currentBody); err != nil { + return err + } + rawdb.WriteHeader(batch, currentHeader) + if err = rawdb.WriteHeaderNumber(batch, currentHash, currentHeight); err != nil { + return err + } + if err = rawdb.WriteCanonicalHash(batch, currentHash, currentHeight); err != nil { + return err + } + } + } // Prepare memory state for block execution if err = rawdb.WriteRawBodyIfNotExists(batch, hash, height, body); err != nil { return err @@ -282,36 +310,6 @@ func StateStep(ctx context.Context, batch kv.RwTx, stateSync *stagedsync.Sync, h return err } - if height == 0 { - return nil - } - ancestorHash := hash - ancestorHeight := height - - var ch common.Hash - for ch, err = headerReader.CanonicalHash(context.Background(), batch, ancestorHeight); err == nil && ch != ancestorHash; ch, err = headerReader.CanonicalHash(context.Background(), batch, ancestorHeight) { - if err = rawdb.WriteCanonicalHash(batch, ancestorHash, ancestorHeight); err != nil { - return fmt.Errorf("marking canonical header %d %x: %w", ancestorHeight, ancestorHash, err) - } - - ancestor, err := headerReader.Header(context.Background(), batch, ancestorHash, ancestorHeight) - if err != nil { - return err - } - if ancestor == nil { - return fmt.Errorf("ancestor is nil. 
height %d, hash %x", ancestorHeight, ancestorHash) - } - - select { - default: - } - ancestorHash = ancestor.ParentHash - ancestorHeight-- - } - if err != nil { - return fmt.Errorf("reading canonical hash for %d: %w", ancestorHeight, err) - } - // Run state sync if err = stateSync.Run(nil, batch, false); err != nil { return err @@ -418,7 +416,34 @@ func NewInMemoryExecution(ctx context.Context, logger log.Logger, db kv.RwDB, cf return stagedsync.New( stagedsync.StateStages(ctx, - stagedsync.StageBlockHashesCfg(db, tmpdir, controlServer.ChainConfig), + stagedsync.StageHeadersCfg( + db, + controlServer.Hd, + controlServer.Bd, + *controlServer.ChainConfig, + controlServer.SendHeaderRequest, + controlServer.PropagateNewBlockHashes, + controlServer.Penalize, + cfg.BatchSize, + false, + cfg.MemoryOverlay, + snapshots, + nil, + blockReader, + tmpdir, + notifications.Events, + nil, nil), stagedsync.StageBodiesCfg( + db, + controlServer.Bd, + controlServer.SendBodyRequest, + controlServer.Penalize, + controlServer.BroadcastNewBlock, + cfg.Sync.BodyDownloadTimeoutSeconds, + *controlServer.ChainConfig, + cfg.BatchSize, + snapshots, + blockReader, + ), stagedsync.StageBlockHashesCfg(db, tmpdir, controlServer.ChainConfig), stagedsync.StageSendersCfg(db, controlServer.ChainConfig, tmpdir, cfg.Prune, nil), stagedsync.StageExecuteBlocksCfg( db, @@ -436,7 +461,7 @@ func NewInMemoryExecution(ctx context.Context, logger log.Logger, db kv.RwDB, cf ), stagedsync.StageHashStateCfg(db, tmpdir), stagedsync.StageTrieCfg(db, true, true, tmpdir, blockReader)), - nil, + stagedsync.StateUnwindOrder, nil, ), nil } From b96a3e73ae948d5d010ead06cb9376b78972beb2 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Tue, 28 Jun 2022 01:53:04 +0200 Subject: [PATCH 112/136] ttd sepolia (#4552) --- params/chainspecs/sepolia.json | 1 + 1 file changed, 1 insertion(+) diff --git a/params/chainspecs/sepolia.json b/params/chainspecs/sepolia.json index e349c2525ec..ec4f7538390 100644 --- a/params/chainspecs/sepolia.json +++ b/params/chainspecs/sepolia.json @@ -14,6 +14,7 @@ "muirGlacierBlock": 0, "berlinBlock": 0, "londonBlock": 0, + "terminalTotalDifficulty": 17000000000000000, "terminalBlockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", "ethash": {} } From 5b42a6e8e835a90405b26c033c71c019a7117f6f Mon Sep 17 00:00:00 2001 From: Levi Aul Date: Mon, 27 Jun 2022 20:49:51 -0700 Subject: [PATCH 113/136] Pass context to engines that perform async operations (#4531) * Configure consensus engine with context of stage if engine will do async work * Change API to make setting of context for AsyncEngine multithreaded-safe * Ensure lock gets inherited by reference * Fix linter errors --- consensus/bor/bor.go | 21 ++++++++++++++++----- consensus/bor/rest.go | 33 +++++++++++++++++++++------------ consensus/consensus.go | 7 +++++++ eth/stagedsync/stage_execute.go | 24 +++++++++++++++++------- 4 files changed, 61 insertions(+), 24 deletions(-) diff --git a/consensus/bor/bor.go b/consensus/bor/bor.go index 35b457ef40a..18e28a3f6b6 100644 --- a/consensus/bor/bor.go +++ b/consensus/bor/bor.go @@ -2,6 +2,7 @@ package bor import ( "bytes" + "context" "encoding/hex" "encoding/json" "errors" @@ -215,7 +216,9 @@ type Bor struct { signer common.Address // Ethereum address of the signing key signFn SignerFn // Signer function to authorize hashes with - lock sync.RWMutex // Protects the signer fields + lock *sync.RWMutex // Protects the signer fields + + execCtx context.Context // context of caller execution stage 
GenesisContractsClient *GenesisContractsClient validatorSetABI abi.ABI @@ -263,6 +266,8 @@ func New( HeimdallClient: heimdallClient, WithoutHeimdall: withoutHeimdall, spanCache: btree.New(32), + execCtx: context.Background(), + lock: &sync.RWMutex{}, } // make sure we can decode all the GenesisAlloc in the BorConfig. @@ -292,6 +297,12 @@ func (c *Bor) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Head return c.verifyHeader(chain, header, nil) } +func (c *Bor) WithExecutionContext(ctx context.Context) *Bor { + subclient := *c + subclient.execCtx = ctx + return &subclient +} + // verifyHeader checks whether a header conforms to the consensus rules.The // caller may optionally pass in a batch of parents (ascending order) to avoid // looking those up from the database. This is useful for concurrently verifying @@ -1056,7 +1067,7 @@ func (c *Bor) getSpanForBlock(blockNum uint64) (*HeimdallSpan, error) { for span == nil || span.EndBlock < blockNum { var heimdallSpan HeimdallSpan log.Info("Span with high enough block number is not loaded", "fetching span", spanID) - response, err := c.HeimdallClient.FetchWithRetry(fmt.Sprintf("bor/span/%d", spanID), "") + response, err := c.HeimdallClient.FetchWithRetry(c.execCtx, fmt.Sprintf("bor/span/%d", spanID), "") if err != nil { return nil, err } @@ -1073,7 +1084,7 @@ func (c *Bor) getSpanForBlock(blockNum uint64) (*HeimdallSpan, error) { var spanID uint64 = span.ID - 1 var heimdallSpan HeimdallSpan log.Info("Span with low enough block number is not loaded", "fetching span", spanID) - response, err := c.HeimdallClient.FetchWithRetry(fmt.Sprintf("bor/span/%d", spanID), "") + response, err := c.HeimdallClient.FetchWithRetry(c.execCtx, fmt.Sprintf("bor/span/%d", spanID), "") if err != nil { return nil, err } @@ -1106,7 +1117,7 @@ func (c *Bor) fetchAndCommitSpan( } heimdallSpan = *s } else { - response, err := c.HeimdallClient.FetchWithRetry(fmt.Sprintf("bor/span/%d", newSpanID), "") + response, err := c.HeimdallClient.FetchWithRetry(c.execCtx, fmt.Sprintf("bor/span/%d", newSpanID), "") if err != nil { return err } @@ -1193,7 +1204,7 @@ func (c *Bor) CommitStates( "Fetching state updates from Heimdall", "fromID", lastStateID+1, "to", to.Format(time.RFC3339)) - eventRecords, err := c.HeimdallClient.FetchStateSyncEvents(lastStateID+1, to.Unix()) + eventRecords, err := c.HeimdallClient.FetchStateSyncEvents(c.execCtx, lastStateID+1, to.Unix()) if err != nil { return nil, err diff --git a/consensus/bor/rest.go b/consensus/bor/rest.go index 55057767d31..0a9954f84f4 100644 --- a/consensus/bor/rest.go +++ b/consensus/bor/rest.go @@ -1,6 +1,7 @@ package bor import ( + "context" "encoding/json" "fmt" "io" @@ -24,9 +25,9 @@ type ResponseWithHeight struct { } type IHeimdallClient interface { - Fetch(path string, query string) (*ResponseWithHeight, error) - FetchWithRetry(path string, query string) (*ResponseWithHeight, error) - FetchStateSyncEvents(fromID uint64, to int64) ([]*EventRecordWithTime, error) + Fetch(ctx context.Context, path string, query string) (*ResponseWithHeight, error) + FetchWithRetry(ctx context.Context, path string, query string) (*ResponseWithHeight, error) + FetchStateSyncEvents(ctx context.Context, fromID uint64, to int64) ([]*EventRecordWithTime, error) } type HeimdallClient struct { @@ -44,12 +45,12 @@ func NewHeimdallClient(urlString string) (*HeimdallClient, error) { return h, nil } -func (h *HeimdallClient) FetchStateSyncEvents(fromID uint64, to int64) ([]*EventRecordWithTime, error) { +func (h *HeimdallClient) 
FetchStateSyncEvents(ctx context.Context, fromID uint64, to int64) ([]*EventRecordWithTime, error) { eventRecords := make([]*EventRecordWithTime, 0) for { queryParams := fmt.Sprintf("from-id=%d&to-time=%d&limit=%d", fromID, to, stateFetchLimit) log.Trace("Fetching state sync events", "queryParams", queryParams) - response, err := h.FetchWithRetry("clerk/event-record/list", queryParams) + response, err := h.FetchWithRetry(ctx, "clerk/event-record/list", queryParams) if err != nil { return nil, err } @@ -74,7 +75,7 @@ func (h *HeimdallClient) FetchStateSyncEvents(fromID uint64, to int64) ([]*Event } // Fetch fetches response from heimdall -func (h *HeimdallClient) Fetch(rawPath string, rawQuery string) (*ResponseWithHeight, error) { +func (h *HeimdallClient) Fetch(ctx context.Context, rawPath string, rawQuery string) (*ResponseWithHeight, error) { u, err := url.Parse(h.urlString) if err != nil { return nil, err @@ -83,11 +84,11 @@ func (h *HeimdallClient) Fetch(rawPath string, rawQuery string) (*ResponseWithHe u.Path = rawPath u.RawQuery = rawQuery - return h.internalFetch(u) + return h.internalFetch(ctx, u) } // FetchWithRetry returns data from heimdall with retry -func (h *HeimdallClient) FetchWithRetry(rawPath string, rawQuery string) (*ResponseWithHeight, error) { +func (h *HeimdallClient) FetchWithRetry(ctx context.Context, rawPath string, rawQuery string) (*ResponseWithHeight, error) { u, err := url.Parse(h.urlString) if err != nil { return nil, err @@ -97,18 +98,26 @@ func (h *HeimdallClient) FetchWithRetry(rawPath string, rawQuery string) (*Respo u.RawQuery = rawQuery for { - res, err := h.internalFetch(u) + res, err := h.internalFetch(ctx, u) if err == nil && res != nil { return res, nil } log.Info("Retrying again in 5 seconds for next Heimdall span", "path", u.Path) - time.Sleep(5 * time.Second) + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-time.After(5 * time.Second): + } } } // internal fetch method -func (h *HeimdallClient) internalFetch(u *url.URL) (*ResponseWithHeight, error) { - res, err := h.client.Get(u.String()) +func (h *HeimdallClient) internalFetch(ctx context.Context, u *url.URL) (*ResponseWithHeight, error) { + req, err := http.NewRequestWithContext(ctx, "GET", u.String(), nil) + if err != nil { + return nil, err + } + res, err := h.client.Do(req) if err != nil { return nil, err } diff --git a/consensus/consensus.go b/consensus/consensus.go index 044cc5218d3..7f5dfdee9c3 100644 --- a/consensus/consensus.go +++ b/consensus/consensus.go @@ -18,6 +18,7 @@ package consensus import ( + "context" "math/big" "github.com/ledgerwatch/erigon/common" @@ -161,3 +162,9 @@ type PoSA interface { IsLocalBlock(header *types.Header) bool AllowLightProcess(chain ChainReader, currentHeader *types.Header) bool } + +type AsyncEngine interface { + Engine + + WithExecutionContext(context.Context) AsyncEngine +} diff --git a/eth/stagedsync/stage_execute.go b/eth/stagedsync/stage_execute.go index 128ade1be52..200f95d6f0f 100644 --- a/eth/stagedsync/stage_execute.go +++ b/eth/stagedsync/stage_execute.go @@ -3,6 +3,7 @@ package stagedsync import ( "context" "encoding/binary" + "errors" "fmt" "math/big" "runtime" @@ -102,6 +103,7 @@ func executeBlock( writeCallTraces bool, contractHasTEVM func(contractHash commonold.Hash) (bool, error), initialCycle bool, + effectiveEngine consensus.Engine, ) error { blockNum := block.NumberU64() stateReader, stateWriter, err := newStateReaderWriter(batch, tx, block, writeChangesets, cfg.accumulator, initialCycle, cfg.stateStream) @@ -121,11 
+123,11 @@ func executeBlock( var receipts types.Receipts var stateSyncReceipt *types.ReceiptForStorage - _, isPoSa := cfg.engine.(consensus.PoSA) + _, isPoSa := effectiveEngine.(consensus.PoSA) if isPoSa { - receipts, err = core.ExecuteBlockEphemerallyForBSC(cfg.chainConfig, &vmConfig, getHeader, cfg.engine, block, stateReader, stateWriter, epochReader{tx: tx}, chainReader{config: cfg.chainConfig, tx: tx, blockReader: cfg.blockReader}, contractHasTEVM) + receipts, err = core.ExecuteBlockEphemerallyForBSC(cfg.chainConfig, &vmConfig, getHeader, effectiveEngine, block, stateReader, stateWriter, epochReader{tx: tx}, chainReader{config: cfg.chainConfig, tx: tx, blockReader: cfg.blockReader}, contractHasTEVM) } else { - receipts, stateSyncReceipt, err = core.ExecuteBlockEphemerally(cfg.chainConfig, &vmConfig, getHeader, cfg.engine, block, stateReader, stateWriter, epochReader{tx: tx}, chainReader{config: cfg.chainConfig, tx: tx, blockReader: cfg.blockReader}, contractHasTEVM) + receipts, stateSyncReceipt, err = core.ExecuteBlockEphemerally(cfg.chainConfig, &vmConfig, getHeader, effectiveEngine, block, stateReader, stateWriter, epochReader{tx: tx}, chainReader{config: cfg.chainConfig, tx: tx, blockReader: cfg.blockReader}, contractHasTEVM) } if err != nil { return err @@ -250,6 +252,12 @@ func SpawnExecuteBlocksStage(s *StageState, u Unwinder, tx kv.RwTx, toBlock uint return err } var stoppedErr error + + effectiveEngine := cfg.engine + if asyncEngine, ok := effectiveEngine.(consensus.AsyncEngine); ok { + asyncEngine = asyncEngine.WithExecutionContext(ctx) + effectiveEngine = asyncEngine.(consensus.Engine) + } Loop: for blockNum := stageProgress + 1; blockNum <= to; blockNum++ { if stoppedErr = common.Stopped(quit); stoppedErr != nil { @@ -281,10 +289,12 @@ Loop: writeChangeSets := nextStagesExpectData || blockNum > cfg.prune.History.PruneTo(to) writeReceipts := nextStagesExpectData || blockNum > cfg.prune.Receipts.PruneTo(to) writeCallTraces := nextStagesExpectData || blockNum > cfg.prune.CallTraces.PruneTo(to) - if err = executeBlock(block, tx, batch, cfg, *cfg.vmConfig, writeChangeSets, writeReceipts, writeCallTraces, contractHasTEVM, initialCycle); err != nil { - log.Warn(fmt.Sprintf("[%s] Execution failed", logPrefix), "block", blockNum, "hash", block.Hash().String(), "err", err) - if cfg.badBlockHalt { - return err + if err = executeBlock(block, tx, batch, cfg, *cfg.vmConfig, writeChangeSets, writeReceipts, writeCallTraces, contractHasTEVM, initialCycle, effectiveEngine); err != nil { + if !errors.Is(err, context.Canceled) { + log.Warn(fmt.Sprintf("[%s] Execution failed", logPrefix), "block", blockNum, "hash", block.Hash().String(), "err", err) + if cfg.badBlockHalt { + return err + } } u.UnwindTo(blockNum-1, block.Hash()) break Loop From 359ae889b801ad27843c25aed12282abe7efaf89 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Tue, 28 Jun 2022 10:19:57 +0600 Subject: [PATCH 114/136] don't crush on p2p message handling panic (#4557) * save * save --- cmd/sentry/sentry/sentry_multi_client.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/cmd/sentry/sentry/sentry_multi_client.go b/cmd/sentry/sentry/sentry_multi_client.go index efa5f9c871b..1359398c43c 100644 --- a/cmd/sentry/sentry/sentry_multi_client.go +++ b/cmd/sentry/sentry/sentry_multi_client.go @@ -630,8 +630,14 @@ func makeInboundMessage() *proto_sentry.InboundMessage { return new(proto_sentry.InboundMessage) } -func (cs *MultiClient) HandleInboundMessage(ctx context.Context, message 
*proto_sentry.InboundMessage, sentry direct.SentryClient) error { - err := cs.handleInboundMessage(ctx, message, sentry) +func (cs *MultiClient) HandleInboundMessage(ctx context.Context, message *proto_sentry.InboundMessage, sentry direct.SentryClient) (err error) { + defer func() { + if rec := recover(); rec != nil { + err = fmt.Errorf("%+v, msgID=%s, trace: %s", rec, message.Id.String(), dbg.Stack()) + } + }() // avoid crash because Erigon's core does many things + + err = cs.handleInboundMessage(ctx, message, sentry) if (err != nil) && rlp.IsInvalidRLPError(err) { log.Debug("Kick peer for invalid RLP", "err", err) From 538b4fea6ca1b30594a6977e92b37090c35d0ecc Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Tue, 28 Jun 2022 10:31:44 +0600 Subject: [PATCH 115/136] Snapshots: don't panic after too far reset (#4558) * save * save * save * save --- turbo/snapshotsync/block_reader.go | 34 ++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/turbo/snapshotsync/block_reader.go b/turbo/snapshotsync/block_reader.go index f467a34520a..c5c5ef2d365 100644 --- a/turbo/snapshotsync/block_reader.go +++ b/turbo/snapshotsync/block_reader.go @@ -6,6 +6,7 @@ import ( "encoding/binary" "fmt" + "github.com/ledgerwatch/erigon-lib/common/dbg" "github.com/ledgerwatch/erigon-lib/gointerfaces" "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" "github.com/ledgerwatch/erigon-lib/kv" @@ -490,6 +491,9 @@ func (back *BlockReaderWithSnapshots) headerFromSnapshot(blockHeight uint64, sn headerOffset := sn.idxHeaderHash.OrdinalLookup(blockHeight - sn.idxHeaderHash.BaseDataID()) gg := sn.seg.MakeGetter() gg.Reset(headerOffset) + if !gg.HasNext() { + return nil, nil, nil + } buf, _ = gg.Next(buf[:0]) if len(buf) == 0 { return nil, buf, nil @@ -506,6 +510,12 @@ func (back *BlockReaderWithSnapshots) headerFromSnapshot(blockHeight uint64, sn // but because our indices are based on PerfectHashMap, no way to know is given key exists or not, only way - // to make sure is to fetch it and compare hash func (back *BlockReaderWithSnapshots) headerFromSnapshotByHash(hash common.Hash, sn *HeaderSegment, buf []byte) (*types.Header, error) { + defer func() { + if rec := recover(); rec != nil { + panic(fmt.Errorf("%+v, snapshot: %d-%d, trace: %s", rec, sn.From, sn.To, dbg.Stack())) + } + }() // avoid crash because Erigon's core does many things + if sn.idxHeaderHash == nil { return nil, nil } @@ -514,6 +524,9 @@ func (back *BlockReaderWithSnapshots) headerFromSnapshotByHash(hash common.Hash, headerOffset := sn.idxHeaderHash.OrdinalLookup(localID) gg := sn.seg.MakeGetter() gg.Reset(headerOffset) + if !gg.HasNext() { + return nil, nil + } buf, _ = gg.Next(buf[:0]) if len(buf) > 1 && hash[0] != buf[0] { return nil, nil @@ -545,6 +558,12 @@ func (back *BlockReaderWithSnapshots) bodyFromSnapshot(blockHeight uint64, sn *B } func (back *BlockReaderWithSnapshots) bodyForStorageFromSnapshot(blockHeight uint64, sn *BodySegment, buf []byte) (*types.BodyForStorage, []byte, error) { + defer func() { + if rec := recover(); rec != nil { + panic(fmt.Errorf("%+v, snapshot: %d-%d, trace: %s", rec, sn.From, sn.To, dbg.Stack())) + } + }() // avoid crash because Erigon's core does many things + if sn.idxBodyNumber == nil { return nil, buf, nil } @@ -552,6 +571,9 @@ func (back *BlockReaderWithSnapshots) bodyForStorageFromSnapshot(blockHeight uin gg := sn.seg.MakeGetter() gg.Reset(bodyOffset) + if !gg.HasNext() { + return nil, nil, nil + } buf, _ = gg.Next(buf[:0]) if len(buf) == 0 { return nil, nil, nil @@ -569,6 +591,12 @@ func 
(back *BlockReaderWithSnapshots) bodyForStorageFromSnapshot(blockHeight uin } func (back *BlockReaderWithSnapshots) txsFromSnapshot(baseTxnID uint64, txsAmount uint32, txsSeg *TxnSegment, buf []byte) (txs []types.Transaction, senders []common.Address, err error) { + defer func() { + if rec := recover(); rec != nil { + panic(fmt.Errorf("%+v, snapshot: %d-%d, trace: %s", rec, txsSeg.From, txsSeg.To, dbg.Stack())) + } + }() // avoid crash because Erigon's core does many things + if txsSeg.IdxTxnHash == nil { return nil, nil, nil } @@ -587,6 +615,9 @@ func (back *BlockReaderWithSnapshots) txsFromSnapshot(baseTxnID uint64, txsAmoun gg.Reset(txnOffset) stream := rlp.NewStream(reader, 0) for i := uint32(0); i < txsAmount; i++ { + if !gg.HasNext() { + return nil, nil, nil + } buf, _ = gg.Next(buf[:0]) if len(buf) < 1+20 { return nil, nil, fmt.Errorf("segment %s has too short record: len(buf)=%d < 21", txsSeg.Seg.FilePath(), len(buf)) @@ -609,6 +640,9 @@ func (back *BlockReaderWithSnapshots) txnByID(txnID uint64, sn *TxnSegment, buf offset := sn.IdxTxnHash.OrdinalLookup(txnID - sn.IdxTxnHash.BaseDataID()) gg := sn.Seg.MakeGetter() gg.Reset(offset) + if !gg.HasNext() { + return nil, nil + } buf, _ = gg.Next(buf[:0]) sender, txnRlp := buf[1:1+20], buf[1+20:] From b315394c481f7244cccc95cbb17071d8c86f051f Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Tue, 28 Jun 2022 13:56:08 +0600 Subject: [PATCH 116/136] eth_estimateGas to use snapshots and blocksLRU (#4556) --- cmd/rpcdaemon/commands/eth_call.go | 30 ++++++------------------------ eth/stagedsync/stage_headers.go | 1 + 2 files changed, 7 insertions(+), 24 deletions(-) diff --git a/cmd/rpcdaemon/commands/eth_call.go b/cmd/rpcdaemon/commands/eth_call.go index 331c4e4ec02..1339bb77333 100644 --- a/cmd/rpcdaemon/commands/eth_call.go +++ b/cmd/rpcdaemon/commands/eth_call.go @@ -13,7 +13,6 @@ import ( "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/hexutil" "github.com/ledgerwatch/erigon/core" - "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/vm" @@ -76,34 +75,17 @@ func (api *APIImpl) Call(ctx context.Context, args ethapi.CallArgs, blockNrOrHas return result.Return(), result.Err } -func HeaderByNumberOrHash(ctx context.Context, tx kv.Tx, blockNrOrHash rpc.BlockNumberOrHash, filters *rpchelper.Filters) (*types.Header, error) { - _, hash, _, err := rpchelper.GetBlockNumber(blockNrOrHash, tx, filters) +// headerByNumberOrHash - intent to read recent headers only +func headerByNumberOrHash(tx kv.Tx, blockNrOrHash rpc.BlockNumberOrHash, api *APIImpl) (*types.Header, error) { + blockNum, _, _, err := rpchelper.GetBlockNumber(blockNrOrHash, tx, api.filters) if err != nil { return nil, err } - header, err := rawdb.ReadHeaderByHash(tx, hash) + block, err := api.blockByNumberWithSenders(tx, blockNum) if err != nil { return nil, err } - if header == nil { - return nil, errors.New("header for hash not found") - } - - if blockNrOrHash.RequireCanonical { - can, err := rawdb.ReadCanonicalHash(tx, header.Number.Uint64()) - if err != nil { - return nil, err - } - if can != hash { - return nil, errors.New("hash is not currently canonical") - } - } - - h := rawdb.ReadHeader(tx, hash, header.Number.Uint64()) - if h == nil { - return nil, errors.New("header found, but block body is missing") - } - return h, nil + return block.Header(), nil } // EstimateGas implements eth_estimateGas. 
Returns an estimate of how much gas is necessary to allow the transaction to complete. The transaction will not be added to the blockchain. @@ -141,7 +123,7 @@ func (api *APIImpl) EstimateGas(ctx context.Context, argsOrNil *ethapi.CallArgs, hi = uint64(*args.Gas) } else { // Retrieve the block to act as the gas ceiling - h, err := HeaderByNumberOrHash(ctx, dbtx, bNrOrHash, api.filters) + h, err := headerByNumberOrHash(dbtx, bNrOrHash, api) if err != nil { return 0, err } diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index ad0f8f3dd47..f8a0142fa8c 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -1180,6 +1180,7 @@ func DownloadAndIndexSnapshotsIfNeed(s *StageState, ctx context.Context, tx kv.R if err := cfg.snapshots.Reopen(); err != nil { return fmt.Errorf("ReopenIndices: %w", err) } + } if cfg.dbEventNotifier != nil { cfg.dbEventNotifier.OnNewSnapshot() From 16e57aa8a2a94a8ec872b088c9c9f43ac818d979 Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Tue, 28 Jun 2022 10:12:48 +0200 Subject: [PATCH 117/136] Switch from eth/66 to eth/67 (#4549) * Switch from eth/66 to eth/67 * Fix a compilation error * Update erigon-lib * Refresh erigon-lib --- DEV_CHAIN.md | 2 +- README.md | 4 +- cmd/observer/observer/handshake.go | 13 +-- cmd/observer/observer/handshake_test.go | 5 +- .../observer/sentry_candidates/log_test.go | 11 ++- cmd/observer/observer/server.go | 7 +- cmd/rpcdaemon/commands/eth_subscribe_test.go | 2 +- .../commands/send_transaction_test.go | 6 +- .../commands/eth_subscribe_test.go | 2 +- .../commands/send_transaction_test.go | 4 +- cmd/sentry/main.go | 4 +- cmd/sentry/sentry/broadcast.go | 65 +++++++------ cmd/sentry/sentry/eth_handshake_test.go | 6 +- cmd/sentry/sentry/sentry_api.go | 8 +- cmd/sentry/sentry/sentry_grpc_server.go | 20 +--- cmd/sentry/sentry/sentry_grpc_server_test.go | 2 +- cmd/sentry/sentry/sentry_multi_client.go | 40 ++++---- cmd/utils/flags.go | 4 +- eth/backend.go | 12 +-- eth/protocols/eth/handler_test.go | 2 +- eth/protocols/eth/protocol.go | 92 +++++++------------ eth/protocols/eth/protocol_test.go | 26 +----- ethdb/privateapi/ethbackend.go | 2 +- go.mod | 2 +- go.sum | 4 +- p2p/dial_test.go | 2 +- turbo/stages/mock_sentry.go | 10 +- turbo/stages/sentry_mock_test.go | 54 +++++------ 28 files changed, 179 insertions(+), 232 deletions(-) diff --git a/DEV_CHAIN.md b/DEV_CHAIN.md index d3510aea3df..cbff468d1a8 100644 --- a/DEV_CHAIN.md +++ b/DEV_CHAIN.md @@ -70,7 +70,7 @@ Open terminal 3 and navigate to erigon/build/bin folder. Paste in the following To check if the nodes are connected, you can go to the log of both the nodes and look for the line - ``` [p2p] GoodPeers eth66=1 ``` + ``` [p2p] GoodPeers eth67=1 ``` Note: this might take a while it is not istantaneus, also if you see a 1 on either one of the two the node is fine. 
diff --git a/README.md b/README.md index 83de5de31b1..2ed20281c4f 100644 --- a/README.md +++ b/README.md @@ -335,7 +335,7 @@ Detailed explanation: [./docs/programmers_guide/db_faq.md](./docs/programmers_gu | Port | Protocol | Purpose | Expose | |:-----:|:---------:|:----------------------:|:-------:| -| 30303 | TCP & UDP | eth/66 peering | Public | +| 30303 | TCP & UDP | eth/67 peering | Public | | 9090 | TCP | gRPC Connections | Private | | 42069 | TCP & UDP | Snap sync (Bittorrent) | Public | | 6060 | TCP | Metrics or Pprof | Private | @@ -360,7 +360,7 @@ Typically, 8551 (JWT authenticated) is exposed only internally for the Engine AP | 30303 | TCP & UDP | Peering | Public | | 9091 | TCP | gRPC Connections | Private | -Typically, a sentry process will run one eth/xx protocol (e.g. eth/66) and will be exposed to the internet on 30303. Port +Typically, a sentry process will run one eth/xx protocol (e.g. eth/67) and will be exposed to the internet on 30303. Port 9091 is for internal gRCP connections (e.g erigon -> sentry). #### Other ports diff --git a/cmd/observer/observer/handshake.go b/cmd/observer/observer/handshake.go index 6fdcd3414ab..785a97a5762 100644 --- a/cmd/observer/observer/handshake.go +++ b/cmd/observer/observer/handshake.go @@ -4,6 +4,11 @@ import ( "context" "crypto/ecdsa" "fmt" + "math/big" + "net" + "strings" + "time" + "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/core/forkid" "github.com/ledgerwatch/erigon/crypto" @@ -12,10 +17,6 @@ import ( "github.com/ledgerwatch/erigon/p2p/rlpx" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rlp" - "math/big" - "net" - "strings" - "time" ) // https://github.com/ethereum/devp2p/blob/master/rlpx.md#p2p-capability @@ -236,10 +237,10 @@ func makeOurHelloMessage(myPrivateKey *ecdsa.PrivateKey) HelloMessage { clientID := common.MakeName("observer", version) caps := []p2p.Cap{ - {Name: eth.ProtocolName, Version: 63}, {Name: eth.ProtocolName, Version: 64}, {Name: eth.ProtocolName, Version: 65}, - {Name: eth.ProtocolName, Version: eth.ETH66}, + {Name: eth.ProtocolName, Version: 66}, + {Name: eth.ProtocolName, Version: eth.ETH67}, } return HelloMessage{ diff --git a/cmd/observer/observer/handshake_test.go b/cmd/observer/observer/handshake_test.go index 2691cd24006..19dd35ada3c 100644 --- a/cmd/observer/observer/handshake_test.go +++ b/cmd/observer/observer/handshake_test.go @@ -2,13 +2,14 @@ package observer import ( "context" + "testing" + "github.com/ledgerwatch/erigon/crypto" "github.com/ledgerwatch/erigon/eth/protocols/eth" "github.com/ledgerwatch/erigon/p2p/enode" "github.com/ledgerwatch/erigon/params" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "testing" ) func TestHandshake(t *testing.T) { @@ -30,6 +31,6 @@ func TestHandshake(t *testing.T) { assert.Contains(t, hello.ClientID, "erigon") require.NotNil(t, status) - assert.Equal(t, uint32(eth.ETH66), status.ProtocolVersion) + assert.Equal(t, uint32(eth.ETH67), status.ProtocolVersion) assert.Equal(t, uint64(1), status.NetworkID) } diff --git a/cmd/observer/observer/sentry_candidates/log_test.go b/cmd/observer/observer/sentry_candidates/log_test.go index e1ac9c391c7..3e75a1a5647 100644 --- a/cmd/observer/observer/sentry_candidates/log_test.go +++ b/cmd/observer/observer/sentry_candidates/log_test.go @@ -2,16 +2,17 @@ package sentry_candidates import ( "context" + "strings" + "testing" + "github.com/nxadm/tail" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "strings" - "testing" ) 
func TestLogRead(t *testing.T) { line := ` -{"capabilities":["eth/66","wit/0"],"clientID":"Nethermind/v1.13.0-0-2e8910b5b-20220520/X64-Linux/6.0.4","lvl":"dbug","msg":"Sentry peer did Connect","nodeURL":"enode://4293b17b897abed4a88d6e760e86a4bb700d62c12a9411fbf9ec0c9df3740c8670b184bd9f24d163cbd9bf05264b3047a69f079209d53d2e0dc05dd678d07cf0@1.2.3.4:45492","peer":"93b17b897abed4a88d6e760e86a4bb700d62c12a9411fbf9ec0c9df3740c8670b184bd9f24d163cbd9bf05264b3047a69f079209d53d2e0dc05dd678d07cf000","t":"2022-05-31T11:10:19.032092272Z"} +{"capabilities":["eth/67","wit/0"],"clientID":"Nethermind/v1.13.0-0-2e8910b5b-20220520/X64-Linux/6.0.4","lvl":"dbug","msg":"Sentry peer did Connect","nodeURL":"enode://4293b17b897abed4a88d6e760e86a4bb700d62c12a9411fbf9ec0c9df3740c8670b184bd9f24d163cbd9bf05264b3047a69f079209d53d2e0dc05dd678d07cf0@1.2.3.4:45492","peer":"93b17b897abed4a88d6e760e86a4bb700d62c12a9411fbf9ec0c9df3740c8670b184bd9f24d163cbd9bf05264b3047a69f079209d53d2e0dc05dd678d07cf000","t":"2022-05-31T11:10:19.032092272Z"} ` line = strings.TrimLeft(line, "\r\n ") eventLog := NewLog(NewScannerLineReader(strings.NewReader(line))) @@ -54,7 +55,7 @@ func TestLogReadTailSkimFile(t *testing.T) { func TestLogEventEthVersion(t *testing.T) { event := LogEvent{} - event.Capabilities = []string{"wit/0", "eth/64", "eth/65", "eth/66"} + event.Capabilities = []string{"wit/0", "eth/65", "eth/66", "eth/67"} version := event.EthVersion() - assert.Equal(t, uint(66), version) + assert.Equal(t, uint(67), version) } diff --git a/cmd/observer/observer/server.go b/cmd/observer/observer/server.go index 907b7c709a6..24e3a8899d8 100644 --- a/cmd/observer/observer/server.go +++ b/cmd/observer/observer/server.go @@ -5,6 +5,9 @@ import ( "crypto/ecdsa" "errors" "fmt" + "net" + "path/filepath" + "github.com/ledgerwatch/erigon/cmd/utils" "github.com/ledgerwatch/erigon/common/debug" "github.com/ledgerwatch/erigon/core/forkid" @@ -17,8 +20,6 @@ import ( "github.com/ledgerwatch/erigon/p2p/netutil" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/log/v3" - "net" - "path/filepath" ) type Server struct { @@ -32,7 +33,7 @@ type Server struct { } func NewServer(flags CommandFlags) (*Server, error) { - nodeDBPath := filepath.Join(flags.DataDir, "nodes", "eth66") + nodeDBPath := filepath.Join(flags.DataDir, "nodes", "eth67") nodeKeyConfig := p2p.NodeKeyConfig{} privateKey, err := nodeKeyConfig.LoadOrParseOrGenerateAndSave(flags.NodeKeyFile, flags.NodeKeyHex, flags.DataDir) diff --git a/cmd/rpcdaemon/commands/eth_subscribe_test.go b/cmd/rpcdaemon/commands/eth_subscribe_test.go index a997495bd99..337a8df9049 100644 --- a/cmd/rpcdaemon/commands/eth_subscribe_test.go +++ b/cmd/rpcdaemon/commands/eth_subscribe_test.go @@ -33,7 +33,7 @@ func TestEthSubscribe(t *testing.T) { require.NoError(err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS_66, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS, Data: b, PeerId: m.PeerId}) { require.NoError(err) } m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed diff --git a/cmd/rpcdaemon/commands/send_transaction_test.go b/cmd/rpcdaemon/commands/send_transaction_test.go index 0e848258126..719064f6eb9 100644 --- a/cmd/rpcdaemon/commands/send_transaction_test.go +++ b/cmd/rpcdaemon/commands/send_transaction_test.go @@ -42,7 +42,7 @@ func TestSendRawTransaction(t *testing.T) { }) require.NoError(err) m.ReceiveWg.Add(1) - for _, err = range 
m.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK_66, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK, Data: b, PeerId: m.PeerId}) { require.NoError(err) } // Send all the headers @@ -52,10 +52,10 @@ func TestSendRawTransaction(t *testing.T) { }) require.NoError(err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS_66, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS, Data: b, PeerId: m.PeerId}) { require.NoError(err) } - m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed + m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceed initialCycle := true highestSeenHeader := chain.TopBlock.NumberU64() diff --git a/cmd/rpcdaemon22/commands/eth_subscribe_test.go b/cmd/rpcdaemon22/commands/eth_subscribe_test.go index 875b7a2456a..180137a9722 100644 --- a/cmd/rpcdaemon22/commands/eth_subscribe_test.go +++ b/cmd/rpcdaemon22/commands/eth_subscribe_test.go @@ -32,7 +32,7 @@ func TestEthSubscribe(t *testing.T) { require.NoError(err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS_66, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS, Data: b, PeerId: m.PeerId}) { require.NoError(err) } m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed diff --git a/cmd/rpcdaemon22/commands/send_transaction_test.go b/cmd/rpcdaemon22/commands/send_transaction_test.go index 87c6ef411b3..634c265d99d 100644 --- a/cmd/rpcdaemon22/commands/send_transaction_test.go +++ b/cmd/rpcdaemon22/commands/send_transaction_test.go @@ -42,7 +42,7 @@ func TestSendRawTransaction(t *testing.T) { }) require.NoError(err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK_66, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK, Data: b, PeerId: m.PeerId}) { require.NoError(err) } // Send all the headers @@ -52,7 +52,7 @@ func TestSendRawTransaction(t *testing.T) { }) require.NoError(err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS_66, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS, Data: b, PeerId: m.PeerId}) { require.NoError(err) } m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed diff --git a/cmd/sentry/main.go b/cmd/sentry/main.go index 80ed49d8522..1b75579a0f0 100644 --- a/cmd/sentry/main.go +++ b/cmd/sentry/main.go @@ -45,7 +45,7 @@ func init() { rootCmd.Flags().StringSliceVar(&trustedPeers, utils.TrustedPeersFlag.Name, []string{}, utils.TrustedPeersFlag.Usage) rootCmd.Flags().StringSliceVar(&discoveryDNS, utils.DNSDiscoveryFlag.Name, []string{}, utils.DNSDiscoveryFlag.Usage) rootCmd.Flags().BoolVar(&nodiscover, utils.NoDiscoverFlag.Name, false, utils.NoDiscoverFlag.Usage) - rootCmd.Flags().StringVar(&protocol, "p2p.protocol", "eth66", "eth66") + rootCmd.Flags().StringVar(&protocol, "p2p.protocol", "eth67", "eth67") rootCmd.Flags().StringVar(&netRestrict, utils.NetrestrictFlag.Name, utils.NetrestrictFlag.Value, utils.NetrestrictFlag.Usage) rootCmd.Flags().IntVar(&maxPeers, utils.MaxPeersFlag.Name, utils.MaxPeersFlag.Value, utils.MaxPeersFlag.Usage) 
rootCmd.Flags().IntVar(&maxPendPeers, utils.MaxPendingPeersFlag.Name, utils.MaxPendingPeersFlag.Value, utils.MaxPendingPeersFlag.Usage) @@ -68,7 +68,7 @@ var rootCmd = &cobra.Command{ debug.Exit() }, RunE: func(cmd *cobra.Command, args []string) error { - p := eth.ETH66 + p := eth.ETH67 dirs := datadir.New(datadirCli) nodeConfig := node2.NewNodeConfig() diff --git a/cmd/sentry/sentry/broadcast.go b/cmd/sentry/sentry/broadcast.go index 869fede9555..32b9f694f50 100644 --- a/cmd/sentry/sentry/broadcast.go +++ b/cmd/sentry/sentry/broadcast.go @@ -41,7 +41,7 @@ func (cs *MultiClient) PropagateNewBlockHashes(ctx context.Context, announces [] log.Error("propagateNewBlockHashes", "err", err) return } - var req66 *proto_sentry.OutboundMessageData + var req67 *proto_sentry.OutboundMessageData // Send the block to a subset of our peers sendToAmount := int(math.Sqrt(float64(len(cs.sentries)))) for i, sentry := range cs.sentries { @@ -54,14 +54,14 @@ func (cs *MultiClient) PropagateNewBlockHashes(ctx context.Context, announces [] switch sentry.Protocol() { - case eth.ETH66: - if req66 == nil { - req66 = &proto_sentry.OutboundMessageData{ - Id: proto_sentry.MessageId_NEW_BLOCK_HASHES_66, + case eth.ETH67: + if req67 == nil { + req67 = &proto_sentry.OutboundMessageData{ + Id: proto_sentry.MessageId_NEW_BLOCK_HASHES, Data: data, } - _, err = sentry.SendMessageToAll(ctx, req66, &grpc.EmptyCallOption{}) + _, err = sentry.SendMessageToAll(ctx, req67, &grpc.EmptyCallOption{}) if err != nil { log.Error("propagateNewBlockHashes", "err", err) } @@ -82,7 +82,7 @@ func (cs *MultiClient) BroadcastNewBlock(ctx context.Context, block *types.Block if err != nil { log.Error("broadcastNewBlock", "err", err) } - var req66 *proto_sentry.SendMessageToRandomPeersRequest + var req67 *proto_sentry.SendMessageToRandomPeersRequest // Send the block to a subset of our peers sendToAmount := int(math.Sqrt(float64(len(cs.sentries)))) for i, sentry := range cs.sentries { @@ -95,17 +95,17 @@ func (cs *MultiClient) BroadcastNewBlock(ctx context.Context, block *types.Block switch sentry.Protocol() { - case eth.ETH66: - if req66 == nil { - req66 = &proto_sentry.SendMessageToRandomPeersRequest{ + case eth.ETH67: + if req67 == nil { + req67 = &proto_sentry.SendMessageToRandomPeersRequest{ MaxPeers: 1024, Data: &proto_sentry.OutboundMessageData{ - Id: proto_sentry.MessageId_NEW_BLOCK_66, + Id: proto_sentry.MessageId_NEW_BLOCK, Data: data, }, } } - if _, err = sentry.SendMessageToRandomPeers(ctx, req66, &grpc.EmptyCallOption{}); err != nil { + if _, err = sentry.SendMessageToRandomPeers(ctx, req67, &grpc.EmptyCallOption{}); err != nil { if isPeerNotFoundErr(err) || networkTemporaryErr(err) { log.Debug("broadcastNewBlock", "err", err) continue @@ -124,8 +124,7 @@ func (cs *MultiClient) BroadcastLocalPooledTxs(ctx context.Context, txs []common cs.lock.RLock() defer cs.lock.RUnlock() initialAmount := len(txs) - avgPeersPerSent65 := 0 - avgPeersPerSent66 := 0 + avgPeersPerSent67 := 0 initialTxs := txs for len(txs) > 0 { @@ -141,7 +140,7 @@ func (cs *MultiClient) BroadcastLocalPooledTxs(ctx context.Context, txs []common if err != nil { log.Error("BroadcastLocalPooledTxs", "err", err) } - var req66 *proto_sentry.OutboundMessageData + var req67 *proto_sentry.OutboundMessageData // Send the block to a subset of our peers sendToAmount := int(math.Sqrt(float64(len(cs.sentries)))) for i, sentry := range cs.sentries { @@ -153,14 +152,14 @@ func (cs *MultiClient) BroadcastLocalPooledTxs(ctx context.Context, txs []common } switch sentry.Protocol() { - 
case eth.ETH66: - if req66 == nil { - req66 = &proto_sentry.OutboundMessageData{ - Id: proto_sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES_66, + case eth.ETH67: + if req67 == nil { + req67 = &proto_sentry.OutboundMessageData{ + Id: proto_sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES, Data: data, } } - peers, err := sentry.SendMessageToAll(ctx, req66, &grpc.EmptyCallOption{}) + peers, err := sentry.SendMessageToAll(ctx, req67, &grpc.EmptyCallOption{}) if err != nil { if isPeerNotFoundErr(err) || networkTemporaryErr(err) { log.Debug("BroadcastLocalPooledTxs", "err", err) @@ -168,14 +167,14 @@ func (cs *MultiClient) BroadcastLocalPooledTxs(ctx context.Context, txs []common } log.Error("BroadcastLocalPooledTxs", "err", err) } - avgPeersPerSent66 += len(peers.GetPeers()) + avgPeersPerSent67 += len(peers.GetPeers()) } } } if initialAmount == 1 { - log.Info("local tx propagated", "to_peers_amount", avgPeersPerSent65+avgPeersPerSent66, "tx_hash", initialTxs[0].String()) + log.Info("local tx propagated", "to_peers_amount", avgPeersPerSent67, "tx_hash", initialTxs[0].String()) } else { - log.Info("local txs propagated", "to_peers_amount", avgPeersPerSent65+avgPeersPerSent66, "txs_amount", initialAmount) + log.Info("local txs propagated", "to_peers_amount", avgPeersPerSent67, "txs_amount", initialAmount) } } @@ -200,7 +199,7 @@ func (cs *MultiClient) BroadcastRemotePooledTxs(ctx context.Context, txs []commo if err != nil { log.Error("BroadcastRemotePooledTxs", "err", err) } - var req66 *proto_sentry.SendMessageToRandomPeersRequest + var req67 *proto_sentry.SendMessageToRandomPeersRequest // Send the block to a subset of our peers sendToAmount := int(math.Sqrt(float64(len(cs.sentries)))) for i, sentry := range cs.sentries { @@ -213,17 +212,17 @@ func (cs *MultiClient) BroadcastRemotePooledTxs(ctx context.Context, txs []commo switch sentry.Protocol() { - case eth.ETH66: - if req66 == nil { - req66 = &proto_sentry.SendMessageToRandomPeersRequest{ + case eth.ETH67: + if req67 == nil { + req67 = &proto_sentry.SendMessageToRandomPeersRequest{ MaxPeers: 1024, Data: &proto_sentry.OutboundMessageData{ - Id: proto_sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES_66, + Id: proto_sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES, Data: data, }, } } - if _, err = sentry.SendMessageToRandomPeers(ctx, req66, &grpc.EmptyCallOption{}); err != nil { + if _, err = sentry.SendMessageToRandomPeers(ctx, req67, &grpc.EmptyCallOption{}); err != nil { if isPeerNotFoundErr(err) || networkTemporaryErr(err) { log.Debug("BroadcastRemotePooledTxs", "err", err) continue @@ -264,15 +263,15 @@ func (cs *MultiClient) PropagatePooledTxsToPeersList(ctx context.Context, peers for _, peer := range peers { switch sentry.Protocol() { - case eth.ETH66: - req66 := &proto_sentry.SendMessageByIdRequest{ + case eth.ETH67: + req67 := &proto_sentry.SendMessageByIdRequest{ PeerId: peer, Data: &proto_sentry.OutboundMessageData{ - Id: proto_sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES_66, + Id: proto_sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES, Data: data, }, } - if _, err = sentry.SendMessageById(ctx, req66, &grpc.EmptyCallOption{}); err != nil { + if _, err = sentry.SendMessageById(ctx, req67, &grpc.EmptyCallOption{}); err != nil { if isPeerNotFoundErr(err) || networkTemporaryErr(err) { log.Debug("PropagatePooledTxsToPeersList", "err", err) continue diff --git a/cmd/sentry/sentry/eth_handshake_test.go b/cmd/sentry/sentry/eth_handshake_test.go index eeec146f3f9..ba32d8b770f 100644 --- a/cmd/sentry/sentry/eth_handshake_test.go +++ 
b/cmd/sentry/sentry/eth_handshake_test.go @@ -15,7 +15,7 @@ import ( ) func TestCheckPeerStatusCompatibility(t *testing.T) { - var version uint = eth.ETH66 + var version uint = eth.ETH67 networkID := params.MainnetChainConfig.ChainID.Uint64() goodReply := eth.StatusPacket{ ProtocolVersion: uint32(version), @@ -49,14 +49,14 @@ func TestCheckPeerStatusCompatibility(t *testing.T) { }) t.Run("version mismatch min", func(t *testing.T) { reply := goodReply - reply.ProtocolVersion = eth.ETH66 - 1 + reply.ProtocolVersion = eth.ETH67 - 1 err := checkPeerStatusCompatibility(&reply, &status, version, version) assert.NotNil(t, err) assert.Contains(t, err.Error(), "version is less") }) t.Run("version mismatch max", func(t *testing.T) { reply := goodReply - reply.ProtocolVersion = eth.ETH66 + 1 + reply.ProtocolVersion = eth.ETH67 + 1 err := checkPeerStatusCompatibility(&reply, &status, version, version) assert.NotNil(t, err) assert.Contains(t, err.Error(), "version is more") diff --git a/cmd/sentry/sentry/sentry_api.go b/cmd/sentry/sentry/sentry_api.go index fbb1ed814d9..11b548eeb8d 100644 --- a/cmd/sentry/sentry/sentry_api.go +++ b/cmd/sentry/sentry/sentry_api.go @@ -44,7 +44,7 @@ func (cs *MultiClient) SendBodyRequest(ctx context.Context, req *bodydownload.Bo } switch cs.sentries[i].Protocol() { - case eth.ETH66: + case eth.ETH67: //log.Info(fmt.Sprintf("Sending body request for %v", req.BlockNums)) var bytes []byte var err error @@ -59,7 +59,7 @@ func (cs *MultiClient) SendBodyRequest(ctx context.Context, req *bodydownload.Bo outreq := proto_sentry.SendMessageByMinBlockRequest{ MinBlock: req.BlockNums[len(req.BlockNums)-1], Data: &proto_sentry.OutboundMessageData{ - Id: proto_sentry.MessageId_GET_BLOCK_BODIES_66, + Id: proto_sentry.MessageId_GET_BLOCK_BODIES, Data: bytes, }, } @@ -85,7 +85,7 @@ func (cs *MultiClient) SendHeaderRequest(ctx context.Context, req *headerdownloa continue } switch cs.sentries[i].Protocol() { - case eth.ETH66: + case eth.ETH67: //log.Info(fmt.Sprintf("Sending header request {hash: %x, height: %d, length: %d}", req.Hash, req.Number, req.Length)) reqData := ð.GetBlockHeadersPacket66{ RequestId: rand.Uint64(), @@ -109,7 +109,7 @@ func (cs *MultiClient) SendHeaderRequest(ctx context.Context, req *headerdownloa outreq := proto_sentry.SendMessageByMinBlockRequest{ MinBlock: minBlock, Data: &proto_sentry.OutboundMessageData{ - Id: proto_sentry.MessageId_GET_BLOCK_HEADERS_66, + Id: proto_sentry.MessageId_GET_BLOCK_HEADERS, Data: bytes, }, } diff --git a/cmd/sentry/sentry/sentry_grpc_server.go b/cmd/sentry/sentry/sentry_grpc_server.go index 098c77693b6..539385d2cba 100644 --- a/cmd/sentry/sentry/sentry_grpc_server.go +++ b/cmd/sentry/sentry/sentry_grpc_server.go @@ -356,16 +356,6 @@ func runPeer( log.Error(fmt.Sprintf("%s: reading msg into bytes: %v", peerID, err)) } send(eth.ToProto[protocol][msg.Code], peerID, b) - case eth.GetNodeDataMsg: - if !hasSubscribers(eth.ToProto[protocol][msg.Code]) { - continue - } - b := make([]byte, msg.Size) - if _, err := io.ReadFull(msg.Payload, b); err != nil { - log.Error(fmt.Sprintf("%s: reading msg into bytes: %v", peerID, err)) - } - send(eth.ToProto[protocol][msg.Code], peerID, b) - //log.Info(fmt.Sprintf("[%s] GetNodeData", peerID)) case eth.GetReceiptsMsg: if !hasSubscribers(eth.ToProto[protocol][msg.Code]) { continue @@ -491,7 +481,7 @@ func NewGrpcServer(ctx context.Context, dialCandidates enode.Iterator, readNodeI peersStreams: NewPeersStreams(), } - if protocol != eth.ETH66 { + if protocol != eth.ETH67 { panic(fmt.Errorf("unexpected p2p 
protocol: %d", protocol)) } @@ -631,7 +621,7 @@ func (ss *GrpcServer) writePeer(logPrefix string, peerInfo *PeerInfo, msgcode ui func (ss *GrpcServer) startSync(ctx context.Context, bestHash common.Hash, peerID [64]byte) error { switch ss.Protocol.Version { - case eth.ETH66: + case eth.ETH67: b, err := rlp.EncodeToBytes(ð.GetBlockHeadersPacket66{ RequestId: rand.Uint64(), GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ @@ -647,7 +637,7 @@ func (ss *GrpcServer) startSync(ctx context.Context, bestHash common.Hash, peerI if _, err := ss.SendMessageById(ctx, &proto_sentry.SendMessageByIdRequest{ PeerId: gointerfaces.ConvertHashToH512(peerID), Data: &proto_sentry.OutboundMessageData{ - Id: proto_sentry.MessageId_GET_BLOCK_HEADERS_66, + Id: proto_sentry.MessageId_GET_BLOCK_HEADERS, Data: b, }, }); err != nil { @@ -803,8 +793,8 @@ func (ss *GrpcServer) SendMessageToAll(ctx context.Context, req *proto_sentry.Ou func (ss *GrpcServer) HandShake(context.Context, *emptypb.Empty) (*proto_sentry.HandShakeReply, error) { reply := &proto_sentry.HandShakeReply{} switch ss.Protocol.Version { - case eth.ETH66: - reply.Protocol = proto_sentry.Protocol_ETH66 + case eth.ETH67: + reply.Protocol = proto_sentry.Protocol_ETH67 } return reply, nil } diff --git a/cmd/sentry/sentry/sentry_grpc_server_test.go b/cmd/sentry/sentry/sentry_grpc_server_test.go index 8cbd567a9e2..281216624de 100644 --- a/cmd/sentry/sentry/sentry_grpc_server_test.go +++ b/cmd/sentry/sentry/sentry_grpc_server_test.go @@ -50,7 +50,7 @@ func testSentryServer(db kv.Getter, genesis *core.Genesis, genesisHash common.Ha // Tests that peers are correctly accepted (or rejected) based on the advertised // fork IDs in the protocol handshake. -func TestForkIDSplit66(t *testing.T) { testForkIDSplit(t, eth.ETH66) } +func TestForkIDSplit67(t *testing.T) { testForkIDSplit(t, eth.ETH67) } func testForkIDSplit(t *testing.T, protocol uint) { var ( diff --git a/cmd/sentry/sentry/sentry_multi_client.go b/cmd/sentry/sentry/sentry_multi_client.go index 1359398c43c..0434ea51aa7 100644 --- a/cmd/sentry/sentry/sentry_multi_client.go +++ b/cmd/sentry/sentry/sentry_multi_client.go @@ -64,8 +64,8 @@ func (cs *MultiClient) RecvUploadMessageLoop( wg *sync.WaitGroup, ) { ids := []proto_sentry.MessageId{ - eth.ToProto[eth.ETH66][eth.GetBlockBodiesMsg], - eth.ToProto[eth.ETH66][eth.GetReceiptsMsg], + eth.ToProto[eth.ETH67][eth.GetBlockBodiesMsg], + eth.ToProto[eth.ETH67][eth.GetReceiptsMsg], } streamFactory := func(streamCtx context.Context, sentry direct.SentryClient) (sentryMessageStream, error) { return sentry.Messages(streamCtx, &proto_sentry.MessagesRequest{Ids: ids}, grpc.WaitForReady(true)) @@ -80,7 +80,7 @@ func (cs *MultiClient) RecvUploadHeadersMessageLoop( wg *sync.WaitGroup, ) { ids := []proto_sentry.MessageId{ - eth.ToProto[eth.ETH66][eth.GetBlockHeadersMsg], + eth.ToProto[eth.ETH67][eth.GetBlockHeadersMsg], } streamFactory := func(streamCtx context.Context, sentry direct.SentryClient) (sentryMessageStream, error) { return sentry.Messages(streamCtx, &proto_sentry.MessagesRequest{Ids: ids}, grpc.WaitForReady(true)) @@ -95,10 +95,10 @@ func (cs *MultiClient) RecvMessageLoop( wg *sync.WaitGroup, ) { ids := []proto_sentry.MessageId{ - eth.ToProto[eth.ETH66][eth.BlockHeadersMsg], - eth.ToProto[eth.ETH66][eth.BlockBodiesMsg], - eth.ToProto[eth.ETH66][eth.NewBlockHashesMsg], - eth.ToProto[eth.ETH66][eth.NewBlockMsg], + eth.ToProto[eth.ETH67][eth.BlockHeadersMsg], + eth.ToProto[eth.ETH67][eth.BlockBodiesMsg], + eth.ToProto[eth.ETH67][eth.NewBlockHashesMsg], + 
eth.ToProto[eth.ETH67][eth.NewBlockMsg], } streamFactory := func(streamCtx context.Context, sentry direct.SentryClient) (sentryMessageStream, error) { return sentry.Messages(streamCtx, &proto_sentry.MessagesRequest{Ids: ids}, grpc.WaitForReady(true)) @@ -336,7 +336,7 @@ func (cs *MultiClient) newBlockHashes66(ctx context.Context, req *proto_sentry.I outreq := proto_sentry.SendMessageByIdRequest{ PeerId: req.PeerId, Data: &proto_sentry.OutboundMessageData{ - Id: proto_sentry.MessageId_GET_BLOCK_HEADERS_66, + Id: proto_sentry.MessageId_GET_BLOCK_HEADERS, Data: b, }, } @@ -534,7 +534,7 @@ func (cs *MultiClient) getBlockHeaders66(ctx context.Context, inreq *proto_sentr outreq := proto_sentry.SendMessageByIdRequest{ PeerId: inreq.PeerId, Data: &proto_sentry.OutboundMessageData{ - Id: proto_sentry.MessageId_BLOCK_HEADERS_66, + Id: proto_sentry.MessageId_BLOCK_HEADERS, Data: b, }, } @@ -571,7 +571,7 @@ func (cs *MultiClient) getBlockBodies66(ctx context.Context, inreq *proto_sentry outreq := proto_sentry.SendMessageByIdRequest{ PeerId: inreq.PeerId, Data: &proto_sentry.OutboundMessageData{ - Id: proto_sentry.MessageId_BLOCK_BODIES_66, + Id: proto_sentry.MessageId_BLOCK_BODIES, Data: b, }, } @@ -611,7 +611,7 @@ func (cs *MultiClient) getReceipts66(ctx context.Context, inreq *proto_sentry.In outreq := proto_sentry.SendMessageByIdRequest{ PeerId: inreq.PeerId, Data: &proto_sentry.OutboundMessageData{ - Id: proto_sentry.MessageId_RECEIPTS_66, + Id: proto_sentry.MessageId_RECEIPTS, Data: b, }, } @@ -655,23 +655,23 @@ func (cs *MultiClient) HandleInboundMessage(ctx context.Context, message *proto_ func (cs *MultiClient) handleInboundMessage(ctx context.Context, inreq *proto_sentry.InboundMessage, sentry direct.SentryClient) error { switch inreq.Id { - // ========= eth 66 ========== + // ========= eth 67 ========== - case proto_sentry.MessageId_NEW_BLOCK_HASHES_66: + case proto_sentry.MessageId_NEW_BLOCK_HASHES: return cs.newBlockHashes66(ctx, inreq, sentry) - case proto_sentry.MessageId_BLOCK_HEADERS_66: + case proto_sentry.MessageId_BLOCK_HEADERS: return cs.blockHeaders66(ctx, inreq, sentry) - case proto_sentry.MessageId_NEW_BLOCK_66: + case proto_sentry.MessageId_NEW_BLOCK: return cs.newBlock66(ctx, inreq, sentry) - case proto_sentry.MessageId_BLOCK_BODIES_66: + case proto_sentry.MessageId_BLOCK_BODIES: return cs.blockBodies66(inreq, sentry) - case proto_sentry.MessageId_GET_BLOCK_HEADERS_66: + case proto_sentry.MessageId_GET_BLOCK_HEADERS: return cs.getBlockHeaders66(ctx, inreq, sentry) - case proto_sentry.MessageId_GET_BLOCK_BODIES_66: + case proto_sentry.MessageId_GET_BLOCK_BODIES: return cs.getBlockBodies66(ctx, inreq, sentry) - case proto_sentry.MessageId_RECEIPTS_66: + case proto_sentry.MessageId_RECEIPTS: return cs.receipts66(ctx, inreq, sentry) - case proto_sentry.MessageId_GET_RECEIPTS_66: + case proto_sentry.MessageId_GET_RECEIPTS: return cs.getReceipts66(ctx, inreq, sentry) default: return fmt.Errorf("not implemented for message Id: %s", inreq.Id) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index ff39c1e4367..7b91de08b43 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -841,8 +841,8 @@ func NewP2PConfig( ) (*p2p.Config, error) { var enodeDBPath string switch protocol { - case eth.ETH66: - enodeDBPath = filepath.Join(dirs.Nodes, "eth66") + case eth.ETH67: + enodeDBPath = filepath.Join(dirs.Nodes, "eth67") default: return nil, fmt.Errorf("unknown protocol: %v", protocol) } diff --git a/eth/backend.go b/eth/backend.go index d85647cde94..d704058fe1a 100644 --- 
a/eth/backend.go +++ b/eth/backend.go @@ -235,16 +235,16 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere return res } - d66, err := setupDiscovery(backend.config.EthDiscoveryURLs) + d67, err := setupDiscovery(backend.config.EthDiscoveryURLs) if err != nil { return nil, err } - cfg66 := stack.Config().P2P - cfg66.NodeDatabase = filepath.Join(stack.Config().Dirs.Nodes, "eth66") - server66 := sentry.NewGrpcServer(backend.sentryCtx, d66, readNodeInfo, &cfg66, eth.ETH66) - backend.sentryServers = append(backend.sentryServers, server66) - sentries = []direct.SentryClient{direct.NewSentryClientDirect(eth.ETH66, server66)} + cfg67 := stack.Config().P2P + cfg67.NodeDatabase = filepath.Join(stack.Config().Dirs.Nodes, "eth67") + server67 := sentry.NewGrpcServer(backend.sentryCtx, d67, readNodeInfo, &cfg67, eth.ETH67) + backend.sentryServers = append(backend.sentryServers, server67) + sentries = []direct.SentryClient{direct.NewSentryClientDirect(eth.ETH67, server67)} go func() { logEvery := time.NewTicker(120 * time.Second) diff --git a/eth/protocols/eth/handler_test.go b/eth/protocols/eth/handler_test.go index 76a08335ec0..d718c0888f0 100644 --- a/eth/protocols/eth/handler_test.go +++ b/eth/protocols/eth/handler_test.go @@ -112,7 +112,7 @@ func TestGetBlockReceipts(t *testing.T) { m.ReceiveWg.Add(1) // Send the hash request and verify the response - for _, err = range m.Send(&sentry.InboundMessage{Id: eth.ToProto[eth.ETH66][eth.GetReceiptsMsg], Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: eth.ToProto[eth.ETH67][eth.GetReceiptsMsg], Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } diff --git a/eth/protocols/eth/protocol.go b/eth/protocols/eth/protocol.go index 37569605b82..bb9ed2012d0 100644 --- a/eth/protocols/eth/protocol.go +++ b/eth/protocols/eth/protocol.go @@ -33,11 +33,11 @@ import ( // Constants to match up protocol versions and messages const ( - ETH66 = 66 + ETH67 = 67 ) var ProtocolToString = map[uint]string{ - ETH66: "eth66", + ETH67: "eth67", } // ProtocolName is the official short name of the `eth` protocol used during @@ -58,10 +58,10 @@ const ( GetBlockBodiesMsg = 0x05 BlockBodiesMsg = 0x06 NewBlockMsg = 0x07 - GetNodeDataMsg = 0x0d - NodeDataMsg = 0x0e - GetReceiptsMsg = 0x0f - ReceiptsMsg = 0x10 + // GetNodeDataMsg = 0x0d // removed in eth/67 + // NodeDataMsg = 0x0e // removed in eth/67 + GetReceiptsMsg = 0x0f + ReceiptsMsg = 0x10 // Protocol messages overloaded in eth/65 NewPooledTransactionHashesMsg = 0x08 @@ -70,40 +70,36 @@ const ( ) var ToProto = map[uint]map[uint64]proto_sentry.MessageId{ - ETH66: { - GetBlockHeadersMsg: proto_sentry.MessageId_GET_BLOCK_HEADERS_66, - BlockHeadersMsg: proto_sentry.MessageId_BLOCK_HEADERS_66, - GetBlockBodiesMsg: proto_sentry.MessageId_GET_BLOCK_BODIES_66, - BlockBodiesMsg: proto_sentry.MessageId_BLOCK_BODIES_66, - GetNodeDataMsg: proto_sentry.MessageId_GET_NODE_DATA_66, - NodeDataMsg: proto_sentry.MessageId_NODE_DATA_66, - GetReceiptsMsg: proto_sentry.MessageId_GET_RECEIPTS_66, - ReceiptsMsg: proto_sentry.MessageId_RECEIPTS_66, - NewBlockHashesMsg: proto_sentry.MessageId_NEW_BLOCK_HASHES_66, - NewBlockMsg: proto_sentry.MessageId_NEW_BLOCK_66, - TransactionsMsg: proto_sentry.MessageId_TRANSACTIONS_66, - NewPooledTransactionHashesMsg: proto_sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES_66, - GetPooledTransactionsMsg: proto_sentry.MessageId_GET_POOLED_TRANSACTIONS_66, - PooledTransactionsMsg: proto_sentry.MessageId_POOLED_TRANSACTIONS_66, + ETH67: { + 
GetBlockHeadersMsg: proto_sentry.MessageId_GET_BLOCK_HEADERS, + BlockHeadersMsg: proto_sentry.MessageId_BLOCK_HEADERS, + GetBlockBodiesMsg: proto_sentry.MessageId_GET_BLOCK_BODIES, + BlockBodiesMsg: proto_sentry.MessageId_BLOCK_BODIES, + GetReceiptsMsg: proto_sentry.MessageId_GET_RECEIPTS, + ReceiptsMsg: proto_sentry.MessageId_RECEIPTS, + NewBlockHashesMsg: proto_sentry.MessageId_NEW_BLOCK_HASHES, + NewBlockMsg: proto_sentry.MessageId_NEW_BLOCK, + TransactionsMsg: proto_sentry.MessageId_TRANSACTIONS, + NewPooledTransactionHashesMsg: proto_sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES, + GetPooledTransactionsMsg: proto_sentry.MessageId_GET_POOLED_TRANSACTIONS, + PooledTransactionsMsg: proto_sentry.MessageId_POOLED_TRANSACTIONS, }, } var FromProto = map[uint]map[proto_sentry.MessageId]uint64{ - ETH66: { - proto_sentry.MessageId_GET_BLOCK_HEADERS_66: GetBlockHeadersMsg, - proto_sentry.MessageId_BLOCK_HEADERS_66: BlockHeadersMsg, - proto_sentry.MessageId_GET_BLOCK_BODIES_66: GetBlockBodiesMsg, - proto_sentry.MessageId_BLOCK_BODIES_66: BlockBodiesMsg, - proto_sentry.MessageId_GET_NODE_DATA_66: GetNodeDataMsg, - proto_sentry.MessageId_NODE_DATA_66: NodeDataMsg, - proto_sentry.MessageId_GET_RECEIPTS_66: GetReceiptsMsg, - proto_sentry.MessageId_RECEIPTS_66: ReceiptsMsg, - proto_sentry.MessageId_NEW_BLOCK_HASHES_66: NewBlockHashesMsg, - proto_sentry.MessageId_NEW_BLOCK_66: NewBlockMsg, - proto_sentry.MessageId_TRANSACTIONS_66: TransactionsMsg, - proto_sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES_66: NewPooledTransactionHashesMsg, - proto_sentry.MessageId_GET_POOLED_TRANSACTIONS_66: GetPooledTransactionsMsg, - proto_sentry.MessageId_POOLED_TRANSACTIONS_66: PooledTransactionsMsg, + ETH67: { + proto_sentry.MessageId_GET_BLOCK_HEADERS: GetBlockHeadersMsg, + proto_sentry.MessageId_BLOCK_HEADERS: BlockHeadersMsg, + proto_sentry.MessageId_GET_BLOCK_BODIES: GetBlockBodiesMsg, + proto_sentry.MessageId_BLOCK_BODIES: BlockBodiesMsg, + proto_sentry.MessageId_GET_RECEIPTS: GetReceiptsMsg, + proto_sentry.MessageId_RECEIPTS: ReceiptsMsg, + proto_sentry.MessageId_NEW_BLOCK_HASHES: NewBlockHashesMsg, + proto_sentry.MessageId_NEW_BLOCK: NewBlockMsg, + proto_sentry.MessageId_TRANSACTIONS: TransactionsMsg, + proto_sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES: NewPooledTransactionHashesMsg, + proto_sentry.MessageId_GET_POOLED_TRANSACTIONS: GetPooledTransactionsMsg, + proto_sentry.MessageId_POOLED_TRANSACTIONS: PooledTransactionsMsg, }, } @@ -654,24 +650,6 @@ func (p *BlockRawBodiesPacket) Unpack() ([][][]byte, [][]*types.Header) { return txset, uncleset } -// GetNodeDataPacket represents a trie node data query. -type GetNodeDataPacket []common.Hash - -// GetNodeDataPacket represents a trie node data query over eth/66. -type GetNodeDataPacket66 struct { - RequestId uint64 - GetNodeDataPacket -} - -// NodeDataPacket is the network packet for trie node data distribution. -type NodeDataPacket [][]byte - -// NodeDataPacket is the network packet for trie node data distribution over eth/66. -type NodeDataPacket66 struct { - RequestId uint64 - NodeDataPacket -} - // GetReceiptsPacket represents a block receipts query. 
type GetReceiptsPacket []common.Hash @@ -914,12 +892,6 @@ func (*BlockBodiesPacket) Kind() byte { return BlockBodiesMsg } func (*NewBlockPacket) Name() string { return "NewBlock" } func (*NewBlockPacket) Kind() byte { return NewBlockMsg } -func (*GetNodeDataPacket) Name() string { return "GetNodeData" } -func (*GetNodeDataPacket) Kind() byte { return GetNodeDataMsg } - -func (*NodeDataPacket) Name() string { return "NodeData" } -func (*NodeDataPacket) Kind() byte { return NodeDataMsg } - func (*GetReceiptsPacket) Name() string { return "GetReceipts" } func (*GetReceiptsPacket) Kind() byte { return GetReceiptsMsg } diff --git a/eth/protocols/eth/protocol_test.go b/eth/protocols/eth/protocol_test.go index 592dcbdf0c0..5bae0706887 100644 --- a/eth/protocols/eth/protocol_test.go +++ b/eth/protocols/eth/protocol_test.go @@ -71,8 +71,8 @@ func TestGetBlockHeadersDataEncodeDecode(t *testing.T) { } } -// TestEth66EmptyMessages tests encoding of empty eth66 messages -func TestEth66EmptyMessages(t *testing.T) { +// TestEth67EmptyMessages tests encoding of empty eth67 messages +func TestEth67EmptyMessages(t *testing.T) { // All empty messages encodes to the same format want := common.FromHex("c4820457c0") @@ -84,9 +84,6 @@ func TestEth66EmptyMessages(t *testing.T) { GetBlockBodiesPacket66{1111, nil}, BlockBodiesPacket66{1111, nil}, BlockBodiesRLPPacket66{1111, nil}, - // Node data - GetNodeDataPacket66{1111, nil}, - NodeDataPacket66{1111, nil}, // Receipts GetReceiptsPacket66{1111, nil}, ReceiptsPacket66{1111, nil}, @@ -101,9 +98,6 @@ func TestEth66EmptyMessages(t *testing.T) { GetBlockBodiesPacket66{1111, GetBlockBodiesPacket([]common.Hash{})}, BlockBodiesPacket66{1111, BlockBodiesPacket([]*BlockBody{})}, BlockBodiesRLPPacket66{1111, BlockBodiesRLPPacket([]rlp.RawValue{})}, - // Node data - GetNodeDataPacket66{1111, GetNodeDataPacket([]common.Hash{})}, - NodeDataPacket66{1111, NodeDataPacket([][]byte{})}, // Receipts GetReceiptsPacket66{1111, GetReceiptsPacket([]common.Hash{})}, ReceiptsPacket66{1111, ReceiptsPacket([][]*types.Receipt{})}, @@ -119,8 +113,8 @@ func TestEth66EmptyMessages(t *testing.T) { } -// TestEth66Messages tests the encoding of all redefined eth66 messages -func TestEth66Messages(t *testing.T) { +// TestEth67Messages tests the encoding of all redefined eth67 messages +func TestEth67Messages(t *testing.T) { // Some basic structs used during testing var ( @@ -173,10 +167,6 @@ func TestEth66Messages(t *testing.T) { common.HexToHash("deadc0de"), common.HexToHash("feedbeef"), } - byteSlices := [][]byte{ - common.FromHex("deadc0de"), - common.FromHex("feedbeef"), - } // init the receipts { receipts = []*types.Receipt{ @@ -230,14 +220,6 @@ func TestEth66Messages(t *testing.T) { BlockBodiesRLPPacket66{1111, BlockBodiesRLPPacket([]rlp.RawValue{blockBodyRlp})}, 
common.FromHex("f902dc820457f902d6f902d3f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afbf901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"), }, - { - GetNodeDataPacket66{1111, GetNodeDataPacket(hashes)}, - common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"), - }, - { - NodeDataPacket66{1111, NodeDataPacket(byteSlices)}, - common.FromHex("ce820457ca84deadc0de84feedbeef"), - }, { GetReceiptsPacket66{1111, GetReceiptsPacket(hashes)}, common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"), diff --git a/ethdb/privateapi/ethbackend.go b/ethdb/privateapi/ethbackend.go index 4719d6c78e6..46e0fa49f01 100644 --- a/ethdb/privateapi/ethbackend.go +++ b/ethdb/privateapi/ethbackend.go @@ -197,7 +197,7 @@ func (s *EthBackendServer) Subscribe(r *remote.SubscribeRequest, subscribeServer func (s *EthBackendServer) ProtocolVersion(_ context.Context, _ *remote.ProtocolVersionRequest) (*remote.ProtocolVersionReply, error) { // Hardcoding to avoid import cycle - return &remote.ProtocolVersionReply{Id: 66}, nil + return &remote.ProtocolVersionReply{Id: 67}, nil } func (s *EthBackendServer) ClientVersion(_ context.Context, _ *remote.ClientVersionRequest) (*remote.ClientVersionReply, error) { diff --git a/go.mod b/go.mod index ecea7d44e68..7eab08c2969 100644 --- a/go.mod +++ b/go.mod @@ -35,7 +35,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible - github.com/ledgerwatch/erigon-lib v0.0.0-20220625091153-e7b09db04531 + github.com/ledgerwatch/erigon-lib v0.0.0-20220628075812-eac8c1fa590b github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 diff --git a/go.sum b/go.sum index b7a39052902..3e38c0a8807 100644 --- a/go.sum +++ b/go.sum @@ -386,8 +386,8 @@ github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3P github.com/kylelemons/godebug 
v0.0.0-20170224010052-a616ab194758/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220625091153-e7b09db04531 h1:UKQC0chFY2s0wXOMDOyPEuUTwymsQRUpNHm7/5isnUo= -github.com/ledgerwatch/erigon-lib v0.0.0-20220625091153-e7b09db04531/go.mod h1:7sQ5B5m54zoo7RVRVukH3YZCYVrCC+BmwDBD+9KyTrE= +github.com/ledgerwatch/erigon-lib v0.0.0-20220628075812-eac8c1fa590b h1:9SnS7lnKnl4PtXB0tYZjnK/wJIl/wHbx9ByXp2wDUNQ= +github.com/ledgerwatch/erigon-lib v0.0.0-20220628075812-eac8c1fa590b/go.mod h1:7sQ5B5m54zoo7RVRVukH3YZCYVrCC+BmwDBD+9KyTrE= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= diff --git a/p2p/dial_test.go b/p2p/dial_test.go index 9bafb293fc7..185a71a401e 100644 --- a/p2p/dial_test.go +++ b/p2p/dial_test.go @@ -410,7 +410,7 @@ func runDialTest(t *testing.T, config dialConfig, rounds []dialTestRound) { setupCh <- conn return nil } - dialsched = newDialScheduler(config, iterator, setup, 66) + dialsched = newDialScheduler(config, iterator, setup, 67) defer dialsched.stop() for i, round := range rounds { diff --git a/turbo/stages/mock_sentry.go b/turbo/stages/mock_sentry.go index 5ba56a2fad5..2caf0cfa040 100644 --- a/turbo/stages/mock_sentry.go +++ b/turbo/stages/mock_sentry.go @@ -117,7 +117,7 @@ func (ms *MockSentry) PeerMinBlock(context.Context, *proto_sentry.PeerMinBlockRe } func (ms *MockSentry) HandShake(ctx context.Context, in *emptypb.Empty) (*proto_sentry.HandShakeReply, error) { - return &proto_sentry.HandShakeReply{Protocol: proto_sentry.Protocol_ETH66}, nil + return &proto_sentry.HandShakeReply{Protocol: proto_sentry.Protocol_ETH67}, nil } func (ms *MockSentry) SendMessageByMinBlock(_ context.Context, r *proto_sentry.SendMessageByMinBlockRequest) (*proto_sentry.SentPeers, error) { ms.sentMessages = append(ms.sentMessages, r.Data) @@ -234,7 +234,7 @@ func MockWithEverything(t *testing.T, gspec *core.Genesis, key *ecdsa.PrivateKey cfg.DeprecatedTxPool.Disable = !withTxPool cfg.DeprecatedTxPool.StartOnInit = true - mock.SentryClient = direct.NewSentryClientDirect(eth.ETH66, mock) + mock.SentryClient = direct.NewSentryClientDirect(eth.ETH67, mock) sentries := []direct.SentryClient{mock.SentryClient} sendBodyRequest := func(context.Context, *bodydownload.BodyRequest) ([64]byte, bool) { return [64]byte{}, false } @@ -450,7 +450,7 @@ func (ms *MockSentry) InsertChain(chain *core.ChainPack) error { return err } ms.ReceiveWg.Add(1) - for _, err = range ms.Send(&proto_sentry.InboundMessage{Id: proto_sentry.MessageId_NEW_BLOCK_66, Data: b, PeerId: ms.PeerId}) { + for _, err = range ms.Send(&proto_sentry.InboundMessage{Id: proto_sentry.MessageId_NEW_BLOCK, Data: b, PeerId: ms.PeerId}) { if err != nil { return err } @@ -464,7 +464,7 @@ func (ms *MockSentry) InsertChain(chain *core.ChainPack) error { return err } ms.ReceiveWg.Add(1) - for _, err = range ms.Send(&proto_sentry.InboundMessage{Id: proto_sentry.MessageId_BLOCK_HEADERS_66, Data: b, PeerId: ms.PeerId}) { + for _, err = range ms.Send(&proto_sentry.InboundMessage{Id: proto_sentry.MessageId_BLOCK_HEADERS, Data: b, PeerId: ms.PeerId}) { if err != nil { return err } @@ -482,7 +482,7 @@ func (ms *MockSentry) 
InsertChain(chain *core.ChainPack) error { return err } ms.ReceiveWg.Add(1) - for _, err = range ms.Send(&proto_sentry.InboundMessage{Id: proto_sentry.MessageId_BLOCK_BODIES_66, Data: b, PeerId: ms.PeerId}) { + for _, err = range ms.Send(&proto_sentry.InboundMessage{Id: proto_sentry.MessageId_BLOCK_BODIES, Data: b, PeerId: ms.PeerId}) { if err != nil { return err } diff --git a/turbo/stages/sentry_mock_test.go b/turbo/stages/sentry_mock_test.go index f0a9c136efa..df2c6329478 100644 --- a/turbo/stages/sentry_mock_test.go +++ b/turbo/stages/sentry_mock_test.go @@ -40,7 +40,7 @@ func TestHeaderStep(t *testing.T) { }) require.NoError(t, err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK_66, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK, Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } // Send all the headers @@ -50,7 +50,7 @@ func TestHeaderStep(t *testing.T) { }) require.NoError(t, err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS_66, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS, Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceed @@ -79,7 +79,7 @@ func TestMineBlockWith1Tx(t *testing.T) { }) require.NoError(err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK_66, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK, Data: b, PeerId: m.PeerId}) { require.NoError(err) } // Send all the headers @@ -89,7 +89,7 @@ func TestMineBlockWith1Tx(t *testing.T) { }) require.NoError(err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS_66, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS, Data: b, PeerId: m.PeerId}) { require.NoError(err) } m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed @@ -113,7 +113,7 @@ func TestMineBlockWith1Tx(t *testing.T) { b, err := rlp.EncodeToBytes(chain.TopBlock.Transactions()) require.NoError(err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_TRANSACTIONS_66, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_TRANSACTIONS, Data: b, PeerId: m.PeerId}) { require.NoError(err) } m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed @@ -145,7 +145,7 @@ func TestReorg(t *testing.T) { t.Fatal(err) } m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK_66, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK, Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } @@ -158,7 +158,7 @@ func TestReorg(t *testing.T) { t.Fatal(err) } m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS_66, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS, Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed @@ -199,7 +199,7 @@ func TestReorg(t *testing.T) { t.Fatal(err) } 
m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK_66, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK, Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } @@ -212,7 +212,7 @@ func TestReorg(t *testing.T) { t.Fatal(err) } m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS_66, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS, Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed @@ -232,7 +232,7 @@ func TestReorg(t *testing.T) { t.Fatal(err) } m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK_66, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK, Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } @@ -245,7 +245,7 @@ func TestReorg(t *testing.T) { t.Fatal(err) } m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS_66, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS, Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } @@ -256,7 +256,7 @@ func TestReorg(t *testing.T) { }) require.NoError(t, err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS_66, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS, Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed @@ -283,7 +283,7 @@ func TestReorg(t *testing.T) { }) require.NoError(t, err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK_66, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK, Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } @@ -294,7 +294,7 @@ func TestReorg(t *testing.T) { }) require.NoError(t, err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS_66, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS, Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed @@ -342,7 +342,7 @@ func TestAnchorReplace(t *testing.T) { }) require.NoError(t, err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK_66, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK, Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } @@ -353,7 +353,7 @@ func TestAnchorReplace(t *testing.T) { }) require.NoError(t, err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS_66, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS, Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } require.NoError(t, err) @@ -365,7 +365,7 @@ func TestAnchorReplace(t *testing.T) { }) require.NoError(t, err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: 
sentry.MessageId_NEW_BLOCK_66, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK, Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } require.NoError(t, err) @@ -377,7 +377,7 @@ func TestAnchorReplace(t *testing.T) { }) require.NoError(t, err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS_66, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS, Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } @@ -390,7 +390,7 @@ func TestAnchorReplace(t *testing.T) { }) require.NoError(t, err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS_66, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS, Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } @@ -438,7 +438,7 @@ func TestAnchorReplace2(t *testing.T) { }) require.NoError(t, err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK_66, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK, Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } @@ -449,7 +449,7 @@ func TestAnchorReplace2(t *testing.T) { }) require.NoError(t, err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS_66, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS, Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } @@ -460,7 +460,7 @@ func TestAnchorReplace2(t *testing.T) { }) require.NoError(t, err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK_66, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK, Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } @@ -471,7 +471,7 @@ func TestAnchorReplace2(t *testing.T) { }) require.NoError(t, err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS_66, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS, Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } @@ -482,7 +482,7 @@ func TestAnchorReplace2(t *testing.T) { }) require.NoError(t, err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS_66, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS, Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } @@ -495,7 +495,7 @@ func TestAnchorReplace2(t *testing.T) { }) require.NoError(t, err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS_66, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS, Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } @@ -591,7 +591,7 @@ func TestPoSDownloader(t *testing.T) { }) require.NoError(t, err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS_66, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS, Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } m.ReceiveWg.Wait() @@ -657,7 
+657,7 @@ func TestPoSSyncWithInvalidHeader(t *testing.T) { }) require.NoError(t, err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS_66, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS, Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } m.ReceiveWg.Wait() From dc5d3ffaac8420e45581d84575ef1c2ca37c6513 Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Tue, 28 Jun 2022 13:42:35 +0200 Subject: [PATCH 118/136] Revert "Switch from eth/66 to eth/67 (#4549)" (#4562) This reverts commit 16e57aa8a2a94a8ec872b088c9c9f43ac818d979. --- DEV_CHAIN.md | 2 +- README.md | 4 +- cmd/observer/observer/handshake.go | 13 ++- cmd/observer/observer/handshake_test.go | 5 +- .../observer/sentry_candidates/log_test.go | 11 +-- cmd/observer/observer/server.go | 7 +- cmd/rpcdaemon/commands/eth_subscribe_test.go | 2 +- .../commands/send_transaction_test.go | 6 +- .../commands/eth_subscribe_test.go | 2 +- .../commands/send_transaction_test.go | 4 +- cmd/sentry/main.go | 4 +- cmd/sentry/sentry/broadcast.go | 65 ++++++------- cmd/sentry/sentry/eth_handshake_test.go | 6 +- cmd/sentry/sentry/sentry_api.go | 8 +- cmd/sentry/sentry/sentry_grpc_server.go | 20 +++- cmd/sentry/sentry/sentry_grpc_server_test.go | 2 +- cmd/sentry/sentry/sentry_multi_client.go | 40 ++++---- cmd/utils/flags.go | 4 +- eth/backend.go | 12 +-- eth/protocols/eth/handler_test.go | 2 +- eth/protocols/eth/protocol.go | 92 ++++++++++++------- eth/protocols/eth/protocol_test.go | 26 +++++- ethdb/privateapi/ethbackend.go | 2 +- go.mod | 2 +- go.sum | 4 +- p2p/dial_test.go | 2 +- turbo/stages/mock_sentry.go | 10 +- turbo/stages/sentry_mock_test.go | 54 +++++------ 28 files changed, 232 insertions(+), 179 deletions(-) diff --git a/DEV_CHAIN.md b/DEV_CHAIN.md index cbff468d1a8..d3510aea3df 100644 --- a/DEV_CHAIN.md +++ b/DEV_CHAIN.md @@ -70,7 +70,7 @@ Open terminal 3 and navigate to erigon/build/bin folder. Paste in the following To check if the nodes are connected, you can go to the log of both the nodes and look for the line - ``` [p2p] GoodPeers eth67=1 ``` + ``` [p2p] GoodPeers eth66=1 ``` Note: this might take a while it is not istantaneus, also if you see a 1 on either one of the two the node is fine. diff --git a/README.md b/README.md index 2ed20281c4f..83de5de31b1 100644 --- a/README.md +++ b/README.md @@ -335,7 +335,7 @@ Detailed explanation: [./docs/programmers_guide/db_faq.md](./docs/programmers_gu | Port | Protocol | Purpose | Expose | |:-----:|:---------:|:----------------------:|:-------:| -| 30303 | TCP & UDP | eth/67 peering | Public | +| 30303 | TCP & UDP | eth/66 peering | Public | | 9090 | TCP | gRPC Connections | Private | | 42069 | TCP & UDP | Snap sync (Bittorrent) | Public | | 6060 | TCP | Metrics or Pprof | Private | @@ -360,7 +360,7 @@ Typically, 8551 (JWT authenticated) is exposed only internally for the Engine AP | 30303 | TCP & UDP | Peering | Public | | 9091 | TCP | gRPC Connections | Private | -Typically, a sentry process will run one eth/xx protocol (e.g. eth/67) and will be exposed to the internet on 30303. Port +Typically, a sentry process will run one eth/xx protocol (e.g. eth/66) and will be exposed to the internet on 30303. Port 9091 is for internal gRCP connections (e.g erigon -> sentry). 
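For illustration, a minimal sketch of how an operator tool could confirm which eth/xx protocol a running sentry advertises, by dialing the internal gRPC port (9091 above) and calling `HandShake`. The generated-bindings import path and the use of insecure transport credentials are assumptions for this sketch, not something this patch prescribes.

```go
package main

import (
	"context"
	"fmt"
	"log"

	// Assumed import path for the generated sentry gRPC bindings.
	proto_sentry "github.com/ledgerwatch/erigon-lib/gointerfaces/sentry"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	// Dial the sentry's internal gRPC endpoint (port 9091 in the table above).
	conn, err := grpc.Dial("127.0.0.1:9091", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := proto_sentry.NewSentryClient(conn)

	// HandShake reports the single eth/xx protocol this sentry runs (e.g. ETH66).
	reply, err := client.HandShake(context.Background(), &emptypb.Empty{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("sentry protocol:", reply.Protocol)
}
```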
#### Other ports diff --git a/cmd/observer/observer/handshake.go b/cmd/observer/observer/handshake.go index 785a97a5762..6fdcd3414ab 100644 --- a/cmd/observer/observer/handshake.go +++ b/cmd/observer/observer/handshake.go @@ -4,11 +4,6 @@ import ( "context" "crypto/ecdsa" "fmt" - "math/big" - "net" - "strings" - "time" - "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/core/forkid" "github.com/ledgerwatch/erigon/crypto" @@ -17,6 +12,10 @@ import ( "github.com/ledgerwatch/erigon/p2p/rlpx" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rlp" + "math/big" + "net" + "strings" + "time" ) // https://github.com/ethereum/devp2p/blob/master/rlpx.md#p2p-capability @@ -237,10 +236,10 @@ func makeOurHelloMessage(myPrivateKey *ecdsa.PrivateKey) HelloMessage { clientID := common.MakeName("observer", version) caps := []p2p.Cap{ + {Name: eth.ProtocolName, Version: 63}, {Name: eth.ProtocolName, Version: 64}, {Name: eth.ProtocolName, Version: 65}, - {Name: eth.ProtocolName, Version: 66}, - {Name: eth.ProtocolName, Version: eth.ETH67}, + {Name: eth.ProtocolName, Version: eth.ETH66}, } return HelloMessage{ diff --git a/cmd/observer/observer/handshake_test.go b/cmd/observer/observer/handshake_test.go index 19dd35ada3c..2691cd24006 100644 --- a/cmd/observer/observer/handshake_test.go +++ b/cmd/observer/observer/handshake_test.go @@ -2,14 +2,13 @@ package observer import ( "context" - "testing" - "github.com/ledgerwatch/erigon/crypto" "github.com/ledgerwatch/erigon/eth/protocols/eth" "github.com/ledgerwatch/erigon/p2p/enode" "github.com/ledgerwatch/erigon/params" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "testing" ) func TestHandshake(t *testing.T) { @@ -31,6 +30,6 @@ func TestHandshake(t *testing.T) { assert.Contains(t, hello.ClientID, "erigon") require.NotNil(t, status) - assert.Equal(t, uint32(eth.ETH67), status.ProtocolVersion) + assert.Equal(t, uint32(eth.ETH66), status.ProtocolVersion) assert.Equal(t, uint64(1), status.NetworkID) } diff --git a/cmd/observer/observer/sentry_candidates/log_test.go b/cmd/observer/observer/sentry_candidates/log_test.go index 3e75a1a5647..e1ac9c391c7 100644 --- a/cmd/observer/observer/sentry_candidates/log_test.go +++ b/cmd/observer/observer/sentry_candidates/log_test.go @@ -2,17 +2,16 @@ package sentry_candidates import ( "context" - "strings" - "testing" - "github.com/nxadm/tail" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "strings" + "testing" ) func TestLogRead(t *testing.T) { line := ` -{"capabilities":["eth/67","wit/0"],"clientID":"Nethermind/v1.13.0-0-2e8910b5b-20220520/X64-Linux/6.0.4","lvl":"dbug","msg":"Sentry peer did Connect","nodeURL":"enode://4293b17b897abed4a88d6e760e86a4bb700d62c12a9411fbf9ec0c9df3740c8670b184bd9f24d163cbd9bf05264b3047a69f079209d53d2e0dc05dd678d07cf0@1.2.3.4:45492","peer":"93b17b897abed4a88d6e760e86a4bb700d62c12a9411fbf9ec0c9df3740c8670b184bd9f24d163cbd9bf05264b3047a69f079209d53d2e0dc05dd678d07cf000","t":"2022-05-31T11:10:19.032092272Z"} +{"capabilities":["eth/66","wit/0"],"clientID":"Nethermind/v1.13.0-0-2e8910b5b-20220520/X64-Linux/6.0.4","lvl":"dbug","msg":"Sentry peer did Connect","nodeURL":"enode://4293b17b897abed4a88d6e760e86a4bb700d62c12a9411fbf9ec0c9df3740c8670b184bd9f24d163cbd9bf05264b3047a69f079209d53d2e0dc05dd678d07cf0@1.2.3.4:45492","peer":"93b17b897abed4a88d6e760e86a4bb700d62c12a9411fbf9ec0c9df3740c8670b184bd9f24d163cbd9bf05264b3047a69f079209d53d2e0dc05dd678d07cf000","t":"2022-05-31T11:10:19.032092272Z"} ` line = 
strings.TrimLeft(line, "\r\n ") eventLog := NewLog(NewScannerLineReader(strings.NewReader(line))) @@ -55,7 +54,7 @@ func TestLogReadTailSkimFile(t *testing.T) { func TestLogEventEthVersion(t *testing.T) { event := LogEvent{} - event.Capabilities = []string{"wit/0", "eth/65", "eth/66", "eth/67"} + event.Capabilities = []string{"wit/0", "eth/64", "eth/65", "eth/66"} version := event.EthVersion() - assert.Equal(t, uint(67), version) + assert.Equal(t, uint(66), version) } diff --git a/cmd/observer/observer/server.go b/cmd/observer/observer/server.go index 24e3a8899d8..907b7c709a6 100644 --- a/cmd/observer/observer/server.go +++ b/cmd/observer/observer/server.go @@ -5,9 +5,6 @@ import ( "crypto/ecdsa" "errors" "fmt" - "net" - "path/filepath" - "github.com/ledgerwatch/erigon/cmd/utils" "github.com/ledgerwatch/erigon/common/debug" "github.com/ledgerwatch/erigon/core/forkid" @@ -20,6 +17,8 @@ import ( "github.com/ledgerwatch/erigon/p2p/netutil" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/log/v3" + "net" + "path/filepath" ) type Server struct { @@ -33,7 +32,7 @@ type Server struct { } func NewServer(flags CommandFlags) (*Server, error) { - nodeDBPath := filepath.Join(flags.DataDir, "nodes", "eth67") + nodeDBPath := filepath.Join(flags.DataDir, "nodes", "eth66") nodeKeyConfig := p2p.NodeKeyConfig{} privateKey, err := nodeKeyConfig.LoadOrParseOrGenerateAndSave(flags.NodeKeyFile, flags.NodeKeyHex, flags.DataDir) diff --git a/cmd/rpcdaemon/commands/eth_subscribe_test.go b/cmd/rpcdaemon/commands/eth_subscribe_test.go index 337a8df9049..a997495bd99 100644 --- a/cmd/rpcdaemon/commands/eth_subscribe_test.go +++ b/cmd/rpcdaemon/commands/eth_subscribe_test.go @@ -33,7 +33,7 @@ func TestEthSubscribe(t *testing.T) { require.NoError(err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS_66, Data: b, PeerId: m.PeerId}) { require.NoError(err) } m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed diff --git a/cmd/rpcdaemon/commands/send_transaction_test.go b/cmd/rpcdaemon/commands/send_transaction_test.go index 719064f6eb9..0e848258126 100644 --- a/cmd/rpcdaemon/commands/send_transaction_test.go +++ b/cmd/rpcdaemon/commands/send_transaction_test.go @@ -42,7 +42,7 @@ func TestSendRawTransaction(t *testing.T) { }) require.NoError(err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK_66, Data: b, PeerId: m.PeerId}) { require.NoError(err) } // Send all the headers @@ -52,10 +52,10 @@ func TestSendRawTransaction(t *testing.T) { }) require.NoError(err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS_66, Data: b, PeerId: m.PeerId}) { require.NoError(err) } - m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceed + m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed initialCycle := true highestSeenHeader := chain.TopBlock.NumberU64() diff --git a/cmd/rpcdaemon22/commands/eth_subscribe_test.go b/cmd/rpcdaemon22/commands/eth_subscribe_test.go index 180137a9722..875b7a2456a 100644 --- 
a/cmd/rpcdaemon22/commands/eth_subscribe_test.go +++ b/cmd/rpcdaemon22/commands/eth_subscribe_test.go @@ -32,7 +32,7 @@ func TestEthSubscribe(t *testing.T) { require.NoError(err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS_66, Data: b, PeerId: m.PeerId}) { require.NoError(err) } m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed diff --git a/cmd/rpcdaemon22/commands/send_transaction_test.go b/cmd/rpcdaemon22/commands/send_transaction_test.go index 634c265d99d..87c6ef411b3 100644 --- a/cmd/rpcdaemon22/commands/send_transaction_test.go +++ b/cmd/rpcdaemon22/commands/send_transaction_test.go @@ -42,7 +42,7 @@ func TestSendRawTransaction(t *testing.T) { }) require.NoError(err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK_66, Data: b, PeerId: m.PeerId}) { require.NoError(err) } // Send all the headers @@ -52,7 +52,7 @@ func TestSendRawTransaction(t *testing.T) { }) require.NoError(err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS_66, Data: b, PeerId: m.PeerId}) { require.NoError(err) } m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed diff --git a/cmd/sentry/main.go b/cmd/sentry/main.go index 1b75579a0f0..80ed49d8522 100644 --- a/cmd/sentry/main.go +++ b/cmd/sentry/main.go @@ -45,7 +45,7 @@ func init() { rootCmd.Flags().StringSliceVar(&trustedPeers, utils.TrustedPeersFlag.Name, []string{}, utils.TrustedPeersFlag.Usage) rootCmd.Flags().StringSliceVar(&discoveryDNS, utils.DNSDiscoveryFlag.Name, []string{}, utils.DNSDiscoveryFlag.Usage) rootCmd.Flags().BoolVar(&nodiscover, utils.NoDiscoverFlag.Name, false, utils.NoDiscoverFlag.Usage) - rootCmd.Flags().StringVar(&protocol, "p2p.protocol", "eth67", "eth67") + rootCmd.Flags().StringVar(&protocol, "p2p.protocol", "eth66", "eth66") rootCmd.Flags().StringVar(&netRestrict, utils.NetrestrictFlag.Name, utils.NetrestrictFlag.Value, utils.NetrestrictFlag.Usage) rootCmd.Flags().IntVar(&maxPeers, utils.MaxPeersFlag.Name, utils.MaxPeersFlag.Value, utils.MaxPeersFlag.Usage) rootCmd.Flags().IntVar(&maxPendPeers, utils.MaxPendingPeersFlag.Name, utils.MaxPendingPeersFlag.Value, utils.MaxPendingPeersFlag.Usage) @@ -68,7 +68,7 @@ var rootCmd = &cobra.Command{ debug.Exit() }, RunE: func(cmd *cobra.Command, args []string) error { - p := eth.ETH67 + p := eth.ETH66 dirs := datadir.New(datadirCli) nodeConfig := node2.NewNodeConfig() diff --git a/cmd/sentry/sentry/broadcast.go b/cmd/sentry/sentry/broadcast.go index 32b9f694f50..869fede9555 100644 --- a/cmd/sentry/sentry/broadcast.go +++ b/cmd/sentry/sentry/broadcast.go @@ -41,7 +41,7 @@ func (cs *MultiClient) PropagateNewBlockHashes(ctx context.Context, announces [] log.Error("propagateNewBlockHashes", "err", err) return } - var req67 *proto_sentry.OutboundMessageData + var req66 *proto_sentry.OutboundMessageData // Send the block to a subset of our peers sendToAmount := int(math.Sqrt(float64(len(cs.sentries)))) for i, sentry := range cs.sentries { @@ -54,14 +54,14 @@ func (cs *MultiClient) PropagateNewBlockHashes(ctx context.Context, announces [] 
switch sentry.Protocol() { - case eth.ETH67: - if req67 == nil { - req67 = &proto_sentry.OutboundMessageData{ - Id: proto_sentry.MessageId_NEW_BLOCK_HASHES, + case eth.ETH66: + if req66 == nil { + req66 = &proto_sentry.OutboundMessageData{ + Id: proto_sentry.MessageId_NEW_BLOCK_HASHES_66, Data: data, } - _, err = sentry.SendMessageToAll(ctx, req67, &grpc.EmptyCallOption{}) + _, err = sentry.SendMessageToAll(ctx, req66, &grpc.EmptyCallOption{}) if err != nil { log.Error("propagateNewBlockHashes", "err", err) } @@ -82,7 +82,7 @@ func (cs *MultiClient) BroadcastNewBlock(ctx context.Context, block *types.Block if err != nil { log.Error("broadcastNewBlock", "err", err) } - var req67 *proto_sentry.SendMessageToRandomPeersRequest + var req66 *proto_sentry.SendMessageToRandomPeersRequest // Send the block to a subset of our peers sendToAmount := int(math.Sqrt(float64(len(cs.sentries)))) for i, sentry := range cs.sentries { @@ -95,17 +95,17 @@ func (cs *MultiClient) BroadcastNewBlock(ctx context.Context, block *types.Block switch sentry.Protocol() { - case eth.ETH67: - if req67 == nil { - req67 = &proto_sentry.SendMessageToRandomPeersRequest{ + case eth.ETH66: + if req66 == nil { + req66 = &proto_sentry.SendMessageToRandomPeersRequest{ MaxPeers: 1024, Data: &proto_sentry.OutboundMessageData{ - Id: proto_sentry.MessageId_NEW_BLOCK, + Id: proto_sentry.MessageId_NEW_BLOCK_66, Data: data, }, } } - if _, err = sentry.SendMessageToRandomPeers(ctx, req67, &grpc.EmptyCallOption{}); err != nil { + if _, err = sentry.SendMessageToRandomPeers(ctx, req66, &grpc.EmptyCallOption{}); err != nil { if isPeerNotFoundErr(err) || networkTemporaryErr(err) { log.Debug("broadcastNewBlock", "err", err) continue @@ -124,7 +124,8 @@ func (cs *MultiClient) BroadcastLocalPooledTxs(ctx context.Context, txs []common cs.lock.RLock() defer cs.lock.RUnlock() initialAmount := len(txs) - avgPeersPerSent67 := 0 + avgPeersPerSent65 := 0 + avgPeersPerSent66 := 0 initialTxs := txs for len(txs) > 0 { @@ -140,7 +141,7 @@ func (cs *MultiClient) BroadcastLocalPooledTxs(ctx context.Context, txs []common if err != nil { log.Error("BroadcastLocalPooledTxs", "err", err) } - var req67 *proto_sentry.OutboundMessageData + var req66 *proto_sentry.OutboundMessageData // Send the block to a subset of our peers sendToAmount := int(math.Sqrt(float64(len(cs.sentries)))) for i, sentry := range cs.sentries { @@ -152,14 +153,14 @@ func (cs *MultiClient) BroadcastLocalPooledTxs(ctx context.Context, txs []common } switch sentry.Protocol() { - case eth.ETH67: - if req67 == nil { - req67 = &proto_sentry.OutboundMessageData{ - Id: proto_sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES, + case eth.ETH66: + if req66 == nil { + req66 = &proto_sentry.OutboundMessageData{ + Id: proto_sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES_66, Data: data, } } - peers, err := sentry.SendMessageToAll(ctx, req67, &grpc.EmptyCallOption{}) + peers, err := sentry.SendMessageToAll(ctx, req66, &grpc.EmptyCallOption{}) if err != nil { if isPeerNotFoundErr(err) || networkTemporaryErr(err) { log.Debug("BroadcastLocalPooledTxs", "err", err) @@ -167,14 +168,14 @@ func (cs *MultiClient) BroadcastLocalPooledTxs(ctx context.Context, txs []common } log.Error("BroadcastLocalPooledTxs", "err", err) } - avgPeersPerSent67 += len(peers.GetPeers()) + avgPeersPerSent66 += len(peers.GetPeers()) } } } if initialAmount == 1 { - log.Info("local tx propagated", "to_peers_amount", avgPeersPerSent67, "tx_hash", initialTxs[0].String()) + log.Info("local tx propagated", "to_peers_amount", 
avgPeersPerSent65+avgPeersPerSent66, "tx_hash", initialTxs[0].String()) } else { - log.Info("local txs propagated", "to_peers_amount", avgPeersPerSent67, "txs_amount", initialAmount) + log.Info("local txs propagated", "to_peers_amount", avgPeersPerSent65+avgPeersPerSent66, "txs_amount", initialAmount) } } @@ -199,7 +200,7 @@ func (cs *MultiClient) BroadcastRemotePooledTxs(ctx context.Context, txs []commo if err != nil { log.Error("BroadcastRemotePooledTxs", "err", err) } - var req67 *proto_sentry.SendMessageToRandomPeersRequest + var req66 *proto_sentry.SendMessageToRandomPeersRequest // Send the block to a subset of our peers sendToAmount := int(math.Sqrt(float64(len(cs.sentries)))) for i, sentry := range cs.sentries { @@ -212,17 +213,17 @@ func (cs *MultiClient) BroadcastRemotePooledTxs(ctx context.Context, txs []commo switch sentry.Protocol() { - case eth.ETH67: - if req67 == nil { - req67 = &proto_sentry.SendMessageToRandomPeersRequest{ + case eth.ETH66: + if req66 == nil { + req66 = &proto_sentry.SendMessageToRandomPeersRequest{ MaxPeers: 1024, Data: &proto_sentry.OutboundMessageData{ - Id: proto_sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES, + Id: proto_sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES_66, Data: data, }, } } - if _, err = sentry.SendMessageToRandomPeers(ctx, req67, &grpc.EmptyCallOption{}); err != nil { + if _, err = sentry.SendMessageToRandomPeers(ctx, req66, &grpc.EmptyCallOption{}); err != nil { if isPeerNotFoundErr(err) || networkTemporaryErr(err) { log.Debug("BroadcastRemotePooledTxs", "err", err) continue @@ -263,15 +264,15 @@ func (cs *MultiClient) PropagatePooledTxsToPeersList(ctx context.Context, peers for _, peer := range peers { switch sentry.Protocol() { - case eth.ETH67: - req67 := &proto_sentry.SendMessageByIdRequest{ + case eth.ETH66: + req66 := &proto_sentry.SendMessageByIdRequest{ PeerId: peer, Data: &proto_sentry.OutboundMessageData{ - Id: proto_sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES, + Id: proto_sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES_66, Data: data, }, } - if _, err = sentry.SendMessageById(ctx, req67, &grpc.EmptyCallOption{}); err != nil { + if _, err = sentry.SendMessageById(ctx, req66, &grpc.EmptyCallOption{}); err != nil { if isPeerNotFoundErr(err) || networkTemporaryErr(err) { log.Debug("PropagatePooledTxsToPeersList", "err", err) continue diff --git a/cmd/sentry/sentry/eth_handshake_test.go b/cmd/sentry/sentry/eth_handshake_test.go index ba32d8b770f..eeec146f3f9 100644 --- a/cmd/sentry/sentry/eth_handshake_test.go +++ b/cmd/sentry/sentry/eth_handshake_test.go @@ -15,7 +15,7 @@ import ( ) func TestCheckPeerStatusCompatibility(t *testing.T) { - var version uint = eth.ETH67 + var version uint = eth.ETH66 networkID := params.MainnetChainConfig.ChainID.Uint64() goodReply := eth.StatusPacket{ ProtocolVersion: uint32(version), @@ -49,14 +49,14 @@ func TestCheckPeerStatusCompatibility(t *testing.T) { }) t.Run("version mismatch min", func(t *testing.T) { reply := goodReply - reply.ProtocolVersion = eth.ETH67 - 1 + reply.ProtocolVersion = eth.ETH66 - 1 err := checkPeerStatusCompatibility(&reply, &status, version, version) assert.NotNil(t, err) assert.Contains(t, err.Error(), "version is less") }) t.Run("version mismatch max", func(t *testing.T) { reply := goodReply - reply.ProtocolVersion = eth.ETH67 + 1 + reply.ProtocolVersion = eth.ETH66 + 1 err := checkPeerStatusCompatibility(&reply, &status, version, version) assert.NotNil(t, err) assert.Contains(t, err.Error(), "version is more") diff --git a/cmd/sentry/sentry/sentry_api.go 
b/cmd/sentry/sentry/sentry_api.go index 11b548eeb8d..fbb1ed814d9 100644 --- a/cmd/sentry/sentry/sentry_api.go +++ b/cmd/sentry/sentry/sentry_api.go @@ -44,7 +44,7 @@ func (cs *MultiClient) SendBodyRequest(ctx context.Context, req *bodydownload.Bo } switch cs.sentries[i].Protocol() { - case eth.ETH67: + case eth.ETH66: //log.Info(fmt.Sprintf("Sending body request for %v", req.BlockNums)) var bytes []byte var err error @@ -59,7 +59,7 @@ func (cs *MultiClient) SendBodyRequest(ctx context.Context, req *bodydownload.Bo outreq := proto_sentry.SendMessageByMinBlockRequest{ MinBlock: req.BlockNums[len(req.BlockNums)-1], Data: &proto_sentry.OutboundMessageData{ - Id: proto_sentry.MessageId_GET_BLOCK_BODIES, + Id: proto_sentry.MessageId_GET_BLOCK_BODIES_66, Data: bytes, }, } @@ -85,7 +85,7 @@ func (cs *MultiClient) SendHeaderRequest(ctx context.Context, req *headerdownloa continue } switch cs.sentries[i].Protocol() { - case eth.ETH67: + case eth.ETH66: //log.Info(fmt.Sprintf("Sending header request {hash: %x, height: %d, length: %d}", req.Hash, req.Number, req.Length)) reqData := ð.GetBlockHeadersPacket66{ RequestId: rand.Uint64(), @@ -109,7 +109,7 @@ func (cs *MultiClient) SendHeaderRequest(ctx context.Context, req *headerdownloa outreq := proto_sentry.SendMessageByMinBlockRequest{ MinBlock: minBlock, Data: &proto_sentry.OutboundMessageData{ - Id: proto_sentry.MessageId_GET_BLOCK_HEADERS, + Id: proto_sentry.MessageId_GET_BLOCK_HEADERS_66, Data: bytes, }, } diff --git a/cmd/sentry/sentry/sentry_grpc_server.go b/cmd/sentry/sentry/sentry_grpc_server.go index 539385d2cba..098c77693b6 100644 --- a/cmd/sentry/sentry/sentry_grpc_server.go +++ b/cmd/sentry/sentry/sentry_grpc_server.go @@ -356,6 +356,16 @@ func runPeer( log.Error(fmt.Sprintf("%s: reading msg into bytes: %v", peerID, err)) } send(eth.ToProto[protocol][msg.Code], peerID, b) + case eth.GetNodeDataMsg: + if !hasSubscribers(eth.ToProto[protocol][msg.Code]) { + continue + } + b := make([]byte, msg.Size) + if _, err := io.ReadFull(msg.Payload, b); err != nil { + log.Error(fmt.Sprintf("%s: reading msg into bytes: %v", peerID, err)) + } + send(eth.ToProto[protocol][msg.Code], peerID, b) + //log.Info(fmt.Sprintf("[%s] GetNodeData", peerID)) case eth.GetReceiptsMsg: if !hasSubscribers(eth.ToProto[protocol][msg.Code]) { continue @@ -481,7 +491,7 @@ func NewGrpcServer(ctx context.Context, dialCandidates enode.Iterator, readNodeI peersStreams: NewPeersStreams(), } - if protocol != eth.ETH67 { + if protocol != eth.ETH66 { panic(fmt.Errorf("unexpected p2p protocol: %d", protocol)) } @@ -621,7 +631,7 @@ func (ss *GrpcServer) writePeer(logPrefix string, peerInfo *PeerInfo, msgcode ui func (ss *GrpcServer) startSync(ctx context.Context, bestHash common.Hash, peerID [64]byte) error { switch ss.Protocol.Version { - case eth.ETH67: + case eth.ETH66: b, err := rlp.EncodeToBytes(ð.GetBlockHeadersPacket66{ RequestId: rand.Uint64(), GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ @@ -637,7 +647,7 @@ func (ss *GrpcServer) startSync(ctx context.Context, bestHash common.Hash, peerI if _, err := ss.SendMessageById(ctx, &proto_sentry.SendMessageByIdRequest{ PeerId: gointerfaces.ConvertHashToH512(peerID), Data: &proto_sentry.OutboundMessageData{ - Id: proto_sentry.MessageId_GET_BLOCK_HEADERS, + Id: proto_sentry.MessageId_GET_BLOCK_HEADERS_66, Data: b, }, }); err != nil { @@ -793,8 +803,8 @@ func (ss *GrpcServer) SendMessageToAll(ctx context.Context, req *proto_sentry.Ou func (ss *GrpcServer) HandShake(context.Context, *emptypb.Empty) (*proto_sentry.HandShakeReply, error) 
{ reply := &proto_sentry.HandShakeReply{} switch ss.Protocol.Version { - case eth.ETH67: - reply.Protocol = proto_sentry.Protocol_ETH67 + case eth.ETH66: + reply.Protocol = proto_sentry.Protocol_ETH66 } return reply, nil } diff --git a/cmd/sentry/sentry/sentry_grpc_server_test.go b/cmd/sentry/sentry/sentry_grpc_server_test.go index 281216624de..8cbd567a9e2 100644 --- a/cmd/sentry/sentry/sentry_grpc_server_test.go +++ b/cmd/sentry/sentry/sentry_grpc_server_test.go @@ -50,7 +50,7 @@ func testSentryServer(db kv.Getter, genesis *core.Genesis, genesisHash common.Ha // Tests that peers are correctly accepted (or rejected) based on the advertised // fork IDs in the protocol handshake. -func TestForkIDSplit67(t *testing.T) { testForkIDSplit(t, eth.ETH67) } +func TestForkIDSplit66(t *testing.T) { testForkIDSplit(t, eth.ETH66) } func testForkIDSplit(t *testing.T, protocol uint) { var ( diff --git a/cmd/sentry/sentry/sentry_multi_client.go b/cmd/sentry/sentry/sentry_multi_client.go index 0434ea51aa7..1359398c43c 100644 --- a/cmd/sentry/sentry/sentry_multi_client.go +++ b/cmd/sentry/sentry/sentry_multi_client.go @@ -64,8 +64,8 @@ func (cs *MultiClient) RecvUploadMessageLoop( wg *sync.WaitGroup, ) { ids := []proto_sentry.MessageId{ - eth.ToProto[eth.ETH67][eth.GetBlockBodiesMsg], - eth.ToProto[eth.ETH67][eth.GetReceiptsMsg], + eth.ToProto[eth.ETH66][eth.GetBlockBodiesMsg], + eth.ToProto[eth.ETH66][eth.GetReceiptsMsg], } streamFactory := func(streamCtx context.Context, sentry direct.SentryClient) (sentryMessageStream, error) { return sentry.Messages(streamCtx, &proto_sentry.MessagesRequest{Ids: ids}, grpc.WaitForReady(true)) @@ -80,7 +80,7 @@ func (cs *MultiClient) RecvUploadHeadersMessageLoop( wg *sync.WaitGroup, ) { ids := []proto_sentry.MessageId{ - eth.ToProto[eth.ETH67][eth.GetBlockHeadersMsg], + eth.ToProto[eth.ETH66][eth.GetBlockHeadersMsg], } streamFactory := func(streamCtx context.Context, sentry direct.SentryClient) (sentryMessageStream, error) { return sentry.Messages(streamCtx, &proto_sentry.MessagesRequest{Ids: ids}, grpc.WaitForReady(true)) @@ -95,10 +95,10 @@ func (cs *MultiClient) RecvMessageLoop( wg *sync.WaitGroup, ) { ids := []proto_sentry.MessageId{ - eth.ToProto[eth.ETH67][eth.BlockHeadersMsg], - eth.ToProto[eth.ETH67][eth.BlockBodiesMsg], - eth.ToProto[eth.ETH67][eth.NewBlockHashesMsg], - eth.ToProto[eth.ETH67][eth.NewBlockMsg], + eth.ToProto[eth.ETH66][eth.BlockHeadersMsg], + eth.ToProto[eth.ETH66][eth.BlockBodiesMsg], + eth.ToProto[eth.ETH66][eth.NewBlockHashesMsg], + eth.ToProto[eth.ETH66][eth.NewBlockMsg], } streamFactory := func(streamCtx context.Context, sentry direct.SentryClient) (sentryMessageStream, error) { return sentry.Messages(streamCtx, &proto_sentry.MessagesRequest{Ids: ids}, grpc.WaitForReady(true)) @@ -336,7 +336,7 @@ func (cs *MultiClient) newBlockHashes66(ctx context.Context, req *proto_sentry.I outreq := proto_sentry.SendMessageByIdRequest{ PeerId: req.PeerId, Data: &proto_sentry.OutboundMessageData{ - Id: proto_sentry.MessageId_GET_BLOCK_HEADERS, + Id: proto_sentry.MessageId_GET_BLOCK_HEADERS_66, Data: b, }, } @@ -534,7 +534,7 @@ func (cs *MultiClient) getBlockHeaders66(ctx context.Context, inreq *proto_sentr outreq := proto_sentry.SendMessageByIdRequest{ PeerId: inreq.PeerId, Data: &proto_sentry.OutboundMessageData{ - Id: proto_sentry.MessageId_BLOCK_HEADERS, + Id: proto_sentry.MessageId_BLOCK_HEADERS_66, Data: b, }, } @@ -571,7 +571,7 @@ func (cs *MultiClient) getBlockBodies66(ctx context.Context, inreq *proto_sentry outreq := 
proto_sentry.SendMessageByIdRequest{ PeerId: inreq.PeerId, Data: &proto_sentry.OutboundMessageData{ - Id: proto_sentry.MessageId_BLOCK_BODIES, + Id: proto_sentry.MessageId_BLOCK_BODIES_66, Data: b, }, } @@ -611,7 +611,7 @@ func (cs *MultiClient) getReceipts66(ctx context.Context, inreq *proto_sentry.In outreq := proto_sentry.SendMessageByIdRequest{ PeerId: inreq.PeerId, Data: &proto_sentry.OutboundMessageData{ - Id: proto_sentry.MessageId_RECEIPTS, + Id: proto_sentry.MessageId_RECEIPTS_66, Data: b, }, } @@ -655,23 +655,23 @@ func (cs *MultiClient) HandleInboundMessage(ctx context.Context, message *proto_ func (cs *MultiClient) handleInboundMessage(ctx context.Context, inreq *proto_sentry.InboundMessage, sentry direct.SentryClient) error { switch inreq.Id { - // ========= eth 67 ========== + // ========= eth 66 ========== - case proto_sentry.MessageId_NEW_BLOCK_HASHES: + case proto_sentry.MessageId_NEW_BLOCK_HASHES_66: return cs.newBlockHashes66(ctx, inreq, sentry) - case proto_sentry.MessageId_BLOCK_HEADERS: + case proto_sentry.MessageId_BLOCK_HEADERS_66: return cs.blockHeaders66(ctx, inreq, sentry) - case proto_sentry.MessageId_NEW_BLOCK: + case proto_sentry.MessageId_NEW_BLOCK_66: return cs.newBlock66(ctx, inreq, sentry) - case proto_sentry.MessageId_BLOCK_BODIES: + case proto_sentry.MessageId_BLOCK_BODIES_66: return cs.blockBodies66(inreq, sentry) - case proto_sentry.MessageId_GET_BLOCK_HEADERS: + case proto_sentry.MessageId_GET_BLOCK_HEADERS_66: return cs.getBlockHeaders66(ctx, inreq, sentry) - case proto_sentry.MessageId_GET_BLOCK_BODIES: + case proto_sentry.MessageId_GET_BLOCK_BODIES_66: return cs.getBlockBodies66(ctx, inreq, sentry) - case proto_sentry.MessageId_RECEIPTS: + case proto_sentry.MessageId_RECEIPTS_66: return cs.receipts66(ctx, inreq, sentry) - case proto_sentry.MessageId_GET_RECEIPTS: + case proto_sentry.MessageId_GET_RECEIPTS_66: return cs.getReceipts66(ctx, inreq, sentry) default: return fmt.Errorf("not implemented for message Id: %s", inreq.Id) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 7b91de08b43..ff39c1e4367 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -841,8 +841,8 @@ func NewP2PConfig( ) (*p2p.Config, error) { var enodeDBPath string switch protocol { - case eth.ETH67: - enodeDBPath = filepath.Join(dirs.Nodes, "eth67") + case eth.ETH66: + enodeDBPath = filepath.Join(dirs.Nodes, "eth66") default: return nil, fmt.Errorf("unknown protocol: %v", protocol) } diff --git a/eth/backend.go b/eth/backend.go index d704058fe1a..d85647cde94 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -235,16 +235,16 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere return res } - d67, err := setupDiscovery(backend.config.EthDiscoveryURLs) + d66, err := setupDiscovery(backend.config.EthDiscoveryURLs) if err != nil { return nil, err } - cfg67 := stack.Config().P2P - cfg67.NodeDatabase = filepath.Join(stack.Config().Dirs.Nodes, "eth67") - server67 := sentry.NewGrpcServer(backend.sentryCtx, d67, readNodeInfo, &cfg67, eth.ETH67) - backend.sentryServers = append(backend.sentryServers, server67) - sentries = []direct.SentryClient{direct.NewSentryClientDirect(eth.ETH67, server67)} + cfg66 := stack.Config().P2P + cfg66.NodeDatabase = filepath.Join(stack.Config().Dirs.Nodes, "eth66") + server66 := sentry.NewGrpcServer(backend.sentryCtx, d66, readNodeInfo, &cfg66, eth.ETH66) + backend.sentryServers = append(backend.sentryServers, server66) + sentries = []direct.SentryClient{direct.NewSentryClientDirect(eth.ETH66, server66)} go 
func() { logEvery := time.NewTicker(120 * time.Second) diff --git a/eth/protocols/eth/handler_test.go b/eth/protocols/eth/handler_test.go index d718c0888f0..76a08335ec0 100644 --- a/eth/protocols/eth/handler_test.go +++ b/eth/protocols/eth/handler_test.go @@ -112,7 +112,7 @@ func TestGetBlockReceipts(t *testing.T) { m.ReceiveWg.Add(1) // Send the hash request and verify the response - for _, err = range m.Send(&sentry.InboundMessage{Id: eth.ToProto[eth.ETH67][eth.GetReceiptsMsg], Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: eth.ToProto[eth.ETH66][eth.GetReceiptsMsg], Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } diff --git a/eth/protocols/eth/protocol.go b/eth/protocols/eth/protocol.go index bb9ed2012d0..37569605b82 100644 --- a/eth/protocols/eth/protocol.go +++ b/eth/protocols/eth/protocol.go @@ -33,11 +33,11 @@ import ( // Constants to match up protocol versions and messages const ( - ETH67 = 67 + ETH66 = 66 ) var ProtocolToString = map[uint]string{ - ETH67: "eth67", + ETH66: "eth66", } // ProtocolName is the official short name of the `eth` protocol used during @@ -58,10 +58,10 @@ const ( GetBlockBodiesMsg = 0x05 BlockBodiesMsg = 0x06 NewBlockMsg = 0x07 - // GetNodeDataMsg = 0x0d // removed in eth/67 - // NodeDataMsg = 0x0e // removed in eth/67 - GetReceiptsMsg = 0x0f - ReceiptsMsg = 0x10 + GetNodeDataMsg = 0x0d + NodeDataMsg = 0x0e + GetReceiptsMsg = 0x0f + ReceiptsMsg = 0x10 // Protocol messages overloaded in eth/65 NewPooledTransactionHashesMsg = 0x08 @@ -70,36 +70,40 @@ const ( ) var ToProto = map[uint]map[uint64]proto_sentry.MessageId{ - ETH67: { - GetBlockHeadersMsg: proto_sentry.MessageId_GET_BLOCK_HEADERS, - BlockHeadersMsg: proto_sentry.MessageId_BLOCK_HEADERS, - GetBlockBodiesMsg: proto_sentry.MessageId_GET_BLOCK_BODIES, - BlockBodiesMsg: proto_sentry.MessageId_BLOCK_BODIES, - GetReceiptsMsg: proto_sentry.MessageId_GET_RECEIPTS, - ReceiptsMsg: proto_sentry.MessageId_RECEIPTS, - NewBlockHashesMsg: proto_sentry.MessageId_NEW_BLOCK_HASHES, - NewBlockMsg: proto_sentry.MessageId_NEW_BLOCK, - TransactionsMsg: proto_sentry.MessageId_TRANSACTIONS, - NewPooledTransactionHashesMsg: proto_sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES, - GetPooledTransactionsMsg: proto_sentry.MessageId_GET_POOLED_TRANSACTIONS, - PooledTransactionsMsg: proto_sentry.MessageId_POOLED_TRANSACTIONS, + ETH66: { + GetBlockHeadersMsg: proto_sentry.MessageId_GET_BLOCK_HEADERS_66, + BlockHeadersMsg: proto_sentry.MessageId_BLOCK_HEADERS_66, + GetBlockBodiesMsg: proto_sentry.MessageId_GET_BLOCK_BODIES_66, + BlockBodiesMsg: proto_sentry.MessageId_BLOCK_BODIES_66, + GetNodeDataMsg: proto_sentry.MessageId_GET_NODE_DATA_66, + NodeDataMsg: proto_sentry.MessageId_NODE_DATA_66, + GetReceiptsMsg: proto_sentry.MessageId_GET_RECEIPTS_66, + ReceiptsMsg: proto_sentry.MessageId_RECEIPTS_66, + NewBlockHashesMsg: proto_sentry.MessageId_NEW_BLOCK_HASHES_66, + NewBlockMsg: proto_sentry.MessageId_NEW_BLOCK_66, + TransactionsMsg: proto_sentry.MessageId_TRANSACTIONS_66, + NewPooledTransactionHashesMsg: proto_sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES_66, + GetPooledTransactionsMsg: proto_sentry.MessageId_GET_POOLED_TRANSACTIONS_66, + PooledTransactionsMsg: proto_sentry.MessageId_POOLED_TRANSACTIONS_66, }, } var FromProto = map[uint]map[proto_sentry.MessageId]uint64{ - ETH67: { - proto_sentry.MessageId_GET_BLOCK_HEADERS: GetBlockHeadersMsg, - proto_sentry.MessageId_BLOCK_HEADERS: BlockHeadersMsg, - proto_sentry.MessageId_GET_BLOCK_BODIES: GetBlockBodiesMsg, - 
proto_sentry.MessageId_BLOCK_BODIES: BlockBodiesMsg, - proto_sentry.MessageId_GET_RECEIPTS: GetReceiptsMsg, - proto_sentry.MessageId_RECEIPTS: ReceiptsMsg, - proto_sentry.MessageId_NEW_BLOCK_HASHES: NewBlockHashesMsg, - proto_sentry.MessageId_NEW_BLOCK: NewBlockMsg, - proto_sentry.MessageId_TRANSACTIONS: TransactionsMsg, - proto_sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES: NewPooledTransactionHashesMsg, - proto_sentry.MessageId_GET_POOLED_TRANSACTIONS: GetPooledTransactionsMsg, - proto_sentry.MessageId_POOLED_TRANSACTIONS: PooledTransactionsMsg, + ETH66: { + proto_sentry.MessageId_GET_BLOCK_HEADERS_66: GetBlockHeadersMsg, + proto_sentry.MessageId_BLOCK_HEADERS_66: BlockHeadersMsg, + proto_sentry.MessageId_GET_BLOCK_BODIES_66: GetBlockBodiesMsg, + proto_sentry.MessageId_BLOCK_BODIES_66: BlockBodiesMsg, + proto_sentry.MessageId_GET_NODE_DATA_66: GetNodeDataMsg, + proto_sentry.MessageId_NODE_DATA_66: NodeDataMsg, + proto_sentry.MessageId_GET_RECEIPTS_66: GetReceiptsMsg, + proto_sentry.MessageId_RECEIPTS_66: ReceiptsMsg, + proto_sentry.MessageId_NEW_BLOCK_HASHES_66: NewBlockHashesMsg, + proto_sentry.MessageId_NEW_BLOCK_66: NewBlockMsg, + proto_sentry.MessageId_TRANSACTIONS_66: TransactionsMsg, + proto_sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES_66: NewPooledTransactionHashesMsg, + proto_sentry.MessageId_GET_POOLED_TRANSACTIONS_66: GetPooledTransactionsMsg, + proto_sentry.MessageId_POOLED_TRANSACTIONS_66: PooledTransactionsMsg, }, } @@ -650,6 +654,24 @@ func (p *BlockRawBodiesPacket) Unpack() ([][][]byte, [][]*types.Header) { return txset, uncleset } +// GetNodeDataPacket represents a trie node data query. +type GetNodeDataPacket []common.Hash + +// GetNodeDataPacket represents a trie node data query over eth/66. +type GetNodeDataPacket66 struct { + RequestId uint64 + GetNodeDataPacket +} + +// NodeDataPacket is the network packet for trie node data distribution. +type NodeDataPacket [][]byte + +// NodeDataPacket is the network packet for trie node data distribution over eth/66. +type NodeDataPacket66 struct { + RequestId uint64 + NodeDataPacket +} + // GetReceiptsPacket represents a block receipts query. 
type GetReceiptsPacket []common.Hash @@ -892,6 +914,12 @@ func (*BlockBodiesPacket) Kind() byte { return BlockBodiesMsg } func (*NewBlockPacket) Name() string { return "NewBlock" } func (*NewBlockPacket) Kind() byte { return NewBlockMsg } +func (*GetNodeDataPacket) Name() string { return "GetNodeData" } +func (*GetNodeDataPacket) Kind() byte { return GetNodeDataMsg } + +func (*NodeDataPacket) Name() string { return "NodeData" } +func (*NodeDataPacket) Kind() byte { return NodeDataMsg } + func (*GetReceiptsPacket) Name() string { return "GetReceipts" } func (*GetReceiptsPacket) Kind() byte { return GetReceiptsMsg } diff --git a/eth/protocols/eth/protocol_test.go b/eth/protocols/eth/protocol_test.go index 5bae0706887..592dcbdf0c0 100644 --- a/eth/protocols/eth/protocol_test.go +++ b/eth/protocols/eth/protocol_test.go @@ -71,8 +71,8 @@ func TestGetBlockHeadersDataEncodeDecode(t *testing.T) { } } -// TestEth67EmptyMessages tests encoding of empty eth67 messages -func TestEth67EmptyMessages(t *testing.T) { +// TestEth66EmptyMessages tests encoding of empty eth66 messages +func TestEth66EmptyMessages(t *testing.T) { // All empty messages encodes to the same format want := common.FromHex("c4820457c0") @@ -84,6 +84,9 @@ func TestEth67EmptyMessages(t *testing.T) { GetBlockBodiesPacket66{1111, nil}, BlockBodiesPacket66{1111, nil}, BlockBodiesRLPPacket66{1111, nil}, + // Node data + GetNodeDataPacket66{1111, nil}, + NodeDataPacket66{1111, nil}, // Receipts GetReceiptsPacket66{1111, nil}, ReceiptsPacket66{1111, nil}, @@ -98,6 +101,9 @@ func TestEth67EmptyMessages(t *testing.T) { GetBlockBodiesPacket66{1111, GetBlockBodiesPacket([]common.Hash{})}, BlockBodiesPacket66{1111, BlockBodiesPacket([]*BlockBody{})}, BlockBodiesRLPPacket66{1111, BlockBodiesRLPPacket([]rlp.RawValue{})}, + // Node data + GetNodeDataPacket66{1111, GetNodeDataPacket([]common.Hash{})}, + NodeDataPacket66{1111, NodeDataPacket([][]byte{})}, // Receipts GetReceiptsPacket66{1111, GetReceiptsPacket([]common.Hash{})}, ReceiptsPacket66{1111, ReceiptsPacket([][]*types.Receipt{})}, @@ -113,8 +119,8 @@ func TestEth67EmptyMessages(t *testing.T) { } -// TestEth67Messages tests the encoding of all redefined eth67 messages -func TestEth67Messages(t *testing.T) { +// TestEth66Messages tests the encoding of all redefined eth66 messages +func TestEth66Messages(t *testing.T) { // Some basic structs used during testing var ( @@ -167,6 +173,10 @@ func TestEth67Messages(t *testing.T) { common.HexToHash("deadc0de"), common.HexToHash("feedbeef"), } + byteSlices := [][]byte{ + common.FromHex("deadc0de"), + common.FromHex("feedbeef"), + } // init the receipts { receipts = []*types.Receipt{ @@ -220,6 +230,14 @@ func TestEth67Messages(t *testing.T) { BlockBodiesRLPPacket66{1111, BlockBodiesRLPPacket([]rlp.RawValue{blockBodyRlp})}, 
common.FromHex("f902dc820457f902d6f902d3f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afbf901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"), }, + { + GetNodeDataPacket66{1111, GetNodeDataPacket(hashes)}, + common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"), + }, + { + NodeDataPacket66{1111, NodeDataPacket(byteSlices)}, + common.FromHex("ce820457ca84deadc0de84feedbeef"), + }, { GetReceiptsPacket66{1111, GetReceiptsPacket(hashes)}, common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"), diff --git a/ethdb/privateapi/ethbackend.go b/ethdb/privateapi/ethbackend.go index 46e0fa49f01..4719d6c78e6 100644 --- a/ethdb/privateapi/ethbackend.go +++ b/ethdb/privateapi/ethbackend.go @@ -197,7 +197,7 @@ func (s *EthBackendServer) Subscribe(r *remote.SubscribeRequest, subscribeServer func (s *EthBackendServer) ProtocolVersion(_ context.Context, _ *remote.ProtocolVersionRequest) (*remote.ProtocolVersionReply, error) { // Hardcoding to avoid import cycle - return &remote.ProtocolVersionReply{Id: 67}, nil + return &remote.ProtocolVersionReply{Id: 66}, nil } func (s *EthBackendServer) ClientVersion(_ context.Context, _ *remote.ClientVersionRequest) (*remote.ClientVersionReply, error) { diff --git a/go.mod b/go.mod index 7eab08c2969..ecea7d44e68 100644 --- a/go.mod +++ b/go.mod @@ -35,7 +35,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible - github.com/ledgerwatch/erigon-lib v0.0.0-20220628075812-eac8c1fa590b + github.com/ledgerwatch/erigon-lib v0.0.0-20220625091153-e7b09db04531 github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 diff --git a/go.sum b/go.sum index 3e38c0a8807..b7a39052902 100644 --- a/go.sum +++ b/go.sum @@ -386,8 +386,8 @@ github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3P github.com/kylelemons/godebug 
v0.0.0-20170224010052-a616ab194758/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220628075812-eac8c1fa590b h1:9SnS7lnKnl4PtXB0tYZjnK/wJIl/wHbx9ByXp2wDUNQ= -github.com/ledgerwatch/erigon-lib v0.0.0-20220628075812-eac8c1fa590b/go.mod h1:7sQ5B5m54zoo7RVRVukH3YZCYVrCC+BmwDBD+9KyTrE= +github.com/ledgerwatch/erigon-lib v0.0.0-20220625091153-e7b09db04531 h1:UKQC0chFY2s0wXOMDOyPEuUTwymsQRUpNHm7/5isnUo= +github.com/ledgerwatch/erigon-lib v0.0.0-20220625091153-e7b09db04531/go.mod h1:7sQ5B5m54zoo7RVRVukH3YZCYVrCC+BmwDBD+9KyTrE= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= diff --git a/p2p/dial_test.go b/p2p/dial_test.go index 185a71a401e..9bafb293fc7 100644 --- a/p2p/dial_test.go +++ b/p2p/dial_test.go @@ -410,7 +410,7 @@ func runDialTest(t *testing.T, config dialConfig, rounds []dialTestRound) { setupCh <- conn return nil } - dialsched = newDialScheduler(config, iterator, setup, 67) + dialsched = newDialScheduler(config, iterator, setup, 66) defer dialsched.stop() for i, round := range rounds { diff --git a/turbo/stages/mock_sentry.go b/turbo/stages/mock_sentry.go index 2caf0cfa040..5ba56a2fad5 100644 --- a/turbo/stages/mock_sentry.go +++ b/turbo/stages/mock_sentry.go @@ -117,7 +117,7 @@ func (ms *MockSentry) PeerMinBlock(context.Context, *proto_sentry.PeerMinBlockRe } func (ms *MockSentry) HandShake(ctx context.Context, in *emptypb.Empty) (*proto_sentry.HandShakeReply, error) { - return &proto_sentry.HandShakeReply{Protocol: proto_sentry.Protocol_ETH67}, nil + return &proto_sentry.HandShakeReply{Protocol: proto_sentry.Protocol_ETH66}, nil } func (ms *MockSentry) SendMessageByMinBlock(_ context.Context, r *proto_sentry.SendMessageByMinBlockRequest) (*proto_sentry.SentPeers, error) { ms.sentMessages = append(ms.sentMessages, r.Data) @@ -234,7 +234,7 @@ func MockWithEverything(t *testing.T, gspec *core.Genesis, key *ecdsa.PrivateKey cfg.DeprecatedTxPool.Disable = !withTxPool cfg.DeprecatedTxPool.StartOnInit = true - mock.SentryClient = direct.NewSentryClientDirect(eth.ETH67, mock) + mock.SentryClient = direct.NewSentryClientDirect(eth.ETH66, mock) sentries := []direct.SentryClient{mock.SentryClient} sendBodyRequest := func(context.Context, *bodydownload.BodyRequest) ([64]byte, bool) { return [64]byte{}, false } @@ -450,7 +450,7 @@ func (ms *MockSentry) InsertChain(chain *core.ChainPack) error { return err } ms.ReceiveWg.Add(1) - for _, err = range ms.Send(&proto_sentry.InboundMessage{Id: proto_sentry.MessageId_NEW_BLOCK, Data: b, PeerId: ms.PeerId}) { + for _, err = range ms.Send(&proto_sentry.InboundMessage{Id: proto_sentry.MessageId_NEW_BLOCK_66, Data: b, PeerId: ms.PeerId}) { if err != nil { return err } @@ -464,7 +464,7 @@ func (ms *MockSentry) InsertChain(chain *core.ChainPack) error { return err } ms.ReceiveWg.Add(1) - for _, err = range ms.Send(&proto_sentry.InboundMessage{Id: proto_sentry.MessageId_BLOCK_HEADERS, Data: b, PeerId: ms.PeerId}) { + for _, err = range ms.Send(&proto_sentry.InboundMessage{Id: proto_sentry.MessageId_BLOCK_HEADERS_66, Data: b, PeerId: ms.PeerId}) { if err != nil { return err } @@ -482,7 +482,7 @@ func (ms *MockSentry) 
InsertChain(chain *core.ChainPack) error { return err } ms.ReceiveWg.Add(1) - for _, err = range ms.Send(&proto_sentry.InboundMessage{Id: proto_sentry.MessageId_BLOCK_BODIES, Data: b, PeerId: ms.PeerId}) { + for _, err = range ms.Send(&proto_sentry.InboundMessage{Id: proto_sentry.MessageId_BLOCK_BODIES_66, Data: b, PeerId: ms.PeerId}) { if err != nil { return err } diff --git a/turbo/stages/sentry_mock_test.go b/turbo/stages/sentry_mock_test.go index df2c6329478..f0a9c136efa 100644 --- a/turbo/stages/sentry_mock_test.go +++ b/turbo/stages/sentry_mock_test.go @@ -40,7 +40,7 @@ func TestHeaderStep(t *testing.T) { }) require.NoError(t, err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK_66, Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } // Send all the headers @@ -50,7 +50,7 @@ func TestHeaderStep(t *testing.T) { }) require.NoError(t, err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS_66, Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceed @@ -79,7 +79,7 @@ func TestMineBlockWith1Tx(t *testing.T) { }) require.NoError(err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK_66, Data: b, PeerId: m.PeerId}) { require.NoError(err) } // Send all the headers @@ -89,7 +89,7 @@ func TestMineBlockWith1Tx(t *testing.T) { }) require.NoError(err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS_66, Data: b, PeerId: m.PeerId}) { require.NoError(err) } m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed @@ -113,7 +113,7 @@ func TestMineBlockWith1Tx(t *testing.T) { b, err := rlp.EncodeToBytes(chain.TopBlock.Transactions()) require.NoError(err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_TRANSACTIONS, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_TRANSACTIONS_66, Data: b, PeerId: m.PeerId}) { require.NoError(err) } m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed @@ -145,7 +145,7 @@ func TestReorg(t *testing.T) { t.Fatal(err) } m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK_66, Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } @@ -158,7 +158,7 @@ func TestReorg(t *testing.T) { t.Fatal(err) } m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS_66, Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed @@ -199,7 +199,7 @@ func TestReorg(t *testing.T) { t.Fatal(err) } 
m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK_66, Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } @@ -212,7 +212,7 @@ func TestReorg(t *testing.T) { t.Fatal(err) } m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS_66, Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed @@ -232,7 +232,7 @@ func TestReorg(t *testing.T) { t.Fatal(err) } m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK_66, Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } @@ -245,7 +245,7 @@ func TestReorg(t *testing.T) { t.Fatal(err) } m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS_66, Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } @@ -256,7 +256,7 @@ func TestReorg(t *testing.T) { }) require.NoError(t, err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS_66, Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed @@ -283,7 +283,7 @@ func TestReorg(t *testing.T) { }) require.NoError(t, err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK_66, Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } @@ -294,7 +294,7 @@ func TestReorg(t *testing.T) { }) require.NoError(t, err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS_66, Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed @@ -342,7 +342,7 @@ func TestAnchorReplace(t *testing.T) { }) require.NoError(t, err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK_66, Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } @@ -353,7 +353,7 @@ func TestAnchorReplace(t *testing.T) { }) require.NoError(t, err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS_66, Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } require.NoError(t, err) @@ -365,7 +365,7 @@ func TestAnchorReplace(t *testing.T) { }) require.NoError(t, err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: 
sentry.MessageId_NEW_BLOCK, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK_66, Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } require.NoError(t, err) @@ -377,7 +377,7 @@ func TestAnchorReplace(t *testing.T) { }) require.NoError(t, err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS_66, Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } @@ -390,7 +390,7 @@ func TestAnchorReplace(t *testing.T) { }) require.NoError(t, err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS_66, Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } @@ -438,7 +438,7 @@ func TestAnchorReplace2(t *testing.T) { }) require.NoError(t, err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK_66, Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } @@ -449,7 +449,7 @@ func TestAnchorReplace2(t *testing.T) { }) require.NoError(t, err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS_66, Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } @@ -460,7 +460,7 @@ func TestAnchorReplace2(t *testing.T) { }) require.NoError(t, err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_NEW_BLOCK_66, Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } @@ -471,7 +471,7 @@ func TestAnchorReplace2(t *testing.T) { }) require.NoError(t, err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS_66, Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } @@ -482,7 +482,7 @@ func TestAnchorReplace2(t *testing.T) { }) require.NoError(t, err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS_66, Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } @@ -495,7 +495,7 @@ func TestAnchorReplace2(t *testing.T) { }) require.NoError(t, err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS_66, Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } @@ -591,7 +591,7 @@ func TestPoSDownloader(t *testing.T) { }) require.NoError(t, err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS_66, Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } m.ReceiveWg.Wait() @@ -657,7 
+657,7 @@ func TestPoSSyncWithInvalidHeader(t *testing.T) { }) require.NoError(t, err) m.ReceiveWg.Add(1) - for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS, Data: b, PeerId: m.PeerId}) { + for _, err = range m.Send(&sentry.InboundMessage{Id: sentry.MessageId_BLOCK_HEADERS_66, Data: b, PeerId: m.PeerId}) { require.NoError(t, err) } m.ReceiveWg.Wait() From aa7985341e194cf33ad51afaf09252b1558f2599 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Tue, 28 Jun 2022 17:46:24 +0200 Subject: [PATCH 119/136] LVH support to memory overlay (#4555) * fixed fcu * fixed leak * maybe now? * wrote forkchoice --- eth/stagedsync/stage_headers.go | 39 +++++++++++++------- turbo/stages/headerdownload/header_algos.go | 40 +++++++++++++++------ 2 files changed, 55 insertions(+), 24 deletions(-) diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index f8a0142fa8c..187eb90509c 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -349,6 +349,28 @@ func startHandlingForkChoice( } } + if cfg.memoryOverlay && headerHash == cfg.hd.GetNextForkHash() { + log.Info("Flushing in-memory state") + if err := cfg.hd.FlushNextForkState(tx); err != nil { + return nil, err + } + cfg.hd.BeaconRequestList.Remove(requestId) + rawdb.WriteForkchoiceHead(tx, forkChoice.HeadBlockHash) + canonical, err := safeAndFinalizedBlocksAreCanonical(forkChoice, s, tx, cfg) + if err != nil { + log.Warn(fmt.Sprintf("[%s] Fork choice err", s.LogPrefix()), "err", err) + return nil, err + } + if canonical { + cfg.hd.SetPendingPayloadHash(headerHash) + return nil, nil + } else { + return &privateapi.PayloadStatus{ + CriticalError: &privateapi.InvalidForkchoiceStateErr, + }, nil + } + } + cfg.hd.UpdateTopSeenHeightPoS(headerNumber) forkingPoint := uint64(0) if headerNumber > 0 { @@ -362,15 +384,6 @@ func startHandlingForkChoice( } } - if cfg.memoryOverlay && headerHash == cfg.hd.GetNextForkHash() { - log.Info("Flushing in-memory state") - if err := cfg.hd.FlushNextForkState(tx); err != nil { - return nil, err - } - cfg.hd.SetPendingPayloadHash(headerHash) - return nil, nil - } - log.Info(fmt.Sprintf("[%s] Fork choice re-org", s.LogPrefix()), "headerNumber", headerNumber, "forkingPoint", forkingPoint) if requestStatus == engineapi.New { @@ -571,14 +584,14 @@ func verifyAndSaveNewPoSHeader( // TODO(yperbasis): considered non-canonical because some missing headers were downloaded but not canonized // Or it's not a problem because forkChoice is updated frequently? 
if cfg.memoryOverlay { - status, validationError, criticalError := cfg.hd.ValidatePayload(tx, header, body, false, cfg.execPayload) + status, latestValidHash, validationError, criticalError := cfg.hd.ValidatePayload(tx, header, body, false, cfg.execPayload) if criticalError != nil { return &privateapi.PayloadStatus{CriticalError: criticalError}, false, criticalError } success = status == remote.EngineStatus_VALID || status == remote.EngineStatus_ACCEPTED return &privateapi.PayloadStatus{ Status: status, - LatestValidHash: currentHeadHash, + LatestValidHash: latestValidHash, ValidationError: validationError, }, success, nil } @@ -587,14 +600,14 @@ func verifyAndSaveNewPoSHeader( } if cfg.memoryOverlay && (cfg.hd.GetNextForkHash() == (common.Hash{}) || header.ParentHash == cfg.hd.GetNextForkHash()) { - status, validationError, criticalError := cfg.hd.ValidatePayload(tx, header, body, true, cfg.execPayload) + status, latestValidHash, validationError, criticalError := cfg.hd.ValidatePayload(tx, header, body, true, cfg.execPayload) if criticalError != nil { return &privateapi.PayloadStatus{CriticalError: criticalError}, false, criticalError } success = status == remote.EngineStatus_VALID || status == remote.EngineStatus_ACCEPTED return &privateapi.PayloadStatus{ Status: status, - LatestValidHash: currentHeadHash, + LatestValidHash: latestValidHash, ValidationError: validationError, }, success, nil } diff --git a/turbo/stages/headerdownload/header_algos.go b/turbo/stages/headerdownload/header_algos.go index 2fe3b984551..759ba81be44 100644 --- a/turbo/stages/headerdownload/header_algos.go +++ b/turbo/stages/headerdownload/header_algos.go @@ -1093,10 +1093,16 @@ func abs64(n int64) uint64 { return uint64(n) } -func (hd *HeaderDownload) ValidatePayload(tx kv.RwTx, header *types.Header, body *types.RawBody, store bool, execPayload func(kv.RwTx, *types.Header, *types.RawBody, uint64, []*types.Header, []*types.RawBody) error) (status remote.EngineStatus, validationError error, criticalError error) { +func (hd *HeaderDownload) ValidatePayload(tx kv.RwTx, header *types.Header, body *types.RawBody, store bool, execPayload func(kv.RwTx, *types.Header, *types.RawBody, uint64, []*types.Header, []*types.RawBody) error) (status remote.EngineStatus, latestValidHash common.Hash, validationError error, criticalError error) { hd.lock.Lock() defer hd.lock.Unlock() maxDepth := uint64(16) + + currentHeight := rawdb.ReadCurrentBlockNumber(tx) + if currentHeight == nil { + criticalError = fmt.Errorf("could not read block number.") + return + } if store { // If it is a continuation of the canonical chain we can stack it up. if hd.nextForkState == nil { @@ -1105,17 +1111,17 @@ func (hd *HeaderDownload) ValidatePayload(tx kv.RwTx, header *types.Header, body hd.nextForkState.UpdateTxn(tx) } hd.nextForkHash = header.Hash() - status = remote.EngineStatus_VALID // Let's assemble the side fork chain if we have others building. 
validationError = execPayload(hd.nextForkState, header, body, 0, nil, nil) if validationError != nil { status = remote.EngineStatus_INVALID + latestValidHash = header.ParentHash + return } - return - } - currentHeight := rawdb.ReadCurrentBlockNumber(tx) - if currentHeight == nil { - criticalError = fmt.Errorf("could not read block number.") + status = remote.EngineStatus_VALID + latestValidHash = header.Hash() + hd.sideForksBlock[latestValidHash] = sideForkBlock{header, body} + hd.cleanupOutdateSideForks(*currentHeight, maxDepth) return } // if the block is not in range of MAX_DEPTH from head then we do not validate it. @@ -1123,8 +1129,6 @@ func (hd *HeaderDownload) ValidatePayload(tx kv.RwTx, header *types.Header, body status = remote.EngineStatus_ACCEPTED return } - // if it is not canonical we validate it as a side fork. - batch := memdb.NewMemoryBatch(tx) // Let's assemble the side fork backwards var foundCanonical bool currentHash := header.ParentHash @@ -1155,17 +1159,26 @@ func (hd *HeaderDownload) ValidatePayload(tx kv.RwTx, header *types.Header, body } hd.sideForksBlock[header.Hash()] = sideForkBlock{header, body} status = remote.EngineStatus_VALID + // if it is not canonical we validate it as a side fork. + batch := memdb.NewMemoryBatch(tx) + defer batch.Close() validationError = execPayload(batch, header, body, unwindPoint, headersChain, bodiesChain) + latestValidHash = header.Hash() if validationError != nil { + latestValidHash = header.ParentHash status = remote.EngineStatus_INVALID } // After the we finished executing, we clean up old forks + hd.cleanupOutdateSideForks(*currentHeight, maxDepth) + return +} + +func (hd *HeaderDownload) cleanupOutdateSideForks(currentHeight uint64, maxDepth uint64) { for hash, sb := range hd.sideForksBlock { - if abs64(int64(*currentHeight)-sb.header.Number.Int64()) > maxDepth { + if abs64(int64(currentHeight)-sb.header.Number.Int64()) > maxDepth { delete(hd.sideForksBlock, hash) } } - return } func (hd *HeaderDownload) FlushNextForkState(tx kv.RwTx) error { @@ -1174,6 +1187,11 @@ func (hd *HeaderDownload) FlushNextForkState(tx kv.RwTx) error { if err := hd.nextForkState.Flush(tx); err != nil { return err } + // If the side fork hash is now becoming canonical we can clean up. + if _, ok := hd.sideForksBlock[hd.nextForkHash]; ok { + delete(hd.sideForksBlock, hd.nextForkHash) + } + hd.nextForkState.Close() hd.nextForkHash = common.Hash{} hd.nextForkState = nil return nil From 6878ddb76f53a51da3a7278dc4c1fe651fbc718a Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Wed, 29 Jun 2022 08:24:57 +0600 Subject: [PATCH 120/136] More user-friendly warning about non-existing module of rpc #4568 --- node/rpcstack.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/rpcstack.go b/node/rpcstack.go index 42da30d1847..5249ee1d3eb 100644 --- a/node/rpcstack.go +++ b/node/rpcstack.go @@ -465,7 +465,7 @@ func newGzipHandler(next http.Handler) http.Handler { // and then registers all of the APIs exposed by the services. 
func RegisterApisFromWhitelist(apis []rpc.API, modules []string, srv *rpc.Server, exposeAll bool) error { if bad, available := checkModuleAvailability(modules, apis); len(bad) > 0 { - log.Error("Unavailable modules in HTTP API list", "unavailable", bad, "available", available) + log.Error("Non-existing modules in HTTP API list, please remove it", "non-existing", bad, "existing", available) } // Generate the whitelist based on the allowed modules whitelist := make(map[string]bool) From ed69bac065bdfa10daaece3dfd641732378a3e14 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Wed, 29 Jun 2022 04:28:00 +0200 Subject: [PATCH 121/136] Fixed in-memory execution hive tests (#4565) * badBlockHalt * fixed buidl * lintl --- cmd/integration/commands/stages.go | 6 +++--- cmd/integration/commands/state_stages.go | 2 +- eth/backend.go | 4 ++-- eth/stagedsync/stage_interhashes.go | 7 ++++++- eth/stagedsync/stage_interhashes_test.go | 6 +++--- eth/stagedsync/stage_senders.go | 7 ++++++- eth/stagedsync/stage_senders_test.go | 2 +- turbo/stages/mock_sentry.go | 6 +++--- turbo/stages/stageloop.go | 8 ++++---- 9 files changed, 29 insertions(+), 19 deletions(-) diff --git a/cmd/integration/commands/stages.go b/cmd/integration/commands/stages.go index 54b18bcbfec..eb57cc871e9 100644 --- a/cmd/integration/commands/stages.go +++ b/cmd/integration/commands/stages.go @@ -609,7 +609,7 @@ func stageSenders(db kv.RwDB, ctx context.Context) error { return err } - cfg := stagedsync.StageSendersCfg(db, chainConfig, tmpdir, pm, br) + cfg := stagedsync.StageSendersCfg(db, chainConfig, false, tmpdir, pm, br) if unwind > 0 { u := sync.NewUnwindState(stages.Senders, s.BlockNumber-unwind, s.BlockNumber) if err = stagedsync.UnwindSendersStage(u, tx, cfg, ctx); err != nil { @@ -724,7 +724,7 @@ func stageTrie(db kv.RwDB, ctx context.Context) error { log.Info("StageExec", "progress", execStage.BlockNumber) log.Info("StageTrie", "progress", s.BlockNumber) - cfg := stagedsync.StageTrieCfg(db, true, true, tmpdir, getBlockReader(chainConfig, db)) + cfg := stagedsync.StageTrieCfg(db, true, true, false, tmpdir, getBlockReader(chainConfig, db)) if unwind > 0 { u := sync.NewUnwindState(stages.IntermediateHashes, s.BlockNumber-unwind, s.BlockNumber) if err := stagedsync.UnwindIntermediateHashesStage(u, s, tx, cfg, ctx); err != nil { @@ -1209,7 +1209,7 @@ func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig) stagedsync.StageMiningCreateBlockCfg(db, miner, *chainConfig, engine, nil, nil, nil, tmpdir), stagedsync.StageMiningExecCfg(db, miner, events, *chainConfig, engine, &vm.Config{}, tmpdir, nil), stagedsync.StageHashStateCfg(db, tmpdir), - stagedsync.StageTrieCfg(db, false, true, tmpdir, br), + stagedsync.StageTrieCfg(db, false, true, false, tmpdir, br), stagedsync.StageMiningFinishCfg(db, *chainConfig, engine, miner, ctx.Done()), ), stagedsync.MiningUnwindOrder, diff --git a/cmd/integration/commands/state_stages.go b/cmd/integration/commands/state_stages.go index a41e0db2f9d..d620bb0ab86 100644 --- a/cmd/integration/commands/state_stages.go +++ b/cmd/integration/commands/state_stages.go @@ -426,7 +426,7 @@ func loopIh(db kv.RwDB, ctx context.Context, unwind uint64) error { } _ = sync.SetCurrentStage(stages.IntermediateHashes) u = &stagedsync.UnwindState{ID: stages.IntermediateHashes, UnwindPoint: to} - if err = stagedsync.UnwindIntermediateHashesStage(u, stage(sync, tx, nil, stages.IntermediateHashes), tx, stagedsync.StageTrieCfg(db, true, true, dirs.Tmp, getBlockReader(chainConfig, db)), ctx); err != nil { + 
if err = stagedsync.UnwindIntermediateHashesStage(u, stage(sync, tx, nil, stages.IntermediateHashes), tx, stagedsync.StageTrieCfg(db, true, true, false, dirs.Tmp, getBlockReader(chainConfig, db)), ctx); err != nil { return err } must(tx.Commit()) diff --git a/eth/backend.go b/eth/backend.go index d85647cde94..63e2c0f4b90 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -367,7 +367,7 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere stagedsync.StageMiningCreateBlockCfg(backend.chainDB, miner, *backend.chainConfig, backend.engine, backend.txPool2, backend.txPool2DB, nil, tmpdir), stagedsync.StageMiningExecCfg(backend.chainDB, miner, backend.notifications.Events, *backend.chainConfig, backend.engine, &vm.Config{}, tmpdir, nil), stagedsync.StageHashStateCfg(backend.chainDB, tmpdir), - stagedsync.StageTrieCfg(backend.chainDB, false, true, tmpdir, blockReader), + stagedsync.StageTrieCfg(backend.chainDB, false, true, true, tmpdir, blockReader), stagedsync.StageMiningFinishCfg(backend.chainDB, *backend.chainConfig, backend.engine, miner, backend.miningSealingQuit), ), stagedsync.MiningUnwindOrder, stagedsync.MiningPruneOrder) @@ -385,7 +385,7 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere stagedsync.StageMiningCreateBlockCfg(backend.chainDB, miningStatePos, *backend.chainConfig, backend.engine, backend.txPool2, backend.txPool2DB, param, tmpdir), stagedsync.StageMiningExecCfg(backend.chainDB, miningStatePos, backend.notifications.Events, *backend.chainConfig, backend.engine, &vm.Config{}, tmpdir, interrupt), stagedsync.StageHashStateCfg(backend.chainDB, tmpdir), - stagedsync.StageTrieCfg(backend.chainDB, false, true, tmpdir, blockReader), + stagedsync.StageTrieCfg(backend.chainDB, false, true, true, tmpdir, blockReader), stagedsync.StageMiningFinishCfg(backend.chainDB, *backend.chainConfig, backend.engine, miningStatePos, backend.miningSealingQuit), ), stagedsync.MiningUnwindOrder, stagedsync.MiningPruneOrder) // We start the mining step diff --git a/eth/stagedsync/stage_interhashes.go b/eth/stagedsync/stage_interhashes.go index 950d204be0b..57a41ea723d 100644 --- a/eth/stagedsync/stage_interhashes.go +++ b/eth/stagedsync/stage_interhashes.go @@ -24,17 +24,19 @@ import ( type TrieCfg struct { db kv.RwDB checkRoot bool + badBlockHalt bool tmpDir string saveNewHashesToDB bool // no reason to save changes when calculating root for mining blockReader services.FullBlockReader } -func StageTrieCfg(db kv.RwDB, checkRoot, saveNewHashesToDB bool, tmpDir string, blockReader services.FullBlockReader) TrieCfg { +func StageTrieCfg(db kv.RwDB, checkRoot, saveNewHashesToDB, badBlockHalt bool, tmpDir string, blockReader services.FullBlockReader) TrieCfg { return TrieCfg{ db: db, checkRoot: checkRoot, tmpDir: tmpDir, saveNewHashesToDB: saveNewHashesToDB, + badBlockHalt: badBlockHalt, blockReader: blockReader, } } @@ -91,6 +93,9 @@ func SpawnIntermediateHashesStage(s *StageState, u Unwinder, tx kv.RwTx, cfg Tri if err == nil { if cfg.checkRoot && root != expectedRootHash { log.Error(fmt.Sprintf("[%s] Wrong trie root of block %d: %x, expected (from header): %x. 
Block hash: %x", logPrefix, to, root, expectedRootHash, headerHash)) + if cfg.badBlockHalt { + return trie.EmptyRoot, fmt.Errorf("Wrong trie root") + } if to > s.BlockNumber { unwindTo := (to + s.BlockNumber) / 2 // Binary search for the correct block, biased to the lower numbers log.Warn("Unwinding due to incorrect root hash", "to", unwindTo) diff --git a/eth/stagedsync/stage_interhashes_test.go b/eth/stagedsync/stage_interhashes_test.go index ee2f4b375af..2766c0eaa9c 100644 --- a/eth/stagedsync/stage_interhashes_test.go +++ b/eth/stagedsync/stage_interhashes_test.go @@ -71,7 +71,7 @@ func TestAccountAndStorageTrie(t *testing.T) { // ---------------------------------------------------------------- blockReader := snapshotsync.NewBlockReader() - cfg := StageTrieCfg(nil, false, true, t.TempDir(), blockReader) + cfg := StageTrieCfg(nil, false, true, false, t.TempDir(), blockReader) _, err := RegenerateIntermediateHashes("IH", tx, cfg, common.Hash{} /* expectedRootHash */, nil /* quit */) assert.Nil(t, err) @@ -191,7 +191,7 @@ func TestAccountTrieAroundExtensionNode(t *testing.T) { assert.Nil(t, tx.Put(kv.HashedAccounts, hash6[:], encoded)) blockReader := snapshotsync.NewBlockReader() - _, err := RegenerateIntermediateHashes("IH", tx, StageTrieCfg(nil, false, true, t.TempDir(), blockReader), common.Hash{} /* expectedRootHash */, nil /* quit */) + _, err := RegenerateIntermediateHashes("IH", tx, StageTrieCfg(nil, false, true, false, t.TempDir(), blockReader), common.Hash{} /* expectedRootHash */, nil /* quit */) assert.Nil(t, err) accountTrie := make(map[string][]byte) @@ -253,7 +253,7 @@ func TestStorageDeletion(t *testing.T) { // ---------------------------------------------------------------- blockReader := snapshotsync.NewBlockReader() - cfg := StageTrieCfg(nil, false, true, t.TempDir(), blockReader) + cfg := StageTrieCfg(nil, false, true, false, t.TempDir(), blockReader) _, err = RegenerateIntermediateHashes("IH", tx, cfg, common.Hash{} /* expectedRootHash */, nil /* quit */) assert.Nil(t, err) diff --git a/eth/stagedsync/stage_senders.go b/eth/stagedsync/stage_senders.go index 4e10937d855..954bdc9faa6 100644 --- a/eth/stagedsync/stage_senders.go +++ b/eth/stagedsync/stage_senders.go @@ -36,6 +36,7 @@ type SendersCfg struct { bufferSize int numOfGoroutines int readChLen int + badBlockHalt bool tmpdir string prune prune.Mode chainConfig *params.ChainConfig @@ -43,7 +44,7 @@ type SendersCfg struct { snapshotHashesCfg *snapshothashes.Config } -func StageSendersCfg(db kv.RwDB, chainCfg *params.ChainConfig, tmpdir string, prune prune.Mode, br *snapshotsync.BlockRetire) SendersCfg { +func StageSendersCfg(db kv.RwDB, chainCfg *params.ChainConfig, badBlockHalt bool, tmpdir string, prune prune.Mode, br *snapshotsync.BlockRetire) SendersCfg { const sendersBatchSize = 10000 const sendersBlockSize = 4096 @@ -54,6 +55,7 @@ func StageSendersCfg(db kv.RwDB, chainCfg *params.ChainConfig, tmpdir string, pr bufferSize: (sendersBlockSize * 10 / 20) * 10000, // 20*4096 numOfGoroutines: secp256k1.NumOfContexts(), // we can only be as parallels as our crypto library supports, readChLen: 4, + badBlockHalt: badBlockHalt, tmpdir: tmpdir, chainConfig: chainCfg, prune: prune, @@ -257,6 +259,9 @@ Loop: } if minBlockErr != nil { log.Error(fmt.Sprintf("[%s] Error recovering senders for block %d %x): %v", logPrefix, minBlockNum, minBlockHash, minBlockErr)) + if cfg.badBlockHalt { + return minBlockErr + } if to > s.BlockNumber { u.UnwindTo(minBlockNum-1, minBlockHash) } diff --git a/eth/stagedsync/stage_senders_test.go 
b/eth/stagedsync/stage_senders_test.go index c6384bc871d..996beda20d9 100644 --- a/eth/stagedsync/stage_senders_test.go +++ b/eth/stagedsync/stage_senders_test.go @@ -109,7 +109,7 @@ func TestSenders(t *testing.T) { require.NoError(stages.SaveStageProgress(tx, stages.Bodies, 3)) - cfg := StageSendersCfg(db, params.TestChainConfig, "", prune.Mode{}, snapshotsync.NewBlockRetire(1, "", nil, db, nil, nil)) + cfg := StageSendersCfg(db, params.TestChainConfig, false, "", prune.Mode{}, snapshotsync.NewBlockRetire(1, "", nil, db, nil, nil)) err := SpawnRecoverSendersStage(cfg, &StageState{ID: stages.Senders}, nil, tx, 3, ctx) assert.NoError(t, err) diff --git a/turbo/stages/mock_sentry.go b/turbo/stages/mock_sentry.go index 5ba56a2fad5..83789c41db5 100644 --- a/turbo/stages/mock_sentry.go +++ b/turbo/stages/mock_sentry.go @@ -325,7 +325,7 @@ func MockWithEverything(t *testing.T, gspec *core.Genesis, key *ecdsa.PrivateKey blockReader, ), stagedsync.StageIssuanceCfg(mock.DB, mock.ChainConfig, blockReader, true), - stagedsync.StageSendersCfg(mock.DB, mock.ChainConfig, mock.tmpdir, prune, snapshotsync.NewBlockRetire(1, mock.tmpdir, allSnapshots, mock.DB, snapshotsDownloader, mock.Notifications.Events)), + stagedsync.StageSendersCfg(mock.DB, mock.ChainConfig, false, mock.tmpdir, prune, snapshotsync.NewBlockRetire(1, mock.tmpdir, allSnapshots, mock.DB, snapshotsDownloader, mock.Notifications.Events)), stagedsync.StageExecuteBlocksCfg( mock.DB, prune, @@ -342,7 +342,7 @@ func MockWithEverything(t *testing.T, gspec *core.Genesis, key *ecdsa.PrivateKey ), stagedsync.StageTranspileCfg(mock.DB, cfg.BatchSize, mock.ChainConfig), stagedsync.StageHashStateCfg(mock.DB, mock.tmpdir), - stagedsync.StageTrieCfg(mock.DB, true, true, mock.tmpdir, blockReader), + stagedsync.StageTrieCfg(mock.DB, true, true, false, mock.tmpdir, blockReader), stagedsync.StageHistoryCfg(mock.DB, prune, mock.tmpdir), stagedsync.StageLogIndexCfg(mock.DB, prune, mock.tmpdir), stagedsync.StageCallTracesCfg(mock.DB, prune, 0, mock.tmpdir), @@ -368,7 +368,7 @@ func MockWithEverything(t *testing.T, gspec *core.Genesis, key *ecdsa.PrivateKey stagedsync.StageMiningCreateBlockCfg(mock.DB, miner, *mock.ChainConfig, mock.Engine, mock.TxPool, nil, nil, mock.tmpdir), stagedsync.StageMiningExecCfg(mock.DB, miner, nil, *mock.ChainConfig, mock.Engine, &vm.Config{}, mock.tmpdir, nil), stagedsync.StageHashStateCfg(mock.DB, mock.tmpdir), - stagedsync.StageTrieCfg(mock.DB, false, true, mock.tmpdir, blockReader), + stagedsync.StageTrieCfg(mock.DB, false, true, false, mock.tmpdir, blockReader), stagedsync.StageMiningFinishCfg(mock.DB, *mock.ChainConfig, mock.Engine, miner, mock.Ctx.Done()), ), stagedsync.MiningUnwindOrder, diff --git a/turbo/stages/stageloop.go b/turbo/stages/stageloop.go index 027bde40fbb..7fdd36ca352 100644 --- a/turbo/stages/stageloop.go +++ b/turbo/stages/stageloop.go @@ -378,7 +378,7 @@ func NewStagedSync( blockReader, ), stagedsync.StageIssuanceCfg(db, controlServer.ChainConfig, blockReader, cfg.EnabledIssuance), - stagedsync.StageSendersCfg(db, controlServer.ChainConfig, tmpdir, cfg.Prune, blockRetire), + stagedsync.StageSendersCfg(db, controlServer.ChainConfig, false, tmpdir, cfg.Prune, blockRetire), stagedsync.StageExecuteBlocksCfg( db, cfg.Prune, @@ -395,7 +395,7 @@ func NewStagedSync( ), stagedsync.StageTranspileCfg(db, cfg.BatchSize, controlServer.ChainConfig), stagedsync.StageHashStateCfg(db, tmpdir), - stagedsync.StageTrieCfg(db, true, true, tmpdir, blockReader), + stagedsync.StageTrieCfg(db, true, true, false, tmpdir, 
blockReader), stagedsync.StageHistoryCfg(db, cfg.Prune, tmpdir), stagedsync.StageLogIndexCfg(db, cfg.Prune, tmpdir), stagedsync.StageCallTracesCfg(db, cfg.Prune, 0, tmpdir), @@ -444,7 +444,7 @@ func NewInMemoryExecution(ctx context.Context, logger log.Logger, db kv.RwDB, cf snapshots, blockReader, ), stagedsync.StageBlockHashesCfg(db, tmpdir, controlServer.ChainConfig), - stagedsync.StageSendersCfg(db, controlServer.ChainConfig, tmpdir, cfg.Prune, nil), + stagedsync.StageSendersCfg(db, controlServer.ChainConfig, true, tmpdir, cfg.Prune, nil), stagedsync.StageExecuteBlocksCfg( db, cfg.Prune, @@ -460,7 +460,7 @@ func NewInMemoryExecution(ctx context.Context, logger log.Logger, db kv.RwDB, cf blockReader, ), stagedsync.StageHashStateCfg(db, tmpdir), - stagedsync.StageTrieCfg(db, true, true, tmpdir, blockReader)), + stagedsync.StageTrieCfg(db, true, true, true, tmpdir, blockReader)), stagedsync.StateUnwindOrder, nil, ), nil From 64697a9647667fa9904b88b3475ff11da0eb2695 Mon Sep 17 00:00:00 2001 From: Enrique Jose Avila Asapche Date: Wed, 29 Jun 2022 05:29:59 +0300 Subject: [PATCH 122/136] torrent verbosity now uses int (#4551) * torrent verbosity now uses int * logging torrent verbosity * moved crit correctly --- .../downloader/downloadercfg/logger.go | 21 ++++++++++++++++--- cmd/downloader/main.go | 7 ++++--- cmd/utils/flags.go | 11 +++++----- 3 files changed, 27 insertions(+), 12 deletions(-) diff --git a/cmd/downloader/downloader/downloadercfg/logger.go b/cmd/downloader/downloader/downloadercfg/logger.go index bf1da9bfb63..f21650263f5 100644 --- a/cmd/downloader/downloader/downloadercfg/logger.go +++ b/cmd/downloader/downloader/downloadercfg/logger.go @@ -1,6 +1,7 @@ package downloadercfg import ( + "fmt" "strings" utp "github.com/anacrolix/go-libutp" @@ -13,10 +14,24 @@ func init() { utp.Logger.Handlers = []lg.Handler{noopHandler{}} } -func Str2LogLevel(in string) (lg.Level, error) { +func Int2LogLevel(level int) (lg.Level, error) { lvl := lg.Level{} - if err := lvl.UnmarshalText([]byte(in)); err != nil { - return lvl, err + + switch level { + case 0: + lvl = lg.NotSet + case 1: + lvl = lg.Critical + case 2: + lvl = lg.Error + case 3: + lvl = lg.Warning + case 4: + lvl = lg.Info + case 5: + lvl = lg.Debug + default: + return lvl, fmt.Errorf("invalid level set, expected a number between 0-5 but got: %d", level) } return lvl, nil } diff --git a/cmd/downloader/main.go b/cmd/downloader/main.go index acd3d856a75..ac415b95e90 100644 --- a/cmd/downloader/main.go +++ b/cmd/downloader/main.go @@ -38,7 +38,7 @@ var ( forceVerify bool downloaderApiAddr string natSetting string - torrentVerbosity string + torrentVerbosity int downloadRateStr, uploadRateStr string torrentDownloadSlots int torrentPort int @@ -55,9 +55,9 @@ func init() { rootCmd.Flags().StringVar(&natSetting, "nat", utils.NATFlag.Value, utils.NATFlag.Usage) rootCmd.Flags().StringVar(&downloaderApiAddr, "downloader.api.addr", "127.0.0.1:9093", "external downloader api network address, for example: 127.0.0.1:9093 serves remote downloader interface") - rootCmd.Flags().StringVar(&torrentVerbosity, "torrent.verbosity", utils.TorrentVerbosityFlag.Value, utils.TorrentVerbosityFlag.Usage) rootCmd.Flags().StringVar(&downloadRateStr, "torrent.download.rate", utils.TorrentDownloadRateFlag.Value, utils.TorrentDownloadRateFlag.Usage) rootCmd.Flags().StringVar(&uploadRateStr, "torrent.upload.rate", utils.TorrentUploadRateFlag.Value, utils.TorrentUploadRateFlag.Usage) + rootCmd.Flags().IntVar(&torrentVerbosity, "torrent.verbosity", 
utils.TorrentVerbosityFlag.Value, utils.TorrentVerbosityFlag.Usage) rootCmd.Flags().IntVar(&torrentPort, "torrent.port", utils.TorrentPortFlag.Value, utils.TorrentPortFlag.Usage) rootCmd.Flags().IntVar(&torrentMaxPeers, "torrent.maxpeers", utils.TorrentMaxPeersFlag.Value, utils.TorrentMaxPeersFlag.Usage) rootCmd.Flags().IntVar(&torrentConnsPerFile, "torrent.conns.perfile", utils.TorrentConnsPerFileFlag.Value, utils.TorrentConnsPerFileFlag.Usage) @@ -114,10 +114,11 @@ var rootCmd = &cobra.Command{ func Downloader(ctx context.Context) error { dirs := datadir.New(datadirCli) - torrentLogLevel, err := downloadercfg.Str2LogLevel(torrentVerbosity) + torrentLogLevel, err := downloadercfg.Int2LogLevel(torrentVerbosity) if err != nil { return err } + log.Info("torrentLogLevel", torrentLogLevel) var downloadRate, uploadRate datasize.ByteSize if err := downloadRate.UnmarshalText([]byte(downloadRateStr)); err != nil { diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index ff39c1e4367..31b3a90ad35 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -29,7 +29,6 @@ import ( "text/tabwriter" "text/template" - lg "github.com/anacrolix/log" "github.com/c2h5oh/datasize" "github.com/ledgerwatch/erigon-lib/kv/kvcache" "github.com/ledgerwatch/erigon-lib/txpool" @@ -645,10 +644,10 @@ var ( Name: ethconfig.FlagSnapStop, Usage: "Stop producing new snapshots", } - TorrentVerbosityFlag = cli.StringFlag{ + TorrentVerbosityFlag = cli.IntFlag{ Name: "torrent.verbosity", - Value: lg.Warning.LogString(), - Usage: "DBG | INF | WRN | ERR (must set --verbosity to equal or higher level)", + Value: 3, + Usage: "0=silent, 1=error, 2=warn, 3=info, 4=debug, 5=detail (must set --verbosity to equal or higher level and has defeault: 3)", } TorrentDownloadRateFlag = cli.StringFlag{ Name: "torrent.download.rate", @@ -1401,8 +1400,8 @@ func SetEthConfig(ctx *cli.Context, nodeConfig *nodecfg.Config, cfg *ethconfig.C if err := uploadRate.UnmarshalText([]byte(uploadRateStr)); err != nil { panic(err) } - - lvl, err := downloadercfg.Str2LogLevel(ctx.GlobalString(TorrentVerbosityFlag.Name)) + log.Info("torrent verbosity", "level", ctx.GlobalInt(TorrentVerbosityFlag.Name)) + lvl, err := downloadercfg.Int2LogLevel(ctx.GlobalInt(TorrentVerbosityFlag.Name)) if err != nil { panic(err) } From f613fcafd2d47e7f19859a2a5a9b6567cf1f49bc Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Wed, 29 Jun 2022 08:42:11 +0600 Subject: [PATCH 123/136] torrent verbosity allow debug #4569 Open --- .../downloader/downloadercfg/downloadercfg.go | 8 ++++---- .../downloader/downloadercfg/logger.go | 19 +++++++++---------- cmd/downloader/main.go | 4 ++-- cmd/utils/flags.go | 6 +++--- 4 files changed, 18 insertions(+), 19 deletions(-) diff --git a/cmd/downloader/downloader/downloadercfg/downloadercfg.go b/cmd/downloader/downloader/downloadercfg/downloadercfg.go index 51fb7a752a0..761c421f322 100644 --- a/cmd/downloader/downloader/downloadercfg/downloadercfg.go +++ b/cmd/downloader/downloader/downloadercfg/downloadercfg.go @@ -54,7 +54,7 @@ func Default() *torrent.ClientConfig { return torrentConfig } -func New(snapDir string, verbosity lg.Level, natif nat.Interface, downloadRate, uploadRate datasize.ByteSize, port, connsPerFile, downloadSlots int) (*Cfg, error) { +func New(snapDir string, verbosity lg.Level, dbg bool, natif nat.Interface, downloadRate, uploadRate datasize.ByteSize, port, connsPerFile, downloadSlots int) (*Cfg, error) { torrentConfig := Default() // We would-like to reduce amount of goroutines in Erigon, so reducing next params 
torrentConfig.EstablishedConnsPerTorrent = connsPerFile // default: 50 @@ -114,9 +114,9 @@ func New(snapDir string, verbosity lg.Level, natif nat.Interface, downloadRate, } // debug - //if lg.Debug == verbosity { - // torrentConfig.Debug = true - //} + if dbg { + torrentConfig.Debug = true + } torrentConfig.Logger = lg.Default.FilterLevel(verbosity) torrentConfig.Logger.Handlers = []lg.Handler{adapterHandler{}} diff --git a/cmd/downloader/downloader/downloadercfg/logger.go b/cmd/downloader/downloader/downloadercfg/logger.go index f21650263f5..7c71fa81e45 100644 --- a/cmd/downloader/downloader/downloadercfg/logger.go +++ b/cmd/downloader/downloader/downloadercfg/logger.go @@ -14,26 +14,25 @@ func init() { utp.Logger.Handlers = []lg.Handler{noopHandler{}} } -func Int2LogLevel(level int) (lg.Level, error) { - lvl := lg.Level{} - +func Int2LogLevel(level int) (lvl lg.Level, dbg bool, err error) { switch level { case 0: - lvl = lg.NotSet - case 1: lvl = lg.Critical - case 2: + case 1: lvl = lg.Error - case 3: + case 2: lvl = lg.Warning - case 4: + case 3: lvl = lg.Info + case 4: + lvl = lg.Debug case 5: lvl = lg.Debug + dbg = true default: - return lvl, fmt.Errorf("invalid level set, expected a number between 0-5 but got: %d", level) + return lvl, dbg, fmt.Errorf("invalid level set, expected a number between 0-5 but got: %d", level) } - return lvl, nil + return lvl, dbg, nil } type noopHandler struct{} diff --git a/cmd/downloader/main.go b/cmd/downloader/main.go index ac415b95e90..6f833871ac1 100644 --- a/cmd/downloader/main.go +++ b/cmd/downloader/main.go @@ -114,7 +114,7 @@ var rootCmd = &cobra.Command{ func Downloader(ctx context.Context) error { dirs := datadir.New(datadirCli) - torrentLogLevel, err := downloadercfg.Int2LogLevel(torrentVerbosity) + torrentLogLevel, dbg, err := downloadercfg.Int2LogLevel(torrentVerbosity) if err != nil { return err } @@ -134,7 +134,7 @@ func Downloader(ctx context.Context) error { return fmt.Errorf("invalid nat option %s: %w", natSetting, err) } - cfg, err := downloadercfg.New(dirs.Snap, torrentLogLevel, natif, downloadRate, uploadRate, torrentPort, torrentConnsPerFile, torrentDownloadSlots) + cfg, err := downloadercfg.New(dirs.Snap, torrentLogLevel, dbg, natif, downloadRate, uploadRate, torrentPort, torrentConnsPerFile, torrentDownloadSlots) if err != nil { return err } diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 31b3a90ad35..a06e6050fe9 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -646,7 +646,7 @@ var ( } TorrentVerbosityFlag = cli.IntFlag{ Name: "torrent.verbosity", - Value: 3, + Value: 2, Usage: "0=silent, 1=error, 2=warn, 3=info, 4=debug, 5=detail (must set --verbosity to equal or higher level and has defeault: 3)", } TorrentDownloadRateFlag = cli.StringFlag{ @@ -1401,11 +1401,11 @@ func SetEthConfig(ctx *cli.Context, nodeConfig *nodecfg.Config, cfg *ethconfig.C panic(err) } log.Info("torrent verbosity", "level", ctx.GlobalInt(TorrentVerbosityFlag.Name)) - lvl, err := downloadercfg.Int2LogLevel(ctx.GlobalInt(TorrentVerbosityFlag.Name)) + lvl, dbg, err := downloadercfg.Int2LogLevel(ctx.GlobalInt(TorrentVerbosityFlag.Name)) if err != nil { panic(err) } - cfg.Downloader, err = downloadercfg.New(cfg.Dirs.Snap, lvl, nodeConfig.P2P.NAT, downloadRate, uploadRate, ctx.GlobalInt(TorrentPortFlag.Name), ctx.GlobalInt(TorrentConnsPerFileFlag.Name), ctx.GlobalInt(TorrentDownloadSlotsFlag.Name)) + cfg.Downloader, err = downloadercfg.New(cfg.Dirs.Snap, lvl, dbg, nodeConfig.P2P.NAT, downloadRate, uploadRate, 
ctx.GlobalInt(TorrentPortFlag.Name), ctx.GlobalInt(TorrentConnsPerFileFlag.Name), ctx.GlobalInt(TorrentDownloadSlotsFlag.Name)) if err != nil { panic(err) } From 95b41490e01796b59af98a84339381b1880fb23e Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Wed, 29 Jun 2022 09:14:37 +0600 Subject: [PATCH 124/136] Advise remove --snap.stop when no indices (#4570) * save * save --- eth/stagedsync/stage_headers.go | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 187eb90509c..4ae0fdad76b 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -1176,24 +1176,28 @@ func DownloadAndIndexSnapshotsIfNeed(s *StageState, ctx context.Context, tx kv.R log.Info("[Snapshots] Stat", "blocks", cfg.snapshots.BlocksAvailable(), "segments", cfg.snapshots.SegmentsMax(), "indices", cfg.snapshots.IndicesMax(), "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys)) // Create .idx files - if cfg.snapshots.Cfg().Produce && cfg.snapshots.IndicesMax() < cfg.snapshots.SegmentsMax() { - if !cfg.snapshots.SegmentsReady() { - return fmt.Errorf("not all snapshot segments are available") + if cfg.snapshots.IndicesMax() < cfg.snapshots.SegmentsMax() { + if !cfg.snapshots.Cfg().Produce && cfg.snapshots.IndicesMax() == 0 { + return fmt.Errorf("please remove --snap.stop, erigon can't work without creating basic indices") } + if cfg.snapshots.Cfg().Produce { + if !cfg.snapshots.SegmentsReady() { + return fmt.Errorf("not all snapshot segments are available") + } - // wait for Downloader service to download all expected snapshots - if cfg.snapshots.IndicesMax() < cfg.snapshots.SegmentsMax() { - chainID, _ := uint256.FromBig(cfg.chainConfig.ChainID) - workers := cmp.InRange(1, 2, runtime.GOMAXPROCS(-1)-1) - if err := snapshotsync.BuildIndices(ctx, cfg.snapshots, *chainID, cfg.tmpdir, cfg.snapshots.IndicesMax(), workers, log.LvlInfo); err != nil { - return fmt.Errorf("BuildIndices: %w", err) + // wait for Downloader service to download all expected snapshots + if cfg.snapshots.IndicesMax() < cfg.snapshots.SegmentsMax() { + chainID, _ := uint256.FromBig(cfg.chainConfig.ChainID) + workers := cmp.InRange(1, 2, runtime.GOMAXPROCS(-1)-1) + if err := snapshotsync.BuildIndices(ctx, cfg.snapshots, *chainID, cfg.tmpdir, cfg.snapshots.IndicesMax(), workers, log.LvlInfo); err != nil { + return fmt.Errorf("BuildIndices: %w", err) + } } - } - if err := cfg.snapshots.Reopen(); err != nil { - return fmt.Errorf("ReopenIndices: %w", err) + if err := cfg.snapshots.Reopen(); err != nil { + return fmt.Errorf("ReopenIndices: %w", err) + } } - } if cfg.dbEventNotifier != nil { cfg.dbEventNotifier.OnNewSnapshot() From 880a3394560305d15132f6d82533eba7e6ce7f8d Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Wed, 29 Jun 2022 13:22:47 +0600 Subject: [PATCH 125/136] gen less blocks in test (#4571) --- cmd/rpcdaemon/commands/eth_subscribe_test.go | 2 +- cmd/rpcdaemon22/commands/eth_subscribe_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/rpcdaemon/commands/eth_subscribe_test.go b/cmd/rpcdaemon/commands/eth_subscribe_test.go index a997495bd99..96406798d1f 100644 --- a/cmd/rpcdaemon/commands/eth_subscribe_test.go +++ b/cmd/rpcdaemon/commands/eth_subscribe_test.go @@ -21,7 +21,7 @@ import ( func TestEthSubscribe(t *testing.T) { m, require := stages.Mock(t), require.New(t) - chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 21, func(i int, b 
*core.BlockGen) { + chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 7, func(i int, b *core.BlockGen) { b.SetCoinbase(common.Address{1}) }, false /* intermediateHashes */) require.NoError(err) diff --git a/cmd/rpcdaemon22/commands/eth_subscribe_test.go b/cmd/rpcdaemon22/commands/eth_subscribe_test.go index 875b7a2456a..9a5d220ba65 100644 --- a/cmd/rpcdaemon22/commands/eth_subscribe_test.go +++ b/cmd/rpcdaemon22/commands/eth_subscribe_test.go @@ -20,7 +20,7 @@ import ( func TestEthSubscribe(t *testing.T) { m, require := stages.Mock(t), require.New(t) - chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 21, func(i int, b *core.BlockGen) { + chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 7, func(i int, b *core.BlockGen) { b.SetCoinbase(common.Address{1}) }, false /* intermediateHashes */) require.NoError(err) From 8f86c5d6155c92b298faf7a7733ee08dbe666fa5 Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Wed, 29 Jun 2022 14:23:00 +0200 Subject: [PATCH 126/136] Remove getNodeData experimental feature (#4559) --- common/debug/experiments.go | 37 ------------- turbo/trie/trie.go | 100 +----------------------------------- turbo/trie/trie_test.go | 40 --------------- 3 files changed, 1 insertion(+), 176 deletions(-) diff --git a/common/debug/experiments.go b/common/debug/experiments.go index 2abd9db8771..d9ce8558875 100644 --- a/common/debug/experiments.go +++ b/common/debug/experiments.go @@ -4,46 +4,9 @@ import ( "os" "strconv" "sync" - "sync/atomic" "time" ) -// atomic: bit 0 is the value, bit 1 is the initialized flag -var getNodeData uint32 - -const ( - gndValueFlag = 1 << iota - gndInitializedFlag -) - -// IsGetNodeData indicates whether the GetNodeData functionality should be enabled. -// By default that's driven by the presence or absence of DISABLE_GET_NODE_DATA environment variable. -func IsGetNodeData() bool { - x := atomic.LoadUint32(&getNodeData) - if x&gndInitializedFlag != 0 { // already initialized - return x&gndValueFlag != 0 - } - - RestoreGetNodeData() - return IsGetNodeData() -} - -// RestoreGetNodeData enables or disables the GetNodeData functionality -// according to the presence or absence of GET_NODE_DATA environment variable. -func RestoreGetNodeData() { - _, envVarSet := os.LookupEnv("GET_NODE_DATA") - OverrideGetNodeData(envVarSet) -} - -// OverrideGetNodeData allows to explicitly enable or disable the GetNodeData functionality. -func OverrideGetNodeData(val bool) { - if val { - atomic.StoreUint32(&getNodeData, gndInitializedFlag|gndValueFlag) - } else { - atomic.StoreUint32(&getNodeData, gndInitializedFlag) - } -} - var ( bigRoTx uint getBigRoTx sync.Once diff --git a/turbo/trie/trie.go b/turbo/trie/trie.go index afadc7bf625..33e067e87b6 100644 --- a/turbo/trie/trie.go +++ b/turbo/trie/trie.go @@ -25,11 +25,9 @@ import ( libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/cmp" "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/debug" "github.com/ledgerwatch/erigon/core/types/accounts" "github.com/ledgerwatch/erigon/crypto" "github.com/ledgerwatch/erigon/ethdb" - "github.com/ledgerwatch/log/v3" ) var ( @@ -52,8 +50,6 @@ type Trie struct { root node newHasherFunc func() *hasher - - hashMap map[common.Hash]node } // New creates a trie with an existing root node from db. 
@@ -67,7 +63,6 @@ type Trie struct { func New(root common.Hash) *Trie { trie := &Trie{ newHasherFunc: func() *hasher { return newHasher( /*valueNodesRlpEncoded = */ false) }, - hashMap: make(map[common.Hash]node), } if (root != common.Hash{}) && root != EmptyRoot { trie.root = hashNode{hash: root[:]} @@ -80,7 +75,6 @@ func New(root common.Hash) *Trie { func NewTestRLPTrie(root common.Hash) *Trie { trie := &Trie{ newHasherFunc: func() *hasher { return newHasher( /*valueNodesRlpEncoded = */ true) }, - hashMap: make(map[common.Hash]node), } if (root != common.Hash{}) && root != EmptyRoot { trie.root = hashNode{hash: root[:]} @@ -515,7 +509,6 @@ func (t *Trie) insertRecursive(origNode node, key []byte, pos int, value node) ( // replacing nodes except accounts if !origNok { - t.evictSubtreeFromHashMap(origNode) return true, value } } @@ -538,14 +531,12 @@ func (t *Trie) insertRecursive(origNode node, key []byte, pos int, value node) ( if matchlen == len(n.Key) || n.Key[matchlen] == 16 { updated, nn = t.insertRecursive(n.Val, key, pos+matchlen, value) if updated { - t.evictNodeFromHashMap(n) n.Val = nn n.ref.len = 0 } newNode = n } else { // Otherwise branch out at the index where they differ. - t.evictNodeFromHashMap(n) var c1 node if len(n.Key) == matchlen+1 { c1 = n.Val @@ -588,7 +579,6 @@ func (t *Trie) insertRecursive(origNode node, key []byte, pos int, value node) ( case i1: updated, nn = t.insertRecursive(n.child1, key, pos+1, value) if updated { - t.evictNodeFromHashMap(n) n.child1 = nn n.ref.len = 0 } @@ -596,13 +586,11 @@ func (t *Trie) insertRecursive(origNode node, key []byte, pos int, value node) ( case i2: updated, nn = t.insertRecursive(n.child2, key, pos+1, value) if updated { - t.evictNodeFromHashMap(n) n.child2 = nn n.ref.len = 0 } newNode = n default: - t.evictNodeFromHashMap(n) var child node if len(key) == pos+1 { child = value @@ -622,7 +610,6 @@ func (t *Trie) insertRecursive(origNode node, key []byte, pos int, value node) ( case *fullNode: child := n.Children[key[pos]] if child == nil { - t.evictNodeFromHashMap(n) if len(key) == pos+1 { n.Children[key[pos]] = value } else { @@ -633,7 +620,6 @@ func (t *Trie) insertRecursive(origNode node, key []byte, pos int, value node) ( } else { updated, nn = t.insertRecursive(child, key, pos+1, value) if updated { - t.evictNodeFromHashMap(n) n.Children[key[pos]] = nn n.ref.len = 0 } @@ -760,14 +746,6 @@ func (t *Trie) hook(hex []byte, n node, hash []byte) error { } func (t *Trie) touchAll(n node, hex []byte, del bool, incarnation uint64) { - if del { - t.evictNodeFromHashMap(n) - } else if len(n.reference()) == common.HashLength && debug.IsGetNodeData() { - var key common.Hash - copy(key[:], n.reference()) - t.hashMap[key] = n - } - switch n := n.(type) { case *shortNode: if _, ok := n.Val.(valueNode); !ok { @@ -819,7 +797,6 @@ func (t *Trie) convertToShortNode(child node, pos uint) node { // might not be loaded yet, resolve it just for this // check. 
if short, ok := child.(*shortNode); ok { - t.evictNodeFromHashMap(child) k := make([]byte, len(short.Key)+1) k[0] = byte(pos) copy(k[1:], short.Key) @@ -854,7 +831,6 @@ func (t *Trie) deleteRecursive(origNode node, key []byte, keyStart int, preserve } if removeNodeEntirely { - t.evictNodeFromHashMap(n) updated = true touchKey := key[:keyStart+matchlen] if touchKey[len(touchKey)-1] == 16 { @@ -871,7 +847,6 @@ func (t *Trie) deleteRecursive(origNode node, key []byte, keyStart int, preserve if !updated { newNode = n } else { - t.evictNodeFromHashMap(n) if nn == nil { newNode = nil } else { @@ -905,7 +880,6 @@ func (t *Trie) deleteRecursive(origNode node, key []byte, keyStart int, preserve if !updated { newNode = n } else { - t.evictNodeFromHashMap(n) if nn == nil { newNode = t.convertToShortNode(n.child2, uint(i2)) } else { @@ -919,7 +893,6 @@ func (t *Trie) deleteRecursive(origNode node, key []byte, keyStart int, preserve if !updated { newNode = n } else { - t.evictNodeFromHashMap(n) if nn == nil { newNode = t.convertToShortNode(n.child1, uint(i1)) } else { @@ -940,7 +913,6 @@ func (t *Trie) deleteRecursive(origNode node, key []byte, keyStart int, preserve if !updated { newNode = n } else { - t.evictNodeFromHashMap(n) n.Children[key[keyStart]] = nn // Check how many non-nil entries are left after deleting and // reduce the full node to a short node if only one entry is @@ -1078,13 +1050,7 @@ func (t *Trie) Reset() { } func (t *Trie) getHasher() *hasher { - h := t.newHasherFunc() - if debug.IsGetNodeData() { - h.callback = func(key common.Hash, nd node) { - t.hashMap[key] = nd - } - } - return h + return t.newHasherFunc() } // DeepHash returns internal hash of a node reachable by the specified key prefix. @@ -1135,8 +1101,6 @@ func (t *Trie) EvictNode(hex []byte) { // can work with other nodes type } - t.evictSubtreeFromHashMap(nd) - var hn common.Hash if nd == nil { fmt.Printf("nd == nil, hex %x, parent node: %T\n", hex, parent) @@ -1214,65 +1178,3 @@ func (t *Trie) TrieSize() int { func (t *Trie) NumberOfAccounts() int { return calcSubtreeNodes(t.root) } - -// GetNodeByHash gets node's RLP by hash. -func (t *Trie) GetNodeByHash(hash common.Hash) []byte { - nd := t.hashMap[hash] - if nd == nil { - return nil - } - - h := t.getHasher() - defer returnHasherToPool(h) - - rlp, err := h.hashChildren(nd, 0) - if err != nil { - log.Warn("GetNodeByHash error while producing node RLP", "err", err) - return nil - } - - return libcommon.Copy(rlp) -} - -func (t *Trie) evictNodeFromHashMap(nd node) { - if !debug.IsGetNodeData() || nd == nil { - return - } - if len(nd.reference()) == common.HashLength { - var key common.Hash - copy(key[:], nd.reference()) - delete(t.hashMap, key) - } -} - -func (t *Trie) evictSubtreeFromHashMap(n node) { - if !debug.IsGetNodeData() { - return - } - t.evictNodeFromHashMap(n) - - switch n := n.(type) { - case *shortNode: - if _, ok := n.Val.(valueNode); !ok { - t.evictSubtreeFromHashMap(n.Val) - } - case *duoNode: - t.evictSubtreeFromHashMap(n.child1) - t.evictSubtreeFromHashMap(n.child2) - case *fullNode: - for _, child := range n.Children { - if child != nil { - t.evictSubtreeFromHashMap(child) - } - } - case *accountNode: - if n.storage != nil { - t.evictSubtreeFromHashMap(n.storage) - } - } -} - -// HashMapSize returns the number of entries in trie's hash map. 
-func (t *Trie) HashMapSize() int { - return len(t.hashMap) -} diff --git a/turbo/trie/trie_test.go b/turbo/trie/trie_test.go index be1d4dc7006..f4691cb330a 100644 --- a/turbo/trie/trie_test.go +++ b/turbo/trie/trie_test.go @@ -27,7 +27,6 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/debug" "github.com/ledgerwatch/erigon/core/types/accounts" "github.com/ledgerwatch/erigon/crypto" "github.com/ledgerwatch/erigon/rlp" @@ -282,45 +281,6 @@ func TestDeepHash(t *testing.T) { } } -func TestHashMapLeak(t *testing.T) { - debug.OverrideGetNodeData(true) - defer debug.OverrideGetNodeData(false) - // freeze the randomness - random := rand.New(rand.NewSource(794656320434)) - - // now create a trie with some small and some big leaves - trie := newEmpty() - nTouches := 256 * 10 - - var key [1]byte - var val [8]byte - for i := 0; i < nTouches; i++ { - key[0] = byte(random.Intn(256)) - binary.BigEndian.PutUint64(val[:], random.Uint64()) - - option := random.Intn(3) - if option == 0 { - // small leaf node - trie.Update(key[:], val[:]) - } else if option == 1 { - // big leaf node - trie.Update(key[:], crypto.Keccak256(val[:])) - } else { - // test delete as well - trie.Delete(key[:]) - } - } - - // check the size of trie's hash map - trie.Hash() - - nHashes := trie.HashMapSize() - nExpected := 1 + 16 + 256/3 - - assert.GreaterOrEqual(t, nHashes, nExpected*7/8) - assert.LessOrEqual(t, nHashes, nExpected*9/8) -} - func genRandomByteArrayOfLen(length uint) []byte { array := make([]byte, length) for i := uint(0); i < length; i++ { From 4155ec101c110d122a303f05ca98c3010243d021 Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Wed, 29 Jun 2022 14:44:22 +0200 Subject: [PATCH 127/136] Fix txpool.accountslots flag (#4573) --- cmd/txpool/main.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/cmd/txpool/main.go b/cmd/txpool/main.go index 0715c5759b2..e50153c0896 100644 --- a/cmd/txpool/main.go +++ b/cmd/txpool/main.go @@ -44,8 +44,9 @@ var ( baseFeePoolLimit int queuedPoolLimit int - priceLimit uint64 - priceBump uint64 + priceLimit uint64 + accountSlots uint64 + priceBump uint64 ) func init() { @@ -65,7 +66,7 @@ func init() { rootCmd.PersistentFlags().IntVar(&baseFeePoolLimit, "txpool.globalbasefeeeslots", txpool.DefaultConfig.BaseFeeSubPoolLimit, "Maximum number of non-executable transactions where only not enough baseFee") rootCmd.PersistentFlags().IntVar(&queuedPoolLimit, "txpool.globalqueue", txpool.DefaultConfig.QueuedSubPoolLimit, "Maximum number of non-executable transaction slots for all accounts") rootCmd.PersistentFlags().Uint64Var(&priceLimit, "txpool.pricelimit", txpool.DefaultConfig.MinFeeCap, "Minimum gas price (fee cap) limit to enforce for acceptance into the pool") - rootCmd.PersistentFlags().Uint64Var(&priceLimit, "txpool.accountslots", txpool.DefaultConfig.AccountSlots, "Minimum number of executable transaction slots guaranteed per account") + rootCmd.PersistentFlags().Uint64Var(&accountSlots, "txpool.accountslots", txpool.DefaultConfig.AccountSlots, "Minimum number of executable transaction slots guaranteed per account") rootCmd.PersistentFlags().Uint64Var(&priceBump, "txpool.pricebump", txpool.DefaultConfig.PriceBump, "Price bump percentage to replace an already existing transaction") rootCmd.Flags().StringSliceVar(&traceSenders, utils.TxPoolTraceSendersFlag.Name, []string{}, utils.TxPoolTraceSendersFlag.Usage) } @@ -121,6 +122,7 @@ var rootCmd 
= &cobra.Command{ cfg.BaseFeeSubPoolLimit = baseFeePoolLimit cfg.QueuedSubPoolLimit = queuedPoolLimit cfg.MinFeeCap = priceLimit + cfg.AccountSlots = accountSlots cfg.PriceBump = priceBump cacheConfig := kvcache.DefaultCoherentConfig From 479912423e37339b1b8422d3e507b1eaaef2106d Mon Sep 17 00:00:00 2001 From: hrthaowang <98331735+hrthaowang@users.noreply.github.com> Date: Wed, 29 Jun 2022 09:24:21 -0400 Subject: [PATCH 128/136] Introduce `eth_callMany` and `debug_traceCallMany` (#4567) * rpc: add eth_callMany (#1) * clean the repo * clean style * remove unwanted err check * fix header bug * Add RPC `debug_traceCallMany` (#4) * update submodule * fix error msg --- cmd/rpcdaemon/commands/eth_callMany.go | 310 ++++++++++++++++++++ cmd/rpcdaemon/commands/eth_callMany_test.go | 165 +++++++++++ cmd/rpcdaemon/commands/tracing.go | 197 +++++++++++++ 3 files changed, 672 insertions(+) create mode 100644 cmd/rpcdaemon/commands/eth_callMany.go create mode 100644 cmd/rpcdaemon/commands/eth_callMany_test.go diff --git a/cmd/rpcdaemon/commands/eth_callMany.go b/cmd/rpcdaemon/commands/eth_callMany.go new file mode 100644 index 00000000000..9de19308369 --- /dev/null +++ b/cmd/rpcdaemon/commands/eth_callMany.go @@ -0,0 +1,310 @@ +package commands + +import ( + "context" + "encoding/hex" + "fmt" + "math/big" + "time" + + "github.com/holiman/uint256" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/hexutil" + "github.com/ledgerwatch/erigon/common/math" + "github.com/ledgerwatch/erigon/core" + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/state" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/core/vm" + "github.com/ledgerwatch/erigon/ethdb" + rpcapi "github.com/ledgerwatch/erigon/internal/ethapi" + "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/turbo/adapter/ethapi" + "github.com/ledgerwatch/erigon/turbo/rpchelper" + "github.com/ledgerwatch/log/v3" +) + +type BlockOverrides struct { + BlockNumber *hexutil.Uint64 + Coinbase *common.Address + Timestamp *hexutil.Uint64 + GasLimit *hexutil.Uint + Difficulty *hexutil.Uint + BaseFee *uint256.Int + BlockHash *map[uint64]common.Hash +} + +type Bundle struct { + Transactions []rpcapi.CallArgs + BlockOverride BlockOverrides +} + +type StateContext struct { + BlockNumber rpc.BlockNumberOrHash + TransactionIndex *int +} + +func blockHeaderOverride(blockCtx *vm.BlockContext, blockOverride BlockOverrides, overrideBlockHash map[uint64]common.Hash) { + if blockOverride.BlockNumber != nil { + blockCtx.BlockNumber = uint64(*blockOverride.BlockNumber) + } + if blockOverride.BaseFee != nil { + blockCtx.BaseFee = blockOverride.BaseFee + } + if blockOverride.Coinbase != nil { + blockCtx.Coinbase = *blockOverride.Coinbase + } + if blockOverride.Difficulty != nil { + blockCtx.Difficulty = big.NewInt(int64(*blockOverride.Difficulty)) + } + if blockOverride.Timestamp != nil { + blockCtx.Time = uint64(*blockOverride.Timestamp) + } + if blockOverride.GasLimit != nil { + blockCtx.GasLimit = uint64(*blockOverride.GasLimit) + } + if blockOverride.BlockHash != nil { + for blockNum, hash := range *blockOverride.BlockHash { + overrideBlockHash[blockNum] = hash + } + } +} + +func (api *APIImpl) CallMany(ctx context.Context, bundles []Bundle, simulateContext StateContext, stateOverride *rpcapi.StateOverrides, timeoutMilliSecondsPtr *int64) ([][]map[string]interface{}, error) { + var ( + hash common.Hash + replayTransactions types.Transactions + evm *vm.EVM + blockCtx 
vm.BlockContext + txCtx vm.TxContext + overrideBlockHash map[uint64]common.Hash + baseFee uint256.Int + ) + + overrideBlockHash = make(map[uint64]common.Hash) + tx, err := api.db.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + chainConfig, err := api.chainConfig(tx) + if err != nil { + return nil, err + } + if len(bundles) == 0 { + return nil, fmt.Errorf("empty bundles") + } + empty := true + for _, bundle := range bundles { + if len(bundle.Transactions) != 0 { + empty = false + } + } + + if empty { + return nil, fmt.Errorf("empty bundles") + } + + defer func(start time.Time) { log.Trace("Executing EVM callMany finished", "runtime", time.Since(start)) }(time.Now()) + + blockNum, hash, _, err := rpchelper.GetBlockNumber(simulateContext.BlockNumber, tx, api.filters) + if err != nil { + return nil, err + } + + block, err := api.blockByNumberWithSenders(tx, blockNum) + if err != nil { + return nil, err + } + + // -1 is a default value for transaction index. + // If it's -1, we will try to replay every single transaction in that block + transactionIndex := -1 + + if simulateContext.TransactionIndex != nil { + transactionIndex = *simulateContext.TransactionIndex + } + + if transactionIndex == -1 { + transactionIndex = len(block.Transactions()) + } + + replayTransactions = block.Transactions()[:transactionIndex] + + stateReader, err := rpchelper.CreateStateReader(ctx, tx, rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(blockNum-1)), api.filters, api.stateCache) + + if err != nil { + return nil, err + } + + st := state.New(stateReader) + + parent := block.Header() + + if parent == nil { + return nil, fmt.Errorf("block %d(%x) not found", blockNum, hash) + } + + // Get a new instance of the EVM + signer := types.MakeSigner(chainConfig, blockNum) + rules := chainConfig.Rules(blockNum) + + contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } + + if api.TevmEnabled { + contractHasTEVM = ethdb.GetHasTEVM(tx) + } + + getHash := func(i uint64) common.Hash { + if hash, ok := overrideBlockHash[i]; ok { + return hash + } + hash, err := rawdb.ReadCanonicalHash(tx, i) + if err != nil { + log.Debug("Can't get block hash by number", "number", i, "only-canonical", true) + } + return hash + } + + if parent.BaseFee != nil { + baseFee.SetFromBig(parent.BaseFee) + } + + blockCtx = vm.BlockContext{ + CanTransfer: core.CanTransfer, + Transfer: core.Transfer, + GetHash: getHash, + ContractHasTEVM: contractHasTEVM, + Coinbase: parent.Coinbase, + BlockNumber: parent.Number.Uint64(), + Time: parent.Time, + Difficulty: new(big.Int).Set(parent.Difficulty), + GasLimit: parent.GasLimit, + BaseFee: &baseFee, + } + + evm = vm.NewEVM(blockCtx, txCtx, st, chainConfig, vm.Config{Debug: false}) + + timeoutMilliSeconds := int64(5000) + + if timeoutMilliSecondsPtr != nil { + timeoutMilliSeconds = *timeoutMilliSecondsPtr + } + + timeout := time.Millisecond * time.Duration(timeoutMilliSeconds) + // Setup context so it may be cancelled the call has completed + // or, in case of unmetered gas, setup a context with a timeout. + var cancel context.CancelFunc + if timeout > 0 { + ctx, cancel = context.WithTimeout(ctx, timeout) + } else { + ctx, cancel = context.WithCancel(ctx) + } + // Make sure the context is cancelled when the call has completed + // this makes sure resources are cleaned up. + defer cancel() + + // Wait for the context to be done and cancel the evm. 
Even if the + // EVM has finished, cancelling may be done (repeatedly) + go func() { + <-ctx.Done() + evm.Cancel() + }() + + // Setup the gas pool (also for unmetered requests) + // and apply the message. + gp := new(core.GasPool).AddGas(math.MaxUint64) + for _, txn := range replayTransactions { + msg, err := txn.AsMessage(*signer, nil, rules) + if err != nil { + return nil, err + } + txCtx = core.NewEVMTxContext(msg) + evm = vm.NewEVM(blockCtx, txCtx, evm.IntraBlockState(), chainConfig, vm.Config{Debug: false}) + // Execute the transaction message + _, err = core.ApplyMessage(evm, msg, gp, true /* refunds */, false /* gasBailout */) + if err != nil { + return nil, err + } + // If the timer caused an abort, return an appropriate error message + if evm.Cancelled() { + return nil, fmt.Errorf("execution aborted (timeout = %v)", timeout) + } + } + + // after replaying the txns, we want to overload the state + // overload state + if stateOverride != nil { + err = stateOverride.Override((evm.IntraBlockState()).(*state.IntraBlockState)) + if err != nil { + return nil, err + } + } + + ret := make([][]map[string]interface{}, 0) + + for _, bundle := range bundles { + // first change blockContext + if bundle.BlockOverride.BlockNumber != nil { + blockCtx.BlockNumber = uint64(*bundle.BlockOverride.BlockNumber) + } + if bundle.BlockOverride.BaseFee != nil { + blockCtx.BaseFee = bundle.BlockOverride.BaseFee + } + if bundle.BlockOverride.Coinbase != nil { + blockCtx.Coinbase = *bundle.BlockOverride.Coinbase + } + if bundle.BlockOverride.Difficulty != nil { + blockCtx.Difficulty = big.NewInt(int64(*bundle.BlockOverride.Difficulty)) + } + if bundle.BlockOverride.Timestamp != nil { + blockCtx.Time = uint64(*bundle.BlockOverride.Timestamp) + } + if bundle.BlockOverride.GasLimit != nil { + blockCtx.GasLimit = uint64(*bundle.BlockOverride.GasLimit) + } + if bundle.BlockOverride.BlockHash != nil { + for blockNum, hash := range *bundle.BlockOverride.BlockHash { + overrideBlockHash[blockNum] = hash + } + } + results := []map[string]interface{}{} + for _, txn := range bundle.Transactions { + if txn.Gas == nil || *(txn.Gas) == 0 { + txn.Gas = (*hexutil.Uint64)(&api.GasCap) + } + msg, err := txn.ToMessage(api.GasCap, blockCtx.BaseFee) + if err != nil { + return nil, err + } + txCtx = core.NewEVMTxContext(msg) + evm = vm.NewEVM(blockCtx, txCtx, evm.IntraBlockState(), chainConfig, vm.Config{Debug: false}) + result, err := core.ApplyMessage(evm, msg, gp, true, false) + if err != nil { + return nil, err + } + // If the timer caused an abort, return an appropriate error message + if evm.Cancelled() { + return nil, fmt.Errorf("execution aborted (timeout = %v)", timeout) + } + jsonResult := make(map[string]interface{}) + if result.Err != nil { + if len(result.Revert()) > 0 { + jsonResult["error"] = ethapi.NewRevertError(result) + } else { + jsonResult["error"] = result.Err.Error() + } + } else { + jsonResult["value"] = hex.EncodeToString(result.Return()) + } + + results = append(results, jsonResult) + } + + blockCtx.BlockNumber++ + blockCtx.Time++ + ret = append(ret, results) + } + + return ret, err +} diff --git a/cmd/rpcdaemon/commands/eth_callMany_test.go b/cmd/rpcdaemon/commands/eth_callMany_test.go new file mode 100644 index 00000000000..1dfa837ba72 --- /dev/null +++ b/cmd/rpcdaemon/commands/eth_callMany_test.go @@ -0,0 +1,165 @@ +package commands + +import ( + "context" + "encoding/hex" + "fmt" + "math/big" + "strconv" + "testing" + + "github.com/ledgerwatch/erigon-lib/kv/kvcache" + 
"github.com/ledgerwatch/erigon/accounts/abi/bind" + "github.com/ledgerwatch/erigon/accounts/abi/bind/backends" + "github.com/ledgerwatch/erigon/cmd/rpcdaemon/commands/contracts" + "github.com/ledgerwatch/erigon/common/hexutil" + "github.com/ledgerwatch/erigon/core" + "github.com/ledgerwatch/erigon/crypto" + "github.com/ledgerwatch/erigon/internal/ethapi" + "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/turbo/snapshotsync" +) + +// block 1 contains 3 Transactions +// 1. deploy token A +// 2. mint address 2 100 token +// 3. transfer from address 2 to address 1 + +// test 2 bundles +// check balance of addr1 and addr 2 at the end of block and interblock + +func TestCallMany(t *testing.T) { + var ( + key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + key1, _ = crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee") + key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") + address = crypto.PubkeyToAddress(key.PublicKey) + address1 = crypto.PubkeyToAddress(key1.PublicKey) + address2 = crypto.PubkeyToAddress(key2.PublicKey) + gspec = &core.Genesis{ + Config: params.AllEthashProtocolChanges, + Alloc: core.GenesisAlloc{ + address: {Balance: big.NewInt(9000000000000000000)}, + address1: {Balance: big.NewInt(200000000000000000)}, + address2: {Balance: big.NewInt(300000000000000000)}, + }, + GasLimit: 10000000, + } + chainID = big.NewInt(1337) + ctx = context.Background() + + addr1BalanceCheck = "70a08231" + "000000000000000000000000" + address1.Hex()[2:] + addr2BalanceCheck = "70a08231" + "000000000000000000000000" + address2.Hex()[2:] + transferAddr2 = "70a08231" + "000000000000000000000000" + address1.Hex()[2:] + "0000000000000000000000000000000000000000000000000000000000000064" + ) + + hexBytes, _ := hex.DecodeString(addr2BalanceCheck) + balanceCallAddr2 := hexutil.Bytes(hexBytes) + hexBytes, _ = hex.DecodeString(addr1BalanceCheck) + balanceCallAddr1 := hexutil.Bytes(hexBytes) + hexBytes, _ = hex.DecodeString(transferAddr2) + transferCallData := hexutil.Bytes(hexBytes) + + //submit 3 Transactions and commit the results + transactOpts, _ := bind.NewKeyedTransactorWithChainID(key, chainID) + transactOpts1, _ := bind.NewKeyedTransactorWithChainID(key1, chainID) + transactOpts2, _ := bind.NewKeyedTransactorWithChainID(key2, chainID) + contractBackend := backends.NewSimulatedBackendWithConfig(gspec.Alloc, gspec.Config, gspec.GasLimit) + defer contractBackend.Close() + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + tokenAddr, _, tokenContract, _ := contracts.DeployToken(transactOpts, contractBackend, address1) + tokenContract.Mint(transactOpts1, address2, big.NewInt(100)) + tokenContract.Transfer(transactOpts2, address1, big.NewInt(100)) + contractBackend.Commit() + + // set up the callargs + var nonce hexutil.Uint64 = 1 + var secondNonce hexutil.Uint64 = 2 + + db := contractBackend.DB() + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + + callArgAddr1 := ethapi.CallArgs{From: &address, To: &tokenAddr, Nonce: &nonce, + MaxPriorityFeePerGas: (*hexutil.Big)(big.NewInt(1e9)), + MaxFeePerGas: (*hexutil.Big)(big.NewInt(1e10)), + Data: &balanceCallAddr1, + } + callArgAddr2 := ethapi.CallArgs{From: &address, To: &tokenAddr, Nonce: &secondNonce, + MaxPriorityFeePerGas: (*hexutil.Big)(big.NewInt(1e9)), + MaxFeePerGas: (*hexutil.Big)(big.NewInt(1e10)), + Data: 
&balanceCallAddr2, + } + + callArgTransferAddr2 := ethapi.CallArgs{From: &address2, To: &tokenAddr, Nonce: &nonce, + MaxPriorityFeePerGas: (*hexutil.Big)(big.NewInt(1e9)), + MaxFeePerGas: (*hexutil.Big)(big.NewInt(1e10)), + Data: &transferCallData, + } + + timeout := int64(50000) + txIndex := -1 + res, err := api.CallMany(ctx, []Bundle{{ + Transactions: []ethapi.CallArgs{callArgAddr1, callArgAddr2}}}, StateContext{BlockNumber: rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber), TransactionIndex: &txIndex}, nil, &timeout) + if err != nil { + t.Errorf("eth_callMany: %v", err) + } + + // parse the results and do balance checks + addr1CalRet := fmt.Sprintf("%v", res[0][0]["value"])[2:] + addr2CalRet := fmt.Sprintf("%v", res[0][1]["value"])[2:] + addr1Balance, err := strconv.ParseInt(addr1CalRet, 16, 64) + if err != nil { + t.Errorf("eth_callMany: %v", err) + } + addr2Balance, err := strconv.ParseInt(addr2CalRet, 16, 64) + + if err != nil { + t.Errorf("eth_callMany: %v", err) + } + if addr1Balance != 100 || addr2Balance != 0 { + t.Errorf("eth_callMany: %v", "balanceUnmatch") + } + + txIndex = 2 + res, err = api.CallMany(ctx, []Bundle{{ + Transactions: []ethapi.CallArgs{callArgAddr1, callArgAddr2}}}, StateContext{BlockNumber: rpc.BlockNumberOrHashWithNumber(1), TransactionIndex: &txIndex}, nil, &timeout) + if err != nil { + t.Errorf("eth_callMany: %v", err) + } + + addr1CalRet = fmt.Sprintf("%v", res[0][0]["value"])[2:] + addr2CalRet = fmt.Sprintf("%v", res[0][1]["value"])[2:] + addr1Balance, err = strconv.ParseInt(addr1CalRet, 16, 64) + if err != nil { + t.Errorf("%v", err) + } + addr2Balance, err = strconv.ParseInt(addr2CalRet, 16, 64) + if err != nil { + t.Errorf("%v", err) + } + + if addr1Balance != 0 || addr2Balance != 100 { + t.Errorf("eth_callMany: %s", "balanceUnmatch") + } + txIndex = -1 + res, err = api.CallMany(ctx, []Bundle{{Transactions: []ethapi.CallArgs{callArgTransferAddr2, callArgAddr1, callArgAddr2}}}, StateContext{BlockNumber: rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber), TransactionIndex: &txIndex}, nil, &timeout) + if err != nil { + t.Errorf("%v", err) + } + + addr1CalRet = fmt.Sprintf("%v", res[0][1]["value"])[2:] + addr2CalRet = fmt.Sprintf("%v", res[0][2]["value"])[2:] + + addr1Balance, err = strconv.ParseInt(addr1CalRet, 16, 64) + if err != nil { + t.Errorf("%v", err) + } + addr2Balance, err = strconv.ParseInt(addr2CalRet, 16, 64) + if err != nil { + t.Errorf("%v", err) + } + if addr1Balance != 100 || addr2Balance != 0 { + t.Errorf("eth_callMany: %s", "balanceUnmatch") + } +} diff --git a/cmd/rpcdaemon/commands/tracing.go b/cmd/rpcdaemon/commands/tracing.go index 31ddd4522cc..3f354822407 100644 --- a/cmd/rpcdaemon/commands/tracing.go +++ b/cmd/rpcdaemon/commands/tracing.go @@ -3,11 +3,16 @@ package commands import ( "context" "fmt" + "math/big" + "time" "github.com/holiman/uint256" jsoniter "github.com/json-iterator/go" "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/hexutil" + "github.com/ledgerwatch/erigon/common/math" "github.com/ledgerwatch/erigon/consensus/ethash" + "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" @@ -239,3 +244,195 @@ func (api *PrivateDebugAPIImpl) TraceCall(ctx context.Context, args ethapi.CallA // Trace the transaction and return return transactions.TraceTx(ctx, msg, blockCtx, txCtx, ibs, config, chainConfig, stream) } + +func (api *PrivateDebugAPIImpl) TraceCallMany(ctx 
context.Context, bundles []Bundle, simulateContext StateContext, config *tracers.TraceConfig, stream *jsoniter.Stream) error { + var ( + hash common.Hash + replayTransactions types.Transactions + evm *vm.EVM + blockCtx vm.BlockContext + txCtx vm.TxContext + overrideBlockHash map[uint64]common.Hash + baseFee uint256.Int + ) + + overrideBlockHash = make(map[uint64]common.Hash) + tx, err := api.db.BeginRo(ctx) + if err != nil { + stream.WriteNil() + return err + } + defer tx.Rollback() + chainConfig, err := api.chainConfig(tx) + if err != nil { + stream.WriteNil() + return err + } + if len(bundles) == 0 { + stream.WriteNil() + return fmt.Errorf("empty bundles") + } + empty := true + for _, bundle := range bundles { + if len(bundle.Transactions) != 0 { + empty = false + } + } + + if empty { + stream.WriteNil() + return fmt.Errorf("empty bundles") + } + + defer func(start time.Time) { log.Trace("Tracing CallMany finished", "runtime", time.Since(start)) }(time.Now()) + + blockNum, hash, _, err := rpchelper.GetBlockNumber(simulateContext.BlockNumber, tx, api.filters) + if err != nil { + stream.WriteNil() + return err + } + + block, err := api.blockByNumberWithSenders(tx, blockNum) + if err != nil { + stream.WriteNil() + return err + } + + // -1 is a default value for transaction index. + // If it's -1, we will try to replay every single transaction in that block + transactionIndex := -1 + + if simulateContext.TransactionIndex != nil { + transactionIndex = *simulateContext.TransactionIndex + } + + if transactionIndex == -1 { + transactionIndex = len(block.Transactions()) + } + + replayTransactions = block.Transactions()[:transactionIndex] + + stateReader, err := rpchelper.CreateStateReader(ctx, tx, rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(blockNum-1)), api.filters, api.stateCache) + + if err != nil { + stream.WriteNil() + return err + } + + st := state.New(stateReader) + + parent := block.Header() + + if parent == nil { + stream.WriteNil() + return fmt.Errorf("block %d(%x) not found", blockNum, hash) + } + + // Get a new instance of the EVM + signer := types.MakeSigner(chainConfig, blockNum) + rules := chainConfig.Rules(blockNum) + + contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } + + if api.TevmEnabled { + contractHasTEVM = ethdb.GetHasTEVM(tx) + } + + getHash := func(i uint64) common.Hash { + if hash, ok := overrideBlockHash[i]; ok { + return hash + } + hash, err := rawdb.ReadCanonicalHash(tx, i) + if err != nil { + log.Debug("Can't get block hash by number", "number", i, "only-canonical", true) + } + return hash + } + + if parent.BaseFee != nil { + baseFee.SetFromBig(parent.BaseFee) + } + + blockCtx = vm.BlockContext{ + CanTransfer: core.CanTransfer, + Transfer: core.Transfer, + GetHash: getHash, + ContractHasTEVM: contractHasTEVM, + Coinbase: parent.Coinbase, + BlockNumber: parent.Number.Uint64(), + Time: parent.Time, + Difficulty: new(big.Int).Set(parent.Difficulty), + GasLimit: parent.GasLimit, + BaseFee: &baseFee, + } + + evm = vm.NewEVM(blockCtx, txCtx, st, chainConfig, vm.Config{Debug: false}) + + // Setup the gas pool (also for unmetered requests) + // and apply the message. 
+ gp := new(core.GasPool).AddGas(math.MaxUint64) + for _, txn := range replayTransactions { + msg, err := txn.AsMessage(*signer, nil, rules) + if err != nil { + stream.WriteNil() + return err + } + txCtx = core.NewEVMTxContext(msg) + evm = vm.NewEVM(blockCtx, txCtx, evm.IntraBlockState(), chainConfig, vm.Config{Debug: false}) + // Execute the transaction message + _, err = core.ApplyMessage(evm, msg, gp, true /* refunds */, false /* gasBailout */) + if err != nil { + stream.WriteNil() + return err + } + + } + + // after replaying the txns, we want to overload the state + if config.StateOverrides != nil { + err = config.StateOverrides.Override(evm.IntraBlockState().(*state.IntraBlockState)) + if err != nil { + stream.WriteNil() + return err + } + } + + stream.WriteArrayStart() + for bundle_index, bundle := range bundles { + stream.WriteArrayStart() + // first change blockContext + blockHeaderOverride(&blockCtx, bundle.BlockOverride, overrideBlockHash) + for txn_index, txn := range bundle.Transactions { + if txn.Gas == nil || *(txn.Gas) == 0 { + txn.Gas = (*hexutil.Uint64)(&api.GasCap) + } + msg, err := txn.ToMessage(api.GasCap, blockCtx.BaseFee) + if err != nil { + stream.WriteNil() + return err + } + txCtx = core.NewEVMTxContext(msg) + ibs := evm.IntraBlockState().(*state.IntraBlockState) + ibs.Prepare(common.Hash{}, parent.Hash(), txn_index) + err = transactions.TraceTx(ctx, msg, blockCtx, txCtx, evm.IntraBlockState(), config, chainConfig, stream) + + if err != nil { + stream.WriteNil() + return err + } + + if txn_index < len(bundle.Transactions)-1 { + stream.WriteMore() + } + } + stream.WriteArrayEnd() + + if bundle_index < len(bundles)-1 { + stream.WriteMore() + } + blockCtx.BlockNumber++ + blockCtx.Time++ + } + stream.WriteArrayEnd() + return nil +} From cfa8b545fc0f094de30771f46d350fa4c6ac69e0 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Wed, 29 Jun 2022 19:53:52 +0600 Subject: [PATCH 129/136] downloader: fix tmp dir detection check #4575 --- cmd/downloader/downloader/util.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/downloader/downloader/util.go b/cmd/downloader/downloader/util.go index 0c96aff15cb..1cc79d588ed 100644 --- a/cmd/downloader/downloader/util.go +++ b/cmd/downloader/downloader/util.go @@ -355,7 +355,7 @@ func VerifyDtaFiles(ctx context.Context, snapDir string) error { defer logEvery.Stop() tmpSnapDir := filepath.Join(snapDir, "tmp") // snapshots are in sub-dir "tmp", if not fully downloaded - if !common.FileExist(tmpSnapDir) { + if common.FileExist(tmpSnapDir) { snapDir = tmpSnapDir } files, err := AllTorrentPaths(snapDir) From d72fba3bec6beb4ef829f626760ff3725152a6f1 Mon Sep 17 00:00:00 2001 From: Enrique Jose Avila Asapche Date: Wed, 29 Jun 2022 17:29:31 +0300 Subject: [PATCH 130/136] eth_estimateGas: read header instead of whole block (#4561) * check if block is nil * using read header * returning 0, nil --- cmd/rpcdaemon/commands/eth_call.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/cmd/rpcdaemon/commands/eth_call.go b/cmd/rpcdaemon/commands/eth_call.go index 1339bb77333..714c0ecc195 100644 --- a/cmd/rpcdaemon/commands/eth_call.go +++ b/cmd/rpcdaemon/commands/eth_call.go @@ -76,16 +76,17 @@ func (api *APIImpl) Call(ctx context.Context, args ethapi.CallArgs, blockNrOrHas } // headerByNumberOrHash - intent to read recent headers only -func headerByNumberOrHash(tx kv.Tx, blockNrOrHash rpc.BlockNumberOrHash, api *APIImpl) (*types.Header, error) { +func headerByNumberOrHash(ctx context.Context, tx kv.Tx, 
blockNrOrHash rpc.BlockNumberOrHash, api *APIImpl) (*types.Header, error) { blockNum, _, _, err := rpchelper.GetBlockNumber(blockNrOrHash, tx, api.filters) if err != nil { return nil, err } - block, err := api.blockByNumberWithSenders(tx, blockNum) + header, err := api._blockReader.HeaderByNumber(ctx, tx, blockNum) if err != nil { return nil, err } - return block.Header(), nil + // header can be nil + return header, nil } // EstimateGas implements eth_estimateGas. Returns an estimate of how much gas is necessary to allow the transaction to complete. The transaction will not be added to the blockchain. @@ -123,10 +124,13 @@ func (api *APIImpl) EstimateGas(ctx context.Context, argsOrNil *ethapi.CallArgs, hi = uint64(*args.Gas) } else { // Retrieve the block to act as the gas ceiling - h, err := headerByNumberOrHash(dbtx, bNrOrHash, api) + h, err := headerByNumberOrHash(ctx, dbtx, bNrOrHash, api) if err != nil { return 0, err } + if h == nil { + return 0, nil + } hi = h.GasLimit } From b9473745ad0f90117a54011227204e9f6e8afab0 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Wed, 29 Jun 2022 20:41:00 +0600 Subject: [PATCH 131/136] don't recommend --snap.stop flag #4577 Open --- cmd/utils/flags.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index a06e6050fe9..50150af6354 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -642,7 +642,7 @@ var ( } SnapStopFlag = cli.BoolFlag{ Name: ethconfig.FlagSnapStop, - Usage: "Stop producing new snapshots", + Usage: "Workaround to stop producing new snapshots, if you meet some snapshots-related critical bug", } TorrentVerbosityFlag = cli.IntFlag{ Name: "torrent.verbosity", From 6add6ecd0ebcd51cab64a6061fc43abf56927b00 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Wed, 29 Jun 2022 20:42:05 +0600 Subject: [PATCH 132/136] Snapshots: new bsc hash #4578 --- turbo/snapshotsync/snapshothashes/erigon-snapshots | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/turbo/snapshotsync/snapshothashes/erigon-snapshots b/turbo/snapshotsync/snapshothashes/erigon-snapshots index 879c9801be6..9cd91d0b377 160000 --- a/turbo/snapshotsync/snapshothashes/erigon-snapshots +++ b/turbo/snapshotsync/snapshothashes/erigon-snapshots @@ -1 +1 @@ -Subproject commit 879c9801be6bbe8b7863e2a6f2afae0140bff09b +Subproject commit 9cd91d0b377149102613f6bec46f28429aa3c761 From 7cd195117f80e070f2df33e694240c6eb93dca29 Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Wed, 29 Jun 2022 17:39:12 +0200 Subject: [PATCH 133/136] More robust quitting of PoW mining (#4574) * isTrans -> cfg.blockBuilderParameters != nil * More robust quitting of PoW mining --- eth/backend.go | 33 ++----------------- eth/stagedsync/stage_headers.go | 11 ++++--- eth/stagedsync/stage_mining_create_block.go | 11 ++----- .../headerdownload/header_data_struct.go | 2 ++ 4 files changed, 14 insertions(+), 43 deletions(-) diff --git a/eth/backend.go b/eth/backend.go index 63e2c0f4b90..ec29d91a01c 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -70,7 +70,6 @@ import ( "github.com/ledgerwatch/erigon/eth/ethutils" "github.com/ledgerwatch/erigon/eth/protocols/eth" "github.com/ledgerwatch/erigon/eth/stagedsync" - "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/ethdb/privateapi" "github.com/ledgerwatch/erigon/ethdb/prune" "github.com/ledgerwatch/erigon/ethstats" @@ -110,7 +109,6 @@ type Ethereum struct { lock sync.RWMutex // Protects the variadic fields (e.g. 
gas price and etherbase) chainConfig *params.ChainConfig genesisHash common.Hash - quitMining chan struct{} miningSealingQuit chan struct{} pendingBlocks chan *types.Block minedBlocks chan *types.Block @@ -352,7 +350,6 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere } backend.notifyMiningAboutNewTxs = make(chan struct{}, 1) - backend.quitMining = make(chan struct{}) backend.miningSealingQuit = make(chan struct{}) backend.pendingBlocks = make(chan *types.Block, 1) backend.minedBlocks = make(chan *types.Block, 1) @@ -476,13 +473,13 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere if err := miningRPC.(*privateapi.MiningServer).BroadcastPendingBlock(b); err != nil { log.Error("txpool rpc pending block broadcast", "err", err) } - case <-backend.quitMining: + case <-backend.sentriesClient.Hd.QuitPoWMining: return } } }() - if err := backend.StartMining(context.Background(), backend.chainDB, mining, backend.config.Miner, backend.gasPrice, backend.quitMining); err != nil { + if err := backend.StartMining(context.Background(), backend.chainDB, mining, backend.config.Miner, backend.gasPrice, backend.sentriesClient.Hd.QuitPoWMining); err != nil { return nil, err } @@ -693,12 +690,6 @@ func (s *Ethereum) StartMining(ctx context.Context, db kv.RwDB, mining *stagedsy var hasWork bool errc := make(chan error, 1) - tx, err := s.chainDB.BeginRo(ctx) - if err != nil { - log.Warn("mining", "err", err) - return - } - for { mineEvery.Reset(3 * time.Second) select { @@ -718,21 +709,6 @@ func (s *Ethereum) StartMining(ctx context.Context, db kv.RwDB, mining *stagedsy case <-quitCh: return } - // Check if we transitioned and if we did halt POW mining - headNumber, err := stages.GetStageProgress(tx, stages.Headers) - if err != nil { - log.Warn("mining", "err", err) - return - } - - isTrans, err := rawdb.Transitioned(tx, headNumber, s.chainConfig.TerminalTotalDifficulty) - if err != nil { - log.Warn("mining", "err", err) - return - } - if isTrans { - return - } if !works && hasWork { works = true @@ -880,11 +856,8 @@ func (s *Ethereum) Stop() error { case <-shutdownDone: } } - if s.quitMining != nil { - close(s.quitMining) - } + libcommon.SafeClose(s.sentriesClient.Hd.QuitPoWMining) - //s.miner.Stop() s.engine.Close() <-s.waitForStageLoopStop if s.config.Miner.Enabled { diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 4ae0fdad76b..cebd06e98a9 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -132,12 +132,13 @@ func SpawnStageHeaders( return finishHandlingForkChoice(unsettledForkChoice, headHeight, s, tx, cfg, useExternalTx) } - isTrans, err := rawdb.Transitioned(tx, blockNumber, cfg.chainConfig.TerminalTotalDifficulty) + transitionedToPoS, err := rawdb.Transitioned(tx, blockNumber, cfg.chainConfig.TerminalTotalDifficulty) if err != nil { return err } - if isTrans { + if transitionedToPoS { + libcommon.SafeClose(cfg.hd.QuitPoWMining) return HeadersPOS(s, u, ctx, tx, cfg, useExternalTx) } else { return HeadersPOW(s, u, ctx, tx, cfg, initialCycle, test, useExternalTx) @@ -771,17 +772,17 @@ func HeadersPOW( Loop: for !stopped { - isTrans, err := rawdb.Transitioned(tx, headerProgress, cfg.chainConfig.TerminalTotalDifficulty) + transitionedToPoS, err := rawdb.Transitioned(tx, headerProgress, cfg.chainConfig.TerminalTotalDifficulty) if err != nil { return err } - - if isTrans { + if transitionedToPoS { if err := s.Update(tx, headerProgress); err != nil { return err } break } + 
currentTime := time.Now() req, penalties := cfg.hd.RequestMoreHeaders(currentTime) if req != nil { diff --git a/eth/stagedsync/stage_mining_create_block.go b/eth/stagedsync/stage_mining_create_block.go index a5d1dde905c..ec6b3532624 100644 --- a/eth/stagedsync/stage_mining_create_block.go +++ b/eth/stagedsync/stage_mining_create_block.go @@ -115,13 +115,8 @@ func SpawnMiningCreateBlockStage(s *StageState, tx kv.RwTx, cfg MiningCreateBloc return fmt.Errorf("wrong head block: %x (current) vs %x (requested)", parent.Hash(), cfg.blockBuilderParameters.ParentHash) } - isTrans, err := rawdb.Transitioned(tx, executionAt, cfg.chainConfig.TerminalTotalDifficulty) - if err != nil { - return err - } - if cfg.miner.MiningConfig.Etherbase == (common.Address{}) { - if !isTrans { + if cfg.blockBuilderParameters == nil { return fmt.Errorf("refusing to mine without etherbase") } // If we do not have an etherbase, let's use the suggested one @@ -199,7 +194,7 @@ func SpawnMiningCreateBlockStage(s *StageState, tx kv.RwTx, cfg MiningCreateBloc // re-written miner/worker.go:commitNewWork var timestamp uint64 - if !isTrans { + if cfg.blockBuilderParameters == nil { timestamp = uint64(time.Now().Unix()) if parent.Time >= timestamp { timestamp = parent.Time + 1 @@ -230,7 +225,7 @@ func SpawnMiningCreateBlockStage(s *StageState, tx kv.RwTx, cfg MiningCreateBloc return err } - if isTrans { + if cfg.blockBuilderParameters != nil { header.MixDigest = cfg.blockBuilderParameters.PrevRandao current.Header = header diff --git a/turbo/stages/headerdownload/header_data_struct.go b/turbo/stages/headerdownload/header_data_struct.go index f3609309a62..f2ed7797a62 100644 --- a/turbo/stages/headerdownload/header_data_struct.go +++ b/turbo/stages/headerdownload/header_data_struct.go @@ -300,6 +300,7 @@ type HeaderDownload struct { fetchingNew bool // Set when the stage that is actively fetching the headers is in progress topSeenHeightPoW uint64 latestMinedBlockNumber uint64 + QuitPoWMining chan struct{} trace bool stats Stats @@ -349,6 +350,7 @@ func NewHeaderDownload( anchorQueue: &AnchorQueue{}, seenAnnounces: NewSeenAnnounces(), DeliveryNotify: make(chan struct{}, 1), + QuitPoWMining: make(chan struct{}), BeaconRequestList: engineapi.NewRequestList(), PayloadStatusCh: make(chan privateapi.PayloadStatus, 1), headerReader: headerReader, From 687295f01bbf629ed92e04765b1135e1261a21c2 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Wed, 29 Jun 2022 17:49:33 +0200 Subject: [PATCH 134/136] fixed stall (#4576) --- ethdb/privateapi/ethbackend.go | 34 ++++++++++++++++------------------ 1 file changed, 16 insertions(+), 18 deletions(-) diff --git a/ethdb/privateapi/ethbackend.go b/ethdb/privateapi/ethbackend.go index 4719d6c78e6..f77d841b480 100644 --- a/ethdb/privateapi/ethbackend.go +++ b/ethdb/privateapi/ethbackend.go @@ -274,6 +274,16 @@ func (s *EthBackendServer) stageLoopIsBusy() bool { // EngineNewPayloadV1 validates and possibly executes payload func (s *EthBackendServer) EngineNewPayloadV1(ctx context.Context, req *types2.ExecutionPayload) (*remote.EnginePayloadStatus, error) { + // If another payload is already commissioned then we just reply with syncing + if s.stageLoopIsBusy() { + // We are still syncing a commissioned payload + // TODO(yperbasis): not entirely correct since per the spec: + // The process of validating a payload on the canonical chain MUST NOT be affected by an active sync process on a side branch of the block tree. 
+ // For example, if side branch B is SYNCING but the requisite data for validating a payload from canonical branch A is available, client software MUST initiate the validation process. + // https://github.com/ethereum/execution-apis/blob/v1.0.0-alpha.6/src/engine/specification.md#payload-validation + log.Debug("[NewPayload] stage loop is busy") + return &remote.EnginePayloadStatus{Status: remote.EngineStatus_SYNCING}, nil + } log.Debug("[NewPayload] acquiring lock") s.lock.Lock() defer s.lock.Unlock() @@ -333,17 +343,6 @@ func (s *EthBackendServer) EngineNewPayloadV1(ctx context.Context, req *types2.E } tx.Rollback() - // If another payload is already commissioned then we just reply with syncing - if s.stageLoopIsBusy() { - // We are still syncing a commissioned payload - // TODO(yperbasis): not entirely correct since per the spec: - // The process of validating a payload on the canonical chain MUST NOT be affected by an active sync process on a side branch of the block tree. - // For example, if side branch B is SYNCING but the requisite data for validating a payload from canonical branch A is available, client software MUST initiate the validation process. - // https://github.com/ethereum/execution-apis/blob/v1.0.0-alpha.6/src/engine/specification.md#payload-validation - log.Debug("[NewPayload] stage loop is busy") - return &remote.EnginePayloadStatus{Status: remote.EngineStatus_SYNCING}, nil - } - log.Debug("[NewPayload] sending block", "height", header.Number, "hash", common.Hash(blockHash)) s.requestList.AddPayloadRequest(&engineapi.PayloadMessage{ Header: &header, @@ -419,6 +418,12 @@ func (s *EthBackendServer) EngineGetPayloadV1(ctx context.Context, req *remote.E // EngineForkChoiceUpdatedV1 either states new block head or request the assembling of a new block func (s *EthBackendServer) EngineForkChoiceUpdatedV1(ctx context.Context, req *remote.EngineForkChoiceUpdatedRequest) (*remote.EngineForkChoiceUpdatedReply, error) { + if s.stageLoopIsBusy() { + log.Debug("[ForkChoiceUpdated] stage loop is busy") + return &remote.EngineForkChoiceUpdatedReply{ + PayloadStatus: &remote.EnginePayloadStatus{Status: remote.EngineStatus_SYNCING}, + }, nil + } log.Debug("[ForkChoiceUpdated] acquiring lock") s.lock.Lock() defer s.lock.Unlock() @@ -451,13 +456,6 @@ func (s *EthBackendServer) EngineForkChoiceUpdatedV1(ctx context.Context, req *r }, nil } - if s.stageLoopIsBusy() { - log.Debug("[ForkChoiceUpdated] stage loop is busy") - return &remote.EngineForkChoiceUpdatedReply{ - PayloadStatus: &remote.EnginePayloadStatus{Status: remote.EngineStatus_SYNCING}, - }, nil - } - log.Debug("[ForkChoiceUpdated] sending forkChoiceMessage", "head", forkChoice.HeadBlockHash) s.requestList.AddForkChoiceRequest(&forkChoice) From 7d8c67e8539a0f2372195e6a2bfc52da6dac53ff Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Wed, 29 Jun 2022 17:50:20 +0200 Subject: [PATCH 135/136] Exempt local transactions from spam protection (#4580) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index ecea7d44e68..68ed3b07b6b 100644 --- a/go.mod +++ b/go.mod @@ -35,7 +35,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible - github.com/ledgerwatch/erigon-lib v0.0.0-20220625091153-e7b09db04531 + github.com/ledgerwatch/erigon-lib v0.0.0-20220629154434-59f7b5b57b68 github.com/ledgerwatch/log/v3 v3.4.1 
github.com/ledgerwatch/secp256k1 v1.0.0 github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 diff --git a/go.sum b/go.sum index b7a39052902..40b0cee0689 100644 --- a/go.sum +++ b/go.sum @@ -386,8 +386,8 @@ github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3P github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220625091153-e7b09db04531 h1:UKQC0chFY2s0wXOMDOyPEuUTwymsQRUpNHm7/5isnUo= -github.com/ledgerwatch/erigon-lib v0.0.0-20220625091153-e7b09db04531/go.mod h1:7sQ5B5m54zoo7RVRVukH3YZCYVrCC+BmwDBD+9KyTrE= +github.com/ledgerwatch/erigon-lib v0.0.0-20220629154434-59f7b5b57b68 h1:GWy2Jan7bkQe7xkptxxM2zWCjNyxGNDgSUl30oDMmHQ= +github.com/ledgerwatch/erigon-lib v0.0.0-20220629154434-59f7b5b57b68/go.mod h1:7sQ5B5m54zoo7RVRVukH3YZCYVrCC+BmwDBD+9KyTrE= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= From 8558778ee263810e365f36beccc986d3e2574a06 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Thu, 30 Jun 2022 02:27:34 +0200 Subject: [PATCH 136/136] fixed engine unit tests (#4581) * fixed engine unit tests * done --- ethdb/privateapi/ethbackend.go | 65 ++++++++++++++++++---------------- 1 file changed, 34 insertions(+), 31 deletions(-) diff --git a/ethdb/privateapi/ethbackend.go b/ethdb/privateapi/ethbackend.go index f77d841b480..079f3be97bc 100644 --- a/ethdb/privateapi/ethbackend.go +++ b/ethdb/privateapi/ethbackend.go @@ -274,21 +274,6 @@ func (s *EthBackendServer) stageLoopIsBusy() bool { // EngineNewPayloadV1 validates and possibly executes payload func (s *EthBackendServer) EngineNewPayloadV1(ctx context.Context, req *types2.ExecutionPayload) (*remote.EnginePayloadStatus, error) { - // If another payload is already commissioned then we just reply with syncing - if s.stageLoopIsBusy() { - // We are still syncing a commissioned payload - // TODO(yperbasis): not entirely correct since per the spec: - // The process of validating a payload on the canonical chain MUST NOT be affected by an active sync process on a side branch of the block tree. - // For example, if side branch B is SYNCING but the requisite data for validating a payload from canonical branch A is available, client software MUST initiate the validation process. 
- // https://github.com/ethereum/execution-apis/blob/v1.0.0-alpha.6/src/engine/specification.md#payload-validation - log.Debug("[NewPayload] stage loop is busy") - return &remote.EnginePayloadStatus{Status: remote.EngineStatus_SYNCING}, nil - } - log.Debug("[NewPayload] acquiring lock") - s.lock.Lock() - defer s.lock.Unlock() - log.Debug("[NewPayload] lock acquired") - if s.config.TerminalTotalDifficulty == nil { log.Error("[NewPayload] not a proof-of-stake chain") return nil, fmt.Errorf("not a proof-of-stake chain") @@ -301,7 +286,6 @@ func (s *EthBackendServer) EngineNewPayloadV1(ctx context.Context, req *types2.E baseFee = gointerfaces.ConvertH256ToUint256Int(req.BaseFeePerGas).ToBig() eip1559 = true } - header := types.Header{ ParentHash: gointerfaces.ConvertH256ToHash(req.ParentHash), Coinbase: gointerfaces.ConvertH160toAddress(req.Coinbase), @@ -323,16 +307,13 @@ func (s *EthBackendServer) EngineNewPayloadV1(ctx context.Context, req *types2.E } blockHash := gointerfaces.ConvertH256ToHash(req.BlockHash) - if header.Hash() != blockHash { - log.Error("[NewPayload] invalid block hash", "stated", common.Hash(blockHash), "actual", header.Hash()) - return &remote.EnginePayloadStatus{Status: remote.EngineStatus_INVALID_BLOCK_HASH}, nil - } tx, err := s.db.BeginRo(ctx) if err != nil { return nil, err } defer tx.Rollback() + parentTd, err := rawdb.ReadTd(tx, header.ParentHash, req.BlockNumber-1) if err != nil { return nil, err @@ -342,6 +323,27 @@ func (s *EthBackendServer) EngineNewPayloadV1(ctx context.Context, req *types2.E return &remote.EnginePayloadStatus{Status: remote.EngineStatus_INVALID, LatestValidHash: gointerfaces.ConvertHashToH256(common.Hash{})}, nil } tx.Rollback() + // If another payload is already commissioned then we just reply with syncing + if s.stageLoopIsBusy() { + // We are still syncing a commissioned payload + // TODO(yperbasis): not entirely correct since per the spec: + // The process of validating a payload on the canonical chain MUST NOT be affected by an active sync process on a side branch of the block tree. + // For example, if side branch B is SYNCING but the requisite data for validating a payload from canonical branch A is available, client software MUST initiate the validation process. + // https://github.com/ethereum/execution-apis/blob/v1.0.0-alpha.6/src/engine/specification.md#payload-validation + log.Debug("[NewPayload] stage loop is busy") + return &remote.EnginePayloadStatus{Status: remote.EngineStatus_SYNCING}, nil + } + + if header.Hash() != blockHash { + log.Error("[NewPayload] invalid block hash", "stated", common.Hash(blockHash), "actual", header.Hash()) + return &remote.EnginePayloadStatus{Status: remote.EngineStatus_INVALID_BLOCK_HASH}, nil + } + + // Lock the thread (We modify shared resources). 
+ log.Debug("[NewPayload] acquiring lock") + s.lock.Lock() + defer s.lock.Unlock() + log.Debug("[NewPayload] lock acquired") log.Debug("[NewPayload] sending block", "height", header.Number, "hash", common.Hash(blockHash)) s.requestList.AddPayloadRequest(&engineapi.PayloadMessage{ @@ -418,17 +420,6 @@ func (s *EthBackendServer) EngineGetPayloadV1(ctx context.Context, req *remote.E // EngineForkChoiceUpdatedV1 either states new block head or request the assembling of a new block func (s *EthBackendServer) EngineForkChoiceUpdatedV1(ctx context.Context, req *remote.EngineForkChoiceUpdatedRequest) (*remote.EngineForkChoiceUpdatedReply, error) { - if s.stageLoopIsBusy() { - log.Debug("[ForkChoiceUpdated] stage loop is busy") - return &remote.EngineForkChoiceUpdatedReply{ - PayloadStatus: &remote.EnginePayloadStatus{Status: remote.EngineStatus_SYNCING}, - }, nil - } - log.Debug("[ForkChoiceUpdated] acquiring lock") - s.lock.Lock() - defer s.lock.Unlock() - log.Debug("[ForkChoiceUpdated] lock acquired") - if s.config.TerminalTotalDifficulty == nil { return nil, fmt.Errorf("not a proof-of-stake chain") } @@ -456,6 +447,18 @@ func (s *EthBackendServer) EngineForkChoiceUpdatedV1(ctx context.Context, req *r }, nil } + if s.stageLoopIsBusy() { + log.Debug("[ForkChoiceUpdated] stage loop is busy") + return &remote.EngineForkChoiceUpdatedReply{ + PayloadStatus: &remote.EnginePayloadStatus{Status: remote.EngineStatus_SYNCING}, + }, nil + } + + log.Debug("[ForkChoiceUpdated] acquiring lock") + s.lock.Lock() + defer s.lock.Unlock() + log.Debug("[ForkChoiceUpdated] lock acquired") + log.Debug("[ForkChoiceUpdated] sending forkChoiceMessage", "head", forkChoice.HeadBlockHash) s.requestList.AddForkChoiceRequest(&forkChoice)