From 5013be00756c186615d4107f43f8018abe08e75c Mon Sep 17 00:00:00 2001 From: Enrique Jose Avila Asapche Date: Thu, 30 Jun 2022 12:46:25 +0300 Subject: [PATCH 001/152] getting header instead of block (#4582) --- cmd/rpcdaemon/commands/eth_system.go | 6 +++--- cmd/rpcdaemon22/commands/eth_system.go | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/cmd/rpcdaemon/commands/eth_system.go b/cmd/rpcdaemon/commands/eth_system.go index 3a4bea0e5bf..1ba77c18138 100644 --- a/cmd/rpcdaemon/commands/eth_system.go +++ b/cmd/rpcdaemon/commands/eth_system.go @@ -199,14 +199,14 @@ func NewGasPriceOracleBackend(tx kv.Tx, cc *params.ChainConfig, baseApi *BaseAPI } func (b *GasPriceOracleBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) { - block, err := b.baseApi.blockByRPCNumber(number, b.tx) + header, err := b.baseApi._blockReader.HeaderByNumber(ctx, b.tx, uint64(number.Int64())) if err != nil { return nil, err } - if block == nil { + if header == nil { return nil, nil } - return block.Header(), nil + return header, nil } func (b *GasPriceOracleBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) { return b.baseApi.blockByRPCNumber(number, b.tx) diff --git a/cmd/rpcdaemon22/commands/eth_system.go b/cmd/rpcdaemon22/commands/eth_system.go index 32cc015c821..c327893192c 100644 --- a/cmd/rpcdaemon22/commands/eth_system.go +++ b/cmd/rpcdaemon22/commands/eth_system.go @@ -198,14 +198,14 @@ func NewGasPriceOracleBackend(tx kv.Tx, cc *params.ChainConfig, baseApi *BaseAPI } func (b *GasPriceOracleBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) { - block, err := b.baseApi.blockByRPCNumber(number, b.tx) + header, err := b.baseApi._blockReader.HeaderByNumber(ctx, b.tx, uint64(number.Int64())) if err != nil { return nil, err } - if block == nil { + if header == nil { return nil, nil } - return block.Header(), nil + return header, nil } func (b 
*GasPriceOracleBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) { return b.baseApi.blockByRPCNumber(number, b.tx) From 74cf9840aebaa75efe273307b67495ee0eece873 Mon Sep 17 00:00:00 2001 From: Cory Date: Thu, 30 Jun 2022 03:11:37 -0700 Subject: [PATCH 002/152] Patch plumbing of docker-compose UID/GID build args (#4527) * Patch plumbing of docker-compose UID/GID build args * Fallback to 1000/1000 if DOCKER_(U|G)ID not set * Revise README.md instructions for docker further * Fix existing typo forc 'servie' -> 'service' * Rename PUID/GUID -> UID/GID * Specify user in erigon docker service * Rely on .env instead of specifying :-1000 * Polish Makefile for docker use case * one more helpful comment * make docker should use UID/GID --build-arg * Fix make docker and more fail-fast if envvar set incorrect * mv .env->.env.example to not intefere existing workflows * Specify envvars in docker CI * Adjust validate_docker_build_args to permit non-erigon user * Also run docker CI target on macos-11 os * Add DOCKER_UID, DOCKER_GID in hooks/build * Patch docker build arg validation for macos * Add actions-setup-docker@master for macos * Don't run automated test for docker macos * Cleanup Makefile * Comments, targets for erigon users * More Makefile cleanup, debugging still * Typo fix * Create subdirs before calling ls * Get rid of flaky validation * DOCKER_UID, DOCKER_GID init to runner if not set * Get rid of unnecessary variable for now * Improved README based on new changes * Proper uid/gid `make user_*` when no envars set * Fix typo in Makefile comment * Fix make docker as sudo user --- .env.example | 6 +++ .github/workflows/ci.yml | 8 +++- .gitignore | 4 +- Dockerfile | 36 ++++++++++++------ Makefile | 79 +++++++++++++++++++++++++++++++------- README.md | 82 ++++++++++++++++++++++++++++++++++------ docker-compose.yml | 26 ++++++++----- hooks/build | 12 +++++- 8 files changed, 203 insertions(+), 50 deletions(-) create mode 100644 
.env.example diff --git a/.env.example b/.env.example new file mode 100644 index 00000000000..78ad26cbc5e --- /dev/null +++ b/.env.example @@ -0,0 +1,6 @@ +# host OS dedicated user (for docker especially) +ERIGON_USER=erigon + +# UID, GID of user inside docker process which must exist also on host OS +DOCKER_UID=3473 # random number [1001, 10000] chosen arbitrarily for example +DOCKER_GID=3473 # can choose any valid #. 1000 tends to be taken by first user diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 636a25f1a0a..e5512bcee86 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -104,4 +104,10 @@ jobs: - uses: actions/checkout@v3 with: fetch-depth: 0 # fetch git tags for "git describe" - - run: make docker + + - name: make docker + run: DOCKER_UID=$(id -u) DOCKER_GID=$(id -g) make docker + + # check with root permissions, should be cached from previous build + - name: sudo make docker + run: sudo DOCKER_UID=$(id -u) DOCKER_GID=$(id -g) make docker diff --git a/.gitignore b/.gitignore index 36bd64dfef4..c86d201a6e4 100644 --- a/.gitignore +++ b/.gitignore @@ -74,4 +74,6 @@ go.work /goerli -docker-compose.*.yml \ No newline at end of file +docker-compose.*.yml +.env + diff --git a/Dockerfile b/Dockerfile index cd79ce62baa..54704d5c1c8 100644 --- a/Dockerfile +++ b/Dockerfile @@ -14,29 +14,41 @@ RUN --mount=type=cache,target=/root/.cache \ FROM docker.io/library/alpine:3.15 RUN apk add --no-cache ca-certificates libstdc++ tzdata +# copy compiled artifacts from builder COPY --from=builder /app/build/bin/* /usr/local/bin/ -ARG PUID=1000 -ARG PGID=1000 -RUN adduser -H -u ${PUID} -g ${PGID} -D erigon -RUN mkdir -p /home/erigon -RUN mkdir -p /home/erigon/.local/share/erigon -RUN chown -R erigon:erigon /home/erigon - +# Setup user and group +# +# from the perspective of the container, uid=1000, gid=1000 is a sensible choice +# (mimicking Ubuntu Server), but if caller creates a .env (example in repo root), +# these defaults will get 
overridden when make calls docker-compose +ARG UID=1000 +ARG GID=1000 +RUN adduser -D -u $UID -g $GID erigon USER erigon - -EXPOSE 8545 8551 8546 30303 30303/udp 42069 42069/udp 8080 9090 6060 +RUN mkdir -p ~/.local/share/erigon + +EXPOSE 8545 \ + 8551 \ + 8546 \ + 30303 \ + 30303/udp \ + 42069 \ + 42069/udp \ + 8080 \ + 9090 \ + 6060 # https://github.com/opencontainers/image-spec/blob/main/annotations.md ARG BUILD_DATE ARG VCS_REF ARG VERSION LABEL org.label-schema.build-date=$BUILD_DATE \ - org.label-schema.name="Erigon" \ org.label-schema.description="Erigon Ethereum Client" \ + org.label-schema.name="Erigon" \ + org.label-schema.schema-version="1.0" \ org.label-schema.url="https://torquem.ch" \ org.label-schema.vcs-ref=$VCS_REF \ org.label-schema.vcs-url="https://github.com/ledgerwatch/erigon.git" \ org.label-schema.vendor="Torquem" \ - org.label-schema.version=$VERSION \ - org.label-schema.schema-version="1.0" + org.label-schema.version=$VERSION diff --git a/Makefile b/Makefile index 7a0dd3d99d5..5fcc65b0ef1 100644 --- a/Makefile +++ b/Makefile @@ -1,19 +1,26 @@ -GO = go +GO = go # if using docker, should not need to be installed/linked GOBIN = $(CURDIR)/build/bin +UNAME = $(shell uname) # Supported: Darwin, Linux +DOCKER := $(shell command -v docker 2> /dev/null) GIT_COMMIT ?= $(shell git rev-list -1 HEAD) GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD) GIT_TAG ?= $(shell git describe --tags '--match=v*' --dirty) -DOCKER_UID ?= 1000 -DOCKER_PID ?= 1000 +ERIGON_USER ?= erigon +# if using volume-mounting data dir, then must exist on host OS +DOCKER_UID ?= $(shell id -u) +DOCKER_GID ?= $(shell id -g) DOCKER_TAG ?= thorax/erigon:latest -CGO_CFLAGS := $(shell $(GO) env CGO_CFLAGS) # don't loose default +# Variables below for building on host OS, and are ignored for docker +# +# Pipe error below to /dev/null since Makefile structure kind of expects +# Go to be available, but with docker it's not strictly necessary +CGO_CFLAGS := $(shell $(GO) env CGO_CFLAGS 
2>/dev/null) # don't lose default CGO_CFLAGS += -DMDBX_FORCE_ASSERTIONS=1 # Enable MDBX's asserts by default in 'devel' branch and disable in 'stable' CGO_CFLAGS := CGO_CFLAGS="$(CGO_CFLAGS)" DBG_CGO_CFLAGS += -DMDBX_DEBUG=1 -GO_MINOR_VERSION = $(shell $(GO) version | cut -c 16-17) BUILD_TAGS = nosqlite,noboltdb PACKAGE = github.com/ledgerwatch/erigon @@ -27,18 +34,30 @@ GOTEST = GODEBUG=cgocheck=0 $(GO) test $(GO_FLAGS) ./... -p 2 default: all go-version: - @if [ $(GO_MINOR_VERSION) -lt 18 ]; then \ + @if [ $(shell $(GO) version | cut -c 16-17) -lt 18 ]; then \ echo "minimum required Golang version is 1.18"; \ exit 1 ;\ fi -docker: git-submodules - DOCKER_BUILDKIT=1 docker build -t ${DOCKER_TAG} \ +validate_docker_build_args: + @echo "Docker build args:" + @echo " DOCKER_UID: $(DOCKER_UID)" + @echo " DOCKER_GID: $(DOCKER_GID)\n" + @echo "Ensuring host OS user exists with specified UID/GID..." + @if [ "$(UNAME)" = "Darwin" ]; then \ + dscl . list /Users UniqueID | grep "$(DOCKER_UID)"; \ + elif [ "$(UNAME)" = "Linux" ]; then \ + cat /etc/passwd | grep "$(DOCKER_UID):$(DOCKER_GID)"; \ + fi + @echo "✔️ host OS user exists: $(shell id -nu $(DOCKER_UID))" + +docker: validate_docker_build_args git-submodules + DOCKER_BUILDKIT=1 $(DOCKER) build -t ${DOCKER_TAG} \ --build-arg "BUILD_DATE=$(shell date -Iseconds)" \ --build-arg VCS_REF=${GIT_COMMIT} \ --build-arg VERSION=${GIT_TAG} \ - --build-arg PUID=${DOCKER_UID} \ - --build-arg PGID=${DOCKER_PID} \ + --build-arg UID=${DOCKER_UID} \ + --build-arg GID=${DOCKER_GID} \ ${DOCKER_FLAGS} \ . 
@@ -46,8 +65,10 @@ xdg_data_home := ~/.local/share ifdef XDG_DATA_HOME xdg_data_home = $(XDG_DATA_HOME) endif -docker-compose: - mkdir -p $(xdg_data_home)/erigon $(xdg_data_home)/erigon-grafana $(xdg_data_home)/erigon-prometheus; \ +xdg_data_home_subdirs = $(xdg_data_home)/erigon $(xdg_data_home)/erigon-grafana $(xdg_data_home)/erigon-prometheus + +docker-compose: validate_docker_build_args + mkdir -p $(xdg_data_home_subdirs) docker-compose up # debug build allows see C stack traces, run it with GOTRACEBACK=crash. You don't need debug build for C pit for profiling. To profile C code use SETCGOTRCKEBACK=1 @@ -146,7 +167,6 @@ bindings: prometheus: docker-compose up prometheus grafana - escape: cd $(path) && go test -gcflags "-m -m" -run none -bench=BenchmarkJumpdest* -benchmem -memprofile mem.out @@ -154,5 +174,36 @@ git-submodules: @[ -d ".git" ] || (echo "Not a git repository" && exit 1) @echo "Updating git submodules" @# Dockerhub using ./hooks/post-checkout to set submodules, so this line will fail on Dockerhub - @git submodule sync --quiet --recursive + @# these lines will also fail if ran as root in a non-root user's checked out repository + @git submodule sync --quiet --recursive || true @git submodule update --quiet --init --recursive --force || true + +# since DOCKER_UID, DOCKER_GID are default initialized to the current user uid/gid, +# we need separate envvars to facilitate creation of the erigon user on the host OS. 
+ERIGON_USER_UID ?= 3473 +ERIGON_USER_GID ?= 3473 +ERIGON_USER_XDG_DATA_HOME ?= ~$(ERIGON_USER)/.local/share + +# create "erigon" user +user_linux: +ifdef DOCKER + sudo groupadd -f docker +endif + sudo addgroup --gid $(ERIGON_USER_GID) $(ERIGON_USER) 2> /dev/null || true + sudo adduser --disabled-password --gecos '' --uid $(ERIGON_USER_UID) --gid $(ERIGON_USER_GID) $(ERIGON_USER) 2> /dev/null || true + sudo mkhomedir_helper $(ERIGON_USER) + echo 'export PATH=$$PATH:/usr/local/go/bin' | sudo -u $(ERIGON_USER) tee /home/$(ERIGON_USER)/.bash_aliases >/dev/null +ifdef DOCKER + sudo usermod -aG docker $(ERIGON_USER) +endif + sudo -u $(ERIGON_USER) mkdir -p ~$(ERIGON_USER_XDG_DATA_HOME) + +# create "erigon" user +user_macos: + sudo dscl . -create /Users/$(ERIGON_USER) + sudo dscl . -create /Users/$(ERIGON_USER) UserShell /bin/bash + sudo dscl . -list /Users UniqueID | grep $(ERIGON_USER) | grep $(ERIGON_USER_UID) || sudo dscl . -create /Users/$(ERIGON_USER) UniqueID $(ERIGON_USER_UID) + sudo dscl . -create /Users/$(ERIGON_USER) PrimaryGroupID $(ERIGON_USER_GID) + sudo dscl . -create /Users/$(ERIGON_USER) NFSHomeDirectory /Users/$(ERIGON_USER) + sudo dscl . -append /Groups/admin GroupMembership $(ERIGON_USER) + sudo -u $(ERIGON_USER) mkdir -p ~$(ERIGON_USER_XDG_DATA_HOME) diff --git a/README.md b/README.md index 83de5de31b1..43dfa333dc9 100644 --- a/README.md +++ b/README.md @@ -48,11 +48,11 @@ System Requirements * Goerli Full node (see `--prune*` flags): 189GB on Beta, 114GB on Alpha (April 2022). -* BSC Archive: 7TB. BSC Full: 1TB. +* BSC Archive: 7TB. BSC Full: 1TB. * Polygon Mainnet Archive: 5TB. Polygon Mumbai Archive: 1TB. -SSD or NVMe. Do not recommend HDD - on HDD Erigon will always stay N blocks behind chain tip, but not fall behind. +SSD or NVMe. Do not recommend HDD - on HDD Erigon will always stay N blocks behind chain tip, but not fall behind. Bear in mind that SSD performance deteriorates when close to capacity. 
RAM: >=16GB, 64-bit architecture, [Golang version >= 1.18](https://golang.org/doc/install), GCC 10+ @@ -71,7 +71,7 @@ make erigon ./build/bin/erigon ``` -Default `--snapshots=true` for `mainnet`, `goerli`, `bsc`. Other networks now have default `--snapshots=false`. Increase download speed by flag `--torrent.download.rate=20mb`. 🔬 See [Downloader docs](./cmd/downloader/readme.md) +Default `--snapshots=true` for `mainnet`, `goerli`, `bsc`. Other networks now have default `--snapshots=false`. Increase download speed by flag `--torrent.download.rate=20mb`. 🔬 See [Downloader docs](./cmd/downloader/readme.md) Use `--datadir` to choose where to store data. @@ -190,18 +190,18 @@ In order to establish a secure connection between the Consensus Layer and the Ex The JWT secret key will be present in the datadir by default under the name of `jwt.hex` and its path can be specified with the flag `--authrpc.jwtsecret`. This piece of info needs to be specified in the Consensus Layer as well in order to establish connection successfully. More information can be found [here](https://github.com/ethereum/execution-apis/blob/main/src/engine/authentication.md) - + ### Multiple Instances / One Machine Define 5 flags to avoid conflicts: `--datadir --port --http.port --torrent.port --private.api.addr`. 
Example of multiple chains on the same machine: ``` # mainnet -./build/bin/erigon --datadir="" --chain=mainnet --port=30303 --http.port=8545 --torrent.port=42069 --private.api.addr=127.0.0.1:9090 --http --ws --http.api=eth,debug,net,trace,web3,erigon +./build/bin/erigon --datadir="" --chain=mainnet --port=30303 --http.port=8545 --torrent.port=42069 --private.api.addr=127.0.0.1:9090 --http --ws --http.api=eth,debug,net,trace,web3,erigon # rinkeby -./build/bin/erigon --datadir="" --chain=rinkeby --port=30304 --http.port=8546 --torrent.port=42068 --private.api.addr=127.0.0.1:9091 --http --ws --http.api=eth,debug,net,trace,web3,erigon +./build/bin/erigon --datadir="" --chain=rinkeby --port=30304 --http.port=8546 --torrent.port=42068 --private.api.addr=127.0.0.1:9091 --http --ws --http.api=eth,debug,net,trace,web3,erigon ``` Quote your path if it has spaces. @@ -210,7 +210,7 @@ Quote your path if it has spaces. 🔬 Detailed explanation is [DEV_CHAIN](/DEV_CHAIN.md). Key features -============ +============ 🔬 See more detailed [overview of functionality and current limitations](https://ledgerwatch.github.io/turbo_geth_release.html). It @@ -261,11 +261,11 @@ Examples of stages are: ### JSON-RPC daemon -Most of Erigon's components (sentry, txpool, snapshots downloader, can work inside Erigon and as independent process. +Most of Erigon's components (sentry, txpool, snapshots downloader, can work inside Erigon and as independent process. To enable built-in RPC server: `--http` and `--ws` (sharing same port with http) -Run RPCDaemon as separated process: this daemon can use local DB (with running Erigon or on snapshot of a database) or remote DB (run on another server). 🔬 See [RPC-Daemon docs](./cmd/rpcdaemon/README.md) +Run RPCDaemon as separated process: this daemon can use local DB (with running Erigon or on snapshot of a database) or remote DB (run on another server). 
🔬 See [RPC-Daemon docs](./cmd/rpcdaemon/README.md) #### **For remote DB** @@ -290,13 +290,71 @@ For a details on the implementation status of each command, [see this table](./cmd/rpcdaemon/README.md#rpc-implementation-status). ### Run all components by docker-compose +Docker allows for building and running Erigon via containers. This alleviates the need for installing build dependencies onto the host OS. + +#### Optional: Setup dedicated user +User UID/GID need to be synchronized between the host OS and container so files are written with correct permission. + +You may wish to setup a dedicated user/group on the host OS, in which case the following `make` targets are available. +```sh +# create "erigon" user +make user_linux +# or +make user_macos +``` +#### Environment Variables +There is a `.env.example` file in the root of the repo. +* `DOCKER_UID` - The UID of the docker user +* `DOCKER_GID` - The GID of the docker user +* `XDG_DATA_HOME` - The data directory which will be mounted to the docker containers + +If not specified, the UID/GID will use the current user. + +A good choice for `XDG_DATA_HOME` is to use the `~erigon/.ethereum` directory created by helper targets `make user_linux` or `make user_macos`. + +#### Check: Permissions +In all cases, `XDG_DATA_HOME` (specified or default) must be writeable by the user UID/GID in docker, which will be determined by the `DOCKER_UID` and `DOCKER_GID` at build time. + +If a build or service startup is failing due to permissions, check that all the directories, UID, and GID controlled by these environment variables are correct. + +#### Run Next command starts: Erigon on port 30303, rpcdaemon on port 8545, prometheus on port 9090, and grafana on port 3000. 
```sh +# +# Will mount ~/.local/share/erigon to /home/erigon/.local/share/erigon inside container +# make docker-compose + +# # or -XDG_DATA_HOME=/preferred/data/folder DOCKER_BUILDKIT=1 COMPOSE_DOCKER_CLI_BUILD=1 make docker-compose +# +# if you want to use a custom data directory +# or, if you want to use different uid/gid for a dedicated user +# +# To solve this, pass in the uid/gid parameters into the container. +# +# DOCKER_UID: the user id +# DOCKER_GID: the group id +# XDG_DATA_HOME: the data directory (default: ~/.local/share) +# +# Note: /preferred/data/folder must be read/writeable on host OS by user with UID/GID given +# if you followed above instructions +# +# Note: uid/gid syntax below will automatically use uid/gid of running user so this syntax +# is intended to be ran via the dedicated user setup earlier +# +DOCKER_UID=$(id -u) DOCKER_GID=$(id -g) XDG_DATA_HOME=/preferred/data/folder DOCKER_BUILDKIT=1 COMPOSE_DOCKER_CLI_BUILD=1 make docker-compose + +# +# if you want to run the docker, but you are not logged in as the $ERIGON_USER +# then you'll need to adjust the syntax above to grab the correct uid/gid +# +# To run the command via another user, use +# +ERIGON_USER=erigon +sudo -u ${ERIGON_USER} DOCKER_UID=$(id -u ${ERIGON_USER}) DOCKER_GID=$(id -g ${ERIGON_USER}) XDG_DATA_HOME=~${ERIGON_USER}/.ethereum DOCKER_BUILDKIT=1 COMPOSE_DOCKER_CLI_BUILD=1 make docker-compose ``` Makefile creates the initial directories for erigon, prometheus and grafana. The PID namespace is shared between erigon @@ -382,7 +440,7 @@ Reserved for future use: **gRPC ports**: `9092` consensus engine, `9093` snapsho run `go tool pprof -png http://127.0.0.1:6060/debug/pprof/profile\?seconds\=20 > cpu.png` - Get RAM profiling: add `--pprof flag` run `go tool pprof -inuse_space -png http://127.0.0.1:6060/debug/pprof/heap > mem.png` - + ### How to run local devnet? 🔬 Detailed explanation is [here](/DEV_CHAIN.md). 
@@ -455,7 +513,7 @@ Application `htop` on column `res` shows memory of "App + OS used to hold page cache for given App", but it's not informative, because if `htop` says that app using 90% of memory you still can run 3 more instances of app on the same machine - -because most of that `90%` is "OS pages cache". +because most of that `90%` is "OS pages cache". OS automatically free this cache any time it needs memory. Smaller "page cache size" may not impact performance of Erigon at all. diff --git a/docker-compose.yml b/docker-compose.yml index 47cd6884f74..f1f303473c5 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -4,6 +4,8 @@ # This file is an example: how to start Erigon's services as separated processes. # Default: --datadir=/home/erigon/.local/share/erigon +# Default UID: 3473 +# Default GID: 3473 # Ports: `9090` execution engine (private api), `9091` sentry, `9092` consensus engine, `9093` snapshot downloader, `9094` TxPool # Ports: `8545` json rpc, `8551` consensus json rpc, `30303` eth p2p protocol, `42069` bittorrent protocol, @@ -12,7 +14,7 @@ version: '2.2' # Basic erigon's service -x-erigon-service: &default-erigon-servie +x-erigon-service: &default-erigon-service image: thorax/erigon:${TAG:-latest} pid: service:erigon # Use erigon's PID namespace. It's required to open Erigon's DB from another process (RPCDaemon local-mode) volumes_from: [ erigon ] @@ -22,9 +24,14 @@ x-erigon-service: &default-erigon-servie services: erigon: image: thorax/erigon:${TAG:-latest} - build: . + build: + args: + UID: ${DOCKER_UID} + GID: ${DOCKER_GID} + context: . 
+ user: "${DOCKER_UID}:${DOCKER_GID}" command: | - erigon ${ERIGON_FLAGS-} --private.api.addr=0.0.0.0:9090 + erigon ${ERIGON_FLAGS-} --private.api.addr=0.0.0.0:9090 --sentry.api.addr=sentry:9091 --downloader.api.addr=downloader:9093 --txpool.disable --metrics --metrics.addr=0.0.0.0 --metrics.port=6060 --pprof --pprof.addr=0.0.0.0 --pprof.port=6061 volumes: @@ -34,21 +41,21 @@ services: mem_swappiness: 0 sentry: - <<: *default-erigon-servie + <<: *default-erigon-service command: sentry ${SENTRY_FLAGS-} --sentry.api.addr=0.0.0.0:9091 ports: [ "30303:30303/tcp", "30303:30303/udp" ] downloader: - <<: *default-erigon-servie + <<: *default-erigon-service command: downloader ${DOWNLOADER_FLAGS-} --downloader.api.addr=0.0.0.0:9093 ports: [ "42069:42069/tcp", "42069:42069/udp" ] txpool: - <<: *default-erigon-servie + <<: *default-erigon-service command: txpool ${TXPOOL_FLAGS-} --private.api.addr=erigon:9090 --txpool.api.addr=0.0.0.0:9094 rpcdaemon: - <<: *default-erigon-servie + <<: *default-erigon-service command: | rpcdaemon ${RPCDAEMON_FLAGS-} --http.addr=0.0.0.0 --http.vhosts=* --http.corsdomain=* --ws --private.api.addr=erigon:9090 --txpool.api.addr=txpool:9094 @@ -60,7 +67,7 @@ services: prometheus: image: prom/prometheus:v2.36.0 - user: 1000:1000 # Uses erigon user from Dockerfile + user: ${DOCKER_UID}:${DOCKER_GID} # Uses erigon user from Dockerfile command: --log.level=warn --config.file=/etc/prometheus/prometheus.yml --storage.tsdb.path=/prometheus --storage.tsdb.retention.time=150d --web.console.libraries=/usr/share/prometheus/console_libraries --web.console.templates=/usr/share/prometheus/consoles ports: [ "9090:9090" ] volumes: @@ -70,7 +77,7 @@ services: grafana: image: grafana/grafana:8.5.4 - user: 1000:1000 # Uses erigon user from Dockerfile + user: ${DOCKER_UID}:${DOCKER_GID} # Uses erigon user from Dockerfile ports: [ "3000:3000" ] volumes: - ${ERIGON_GRAFANA_CONFIG:-./cmd/prometheus/grafana.ini}:/etc/grafana/grafana.ini @@ -78,3 +85,4 @@ services: - 
./cmd/prometheus/dashboards:/etc/grafana/provisioning/dashboards - ${XDG_DATA_HOME:-~/.local/share}/erigon-grafana:/var/lib/grafana restart: unless-stopped + diff --git a/hooks/build b/hooks/build index b5a49356f13..c242d1ab85f 100755 --- a/hooks/build +++ b/hooks/build @@ -3,6 +3,16 @@ # This is needed to pass build ARGs to Dockerfile. # see https://docs.docker.com/docker-hub/builds/advanced/ -DOCKER_FLAGS="-t $IMAGE_NAME" \ +set -o errexit # exit on error +set -o nounset # disallow unset variables +set -o pipefail # fail if anything in pipe fails + +# $(id -u) and $(id -g) will be 0 +# +# so we need to specify the erigon user uid/gid in the image +# choose 3473 matching defaults in .env.example +DOCKER_FLAGS="-t ${IMAGE_NAME}" \ +DOCKER_UID=3473 \ +DOCKER_GID=3473 \ GIT_TAG=$(git describe --tags '--match=v*' --dirty) \ make docker From 60b5a9efe3db324f4060fbdbdb841959d6d8f7c3 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Thu, 30 Jun 2022 18:17:01 +0600 Subject: [PATCH 003/152] grafana fix panel header #4587 --- cmd/prometheus/dashboards/erigon.json | 77 +-------------------------- 1 file changed, 1 insertion(+), 76 deletions(-) diff --git a/cmd/prometheus/dashboards/erigon.json b/cmd/prometheus/dashboards/erigon.json index 15e7a4226fe..7c4fb19ae67 100644 --- a/cmd/prometheus/dashboards/erigon.json +++ b/cmd/prometheus/dashboards/erigon.json @@ -4566,7 +4566,7 @@ "pluginVersion": "8.3.3", "targets": [ { - "expr": "stage_headers{instance=~\"$instance\"}", + "expr": "sync{instance=~\"$instance\",stage=\"headers\"}", "format": "time_series", "interval": "", "intervalFactor": 1, @@ -4577,81 +4577,6 @@ "title": "Latest header", "type": "stat" }, - { - "fieldConfig": { - "defaults": { - "mappings": [ - { - "options": { - "match": "null", - "result": { - "text": "N/A" - } - }, - "type": "special" - } - ], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "none" - 
}, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 4, - "x": 4, - "y": 111 - }, - "id": 111, - "links": [], - "maxDataPoints": 100, - "options": { - "colorMode": "value", - "fieldOptions": { - "calcs": [ - "lastNotNull" - ] - }, - "graphMode": "none", - "justifyMode": "auto", - "orientation": "horizontal", - "reduceOptions": { - "calcs": [ - "mean" - ], - "fields": "", - "values": false - }, - "text": {}, - "textMode": "auto" - }, - "pluginVersion": "8.3.3", - "targets": [ - { - "exemplar": true, - "expr": "chain_head_receipt{instance=~\"$instance\"}", - "format": "time_series", - "interval": "", - "intervalFactor": 1, - "legendFormat": "{{instance}}", - "refId": "A" - } - ], - "title": "Latest receipt", - "type": "stat" - }, { "fieldConfig": { "defaults": { From 087105d1f31d83ff88ceeca30dfe36dc01de28d7 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Thu, 30 Jun 2022 18:20:21 +0200 Subject: [PATCH 004/152] lvh on invalid transition block (#4583) --- eth/stagedsync/stage_headers.go | 4 ++-- turbo/stages/headerdownload/header_algos.go | 15 ++++++++++++--- 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index cebd06e98a9..4aefc5079e6 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -585,7 +585,7 @@ func verifyAndSaveNewPoSHeader( // TODO(yperbasis): considered non-canonical because some missing headers were downloaded but not canonized // Or it's not a problem because forkChoice is updated frequently? 
if cfg.memoryOverlay { - status, latestValidHash, validationError, criticalError := cfg.hd.ValidatePayload(tx, header, body, false, cfg.execPayload) + status, latestValidHash, validationError, criticalError := cfg.hd.ValidatePayload(tx, header, body, cfg.chainConfig.TerminalTotalDifficulty, false, cfg.execPayload) if criticalError != nil { return &privateapi.PayloadStatus{CriticalError: criticalError}, false, criticalError } @@ -601,7 +601,7 @@ func verifyAndSaveNewPoSHeader( } if cfg.memoryOverlay && (cfg.hd.GetNextForkHash() == (common.Hash{}) || header.ParentHash == cfg.hd.GetNextForkHash()) { - status, latestValidHash, validationError, criticalError := cfg.hd.ValidatePayload(tx, header, body, true, cfg.execPayload) + status, latestValidHash, validationError, criticalError := cfg.hd.ValidatePayload(tx, header, body, cfg.chainConfig.TerminalTotalDifficulty, true, cfg.execPayload) if criticalError != nil { return &privateapi.PayloadStatus{CriticalError: criticalError}, false, criticalError } diff --git a/turbo/stages/headerdownload/header_algos.go b/turbo/stages/headerdownload/header_algos.go index 759ba81be44..1aabb1e3f00 100644 --- a/turbo/stages/headerdownload/header_algos.go +++ b/turbo/stages/headerdownload/header_algos.go @@ -1093,7 +1093,7 @@ func abs64(n int64) uint64 { return uint64(n) } -func (hd *HeaderDownload) ValidatePayload(tx kv.RwTx, header *types.Header, body *types.RawBody, store bool, execPayload func(kv.RwTx, *types.Header, *types.RawBody, uint64, []*types.Header, []*types.RawBody) error) (status remote.EngineStatus, latestValidHash common.Hash, validationError error, criticalError error) { +func (hd *HeaderDownload) ValidatePayload(tx kv.RwTx, header *types.Header, body *types.RawBody, terminalTotalDifficulty *big.Int, store bool, execPayload func(kv.RwTx, *types.Header, *types.RawBody, uint64, []*types.Header, []*types.RawBody) error) (status remote.EngineStatus, latestValidHash common.Hash, validationError error, criticalError error) { 
hd.lock.Lock() defer hd.lock.Unlock() maxDepth := uint64(16) @@ -1103,6 +1103,11 @@ func (hd *HeaderDownload) ValidatePayload(tx kv.RwTx, header *types.Header, body criticalError = fmt.Errorf("could not read block number.") return } + + isAncestorPosBlock, criticalError := rawdb.Transitioned(tx, header.Number.Uint64()-1, terminalTotalDifficulty) + if criticalError != nil { + return + } if store { // If it is a continuation of the canonical chain we can stack it up. if hd.nextForkState == nil { @@ -1115,7 +1120,9 @@ func (hd *HeaderDownload) ValidatePayload(tx kv.RwTx, header *types.Header, body validationError = execPayload(hd.nextForkState, header, body, 0, nil, nil) if validationError != nil { status = remote.EngineStatus_INVALID - latestValidHash = header.ParentHash + if isAncestorPosBlock { + latestValidHash = header.ParentHash + } return } status = remote.EngineStatus_VALID @@ -1165,7 +1172,9 @@ func (hd *HeaderDownload) ValidatePayload(tx kv.RwTx, header *types.Header, body validationError = execPayload(batch, header, body, unwindPoint, headersChain, bodiesChain) latestValidHash = header.Hash() if validationError != nil { - latestValidHash = header.ParentHash + if isAncestorPosBlock { + latestValidHash = header.ParentHash + } status = remote.EngineStatus_INVALID } // After the we finished executing, we clean up old forks From c03d57356c225eeafd703e95d52ec9823881d8e9 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Thu, 30 Jun 2022 22:35:44 +0600 Subject: [PATCH 005/152] prevent downloading new snapshots after initial sync (#4585) --- cmd/downloader/downloader/downloader.go | 36 +++-- .../downloader/downloader_grpc_server.go | 20 ++- cmd/downloader/downloader/util.go | 129 +++++++----------- eth/stagedsync/stage_headers.go | 16 --- turbo/snapshotsync/snap/files.go | 5 + 5 files changed, 100 insertions(+), 106 deletions(-) diff --git a/cmd/downloader/downloader/downloader.go b/cmd/downloader/downloader/downloader.go index 39ea6b980a4..4e167c951c8 100644 --- 
a/cmd/downloader/downloader/downloader.go +++ b/cmd/downloader/downloader/downloader.go @@ -86,7 +86,7 @@ func New(cfg *downloadercfg.Cfg) (*Downloader, error) { } } - return &Downloader{ + d := &Downloader{ cfg: cfg, db: db, pieceCompletionDB: c, @@ -95,7 +95,8 @@ func New(cfg *downloadercfg.Cfg) (*Downloader, error) { clientLock: &sync.RWMutex{}, statsLock: &sync.RWMutex{}, - }, nil + } + return d, d.addSegments() } func (d *Downloader) SnapDir() string { @@ -104,6 +105,13 @@ func (d *Downloader) SnapDir() string { return d.cfg.DataDir } +func (d *Downloader) IsInitialSync() bool { + d.clientLock.RLock() + defer d.clientLock.RUnlock() + _, lastPart := filepath.Split(d.cfg.DataDir) + return lastPart == "tmp" +} + func (d *Downloader) ReCalcStats(interval time.Duration) { d.statsLock.Lock() defer d.statsLock.Unlock() @@ -215,6 +223,24 @@ func (d *Downloader) onComplete() { d.pieceCompletionDB = c d.folder = m d.torrentClient = torrentClient + _ = d.addSegments() +} + +func (d *Downloader) addSegments() error { + if err := BuildTorrentFilesIfNeed(context.Background(), d.cfg.DataDir); err != nil { + return err + } + files, err := seedableSegmentFiles(d.cfg.DataDir) + if err != nil { + return fmt.Errorf("seedableSegmentFiles: %w", err) + } + for _, f := range files { + _, err := AddSegment(f, d.cfg.DataDir, d.torrentClient) + if err != nil { + return err + } + } + return nil } func (d *Downloader) Stats() AggStats { @@ -279,12 +305,6 @@ func openClient(cfg *torrent.ClientConfig) (db kv.RwDB, c storage.PieceCompletio return nil, nil, nil, nil, fmt.Errorf("torrent.NewClient: %w", err) } - if err := BuildTorrentsAndAdd(context.Background(), snapDir, torrentClient); err != nil { - if err != nil { - return nil, nil, nil, nil, fmt.Errorf("BuildTorrentsAndAdd: %w", err) - } - } - return db, c, m, torrentClient, nil } diff --git a/cmd/downloader/downloader/downloader_grpc_server.go b/cmd/downloader/downloader/downloader_grpc_server.go index 8c7411e2158..60bb4642459 100644 --- 
a/cmd/downloader/downloader/downloader_grpc_server.go +++ b/cmd/downloader/downloader/downloader_grpc_server.go @@ -2,6 +2,7 @@ package downloader import ( "context" + "fmt" "time" "github.com/anacrolix/torrent/metainfo" @@ -25,15 +26,27 @@ type GrpcServer struct { d *Downloader } +// Download - create new .torrent ONLY if initialSync, everything else Erigon can generate by itself func (s *GrpcServer) Download(ctx context.Context, request *proto_downloader.DownloadRequest) (*emptypb.Empty, error) { + isInitialSync := s.d.IsInitialSync() + torrentClient := s.d.Torrent() mi := &metainfo.MetaInfo{AnnounceList: Trackers} for _, it := range request.Items { - if it.TorrentHash == nil { - err := BuildTorrentAndAdd(ctx, it.Path, s.d.SnapDir(), torrentClient) - if err != nil { + if it.TorrentHash == nil { // seed new snapshot + if err := BuildTorrentFileIfNeed(it.Path, s.d.SnapDir()); err != nil { return nil, err } + } + ok, err := AddSegment(it.Path, s.d.SnapDir(), torrentClient) + if err != nil { + return nil, fmt.Errorf("AddSegment: %w", err) + } + if ok { + continue + } + + if !isInitialSync { continue } @@ -52,6 +65,7 @@ func (s *GrpcServer) Download(ctx context.Context, request *proto_downloader.Dow t.DisallowDataDownload() t.AllowDataUpload() <-t.GotInfo() + mi := t.Metainfo() if err := CreateTorrentFileIfNotExists(s.d.SnapDir(), t.Info(), &mi); err != nil { log.Warn("[downloader] create torrent file", "err", err) diff --git a/cmd/downloader/downloader/util.go b/cmd/downloader/downloader/util.go index 1cc79d588ed..905dfa101d5 100644 --- a/cmd/downloader/downloader/util.go +++ b/cmd/downloader/downloader/util.go @@ -75,7 +75,7 @@ func AllTorrentFiles(dir string) ([]string, error) { } return res, nil } -func allSegmentFiles(dir string) ([]string, error) { +func seedableSegmentFiles(dir string) ([]string, error) { files, err := os.ReadDir(dir) if err != nil { return nil, err @@ -95,50 +95,57 @@ func allSegmentFiles(dir string) ([]string, error) { if 
filepath.Ext(f.Name()) != ".seg" { // filter out only compressed files continue } + ff, err := snap.ParseFileName(dir, f.Name()) + if err != nil { + return nil, fmt.Errorf("ParseFileName: %w", err) + } + if !ff.Seedable() { + continue + } res = append(res, f.Name()) } return res, nil } // BuildTorrentFileIfNeed - create .torrent files from .seg files (big IO) - if .seg files were added manually -func BuildTorrentFileIfNeed(ctx context.Context, originalFileName, root string) (ok bool, err error) { +func BuildTorrentFileIfNeed(originalFileName, root string) (err error) { f, err := snap.ParseFileName(root, originalFileName) if err != nil { - return false, fmt.Errorf("ParseFileName: %w", err) + return fmt.Errorf("ParseFileName: %w", err) } - if f.To-f.From != snap.DEFAULT_SEGMENT_SIZE { - return false, nil + if !f.NeedTorrentFile() { + return nil } - torrentFilePath := filepath.Join(root, originalFileName+".torrent") - if _, err := os.Stat(torrentFilePath); err != nil { - if !errors.Is(err, os.ErrNotExist) { - return false, fmt.Errorf("os.Stat: %w", err) - } - info := &metainfo.Info{PieceLength: downloadercfg.DefaultPieceSize} - if err := info.BuildFromFilePath(filepath.Join(root, originalFileName)); err != nil { - return false, fmt.Errorf("BuildFromFilePath: %w", err) - } - if err := CreateTorrentFile(root, info, nil); err != nil { - return false, fmt.Errorf("CreateTorrentFile: %w", err) - } + if err := createTorrentFileFromSegment(f, nil); err != nil { + return fmt.Errorf("createTorrentFileFromInfo: %w", err) } - return true, nil + return nil +} + +func createTorrentFileFromSegment(f snap.FileInfo, mi *metainfo.MetaInfo) error { + info := &metainfo.Info{PieceLength: downloadercfg.DefaultPieceSize} + if err := info.BuildFromFilePath(f.Path); err != nil { + return fmt.Errorf("createTorrentFileFromSegment: %w", err) + } + + dir, _ := filepath.Split(f.Path) + return createTorrentFileFromInfo(dir, info, mi) } -func BuildTorrentAndAdd(ctx context.Context, originalFileName, 
snapDir string, client *torrent.Client) error { - ok, err := BuildTorrentFileIfNeed(ctx, originalFileName, snapDir) +// AddSegment - add existing .seg file, create corresponding .torrent if need +func AddSegment(originalFileName, snapDir string, client *torrent.Client) (bool, error) { + f, err := snap.ParseFileName(snapDir, originalFileName) if err != nil { - return fmt.Errorf("BuildTorrentFileIfNeed: %w", err) + return false, fmt.Errorf("ParseFileName: %w", err) } - if !ok { - return nil + if !f.TorrentFileExists() { + return false, nil } - torrentFilePath := filepath.Join(snapDir, originalFileName+".torrent") - _, err = AddTorrentFile(ctx, torrentFilePath, client) + _, err = AddTorrentFile(f.Path+".torrent", client) if err != nil { - return fmt.Errorf("AddTorrentFile: %w", err) + return false, fmt.Errorf("AddTorrentFile: %w", err) } - return nil + return true, nil } // BuildTorrentFilesIfNeed - create .torrent files from .seg files (big IO) - if .seg files were added manually @@ -146,17 +153,23 @@ func BuildTorrentFilesIfNeed(ctx context.Context, snapDir string) error { logEvery := time.NewTicker(20 * time.Second) defer logEvery.Stop() - files, err := allSegmentFiles(snapDir) + files, err := seedableSegmentFiles(snapDir) if err != nil { return err } errs := make(chan error, len(files)*2) wg := &sync.WaitGroup{} + workers := cmp.Max(1, runtime.GOMAXPROCS(-1)-1) * 2 + var sem = semaphore.NewWeighted(int64(workers)) for i, f := range files { wg.Add(1) + if err := sem.Acquire(ctx, 1); err != nil { + return err + } go func(f string, i int) { + defer sem.Release(1) defer wg.Done() - _, err = BuildTorrentFileIfNeed(ctx, f, snapDir) + err = BuildTorrentFileIfNeed(f, snapDir) if err != nil { errs <- err } @@ -182,63 +195,21 @@ func BuildTorrentFilesIfNeed(ctx context.Context, snapDir string) error { return nil } -// BuildTorrentsAndAdd - create .torrent files from .seg files (big IO) - if .seg files were placed manually to snapDir -// torrent.Client does automaticaly read 
all .torrent files, but we also willing to add .seg files even if corresponding .torrent doesn't exist -func BuildTorrentsAndAdd(ctx context.Context, snapDir string, client *torrent.Client) error { - logEvery := time.NewTicker(20 * time.Second) - defer logEvery.Stop() - files, err := allSegmentFiles(snapDir) +func CreateTorrentFileIfNotExists(root string, info *metainfo.Info, mi *metainfo.MetaInfo) error { + f, err := snap.ParseFileName(root, info.Name) if err != nil { - return fmt.Errorf("allSegmentFiles: %w", err) + return fmt.Errorf("ParseFileName: %w", err) } - errs := make(chan error, len(files)*2) - wg := &sync.WaitGroup{} - workers := cmp.Max(1, runtime.GOMAXPROCS(-1)-1) - var sem = semaphore.NewWeighted(int64(workers)) - for i, f := range files { - wg.Add(1) - if err := sem.Acquire(ctx, 1); err != nil { - return err - } - go func(f string, i int) { - defer sem.Release(1) - defer wg.Done() - - select { - case <-ctx.Done(): - errs <- ctx.Err() - case <-logEvery.C: - log.Info("[Snapshots] Verify snapshots", "Progress", fmt.Sprintf("%d/%d", i, len(files))) - default: - } - errs <- BuildTorrentAndAdd(ctx, f, snapDir, client) - }(f, i) - } - go func() { - wg.Wait() - close(errs) - }() - for err := range errs { - if err != nil { - return err - } + if !f.NeedTorrentFile() { + return nil } - - return nil -} - -func CreateTorrentFileIfNotExists(root string, info *metainfo.Info, mi *metainfo.MetaInfo) error { - torrentFileName := filepath.Join(root, info.Name+".torrent") - if _, err := os.Stat(torrentFileName); err != nil { - if errors.Is(err, os.ErrNotExist) { - return CreateTorrentFile(root, info, mi) - } + if err := createTorrentFileFromInfo(root, info, mi); err != nil { return err } return nil } -func CreateTorrentFile(root string, info *metainfo.Info, mi *metainfo.MetaInfo) error { +func createTorrentFileFromInfo(root string, info *metainfo.Info, mi *metainfo.MetaInfo) error { if mi == nil { infoBytes, err := bencode.Marshal(info) if err != nil { @@ -322,7 +293,7 
@@ func verifyTorrent(info *metainfo.Info, root string, consumer func(i int, good b // added first time - pieces verification process will start (disk IO heavy) - Progress // kept in `piece completion storage` (surviving reboot). Once it done - no disk IO needed again. // Don't need call torrent.VerifyData manually -func AddTorrentFile(ctx context.Context, torrentFilePath string, torrentClient *torrent.Client) (*torrent.Torrent, error) { +func AddTorrentFile(torrentFilePath string, torrentClient *torrent.Client) (*torrent.Torrent, error) { mi, err := metainfo.LoadFromFile(torrentFilePath) if err != nil { return nil, err diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 4aefc5079e6..2a88d823da3 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -1155,22 +1155,6 @@ func DownloadAndIndexSnapshotsIfNeed(s *StageState, ctx context.Context, tx kv.R if err := cfg.snapshots.Reopen(); err != nil { return fmt.Errorf("ReopenSegments: %w", err) } - expect := snapshothashes.KnownConfig(cfg.chainConfig.ChainName).ExpectBlocks - if cfg.snapshots.SegmentsMax() < expect { - k, err := rawdb.SecondKey(tx, kv.Headers) // genesis always first - if err != nil { - return err - } - var hasInDB uint64 = 1 - if k != nil { - hasInDB = binary.BigEndian.Uint64(k) - } - if cfg.snapshots.SegmentsMax() < hasInDB { - return fmt.Errorf("not enough snapshots available: snapshots=%d, blockInDB=%d, expect=%d", cfg.snapshots.SegmentsMax(), hasInDB, expect) - } else { - log.Warn(fmt.Sprintf("not enough snapshots available: %d < %d, but we can re-generate them because DB has historical blocks up to: %d", cfg.snapshots.SegmentsMax(), expect, hasInDB)) - } - } var m runtime.MemStats libcommon.ReadMemStats(&m) diff --git a/turbo/snapshotsync/snap/files.go b/turbo/snapshotsync/snap/files.go index e5ff027a00e..fa009192ce0 100644 --- a/turbo/snapshotsync/snap/files.go +++ b/turbo/snapshotsync/snap/files.go @@ -9,6 +9,7 @@ import ( "strconv" 
"strings" + "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/turbo/snapshotsync/snapshothashes" "golang.org/x/exp/slices" ) @@ -140,6 +141,10 @@ type FileInfo struct { T Type } +func (f FileInfo) TorrentFileExists() bool { return common.FileExist(f.Path + ".torrent") } +func (f FileInfo) Seedable() bool { return f.To-f.From == DEFAULT_SEGMENT_SIZE } +func (f FileInfo) NeedTorrentFile() bool { return f.Seedable() && !f.TorrentFileExists() } + func IdxFiles(dir string) (res []FileInfo, err error) { return FilesWithExt(dir, ".idx") } func Segments(dir string) (res []FileInfo, err error) { return FilesWithExt(dir, ".seg") } func TmpFiles(dir string) (res []string, err error) { From 7e9d65abdf1bf92f10fc34923c47c5b504fa0640 Mon Sep 17 00:00:00 2001 From: Justin Date: Thu, 30 Jun 2022 19:25:05 +0100 Subject: [PATCH 006/152] adding buildkit fix to the known issues (#4594) --- README.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/README.md b/README.md index 43dfa333dc9..6b96cb38e9e 100644 --- a/README.md +++ b/README.md @@ -547,3 +547,10 @@ For example: btrfs's autodefrag option - may increase write IO 100x times ### Gnome Tracker can kill Erigon [Gnome Tracker](https://wiki.gnome.org/Projects/Tracker) - detecting miners and kill them. + +### the --mount option requires BuildKit error + +For anyone else that was getting the BuildKit error when trying to start Erigon the old way you can use the below... +``` +XDG_DATA_HOME=/preferred/data/folder DOCKER_BUILDKIT=1 COMPOSE_DOCKER_CLI_BUILD=1 make docker-compose +``` From a5c1cad3466d2d28a62f7a1a5c35f214f0194b94 Mon Sep 17 00:00:00 2001 From: battlmonstr Date: Fri, 1 Jul 2022 05:03:29 +0200 Subject: [PATCH 007/152] Fix eth_subscribe_test timeout on CI (#4595) Reduce the likelihood of a deadlock caused by goroutine starvation on CI. The CI macOS runners have 3 cores. When running other tests in parallel having too few available cores could cause a deadlock. 
Test with: GOMAXPROCS=2 go test ./cmd/rpcdaemon/commands/eth_subscribe_test.go -test.count 100 --timeout 10s Expected: the command finishes within 3 sec Before the fix: it timeouts (without a timeout - hangs forever) --- cmd/rpcdaemon/commands/eth_subscribe_test.go | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/cmd/rpcdaemon/commands/eth_subscribe_test.go b/cmd/rpcdaemon/commands/eth_subscribe_test.go index 96406798d1f..b7c8531e3d4 100644 --- a/cmd/rpcdaemon/commands/eth_subscribe_test.go +++ b/cmd/rpcdaemon/commands/eth_subscribe_test.go @@ -1,17 +1,18 @@ package commands import ( + "context" "fmt" "testing" - "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" + "github.com/ledgerwatch/erigon-lib/direct" "github.com/ledgerwatch/erigon-lib/gointerfaces/sentry" - "github.com/ledgerwatch/erigon/cmd/rpcdaemon/rpcdaemontest" "github.com/ledgerwatch/erigon/cmd/rpcdaemon/rpcservices" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/eth/protocols/eth" + "github.com/ledgerwatch/erigon/ethdb/privateapi" "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/turbo/rpchelper" "github.com/ledgerwatch/erigon/turbo/snapshotsync" @@ -38,8 +39,10 @@ func TestEthSubscribe(t *testing.T) { } m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed - ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, m) - backend := rpcservices.NewRemoteBackend(remote.NewETHBACKENDClient(conn), m.DB, snapshotsync.NewBlockReader()) + ctx := context.Background() + backendServer := privateapi.NewEthBackendServer(ctx, nil, m.DB, m.Notifications.Events, snapshotsync.NewBlockReader(), nil, nil, nil, nil, false) + backendClient := direct.NewEthBackendClientDirect(backendServer) + backend := rpcservices.NewRemoteBackend(backendClient, m.DB, snapshotsync.NewBlockReader()) ff := rpchelper.New(ctx, backend, nil, nil, func() {}) newHeads := 
make(chan *types.Header) From fa2998728a32799e8c467bcf20bf3fecd4aee3bb Mon Sep 17 00:00:00 2001 From: Enrique Jose Avila Asapche Date: Fri, 1 Jul 2022 07:08:13 +0300 Subject: [PATCH 008/152] pending block will only return if it is not nil (#4588) * pending block will only return if it is not nil * unncessary --- cmd/rpcdaemon/commands/eth_api.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/cmd/rpcdaemon/commands/eth_api.go b/cmd/rpcdaemon/commands/eth_api.go index c9eac584e76..a12af3e941e 100644 --- a/cmd/rpcdaemon/commands/eth_api.go +++ b/cmd/rpcdaemon/commands/eth_api.go @@ -213,10 +213,6 @@ func (api *BaseAPI) pendingBlock() *types.Block { } func (api *BaseAPI) blockByRPCNumber(number rpc.BlockNumber, tx kv.Tx) (*types.Block, error) { - if number == rpc.PendingBlockNumber { - return api.pendingBlock(), nil - } - n, _, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(number), tx, api.filters) if err != nil { return nil, err From 33892ffd79ecaeaf3fca2638f0d2f563b3ace0c3 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Fri, 1 Jul 2022 11:02:24 +0200 Subject: [PATCH 009/152] Fixed transaction on hive tests (#4590) * try 1 * try 1 * fix now? * Update accessors_chain.go * Update accessors_chain.go * added comments --- eth/backend.go | 20 ++++++++++++++++++++ turbo/stages/stageloop.go | 4 +--- 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/eth/backend.go b/eth/backend.go index ec29d91a01c..1741a2760a6 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -158,6 +158,7 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere return nil, err } + var currentBlock *types.Block // Check if we have an already initialized chain and fall back to // that if so. Otherwise we need to generate a new genesis spec. 
if err := chainKv.View(context.Background(), func(tx kv.Tx) error { @@ -168,6 +169,7 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere if h != (common.Hash{}) { config.Genesis = nil // fallback to db content } + currentBlock = rawdb.ReadCurrentBlock(tx) return nil }); err != nil { panic(err) @@ -434,6 +436,24 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere } } + if currentBlock == nil { + currentBlock = genesis + } + // We start the transaction pool on startup, for a couple of reasons: + // 1) Hive tests requires us to do so and starting it from eth_sendRawTransaction is not viable as we have not enough data + // to initialize it properly. + // 2) we cannot propose for block 1 regardless. + go func() { + time.Sleep(10 * time.Millisecond) + baseFee := uint64(0) + if currentBlock.BaseFee() != nil { + baseFee = currentBlock.BaseFee().Uint64() + } + backend.notifications.Accumulator.StartChange(currentBlock.NumberU64(), currentBlock.Hash(), nil, false) + backend.notifications.Accumulator.SendAndReset(ctx, backend.notifications.StateChangesConsumer, baseFee, currentBlock.GasLimit()) + + }() + if !config.DeprecatedTxPool.Disable { backend.txPool2Fetch.ConnectCore() backend.txPool2Fetch.ConnectSentries() diff --git a/turbo/stages/stageloop.go b/turbo/stages/stageloop.go index 7fdd36ca352..a93342e2dd2 100644 --- a/turbo/stages/stageloop.go +++ b/turbo/stages/stageloop.go @@ -126,7 +126,6 @@ func StageLoopStep( } }() // avoid crash because Erigon's core does many things - var prevHeadBlockHash common.Hash var origin, finishProgressBefore uint64 if err := db.View(ctx, func(tx kv.Tx) error { origin, err = stages.GetStageProgress(tx, stages.Headers) @@ -137,7 +136,6 @@ func StageLoopStep( if err != nil { return err } - prevHeadBlockHash = rawdb.ReadHeadBlockHash(tx) return nil }); err != nil { return headBlockHash, err @@ -207,7 +205,7 @@ func StageLoopStep( } if notifications != nil && 
notifications.Accumulator != nil { header := rawdb.ReadCurrentHeader(rotx) - if header != nil && headBlockHash != prevHeadBlockHash { + if header != nil { pendingBaseFee := misc.CalcBaseFee(notifications.Accumulator.ChainConfig(), header) if header.Number.Uint64() == 0 { From 5a8c729fbb68bead59b6469e307c91a8a21163b2 Mon Sep 17 00:00:00 2001 From: primal_concrete_sledge Date: Fri, 1 Jul 2022 13:51:42 +0400 Subject: [PATCH 010/152] fix/Issue-4593_remove_double_close_of_newheads_chan (#4598) --- cmd/rpcdaemon/commands/eth_filters.go | 1 - cmd/rpcdaemon22/commands/eth_filters.go | 47 ++++++++++++-------- cmd/rpcdaemon22/commands/eth_filters_test.go | 45 +++++++++++++++++++ 3 files changed, 74 insertions(+), 19 deletions(-) create mode 100644 cmd/rpcdaemon22/commands/eth_filters_test.go diff --git a/cmd/rpcdaemon/commands/eth_filters.go b/cmd/rpcdaemon/commands/eth_filters.go index 6596075af3b..9ec2a36c3d5 100644 --- a/cmd/rpcdaemon/commands/eth_filters.go +++ b/cmd/rpcdaemon/commands/eth_filters.go @@ -151,7 +151,6 @@ func (api *APIImpl) NewHeads(ctx context.Context) (*rpc.Subscription, error) { go func() { defer debug.LogPanic() headers := make(chan *types.Header, 1) - defer close(headers) id := api.filters.SubscribeNewHeads(headers) defer api.filters.UnsubscribeHeads(id) diff --git a/cmd/rpcdaemon22/commands/eth_filters.go b/cmd/rpcdaemon22/commands/eth_filters.go index 05c69ff1898..b604880b2d5 100644 --- a/cmd/rpcdaemon22/commands/eth_filters.go +++ b/cmd/rpcdaemon22/commands/eth_filters.go @@ -2,7 +2,6 @@ package commands import ( "context" - "time" "github.com/ledgerwatch/erigon/common/debug" "github.com/ledgerwatch/erigon/common/hexutil" @@ -28,8 +27,6 @@ func (api *APIImpl) NewPendingTransactionFilter(_ context.Context) (string, erro return } api.filters.AddPendingTxs(id, txs) - default: - time.Sleep(time.Second) } } }() @@ -51,8 +48,6 @@ func (api *APIImpl) NewBlockFilter(_ context.Context) (string, error) { return } api.filters.AddPendingBlock(id, block) - 
default: - time.Sleep(time.Second) } } }() @@ -74,8 +69,6 @@ func (api *APIImpl) NewFilter(_ context.Context, crit filters.FilterCriteria) (s return } api.filters.AddLogs(id, lg) - default: - time.Sleep(time.Second) } } }() @@ -158,16 +151,23 @@ func (api *APIImpl) NewHeads(ctx context.Context) (*rpc.Subscription, error) { go func() { defer debug.LogPanic() headers := make(chan *types.Header, 1) - defer close(headers) + id := api.filters.SubscribeNewHeads(headers) defer api.filters.UnsubscribeHeads(id) for { select { - case h := <-headers: - err := notifier.Notify(rpcSub.ID, h) - if err != nil { - log.Warn("error while notifying subscription", "err", err) + case h, ok := <-headers: + if h != nil { + err := notifier.Notify(rpcSub.ID, h) + if err != nil { + log.Warn("error while notifying subscription", "err", err) + return + } + } + if !ok { + log.Warn("new heads channel was closed") + return } case <-rpcSub.Err(): return @@ -198,15 +198,20 @@ func (api *APIImpl) NewPendingTransactions(ctx context.Context) (*rpc.Subscripti for { select { - case txs := <-txsCh: + case txs, ok := <-txsCh: for _, t := range txs { if t != nil { err := notifier.Notify(rpcSub.ID, t.Hash()) if err != nil { log.Warn("error while notifying subscription", "err", err) + return } } } + if !ok { + log.Warn("new pending transactions channel was closed") + return + } case <-rpcSub.Err(): return } @@ -233,13 +238,19 @@ func (api *APIImpl) Logs(ctx context.Context, crit filters.FilterCriteria) (*rpc logs := make(chan *types.Log, 1) id := api.filters.SubscribeLogs(logs, crit) defer api.filters.UnsubscribeLogs(id) - for { select { - case h := <-logs: - err := notifier.Notify(rpcSub.ID, h) - if err != nil { - log.Warn("error while notifying subscription", "err", err) + case h, ok := <-logs: + if h != nil { + err := notifier.Notify(rpcSub.ID, h) + if err != nil { + log.Warn("error while notifying subscription", "err", err) + return + } + } + if !ok { + log.Warn("log channel was closed") + return } case 
<-rpcSub.Err(): return diff --git a/cmd/rpcdaemon22/commands/eth_filters_test.go b/cmd/rpcdaemon22/commands/eth_filters_test.go new file mode 100644 index 00000000000..329fd4ac2c9 --- /dev/null +++ b/cmd/rpcdaemon22/commands/eth_filters_test.go @@ -0,0 +1,45 @@ +package commands + +import ( + "testing" + + "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" + "github.com/ledgerwatch/erigon-lib/kv/kvcache" + "github.com/ledgerwatch/erigon/cmd/rpcdaemon22/rpcdaemontest" + "github.com/ledgerwatch/erigon/eth/filters" + "github.com/ledgerwatch/erigon/turbo/rpchelper" + "github.com/ledgerwatch/erigon/turbo/snapshotsync" + "github.com/ledgerwatch/erigon/turbo/stages" + "github.com/stretchr/testify/assert" +) + +func TestNewFilters(t *testing.T) { + assert := assert.New(t) + db := rpcdaemontest.CreateTestKV(t) + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, stages.Mock(t)) + mining := txpool.NewMiningClient(conn) + ff := rpchelper.New(ctx, nil, nil, mining, func() {}) + api := NewEthAPI(NewBaseApi(ff, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), db, nil, nil, nil, 5000000) + + ptf, err := api.NewPendingTransactionFilter(ctx) + assert.Nil(err) + + nf, err := api.NewFilter(ctx, filters.FilterCriteria{}) + assert.Nil(err) + + bf, err := api.NewBlockFilter(ctx) + assert.Nil(err) + + ok, err := api.UninstallFilter(ctx, nf) + assert.Nil(err) + assert.Equal(ok, true) + + ok, err = api.UninstallFilter(ctx, bf) + assert.Nil(err) + assert.Equal(ok, true) + + ok, err = api.UninstallFilter(ctx, ptf) + assert.Nil(err) + assert.Equal(ok, true) +} From c92ef8870c2ab690a3747242cfc7538e53c6cc0d Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Fri, 1 Jul 2022 12:50:34 +0200 Subject: [PATCH 011/152] ReportBadHeaderPoS with memoryOverlay (#4600) --- eth/stagedsync/stage_headers.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git 
a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 2a88d823da3..e94db76b84b 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -589,6 +589,9 @@ func verifyAndSaveNewPoSHeader( if criticalError != nil { return &privateapi.PayloadStatus{CriticalError: criticalError}, false, criticalError } + if validationError != nil { + cfg.hd.ReportBadHeaderPoS(headerHash, latestValidHash) + } success = status == remote.EngineStatus_VALID || status == remote.EngineStatus_ACCEPTED return &privateapi.PayloadStatus{ Status: status, @@ -605,6 +608,9 @@ func verifyAndSaveNewPoSHeader( if criticalError != nil { return &privateapi.PayloadStatus{CriticalError: criticalError}, false, criticalError } + if validationError != nil { + cfg.hd.ReportBadHeaderPoS(headerHash, latestValidHash) + } success = status == remote.EngineStatus_VALID || status == remote.EngineStatus_ACCEPTED return &privateapi.PayloadStatus{ Status: status, From 2415fecb26a787ce891dba799ea37929d768bf82 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Fri, 1 Jul 2022 16:52:43 +0600 Subject: [PATCH 012/152] --downloader.verfiy flag to verify once on startup (#4597) * save * save * save * save * save * save --- cmd/downloader/downloader/downloader.go | 47 ++++++++++++++++++- .../downloader/downloader_grpc_server.go | 8 ++++ cmd/downloader/main.go | 1 - cmd/utils/flags.go | 5 ++ eth/ethconfig/config.go | 1 + eth/stagedsync/stage_headers.go | 19 ++++++-- go.mod | 2 +- go.sum | 4 +- turbo/cli/default_flags.go | 1 + turbo/shards/state_change_accumulator.go | 8 ++-- 10 files changed, 83 insertions(+), 13 deletions(-) diff --git a/cmd/downloader/downloader/downloader.go b/cmd/downloader/downloader/downloader.go index 4e167c951c8..5ea02a0fe33 100644 --- a/cmd/downloader/downloader/downloader.go +++ b/cmd/downloader/downloader/downloader.go @@ -19,6 +19,7 @@ import ( "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/log/v3" mdbx2 
"github.com/torquem-ch/mdbx-go/mdbx" + "go.uber.org/atomic" "golang.org/x/sync/semaphore" ) @@ -96,7 +97,10 @@ func New(cfg *downloadercfg.Cfg) (*Downloader, error) { statsLock: &sync.RWMutex{}, } - return d, d.addSegments() + if err := d.addSegments(); err != nil { + return nil, err + } + return d, nil } func (d *Downloader) SnapDir() string { @@ -226,6 +230,45 @@ func (d *Downloader) onComplete() { _ = d.addSegments() } +func (d *Downloader) verify() error { + total := 0 + for _, t := range d.torrentClient.Torrents() { + select { + case <-t.GotInfo(): + total += t.NumPieces() + default: + continue + } + } + logInterval := 20 * time.Second + logEvery := time.NewTicker(logInterval) + defer logEvery.Stop() + + wg := &sync.WaitGroup{} + j := atomic.NewInt64(0) + + for _, t := range d.torrentClient.Torrents() { + wg.Add(1) + go func(t *torrent.Torrent) { + defer wg.Done() + for i := 0; i < t.NumPieces(); i++ { + j.Inc() + t.Piece(i).VerifyData() + + select { + case <-logEvery.C: + log.Info("[snapshots] Verifying", "progress", fmt.Sprintf("%.2f%%", 100*float64(j.Load())/float64(total))) + default: + } + //<-t.Complete.On() + } + }(t) + } + wg.Wait() + + return nil +} + func (d *Downloader) addSegments() error { if err := BuildTorrentFilesIfNeed(context.Background(), d.cfg.DataDir); err != nil { return err @@ -325,12 +368,12 @@ func MainLoop(ctx context.Context, d *Downloader, silent bool) { t.AllowDataDownload() t.DownloadAll() go func(t *torrent.Torrent) { + defer sem.Release(1) //r := t.NewReader() //r.SetReadahead(t.Length()) //_, _ = io.Copy(io.Discard, r) // enable streaming - it will prioritize sequential download <-t.Complete.On() - sem.Release(1) }(t) } time.Sleep(30 * time.Second) diff --git a/cmd/downloader/downloader/downloader_grpc_server.go b/cmd/downloader/downloader/downloader_grpc_server.go index 60bb4642459..11f1f596039 100644 --- a/cmd/downloader/downloader/downloader_grpc_server.go +++ b/cmd/downloader/downloader/downloader_grpc_server.go @@ -77,6 
+77,14 @@ func (s *GrpcServer) Download(ctx context.Context, request *proto_downloader.Dow return &emptypb.Empty{}, nil } +func (s *GrpcServer) Verify(ctx context.Context, request *proto_downloader.VerifyRequest) (*emptypb.Empty, error) { + err := s.d.verify() + if err != nil { + return nil, err + } + return &emptypb.Empty{}, nil +} + func (s *GrpcServer) Stats(ctx context.Context, request *proto_downloader.StatsRequest) (*proto_downloader.StatsReply, error) { stats := s.d.Stats() return &proto_downloader.StatsReply{ diff --git a/cmd/downloader/main.go b/cmd/downloader/main.go index 6f833871ac1..731fd5ab3bf 100644 --- a/cmd/downloader/main.go +++ b/cmd/downloader/main.go @@ -118,7 +118,6 @@ func Downloader(ctx context.Context) error { if err != nil { return err } - log.Info("torrentLogLevel", torrentLogLevel) var downloadRate, uploadRate datasize.ByteSize if err := downloadRate.UnmarshalText([]byte(downloadRateStr)); err != nil { diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 50150af6354..d4cc43990ef 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -668,6 +668,10 @@ var ( Name: "no-downloader", Usage: "to disable downloader component", } + DownloaderVerifyFlag = cli.BoolFlag{ + Name: "downloader.verify", + Usage: "verify snapshots on startup. 
it will not report founded problems but just re-download broken pieces", + } TorrentPortFlag = cli.IntFlag{ Name: "torrent.port", Value: 42069, @@ -1389,6 +1393,7 @@ func SetEthConfig(ctx *cli.Context, nodeConfig *nodecfg.Config, cfg *ethconfig.C cfg.Snapshot.KeepBlocks = ctx.GlobalBool(SnapKeepBlocksFlag.Name) cfg.Snapshot.Produce = !ctx.GlobalBool(SnapStopFlag.Name) cfg.Snapshot.NoDownloader = ctx.GlobalBool(NoDownloaderFlag.Name) + cfg.Snapshot.Verify = ctx.GlobalBool(DownloaderVerifyFlag.Name) cfg.Snapshot.DownloaderAddr = strings.TrimSpace(ctx.GlobalString(DownloaderAddrFlag.Name)) if cfg.Snapshot.DownloaderAddr == "" { downloadRateStr := ctx.GlobalString(TorrentDownloadRateFlag.Name) diff --git a/eth/ethconfig/config.go b/eth/ethconfig/config.go index 0e4e745f577..dc54c9b31cb 100644 --- a/eth/ethconfig/config.go +++ b/eth/ethconfig/config.go @@ -125,6 +125,7 @@ type Snapshot struct { KeepBlocks bool // produce new snapshots of blocks but don't remove blocks from DB Produce bool // produce new snapshots NoDownloader bool // possible to use snapshots without calling Downloader + Verify bool // verify snapshots on startup DownloaderAddr string } diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index e94db76b84b..204f76b301f 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -1302,13 +1302,14 @@ func WaitForDownloader(ctx context.Context, cfg HeadersCfg) error { } logEvery := time.NewTicker(logInterval) defer logEvery.Stop() + var m runtime.MemStats // Check once without delay, for faster erigon re-start - if stats, err := cfg.snapshotDownloader.Stats(ctx, &proto_downloader.StatsRequest{}); err == nil && stats.Completed { - return nil + stats, err := cfg.snapshotDownloader.Stats(ctx, &proto_downloader.StatsRequest{}) + if err == nil && stats.Completed { + goto Finish } - var m runtime.MemStats // Print download progress until all segments are available Loop: for { @@ -1319,6 +1320,11 @@ Loop: if 
stats, err := cfg.snapshotDownloader.Stats(ctx, &proto_downloader.StatsRequest{}); err != nil { log.Warn("Error while waiting for snapshots progress", "err", err) } else if stats.Completed { + if !cfg.snapshots.Cfg().Verify { // will verify after loop + if _, err := cfg.snapshotDownloader.Verify(ctx, &proto_downloader.VerifyRequest{}); err != nil { + return err + } + } break Loop } else { if stats.MetadataReady < stats.FilesTotal { @@ -1338,5 +1344,12 @@ Loop: } } } + +Finish: + if cfg.snapshots.Cfg().Verify { + if _, err := cfg.snapshotDownloader.Verify(ctx, &proto_downloader.VerifyRequest{}); err != nil { + return err + } + } return nil } diff --git a/go.mod b/go.mod index 68ed3b07b6b..bebbc1d6159 100644 --- a/go.mod +++ b/go.mod @@ -35,7 +35,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible - github.com/ledgerwatch/erigon-lib v0.0.0-20220629154434-59f7b5b57b68 + github.com/ledgerwatch/erigon-lib v0.0.0-20220701042032-ed452dbc4b21 github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 diff --git a/go.sum b/go.sum index 40b0cee0689..b99c0955a95 100644 --- a/go.sum +++ b/go.sum @@ -386,8 +386,8 @@ github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3P github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220629154434-59f7b5b57b68 h1:GWy2Jan7bkQe7xkptxxM2zWCjNyxGNDgSUl30oDMmHQ= -github.com/ledgerwatch/erigon-lib v0.0.0-20220629154434-59f7b5b57b68/go.mod h1:7sQ5B5m54zoo7RVRVukH3YZCYVrCC+BmwDBD+9KyTrE= +github.com/ledgerwatch/erigon-lib v0.0.0-20220701042032-ed452dbc4b21 
h1:mZAojUAtvuvFLS8sumuYlZrHKGvkjTBxA6fvvujT/Kc= +github.com/ledgerwatch/erigon-lib v0.0.0-20220701042032-ed452dbc4b21/go.mod h1:7sQ5B5m54zoo7RVRVukH3YZCYVrCC+BmwDBD+9KyTrE= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= diff --git a/turbo/cli/default_flags.go b/turbo/cli/default_flags.go index af923646c0e..93d61401c22 100644 --- a/turbo/cli/default_flags.go +++ b/turbo/cli/default_flags.go @@ -123,6 +123,7 @@ var DefaultFlags = []cli.Flag{ utils.SentryLogPeerInfoFlag, utils.DownloaderAddrFlag, utils.NoDownloaderFlag, + utils.DownloaderVerifyFlag, HealthCheckFlag, utils.HeimdallURLFlag, utils.WithoutHeimdallFlag, diff --git a/turbo/shards/state_change_accumulator.go b/turbo/shards/state_change_accumulator.go index 9fdf9f0b8dc..dc7372c9537 100644 --- a/turbo/shards/state_change_accumulator.go +++ b/turbo/shards/state_change_accumulator.go @@ -81,7 +81,7 @@ func (a *Accumulator) ChangeAccount(address common.Address, incarnation uint64, accountChange.Action = remote.Action_UPSERT case remote.Action_CODE: accountChange.Action = remote.Action_UPSERT_CODE - case remote.Action_DELETE: + case remote.Action_REMOVE: panic("") } accountChange.Incarnation = incarnation @@ -104,7 +104,7 @@ func (a *Accumulator) DeleteAccount(address common.Address) { accountChange.Data = nil accountChange.Code = nil accountChange.StorageChanges = nil - accountChange.Action = remote.Action_DELETE + accountChange.Action = remote.Action_REMOVE } // ChangeCode adds code to the latest change @@ -122,7 +122,7 @@ func (a *Accumulator) ChangeCode(address common.Address, incarnation uint64, cod accountChange.Action = remote.Action_CODE case remote.Action_UPSERT: accountChange.Action = remote.Action_UPSERT_CODE - case remote.Action_DELETE: + case remote.Action_REMOVE: panic("") } 
accountChange.Incarnation = incarnation @@ -138,7 +138,7 @@ func (a *Accumulator) ChangeStorage(address common.Address, incarnation uint64, a.accountChangeIndex[address] = i } accountChange := a.latestChange.Changes[i] - if accountChange.Action == remote.Action_DELETE { + if accountChange.Action == remote.Action_REMOVE { panic("") } accountChange.Incarnation = incarnation From 057fb3f4b00ea7b1df4c5b6c162790c83b59289f Mon Sep 17 00:00:00 2001 From: Enrique Jose Avila Asapche Date: Fri, 1 Jul 2022 14:12:01 +0300 Subject: [PATCH 013/152] More tests (#4591) * check if we have TD * some test * fixed pending test * all test * reverting initial commit * checking for td again --- cmd/rpcdaemon/commands/eth_block.go | 4 +- cmd/rpcdaemon/commands/eth_block_test.go | 179 +++++++++++++++++++++++ 2 files changed, 182 insertions(+), 1 deletion(-) create mode 100644 cmd/rpcdaemon/commands/eth_block_test.go diff --git a/cmd/rpcdaemon/commands/eth_block.go b/cmd/rpcdaemon/commands/eth_block.go index 1da5b7b0293..f21c4fedf44 100644 --- a/cmd/rpcdaemon/commands/eth_block.go +++ b/cmd/rpcdaemon/commands/eth_block.go @@ -208,7 +208,9 @@ func (api *APIImpl) GetBlockByNumber(ctx context.Context, number rpc.BlockNumber if err != nil { return nil, err } - additionalFields["totalDifficulty"] = (*hexutil.Big)(td) + if td != nil { + additionalFields["totalDifficulty"] = (*hexutil.Big)(td) + } response, err := ethapi.RPCMarshalBlock(b, true, fullTx, additionalFields) if err == nil && number == rpc.PendingBlockNumber { diff --git a/cmd/rpcdaemon/commands/eth_block_test.go b/cmd/rpcdaemon/commands/eth_block_test.go new file mode 100644 index 00000000000..e335d44cc0b --- /dev/null +++ b/cmd/rpcdaemon/commands/eth_block_test.go @@ -0,0 +1,179 @@ +package commands + +import ( + "context" + "math/big" + "testing" + + "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" + "github.com/ledgerwatch/erigon-lib/kv/kvcache" + "github.com/ledgerwatch/erigon/cmd/rpcdaemon/rpcdaemontest" + 
"github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/hexutil" + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/rlp" + "github.com/ledgerwatch/erigon/rpc" + "github.com/ledgerwatch/erigon/turbo/rpchelper" + "github.com/ledgerwatch/erigon/turbo/snapshotsync" + "github.com/ledgerwatch/erigon/turbo/stages" + "github.com/stretchr/testify/assert" +) + +// Gets the latest block number with the latest tag +func TestGetBlockByNumberWithLatestTag(t *testing.T) { + db := rpcdaemontest.CreateTestKV(t) + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + b, err := api.GetBlockByNumber(context.Background(), rpc.LatestBlockNumber, false) + expected := common.HexToHash("0x6804117de2f3e6ee32953e78ced1db7b20214e0d8c745a03b8fecf7cc8ee76ef") + if err != nil { + t.Errorf("error getting block number with latest tag: %s", err) + } + assert.Equal(t, expected, b["hash"]) +} + +func TestGetBlockByNumberWithLatestTag_WithHeadHashInDb(t *testing.T) { + db := rpcdaemontest.CreateTestKV(t) + ctx := context.Background() + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + tx, err := db.BeginRw(ctx) + if err != nil { + t.Errorf("could not begin read write transaction: %s", err) + } + latestBlockHash := common.HexToHash("0x6804117de2f3e6ee32953e78ced1db7b20214e0d8c745a03b8fecf7cc8ee76ef") + latestBlock, err := rawdb.ReadBlockByHash(tx, latestBlockHash) + if err != nil { + tx.Rollback() + t.Errorf("couldn't retrieve latest block") + } + rawdb.WriteHeaderNumber(tx, latestBlockHash, latestBlock.NonceU64()) + rawdb.WriteForkchoiceHead(tx, latestBlockHash) + if safedHeadBlock := rawdb.ReadForkchoiceHead(tx); safedHeadBlock == (common.Hash{}) { + tx.Rollback() + t.Error("didn't find forkchoice head hash") + } + tx.Commit() + + api := NewEthAPI(NewBaseApi(nil, stateCache, 
snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + block, err := api.GetBlockByNumber(ctx, rpc.LatestBlockNumber, false) + if err != nil { + t.Errorf("error retrieving block by number: %s", err) + } + expectedHash := common.HexToHash("0x71b89b6ca7b65debfd2fbb01e4f07de7bba343e6617559fa81df19b605f84662") + assert.Equal(t, expectedHash, block["hash"]) +} + +func TestGetBlockByNumberWithPendingTag(t *testing.T) { + db := rpcdaemontest.CreateTestKV(t) + m := stages.MockWithTxPool(t) + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + + ctx, conn := rpcdaemontest.CreateTestGrpcConn(t, m) + txPool := txpool.NewTxpoolClient(conn) + ff := rpchelper.New(ctx, nil, txPool, txpool.NewMiningClient(conn), func() {}) + + expected := 1 + header := &types.Header{ + Number: big.NewInt(int64(expected)), + } + + rlpBlock, err := rlp.EncodeToBytes(types.NewBlockWithHeader(header)) + if err != nil { + t.Errorf("failed encoding the block: %s", err) + } + ff.HandlePendingBlock(&txpool.OnPendingBlockReply{ + RplBlock: rlpBlock, + }) + + api := NewEthAPI(NewBaseApi(ff, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + b, err := api.GetBlockByNumber(context.Background(), rpc.PendingBlockNumber, false) + if err != nil { + t.Errorf("error getting block number with pending tag: %s", err) + } + assert.Equal(t, (*hexutil.Big)(big.NewInt(int64(expected))), b["number"]) +} + +func TestGetBlockByNumber_WithFinalizedTag_NoFinalizedBlockInDb(t *testing.T) { + db := rpcdaemontest.CreateTestKV(t) + ctx := context.Background() + + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + if _, err := api.GetBlockByNumber(ctx, rpc.FinalizedBlockNumber, false); err != nil { + assert.ErrorIs(t, rpchelper.UnknownBlockError, err) + } +} + +func TestGetBlockByNumber_WithFinalizedTag_WithFinalizedBlockInDb(t *testing.T) { + db := 
rpcdaemontest.CreateTestKV(t) + ctx := context.Background() + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + tx, err := db.BeginRw(ctx) + if err != nil { + t.Errorf("could not begin read write transaction: %s", err) + } + latestBlockHash := common.HexToHash("0x6804117de2f3e6ee32953e78ced1db7b20214e0d8c745a03b8fecf7cc8ee76ef") + latestBlock, err := rawdb.ReadBlockByHash(tx, latestBlockHash) + if err != nil { + tx.Rollback() + t.Errorf("couldn't retrieve latest block") + } + rawdb.WriteHeaderNumber(tx, latestBlockHash, latestBlock.NonceU64()) + rawdb.WriteForkchoiceFinalized(tx, latestBlockHash) + if safedFinalizedBlock := rawdb.ReadForkchoiceFinalized(tx); safedFinalizedBlock == (common.Hash{}) { + tx.Rollback() + t.Error("didn't find forkchoice finalized hash") + } + tx.Commit() + + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + block, err := api.GetBlockByNumber(ctx, rpc.FinalizedBlockNumber, false) + if err != nil { + t.Errorf("error retrieving block by number: %s", err) + } + expectedHash := common.HexToHash("0x71b89b6ca7b65debfd2fbb01e4f07de7bba343e6617559fa81df19b605f84662") + assert.Equal(t, expectedHash, block["hash"]) +} + +func TestGetBlockByNumber_WithSafeTag_NoSafeBlockInDb(t *testing.T) { + db := rpcdaemontest.CreateTestKV(t) + ctx := context.Background() + + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + if _, err := api.GetBlockByNumber(ctx, rpc.SafeBlockNumber, false); err != nil { + assert.ErrorIs(t, rpchelper.UnknownBlockError, err) + } +} + +func TestGetBlockByNumber_WithSafeTag_WithSafeBlockInDb(t *testing.T) { + db := rpcdaemontest.CreateTestKV(t) + ctx := context.Background() + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + tx, err := db.BeginRw(ctx) + if err != nil { + t.Errorf("could not begin read write transaction: %s", err) + 
} + latestBlockHash := common.HexToHash("0x6804117de2f3e6ee32953e78ced1db7b20214e0d8c745a03b8fecf7cc8ee76ef") + latestBlock, err := rawdb.ReadBlockByHash(tx, latestBlockHash) + if err != nil { + tx.Rollback() + t.Errorf("couldn't retrieve latest block") + } + rawdb.WriteHeaderNumber(tx, latestBlockHash, latestBlock.NonceU64()) + rawdb.WriteForkchoiceSafe(tx, latestBlockHash) + if safedSafeBlock := rawdb.ReadForkchoiceSafe(tx); safedSafeBlock == (common.Hash{}) { + tx.Rollback() + t.Error("didn't find forkchoice safe block hash") + } + tx.Commit() + + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + block, err := api.GetBlockByNumber(ctx, rpc.SafeBlockNumber, false) + if err != nil { + t.Errorf("error retrieving block by number: %s", err) + } + expectedHash := common.HexToHash("0x71b89b6ca7b65debfd2fbb01e4f07de7bba343e6617559fa81df19b605f84662") + assert.Equal(t, expectedHash, block["hash"]) +} From 1d0058105526667bc43befe0d0c61b69f7ab970c Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Fri, 1 Jul 2022 15:32:54 +0200 Subject: [PATCH 014/152] More thorough (*ChainConfig) checkCompatible (#4601) --- params/config.go | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/params/config.go b/params/config.go index 6446c63eb19..3c7f8dffb5a 100644 --- a/params/config.go +++ b/params/config.go @@ -243,6 +243,7 @@ type ChainConfig struct { ArrowGlacierBlock *big.Int `json:"arrowGlacierBlock,omitempty"` // EIP-4345 (bomb delay) switch block (nil = no fork, 0 = already activated) GrayGlacierBlock *big.Int `json:"grayGlacierBlock,omitempty"` // EIP-5133 (bomb delay) switch block (nil = no fork, 0 = already activated) + // Parlia fork blocks RamanujanBlock *big.Int `json:"ramanujanBlock,omitempty" toml:",omitempty"` // ramanujanBlock switch block (nil = no fork, 0 = already activated) NielsBlock *big.Int `json:"nielsBlock,omitempty" 
toml:",omitempty"` // nielsBlock switch block (nil = no fork, 0 = already activated) MirrorSyncBlock *big.Int `json:"mirrorSyncBlock,omitempty" toml:",omitempty"` // mirrorSyncBlock switch block (nil = no fork, 0 = already activated) @@ -627,6 +628,7 @@ func (c *ChainConfig) CheckConfigForkOrder() error { } func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, head uint64) *ConfigCompatError { + // Ethereum mainnet forks if isForkIncompatible(c.HomesteadBlock, newcfg.HomesteadBlock, head) { return newCompatError("Homestead fork block", c.HomesteadBlock, newcfg.HomesteadBlock) } @@ -676,6 +678,23 @@ func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, head uint64) *ConfigC if isForkIncompatible(c.GrayGlacierBlock, newcfg.GrayGlacierBlock, head) { return newCompatError("Gray Glacier fork block", c.GrayGlacierBlock, newcfg.GrayGlacierBlock) } + if isForkIncompatible(c.MergeNetsplitBlock, newcfg.MergeNetsplitBlock, head) { + return newCompatError("Merge netsplit block", c.MergeNetsplitBlock, newcfg.MergeNetsplitBlock) + } + + // Parlia forks + if isForkIncompatible(c.RamanujanBlock, newcfg.RamanujanBlock, head) { + return newCompatError("Ramanujan fork block", c.RamanujanBlock, newcfg.RamanujanBlock) + } + if isForkIncompatible(c.NielsBlock, newcfg.NielsBlock, head) { + return newCompatError("Niels fork block", c.NielsBlock, newcfg.NielsBlock) + } + if isForkIncompatible(c.MirrorSyncBlock, newcfg.MirrorSyncBlock, head) { + return newCompatError("MirrorSync fork block", c.MirrorSyncBlock, newcfg.MirrorSyncBlock) + } + if isForkIncompatible(c.BrunoBlock, newcfg.BrunoBlock, head) { + return newCompatError("Bruno fork block", c.BrunoBlock, newcfg.BrunoBlock) + } if isForkIncompatible(c.EulerBlock, newcfg.EulerBlock, head) { return newCompatError("Euler fork block", c.EulerBlock, newcfg.EulerBlock) } From 975bf0ecb9ab63f02b0886369848e6fad1e4d45a Mon Sep 17 00:00:00 2001 From: Enrique Jose Avila Asapche Date: Fri, 1 Jul 2022 17:36:44 +0300 Subject: [PATCH 015/152] 
Changing rawdb to blockReader (#4602) --- cmd/rpcdaemon/commands/bor_helper.go | 13 +++++--- cmd/rpcdaemon/commands/bor_snapshot.go | 24 +++++++-------- cmd/rpcdaemon/commands/erigon_block.go | 37 ++++++++++++++++++++--- cmd/rpcdaemon/commands/eth_call_test.go | 8 ++++- cmd/rpcdaemon22/commands/bor_helper.go | 13 +++++--- cmd/rpcdaemon22/commands/bor_snapshot.go | 24 +++++++-------- cmd/rpcdaemon22/commands/erigon_block.go | 36 +++++++++++++++++++--- cmd/rpcdaemon22/commands/eth_call_test.go | 8 ++++- 8 files changed, 117 insertions(+), 46 deletions(-) diff --git a/cmd/rpcdaemon/commands/bor_helper.go b/cmd/rpcdaemon/commands/bor_helper.go index bb054a8054b..ef5cf774b4a 100644 --- a/cmd/rpcdaemon/commands/bor_helper.go +++ b/cmd/rpcdaemon/commands/bor_helper.go @@ -2,13 +2,13 @@ package commands import ( "bytes" + "context" "errors" "fmt" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/consensus/bor" - "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/crypto" "github.com/ledgerwatch/erigon/params" @@ -45,7 +45,7 @@ var ( // getHeaderByNumber returns a block's header given a block number ignoring the block's transaction and uncle list (may be faster). 
// derived from erigon_getHeaderByNumber implementation (see ./erigon_block.go) -func getHeaderByNumber(number rpc.BlockNumber, api *BorImpl, tx kv.Tx) (*types.Header, error) { +func getHeaderByNumber(ctx context.Context, number rpc.BlockNumber, api *BorImpl, tx kv.Tx) (*types.Header, error) { // Pending block is only known by the miner if number == rpc.PendingBlockNumber { block := api.pendingBlock() @@ -60,7 +60,10 @@ func getHeaderByNumber(number rpc.BlockNumber, api *BorImpl, tx kv.Tx) (*types.H return nil, err } - header := rawdb.ReadHeaderByNumber(tx, blockNum) + header, err := api._blockReader.HeaderByNumber(ctx, tx, blockNum) + if err != nil { + return nil, err + } if header == nil { return nil, fmt.Errorf("block header not found: %d", blockNum) } @@ -70,8 +73,8 @@ func getHeaderByNumber(number rpc.BlockNumber, api *BorImpl, tx kv.Tx) (*types.H // getHeaderByHash returns a block's header given a block's hash. // derived from erigon_getHeaderByHash implementation (see ./erigon_block.go) -func getHeaderByHash(tx kv.Tx, hash common.Hash) (*types.Header, error) { - header, err := rawdb.ReadHeaderByHash(tx, hash) +func getHeaderByHash(ctx context.Context, api *BorImpl, tx kv.Tx, hash common.Hash) (*types.Header, error) { + header, err := api._blockReader.HeaderByHash(ctx, tx, hash) if err != nil { return nil, err } diff --git a/cmd/rpcdaemon/commands/bor_snapshot.go b/cmd/rpcdaemon/commands/bor_snapshot.go index 96d18cc86bf..2915afe9513 100644 --- a/cmd/rpcdaemon/commands/bor_snapshot.go +++ b/cmd/rpcdaemon/commands/bor_snapshot.go @@ -45,7 +45,7 @@ func (api *BorImpl) GetSnapshot(number *rpc.BlockNumber) (*Snapshot, error) { if number == nil || *number == rpc.LatestBlockNumber { header = rawdb.ReadCurrentHeader(tx) } else { - header, _ = getHeaderByNumber(*number, api, tx) + header, _ = getHeaderByNumber(ctx, *number, api, tx) } // Ensure we have an actually valid block if header == nil { @@ -58,7 +58,7 @@ func (api *BorImpl) GetSnapshot(number 
*rpc.BlockNumber) (*Snapshot, error) { return nil, err } defer borTx.Rollback() - return snapshot(api, tx, borTx, header) + return snapshot(ctx, api, tx, borTx, header) } // GetAuthor retrieves the author a block. @@ -75,7 +75,7 @@ func (api *BorImpl) GetAuthor(number *rpc.BlockNumber) (*common.Address, error) if number == nil || *number == rpc.LatestBlockNumber { header = rawdb.ReadCurrentHeader(tx) } else { - header, _ = getHeaderByNumber(*number, api, tx) + header, _ = getHeaderByNumber(ctx, *number, api, tx) } // Ensure we have an actually valid block if header == nil { @@ -96,7 +96,7 @@ func (api *BorImpl) GetSnapshotAtHash(hash common.Hash) (*Snapshot, error) { defer tx.Rollback() // Retreive the header - header, _ := getHeaderByHash(tx, hash) + header, _ := getHeaderByHash(ctx, api, tx, hash) // Ensure we have an actually valid block if header == nil { @@ -109,7 +109,7 @@ func (api *BorImpl) GetSnapshotAtHash(hash common.Hash) (*Snapshot, error) { return nil, err } defer borTx.Rollback() - return snapshot(api, tx, borTx, header) + return snapshot(ctx, api, tx, borTx, header) } // GetSigners retrieves the list of authorized signers at the specified block. 
@@ -127,7 +127,7 @@ func (api *BorImpl) GetSigners(number *rpc.BlockNumber) ([]common.Address, error if number == nil || *number == rpc.LatestBlockNumber { header = rawdb.ReadCurrentHeader(tx) } else { - header, _ = getHeaderByNumber(*number, api, tx) + header, _ = getHeaderByNumber(ctx, *number, api, tx) } // Ensure we have an actually valid block if header == nil { @@ -140,7 +140,7 @@ func (api *BorImpl) GetSigners(number *rpc.BlockNumber) ([]common.Address, error return nil, err } defer borTx.Rollback() - snap, err := snapshot(api, tx, borTx, header) + snap, err := snapshot(ctx, api, tx, borTx, header) return snap.signers(), err } @@ -155,7 +155,7 @@ func (api *BorImpl) GetSignersAtHash(hash common.Hash) ([]common.Address, error) defer tx.Rollback() // Retreive the header - header, _ := getHeaderByHash(tx, hash) + header, _ := getHeaderByHash(ctx, api, tx, hash) // Ensure we have an actually valid block if header == nil { @@ -169,7 +169,7 @@ func (api *BorImpl) GetSignersAtHash(hash common.Hash) ([]common.Address, error) } defer borTx.Rollback() - snap, err := snapshot(api, tx, borTx, header) + snap, err := snapshot(ctx, api, tx, borTx, header) return snap.signers(), err } @@ -214,7 +214,7 @@ func (api *BorImpl) GetRootHash(start, end uint64) (string, error) { } blockHeaders := make([]*types.Header, end-start+1) for number := start; number <= end; number++ { - blockHeaders[number-start], _ = getHeaderByNumber(rpc.BlockNumber(number), api, tx) + blockHeaders[number-start], _ = getHeaderByNumber(ctx, rpc.BlockNumber(number), api, tx) } headers := make([][32]byte, bor.NextPowerOfTwo(length)) @@ -354,7 +354,7 @@ func (s *Snapshot) apply(headers []*types.Header) (*Snapshot, error) { } // snapshot retrieves the authorization snapshot at a given point in time. 
-func snapshot(api *BorImpl, db kv.Tx, borDb kv.Tx, header *types.Header) (*Snapshot, error) { +func snapshot(ctx context.Context, api *BorImpl, db kv.Tx, borDb kv.Tx, header *types.Header) (*Snapshot, error) { // Search for a snapshot on disk or build it from checkpoint var ( headers []*types.Header @@ -376,7 +376,7 @@ func snapshot(api *BorImpl, db kv.Tx, borDb kv.Tx, header *types.Header) (*Snaps // No snapshot for this header, move backward and check parent snapshots if header == nil { - header, _ = getHeaderByNumber(rpc.BlockNumber(number), api, db) + header, _ = getHeaderByNumber(ctx, rpc.BlockNumber(number), api, db) if header == nil { return nil, consensus.ErrUnknownAncestor } diff --git a/cmd/rpcdaemon/commands/erigon_block.go b/cmd/rpcdaemon/commands/erigon_block.go index f094311a6be..7a5bf1bda1d 100644 --- a/cmd/rpcdaemon/commands/erigon_block.go +++ b/cmd/rpcdaemon/commands/erigon_block.go @@ -2,6 +2,7 @@ package commands import ( "context" + "errors" "fmt" "sort" @@ -36,7 +37,11 @@ func (api *ErigonImpl) GetHeaderByNumber(ctx context.Context, blockNumber rpc.Bl return nil, err } - header := rawdb.ReadHeaderByNumber(tx, blockNum) + header, err := api._blockReader.HeaderByNumber(ctx, tx, blockNum) + if err != nil { + return nil, err + } + if header == nil { return nil, fmt.Errorf("block header not found: %d", blockNum) } @@ -52,7 +57,7 @@ func (api *ErigonImpl) GetHeaderByHash(ctx context.Context, hash common.Hash) (* } defer tx.Rollback() - header, err := rawdb.ReadHeaderByHash(tx, hash) + header, err := api._blockReader.HeaderByHash(ctx, tx, hash) if err != nil { return nil, err } @@ -76,7 +81,15 @@ func (api *ErigonImpl) GetBlockByTimestamp(ctx context.Context, timeStamp rpc.Ti currenttHeaderTime := currentHeader.Time highestNumber := currentHeader.Number.Uint64() - firstHeader := rawdb.ReadHeaderByNumber(tx, 0) + firstHeader, err := api._blockReader.HeaderByNumber(ctx, tx, 0) + if err != nil { + return nil, err + } + + if firstHeader == nil { + 
return nil, errors.New("no genesis header found") + } + firstHeaderTime := firstHeader.Time if currenttHeaderTime <= uintTimestamp { @@ -98,12 +111,26 @@ func (api *ErigonImpl) GetBlockByTimestamp(ctx context.Context, timeStamp rpc.Ti } blockNum := sort.Search(int(currentHeader.Number.Uint64()), func(blockNum int) bool { - currentHeader := rawdb.ReadHeaderByNumber(tx, uint64(blockNum)) + currentHeader, err := api._blockReader.HeaderByNumber(ctx, tx, uint64(blockNum)) + if err != nil { + return false + } + + if currentHeader == nil { + return false + } return currentHeader.Time >= uintTimestamp }) - resultingHeader := rawdb.ReadHeaderByNumber(tx, uint64(blockNum)) + resultingHeader, err := api._blockReader.HeaderByNumber(ctx, tx, uint64(blockNum)) + if err != nil { + return nil, err + } + + if resultingHeader == nil { + return nil, fmt.Errorf("no header found with header number: %d", blockNum) + } if resultingHeader.Time > uintTimestamp { response, err := buildBlockResponse(tx, uint64(blockNum)-1, fullTx) diff --git a/cmd/rpcdaemon/commands/eth_call_test.go b/cmd/rpcdaemon/commands/eth_call_test.go index a73a0b53342..fc204365a4f 100644 --- a/cmd/rpcdaemon/commands/eth_call_test.go +++ b/cmd/rpcdaemon/commands/eth_call_test.go @@ -180,7 +180,13 @@ func TestGetBlockByTimeMiddle(t *testing.T) { api := NewErigonAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil) currentHeader := rawdb.ReadCurrentHeader(tx) - oldestHeader := rawdb.ReadHeaderByNumber(tx, 0) + oldestHeader, err := api._blockReader.HeaderByNumber(ctx, tx, 0) + if err != nil { + t.Errorf("error getting oldest header %s", err) + } + if oldestHeader == nil { + t.Error("couldn't find oldest header") + } middleNumber := (currentHeader.Number.Uint64() + oldestHeader.Number.Uint64()) / 2 middleBlock, err := rawdb.ReadBlockByNumber(tx, middleNumber) diff --git a/cmd/rpcdaemon22/commands/bor_helper.go b/cmd/rpcdaemon22/commands/bor_helper.go index 49d074307da..51e60d5f70f 100644 --- 
a/cmd/rpcdaemon22/commands/bor_helper.go +++ b/cmd/rpcdaemon22/commands/bor_helper.go @@ -2,13 +2,13 @@ package commands import ( "bytes" + "context" "errors" "fmt" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/consensus/bor" - "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/crypto" "github.com/ledgerwatch/erigon/params" @@ -44,7 +44,7 @@ var ( // getHeaderByNumber returns a block's header given a block number ignoring the block's transaction and uncle list (may be faster). // derived from erigon_getHeaderByNumber implementation (see ./erigon_block.go) -func getHeaderByNumber(number rpc.BlockNumber, api *BorImpl, tx kv.Tx) (*types.Header, error) { +func getHeaderByNumber(ctx context.Context, number rpc.BlockNumber, api *BorImpl, tx kv.Tx) (*types.Header, error) { // Pending block is only known by the miner if number == rpc.PendingBlockNumber { block := api.pendingBlock() @@ -59,7 +59,10 @@ func getHeaderByNumber(number rpc.BlockNumber, api *BorImpl, tx kv.Tx) (*types.H return nil, err } - header := rawdb.ReadHeaderByNumber(tx, blockNum) + header, err := api._blockReader.HeaderByNumber(ctx, tx, blockNum) + if err != nil { + return nil, err + } if header == nil { return nil, fmt.Errorf("block header not found: %d", blockNum) } @@ -69,8 +72,8 @@ func getHeaderByNumber(number rpc.BlockNumber, api *BorImpl, tx kv.Tx) (*types.H // getHeaderByHash returns a block's header given a block's hash. 
// derived from erigon_getHeaderByHash implementation (see ./erigon_block.go) -func getHeaderByHash(tx kv.Tx, hash common.Hash) (*types.Header, error) { - header, err := rawdb.ReadHeaderByHash(tx, hash) +func getHeaderByHash(ctx context.Context, api *BorImpl, tx kv.Tx, hash common.Hash) (*types.Header, error) { + header, err := api._blockReader.HeaderByHash(ctx, tx, hash) if err != nil { return nil, err } diff --git a/cmd/rpcdaemon22/commands/bor_snapshot.go b/cmd/rpcdaemon22/commands/bor_snapshot.go index 96d18cc86bf..2915afe9513 100644 --- a/cmd/rpcdaemon22/commands/bor_snapshot.go +++ b/cmd/rpcdaemon22/commands/bor_snapshot.go @@ -45,7 +45,7 @@ func (api *BorImpl) GetSnapshot(number *rpc.BlockNumber) (*Snapshot, error) { if number == nil || *number == rpc.LatestBlockNumber { header = rawdb.ReadCurrentHeader(tx) } else { - header, _ = getHeaderByNumber(*number, api, tx) + header, _ = getHeaderByNumber(ctx, *number, api, tx) } // Ensure we have an actually valid block if header == nil { @@ -58,7 +58,7 @@ func (api *BorImpl) GetSnapshot(number *rpc.BlockNumber) (*Snapshot, error) { return nil, err } defer borTx.Rollback() - return snapshot(api, tx, borTx, header) + return snapshot(ctx, api, tx, borTx, header) } // GetAuthor retrieves the author a block. 
@@ -75,7 +75,7 @@ func (api *BorImpl) GetAuthor(number *rpc.BlockNumber) (*common.Address, error) if number == nil || *number == rpc.LatestBlockNumber { header = rawdb.ReadCurrentHeader(tx) } else { - header, _ = getHeaderByNumber(*number, api, tx) + header, _ = getHeaderByNumber(ctx, *number, api, tx) } // Ensure we have an actually valid block if header == nil { @@ -96,7 +96,7 @@ func (api *BorImpl) GetSnapshotAtHash(hash common.Hash) (*Snapshot, error) { defer tx.Rollback() // Retreive the header - header, _ := getHeaderByHash(tx, hash) + header, _ := getHeaderByHash(ctx, api, tx, hash) // Ensure we have an actually valid block if header == nil { @@ -109,7 +109,7 @@ func (api *BorImpl) GetSnapshotAtHash(hash common.Hash) (*Snapshot, error) { return nil, err } defer borTx.Rollback() - return snapshot(api, tx, borTx, header) + return snapshot(ctx, api, tx, borTx, header) } // GetSigners retrieves the list of authorized signers at the specified block. @@ -127,7 +127,7 @@ func (api *BorImpl) GetSigners(number *rpc.BlockNumber) ([]common.Address, error if number == nil || *number == rpc.LatestBlockNumber { header = rawdb.ReadCurrentHeader(tx) } else { - header, _ = getHeaderByNumber(*number, api, tx) + header, _ = getHeaderByNumber(ctx, *number, api, tx) } // Ensure we have an actually valid block if header == nil { @@ -140,7 +140,7 @@ func (api *BorImpl) GetSigners(number *rpc.BlockNumber) ([]common.Address, error return nil, err } defer borTx.Rollback() - snap, err := snapshot(api, tx, borTx, header) + snap, err := snapshot(ctx, api, tx, borTx, header) return snap.signers(), err } @@ -155,7 +155,7 @@ func (api *BorImpl) GetSignersAtHash(hash common.Hash) ([]common.Address, error) defer tx.Rollback() // Retreive the header - header, _ := getHeaderByHash(tx, hash) + header, _ := getHeaderByHash(ctx, api, tx, hash) // Ensure we have an actually valid block if header == nil { @@ -169,7 +169,7 @@ func (api *BorImpl) GetSignersAtHash(hash common.Hash) ([]common.Address, 
error) } defer borTx.Rollback() - snap, err := snapshot(api, tx, borTx, header) + snap, err := snapshot(ctx, api, tx, borTx, header) return snap.signers(), err } @@ -214,7 +214,7 @@ func (api *BorImpl) GetRootHash(start, end uint64) (string, error) { } blockHeaders := make([]*types.Header, end-start+1) for number := start; number <= end; number++ { - blockHeaders[number-start], _ = getHeaderByNumber(rpc.BlockNumber(number), api, tx) + blockHeaders[number-start], _ = getHeaderByNumber(ctx, rpc.BlockNumber(number), api, tx) } headers := make([][32]byte, bor.NextPowerOfTwo(length)) @@ -354,7 +354,7 @@ func (s *Snapshot) apply(headers []*types.Header) (*Snapshot, error) { } // snapshot retrieves the authorization snapshot at a given point in time. -func snapshot(api *BorImpl, db kv.Tx, borDb kv.Tx, header *types.Header) (*Snapshot, error) { +func snapshot(ctx context.Context, api *BorImpl, db kv.Tx, borDb kv.Tx, header *types.Header) (*Snapshot, error) { // Search for a snapshot on disk or build it from checkpoint var ( headers []*types.Header @@ -376,7 +376,7 @@ func snapshot(api *BorImpl, db kv.Tx, borDb kv.Tx, header *types.Header) (*Snaps // No snapshot for this header, move backward and check parent snapshots if header == nil { - header, _ = getHeaderByNumber(rpc.BlockNumber(number), api, db) + header, _ = getHeaderByNumber(ctx, rpc.BlockNumber(number), api, db) if header == nil { return nil, consensus.ErrUnknownAncestor } diff --git a/cmd/rpcdaemon22/commands/erigon_block.go b/cmd/rpcdaemon22/commands/erigon_block.go index d32557024e4..3d68de3a370 100644 --- a/cmd/rpcdaemon22/commands/erigon_block.go +++ b/cmd/rpcdaemon22/commands/erigon_block.go @@ -2,6 +2,7 @@ package commands import ( "context" + "errors" "fmt" "sort" @@ -35,7 +36,10 @@ func (api *ErigonImpl) GetHeaderByNumber(ctx context.Context, blockNumber rpc.Bl return nil, err } - header := rawdb.ReadHeaderByNumber(tx, blockNum) + header, err := api._blockReader.HeaderByNumber(ctx, tx, blockNum) + if err 
!= nil { + return nil, err + } if header == nil { return nil, fmt.Errorf("block header not found: %d", blockNum) } @@ -51,7 +55,7 @@ func (api *ErigonImpl) GetHeaderByHash(ctx context.Context, hash common.Hash) (* } defer tx.Rollback() - header, err := rawdb.ReadHeaderByHash(tx, hash) + header, err := api._blockReader.HeaderByHash(ctx, tx, hash) if err != nil { return nil, err } @@ -75,7 +79,15 @@ func (api *ErigonImpl) GetBlockByTimestamp(ctx context.Context, timeStamp rpc.Ti currenttHeaderTime := currentHeader.Time highestNumber := currentHeader.Number.Uint64() - firstHeader := rawdb.ReadHeaderByNumber(tx, 0) + firstHeader, err := api._blockReader.HeaderByNumber(ctx, tx, 0) + if err != nil { + return nil, err + } + + if firstHeader == nil { + return nil, errors.New("genesis header not found") + } + firstHeaderTime := firstHeader.Time if currenttHeaderTime <= uintTimestamp { @@ -97,12 +109,26 @@ func (api *ErigonImpl) GetBlockByTimestamp(ctx context.Context, timeStamp rpc.Ti } blockNum := sort.Search(int(currentHeader.Number.Uint64()), func(blockNum int) bool { - currentHeader := rawdb.ReadHeaderByNumber(tx, uint64(blockNum)) + currentHeader, err := api._blockReader.HeaderByNumber(ctx, tx, uint64(blockNum)) + if err != nil { + return false + } + + if currentHeader == nil { + return false + } return currentHeader.Time >= uintTimestamp }) - resultingHeader := rawdb.ReadHeaderByNumber(tx, uint64(blockNum)) + resultingHeader, err := api._blockReader.HeaderByNumber(ctx, tx, uint64(blockNum)) + if err != nil { + return nil, err + } + + if resultingHeader == nil { + return nil, fmt.Errorf("no header found with block num %d", blockNum) + } if resultingHeader.Time > uintTimestamp { response, err := buildBlockResponse(tx, uint64(blockNum)-1, fullTx) diff --git a/cmd/rpcdaemon22/commands/eth_call_test.go b/cmd/rpcdaemon22/commands/eth_call_test.go index 6958651e4f5..714b47de951 100644 --- a/cmd/rpcdaemon22/commands/eth_call_test.go +++ 
b/cmd/rpcdaemon22/commands/eth_call_test.go @@ -174,7 +174,13 @@ func TestGetBlockByTimeMiddle(t *testing.T) { api := NewErigonAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), nil, nil, false), db, nil) currentHeader := rawdb.ReadCurrentHeader(tx) - oldestHeader := rawdb.ReadHeaderByNumber(tx, 0) + oldestHeader, err := api._blockReader.HeaderByNumber(ctx, tx, 0) + if err != nil { + t.Errorf("error getting the oldest header %s", err) + } + if oldestHeader == nil { + t.Error("couldn't find oldest header") + } middleNumber := (currentHeader.Number.Uint64() + oldestHeader.Number.Uint64()) / 2 middleBlock, err := rawdb.ReadBlockByNumber(tx, middleNumber) From fff6e4ffa5c4d73ac886852503b1ae0f5420f76d Mon Sep 17 00:00:00 2001 From: primal_concrete_sledge Date: Fri, 1 Jul 2022 23:59:52 +0400 Subject: [PATCH 016/152] fix/issue-4593_fix_closed_chan (#4603) --- turbo/rpchelper/filters.go | 1 + 1 file changed, 1 insertion(+) diff --git a/turbo/rpchelper/filters.go b/turbo/rpchelper/filters.go index 1a90ee3754a..60a312d1961 100644 --- a/turbo/rpchelper/filters.go +++ b/turbo/rpchelper/filters.go @@ -315,6 +315,7 @@ func (ff *Filters) UnsubscribeHeads(id HeadsSubID) bool { defer ff.mu.Unlock() if ch, ok := ff.headsSubs[id]; ok { close(ch) + delete(ff.headsSubs, id) ff.storeMu.Lock() defer ff.storeMu.Unlock() delete(ff.pendingHeadsStores, id) From db93d2ea37f74871219231ce128652c1a3aac16f Mon Sep 17 00:00:00 2001 From: sudeep Date: Sat, 2 Jul 2022 11:22:23 +0530 Subject: [PATCH 017/152] evm t8n tool to use ExecuteBlockEphemerally api (#4512) * fix to set V, R, S in legacy transaction * fix to dump post-execution alloc for evm t8n * close tx in evm t8n * populate current difficulty and gas used in output result - update the ExecutionResult to include corresponding info (like Difficulty/GasUsed) * initial attempt at migrating 'evm t8n' to use ExecuteBlockEphemerally * using ExecutionResult in ExecuteBlockEphemerally * bypass validations and integrate with 
EphemeralExecResult * fixing output of 'evm t8n' - remaining bits are "stateRoot" in results.txt and "balance" field for one account in alloc.txt (for testdata=1) * get ExecuteBlockEphemerally to accept getTracer lambda * fix build failure * test cases for evm t8n * more test cases for evm t8n * fix stateRoot computation in evm t8n * remove reward argument, as EBE itself takes care of it * final cleanups for migration to using ExecuteBlockEphemerally * change EBEforBSC to match EBE * fix linter issues * manually revert an unwanted diff * avoid calculating ReceiptHash twice * linter check * minor correction * remove unnecessary logic in EBEforBsc --- accounts/abi/bind/backends/simulated.go | 5 +- cmd/evm/internal/t8ntool/execution.go | 288 +++-------------- cmd/evm/internal/t8ntool/flags.go | 7 +- cmd/evm/internal/t8ntool/gen_stenv.go | 65 ++-- cmd/evm/internal/t8ntool/transition.go | 268 ++++++++++++++-- cmd/evm/main.go | 3 +- cmd/evm/t8n_test.go | 247 ++++++++++++++ cmd/evm/testdata/1/exp.json | 45 +++ cmd/evm/testdata/10/alloc.json | 23 ++ cmd/evm/testdata/10/env.json | 12 + cmd/evm/testdata/10/exp.json | 79 +++++ cmd/evm/testdata/10/readme.md | 79 +++++ cmd/evm/testdata/10/txs.json | 70 ++++ cmd/evm/testdata/11/alloc.json | 25 ++ cmd/evm/testdata/11/env.json | 12 + cmd/evm/testdata/11/readme.md | 13 + cmd/evm/testdata/11/txs.json | 14 + cmd/evm/testdata/12/alloc.json | 11 + cmd/evm/testdata/12/env.json | 10 + cmd/evm/testdata/12/exp.json | 26 ++ cmd/evm/testdata/12/readme.md | 40 +++ cmd/evm/testdata/12/txs.json | 20 ++ cmd/evm/testdata/19/alloc.json | 12 + cmd/evm/testdata/19/env.json | 9 + cmd/evm/testdata/19/exp_arrowglacier.json | 24 ++ cmd/evm/testdata/19/exp_london.json | 24 ++ cmd/evm/testdata/19/readme.md | 9 + cmd/evm/testdata/19/txs.json | 1 + cmd/evm/testdata/3/exp.json | 39 +++ cmd/evm/testdata/5/exp.json | 23 ++ cmd/evm/testdata/7/exp.json | 375 ++++++++++++++++++++++ cmd/evm/testdata/8/exp.json | 68 ++++ cmd/evm/testdata/9/alloc.json | 28 +- 
cmd/evm/testdata/9/env.json | 15 +- cmd/evm/testdata/9/exp.json | 54 ++++ cmd/evm/testdata/9/readme.md | 75 +++++ cmd/evm/testdata/9/txs.json | 49 ++- cmd/integration/commands/state_stages.go | 3 +- cmd/rpcdaemon/commands/eth_receipts.go | 3 +- cmd/rpcdaemon22/commands/eth_receipts.go | 3 +- cmd/state/commands/erigon2.go | 2 +- cmd/state/commands/erigon22.go | 2 +- cmd/state/commands/history2.go | 2 +- cmd/state/commands/history22.go | 2 +- cmd/state/commands/opcode_tracer.go | 2 +- consensus/parlia/parlia.go | 2 +- core/blockchain.go | 203 ++++++++---- core/chain_makers.go | 4 +- core/evm.go | 4 +- core/state_processor.go | 58 +--- core/vm/logger.go | 75 +++++ eth/stagedsync/stage_execute.go | 19 +- eth/stagedsync/stage_mining_exec.go | 2 +- go.mod | 7 +- go.sum | 17 +- internal/cmdtest/test_cmd.go | 300 +++++++++++++++++ tests/state_test_util.go | 3 +- turbo/transactions/tracing.go | 3 +- 58 files changed, 2383 insertions(+), 500 deletions(-) create mode 100644 cmd/evm/t8n_test.go create mode 100644 cmd/evm/testdata/1/exp.json create mode 100644 cmd/evm/testdata/10/alloc.json create mode 100644 cmd/evm/testdata/10/env.json create mode 100644 cmd/evm/testdata/10/exp.json create mode 100644 cmd/evm/testdata/10/readme.md create mode 100644 cmd/evm/testdata/10/txs.json create mode 100644 cmd/evm/testdata/11/alloc.json create mode 100644 cmd/evm/testdata/11/env.json create mode 100644 cmd/evm/testdata/11/readme.md create mode 100644 cmd/evm/testdata/11/txs.json create mode 100644 cmd/evm/testdata/12/alloc.json create mode 100644 cmd/evm/testdata/12/env.json create mode 100644 cmd/evm/testdata/12/exp.json create mode 100644 cmd/evm/testdata/12/readme.md create mode 100644 cmd/evm/testdata/12/txs.json create mode 100644 cmd/evm/testdata/19/alloc.json create mode 100644 cmd/evm/testdata/19/env.json create mode 100644 cmd/evm/testdata/19/exp_arrowglacier.json create mode 100644 cmd/evm/testdata/19/exp_london.json create mode 100644 cmd/evm/testdata/19/readme.md create 
mode 100644 cmd/evm/testdata/19/txs.json create mode 100644 cmd/evm/testdata/3/exp.json create mode 100644 cmd/evm/testdata/5/exp.json create mode 100644 cmd/evm/testdata/7/exp.json create mode 100644 cmd/evm/testdata/8/exp.json create mode 100644 cmd/evm/testdata/9/exp.json create mode 100644 cmd/evm/testdata/9/readme.md create mode 100644 internal/cmdtest/test_cmd.go diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go index a04bcb9149d..2c5753f68d5 100644 --- a/accounts/abi/bind/backends/simulated.go +++ b/accounts/abi/bind/backends/simulated.go @@ -667,7 +667,8 @@ func (b *SimulatedBackend) callContract(_ context.Context, call ethereum.CallMsg msg := callMsg{call} txContext := core.NewEVMTxContext(msg) - evmContext := core.NewEVMBlockContext(block.Header(), b.getHeader, b.m.Engine, nil, b.contractHasTEVM) + header := block.Header() + evmContext := core.NewEVMBlockContext(header, core.GetHashFn(header, b.getHeader), b.m.Engine, nil, b.contractHasTEVM) // Create a new environment which holds all relevant information // about the transaction and calling mechanisms. 
vmEnv := vm.NewEVM(evmContext, txContext, statedb, b.m.ChainConfig, vm.Config{}) @@ -696,7 +697,7 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx types.Transac b.pendingState.Prepare(tx.Hash(), common.Hash{}, len(b.pendingBlock.Transactions())) //fmt.Printf("==== Start producing block %d, header: %d\n", b.pendingBlock.NumberU64(), b.pendingHeader.Number.Uint64()) if _, _, err := core.ApplyTransaction( - b.m.ChainConfig, b.getHeader, b.m.Engine, + b.m.ChainConfig, core.GetHashFn(b.pendingHeader, b.getHeader), b.m.Engine, &b.pendingHeader.Coinbase, b.gasPool, b.pendingState, state.NewNoopWriter(), b.pendingHeader, tx, diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go index 1ef1457c2c7..0bd33bc8a43 100644 --- a/cmd/evm/internal/t8ntool/execution.go +++ b/cmd/evm/internal/t8ntool/execution.go @@ -17,29 +17,19 @@ package t8ntool import ( - "context" "encoding/binary" - "fmt" "math/big" "github.com/holiman/uint256" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/memdb" - "github.com/ledgerwatch/log/v3" - "golang.org/x/crypto/sha3" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/math" - "github.com/ledgerwatch/erigon/consensus/misc" + "github.com/ledgerwatch/erigon/consensus/ethash" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/state" - "github.com/ledgerwatch/erigon/core/systemcontracts" "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/core/vm" - "github.com/ledgerwatch/erigon/crypto" "github.com/ledgerwatch/erigon/params" - "github.com/ledgerwatch/erigon/rlp" - "github.com/ledgerwatch/erigon/turbo/trie" ) type Prestate struct { @@ -47,18 +37,6 @@ type Prestate struct { Pre core.GenesisAlloc `json:"pre"` } -// ExecutionResult contains the execution status after running a state test, any -// error that might have occurred and a dump of the final state if requested. 
-type ExecutionResult struct { - StateRoot common.Hash `json:"stateRoot"` - TxRoot common.Hash `json:"txRoot"` - ReceiptRoot common.Hash `json:"receiptRoot"` - LogsHash common.Hash `json:"logsHash"` - Bloom types.Bloom `json:"logsBloom" gencodec:"required"` - Receipts types.Receipts `json:"receipts"` - Rejected []*rejectedTx `json:"rejected,omitempty"` -} - type ommer struct { Delta uint64 `json:"delta"` Address common.Address `json:"address"` @@ -66,226 +44,36 @@ type ommer struct { //go:generate gencodec -type stEnv -field-override stEnvMarshaling -out gen_stenv.go type stEnv struct { - Coinbase common.Address `json:"currentCoinbase" gencodec:"required"` - Difficulty *big.Int `json:"currentDifficulty" gencodec:"required"` - GasLimit uint64 `json:"currentGasLimit" gencodec:"required"` - Number uint64 `json:"currentNumber" gencodec:"required"` - Timestamp uint64 `json:"currentTimestamp" gencodec:"required"` - BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` - Ommers []ommer `json:"ommers,omitempty"` - BaseFee *big.Int `json:"currentBaseFee,omitempty"` - Random *common.Hash `json:"currentRandom,omitempty"` -} - -type rejectedTx struct { - Index int `json:"index"` - Err string `json:"error"` + Coinbase common.Address `json:"currentCoinbase" gencodec:"required"` + Difficulty *big.Int `json:"currentDifficulty"` + Random *big.Int `json:"currentRandom"` + ParentDifficulty *big.Int `json:"parentDifficulty"` + GasLimit uint64 `json:"currentGasLimit" gencodec:"required"` + Number uint64 `json:"currentNumber" gencodec:"required"` + Timestamp uint64 `json:"currentTimestamp" gencodec:"required"` + ParentTimestamp uint64 `json:"parentTimestamp,omitempty"` + BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` + Ommers []ommer `json:"ommers,omitempty"` + BaseFee *big.Int `json:"currentBaseFee,omitempty"` + ParentUncleHash common.Hash `json:"parentUncleHash"` } type stEnvMarshaling struct { - Coinbase 
common.UnprefixedAddress - Difficulty *math.HexOrDecimal256 - GasLimit math.HexOrDecimal64 - Number math.HexOrDecimal64 - Timestamp math.HexOrDecimal64 - BaseFee *math.HexOrDecimal256 -} - -// Apply applies a set of transactions to a pre-state -func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, - txs types.Transactions, miningReward int64, - getTracerFn func(txIndex int, txHash common.Hash) (tracer vm.Tracer, err error)) (kv.RwDB, *ExecutionResult, error) { - - // Capture errors for BLOCKHASH operation, if we haven't been supplied the - // required blockhashes - var hashError error - getHash := func(num uint64) common.Hash { - if pre.Env.BlockHashes == nil { - hashError = fmt.Errorf("getHash(%d) invoked, no blockhashes provided", num) - return common.Hash{} - } - h, ok := pre.Env.BlockHashes[math.HexOrDecimal64(num)] - if !ok { - hashError = fmt.Errorf("getHash(%d) invoked, blockhash for that block not provided", num) - } - return h - } - db := memdb.New() - - tx, err := db.BeginRw(context.Background()) - if err != nil { - return nil, nil, err - } - defer tx.Rollback() - - var ( - rules0 = chainConfig.Rules(0) - rules1 = chainConfig.Rules(1) - rules = chainConfig.Rules(pre.Env.Number) - ibs = MakePreState(rules0, tx, pre.Pre) - signer = types.MakeSigner(chainConfig, pre.Env.Number) - gaspool = new(core.GasPool) - blockHash = common.Hash{0x13, 0x37} - rejectedTxs []*rejectedTx - includedTxs types.Transactions - gasUsed = uint64(0) - receipts = make(types.Receipts, 0) - txIndex = 0 - ) - gaspool.AddGas(pre.Env.GasLimit) - - difficulty := new(big.Int) - if pre.Env.Random == nil { - difficulty = pre.Env.Difficulty - } else { - // We are on POS hence difficulty opcode is now supplant with RANDOM - random := pre.Env.Random.Bytes() - difficulty.SetBytes(random) - } - vmContext := vm.BlockContext{ - CanTransfer: core.CanTransfer, - Transfer: core.Transfer, - Coinbase: pre.Env.Coinbase, - BlockNumber: pre.Env.Number, - ContractHasTEVM: 
func(common.Hash) (bool, error) { return false, nil }, - Time: pre.Env.Timestamp, - Difficulty: difficulty, - GasLimit: pre.Env.GasLimit, - GetHash: getHash, - } - // If currentBaseFee is defined, add it to the vmContext. - if pre.Env.BaseFee != nil { - vmContext.BaseFee = new(uint256.Int) - overflow := vmContext.BaseFee.SetFromBig(pre.Env.BaseFee) - if overflow { - return nil, nil, fmt.Errorf("pre.Env.BaseFee higher than 2^256-1") - } - } - // If DAO is supported/enabled, we need to handle it here. In geth 'proper', it's - // done in StateProcessor.Process(block, ...), right before transactions are applied. - if chainConfig.DAOForkSupport && - chainConfig.DAOForkBlock != nil && - chainConfig.DAOForkBlock.Cmp(new(big.Int).SetUint64(pre.Env.Number)) == 0 { - misc.ApplyDAOHardFork(ibs) - } - systemcontracts.UpgradeBuildInSystemContract(chainConfig, new(big.Int).SetUint64(pre.Env.Number), ibs) - - for i, txn := range txs { - msg, err := txn.AsMessage(*signer, pre.Env.BaseFee, rules) - if err != nil { - log.Warn("rejected txn", "index", i, "hash", txn.Hash(), "err", err) - rejectedTxs = append(rejectedTxs, &rejectedTx{i, err.Error()}) - continue - } - tracer, err := getTracerFn(txIndex, txn.Hash()) - if err != nil { - return nil, nil, err - } - vmConfig.Tracer = tracer - vmConfig.Debug = (tracer != nil) - ibs.Prepare(txn.Hash(), blockHash, txIndex) - txContext := core.NewEVMTxContext(msg) - snapshot := ibs.Snapshot() - evm := vm.NewEVM(vmContext, txContext, ibs, chainConfig, vmConfig) - - // (ret []byte, usedGas uint64, failed bool, err error) - msgResult, err := core.ApplyMessage(evm, msg, gaspool, true /* refunds */, false /* gasBailout */) - if err != nil { - ibs.RevertToSnapshot(snapshot) - log.Info("rejected txn", "index", i, "hash", txn.Hash(), "from", msg.From(), "err", err) - rejectedTxs = append(rejectedTxs, &rejectedTx{i, err.Error()}) - continue - } - includedTxs = append(includedTxs, txn) - if hashError != nil { - return nil, nil, 
NewError(ErrorMissingBlockhash, hashError) - } - gasUsed += msgResult.UsedGas - - // Receipt: - { - // Create a new receipt for the transaction, storing the intermediate root and - // gas used by the txn. - receipt := &types.Receipt{Type: txn.Type(), CumulativeGasUsed: gasUsed} - if msgResult.Failed() { - receipt.Status = types.ReceiptStatusFailed - } else { - receipt.Status = types.ReceiptStatusSuccessful - } - receipt.TxHash = txn.Hash() - receipt.GasUsed = msgResult.UsedGas - - // If the transaction created a contract, store the creation address in the receipt. - if msg.To() == nil { - receipt.ContractAddress = crypto.CreateAddress(evm.TxContext().Origin, txn.GetNonce()) - } - - // Set the receipt logs and create a bloom for filtering - receipt.Logs = ibs.GetLogs(txn.Hash()) - receipt.Bloom = types.CreateBloom(types.Receipts{receipt}) - // These three are non-consensus fields: - //receipt.BlockHash - //receipt.BlockNumber - receipt.TransactionIndex = uint(txIndex) - receipts = append(receipts, receipt) - } - - txIndex++ - } - // Add mining reward? - if miningReward > 0 { - // Add mining reward. The mining reward may be `0`, which only makes a difference in the cases - // where - // - the coinbase suicided, or - // - there are only 'bad' transactions, which aren't executed. 
In those cases, - // the coinbase gets no txfee, so isn't created, and thus needs to be touched - var ( - blockReward = uint256.NewInt(uint64(miningReward)) - minerReward = uint256.NewInt(0).Set(blockReward) - perOmmer = uint256.NewInt(0).Div(blockReward, uint256.NewInt(32)) - ) - for _, ommer := range pre.Env.Ommers { - // Add 1/32th for each ommer included - minerReward.Add(minerReward, perOmmer) - // Add (8-delta)/8 - reward := uint256.NewInt(8) - reward.Sub(reward, uint256.NewInt(ommer.Delta)) - reward.Mul(reward, blockReward) - reward.Div(reward, uint256.NewInt(8)) - ibs.AddBalance(ommer.Address, reward) - } - ibs.AddBalance(pre.Env.Coinbase, minerReward) - } - - // Commit block - var root common.Hash - if err = ibs.FinalizeTx(rules1, state.NewPlainStateWriter(tx, tx, 1)); err != nil { - return nil, nil, err - } - root, err = trie.CalcRoot("", tx) - if err != nil { - return nil, nil, err - } - if err = tx.Commit(); err != nil { - return nil, nil, err - } - - execRs := &ExecutionResult{ - StateRoot: root, - TxRoot: types.DeriveSha(includedTxs), - ReceiptRoot: types.DeriveSha(receipts), - Bloom: types.CreateBloom(receipts), - LogsHash: rlpHash(ibs.Logs()), - Receipts: receipts, - Rejected: rejectedTxs, - } - return db, execRs, nil + Coinbase common.UnprefixedAddress + Difficulty *math.HexOrDecimal256 + Random *math.HexOrDecimal256 + ParentDifficulty *math.HexOrDecimal256 + GasLimit math.HexOrDecimal64 + Number math.HexOrDecimal64 + Timestamp math.HexOrDecimal64 + ParentTimestamp math.HexOrDecimal64 + BaseFee *math.HexOrDecimal256 } -func MakePreState(chainRules *params.Rules, tx kv.RwTx, accounts core.GenesisAlloc) *state.IntraBlockState { +func MakePreState(chainRules *params.Rules, tx kv.RwTx, accounts core.GenesisAlloc) (*state.PlainStateReader, *state.PlainStateWriter) { var blockNr uint64 = 0 - r, _ := state.NewPlainStateReader(tx), state.NewPlainStateWriter(tx, tx, blockNr) - statedb := state.New(r) + stateReader, stateWriter := 
state.NewPlainStateReader(tx), state.NewPlainStateWriter(tx, tx, blockNr) + statedb := state.New(stateReader) //ibs for addr, a := range accounts { statedb.SetCode(addr, a.Code) statedb.SetNonce(addr, a.Nonce) @@ -299,7 +87,6 @@ func MakePreState(chainRules *params.Rules, tx kv.RwTx, accounts core.GenesisAll if len(a.Code) > 0 || len(a.Storage) > 0 { statedb.SetIncarnation(addr, state.FirstContractIncarnation) - var b [8]byte binary.BigEndian.PutUint64(b[:], state.FirstContractIncarnation) tx.Put(kv.IncarnationMap, addr[:], b[:]) @@ -312,12 +99,25 @@ func MakePreState(chainRules *params.Rules, tx kv.RwTx, accounts core.GenesisAll if err := statedb.CommitBlock(chainRules, state.NewPlainStateWriter(tx, tx, blockNr+1)); err != nil { panic(err) } - return statedb + return stateReader, stateWriter } -func rlpHash(x interface{}) (h common.Hash) { - hw := sha3.NewLegacyKeccak256() - rlp.Encode(hw, x) //nolint:errcheck - hw.Sum(h[:0]) - return h +// calcDifficulty is based on ethash.CalcDifficulty. This method is used in case +// the caller does not provide an explicit difficulty, but instead provides only +// parent timestamp + difficulty. +// Note: this method only works for ethash engine. 
+func calcDifficulty(config *params.ChainConfig, number, currentTime, parentTime uint64, + parentDifficulty *big.Int, parentUncleHash common.Hash) *big.Int { + uncleHash := parentUncleHash + if uncleHash == (common.Hash{}) { + uncleHash = types.EmptyUncleHash + } + parent := &types.Header{ + ParentHash: common.Hash{}, + UncleHash: uncleHash, + Difficulty: parentDifficulty, + Number: new(big.Int).SetUint64(number - 1), + Time: parentTime, + } + return ethash.CalcDifficulty(config, currentTime, parent.Time, parent.Difficulty, number-1, parent.UncleHash) } diff --git a/cmd/evm/internal/t8ntool/flags.go b/cmd/evm/internal/t8ntool/flags.go index 7a5da94d6f8..4a918b048fc 100644 --- a/cmd/evm/internal/t8ntool/flags.go +++ b/cmd/evm/internal/t8ntool/flags.go @@ -83,11 +83,6 @@ var ( Usage: "`stdin` or file name of where to find the transactions to apply.", Value: "txs.json", } - RewardFlag = cli.Int64Flag{ - Name: "state.reward", - Usage: "Mining reward. Set to -1 to disable", - Value: 0, - } ChainIDFlag = cli.Int64Flag{ Name: "state.chainid", Usage: "ChainID to use", @@ -103,7 +98,7 @@ var ( "\n\tSyntax (+ExtraEip)", strings.Join(tests.AvailableForks(), "\n\t "), strings.Join(vm.ActivateableEips(), ", ")), - Value: "Istanbul", + Value: "ArrowGlacier", } VerbosityFlag = cli.IntFlag{ Name: "verbosity", diff --git a/cmd/evm/internal/t8ntool/gen_stenv.go b/cmd/evm/internal/t8ntool/gen_stenv.go index 88dfc4d3cb2..677948e5927 100644 --- a/cmd/evm/internal/t8ntool/gen_stenv.go +++ b/cmd/evm/internal/t8ntool/gen_stenv.go @@ -16,41 +16,50 @@ var _ = (*stEnvMarshaling)(nil) // MarshalJSON marshals as JSON. 
func (s stEnv) MarshalJSON() ([]byte, error) { type stEnv struct { - Coinbase common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"` - Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"` - GasLimit math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"` - Number math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"` - Timestamp math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"` - BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` - Ommers []ommer `json:"ommers,omitempty"` - BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` - Random *common.Hash `json:"currentRandom,omitempty"` + Coinbase common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"` + Difficulty *math.HexOrDecimal256 `json:"currentDifficulty"` + Random *math.HexOrDecimal256 `json:"currentRandom"` + ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"` + GasLimit math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"` + Number math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"` + Timestamp math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"` + ParentTimestamp math.HexOrDecimal64 `json:"parentTimestamp,omitempty"` + BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` + Ommers []ommer `json:"ommers,omitempty"` + BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` + ParentUncleHash common.Hash `json:"parentUncleHash"` } var enc stEnv enc.Coinbase = common.UnprefixedAddress(s.Coinbase) enc.Difficulty = (*math.HexOrDecimal256)(s.Difficulty) + enc.Random = (*math.HexOrDecimal256)(s.Random) + enc.ParentDifficulty = (*math.HexOrDecimal256)(s.ParentDifficulty) enc.GasLimit = math.HexOrDecimal64(s.GasLimit) enc.Number = math.HexOrDecimal64(s.Number) enc.Timestamp = math.HexOrDecimal64(s.Timestamp) + enc.ParentTimestamp = math.HexOrDecimal64(s.ParentTimestamp) enc.BlockHashes = s.BlockHashes enc.Ommers = 
s.Ommers enc.BaseFee = (*math.HexOrDecimal256)(s.BaseFee) - enc.Random = s.Random + enc.ParentUncleHash = s.ParentUncleHash return json.Marshal(&enc) } // UnmarshalJSON unmarshals from JSON. func (s *stEnv) UnmarshalJSON(input []byte) error { type stEnv struct { - Coinbase *common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"` - Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"` - GasLimit *math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"` - Number *math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"` - Timestamp *math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"` - BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` - Ommers []ommer `json:"ommers,omitempty"` - BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` - Random *common.Hash `json:"currentRandom,omitempty"` + Coinbase *common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"` + Difficulty *math.HexOrDecimal256 `json:"currentDifficulty"` + Random *math.HexOrDecimal256 `json:"currentRandom"` + ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"` + GasLimit *math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"` + Number *math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"` + Timestamp *math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"` + ParentTimestamp *math.HexOrDecimal64 `json:"parentTimestamp,omitempty"` + BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` + Ommers []ommer `json:"ommers,omitempty"` + BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` + ParentUncleHash *common.Hash `json:"parentUncleHash"` } var dec stEnv if err := json.Unmarshal(input, &dec); err != nil { @@ -60,10 +69,15 @@ func (s *stEnv) UnmarshalJSON(input []byte) error { return errors.New("missing required field 'currentCoinbase' for stEnv") } s.Coinbase = common.Address(*dec.Coinbase) - if dec.Difficulty == nil 
{ - return errors.New("missing required field 'currentDifficulty' for stEnv") + if dec.Difficulty != nil { + s.Difficulty = (*big.Int)(dec.Difficulty) + } + if dec.Random != nil { + s.Random = (*big.Int)(dec.Random) + } + if dec.ParentDifficulty != nil { + s.ParentDifficulty = (*big.Int)(dec.ParentDifficulty) } - s.Difficulty = (*big.Int)(dec.Difficulty) if dec.GasLimit == nil { return errors.New("missing required field 'currentGasLimit' for stEnv") } @@ -76,6 +90,9 @@ func (s *stEnv) UnmarshalJSON(input []byte) error { return errors.New("missing required field 'currentTimestamp' for stEnv") } s.Timestamp = uint64(*dec.Timestamp) + if dec.ParentTimestamp != nil { + s.ParentTimestamp = uint64(*dec.ParentTimestamp) + } if dec.BlockHashes != nil { s.BlockHashes = dec.BlockHashes } @@ -85,8 +102,8 @@ func (s *stEnv) UnmarshalJSON(input []byte) error { if dec.BaseFee != nil { s.BaseFee = (*big.Int)(dec.BaseFee) } - if dec.Random != nil { - s.Random = dec.Random + if dec.ParentUncleHash != nil { + s.ParentUncleHash = *dec.ParentUncleHash } return nil } diff --git a/cmd/evm/internal/t8ntool/transition.go b/cmd/evm/internal/t8ntool/transition.go index a6547a7a3d6..230efb89be4 100644 --- a/cmd/evm/internal/t8ntool/transition.go +++ b/cmd/evm/internal/t8ntool/transition.go @@ -17,6 +17,7 @@ package t8ntool import ( + "context" "crypto/ecdsa" "encoding/json" "errors" @@ -27,9 +28,13 @@ import ( "path/filepath" "github.com/holiman/uint256" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/memdb" "github.com/ledgerwatch/erigon/cmd/rpcdaemon/commands" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/hexutil" + "github.com/ledgerwatch/erigon/common/math" + "github.com/ledgerwatch/erigon/consensus/ethash" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" @@ -38,6 +43,7 @@ import ( "github.com/ledgerwatch/erigon/params" 
"github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/tests" + "github.com/ledgerwatch/erigon/turbo/trie" "github.com/ledgerwatch/log/v3" "github.com/urfave/cli" @@ -67,10 +73,15 @@ func (n *NumberedError) Error() string { return fmt.Sprintf("ERROR(%d): %v", n.errorCode, n.err.Error()) } -func (n *NumberedError) Code() int { +func (n *NumberedError) ExitCode() int { return n.errorCode } +// compile-time conformance test +var ( + _ cli.ExitCoder = (*NumberedError)(nil) +) + type input struct { Alloc core.GenesisAlloc `json:"alloc,omitempty"` Env *stEnv `json:"env,omitempty"` @@ -79,16 +90,8 @@ type input struct { func Main(ctx *cli.Context) error { log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StderrHandler)) - /* - // Configure the go-ethereum logger - glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false))) - glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name))) - log.Root().SetHandler(glogger) - */ - var ( err error - tracer vm.Tracer baseDir = "" ) var getTracer func(txIndex int, txHash common.Hash) (vm.Tracer, error) @@ -162,6 +165,7 @@ func Main(ctx *cli.Context) error { return NewError(ErrorJson, fmt.Errorf("failed unmarshaling alloc-file: %v", err)) } } + prestate.Pre = inputData.Alloc // Set the block environment @@ -181,8 +185,8 @@ func Main(ctx *cli.Context) error { prestate.Env = *inputData.Env vmConfig := vm.Config{ - Tracer: tracer, - Debug: (tracer != nil), + Tracer: nil, + Debug: ctx.Bool(TraceFlag.Name), } // Construct the chainconfig var chainConfig *params.ChainConfig @@ -216,25 +220,90 @@ func Main(ctx *cli.Context) error { return NewError(ErrorJson, fmt.Errorf("failed signing transactions: %v", err)) } + eip1559 := chainConfig.IsLondon(prestate.Env.Number) // Sanity check, to not `panic` in state_transition - if chainConfig.IsLondon(prestate.Env.Number) { + if eip1559 { if prestate.Env.BaseFee == nil { return NewError(ErrorVMConfig, errors.New("EIP-1559 config but missing 
'currentBaseFee' in env section")) } } - // Run the test and aggregate the result - _, result, err1 := prestate.Apply(vmConfig, chainConfig, txs, ctx.Int64(RewardFlag.Name), getTracer) - if err1 != nil { - return err1 + // Sanity check, to not `panic` in state_transition + if prestate.Env.Random != nil && !eip1559 { + return NewError(ErrorVMConfig, errors.New("can only apply RANDOM on top of London chainrules")) + } + if env := prestate.Env; env.Difficulty == nil { + // If difficulty was not provided by caller, we need to calculate it. + switch { + case env.ParentDifficulty == nil: + return NewError(ErrorVMConfig, errors.New("currentDifficulty was not provided, and cannot be calculated due to missing parentDifficulty")) + case env.Number == 0: + return NewError(ErrorVMConfig, errors.New("currentDifficulty needs to be provided for block number 0")) + case env.Timestamp <= env.ParentTimestamp: + return NewError(ErrorVMConfig, fmt.Errorf("currentDifficulty cannot be calculated -- currentTime (%d) needs to be after parent time (%d)", + env.Timestamp, env.ParentTimestamp)) + } + prestate.Env.Difficulty = calcDifficulty(chainConfig, env.Number, env.Timestamp, + env.ParentTimestamp, env.ParentDifficulty, env.ParentUncleHash) + } + + // manufacture block from above inputs + header := NewHeader(prestate.Env, chainConfig.IsLondon(prestate.Env.Number)) + + var ommerHeaders = make([]*types.Header, len(prestate.Env.Ommers)) + header.Number.Add(header.Number, big.NewInt(int64(len(prestate.Env.Ommers)))) + for i, ommer := range prestate.Env.Ommers { + var ommerN big.Int + ommerN.SetUint64(header.Number.Uint64() - ommer.Delta) + ommerHeaders[i] = &types.Header{Coinbase: ommer.Address, Number: &ommerN} + } + block := types.NewBlock(header, txs, ommerHeaders, nil) + + var hashError error + getHash := func(num uint64) common.Hash { + if prestate.Env.BlockHashes == nil { + hashError = fmt.Errorf("getHash(%d) invoked, no blockhashes provided", num) + return common.Hash{} + } + h, ok := 
prestate.Env.BlockHashes[math.HexOrDecimal64(num)] + if !ok { + hashError = fmt.Errorf("getHash(%d) invoked, blockhash for that block not provided", num) + } + return h + } + db := memdb.New() + + tx, err := db.BeginRw(context.Background()) + if err != nil { + return err + } + + reader, writer := MakePreState(chainConfig.Rules(0), tx, prestate.Pre) + engine := ethash.NewFaker() + + result, err := core.ExecuteBlockEphemerally(chainConfig, &vmConfig, getHash, engine, block, reader, writer, nil, nil, nil, true, getTracer) + + if hashError != nil { + return NewError(ErrorMissingBlockhash, fmt.Errorf("blockhash error: %v", err)) } + + if err != nil { + return fmt.Errorf("error on EBE: %w", err) + } + + // state root calculation + root, err := CalculateStateRoot(tx) + if err != nil { + return err + } + result.StateRoot = *root + + // Dump the execution result body, _ := rlp.EncodeToBytes(txs) - // Dump the excution result collector := make(Alloc) - // TODO: Where DumpToCollector is declared? - //state.DumpToCollector(collector, false, false, false, nil, -1) + dumper := state.NewDumper(tx, prestate.Env.Number) + dumper.DumpToCollector(collector, false, false, common.Address{}, 0) return dispatchOutput(ctx, baseDir, result, collector, body) - } // txWithKey is a helper-struct, to allow us to use the types.Transaction along with @@ -261,8 +330,7 @@ func (t *txWithKey) UnmarshalJSON(input []byte) error { return err } } - gasPrice, value := uint256.NewInt(0), uint256.NewInt(0) - var overflow bool + // Now, read the transaction itself var txJson commands.RPCTransaction @@ -270,22 +338,104 @@ func (t *txWithKey) UnmarshalJSON(input []byte) error { return err } + // assemble transaction + tx, err := getTransaction(txJson) + if err != nil { + return err + } + t.tx = tx + return nil +} + +func getTransaction(txJson commands.RPCTransaction) (types.Transaction, error) { + gasPrice, value := uint256.NewInt(0), uint256.NewInt(0) + var overflow bool + var chainId *uint256.Int + if 
txJson.Value != nil { value, overflow = uint256.FromBig((*big.Int)(txJson.Value)) if overflow { - return fmt.Errorf("value field caused an overflow (uint256)") + return nil, fmt.Errorf("value field caused an overflow (uint256)") } } if txJson.GasPrice != nil { gasPrice, overflow = uint256.FromBig((*big.Int)(txJson.GasPrice)) if overflow { - return fmt.Errorf("gasPrice field caused an overflow (uint256)") + return nil, fmt.Errorf("gasPrice field caused an overflow (uint256)") } } - // assemble transaction - t.tx = types.NewTransaction(uint64(txJson.Nonce), *txJson.To, value, uint64(txJson.Gas), gasPrice, txJson.Input) - return nil + + if txJson.ChainID != nil { + chainId, overflow = uint256.FromBig((*big.Int)(txJson.ChainID)) + if overflow { + return nil, fmt.Errorf("chainId field caused an overflow (uint256)") + } + } + + switch txJson.Type { + case types.LegacyTxType, types.AccessListTxType: + var toAddr common.Address = common.Address{} + if txJson.To != nil { + toAddr = *txJson.To + } + legacyTx := types.NewTransaction(uint64(txJson.Nonce), toAddr, value, uint64(txJson.Gas), gasPrice, txJson.Input) + legacyTx.V.SetFromBig(txJson.V.ToInt()) + legacyTx.S.SetFromBig(txJson.S.ToInt()) + legacyTx.R.SetFromBig(txJson.R.ToInt()) + + if txJson.Type == types.AccessListTxType { + accessListTx := types.AccessListTx{ + LegacyTx: *legacyTx, + ChainID: chainId, + AccessList: *txJson.Accesses, + } + + return &accessListTx, nil + } else { + return legacyTx, nil + } + + case types.DynamicFeeTxType: + var tip *uint256.Int + var feeCap *uint256.Int + if txJson.Tip != nil { + tip, overflow = uint256.FromBig((*big.Int)(txJson.Tip)) + if overflow { + return nil, fmt.Errorf("maxPriorityFeePerGas field caused an overflow (uint256)") + } + } + + if txJson.FeeCap != nil { + feeCap, overflow = uint256.FromBig((*big.Int)(txJson.FeeCap)) + if overflow { + return nil, fmt.Errorf("maxFeePerGas field caused an overflow (uint256)") + } + } + + dynamicFeeTx := types.DynamicFeeTransaction{ + 
CommonTx: types.CommonTx{ + ChainID: chainId, + Nonce: uint64(txJson.Nonce), + To: txJson.To, + Value: value, + Gas: uint64(txJson.Gas), + Data: txJson.Input, + }, + Tip: tip, + FeeCap: feeCap, + AccessList: *txJson.Accesses, + } + + dynamicFeeTx.V.SetFromBig(txJson.V.ToInt()) + dynamicFeeTx.S.SetFromBig(txJson.S.ToInt()) + dynamicFeeTx.R.SetFromBig(txJson.R.ToInt()) + + return &dynamicFeeTx, nil + + default: + return nil, nil + } } // signUnsignedTransactions converts the input txs to canonical transactions. @@ -358,7 +508,7 @@ func saveFile(baseDir, filename string, data interface{}) error { // dispatchOutput writes the output data to either stderr or stdout, or to the specified // files -func dispatchOutput(ctx *cli.Context, baseDir string, result *ExecutionResult, alloc Alloc, body hexutil.Bytes) error { +func dispatchOutput(ctx *cli.Context, baseDir string, result *core.EphemeralExecResult, alloc Alloc, body hexutil.Bytes) error { stdOutObject := make(map[string]interface{}) stdErrObject := make(map[string]interface{}) dispatch := func(baseDir, fName, name string, obj interface{}) error { @@ -401,3 +551,65 @@ func dispatchOutput(ctx *cli.Context, baseDir string, result *ExecutionResult, a } return nil } + +func NewHeader(env stEnv, Eip1559 bool) *types.Header { + var header types.Header + header.UncleHash = env.ParentUncleHash + header.Coinbase = env.Coinbase + header.Difficulty = env.Difficulty + header.Number = big.NewInt(int64(env.Number)) + header.GasLimit = env.GasLimit + header.Time = env.Timestamp + header.BaseFee = env.BaseFee + header.Eip1559 = Eip1559 + + return &header +} + +func CalculateStateRoot(tx kv.RwTx) (*common.Hash, error) { + // Generate hashed state + c, err := tx.RwCursor(kv.PlainState) + if err != nil { + return nil, err + } + h := common.NewHasher() + defer common.ReturnHasherToPool(h) + for k, v, err := c.First(); k != nil; k, v, err = c.Next() { + if err != nil { + return nil, fmt.Errorf("interate over plain state: %w", err) + } + 
var newK []byte + if len(k) == common.AddressLength { + newK = make([]byte, common.HashLength) + } else { + newK = make([]byte, common.HashLength*2+common.IncarnationLength) + } + h.Sha.Reset() + //nolint:errcheck + h.Sha.Write(k[:common.AddressLength]) + //nolint:errcheck + h.Sha.Read(newK[:common.HashLength]) + if len(k) > common.AddressLength { + copy(newK[common.HashLength:], k[common.AddressLength:common.AddressLength+common.IncarnationLength]) + h.Sha.Reset() + //nolint:errcheck + h.Sha.Write(k[common.AddressLength+common.IncarnationLength:]) + //nolint:errcheck + h.Sha.Read(newK[common.HashLength+common.IncarnationLength:]) + if err = tx.Put(kv.HashedStorage, newK, common.CopyBytes(v)); err != nil { + return nil, fmt.Errorf("insert hashed key: %w", err) + } + } else { + if err = tx.Put(kv.HashedAccounts, newK, common.CopyBytes(v)); err != nil { + return nil, fmt.Errorf("insert hashed key: %w", err) + } + } + } + c.Close() + root, err := trie.CalcRoot("", tx) + if err != nil { + return nil, err + } + + return &root, nil +} diff --git a/cmd/evm/main.go b/cmd/evm/main.go index 451c0edbd8d..8d6bb1ef5f6 100644 --- a/cmd/evm/main.go +++ b/cmd/evm/main.go @@ -148,7 +148,6 @@ var stateTransitionCommand = cli.Command{ t8ntool.InputTxsFlag, t8ntool.ForknameFlag, t8ntool.ChainIDFlag, - t8ntool.RewardFlag, t8ntool.VerbosityFlag, }, } @@ -192,7 +191,7 @@ func main() { if err := app.Run(os.Args); err != nil { code := 1 if ec, ok := err.(*t8ntool.NumberedError); ok { - code = ec.Code() + code = ec.ExitCode() } fmt.Fprintln(os.Stderr, err) os.Exit(code) diff --git a/cmd/evm/t8n_test.go b/cmd/evm/t8n_test.go new file mode 100644 index 00000000000..a2868e080b8 --- /dev/null +++ b/cmd/evm/t8n_test.go @@ -0,0 +1,247 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + "reflect" + "strings" + "testing" + + "github.com/docker/docker/pkg/reexec" + "github.com/ledgerwatch/erigon/internal/cmdtest" +) + +func TestMain(m *testing.M) { + // Run the app if we've been exec'd 
as "ethkey-test" in runEthkey. + reexec.Register("evm-test", func() { + if err := app.Run(os.Args); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + os.Exit(0) + }) + // check if we have been reexec'd + if reexec.Init() { + return + } + os.Exit(m.Run()) +} + +type testT8n struct { + *cmdtest.TestCmd +} + +type t8nInput struct { + inAlloc string + inTxs string + inEnv string + stFork string +} + +func (args *t8nInput) get(base string) []string { + var out []string + if opt := args.inAlloc; opt != "" { + out = append(out, "--input.alloc") + out = append(out, fmt.Sprintf("%v/%v", base, opt)) + } + if opt := args.inTxs; opt != "" { + out = append(out, "--input.txs") + out = append(out, fmt.Sprintf("%v/%v", base, opt)) + } + if opt := args.inEnv; opt != "" { + out = append(out, "--input.env") + out = append(out, fmt.Sprintf("%v/%v", base, opt)) + } + if opt := args.stFork; opt != "" { + out = append(out, "--state.fork", opt) + } + return out +} + +type t8nOutput struct { + alloc bool + result bool + body bool +} + +func (args *t8nOutput) get() (out []string) { + if args.body { + out = append(out, "--output.body", "stdout") + } else { + out = append(out, "--output.body", "") // empty means ignore + } + if args.result { + out = append(out, "--output.result", "stdout") + } else { + out = append(out, "--output.result", "") + } + if args.alloc { + out = append(out, "--output.alloc", "stdout") + } else { + out = append(out, "--output.alloc", "") + } + return out +} + +func TestT8n(t *testing.T) { + tt := new(testT8n) + tt.TestCmd = cmdtest.NewTestCmd(t, tt) + for i, tc := range []struct { + base string + input t8nInput + output t8nOutput + expExitCode int + expOut string + }{ + { // Test exit (3) on bad config + base: "./testdata/1", + input: t8nInput{ + "alloc.json", "txs.json", "env.json", "Frontier+1346", + }, + output: t8nOutput{alloc: true, result: true}, + expExitCode: 3, + }, + { + base: "./testdata/1", + input: t8nInput{ + "alloc.json", "txs.json", 
"env.json", "Byzantium", + }, + output: t8nOutput{alloc: true, result: true}, + expOut: "exp.json", + }, + { // blockhash test + base: "./testdata/3", + input: t8nInput{ + "alloc.json", "txs.json", "env.json", "Berlin", + }, + output: t8nOutput{alloc: true, result: true}, + expOut: "exp.json", + }, + { // missing blockhash test + base: "./testdata/4", + input: t8nInput{ + "alloc.json", "txs.json", "env.json", "Berlin", + }, + expExitCode: 4, + }, + { // Uncle test + base: "./testdata/5", + input: t8nInput{ + "alloc.json", "txs.json", "env.json", "Byzantium", + }, + output: t8nOutput{alloc: true, result: true}, + expOut: "exp.json", + }, + { // Dao-transition check + base: "./testdata/7", + input: t8nInput{ + "alloc.json", "txs.json", "env.json", "HomesteadToDaoAt5", + }, + expOut: "exp.json", + output: t8nOutput{alloc: true, result: true}, + }, + { // transactions with access list + base: "./testdata/8", + input: t8nInput{ + "alloc.json", "txs.json", "env.json", "Berlin", + }, + expOut: "exp.json", + output: t8nOutput{alloc: true, result: true}, + }, + { // EIP-1559 + base: "./testdata/9", + input: t8nInput{ + "alloc.json", "txs.json", "env.json", "London", + }, + expOut: "exp.json", + output: t8nOutput{alloc: true, result: true}, + }, + { // EIP-1559 + base: "./testdata/10", + input: t8nInput{ + "alloc.json", "txs.json", "env.json", "London", + }, + expOut: "exp.json", + output: t8nOutput{alloc: true, result: true}, + }, + { // missing base fees + base: "./testdata/11", + input: t8nInput{ + "alloc.json", "txs.json", "env.json", "London", + }, + expExitCode: 3, + }, + { // EIP-1559 & gasCap + base: "./testdata/12", + input: t8nInput{ + "alloc.json", "txs.json", "env.json", "London", + }, + expOut: "exp.json", + output: t8nOutput{alloc: true, result: true}, + }, + { // Difficulty calculation on London + base: "./testdata/19", + input: t8nInput{ + "alloc.json", "txs.json", "env.json", "London", + }, + expOut: "exp_london.json", + output: t8nOutput{alloc: true, 
result: true}, + }, + { // Difficulty calculation on arrow glacier + base: "./testdata/19", + input: t8nInput{ + "alloc.json", "txs.json", "env.json", "ArrowGlacier", + }, + expOut: "exp_arrowglacier.json", + output: t8nOutput{alloc: true, result: true}, + }, + } { + + args := []string{"t8n"} + args = append(args, tc.output.get()...) + args = append(args, tc.input.get(tc.base)...) + var qArgs []string // quoted args for debugging purposes + for _, arg := range args { + if len(arg) == 0 { + qArgs = append(qArgs, `""`) + } else { + qArgs = append(qArgs, arg) + } + } + tt.Logf("args: %v\n", strings.Join(qArgs, " ")) + tt.Run("evm-test", args...) + // Compare the expected output, if provided + if tc.expOut != "" { + want, err := os.ReadFile(fmt.Sprintf("%v/%v", tc.base, tc.expOut)) + if err != nil { + t.Fatalf("test %d: could not read expected output: %v", i, err) + } + have := tt.Output() + ok, err := cmpJson(have, want) + switch { + case err != nil: + t.Fatalf("test %d, json parsing failed: %v", i, err) + case !ok: + t.Fatalf("test %d: output wrong, have \n%v\nwant\n%v\n", i, string(have), string(want)) + } + } + tt.WaitExit() + if have, want := tt.ExitStatus(), tc.expExitCode; have != want { + t.Fatalf("test %d: wrong exit code, have %d, want %d", i, have, want) + } + } +} + +// cmpJson compares the JSON in two byte slices. 
+func cmpJson(a, b []byte) (bool, error) { + var j, j2 interface{} + if err := json.Unmarshal(a, &j); err != nil { + return false, err + } + if err := json.Unmarshal(b, &j2); err != nil { + return false, err + } + + return reflect.DeepEqual(j2, j), nil +} diff --git a/cmd/evm/testdata/1/exp.json b/cmd/evm/testdata/1/exp.json new file mode 100644 index 00000000000..d8094e7aa67 --- /dev/null +++ b/cmd/evm/testdata/1/exp.json @@ -0,0 +1,45 @@ +{ + "alloc": { + "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192": { + "balance": "0xfeed1a9d", + "nonce": "0x1" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x5ffd4878be161d74", + "nonce": "0xac" + }, + "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x29a2241af62ca410" + } + }, + "result": { + "stateRoot": "0xe72f10cef9b1d32a16e2f5a8d64b25dacde99efcdea460387db527486582c3f7", + "txRoot": "0xc4761fd7b87ff2364c7c60b6c5c8d02e522e815328aaea3f20e3b7b7ef52c42d", + "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [ + { + "root": "0x", + "status": "0x1", + "cumulativeGasUsed": "0x5208", + "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0x0557bacce3375c98d806609b8d5043072f0b6a8bae45ae5a67a00d3a1a18d673", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x5208", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "blockNumber": "0x1", + "transactionIndex": "0x0" + } + ], + "rejected": [ + { + "index": 1, + "error": "nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1" + } + ], + "currentDifficulty": "0x20000", + "gasUsed": "0x5208" + } +} diff --git a/cmd/evm/testdata/10/alloc.json b/cmd/evm/testdata/10/alloc.json new file mode 100644 index 00000000000..6e98e7513c4 --- /dev/null +++ b/cmd/evm/testdata/10/alloc.json @@ -0,0 +1,23 @@ +{ + "0x1111111111111111111111111111111111111111" : { + "balance" : "0x010000000000", + "code" : "0xfe", + "nonce" : "0x01", + "storage" : { + } + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { + "balance" : "0x010000000000", + "code" : "0x", + "nonce" : "0x01", + "storage" : { + } + }, + "0xd02d72e067e77158444ef2020ff2d325f929b363" : { + "balance" : "0x01000000000000", + "code" : "0x", + "nonce" : "0x01", + "storage" : { + } + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/10/env.json b/cmd/evm/testdata/10/env.json new file mode 100644 index 00000000000..3a82d46a774 --- /dev/null +++ b/cmd/evm/testdata/10/env.json @@ -0,0 +1,12 @@ +{ + "currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + 
"currentDifficulty" : "0x020000", + "currentNumber" : "0x01", + "currentTimestamp" : "0x079e", + "previousHash" : "0xcb23ee65a163121f640673b41788ee94633941405f95009999b502eedfbbfd4f", + "currentGasLimit" : "0x40000000", + "currentBaseFee" : "0x036b", + "blockHashes" : { + "0" : "0xcb23ee65a163121f640673b41788ee94633941405f95009999b502eedfbbfd4f" + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/10/exp.json b/cmd/evm/testdata/10/exp.json new file mode 100644 index 00000000000..5ab98860c77 --- /dev/null +++ b/cmd/evm/testdata/10/exp.json @@ -0,0 +1,79 @@ +{ + "alloc": { + "0x1111111111111111111111111111111111111111": { + "code": "0xfe", + "balance": "0x10000000000", + "nonce": "0x1" + }, + "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba": { + "balance": "0x1bc16d674ec80000" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x10000000000", + "nonce": "0x1" + }, + "0xd02d72e067e77158444ef2020ff2d325f929b363": { + "balance": "0xff5beffffc95", + "nonce": "0x4" + } + }, + "result": { + "stateRoot": "0x4b7b4d5dd6316b58407468a5d3cf0a18e42d3833911d3fccd80eb49273024ffa", + "txRoot": "0xda925f2306a52fa24c15d5cd212d736ee016415fd8dd0c45fd368de7917d64bb", + "receiptsRoot": "0x439a25f7fc424c10fb1f89800e4aa1df74156b137239d9ac3eaa7c911c353cd5", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [ + { + "type": "0x2", + "root": "0x", + "status": "0x0", + "cumulativeGasUsed": 
"0x10000001", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0x88980f6efcc5358d9c359663e7b9414722d430497637340ea056b076bc206701", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x10000001", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "blockNumber": "0x1", + "transactionIndex": "0x0" + }, + { + "type": "0x2", + "root": "0x", + "status": "0x0", + "cumulativeGasUsed": "0x20000001", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0xd7bf3886f4e2aef74d525ae072c680f3846f550254401b67cbfda4a233757582", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x10000000", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "blockNumber": "0x1", + "transactionIndex": "0x1" + }, + { + "type": "0x2", + "root": "0x", + "status": "0x0", + "cumulativeGasUsed": "0x30000001", + "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0x50308296760f01f1eeec7500e9e73cad67469249b1f59e9a9f55e6625a4923db", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x10000000", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "blockNumber": "0x1", + "transactionIndex": "0x2" + } + ], + "rejected": [ + { + "index": 3, + "error": "gas limit reached" + } + ], + "currentDifficulty": "0x20000", + "gasUsed": "0x30000001" + } +} diff --git a/cmd/evm/testdata/10/readme.md b/cmd/evm/testdata/10/readme.md new file mode 100644 index 00000000000..c34be80bb71 --- /dev/null +++ b/cmd/evm/testdata/10/readme.md @@ -0,0 +1,79 @@ +## EIP-1559 testing + +This test contains testcases for EIP-1559, which were reported by Ori as misbehaving. 
+ +``` +[user@work evm]$ dir=./testdata/10 && ./evm t8n --state.fork=London --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout --output.result=stdout 2>&1 +INFO [05-09|22:11:59.436] rejected tx index=3 hash=db07bf..ede1e8 from=0xd02d72E067e77158444ef2020Ff2d325f929B363 error="gas limit reached" +``` +Output: +```json +{ + "alloc": { + "0x1111111111111111111111111111111111111111": { + "code": "0xfe", + "balance": "0x10000000000", + "nonce": "0x1" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x10000000000", + "nonce": "0x1" + }, + "0xd02d72e067e77158444ef2020ff2d325f929b363": { + "balance": "0xff5beffffc95", + "nonce": "0x4" + } + }, + "result": { + "stateRoot": "0xf91a7ec08e4bfea88719aab34deabb000c86902360532b52afa9599d41f2bb8b", + "txRoot": "0xda925f2306a52fa24c15d5cd212d736ee016415fd8dd0c45fd368de7917d64bb", + "receiptRoot": "0x439a25f7fc424c10fb1f89800e4aa1df74156b137239d9ac3eaa7c911c353cd5", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [ + { + "type": "0x2", + "root": "0x", + "status": "0x0", + "cumulativeGasUsed": "0x10000001", + "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0x88980f6efcc5358d9c359663e7b9414722d430497637340ea056b076bc206701", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x10000001", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "transactionIndex": "0x0" + }, + { + "type": "0x2", + "root": "0x", + "status": "0x0", + "cumulativeGasUsed": "0x20000001", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0xd7bf3886f4e2aef74d525ae072c680f3846f550254401b67cbfda4a233757582", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x10000000", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "transactionIndex": "0x1" + }, + { + "type": "0x2", + "root": "0x", + "status": "0x0", + "cumulativeGasUsed": "0x30000001", + "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0x50308296760f01f1eeec7500e9e73cad67469249b1f59e9a9f55e6625a4923db", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x10000000", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "transactionIndex": "0x2" + } + ], + "rejected": [ + 3 + ] + } +} +``` diff --git a/cmd/evm/testdata/10/txs.json b/cmd/evm/testdata/10/txs.json new file mode 100644 index 00000000000..f7c9baa26da --- /dev/null +++ b/cmd/evm/testdata/10/txs.json @@ -0,0 +1,70 @@ +[ + { + "input" : "0x", + "gas" : "0x10000001", + "nonce" : "0x1", + "to" : "0x1111111111111111111111111111111111111111", + "value" : "0x0", + "v" : "0x0", + "r" : "0x7a45f00bcde9036b026cdf1628b023cd8a31a95c62b5e4dbbee2fa7debe668fb", + "s" : "0x3cc9d6f2cd00a045b0263f2d6dad7d60938d5d13d061af4969f95928aa934d4a", + "secretKey" : "0x41f6e321b31e72173f8ff2e292359e1862f24fba42fe6f97efaf641980eff298", + "chainId" : "0x1", + "type" : "0x2", + "maxFeePerGas" : "0xfa0", + "maxPriorityFeePerGas" : "0x0", + "accessList" : [ + ] + }, + { + "input" : "0x", + "gas" : "0x10000000", + "nonce" : "0x2", + "to" : "0x1111111111111111111111111111111111111111", + "value" : "0x0", + "v" : "0x0", + "r" : "0x4c564b94b0281a8210eeec2dd1fe2e16ff1c1903a8c3a1078d735d7f8208b2af", + "s" : "0x56432b2593e6de95db1cb997b7385217aca03f1615327e231734446b39f266d", + "secretKey" : "0x41f6e321b31e72173f8ff2e292359e1862f24fba42fe6f97efaf641980eff298", + 
"chainId" : "0x1", + "type" : "0x2", + "maxFeePerGas" : "0xfa0", + "maxPriorityFeePerGas" : "0x0", + "accessList" : [ + ] + }, + { + "input" : "0x", + "gas" : "0x10000000", + "nonce" : "0x3", + "to" : "0x1111111111111111111111111111111111111111", + "value" : "0x0", + "v" : "0x0", + "r" : "0x2ed2ef52f924f59d4a21e1f2a50d3b1109303ce5e32334a7ece9b46f4fbc2a57", + "s" : "0x2980257129cbd3da987226f323d50ba3975a834d165e0681f991b75615605c44", + "secretKey" : "0x41f6e321b31e72173f8ff2e292359e1862f24fba42fe6f97efaf641980eff298", + "chainId" : "0x1", + "type" : "0x2", + "maxFeePerGas" : "0xfa0", + "maxPriorityFeePerGas" : "0x0", + "accessList" : [ + ] + }, + { + "input" : "0x", + "gas" : "0x10000000", + "nonce" : "0x4", + "to" : "0x1111111111111111111111111111111111111111", + "value" : "0x0", + "v" : "0x0", + "r" : "0x5df7d7f8f8e15b36fc9f189cacb625040fad10398d08fc90812595922a2c49b2", + "s" : "0x565fc1803f77a84d754ffe3c5363ab54a8d93a06ea1bb9d4c73c73a282b35917", + "secretKey" : "0x41f6e321b31e72173f8ff2e292359e1862f24fba42fe6f97efaf641980eff298", + "chainId" : "0x1", + "type" : "0x2", + "maxFeePerGas" : "0xfa0", + "maxPriorityFeePerGas" : "0x0", + "accessList" : [ + ] + } +] \ No newline at end of file diff --git a/cmd/evm/testdata/11/alloc.json b/cmd/evm/testdata/11/alloc.json new file mode 100644 index 00000000000..86938230fa7 --- /dev/null +++ b/cmd/evm/testdata/11/alloc.json @@ -0,0 +1,25 @@ +{ + "0x0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6" : { + "balance" : "0x0de0b6b3a7640000", + "code" : "0x61ffff5060046000f3", + "nonce" : "0x01", + "storage" : { + } + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { + "balance" : "0x0de0b6b3a7640000", + "code" : "0x", + "nonce" : "0x00", + "storage" : { + "0x00" : "0x00" + } + }, + "0xb94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { + "balance" : "0x00", + "code" : "0x6001600055", + "nonce" : "0x00", + "storage" : { + } + } +} + diff --git a/cmd/evm/testdata/11/env.json b/cmd/evm/testdata/11/env.json new file mode 100644 index 
00000000000..37dedf09475 --- /dev/null +++ b/cmd/evm/testdata/11/env.json @@ -0,0 +1,12 @@ +{ + "currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + "currentDifficulty" : "0x020000", + "currentNumber" : "0x01", + "currentTimestamp" : "0x03e8", + "previousHash" : "0xfda4419b3660e99f37e536dae1ab081c180136bb38c837a93e93d9aab58553b2", + "currentGasLimit" : "0x0f4240", + "blockHashes" : { + "0" : "0xfda4419b3660e99f37e536dae1ab081c180136bb38c837a93e93d9aab58553b2" + } +} + diff --git a/cmd/evm/testdata/11/readme.md b/cmd/evm/testdata/11/readme.md new file mode 100644 index 00000000000..d499f8e99fa --- /dev/null +++ b/cmd/evm/testdata/11/readme.md @@ -0,0 +1,13 @@ +## Test missing basefee + +In this test, the `currentBaseFee` is missing from the env portion. +On a live blockchain, the basefee is present in the header, and verified as part of header validation. + +In `evm t8n`, we don't have blocks, so it needs to be added in the `env` instead. + +When it's missing, an error is expected.
+ +``` +dir=./testdata/11 && ./evm t8n --state.fork=London --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout --output.result=stdout 2>&1>/dev/null +ERROR(3): EIP-1559 config but missing 'currentBaseFee' in env section +``` \ No newline at end of file diff --git a/cmd/evm/testdata/11/txs.json b/cmd/evm/testdata/11/txs.json new file mode 100644 index 00000000000..c54b0a1f5b4 --- /dev/null +++ b/cmd/evm/testdata/11/txs.json @@ -0,0 +1,14 @@ +[ + { + "input" : "0x38600060013960015160005560006000f3", + "gas" : "0x61a80", + "gasPrice" : "0x1", + "nonce" : "0x0", + "value" : "0x186a0", + "v" : "0x1c", + "r" : "0x2e1391fd903387f1cc2b51df083805fb4bbb0d4710a2cdf4a044d191ff7be63e", + "s" : "0x7f10a933c42ab74927db02b1db009e923d9d2ab24ac24d63c399f2fe5d9c9b22", + "secretKey" : "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8" + } +] + diff --git a/cmd/evm/testdata/12/alloc.json b/cmd/evm/testdata/12/alloc.json new file mode 100644 index 00000000000..3ed96894fbc --- /dev/null +++ b/cmd/evm/testdata/12/alloc.json @@ -0,0 +1,11 @@ +{ + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { + "balance" : "84000000", + "code" : "0x", + "nonce" : "0x00", + "storage" : { + "0x00" : "0x00" + } + } +} + diff --git a/cmd/evm/testdata/12/env.json b/cmd/evm/testdata/12/env.json new file mode 100644 index 00000000000..8ae5465369c --- /dev/null +++ b/cmd/evm/testdata/12/env.json @@ -0,0 +1,10 @@ +{ + "currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + "currentDifficulty" : "0x020000", + "currentNumber" : "0x01", + "currentTimestamp" : "0x03e8", + "previousHash" : "0xfda4419b3660e99f37e536dae1ab081c180136bb38c837a93e93d9aab58553b2", + "currentGasLimit" : "0x0f4240", + "currentBaseFee" : "0x20" +} + diff --git a/cmd/evm/testdata/12/exp.json b/cmd/evm/testdata/12/exp.json new file mode 100644 index 00000000000..9f88273f734 --- /dev/null +++ b/cmd/evm/testdata/12/exp.json @@ -0,0 +1,26 @@ +{ + "alloc": { + 
"0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba": { + "balance": "0x1bc16d674ec80000" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x501bd00" + } + }, + "result": { + "stateRoot": "0x9fd6c7f520a9e9a160c19d65b929161415bc4e86ea75e7c9cac4fe8f776cf453", + "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [], + "rejected": [ + { + "index": 0, + "error": "insufficient funds for gas * price + value: address 0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B have 84000000 want 84000032" + } + ], + "currentDifficulty": "0x20000", + "gasUsed": "0x0" + } +} diff --git a/cmd/evm/testdata/12/readme.md b/cmd/evm/testdata/12/readme.md new file mode 100644 index 00000000000..b0177ecc24b --- /dev/null +++ b/cmd/evm/testdata/12/readme.md @@ -0,0 +1,40 @@ +## Test 1559 balance + gasCap + +This test contains an EIP-1559 consensus issue which happened on Ropsten, where +`geth` did not properly account for the value transfer while doing the check on `max_fee_per_gas * gas_limit`. 
+ +Before the issue was fixed, this invocation allowed the transaction to pass into a block: +``` +dir=./testdata/12 && ./evm t8n --state.fork=London --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout --output.result=stdout +``` + +With the fix applied, the result is: +``` +dir=./testdata/12 && ./evm t8n --state.fork=London --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout --output.result=stdout +INFO [07-21|19:03:50.276] rejected tx index=0 hash=ccc996..d83435 from=0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B error="insufficient funds for gas * price + value: address 0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B have 84000000 want 84000032" +INFO [07-21|19:03:50.276] Trie dumping started root=e05f81..6597a5 +INFO [07-21|19:03:50.276] Trie dumping complete accounts=1 elapsed="39.549µs" +{ + "alloc": { + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x501bd00" + } + }, + "result": { + "stateRoot": "0xe05f81f8244a76503ceec6f88abfcd03047a612a1001217f37d30984536597a5", + "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "receiptRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [], + "rejected": [ + { + "index": 0, + "error": "insufficient funds for gas * price + value: address 
0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B have 84000000 want 84000032" + } + ] + } +} +``` + +The transaction is rejected. \ No newline at end of file diff --git a/cmd/evm/testdata/12/txs.json b/cmd/evm/testdata/12/txs.json new file mode 100644 index 00000000000..cd683f271c7 --- /dev/null +++ b/cmd/evm/testdata/12/txs.json @@ -0,0 +1,20 @@ +[ + { + "input" : "0x", + "gas" : "0x5208", + "nonce" : "0x0", + "to" : "0x1111111111111111111111111111111111111111", + "value" : "0x20", + "v" : "0x0", + "r" : "0x0", + "s" : "0x0", + "secretKey" : "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8", + "chainId" : "0x1", + "type" : "0x2", + "maxFeePerGas" : "0xfa0", + "maxPriorityFeePerGas" : "0x20", + "accessList" : [ + ] + } +] + diff --git a/cmd/evm/testdata/19/alloc.json b/cmd/evm/testdata/19/alloc.json new file mode 100644 index 00000000000..cef1a25ff01 --- /dev/null +++ b/cmd/evm/testdata/19/alloc.json @@ -0,0 +1,12 @@ +{ + "a94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x5ffd4878be161d74", + "code": "0x", + "nonce": "0xac", + "storage": {} + }, + "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192":{ + "balance": "0xfeedbead", + "nonce" : "0x00" + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/19/env.json b/cmd/evm/testdata/19/env.json new file mode 100644 index 00000000000..0c64392aff5 --- /dev/null +++ b/cmd/evm/testdata/19/env.json @@ -0,0 +1,9 @@ +{ + "currentCoinbase": "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b", + "currentGasLimit": "0x750a163df65e8a", + "currentBaseFee": "0x500", + "currentNumber": "13000000", + "currentTimestamp": "100015", + "parentTimestamp" : "99999", + "parentDifficulty" : "0x2000000000000" +} diff --git a/cmd/evm/testdata/19/exp_arrowglacier.json b/cmd/evm/testdata/19/exp_arrowglacier.json new file mode 100644 index 00000000000..266b955565b --- /dev/null +++ b/cmd/evm/testdata/19/exp_arrowglacier.json @@ -0,0 +1,24 @@ +{ + "alloc": { + "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192": { + "balance": 
"0xfeedbead" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x5ffd4878be161d74", + "nonce": "0xac" + }, + "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x1bc16d674ec80000" + } + }, + "result": { + "stateRoot": "0x374cbd5c614cb6ef173024d1c0d4e0313dafc2d7fc8f4399cf4bd1b60fc7c2ca", + "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [], + "currentDifficulty": "0x2000000200000", + "gasUsed": "0x0" + } +} diff --git a/cmd/evm/testdata/19/exp_london.json b/cmd/evm/testdata/19/exp_london.json new file mode 100644 index 00000000000..d594281e4be --- /dev/null +++ b/cmd/evm/testdata/19/exp_london.json @@ -0,0 +1,24 @@ +{ + "alloc": { + "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192": { + "balance": "0xfeedbead" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x5ffd4878be161d74", + "nonce": "0xac" + }, + "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x1bc16d674ec80000" + } + }, + "result": { + "stateRoot": "0x374cbd5c614cb6ef173024d1c0d4e0313dafc2d7fc8f4399cf4bd1b60fc7c2ca", + "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "logsHash": 
"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [], + "currentDifficulty": "0x2000080000000", + "gasUsed": "0x0" + } +} diff --git a/cmd/evm/testdata/19/readme.md b/cmd/evm/testdata/19/readme.md new file mode 100644 index 00000000000..5fae183f488 --- /dev/null +++ b/cmd/evm/testdata/19/readme.md @@ -0,0 +1,9 @@ +## Difficulty calculation + +This test shows how the `evm t8n` can be used to calculate the (ethash) difficulty, if none is provided by the caller, +this time on `ArrowGlacier` (Eip 4345). 
+ +Calculating it (with an empty set of txs) using `ArrowGlacier` rules (and no provided unclehash for the parent block): +``` +[user@work evm]$ ./evm t8n --input.alloc=./testdata/14/alloc.json --input.txs=./testdata/14/txs.json --input.env=./testdata/14/env.json --output.result=stdout --state.fork=ArrowGlacier +``` \ No newline at end of file diff --git a/cmd/evm/testdata/19/txs.json b/cmd/evm/testdata/19/txs.json new file mode 100644 index 00000000000..fe51488c706 --- /dev/null +++ b/cmd/evm/testdata/19/txs.json @@ -0,0 +1 @@ +[] diff --git a/cmd/evm/testdata/3/exp.json b/cmd/evm/testdata/3/exp.json new file mode 100644 index 00000000000..5b8b7c84ebc --- /dev/null +++ b/cmd/evm/testdata/3/exp.json @@ -0,0 +1,39 @@ +{ + "alloc": { + "0x095e7baea6a6c7c4c2dfeb977efac326af552d87": { + "code": "0x600140", + "balance": "0xde0b6b3a76586a0" + }, + "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba": { + "balance": "0x1bc16d674ec8521f" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0xde0b6b3a7622741", + "nonce": "0x1" + } + }, + "result": { + "stateRoot": "0x5aeefb3e8fe1d722455ff4b4ee76793af2c654f7f5120b79a8427d696ed01558", + "txRoot": "0x75e61774a2ff58cbe32653420256c7f44bc715715a423b0b746d5c622979af6b", + "receiptsRoot": "0xd0d26df80374a327c025d405ebadc752b1bbd089d864801ae78ab704bcad8086", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [ + { + "root": "0x", + "status": "0x1", + 
"cumulativeGasUsed": "0x521f", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x521f", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "blockNumber": "0x5", + "transactionIndex": "0x0" + } + ], + "currentDifficulty": "0x20000", + "gasUsed": "0x521f" + } +} diff --git a/cmd/evm/testdata/5/exp.json b/cmd/evm/testdata/5/exp.json new file mode 100644 index 00000000000..5feeff85c09 --- /dev/null +++ b/cmd/evm/testdata/5/exp.json @@ -0,0 +1,23 @@ +{ + "alloc": { + "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa": { + "balance": "0x2c3c465ca58ec000" + }, + "0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb": { + "balance": "0x246ddf9797668000" + }, + "0xcccccccccccccccccccccccccccccccccccccccc": { + "balance": "0x1f399b1438a10000" + } + }, + "result": { + "stateRoot": "0x5069e6c86aeba39397685cf7914a7505a78059be8c5f4d1348050ce78b348e99", + "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [], + "currentDifficulty": "0x20000", + "gasUsed": "0x0" + } +} diff --git a/cmd/evm/testdata/7/exp.json b/cmd/evm/testdata/7/exp.json new file mode 100644 index 00000000000..23ca9f5cf97 --- /dev/null +++ b/cmd/evm/testdata/7/exp.json @@ -0,0 +1,375 @@ +{ + "alloc": { + "0x005f5cee7a43331d5a3d3eec71305925a62f34b6": { + "balance": "0x0" + }, + "0x0101f3be8ebb4bbd39a2e3b9a3639d4259832fd9": { + "balance": "0x0" + }, + "0x057b56736d32b86616a10f619859c6cd6f59092a": { + "balance": "0x0" + }, + "0x06706dd3f2c9abf0a21ddcc6941d9b86f0596936": { + "balance": "0x0" + }, + "0x0737a6b837f97f46ebade41b9bc3e1c509c85c53": { + "balance": "0x0" + }, + "0x07f5c1e1bc2c93e0402f23341973a0e043f7bf8a": { + "balance": "0x0" + }, + "0x0e0da70933f4c7849fc0d203f5d1d43b9ae4532d": { + "balance": "0x0" + }, + "0x0ff30d6de14a8224aa97b78aea5388d1c51c1f00": { + "balance": "0x0" + }, + "0x12e626b0eebfe86a56d633b9864e389b45dcb260": { + "balance": "0x0" + }, + "0x1591fc0f688c81fbeb17f5426a162a7024d430c2": { + "balance": "0x0" + }, + "0x17802f43a0137c506ba92291391a8a8f207f487d": { + "balance": "0x0" + }, + "0x1975bd06d486162d5dc297798dfc41edd5d160a7": { + "balance": "0x0" + }, + "0x1ca6abd14d30affe533b24d7a21bff4c2d5e1f3b": { + "balance": "0x0" + }, + "0x1cba23d343a983e9b5cfd19496b9a9701ada385f": { + "balance": "0x0" + }, + "0x200450f06520bdd6c527622a273333384d870efb": { + "balance": "0x0" + }, + "0x21c7fdb9ed8d291d79ffd82eb2c4356ec0d81241": { + "balance": "0x0" + }, + 
"0x23b75c2f6791eef49c69684db4c6c1f93bf49a50": { + "balance": "0x0" + }, + "0x24c4d950dfd4dd1902bbed3508144a54542bba94": { + "balance": "0x0" + }, + "0x253488078a4edf4d6f42f113d1e62836a942cf1a": { + "balance": "0x0" + }, + "0x27b137a85656544b1ccb5a0f2e561a5703c6a68f": { + "balance": "0x0" + }, + "0x2a5ed960395e2a49b1c758cef4aa15213cfd874c": { + "balance": "0x0" + }, + "0x2b3455ec7fedf16e646268bf88846bd7a2319bb2": { + "balance": "0x0" + }, + "0x2c19c7f9ae8b751e37aeb2d93a699722395ae18f": { + "balance": "0x0" + }, + "0x304a554a310c7e546dfe434669c62820b7d83490": { + "balance": "0x0" + }, + "0x319f70bab6845585f412ec7724b744fec6095c85": { + "balance": "0x0" + }, + "0x35a051a0010aba705c9008d7a7eff6fb88f6ea7b": { + "balance": "0x0" + }, + "0x3ba4d81db016dc2890c81f3acec2454bff5aada5": { + "balance": "0x0" + }, + "0x3c02a7bc0391e86d91b7d144e61c2c01a25a79c5": { + "balance": "0x0" + }, + "0x40b803a9abce16f50f36a77ba41180eb90023925": { + "balance": "0x0" + }, + "0x440c59b325d2997a134c2c7c60a8c61611212bad": { + "balance": "0x0" + }, + "0x4486a3d68fac6967006d7a517b889fd3f98c102b": { + "balance": "0x0" + }, + "0x4613f3bca5c44ea06337a9e439fbc6d42e501d0a": { + "balance": "0x0" + }, + "0x47e7aa56d6bdf3f36be34619660de61275420af8": { + "balance": "0x0" + }, + "0x4863226780fe7c0356454236d3b1c8792785748d": { + "balance": "0x0" + }, + "0x492ea3bb0f3315521c31f273e565b868fc090f17": { + "balance": "0x0" + }, + "0x4cb31628079fb14e4bc3cd5e30c2f7489b00960c": { + "balance": "0x0" + }, + "0x4deb0033bb26bc534b197e61d19e0733e5679784": { + "balance": "0x0" + }, + "0x4fa802324e929786dbda3b8820dc7834e9134a2a": { + "balance": "0x0" + }, + "0x4fd6ace747f06ece9c49699c7cabc62d02211f75": { + "balance": "0x0" + }, + "0x51e0ddd9998364a2eb38588679f0d2c42653e4a6": { + "balance": "0x0" + }, + "0x52c5317c848ba20c7504cb2c8052abd1fde29d03": { + "balance": "0x0" + }, + "0x542a9515200d14b68e934e9830d91645a980dd7a": { + "balance": "0x0" + }, + "0x5524c55fb03cf21f549444ccbecb664d0acad706": { + "balance": "0x0" + }, + 
"0x579a80d909f346fbfb1189493f521d7f48d52238": { + "balance": "0x0" + }, + "0x58b95c9a9d5d26825e70a82b6adb139d3fd829eb": { + "balance": "0x0" + }, + "0x5c6e67ccd5849c0d29219c4f95f1a7a93b3f5dc5": { + "balance": "0x0" + }, + "0x5c8536898fbb74fc7445814902fd08422eac56d0": { + "balance": "0x0" + }, + "0x5d2b2e6fcbe3b11d26b525e085ff818dae332479": { + "balance": "0x0" + }, + "0x5dc28b15dffed94048d73806ce4b7a4612a1d48f": { + "balance": "0x0" + }, + "0x5f9f3392e9f62f63b8eac0beb55541fc8627f42c": { + "balance": "0x0" + }, + "0x6131c42fa982e56929107413a9d526fd99405560": { + "balance": "0x0" + }, + "0x6231b6d0d5e77fe001c2a460bd9584fee60d409b": { + "balance": "0x0" + }, + "0x627a0a960c079c21c34f7612d5d230e01b4ad4c7": { + "balance": "0x0" + }, + "0x63ed5a272de2f6d968408b4acb9024f4cc208ebf": { + "balance": "0x0" + }, + "0x6966ab0d485353095148a2155858910e0965b6f9": { + "balance": "0x0" + }, + "0x6b0c4d41ba9ab8d8cfb5d379c69a612f2ced8ecb": { + "balance": "0x0" + }, + "0x6d87578288b6cb5549d5076a207456a1f6a63dc0": { + "balance": "0x0" + }, + "0x6f6704e5a10332af6672e50b3d9754dc460dfa4d": { + "balance": "0x0" + }, + "0x7602b46df5390e432ef1c307d4f2c9ff6d65cc97": { + "balance": "0x0" + }, + "0x779543a0491a837ca36ce8c635d6154e3c4911a6": { + "balance": "0x0" + }, + "0x77ca7b50b6cd7e2f3fa008e24ab793fd56cb15f6": { + "balance": "0x0" + }, + "0x782495b7b3355efb2833d56ecb34dc22ad7dfcc4": { + "balance": "0x0" + }, + "0x807640a13483f8ac783c557fcdf27be11ea4ac7a": { + "balance": "0x0" + }, + "0x8163e7fb499e90f8544ea62bbf80d21cd26d9efd": { + "balance": "0x0" + }, + "0x84ef4b2357079cd7a7c69fd7a37cd0609a679106": { + "balance": "0x0" + }, + "0x86af3e9626fce1957c82e88cbf04ddf3a2ed7915": { + "balance": "0x0" + }, + "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192": { + "balance": "0xfeedbead" + }, + "0x8d9edb3054ce5c5774a420ac37ebae0ac02343c6": { + "balance": "0x0" + }, + "0x914d1b8b43e92723e64fd0a06f5bdb8dd9b10c79": { + "balance": "0x0" + }, + "0x97f43a37f595ab5dd318fb46e7a155eae057317a": { + "balance": "0x0" + 
}, + "0x9aa008f65de0b923a2a4f02012ad034a5e2e2192": { + "balance": "0x0" + }, + "0x9c15b54878ba618f494b38f0ae7443db6af648ba": { + "balance": "0x0" + }, + "0x9c50426be05db97f5d64fc54bf89eff947f0a321": { + "balance": "0x0" + }, + "0x9da397b9e80755301a3b32173283a91c0ef6c87e": { + "balance": "0x0" + }, + "0x9ea779f907f0b315b364b0cfc39a0fde5b02a416": { + "balance": "0x0" + }, + "0x9f27daea7aca0aa0446220b98d028715e3bc803d": { + "balance": "0x0" + }, + "0x9fcd2deaff372a39cc679d5c5e4de7bafb0b1339": { + "balance": "0x0" + }, + "0xa2f1ccba9395d7fcb155bba8bc92db9bafaeade7": { + "balance": "0x0" + }, + "0xa3acf3a1e16b1d7c315e23510fdd7847b48234f6": { + "balance": "0x0" + }, + "0xa5dc5acd6a7968a4554d89d65e59b7fd3bff0f90": { + "balance": "0x0" + }, + "0xa82f360a8d3455c5c41366975bde739c37bfeb8a": { + "balance": "0x0" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x5ffd4878be161d74", + "nonce": "0xac" + }, + "0xac1ecab32727358dba8962a0f3b261731aad9723": { + "balance": "0x0" + }, + "0xaccc230e8a6e5be9160b8cdf2864dd2a001c28b6": { + "balance": "0x0" + }, + "0xacd87e28b0c9d1254e868b81cba4cc20d9a32225": { + "balance": "0x0" + }, + "0xadf80daec7ba8dcf15392f1ac611fff65d94f880": { + "balance": "0x0" + }, + "0xaeeb8ff27288bdabc0fa5ebb731b6f409507516c": { + "balance": "0x0" + }, + "0xb136707642a4ea12fb4bae820f03d2562ebff487": { + "balance": "0x0" + }, + "0xb2c6f0dfbb716ac562e2d85d6cb2f8d5ee87603e": { + "balance": "0x0" + }, + "0xb3fb0e5aba0e20e5c49d252dfd30e102b171a425": { + "balance": "0x0" + }, + "0xb52042c8ca3f8aa246fa79c3feaa3d959347c0ab": { + "balance": "0x0" + }, + "0xb9637156d330c0d605a791f1c31ba5890582fe1c": { + "balance": "0x0" + }, + "0xbb9bc244d798123fde783fcc1c72d3bb8c189413": { + "balance": "0x0" + }, + "0xbc07118b9ac290e4622f5e77a0853539789effbe": { + "balance": "0x0" + }, + "0xbcf899e6c7d9d5a215ab1e3444c86806fa854c76": { + "balance": "0x0" + }, + "0xbe8539bfe837b67d1282b2b1d61c3f723966f049": { + "balance": "0x0" + }, + 
"0xbf4ed7b27f1d666546e30d74d50d173d20bca754": { + "balance": "0x0" + }, + "0xc4bbd073882dd2add2424cf47d35213405b01324": { + "balance": "0x0" + }, + "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x4563918244f40000" + }, + "0xca544e5c4687d109611d0f8f928b53a25af72448": { + "balance": "0x0" + }, + "0xcbb9d3703e651b0d496cdefb8b92c25aeb2171f7": { + "balance": "0x0" + }, + "0xcc34673c6c40e791051898567a1222daf90be287": { + "balance": "0x0" + }, + "0xceaeb481747ca6c540a000c1f3641f8cef161fa7": { + "balance": "0x0" + }, + "0xd131637d5275fd1a68a3200f4ad25c71a2a9522e": { + "balance": "0x0" + }, + "0xd164b088bd9108b60d0ca3751da4bceb207b0782": { + "balance": "0x0" + }, + "0xd1ac8b1ef1b69ff51d1d401a476e7e612414f091": { + "balance": "0x0" + }, + "0xd343b217de44030afaa275f54d31a9317c7f441e": { + "balance": "0x0" + }, + "0xd4fe7bc31cedb7bfb8a345f31e668033056b2728": { + "balance": "0x0" + }, + "0xd9aef3a1e38a39c16b31d1ace71bca8ef58d315b": { + "balance": "0x0" + }, + "0xda2fef9e4a3230988ff17df2165440f37e8b1708": { + "balance": "0x0" + }, + "0xdbe9b615a3ae8709af8b93336ce9b477e4ac0940": { + "balance": "0x0" + }, + "0xe308bd1ac5fda103967359b2712dd89deffb7973": { + "balance": "0x0" + }, + "0xe4ae1efdfc53b73893af49113d8694a057b9c0d1": { + "balance": "0x0" + }, + "0xec8e57756626fdc07c63ad2eafbd28d08e7b0ca5": { + "balance": "0x0" + }, + "0xecd135fa4f61a655311e86238c92adcd779555d2": { + "balance": "0x0" + }, + "0xf0b1aa0eb660754448a7937c022e30aa692fe0c5": { + "balance": "0x0" + }, + "0xf1385fb24aad0cd7432824085e42aff90886fef5": { + "balance": "0x0" + }, + "0xf14c14075d6c4ed84b86798af0956deef67365b5": { + "balance": "0x0" + }, + "0xf4c64518ea10f995918a454158c6b61407ea345c": { + "balance": "0x0" + }, + "0xfe24cdd8648121a43a7c86d289be4dd2951ed49f": { + "balance": "0x0" + } + }, + "result": { + "stateRoot": "0xd320ae476350b8107b9b78d45d73f539cc363e7e588d8c794666515d852f6e81", + "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "receiptsRoot": 
"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [], + "currentDifficulty": "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffff020000", + "gasUsed": "0x0" + } +} diff --git a/cmd/evm/testdata/8/exp.json b/cmd/evm/testdata/8/exp.json new file mode 100644 index 00000000000..2d44c071be7 --- /dev/null +++ b/cmd/evm/testdata/8/exp.json @@ -0,0 +1,68 @@ +{ + "alloc": { + "0x000000000000000000000000000000000000aaaa": { + "code": "0x5854505854", + "balance": "0x7", + "nonce": "0x1" + }, + "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba": { + "balance": "0x1bc16d674ec94832" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0xeb7ca", + "nonce": "0x3" + } + }, + "result": { + "stateRoot": "0xb78515d83d9ad63ae2740f09f21bb6b44e9041e18b606a3ed35dd6cfd338c0bb", + "txRoot": "0xe42c488908c04b9f7d4d39614ed4093a33ff16353299672e1770b786c28a5e6f", + "receiptsRoot": "0xb207f384195fb6fb7ee7105ba963cc19e1614ce0e75809999289c6c82e7a8d97", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [ + { + "type": "0x1", + "root": "0x", + "status": "0x1", + "cumulativeGasUsed": "0x7aae", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0x26c8c6e23fa3b246f44fba53e7b5fcb55f01f1e075f2de3db9b982afd4bd3901", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x7aae", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "blockNumber": "0x1000000", + "transactionIndex": "0x0" + }, + { + "root": "0x", + "status": "0x1", + "cumulativeGasUsed": "0xdd24", + "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0x26ea003b1188334eced68a720dbe89886cd6a477cccdf924cf1d392e2281c01b", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x6276", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "blockNumber": "0x1000000", + "transactionIndex": "0x1" + }, + { + "type": "0x1", + "root": "0x", + "status": "0x1", + "cumulativeGasUsed": "0x14832", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0x6997569ed85f1d810bc61d969cbbae12f34ce88d314ff5ef2629bc741466fca6", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x6b0e", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "blockNumber": "0x1000000", + "transactionIndex": "0x2" + } + ], + "currentDifficulty": "0x20000", + "gasUsed": "0x14832" + } +} diff --git a/cmd/evm/testdata/9/alloc.json b/cmd/evm/testdata/9/alloc.json index 
430e4242732..c14e38e8451 100644 --- a/cmd/evm/testdata/9/alloc.json +++ b/cmd/evm/testdata/9/alloc.json @@ -1,19 +1,11 @@ { - "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { - "balance": "0x100000000000000000", - "nonce": "0x00" - }, - "0x00000000000000000000000000000000b0b0face": { - "code":"0x40600052", - "storage":{}, - "balance":"0x0", - "nonce": - "0x0" - }, - "0x000000000000000000000000000000ca1100f022": { - "code":"0x60806040527f248f18b25d9b5856c092f62a7d329b239f4a0a77e6ee6c58637f56745b9803f3446040518082815260200191505060405180910390a100fea265627a7a72315820eea50cf12e938601a56dcdef0ab1446f14ba25367299eb81834af54e1672f5d864736f6c63430005110032", - "storage":{}, - "balance":"0x0", - "nonce":"0x0" - } - } \ No newline at end of file + "0x000000000000000000000000000000000000aaaa": { + "balance": "0x03", + "code": "0x58585454", + "nonce": "0x1" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x100000000000000", + "nonce": "0x00" + } +} diff --git a/cmd/evm/testdata/9/env.json b/cmd/evm/testdata/9/env.json index 479d8a3f47d..05f35191fd8 100644 --- a/cmd/evm/testdata/9/env.json +++ b/cmd/evm/testdata/9/env.json @@ -1,8 +1,9 @@ { - "currentCoinbase": "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", - "currentDifficulty": "0x20000", - "currentGasLimit": "0x1000000000", - "currentNumber": "0x1000000", - "currentTimestamp": "0x04", - "currentRandom": "0x1000000000000000000000000000000000000000000000000000000000000001" - } \ No newline at end of file + "currentCoinbase": "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + "currentDifficulty": "0x20000", + "currentGasTarget": "0x1000000000", + "currentGasLimit": "0x750a163df65e8a", + "currentBaseFee": "0x3B9ACA00", + "currentNumber": "0x1000000", + "currentTimestamp": "0x04" +} diff --git a/cmd/evm/testdata/9/exp.json b/cmd/evm/testdata/9/exp.json new file mode 100644 index 00000000000..53a1bfd4d91 --- /dev/null +++ b/cmd/evm/testdata/9/exp.json @@ -0,0 +1,54 @@ +{ + "alloc": { + 
"0x000000000000000000000000000000000000aaaa": { + "code": "0x58585454", + "balance": "0x3", + "nonce": "0x1" + }, + "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba": { + "balance": "0x1bc1c9185ca6f6e0" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0xff745ee8832120", + "nonce": "0x2" + } + }, + "result": { + "stateRoot": "0x8e0c14cca1717d764e5cd25569bdf079758d704bb8ba56a3827997842f135ad8", + "txRoot": "0xbe6c599aefbec1cfe31dbdeca4b4dd0315bf5fca0f78e10c8f869c40a42feb0d", + "receiptsRoot": "0x5fdadbccc0b40ed39f6c7aacafb08a71c468f28793027552d9d99b1aeb19d406", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [ + { + "type": "0x2", + "root": "0x", + "status": "0x1", + "cumulativeGasUsed": "0x6b70", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0xb4821e4a9122a6f9baecad99351bee6ec54fe8c3f6a737b2e6478f4963536819", + "contractAddress": 
"0x0000000000000000000000000000000000000000", + "gasUsed": "0x6b70", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "blockNumber": "0x1000000", + "transactionIndex": "0x0" + }, + { + "root": "0x", + "status": "0x1", + "cumulativeGasUsed": "0xcde4", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0xa9c6c6a848b9c9a0d8bbb4df5f30394983632817dbccc738e839c8e174fa4036", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x6274", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "blockNumber": "0x1000000", + "transactionIndex": "0x1" + } + ], + "currentDifficulty": "0x20000", + "gasUsed": "0xcde4" + } +} diff --git a/cmd/evm/testdata/9/readme.md b/cmd/evm/testdata/9/readme.md new file mode 100644 index 00000000000..88f0f12aaaa --- /dev/null +++ b/cmd/evm/testdata/9/readme.md @@ -0,0 +1,75 @@ +## EIP-1559 testing + +This test contains testcases for EIP-1559, which uses an new transaction type and has a new block parameter. + +### Prestate + +The alloc portion contains one contract (`0x000000000000000000000000000000000000aaaa`), containing the +following code: `0x58585454`: `PC; PC; SLOAD; SLOAD`. + +Essentialy, this contract does `SLOAD(0)` and `SLOAD(1)`. + +The alloc also contains some funds on `0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b`. + +## Transactions + +There are two transactions, each invokes the contract above. + +1. 
EIP-1559 ACL-transaction, which contains the `0x0` slot for `0xaaaa` +2. Legacy transaction + +## Execution + +Running it yields: +``` +$ dir=./testdata/9 && ./evm t8n --state.fork=London --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --trace && cat trace-* | grep SLOAD +{"pc":2,"op":84,"gas":"0x48c28","gasCost":"0x834","memory":"0x","memSize":0,"stack":["0x0","0x1"],"returnStack":null,"returnD +ata":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +{"pc":3,"op":84,"gas":"0x483f4","gasCost":"0x64","memory":"0x","memSize":0,"stack":["0x0","0x0"],"returnStack":null,"returnDa +ta":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +{"pc":2,"op":84,"gas":"0x49cf4","gasCost":"0x834","memory":"0x","memSize":0,"stack":["0x0","0x1"],"returnStack":null,"returnD +ata":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +{"pc":3,"op":84,"gas":"0x494c0","gasCost":"0x834","memory":"0x","memSize":0,"stack":["0x0","0x0"],"returnStack":null,"returnD +ata":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +``` + +We can also get the post-alloc: +``` +$ dir=./testdata/9 && ./evm t8n --state.fork=London --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout +{ + "alloc": { + "0x000000000000000000000000000000000000aaaa": { + "code": "0x58585454", + "balance": "0x3", + "nonce": "0x1" + }, + "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba": { + "balance": "0xbfc02677a000" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0xff104fcfea7800", + "nonce": "0x2" + } + } +} +``` + +If we try to execute it on older rules: +``` +dir=./testdata/9 && ./evm t8n --state.fork=Berlin --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout +ERROR(10): Failed signing transactions: ERROR(10): Tx 0: failed to sign tx: transaction type not supported +``` + +It fails, due to the `evm t8n` cannot sign them in with the given signer. 
We can bypass that, however, +by feeding it presigned transactions, located in `txs_signed.json`. + +``` +dir=./testdata/9 && ./evm t8n --state.fork=Berlin --input.alloc=$dir/alloc.json --input.txs=$dir/txs_signed.json --input.env=$dir/env.json +INFO [05-07|12:28:42.072] rejected tx index=0 hash=b4821e..536819 error="transaction type not supported" +INFO [05-07|12:28:42.072] rejected tx index=1 hash=a9c6c6..fa4036 from=0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B error="nonce too high: address 0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B, tx: 1 state: 0" +INFO [05-07|12:28:42.073] Wrote file file=alloc.json +INFO [05-07|12:28:42.073] Wrote file file=result.json +``` + +Number `0` is not applicable, and therefore number `1` has wrong nonce, and both are rejected. + diff --git a/cmd/evm/testdata/9/txs.json b/cmd/evm/testdata/9/txs.json index 7f15b0b2215..740abce079d 100644 --- a/cmd/evm/testdata/9/txs.json +++ b/cmd/evm/testdata/9/txs.json @@ -1,14 +1,37 @@ [ - { - "gasPrice":"0x80", - "nonce":"0x0", - "to":"0x000000000000000000000000000000ca1100f022", - "input": "", - "gas":"0x1312d00", - "value": "0x0", - "v": "0x0", - "r": "0x0", - "s": "0x0", - "secretKey": "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8" - } -] \ No newline at end of file + { + "gas": "0x4ef00", + "maxPriorityFeePerGas": "0x2", + "maxFeePerGas": "0x12A05F200", + "chainId": "0x1", + "input": "0x", + "nonce": "0x0", + "to": "0x000000000000000000000000000000000000aaaa", + "value": "0x0", + "type" : "0x2", + "accessList": [ + {"address": "0x000000000000000000000000000000000000aaaa", + "storageKeys": [ + "0x0000000000000000000000000000000000000000000000000000000000000000" + ] + } + ], + "v": "0x0", + "r": "0x0", + "s": "0x0", + "secretKey": "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8" + }, + { + "gas": "0x4ef00", + "gasPrice": "0x12A05F200", + "chainId": "0x1", + "input": "0x", + "nonce": "0x1", + "to": "0x000000000000000000000000000000000000aaaa", + "value": 
"0x0", + "v": "0x0", + "r": "0x0", + "s": "0x0", + "secretKey": "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8" + } +] diff --git a/cmd/integration/commands/state_stages.go b/cmd/integration/commands/state_stages.go index d620bb0ab86..4714e587d32 100644 --- a/cmd/integration/commands/state_stages.go +++ b/cmd/integration/commands/state_stages.go @@ -17,7 +17,6 @@ import ( "github.com/ledgerwatch/erigon/common/changeset" "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/common/debugprint" - "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" @@ -219,7 +218,7 @@ func syncBySmallSteps(db kv.RwDB, miningConfig params.MiningConfig, ctx context. } encoder := json.NewEncoder(w) encoder.SetIndent(" ", " ") - for _, l := range core.FormatLogs(vmConfig.Tracer.(*vm.StructLogger).StructLogs()) { + for _, l := range vm.FormatLogs(vmConfig.Tracer.(*vm.StructLogger).StructLogs()) { if err2 := encoder.Encode(l); err2 != nil { panic(err2) } diff --git a/cmd/rpcdaemon/commands/eth_receipts.go b/cmd/rpcdaemon/commands/eth_receipts.go index bfd481ec08d..cc7ac0d99bd 100644 --- a/cmd/rpcdaemon/commands/eth_receipts.go +++ b/cmd/rpcdaemon/commands/eth_receipts.go @@ -59,7 +59,8 @@ func (api *BaseAPI) getReceipts(ctx context.Context, tx kv.Tx, chainConfig *para for i, txn := range block.Transactions() { ibs.Prepare(txn.Hash(), block.Hash(), i) - receipt, _, err := core.ApplyTransaction(chainConfig, getHeader, ethashFaker, nil, gp, ibs, noopWriter, block.Header(), txn, usedGas, vm.Config{}, contractHasTEVM) + header := block.Header() + receipt, _, err := core.ApplyTransaction(chainConfig, core.GetHashFn(header, getHeader), ethashFaker, nil, gp, ibs, noopWriter, header, txn, usedGas, vm.Config{}, contractHasTEVM) if err != nil { return nil, err } diff --git a/cmd/rpcdaemon22/commands/eth_receipts.go 
b/cmd/rpcdaemon22/commands/eth_receipts.go index 8813f52051d..3a49b77b49a 100644 --- a/cmd/rpcdaemon22/commands/eth_receipts.go +++ b/cmd/rpcdaemon22/commands/eth_receipts.go @@ -55,7 +55,8 @@ func (api *BaseAPI) getReceipts(ctx context.Context, tx kv.Tx, chainConfig *para for i, txn := range block.Transactions() { ibs.Prepare(txn.Hash(), block.Hash(), i) - receipt, _, err := core.ApplyTransaction(chainConfig, getHeader, ethashFaker, nil, gp, ibs, noopWriter, block.Header(), txn, usedGas, vm.Config{}, contractHasTEVM) + header := block.Header() + receipt, _, err := core.ApplyTransaction(chainConfig, core.GetHashFn(header, getHeader), ethashFaker, nil, gp, ibs, noopWriter, header, txn, usedGas, vm.Config{}, contractHasTEVM) if err != nil { return nil, err } diff --git a/cmd/state/commands/erigon2.go b/cmd/state/commands/erigon2.go index 934f79cac63..370433ecaa3 100644 --- a/cmd/state/commands/erigon2.go +++ b/cmd/state/commands/erigon2.go @@ -412,7 +412,7 @@ func processBlock(trace bool, txNumStart uint64, rw *ReaderWrapper, ww *WriterWr daoBlock = false } ibs.Prepare(tx.Hash(), block.Hash(), i) - receipt, _, err := core.ApplyTransaction(chainConfig, getHeader, engine, nil, gp, ibs, ww, header, tx, usedGas, vmConfig, nil) + receipt, _, err := core.ApplyTransaction(chainConfig, core.GetHashFn(header, getHeader), engine, nil, gp, ibs, ww, header, tx, usedGas, vmConfig, nil) if err != nil { return 0, nil, fmt.Errorf("could not apply tx %d [%x] failed: %w", i, tx.Hash(), err) } diff --git a/cmd/state/commands/erigon22.go b/cmd/state/commands/erigon22.go index 9fd7f7bb7a1..bae4512818c 100644 --- a/cmd/state/commands/erigon22.go +++ b/cmd/state/commands/erigon22.go @@ -327,7 +327,7 @@ func processBlock22(trace bool, txNumStart uint64, rw *ReaderWrapper22, ww *Writ ibs.Prepare(tx.Hash(), block.Hash(), i) ct := NewCallTracer() vmConfig.Tracer = ct - receipt, _, err := core.ApplyTransaction(chainConfig, getHeader, engine, nil, gp, ibs, ww, header, tx, usedGas, vmConfig, nil) 
+ receipt, _, err := core.ApplyTransaction(chainConfig, core.GetHashFn(header, getHeader), engine, nil, gp, ibs, ww, header, tx, usedGas, vmConfig, nil) if err != nil { return 0, nil, fmt.Errorf("could not apply tx %d [%x] failed: %w", i, tx.Hash(), err) } diff --git a/cmd/state/commands/history2.go b/cmd/state/commands/history2.go index 63332bcb0b0..0aa8b07ffee 100644 --- a/cmd/state/commands/history2.go +++ b/cmd/state/commands/history2.go @@ -157,7 +157,7 @@ func runHistory2(trace bool, blockNum, txNumStart uint64, hw *HistoryWrapper, ww daoBlock = false } ibs.Prepare(tx.Hash(), block.Hash(), i) - receipt, _, err := core.ApplyTransaction(chainConfig, getHeader, engine, nil, gp, ibs, ww, header, tx, usedGas, vmConfig, nil) + receipt, _, err := core.ApplyTransaction(chainConfig, core.GetHashFn(header, getHeader), engine, nil, gp, ibs, ww, header, tx, usedGas, vmConfig, nil) if err != nil { return 0, nil, fmt.Errorf("could not apply tx %d [%x] failed: %w", i, tx.Hash(), err) } diff --git a/cmd/state/commands/history22.go b/cmd/state/commands/history22.go index a3fb98bb066..f8b30069496 100644 --- a/cmd/state/commands/history22.go +++ b/cmd/state/commands/history22.go @@ -237,7 +237,7 @@ func runHistory22(trace bool, blockNum, txNumStart uint64, hw *state.HistoryRead daoBlock = false } ibs.Prepare(tx.Hash(), block.Hash(), i) - receipt, _, err := core.ApplyTransaction(chainConfig, getHeader, engine, nil, gp, ibs, ww, header, tx, usedGas, vmConfig, nil) + receipt, _, err := core.ApplyTransaction(chainConfig, core.GetHashFn(header, getHeader), engine, nil, gp, ibs, ww, header, tx, usedGas, vmConfig, nil) if err != nil { return 0, nil, fmt.Errorf("could not apply tx %d [%x] failed: %w", i, tx.Hash(), err) } diff --git a/cmd/state/commands/opcode_tracer.go b/cmd/state/commands/opcode_tracer.go index fb6e9777180..ee6a5d86102 100644 --- a/cmd/state/commands/opcode_tracer.go +++ b/cmd/state/commands/opcode_tracer.go @@ -684,7 +684,7 @@ func runBlock(engine consensus.Engine, 
ibs *state.IntraBlockState, txnWriter sta rules := chainConfig.Rules(block.NumberU64()) for i, tx := range block.Transactions() { ibs.Prepare(tx.Hash(), block.Hash(), i) - receipt, _, err := core.ApplyTransaction(chainConfig, getHeader, engine, nil, gp, ibs, txnWriter, header, tx, usedGas, vmConfig, contractHasTEVM) + receipt, _, err := core.ApplyTransaction(chainConfig, core.GetHashFn(header, getHeader), engine, nil, gp, ibs, txnWriter, header, tx, usedGas, vmConfig, contractHasTEVM) if err != nil { return nil, fmt.Errorf("could not apply tx %d [%x] failed: %w", i, tx.Hash(), err) } diff --git a/consensus/parlia/parlia.go b/consensus/parlia/parlia.go index 30fd185239a..731fd618121 100644 --- a/consensus/parlia/parlia.go +++ b/consensus/parlia/parlia.go @@ -1218,7 +1218,7 @@ func (p *Parlia) systemCall(from, contract common.Address, data []byte, ibs *sta ) vmConfig := vm.Config{NoReceipts: true} // Create a new context to be used in the EVM environment - blockContext := core.NewEVMBlockContext(header, nil, p, &from, nil) + blockContext := core.NewEVMBlockContext(header, core.GetHashFn(header, nil), p, &from, nil) evm := vm.NewEVM(blockContext, core.NewEVMTxContext(msg), ibs, chainConfig, vmConfig) ret, leftOverGas, err := evm.Call( vm.AccountRef(msg.From()), diff --git a/core/blockchain.go b/core/blockchain.go index b102ae22c15..dc8af60a6c6 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -18,16 +18,17 @@ package core import ( - "encoding/json" "fmt" - "os" "time" "github.com/ledgerwatch/erigon/core/systemcontracts" + "github.com/ledgerwatch/erigon/rlp" + "golang.org/x/crypto/sha3" "golang.org/x/exp/slices" metrics2 "github.com/VictoriaMetrics/metrics" "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/math" "github.com/ledgerwatch/erigon/common/u256" "github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/consensus/misc" @@ -48,12 +49,30 @@ const ( TriesInMemory = 128 ) -// ExecuteBlockEphemerally runs a 
block from provided stateReader and -// writes the result to the provided stateWriter +type RejectedTx struct { + Index int `json:"index" gencodec:"required"` + Err string `json:"error" gencodec:"required"` +} + +type RejectedTxs []*RejectedTx + +type EphemeralExecResult struct { + StateRoot common.Hash `json:"stateRoot"` + TxRoot common.Hash `json:"txRoot"` + ReceiptRoot common.Hash `json:"receiptsRoot"` + LogsHash common.Hash `json:"logsHash"` + Bloom types.Bloom `json:"logsBloom" gencodec:"required"` + Receipts types.Receipts `json:"receipts"` + Rejected RejectedTxs `json:"rejected,omitempty"` + Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"` + GasUsed math.HexOrDecimal64 `json:"gasUsed"` + ReceiptForStorage *types.ReceiptForStorage `json:"-"` +} + func ExecuteBlockEphemerallyForBSC( chainConfig *params.ChainConfig, vmConfig *vm.Config, - getHeader func(hash common.Hash, number uint64) *types.Header, + blockHashFunc func(n uint64) common.Hash, engine consensus.Engine, block *types.Block, stateReader state.StateReader, @@ -61,7 +80,9 @@ func ExecuteBlockEphemerallyForBSC( epochReader consensus.EpochReader, chainReader consensus.ChainHeaderReader, contractHasTEVM func(codeHash common.Hash) (bool, error), -) (types.Receipts, error) { + statelessExec bool, // for usage of this API via cli tools wherein some of the validations need to be relaxed. 
+ getTracer func(txIndex int, txHash common.Hash) (vm.Tracer, error), +) (*EphemeralExecResult, error) { defer blockExecutionTimer.UpdateDuration(time.Now()) block.Uncles() ibs := state.New(stateReader) @@ -71,6 +92,11 @@ func ExecuteBlockEphemerallyForBSC( gp := new(GasPool) gp.AddGas(block.GasLimit()) + var ( + rejectedTxs []*RejectedTx + includedTxs types.Transactions + ) + if !vmConfig.ReadOnly { if err := InitializeBlockExecution(engine, chainReader, epochReader, block.Header(), block.Transactions(), block.Uncles(), chainConfig, ibs); err != nil { return nil, err @@ -95,35 +121,36 @@ func ExecuteBlockEphemerallyForBSC( ibs.Prepare(tx.Hash(), block.Hash(), i) writeTrace := false if vmConfig.Debug && vmConfig.Tracer == nil { - vmConfig.Tracer = vm.NewStructLogger(&vm.LogConfig{}) + tracer, err := getTracer(i, tx.Hash()) + if err != nil { + panic(err) + } + vmConfig.Tracer = tracer writeTrace = true } - receipt, _, err := ApplyTransaction(chainConfig, getHeader, engine, nil, gp, ibs, noop, header, tx, usedGas, *vmConfig, contractHasTEVM) + receipt, _, err := ApplyTransaction(chainConfig, blockHashFunc, engine, nil, gp, ibs, noop, header, tx, usedGas, *vmConfig, contractHasTEVM) if writeTrace { - w, err1 := os.Create(fmt.Sprintf("txtrace_%x.txt", tx.Hash())) - if err1 != nil { - panic(err1) - } - encoder := json.NewEncoder(w) - logs := FormatLogs(vmConfig.Tracer.(*vm.StructLogger).StructLogs()) - if err2 := encoder.Encode(logs); err2 != nil { - panic(err2) - } - if err2 := w.Close(); err2 != nil { - panic(err2) + if ftracer, ok := vmConfig.Tracer.(vm.FlushableTracer); ok { + ftracer.Flush(tx) } + vmConfig.Tracer = nil } - if err != nil { + if err != nil && statelessExec { + rejectedTxs = append(rejectedTxs, &RejectedTx{i, err.Error()}) + } else if err != nil && !statelessExec { return nil, fmt.Errorf("could not apply tx %d from block %d [%v]: %w", i, block.NumberU64(), tx.Hash().Hex(), err) - } - if !vmConfig.NoReceipts { - receipts = append(receipts, receipt) + } 
else { + includedTxs = append(includedTxs, tx) + if !vmConfig.NoReceipts { + receipts = append(receipts, receipt) + } } } var newBlock *types.Block + var receiptSha common.Hash if !vmConfig.ReadOnly { // We're doing this hack for BSC to avoid changing consensus interfaces a lot. BSC modifies txs and receipts by appending // system transactions, and they increase used gas and write cumulative gas to system receipts, that's why we need @@ -148,20 +175,25 @@ func ExecuteBlockEphemerallyForBSC( if !vmConfig.NoReceipts { receipts = outReceipts } + receiptSha = newBlock.ReceiptHash() } else { newBlock = block + receiptSha = types.DeriveSha(receipts) } + var bloom types.Bloom + if chainConfig.IsByzantium(header.Number.Uint64()) && !vmConfig.NoReceipts { - if newBlock.ReceiptHash() != block.ReceiptHash() { + if !statelessExec && newBlock.ReceiptHash() != block.ReceiptHash() { return nil, fmt.Errorf("mismatched receipt headers for block %d (%s != %s)", block.NumberU64(), newBlock.ReceiptHash().Hex(), block.Header().ReceiptHash.Hex()) } } - if newBlock.GasUsed() != header.GasUsed { + if !statelessExec && newBlock.GasUsed() != header.GasUsed { return nil, fmt.Errorf("gas used by execution: %d, in header: %d", *usedGas, header.GasUsed) } if !vmConfig.NoReceipts { - if newBlock.Bloom() != header.Bloom { + bloom = newBlock.Bloom() + if !statelessExec && newBlock.Bloom() != header.Bloom { return nil, fmt.Errorf("bloom computed by execution: %x, in header: %x", newBlock.Bloom(), header.Bloom) } } @@ -172,7 +204,17 @@ func ExecuteBlockEphemerallyForBSC( return nil, fmt.Errorf("writing changesets for block %d failed: %w", header.Number.Uint64(), err) } - return receipts, nil + execRs := &EphemeralExecResult{ + TxRoot: types.DeriveSha(includedTxs), + ReceiptRoot: receiptSha, + Bloom: bloom, + Receipts: receipts, + Difficulty: (*math.HexOrDecimal256)(block.Header().Difficulty), + GasUsed: math.HexOrDecimal64(*usedGas), + Rejected: rejectedTxs, + } + + return execRs, nil } // 
ExecuteBlockEphemerally runs a block from provided stateReader and @@ -180,7 +222,7 @@ func ExecuteBlockEphemerallyForBSC( func ExecuteBlockEphemerally( chainConfig *params.ChainConfig, vmConfig *vm.Config, - getHeader func(hash common.Hash, number uint64) *types.Header, + blockHashFunc func(n uint64) common.Hash, engine consensus.Engine, block *types.Block, stateReader state.StateReader, @@ -188,19 +230,27 @@ func ExecuteBlockEphemerally( epochReader consensus.EpochReader, chainReader consensus.ChainHeaderReader, contractHasTEVM func(codeHash common.Hash) (bool, error), -) (types.Receipts, *types.ReceiptForStorage, error) { + statelessExec bool, // for usage of this API via cli tools wherein some of the validations need to be relaxed. + getTracer func(txIndex int, txHash common.Hash) (vm.Tracer, error), +) (*EphemeralExecResult, error) { + defer blockExecutionTimer.UpdateDuration(time.Now()) block.Uncles() ibs := state.New(stateReader) header := block.Header() - var receipts types.Receipts + var receipts = make(types.Receipts, 0) usedGas := new(uint64) gp := new(GasPool) gp.AddGas(block.GasLimit()) + var ( + rejectedTxs []*RejectedTx + includedTxs types.Transactions + ) + if !vmConfig.ReadOnly { if err := InitializeBlockExecution(engine, chainReader, epochReader, block.Header(), block.Transactions(), block.Uncles(), chainConfig, ibs); err != nil { - return nil, nil, err + return nil, err } } @@ -213,54 +263,53 @@ func ExecuteBlockEphemerally( ibs.Prepare(tx.Hash(), block.Hash(), i) writeTrace := false if vmConfig.Debug && vmConfig.Tracer == nil { - vmConfig.Tracer = vm.NewStructLogger(&vm.LogConfig{}) + tracer, err := getTracer(i, tx.Hash()) + if err != nil { + panic(err) + } + vmConfig.Tracer = tracer writeTrace = true } - receipt, _, err := ApplyTransaction(chainConfig, getHeader, engine, nil, gp, ibs, noop, header, tx, usedGas, *vmConfig, contractHasTEVM) + receipt, _, err := ApplyTransaction(chainConfig, blockHashFunc, engine, nil, gp, ibs, noop, header, tx, 
usedGas, *vmConfig, contractHasTEVM) if writeTrace { - w, err1 := os.Create(fmt.Sprintf("txtrace_%x.txt", tx.Hash())) - if err1 != nil { - panic(err1) - } - encoder := json.NewEncoder(w) - logs := FormatLogs(vmConfig.Tracer.(*vm.StructLogger).StructLogs()) - if err2 := encoder.Encode(logs); err2 != nil { - panic(err2) - } - if err2 := w.Close(); err2 != nil { - panic(err2) + if ftracer, ok := vmConfig.Tracer.(vm.FlushableTracer); ok { + ftracer.Flush(tx) } + vmConfig.Tracer = nil } - if err != nil { - return nil, nil, fmt.Errorf("could not apply tx %d from block %d [%v]: %w", i, block.NumberU64(), tx.Hash().Hex(), err) - } - if !vmConfig.NoReceipts { - receipts = append(receipts, receipt) + if err != nil && statelessExec { + rejectedTxs = append(rejectedTxs, &RejectedTx{i, err.Error()}) + } else if err != nil && !statelessExec { + return nil, fmt.Errorf("could not apply tx %d from block %d [%v]: %w", i, block.NumberU64(), tx.Hash().Hex(), err) + } else { + includedTxs = append(includedTxs, tx) + if !vmConfig.NoReceipts { + receipts = append(receipts, receipt) + } } } - if chainConfig.IsByzantium(header.Number.Uint64()) && !vmConfig.NoReceipts { - receiptSha := types.DeriveSha(receipts) - if receiptSha != block.ReceiptHash() { - return nil, nil, fmt.Errorf("mismatched receipt headers for block %d", block.NumberU64()) - } + var bloom types.Bloom + receiptSha := types.DeriveSha(receipts) + if !statelessExec && chainConfig.IsByzantium(header.Number.Uint64()) && !vmConfig.NoReceipts && receiptSha != block.ReceiptHash() { + return nil, fmt.Errorf("mismatched receipt headers for block %d", block.NumberU64()) } - if *usedGas != header.GasUsed { - return nil, nil, fmt.Errorf("gas used by execution: %d, in header: %d", *usedGas, header.GasUsed) + if !statelessExec && *usedGas != header.GasUsed { + return nil, fmt.Errorf("gas used by execution: %d, in header: %d", *usedGas, header.GasUsed) } if !vmConfig.NoReceipts { - bloom := types.CreateBloom(receipts) - if bloom != 
header.Bloom { - return nil, nil, fmt.Errorf("bloom computed by execution: %x, in header: %x", bloom, header.Bloom) + bloom = types.CreateBloom(receipts) + if !statelessExec && bloom != header.Bloom { + return nil, fmt.Errorf("bloom computed by execution: %x, in header: %x", bloom, header.Bloom) } } if !vmConfig.ReadOnly { txs := block.Transactions() if _, err := FinalizeBlockExecution(engine, stateReader, block.Header(), txs, block.Uncles(), stateWriter, chainConfig, ibs, receipts, epochReader, chainReader, false); err != nil { - return nil, nil, err + return nil, err } } @@ -287,7 +336,26 @@ func ExecuteBlockEphemerally( } } - return receipts, stateSyncReceipt, nil + execRs := &EphemeralExecResult{ + TxRoot: types.DeriveSha(includedTxs), + ReceiptRoot: receiptSha, + Bloom: bloom, + LogsHash: rlpHash(blockLogs), + Receipts: receipts, + Difficulty: (*math.HexOrDecimal256)(header.Difficulty), + GasUsed: math.HexOrDecimal64(*usedGas), + Rejected: rejectedTxs, + ReceiptForStorage: stateSyncReceipt, + } + + return execRs, nil +} + +func rlpHash(x interface{}) (h common.Hash) { + hw := sha3.NewLegacyKeccak256() + rlp.Encode(hw, x) //nolint:errcheck + hw.Sum(h[:0]) + return h } func SysCallContract(contract common.Address, data []byte, chainConfig params.ChainConfig, ibs *state.IntraBlockState, header *types.Header, engine consensus.Engine) (result []byte, err error) { @@ -308,19 +376,16 @@ func SysCallContract(contract common.Address, data []byte, chainConfig params.Ch vmConfig := vm.Config{NoReceipts: true} // Create a new context to be used in the EVM environment isBor := chainConfig.Bor != nil + var txContext vm.TxContext var author *common.Address if isBor { author = &header.Coinbase - } else { - author = &state.SystemAddress - } - blockContext := NewEVMBlockContext(header, nil, engine, author, nil) - var txContext vm.TxContext - if isBor { txContext = vm.TxContext{} } else { + author = &state.SystemAddress txContext = NewEVMTxContext(msg) } + blockContext := 
NewEVMBlockContext(header, GetHashFn(header, nil), engine, author, nil) evm := vm.NewEVM(blockContext, txContext, ibs, &chainConfig, vmConfig) if isBor { ret, _, err := evm.Call( @@ -364,7 +429,7 @@ func CallContract(contract common.Address, data []byte, chainConfig params.Chain return nil, fmt.Errorf("SysCallContract: %w ", err) } vmConfig := vm.Config{NoReceipts: true} - _, result, err = ApplyTransaction(&chainConfig, nil, engine, &state.SystemAddress, gp, ibs, noop, header, tx, &gasUsed, vmConfig, nil) + _, result, err = ApplyTransaction(&chainConfig, GetHashFn(header, nil), engine, &state.SystemAddress, gp, ibs, noop, header, tx, &gasUsed, vmConfig, nil) if err != nil { return result, fmt.Errorf("SysCallContract: %w ", err) } @@ -392,7 +457,7 @@ func FinalizeBlockExecution(engine consensus.Engine, stateReader state.StateRead _, _, err = engine.Finalize(cc, header, ibs, txs, uncles, receipts, e, headerReader, syscall) } if err != nil { - return + return nil, err } var originalSystemAcc *accounts.Account diff --git a/core/chain_makers.go b/core/chain_makers.go index 24a8e4266df..17248745e79 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -112,7 +112,7 @@ func (b *BlockGen) AddTxWithChain(getHeader func(hash common.Hash, number uint64 } b.ibs.Prepare(tx.Hash(), common.Hash{}, len(b.txs)) contractHasTEVM := func(_ common.Hash) (bool, error) { return false, nil } - receipt, _, err := ApplyTransaction(b.config, getHeader, engine, &b.header.Coinbase, b.gasPool, b.ibs, state.NewNoopWriter(), b.header, tx, &b.header.GasUsed, vm.Config{}, contractHasTEVM) + receipt, _, err := ApplyTransaction(b.config, GetHashFn(b.header, getHeader), engine, &b.header.Coinbase, b.gasPool, b.ibs, state.NewNoopWriter(), b.header, tx, &b.header.GasUsed, vm.Config{}, contractHasTEVM) if err != nil { panic(err) } @@ -126,7 +126,7 @@ func (b *BlockGen) AddFailedTxWithChain(getHeader func(hash common.Hash, number } b.ibs.Prepare(tx.Hash(), common.Hash{}, len(b.txs)) 
contractHasTEVM := func(common.Hash) (bool, error) { return false, nil } - receipt, _, err := ApplyTransaction(b.config, getHeader, engine, &b.header.Coinbase, b.gasPool, b.ibs, state.NewNoopWriter(), b.header, tx, &b.header.GasUsed, vm.Config{}, contractHasTEVM) + receipt, _, err := ApplyTransaction(b.config, GetHashFn(b.header, getHeader), engine, &b.header.Coinbase, b.gasPool, b.ibs, state.NewNoopWriter(), b.header, tx, &b.header.GasUsed, vm.Config{}, contractHasTEVM) _ = err // accept failed transactions b.txs = append(b.txs, tx) b.receipts = append(b.receipts, receipt) diff --git a/core/evm.go b/core/evm.go index c2fa1471cd5..b89f8e5acb4 100644 --- a/core/evm.go +++ b/core/evm.go @@ -30,7 +30,7 @@ import ( ) // NewEVMBlockContext creates a new context for use in the EVM. -func NewEVMBlockContext(header *types.Header, getHeader func(hash common.Hash, number uint64) *types.Header, engine consensus.Engine, author *common.Address, contractHasTEVM func(contractHash common.Hash) (bool, error)) vm.BlockContext { +func NewEVMBlockContext(header *types.Header, blockHashFunc func(n uint64) common.Hash, engine consensus.Engine, author *common.Address, contractHasTEVM func(contractHash common.Hash) (bool, error)) vm.BlockContext { // If we don't have an explicit author (i.e. 
not mining), extract from the header var beneficiary common.Address if author == nil { @@ -71,7 +71,7 @@ func NewEVMBlockContext(header *types.Header, getHeader func(hash common.Hash, n return vm.BlockContext{ CanTransfer: CanTransfer, Transfer: transferFunc, - GetHash: GetHashFn(header, getHeader), + GetHash: blockHashFunc, Coinbase: beneficiary, BlockNumber: header.Number.Uint64(), Time: header.Time, diff --git a/core/state_processor.go b/core/state_processor.go index 066378ab5d9..c203685bb9e 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -17,10 +17,7 @@ package core import ( - "fmt" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/math" "github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" @@ -29,57 +26,6 @@ import ( "github.com/ledgerwatch/erigon/params" ) -// StructLogRes stores a structured log emitted by the EVM while replaying a -// transaction in debug mode -type StructLogRes struct { - Pc uint64 `json:"pc"` - Op string `json:"op"` - Gas uint64 `json:"gas"` - GasCost uint64 `json:"gasCost"` - Depth int `json:"depth"` - Error error `json:"error,omitempty"` - Stack *[]string `json:"stack,omitempty"` - Memory *[]string `json:"memory,omitempty"` - Storage *map[string]string `json:"storage,omitempty"` -} - -// FormatLogs formats EVM returned structured logs for json output -func FormatLogs(logs []vm.StructLog) []StructLogRes { - formatted := make([]StructLogRes, len(logs)) - for index, trace := range logs { - formatted[index] = StructLogRes{ - Pc: trace.Pc, - Op: trace.Op.String(), - Gas: trace.Gas, - GasCost: trace.GasCost, - Depth: trace.Depth, - Error: trace.Err, - } - if trace.Stack != nil { - stack := make([]string, len(trace.Stack)) - for i, stackValue := range trace.Stack { - stack[i] = fmt.Sprintf("%x", math.PaddedBigBytes(stackValue, 32)) - } - formatted[index].Stack = &stack - } - if trace.Memory != nil { - memory := 
make([]string, 0, (len(trace.Memory)+31)/32) - for i := 0; i+32 <= len(trace.Memory); i += 32 { - memory = append(memory, fmt.Sprintf("%x", trace.Memory[i:i+32])) - } - formatted[index].Memory = &memory - } - if trace.Storage != nil { - storage := make(map[string]string) - for i, storageValue := range trace.Storage { - storage[fmt.Sprintf("%x", i)] = fmt.Sprintf("%x", storageValue) - } - formatted[index].Storage = &storage - } - } - return formatted -} - // applyTransaction attempts to apply a transaction to the given state database // and uses the input parameters for its environment. It returns the receipt // for the transaction, gas used and an error if the transaction failed, @@ -140,7 +86,7 @@ func applyTransaction(config *params.ChainConfig, gp *GasPool, statedb *state.In // and uses the input parameters for its environment. It returns the receipt // for the transaction, gas used and an error if the transaction failed, // indicating the block was invalid. -func ApplyTransaction(config *params.ChainConfig, getHeader func(hash common.Hash, number uint64) *types.Header, engine consensus.Engine, author *common.Address, gp *GasPool, ibs *state.IntraBlockState, stateWriter state.StateWriter, header *types.Header, tx types.Transaction, usedGas *uint64, cfg vm.Config, contractHasTEVM func(contractHash common.Hash) (bool, error)) (*types.Receipt, []byte, error) { +func ApplyTransaction(config *params.ChainConfig, blockHashFunc func(n uint64) common.Hash, engine consensus.Engine, author *common.Address, gp *GasPool, ibs *state.IntraBlockState, stateWriter state.StateWriter, header *types.Header, tx types.Transaction, usedGas *uint64, cfg vm.Config, contractHasTEVM func(contractHash common.Hash) (bool, error)) (*types.Receipt, []byte, error) { // Create a new context to be used in the EVM environment // Add addresses to access list if applicable @@ -152,7 +98,7 @@ func ApplyTransaction(config *params.ChainConfig, getHeader func(hash common.Has if tx.IsStarkNet() { vmenv 
= &vm.CVMAdapter{Cvm: vm.NewCVM(ibs)} } else { - blockContext := NewEVMBlockContext(header, getHeader, engine, author, contractHasTEVM) + blockContext := NewEVMBlockContext(header, blockHashFunc, engine, author, contractHasTEVM) vmenv = vm.NewEVM(blockContext, vm.TxContext{}, ibs, config, cfg) } diff --git a/core/vm/logger.go b/core/vm/logger.go index 1d50dca6c1b..b9d511b8858 100644 --- a/core/vm/logger.go +++ b/core/vm/logger.go @@ -18,10 +18,12 @@ package vm import ( "encoding/hex" + "encoding/json" "errors" "fmt" "io" "math/big" + "os" "strings" "time" @@ -128,6 +130,27 @@ type Tracer interface { CaptureAccountWrite(account common.Address) error } +// FlushableTracer is a Tracer extension whose accumulated traces has to be +// flushed once the tracing is completed. +type FlushableTracer interface { + Tracer + Flush(tx types.Transaction) +} + +// StructLogRes stores a structured log emitted by the EVM while replaying a +// transaction in debug mode +type StructLogRes struct { + Pc uint64 `json:"pc"` + Op string `json:"op"` + Gas uint64 `json:"gas"` + GasCost uint64 `json:"gasCost"` + Depth int `json:"depth"` + Error error `json:"error,omitempty"` + Stack *[]string `json:"stack,omitempty"` + Memory *[]string `json:"memory,omitempty"` + Storage *map[string]string `json:"storage,omitempty"` +} + // StructLogger is an EVM state logger and implements Tracer. // // StructLogger can capture state based on the given Log configuration and also keeps @@ -261,6 +284,58 @@ func (l *StructLogger) Error() error { return l.err } // Output returns the VM return value captured by the trace. 
func (l *StructLogger) Output() []byte { return l.output } +func (l *StructLogger) Flush(tx types.Transaction) { + w, err1 := os.Create(fmt.Sprintf("txtrace_%x.txt", tx.Hash())) + if err1 != nil { + panic(err1) + } + encoder := json.NewEncoder(w) + logs := FormatLogs(l.StructLogs()) + if err2 := encoder.Encode(logs); err2 != nil { + panic(err2) + } + if err2 := w.Close(); err2 != nil { + panic(err2) + } +} + +// FormatLogs formats EVM returned structured logs for json output +func FormatLogs(logs []StructLog) []StructLogRes { + formatted := make([]StructLogRes, len(logs)) + for index, trace := range logs { + formatted[index] = StructLogRes{ + Pc: trace.Pc, + Op: trace.Op.String(), + Gas: trace.Gas, + GasCost: trace.GasCost, + Depth: trace.Depth, + Error: trace.Err, + } + if trace.Stack != nil { + stack := make([]string, len(trace.Stack)) + for i, stackValue := range trace.Stack { + stack[i] = fmt.Sprintf("%x", math.PaddedBigBytes(stackValue, 32)) + } + formatted[index].Stack = &stack + } + if trace.Memory != nil { + memory := make([]string, 0, (len(trace.Memory)+31)/32) + for i := 0; i+32 <= len(trace.Memory); i += 32 { + memory = append(memory, fmt.Sprintf("%x", trace.Memory[i:i+32])) + } + formatted[index].Memory = &memory + } + if trace.Storage != nil { + storage := make(map[string]string) + for i, storageValue := range trace.Storage { + storage[fmt.Sprintf("%x", i)] = fmt.Sprintf("%x", storageValue) + } + formatted[index].Storage = &storage + } + } + return formatted +} + // WriteTrace writes a formatted trace to the given writer func WriteTrace(writer io.Writer, logs []StructLog) { for _, log := range logs { diff --git a/eth/stagedsync/stage_execute.go b/eth/stagedsync/stage_execute.go index 200f95d6f0f..1d6c76bdb83 100644 --- a/eth/stagedsync/stage_execute.go +++ b/eth/stagedsync/stage_execute.go @@ -16,6 +16,7 @@ import ( "github.com/ledgerwatch/erigon-lib/etl" "github.com/ledgerwatch/erigon-lib/kv" commonold "github.com/ledgerwatch/erigon/common" + ecom 
"github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/changeset" "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/common/math" @@ -123,11 +124,23 @@ func executeBlock( var receipts types.Receipts var stateSyncReceipt *types.ReceiptForStorage - _, isPoSa := effectiveEngine.(consensus.PoSA) + var execRs *core.EphemeralExecResult + _, isPoSa := cfg.engine.(consensus.PoSA) + getHashFn := core.GetHashFn(block.Header(), getHeader) if isPoSa { - receipts, err = core.ExecuteBlockEphemerallyForBSC(cfg.chainConfig, &vmConfig, getHeader, effectiveEngine, block, stateReader, stateWriter, epochReader{tx: tx}, chainReader{config: cfg.chainConfig, tx: tx, blockReader: cfg.blockReader}, contractHasTEVM) + getTracer := func(txIndex int, txHash ecom.Hash) (vm.Tracer, error) { + return vm.NewStructLogger(&vm.LogConfig{}), nil + } + execRs, err = core.ExecuteBlockEphemerallyForBSC(cfg.chainConfig, &vmConfig, getHashFn, cfg.engine, block, stateReader, stateWriter, epochReader{tx: tx}, chainReader{config: cfg.chainConfig, tx: tx, blockReader: cfg.blockReader}, contractHasTEVM, false, getTracer) + receipts = execRs.Receipts + stateSyncReceipt = execRs.ReceiptForStorage } else { - receipts, stateSyncReceipt, err = core.ExecuteBlockEphemerally(cfg.chainConfig, &vmConfig, getHeader, effectiveEngine, block, stateReader, stateWriter, epochReader{tx: tx}, chainReader{config: cfg.chainConfig, tx: tx, blockReader: cfg.blockReader}, contractHasTEVM) + getTracer := func(txIndex int, txHash ecom.Hash) (vm.Tracer, error) { + return vm.NewStructLogger(&vm.LogConfig{}), nil + } + execRs, err = core.ExecuteBlockEphemerally(cfg.chainConfig, &vmConfig, getHashFn, cfg.engine, block, stateReader, stateWriter, epochReader{tx: tx}, chainReader{config: cfg.chainConfig, tx: tx, blockReader: cfg.blockReader}, contractHasTEVM, false, getTracer) + receipts = execRs.Receipts + stateSyncReceipt = execRs.ReceiptForStorage } if err != nil { return err diff --git 
a/eth/stagedsync/stage_mining_exec.go b/eth/stagedsync/stage_mining_exec.go index 3afa9922767..a1742548088 100644 --- a/eth/stagedsync/stage_mining_exec.go +++ b/eth/stagedsync/stage_mining_exec.go @@ -180,7 +180,7 @@ func addTransactionsToMiningBlock(logPrefix string, current *MiningBlock, chainC var miningCommitTx = func(txn types.Transaction, coinbase common.Address, vmConfig *vm.Config, chainConfig params.ChainConfig, ibs *state.IntraBlockState, current *MiningBlock) ([]*types.Log, error) { snap := ibs.Snapshot() - receipt, _, err := core.ApplyTransaction(&chainConfig, getHeader, engine, &coinbase, gasPool, ibs, noop, header, txn, &header.GasUsed, *vmConfig, contractHasTEVM) + receipt, _, err := core.ApplyTransaction(&chainConfig, core.GetHashFn(header, getHeader), engine, &coinbase, gasPool, ibs, noop, header, txn, &header.GasUsed, *vmConfig, contractHasTEVM) if err != nil { ibs.RevertToSnapshot(snap) return nil, err diff --git a/go.mod b/go.mod index bebbc1d6159..415f1852b12 100644 --- a/go.mod +++ b/go.mod @@ -19,7 +19,8 @@ require ( github.com/edsrzf/mmap-go v1.1.0 github.com/emicklei/dot v0.16.0 github.com/emirpasic/gods v1.18.1 - github.com/fjl/gencodec v0.0.0-20191126094850-e283372f291f + github.com/fjl/gencodec v0.0.0-20220412091415-8bb9e558978c + github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect github.com/goccy/go-json v0.9.7 github.com/gofrs/flock v0.8.1 github.com/golang-jwt/jwt/v4 v4.4.1 @@ -92,13 +93,13 @@ require ( github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91 // indirect + github.com/docker/docker v20.10.17+incompatible github.com/dustin/go-humanize v1.0.0 // indirect github.com/flanglet/kanzi-go v1.9.1-0.20211212184056-72dda96261ee // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61 // indirect github.com/go-kit/kit v0.10.0 // indirect 
github.com/go-logfmt/logfmt v0.5.0 // indirect - github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect github.com/go-stack/stack v1.8.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.2 // indirect @@ -161,3 +162,5 @@ require ( modernc.org/strutil v1.1.1 // indirect modernc.org/token v1.0.0 // indirect ) + +require gotest.tools/v3 v3.3.0 // indirect diff --git a/go.sum b/go.sum index b99c0955a95..982d284196a 100644 --- a/go.sum +++ b/go.sum @@ -165,6 +165,8 @@ github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91 h1:Izz0+t1Z5nI16/II7vuEo/nHjodOg0p7+OiDpjX5t1E= github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= +github.com/docker/docker v20.10.17+incompatible h1:JYCuMrWaVNophQTOrMMoSwudOVEfcegoZZrleKc1xwE= +github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48 h1:iZOop7pqsg+56twTopWgwCGxdB5SI2yDO8Ti7eTRliQ= github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= @@ -191,8 +193,8 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fjl/gencodec v0.0.0-20191126094850-e283372f291f h1:Y/gg/utVetS+WS6htAKCTDralkm/8hLIIUAtLFdbdQ8= 
-github.com/fjl/gencodec v0.0.0-20191126094850-e283372f291f/go.mod h1:q+7Z5oyy8cvKF3TakcuihvQvBHFTnXjB+7UP1e2Q+1o= +github.com/fjl/gencodec v0.0.0-20220412091415-8bb9e558978c h1:CndMRAH4JIwxbW8KYq6Q+cGWcGHz0FjGR3QqcInWcW0= +github.com/fjl/gencodec v0.0.0-20220412091415-8bb9e558978c/go.mod h1:AzA8Lj6YtixmJWL+wkKoBGsLWy9gFrAzi4g+5bCKwpY= github.com/flanglet/kanzi-go v1.9.1-0.20211212184056-72dda96261ee h1:CaVlPeoz5kJQ+cAOV+ZDdlr3J2FmKyNkGu9LY+x7cDM= github.com/flanglet/kanzi-go v1.9.1-0.20211212184056-72dda96261ee/go.mod h1:/sUSVgDcbjsisuW42GPDgaMqvJ0McZERNICnD7b1nRA= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= @@ -208,12 +210,10 @@ github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61/go.mod h1:Q0X6pkwTILD github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= -github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= github.com/glycerine/go-unsnap-stream v0.0.0-20190901134440-81cf024a9e0a/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= github.com/glycerine/goconvey v0.0.0-20190315024820-982ee783a72e/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= -github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod 
h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo= @@ -291,7 +291,6 @@ github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORR github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20190309154008-847fc94819f9/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= @@ -359,7 +358,6 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= @@ -383,7 +381,6 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 
github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3PYPwICLl+/9oulQauOuETfgFvhBDffs0= -github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= github.com/ledgerwatch/erigon-lib v0.0.0-20220701042032-ed452dbc4b21 h1:mZAojUAtvuvFLS8sumuYlZrHKGvkjTBxA6fvvujT/Kc= @@ -446,7 +443,6 @@ github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQ github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= @@ -586,6 +582,7 @@ github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3 github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q= github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= @@ -792,12 +789,12 @@ 
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191126055441-b0650ceb63d9/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.10 h1:QjFRCZxdOhBJ/UNgnBZLbNV13DlbnK0quyivTnXJM20= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -878,6 +875,8 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.3.0 h1:MfDY1b1/0xN1CyMlQDac0ziEy9zJQd9CXBRRDHw2jJo= +gotest.tools/v3 v3.3.0/go.mod h1:Mcr9QNxkg0uMvy/YElmo4SpXgJKWgQvYrT7Kw5RzJ1A= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/internal/cmdtest/test_cmd.go b/internal/cmdtest/test_cmd.go new file mode 100644 index 00000000000..b837c9c399c --- /dev/null +++ b/internal/cmdtest/test_cmd.go @@ -0,0 +1,300 @@ +// Copyright 2017 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package cmdtest + +import ( + "bufio" + "bytes" + "fmt" + "io" + "os" + "os/exec" + "regexp" + "strings" + "sync" + "sync/atomic" + "syscall" + "testing" + "text/template" + "time" + + "github.com/docker/docker/pkg/reexec" +) + +func NewTestCmd(t *testing.T, data interface{}) *TestCmd { + return &TestCmd{T: t, Data: data} +} + +type TestCmd struct { + // For total convenience, all testing methods are available. 
+ *testing.T + + Func template.FuncMap + Data interface{} + Cleanup func() + + cmd *exec.Cmd + stdout *bufio.Reader + stdin io.WriteCloser + stderr *testlogger + // Err will contain the process exit error or interrupt signal error + Err error +} + +var id int32 + +// Run exec's the current binary using name as argv[0] which will trigger the +// reexec init function for that name (e.g. "geth-test" in cmd/geth/run_test.go) +func (tt *TestCmd) Run(name string, args ...string) { + id := atomic.AddInt32(&id, 1) + tt.stderr = &testlogger{t: tt.T, name: fmt.Sprintf("%d", id)} + tt.cmd = &exec.Cmd{ + Path: reexec.Self(), + Args: append([]string{name}, args...), + Stderr: tt.stderr, + } + stdout, err := tt.cmd.StdoutPipe() + if err != nil { + tt.Fatal(err) + } + tt.stdout = bufio.NewReader(stdout) + if tt.stdin, err = tt.cmd.StdinPipe(); err != nil { + tt.Fatal(err) + } + if err := tt.cmd.Start(); err != nil { + tt.Fatal(err) + } +} + +// InputLine writes the given text to the child's stdin. +// This method can also be called from an expect template, e.g.: +// +// geth.expect(`Passphrase: {{.InputLine "password"}}`) +func (tt *TestCmd) InputLine(s string) string { + io.WriteString(tt.stdin, s+"\n") + return "" +} + +func (tt *TestCmd) SetTemplateFunc(name string, fn interface{}) { + if tt.Func == nil { + tt.Func = make(map[string]interface{}) + } + tt.Func[name] = fn +} + +// Expect runs its argument as a template, then expects the +// child process to output the result of the template within 5s. +// +// If the template starts with a newline, the newline is removed +// before matching. +func (tt *TestCmd) Expect(tplsource string) { + // Generate the expected output by running the template. + tpl := template.Must(template.New("").Funcs(tt.Func).Parse(tplsource)) + wantbuf := new(bytes.Buffer) + if err := tpl.Execute(wantbuf, tt.Data); err != nil { + panic(err) + } + // Trim exactly one newline at the beginning. 
This makes tests look + // much nicer because all expect strings are at column 0. + want := bytes.TrimPrefix(wantbuf.Bytes(), []byte("\n")) + if err := tt.matchExactOutput(want); err != nil { + tt.Fatal(err) + } + tt.Logf("Matched stdout text:\n%s", want) +} + +// Output reads all output from stdout, and returns the data. +func (tt *TestCmd) Output() []byte { + var buf []byte + tt.withKillTimeout(func() { buf, _ = io.ReadAll(tt.stdout) }) + return buf +} + +func (tt *TestCmd) matchExactOutput(want []byte) error { + buf := make([]byte, len(want)) + n := 0 + tt.withKillTimeout(func() { n, _ = io.ReadFull(tt.stdout, buf) }) + buf = buf[:n] + if n < len(want) || !bytes.Equal(buf, want) { + // Grab any additional buffered output in case of mismatch + // because it might help with debugging. + buf = append(buf, make([]byte, tt.stdout.Buffered())...) + tt.stdout.Read(buf[n:]) + // Find the mismatch position. + for i := 0; i < n; i++ { + if want[i] != buf[i] { + return fmt.Errorf("output mismatch at ◊:\n---------------- (stdout text)\n%s◊%s\n---------------- (expected text)\n%s", + buf[:i], buf[i:n], want) + } + } + if n < len(want) { + return fmt.Errorf("not enough output, got until ◊:\n---------------- (stdout text)\n%s\n---------------- (expected text)\n%s◊%s", + buf, want[:n], want[n:]) + } + } + return nil +} + +// ExpectRegexp expects the child process to output text matching the +// given regular expression within 5s. +// +// Note that an arbitrary amount of output may be consumed by the +// regular expression. This usually means that expect cannot be used +// after ExpectRegexp. 
+func (tt *TestCmd) ExpectRegexp(regex string) (*regexp.Regexp, []string) { + regex = strings.TrimPrefix(regex, "\n") + var ( + re = regexp.MustCompile(regex) + rtee = &runeTee{in: tt.stdout} + matches []int + ) + tt.withKillTimeout(func() { matches = re.FindReaderSubmatchIndex(rtee) }) + output := rtee.buf.Bytes() + if matches == nil { + tt.Fatalf("Output did not match:\n---------------- (stdout text)\n%s\n---------------- (regular expression)\n%s", + output, regex) + return re, nil + } + tt.Logf("Matched stdout text:\n%s", output) + var submatches []string + for i := 0; i < len(matches); i += 2 { + submatch := string(output[matches[i]:matches[i+1]]) + submatches = append(submatches, submatch) + } + return re, submatches +} + +// ExpectExit expects the child process to exit within 5s without +// printing any additional text on stdout. +func (tt *TestCmd) ExpectExit() { + var output []byte + tt.withKillTimeout(func() { + output, _ = io.ReadAll(tt.stdout) + }) + tt.WaitExit() + if tt.Cleanup != nil { + tt.Cleanup() + } + if len(output) > 0 { + tt.Errorf("Unmatched stdout text:\n%s", output) + } +} + +func (tt *TestCmd) WaitExit() { + tt.Err = tt.cmd.Wait() +} + +func (tt *TestCmd) Interrupt() { + tt.Err = tt.cmd.Process.Signal(os.Interrupt) +} + +// ExitStatus exposes the process' OS exit code +// It will only return a valid value after the process has finished. +func (tt *TestCmd) ExitStatus() int { + if tt.Err != nil { + exitErr := tt.Err.(*exec.ExitError) + if exitErr != nil { + if status, ok := exitErr.Sys().(syscall.WaitStatus); ok { + return status.ExitStatus() + } + } + } + return 0 +} + +// StderrText returns any stderr output written so far. +// The returned text holds all log lines after ExpectExit has +// returned. 
+func (tt *TestCmd) StderrText() string { + tt.stderr.mu.Lock() + defer tt.stderr.mu.Unlock() + return tt.stderr.buf.String() +} + +func (tt *TestCmd) CloseStdin() { + tt.stdin.Close() +} + +func (tt *TestCmd) Kill() { + tt.cmd.Process.Kill() + if tt.Cleanup != nil { + tt.Cleanup() + } +} + +func (tt *TestCmd) withKillTimeout(fn func()) { + timeout := time.AfterFunc(5*time.Second, func() { + tt.Log("killing the child process (timeout)") + tt.Kill() + }) + defer timeout.Stop() + fn() +} + +// testlogger logs all written lines via t.Log and also +// collects them for later inspection. +type testlogger struct { + t *testing.T + mu sync.Mutex + buf bytes.Buffer + name string +} + +func (tl *testlogger) Write(b []byte) (n int, err error) { + lines := bytes.Split(b, []byte("\n")) + for _, line := range lines { + if len(line) > 0 { + tl.t.Logf("(stderr:%v) %s", tl.name, line) + } + } + tl.mu.Lock() + tl.buf.Write(b) + tl.mu.Unlock() + return len(b), err +} + +// runeTee collects text read through it into buf. +type runeTee struct { + in interface { + io.Reader + io.ByteReader + io.RuneReader + } + buf bytes.Buffer +} + +func (rtee *runeTee) Read(b []byte) (n int, err error) { + n, err = rtee.in.Read(b) + rtee.buf.Write(b[:n]) + return n, err +} + +func (rtee *runeTee) ReadRune() (r rune, size int, err error) { + r, size, err = rtee.in.ReadRune() + if err == nil { + rtee.buf.WriteRune(r) + } + return r, size, err +} + +func (rtee *runeTee) ReadByte() (b byte, err error) { + b, err = rtee.in.ReadByte() + if err == nil { + rtee.buf.WriteByte(b) + } + return b, err +} diff --git a/tests/state_test_util.go b/tests/state_test_util.go index 4989ab2af97..51e3a94079c 100644 --- a/tests/state_test_util.go +++ b/tests/state_test_util.go @@ -190,7 +190,8 @@ func (t *StateTest) RunNoVerify(rules *params.Rules, tx kv.RwTx, subtest StateSu // Prepare the EVM. 
txContext := core.NewEVMTxContext(msg) contractHasTEVM := func(common.Hash) (bool, error) { return false, nil } - context := core.NewEVMBlockContext(block.Header(), nil, nil, &t.json.Env.Coinbase, contractHasTEVM) + header := block.Header() + context := core.NewEVMBlockContext(header, core.GetHashFn(header, nil), nil, &t.json.Env.Coinbase, contractHasTEVM) context.GetHash = vmTestBlockHash if baseFee != nil { context.BaseFee = new(uint256.Int) diff --git a/turbo/transactions/tracing.go b/turbo/transactions/tracing.go index 3c3a2b3b557..d0d569c93fc 100644 --- a/turbo/transactions/tracing.go +++ b/turbo/transactions/tracing.go @@ -42,7 +42,8 @@ func ComputeTxEnv(ctx context.Context, block *types.Block, cfg *params.ChainConf // Recompute transactions up to the target index. signer := types.MakeSigner(cfg, block.NumberU64()) - BlockContext := core.NewEVMBlockContext(block.Header(), getHeader, engine, nil, contractHasTEVM) + header := block.Header() + BlockContext := core.NewEVMBlockContext(header, core.GetHashFn(header, getHeader), engine, nil, contractHasTEVM) vmenv := vm.NewEVM(BlockContext, vm.TxContext{}, statedb, cfg, vm.Config{}) rules := vmenv.ChainRules() for idx, tx := range block.Transactions() { From 84c3cdc417502080b34d99d095e15b2e2175bc41 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Sat, 2 Jul 2022 21:38:06 +0200 Subject: [PATCH 018/152] Fixed Invalid block number on payload (#4605) --- eth/stagedsync/stage_headers.go | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 204f76b301f..6b6f5f547e9 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -500,7 +500,7 @@ func handleNewPayload( }, nil } - parent, err := cfg.blockReader.Header(ctx, tx, header.ParentHash, headerNumber-1) + parent, err := cfg.blockReader.HeaderByHash(ctx, tx, header.ParentHash) if err != nil { return nil, err } @@ -513,6 +513,14 @@ func 
handleNewPayload( return &privateapi.PayloadStatus{Status: remote.EngineStatus_SYNCING}, nil } + if header.Number.Uint64() != parent.Number.Uint64()+1 { + return &privateapi.PayloadStatus{ + Status: remote.EngineStatus_INVALID, + LatestValidHash: header.ParentHash, + ValidationError: errors.New("invalid block number"), + }, nil + } + cfg.hd.BeaconRequestList.Remove(requestId) for _, tx := range payloadMessage.Body.Transactions { @@ -574,6 +582,10 @@ func verifyAndSaveNewPoSHeader( }, false, nil } + if err := rawdb.WriteHeaderNumber(tx, headerHash, headerNumber); err != nil { + return nil, false, err + } + err = headerInserter.FeedHeaderPoS(tx, header, headerHash) if err != nil { return nil, false, err From 8599dceec7c6e74126de1b63b9a4538f8435aad6 Mon Sep 17 00:00:00 2001 From: ledgerwatch Date: Sat, 2 Jul 2022 20:48:42 +0100 Subject: [PATCH 019/152] [erigon2.2] State reconstitution prototype (#4508) * reconstitution * Add history access without state function * More on state reconstitution * More on state recon * More on state recon * More * More * support dao fork * More on state reconstitution * Update to erigon-lib * More * Added genesis block and filling with history * update * Genesis works * Start on parallel * Preparation for parallel reconstitution, stats for EfSearch * continue with parallel work * Fix history reader * Remove time measurements * Fixes * Fixes and UX improvements * Fixes * More tracing * More fixes * More fixes * Fix code size * Update to latest erigon-lib * Fix for dao fork * Remove hacks * Update to erigon-lib, fix lint Co-authored-by: Alexey Sharp Co-authored-by: Alex Sharp --- cmd/hack/hack.go | 368 +-------- cmd/rpcdaemon22/commands/eth_receipts.go | 5 +- cmd/state/commands/erigon22.go | 211 ++--- cmd/state/commands/history22.go | 20 +- cmd/state/commands/state_recon.go | 775 ++++++++++++++++++ ...istoryReader22.go => history_reader_22.go} | 6 +- core/state/history_reader_nostate.go | 210 +++++ core/state/intra_block_state.go | 8 +- 
core/state/state_object.go | 11 +- core/state/state_recon_writer.go | 257 ++++++ go.mod | 2 +- go.sum | 4 +- turbo/snapshotsync/block_reader.go | 3 + 13 files changed, 1401 insertions(+), 479 deletions(-) create mode 100644 cmd/state/commands/state_recon.go rename core/state/{HistoryReader22.go => history_reader_22.go} (97%) create mode 100644 core/state/history_reader_nostate.go create mode 100644 core/state/state_recon_writer.go diff --git a/cmd/hack/hack.go b/cmd/hack/hack.go index ce4fbf4ae3b..8434f6b3ebe 100644 --- a/cmd/hack/hack.go +++ b/cmd/hack/hack.go @@ -13,10 +13,8 @@ import ( _ "net/http/pprof" //nolint:gosec "os" "path/filepath" - "regexp" "runtime/pprof" "sort" - "strconv" "strings" "time" @@ -25,8 +23,6 @@ import ( "github.com/ledgerwatch/erigon-lib/compress" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/mdbx" - "github.com/ledgerwatch/erigon-lib/recsplit" - "github.com/ledgerwatch/erigon-lib/recsplit/eliasfano32" "golang.org/x/exp/slices" hackdb "github.com/ledgerwatch/erigon/cmd/hack/db" @@ -994,180 +990,6 @@ func devTx(chaindata string) error { return nil } -func mainnetGenesis() error { - g := core.DefaultGenesisBlock() - _, _, err := g.ToBlock() - if err != nil { - return err - } - return nil -} - -func junkdb() error { - dir, err := os.MkdirTemp(".", "junk") - if err != nil { - return fmt.Errorf("creating temp dir for db size test: %w", err) - } - //defer os.RemoveAll(dir) - oneBucketCfg := make(kv.TableCfg) - oneBucketCfg["t"] = kv.TableCfgItem{} - var db kv.RwDB - db, err = mdbx.NewMDBX(log.New()).Path(dir).WithTablessCfg(func(kv.TableCfg) kv.TableCfg { - return oneBucketCfg - }).Open() - if err != nil { - return fmt.Errorf("opening database: %w", err) - } - defer db.Close() - for i := 0; i < 1_000_000; i++ { - if err = db.Update(context.Background(), func(tx kv.RwTx) error { - c, e := tx.RwCursor("t") - if e != nil { - return e - } - defer c.Close() - for j := 0; j < 1_000_000_000; j++ { - var b [8]byte - 
binary.BigEndian.PutUint64(b[:], uint64(i*1_000_000_000+j)) - if e = c.Append(b[:], b[:]); e != nil { - return e - } - } - return nil - }); err != nil { - return err - } - log.Info("Appended records", "bln", i+1) - } - return nil -} - -func histStats() error { - files, err := os.ReadDir(".") - if err != nil { - return err - } - endBlockMap := map[uint64]struct{}{} - pageMap := map[string]map[uint64]uint64{} - keys := []string{"ahistory", "shistory", "abitmap", "sbitmap"} - for _, k := range keys { - pageMap[k] = map[uint64]uint64{} - } - re := regexp.MustCompile(`(ahistory|shistory|abitmap|sbitmap).([0-9]+).txt`) - for _, f := range files { - name := f.Name() - subs := re.FindStringSubmatch(name) - if len(subs) != 3 { - if len(subs) != 0 { - log.Warn("File ignored by changes scan, more than 3 submatches", "name", name, "submatches", len(subs)) - } - continue - } - var endBlock uint64 - if endBlock, err = strconv.ParseUint(subs[2], 10, 64); err != nil { - return err - } - endBlockMap[endBlock] = struct{}{} - var ff *os.File - if ff, err = os.Open(name); err != nil { - return err - } - scanner := bufio.NewScanner(ff) - // Skip 5 lines - for i := 0; i < 5; i++ { - scanner.Scan() - } - var totalPages uint64 - for i := 0; i < 3; i++ { - scanner.Scan() - line := scanner.Text() - p := strings.Index(line, ": ") - var pages uint64 - if pages, err = strconv.ParseUint(line[p+2:], 10, 64); err != nil { - return err - } - totalPages += pages - } - pageMap[subs[1]][endBlock] = totalPages - ff.Close() - } - var endBlocks []uint64 - for endBlock := range endBlockMap { - endBlocks = append(endBlocks, endBlock) - } - slices.Sort(endBlocks) - var lastEndBlock uint64 - fmt.Printf("endBlock,%s\n", strings.Join(keys, ",")) - for _, endBlock := range endBlocks { - fmt.Printf("%d", endBlock) - for _, k := range keys { - if lastEndBlock == 0 { - fmt.Printf(",%.3f", float64(pageMap[k][endBlock])/256.0/1024.0) - } else { - fmt.Printf(",%.3f", 
float64(pageMap[k][endBlock]-pageMap[k][lastEndBlock])/256.0/1024.0) - } - } - fmt.Printf("\n") - lastEndBlock = endBlock - } - return nil -} - -func histStat1(chaindata string) error { - files, err := os.ReadDir(chaindata) - if err != nil { - return err - } - endBlockMap := map[uint64]struct{}{} - sizeMap := map[string]map[uint64]uint64{} - keys := []string{"ahistory", "shistory", "chistory", "abitmap", "sbitmap", "cbitmap"} - for _, k := range keys { - sizeMap[k] = map[uint64]uint64{} - } - re := regexp.MustCompile(fmt.Sprintf("(%s).([0-9]+)-([0-9]+).(dat|idx)", strings.Join(keys, "|"))) - for _, f := range files { - name := f.Name() - subs := re.FindStringSubmatch(name) - if len(subs) != 5 { - if len(subs) != 0 { - log.Warn("File ignored by changes scan, more than 5 submatches", "name", name, "submatches", len(subs)) - } - continue - } - var startBlock uint64 - if startBlock, err = strconv.ParseUint(subs[2], 10, 64); err != nil { - return err - } - var endBlock uint64 - if endBlock, err = strconv.ParseUint(subs[3], 10, 64); err != nil { - return err - } - if endBlock-startBlock < 499_999 { - continue - } - endBlockMap[endBlock] = struct{}{} - if fileInfo, err1 := os.Stat(filepath.Join(chaindata, name)); err1 == nil { - sizeMap[subs[1]][endBlock] += uint64(fileInfo.Size()) - } else { - return err1 - } - } - var endBlocks []uint64 - for endBlock := range endBlockMap { - endBlocks = append(endBlocks, endBlock) - } - slices.Sort(endBlocks) - fmt.Printf("endBlock,%s\n", strings.Join(keys, ",")) - for _, endBlock := range endBlocks { - fmt.Printf("%d", endBlock) - for _, k := range keys { - fmt.Printf(",%.3f", float64(sizeMap[k][endBlock])/1024.0/1024.0/1024.0) - } - fmt.Printf("\n") - } - return nil -} - func chainConfig(name string) error { var chainConfig *params.ChainConfig switch name { @@ -1270,81 +1092,6 @@ func findPrefix(chaindata string) error { return nil } -func readEf(file string, addr []byte) error { - datPath := file + ".dat" - idxPath := file + ".idx" 
- index, err := recsplit.OpenIndex(idxPath) - if err != nil { - return err - } - defer index.Close() - decomp, err := compress.NewDecompressor(datPath) - if err != nil { - return err - } - defer decomp.Close() - indexReader := recsplit.NewIndexReader(index) - offset := indexReader.Lookup(addr) - g := decomp.MakeGetter() - g.Reset(offset) - word, _ := g.Next(nil) - fmt.Printf("%x\n", word) - word, _ = g.NextUncompressed() - ef, _ := eliasfano32.ReadEliasFano(word) - it := ef.Iterator() - line := 0 - for it.HasNext() { - fmt.Printf("%d ", it.Next()) - line++ - if line%20 == 0 { - fmt.Printf("\n") - } - } - fmt.Printf("\n") - return nil -} - -func readBodies(file string) error { - decomp, err := compress.NewDecompressor(file) - if err != nil { - return err - } - defer decomp.Close() - gg := decomp.MakeGetter() - buf, _ := gg.Next(nil) - firstBody := &types.BodyForStorage{} - if err = rlp.DecodeBytes(buf, firstBody); err != nil { - return err - } - //var blockFrom uint64 = 12300000 - //var blockTo uint64 = 12400000 - firstTxID := firstBody.BaseTxId - - lastBody := new(types.BodyForStorage) - i := uint64(0) - for gg.HasNext() { - i++ - //if i == blockTo-blockFrom-1 { - //fmt.Printf("lastBody\n") - buf, _ = gg.Next(buf[:0]) - if err = rlp.DecodeBytes(buf, lastBody); err != nil { - return err - } - //if gg.HasNext() { - // panic(1) - //} - //} else { - if gg.HasNext() { - gg.Skip() - } - //} - } - expectedCount := lastBody.BaseTxId + uint64(lastBody.TxAmount) - firstBody.BaseTxId - fmt.Printf("i=%d, firstBody=%v, lastBody=%v, firstTxID=%d, expectedCount=%d\n", i, firstBody, lastBody, firstTxID, expectedCount) - - return nil -} - func findLogs(chaindata string, block uint64, blockTotal uint64) error { db := mdbx.MustOpen(chaindata) defer db.Close() @@ -1416,106 +1163,21 @@ func findLogs(chaindata string, block uint64, blockTotal uint64) error { return nil } -func decompress(chaindata string) error { - dir := filepath.Join(chaindata, "erigon22") - files, err := 
os.ReadDir(dir) - if err != nil { - return err - } - for _, f := range files { - name := f.Name() - if !strings.HasSuffix(name, ".dat") { - continue - } - if err = decompressAll(dir, filepath.Join(dir, name), strings.Contains(name, "code")); err != nil { - return err - } - } - // Re-read directory - files, err = os.ReadDir(dir) - if err != nil { - return err - } - for _, f := range files { - name := f.Name() - if !strings.HasSuffix(name, ".d") { - continue - } - if err = os.Rename(filepath.Join(dir, name), filepath.Join(dir, name[:len(name)-2])); err != nil { - return err - } - } - return nil -} - -func decompressAll(dir string, filename string, onlyKeys bool) error { - fmt.Printf("decompress file %s, onlyKeys=%t\n", filename, onlyKeys) +func iterate(filename string) error { d, err := compress.NewDecompressor(filename) if err != nil { return err } defer d.Close() - newDatPath := filename + ".d" - comp, err := compress.NewCompressor(context.Background(), "comp", newDatPath, dir, compress.MinPatternScore, 1, log.LvlInfo) - if err != nil { - return err - } - defer comp.Close() - idxPath := filename[:len(filename)-3] + "idx" - idx, err := recsplit.OpenIndex(idxPath) - if err != nil { - return err - } - defer idx.Close() g := d.MakeGetter() - var isKey bool - var word []byte + var buf, bufv []byte for g.HasNext() { - word, _ = g.Next(word[:0]) - if onlyKeys && !isKey { - if err := comp.AddWord(word); err != nil { - return err - } - } else { - if err := comp.AddUncompressedWord(word); err != nil { - return err - } + buf, _ = g.Next(buf[:0]) + bufv, _ = g.Next(bufv[:0]) + s := fmt.Sprintf("%x", buf) + if strings.HasPrefix(s, "000000000000006f6502b7f2bbac8c30a3f67e9a") { + fmt.Printf("%s [%x]\n", s, bufv) } - isKey = !isKey - } - if err = comp.Compress(); err != nil { - return err - } - comp.Close() - offsets := idx.ExtractOffsets() - newD, err := compress.NewDecompressor(newDatPath) - if err != nil { - return err - } - defer newD.Close() - newG := newD.MakeGetter() - 
g.Reset(0) - offset := uint64(0) - newOffset := uint64(0) - for g.HasNext() { - offsets[offset] = newOffset - offset = g.Skip() - newOffset = newG.Skip() - } - newIdxPath := idxPath + ".d" - f, err := os.Create(newIdxPath) - if err != nil { - return err - } - w := bufio.NewWriter(f) - if err = idx.RewriteWithOffsets(w, offsets); err != nil { - return err - } - if err = w.Flush(); err != nil { - return err - } - if err = f.Close(); err != nil { - return err } return nil } @@ -1636,26 +1298,14 @@ func main() { case "devTx": err = devTx(*chaindata) - case "mainnetGenesis": - err = mainnetGenesis() - case "junkdb": - err = junkdb() - case "histStats": - err = histStats() - case "histStat1": - err = histStat1(*chaindata) case "chainConfig": err = chainConfig(*name) case "findPrefix": err = findPrefix(*chaindata) - case "readEf": - err = readEf(*chaindata, common.FromHex(*account)) - case "readBodies": - err = readBodies(*chaindata) case "findLogs": err = findLogs(*chaindata, uint64(*block), uint64(*blockTotal)) - case "decompress": - err = decompress(*chaindata) + case "iterate": + err = iterate(*chaindata) } if err != nil { diff --git a/cmd/rpcdaemon22/commands/eth_receipts.go b/cmd/rpcdaemon22/commands/eth_receipts.go index 3a49b77b49a..0128a6b1ab8 100644 --- a/cmd/rpcdaemon22/commands/eth_receipts.go +++ b/cmd/rpcdaemon22/commands/eth_receipts.go @@ -5,6 +5,7 @@ import ( "fmt" "math/big" "sort" + "time" "github.com/holiman/uint256" "github.com/ledgerwatch/erigon-lib/kv" @@ -69,6 +70,7 @@ func (api *BaseAPI) getReceipts(ctx context.Context, tx kv.Tx, chainConfig *para // GetLogs implements eth_getLogs. Returns an array of logs matching a given filter object. func (api *APIImpl) GetLogs(ctx context.Context, crit filters.FilterCriteria) ([]*types.Log, error) { + start := time.Now() var begin, end uint64 logs := []*types.Log{} @@ -215,7 +217,8 @@ func (api *APIImpl) GetLogs(ctx context.Context, crit filters.FilterCriteria) ([ } logs = append(logs, filtered...) 
} - + stats := api._agg.GetAndResetStats() + log.Info("Finished", "duration", time.Since(start), "history queries", stats.HistoryQueries, "ef search duration", stats.EfSearchTime) return logs, nil } diff --git a/cmd/state/commands/erigon22.go b/cmd/state/commands/erigon22.go index bae4512818c..754c11cdd7f 100644 --- a/cmd/state/commands/erigon22.go +++ b/cmd/state/commands/erigon22.go @@ -80,14 +80,12 @@ func Erigon22(genesis *core.Genesis, chainConfig *params.ChainConfig, logger log } defer historyTx.Rollback() stateDbPath := path.Join(datadir, "statedb") - if block == 0 { - if _, err = os.Stat(stateDbPath); err != nil { - if !errors.Is(err, os.ErrNotExist) { - return err - } - } else if err = os.RemoveAll(stateDbPath); err != nil { + if _, err = os.Stat(stateDbPath); err != nil { + if !errors.Is(err, os.ErrNotExist) { return err } + } else if err = os.RemoveAll(stateDbPath); err != nil { + return err } db, err2 := kv2.NewMDBX(logger).Path(stateDbPath).WriteMap().Open() if err2 != nil { @@ -96,18 +94,6 @@ func Erigon22(genesis *core.Genesis, chainConfig *params.ChainConfig, logger log defer db.Close() aggPath := filepath.Join(datadir, "erigon22") - if block == 0 { - if _, err = os.Stat(aggPath); err != nil { - if !errors.Is(err, os.ErrNotExist) { - return err - } - } else if err = os.RemoveAll(aggPath); err != nil { - return err - } - if err = os.Mkdir(aggPath, os.ModePerm); err != nil { - return err - } - } var rwTx kv.RwTx defer func() { @@ -124,9 +110,11 @@ func Erigon22(genesis *core.Genesis, chainConfig *params.ChainConfig, logger log return fmt.Errorf("create aggregator: %w", err3) } defer agg.Close() + startTxNum := agg.EndTxNumMinimax() + fmt.Printf("Max txNum in files: %d\n", startTxNum) interrupt := false - if block == 0 { + if startTxNum == 0 { _, genesisIbs, err := genesis.ToBlock() if err != nil { return err @@ -176,6 +164,16 @@ func Erigon22(genesis *core.Genesis, chainConfig *params.ChainConfig, logger log blockReader = 
snapshotsync.NewBlockReaderWithSnapshots(allSnapshots) engine := initConsensusEngine(chainConfig, logger, allSnapshots) + getHeader := func(hash common.Hash, number uint64) *types.Header { + h, err := blockReader.Header(ctx, historyTx, hash, number) + if err != nil { + panic(err) + } + return h + } + readWrapper := &ReaderWrapper22{r: agg, roTx: rwTx} + writeWrapper := &WriterWrapper22{w: agg} + for !interrupt { blockNum++ trace = traceBlock > 0 && blockNum == uint64(traceBlock) @@ -192,40 +190,12 @@ func Erigon22(genesis *core.Genesis, chainConfig *params.ChainConfig, logger log log.Info("history: block is nil", "block", blockNum) break } - if blockNum <= block { - // Skip that block, but increase txNum - txNum += uint64(len(b.Transactions())) + 2 // Pre and Post block transaction - continue - } agg.SetTx(rwTx) agg.SetTxNum(txNum) - readWrapper := &ReaderWrapper22{r: agg, roTx: rwTx, blockNum: blockNum} - writeWrapper := &WriterWrapper22{w: agg, blockNum: blockNum} - getHeader := func(hash common.Hash, number uint64) *types.Header { - h, err := blockReader.Header(ctx, historyTx, hash, number) - if err != nil { - panic(err) - } - return h - } - - txNum++ // Pre-block transaction - agg.SetTxNum(txNum) - - if txNum, _, err = processBlock22(trace, txNum, readWrapper, writeWrapper, chainConfig, engine, getHeader, b, vmConfig); err != nil { + if txNum, _, err = processBlock22(startTxNum, trace, txNum, readWrapper, writeWrapper, chainConfig, engine, getHeader, b, vmConfig); err != nil { return fmt.Errorf("processing block %d: %w", blockNum, err) } - agg.SetTxNum(txNum) - if err := agg.FinishTx(); err != nil { - return fmt.Errorf("failed to finish tx: %w", err) - } - if trace { - fmt.Printf("FinishTx called for %d block %d\n", txNum, blockNum) - } - - txNum++ // Post-block transaction - agg.SetTxNum(txNum) // Check for interrupts select { @@ -255,6 +225,7 @@ func Erigon22(genesis *core.Genesis, chainConfig *params.ChainConfig, logger log } } agg.SetTx(rwTx) + 
readWrapper.roTx = rwTx } } @@ -303,7 +274,7 @@ func (s *stat22) delta(aStats libstate.FilesStats, blockNum uint64) *stat22 { return s } -func processBlock22(trace bool, txNumStart uint64, rw *ReaderWrapper22, ww *WriterWrapper22, chainConfig *params.ChainConfig, +func processBlock22(startTxNum uint64, trace bool, txNumStart uint64, rw *ReaderWrapper22, ww *WriterWrapper22, chainConfig *params.ChainConfig, engine consensus.Engine, getHeader func(hash common.Hash, number uint64) *types.Header, block *types.Block, vmConfig vm.Config, ) (uint64, types.Receipts, error) { defer blockExecutionTimer.UpdateDuration(time.Now()) @@ -313,82 +284,119 @@ func processBlock22(trace bool, txNumStart uint64, rw *ReaderWrapper22, ww *Writ gp := new(core.GasPool).AddGas(block.GasLimit()) usedGas := new(uint64) var receipts types.Receipts - daoBlock := chainConfig.DAOForkSupport && chainConfig.DAOForkBlock != nil && chainConfig.DAOForkBlock.Cmp(block.Number()) == 0 rules := chainConfig.Rules(block.NumberU64()) txNum := txNumStart ww.w.SetTxNum(txNum) + rw.blockNum = block.NumberU64() + ww.blockNum = block.NumberU64() - for i, tx := range block.Transactions() { + daoFork := txNum >= startTxNum && chainConfig.DAOForkSupport && chainConfig.DAOForkBlock != nil && chainConfig.DAOForkBlock.Cmp(block.Number()) == 0 + if daoFork { ibs := state.New(rw) - if daoBlock { - misc.ApplyDAOHardFork(ibs) - daoBlock = false + // TODO Actually add tracing to the DAO related accounts + misc.ApplyDAOHardFork(ibs) + if err := ibs.FinalizeTx(rules, ww); err != nil { + return 0, nil, err } - ibs.Prepare(tx.Hash(), block.Hash(), i) - ct := NewCallTracer() - vmConfig.Tracer = ct - receipt, _, err := core.ApplyTransaction(chainConfig, core.GetHashFn(header, getHeader), engine, nil, gp, ibs, ww, header, tx, usedGas, vmConfig, nil) - if err != nil { - return 0, nil, fmt.Errorf("could not apply tx %d [%x] failed: %w", i, tx.Hash(), err) + if err := ww.w.FinishTx(); err != nil { + return 0, nil, fmt.Errorf("finish 
daoFork failed: %w", err) } - for from := range ct.froms { - if err := ww.w.AddTraceFrom(from[:]); err != nil { - return 0, nil, err + } + + txNum++ // Pre-block transaction + ww.w.SetTxNum(txNum) + + for i, tx := range block.Transactions() { + if txNum >= startTxNum { + ibs := state.New(rw) + ibs.Prepare(tx.Hash(), block.Hash(), i) + ct := NewCallTracer() + vmConfig.Tracer = ct + receipt, _, err := core.ApplyTransaction(chainConfig, core.GetHashFn(header, getHeader), engine, nil, gp, ibs, ww, header, tx, usedGas, vmConfig, nil) + if err != nil { + return 0, nil, fmt.Errorf("could not apply tx %d [%x] failed: %w", i, tx.Hash(), err) } - } - for to := range ct.tos { - if err := ww.w.AddTraceTo(to[:]); err != nil { - return 0, nil, err + for from := range ct.froms { + if err := ww.w.AddTraceFrom(from[:]); err != nil { + return 0, nil, err + } } - } - receipts = append(receipts, receipt) - for _, log := range receipt.Logs { - if err = ww.w.AddLogAddr(log.Address[:]); err != nil { - return 0, nil, fmt.Errorf("adding event log for addr %x: %w", log.Address, err) + for to := range ct.tos { + if err := ww.w.AddTraceTo(to[:]); err != nil { + return 0, nil, err + } } - for _, topic := range log.Topics { - if err = ww.w.AddLogTopic(topic[:]); err != nil { - return 0, nil, fmt.Errorf("adding event log for topic %x: %w", topic, err) + receipts = append(receipts, receipt) + for _, log := range receipt.Logs { + if err = ww.w.AddLogAddr(log.Address[:]); err != nil { + return 0, nil, fmt.Errorf("adding event log for addr %x: %w", log.Address, err) + } + for _, topic := range log.Topics { + if err = ww.w.AddLogTopic(topic[:]); err != nil { + return 0, nil, fmt.Errorf("adding event log for topic %x: %w", topic, err) + } } } - } - if err = ww.w.FinishTx(); err != nil { - return 0, nil, fmt.Errorf("finish tx %d [%x] failed: %w", i, tx.Hash(), err) - } - if trace { - fmt.Printf("FinishTx called for blockNum=%d, txIndex=%d, txNum=%d txHash=[%x]\n", block.NumberU64(), i, txNum, 
tx.Hash()) + if err = ww.w.FinishTx(); err != nil { + return 0, nil, fmt.Errorf("finish tx %d [%x] failed: %w", i, tx.Hash(), err) + } + if trace { + fmt.Printf("FinishTx called for blockNum=%d, txIndex=%d, txNum=%d txHash=[%x]\n", block.NumberU64(), i, txNum, tx.Hash()) + } } txNum++ ww.w.SetTxNum(txNum) } - ibs := state.New(rw) - if err := ww.w.AddTraceTo(block.Coinbase().Bytes()); err != nil { - return 0, nil, fmt.Errorf("adding coinbase trace: %w", err) - } - for _, uncle := range block.Uncles() { - if err := ww.w.AddTraceTo(uncle.Coinbase.Bytes()); err != nil { - return 0, nil, fmt.Errorf("adding uncle trace: %w", err) + if txNum >= startTxNum && chainConfig.IsByzantium(block.NumberU64()) { + receiptSha := types.DeriveSha(receipts) + if receiptSha != block.ReceiptHash() { + fmt.Printf("mismatched receipt headers for block %d\n", block.NumberU64()) + for j, receipt := range receipts { + fmt.Printf("tx %d, used gas: %d\n", j, receipt.GasUsed) + } } } - // Finalize the block, applying any consensus engine specific extras (e.g. block rewards) - if _, _, _, err := engine.FinalizeAndAssemble(chainConfig, header, ibs, block.Transactions(), block.Uncles(), receipts, nil, nil, nil, nil); err != nil { - return 0, nil, fmt.Errorf("finalize of block %d failed: %w", block.NumberU64(), err) - } + if txNum >= startTxNum { + ibs := state.New(rw) + if err := ww.w.AddTraceTo(block.Coinbase().Bytes()); err != nil { + return 0, nil, fmt.Errorf("adding coinbase trace: %w", err) + } + for _, uncle := range block.Uncles() { + if err := ww.w.AddTraceTo(uncle.Coinbase.Bytes()); err != nil { + return 0, nil, fmt.Errorf("adding uncle trace: %w", err) + } + } + + // Finalize the block, applying any consensus engine specific extras (e.g. 
block rewards) + if _, _, err := engine.Finalize(chainConfig, header, ibs, block.Transactions(), block.Uncles(), receipts, nil, nil, nil); err != nil { + return 0, nil, fmt.Errorf("finalize of block %d failed: %w", block.NumberU64(), err) + } + + if err := ibs.CommitBlock(rules, ww); err != nil { + return 0, nil, fmt.Errorf("committing block %d failed: %w", block.NumberU64(), err) + } - if err := ibs.CommitBlock(rules, ww); err != nil { - return 0, nil, fmt.Errorf("committing block %d failed: %w", block.NumberU64(), err) + if err := ww.w.FinishTx(); err != nil { + return 0, nil, fmt.Errorf("failed to finish tx: %w", err) + } + if trace { + fmt.Printf("FinishTx called for %d block %d\n", txNum, block.NumberU64()) + } } + txNum++ // Post-block transaction + ww.w.SetTxNum(txNum) + return txNum, receipts, nil } // Implements StateReader and StateWriter type ReaderWrapper22 struct { - blockNum uint64 roTx kv.Tx r *libstate.Aggregator + blockNum uint64 } type WriterWrapper22 struct { @@ -431,6 +439,9 @@ func (rw *ReaderWrapper22) ReadAccountData(address common.Address) (*accounts.Ac if incBytes > 0 { a.Incarnation = bytesToUint64(enc[pos : pos+incBytes]) } + if rw.blockNum == 10264901 { + fmt.Printf("block %d ReadAccount [%x] => {Balance: %d, Nonce: %d}\n", rw.blockNum, address, &a.Balance, a.Nonce) + } return &a, nil } @@ -439,9 +450,15 @@ func (rw *ReaderWrapper22) ReadAccountStorage(address common.Address, incarnatio if err != nil { return nil, err } + if rw.blockNum == 10264901 { + fmt.Printf("block %d ReadStorage [%x] [%x] => [%x]\n", rw.blockNum, address, *key, enc) + } if enc == nil { return nil, nil } + if len(enc) == 1 && enc[0] == 0 { + return nil, nil + } return enc, nil } @@ -541,6 +558,10 @@ func (ww *WriterWrapper22) DeleteAccount(address common.Address, original *accou } func (ww *WriterWrapper22) WriteAccountStorage(address common.Address, incarnation uint64, key *common.Hash, original, value *uint256.Int) error { + trace := fmt.Sprintf("%x", address) == 
"000000000000006f6502b7f2bbac8c30a3f67e9a" + if trace { + fmt.Printf("block %d WriteAccountStorage [%x] [%x] => [%x]\n", ww.blockNum, address, *key, value.Bytes()) + } if err := ww.w.WriteAccountStorage(address.Bytes(), key.Bytes(), value.Bytes()); err != nil { return err } diff --git a/cmd/state/commands/history22.go b/cmd/state/commands/history22.go index f8b30069496..a7ecf4d8ad9 100644 --- a/cmd/state/commands/history22.go +++ b/cmd/state/commands/history22.go @@ -175,7 +175,6 @@ func History22(genesis *core.Genesis, logger log.Logger) error { readWrapper.SetTrace(blockNum == uint64(traceBlock)) } writeWrapper := state.NewNoopWriter() - txNum++ // Pre block transaction getHeader := func(hash common.Hash, number uint64) *types.Header { h, err := blockReader.Header(ctx, historyTx, hash, number) if err != nil { @@ -227,15 +226,24 @@ func runHistory22(trace bool, blockNum, txNumStart uint64, hw *state.HistoryRead gp := new(core.GasPool).AddGas(block.GasLimit()) usedGas := new(uint64) var receipts types.Receipts - daoBlock := chainConfig.DAOForkSupport && chainConfig.DAOForkBlock != nil && chainConfig.DAOForkBlock.Cmp(block.Number()) == 0 + rules := chainConfig.Rules(block.NumberU64()) txNum := txNumStart + hw.SetTxNum(txNum) + daoFork := chainConfig.DAOForkSupport && chainConfig.DAOForkBlock != nil && chainConfig.DAOForkBlock.Cmp(block.Number()) == 0 + if daoFork { + ibs := state.New(hw) + misc.ApplyDAOHardFork(ibs) + if err := ibs.FinalizeTx(rules, ww); err != nil { + return 0, nil, err + } + if err := hw.FinishTx(); err != nil { + return 0, nil, fmt.Errorf("finish dao fork failed: %w", err) + } + } + txNum++ // Pre block transaction for i, tx := range block.Transactions() { hw.SetTxNum(txNum) ibs := state.New(hw) - if daoBlock { - misc.ApplyDAOHardFork(ibs) - daoBlock = false - } ibs.Prepare(tx.Hash(), block.Hash(), i) receipt, _, err := core.ApplyTransaction(chainConfig, core.GetHashFn(header, getHeader), engine, nil, gp, ibs, ww, header, tx, usedGas, vmConfig, 
nil) if err != nil { diff --git a/cmd/state/commands/state_recon.go b/cmd/state/commands/state_recon.go new file mode 100644 index 00000000000..501ec1420a4 --- /dev/null +++ b/cmd/state/commands/state_recon.go @@ -0,0 +1,775 @@ +package commands + +import ( + "context" + "errors" + "fmt" + "math/big" + "os" + "os/signal" + "path" + "path/filepath" + "runtime" + "sort" + "sync" + "sync/atomic" + "syscall" + "time" + + "github.com/RoaringBitmap/roaring/roaring64" + libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/kv" + kv2 "github.com/ledgerwatch/erigon-lib/kv/mdbx" + libstate "github.com/ledgerwatch/erigon-lib/state" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/dbutils" + "github.com/ledgerwatch/erigon/consensus" + "github.com/ledgerwatch/erigon/consensus/misc" + "github.com/ledgerwatch/erigon/core" + "github.com/ledgerwatch/erigon/core/state" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/core/types/accounts" + "github.com/ledgerwatch/erigon/core/vm" + "github.com/ledgerwatch/erigon/crypto" + "github.com/ledgerwatch/erigon/eth/ethconfig" + "github.com/ledgerwatch/erigon/eth/stagedsync" + "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/turbo/services" + "github.com/ledgerwatch/erigon/turbo/snapshotsync" + "github.com/ledgerwatch/log/v3" + "github.com/spf13/cobra" +) + +func init() { + withBlock(reconCmd) + withDataDir(reconCmd) + rootCmd.AddCommand(reconCmd) +} + +var reconCmd = &cobra.Command{ + Use: "recon", + Short: "Exerimental command to reconstitute the state from state history at given block", + RunE: func(cmd *cobra.Command, args []string) error { + logger := log.New() + return Recon(genesis, logger) + }, +} + +type ReconWorker struct { + lock sync.Locker + wg *sync.WaitGroup + rs *state.ReconState + blockReader services.FullBlockReader + allSnapshots *snapshotsync.RoSnapshots + stateWriter *state.StateReconWriter + stateReader 
*state.HistoryReaderNoState + firstBlock bool + lastBlockNum uint64 + lastBlockHash common.Hash + lastHeader *types.Header + lastRules *params.Rules + getHeader func(hash common.Hash, number uint64) *types.Header + ctx context.Context + engine consensus.Engine + txNums []uint64 + chainConfig *params.ChainConfig + logger log.Logger + genesis *core.Genesis +} + +func NewReconWorker(lock sync.Locker, wg *sync.WaitGroup, rs *state.ReconState, + a *libstate.Aggregator, blockReader services.FullBlockReader, allSnapshots *snapshotsync.RoSnapshots, + txNums []uint64, chainConfig *params.ChainConfig, logger log.Logger, genesis *core.Genesis, +) *ReconWorker { + ac := a.MakeContext() + return &ReconWorker{ + lock: lock, + wg: wg, + rs: rs, + blockReader: blockReader, + allSnapshots: allSnapshots, + ctx: context.Background(), + stateWriter: state.NewStateReconWriter(ac, rs), + stateReader: state.NewHistoryReaderNoState(ac, rs), + txNums: txNums, + chainConfig: chainConfig, + logger: logger, + genesis: genesis, + } +} + +func (rw *ReconWorker) SetTx(tx kv.Tx) { + rw.stateReader.SetTx(tx) +} + +func (rw *ReconWorker) run() { + defer rw.wg.Done() + rw.firstBlock = true + rw.getHeader = func(hash common.Hash, number uint64) *types.Header { + h, err := rw.blockReader.Header(rw.ctx, nil, hash, number) + if err != nil { + panic(err) + } + return h + } + rw.engine = initConsensusEngine(rw.chainConfig, rw.logger, rw.allSnapshots) + for txNum, ok := rw.rs.Schedule(); ok; txNum, ok = rw.rs.Schedule() { + rw.runTxNum(txNum) + } +} + +func (rw *ReconWorker) runTxNum(txNum uint64) { + rw.lock.Lock() + defer rw.lock.Unlock() + rw.stateReader.SetTxNum(txNum) + rw.stateReader.ResetError() + rw.stateWriter.SetTxNum(txNum) + noop := state.NewNoopWriter() + // Find block number + blockNum := uint64(sort.Search(len(rw.txNums), func(i int) bool { + return rw.txNums[i] > txNum + })) + if rw.firstBlock || blockNum != rw.lastBlockNum { + var err error + if rw.lastHeader, err = 
rw.blockReader.HeaderByNumber(rw.ctx, nil, blockNum); err != nil { + panic(err) + } + rw.lastBlockNum = blockNum + rw.lastBlockHash = rw.lastHeader.Hash() + rw.lastRules = rw.chainConfig.Rules(blockNum) + rw.firstBlock = false + } + var startTxNum uint64 + if blockNum > 0 { + startTxNum = rw.txNums[blockNum-1] + } + ibs := state.New(rw.stateReader) + daoForkTx := rw.chainConfig.DAOForkSupport && rw.chainConfig.DAOForkBlock != nil && rw.chainConfig.DAOForkBlock.Uint64() == blockNum && txNum == rw.txNums[blockNum-1] + var err error + if blockNum == 0 { + //fmt.Printf("txNum=%d, blockNum=%d, Genesis\n", txNum, blockNum) + // Genesis block + _, ibs, err = rw.genesis.ToBlock() + if err != nil { + panic(err) + } + } else if daoForkTx { + //fmt.Printf("txNum=%d, blockNum=%d, DAO fork\n", txNum, blockNum) + misc.ApplyDAOHardFork(ibs) + if err := ibs.FinalizeTx(rw.lastRules, noop); err != nil { + panic(err) + } + } else if txNum+1 == rw.txNums[blockNum] { + //fmt.Printf("txNum=%d, blockNum=%d, finalisation of the block\n", txNum, blockNum) + // End of block transaction in a block + block, _, err := rw.blockReader.BlockWithSenders(rw.ctx, nil, rw.lastBlockHash, blockNum) + if err != nil { + panic(err) + } + if _, _, err := rw.engine.Finalize(rw.chainConfig, rw.lastHeader, ibs, block.Transactions(), block.Uncles(), nil /* receipts */, nil, nil, nil); err != nil { + panic(fmt.Errorf("finalize of block %d failed: %w", blockNum, err)) + } + } else { + txIndex := txNum - startTxNum - 1 + //fmt.Printf("txNum=%d, blockNum=%d, txIndex=%d\n", txNum, blockNum, txIndex) + txn, err := rw.blockReader.TxnByIdxInBlock(rw.ctx, nil, blockNum, int(txIndex)) + if err != nil { + panic(err) + } + txHash := txn.Hash() + gp := new(core.GasPool).AddGas(txn.GetGas()) + //fmt.Printf("txNum=%d, blockNum=%d, txIndex=%d, gas=%d, input=[%x]\n", txNum, blockNum, txIndex, txn.GetGas(), txn.GetData()) + usedGas := new(uint64) + vmConfig := vm.Config{NoReceipts: true, SkipAnalysis: 
core.SkipAnalysis(rw.chainConfig, blockNum)} + contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } + ibs.Prepare(txHash, rw.lastBlockHash, int(txIndex)) + _, _, err = core.ApplyTransaction(rw.chainConfig, core.GetHashFn(rw.lastHeader, rw.getHeader), rw.engine, nil, gp, ibs, noop, rw.lastHeader, txn, usedGas, vmConfig, contractHasTEVM) + if err != nil { + panic(fmt.Errorf("could not apply tx %d [%x] failed: %w", txIndex, txHash, err)) + } + } + if dependency, ok := rw.stateReader.ReadError(); ok { + //fmt.Printf("rollback %d\n", txNum) + rw.rs.RollbackTxNum(txNum, dependency) + } else { + if err = ibs.CommitBlock(rw.lastRules, rw.stateWriter); err != nil { + panic(err) + } + //fmt.Printf("commit %d\n", txNum) + rw.rs.CommitTxNum(txNum) + } +} + +type FillWorker struct { + txNum uint64 + doneCount *uint64 + rs *state.ReconState + ac *libstate.AggregatorContext + fromKey, toKey []byte + currentKey []byte + bitmap roaring64.Bitmap + total uint64 + progress uint64 +} + +func NewFillWorker(txNum uint64, doneCount *uint64, rs *state.ReconState, a *libstate.Aggregator, fromKey, toKey []byte) *FillWorker { + fw := &FillWorker{ + txNum: txNum, + doneCount: doneCount, + rs: rs, + ac: a.MakeContext(), + fromKey: fromKey, + toKey: toKey, + } + return fw +} + +func (fw *FillWorker) Total() uint64 { + return atomic.LoadUint64(&fw.total) +} + +func (fw *FillWorker) Progress() uint64 { + return atomic.LoadUint64(&fw.progress) +} + +func (fw *FillWorker) fillAccounts() { + defer func() { + atomic.AddUint64(fw.doneCount, 1) + }() + it := fw.ac.IterateAccountsHistory(fw.fromKey, fw.toKey, fw.txNum) + atomic.StoreUint64(&fw.total, it.Total()) + for it.HasNext() { + key, val, progress := it.Next() + atomic.StoreUint64(&fw.progress, progress) + fw.currentKey = key + if len(val) > 0 { + var a accounts.Account + a.Reset() + pos := 0 + nonceBytes := int(val[pos]) + pos++ + if nonceBytes > 0 { + a.Nonce = bytesToUint64(val[pos : pos+nonceBytes]) + pos += 
nonceBytes + } + balanceBytes := int(val[pos]) + pos++ + if balanceBytes > 0 { + a.Balance.SetBytes(val[pos : pos+balanceBytes]) + pos += balanceBytes + } + codeHashBytes := int(val[pos]) + pos++ + if codeHashBytes > 0 { + copy(a.CodeHash[:], val[pos:pos+codeHashBytes]) + pos += codeHashBytes + } + incBytes := int(val[pos]) + pos++ + if incBytes > 0 { + a.Incarnation = bytesToUint64(val[pos : pos+incBytes]) + } + value := make([]byte, a.EncodingLengthForStorage()) + a.EncodeForStorage(value) + fw.rs.Put(kv.PlainState, key, value) + //fmt.Printf("Account [%x]=>{Balance: %d, Nonce: %d, Root: %x, CodeHash: %x}\n", key, &a.Balance, a.Nonce, a.Root, a.CodeHash) + } + } +} + +func (fw *FillWorker) fillStorage() { + defer func() { + atomic.AddUint64(fw.doneCount, 1) + }() + it := fw.ac.IterateStorageHistory(fw.fromKey, fw.toKey, fw.txNum) + atomic.StoreUint64(&fw.total, it.Total()) + for it.HasNext() { + key, val, progress := it.Next() + atomic.StoreUint64(&fw.progress, progress) + fw.currentKey = key + compositeKey := dbutils.PlainGenerateCompositeStorageKey(key[:20], state.FirstContractIncarnation, key[20:]) + if len(val) > 0 { + if len(val) > 1 || val[0] != 0 { + fw.rs.Put(kv.PlainState, compositeKey, val) + } + //fmt.Printf("Storage [%x] => [%x]\n", compositeKey, val) + } + } +} + +func (fw *FillWorker) fillCode() { + defer func() { + atomic.AddUint64(fw.doneCount, 1) + }() + it := fw.ac.IterateCodeHistory(fw.fromKey, fw.toKey, fw.txNum) + atomic.StoreUint64(&fw.total, it.Total()) + for it.HasNext() { + key, val, progress := it.Next() + atomic.StoreUint64(&fw.progress, progress) + fw.currentKey = key + compositeKey := dbutils.PlainGenerateStoragePrefix(key, state.FirstContractIncarnation) + if len(val) > 0 { + if len(val) > 1 || val[0] != 0 { + codeHash := crypto.Keccak256(val) + fw.rs.Put(kv.Code, codeHash[:], val) + fw.rs.Put(kv.PlainContractCode, compositeKey, codeHash[:]) + } + //fmt.Printf("Code [%x] => [%x]\n", compositeKey, val) + } + } +} + +func (fw 
*FillWorker) ResetProgress() { + fw.total = 0 + fw.progress = 0 +} + +func (fw *FillWorker) bitmapAccounts() { + defer func() { + atomic.AddUint64(fw.doneCount, 1) + }() + it := fw.ac.IterateAccountsReconTxs(fw.fromKey, fw.toKey, fw.txNum) + atomic.StoreUint64(&fw.total, it.Total()) + for it.HasNext() { + txNum, progress := it.Next() + atomic.StoreUint64(&fw.progress, progress) + fw.bitmap.Add(txNum) + } +} + +func (fw *FillWorker) bitmapStorage() { + defer func() { + atomic.AddUint64(fw.doneCount, 1) + }() + it := fw.ac.IterateStorageReconTxs(fw.fromKey, fw.toKey, fw.txNum) + atomic.StoreUint64(&fw.total, it.Total()) + for it.HasNext() { + txNum, progress := it.Next() + atomic.StoreUint64(&fw.progress, progress) + fw.bitmap.Add(txNum) + } +} + +func (fw *FillWorker) bitmapCode() { + defer func() { + atomic.AddUint64(fw.doneCount, 1) + }() + it := fw.ac.IterateCodeReconTxs(fw.fromKey, fw.toKey, fw.txNum) + atomic.StoreUint64(&fw.total, it.Total()) + for it.HasNext() { + txNum, progress := it.Next() + atomic.StoreUint64(&fw.progress, progress) + fw.bitmap.Add(txNum) + } +} + +func Recon(genesis *core.Genesis, logger log.Logger) error { + sigs := make(chan os.Signal, 1) + interruptCh := make(chan bool, 1) + signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) + + go func() { + <-sigs + interruptCh <- true + }() + ctx := context.Background() + aggPath := filepath.Join(datadir, "erigon23") + agg, err := libstate.NewAggregator(aggPath, AggregationStep) + if err != nil { + return fmt.Errorf("create history: %w", err) + } + defer agg.Close() + reconDbPath := path.Join(datadir, "recondb") + if _, err = os.Stat(reconDbPath); err != nil { + if !errors.Is(err, os.ErrNotExist) { + return err + } + } else if err = os.RemoveAll(reconDbPath); err != nil { + return err + } + db, err := kv2.NewMDBX(logger).Path(reconDbPath).WriteMap().Open() + if err != nil { + return err + } + var blockReader services.FullBlockReader + var allSnapshots *snapshotsync.RoSnapshots + allSnapshots = 
snapshotsync.NewRoSnapshots(ethconfig.NewSnapCfg(true, false, true), path.Join(datadir, "snapshots")) + defer allSnapshots.Close() + if err := allSnapshots.Reopen(); err != nil { + return fmt.Errorf("reopen snapshot segments: %w", err) + } + blockReader = snapshotsync.NewBlockReaderWithSnapshots(allSnapshots) + // Compute mapping blockNum -> last TxNum in that block + txNums := make([]uint64, allSnapshots.BlocksAvailable()+1) + if err = allSnapshots.Bodies.View(func(bs []*snapshotsync.BodySegment) error { + for _, b := range bs { + if err = b.Iterate(func(blockNum, baseTxNum, txAmount uint64) { + txNums[blockNum] = baseTxNum + txAmount + }); err != nil { + return err + } + } + return nil + }); err != nil { + return fmt.Errorf("build txNum => blockNum mapping: %w", err) + } + endTxNumMinimax := agg.EndTxNumMinimax() + fmt.Printf("Max txNum in files: %d\n", endTxNumMinimax) + blockNum := uint64(sort.Search(len(txNums), func(i int) bool { + return txNums[i] > endTxNumMinimax + })) + if blockNum == uint64(len(txNums)) { + return fmt.Errorf("mininmax txNum not found in snapshot blocks: %d", endTxNumMinimax) + } + if blockNum == 0 { + return fmt.Errorf("not enough transactions in the history data") + } + if block+1 > blockNum { + return fmt.Errorf("specified block %d which is higher than available %d", block, blockNum) + } + fmt.Printf("Max blockNum = %d\n", blockNum) + blockNum = block + 1 + txNum := txNums[blockNum-1] + fmt.Printf("Corresponding block num = %d, txNum = %d\n", blockNum, txNum) + workerCount := runtime.NumCPU() + var wg sync.WaitGroup + rs := state.NewReconState() + var fromKey, toKey []byte + bigCount := big.NewInt(int64(workerCount)) + bigStep := big.NewInt(0x100000000) + bigStep.Div(bigStep, bigCount) + bigCurrent := big.NewInt(0) + fillWorkers := make([]*FillWorker, workerCount) + var doneCount uint64 + for i := 0; i < workerCount; i++ { + fromKey = toKey + if i == workerCount-1 { + toKey = nil + } else { + bigCurrent.Add(bigCurrent, bigStep) + toKey 
= make([]byte, 4) + bigCurrent.FillBytes(toKey) + } + //fmt.Printf("%d) Fill worker [%x] - [%x]\n", i, fromKey, toKey) + fillWorkers[i] = NewFillWorker(txNum, &doneCount, rs, agg, fromKey, toKey) + } + logEvery := time.NewTicker(logInterval) + defer logEvery.Stop() + doneCount = 0 + for i := 0; i < workerCount; i++ { + fillWorkers[i].ResetProgress() + go fillWorkers[i].bitmapAccounts() + } + for atomic.LoadUint64(&doneCount) < uint64(workerCount) { + select { + case <-logEvery.C: + var m runtime.MemStats + libcommon.ReadMemStats(&m) + var p float64 + for i := 0; i < workerCount; i++ { + if total := fillWorkers[i].Total(); total > 0 { + p += float64(fillWorkers[i].Progress()) / float64(total) + } + } + p *= 100.0 + log.Info("Scan accounts history", "workers", workerCount, "progress", fmt.Sprintf("%.2f%%", p), + "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys), + ) + } + } + doneCount = 0 + for i := 0; i < workerCount; i++ { + fillWorkers[i].ResetProgress() + go fillWorkers[i].bitmapStorage() + } + for atomic.LoadUint64(&doneCount) < uint64(workerCount) { + select { + case <-logEvery.C: + var m runtime.MemStats + libcommon.ReadMemStats(&m) + var p float64 + for i := 0; i < workerCount; i++ { + if total := fillWorkers[i].Total(); total > 0 { + p += float64(fillWorkers[i].Progress()) / float64(total) + } + } + p *= 100.0 + log.Info("Scan storage history", "workers", workerCount, "progress", fmt.Sprintf("%.2f%%", p), + "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys), + ) + } + } + doneCount = 0 + for i := 0; i < workerCount; i++ { + fillWorkers[i].ResetProgress() + go fillWorkers[i].bitmapCode() + } + for atomic.LoadUint64(&doneCount) < uint64(workerCount) { + select { + case <-logEvery.C: + var m runtime.MemStats + libcommon.ReadMemStats(&m) + var p float64 + for i := 0; i < workerCount; i++ { + if total := fillWorkers[i].Total(); total > 0 { + p += float64(fillWorkers[i].Progress()) / float64(total) + } + } + p *= 100.0 
+ log.Info("Scan code history", "workers", workerCount, "progress", fmt.Sprintf("%.2f%%", p), + "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys), + ) + } + } + var bitmap roaring64.Bitmap + for i := 0; i < workerCount; i++ { + bitmap.Or(&fillWorkers[i].bitmap) + } + log.Info("Ready to replay", "transactions", bitmap.GetCardinality(), "out of", txNum) + rs.SetWorkBitmap(&bitmap) + var lock sync.RWMutex + reconWorkers := make([]*ReconWorker, workerCount) + roTxs := make([]kv.Tx, workerCount) + defer func() { + for i := 0; i < workerCount; i++ { + if roTxs[i] != nil { + roTxs[i].Rollback() + } + } + }() + for i := 0; i < workerCount; i++ { + roTxs[i], err = db.BeginRo(ctx) + if err != nil { + return err + } + } + for i := 0; i < workerCount; i++ { + reconWorkers[i] = NewReconWorker(lock.RLocker(), &wg, rs, agg, blockReader, allSnapshots, txNums, chainConfig, logger, genesis) + reconWorkers[i].SetTx(roTxs[i]) + } + wg.Add(workerCount) + count := uint64(0) + rollbackCount := uint64(0) + total := bitmap.GetCardinality() + for i := 0; i < workerCount; i++ { + go reconWorkers[i].run() + } + commitThreshold := uint64(256 * 1024 * 1024) + prevCount := uint64(0) + prevRollbackCount := uint64(0) + prevTime := time.Now() + for count < total { + select { + case <-logEvery.C: + var m runtime.MemStats + libcommon.ReadMemStats(&m) + sizeEstimate := rs.SizeEstimate() + count = rs.DoneCount() + rollbackCount = rs.RollbackCount() + currentTime := time.Now() + interval := currentTime.Sub(prevTime) + speedTx := float64(count-prevCount) / (float64(interval) / float64(time.Second)) + progress := 100.0 * float64(count) / float64(total) + var repeatRatio float64 + if count > prevCount { + repeatRatio = 100.0 * float64(rollbackCount-prevRollbackCount) / float64(count-prevCount) + } + prevTime = currentTime + prevCount = count + prevRollbackCount = rollbackCount + log.Info("State reconstitution", "workers", workerCount, "progress", fmt.Sprintf("%.2f%%", progress), 
"tx/s", fmt.Sprintf("%.1f", speedTx), "repeat ratio", fmt.Sprintf("%.2f%%", repeatRatio), "buffer", libcommon.ByteCount(sizeEstimate), + "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys), + ) + if sizeEstimate >= commitThreshold { + err := func() error { + lock.Lock() + defer lock.Unlock() + for i := 0; i < workerCount; i++ { + roTxs[i].Rollback() + } + rwTx, err := db.BeginRw(ctx) + if err != nil { + return err + } + if err = rs.Flush(rwTx); err != nil { + return err + } + if err = rwTx.Commit(); err != nil { + return err + } + for i := 0; i < workerCount; i++ { + if roTxs[i], err = db.BeginRo(ctx); err != nil { + return err + } + reconWorkers[i].SetTx(roTxs[i]) + } + return nil + }() + if err != nil { + panic(err) + } + } + } + } + wg.Wait() + for i := 0; i < workerCount; i++ { + roTxs[i].Rollback() + } + rwTx, err := db.BeginRw(ctx) + if err != nil { + return err + } + defer func() { + if rwTx != nil { + rwTx.Rollback() + } + }() + if err = rs.Flush(rwTx); err != nil { + return err + } + if err = rwTx.Commit(); err != nil { + return err + } + doneCount = 0 + for i := 0; i < workerCount; i++ { + fillWorkers[i].ResetProgress() + go fillWorkers[i].fillAccounts() + } + for atomic.LoadUint64(&doneCount) < uint64(workerCount) { + select { + case <-logEvery.C: + var m runtime.MemStats + libcommon.ReadMemStats(&m) + sizeEstimate := rs.SizeEstimate() + var p float64 + for i := 0; i < workerCount; i++ { + if total := fillWorkers[i].Total(); total > 0 { + p += float64(fillWorkers[i].Progress()) / float64(total) + } + } + p *= 100.0 + log.Info("Filling accounts", "workers", workerCount, "progress", fmt.Sprintf("%.2f%%", p), "buffer", libcommon.ByteCount(sizeEstimate), + "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys), + ) + if sizeEstimate >= commitThreshold { + flushStart := time.Now() + rwTx, err := db.BeginRw(ctx) + if err != nil { + return err + } + if err = rs.Flush(rwTx); err != nil { + return err + } + if err = 
rwTx.Commit(); err != nil { + return err + } + log.Info("Flush buffer", "duration", time.Since(flushStart)) + } + } + } + doneCount = 0 + for i := 0; i < workerCount; i++ { + fillWorkers[i].ResetProgress() + go fillWorkers[i].fillStorage() + } + for atomic.LoadUint64(&doneCount) < uint64(workerCount) { + select { + case <-logEvery.C: + var m runtime.MemStats + libcommon.ReadMemStats(&m) + sizeEstimate := rs.SizeEstimate() + var p float64 + for i := 0; i < workerCount; i++ { + if total := fillWorkers[i].Total(); total > 0 { + p += float64(fillWorkers[i].Progress()) / float64(total) + } + } + p *= 100.0 + log.Info("Filling storage", "workers", workerCount, "progress", fmt.Sprintf("%.2f%%", p), "buffer", libcommon.ByteCount(sizeEstimate), + "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys), + ) + if sizeEstimate >= commitThreshold { + flushStart := time.Now() + rwTx, err := db.BeginRw(ctx) + if err != nil { + return err + } + if err = rs.Flush(rwTx); err != nil { + return err + } + if err = rwTx.Commit(); err != nil { + return err + } + log.Info("Flush buffer", "duration", time.Since(flushStart)) + } + } + } + doneCount = 0 + for i := 0; i < workerCount; i++ { + fillWorkers[i].ResetProgress() + go fillWorkers[i].fillCode() + } + for atomic.LoadUint64(&doneCount) < uint64(workerCount) { + select { + case <-logEvery.C: + var m runtime.MemStats + libcommon.ReadMemStats(&m) + sizeEstimate := rs.SizeEstimate() + var p float64 + for i := 0; i < workerCount; i++ { + if total := fillWorkers[i].Total(); total > 0 { + p += float64(fillWorkers[i].Progress()) / float64(total) + } + } + p *= 100.0 + log.Info("Filling code", "workers", workerCount, "progress", fmt.Sprintf("%.2f%%", p), "buffer", libcommon.ByteCount(sizeEstimate), + "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys), + ) + if sizeEstimate >= commitThreshold { + flushStart := time.Now() + rwTx, err := db.BeginRw(ctx) + if err != nil { + return err + } + if err = 
rs.Flush(rwTx); err != nil { + return err + } + if err = rwTx.Commit(); err != nil { + return err + } + log.Info("Flush buffer", "duration", time.Since(flushStart)) + } + } + } + rwTx, err = db.BeginRw(ctx) + if err != nil { + return err + } + if err = rs.Flush(rwTx); err != nil { + return err + } + if err = rwTx.Commit(); err != nil { + return err + } + if rwTx, err = db.BeginRw(ctx); err != nil { + return err + } + log.Info("Computing hashed state") + tmpDir := filepath.Join(datadir, "tmp") + if err = stagedsync.PromoteHashedStateCleanly("recon", rwTx, stagedsync.StageHashStateCfg(db, tmpDir), ctx); err != nil { + return err + } + if err = rwTx.Commit(); err != nil { + return err + } + if rwTx, err = db.BeginRw(ctx); err != nil { + return err + } + if _, err = stagedsync.RegenerateIntermediateHashes("recon", rwTx, stagedsync.StageTrieCfg(db, false /* checkRoot */, false /* saveHashesToDB */, false /* badBlockHalt */, tmpDir, blockReader), common.Hash{}, make(chan struct{}, 1)); err != nil { + return err + } + if err = rwTx.Commit(); err != nil { + return err + } + return nil +} diff --git a/core/state/HistoryReader22.go b/core/state/history_reader_22.go similarity index 97% rename from core/state/HistoryReader22.go rename to core/state/history_reader_22.go index 647dfe25886..be89d3bf43b 100644 --- a/core/state/HistoryReader22.go +++ b/core/state/history_reader_22.go @@ -126,8 +126,10 @@ func (hr *HistoryReader22) ReadAccountStorage(address common.Address, incarnatio } func (hr *HistoryReader22) ReadAccountCode(address common.Address, incarnation uint64, codeHash common.Hash) ([]byte, error) { - if err := hr.ri.ReadAccountCode(address.Bytes()); err != nil { - return nil, err + if hr.ri != nil { + if err := hr.ri.ReadAccountCode(address.Bytes()); err != nil { + return nil, err + } } enc, err := hr.a.ReadAccountCodeBeforeTxNum(address.Bytes(), hr.txNum, nil /* roTx */) if err != nil { diff --git a/core/state/history_reader_nostate.go 
b/core/state/history_reader_nostate.go new file mode 100644 index 00000000000..d1e270bec3d --- /dev/null +++ b/core/state/history_reader_nostate.go @@ -0,0 +1,210 @@ +package state + +import ( + "fmt" + + "github.com/ledgerwatch/erigon-lib/kv" + libstate "github.com/ledgerwatch/erigon-lib/state" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/dbutils" + "github.com/ledgerwatch/erigon/core/types/accounts" +) + +type RequiredStateError struct { + StateTxNum uint64 +} + +func (r *RequiredStateError) Error() string { + return fmt.Sprintf("required state at txNum %d", r.StateTxNum) +} + +// Implements StateReader and StateWriter +type HistoryReaderNoState struct { + ac *libstate.AggregatorContext + tx kv.Tx + txNum uint64 + trace bool + rs *ReconState + readError bool + stateTxNum uint64 +} + +func NewHistoryReaderNoState(ac *libstate.AggregatorContext, rs *ReconState) *HistoryReaderNoState { + return &HistoryReaderNoState{ac: ac, rs: rs} +} + +func (hr *HistoryReaderNoState) SetTxNum(txNum uint64) { + hr.txNum = txNum +} + +func (hr *HistoryReaderNoState) SetTx(tx kv.Tx) { + hr.tx = tx +} + +func (hr *HistoryReaderNoState) SetTrace(trace bool) { + hr.trace = trace +} + +func (hr *HistoryReaderNoState) ReadAccountData(address common.Address) (*accounts.Account, error) { + enc, noState, stateTxNum, err := hr.ac.ReadAccountDataNoState(address.Bytes(), hr.txNum) + if err != nil { + return nil, err + } + if !noState { + if !hr.rs.Done(stateTxNum) { + hr.readError = true + hr.stateTxNum = stateTxNum + return nil, &RequiredStateError{StateTxNum: stateTxNum} + } + enc = hr.rs.Get(kv.PlainState, address.Bytes()) + if enc == nil { + enc, err = hr.tx.GetOne(kv.PlainState, address.Bytes()) + if err != nil { + return nil, err + } + if enc == nil { + return nil, nil + } + } + var a accounts.Account + if err = a.DecodeForStorage(enc); err != nil { + return nil, err + } + if hr.trace { + fmt.Printf("ReadAccountData [%x] => [nonce: %d, balance: %d, 
codeHash: %x], noState=%t, stateTxNum=%d, txNum: %d\n", address, a.Nonce, &a.Balance, a.CodeHash, noState, stateTxNum, hr.txNum) + } + return &a, nil + } + if len(enc) == 0 { + if hr.trace { + fmt.Printf("ReadAccountData [%x] => [], noState=%t, stateTxNum=%d, txNum: %d\n", address, noState, stateTxNum, hr.txNum) + } + return nil, nil + } + var a accounts.Account + a.Reset() + pos := 0 + nonceBytes := int(enc[pos]) + pos++ + if nonceBytes > 0 { + a.Nonce = bytesToUint64(enc[pos : pos+nonceBytes]) + pos += nonceBytes + } + balanceBytes := int(enc[pos]) + pos++ + if balanceBytes > 0 { + a.Balance.SetBytes(enc[pos : pos+balanceBytes]) + pos += balanceBytes + } + codeHashBytes := int(enc[pos]) + pos++ + if codeHashBytes > 0 { + copy(a.CodeHash[:], enc[pos:pos+codeHashBytes]) + pos += codeHashBytes + } + incBytes := int(enc[pos]) + pos++ + if incBytes > 0 { + a.Incarnation = bytesToUint64(enc[pos : pos+incBytes]) + } + if hr.trace { + fmt.Printf("ReadAccountData [%x] => [nonce: %d, balance: %d, codeHash: %x], noState=%t, stateTxNum=%d, txNum: %d\n", address, a.Nonce, &a.Balance, a.CodeHash, noState, stateTxNum, hr.txNum) + } + return &a, nil +} + +func (hr *HistoryReaderNoState) ReadAccountStorage(address common.Address, incarnation uint64, key *common.Hash) ([]byte, error) { + enc, noState, stateTxNum, err := hr.ac.ReadAccountStorageNoState(address.Bytes(), key.Bytes(), hr.txNum) + if err != nil { + return nil, err + } + if !noState { + if !hr.rs.Done(stateTxNum) { + hr.readError = true + hr.stateTxNum = stateTxNum + return nil, &RequiredStateError{StateTxNum: stateTxNum} + } + compositeKey := dbutils.PlainGenerateCompositeStorageKey(address.Bytes(), FirstContractIncarnation, key.Bytes()) + enc = hr.rs.Get(kv.PlainState, compositeKey) + if enc == nil { + enc, err = hr.tx.GetOne(kv.PlainState, compositeKey) + if err != nil { + return nil, err + } + } + } + if hr.trace { + if enc == nil { + fmt.Printf("ReadAccountStorage [%x] [%x] => [], txNum: %d\n", address, 
key.Bytes(), hr.txNum) + } else { + fmt.Printf("ReadAccountStorage [%x] [%x] => [%x], txNum: %d\n", address, key.Bytes(), enc, hr.txNum) + } + } + if enc == nil { + return nil, nil + } + return enc, nil +} + +func (hr *HistoryReaderNoState) ReadAccountCode(address common.Address, incarnation uint64, codeHash common.Hash) ([]byte, error) { + enc, noState, stateTxNum, err := hr.ac.ReadAccountCodeNoState(address.Bytes(), hr.txNum) + if err != nil { + return nil, err + } + if !noState { + if !hr.rs.Done(stateTxNum) { + hr.readError = true + hr.stateTxNum = stateTxNum + return nil, &RequiredStateError{StateTxNum: stateTxNum} + } + enc = hr.rs.Get(kv.Code, codeHash.Bytes()) + if enc == nil { + enc, err = hr.tx.GetOne(kv.Code, codeHash.Bytes()) + if err != nil { + return nil, err + } + } + } + if hr.trace { + fmt.Printf("ReadAccountCode [%x] => [%x], noState=%t, stateTxNum=%d, txNum: %d\n", address, enc, noState, stateTxNum, hr.txNum) + } + return enc, nil +} + +func (hr *HistoryReaderNoState) ReadAccountCodeSize(address common.Address, incarnation uint64, codeHash common.Hash) (int, error) { + size, noState, stateTxNum, err := hr.ac.ReadAccountCodeSizeNoState(address.Bytes(), hr.txNum) + if err != nil { + return 0, err + } + if !noState { + if !hr.rs.Done(stateTxNum) { + hr.readError = true + hr.stateTxNum = stateTxNum + return 0, &RequiredStateError{StateTxNum: stateTxNum} + } + enc := hr.rs.Get(kv.Code, codeHash.Bytes()) + if enc == nil { + enc, err = hr.tx.GetOne(kv.Code, codeHash.Bytes()) + if err != nil { + return 0, err + } + } + size = len(enc) + } + if hr.trace { + fmt.Printf("ReadAccountCodeSize [%x] => [%d], txNum: %d\n", address, size, hr.txNum) + } + return size, nil +} + +func (hr *HistoryReaderNoState) ReadAccountIncarnation(address common.Address) (uint64, error) { + return 0, nil +} + +func (hr *HistoryReaderNoState) ResetError() { + hr.readError = false +} + +func (hr *HistoryReaderNoState) ReadError() (uint64, bool) { + return hr.stateTxNum, 
hr.readError +} diff --git a/core/state/intra_block_state.go b/core/state/intra_block_state.go index 43c98f608a2..d061ee91fe8 100644 --- a/core/state/intra_block_state.go +++ b/core/state/intra_block_state.go @@ -70,7 +70,7 @@ type IntraBlockState struct { // unable to deal with database-level errors. Any error that occurs // during a database read is memoized here and will eventually be returned // by IntraBlockState.Commit. - dbErr error + savedErr error // The refund counter, also used by state transitioning. refund uint64 @@ -115,13 +115,13 @@ func (sdb *IntraBlockState) SetTrace(trace bool) { // setErrorUnsafe sets error but should be called in medhods that already have locks func (sdb *IntraBlockState) setErrorUnsafe(err error) { - if sdb.dbErr == nil { - sdb.dbErr = err + if sdb.savedErr == nil { + sdb.savedErr = err } } func (sdb *IntraBlockState) Error() error { - return sdb.dbErr + return sdb.savedErr } // Reset clears out all ephemeral state objects from the state db, but keeps diff --git a/core/state/state_object.go b/core/state/state_object.go index 7547a97fda9..ffdcde3e770 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -69,13 +69,6 @@ type stateObject struct { original accounts.Account db *IntraBlockState - // DB error. - // State objects are used by the consensus core and VM which are - // unable to deal with database-level errors. Any error that occurs - // during a database read is memoized here and will eventually be returned - // by IntraBlockState.Commit. - dbErr error - // Write caches. //trie Trie // storage trie, which becomes non-nil on first access code Code // contract bytecode, which gets set when code is loaded @@ -134,8 +127,8 @@ func (so *stateObject) EncodeRLP(w io.Writer) error { // setError remembers the first non-nil error it is called with. 
func (so *stateObject) setError(err error) { - if so.dbErr == nil { - so.dbErr = err + if so.db.savedErr == nil { + so.db.savedErr = err } } diff --git a/core/state/state_recon_writer.go b/core/state/state_recon_writer.go new file mode 100644 index 00000000000..c3893b6545e --- /dev/null +++ b/core/state/state_recon_writer.go @@ -0,0 +1,257 @@ +package state + +import ( + //"fmt" + + "container/heap" + "github.com/RoaringBitmap/roaring/roaring64" + "github.com/holiman/uint256" + "github.com/ledgerwatch/erigon-lib/kv" + libstate "github.com/ledgerwatch/erigon-lib/state" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/dbutils" + "github.com/ledgerwatch/erigon/core/types/accounts" + "golang.org/x/exp/constraints" + "sync" +) + +type theap[T constraints.Ordered] []T + +func (h theap[T]) Len() int { + return len(h) +} + +func (h theap[T]) Less(i, j int) bool { + return h[i] < h[j] +} + +func (h theap[T]) Swap(i, j int) { + h[i], h[j] = h[j], h[i] +} + +func (h *theap[T]) Push(a interface{}) { + *h = append(*h, a.(T)) +} + +func (h *theap[T]) Pop() interface{} { + c := *h + *h = c[:len(c)-1] + return c[len(c)-1] +} + +// ReconState is the accumulator of changes to the state +type ReconState struct { + lock sync.RWMutex + workIterator roaring64.IntPeekable64 + doneBitmap roaring64.Bitmap + triggers map[uint64][]uint64 + queue theap[uint64] + changes map[string]map[string][]byte + sizeEstimate uint64 + rollbackCount uint64 +} + +func NewReconState() *ReconState { + rs := &ReconState{ + triggers: map[uint64][]uint64{}, + changes: map[string]map[string][]byte{}, + } + return rs +} + +func (rs *ReconState) SetWorkBitmap(workBitmap *roaring64.Bitmap) { + rs.workIterator = workBitmap.Iterator() +} + +func (rs *ReconState) Put(table string, key, val []byte) { + rs.lock.Lock() + defer rs.lock.Unlock() + t, ok := rs.changes[table] + if !ok { + t = map[string][]byte{} + rs.changes[table] = t + } + t[string(key)] = val + rs.sizeEstimate += 
uint64(len(key)) + uint64(len(val)) +} + +func (rs *ReconState) Delete(table string, key []byte) { + rs.lock.Lock() + defer rs.lock.Unlock() + t, ok := rs.changes[table] + if !ok { + t = map[string][]byte{} + rs.changes[table] = t + } + t[string(key)] = nil + rs.sizeEstimate += uint64(len(key)) +} + +func (rs *ReconState) Get(table string, key []byte) []byte { + rs.lock.RLock() + defer rs.lock.RUnlock() + t, ok := rs.changes[table] + if !ok { + return nil + } + return t[string(key)] +} + +func (rs *ReconState) Flush(rwTx kv.RwTx) error { + rs.lock.Lock() + defer rs.lock.Unlock() + for table, t := range rs.changes { + for ks, val := range t { + if len(val) == 0 { + if err := rwTx.Delete(table, []byte(ks), nil); err != nil { + return err + } + } else { + if err := rwTx.Put(table, []byte(ks), val); err != nil { + return err + } + } + } + } + rs.changes = map[string]map[string][]byte{} + rs.sizeEstimate = 0 + return nil +} + +func (rs *ReconState) Schedule() (uint64, bool) { + rs.lock.Lock() + defer rs.lock.Unlock() + for rs.queue.Len() < 16 && rs.workIterator.HasNext() { + heap.Push(&rs.queue, rs.workIterator.Next()) + } + if rs.queue.Len() > 0 { + return heap.Pop(&rs.queue).(uint64), true + } + return 0, false +} + +func (rs *ReconState) CommitTxNum(txNum uint64) { + rs.lock.Lock() + defer rs.lock.Unlock() + if tt, ok := rs.triggers[txNum]; ok { + for _, t := range tt { + heap.Push(&rs.queue, t) + } + delete(rs.triggers, txNum) + } + rs.doneBitmap.Add(txNum) +} + +func (rs *ReconState) RollbackTxNum(txNum, dependency uint64) { + rs.lock.Lock() + defer rs.lock.Unlock() + if rs.doneBitmap.Contains(dependency) { + heap.Push(&rs.queue, txNum) + } else { + tt, _ := rs.triggers[dependency] + tt = append(tt, txNum) + rs.triggers[dependency] = tt + } + rs.rollbackCount++ +} + +func (rs *ReconState) Done(txNum uint64) bool { + rs.lock.RLock() + defer rs.lock.RUnlock() + return rs.doneBitmap.Contains(txNum) +} + +func (rs *ReconState) DoneCount() uint64 { + rs.lock.RLock() + 
defer rs.lock.RUnlock() + return rs.doneBitmap.GetCardinality() +} + +func (rs *ReconState) RollbackCount() uint64 { + rs.lock.RLock() + defer rs.lock.RUnlock() + return rs.rollbackCount +} + +func (rs *ReconState) SizeEstimate() uint64 { + rs.lock.RLock() + defer rs.lock.RUnlock() + return rs.sizeEstimate +} + +type StateReconWriter struct { + ac *libstate.AggregatorContext + rs *ReconState + txNum uint64 +} + +func NewStateReconWriter(ac *libstate.AggregatorContext, rs *ReconState) *StateReconWriter { + return &StateReconWriter{ + ac: ac, + rs: rs, + } +} + +func (w *StateReconWriter) SetTxNum(txNum uint64) { + w.txNum = txNum +} + +func (w *StateReconWriter) UpdateAccountData(address common.Address, original, account *accounts.Account) error { + found, txNum := w.ac.MaxAccountsTxNum(address.Bytes()) + if !found { + return nil + } + if txNum != w.txNum { + //fmt.Printf("no change account [%x] txNum = %d\n", address, txNum) + return nil + } + value := make([]byte, account.EncodingLengthForStorage()) + account.EncodeForStorage(value) + //fmt.Printf("account [%x]=>{Balance: %d, Nonce: %d, Root: %x, CodeHash: %x} txNum: %d\n", address, &account.Balance, account.Nonce, account.Root, account.CodeHash, w.txNum) + w.rs.Put(kv.PlainState, address[:], value) + return nil +} + +func (w *StateReconWriter) UpdateAccountCode(address common.Address, incarnation uint64, codeHash common.Hash, code []byte) error { + found, txNum := w.ac.MaxCodeTxNum(address.Bytes()) + if !found { + return nil + } + if txNum != w.txNum { + //fmt.Printf("no change code [%x] txNum = %d\n", address, txNum) + return nil + } + w.rs.Put(kv.Code, codeHash[:], code) + if len(code) > 0 { + //fmt.Printf("code [%x] => [%x] CodeHash: %x, txNum: %d\n", address, code, codeHash, w.txNum) + w.rs.Put(kv.PlainContractCode, dbutils.PlainGenerateStoragePrefix(address[:], FirstContractIncarnation), codeHash[:]) + } + return nil +} + +func (w *StateReconWriter) DeleteAccount(address common.Address, original 
*accounts.Account) error { + return nil +} + +func (w *StateReconWriter) WriteAccountStorage(address common.Address, incarnation uint64, key *common.Hash, original, value *uint256.Int) error { + found, txNum := w.ac.MaxStorageTxNum(address.Bytes(), key.Bytes()) + if !found { + //fmt.Printf("no found storage [%x] [%x]\n", address, *key) + return nil + } + if txNum != w.txNum { + //fmt.Printf("no change storage [%x] [%x] txNum = %d\n", address, *key, txNum) + return nil + } + v := value.Bytes() + if len(v) != 0 { + //fmt.Printf("storage [%x] [%x] => [%x], txNum: %d\n", address, *key, v, w.txNum) + compositeKey := dbutils.PlainGenerateCompositeStorageKey(address.Bytes(), FirstContractIncarnation, key.Bytes()) + w.rs.Put(kv.PlainState, compositeKey, v) + } + return nil +} + +func (w *StateReconWriter) CreateContract(address common.Address) error { + return nil +} diff --git a/go.mod b/go.mod index 415f1852b12..d2360950a28 100644 --- a/go.mod +++ b/go.mod @@ -36,7 +36,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible - github.com/ledgerwatch/erigon-lib v0.0.0-20220701042032-ed452dbc4b21 + github.com/ledgerwatch/erigon-lib v0.0.0-20220702183834-707a89842d6b github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 diff --git a/go.sum b/go.sum index 982d284196a..45f0c922d41 100644 --- a/go.sum +++ b/go.sum @@ -383,8 +383,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3PYPwICLl+/9oulQauOuETfgFvhBDffs0= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220701042032-ed452dbc4b21 
h1:mZAojUAtvuvFLS8sumuYlZrHKGvkjTBxA6fvvujT/Kc= -github.com/ledgerwatch/erigon-lib v0.0.0-20220701042032-ed452dbc4b21/go.mod h1:7sQ5B5m54zoo7RVRVukH3YZCYVrCC+BmwDBD+9KyTrE= +github.com/ledgerwatch/erigon-lib v0.0.0-20220702183834-707a89842d6b h1:jxk2V9PBN9z2FQIL2SAV3V1wq01RUPz2kgzSqaCZmJQ= +github.com/ledgerwatch/erigon-lib v0.0.0-20220702183834-707a89842d6b/go.mod h1:7sQ5B5m54zoo7RVRVukH3YZCYVrCC+BmwDBD+9KyTrE= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= diff --git a/turbo/snapshotsync/block_reader.go b/turbo/snapshotsync/block_reader.go index c5c5ef2d365..c35e22b99fd 100644 --- a/turbo/snapshotsync/block_reader.go +++ b/turbo/snapshotsync/block_reader.go @@ -313,6 +313,9 @@ func (back *BlockReaderWithSnapshots) Header(ctx context.Context, tx kv.Getter, } return nil }) + if err != nil { + return h, err + } if ok { return h, nil } From 3de3baf585738d184a09a195a6f954b17b53b6f4 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Sun, 3 Jul 2022 00:14:22 +0200 Subject: [PATCH 020/152] default side fork support (#4611) --- eth/stagedsync/stage_headers.go | 36 ++++++++++----------- turbo/stages/headerdownload/header_algos.go | 13 ++++++++ 2 files changed, 31 insertions(+), 18 deletions(-) diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 6b6f5f547e9..fd0c70e8bd5 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -586,8 +586,11 @@ func verifyAndSaveNewPoSHeader( return nil, false, err } - err = headerInserter.FeedHeaderPoS(tx, header, headerHash) - if err != nil { + if err := headerInserter.FeedHeaderPoS(tx, header, headerHash); err != nil { + return nil, false, err + } + + if err := cfg.hd.StorePayloadFork(tx, header, body); err != nil { return nil, false, err } @@ -596,23 +599,20 @@ 
func verifyAndSaveNewPoSHeader( // Side chain or something weird // TODO(yperbasis): considered non-canonical because some missing headers were downloaded but not canonized // Or it's not a problem because forkChoice is updated frequently? - if cfg.memoryOverlay { - status, latestValidHash, validationError, criticalError := cfg.hd.ValidatePayload(tx, header, body, cfg.chainConfig.TerminalTotalDifficulty, false, cfg.execPayload) - if criticalError != nil { - return &privateapi.PayloadStatus{CriticalError: criticalError}, false, criticalError - } - if validationError != nil { - cfg.hd.ReportBadHeaderPoS(headerHash, latestValidHash) - } - success = status == remote.EngineStatus_VALID || status == remote.EngineStatus_ACCEPTED - return &privateapi.PayloadStatus{ - Status: status, - LatestValidHash: latestValidHash, - ValidationError: validationError, - }, success, nil + status, latestValidHash, validationError, criticalError := cfg.hd.ValidatePayload(tx, header, body, cfg.chainConfig.TerminalTotalDifficulty, false, cfg.execPayload) + if criticalError != nil { + return &privateapi.PayloadStatus{CriticalError: criticalError}, false, criticalError } - // No canonization, HeadHeaderHash & StageProgress are not updated - return &privateapi.PayloadStatus{Status: remote.EngineStatus_ACCEPTED}, true, nil + if validationError != nil { + cfg.hd.ReportBadHeaderPoS(headerHash, latestValidHash) + } + success = status == remote.EngineStatus_VALID || status == remote.EngineStatus_ACCEPTED + return &privateapi.PayloadStatus{ + Status: status, + LatestValidHash: latestValidHash, + ValidationError: validationError, + }, success, nil + } if cfg.memoryOverlay && (cfg.hd.GetNextForkHash() == (common.Hash{}) || header.ParentHash == cfg.hd.GetNextForkHash()) { diff --git a/turbo/stages/headerdownload/header_algos.go b/turbo/stages/headerdownload/header_algos.go index 1aabb1e3f00..c0ed7df24c5 100644 --- a/turbo/stages/headerdownload/header_algos.go +++ 
b/turbo/stages/headerdownload/header_algos.go @@ -1093,6 +1093,19 @@ func abs64(n int64) uint64 { return uint64(n) } +func (hd *HeaderDownload) StorePayloadFork(tx kv.RwTx, header *types.Header, body *types.RawBody) error { + hd.lock.Lock() + defer hd.lock.Unlock() + maxDepth := uint64(16) + height := rawdb.ReadCurrentBlockNumber(tx) + if height == nil { + return fmt.Errorf("could not read block number.") + } + hd.sideForksBlock[header.Hash()] = sideForkBlock{header, body} + hd.cleanupOutdateSideForks(*height, maxDepth) + return nil +} + func (hd *HeaderDownload) ValidatePayload(tx kv.RwTx, header *types.Header, body *types.RawBody, terminalTotalDifficulty *big.Int, store bool, execPayload func(kv.RwTx, *types.Header, *types.RawBody, uint64, []*types.Header, []*types.RawBody) error) (status remote.EngineStatus, latestValidHash common.Hash, validationError error, criticalError error) { hd.lock.Lock() defer hd.lock.Unlock() From 77dc35bb854e83dfd5afdbca33485907bd4848a9 Mon Sep 17 00:00:00 2001 From: ledgerwatch Date: Sat, 2 Jul 2022 23:24:20 +0100 Subject: [PATCH 021/152] Revert "evm t8n tool to use ExecuteBlockEphemerally api (#4512)" (#4610) * Revert "evm t8n tool to use ExecuteBlockEphemerally api (#4512)" This reverts commit db93d2ea37f74871219231ce128652c1a3aac16f. 
* Fix compilation Co-authored-by: Alex Sharp --- accounts/abi/bind/backends/simulated.go | 5 +- cmd/evm/internal/t8ntool/execution.go | 288 ++++++++++++++--- cmd/evm/internal/t8ntool/flags.go | 7 +- cmd/evm/internal/t8ntool/gen_stenv.go | 65 ++-- cmd/evm/internal/t8ntool/transition.go | 268 ++-------------- cmd/evm/main.go | 3 +- cmd/evm/t8n_test.go | 247 -------------- cmd/evm/testdata/1/exp.json | 45 --- cmd/evm/testdata/10/alloc.json | 23 -- cmd/evm/testdata/10/env.json | 12 - cmd/evm/testdata/10/exp.json | 79 ----- cmd/evm/testdata/10/readme.md | 79 ----- cmd/evm/testdata/10/txs.json | 70 ---- cmd/evm/testdata/11/alloc.json | 25 -- cmd/evm/testdata/11/env.json | 12 - cmd/evm/testdata/11/readme.md | 13 - cmd/evm/testdata/11/txs.json | 14 - cmd/evm/testdata/12/alloc.json | 11 - cmd/evm/testdata/12/env.json | 10 - cmd/evm/testdata/12/exp.json | 26 -- cmd/evm/testdata/12/readme.md | 40 --- cmd/evm/testdata/12/txs.json | 20 -- cmd/evm/testdata/19/alloc.json | 12 - cmd/evm/testdata/19/env.json | 9 - cmd/evm/testdata/19/exp_arrowglacier.json | 24 -- cmd/evm/testdata/19/exp_london.json | 24 -- cmd/evm/testdata/19/readme.md | 9 - cmd/evm/testdata/19/txs.json | 1 - cmd/evm/testdata/3/exp.json | 39 --- cmd/evm/testdata/5/exp.json | 23 -- cmd/evm/testdata/7/exp.json | 375 ---------------------- cmd/evm/testdata/8/exp.json | 68 ---- cmd/evm/testdata/9/alloc.json | 28 +- cmd/evm/testdata/9/env.json | 15 +- cmd/evm/testdata/9/exp.json | 54 ---- cmd/evm/testdata/9/readme.md | 75 ----- cmd/evm/testdata/9/txs.json | 49 +-- cmd/integration/commands/state_stages.go | 3 +- cmd/rpcdaemon/commands/eth_receipts.go | 3 +- cmd/rpcdaemon22/commands/eth_receipts.go | 3 +- cmd/state/commands/erigon2.go | 2 +- cmd/state/commands/erigon22.go | 2 +- cmd/state/commands/history2.go | 2 +- cmd/state/commands/history22.go | 2 +- cmd/state/commands/opcode_tracer.go | 2 +- cmd/state/commands/state_recon.go | 2 +- consensus/parlia/parlia.go | 2 +- core/blockchain.go | 203 ++++-------- 
core/chain_makers.go | 4 +- core/evm.go | 4 +- core/state_processor.go | 58 +++- core/vm/logger.go | 75 ----- eth/stagedsync/stage_execute.go | 19 +- eth/stagedsync/stage_mining_exec.go | 2 +- go.mod | 7 +- go.sum | 17 +- internal/cmdtest/test_cmd.go | 300 ----------------- tests/state_test_util.go | 3 +- turbo/transactions/tracing.go | 3 +- 59 files changed, 501 insertions(+), 2384 deletions(-) delete mode 100644 cmd/evm/t8n_test.go delete mode 100644 cmd/evm/testdata/1/exp.json delete mode 100644 cmd/evm/testdata/10/alloc.json delete mode 100644 cmd/evm/testdata/10/env.json delete mode 100644 cmd/evm/testdata/10/exp.json delete mode 100644 cmd/evm/testdata/10/readme.md delete mode 100644 cmd/evm/testdata/10/txs.json delete mode 100644 cmd/evm/testdata/11/alloc.json delete mode 100644 cmd/evm/testdata/11/env.json delete mode 100644 cmd/evm/testdata/11/readme.md delete mode 100644 cmd/evm/testdata/11/txs.json delete mode 100644 cmd/evm/testdata/12/alloc.json delete mode 100644 cmd/evm/testdata/12/env.json delete mode 100644 cmd/evm/testdata/12/exp.json delete mode 100644 cmd/evm/testdata/12/readme.md delete mode 100644 cmd/evm/testdata/12/txs.json delete mode 100644 cmd/evm/testdata/19/alloc.json delete mode 100644 cmd/evm/testdata/19/env.json delete mode 100644 cmd/evm/testdata/19/exp_arrowglacier.json delete mode 100644 cmd/evm/testdata/19/exp_london.json delete mode 100644 cmd/evm/testdata/19/readme.md delete mode 100644 cmd/evm/testdata/19/txs.json delete mode 100644 cmd/evm/testdata/3/exp.json delete mode 100644 cmd/evm/testdata/5/exp.json delete mode 100644 cmd/evm/testdata/7/exp.json delete mode 100644 cmd/evm/testdata/8/exp.json delete mode 100644 cmd/evm/testdata/9/exp.json delete mode 100644 cmd/evm/testdata/9/readme.md delete mode 100644 internal/cmdtest/test_cmd.go diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go index 2c5753f68d5..a04bcb9149d 100644 --- a/accounts/abi/bind/backends/simulated.go +++ 
b/accounts/abi/bind/backends/simulated.go @@ -667,8 +667,7 @@ func (b *SimulatedBackend) callContract(_ context.Context, call ethereum.CallMsg msg := callMsg{call} txContext := core.NewEVMTxContext(msg) - header := block.Header() - evmContext := core.NewEVMBlockContext(header, core.GetHashFn(header, b.getHeader), b.m.Engine, nil, b.contractHasTEVM) + evmContext := core.NewEVMBlockContext(block.Header(), b.getHeader, b.m.Engine, nil, b.contractHasTEVM) // Create a new environment which holds all relevant information // about the transaction and calling mechanisms. vmEnv := vm.NewEVM(evmContext, txContext, statedb, b.m.ChainConfig, vm.Config{}) @@ -697,7 +696,7 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx types.Transac b.pendingState.Prepare(tx.Hash(), common.Hash{}, len(b.pendingBlock.Transactions())) //fmt.Printf("==== Start producing block %d, header: %d\n", b.pendingBlock.NumberU64(), b.pendingHeader.Number.Uint64()) if _, _, err := core.ApplyTransaction( - b.m.ChainConfig, core.GetHashFn(b.pendingHeader, b.getHeader), b.m.Engine, + b.m.ChainConfig, b.getHeader, b.m.Engine, &b.pendingHeader.Coinbase, b.gasPool, b.pendingState, state.NewNoopWriter(), b.pendingHeader, tx, diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go index 0bd33bc8a43..1ef1457c2c7 100644 --- a/cmd/evm/internal/t8ntool/execution.go +++ b/cmd/evm/internal/t8ntool/execution.go @@ -17,19 +17,29 @@ package t8ntool import ( + "context" "encoding/binary" + "fmt" "math/big" "github.com/holiman/uint256" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/memdb" + "github.com/ledgerwatch/log/v3" + "golang.org/x/crypto/sha3" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/math" - "github.com/ledgerwatch/erigon/consensus/ethash" + "github.com/ledgerwatch/erigon/consensus/misc" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/state" + 
"github.com/ledgerwatch/erigon/core/systemcontracts" "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/core/vm" + "github.com/ledgerwatch/erigon/crypto" "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/rlp" + "github.com/ledgerwatch/erigon/turbo/trie" ) type Prestate struct { @@ -37,6 +47,18 @@ type Prestate struct { Pre core.GenesisAlloc `json:"pre"` } +// ExecutionResult contains the execution status after running a state test, any +// error that might have occurred and a dump of the final state if requested. +type ExecutionResult struct { + StateRoot common.Hash `json:"stateRoot"` + TxRoot common.Hash `json:"txRoot"` + ReceiptRoot common.Hash `json:"receiptRoot"` + LogsHash common.Hash `json:"logsHash"` + Bloom types.Bloom `json:"logsBloom" gencodec:"required"` + Receipts types.Receipts `json:"receipts"` + Rejected []*rejectedTx `json:"rejected,omitempty"` +} + type ommer struct { Delta uint64 `json:"delta"` Address common.Address `json:"address"` @@ -44,36 +66,226 @@ type ommer struct { //go:generate gencodec -type stEnv -field-override stEnvMarshaling -out gen_stenv.go type stEnv struct { - Coinbase common.Address `json:"currentCoinbase" gencodec:"required"` - Difficulty *big.Int `json:"currentDifficulty"` - Random *big.Int `json:"currentRandom"` - ParentDifficulty *big.Int `json:"parentDifficulty"` - GasLimit uint64 `json:"currentGasLimit" gencodec:"required"` - Number uint64 `json:"currentNumber" gencodec:"required"` - Timestamp uint64 `json:"currentTimestamp" gencodec:"required"` - ParentTimestamp uint64 `json:"parentTimestamp,omitempty"` - BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` - Ommers []ommer `json:"ommers,omitempty"` - BaseFee *big.Int `json:"currentBaseFee,omitempty"` - ParentUncleHash common.Hash `json:"parentUncleHash"` + Coinbase common.Address `json:"currentCoinbase" gencodec:"required"` + Difficulty *big.Int `json:"currentDifficulty" gencodec:"required"` + 
GasLimit uint64 `json:"currentGasLimit" gencodec:"required"` + Number uint64 `json:"currentNumber" gencodec:"required"` + Timestamp uint64 `json:"currentTimestamp" gencodec:"required"` + BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` + Ommers []ommer `json:"ommers,omitempty"` + BaseFee *big.Int `json:"currentBaseFee,omitempty"` + Random *common.Hash `json:"currentRandom,omitempty"` +} + +type rejectedTx struct { + Index int `json:"index"` + Err string `json:"error"` } type stEnvMarshaling struct { - Coinbase common.UnprefixedAddress - Difficulty *math.HexOrDecimal256 - Random *math.HexOrDecimal256 - ParentDifficulty *math.HexOrDecimal256 - GasLimit math.HexOrDecimal64 - Number math.HexOrDecimal64 - Timestamp math.HexOrDecimal64 - ParentTimestamp math.HexOrDecimal64 - BaseFee *math.HexOrDecimal256 + Coinbase common.UnprefixedAddress + Difficulty *math.HexOrDecimal256 + GasLimit math.HexOrDecimal64 + Number math.HexOrDecimal64 + Timestamp math.HexOrDecimal64 + BaseFee *math.HexOrDecimal256 +} + +// Apply applies a set of transactions to a pre-state +func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, + txs types.Transactions, miningReward int64, + getTracerFn func(txIndex int, txHash common.Hash) (tracer vm.Tracer, err error)) (kv.RwDB, *ExecutionResult, error) { + + // Capture errors for BLOCKHASH operation, if we haven't been supplied the + // required blockhashes + var hashError error + getHash := func(num uint64) common.Hash { + if pre.Env.BlockHashes == nil { + hashError = fmt.Errorf("getHash(%d) invoked, no blockhashes provided", num) + return common.Hash{} + } + h, ok := pre.Env.BlockHashes[math.HexOrDecimal64(num)] + if !ok { + hashError = fmt.Errorf("getHash(%d) invoked, blockhash for that block not provided", num) + } + return h + } + db := memdb.New() + + tx, err := db.BeginRw(context.Background()) + if err != nil { + return nil, nil, err + } + defer tx.Rollback() + + var ( + rules0 = 
chainConfig.Rules(0) + rules1 = chainConfig.Rules(1) + rules = chainConfig.Rules(pre.Env.Number) + ibs = MakePreState(rules0, tx, pre.Pre) + signer = types.MakeSigner(chainConfig, pre.Env.Number) + gaspool = new(core.GasPool) + blockHash = common.Hash{0x13, 0x37} + rejectedTxs []*rejectedTx + includedTxs types.Transactions + gasUsed = uint64(0) + receipts = make(types.Receipts, 0) + txIndex = 0 + ) + gaspool.AddGas(pre.Env.GasLimit) + + difficulty := new(big.Int) + if pre.Env.Random == nil { + difficulty = pre.Env.Difficulty + } else { + // We are on POS hence difficulty opcode is now supplant with RANDOM + random := pre.Env.Random.Bytes() + difficulty.SetBytes(random) + } + vmContext := vm.BlockContext{ + CanTransfer: core.CanTransfer, + Transfer: core.Transfer, + Coinbase: pre.Env.Coinbase, + BlockNumber: pre.Env.Number, + ContractHasTEVM: func(common.Hash) (bool, error) { return false, nil }, + Time: pre.Env.Timestamp, + Difficulty: difficulty, + GasLimit: pre.Env.GasLimit, + GetHash: getHash, + } + // If currentBaseFee is defined, add it to the vmContext. + if pre.Env.BaseFee != nil { + vmContext.BaseFee = new(uint256.Int) + overflow := vmContext.BaseFee.SetFromBig(pre.Env.BaseFee) + if overflow { + return nil, nil, fmt.Errorf("pre.Env.BaseFee higher than 2^256-1") + } + } + // If DAO is supported/enabled, we need to handle it here. In geth 'proper', it's + // done in StateProcessor.Process(block, ...), right before transactions are applied. 
+ if chainConfig.DAOForkSupport && + chainConfig.DAOForkBlock != nil && + chainConfig.DAOForkBlock.Cmp(new(big.Int).SetUint64(pre.Env.Number)) == 0 { + misc.ApplyDAOHardFork(ibs) + } + systemcontracts.UpgradeBuildInSystemContract(chainConfig, new(big.Int).SetUint64(pre.Env.Number), ibs) + + for i, txn := range txs { + msg, err := txn.AsMessage(*signer, pre.Env.BaseFee, rules) + if err != nil { + log.Warn("rejected txn", "index", i, "hash", txn.Hash(), "err", err) + rejectedTxs = append(rejectedTxs, &rejectedTx{i, err.Error()}) + continue + } + tracer, err := getTracerFn(txIndex, txn.Hash()) + if err != nil { + return nil, nil, err + } + vmConfig.Tracer = tracer + vmConfig.Debug = (tracer != nil) + ibs.Prepare(txn.Hash(), blockHash, txIndex) + txContext := core.NewEVMTxContext(msg) + snapshot := ibs.Snapshot() + evm := vm.NewEVM(vmContext, txContext, ibs, chainConfig, vmConfig) + + // (ret []byte, usedGas uint64, failed bool, err error) + msgResult, err := core.ApplyMessage(evm, msg, gaspool, true /* refunds */, false /* gasBailout */) + if err != nil { + ibs.RevertToSnapshot(snapshot) + log.Info("rejected txn", "index", i, "hash", txn.Hash(), "from", msg.From(), "err", err) + rejectedTxs = append(rejectedTxs, &rejectedTx{i, err.Error()}) + continue + } + includedTxs = append(includedTxs, txn) + if hashError != nil { + return nil, nil, NewError(ErrorMissingBlockhash, hashError) + } + gasUsed += msgResult.UsedGas + + // Receipt: + { + // Create a new receipt for the transaction, storing the intermediate root and + // gas used by the txn. + receipt := &types.Receipt{Type: txn.Type(), CumulativeGasUsed: gasUsed} + if msgResult.Failed() { + receipt.Status = types.ReceiptStatusFailed + } else { + receipt.Status = types.ReceiptStatusSuccessful + } + receipt.TxHash = txn.Hash() + receipt.GasUsed = msgResult.UsedGas + + // If the transaction created a contract, store the creation address in the receipt. 
+ if msg.To() == nil { + receipt.ContractAddress = crypto.CreateAddress(evm.TxContext().Origin, txn.GetNonce()) + } + + // Set the receipt logs and create a bloom for filtering + receipt.Logs = ibs.GetLogs(txn.Hash()) + receipt.Bloom = types.CreateBloom(types.Receipts{receipt}) + // These three are non-consensus fields: + //receipt.BlockHash + //receipt.BlockNumber + receipt.TransactionIndex = uint(txIndex) + receipts = append(receipts, receipt) + } + + txIndex++ + } + // Add mining reward? + if miningReward > 0 { + // Add mining reward. The mining reward may be `0`, which only makes a difference in the cases + // where + // - the coinbase suicided, or + // - there are only 'bad' transactions, which aren't executed. In those cases, + // the coinbase gets no txfee, so isn't created, and thus needs to be touched + var ( + blockReward = uint256.NewInt(uint64(miningReward)) + minerReward = uint256.NewInt(0).Set(blockReward) + perOmmer = uint256.NewInt(0).Div(blockReward, uint256.NewInt(32)) + ) + for _, ommer := range pre.Env.Ommers { + // Add 1/32th for each ommer included + minerReward.Add(minerReward, perOmmer) + // Add (8-delta)/8 + reward := uint256.NewInt(8) + reward.Sub(reward, uint256.NewInt(ommer.Delta)) + reward.Mul(reward, blockReward) + reward.Div(reward, uint256.NewInt(8)) + ibs.AddBalance(ommer.Address, reward) + } + ibs.AddBalance(pre.Env.Coinbase, minerReward) + } + + // Commit block + var root common.Hash + if err = ibs.FinalizeTx(rules1, state.NewPlainStateWriter(tx, tx, 1)); err != nil { + return nil, nil, err + } + root, err = trie.CalcRoot("", tx) + if err != nil { + return nil, nil, err + } + if err = tx.Commit(); err != nil { + return nil, nil, err + } + + execRs := &ExecutionResult{ + StateRoot: root, + TxRoot: types.DeriveSha(includedTxs), + ReceiptRoot: types.DeriveSha(receipts), + Bloom: types.CreateBloom(receipts), + LogsHash: rlpHash(ibs.Logs()), + Receipts: receipts, + Rejected: rejectedTxs, + } + return db, execRs, nil } -func 
MakePreState(chainRules *params.Rules, tx kv.RwTx, accounts core.GenesisAlloc) (*state.PlainStateReader, *state.PlainStateWriter) { +func MakePreState(chainRules *params.Rules, tx kv.RwTx, accounts core.GenesisAlloc) *state.IntraBlockState { var blockNr uint64 = 0 - stateReader, stateWriter := state.NewPlainStateReader(tx), state.NewPlainStateWriter(tx, tx, blockNr) - statedb := state.New(stateReader) //ibs + r, _ := state.NewPlainStateReader(tx), state.NewPlainStateWriter(tx, tx, blockNr) + statedb := state.New(r) for addr, a := range accounts { statedb.SetCode(addr, a.Code) statedb.SetNonce(addr, a.Nonce) @@ -87,6 +299,7 @@ func MakePreState(chainRules *params.Rules, tx kv.RwTx, accounts core.GenesisAll if len(a.Code) > 0 || len(a.Storage) > 0 { statedb.SetIncarnation(addr, state.FirstContractIncarnation) + var b [8]byte binary.BigEndian.PutUint64(b[:], state.FirstContractIncarnation) tx.Put(kv.IncarnationMap, addr[:], b[:]) @@ -99,25 +312,12 @@ func MakePreState(chainRules *params.Rules, tx kv.RwTx, accounts core.GenesisAll if err := statedb.CommitBlock(chainRules, state.NewPlainStateWriter(tx, tx, blockNr+1)); err != nil { panic(err) } - return stateReader, stateWriter + return statedb } -// calcDifficulty is based on ethash.CalcDifficulty. This method is used in case -// the caller does not provide an explicit difficulty, but instead provides only -// parent timestamp + difficulty. -// Note: this method only works for ethash engine. 
-func calcDifficulty(config *params.ChainConfig, number, currentTime, parentTime uint64, - parentDifficulty *big.Int, parentUncleHash common.Hash) *big.Int { - uncleHash := parentUncleHash - if uncleHash == (common.Hash{}) { - uncleHash = types.EmptyUncleHash - } - parent := &types.Header{ - ParentHash: common.Hash{}, - UncleHash: uncleHash, - Difficulty: parentDifficulty, - Number: new(big.Int).SetUint64(number - 1), - Time: parentTime, - } - return ethash.CalcDifficulty(config, currentTime, parent.Time, parent.Difficulty, number-1, parent.UncleHash) +func rlpHash(x interface{}) (h common.Hash) { + hw := sha3.NewLegacyKeccak256() + rlp.Encode(hw, x) //nolint:errcheck + hw.Sum(h[:0]) + return h } diff --git a/cmd/evm/internal/t8ntool/flags.go b/cmd/evm/internal/t8ntool/flags.go index 4a918b048fc..7a5da94d6f8 100644 --- a/cmd/evm/internal/t8ntool/flags.go +++ b/cmd/evm/internal/t8ntool/flags.go @@ -83,6 +83,11 @@ var ( Usage: "`stdin` or file name of where to find the transactions to apply.", Value: "txs.json", } + RewardFlag = cli.Int64Flag{ + Name: "state.reward", + Usage: "Mining reward. Set to -1 to disable", + Value: 0, + } ChainIDFlag = cli.Int64Flag{ Name: "state.chainid", Usage: "ChainID to use", @@ -98,7 +103,7 @@ var ( "\n\tSyntax (+ExtraEip)", strings.Join(tests.AvailableForks(), "\n\t "), strings.Join(vm.ActivateableEips(), ", ")), - Value: "ArrowGlacier", + Value: "Istanbul", } VerbosityFlag = cli.IntFlag{ Name: "verbosity", diff --git a/cmd/evm/internal/t8ntool/gen_stenv.go b/cmd/evm/internal/t8ntool/gen_stenv.go index 677948e5927..88dfc4d3cb2 100644 --- a/cmd/evm/internal/t8ntool/gen_stenv.go +++ b/cmd/evm/internal/t8ntool/gen_stenv.go @@ -16,50 +16,41 @@ var _ = (*stEnvMarshaling)(nil) // MarshalJSON marshals as JSON. 
func (s stEnv) MarshalJSON() ([]byte, error) { type stEnv struct { - Coinbase common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"` - Difficulty *math.HexOrDecimal256 `json:"currentDifficulty"` - Random *math.HexOrDecimal256 `json:"currentRandom"` - ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"` - GasLimit math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"` - Number math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"` - Timestamp math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"` - ParentTimestamp math.HexOrDecimal64 `json:"parentTimestamp,omitempty"` - BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` - Ommers []ommer `json:"ommers,omitempty"` - BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` - ParentUncleHash common.Hash `json:"parentUncleHash"` + Coinbase common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"` + Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"` + GasLimit math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"` + Number math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"` + Timestamp math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"` + BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` + Ommers []ommer `json:"ommers,omitempty"` + BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` + Random *common.Hash `json:"currentRandom,omitempty"` } var enc stEnv enc.Coinbase = common.UnprefixedAddress(s.Coinbase) enc.Difficulty = (*math.HexOrDecimal256)(s.Difficulty) - enc.Random = (*math.HexOrDecimal256)(s.Random) - enc.ParentDifficulty = (*math.HexOrDecimal256)(s.ParentDifficulty) enc.GasLimit = math.HexOrDecimal64(s.GasLimit) enc.Number = math.HexOrDecimal64(s.Number) enc.Timestamp = math.HexOrDecimal64(s.Timestamp) - enc.ParentTimestamp = math.HexOrDecimal64(s.ParentTimestamp) enc.BlockHashes = s.BlockHashes enc.Ommers = 
s.Ommers enc.BaseFee = (*math.HexOrDecimal256)(s.BaseFee) - enc.ParentUncleHash = s.ParentUncleHash + enc.Random = s.Random return json.Marshal(&enc) } // UnmarshalJSON unmarshals from JSON. func (s *stEnv) UnmarshalJSON(input []byte) error { type stEnv struct { - Coinbase *common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"` - Difficulty *math.HexOrDecimal256 `json:"currentDifficulty"` - Random *math.HexOrDecimal256 `json:"currentRandom"` - ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"` - GasLimit *math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"` - Number *math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"` - Timestamp *math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"` - ParentTimestamp *math.HexOrDecimal64 `json:"parentTimestamp,omitempty"` - BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` - Ommers []ommer `json:"ommers,omitempty"` - BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` - ParentUncleHash *common.Hash `json:"parentUncleHash"` + Coinbase *common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"` + Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"` + GasLimit *math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"` + Number *math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"` + Timestamp *math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"` + BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` + Ommers []ommer `json:"ommers,omitempty"` + BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` + Random *common.Hash `json:"currentRandom,omitempty"` } var dec stEnv if err := json.Unmarshal(input, &dec); err != nil { @@ -69,15 +60,10 @@ func (s *stEnv) UnmarshalJSON(input []byte) error { return errors.New("missing required field 'currentCoinbase' for stEnv") } s.Coinbase = common.Address(*dec.Coinbase) - if dec.Difficulty != nil 
{ - s.Difficulty = (*big.Int)(dec.Difficulty) - } - if dec.Random != nil { - s.Random = (*big.Int)(dec.Random) - } - if dec.ParentDifficulty != nil { - s.ParentDifficulty = (*big.Int)(dec.ParentDifficulty) + if dec.Difficulty == nil { + return errors.New("missing required field 'currentDifficulty' for stEnv") } + s.Difficulty = (*big.Int)(dec.Difficulty) if dec.GasLimit == nil { return errors.New("missing required field 'currentGasLimit' for stEnv") } @@ -90,9 +76,6 @@ func (s *stEnv) UnmarshalJSON(input []byte) error { return errors.New("missing required field 'currentTimestamp' for stEnv") } s.Timestamp = uint64(*dec.Timestamp) - if dec.ParentTimestamp != nil { - s.ParentTimestamp = uint64(*dec.ParentTimestamp) - } if dec.BlockHashes != nil { s.BlockHashes = dec.BlockHashes } @@ -102,8 +85,8 @@ func (s *stEnv) UnmarshalJSON(input []byte) error { if dec.BaseFee != nil { s.BaseFee = (*big.Int)(dec.BaseFee) } - if dec.ParentUncleHash != nil { - s.ParentUncleHash = *dec.ParentUncleHash + if dec.Random != nil { + s.Random = dec.Random } return nil } diff --git a/cmd/evm/internal/t8ntool/transition.go b/cmd/evm/internal/t8ntool/transition.go index 230efb89be4..a6547a7a3d6 100644 --- a/cmd/evm/internal/t8ntool/transition.go +++ b/cmd/evm/internal/t8ntool/transition.go @@ -17,7 +17,6 @@ package t8ntool import ( - "context" "crypto/ecdsa" "encoding/json" "errors" @@ -28,13 +27,9 @@ import ( "path/filepath" "github.com/holiman/uint256" - "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/memdb" "github.com/ledgerwatch/erigon/cmd/rpcdaemon/commands" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/hexutil" - "github.com/ledgerwatch/erigon/common/math" - "github.com/ledgerwatch/erigon/consensus/ethash" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" @@ -43,7 +38,6 @@ import ( "github.com/ledgerwatch/erigon/params" 
"github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/tests" - "github.com/ledgerwatch/erigon/turbo/trie" "github.com/ledgerwatch/log/v3" "github.com/urfave/cli" @@ -73,15 +67,10 @@ func (n *NumberedError) Error() string { return fmt.Sprintf("ERROR(%d): %v", n.errorCode, n.err.Error()) } -func (n *NumberedError) ExitCode() int { +func (n *NumberedError) Code() int { return n.errorCode } -// compile-time conformance test -var ( - _ cli.ExitCoder = (*NumberedError)(nil) -) - type input struct { Alloc core.GenesisAlloc `json:"alloc,omitempty"` Env *stEnv `json:"env,omitempty"` @@ -90,8 +79,16 @@ type input struct { func Main(ctx *cli.Context) error { log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StderrHandler)) + /* + // Configure the go-ethereum logger + glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false))) + glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name))) + log.Root().SetHandler(glogger) + */ + var ( err error + tracer vm.Tracer baseDir = "" ) var getTracer func(txIndex int, txHash common.Hash) (vm.Tracer, error) @@ -165,7 +162,6 @@ func Main(ctx *cli.Context) error { return NewError(ErrorJson, fmt.Errorf("failed unmarshaling alloc-file: %v", err)) } } - prestate.Pre = inputData.Alloc // Set the block environment @@ -185,8 +181,8 @@ func Main(ctx *cli.Context) error { prestate.Env = *inputData.Env vmConfig := vm.Config{ - Tracer: nil, - Debug: ctx.Bool(TraceFlag.Name), + Tracer: tracer, + Debug: (tracer != nil), } // Construct the chainconfig var chainConfig *params.ChainConfig @@ -220,90 +216,25 @@ func Main(ctx *cli.Context) error { return NewError(ErrorJson, fmt.Errorf("failed signing transactions: %v", err)) } - eip1559 := chainConfig.IsLondon(prestate.Env.Number) // Sanity check, to not `panic` in state_transition - if eip1559 { + if chainConfig.IsLondon(prestate.Env.Number) { if prestate.Env.BaseFee == nil { return NewError(ErrorVMConfig, errors.New("EIP-1559 config but missing 
'currentBaseFee' in env section")) } } - // Sanity check, to not `panic` in state_transition - if prestate.Env.Random != nil && !eip1559 { - return NewError(ErrorVMConfig, errors.New("can only apply RANDOM on top of London chainrules")) - } - if env := prestate.Env; env.Difficulty == nil { - // If difficulty was not provided by caller, we need to calculate it. - switch { - case env.ParentDifficulty == nil: - return NewError(ErrorVMConfig, errors.New("currentDifficulty was not provided, and cannot be calculated due to missing parentDifficulty")) - case env.Number == 0: - return NewError(ErrorVMConfig, errors.New("currentDifficulty needs to be provided for block number 0")) - case env.Timestamp <= env.ParentTimestamp: - return NewError(ErrorVMConfig, fmt.Errorf("currentDifficulty cannot be calculated -- currentTime (%d) needs to be after parent time (%d)", - env.Timestamp, env.ParentTimestamp)) - } - prestate.Env.Difficulty = calcDifficulty(chainConfig, env.Number, env.Timestamp, - env.ParentTimestamp, env.ParentDifficulty, env.ParentUncleHash) - } - - // manufacture block from above inputs - header := NewHeader(prestate.Env, chainConfig.IsLondon(prestate.Env.Number)) - - var ommerHeaders = make([]*types.Header, len(prestate.Env.Ommers)) - header.Number.Add(header.Number, big.NewInt(int64(len(prestate.Env.Ommers)))) - for i, ommer := range prestate.Env.Ommers { - var ommerN big.Int - ommerN.SetUint64(header.Number.Uint64() - ommer.Delta) - ommerHeaders[i] = &types.Header{Coinbase: ommer.Address, Number: &ommerN} - } - block := types.NewBlock(header, txs, ommerHeaders, nil) - - var hashError error - getHash := func(num uint64) common.Hash { - if prestate.Env.BlockHashes == nil { - hashError = fmt.Errorf("getHash(%d) invoked, no blockhashes provided", num) - return common.Hash{} - } - h, ok := prestate.Env.BlockHashes[math.HexOrDecimal64(num)] - if !ok { - hashError = fmt.Errorf("getHash(%d) invoked, blockhash for that block not provided", num) - } - return h - } - db 
:= memdb.New() - - tx, err := db.BeginRw(context.Background()) - if err != nil { - return err - } - - reader, writer := MakePreState(chainConfig.Rules(0), tx, prestate.Pre) - engine := ethash.NewFaker() - - result, err := core.ExecuteBlockEphemerally(chainConfig, &vmConfig, getHash, engine, block, reader, writer, nil, nil, nil, true, getTracer) - - if hashError != nil { - return NewError(ErrorMissingBlockhash, fmt.Errorf("blockhash error: %v", err)) + // Run the test and aggregate the result + _, result, err1 := prestate.Apply(vmConfig, chainConfig, txs, ctx.Int64(RewardFlag.Name), getTracer) + if err1 != nil { + return err1 } - - if err != nil { - return fmt.Errorf("error on EBE: %w", err) - } - - // state root calculation - root, err := CalculateStateRoot(tx) - if err != nil { - return err - } - result.StateRoot = *root - - // Dump the execution result body, _ := rlp.EncodeToBytes(txs) + // Dump the excution result collector := make(Alloc) - dumper := state.NewDumper(tx, prestate.Env.Number) - dumper.DumpToCollector(collector, false, false, common.Address{}, 0) + // TODO: Where DumpToCollector is declared? 
+ //state.DumpToCollector(collector, false, false, false, nil, -1) return dispatchOutput(ctx, baseDir, result, collector, body) + } // txWithKey is a helper-struct, to allow us to use the types.Transaction along with @@ -330,7 +261,8 @@ func (t *txWithKey) UnmarshalJSON(input []byte) error { return err } } - + gasPrice, value := uint256.NewInt(0), uint256.NewInt(0) + var overflow bool // Now, read the transaction itself var txJson commands.RPCTransaction @@ -338,104 +270,22 @@ func (t *txWithKey) UnmarshalJSON(input []byte) error { return err } - // assemble transaction - tx, err := getTransaction(txJson) - if err != nil { - return err - } - t.tx = tx - return nil -} - -func getTransaction(txJson commands.RPCTransaction) (types.Transaction, error) { - gasPrice, value := uint256.NewInt(0), uint256.NewInt(0) - var overflow bool - var chainId *uint256.Int - if txJson.Value != nil { value, overflow = uint256.FromBig((*big.Int)(txJson.Value)) if overflow { - return nil, fmt.Errorf("value field caused an overflow (uint256)") + return fmt.Errorf("value field caused an overflow (uint256)") } } if txJson.GasPrice != nil { gasPrice, overflow = uint256.FromBig((*big.Int)(txJson.GasPrice)) if overflow { - return nil, fmt.Errorf("gasPrice field caused an overflow (uint256)") - } - } - - if txJson.ChainID != nil { - chainId, overflow = uint256.FromBig((*big.Int)(txJson.ChainID)) - if overflow { - return nil, fmt.Errorf("chainId field caused an overflow (uint256)") + return fmt.Errorf("gasPrice field caused an overflow (uint256)") } } - - switch txJson.Type { - case types.LegacyTxType, types.AccessListTxType: - var toAddr common.Address = common.Address{} - if txJson.To != nil { - toAddr = *txJson.To - } - legacyTx := types.NewTransaction(uint64(txJson.Nonce), toAddr, value, uint64(txJson.Gas), gasPrice, txJson.Input) - legacyTx.V.SetFromBig(txJson.V.ToInt()) - legacyTx.S.SetFromBig(txJson.S.ToInt()) - legacyTx.R.SetFromBig(txJson.R.ToInt()) - - if txJson.Type == 
types.AccessListTxType { - accessListTx := types.AccessListTx{ - LegacyTx: *legacyTx, - ChainID: chainId, - AccessList: *txJson.Accesses, - } - - return &accessListTx, nil - } else { - return legacyTx, nil - } - - case types.DynamicFeeTxType: - var tip *uint256.Int - var feeCap *uint256.Int - if txJson.Tip != nil { - tip, overflow = uint256.FromBig((*big.Int)(txJson.Tip)) - if overflow { - return nil, fmt.Errorf("maxPriorityFeePerGas field caused an overflow (uint256)") - } - } - - if txJson.FeeCap != nil { - feeCap, overflow = uint256.FromBig((*big.Int)(txJson.FeeCap)) - if overflow { - return nil, fmt.Errorf("maxFeePerGas field caused an overflow (uint256)") - } - } - - dynamicFeeTx := types.DynamicFeeTransaction{ - CommonTx: types.CommonTx{ - ChainID: chainId, - Nonce: uint64(txJson.Nonce), - To: txJson.To, - Value: value, - Gas: uint64(txJson.Gas), - Data: txJson.Input, - }, - Tip: tip, - FeeCap: feeCap, - AccessList: *txJson.Accesses, - } - - dynamicFeeTx.V.SetFromBig(txJson.V.ToInt()) - dynamicFeeTx.S.SetFromBig(txJson.S.ToInt()) - dynamicFeeTx.R.SetFromBig(txJson.R.ToInt()) - - return &dynamicFeeTx, nil - - default: - return nil, nil - } + // assemble transaction + t.tx = types.NewTransaction(uint64(txJson.Nonce), *txJson.To, value, uint64(txJson.Gas), gasPrice, txJson.Input) + return nil } // signUnsignedTransactions converts the input txs to canonical transactions. 
@@ -508,7 +358,7 @@ func saveFile(baseDir, filename string, data interface{}) error { // dispatchOutput writes the output data to either stderr or stdout, or to the specified // files -func dispatchOutput(ctx *cli.Context, baseDir string, result *core.EphemeralExecResult, alloc Alloc, body hexutil.Bytes) error { +func dispatchOutput(ctx *cli.Context, baseDir string, result *ExecutionResult, alloc Alloc, body hexutil.Bytes) error { stdOutObject := make(map[string]interface{}) stdErrObject := make(map[string]interface{}) dispatch := func(baseDir, fName, name string, obj interface{}) error { @@ -551,65 +401,3 @@ func dispatchOutput(ctx *cli.Context, baseDir string, result *core.EphemeralExec } return nil } - -func NewHeader(env stEnv, Eip1559 bool) *types.Header { - var header types.Header - header.UncleHash = env.ParentUncleHash - header.Coinbase = env.Coinbase - header.Difficulty = env.Difficulty - header.Number = big.NewInt(int64(env.Number)) - header.GasLimit = env.GasLimit - header.Time = env.Timestamp - header.BaseFee = env.BaseFee - header.Eip1559 = Eip1559 - - return &header -} - -func CalculateStateRoot(tx kv.RwTx) (*common.Hash, error) { - // Generate hashed state - c, err := tx.RwCursor(kv.PlainState) - if err != nil { - return nil, err - } - h := common.NewHasher() - defer common.ReturnHasherToPool(h) - for k, v, err := c.First(); k != nil; k, v, err = c.Next() { - if err != nil { - return nil, fmt.Errorf("interate over plain state: %w", err) - } - var newK []byte - if len(k) == common.AddressLength { - newK = make([]byte, common.HashLength) - } else { - newK = make([]byte, common.HashLength*2+common.IncarnationLength) - } - h.Sha.Reset() - //nolint:errcheck - h.Sha.Write(k[:common.AddressLength]) - //nolint:errcheck - h.Sha.Read(newK[:common.HashLength]) - if len(k) > common.AddressLength { - copy(newK[common.HashLength:], k[common.AddressLength:common.AddressLength+common.IncarnationLength]) - h.Sha.Reset() - //nolint:errcheck - 
h.Sha.Write(k[common.AddressLength+common.IncarnationLength:]) - //nolint:errcheck - h.Sha.Read(newK[common.HashLength+common.IncarnationLength:]) - if err = tx.Put(kv.HashedStorage, newK, common.CopyBytes(v)); err != nil { - return nil, fmt.Errorf("insert hashed key: %w", err) - } - } else { - if err = tx.Put(kv.HashedAccounts, newK, common.CopyBytes(v)); err != nil { - return nil, fmt.Errorf("insert hashed key: %w", err) - } - } - } - c.Close() - root, err := trie.CalcRoot("", tx) - if err != nil { - return nil, err - } - - return &root, nil -} diff --git a/cmd/evm/main.go b/cmd/evm/main.go index 8d6bb1ef5f6..451c0edbd8d 100644 --- a/cmd/evm/main.go +++ b/cmd/evm/main.go @@ -148,6 +148,7 @@ var stateTransitionCommand = cli.Command{ t8ntool.InputTxsFlag, t8ntool.ForknameFlag, t8ntool.ChainIDFlag, + t8ntool.RewardFlag, t8ntool.VerbosityFlag, }, } @@ -191,7 +192,7 @@ func main() { if err := app.Run(os.Args); err != nil { code := 1 if ec, ok := err.(*t8ntool.NumberedError); ok { - code = ec.ExitCode() + code = ec.Code() } fmt.Fprintln(os.Stderr, err) os.Exit(code) diff --git a/cmd/evm/t8n_test.go b/cmd/evm/t8n_test.go deleted file mode 100644 index a2868e080b8..00000000000 --- a/cmd/evm/t8n_test.go +++ /dev/null @@ -1,247 +0,0 @@ -package main - -import ( - "encoding/json" - "fmt" - "os" - "reflect" - "strings" - "testing" - - "github.com/docker/docker/pkg/reexec" - "github.com/ledgerwatch/erigon/internal/cmdtest" -) - -func TestMain(m *testing.M) { - // Run the app if we've been exec'd as "ethkey-test" in runEthkey. 
- reexec.Register("evm-test", func() { - if err := app.Run(os.Args); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - os.Exit(0) - }) - // check if we have been reexec'd - if reexec.Init() { - return - } - os.Exit(m.Run()) -} - -type testT8n struct { - *cmdtest.TestCmd -} - -type t8nInput struct { - inAlloc string - inTxs string - inEnv string - stFork string -} - -func (args *t8nInput) get(base string) []string { - var out []string - if opt := args.inAlloc; opt != "" { - out = append(out, "--input.alloc") - out = append(out, fmt.Sprintf("%v/%v", base, opt)) - } - if opt := args.inTxs; opt != "" { - out = append(out, "--input.txs") - out = append(out, fmt.Sprintf("%v/%v", base, opt)) - } - if opt := args.inEnv; opt != "" { - out = append(out, "--input.env") - out = append(out, fmt.Sprintf("%v/%v", base, opt)) - } - if opt := args.stFork; opt != "" { - out = append(out, "--state.fork", opt) - } - return out -} - -type t8nOutput struct { - alloc bool - result bool - body bool -} - -func (args *t8nOutput) get() (out []string) { - if args.body { - out = append(out, "--output.body", "stdout") - } else { - out = append(out, "--output.body", "") // empty means ignore - } - if args.result { - out = append(out, "--output.result", "stdout") - } else { - out = append(out, "--output.result", "") - } - if args.alloc { - out = append(out, "--output.alloc", "stdout") - } else { - out = append(out, "--output.alloc", "") - } - return out -} - -func TestT8n(t *testing.T) { - tt := new(testT8n) - tt.TestCmd = cmdtest.NewTestCmd(t, tt) - for i, tc := range []struct { - base string - input t8nInput - output t8nOutput - expExitCode int - expOut string - }{ - { // Test exit (3) on bad config - base: "./testdata/1", - input: t8nInput{ - "alloc.json", "txs.json", "env.json", "Frontier+1346", - }, - output: t8nOutput{alloc: true, result: true}, - expExitCode: 3, - }, - { - base: "./testdata/1", - input: t8nInput{ - "alloc.json", "txs.json", "env.json", "Byzantium", - }, - 
output: t8nOutput{alloc: true, result: true}, - expOut: "exp.json", - }, - { // blockhash test - base: "./testdata/3", - input: t8nInput{ - "alloc.json", "txs.json", "env.json", "Berlin", - }, - output: t8nOutput{alloc: true, result: true}, - expOut: "exp.json", - }, - { // missing blockhash test - base: "./testdata/4", - input: t8nInput{ - "alloc.json", "txs.json", "env.json", "Berlin", - }, - expExitCode: 4, - }, - { // Uncle test - base: "./testdata/5", - input: t8nInput{ - "alloc.json", "txs.json", "env.json", "Byzantium", - }, - output: t8nOutput{alloc: true, result: true}, - expOut: "exp.json", - }, - { // Dao-transition check - base: "./testdata/7", - input: t8nInput{ - "alloc.json", "txs.json", "env.json", "HomesteadToDaoAt5", - }, - expOut: "exp.json", - output: t8nOutput{alloc: true, result: true}, - }, - { // transactions with access list - base: "./testdata/8", - input: t8nInput{ - "alloc.json", "txs.json", "env.json", "Berlin", - }, - expOut: "exp.json", - output: t8nOutput{alloc: true, result: true}, - }, - { // EIP-1559 - base: "./testdata/9", - input: t8nInput{ - "alloc.json", "txs.json", "env.json", "London", - }, - expOut: "exp.json", - output: t8nOutput{alloc: true, result: true}, - }, - { // EIP-1559 - base: "./testdata/10", - input: t8nInput{ - "alloc.json", "txs.json", "env.json", "London", - }, - expOut: "exp.json", - output: t8nOutput{alloc: true, result: true}, - }, - { // missing base fees - base: "./testdata/11", - input: t8nInput{ - "alloc.json", "txs.json", "env.json", "London", - }, - expExitCode: 3, - }, - { // EIP-1559 & gasCap - base: "./testdata/12", - input: t8nInput{ - "alloc.json", "txs.json", "env.json", "London", - }, - expOut: "exp.json", - output: t8nOutput{alloc: true, result: true}, - }, - { // Difficulty calculation on London - base: "./testdata/19", - input: t8nInput{ - "alloc.json", "txs.json", "env.json", "London", - }, - expOut: "exp_london.json", - output: t8nOutput{alloc: true, result: true}, - }, - { // Difficulty 
calculation on arrow glacier - base: "./testdata/19", - input: t8nInput{ - "alloc.json", "txs.json", "env.json", "ArrowGlacier", - }, - expOut: "exp_arrowglacier.json", - output: t8nOutput{alloc: true, result: true}, - }, - } { - - args := []string{"t8n"} - args = append(args, tc.output.get()...) - args = append(args, tc.input.get(tc.base)...) - var qArgs []string // quoted args for debugging purposes - for _, arg := range args { - if len(arg) == 0 { - qArgs = append(qArgs, `""`) - } else { - qArgs = append(qArgs, arg) - } - } - tt.Logf("args: %v\n", strings.Join(qArgs, " ")) - tt.Run("evm-test", args...) - // Compare the expected output, if provided - if tc.expOut != "" { - want, err := os.ReadFile(fmt.Sprintf("%v/%v", tc.base, tc.expOut)) - if err != nil { - t.Fatalf("test %d: could not read expected output: %v", i, err) - } - have := tt.Output() - ok, err := cmpJson(have, want) - switch { - case err != nil: - t.Fatalf("test %d, json parsing failed: %v", i, err) - case !ok: - t.Fatalf("test %d: output wrong, have \n%v\nwant\n%v\n", i, string(have), string(want)) - } - } - tt.WaitExit() - if have, want := tt.ExitStatus(), tc.expExitCode; have != want { - t.Fatalf("test %d: wrong exit code, have %d, want %d", i, have, want) - } - } -} - -// cmpJson compares the JSON in two byte slices. 
-func cmpJson(a, b []byte) (bool, error) { - var j, j2 interface{} - if err := json.Unmarshal(a, &j); err != nil { - return false, err - } - if err := json.Unmarshal(b, &j2); err != nil { - return false, err - } - - return reflect.DeepEqual(j2, j), nil -} diff --git a/cmd/evm/testdata/1/exp.json b/cmd/evm/testdata/1/exp.json deleted file mode 100644 index d8094e7aa67..00000000000 --- a/cmd/evm/testdata/1/exp.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "alloc": { - "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192": { - "balance": "0xfeed1a9d", - "nonce": "0x1" - }, - "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { - "balance": "0x5ffd4878be161d74", - "nonce": "0xac" - }, - "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b": { - "balance": "0x29a2241af62ca410" - } - }, - "result": { - "stateRoot": "0xe72f10cef9b1d32a16e2f5a8d64b25dacde99efcdea460387db527486582c3f7", - "txRoot": "0xc4761fd7b87ff2364c7c60b6c5c8d02e522e815328aaea3f20e3b7b7ef52c42d", - "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2", - "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", - "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "receipts": [ - { - "root": "0x", - "status": "0x1", - "cumulativeGasUsed": "0x5208", - "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "logs": null, - "transactionHash": "0x0557bacce3375c98d806609b8d5043072f0b6a8bae45ae5a67a00d3a1a18d673", - "contractAddress": "0x0000000000000000000000000000000000000000", - "gasUsed": "0x5208", - "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "blockNumber": "0x1", - "transactionIndex": "0x0" - } - ], - "rejected": [ - { - "index": 1, - "error": "nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1" - } - ], - "currentDifficulty": "0x20000", - "gasUsed": "0x5208" - } -} diff --git a/cmd/evm/testdata/10/alloc.json b/cmd/evm/testdata/10/alloc.json deleted file mode 100644 index 6e98e7513c4..00000000000 --- a/cmd/evm/testdata/10/alloc.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "0x1111111111111111111111111111111111111111" : { - "balance" : "0x010000000000", - "code" : "0xfe", - "nonce" : "0x01", - "storage" : { - } - }, - "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { - "balance" : "0x010000000000", - "code" : "0x", - "nonce" : "0x01", - "storage" : { - } - }, - "0xd02d72e067e77158444ef2020ff2d325f929b363" : { - "balance" : "0x01000000000000", - "code" : "0x", - "nonce" : "0x01", - "storage" : { - } - } -} \ No newline at end of file diff --git a/cmd/evm/testdata/10/env.json b/cmd/evm/testdata/10/env.json deleted file mode 100644 index 3a82d46a774..00000000000 --- a/cmd/evm/testdata/10/env.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "currentCoinbase" : 
"0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", - "currentDifficulty" : "0x020000", - "currentNumber" : "0x01", - "currentTimestamp" : "0x079e", - "previousHash" : "0xcb23ee65a163121f640673b41788ee94633941405f95009999b502eedfbbfd4f", - "currentGasLimit" : "0x40000000", - "currentBaseFee" : "0x036b", - "blockHashes" : { - "0" : "0xcb23ee65a163121f640673b41788ee94633941405f95009999b502eedfbbfd4f" - } -} \ No newline at end of file diff --git a/cmd/evm/testdata/10/exp.json b/cmd/evm/testdata/10/exp.json deleted file mode 100644 index 5ab98860c77..00000000000 --- a/cmd/evm/testdata/10/exp.json +++ /dev/null @@ -1,79 +0,0 @@ -{ - "alloc": { - "0x1111111111111111111111111111111111111111": { - "code": "0xfe", - "balance": "0x10000000000", - "nonce": "0x1" - }, - "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba": { - "balance": "0x1bc16d674ec80000" - }, - "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { - "balance": "0x10000000000", - "nonce": "0x1" - }, - "0xd02d72e067e77158444ef2020ff2d325f929b363": { - "balance": "0xff5beffffc95", - "nonce": "0x4" - } - }, - "result": { - "stateRoot": "0x4b7b4d5dd6316b58407468a5d3cf0a18e42d3833911d3fccd80eb49273024ffa", - "txRoot": "0xda925f2306a52fa24c15d5cd212d736ee016415fd8dd0c45fd368de7917d64bb", - "receiptsRoot": "0x439a25f7fc424c10fb1f89800e4aa1df74156b137239d9ac3eaa7c911c353cd5", - "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", - "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "receipts": [ - { - "type": "0x2", - "root": "0x", 
- "status": "0x0", - "cumulativeGasUsed": "0x10000001", - "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "logs": null, - "transactionHash": "0x88980f6efcc5358d9c359663e7b9414722d430497637340ea056b076bc206701", - "contractAddress": "0x0000000000000000000000000000000000000000", - "gasUsed": "0x10000001", - "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "blockNumber": "0x1", - "transactionIndex": "0x0" - }, - { - "type": "0x2", - "root": "0x", - "status": "0x0", - "cumulativeGasUsed": "0x20000001", - "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "logs": null, - "transactionHash": "0xd7bf3886f4e2aef74d525ae072c680f3846f550254401b67cbfda4a233757582", - "contractAddress": "0x0000000000000000000000000000000000000000", - "gasUsed": "0x10000000", - "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "blockNumber": "0x1", - "transactionIndex": "0x1" - }, - { - "type": "0x2", - "root": "0x", - "status": "0x0", - "cumulativeGasUsed": "0x30000001", - 
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "logs": null, - "transactionHash": "0x50308296760f01f1eeec7500e9e73cad67469249b1f59e9a9f55e6625a4923db", - "contractAddress": "0x0000000000000000000000000000000000000000", - "gasUsed": "0x10000000", - "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "blockNumber": "0x1", - "transactionIndex": "0x2" - } - ], - "rejected": [ - { - "index": 3, - "error": "gas limit reached" - } - ], - "currentDifficulty": "0x20000", - "gasUsed": "0x30000001" - } -} diff --git a/cmd/evm/testdata/10/readme.md b/cmd/evm/testdata/10/readme.md deleted file mode 100644 index c34be80bb71..00000000000 --- a/cmd/evm/testdata/10/readme.md +++ /dev/null @@ -1,79 +0,0 @@ -## EIP-1559 testing - -This test contains testcases for EIP-1559, which were reported by Ori as misbehaving. 
- -``` -[user@work evm]$ dir=./testdata/10 && ./evm t8n --state.fork=London --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout --output.result=stdout 2>&1 -INFO [05-09|22:11:59.436] rejected tx index=3 hash=db07bf..ede1e8 from=0xd02d72E067e77158444ef2020Ff2d325f929B363 error="gas limit reached" -``` -Output: -```json -{ - "alloc": { - "0x1111111111111111111111111111111111111111": { - "code": "0xfe", - "balance": "0x10000000000", - "nonce": "0x1" - }, - "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { - "balance": "0x10000000000", - "nonce": "0x1" - }, - "0xd02d72e067e77158444ef2020ff2d325f929b363": { - "balance": "0xff5beffffc95", - "nonce": "0x4" - } - }, - "result": { - "stateRoot": "0xf91a7ec08e4bfea88719aab34deabb000c86902360532b52afa9599d41f2bb8b", - "txRoot": "0xda925f2306a52fa24c15d5cd212d736ee016415fd8dd0c45fd368de7917d64bb", - "receiptRoot": "0x439a25f7fc424c10fb1f89800e4aa1df74156b137239d9ac3eaa7c911c353cd5", - "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", - "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "receipts": [ - { - "type": "0x2", - "root": "0x", - "status": "0x0", - "cumulativeGasUsed": "0x10000001", - "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "logs": null, - "transactionHash": "0x88980f6efcc5358d9c359663e7b9414722d430497637340ea056b076bc206701", - "contractAddress": "0x0000000000000000000000000000000000000000", - "gasUsed": "0x10000001", - "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "transactionIndex": "0x0" - }, - { - "type": "0x2", - "root": "0x", - "status": "0x0", - "cumulativeGasUsed": "0x20000001", - "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "logs": null, - "transactionHash": "0xd7bf3886f4e2aef74d525ae072c680f3846f550254401b67cbfda4a233757582", - "contractAddress": "0x0000000000000000000000000000000000000000", - "gasUsed": "0x10000000", - "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "transactionIndex": "0x1" - }, - { - "type": "0x2", - "root": "0x", - "status": "0x0", - "cumulativeGasUsed": "0x30000001", - "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "logs": null, - "transactionHash": "0x50308296760f01f1eeec7500e9e73cad67469249b1f59e9a9f55e6625a4923db", - "contractAddress": "0x0000000000000000000000000000000000000000", - "gasUsed": "0x10000000", - "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "transactionIndex": "0x2" - } - ], - "rejected": [ - 3 - ] - } -} -``` diff --git a/cmd/evm/testdata/10/txs.json b/cmd/evm/testdata/10/txs.json deleted file mode 100644 index f7c9baa26da..00000000000 --- a/cmd/evm/testdata/10/txs.json +++ /dev/null @@ -1,70 +0,0 @@ -[ - { - "input" : "0x", - "gas" : "0x10000001", - "nonce" : "0x1", - "to" : "0x1111111111111111111111111111111111111111", - "value" : "0x0", - "v" : "0x0", - "r" : "0x7a45f00bcde9036b026cdf1628b023cd8a31a95c62b5e4dbbee2fa7debe668fb", - "s" : "0x3cc9d6f2cd00a045b0263f2d6dad7d60938d5d13d061af4969f95928aa934d4a", - "secretKey" : "0x41f6e321b31e72173f8ff2e292359e1862f24fba42fe6f97efaf641980eff298", - "chainId" : "0x1", - "type" : "0x2", - "maxFeePerGas" : "0xfa0", - "maxPriorityFeePerGas" : "0x0", - "accessList" : [ - ] - }, - { - "input" : "0x", - "gas" : "0x10000000", - "nonce" : "0x2", - "to" : "0x1111111111111111111111111111111111111111", - "value" : "0x0", - "v" : "0x0", - "r" : "0x4c564b94b0281a8210eeec2dd1fe2e16ff1c1903a8c3a1078d735d7f8208b2af", - "s" : "0x56432b2593e6de95db1cb997b7385217aca03f1615327e231734446b39f266d", - "secretKey" : "0x41f6e321b31e72173f8ff2e292359e1862f24fba42fe6f97efaf641980eff298", - 
"chainId" : "0x1", - "type" : "0x2", - "maxFeePerGas" : "0xfa0", - "maxPriorityFeePerGas" : "0x0", - "accessList" : [ - ] - }, - { - "input" : "0x", - "gas" : "0x10000000", - "nonce" : "0x3", - "to" : "0x1111111111111111111111111111111111111111", - "value" : "0x0", - "v" : "0x0", - "r" : "0x2ed2ef52f924f59d4a21e1f2a50d3b1109303ce5e32334a7ece9b46f4fbc2a57", - "s" : "0x2980257129cbd3da987226f323d50ba3975a834d165e0681f991b75615605c44", - "secretKey" : "0x41f6e321b31e72173f8ff2e292359e1862f24fba42fe6f97efaf641980eff298", - "chainId" : "0x1", - "type" : "0x2", - "maxFeePerGas" : "0xfa0", - "maxPriorityFeePerGas" : "0x0", - "accessList" : [ - ] - }, - { - "input" : "0x", - "gas" : "0x10000000", - "nonce" : "0x4", - "to" : "0x1111111111111111111111111111111111111111", - "value" : "0x0", - "v" : "0x0", - "r" : "0x5df7d7f8f8e15b36fc9f189cacb625040fad10398d08fc90812595922a2c49b2", - "s" : "0x565fc1803f77a84d754ffe3c5363ab54a8d93a06ea1bb9d4c73c73a282b35917", - "secretKey" : "0x41f6e321b31e72173f8ff2e292359e1862f24fba42fe6f97efaf641980eff298", - "chainId" : "0x1", - "type" : "0x2", - "maxFeePerGas" : "0xfa0", - "maxPriorityFeePerGas" : "0x0", - "accessList" : [ - ] - } -] \ No newline at end of file diff --git a/cmd/evm/testdata/11/alloc.json b/cmd/evm/testdata/11/alloc.json deleted file mode 100644 index 86938230fa7..00000000000 --- a/cmd/evm/testdata/11/alloc.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "0x0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6" : { - "balance" : "0x0de0b6b3a7640000", - "code" : "0x61ffff5060046000f3", - "nonce" : "0x01", - "storage" : { - } - }, - "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { - "balance" : "0x0de0b6b3a7640000", - "code" : "0x", - "nonce" : "0x00", - "storage" : { - "0x00" : "0x00" - } - }, - "0xb94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { - "balance" : "0x00", - "code" : "0x6001600055", - "nonce" : "0x00", - "storage" : { - } - } -} - diff --git a/cmd/evm/testdata/11/env.json b/cmd/evm/testdata/11/env.json deleted file mode 100644 index 
37dedf09475..00000000000 --- a/cmd/evm/testdata/11/env.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", - "currentDifficulty" : "0x020000", - "currentNumber" : "0x01", - "currentTimestamp" : "0x03e8", - "previousHash" : "0xfda4419b3660e99f37e536dae1ab081c180136bb38c837a93e93d9aab58553b2", - "currentGasLimit" : "0x0f4240", - "blockHashes" : { - "0" : "0xfda4419b3660e99f37e536dae1ab081c180136bb38c837a93e93d9aab58553b2" - } -} - diff --git a/cmd/evm/testdata/11/readme.md b/cmd/evm/testdata/11/readme.md deleted file mode 100644 index d499f8e99fa..00000000000 --- a/cmd/evm/testdata/11/readme.md +++ /dev/null @@ -1,13 +0,0 @@ -## Test missing basefee - -In this test, the `currentBaseFee` is missing from the env portion. -On a live blockchain, the basefee is present in the header, and verified as part of header validation. - -In `evm t8n`, we don't have blocks, so it needs to be added in the `env`instead. - -When it's missing, an error is expected. 
- -``` -dir=./testdata/11 && ./evm t8n --state.fork=London --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout --output.result=stdout 2>&1>/dev/null -ERROR(3): EIP-1559 config but missing 'currentBaseFee' in env section -``` \ No newline at end of file diff --git a/cmd/evm/testdata/11/txs.json b/cmd/evm/testdata/11/txs.json deleted file mode 100644 index c54b0a1f5b4..00000000000 --- a/cmd/evm/testdata/11/txs.json +++ /dev/null @@ -1,14 +0,0 @@ -[ - { - "input" : "0x38600060013960015160005560006000f3", - "gas" : "0x61a80", - "gasPrice" : "0x1", - "nonce" : "0x0", - "value" : "0x186a0", - "v" : "0x1c", - "r" : "0x2e1391fd903387f1cc2b51df083805fb4bbb0d4710a2cdf4a044d191ff7be63e", - "s" : "0x7f10a933c42ab74927db02b1db009e923d9d2ab24ac24d63c399f2fe5d9c9b22", - "secretKey" : "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8" - } -] - diff --git a/cmd/evm/testdata/12/alloc.json b/cmd/evm/testdata/12/alloc.json deleted file mode 100644 index 3ed96894fbc..00000000000 --- a/cmd/evm/testdata/12/alloc.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { - "balance" : "84000000", - "code" : "0x", - "nonce" : "0x00", - "storage" : { - "0x00" : "0x00" - } - } -} - diff --git a/cmd/evm/testdata/12/env.json b/cmd/evm/testdata/12/env.json deleted file mode 100644 index 8ae5465369c..00000000000 --- a/cmd/evm/testdata/12/env.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", - "currentDifficulty" : "0x020000", - "currentNumber" : "0x01", - "currentTimestamp" : "0x03e8", - "previousHash" : "0xfda4419b3660e99f37e536dae1ab081c180136bb38c837a93e93d9aab58553b2", - "currentGasLimit" : "0x0f4240", - "currentBaseFee" : "0x20" -} - diff --git a/cmd/evm/testdata/12/exp.json b/cmd/evm/testdata/12/exp.json deleted file mode 100644 index 9f88273f734..00000000000 --- a/cmd/evm/testdata/12/exp.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - 
"alloc": { - "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba": { - "balance": "0x1bc16d674ec80000" - }, - "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { - "balance": "0x501bd00" - } - }, - "result": { - "stateRoot": "0x9fd6c7f520a9e9a160c19d65b929161415bc4e86ea75e7c9cac4fe8f776cf453", - "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", - "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", - "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", - "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "receipts": [], - "rejected": [ - { - "index": 0, - "error": "insufficient funds for gas * price + value: address 0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B have 84000000 want 84000032" - } - ], - "currentDifficulty": "0x20000", - "gasUsed": "0x0" - } -} diff --git a/cmd/evm/testdata/12/readme.md b/cmd/evm/testdata/12/readme.md deleted file mode 100644 index b0177ecc24b..00000000000 --- a/cmd/evm/testdata/12/readme.md +++ /dev/null @@ -1,40 +0,0 @@ -## Test 1559 balance + gasCap - -This test contains an EIP-1559 consensus issue which happened on Ropsten, where -`geth` did not properly account for the value transfer while doing the check on `max_fee_per_gas * gas_limit`. 
- -Before the issue was fixed, this invocation allowed the transaction to pass into a block: -``` -dir=./testdata/12 && ./evm t8n --state.fork=London --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout --output.result=stdout -``` - -With the fix applied, the result is: -``` -dir=./testdata/12 && ./evm t8n --state.fork=London --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout --output.result=stdout -INFO [07-21|19:03:50.276] rejected tx index=0 hash=ccc996..d83435 from=0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B error="insufficient funds for gas * price + value: address 0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B have 84000000 want 84000032" -INFO [07-21|19:03:50.276] Trie dumping started root=e05f81..6597a5 -INFO [07-21|19:03:50.276] Trie dumping complete accounts=1 elapsed="39.549µs" -{ - "alloc": { - "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { - "balance": "0x501bd00" - } - }, - "result": { - "stateRoot": "0xe05f81f8244a76503ceec6f88abfcd03047a612a1001217f37d30984536597a5", - "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", - "receiptRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", - "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", - "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "receipts": [], - "rejected": [ - { - "index": 0, - "error": "insufficient funds for gas * price + value: address 
0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B have 84000000 want 84000032" - } - ] - } -} -``` - -The transaction is rejected. \ No newline at end of file diff --git a/cmd/evm/testdata/12/txs.json b/cmd/evm/testdata/12/txs.json deleted file mode 100644 index cd683f271c7..00000000000 --- a/cmd/evm/testdata/12/txs.json +++ /dev/null @@ -1,20 +0,0 @@ -[ - { - "input" : "0x", - "gas" : "0x5208", - "nonce" : "0x0", - "to" : "0x1111111111111111111111111111111111111111", - "value" : "0x20", - "v" : "0x0", - "r" : "0x0", - "s" : "0x0", - "secretKey" : "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8", - "chainId" : "0x1", - "type" : "0x2", - "maxFeePerGas" : "0xfa0", - "maxPriorityFeePerGas" : "0x20", - "accessList" : [ - ] - } -] - diff --git a/cmd/evm/testdata/19/alloc.json b/cmd/evm/testdata/19/alloc.json deleted file mode 100644 index cef1a25ff01..00000000000 --- a/cmd/evm/testdata/19/alloc.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "a94f5374fce5edbc8e2a8697c15331677e6ebf0b": { - "balance": "0x5ffd4878be161d74", - "code": "0x", - "nonce": "0xac", - "storage": {} - }, - "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192":{ - "balance": "0xfeedbead", - "nonce" : "0x00" - } -} \ No newline at end of file diff --git a/cmd/evm/testdata/19/env.json b/cmd/evm/testdata/19/env.json deleted file mode 100644 index 0c64392aff5..00000000000 --- a/cmd/evm/testdata/19/env.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "currentCoinbase": "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b", - "currentGasLimit": "0x750a163df65e8a", - "currentBaseFee": "0x500", - "currentNumber": "13000000", - "currentTimestamp": "100015", - "parentTimestamp" : "99999", - "parentDifficulty" : "0x2000000000000" -} diff --git a/cmd/evm/testdata/19/exp_arrowglacier.json b/cmd/evm/testdata/19/exp_arrowglacier.json deleted file mode 100644 index 266b955565b..00000000000 --- a/cmd/evm/testdata/19/exp_arrowglacier.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "alloc": { - "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192": { - 
"balance": "0xfeedbead" - }, - "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { - "balance": "0x5ffd4878be161d74", - "nonce": "0xac" - }, - "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b": { - "balance": "0x1bc16d674ec80000" - } - }, - "result": { - "stateRoot": "0x374cbd5c614cb6ef173024d1c0d4e0313dafc2d7fc8f4399cf4bd1b60fc7c2ca", - "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", - "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", - "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", - "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "receipts": [], - "currentDifficulty": "0x2000000200000", - "gasUsed": "0x0" - } -} diff --git a/cmd/evm/testdata/19/exp_london.json b/cmd/evm/testdata/19/exp_london.json deleted file mode 100644 index d594281e4be..00000000000 --- a/cmd/evm/testdata/19/exp_london.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "alloc": { - "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192": { - "balance": "0xfeedbead" - }, - "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { - "balance": "0x5ffd4878be161d74", - "nonce": "0xac" - }, - "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b": { - "balance": "0x1bc16d674ec80000" - } - }, - "result": { - "stateRoot": "0x374cbd5c614cb6ef173024d1c0d4e0313dafc2d7fc8f4399cf4bd1b60fc7c2ca", - "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", - "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", - "logsHash": 
"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", - "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "receipts": [], - "currentDifficulty": "0x2000080000000", - "gasUsed": "0x0" - } -} diff --git a/cmd/evm/testdata/19/readme.md b/cmd/evm/testdata/19/readme.md deleted file mode 100644 index 5fae183f488..00000000000 --- a/cmd/evm/testdata/19/readme.md +++ /dev/null @@ -1,9 +0,0 @@ -## Difficulty calculation - -This test shows how the `evm t8n` can be used to calculate the (ethash) difficulty, if none is provided by the caller, -this time on `ArrowGlacier` (Eip 4345). 
- -Calculating it (with an empty set of txs) using `ArrowGlacier` rules (and no provided unclehash for the parent block): -``` -[user@work evm]$ ./evm t8n --input.alloc=./testdata/14/alloc.json --input.txs=./testdata/14/txs.json --input.env=./testdata/14/env.json --output.result=stdout --state.fork=ArrowGlacier -``` \ No newline at end of file diff --git a/cmd/evm/testdata/19/txs.json b/cmd/evm/testdata/19/txs.json deleted file mode 100644 index fe51488c706..00000000000 --- a/cmd/evm/testdata/19/txs.json +++ /dev/null @@ -1 +0,0 @@ -[] diff --git a/cmd/evm/testdata/3/exp.json b/cmd/evm/testdata/3/exp.json deleted file mode 100644 index 5b8b7c84ebc..00000000000 --- a/cmd/evm/testdata/3/exp.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "alloc": { - "0x095e7baea6a6c7c4c2dfeb977efac326af552d87": { - "code": "0x600140", - "balance": "0xde0b6b3a76586a0" - }, - "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba": { - "balance": "0x1bc16d674ec8521f" - }, - "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { - "balance": "0xde0b6b3a7622741", - "nonce": "0x1" - } - }, - "result": { - "stateRoot": "0x5aeefb3e8fe1d722455ff4b4ee76793af2c654f7f5120b79a8427d696ed01558", - "txRoot": "0x75e61774a2ff58cbe32653420256c7f44bc715715a423b0b746d5c622979af6b", - "receiptsRoot": "0xd0d26df80374a327c025d405ebadc752b1bbd089d864801ae78ab704bcad8086", - "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", - "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "receipts": [ - { - "root": "0x", - "status": "0x1", 
- "cumulativeGasUsed": "0x521f", - "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "logs": null, - "transactionHash": "0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81", - "contractAddress": "0x0000000000000000000000000000000000000000", - "gasUsed": "0x521f", - "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "blockNumber": "0x5", - "transactionIndex": "0x0" - } - ], - "currentDifficulty": "0x20000", - "gasUsed": "0x521f" - } -} diff --git a/cmd/evm/testdata/5/exp.json b/cmd/evm/testdata/5/exp.json deleted file mode 100644 index 5feeff85c09..00000000000 --- a/cmd/evm/testdata/5/exp.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "alloc": { - "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa": { - "balance": "0x2c3c465ca58ec000" - }, - "0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb": { - "balance": "0x246ddf9797668000" - }, - "0xcccccccccccccccccccccccccccccccccccccccc": { - "balance": "0x1f399b1438a10000" - } - }, - "result": { - "stateRoot": "0x5069e6c86aeba39397685cf7914a7505a78059be8c5f4d1348050ce78b348e99", - "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", - "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", - "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", - "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "receipts": [], - "currentDifficulty": "0x20000", - "gasUsed": "0x0" - } -} diff --git a/cmd/evm/testdata/7/exp.json b/cmd/evm/testdata/7/exp.json deleted file mode 100644 index 23ca9f5cf97..00000000000 --- a/cmd/evm/testdata/7/exp.json +++ /dev/null @@ -1,375 +0,0 @@ -{ - "alloc": { - "0x005f5cee7a43331d5a3d3eec71305925a62f34b6": { - "balance": "0x0" - }, - "0x0101f3be8ebb4bbd39a2e3b9a3639d4259832fd9": { - "balance": "0x0" - }, - "0x057b56736d32b86616a10f619859c6cd6f59092a": { - "balance": "0x0" - }, - "0x06706dd3f2c9abf0a21ddcc6941d9b86f0596936": { - "balance": "0x0" - }, - "0x0737a6b837f97f46ebade41b9bc3e1c509c85c53": { - "balance": "0x0" - }, - "0x07f5c1e1bc2c93e0402f23341973a0e043f7bf8a": { - "balance": "0x0" - }, - "0x0e0da70933f4c7849fc0d203f5d1d43b9ae4532d": { - "balance": "0x0" - }, - "0x0ff30d6de14a8224aa97b78aea5388d1c51c1f00": { - "balance": "0x0" - }, - "0x12e626b0eebfe86a56d633b9864e389b45dcb260": { - "balance": "0x0" - }, - "0x1591fc0f688c81fbeb17f5426a162a7024d430c2": { - "balance": "0x0" - }, - "0x17802f43a0137c506ba92291391a8a8f207f487d": { - "balance": "0x0" - }, - "0x1975bd06d486162d5dc297798dfc41edd5d160a7": { - "balance": "0x0" - }, - "0x1ca6abd14d30affe533b24d7a21bff4c2d5e1f3b": { - "balance": "0x0" - }, - "0x1cba23d343a983e9b5cfd19496b9a9701ada385f": { - "balance": "0x0" - }, - "0x200450f06520bdd6c527622a273333384d870efb": { - "balance": "0x0" - }, - "0x21c7fdb9ed8d291d79ffd82eb2c4356ec0d81241": { - "balance": "0x0" - }, - 
"0x23b75c2f6791eef49c69684db4c6c1f93bf49a50": { - "balance": "0x0" - }, - "0x24c4d950dfd4dd1902bbed3508144a54542bba94": { - "balance": "0x0" - }, - "0x253488078a4edf4d6f42f113d1e62836a942cf1a": { - "balance": "0x0" - }, - "0x27b137a85656544b1ccb5a0f2e561a5703c6a68f": { - "balance": "0x0" - }, - "0x2a5ed960395e2a49b1c758cef4aa15213cfd874c": { - "balance": "0x0" - }, - "0x2b3455ec7fedf16e646268bf88846bd7a2319bb2": { - "balance": "0x0" - }, - "0x2c19c7f9ae8b751e37aeb2d93a699722395ae18f": { - "balance": "0x0" - }, - "0x304a554a310c7e546dfe434669c62820b7d83490": { - "balance": "0x0" - }, - "0x319f70bab6845585f412ec7724b744fec6095c85": { - "balance": "0x0" - }, - "0x35a051a0010aba705c9008d7a7eff6fb88f6ea7b": { - "balance": "0x0" - }, - "0x3ba4d81db016dc2890c81f3acec2454bff5aada5": { - "balance": "0x0" - }, - "0x3c02a7bc0391e86d91b7d144e61c2c01a25a79c5": { - "balance": "0x0" - }, - "0x40b803a9abce16f50f36a77ba41180eb90023925": { - "balance": "0x0" - }, - "0x440c59b325d2997a134c2c7c60a8c61611212bad": { - "balance": "0x0" - }, - "0x4486a3d68fac6967006d7a517b889fd3f98c102b": { - "balance": "0x0" - }, - "0x4613f3bca5c44ea06337a9e439fbc6d42e501d0a": { - "balance": "0x0" - }, - "0x47e7aa56d6bdf3f36be34619660de61275420af8": { - "balance": "0x0" - }, - "0x4863226780fe7c0356454236d3b1c8792785748d": { - "balance": "0x0" - }, - "0x492ea3bb0f3315521c31f273e565b868fc090f17": { - "balance": "0x0" - }, - "0x4cb31628079fb14e4bc3cd5e30c2f7489b00960c": { - "balance": "0x0" - }, - "0x4deb0033bb26bc534b197e61d19e0733e5679784": { - "balance": "0x0" - }, - "0x4fa802324e929786dbda3b8820dc7834e9134a2a": { - "balance": "0x0" - }, - "0x4fd6ace747f06ece9c49699c7cabc62d02211f75": { - "balance": "0x0" - }, - "0x51e0ddd9998364a2eb38588679f0d2c42653e4a6": { - "balance": "0x0" - }, - "0x52c5317c848ba20c7504cb2c8052abd1fde29d03": { - "balance": "0x0" - }, - "0x542a9515200d14b68e934e9830d91645a980dd7a": { - "balance": "0x0" - }, - "0x5524c55fb03cf21f549444ccbecb664d0acad706": { - "balance": "0x0" - }, - 
"0x579a80d909f346fbfb1189493f521d7f48d52238": { - "balance": "0x0" - }, - "0x58b95c9a9d5d26825e70a82b6adb139d3fd829eb": { - "balance": "0x0" - }, - "0x5c6e67ccd5849c0d29219c4f95f1a7a93b3f5dc5": { - "balance": "0x0" - }, - "0x5c8536898fbb74fc7445814902fd08422eac56d0": { - "balance": "0x0" - }, - "0x5d2b2e6fcbe3b11d26b525e085ff818dae332479": { - "balance": "0x0" - }, - "0x5dc28b15dffed94048d73806ce4b7a4612a1d48f": { - "balance": "0x0" - }, - "0x5f9f3392e9f62f63b8eac0beb55541fc8627f42c": { - "balance": "0x0" - }, - "0x6131c42fa982e56929107413a9d526fd99405560": { - "balance": "0x0" - }, - "0x6231b6d0d5e77fe001c2a460bd9584fee60d409b": { - "balance": "0x0" - }, - "0x627a0a960c079c21c34f7612d5d230e01b4ad4c7": { - "balance": "0x0" - }, - "0x63ed5a272de2f6d968408b4acb9024f4cc208ebf": { - "balance": "0x0" - }, - "0x6966ab0d485353095148a2155858910e0965b6f9": { - "balance": "0x0" - }, - "0x6b0c4d41ba9ab8d8cfb5d379c69a612f2ced8ecb": { - "balance": "0x0" - }, - "0x6d87578288b6cb5549d5076a207456a1f6a63dc0": { - "balance": "0x0" - }, - "0x6f6704e5a10332af6672e50b3d9754dc460dfa4d": { - "balance": "0x0" - }, - "0x7602b46df5390e432ef1c307d4f2c9ff6d65cc97": { - "balance": "0x0" - }, - "0x779543a0491a837ca36ce8c635d6154e3c4911a6": { - "balance": "0x0" - }, - "0x77ca7b50b6cd7e2f3fa008e24ab793fd56cb15f6": { - "balance": "0x0" - }, - "0x782495b7b3355efb2833d56ecb34dc22ad7dfcc4": { - "balance": "0x0" - }, - "0x807640a13483f8ac783c557fcdf27be11ea4ac7a": { - "balance": "0x0" - }, - "0x8163e7fb499e90f8544ea62bbf80d21cd26d9efd": { - "balance": "0x0" - }, - "0x84ef4b2357079cd7a7c69fd7a37cd0609a679106": { - "balance": "0x0" - }, - "0x86af3e9626fce1957c82e88cbf04ddf3a2ed7915": { - "balance": "0x0" - }, - "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192": { - "balance": "0xfeedbead" - }, - "0x8d9edb3054ce5c5774a420ac37ebae0ac02343c6": { - "balance": "0x0" - }, - "0x914d1b8b43e92723e64fd0a06f5bdb8dd9b10c79": { - "balance": "0x0" - }, - "0x97f43a37f595ab5dd318fb46e7a155eae057317a": { - "balance": "0x0" - 
}, - "0x9aa008f65de0b923a2a4f02012ad034a5e2e2192": { - "balance": "0x0" - }, - "0x9c15b54878ba618f494b38f0ae7443db6af648ba": { - "balance": "0x0" - }, - "0x9c50426be05db97f5d64fc54bf89eff947f0a321": { - "balance": "0x0" - }, - "0x9da397b9e80755301a3b32173283a91c0ef6c87e": { - "balance": "0x0" - }, - "0x9ea779f907f0b315b364b0cfc39a0fde5b02a416": { - "balance": "0x0" - }, - "0x9f27daea7aca0aa0446220b98d028715e3bc803d": { - "balance": "0x0" - }, - "0x9fcd2deaff372a39cc679d5c5e4de7bafb0b1339": { - "balance": "0x0" - }, - "0xa2f1ccba9395d7fcb155bba8bc92db9bafaeade7": { - "balance": "0x0" - }, - "0xa3acf3a1e16b1d7c315e23510fdd7847b48234f6": { - "balance": "0x0" - }, - "0xa5dc5acd6a7968a4554d89d65e59b7fd3bff0f90": { - "balance": "0x0" - }, - "0xa82f360a8d3455c5c41366975bde739c37bfeb8a": { - "balance": "0x0" - }, - "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { - "balance": "0x5ffd4878be161d74", - "nonce": "0xac" - }, - "0xac1ecab32727358dba8962a0f3b261731aad9723": { - "balance": "0x0" - }, - "0xaccc230e8a6e5be9160b8cdf2864dd2a001c28b6": { - "balance": "0x0" - }, - "0xacd87e28b0c9d1254e868b81cba4cc20d9a32225": { - "balance": "0x0" - }, - "0xadf80daec7ba8dcf15392f1ac611fff65d94f880": { - "balance": "0x0" - }, - "0xaeeb8ff27288bdabc0fa5ebb731b6f409507516c": { - "balance": "0x0" - }, - "0xb136707642a4ea12fb4bae820f03d2562ebff487": { - "balance": "0x0" - }, - "0xb2c6f0dfbb716ac562e2d85d6cb2f8d5ee87603e": { - "balance": "0x0" - }, - "0xb3fb0e5aba0e20e5c49d252dfd30e102b171a425": { - "balance": "0x0" - }, - "0xb52042c8ca3f8aa246fa79c3feaa3d959347c0ab": { - "balance": "0x0" - }, - "0xb9637156d330c0d605a791f1c31ba5890582fe1c": { - "balance": "0x0" - }, - "0xbb9bc244d798123fde783fcc1c72d3bb8c189413": { - "balance": "0x0" - }, - "0xbc07118b9ac290e4622f5e77a0853539789effbe": { - "balance": "0x0" - }, - "0xbcf899e6c7d9d5a215ab1e3444c86806fa854c76": { - "balance": "0x0" - }, - "0xbe8539bfe837b67d1282b2b1d61c3f723966f049": { - "balance": "0x0" - }, - 
"0xbf4ed7b27f1d666546e30d74d50d173d20bca754": { - "balance": "0x0" - }, - "0xc4bbd073882dd2add2424cf47d35213405b01324": { - "balance": "0x0" - }, - "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b": { - "balance": "0x4563918244f40000" - }, - "0xca544e5c4687d109611d0f8f928b53a25af72448": { - "balance": "0x0" - }, - "0xcbb9d3703e651b0d496cdefb8b92c25aeb2171f7": { - "balance": "0x0" - }, - "0xcc34673c6c40e791051898567a1222daf90be287": { - "balance": "0x0" - }, - "0xceaeb481747ca6c540a000c1f3641f8cef161fa7": { - "balance": "0x0" - }, - "0xd131637d5275fd1a68a3200f4ad25c71a2a9522e": { - "balance": "0x0" - }, - "0xd164b088bd9108b60d0ca3751da4bceb207b0782": { - "balance": "0x0" - }, - "0xd1ac8b1ef1b69ff51d1d401a476e7e612414f091": { - "balance": "0x0" - }, - "0xd343b217de44030afaa275f54d31a9317c7f441e": { - "balance": "0x0" - }, - "0xd4fe7bc31cedb7bfb8a345f31e668033056b2728": { - "balance": "0x0" - }, - "0xd9aef3a1e38a39c16b31d1ace71bca8ef58d315b": { - "balance": "0x0" - }, - "0xda2fef9e4a3230988ff17df2165440f37e8b1708": { - "balance": "0x0" - }, - "0xdbe9b615a3ae8709af8b93336ce9b477e4ac0940": { - "balance": "0x0" - }, - "0xe308bd1ac5fda103967359b2712dd89deffb7973": { - "balance": "0x0" - }, - "0xe4ae1efdfc53b73893af49113d8694a057b9c0d1": { - "balance": "0x0" - }, - "0xec8e57756626fdc07c63ad2eafbd28d08e7b0ca5": { - "balance": "0x0" - }, - "0xecd135fa4f61a655311e86238c92adcd779555d2": { - "balance": "0x0" - }, - "0xf0b1aa0eb660754448a7937c022e30aa692fe0c5": { - "balance": "0x0" - }, - "0xf1385fb24aad0cd7432824085e42aff90886fef5": { - "balance": "0x0" - }, - "0xf14c14075d6c4ed84b86798af0956deef67365b5": { - "balance": "0x0" - }, - "0xf4c64518ea10f995918a454158c6b61407ea345c": { - "balance": "0x0" - }, - "0xfe24cdd8648121a43a7c86d289be4dd2951ed49f": { - "balance": "0x0" - } - }, - "result": { - "stateRoot": "0xd320ae476350b8107b9b78d45d73f539cc363e7e588d8c794666515d852f6e81", - "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", - "receiptsRoot": 
"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", - "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", - "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "receipts": [], - "currentDifficulty": "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffff020000", - "gasUsed": "0x0" - } -} diff --git a/cmd/evm/testdata/8/exp.json b/cmd/evm/testdata/8/exp.json deleted file mode 100644 index 2d44c071be7..00000000000 --- a/cmd/evm/testdata/8/exp.json +++ /dev/null @@ -1,68 +0,0 @@ -{ - "alloc": { - "0x000000000000000000000000000000000000aaaa": { - "code": "0x5854505854", - "balance": "0x7", - "nonce": "0x1" - }, - "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba": { - "balance": "0x1bc16d674ec94832" - }, - "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { - "balance": "0xeb7ca", - "nonce": "0x3" - } - }, - "result": { - "stateRoot": "0xb78515d83d9ad63ae2740f09f21bb6b44e9041e18b606a3ed35dd6cfd338c0bb", - "txRoot": "0xe42c488908c04b9f7d4d39614ed4093a33ff16353299672e1770b786c28a5e6f", - "receiptsRoot": "0xb207f384195fb6fb7ee7105ba963cc19e1614ce0e75809999289c6c82e7a8d97", - "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", - "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "receipts": [ - { - "type": "0x1", - "root": "0x", - "status": "0x1", - "cumulativeGasUsed": "0x7aae", - "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "logs": null, - "transactionHash": "0x26c8c6e23fa3b246f44fba53e7b5fcb55f01f1e075f2de3db9b982afd4bd3901", - "contractAddress": "0x0000000000000000000000000000000000000000", - "gasUsed": "0x7aae", - "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "blockNumber": "0x1000000", - "transactionIndex": "0x0" - }, - { - "root": "0x", - "status": "0x1", - "cumulativeGasUsed": "0xdd24", - "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "logs": null, - "transactionHash": "0x26ea003b1188334eced68a720dbe89886cd6a477cccdf924cf1d392e2281c01b", - "contractAddress": "0x0000000000000000000000000000000000000000", - "gasUsed": "0x6276", - "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "blockNumber": "0x1000000", - "transactionIndex": "0x1" - }, - { - "type": "0x1", - "root": "0x", - "status": "0x1", - "cumulativeGasUsed": "0x14832", - "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "logs": null, - "transactionHash": "0x6997569ed85f1d810bc61d969cbbae12f34ce88d314ff5ef2629bc741466fca6", - "contractAddress": "0x0000000000000000000000000000000000000000", - "gasUsed": "0x6b0e", - "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "blockNumber": "0x1000000", - "transactionIndex": "0x2" - } - ], - "currentDifficulty": "0x20000", - "gasUsed": "0x14832" - } -} diff --git a/cmd/evm/testdata/9/alloc.json b/cmd/evm/testdata/9/alloc.json index 
c14e38e8451..430e4242732 100644 --- a/cmd/evm/testdata/9/alloc.json +++ b/cmd/evm/testdata/9/alloc.json @@ -1,11 +1,19 @@ { - "0x000000000000000000000000000000000000aaaa": { - "balance": "0x03", - "code": "0x58585454", - "nonce": "0x1" - }, - "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { - "balance": "0x100000000000000", - "nonce": "0x00" - } -} + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x100000000000000000", + "nonce": "0x00" + }, + "0x00000000000000000000000000000000b0b0face": { + "code":"0x40600052", + "storage":{}, + "balance":"0x0", + "nonce": + "0x0" + }, + "0x000000000000000000000000000000ca1100f022": { + "code":"0x60806040527f248f18b25d9b5856c092f62a7d329b239f4a0a77e6ee6c58637f56745b9803f3446040518082815260200191505060405180910390a100fea265627a7a72315820eea50cf12e938601a56dcdef0ab1446f14ba25367299eb81834af54e1672f5d864736f6c63430005110032", + "storage":{}, + "balance":"0x0", + "nonce":"0x0" + } + } \ No newline at end of file diff --git a/cmd/evm/testdata/9/env.json b/cmd/evm/testdata/9/env.json index 05f35191fd8..479d8a3f47d 100644 --- a/cmd/evm/testdata/9/env.json +++ b/cmd/evm/testdata/9/env.json @@ -1,9 +1,8 @@ { - "currentCoinbase": "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", - "currentDifficulty": "0x20000", - "currentGasTarget": "0x1000000000", - "currentGasLimit": "0x750a163df65e8a", - "currentBaseFee": "0x3B9ACA00", - "currentNumber": "0x1000000", - "currentTimestamp": "0x04" -} + "currentCoinbase": "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + "currentDifficulty": "0x20000", + "currentGasLimit": "0x1000000000", + "currentNumber": "0x1000000", + "currentTimestamp": "0x04", + "currentRandom": "0x1000000000000000000000000000000000000000000000000000000000000001" + } \ No newline at end of file diff --git a/cmd/evm/testdata/9/exp.json b/cmd/evm/testdata/9/exp.json deleted file mode 100644 index 53a1bfd4d91..00000000000 --- a/cmd/evm/testdata/9/exp.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "alloc": { - 
"0x000000000000000000000000000000000000aaaa": { - "code": "0x58585454", - "balance": "0x3", - "nonce": "0x1" - }, - "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba": { - "balance": "0x1bc1c9185ca6f6e0" - }, - "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { - "balance": "0xff745ee8832120", - "nonce": "0x2" - } - }, - "result": { - "stateRoot": "0x8e0c14cca1717d764e5cd25569bdf079758d704bb8ba56a3827997842f135ad8", - "txRoot": "0xbe6c599aefbec1cfe31dbdeca4b4dd0315bf5fca0f78e10c8f869c40a42feb0d", - "receiptsRoot": "0x5fdadbccc0b40ed39f6c7aacafb08a71c468f28793027552d9d99b1aeb19d406", - "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", - "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "receipts": [ - { - "type": "0x2", - "root": "0x", - "status": "0x1", - "cumulativeGasUsed": "0x6b70", - "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "logs": null, - "transactionHash": "0xb4821e4a9122a6f9baecad99351bee6ec54fe8c3f6a737b2e6478f4963536819", - "contractAddress": 
"0x0000000000000000000000000000000000000000", - "gasUsed": "0x6b70", - "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "blockNumber": "0x1000000", - "transactionIndex": "0x0" - }, - { - "root": "0x", - "status": "0x1", - "cumulativeGasUsed": "0xcde4", - "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", - "logs": null, - "transactionHash": "0xa9c6c6a848b9c9a0d8bbb4df5f30394983632817dbccc738e839c8e174fa4036", - "contractAddress": "0x0000000000000000000000000000000000000000", - "gasUsed": "0x6274", - "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", - "blockNumber": "0x1000000", - "transactionIndex": "0x1" - } - ], - "currentDifficulty": "0x20000", - "gasUsed": "0xcde4" - } -} diff --git a/cmd/evm/testdata/9/readme.md b/cmd/evm/testdata/9/readme.md deleted file mode 100644 index 88f0f12aaaa..00000000000 --- a/cmd/evm/testdata/9/readme.md +++ /dev/null @@ -1,75 +0,0 @@ -## EIP-1559 testing - -This test contains testcases for EIP-1559, which uses an new transaction type and has a new block parameter. - -### Prestate - -The alloc portion contains one contract (`0x000000000000000000000000000000000000aaaa`), containing the -following code: `0x58585454`: `PC; PC; SLOAD; SLOAD`. - -Essentialy, this contract does `SLOAD(0)` and `SLOAD(1)`. - -The alloc also contains some funds on `0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b`. - -## Transactions - -There are two transactions, each invokes the contract above. - -1. 
EIP-1559 ACL-transaction, which contains the `0x0` slot for `0xaaaa` -2. Legacy transaction - -## Execution - -Running it yields: -``` -$ dir=./testdata/9 && ./evm t8n --state.fork=London --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --trace && cat trace-* | grep SLOAD -{"pc":2,"op":84,"gas":"0x48c28","gasCost":"0x834","memory":"0x","memSize":0,"stack":["0x0","0x1"],"returnStack":null,"returnD -ata":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} -{"pc":3,"op":84,"gas":"0x483f4","gasCost":"0x64","memory":"0x","memSize":0,"stack":["0x0","0x0"],"returnStack":null,"returnDa -ta":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} -{"pc":2,"op":84,"gas":"0x49cf4","gasCost":"0x834","memory":"0x","memSize":0,"stack":["0x0","0x1"],"returnStack":null,"returnD -ata":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} -{"pc":3,"op":84,"gas":"0x494c0","gasCost":"0x834","memory":"0x","memSize":0,"stack":["0x0","0x0"],"returnStack":null,"returnD -ata":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} -``` - -We can also get the post-alloc: -``` -$ dir=./testdata/9 && ./evm t8n --state.fork=London --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout -{ - "alloc": { - "0x000000000000000000000000000000000000aaaa": { - "code": "0x58585454", - "balance": "0x3", - "nonce": "0x1" - }, - "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba": { - "balance": "0xbfc02677a000" - }, - "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { - "balance": "0xff104fcfea7800", - "nonce": "0x2" - } - } -} -``` - -If we try to execute it on older rules: -``` -dir=./testdata/9 && ./evm t8n --state.fork=Berlin --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout -ERROR(10): Failed signing transactions: ERROR(10): Tx 0: failed to sign tx: transaction type not supported -``` - -It fails, due to the `evm t8n` cannot sign them in with the given signer. 
We can bypass that, however, -by feeding it presigned transactions, located in `txs_signed.json`. - -``` -dir=./testdata/9 && ./evm t8n --state.fork=Berlin --input.alloc=$dir/alloc.json --input.txs=$dir/txs_signed.json --input.env=$dir/env.json -INFO [05-07|12:28:42.072] rejected tx index=0 hash=b4821e..536819 error="transaction type not supported" -INFO [05-07|12:28:42.072] rejected tx index=1 hash=a9c6c6..fa4036 from=0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B error="nonce too high: address 0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B, tx: 1 state: 0" -INFO [05-07|12:28:42.073] Wrote file file=alloc.json -INFO [05-07|12:28:42.073] Wrote file file=result.json -``` - -Number `0` is not applicable, and therefore number `1` has wrong nonce, and both are rejected. - diff --git a/cmd/evm/testdata/9/txs.json b/cmd/evm/testdata/9/txs.json index 740abce079d..7f15b0b2215 100644 --- a/cmd/evm/testdata/9/txs.json +++ b/cmd/evm/testdata/9/txs.json @@ -1,37 +1,14 @@ [ - { - "gas": "0x4ef00", - "maxPriorityFeePerGas": "0x2", - "maxFeePerGas": "0x12A05F200", - "chainId": "0x1", - "input": "0x", - "nonce": "0x0", - "to": "0x000000000000000000000000000000000000aaaa", - "value": "0x0", - "type" : "0x2", - "accessList": [ - {"address": "0x000000000000000000000000000000000000aaaa", - "storageKeys": [ - "0x0000000000000000000000000000000000000000000000000000000000000000" - ] - } - ], - "v": "0x0", - "r": "0x0", - "s": "0x0", - "secretKey": "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8" - }, - { - "gas": "0x4ef00", - "gasPrice": "0x12A05F200", - "chainId": "0x1", - "input": "0x", - "nonce": "0x1", - "to": "0x000000000000000000000000000000000000aaaa", - "value": "0x0", - "v": "0x0", - "r": "0x0", - "s": "0x0", - "secretKey": "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8" - } -] + { + "gasPrice":"0x80", + "nonce":"0x0", + "to":"0x000000000000000000000000000000ca1100f022", + "input": "", + "gas":"0x1312d00", + "value": "0x0", + "v": "0x0", + "r": 
"0x0", + "s": "0x0", + "secretKey": "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8" + } +] \ No newline at end of file diff --git a/cmd/integration/commands/state_stages.go b/cmd/integration/commands/state_stages.go index 4714e587d32..d620bb0ab86 100644 --- a/cmd/integration/commands/state_stages.go +++ b/cmd/integration/commands/state_stages.go @@ -17,6 +17,7 @@ import ( "github.com/ledgerwatch/erigon/common/changeset" "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/common/debugprint" + "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" @@ -218,7 +219,7 @@ func syncBySmallSteps(db kv.RwDB, miningConfig params.MiningConfig, ctx context. } encoder := json.NewEncoder(w) encoder.SetIndent(" ", " ") - for _, l := range vm.FormatLogs(vmConfig.Tracer.(*vm.StructLogger).StructLogs()) { + for _, l := range core.FormatLogs(vmConfig.Tracer.(*vm.StructLogger).StructLogs()) { if err2 := encoder.Encode(l); err2 != nil { panic(err2) } diff --git a/cmd/rpcdaemon/commands/eth_receipts.go b/cmd/rpcdaemon/commands/eth_receipts.go index cc7ac0d99bd..bfd481ec08d 100644 --- a/cmd/rpcdaemon/commands/eth_receipts.go +++ b/cmd/rpcdaemon/commands/eth_receipts.go @@ -59,8 +59,7 @@ func (api *BaseAPI) getReceipts(ctx context.Context, tx kv.Tx, chainConfig *para for i, txn := range block.Transactions() { ibs.Prepare(txn.Hash(), block.Hash(), i) - header := block.Header() - receipt, _, err := core.ApplyTransaction(chainConfig, core.GetHashFn(header, getHeader), ethashFaker, nil, gp, ibs, noopWriter, header, txn, usedGas, vm.Config{}, contractHasTEVM) + receipt, _, err := core.ApplyTransaction(chainConfig, getHeader, ethashFaker, nil, gp, ibs, noopWriter, block.Header(), txn, usedGas, vm.Config{}, contractHasTEVM) if err != nil { return nil, err } diff --git a/cmd/rpcdaemon22/commands/eth_receipts.go 
b/cmd/rpcdaemon22/commands/eth_receipts.go index 0128a6b1ab8..f822381c54b 100644 --- a/cmd/rpcdaemon22/commands/eth_receipts.go +++ b/cmd/rpcdaemon22/commands/eth_receipts.go @@ -56,8 +56,7 @@ func (api *BaseAPI) getReceipts(ctx context.Context, tx kv.Tx, chainConfig *para for i, txn := range block.Transactions() { ibs.Prepare(txn.Hash(), block.Hash(), i) - header := block.Header() - receipt, _, err := core.ApplyTransaction(chainConfig, core.GetHashFn(header, getHeader), ethashFaker, nil, gp, ibs, noopWriter, header, txn, usedGas, vm.Config{}, contractHasTEVM) + receipt, _, err := core.ApplyTransaction(chainConfig, getHeader, ethashFaker, nil, gp, ibs, noopWriter, block.Header(), txn, usedGas, vm.Config{}, contractHasTEVM) if err != nil { return nil, err } diff --git a/cmd/state/commands/erigon2.go b/cmd/state/commands/erigon2.go index 370433ecaa3..934f79cac63 100644 --- a/cmd/state/commands/erigon2.go +++ b/cmd/state/commands/erigon2.go @@ -412,7 +412,7 @@ func processBlock(trace bool, txNumStart uint64, rw *ReaderWrapper, ww *WriterWr daoBlock = false } ibs.Prepare(tx.Hash(), block.Hash(), i) - receipt, _, err := core.ApplyTransaction(chainConfig, core.GetHashFn(header, getHeader), engine, nil, gp, ibs, ww, header, tx, usedGas, vmConfig, nil) + receipt, _, err := core.ApplyTransaction(chainConfig, getHeader, engine, nil, gp, ibs, ww, header, tx, usedGas, vmConfig, nil) if err != nil { return 0, nil, fmt.Errorf("could not apply tx %d [%x] failed: %w", i, tx.Hash(), err) } diff --git a/cmd/state/commands/erigon22.go b/cmd/state/commands/erigon22.go index 754c11cdd7f..55dffedbd41 100644 --- a/cmd/state/commands/erigon22.go +++ b/cmd/state/commands/erigon22.go @@ -312,7 +312,7 @@ func processBlock22(startTxNum uint64, trace bool, txNumStart uint64, rw *Reader ibs.Prepare(tx.Hash(), block.Hash(), i) ct := NewCallTracer() vmConfig.Tracer = ct - receipt, _, err := core.ApplyTransaction(chainConfig, core.GetHashFn(header, getHeader), engine, nil, gp, ibs, ww, header, tx, 
usedGas, vmConfig, nil) + receipt, _, err := core.ApplyTransaction(chainConfig, getHeader, engine, nil, gp, ibs, ww, header, tx, usedGas, vmConfig, nil) if err != nil { return 0, nil, fmt.Errorf("could not apply tx %d [%x] failed: %w", i, tx.Hash(), err) } diff --git a/cmd/state/commands/history2.go b/cmd/state/commands/history2.go index 0aa8b07ffee..63332bcb0b0 100644 --- a/cmd/state/commands/history2.go +++ b/cmd/state/commands/history2.go @@ -157,7 +157,7 @@ func runHistory2(trace bool, blockNum, txNumStart uint64, hw *HistoryWrapper, ww daoBlock = false } ibs.Prepare(tx.Hash(), block.Hash(), i) - receipt, _, err := core.ApplyTransaction(chainConfig, core.GetHashFn(header, getHeader), engine, nil, gp, ibs, ww, header, tx, usedGas, vmConfig, nil) + receipt, _, err := core.ApplyTransaction(chainConfig, getHeader, engine, nil, gp, ibs, ww, header, tx, usedGas, vmConfig, nil) if err != nil { return 0, nil, fmt.Errorf("could not apply tx %d [%x] failed: %w", i, tx.Hash(), err) } diff --git a/cmd/state/commands/history22.go b/cmd/state/commands/history22.go index a7ecf4d8ad9..fd9d10f2613 100644 --- a/cmd/state/commands/history22.go +++ b/cmd/state/commands/history22.go @@ -245,7 +245,7 @@ func runHistory22(trace bool, blockNum, txNumStart uint64, hw *state.HistoryRead hw.SetTxNum(txNum) ibs := state.New(hw) ibs.Prepare(tx.Hash(), block.Hash(), i) - receipt, _, err := core.ApplyTransaction(chainConfig, core.GetHashFn(header, getHeader), engine, nil, gp, ibs, ww, header, tx, usedGas, vmConfig, nil) + receipt, _, err := core.ApplyTransaction(chainConfig, getHeader, engine, nil, gp, ibs, ww, header, tx, usedGas, vmConfig, nil) if err != nil { return 0, nil, fmt.Errorf("could not apply tx %d [%x] failed: %w", i, tx.Hash(), err) } diff --git a/cmd/state/commands/opcode_tracer.go b/cmd/state/commands/opcode_tracer.go index ee6a5d86102..fb6e9777180 100644 --- a/cmd/state/commands/opcode_tracer.go +++ b/cmd/state/commands/opcode_tracer.go @@ -684,7 +684,7 @@ func 
runBlock(engine consensus.Engine, ibs *state.IntraBlockState, txnWriter sta rules := chainConfig.Rules(block.NumberU64()) for i, tx := range block.Transactions() { ibs.Prepare(tx.Hash(), block.Hash(), i) - receipt, _, err := core.ApplyTransaction(chainConfig, core.GetHashFn(header, getHeader), engine, nil, gp, ibs, txnWriter, header, tx, usedGas, vmConfig, contractHasTEVM) + receipt, _, err := core.ApplyTransaction(chainConfig, getHeader, engine, nil, gp, ibs, txnWriter, header, tx, usedGas, vmConfig, contractHasTEVM) if err != nil { return nil, fmt.Errorf("could not apply tx %d [%x] failed: %w", i, tx.Hash(), err) } diff --git a/cmd/state/commands/state_recon.go b/cmd/state/commands/state_recon.go index 501ec1420a4..478ec8bd3b1 100644 --- a/cmd/state/commands/state_recon.go +++ b/cmd/state/commands/state_recon.go @@ -183,7 +183,7 @@ func (rw *ReconWorker) runTxNum(txNum uint64) { vmConfig := vm.Config{NoReceipts: true, SkipAnalysis: core.SkipAnalysis(rw.chainConfig, blockNum)} contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } ibs.Prepare(txHash, rw.lastBlockHash, int(txIndex)) - _, _, err = core.ApplyTransaction(rw.chainConfig, core.GetHashFn(rw.lastHeader, rw.getHeader), rw.engine, nil, gp, ibs, noop, rw.lastHeader, txn, usedGas, vmConfig, contractHasTEVM) + _, _, err = core.ApplyTransaction(rw.chainConfig, rw.getHeader, rw.engine, nil, gp, ibs, noop, rw.lastHeader, txn, usedGas, vmConfig, contractHasTEVM) if err != nil { panic(fmt.Errorf("could not apply tx %d [%x] failed: %w", txIndex, txHash, err)) } diff --git a/consensus/parlia/parlia.go b/consensus/parlia/parlia.go index 731fd618121..30fd185239a 100644 --- a/consensus/parlia/parlia.go +++ b/consensus/parlia/parlia.go @@ -1218,7 +1218,7 @@ func (p *Parlia) systemCall(from, contract common.Address, data []byte, ibs *sta ) vmConfig := vm.Config{NoReceipts: true} // Create a new context to be used in the EVM environment - blockContext := core.NewEVMBlockContext(header, 
core.GetHashFn(header, nil), p, &from, nil) + blockContext := core.NewEVMBlockContext(header, nil, p, &from, nil) evm := vm.NewEVM(blockContext, core.NewEVMTxContext(msg), ibs, chainConfig, vmConfig) ret, leftOverGas, err := evm.Call( vm.AccountRef(msg.From()), diff --git a/core/blockchain.go b/core/blockchain.go index dc8af60a6c6..b102ae22c15 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -18,17 +18,16 @@ package core import ( + "encoding/json" "fmt" + "os" "time" "github.com/ledgerwatch/erigon/core/systemcontracts" - "github.com/ledgerwatch/erigon/rlp" - "golang.org/x/crypto/sha3" "golang.org/x/exp/slices" metrics2 "github.com/VictoriaMetrics/metrics" "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/math" "github.com/ledgerwatch/erigon/common/u256" "github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/consensus/misc" @@ -49,30 +48,12 @@ const ( TriesInMemory = 128 ) -type RejectedTx struct { - Index int `json:"index" gencodec:"required"` - Err string `json:"error" gencodec:"required"` -} - -type RejectedTxs []*RejectedTx - -type EphemeralExecResult struct { - StateRoot common.Hash `json:"stateRoot"` - TxRoot common.Hash `json:"txRoot"` - ReceiptRoot common.Hash `json:"receiptsRoot"` - LogsHash common.Hash `json:"logsHash"` - Bloom types.Bloom `json:"logsBloom" gencodec:"required"` - Receipts types.Receipts `json:"receipts"` - Rejected RejectedTxs `json:"rejected,omitempty"` - Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"` - GasUsed math.HexOrDecimal64 `json:"gasUsed"` - ReceiptForStorage *types.ReceiptForStorage `json:"-"` -} - +// ExecuteBlockEphemerally runs a block from provided stateReader and +// writes the result to the provided stateWriter func ExecuteBlockEphemerallyForBSC( chainConfig *params.ChainConfig, vmConfig *vm.Config, - blockHashFunc func(n uint64) common.Hash, + getHeader func(hash common.Hash, number uint64) *types.Header, engine consensus.Engine, 
block *types.Block, stateReader state.StateReader, @@ -80,9 +61,7 @@ func ExecuteBlockEphemerallyForBSC( epochReader consensus.EpochReader, chainReader consensus.ChainHeaderReader, contractHasTEVM func(codeHash common.Hash) (bool, error), - statelessExec bool, // for usage of this API via cli tools wherein some of the validations need to be relaxed. - getTracer func(txIndex int, txHash common.Hash) (vm.Tracer, error), -) (*EphemeralExecResult, error) { +) (types.Receipts, error) { defer blockExecutionTimer.UpdateDuration(time.Now()) block.Uncles() ibs := state.New(stateReader) @@ -92,11 +71,6 @@ func ExecuteBlockEphemerallyForBSC( gp := new(GasPool) gp.AddGas(block.GasLimit()) - var ( - rejectedTxs []*RejectedTx - includedTxs types.Transactions - ) - if !vmConfig.ReadOnly { if err := InitializeBlockExecution(engine, chainReader, epochReader, block.Header(), block.Transactions(), block.Uncles(), chainConfig, ibs); err != nil { return nil, err @@ -121,36 +95,35 @@ func ExecuteBlockEphemerallyForBSC( ibs.Prepare(tx.Hash(), block.Hash(), i) writeTrace := false if vmConfig.Debug && vmConfig.Tracer == nil { - tracer, err := getTracer(i, tx.Hash()) - if err != nil { - panic(err) - } - vmConfig.Tracer = tracer + vmConfig.Tracer = vm.NewStructLogger(&vm.LogConfig{}) writeTrace = true } - receipt, _, err := ApplyTransaction(chainConfig, blockHashFunc, engine, nil, gp, ibs, noop, header, tx, usedGas, *vmConfig, contractHasTEVM) + receipt, _, err := ApplyTransaction(chainConfig, getHeader, engine, nil, gp, ibs, noop, header, tx, usedGas, *vmConfig, contractHasTEVM) if writeTrace { - if ftracer, ok := vmConfig.Tracer.(vm.FlushableTracer); ok { - ftracer.Flush(tx) + w, err1 := os.Create(fmt.Sprintf("txtrace_%x.txt", tx.Hash())) + if err1 != nil { + panic(err1) + } + encoder := json.NewEncoder(w) + logs := FormatLogs(vmConfig.Tracer.(*vm.StructLogger).StructLogs()) + if err2 := encoder.Encode(logs); err2 != nil { + panic(err2) + } + if err2 := w.Close(); err2 != nil { + 
panic(err2) } - vmConfig.Tracer = nil } - if err != nil && statelessExec { - rejectedTxs = append(rejectedTxs, &RejectedTx{i, err.Error()}) - } else if err != nil && !statelessExec { + if err != nil { return nil, fmt.Errorf("could not apply tx %d from block %d [%v]: %w", i, block.NumberU64(), tx.Hash().Hex(), err) - } else { - includedTxs = append(includedTxs, tx) - if !vmConfig.NoReceipts { - receipts = append(receipts, receipt) - } + } + if !vmConfig.NoReceipts { + receipts = append(receipts, receipt) } } var newBlock *types.Block - var receiptSha common.Hash if !vmConfig.ReadOnly { // We're doing this hack for BSC to avoid changing consensus interfaces a lot. BSC modifies txs and receipts by appending // system transactions, and they increase used gas and write cumulative gas to system receipts, that's why we need @@ -175,25 +148,20 @@ func ExecuteBlockEphemerallyForBSC( if !vmConfig.NoReceipts { receipts = outReceipts } - receiptSha = newBlock.ReceiptHash() } else { newBlock = block - receiptSha = types.DeriveSha(receipts) } - var bloom types.Bloom - if chainConfig.IsByzantium(header.Number.Uint64()) && !vmConfig.NoReceipts { - if !statelessExec && newBlock.ReceiptHash() != block.ReceiptHash() { + if newBlock.ReceiptHash() != block.ReceiptHash() { return nil, fmt.Errorf("mismatched receipt headers for block %d (%s != %s)", block.NumberU64(), newBlock.ReceiptHash().Hex(), block.Header().ReceiptHash.Hex()) } } - if !statelessExec && newBlock.GasUsed() != header.GasUsed { + if newBlock.GasUsed() != header.GasUsed { return nil, fmt.Errorf("gas used by execution: %d, in header: %d", *usedGas, header.GasUsed) } if !vmConfig.NoReceipts { - bloom = newBlock.Bloom() - if !statelessExec && newBlock.Bloom() != header.Bloom { + if newBlock.Bloom() != header.Bloom { return nil, fmt.Errorf("bloom computed by execution: %x, in header: %x", newBlock.Bloom(), header.Bloom) } } @@ -204,17 +172,7 @@ func ExecuteBlockEphemerallyForBSC( return nil, fmt.Errorf("writing changesets 
for block %d failed: %w", header.Number.Uint64(), err) } - execRs := &EphemeralExecResult{ - TxRoot: types.DeriveSha(includedTxs), - ReceiptRoot: receiptSha, - Bloom: bloom, - Receipts: receipts, - Difficulty: (*math.HexOrDecimal256)(block.Header().Difficulty), - GasUsed: math.HexOrDecimal64(*usedGas), - Rejected: rejectedTxs, - } - - return execRs, nil + return receipts, nil } // ExecuteBlockEphemerally runs a block from provided stateReader and @@ -222,7 +180,7 @@ func ExecuteBlockEphemerallyForBSC( func ExecuteBlockEphemerally( chainConfig *params.ChainConfig, vmConfig *vm.Config, - blockHashFunc func(n uint64) common.Hash, + getHeader func(hash common.Hash, number uint64) *types.Header, engine consensus.Engine, block *types.Block, stateReader state.StateReader, @@ -230,27 +188,19 @@ func ExecuteBlockEphemerally( epochReader consensus.EpochReader, chainReader consensus.ChainHeaderReader, contractHasTEVM func(codeHash common.Hash) (bool, error), - statelessExec bool, // for usage of this API via cli tools wherein some of the validations need to be relaxed. 
- getTracer func(txIndex int, txHash common.Hash) (vm.Tracer, error), -) (*EphemeralExecResult, error) { - +) (types.Receipts, *types.ReceiptForStorage, error) { defer blockExecutionTimer.UpdateDuration(time.Now()) block.Uncles() ibs := state.New(stateReader) header := block.Header() - var receipts = make(types.Receipts, 0) + var receipts types.Receipts usedGas := new(uint64) gp := new(GasPool) gp.AddGas(block.GasLimit()) - var ( - rejectedTxs []*RejectedTx - includedTxs types.Transactions - ) - if !vmConfig.ReadOnly { if err := InitializeBlockExecution(engine, chainReader, epochReader, block.Header(), block.Transactions(), block.Uncles(), chainConfig, ibs); err != nil { - return nil, err + return nil, nil, err } } @@ -263,53 +213,54 @@ func ExecuteBlockEphemerally( ibs.Prepare(tx.Hash(), block.Hash(), i) writeTrace := false if vmConfig.Debug && vmConfig.Tracer == nil { - tracer, err := getTracer(i, tx.Hash()) - if err != nil { - panic(err) - } - vmConfig.Tracer = tracer + vmConfig.Tracer = vm.NewStructLogger(&vm.LogConfig{}) writeTrace = true } - receipt, _, err := ApplyTransaction(chainConfig, blockHashFunc, engine, nil, gp, ibs, noop, header, tx, usedGas, *vmConfig, contractHasTEVM) + receipt, _, err := ApplyTransaction(chainConfig, getHeader, engine, nil, gp, ibs, noop, header, tx, usedGas, *vmConfig, contractHasTEVM) if writeTrace { - if ftracer, ok := vmConfig.Tracer.(vm.FlushableTracer); ok { - ftracer.Flush(tx) + w, err1 := os.Create(fmt.Sprintf("txtrace_%x.txt", tx.Hash())) + if err1 != nil { + panic(err1) + } + encoder := json.NewEncoder(w) + logs := FormatLogs(vmConfig.Tracer.(*vm.StructLogger).StructLogs()) + if err2 := encoder.Encode(logs); err2 != nil { + panic(err2) + } + if err2 := w.Close(); err2 != nil { + panic(err2) } - vmConfig.Tracer = nil } - if err != nil && statelessExec { - rejectedTxs = append(rejectedTxs, &RejectedTx{i, err.Error()}) - } else if err != nil && !statelessExec { - return nil, fmt.Errorf("could not apply tx %d from block %d 
[%v]: %w", i, block.NumberU64(), tx.Hash().Hex(), err) - } else { - includedTxs = append(includedTxs, tx) - if !vmConfig.NoReceipts { - receipts = append(receipts, receipt) - } + if err != nil { + return nil, nil, fmt.Errorf("could not apply tx %d from block %d [%v]: %w", i, block.NumberU64(), tx.Hash().Hex(), err) + } + if !vmConfig.NoReceipts { + receipts = append(receipts, receipt) } } - var bloom types.Bloom - receiptSha := types.DeriveSha(receipts) - if !statelessExec && chainConfig.IsByzantium(header.Number.Uint64()) && !vmConfig.NoReceipts && receiptSha != block.ReceiptHash() { - return nil, fmt.Errorf("mismatched receipt headers for block %d", block.NumberU64()) + if chainConfig.IsByzantium(header.Number.Uint64()) && !vmConfig.NoReceipts { + receiptSha := types.DeriveSha(receipts) + if receiptSha != block.ReceiptHash() { + return nil, nil, fmt.Errorf("mismatched receipt headers for block %d", block.NumberU64()) + } } - if !statelessExec && *usedGas != header.GasUsed { - return nil, fmt.Errorf("gas used by execution: %d, in header: %d", *usedGas, header.GasUsed) + if *usedGas != header.GasUsed { + return nil, nil, fmt.Errorf("gas used by execution: %d, in header: %d", *usedGas, header.GasUsed) } if !vmConfig.NoReceipts { - bloom = types.CreateBloom(receipts) - if !statelessExec && bloom != header.Bloom { - return nil, fmt.Errorf("bloom computed by execution: %x, in header: %x", bloom, header.Bloom) + bloom := types.CreateBloom(receipts) + if bloom != header.Bloom { + return nil, nil, fmt.Errorf("bloom computed by execution: %x, in header: %x", bloom, header.Bloom) } } if !vmConfig.ReadOnly { txs := block.Transactions() if _, err := FinalizeBlockExecution(engine, stateReader, block.Header(), txs, block.Uncles(), stateWriter, chainConfig, ibs, receipts, epochReader, chainReader, false); err != nil { - return nil, err + return nil, nil, err } } @@ -336,26 +287,7 @@ func ExecuteBlockEphemerally( } } - execRs := &EphemeralExecResult{ - TxRoot: 
types.DeriveSha(includedTxs), - ReceiptRoot: receiptSha, - Bloom: bloom, - LogsHash: rlpHash(blockLogs), - Receipts: receipts, - Difficulty: (*math.HexOrDecimal256)(header.Difficulty), - GasUsed: math.HexOrDecimal64(*usedGas), - Rejected: rejectedTxs, - ReceiptForStorage: stateSyncReceipt, - } - - return execRs, nil -} - -func rlpHash(x interface{}) (h common.Hash) { - hw := sha3.NewLegacyKeccak256() - rlp.Encode(hw, x) //nolint:errcheck - hw.Sum(h[:0]) - return h + return receipts, stateSyncReceipt, nil } func SysCallContract(contract common.Address, data []byte, chainConfig params.ChainConfig, ibs *state.IntraBlockState, header *types.Header, engine consensus.Engine) (result []byte, err error) { @@ -376,16 +308,19 @@ func SysCallContract(contract common.Address, data []byte, chainConfig params.Ch vmConfig := vm.Config{NoReceipts: true} // Create a new context to be used in the EVM environment isBor := chainConfig.Bor != nil - var txContext vm.TxContext var author *common.Address if isBor { author = &header.Coinbase - txContext = vm.TxContext{} } else { author = &state.SystemAddress + } + blockContext := NewEVMBlockContext(header, nil, engine, author, nil) + var txContext vm.TxContext + if isBor { + txContext = vm.TxContext{} + } else { txContext = NewEVMTxContext(msg) } - blockContext := NewEVMBlockContext(header, GetHashFn(header, nil), engine, author, nil) evm := vm.NewEVM(blockContext, txContext, ibs, &chainConfig, vmConfig) if isBor { ret, _, err := evm.Call( @@ -429,7 +364,7 @@ func CallContract(contract common.Address, data []byte, chainConfig params.Chain return nil, fmt.Errorf("SysCallContract: %w ", err) } vmConfig := vm.Config{NoReceipts: true} - _, result, err = ApplyTransaction(&chainConfig, GetHashFn(header, nil), engine, &state.SystemAddress, gp, ibs, noop, header, tx, &gasUsed, vmConfig, nil) + _, result, err = ApplyTransaction(&chainConfig, nil, engine, &state.SystemAddress, gp, ibs, noop, header, tx, &gasUsed, vmConfig, nil) if err != nil { 
return result, fmt.Errorf("SysCallContract: %w ", err) } @@ -457,7 +392,7 @@ func FinalizeBlockExecution(engine consensus.Engine, stateReader state.StateRead _, _, err = engine.Finalize(cc, header, ibs, txs, uncles, receipts, e, headerReader, syscall) } if err != nil { - return nil, err + return } var originalSystemAcc *accounts.Account diff --git a/core/chain_makers.go b/core/chain_makers.go index 17248745e79..24a8e4266df 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -112,7 +112,7 @@ func (b *BlockGen) AddTxWithChain(getHeader func(hash common.Hash, number uint64 } b.ibs.Prepare(tx.Hash(), common.Hash{}, len(b.txs)) contractHasTEVM := func(_ common.Hash) (bool, error) { return false, nil } - receipt, _, err := ApplyTransaction(b.config, GetHashFn(b.header, getHeader), engine, &b.header.Coinbase, b.gasPool, b.ibs, state.NewNoopWriter(), b.header, tx, &b.header.GasUsed, vm.Config{}, contractHasTEVM) + receipt, _, err := ApplyTransaction(b.config, getHeader, engine, &b.header.Coinbase, b.gasPool, b.ibs, state.NewNoopWriter(), b.header, tx, &b.header.GasUsed, vm.Config{}, contractHasTEVM) if err != nil { panic(err) } @@ -126,7 +126,7 @@ func (b *BlockGen) AddFailedTxWithChain(getHeader func(hash common.Hash, number } b.ibs.Prepare(tx.Hash(), common.Hash{}, len(b.txs)) contractHasTEVM := func(common.Hash) (bool, error) { return false, nil } - receipt, _, err := ApplyTransaction(b.config, GetHashFn(b.header, getHeader), engine, &b.header.Coinbase, b.gasPool, b.ibs, state.NewNoopWriter(), b.header, tx, &b.header.GasUsed, vm.Config{}, contractHasTEVM) + receipt, _, err := ApplyTransaction(b.config, getHeader, engine, &b.header.Coinbase, b.gasPool, b.ibs, state.NewNoopWriter(), b.header, tx, &b.header.GasUsed, vm.Config{}, contractHasTEVM) _ = err // accept failed transactions b.txs = append(b.txs, tx) b.receipts = append(b.receipts, receipt) diff --git a/core/evm.go b/core/evm.go index b89f8e5acb4..c2fa1471cd5 100644 --- a/core/evm.go +++ b/core/evm.go 
@@ -30,7 +30,7 @@ import ( ) // NewEVMBlockContext creates a new context for use in the EVM. -func NewEVMBlockContext(header *types.Header, blockHashFunc func(n uint64) common.Hash, engine consensus.Engine, author *common.Address, contractHasTEVM func(contractHash common.Hash) (bool, error)) vm.BlockContext { +func NewEVMBlockContext(header *types.Header, getHeader func(hash common.Hash, number uint64) *types.Header, engine consensus.Engine, author *common.Address, contractHasTEVM func(contractHash common.Hash) (bool, error)) vm.BlockContext { // If we don't have an explicit author (i.e. not mining), extract from the header var beneficiary common.Address if author == nil { @@ -71,7 +71,7 @@ func NewEVMBlockContext(header *types.Header, blockHashFunc func(n uint64) commo return vm.BlockContext{ CanTransfer: CanTransfer, Transfer: transferFunc, - GetHash: blockHashFunc, + GetHash: GetHashFn(header, getHeader), Coinbase: beneficiary, BlockNumber: header.Number.Uint64(), Time: header.Time, diff --git a/core/state_processor.go b/core/state_processor.go index c203685bb9e..066378ab5d9 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -17,7 +17,10 @@ package core import ( + "fmt" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/math" "github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" @@ -26,6 +29,57 @@ import ( "github.com/ledgerwatch/erigon/params" ) +// StructLogRes stores a structured log emitted by the EVM while replaying a +// transaction in debug mode +type StructLogRes struct { + Pc uint64 `json:"pc"` + Op string `json:"op"` + Gas uint64 `json:"gas"` + GasCost uint64 `json:"gasCost"` + Depth int `json:"depth"` + Error error `json:"error,omitempty"` + Stack *[]string `json:"stack,omitempty"` + Memory *[]string `json:"memory,omitempty"` + Storage *map[string]string `json:"storage,omitempty"` +} + +// FormatLogs formats EVM returned 
structured logs for json output +func FormatLogs(logs []vm.StructLog) []StructLogRes { + formatted := make([]StructLogRes, len(logs)) + for index, trace := range logs { + formatted[index] = StructLogRes{ + Pc: trace.Pc, + Op: trace.Op.String(), + Gas: trace.Gas, + GasCost: trace.GasCost, + Depth: trace.Depth, + Error: trace.Err, + } + if trace.Stack != nil { + stack := make([]string, len(trace.Stack)) + for i, stackValue := range trace.Stack { + stack[i] = fmt.Sprintf("%x", math.PaddedBigBytes(stackValue, 32)) + } + formatted[index].Stack = &stack + } + if trace.Memory != nil { + memory := make([]string, 0, (len(trace.Memory)+31)/32) + for i := 0; i+32 <= len(trace.Memory); i += 32 { + memory = append(memory, fmt.Sprintf("%x", trace.Memory[i:i+32])) + } + formatted[index].Memory = &memory + } + if trace.Storage != nil { + storage := make(map[string]string) + for i, storageValue := range trace.Storage { + storage[fmt.Sprintf("%x", i)] = fmt.Sprintf("%x", storageValue) + } + formatted[index].Storage = &storage + } + } + return formatted +} + // applyTransaction attempts to apply a transaction to the given state database // and uses the input parameters for its environment. It returns the receipt // for the transaction, gas used and an error if the transaction failed, @@ -86,7 +140,7 @@ func applyTransaction(config *params.ChainConfig, gp *GasPool, statedb *state.In // and uses the input parameters for its environment. It returns the receipt // for the transaction, gas used and an error if the transaction failed, // indicating the block was invalid. 
-func ApplyTransaction(config *params.ChainConfig, blockHashFunc func(n uint64) common.Hash, engine consensus.Engine, author *common.Address, gp *GasPool, ibs *state.IntraBlockState, stateWriter state.StateWriter, header *types.Header, tx types.Transaction, usedGas *uint64, cfg vm.Config, contractHasTEVM func(contractHash common.Hash) (bool, error)) (*types.Receipt, []byte, error) { +func ApplyTransaction(config *params.ChainConfig, getHeader func(hash common.Hash, number uint64) *types.Header, engine consensus.Engine, author *common.Address, gp *GasPool, ibs *state.IntraBlockState, stateWriter state.StateWriter, header *types.Header, tx types.Transaction, usedGas *uint64, cfg vm.Config, contractHasTEVM func(contractHash common.Hash) (bool, error)) (*types.Receipt, []byte, error) { // Create a new context to be used in the EVM environment // Add addresses to access list if applicable @@ -98,7 +152,7 @@ func ApplyTransaction(config *params.ChainConfig, blockHashFunc func(n uint64) c if tx.IsStarkNet() { vmenv = &vm.CVMAdapter{Cvm: vm.NewCVM(ibs)} } else { - blockContext := NewEVMBlockContext(header, blockHashFunc, engine, author, contractHasTEVM) + blockContext := NewEVMBlockContext(header, getHeader, engine, author, contractHasTEVM) vmenv = vm.NewEVM(blockContext, vm.TxContext{}, ibs, config, cfg) } diff --git a/core/vm/logger.go b/core/vm/logger.go index b9d511b8858..1d50dca6c1b 100644 --- a/core/vm/logger.go +++ b/core/vm/logger.go @@ -18,12 +18,10 @@ package vm import ( "encoding/hex" - "encoding/json" "errors" "fmt" "io" "math/big" - "os" "strings" "time" @@ -130,27 +128,6 @@ type Tracer interface { CaptureAccountWrite(account common.Address) error } -// FlushableTracer is a Tracer extension whose accumulated traces has to be -// flushed once the tracing is completed. 
-type FlushableTracer interface { - Tracer - Flush(tx types.Transaction) -} - -// StructLogRes stores a structured log emitted by the EVM while replaying a -// transaction in debug mode -type StructLogRes struct { - Pc uint64 `json:"pc"` - Op string `json:"op"` - Gas uint64 `json:"gas"` - GasCost uint64 `json:"gasCost"` - Depth int `json:"depth"` - Error error `json:"error,omitempty"` - Stack *[]string `json:"stack,omitempty"` - Memory *[]string `json:"memory,omitempty"` - Storage *map[string]string `json:"storage,omitempty"` -} - // StructLogger is an EVM state logger and implements Tracer. // // StructLogger can capture state based on the given Log configuration and also keeps @@ -284,58 +261,6 @@ func (l *StructLogger) Error() error { return l.err } // Output returns the VM return value captured by the trace. func (l *StructLogger) Output() []byte { return l.output } -func (l *StructLogger) Flush(tx types.Transaction) { - w, err1 := os.Create(fmt.Sprintf("txtrace_%x.txt", tx.Hash())) - if err1 != nil { - panic(err1) - } - encoder := json.NewEncoder(w) - logs := FormatLogs(l.StructLogs()) - if err2 := encoder.Encode(logs); err2 != nil { - panic(err2) - } - if err2 := w.Close(); err2 != nil { - panic(err2) - } -} - -// FormatLogs formats EVM returned structured logs for json output -func FormatLogs(logs []StructLog) []StructLogRes { - formatted := make([]StructLogRes, len(logs)) - for index, trace := range logs { - formatted[index] = StructLogRes{ - Pc: trace.Pc, - Op: trace.Op.String(), - Gas: trace.Gas, - GasCost: trace.GasCost, - Depth: trace.Depth, - Error: trace.Err, - } - if trace.Stack != nil { - stack := make([]string, len(trace.Stack)) - for i, stackValue := range trace.Stack { - stack[i] = fmt.Sprintf("%x", math.PaddedBigBytes(stackValue, 32)) - } - formatted[index].Stack = &stack - } - if trace.Memory != nil { - memory := make([]string, 0, (len(trace.Memory)+31)/32) - for i := 0; i+32 <= len(trace.Memory); i += 32 { - memory = append(memory, 
fmt.Sprintf("%x", trace.Memory[i:i+32])) - } - formatted[index].Memory = &memory - } - if trace.Storage != nil { - storage := make(map[string]string) - for i, storageValue := range trace.Storage { - storage[fmt.Sprintf("%x", i)] = fmt.Sprintf("%x", storageValue) - } - formatted[index].Storage = &storage - } - } - return formatted -} - // WriteTrace writes a formatted trace to the given writer func WriteTrace(writer io.Writer, logs []StructLog) { for _, log := range logs { diff --git a/eth/stagedsync/stage_execute.go b/eth/stagedsync/stage_execute.go index 1d6c76bdb83..200f95d6f0f 100644 --- a/eth/stagedsync/stage_execute.go +++ b/eth/stagedsync/stage_execute.go @@ -16,7 +16,6 @@ import ( "github.com/ledgerwatch/erigon-lib/etl" "github.com/ledgerwatch/erigon-lib/kv" commonold "github.com/ledgerwatch/erigon/common" - ecom "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/changeset" "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/common/math" @@ -124,23 +123,11 @@ func executeBlock( var receipts types.Receipts var stateSyncReceipt *types.ReceiptForStorage - var execRs *core.EphemeralExecResult - _, isPoSa := cfg.engine.(consensus.PoSA) - getHashFn := core.GetHashFn(block.Header(), getHeader) + _, isPoSa := effectiveEngine.(consensus.PoSA) if isPoSa { - getTracer := func(txIndex int, txHash ecom.Hash) (vm.Tracer, error) { - return vm.NewStructLogger(&vm.LogConfig{}), nil - } - execRs, err = core.ExecuteBlockEphemerallyForBSC(cfg.chainConfig, &vmConfig, getHashFn, cfg.engine, block, stateReader, stateWriter, epochReader{tx: tx}, chainReader{config: cfg.chainConfig, tx: tx, blockReader: cfg.blockReader}, contractHasTEVM, false, getTracer) - receipts = execRs.Receipts - stateSyncReceipt = execRs.ReceiptForStorage + receipts, err = core.ExecuteBlockEphemerallyForBSC(cfg.chainConfig, &vmConfig, getHeader, effectiveEngine, block, stateReader, stateWriter, epochReader{tx: tx}, chainReader{config: cfg.chainConfig, tx: 
tx, blockReader: cfg.blockReader}, contractHasTEVM) } else { - getTracer := func(txIndex int, txHash ecom.Hash) (vm.Tracer, error) { - return vm.NewStructLogger(&vm.LogConfig{}), nil - } - execRs, err = core.ExecuteBlockEphemerally(cfg.chainConfig, &vmConfig, getHashFn, cfg.engine, block, stateReader, stateWriter, epochReader{tx: tx}, chainReader{config: cfg.chainConfig, tx: tx, blockReader: cfg.blockReader}, contractHasTEVM, false, getTracer) - receipts = execRs.Receipts - stateSyncReceipt = execRs.ReceiptForStorage + receipts, stateSyncReceipt, err = core.ExecuteBlockEphemerally(cfg.chainConfig, &vmConfig, getHeader, effectiveEngine, block, stateReader, stateWriter, epochReader{tx: tx}, chainReader{config: cfg.chainConfig, tx: tx, blockReader: cfg.blockReader}, contractHasTEVM) } if err != nil { return err diff --git a/eth/stagedsync/stage_mining_exec.go b/eth/stagedsync/stage_mining_exec.go index a1742548088..3afa9922767 100644 --- a/eth/stagedsync/stage_mining_exec.go +++ b/eth/stagedsync/stage_mining_exec.go @@ -180,7 +180,7 @@ func addTransactionsToMiningBlock(logPrefix string, current *MiningBlock, chainC var miningCommitTx = func(txn types.Transaction, coinbase common.Address, vmConfig *vm.Config, chainConfig params.ChainConfig, ibs *state.IntraBlockState, current *MiningBlock) ([]*types.Log, error) { snap := ibs.Snapshot() - receipt, _, err := core.ApplyTransaction(&chainConfig, core.GetHashFn(header, getHeader), engine, &coinbase, gasPool, ibs, noop, header, txn, &header.GasUsed, *vmConfig, contractHasTEVM) + receipt, _, err := core.ApplyTransaction(&chainConfig, getHeader, engine, &coinbase, gasPool, ibs, noop, header, txn, &header.GasUsed, *vmConfig, contractHasTEVM) if err != nil { ibs.RevertToSnapshot(snap) return nil, err diff --git a/go.mod b/go.mod index d2360950a28..cd52eb705b8 100644 --- a/go.mod +++ b/go.mod @@ -19,8 +19,7 @@ require ( github.com/edsrzf/mmap-go v1.1.0 github.com/emicklei/dot v0.16.0 github.com/emirpasic/gods v1.18.1 - 
github.com/fjl/gencodec v0.0.0-20220412091415-8bb9e558978c - github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect + github.com/fjl/gencodec v0.0.0-20191126094850-e283372f291f github.com/goccy/go-json v0.9.7 github.com/gofrs/flock v0.8.1 github.com/golang-jwt/jwt/v4 v4.4.1 @@ -93,13 +92,13 @@ require ( github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91 // indirect - github.com/docker/docker v20.10.17+incompatible github.com/dustin/go-humanize v1.0.0 // indirect github.com/flanglet/kanzi-go v1.9.1-0.20211212184056-72dda96261ee // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61 // indirect github.com/go-kit/kit v0.10.0 // indirect github.com/go-logfmt/logfmt v0.5.0 // indirect + github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect github.com/go-stack/stack v1.8.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.2 // indirect @@ -162,5 +161,3 @@ require ( modernc.org/strutil v1.1.1 // indirect modernc.org/token v1.0.0 // indirect ) - -require gotest.tools/v3 v3.3.0 // indirect diff --git a/go.sum b/go.sum index 45f0c922d41..42febcd921c 100644 --- a/go.sum +++ b/go.sum @@ -165,8 +165,6 @@ github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91 h1:Izz0+t1Z5nI16/II7vuEo/nHjodOg0p7+OiDpjX5t1E= github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= -github.com/docker/docker v20.10.17+incompatible h1:JYCuMrWaVNophQTOrMMoSwudOVEfcegoZZrleKc1xwE= -github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docopt/docopt-go 
v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48 h1:iZOop7pqsg+56twTopWgwCGxdB5SI2yDO8Ti7eTRliQ= github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= @@ -193,8 +191,8 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fjl/gencodec v0.0.0-20220412091415-8bb9e558978c h1:CndMRAH4JIwxbW8KYq6Q+cGWcGHz0FjGR3QqcInWcW0= -github.com/fjl/gencodec v0.0.0-20220412091415-8bb9e558978c/go.mod h1:AzA8Lj6YtixmJWL+wkKoBGsLWy9gFrAzi4g+5bCKwpY= +github.com/fjl/gencodec v0.0.0-20191126094850-e283372f291f h1:Y/gg/utVetS+WS6htAKCTDralkm/8hLIIUAtLFdbdQ8= +github.com/fjl/gencodec v0.0.0-20191126094850-e283372f291f/go.mod h1:q+7Z5oyy8cvKF3TakcuihvQvBHFTnXjB+7UP1e2Q+1o= github.com/flanglet/kanzi-go v1.9.1-0.20211212184056-72dda96261ee h1:CaVlPeoz5kJQ+cAOV+ZDdlr3J2FmKyNkGu9LY+x7cDM= github.com/flanglet/kanzi-go v1.9.1-0.20211212184056-72dda96261ee/go.mod h1:/sUSVgDcbjsisuW42GPDgaMqvJ0McZERNICnD7b1nRA= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= @@ -210,10 +208,12 @@ github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61/go.mod h1:Q0X6pkwTILD github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= 
+github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= github.com/glycerine/go-unsnap-stream v0.0.0-20190901134440-81cf024a9e0a/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= github.com/glycerine/goconvey v0.0.0-20190315024820-982ee783a72e/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= +github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo= @@ -291,6 +291,7 @@ github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORR github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20190309154008-847fc94819f9/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= @@ -358,6 +359,7 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod 
h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= @@ -381,6 +383,7 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3PYPwICLl+/9oulQauOuETfgFvhBDffs0= +github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= github.com/ledgerwatch/erigon-lib v0.0.0-20220702183834-707a89842d6b h1:jxk2V9PBN9z2FQIL2SAV3V1wq01RUPz2kgzSqaCZmJQ= @@ -443,6 +446,7 @@ github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQ github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo 
v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= @@ -582,7 +586,6 @@ github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3 github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q= github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= @@ -789,12 +792,12 @@ golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191126055441-b0650ceb63d9/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod 
h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.10 h1:QjFRCZxdOhBJ/UNgnBZLbNV13DlbnK0quyivTnXJM20= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -875,8 +878,6 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.3.0 h1:MfDY1b1/0xN1CyMlQDac0ziEy9zJQd9CXBRRDHw2jJo= -gotest.tools/v3 v3.3.0/go.mod h1:Mcr9QNxkg0uMvy/YElmo4SpXgJKWgQvYrT7Kw5RzJ1A= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/internal/cmdtest/test_cmd.go b/internal/cmdtest/test_cmd.go deleted file mode 100644 index b837c9c399c..00000000000 --- a/internal/cmdtest/test_cmd.go +++ /dev/null @@ -1,300 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. 
-// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package cmdtest - -import ( - "bufio" - "bytes" - "fmt" - "io" - "os" - "os/exec" - "regexp" - "strings" - "sync" - "sync/atomic" - "syscall" - "testing" - "text/template" - "time" - - "github.com/docker/docker/pkg/reexec" -) - -func NewTestCmd(t *testing.T, data interface{}) *TestCmd { - return &TestCmd{T: t, Data: data} -} - -type TestCmd struct { - // For total convenience, all testing methods are available. - *testing.T - - Func template.FuncMap - Data interface{} - Cleanup func() - - cmd *exec.Cmd - stdout *bufio.Reader - stdin io.WriteCloser - stderr *testlogger - // Err will contain the process exit error or interrupt signal error - Err error -} - -var id int32 - -// Run exec's the current binary using name as argv[0] which will trigger the -// reexec init function for that name (e.g. "geth-test" in cmd/geth/run_test.go) -func (tt *TestCmd) Run(name string, args ...string) { - id := atomic.AddInt32(&id, 1) - tt.stderr = &testlogger{t: tt.T, name: fmt.Sprintf("%d", id)} - tt.cmd = &exec.Cmd{ - Path: reexec.Self(), - Args: append([]string{name}, args...), - Stderr: tt.stderr, - } - stdout, err := tt.cmd.StdoutPipe() - if err != nil { - tt.Fatal(err) - } - tt.stdout = bufio.NewReader(stdout) - if tt.stdin, err = tt.cmd.StdinPipe(); err != nil { - tt.Fatal(err) - } - if err := tt.cmd.Start(); err != nil { - tt.Fatal(err) - } -} - -// InputLine writes the given text to the child's stdin. 
-// This method can also be called from an expect template, e.g.: -// -// geth.expect(`Passphrase: {{.InputLine "password"}}`) -func (tt *TestCmd) InputLine(s string) string { - io.WriteString(tt.stdin, s+"\n") - return "" -} - -func (tt *TestCmd) SetTemplateFunc(name string, fn interface{}) { - if tt.Func == nil { - tt.Func = make(map[string]interface{}) - } - tt.Func[name] = fn -} - -// Expect runs its argument as a template, then expects the -// child process to output the result of the template within 5s. -// -// If the template starts with a newline, the newline is removed -// before matching. -func (tt *TestCmd) Expect(tplsource string) { - // Generate the expected output by running the template. - tpl := template.Must(template.New("").Funcs(tt.Func).Parse(tplsource)) - wantbuf := new(bytes.Buffer) - if err := tpl.Execute(wantbuf, tt.Data); err != nil { - panic(err) - } - // Trim exactly one newline at the beginning. This makes tests look - // much nicer because all expect strings are at column 0. - want := bytes.TrimPrefix(wantbuf.Bytes(), []byte("\n")) - if err := tt.matchExactOutput(want); err != nil { - tt.Fatal(err) - } - tt.Logf("Matched stdout text:\n%s", want) -} - -// Output reads all output from stdout, and returns the data. -func (tt *TestCmd) Output() []byte { - var buf []byte - tt.withKillTimeout(func() { buf, _ = io.ReadAll(tt.stdout) }) - return buf -} - -func (tt *TestCmd) matchExactOutput(want []byte) error { - buf := make([]byte, len(want)) - n := 0 - tt.withKillTimeout(func() { n, _ = io.ReadFull(tt.stdout, buf) }) - buf = buf[:n] - if n < len(want) || !bytes.Equal(buf, want) { - // Grab any additional buffered output in case of mismatch - // because it might help with debugging. - buf = append(buf, make([]byte, tt.stdout.Buffered())...) - tt.stdout.Read(buf[n:]) - // Find the mismatch position. 
- for i := 0; i < n; i++ { - if want[i] != buf[i] { - return fmt.Errorf("output mismatch at ◊:\n---------------- (stdout text)\n%s◊%s\n---------------- (expected text)\n%s", - buf[:i], buf[i:n], want) - } - } - if n < len(want) { - return fmt.Errorf("not enough output, got until ◊:\n---------------- (stdout text)\n%s\n---------------- (expected text)\n%s◊%s", - buf, want[:n], want[n:]) - } - } - return nil -} - -// ExpectRegexp expects the child process to output text matching the -// given regular expression within 5s. -// -// Note that an arbitrary amount of output may be consumed by the -// regular expression. This usually means that expect cannot be used -// after ExpectRegexp. -func (tt *TestCmd) ExpectRegexp(regex string) (*regexp.Regexp, []string) { - regex = strings.TrimPrefix(regex, "\n") - var ( - re = regexp.MustCompile(regex) - rtee = &runeTee{in: tt.stdout} - matches []int - ) - tt.withKillTimeout(func() { matches = re.FindReaderSubmatchIndex(rtee) }) - output := rtee.buf.Bytes() - if matches == nil { - tt.Fatalf("Output did not match:\n---------------- (stdout text)\n%s\n---------------- (regular expression)\n%s", - output, regex) - return re, nil - } - tt.Logf("Matched stdout text:\n%s", output) - var submatches []string - for i := 0; i < len(matches); i += 2 { - submatch := string(output[matches[i]:matches[i+1]]) - submatches = append(submatches, submatch) - } - return re, submatches -} - -// ExpectExit expects the child process to exit within 5s without -// printing any additional text on stdout. 
-func (tt *TestCmd) ExpectExit() { - var output []byte - tt.withKillTimeout(func() { - output, _ = io.ReadAll(tt.stdout) - }) - tt.WaitExit() - if tt.Cleanup != nil { - tt.Cleanup() - } - if len(output) > 0 { - tt.Errorf("Unmatched stdout text:\n%s", output) - } -} - -func (tt *TestCmd) WaitExit() { - tt.Err = tt.cmd.Wait() -} - -func (tt *TestCmd) Interrupt() { - tt.Err = tt.cmd.Process.Signal(os.Interrupt) -} - -// ExitStatus exposes the process' OS exit code -// It will only return a valid value after the process has finished. -func (tt *TestCmd) ExitStatus() int { - if tt.Err != nil { - exitErr := tt.Err.(*exec.ExitError) - if exitErr != nil { - if status, ok := exitErr.Sys().(syscall.WaitStatus); ok { - return status.ExitStatus() - } - } - } - return 0 -} - -// StderrText returns any stderr output written so far. -// The returned text holds all log lines after ExpectExit has -// returned. -func (tt *TestCmd) StderrText() string { - tt.stderr.mu.Lock() - defer tt.stderr.mu.Unlock() - return tt.stderr.buf.String() -} - -func (tt *TestCmd) CloseStdin() { - tt.stdin.Close() -} - -func (tt *TestCmd) Kill() { - tt.cmd.Process.Kill() - if tt.Cleanup != nil { - tt.Cleanup() - } -} - -func (tt *TestCmd) withKillTimeout(fn func()) { - timeout := time.AfterFunc(5*time.Second, func() { - tt.Log("killing the child process (timeout)") - tt.Kill() - }) - defer timeout.Stop() - fn() -} - -// testlogger logs all written lines via t.Log and also -// collects them for later inspection. -type testlogger struct { - t *testing.T - mu sync.Mutex - buf bytes.Buffer - name string -} - -func (tl *testlogger) Write(b []byte) (n int, err error) { - lines := bytes.Split(b, []byte("\n")) - for _, line := range lines { - if len(line) > 0 { - tl.t.Logf("(stderr:%v) %s", tl.name, line) - } - } - tl.mu.Lock() - tl.buf.Write(b) - tl.mu.Unlock() - return len(b), err -} - -// runeTee collects text read through it into buf. 
-type runeTee struct { - in interface { - io.Reader - io.ByteReader - io.RuneReader - } - buf bytes.Buffer -} - -func (rtee *runeTee) Read(b []byte) (n int, err error) { - n, err = rtee.in.Read(b) - rtee.buf.Write(b[:n]) - return n, err -} - -func (rtee *runeTee) ReadRune() (r rune, size int, err error) { - r, size, err = rtee.in.ReadRune() - if err == nil { - rtee.buf.WriteRune(r) - } - return r, size, err -} - -func (rtee *runeTee) ReadByte() (b byte, err error) { - b, err = rtee.in.ReadByte() - if err == nil { - rtee.buf.WriteByte(b) - } - return b, err -} diff --git a/tests/state_test_util.go b/tests/state_test_util.go index 51e3a94079c..4989ab2af97 100644 --- a/tests/state_test_util.go +++ b/tests/state_test_util.go @@ -190,8 +190,7 @@ func (t *StateTest) RunNoVerify(rules *params.Rules, tx kv.RwTx, subtest StateSu // Prepare the EVM. txContext := core.NewEVMTxContext(msg) contractHasTEVM := func(common.Hash) (bool, error) { return false, nil } - header := block.Header() - context := core.NewEVMBlockContext(header, core.GetHashFn(header, nil), nil, &t.json.Env.Coinbase, contractHasTEVM) + context := core.NewEVMBlockContext(block.Header(), nil, nil, &t.json.Env.Coinbase, contractHasTEVM) context.GetHash = vmTestBlockHash if baseFee != nil { context.BaseFee = new(uint256.Int) diff --git a/turbo/transactions/tracing.go b/turbo/transactions/tracing.go index d0d569c93fc..3c3a2b3b557 100644 --- a/turbo/transactions/tracing.go +++ b/turbo/transactions/tracing.go @@ -42,8 +42,7 @@ func ComputeTxEnv(ctx context.Context, block *types.Block, cfg *params.ChainConf // Recompute transactions up to the target index. 
signer := types.MakeSigner(cfg, block.NumberU64()) - header := block.Header() - BlockContext := core.NewEVMBlockContext(header, core.GetHashFn(header, getHeader), engine, nil, contractHasTEVM) + BlockContext := core.NewEVMBlockContext(block.Header(), getHeader, engine, nil, contractHasTEVM) vmenv := vm.NewEVM(BlockContext, vm.TxContext{}, statedb, cfg, vm.Config{}) rules := vmenv.ChainRules() for idx, tx := range block.Transactions() { From 6e31b56d656b3b6831e0084ef74656cf0a513fa9 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Sun, 3 Jul 2022 13:29:10 +0600 Subject: [PATCH 022/152] grafana version 9 (#4613) --- docker-compose.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index f1f303473c5..ff5d5ad67fa 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -66,7 +66,7 @@ services: prometheus: - image: prom/prometheus:v2.36.0 + image: prom/prometheus:v2.36.2 user: ${DOCKER_UID}:${DOCKER_GID} # Uses erigon user from Dockerfile command: --log.level=warn --config.file=/etc/prometheus/prometheus.yml --storage.tsdb.path=/prometheus --storage.tsdb.retention.time=150d --web.console.libraries=/usr/share/prometheus/console_libraries --web.console.templates=/usr/share/prometheus/consoles ports: [ "9090:9090" ] @@ -76,7 +76,7 @@ services: restart: unless-stopped grafana: - image: grafana/grafana:8.5.4 + image: grafana/grafana:9.0.2 user: ${DOCKER_UID}:${DOCKER_GID} # Uses erigon user from Dockerfile ports: [ "3000:3000" ] volumes: From e90e03ae31606e64d407547fddc8fc5551da649a Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Sun, 3 Jul 2022 11:58:57 +0200 Subject: [PATCH 023/152] Proper Pos block checker when INVALID/ACCEPTED status is sent (#4604) * added proper PoS block checker * proper invalid lvh * p * fixed smol thingy * fix more * fixed engine API * fixed engine API * better nil hash * added 0x0 checks * full support --- cmd/rpcdaemon/commands/engine_api.go | 3 ++ core/rawdb/accessors_chain.go | 
14 ++++++++ eth/stagedsync/stage_headers.go | 38 +++++++++++++++++---- ethdb/privateapi/ethbackend.go | 4 +-- turbo/stages/headerdownload/header_algos.go | 12 ++++--- turbo/stages/stageloop.go | 9 +++++ 6 files changed, 67 insertions(+), 13 deletions(-) diff --git a/cmd/rpcdaemon/commands/engine_api.go b/cmd/rpcdaemon/commands/engine_api.go index 1d7984fa91a..2fd1621a72a 100644 --- a/cmd/rpcdaemon/commands/engine_api.go +++ b/cmd/rpcdaemon/commands/engine_api.go @@ -76,6 +76,9 @@ func convertPayloadStatus(x *remote.EnginePayloadStatus) map[string]interface{} json := map[string]interface{}{ "status": x.Status.String(), } + if x.Status == remote.EngineStatus_INVALID || x.Status == remote.EngineStatus_ACCEPTED { + json["latestValidHash"] = common.Hash{} + } if x.LatestValidHash != nil { json["latestValidHash"] = common.Hash(gointerfaces.ConvertH256ToHash(x.LatestValidHash)) } diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go index 5393535b61f..79d7da31804 100644 --- a/core/rawdb/accessors_chain.go +++ b/core/rawdb/accessors_chain.go @@ -1603,3 +1603,17 @@ func Transitioned(db kv.Getter, blockNum uint64, terminalTotalDifficulty *big.In return headerTd.Cmp(terminalTotalDifficulty) >= 0, nil } + +// IsPosBlock returns true if the block is a PoS block, aka. all blocks with null difficulty. 
+func IsPosBlock(db kv.Getter, blockHash common.Hash, blockNum uint64) (trans bool, err error) { + if blockNum == 0 { + return false, nil + } + + header := ReadHeader(db, blockHash, blockNum) + if header == nil { + return false, nil + } + + return header.Difficulty.Cmp(common.Big0) == 0, nil +} diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index fd0c70e8bd5..fe2e3d628f2 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -500,6 +500,11 @@ func handleNewPayload( }, nil } + isAncestorPos, err := rawdb.IsPosBlock(tx, header.ParentHash, headerNumber-1) + if err != nil { + return nil, err + } + parent, err := cfg.blockReader.HeaderByHash(ctx, tx, header.ParentHash) if err != nil { return nil, err @@ -514,9 +519,14 @@ func handleNewPayload( } if header.Number.Uint64() != parent.Number.Uint64()+1 { + latestValidHash := common.Hash{} + if isAncestorPos { + latestValidHash = header.ParentHash + } + cfg.hd.ReportBadHeaderPoS(headerHash, latestValidHash) return &privateapi.PayloadStatus{ Status: remote.EngineStatus_INVALID, - LatestValidHash: header.ParentHash, + LatestValidHash: latestValidHash, ValidationError: errors.New("invalid block number"), }, nil } @@ -525,10 +535,14 @@ func handleNewPayload( for _, tx := range payloadMessage.Body.Transactions { if types.TypedTransactionMarshalledAsRlpString(tx) { - cfg.hd.ReportBadHeaderPoS(headerHash, header.ParentHash) + latestValidHash := common.Hash{} + if isAncestorPos { + latestValidHash = header.ParentHash + } + cfg.hd.ReportBadHeaderPoS(headerHash, latestValidHash) return &privateapi.PayloadStatus{ Status: remote.EngineStatus_INVALID, - LatestValidHash: header.ParentHash, + LatestValidHash: latestValidHash, ValidationError: errors.New("typed txn marshalled as RLP string"), }, nil } @@ -537,7 +551,11 @@ func handleNewPayload( transactions, err := types.DecodeTransactions(payloadMessage.Body.Transactions) if err != nil { log.Warn("Error during Beacon 
transaction decoding", "err", err.Error()) - cfg.hd.ReportBadHeaderPoS(headerHash, header.ParentHash) + latestValidHash := common.Hash{} + if isAncestorPos { + latestValidHash = header.ParentHash + } + cfg.hd.ReportBadHeaderPoS(headerHash, latestValidHash) return &privateapi.PayloadStatus{ Status: remote.EngineStatus_INVALID, LatestValidHash: header.ParentHash, @@ -574,7 +592,15 @@ func verifyAndSaveNewPoSHeader( if verificationErr := cfg.hd.VerifyHeader(header); verificationErr != nil { log.Warn("Verification failed for header", "hash", headerHash, "height", headerNumber, "err", verificationErr) - cfg.hd.ReportBadHeaderPoS(headerHash, header.ParentHash) + isAncestorPos, err := rawdb.IsPosBlock(tx, header.ParentHash, headerNumber-1) + if err != nil { + return nil, false, err + } + latestValidHash := common.Hash{} + if isAncestorPos { + latestValidHash = header.ParentHash + } + cfg.hd.ReportBadHeaderPoS(headerHash, latestValidHash) return &privateapi.PayloadStatus{ Status: remote.EngineStatus_INVALID, LatestValidHash: header.ParentHash, @@ -616,7 +642,7 @@ func verifyAndSaveNewPoSHeader( } if cfg.memoryOverlay && (cfg.hd.GetNextForkHash() == (common.Hash{}) || header.ParentHash == cfg.hd.GetNextForkHash()) { - status, latestValidHash, validationError, criticalError := cfg.hd.ValidatePayload(tx, header, body, cfg.chainConfig.TerminalTotalDifficulty, true, cfg.execPayload) + status, latestValidHash, validationError, criticalError := cfg.hd.ValidatePayload(tx, header, body, true, cfg.execPayload) if criticalError != nil { return &privateapi.PayloadStatus{CriticalError: criticalError}, false, criticalError } diff --git a/ethdb/privateapi/ethbackend.go b/ethdb/privateapi/ethbackend.go index 079f3be97bc..5e43af9da9a 100644 --- a/ethdb/privateapi/ethbackend.go +++ b/ethdb/privateapi/ethbackend.go @@ -246,9 +246,7 @@ func (s *EthBackendServer) Block(ctx context.Context, req *remote.BlockRequest) func convertPayloadStatus(payloadStatus *PayloadStatus) 
*remote.EnginePayloadStatus { reply := remote.EnginePayloadStatus{Status: payloadStatus.Status} - if payloadStatus.LatestValidHash != (common.Hash{}) { - reply.LatestValidHash = gointerfaces.ConvertHashToH256(payloadStatus.LatestValidHash) - } + reply.LatestValidHash = gointerfaces.ConvertHashToH256(payloadStatus.LatestValidHash) if payloadStatus.ValidationError != nil { reply.ValidationError = payloadStatus.ValidationError.Error() } diff --git a/turbo/stages/headerdownload/header_algos.go b/turbo/stages/headerdownload/header_algos.go index c0ed7df24c5..9c36365bc43 100644 --- a/turbo/stages/headerdownload/header_algos.go +++ b/turbo/stages/headerdownload/header_algos.go @@ -1117,10 +1117,13 @@ func (hd *HeaderDownload) ValidatePayload(tx kv.RwTx, header *types.Header, body return } - isAncestorPosBlock, criticalError := rawdb.Transitioned(tx, header.Number.Uint64()-1, terminalTotalDifficulty) + // If the previous block is the transition block, then the latest valid hash is the 0x0 hash, in case we return INVALID + isAncestorPosBlock, criticalError := rawdb.IsPosBlock(tx, header.ParentHash, header.Number.Uint64()-1) if criticalError != nil { return } + _, isAncestorSideFork := hd.sideForksBlock[header.ParentHash] + if store { // If it is a continuation of the canonical chain we can stack it up. 
if hd.nextForkState == nil { @@ -1133,7 +1136,7 @@ func (hd *HeaderDownload) ValidatePayload(tx kv.RwTx, header *types.Header, body validationError = execPayload(hd.nextForkState, header, body, 0, nil, nil) if validationError != nil { status = remote.EngineStatus_INVALID - if isAncestorPosBlock { + if isAncestorPosBlock || isAncestorSideFork { latestValidHash = header.ParentHash } return @@ -1183,13 +1186,14 @@ func (hd *HeaderDownload) ValidatePayload(tx kv.RwTx, header *types.Header, body batch := memdb.NewMemoryBatch(tx) defer batch.Close() validationError = execPayload(batch, header, body, unwindPoint, headersChain, bodiesChain) - latestValidHash = header.Hash() if validationError != nil { - if isAncestorPosBlock { + if isAncestorPosBlock || isAncestorSideFork { latestValidHash = header.ParentHash } status = remote.EngineStatus_INVALID + return } + latestValidHash = header.Hash() // After the we finished executing, we clean up old forks hd.cleanupOutdateSideForks(*currentHeight, maxDepth) return diff --git a/turbo/stages/stageloop.go b/turbo/stages/stageloop.go index a93342e2dd2..b3416c1eded 100644 --- a/turbo/stages/stageloop.go +++ b/turbo/stages/stageloop.go @@ -188,7 +188,16 @@ func StageLoopStep( return headBlockHash, err } headBlockHash = rawdb.ReadHeadBlockHash(rotx) + headBlockNumber := rawdb.ReadCurrentBlockNumber(rotx) + isAncestorPosBlock, err := rawdb.IsPosBlock(rotx, headBlockHash, *headBlockNumber) + if err != nil { + return headBlockHash, err + } + + if !isAncestorPosBlock { + headBlockHash = common.Hash{} + } if canRunCycleInOneTransaction && snapshotMigratorFinal != nil { err = snapshotMigratorFinal(rotx) if err != nil { From 1c5ec22d09f39bbca34ddcf6cb20aa5cf737544e Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Sun, 3 Jul 2022 12:37:31 +0200 Subject: [PATCH 024/152] fixed compilation (#4614) --- eth/stagedsync/stage_headers.go | 2 +- turbo/stages/headerdownload/header_algos.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff 
--git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index fe2e3d628f2..3492173ed53 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -625,7 +625,7 @@ func verifyAndSaveNewPoSHeader( // Side chain or something weird // TODO(yperbasis): considered non-canonical because some missing headers were downloaded but not canonized // Or it's not a problem because forkChoice is updated frequently? - status, latestValidHash, validationError, criticalError := cfg.hd.ValidatePayload(tx, header, body, cfg.chainConfig.TerminalTotalDifficulty, false, cfg.execPayload) + status, latestValidHash, validationError, criticalError := cfg.hd.ValidatePayload(tx, header, body, false, cfg.execPayload) if criticalError != nil { return &privateapi.PayloadStatus{CriticalError: criticalError}, false, criticalError } diff --git a/turbo/stages/headerdownload/header_algos.go b/turbo/stages/headerdownload/header_algos.go index 9c36365bc43..79944f9b60e 100644 --- a/turbo/stages/headerdownload/header_algos.go +++ b/turbo/stages/headerdownload/header_algos.go @@ -1106,7 +1106,7 @@ func (hd *HeaderDownload) StorePayloadFork(tx kv.RwTx, header *types.Header, bod return nil } -func (hd *HeaderDownload) ValidatePayload(tx kv.RwTx, header *types.Header, body *types.RawBody, terminalTotalDifficulty *big.Int, store bool, execPayload func(kv.RwTx, *types.Header, *types.RawBody, uint64, []*types.Header, []*types.RawBody) error) (status remote.EngineStatus, latestValidHash common.Hash, validationError error, criticalError error) { +func (hd *HeaderDownload) ValidatePayload(tx kv.RwTx, header *types.Header, body *types.RawBody, store bool, execPayload func(kv.RwTx, *types.Header, *types.RawBody, uint64, []*types.Header, []*types.RawBody) error) (status remote.EngineStatus, latestValidHash common.Hash, validationError error, criticalError error) { hd.lock.Lock() defer hd.lock.Unlock() maxDepth := uint64(16) From fd2886b9271c620418f0102052061df601161a70 Mon 
Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Sun, 3 Jul 2022 12:54:27 +0200 Subject: [PATCH 025/152] Revert "fixed compilation (#4614)" (#4615) This reverts commit 1c5ec22d09f39bbca34ddcf6cb20aa5cf737544e. --- eth/stagedsync/stage_headers.go | 2 +- turbo/stages/headerdownload/header_algos.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 3492173ed53..fe2e3d628f2 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -625,7 +625,7 @@ func verifyAndSaveNewPoSHeader( // Side chain or something weird // TODO(yperbasis): considered non-canonical because some missing headers were downloaded but not canonized // Or it's not a problem because forkChoice is updated frequently? - status, latestValidHash, validationError, criticalError := cfg.hd.ValidatePayload(tx, header, body, false, cfg.execPayload) + status, latestValidHash, validationError, criticalError := cfg.hd.ValidatePayload(tx, header, body, cfg.chainConfig.TerminalTotalDifficulty, false, cfg.execPayload) if criticalError != nil { return &privateapi.PayloadStatus{CriticalError: criticalError}, false, criticalError } diff --git a/turbo/stages/headerdownload/header_algos.go b/turbo/stages/headerdownload/header_algos.go index 79944f9b60e..9c36365bc43 100644 --- a/turbo/stages/headerdownload/header_algos.go +++ b/turbo/stages/headerdownload/header_algos.go @@ -1106,7 +1106,7 @@ func (hd *HeaderDownload) StorePayloadFork(tx kv.RwTx, header *types.Header, bod return nil } -func (hd *HeaderDownload) ValidatePayload(tx kv.RwTx, header *types.Header, body *types.RawBody, store bool, execPayload func(kv.RwTx, *types.Header, *types.RawBody, uint64, []*types.Header, []*types.RawBody) error) (status remote.EngineStatus, latestValidHash common.Hash, validationError error, criticalError error) { +func (hd *HeaderDownload) ValidatePayload(tx kv.RwTx, header *types.Header, body *types.RawBody, 
terminalTotalDifficulty *big.Int, store bool, execPayload func(kv.RwTx, *types.Header, *types.RawBody, uint64, []*types.Header, []*types.RawBody) error) (status remote.EngineStatus, latestValidHash common.Hash, validationError error, criticalError error) { hd.lock.Lock() defer hd.lock.Unlock() maxDepth := uint64(16) From 3fc51f5ef76399b08a9aae59c98936e5383d0745 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Sun, 3 Jul 2022 13:11:16 +0200 Subject: [PATCH 026/152] Revert "Proper Pos block checker when INVALID/ACCEPTED status is sent (#4604)" (#4616) This reverts commit e90e03ae31606e64d407547fddc8fc5551da649a. --- cmd/rpcdaemon/commands/engine_api.go | 3 -- core/rawdb/accessors_chain.go | 14 -------- eth/stagedsync/stage_headers.go | 38 ++++----------------- ethdb/privateapi/ethbackend.go | 4 ++- turbo/stages/headerdownload/header_algos.go | 12 +++---- turbo/stages/stageloop.go | 9 ----- 6 files changed, 13 insertions(+), 67 deletions(-) diff --git a/cmd/rpcdaemon/commands/engine_api.go b/cmd/rpcdaemon/commands/engine_api.go index 2fd1621a72a..1d7984fa91a 100644 --- a/cmd/rpcdaemon/commands/engine_api.go +++ b/cmd/rpcdaemon/commands/engine_api.go @@ -76,9 +76,6 @@ func convertPayloadStatus(x *remote.EnginePayloadStatus) map[string]interface{} json := map[string]interface{}{ "status": x.Status.String(), } - if x.Status == remote.EngineStatus_INVALID || x.Status == remote.EngineStatus_ACCEPTED { - json["latestValidHash"] = common.Hash{} - } if x.LatestValidHash != nil { json["latestValidHash"] = common.Hash(gointerfaces.ConvertH256ToHash(x.LatestValidHash)) } diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go index 79d7da31804..5393535b61f 100644 --- a/core/rawdb/accessors_chain.go +++ b/core/rawdb/accessors_chain.go @@ -1603,17 +1603,3 @@ func Transitioned(db kv.Getter, blockNum uint64, terminalTotalDifficulty *big.In return headerTd.Cmp(terminalTotalDifficulty) >= 0, nil } - -// IsPosBlock returns true if the block is a PoS block, aka. 
all blocks with null difficulty. -func IsPosBlock(db kv.Getter, blockHash common.Hash, blockNum uint64) (trans bool, err error) { - if blockNum == 0 { - return false, nil - } - - header := ReadHeader(db, blockHash, blockNum) - if header == nil { - return false, nil - } - - return header.Difficulty.Cmp(common.Big0) == 0, nil -} diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index fe2e3d628f2..fd0c70e8bd5 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -500,11 +500,6 @@ func handleNewPayload( }, nil } - isAncestorPos, err := rawdb.IsPosBlock(tx, header.ParentHash, headerNumber-1) - if err != nil { - return nil, err - } - parent, err := cfg.blockReader.HeaderByHash(ctx, tx, header.ParentHash) if err != nil { return nil, err @@ -519,14 +514,9 @@ func handleNewPayload( } if header.Number.Uint64() != parent.Number.Uint64()+1 { - latestValidHash := common.Hash{} - if isAncestorPos { - latestValidHash = header.ParentHash - } - cfg.hd.ReportBadHeaderPoS(headerHash, latestValidHash) return &privateapi.PayloadStatus{ Status: remote.EngineStatus_INVALID, - LatestValidHash: latestValidHash, + LatestValidHash: header.ParentHash, ValidationError: errors.New("invalid block number"), }, nil } @@ -535,14 +525,10 @@ func handleNewPayload( for _, tx := range payloadMessage.Body.Transactions { if types.TypedTransactionMarshalledAsRlpString(tx) { - latestValidHash := common.Hash{} - if isAncestorPos { - latestValidHash = header.ParentHash - } - cfg.hd.ReportBadHeaderPoS(headerHash, latestValidHash) + cfg.hd.ReportBadHeaderPoS(headerHash, header.ParentHash) return &privateapi.PayloadStatus{ Status: remote.EngineStatus_INVALID, - LatestValidHash: latestValidHash, + LatestValidHash: header.ParentHash, ValidationError: errors.New("typed txn marshalled as RLP string"), }, nil } @@ -551,11 +537,7 @@ func handleNewPayload( transactions, err := types.DecodeTransactions(payloadMessage.Body.Transactions) if err != nil { 
log.Warn("Error during Beacon transaction decoding", "err", err.Error()) - latestValidHash := common.Hash{} - if isAncestorPos { - latestValidHash = header.ParentHash - } - cfg.hd.ReportBadHeaderPoS(headerHash, latestValidHash) + cfg.hd.ReportBadHeaderPoS(headerHash, header.ParentHash) return &privateapi.PayloadStatus{ Status: remote.EngineStatus_INVALID, LatestValidHash: header.ParentHash, @@ -592,15 +574,7 @@ func verifyAndSaveNewPoSHeader( if verificationErr := cfg.hd.VerifyHeader(header); verificationErr != nil { log.Warn("Verification failed for header", "hash", headerHash, "height", headerNumber, "err", verificationErr) - isAncestorPos, err := rawdb.IsPosBlock(tx, header.ParentHash, headerNumber-1) - if err != nil { - return nil, false, err - } - latestValidHash := common.Hash{} - if isAncestorPos { - latestValidHash = header.ParentHash - } - cfg.hd.ReportBadHeaderPoS(headerHash, latestValidHash) + cfg.hd.ReportBadHeaderPoS(headerHash, header.ParentHash) return &privateapi.PayloadStatus{ Status: remote.EngineStatus_INVALID, LatestValidHash: header.ParentHash, @@ -642,7 +616,7 @@ func verifyAndSaveNewPoSHeader( } if cfg.memoryOverlay && (cfg.hd.GetNextForkHash() == (common.Hash{}) || header.ParentHash == cfg.hd.GetNextForkHash()) { - status, latestValidHash, validationError, criticalError := cfg.hd.ValidatePayload(tx, header, body, true, cfg.execPayload) + status, latestValidHash, validationError, criticalError := cfg.hd.ValidatePayload(tx, header, body, cfg.chainConfig.TerminalTotalDifficulty, true, cfg.execPayload) if criticalError != nil { return &privateapi.PayloadStatus{CriticalError: criticalError}, false, criticalError } diff --git a/ethdb/privateapi/ethbackend.go b/ethdb/privateapi/ethbackend.go index 5e43af9da9a..079f3be97bc 100644 --- a/ethdb/privateapi/ethbackend.go +++ b/ethdb/privateapi/ethbackend.go @@ -246,7 +246,9 @@ func (s *EthBackendServer) Block(ctx context.Context, req *remote.BlockRequest) func convertPayloadStatus(payloadStatus 
*PayloadStatus) *remote.EnginePayloadStatus { reply := remote.EnginePayloadStatus{Status: payloadStatus.Status} - reply.LatestValidHash = gointerfaces.ConvertHashToH256(payloadStatus.LatestValidHash) + if payloadStatus.LatestValidHash != (common.Hash{}) { + reply.LatestValidHash = gointerfaces.ConvertHashToH256(payloadStatus.LatestValidHash) + } if payloadStatus.ValidationError != nil { reply.ValidationError = payloadStatus.ValidationError.Error() } diff --git a/turbo/stages/headerdownload/header_algos.go b/turbo/stages/headerdownload/header_algos.go index 9c36365bc43..c0ed7df24c5 100644 --- a/turbo/stages/headerdownload/header_algos.go +++ b/turbo/stages/headerdownload/header_algos.go @@ -1117,13 +1117,10 @@ func (hd *HeaderDownload) ValidatePayload(tx kv.RwTx, header *types.Header, body return } - // If the previous block is the transition block, then the latest valid hash is the 0x0 hash, in case we return INVALID - isAncestorPosBlock, criticalError := rawdb.IsPosBlock(tx, header.ParentHash, header.Number.Uint64()-1) + isAncestorPosBlock, criticalError := rawdb.Transitioned(tx, header.Number.Uint64()-1, terminalTotalDifficulty) if criticalError != nil { return } - _, isAncestorSideFork := hd.sideForksBlock[header.ParentHash] - if store { // If it is a continuation of the canonical chain we can stack it up. 
if hd.nextForkState == nil { @@ -1136,7 +1133,7 @@ func (hd *HeaderDownload) ValidatePayload(tx kv.RwTx, header *types.Header, body validationError = execPayload(hd.nextForkState, header, body, 0, nil, nil) if validationError != nil { status = remote.EngineStatus_INVALID - if isAncestorPosBlock || isAncestorSideFork { + if isAncestorPosBlock { latestValidHash = header.ParentHash } return @@ -1186,14 +1183,13 @@ func (hd *HeaderDownload) ValidatePayload(tx kv.RwTx, header *types.Header, body batch := memdb.NewMemoryBatch(tx) defer batch.Close() validationError = execPayload(batch, header, body, unwindPoint, headersChain, bodiesChain) + latestValidHash = header.Hash() if validationError != nil { - if isAncestorPosBlock || isAncestorSideFork { + if isAncestorPosBlock { latestValidHash = header.ParentHash } status = remote.EngineStatus_INVALID - return } - latestValidHash = header.Hash() // After the we finished executing, we clean up old forks hd.cleanupOutdateSideForks(*currentHeight, maxDepth) return diff --git a/turbo/stages/stageloop.go b/turbo/stages/stageloop.go index b3416c1eded..a93342e2dd2 100644 --- a/turbo/stages/stageloop.go +++ b/turbo/stages/stageloop.go @@ -188,16 +188,7 @@ func StageLoopStep( return headBlockHash, err } headBlockHash = rawdb.ReadHeadBlockHash(rotx) - headBlockNumber := rawdb.ReadCurrentBlockNumber(rotx) - isAncestorPosBlock, err := rawdb.IsPosBlock(rotx, headBlockHash, *headBlockNumber) - if err != nil { - return headBlockHash, err - } - - if !isAncestorPosBlock { - headBlockHash = common.Hash{} - } if canRunCycleInOneTransaction && snapshotMigratorFinal != nil { err = snapshotMigratorFinal(rotx) if err != nil { From c422b8c4da2cc558c7c6ce50ec8f25988760c3cb Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Sun, 3 Jul 2022 13:34:35 +0200 Subject: [PATCH 027/152] better ancestor check (#4617) --- cmd/rpcdaemon/commands/engine_api.go | 22 ++++++++++++++++++++-- core/rawdb/accessors_chain.go | 10 ++++++++++ 2 files changed, 30 
insertions(+), 2 deletions(-) diff --git a/cmd/rpcdaemon/commands/engine_api.go b/cmd/rpcdaemon/commands/engine_api.go index 1d7984fa91a..5fb9c9eb059 100644 --- a/cmd/rpcdaemon/commands/engine_api.go +++ b/cmd/rpcdaemon/commands/engine_api.go @@ -13,6 +13,7 @@ import ( "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/hexutil" + "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/turbo/rpchelper" "github.com/ledgerwatch/log/v3" @@ -127,6 +128,13 @@ func (e *EngineImpl) ForkchoiceUpdatedV1(ctx context.Context, forkChoiceState *F func (e *EngineImpl) NewPayloadV1(ctx context.Context, payload *ExecutionPayload) (map[string]interface{}, error) { log.Trace("Received NewPayload", "height", uint64(payload.BlockNumber), "hash", payload.BlockHash) + tx, err := e.db.BeginRo(ctx) + if err != nil { + return nil, err + } + + defer tx.Rollback() + var baseFee *uint256.Int if payload.BaseFeePerGas != nil { var overflow bool @@ -162,8 +170,18 @@ func (e *EngineImpl) NewPayloadV1(ctx context.Context, payload *ExecutionPayload log.Warn("NewPayload", "err", err) return nil, err } - - return convertPayloadStatus(res), nil + payloadStatus := convertPayloadStatus(res) + if payloadStatus["latestValidHash"] != nil { + latestValidHash := payloadStatus["latestValidHash"].(common.Hash) + isValidHashPos, err := rawdb.IsPosBlock(tx, latestValidHash) + if err != nil { + return nil, err + } + if !isValidHashPos { + payloadStatus["latestValidHash"] = common.Hash{} + } + } + return payloadStatus, nil } func (e *EngineImpl) GetPayloadV1(ctx context.Context, payloadID hexutil.Bytes) (*ExecutionPayload, error) { diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go index 5393535b61f..1111f926786 100644 --- a/core/rawdb/accessors_chain.go +++ b/core/rawdb/accessors_chain.go @@ -1603,3 +1603,13 @@ func Transitioned(db kv.Getter, blockNum uint64, 
terminalTotalDifficulty *big.In return headerTd.Cmp(terminalTotalDifficulty) >= 0, nil } + +// Transitioned returns true if the block number comes after POS transition or is the last POW block +func IsPosBlock(db kv.Getter, blockHash common.Hash) (trans bool, err error) { + header, err := ReadHeaderByHash(db, blockHash) + if err != nil { + return false, err + } + + return header.Difficulty.Cmp(common.Big0) == 0, nil +} From b98028078521cc5335e4fb15641058f44c9e5973 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Sun, 3 Jul 2022 17:36:39 +0200 Subject: [PATCH 028/152] Fixed hive test on invalid transition payload (#4618) * experiment #1 * experiment #2 * experiment #3 * experiment 4 --- cmd/rpcdaemon/commands/engine_api.go | 32 +++++++++++++++------ turbo/stages/headerdownload/header_algos.go | 3 ++ 2 files changed, 27 insertions(+), 8 deletions(-) diff --git a/cmd/rpcdaemon/commands/engine_api.go b/cmd/rpcdaemon/commands/engine_api.go index 5fb9c9eb059..e6f6e149525 100644 --- a/cmd/rpcdaemon/commands/engine_api.go +++ b/cmd/rpcdaemon/commands/engine_api.go @@ -111,8 +111,25 @@ func (e *EngineImpl) ForkchoiceUpdatedV1(ctx context.Context, forkChoiceState *F return nil, err } + payloadStatus := convertPayloadStatus(reply.PayloadStatus) + if reply.PayloadStatus.Status == remote.EngineStatus_INVALID && payloadStatus["latestValidHash"] != nil { + tx, err := e.db.BeginRo(ctx) + if err != nil { + return nil, err + } + + defer tx.Rollback() + latestValidHash := payloadStatus["latestValidHash"].(common.Hash) + isValidHashPos, err := rawdb.IsPosBlock(tx, latestValidHash) + if err != nil { + return nil, err + } + if !isValidHashPos { + payloadStatus["latestValidHash"] = common.Hash{} + } + } json := map[string]interface{}{ - "payloadStatus": convertPayloadStatus(reply.PayloadStatus), + "payloadStatus": payloadStatus, } if reply.PayloadId != 0 { encodedPayloadId := make([]byte, 8) @@ -128,13 +145,6 @@ func (e *EngineImpl) ForkchoiceUpdatedV1(ctx context.Context, 
forkChoiceState *F func (e *EngineImpl) NewPayloadV1(ctx context.Context, payload *ExecutionPayload) (map[string]interface{}, error) { log.Trace("Received NewPayload", "height", uint64(payload.BlockNumber), "hash", payload.BlockHash) - tx, err := e.db.BeginRo(ctx) - if err != nil { - return nil, err - } - - defer tx.Rollback() - var baseFee *uint256.Int if payload.BaseFeePerGas != nil { var overflow bool @@ -172,6 +182,12 @@ func (e *EngineImpl) NewPayloadV1(ctx context.Context, payload *ExecutionPayload } payloadStatus := convertPayloadStatus(res) if payloadStatus["latestValidHash"] != nil { + tx, err := e.db.BeginRo(ctx) + if err != nil { + return nil, err + } + + defer tx.Rollback() latestValidHash := payloadStatus["latestValidHash"].(common.Hash) isValidHashPos, err := rawdb.IsPosBlock(tx, latestValidHash) if err != nil { diff --git a/turbo/stages/headerdownload/header_algos.go b/turbo/stages/headerdownload/header_algos.go index c0ed7df24c5..c250f5b2cc7 100644 --- a/turbo/stages/headerdownload/header_algos.go +++ b/turbo/stages/headerdownload/header_algos.go @@ -898,6 +898,9 @@ func (hi *HeaderInserter) FeedHeaderPoS(db kv.GetPut, header *types.Header, hash return fmt.Errorf("[%s] failed to WriteTd: %w", hi.logPrefix, err) } rawdb.WriteHeader(db, header) + if err = rawdb.WriteHeaderNumber(db, hash, blockHeight); err != nil { + return err + } hi.highest = blockHeight hi.highestHash = hash From 8e3c099490948ce33729c991b4bdff553420c201 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Sun, 3 Jul 2022 18:45:27 +0200 Subject: [PATCH 029/152] fix panic (#4620) --- core/rawdb/accessors_chain.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go index 1111f926786..256e96c7705 100644 --- a/core/rawdb/accessors_chain.go +++ b/core/rawdb/accessors_chain.go @@ -1610,6 +1610,9 @@ func IsPosBlock(db kv.Getter, blockHash common.Hash) (trans bool, err error) { if err != nil { return false, err } + if header == 
nil { + return false, nil + } return header.Difficulty.Cmp(common.Big0) == 0, nil } From 8ae9381f0fa8372eb25073db1d9a705dc618e6e5 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Mon, 4 Jul 2022 01:40:12 +0200 Subject: [PATCH 030/152] extra logging (#4622) --- eth/stagedsync/stage_headers.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index fd0c70e8bd5..68553f986e8 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -514,6 +514,8 @@ func handleNewPayload( } if header.Number.Uint64() != parent.Number.Uint64()+1 { + cfg.hd.BeaconRequestList.Remove(requestId) + cfg.hd.ReportBadHeaderPoS(headerHash, header.ParentHash) return &privateapi.PayloadStatus{ Status: remote.EngineStatus_INVALID, LatestValidHash: header.ParentHash, From 2a0fda33dd806744bc673c87913b0f0ef998784e Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Mon, 4 Jul 2022 11:17:39 +0600 Subject: [PATCH 031/152] snapshots table (#4624) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index cd52eb705b8..d0af2ecc461 100644 --- a/go.mod +++ b/go.mod @@ -35,7 +35,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible - github.com/ledgerwatch/erigon-lib v0.0.0-20220702183834-707a89842d6b + github.com/ledgerwatch/erigon-lib v0.0.0-20220704045547-cdbb792be709 github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 diff --git a/go.sum b/go.sum index 42febcd921c..f5138074a24 100644 --- a/go.sum +++ b/go.sum @@ -386,8 +386,8 @@ github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3P github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/leanovate/gopter v0.2.9 
h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220702183834-707a89842d6b h1:jxk2V9PBN9z2FQIL2SAV3V1wq01RUPz2kgzSqaCZmJQ= -github.com/ledgerwatch/erigon-lib v0.0.0-20220702183834-707a89842d6b/go.mod h1:7sQ5B5m54zoo7RVRVukH3YZCYVrCC+BmwDBD+9KyTrE= +github.com/ledgerwatch/erigon-lib v0.0.0-20220704045547-cdbb792be709 h1:vYR135oqA1gu6XCjUi60Sis9qQE64YkXNSzo4ug+7H8= +github.com/ledgerwatch/erigon-lib v0.0.0-20220704045547-cdbb792be709/go.mod h1:7sQ5B5m54zoo7RVRVukH3YZCYVrCC+BmwDBD+9KyTrE= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= From 00ee68e4a15b5f96c2d1ebf88dbeb52501d15d56 Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Mon, 4 Jul 2022 11:28:10 +0200 Subject: [PATCH 032/152] FeedHeaderPoS already writes header number (#4626) * FeedHeaderPoS already calls WriteHeaderNumber * Moreover, WriteHeader already writes to HeaderNumber --- eth/stagedsync/stage_headers.go | 4 ---- turbo/stages/headerdownload/header_algos.go | 3 --- 2 files changed, 7 deletions(-) diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 68553f986e8..5f2c59cb94b 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -584,10 +584,6 @@ func verifyAndSaveNewPoSHeader( }, false, nil } - if err := rawdb.WriteHeaderNumber(tx, headerHash, headerNumber); err != nil { - return nil, false, err - } - if err := headerInserter.FeedHeaderPoS(tx, header, headerHash); err != nil { return nil, false, err } diff --git a/turbo/stages/headerdownload/header_algos.go b/turbo/stages/headerdownload/header_algos.go index c250f5b2cc7..c0ed7df24c5 100644 --- 
a/turbo/stages/headerdownload/header_algos.go +++ b/turbo/stages/headerdownload/header_algos.go @@ -898,9 +898,6 @@ func (hi *HeaderInserter) FeedHeaderPoS(db kv.GetPut, header *types.Header, hash return fmt.Errorf("[%s] failed to WriteTd: %w", hi.logPrefix, err) } rawdb.WriteHeader(db, header) - if err = rawdb.WriteHeaderNumber(db, hash, blockHeight); err != nil { - return err - } hi.highest = blockHeight hi.highestHash = hash From 782b8b65572b0d2e5f1507b6a08a7be9b58dcb4f Mon Sep 17 00:00:00 2001 From: Temirlan Date: Mon, 4 Jul 2022 17:07:45 +0600 Subject: [PATCH 033/152] rpcdaemon, erigon: add new flags (#4623) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Темирлан Ермагамбет --- cmd/rpcdaemon/cli/config.go | 17 +++++++---- cmd/rpcdaemon/cli/httpcfg/http_cfg.go | 3 ++ cmd/utils/flags.go | 17 +++++------ turbo/cli/default_flags.go | 6 ++++ turbo/cli/flags.go | 43 +++++++++++++++++++++++++++ 5 files changed, 71 insertions(+), 15 deletions(-) diff --git a/cmd/rpcdaemon/cli/config.go b/cmd/rpcdaemon/cli/config.go index 5ecedf225bd..59d40050e5a 100644 --- a/cmd/rpcdaemon/cli/config.go +++ b/cmd/rpcdaemon/cli/config.go @@ -6,6 +6,7 @@ import ( "encoding/binary" "errors" "fmt" + "github.com/ledgerwatch/erigon/rpc/rpccfg" "net" "net/http" "os" @@ -14,10 +15,6 @@ import ( "strings" "time" - "github.com/ledgerwatch/erigon/internal/debug" - "github.com/ledgerwatch/erigon/node/nodecfg/datadir" - "github.com/ledgerwatch/erigon/rpc/rpccfg" - "github.com/ledgerwatch/erigon-lib/direct" "github.com/ledgerwatch/erigon-lib/gointerfaces" "github.com/ledgerwatch/erigon-lib/gointerfaces/grpcutil" @@ -36,8 +33,10 @@ import ( "github.com/ledgerwatch/erigon/common/hexutil" "github.com/ledgerwatch/erigon/common/paths" "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/internal/debug" "github.com/ledgerwatch/erigon/node" "github.com/ledgerwatch/erigon/node/nodecfg" + 
"github.com/ledgerwatch/erigon/node/nodecfg/datadir" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/turbo/rpchelper" @@ -94,6 +93,12 @@ func RootCommand() (*cobra.Command, *httpcfg.HttpCfg) { rootCmd.PersistentFlags().StringVar(&cfg.StarknetGRPCAddress, "starknet.grpc.address", "127.0.0.1:6066", "Starknet GRPC address") rootCmd.PersistentFlags().StringVar(&cfg.JWTSecretPath, utils.JWTSecretPath.Name, utils.JWTSecretPath.Value, "Token to ensure safe connection between CL and EL") rootCmd.PersistentFlags().BoolVar(&cfg.TraceRequests, utils.HTTPTraceFlag.Name, false, "Trace HTTP requests with INFO level") + rootCmd.PersistentFlags().DurationVar(&cfg.HTTPTimeouts.ReadTimeout, "http.timeouts.read", rpccfg.DefaultHTTPTimeouts.ReadTimeout, "Maximum duration for reading the entire request, including the body.") + rootCmd.PersistentFlags().DurationVar(&cfg.HTTPTimeouts.WriteTimeout, "http.timeouts.write", rpccfg.DefaultHTTPTimeouts.WriteTimeout, "Maximum duration before timing out writes of the response. It is reset whenever a new request's header is read") + rootCmd.PersistentFlags().DurationVar(&cfg.HTTPTimeouts.IdleTimeout, "http.timeouts.idle", rpccfg.DefaultHTTPTimeouts.IdleTimeout, "Maximum amount of time to wait for the next request when keep-alives are enabled. If http.timeouts.idle is zero, the value of http.timeouts.read is used") + rootCmd.PersistentFlags().DurationVar(&cfg.EngineTimeouts.ReadTimeout, "engine.timeouts.read", rpccfg.DefaultHTTPTimeouts.ReadTimeout, "Maximum duration for reading the entire request, including the body.") + rootCmd.PersistentFlags().DurationVar(&cfg.EngineTimeouts.WriteTimeout, "engine.timeouts.write", rpccfg.DefaultHTTPTimeouts.WriteTimeout, "Maximum duration before timing out writes of the response. 
It is reset whenever a new request's header is read.") + rootCmd.PersistentFlags().DurationVar(&cfg.EngineTimeouts.IdleTimeout, "engine.timeouts.idle", rpccfg.DefaultHTTPTimeouts.IdleTimeout, "Maximum amount of time to wait for the next request when keep-alives are enabled. If engine.timeouts.idle is zero, the value of engine.timeouts.read is used.") if err := rootCmd.MarkPersistentFlagFilename("rpc.accessList", "json"); err != nil { panic(err) @@ -484,7 +489,7 @@ func StartRpcServer(ctx context.Context, cfg httpcfg.HttpCfg, rpcAPI []rpc.API) return err } - listener, _, err := node.StartHTTPEndpoint(httpEndpoint, rpccfg.DefaultHTTPTimeouts, apiHandler) + listener, _, err := node.StartHTTPEndpoint(httpEndpoint, cfg.HTTPTimeouts, apiHandler) if err != nil { return fmt.Errorf("could not start RPC api: %w", err) } @@ -637,7 +642,7 @@ func createEngineListener(cfg httpcfg.HttpCfg, engineApi []rpc.API) (*http.Serve return nil, nil, "", err } - engineListener, _, err := node.StartHTTPEndpoint(engineHttpEndpoint, rpccfg.DefaultHTTPTimeouts, engineApiHandler) + engineListener, _, err := node.StartHTTPEndpoint(engineHttpEndpoint, cfg.EngineTimeouts, engineApiHandler) if err != nil { return nil, nil, "", fmt.Errorf("could not start RPC api: %w", err) } diff --git a/cmd/rpcdaemon/cli/httpcfg/http_cfg.go b/cmd/rpcdaemon/cli/httpcfg/http_cfg.go index 42e15eb17d5..a198722f1eb 100644 --- a/cmd/rpcdaemon/cli/httpcfg/http_cfg.go +++ b/cmd/rpcdaemon/cli/httpcfg/http_cfg.go @@ -4,6 +4,7 @@ import ( "github.com/ledgerwatch/erigon-lib/kv/kvcache" "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/node/nodecfg/datadir" + "github.com/ledgerwatch/erigon/rpc/rpccfg" ) type HttpCfg struct { @@ -43,4 +44,6 @@ type HttpCfg struct { StarknetGRPCAddress string JWTSecretPath string // Engine API Authentication TraceRequests bool // Always trace requests in INFO level + HTTPTimeouts rpccfg.HTTPTimeouts + EngineTimeouts rpccfg.HTTPTimeouts } diff --git a/cmd/utils/flags.go 
b/cmd/utils/flags.go index d4cc43990ef..f04ba285e6d 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -20,15 +20,6 @@ package utils import ( "crypto/ecdsa" "fmt" - "io" - "math/big" - "path/filepath" - "runtime" - "strconv" - "strings" - "text/tabwriter" - "text/template" - "github.com/c2h5oh/datasize" "github.com/ledgerwatch/erigon-lib/kv/kvcache" "github.com/ledgerwatch/erigon-lib/txpool" @@ -39,6 +30,14 @@ import ( "github.com/spf13/cobra" "github.com/spf13/pflag" "github.com/urfave/cli" + "io" + "math/big" + "path/filepath" + "runtime" + "strconv" + "strings" + "text/tabwriter" + "text/template" "github.com/ledgerwatch/erigon/eth/protocols/eth" "github.com/ledgerwatch/erigon/params/networkname" diff --git a/turbo/cli/default_flags.go b/turbo/cli/default_flags.go index 93d61401c22..3f4aa8cc07d 100644 --- a/turbo/cli/default_flags.go +++ b/turbo/cli/default_flags.go @@ -70,6 +70,12 @@ var DefaultFlags = []cli.Flag{ utils.MemoryOverlayFlag, utils.TxpoolApiAddrFlag, utils.TraceMaxtracesFlag, + HTTPReadTimeoutFlag, + HTTPWriteTimeoutFlag, + HTTPIdleTimeoutFlag, + EngineReadTimeoutFlag, + EngineWriteTimeoutFlag, + EngineIdleTimeoutFlag, utils.SnapKeepBlocksFlag, utils.SnapStopFlag, diff --git a/turbo/cli/flags.go b/turbo/cli/flags.go index bdcbdd1983b..cd58ceea21e 100644 --- a/turbo/cli/flags.go +++ b/turbo/cli/flags.go @@ -2,6 +2,7 @@ package cli import ( "fmt" + "github.com/ledgerwatch/erigon/rpc/rpccfg" "strings" "time" @@ -150,6 +151,38 @@ var ( Name: "healthcheck", Usage: "Enable grpc health check", } + + HTTPReadTimeoutFlag = cli.DurationFlag{ + Name: "http.timeouts.read", + Usage: "Maximum duration for reading the entire request, including the body.", + Value: rpccfg.DefaultHTTPTimeouts.ReadTimeout, + } + HTTPWriteTimeoutFlag = cli.DurationFlag{ + Name: "http.timeouts.write", + Usage: "Maximum duration before timing out writes of the response. 
It is reset whenever a new request's header is read.", + Value: rpccfg.DefaultHTTPTimeouts.WriteTimeout, + } + HTTPIdleTimeoutFlag = cli.DurationFlag{ + Name: "http.timeouts.idle", + Usage: "Maximum amount of time to wait for the next request when keep-alives are enabled. If http.timeouts.idle is zero, the value of http.timeouts.read is used.", + Value: rpccfg.DefaultHTTPTimeouts.IdleTimeout, + } + + EngineReadTimeoutFlag = cli.DurationFlag{ + Name: "engine.timeouts.read", + Usage: "Maximum duration for reading the entire request, including the body.", + Value: rpccfg.DefaultHTTPTimeouts.ReadTimeout, + } + EngineWriteTimeoutFlag = cli.DurationFlag{ + Name: "engine.timeouts.write", + Usage: "Maximum duration before timing out writes of the response. It is reset whenever a new request's header is read.", + Value: rpccfg.DefaultHTTPTimeouts.WriteTimeout, + } + EngineIdleTimeoutFlag = cli.DurationFlag{ + Name: "engine.timeouts.idle", + Usage: "Maximum amount of time to wait for the next request when keep-alives are enabled. 
If engine.timeouts.idle is zero, the value of engine.timeouts.read is used.", + Value: rpccfg.DefaultHTTPTimeouts.IdleTimeout, + } ) func ApplyFlagsForEthConfig(ctx *cli.Context, cfg *ethconfig.Config) { @@ -298,6 +331,16 @@ func setEmbeddedRpcDaemon(ctx *cli.Context, cfg *nodecfg.Config) { HttpCORSDomain: strings.Split(ctx.GlobalString(utils.HTTPCORSDomainFlag.Name), ","), HttpVirtualHost: strings.Split(ctx.GlobalString(utils.HTTPVirtualHostsFlag.Name), ","), API: strings.Split(ctx.GlobalString(utils.HTTPApiFlag.Name), ","), + HTTPTimeouts: rpccfg.HTTPTimeouts{ + ReadTimeout: ctx.GlobalDuration(HTTPReadTimeoutFlag.Name), + WriteTimeout: ctx.GlobalDuration(HTTPWriteTimeoutFlag.Name), + IdleTimeout: ctx.GlobalDuration(HTTPIdleTimeoutFlag.Name), + }, + EngineTimeouts: rpccfg.HTTPTimeouts{ + ReadTimeout: ctx.GlobalDuration(EngineReadTimeoutFlag.Name), + WriteTimeout: ctx.GlobalDuration(EngineWriteTimeoutFlag.Name), + IdleTimeout: ctx.GlobalDuration(HTTPIdleTimeoutFlag.Name), + }, WebsocketEnabled: ctx.GlobalIsSet(utils.WSEnabledFlag.Name), RpcBatchConcurrency: ctx.GlobalUint(utils.RpcBatchConcurrencyFlag.Name), From 9562b38c6458ef07829ac9bbc047d2196f546283 Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Mon, 4 Jul 2022 13:19:08 +0200 Subject: [PATCH 034/152] Small refactoring: extract applyOverrides func (#4628) --- core/genesis.go | 31 +++++++++++++------------------ 1 file changed, 13 insertions(+), 18 deletions(-) diff --git a/core/genesis.go b/core/genesis.go index 047fe37e56d..c298a5e6de7 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -210,6 +210,16 @@ func WriteGenesisBlock(db kv.RwTx, genesis *Genesis, overrideMergeNetsplitBlock, if storedErr != nil { return nil, nil, storedErr } + + applyOverrides := func(config *params.ChainConfig) { + if overrideMergeNetsplitBlock != nil { + config.MergeNetsplitBlock = overrideMergeNetsplitBlock + } + if overrideTerminalTotalDifficulty != nil { + 
config.TerminalTotalDifficulty = overrideTerminalTotalDifficulty + } + } + if (storedHash == common.Hash{}) { custom := true if genesis == nil { @@ -217,12 +227,7 @@ func WriteGenesisBlock(db kv.RwTx, genesis *Genesis, overrideMergeNetsplitBlock, genesis = DefaultGenesisBlock() custom = false } - if overrideMergeNetsplitBlock != nil { - genesis.Config.MergeNetsplitBlock = overrideMergeNetsplitBlock - } - if overrideTerminalTotalDifficulty != nil { - genesis.Config.TerminalTotalDifficulty = overrideTerminalTotalDifficulty - } + applyOverrides(genesis.Config) block, _, err1 := genesis.Write(db) if err1 != nil { return genesis.Config, nil, err1 @@ -250,12 +255,7 @@ func WriteGenesisBlock(db kv.RwTx, genesis *Genesis, overrideMergeNetsplitBlock, } // Get the existing chain configuration. newCfg := genesis.configOrDefault(storedHash) - if overrideMergeNetsplitBlock != nil { - newCfg.MergeNetsplitBlock = overrideMergeNetsplitBlock - } - if overrideTerminalTotalDifficulty != nil { - newCfg.TerminalTotalDifficulty = overrideTerminalTotalDifficulty - } + applyOverrides(newCfg) if err := newCfg.CheckConfigForkOrder(); err != nil { return newCfg, nil, err } @@ -276,12 +276,7 @@ func WriteGenesisBlock(db kv.RwTx, genesis *Genesis, overrideMergeNetsplitBlock, // In that case, only apply the overrides. if genesis == nil && params.ChainConfigByGenesisHash(storedHash) == nil { newCfg = storedCfg - if overrideMergeNetsplitBlock != nil { - newCfg.MergeNetsplitBlock = overrideMergeNetsplitBlock - } - if overrideTerminalTotalDifficulty != nil { - newCfg.TerminalTotalDifficulty = overrideTerminalTotalDifficulty - } + applyOverrides(newCfg) } // Check config compatibility and write the config. Compatibility errors // are returned to the caller unless we're already at block zero. 
From 99d9535fd837d0c827d107d8f73c58392eb82e7f Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Mon, 4 Jul 2022 18:43:46 +0600 Subject: [PATCH 035/152] new node to not print warning #4629 Open --- migrations/reset_blocks.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/migrations/reset_blocks.go b/migrations/reset_blocks.go index add8240bad2..16256bf3267 100644 --- a/migrations/reset_blocks.go +++ b/migrations/reset_blocks.go @@ -6,6 +6,7 @@ import ( "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/rawdb/rawdbreset" + "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/node/nodecfg/datadir" "github.com/ledgerwatch/erigon/turbo/snapshotsync/snap" "github.com/ledgerwatch/log/v3" @@ -42,7 +43,10 @@ var resetBlocks = Migration{ if err != nil { return err } - log.Warn("NOTE: this migration will remove recent blocks (and senders) to fix several recent bugs. Your node will re-download last ~400K blocks, should not take very long") + headersProgress, _ := stages.GetStageProgress(tx, stages.Headers) + if headersProgress > 0 { + log.Warn("NOTE: this migration will remove recent blocks (and senders) to fix several recent bugs. 
Your node will re-download last ~400K blocks, should not take very long") + } if err := snap.RemoveNonPreverifiedFiles(chainConfig.ChainName, dirs.Snap); err != nil { return err From ff847cd4595d03efca7a0b590629fc7fbd78a616 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Mon, 4 Jul 2022 18:44:15 +0600 Subject: [PATCH 036/152] Snapshots: save initial list to db, to avoid future snapshots downloading #4625 --- cmd/downloader/downloader/downloader.go | 7 ----- .../downloader/downloader_grpc_server.go | 7 +---- core/rawdb/accessors_chain.go | 27 +++++++++++++++++++ eth/stagedsync/stage_headers.go | 24 +++++++++++++++-- 4 files changed, 50 insertions(+), 15 deletions(-) diff --git a/cmd/downloader/downloader/downloader.go b/cmd/downloader/downloader/downloader.go index 5ea02a0fe33..ec6dd4357ec 100644 --- a/cmd/downloader/downloader/downloader.go +++ b/cmd/downloader/downloader/downloader.go @@ -109,13 +109,6 @@ func (d *Downloader) SnapDir() string { return d.cfg.DataDir } -func (d *Downloader) IsInitialSync() bool { - d.clientLock.RLock() - defer d.clientLock.RUnlock() - _, lastPart := filepath.Split(d.cfg.DataDir) - return lastPart == "tmp" -} - func (d *Downloader) ReCalcStats(interval time.Duration) { d.statsLock.Lock() defer d.statsLock.Unlock() diff --git a/cmd/downloader/downloader/downloader_grpc_server.go b/cmd/downloader/downloader/downloader_grpc_server.go index 11f1f596039..8ff4757f58c 100644 --- a/cmd/downloader/downloader/downloader_grpc_server.go +++ b/cmd/downloader/downloader/downloader_grpc_server.go @@ -28,8 +28,6 @@ type GrpcServer struct { // Download - create new .torrent ONLY if initialSync, everything else Erigon can generate by itself func (s *GrpcServer) Download(ctx context.Context, request *proto_downloader.DownloadRequest) (*emptypb.Empty, error) { - isInitialSync := s.d.IsInitialSync() - torrentClient := s.d.Torrent() mi := &metainfo.MetaInfo{AnnounceList: Trackers} for _, it := range request.Items { @@ -38,6 +36,7 @@ func (s *GrpcServer) 
Download(ctx context.Context, request *proto_downloader.Dow return nil, err } } + ok, err := AddSegment(it.Path, s.d.SnapDir(), torrentClient) if err != nil { return nil, fmt.Errorf("AddSegment: %w", err) @@ -46,10 +45,6 @@ func (s *GrpcServer) Download(ctx context.Context, request *proto_downloader.Dow continue } - if !isInitialSync { - continue - } - hash := Proto2InfoHash(it.TorrentHash) if _, ok := torrentClient.Torrent(hash); ok { continue diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go index 256e96c7705..e2100d45e77 100644 --- a/core/rawdb/accessors_chain.go +++ b/core/rawdb/accessors_chain.go @@ -1616,3 +1616,30 @@ func IsPosBlock(db kv.Getter, blockHash common.Hash) (trans bool, err error) { return header.Difficulty.Cmp(common.Big0) == 0, nil } + +func ReadSnapshots(tx kv.Tx) (map[string]string, error) { + res := map[string]string{} + if err := tx.ForEach(kv.Snapshots, nil, func(k, v []byte) error { + res[string(k)] = string(v) + return nil + }); err != nil { + return nil, err + } + return res, nil +} + +func WriteSnapshots(tx kv.RwTx, list map[string]string) error { + for k, v := range list { + has, err := tx.Has(kv.Snapshots, []byte(k)) + if err != nil { + return err + } + if has { + continue + } + if err = tx.Put(kv.Snapshots, []byte(k), []byte(v)); err != nil { + return err + } + } + return nil +} diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 5f2c59cb94b..c80220a42b4 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -1165,7 +1165,7 @@ func DownloadAndIndexSnapshotsIfNeed(s *StageState, ctx context.Context, tx kv.R return nil } - if err := WaitForDownloader(ctx, cfg); err != nil { + if err := WaitForDownloader(ctx, cfg, tx); err != nil { return err } if err := cfg.snapshots.Reopen(); err != nil { @@ -1280,16 +1280,30 @@ func DownloadAndIndexSnapshotsIfNeed(s *StageState, ctx context.Context, tx kv.R // WaitForDownloader - wait for Downloader service to 
download all expected snapshots // for MVP we sync with Downloader only once, in future will send new snapshots also -func WaitForDownloader(ctx context.Context, cfg HeadersCfg) error { +func WaitForDownloader(ctx context.Context, cfg HeadersCfg, tx kv.RwTx) error { if cfg.snapshots.Cfg().NoDownloader { return nil } + snInDB, err := rawdb.ReadSnapshots(tx) + if err != nil { + return err + } + dbEmpty := len(snInDB) == 0 + // send all hashes to the Downloader service preverified := snapshothashes.KnownConfig(cfg.chainConfig.ChainName).Preverified req := &proto_downloader.DownloadRequest{Items: make([]*proto_downloader.DownloadItem, 0, len(preverified))} i := 0 for _, p := range preverified { + _, has := snInDB[p.Name] + if !dbEmpty && !has { + continue + } + if dbEmpty { + snInDB[p.Name] = p.Hash + } + req.Items = append(req.Items, &proto_downloader.DownloadItem{ TorrentHash: downloadergrpc.String2Proto(p.Hash), Path: p.Name, @@ -1361,5 +1375,11 @@ Finish: return err } } + + if dbEmpty { + if err = rawdb.WriteSnapshots(tx, snInDB); err != nil { + return err + } + } return nil } From d83b7a4ae02bd3a8c62322503470456ea4038d14 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Tue, 5 Jul 2022 01:31:16 +0200 Subject: [PATCH 037/152] Proper PoS Error reporting (#4631) * better reporting * removed debug log * proper error reporting --- cmd/integration/commands/stages.go | 8 ++++---- cmd/integration/commands/state_stages.go | 6 +++--- cmd/state/commands/state_recon.go | 2 +- eth/backend.go | 4 ++-- eth/stagedsync/stage_execute.go | 7 +++++++ eth/stagedsync/stage_headers.go | 2 +- eth/stagedsync/stage_interhashes.go | 10 +++++++++- eth/stagedsync/stage_interhashes_test.go | 6 +++--- eth/stagedsync/stage_senders.go | 9 ++++++++- eth/stagedsync/stage_senders_test.go | 2 +- ethdb/privateapi/ethbackend.go | 1 - turbo/stages/mock_sentry.go | 7 ++++--- turbo/stages/stageloop.go | 10 ++++++---- 13 files changed, 49 insertions(+), 25 deletions(-) diff --git 
a/cmd/integration/commands/stages.go b/cmd/integration/commands/stages.go index eb57cc871e9..45a51f3344f 100644 --- a/cmd/integration/commands/stages.go +++ b/cmd/integration/commands/stages.go @@ -609,7 +609,7 @@ func stageSenders(db kv.RwDB, ctx context.Context) error { return err } - cfg := stagedsync.StageSendersCfg(db, chainConfig, false, tmpdir, pm, br) + cfg := stagedsync.StageSendersCfg(db, chainConfig, false, tmpdir, pm, br, nil) if unwind > 0 { u := sync.NewUnwindState(stages.Senders, s.BlockNumber-unwind, s.BlockNumber) if err = stagedsync.UnwindSendersStage(u, tx, cfg, ctx); err != nil { @@ -665,7 +665,7 @@ func stageExec(db kv.RwDB, ctx context.Context) error { cfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, nil, chainConfig, engine, vmConfig, nil, /*stateStream=*/ false, - /*badBlockHalt=*/ false, tmpdir, getBlockReader(chainConfig, db)) + /*badBlockHalt=*/ false, tmpdir, getBlockReader(chainConfig, db), nil) if unwind > 0 { u := sync.NewUnwindState(stages.Execution, s.BlockNumber-unwind, s.BlockNumber) err := stagedsync.UnwindExecutionStage(u, s, nil, ctx, cfg, false) @@ -724,7 +724,7 @@ func stageTrie(db kv.RwDB, ctx context.Context) error { log.Info("StageExec", "progress", execStage.BlockNumber) log.Info("StageTrie", "progress", s.BlockNumber) - cfg := stagedsync.StageTrieCfg(db, true, true, false, tmpdir, getBlockReader(chainConfig, db)) + cfg := stagedsync.StageTrieCfg(db, true, true, false, tmpdir, getBlockReader(chainConfig, db), nil) if unwind > 0 { u := sync.NewUnwindState(stages.IntermediateHashes, s.BlockNumber-unwind, s.BlockNumber) if err := stagedsync.UnwindIntermediateHashesStage(u, s, tx, cfg, ctx); err != nil { @@ -1209,7 +1209,7 @@ func newSync(ctx context.Context, db kv.RwDB, miningConfig *params.MiningConfig) stagedsync.StageMiningCreateBlockCfg(db, miner, *chainConfig, engine, nil, nil, nil, tmpdir), stagedsync.StageMiningExecCfg(db, miner, events, *chainConfig, engine, &vm.Config{}, tmpdir, nil), 
stagedsync.StageHashStateCfg(db, tmpdir), - stagedsync.StageTrieCfg(db, false, true, false, tmpdir, br), + stagedsync.StageTrieCfg(db, false, true, false, tmpdir, br, nil), stagedsync.StageMiningFinishCfg(db, *chainConfig, engine, miner, ctx.Done()), ), stagedsync.MiningUnwindOrder, diff --git a/cmd/integration/commands/state_stages.go b/cmd/integration/commands/state_stages.go index d620bb0ab86..1edb32fcbe7 100644 --- a/cmd/integration/commands/state_stages.go +++ b/cmd/integration/commands/state_stages.go @@ -181,7 +181,7 @@ func syncBySmallSteps(db kv.RwDB, miningConfig params.MiningConfig, ctx context. stateStages.DisableStages(stages.Headers, stages.BlockHashes, stages.Bodies, stages.Senders) - execCfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, changeSetHook, chainConfig, engine, vmConfig, nil, false, false, dirs.Tmp, getBlockReader(chainConfig, db)) + execCfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, changeSetHook, chainConfig, engine, vmConfig, nil, false, false, dirs.Tmp, getBlockReader(chainConfig, db), nil) execUntilFunc := func(execToBlock uint64) func(firstCycle bool, badBlockUnwind bool, stageState *stagedsync.StageState, unwinder stagedsync.Unwinder, tx kv.RwTx) error { return func(firstCycle bool, badBlockUnwind bool, s *stagedsync.StageState, unwinder stagedsync.Unwinder, tx kv.RwTx) error { @@ -426,7 +426,7 @@ func loopIh(db kv.RwDB, ctx context.Context, unwind uint64) error { } _ = sync.SetCurrentStage(stages.IntermediateHashes) u = &stagedsync.UnwindState{ID: stages.IntermediateHashes, UnwindPoint: to} - if err = stagedsync.UnwindIntermediateHashesStage(u, stage(sync, tx, nil, stages.IntermediateHashes), tx, stagedsync.StageTrieCfg(db, true, true, false, dirs.Tmp, getBlockReader(chainConfig, db)), ctx); err != nil { + if err = stagedsync.UnwindIntermediateHashesStage(u, stage(sync, tx, nil, stages.IntermediateHashes), tx, stagedsync.StageTrieCfg(db, true, true, false, dirs.Tmp, getBlockReader(chainConfig, db), nil), ctx); 
err != nil { return err } must(tx.Commit()) @@ -493,7 +493,7 @@ func loopExec(db kv.RwDB, ctx context.Context, unwind uint64) error { cfg := stagedsync.StageExecuteBlocksCfg(db, pm, batchSize, nil, chainConfig, engine, vmConfig, nil, /*stateStream=*/ false, - /*badBlockHalt=*/ false, dirs.Tmp, getBlockReader(chainConfig, db)) + /*badBlockHalt=*/ false, dirs.Tmp, getBlockReader(chainConfig, db), nil) // set block limit of execute stage sync.MockExecFunc(stages.Execution, func(firstCycle bool, badBlockUnwind bool, stageState *stagedsync.StageState, unwinder stagedsync.Unwinder, tx kv.RwTx) error { diff --git a/cmd/state/commands/state_recon.go b/cmd/state/commands/state_recon.go index 478ec8bd3b1..83f6f9bd156 100644 --- a/cmd/state/commands/state_recon.go +++ b/cmd/state/commands/state_recon.go @@ -765,7 +765,7 @@ func Recon(genesis *core.Genesis, logger log.Logger) error { if rwTx, err = db.BeginRw(ctx); err != nil { return err } - if _, err = stagedsync.RegenerateIntermediateHashes("recon", rwTx, stagedsync.StageTrieCfg(db, false /* checkRoot */, false /* saveHashesToDB */, false /* badBlockHalt */, tmpDir, blockReader), common.Hash{}, make(chan struct{}, 1)); err != nil { + if _, err = stagedsync.RegenerateIntermediateHashes("recon", rwTx, stagedsync.StageTrieCfg(db, false /* checkRoot */, false /* saveHashesToDB */, false /* badBlockHalt */, tmpDir, blockReader, nil), common.Hash{}, make(chan struct{}, 1)); err != nil { return err } if err = rwTx.Commit(); err != nil { diff --git a/eth/backend.go b/eth/backend.go index 1741a2760a6..68fa281e2e8 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -366,7 +366,7 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere stagedsync.StageMiningCreateBlockCfg(backend.chainDB, miner, *backend.chainConfig, backend.engine, backend.txPool2, backend.txPool2DB, nil, tmpdir), stagedsync.StageMiningExecCfg(backend.chainDB, miner, backend.notifications.Events, *backend.chainConfig, backend.engine, 
&vm.Config{}, tmpdir, nil), stagedsync.StageHashStateCfg(backend.chainDB, tmpdir), - stagedsync.StageTrieCfg(backend.chainDB, false, true, true, tmpdir, blockReader), + stagedsync.StageTrieCfg(backend.chainDB, false, true, true, tmpdir, blockReader, nil), stagedsync.StageMiningFinishCfg(backend.chainDB, *backend.chainConfig, backend.engine, miner, backend.miningSealingQuit), ), stagedsync.MiningUnwindOrder, stagedsync.MiningPruneOrder) @@ -384,7 +384,7 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere stagedsync.StageMiningCreateBlockCfg(backend.chainDB, miningStatePos, *backend.chainConfig, backend.engine, backend.txPool2, backend.txPool2DB, param, tmpdir), stagedsync.StageMiningExecCfg(backend.chainDB, miningStatePos, backend.notifications.Events, *backend.chainConfig, backend.engine, &vm.Config{}, tmpdir, interrupt), stagedsync.StageHashStateCfg(backend.chainDB, tmpdir), - stagedsync.StageTrieCfg(backend.chainDB, false, true, true, tmpdir, blockReader), + stagedsync.StageTrieCfg(backend.chainDB, false, true, true, tmpdir, blockReader, nil), stagedsync.StageMiningFinishCfg(backend.chainDB, *backend.chainConfig, backend.engine, miningStatePos, backend.miningSealingQuit), ), stagedsync.MiningUnwindOrder, stagedsync.MiningPruneOrder) // We start the mining step diff --git a/eth/stagedsync/stage_execute.go b/eth/stagedsync/stage_execute.go index 200f95d6f0f..f40a126d865 100644 --- a/eth/stagedsync/stage_execute.go +++ b/eth/stagedsync/stage_execute.go @@ -34,6 +34,7 @@ import ( "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/turbo/services" "github.com/ledgerwatch/erigon/turbo/shards" + "github.com/ledgerwatch/erigon/turbo/stages/headerdownload" "github.com/ledgerwatch/log/v3" ) @@ -60,6 +61,7 @@ type ExecuteBlockCfg struct { stateStream bool accumulator *shards.Accumulator blockReader services.FullBlockReader + hd *headerdownload.HeaderDownload } func StageExecuteBlocksCfg( @@ -75,6 +77,7 @@ func 
StageExecuteBlocksCfg( badBlockHalt bool, tmpdir string, blockReader services.FullBlockReader, + hd *headerdownload.HeaderDownload, ) ExecuteBlockCfg { return ExecuteBlockCfg{ db: kv, @@ -89,6 +92,7 @@ func StageExecuteBlocksCfg( stateStream: stateStream, badBlockHalt: badBlockHalt, blockReader: blockReader, + hd: hd, } } @@ -292,6 +296,9 @@ Loop: if err = executeBlock(block, tx, batch, cfg, *cfg.vmConfig, writeChangeSets, writeReceipts, writeCallTraces, contractHasTEVM, initialCycle, effectiveEngine); err != nil { if !errors.Is(err, context.Canceled) { log.Warn(fmt.Sprintf("[%s] Execution failed", logPrefix), "block", blockNum, "hash", block.Hash().String(), "err", err) + if cfg.hd != nil { + cfg.hd.ReportBadHeaderPoS(blockHash, block.ParentHash()) + } if cfg.badBlockHalt { return err } diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index c80220a42b4..6a58e3217d2 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -994,7 +994,7 @@ func HeadersUnwind(u *UnwindState, s *StageState, tx kv.RwTx, cfg HeadersCfg, te return fmt.Errorf("iterate over headers to mark bad headers: %w", err) } } - if err := rawdb.TruncateCanonicalHash(tx, u.UnwindPoint+1, badBlock /* deleteHeaders */); err != nil { + if err := rawdb.TruncateCanonicalHash(tx, u.UnwindPoint+1, false /* deleteHeaders */); err != nil { return err } if badBlock { diff --git a/eth/stagedsync/stage_interhashes.go b/eth/stagedsync/stage_interhashes.go index 57a41ea723d..5fcab1f24f9 100644 --- a/eth/stagedsync/stage_interhashes.go +++ b/eth/stagedsync/stage_interhashes.go @@ -13,9 +13,11 @@ import ( "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/changeset" "github.com/ledgerwatch/erigon/common/dbutils" + "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types/accounts" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/turbo/services" + 
"github.com/ledgerwatch/erigon/turbo/stages/headerdownload" "github.com/ledgerwatch/erigon/turbo/trie" "github.com/ledgerwatch/log/v3" "golang.org/x/exp/slices" @@ -28,9 +30,10 @@ type TrieCfg struct { tmpDir string saveNewHashesToDB bool // no reason to save changes when calculating root for mining blockReader services.FullBlockReader + hd *headerdownload.HeaderDownload } -func StageTrieCfg(db kv.RwDB, checkRoot, saveNewHashesToDB, badBlockHalt bool, tmpDir string, blockReader services.FullBlockReader) TrieCfg { +func StageTrieCfg(db kv.RwDB, checkRoot, saveNewHashesToDB, badBlockHalt bool, tmpDir string, blockReader services.FullBlockReader, hd *headerdownload.HeaderDownload) TrieCfg { return TrieCfg{ db: db, checkRoot: checkRoot, @@ -38,6 +41,7 @@ func StageTrieCfg(db kv.RwDB, checkRoot, saveNewHashesToDB, badBlockHalt bool, t saveNewHashesToDB: saveNewHashesToDB, badBlockHalt: badBlockHalt, blockReader: blockReader, + hd: hd, } } @@ -96,6 +100,10 @@ func SpawnIntermediateHashesStage(s *StageState, u Unwinder, tx kv.RwTx, cfg Tri if cfg.badBlockHalt { return trie.EmptyRoot, fmt.Errorf("Wrong trie root") } + if cfg.hd != nil { + header := rawdb.ReadHeader(tx, headerHash, to) + cfg.hd.ReportBadHeaderPoS(headerHash, header.ParentHash) + } if to > s.BlockNumber { unwindTo := (to + s.BlockNumber) / 2 // Binary search for the correct block, biased to the lower numbers log.Warn("Unwinding due to incorrect root hash", "to", unwindTo) diff --git a/eth/stagedsync/stage_interhashes_test.go b/eth/stagedsync/stage_interhashes_test.go index 2766c0eaa9c..91eea3bd948 100644 --- a/eth/stagedsync/stage_interhashes_test.go +++ b/eth/stagedsync/stage_interhashes_test.go @@ -71,7 +71,7 @@ func TestAccountAndStorageTrie(t *testing.T) { // ---------------------------------------------------------------- blockReader := snapshotsync.NewBlockReader() - cfg := StageTrieCfg(nil, false, true, false, t.TempDir(), blockReader) + cfg := StageTrieCfg(nil, false, true, false, t.TempDir(), 
blockReader, nil) _, err := RegenerateIntermediateHashes("IH", tx, cfg, common.Hash{} /* expectedRootHash */, nil /* quit */) assert.Nil(t, err) @@ -191,7 +191,7 @@ func TestAccountTrieAroundExtensionNode(t *testing.T) { assert.Nil(t, tx.Put(kv.HashedAccounts, hash6[:], encoded)) blockReader := snapshotsync.NewBlockReader() - _, err := RegenerateIntermediateHashes("IH", tx, StageTrieCfg(nil, false, true, false, t.TempDir(), blockReader), common.Hash{} /* expectedRootHash */, nil /* quit */) + _, err := RegenerateIntermediateHashes("IH", tx, StageTrieCfg(nil, false, true, false, t.TempDir(), blockReader, nil), common.Hash{} /* expectedRootHash */, nil /* quit */) assert.Nil(t, err) accountTrie := make(map[string][]byte) @@ -253,7 +253,7 @@ func TestStorageDeletion(t *testing.T) { // ---------------------------------------------------------------- blockReader := snapshotsync.NewBlockReader() - cfg := StageTrieCfg(nil, false, true, false, t.TempDir(), blockReader) + cfg := StageTrieCfg(nil, false, true, false, t.TempDir(), blockReader, nil) _, err = RegenerateIntermediateHashes("IH", tx, cfg, common.Hash{} /* expectedRootHash */, nil /* quit */) assert.Nil(t, err) diff --git a/eth/stagedsync/stage_senders.go b/eth/stagedsync/stage_senders.go index 954bdc9faa6..ae01802da06 100644 --- a/eth/stagedsync/stage_senders.go +++ b/eth/stagedsync/stage_senders.go @@ -25,6 +25,7 @@ import ( "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/turbo/snapshotsync" "github.com/ledgerwatch/erigon/turbo/snapshotsync/snapshothashes" + "github.com/ledgerwatch/erigon/turbo/stages/headerdownload" "github.com/ledgerwatch/log/v3" "github.com/ledgerwatch/secp256k1" ) @@ -42,9 +43,10 @@ type SendersCfg struct { chainConfig *params.ChainConfig blockRetire *snapshotsync.BlockRetire snapshotHashesCfg *snapshothashes.Config + hd *headerdownload.HeaderDownload } -func StageSendersCfg(db kv.RwDB, chainCfg *params.ChainConfig, badBlockHalt bool, tmpdir string, prune prune.Mode, br 
*snapshotsync.BlockRetire) SendersCfg { +func StageSendersCfg(db kv.RwDB, chainCfg *params.ChainConfig, badBlockHalt bool, tmpdir string, prune prune.Mode, br *snapshotsync.BlockRetire, hd *headerdownload.HeaderDownload) SendersCfg { const sendersBatchSize = 10000 const sendersBlockSize = 4096 @@ -61,6 +63,7 @@ func StageSendersCfg(db kv.RwDB, chainCfg *params.ChainConfig, badBlockHalt bool prune: prune, blockRetire: br, snapshotHashesCfg: snapshothashes.KnownConfig(chainCfg.ChainName), + hd: hd, } } @@ -262,6 +265,10 @@ Loop: if cfg.badBlockHalt { return minBlockErr } + minHeader := rawdb.ReadHeader(tx, minBlockHash, minBlockNum) + if cfg.hd != nil { + cfg.hd.ReportBadHeaderPoS(minBlockHash, minHeader.ParentHash) + } if to > s.BlockNumber { u.UnwindTo(minBlockNum-1, minBlockHash) } diff --git a/eth/stagedsync/stage_senders_test.go b/eth/stagedsync/stage_senders_test.go index 996beda20d9..adc825e2055 100644 --- a/eth/stagedsync/stage_senders_test.go +++ b/eth/stagedsync/stage_senders_test.go @@ -109,7 +109,7 @@ func TestSenders(t *testing.T) { require.NoError(stages.SaveStageProgress(tx, stages.Bodies, 3)) - cfg := StageSendersCfg(db, params.TestChainConfig, false, "", prune.Mode{}, snapshotsync.NewBlockRetire(1, "", nil, db, nil, nil)) + cfg := StageSendersCfg(db, params.TestChainConfig, false, "", prune.Mode{}, snapshotsync.NewBlockRetire(1, "", nil, db, nil, nil), nil) err := SpawnRecoverSendersStage(cfg, &StageState{ID: stages.Senders}, nil, tx, 3, ctx) assert.NoError(t, err) diff --git a/ethdb/privateapi/ethbackend.go b/ethdb/privateapi/ethbackend.go index 079f3be97bc..5925ad29009 100644 --- a/ethdb/privateapi/ethbackend.go +++ b/ethdb/privateapi/ethbackend.go @@ -307,7 +307,6 @@ func (s *EthBackendServer) EngineNewPayloadV1(ctx context.Context, req *types2.E } blockHash := gointerfaces.ConvertH256ToHash(req.BlockHash) - tx, err := s.db.BeginRo(ctx) if err != nil { return nil, err diff --git a/turbo/stages/mock_sentry.go b/turbo/stages/mock_sentry.go index 
83789c41db5..b9fd0f8e133 100644 --- a/turbo/stages/mock_sentry.go +++ b/turbo/stages/mock_sentry.go @@ -325,7 +325,7 @@ func MockWithEverything(t *testing.T, gspec *core.Genesis, key *ecdsa.PrivateKey blockReader, ), stagedsync.StageIssuanceCfg(mock.DB, mock.ChainConfig, blockReader, true), - stagedsync.StageSendersCfg(mock.DB, mock.ChainConfig, false, mock.tmpdir, prune, snapshotsync.NewBlockRetire(1, mock.tmpdir, allSnapshots, mock.DB, snapshotsDownloader, mock.Notifications.Events)), + stagedsync.StageSendersCfg(mock.DB, mock.ChainConfig, false, mock.tmpdir, prune, snapshotsync.NewBlockRetire(1, mock.tmpdir, allSnapshots, mock.DB, snapshotsDownloader, mock.Notifications.Events), nil), stagedsync.StageExecuteBlocksCfg( mock.DB, prune, @@ -339,10 +339,11 @@ func MockWithEverything(t *testing.T, gspec *core.Genesis, key *ecdsa.PrivateKey /*stateStream=*/ false, mock.tmpdir, blockReader, + mock.sentriesClient.Hd, ), stagedsync.StageTranspileCfg(mock.DB, cfg.BatchSize, mock.ChainConfig), stagedsync.StageHashStateCfg(mock.DB, mock.tmpdir), - stagedsync.StageTrieCfg(mock.DB, true, true, false, mock.tmpdir, blockReader), + stagedsync.StageTrieCfg(mock.DB, true, true, false, mock.tmpdir, blockReader, nil), stagedsync.StageHistoryCfg(mock.DB, prune, mock.tmpdir), stagedsync.StageLogIndexCfg(mock.DB, prune, mock.tmpdir), stagedsync.StageCallTracesCfg(mock.DB, prune, 0, mock.tmpdir), @@ -368,7 +369,7 @@ func MockWithEverything(t *testing.T, gspec *core.Genesis, key *ecdsa.PrivateKey stagedsync.StageMiningCreateBlockCfg(mock.DB, miner, *mock.ChainConfig, mock.Engine, mock.TxPool, nil, nil, mock.tmpdir), stagedsync.StageMiningExecCfg(mock.DB, miner, nil, *mock.ChainConfig, mock.Engine, &vm.Config{}, mock.tmpdir, nil), stagedsync.StageHashStateCfg(mock.DB, mock.tmpdir), - stagedsync.StageTrieCfg(mock.DB, false, true, false, mock.tmpdir, blockReader), + stagedsync.StageTrieCfg(mock.DB, false, true, false, mock.tmpdir, blockReader, nil), stagedsync.StageMiningFinishCfg(mock.DB, 
*mock.ChainConfig, mock.Engine, miner, mock.Ctx.Done()), ), stagedsync.MiningUnwindOrder, diff --git a/turbo/stages/stageloop.go b/turbo/stages/stageloop.go index a93342e2dd2..ef794f74009 100644 --- a/turbo/stages/stageloop.go +++ b/turbo/stages/stageloop.go @@ -376,7 +376,7 @@ func NewStagedSync( blockReader, ), stagedsync.StageIssuanceCfg(db, controlServer.ChainConfig, blockReader, cfg.EnabledIssuance), - stagedsync.StageSendersCfg(db, controlServer.ChainConfig, false, tmpdir, cfg.Prune, blockRetire), + stagedsync.StageSendersCfg(db, controlServer.ChainConfig, false, tmpdir, cfg.Prune, blockRetire, controlServer.Hd), stagedsync.StageExecuteBlocksCfg( db, cfg.Prune, @@ -390,10 +390,11 @@ func NewStagedSync( /*stateStream=*/ false, tmpdir, blockReader, + controlServer.Hd, ), stagedsync.StageTranspileCfg(db, cfg.BatchSize, controlServer.ChainConfig), stagedsync.StageHashStateCfg(db, tmpdir), - stagedsync.StageTrieCfg(db, true, true, false, tmpdir, blockReader), + stagedsync.StageTrieCfg(db, true, true, false, tmpdir, blockReader, controlServer.Hd), stagedsync.StageHistoryCfg(db, cfg.Prune, tmpdir), stagedsync.StageLogIndexCfg(db, cfg.Prune, tmpdir), stagedsync.StageCallTracesCfg(db, cfg.Prune, 0, tmpdir), @@ -442,7 +443,7 @@ func NewInMemoryExecution(ctx context.Context, logger log.Logger, db kv.RwDB, cf snapshots, blockReader, ), stagedsync.StageBlockHashesCfg(db, tmpdir, controlServer.ChainConfig), - stagedsync.StageSendersCfg(db, controlServer.ChainConfig, true, tmpdir, cfg.Prune, nil), + stagedsync.StageSendersCfg(db, controlServer.ChainConfig, true, tmpdir, cfg.Prune, nil, controlServer.Hd), stagedsync.StageExecuteBlocksCfg( db, cfg.Prune, @@ -456,9 +457,10 @@ func NewInMemoryExecution(ctx context.Context, logger log.Logger, db kv.RwDB, cf true, tmpdir, blockReader, + controlServer.Hd, ), stagedsync.StageHashStateCfg(db, tmpdir), - stagedsync.StageTrieCfg(db, true, true, true, tmpdir, blockReader)), + stagedsync.StageTrieCfg(db, true, true, true, tmpdir, 
blockReader, controlServer.Hd)), stagedsync.StateUnwindOrder, nil, ), nil From a76e6a1d05527a29d5646110920eb249338035d1 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Tue, 5 Jul 2022 09:53:28 +0200 Subject: [PATCH 038/152] fixed deadlock (#4633) --- turbo/stages/headerdownload/header_algos.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/turbo/stages/headerdownload/header_algos.go b/turbo/stages/headerdownload/header_algos.go index c0ed7df24c5..4d795003198 100644 --- a/turbo/stages/headerdownload/header_algos.go +++ b/turbo/stages/headerdownload/header_algos.go @@ -1129,8 +1129,10 @@ func (hd *HeaderDownload) ValidatePayload(tx kv.RwTx, header *types.Header, body hd.nextForkState.UpdateTxn(tx) } hd.nextForkHash = header.Hash() + hd.lock.Unlock() // Let's assemble the side fork chain if we have others building. validationError = execPayload(hd.nextForkState, header, body, 0, nil, nil) + hd.lock.Lock() if validationError != nil { status = remote.EngineStatus_INVALID if isAncestorPosBlock { @@ -1182,7 +1184,9 @@ func (hd *HeaderDownload) ValidatePayload(tx kv.RwTx, header *types.Header, body // if it is not canonical we validate it as a side fork. 
batch := memdb.NewMemoryBatch(tx) defer batch.Close() + hd.lock.Unlock() validationError = execPayload(batch, header, body, unwindPoint, headersChain, bodiesChain) + hd.lock.Lock() latestValidHash = header.Hash() if validationError != nil { if isAncestorPosBlock { From 7e2d46cbe42c958ec541bb1837cc1e351eaf08ef Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Tue, 5 Jul 2022 14:49:39 +0600 Subject: [PATCH 039/152] Support "latests block number" in Oracle Backend (#4635) * save * save * save * save * save * save * save * save --- cmd/rpcdaemon/commands/eth_api.go | 8 ++++++++ cmd/rpcdaemon/commands/eth_system.go | 2 +- eth/gasprice/gasprice.go | 5 ++++- turbo/snapshotsync/block_reader.go | 3 ++- 4 files changed, 15 insertions(+), 3 deletions(-) diff --git a/cmd/rpcdaemon/commands/eth_api.go b/cmd/rpcdaemon/commands/eth_api.go index a12af3e941e..b5193975f74 100644 --- a/cmd/rpcdaemon/commands/eth_api.go +++ b/cmd/rpcdaemon/commands/eth_api.go @@ -222,6 +222,14 @@ func (api *BaseAPI) blockByRPCNumber(number rpc.BlockNumber, tx kv.Tx) (*types.B return block, err } +func (api *BaseAPI) headerByRPCNumber(number rpc.BlockNumber, tx kv.Tx) (*types.Header, error) { + n, h, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(number), tx, api.filters) + if err != nil { + return nil, err + } + return api._blockReader.Header(context.Background(), tx, h, n) +} + // APIImpl is implementation of the EthAPI interface based on remote Db access type APIImpl struct { *BaseAPI diff --git a/cmd/rpcdaemon/commands/eth_system.go b/cmd/rpcdaemon/commands/eth_system.go index 1ba77c18138..20c1e5ef315 100644 --- a/cmd/rpcdaemon/commands/eth_system.go +++ b/cmd/rpcdaemon/commands/eth_system.go @@ -199,7 +199,7 @@ func NewGasPriceOracleBackend(tx kv.Tx, cc *params.ChainConfig, baseApi *BaseAPI } func (b *GasPriceOracleBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) { - header, err := b.baseApi._blockReader.HeaderByNumber(ctx, b.tx, 
uint64(number.Int64())) + header, err := b.baseApi.headerByRPCNumber(number, b.tx) if err != nil { return nil, err } diff --git a/eth/gasprice/gasprice.go b/eth/gasprice/gasprice.go index 63a57aa1dab..50557984de5 100644 --- a/eth/gasprice/gasprice.go +++ b/eth/gasprice/gasprice.go @@ -117,7 +117,10 @@ func NewOracle(backend OracleBackend, params Config) *Oracle { // NODE: if caller wants legacy tx SuggestedPrice, we need to add // baseFee to the returned bigInt func (gpo *Oracle) SuggestTipCap(ctx context.Context) (*big.Int, error) { - head, _ := gpo.backend.HeaderByNumber(ctx, rpc.LatestBlockNumber) + head, err := gpo.backend.HeaderByNumber(ctx, rpc.LatestBlockNumber) + if err != nil { + return gpo.lastPrice, err + } if head == nil { return gpo.lastPrice, nil } diff --git a/turbo/snapshotsync/block_reader.go b/turbo/snapshotsync/block_reader.go index c35e22b99fd..4fd90b22c67 100644 --- a/turbo/snapshotsync/block_reader.go +++ b/turbo/snapshotsync/block_reader.go @@ -251,7 +251,8 @@ func (back *BlockReaderWithSnapshots) HeaderByNumber(ctx context.Context, tx kv. 
if ok { return h, nil } - return rawdb.ReadHeaderByNumber(tx, blockHeight), nil + h = rawdb.ReadHeaderByNumber(tx, blockHeight) + return h, nil } // HeaderByHash - will search header in all snapshots starting from recent From a6a5b1db904e8ad90fd482ef3efce354b065d093 Mon Sep 17 00:00:00 2001 From: ledgerwatch Date: Tue, 5 Jul 2022 13:41:48 +0100 Subject: [PATCH 040/152] Update skip_analysis.go (#4632) --- core/skip_analysis.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/core/skip_analysis.go b/core/skip_analysis.go index 231a3150750..53eaea84294 100644 --- a/core/skip_analysis.go +++ b/core/skip_analysis.go @@ -36,8 +36,8 @@ import ( // 0x21ab7bf7245a87eae265124aaf180d91133377e47db2b1a4866493ec4b371150 (block 13119520) var analysisBlocks map[string][]uint64 = map[string][]uint64{ - networkname.MainnetChainName: {5_800_596, 6_426_298, 6_426_432, 11_079_912, 13_119_520, 14_961_400}, - networkname.BSCChainName: {18_682_505}, + networkname.MainnetChainName: {5_800_596, 6_426_298, 6_426_432, 11_079_912, 13_119_520, 15_081_051}, + networkname.BSCChainName: {19_278_044}, networkname.BorMainnetChainName: {29_447_463}, networkname.RopstenChainName: {2_534_105, 2_534_116, 3_028_887, 3_028_940, 3_028_956, 3_450_102, 5_294_626, 5_752_787, 10_801_303, 10_925_062, 11_440_683, 11_897_655, 11_898_288, 12_291_199, 12_331_664}, } From d4a0aff53f1d0cbad1d4bfc7da2278752fd63f22 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Tue, 5 Jul 2022 20:03:05 +0200 Subject: [PATCH 041/152] fixed hive test: Transaction Reorg - Check Blockhash with NP on revert (erigon) (#4640) --- turbo/stages/headerdownload/header_algos.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/turbo/stages/headerdownload/header_algos.go b/turbo/stages/headerdownload/header_algos.go index 4d795003198..2e391f208f2 100644 --- a/turbo/stages/headerdownload/header_algos.go +++ b/turbo/stages/headerdownload/header_algos.go @@ -1146,6 +1146,13 @@ func (hd 
*HeaderDownload) ValidatePayload(tx kv.RwTx, header *types.Header, body hd.cleanupOutdateSideForks(*currentHeight, maxDepth) return } + // If the block is stored within the side fork it means it was already validated. + if _, ok := hd.sideForksBlock[header.Hash()]; ok { + status = remote.EngineStatus_VALID + latestValidHash = header.Hash() + return + } + // if the block is not in range of MAX_DEPTH from head then we do not validate it. if abs64(int64(*currentHeight)-header.Number.Int64()) > maxDepth { status = remote.EngineStatus_ACCEPTED @@ -1213,10 +1220,7 @@ func (hd *HeaderDownload) FlushNextForkState(tx kv.RwTx) error { if err := hd.nextForkState.Flush(tx); err != nil { return err } - // If the side fork hash is now becoming canonical we can clean up. - if _, ok := hd.sideForksBlock[hd.nextForkHash]; ok { - delete(hd.sideForksBlock, hd.nextForkHash) - } + hd.nextForkState.Close() hd.nextForkHash = common.Hash{} hd.nextForkState = nil From cc75387d1063330b5ed8c07427c80e07da15f33d Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Tue, 5 Jul 2022 20:16:52 +0200 Subject: [PATCH 042/152] Small simplification of startHandlingForkChoice (#4636) --- eth/stagedsync/stage_headers.go | 30 +----------------------------- 1 file changed, 1 insertion(+), 29 deletions(-) diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 6a58e3217d2..94b40fa8bb5 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -321,34 +321,7 @@ func startHandlingForkChoice( cfg.hd.BeaconRequestList.Remove(requestId) headerNumber := header.Number.Uint64() - // If header is canonical, then no reorgs are required - canonicalHash, err := rawdb.ReadCanonicalHash(tx, headerNumber) - if err != nil { - log.Warn(fmt.Sprintf("[%s] Fork choice err (reading canonical hash of %d)", s.LogPrefix(), headerNumber), "err", err) - cfg.hd.BeaconRequestList.Remove(requestId) - return nil, err - } - - if 
headerHash == canonicalHash { - log.Info(fmt.Sprintf("[%s] Fork choice on previously known block", s.LogPrefix())) - cfg.hd.BeaconRequestList.Remove(requestId) - rawdb.WriteForkchoiceHead(tx, forkChoice.HeadBlockHash) - canonical, err := safeAndFinalizedBlocksAreCanonical(forkChoice, s, tx, cfg) - if err != nil { - log.Warn(fmt.Sprintf("[%s] Fork choice err", s.LogPrefix()), "err", err) - return nil, err - } - if canonical { - return &privateapi.PayloadStatus{ - Status: remote.EngineStatus_VALID, - LatestValidHash: headerHash, - }, nil - } else { - return &privateapi.PayloadStatus{ - CriticalError: &privateapi.InvalidForkchoiceStateErr, - }, nil - } - } + cfg.hd.UpdateTopSeenHeightPoS(headerNumber) if cfg.memoryOverlay && headerHash == cfg.hd.GetNextForkHash() { log.Info("Flushing in-memory state") @@ -372,7 +345,6 @@ func startHandlingForkChoice( } } - cfg.hd.UpdateTopSeenHeightPoS(headerNumber) forkingPoint := uint64(0) if headerNumber > 0 { parent, err := headerReader.Header(ctx, tx, header.ParentHash, headerNumber-1) From 5e1cc9ae51e0bc3f1f8919e1f165d5d57de54a97 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Tue, 5 Jul 2022 23:40:10 +0200 Subject: [PATCH 043/152] fixed Invalid Number reorg (#4643) --- eth/stagedsync/stage_headers.go | 4 --- turbo/stages/headerdownload/header_algos.go | 32 +++++++-------------- 2 files changed, 11 insertions(+), 25 deletions(-) diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 94b40fa8bb5..ed9eddbd743 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -560,10 +560,6 @@ func verifyAndSaveNewPoSHeader( return nil, false, err } - if err := cfg.hd.StorePayloadFork(tx, header, body); err != nil { - return nil, false, err - } - currentHeadHash := rawdb.ReadHeadHeaderHash(tx) if currentHeadHash != header.ParentHash { // Side chain or something weird diff --git a/turbo/stages/headerdownload/header_algos.go b/turbo/stages/headerdownload/header_algos.go index 
2e391f208f2..03bf5ef186f 100644 --- a/turbo/stages/headerdownload/header_algos.go +++ b/turbo/stages/headerdownload/header_algos.go @@ -645,12 +645,6 @@ func (hd *HeaderDownload) ProcessHeadersPOS(csHeaders []ChainSegmentHeader, tx k log.Trace("posAnchor is nil") return nil, nil } - // We may have received answer from old request so not enough evidence for penalizing. - if hd.posAnchor.blockHeight != 1 && csHeaders[0].Number != hd.posAnchor.blockHeight-1 { - // hd.posAnchor.blockHeight == 1 is a special case when the height of the anchor is unknown (it is created from the fork choice message from beacon node) - log.Trace("posAnchor", "blockHeight", hd.posAnchor.blockHeight) - return nil, nil - } // Handle request after closing collectors if hd.headersCollector == nil { @@ -661,6 +655,10 @@ func (hd *HeaderDownload) ProcessHeadersPOS(csHeaders []ChainSegmentHeader, tx k header := sh.Header headerHash := sh.Hash if headerHash != hd.posAnchor.parentHash { + if hd.posAnchor.blockHeight != 1 && sh.Number != hd.posAnchor.blockHeight-1 { + log.Info("posAnchor", "blockHeight", hd.posAnchor.blockHeight) + return nil, nil + } log.Warn("Unexpected header", "hash", headerHash, "expected", hd.posAnchor.parentHash) return []PenaltyItem{{PeerID: peerId, Penalty: BadBlockPenalty}}, nil } @@ -670,12 +668,16 @@ func (hd *HeaderDownload) ProcessHeadersPOS(csHeaders []ChainSegmentHeader, tx k return nil, err } - hh, err := hd.headerReader.Header(context.Background(), tx, header.ParentHash, headerNumber-1) + hh, err := hd.headerReader.HeaderByHash(context.Background(), tx, header.ParentHash) if err != nil { return nil, err } if hh != nil { log.Trace("Synced", "requestId", hd.requestId) + if headerNumber != hh.Number.Uint64()+1 { + hd.badPoSHeaders[headerHash] = header.ParentHash + return nil, fmt.Errorf("Invalid PoS segment detected: invalid block number. 
got %d, expected %d", headerNumber, hh.Number.Uint64()+1) + } hd.posAnchor = nil hd.posStatus = Synced hd.BeaconRequestList.Interrupt(engineapi.Synced) @@ -1093,19 +1095,6 @@ func abs64(n int64) uint64 { return uint64(n) } -func (hd *HeaderDownload) StorePayloadFork(tx kv.RwTx, header *types.Header, body *types.RawBody) error { - hd.lock.Lock() - defer hd.lock.Unlock() - maxDepth := uint64(16) - height := rawdb.ReadCurrentBlockNumber(tx) - if height == nil { - return fmt.Errorf("could not read block number.") - } - hd.sideForksBlock[header.Hash()] = sideForkBlock{header, body} - hd.cleanupOutdateSideForks(*height, maxDepth) - return nil -} - func (hd *HeaderDownload) ValidatePayload(tx kv.RwTx, header *types.Header, body *types.RawBody, terminalTotalDifficulty *big.Int, store bool, execPayload func(kv.RwTx, *types.Header, *types.RawBody, uint64, []*types.Header, []*types.RawBody) error) (status remote.EngineStatus, latestValidHash common.Hash, validationError error, criticalError error) { hd.lock.Lock() defer hd.lock.Unlock() @@ -1186,7 +1175,6 @@ func (hd *HeaderDownload) ValidatePayload(tx kv.RwTx, header *types.Header, body } unwindPoint = sb.header.Number.Uint64() - 1 } - hd.sideForksBlock[header.Hash()] = sideForkBlock{header, body} status = remote.EngineStatus_VALID // if it is not canonical we validate it as a side fork. 
batch := memdb.NewMemoryBatch(tx) @@ -1200,7 +1188,9 @@ func (hd *HeaderDownload) ValidatePayload(tx kv.RwTx, header *types.Header, body latestValidHash = header.ParentHash } status = remote.EngineStatus_INVALID + return } + hd.sideForksBlock[header.Hash()] = sideForkBlock{header, body} // After the we finished executing, we clean up old forks hd.cleanupOutdateSideForks(*currentHeight, maxDepth) return From 73b0659bb7f0a55dafb907e23df1ecd5963e9c95 Mon Sep 17 00:00:00 2001 From: ledgerwatch Date: Wed, 6 Jul 2022 07:49:00 +0100 Subject: [PATCH 044/152] [erigon2.2] Optimisation of state reconstitution (#4621) * Init * Optimise ReconState flushes * Updates * Update * Updates * Updates * More on parallel execution * More on parallel exec * Fix lint * Improvements to parallel exec * comment * Fix history access, include incarnation * Close work channel * TxTask * more fixes to parallel exec * Update to latest erigon-lib * Fix compilation * Cleanup and timing * Fixes to recon1 Co-authored-by: Alex Sharp Co-authored-by: Alexey Sharp --- cmd/state/commands/erigon22.go | 10 - cmd/state/commands/state_recon.go | 460 +++++++++++++++----------- cmd/state/commands/state_recon_1.go | 433 ++++++++++++++++++++++++ core/state/history_reader_nostate.go | 52 ++- core/state/intra_block_state.go | 32 ++ core/state/recon_state_1.go | 474 +++++++++++++++++++++++++++ core/state/state_recon_writer.go | 156 ++++----- go.mod | 2 +- go.sum | 4 +- 9 files changed, 1337 insertions(+), 286 deletions(-) create mode 100644 cmd/state/commands/state_recon_1.go create mode 100644 core/state/recon_state_1.go diff --git a/cmd/state/commands/erigon22.go b/cmd/state/commands/erigon22.go index 55dffedbd41..de83339c87a 100644 --- a/cmd/state/commands/erigon22.go +++ b/cmd/state/commands/erigon22.go @@ -439,9 +439,6 @@ func (rw *ReaderWrapper22) ReadAccountData(address common.Address) (*accounts.Ac if incBytes > 0 { a.Incarnation = bytesToUint64(enc[pos : pos+incBytes]) } - if rw.blockNum == 10264901 { - 
fmt.Printf("block %d ReadAccount [%x] => {Balance: %d, Nonce: %d}\n", rw.blockNum, address, &a.Balance, a.Nonce) - } return &a, nil } @@ -450,9 +447,6 @@ func (rw *ReaderWrapper22) ReadAccountStorage(address common.Address, incarnatio if err != nil { return nil, err } - if rw.blockNum == 10264901 { - fmt.Printf("block %d ReadStorage [%x] [%x] => [%x]\n", rw.blockNum, address, *key, enc) - } if enc == nil { return nil, nil } @@ -558,10 +552,6 @@ func (ww *WriterWrapper22) DeleteAccount(address common.Address, original *accou } func (ww *WriterWrapper22) WriteAccountStorage(address common.Address, incarnation uint64, key *common.Hash, original, value *uint256.Int) error { - trace := fmt.Sprintf("%x", address) == "000000000000006f6502b7f2bbac8c30a3f67e9a" - if trace { - fmt.Printf("block %d WriteAccountStorage [%x] [%x] => [%x]\n", ww.blockNum, address, *key, value.Bytes()) - } if err := ww.w.WriteAccountStorage(address.Bytes(), key.Bytes(), value.Bytes()); err != nil { return err } diff --git a/cmd/state/commands/state_recon.go b/cmd/state/commands/state_recon.go index 83f6f9bd156..1e6b3568438 100644 --- a/cmd/state/commands/state_recon.go +++ b/cmd/state/commands/state_recon.go @@ -18,6 +18,7 @@ import ( "github.com/RoaringBitmap/roaring/roaring64" libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/etl" "github.com/ledgerwatch/erigon-lib/kv" kv2 "github.com/ledgerwatch/erigon-lib/kv/mdbx" libstate "github.com/ledgerwatch/erigon-lib/state" @@ -30,7 +31,6 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/types/accounts" "github.com/ledgerwatch/erigon/core/vm" - "github.com/ledgerwatch/erigon/crypto" "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/params" @@ -56,30 +56,24 @@ var reconCmd = &cobra.Command{ } type ReconWorker struct { - lock sync.Locker - wg *sync.WaitGroup - rs *state.ReconState - blockReader 
services.FullBlockReader - allSnapshots *snapshotsync.RoSnapshots - stateWriter *state.StateReconWriter - stateReader *state.HistoryReaderNoState - firstBlock bool - lastBlockNum uint64 - lastBlockHash common.Hash - lastHeader *types.Header - lastRules *params.Rules - getHeader func(hash common.Hash, number uint64) *types.Header - ctx context.Context - engine consensus.Engine - txNums []uint64 - chainConfig *params.ChainConfig - logger log.Logger - genesis *core.Genesis + lock sync.Locker + wg *sync.WaitGroup + rs *state.ReconState + blockReader services.FullBlockReader + allSnapshots *snapshotsync.RoSnapshots + stateWriter *state.StateReconWriter + stateReader *state.HistoryReaderNoState + getHeader func(hash common.Hash, number uint64) *types.Header + ctx context.Context + engine consensus.Engine + chainConfig *params.ChainConfig + logger log.Logger + genesis *core.Genesis } func NewReconWorker(lock sync.Locker, wg *sync.WaitGroup, rs *state.ReconState, a *libstate.Aggregator, blockReader services.FullBlockReader, allSnapshots *snapshotsync.RoSnapshots, - txNums []uint64, chainConfig *params.ChainConfig, logger log.Logger, genesis *core.Genesis, + chainConfig *params.ChainConfig, logger log.Logger, genesis *core.Genesis, ) *ReconWorker { ac := a.MakeContext() return &ReconWorker{ @@ -91,7 +85,6 @@ func NewReconWorker(lock sync.Locker, wg *sync.WaitGroup, rs *state.ReconState, ctx: context.Background(), stateWriter: state.NewStateReconWriter(ac, rs), stateReader: state.NewHistoryReaderNoState(ac, rs), - txNums: txNums, chainConfig: chainConfig, logger: logger, genesis: genesis, @@ -104,7 +97,6 @@ func (rw *ReconWorker) SetTx(tx kv.Tx) { func (rw *ReconWorker) run() { defer rw.wg.Done() - rw.firstBlock = true rw.getHeader = func(hash common.Hash, number uint64) *types.Header { h, err := rw.blockReader.Header(rw.ctx, nil, hash, number) if err != nil { @@ -113,40 +105,23 @@ func (rw *ReconWorker) run() { return h } rw.engine = initConsensusEngine(rw.chainConfig, 
rw.logger, rw.allSnapshots) - for txNum, ok := rw.rs.Schedule(); ok; txNum, ok = rw.rs.Schedule() { - rw.runTxNum(txNum) + for txTask, ok := rw.rs.Schedule(); ok; txTask, ok = rw.rs.Schedule() { + rw.runTxTask(txTask) } } -func (rw *ReconWorker) runTxNum(txNum uint64) { +func (rw *ReconWorker) runTxTask(txTask state.TxTask) { rw.lock.Lock() defer rw.lock.Unlock() - rw.stateReader.SetTxNum(txNum) + rw.stateReader.SetTxNum(txTask.TxNum) rw.stateReader.ResetError() - rw.stateWriter.SetTxNum(txNum) + rw.stateWriter.SetTxNum(txTask.TxNum) noop := state.NewNoopWriter() - // Find block number - blockNum := uint64(sort.Search(len(rw.txNums), func(i int) bool { - return rw.txNums[i] > txNum - })) - if rw.firstBlock || blockNum != rw.lastBlockNum { - var err error - if rw.lastHeader, err = rw.blockReader.HeaderByNumber(rw.ctx, nil, blockNum); err != nil { - panic(err) - } - rw.lastBlockNum = blockNum - rw.lastBlockHash = rw.lastHeader.Hash() - rw.lastRules = rw.chainConfig.Rules(blockNum) - rw.firstBlock = false - } - var startTxNum uint64 - if blockNum > 0 { - startTxNum = rw.txNums[blockNum-1] - } + rules := rw.chainConfig.Rules(txTask.BlockNum) ibs := state.New(rw.stateReader) - daoForkTx := rw.chainConfig.DAOForkSupport && rw.chainConfig.DAOForkBlock != nil && rw.chainConfig.DAOForkBlock.Uint64() == blockNum && txNum == rw.txNums[blockNum-1] + daoForkTx := rw.chainConfig.DAOForkSupport && rw.chainConfig.DAOForkBlock != nil && rw.chainConfig.DAOForkBlock.Uint64() == txTask.BlockNum && txTask.TxIndex == -1 var err error - if blockNum == 0 { + if txTask.BlockNum == 0 && txTask.TxIndex == -1 { //fmt.Printf("txNum=%d, blockNum=%d, Genesis\n", txNum, blockNum) // Genesis block _, ibs, err = rw.genesis.ToBlock() @@ -156,54 +131,47 @@ func (rw *ReconWorker) runTxNum(txNum uint64) { } else if daoForkTx { //fmt.Printf("txNum=%d, blockNum=%d, DAO fork\n", txNum, blockNum) misc.ApplyDAOHardFork(ibs) - if err := ibs.FinalizeTx(rw.lastRules, noop); err != nil { - panic(err) - } - } 
else if txNum+1 == rw.txNums[blockNum] { - //fmt.Printf("txNum=%d, blockNum=%d, finalisation of the block\n", txNum, blockNum) - // End of block transaction in a block - block, _, err := rw.blockReader.BlockWithSenders(rw.ctx, nil, rw.lastBlockHash, blockNum) - if err != nil { + if err := ibs.FinalizeTx(rules, noop); err != nil { panic(err) } - if _, _, err := rw.engine.Finalize(rw.chainConfig, rw.lastHeader, ibs, block.Transactions(), block.Uncles(), nil /* receipts */, nil, nil, nil); err != nil { - panic(fmt.Errorf("finalize of block %d failed: %w", blockNum, err)) + } else if txTask.Final { + if txTask.BlockNum > 0 { + //fmt.Printf("txNum=%d, blockNum=%d, finalisation of the block\n", txNum, blockNum) + // End of block transaction in a block + if _, _, err := rw.engine.Finalize(rw.chainConfig, txTask.Header, ibs, txTask.Block.Transactions(), txTask.Block.Uncles(), nil /* receipts */, nil, nil, nil); err != nil { + panic(fmt.Errorf("finalize of block %d failed: %w", txTask.BlockNum, err)) + } } + } else if txTask.TxIndex == -1 { + // Block initialisation } else { - txIndex := txNum - startTxNum - 1 - //fmt.Printf("txNum=%d, blockNum=%d, txIndex=%d\n", txNum, blockNum, txIndex) - txn, err := rw.blockReader.TxnByIdxInBlock(rw.ctx, nil, blockNum, int(txIndex)) - if err != nil { - panic(err) - } - txHash := txn.Hash() - gp := new(core.GasPool).AddGas(txn.GetGas()) + txHash := txTask.Tx.Hash() + gp := new(core.GasPool).AddGas(txTask.Tx.GetGas()) //fmt.Printf("txNum=%d, blockNum=%d, txIndex=%d, gas=%d, input=[%x]\n", txNum, blockNum, txIndex, txn.GetGas(), txn.GetData()) usedGas := new(uint64) - vmConfig := vm.Config{NoReceipts: true, SkipAnalysis: core.SkipAnalysis(rw.chainConfig, blockNum)} + vmConfig := vm.Config{NoReceipts: true, SkipAnalysis: core.SkipAnalysis(rw.chainConfig, txTask.BlockNum)} contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } - ibs.Prepare(txHash, rw.lastBlockHash, int(txIndex)) - _, _, err = 
core.ApplyTransaction(rw.chainConfig, rw.getHeader, rw.engine, nil, gp, ibs, noop, rw.lastHeader, txn, usedGas, vmConfig, contractHasTEVM) + ibs.Prepare(txHash, txTask.BlockHash, txTask.TxIndex) + _, _, err = core.ApplyTransaction(rw.chainConfig, rw.getHeader, rw.engine, nil, gp, ibs, noop, txTask.Header, txTask.Tx, usedGas, vmConfig, contractHasTEVM) if err != nil { - panic(fmt.Errorf("could not apply tx %d [%x] failed: %w", txIndex, txHash, err)) + panic(fmt.Errorf("could not apply tx %d [%x] failed: %w", txTask.TxIndex, txHash, err)) } } if dependency, ok := rw.stateReader.ReadError(); ok { //fmt.Printf("rollback %d\n", txNum) - rw.rs.RollbackTxNum(txNum, dependency) + rw.rs.RollbackTx(txTask, dependency) } else { - if err = ibs.CommitBlock(rw.lastRules, rw.stateWriter); err != nil { + if err = ibs.CommitBlock(rules, rw.stateWriter); err != nil { panic(err) } //fmt.Printf("commit %d\n", txNum) - rw.rs.CommitTxNum(txNum) + rw.rs.CommitTxNum(txTask.TxNum) } } type FillWorker struct { txNum uint64 doneCount *uint64 - rs *state.ReconState ac *libstate.AggregatorContext fromKey, toKey []byte currentKey []byte @@ -212,11 +180,10 @@ type FillWorker struct { progress uint64 } -func NewFillWorker(txNum uint64, doneCount *uint64, rs *state.ReconState, a *libstate.Aggregator, fromKey, toKey []byte) *FillWorker { +func NewFillWorker(txNum uint64, doneCount *uint64, a *libstate.Aggregator, fromKey, toKey []byte) *FillWorker { fw := &FillWorker{ txNum: txNum, doneCount: doneCount, - rs: rs, ac: a.MakeContext(), fromKey: fromKey, toKey: toKey, @@ -232,7 +199,7 @@ func (fw *FillWorker) Progress() uint64 { return atomic.LoadUint64(&fw.progress) } -func (fw *FillWorker) fillAccounts() { +func (fw *FillWorker) fillAccounts(plainStateCollector *etl.Collector) { defer func() { atomic.AddUint64(fw.doneCount, 1) }() @@ -271,13 +238,15 @@ func (fw *FillWorker) fillAccounts() { } value := make([]byte, a.EncodingLengthForStorage()) a.EncodeForStorage(value) - fw.rs.Put(kv.PlainState, 
key, value) + if err := plainStateCollector.Collect(key, value); err != nil { + panic(err) + } //fmt.Printf("Account [%x]=>{Balance: %d, Nonce: %d, Root: %x, CodeHash: %x}\n", key, &a.Balance, a.Nonce, a.Root, a.CodeHash) } } } -func (fw *FillWorker) fillStorage() { +func (fw *FillWorker) fillStorage(plainStateCollector *etl.Collector) { defer func() { atomic.AddUint64(fw.doneCount, 1) }() @@ -290,14 +259,18 @@ func (fw *FillWorker) fillStorage() { compositeKey := dbutils.PlainGenerateCompositeStorageKey(key[:20], state.FirstContractIncarnation, key[20:]) if len(val) > 0 { if len(val) > 1 || val[0] != 0 { - fw.rs.Put(kv.PlainState, compositeKey, val) + if err := plainStateCollector.Collect(compositeKey, val); err != nil { + panic(err) + } + } else { + fmt.Printf("Storage [%x] => [%x]\n", compositeKey, val) } //fmt.Printf("Storage [%x] => [%x]\n", compositeKey, val) } } } -func (fw *FillWorker) fillCode() { +func (fw *FillWorker) fillCode(codeCollector, plainContractCollector *etl.Collector) { defer func() { atomic.AddUint64(fw.doneCount, 1) }() @@ -310,9 +283,18 @@ func (fw *FillWorker) fillCode() { compositeKey := dbutils.PlainGenerateStoragePrefix(key, state.FirstContractIncarnation) if len(val) > 0 { if len(val) > 1 || val[0] != 0 { - codeHash := crypto.Keccak256(val) - fw.rs.Put(kv.Code, codeHash[:], val) - fw.rs.Put(kv.PlainContractCode, compositeKey, codeHash[:]) + codeHash, err := common.HashData(val) + if err != nil { + panic(err) + } + if err = codeCollector.Collect(codeHash[:], val); err != nil { + panic(err) + } + if err = plainContractCollector.Collect(compositeKey, codeHash[:]); err != nil { + panic(err) + } + } else { + fmt.Printf("Code [%x] => [%x]\n", compositeKey, val) } //fmt.Printf("Code [%x] => [%x]\n", compositeKey, val) } @@ -387,6 +369,7 @@ func Recon(genesis *core.Genesis, logger log.Logger) error { } else if err = os.RemoveAll(reconDbPath); err != nil { return err } + startTime := time.Now() db, err := 
kv2.NewMDBX(logger).Path(reconDbPath).WriteMap().Open() if err != nil { return err @@ -433,7 +416,8 @@ func Recon(genesis *core.Genesis, logger log.Logger) error { fmt.Printf("Corresponding block num = %d, txNum = %d\n", blockNum, txNum) workerCount := runtime.NumCPU() var wg sync.WaitGroup - rs := state.NewReconState() + workCh := make(chan state.TxTask, 128) + rs := state.NewReconState(workCh) var fromKey, toKey []byte bigCount := big.NewInt(int64(workerCount)) bigStep := big.NewInt(0x100000000) @@ -451,7 +435,7 @@ func Recon(genesis *core.Genesis, logger log.Logger) error { bigCurrent.FillBytes(toKey) } //fmt.Printf("%d) Fill worker [%x] - [%x]\n", i, fromKey, toKey) - fillWorkers[i] = NewFillWorker(txNum, &doneCount, rs, agg, fromKey, toKey) + fillWorkers[i] = NewFillWorker(txNum, &doneCount, agg, fromKey, toKey) } logEvery := time.NewTicker(logInterval) defer logEvery.Stop() @@ -526,7 +510,6 @@ func Recon(genesis *core.Genesis, logger log.Logger) error { bitmap.Or(&fillWorkers[i].bitmap) } log.Info("Ready to replay", "transactions", bitmap.GetCardinality(), "out of", txNum) - rs.SetWorkBitmap(&bitmap) var lock sync.RWMutex reconWorkers := make([]*ReconWorker, workerCount) roTxs := make([]kv.Tx, workerCount) @@ -544,7 +527,7 @@ func Recon(genesis *core.Genesis, logger log.Logger) error { } } for i := 0; i < workerCount; i++ { - reconWorkers[i] = NewReconWorker(lock.RLocker(), &wg, rs, agg, blockReader, allSnapshots, txNums, chainConfig, logger, genesis) + reconWorkers[i] = NewReconWorker(lock.RLocker(), &wg, rs, agg, blockReader, allSnapshots, chainConfig, logger, genesis) reconWorkers[i].SetTx(roTxs[i]) } wg.Add(workerCount) @@ -558,60 +541,98 @@ func Recon(genesis *core.Genesis, logger log.Logger) error { prevCount := uint64(0) prevRollbackCount := uint64(0) prevTime := time.Now() - for count < total { - select { - case <-logEvery.C: - var m runtime.MemStats - libcommon.ReadMemStats(&m) - sizeEstimate := rs.SizeEstimate() - count = rs.DoneCount() - 
rollbackCount = rs.RollbackCount() - currentTime := time.Now() - interval := currentTime.Sub(prevTime) - speedTx := float64(count-prevCount) / (float64(interval) / float64(time.Second)) - progress := 100.0 * float64(count) / float64(total) - var repeatRatio float64 - if count > prevCount { - repeatRatio = 100.0 * float64(rollbackCount-prevRollbackCount) / float64(count-prevCount) - } - prevTime = currentTime - prevCount = count - prevRollbackCount = rollbackCount - log.Info("State reconstitution", "workers", workerCount, "progress", fmt.Sprintf("%.2f%%", progress), "tx/s", fmt.Sprintf("%.1f", speedTx), "repeat ratio", fmt.Sprintf("%.2f%%", repeatRatio), "buffer", libcommon.ByteCount(sizeEstimate), - "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys), - ) - if sizeEstimate >= commitThreshold { - err := func() error { - lock.Lock() - defer lock.Unlock() - for i := 0; i < workerCount; i++ { - roTxs[i].Rollback() - } - rwTx, err := db.BeginRw(ctx) - if err != nil { - return err - } - if err = rs.Flush(rwTx); err != nil { - return err - } - if err = rwTx.Commit(); err != nil { - return err - } - for i := 0; i < workerCount; i++ { - if roTxs[i], err = db.BeginRo(ctx); err != nil { + reconDone := make(chan struct{}) + go func() { + for { + select { + case <-reconDone: + return + case <-logEvery.C: + var m runtime.MemStats + libcommon.ReadMemStats(&m) + sizeEstimate := rs.SizeEstimate() + count = rs.DoneCount() + rollbackCount = rs.RollbackCount() + currentTime := time.Now() + interval := currentTime.Sub(prevTime) + speedTx := float64(count-prevCount) / (float64(interval) / float64(time.Second)) + progress := 100.0 * float64(count) / float64(total) + var repeatRatio float64 + if count > prevCount { + repeatRatio = 100.0 * float64(rollbackCount-prevRollbackCount) / float64(count-prevCount) + } + prevTime = currentTime + prevCount = count + prevRollbackCount = rollbackCount + log.Info("State reconstitution", "workers", workerCount, "progress", 
fmt.Sprintf("%.2f%%", progress), "tx/s", fmt.Sprintf("%.1f", speedTx), "repeat ratio", fmt.Sprintf("%.2f%%", repeatRatio), "buffer", libcommon.ByteCount(sizeEstimate), + "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys), + ) + if sizeEstimate >= commitThreshold { + err := func() error { + lock.Lock() + defer lock.Unlock() + for i := 0; i < workerCount; i++ { + roTxs[i].Rollback() + } + rwTx, err := db.BeginRw(ctx) + if err != nil { + return err + } + if err = rs.Flush(rwTx); err != nil { return err } - reconWorkers[i].SetTx(roTxs[i]) + if err = rwTx.Commit(); err != nil { + return err + } + for i := 0; i < workerCount; i++ { + if roTxs[i], err = db.BeginRo(ctx); err != nil { + return err + } + reconWorkers[i].SetTx(roTxs[i]) + } + return nil + }() + if err != nil { + panic(err) } - return nil - }() - if err != nil { - panic(err) } } } + }() + var inputTxNum uint64 + for bn := uint64(0); bn < blockNum; bn++ { + header, err := blockReader.HeaderByNumber(ctx, nil, bn) + if err != nil { + panic(err) + } + blockHash := header.Hash() + b, _, err := blockReader.BlockWithSenders(ctx, nil, blockHash, bn) + if err != nil { + panic(err) + } + txs := b.Transactions() + for txIndex := -1; txIndex <= len(txs); txIndex++ { + if bitmap.Contains(inputTxNum) { + txTask := state.TxTask{ + Header: header, + BlockNum: bn, + Block: b, + TxNum: inputTxNum, + TxIndex: txIndex, + BlockHash: blockHash, + Final: txIndex == len(txs), + } + if txIndex >= 0 && txIndex < len(txs) { + txTask.Tx = txs[txIndex] + } + workCh <- txTask + } + inputTxNum++ + } } + close(workCh) wg.Wait() + reconDone <- struct{}{} // Complete logging and committing go-routine for i := 0; i < workerCount; i++ { roTxs[i].Rollback() } @@ -630,17 +651,95 @@ func Recon(genesis *core.Genesis, logger log.Logger) error { if err = rwTx.Commit(); err != nil { return err } + plainStateCollector := etl.NewCollector("recon plainState", datadir, etl.NewSortableBuffer(etl.BufferOptimalSize)) + defer 
plainStateCollector.Close() + codeCollector := etl.NewCollector("recon code", datadir, etl.NewOldestEntryBuffer(etl.BufferOptimalSize)) + defer codeCollector.Close() + plainContractCollector := etl.NewCollector("recon plainContract", datadir, etl.NewSortableBuffer(etl.BufferOptimalSize)) + defer plainContractCollector.Close() + roTx, err := db.BeginRo(ctx) + if err != nil { + return err + } + defer roTx.Rollback() + cursor, err := roTx.Cursor(kv.PlainStateR) + if err != nil { + return err + } + defer cursor.Close() + var k, v []byte + for k, v, err = cursor.First(); err == nil && k != nil; k, v, err = cursor.Next() { + if err = plainStateCollector.Collect(k[8:], v); err != nil { + return err + } + } + if err != nil { + return err + } + cursor.Close() + if cursor, err = roTx.Cursor(kv.CodeR); err != nil { + return err + } + defer cursor.Close() + for k, v, err = cursor.First(); err == nil && k != nil; k, v, err = cursor.Next() { + if err = codeCollector.Collect(k[8:], v); err != nil { + return err + } + } + if err != nil { + return err + } + cursor.Close() + if cursor, err = roTx.Cursor(kv.PlainContractR); err != nil { + return err + } + defer cursor.Close() + for k, v, err = cursor.First(); err == nil && k != nil; k, v, err = cursor.Next() { + if err = plainContractCollector.Collect(k[8:], v); err != nil { + return err + } + } + if err != nil { + return err + } + cursor.Close() + roTx.Rollback() + if rwTx, err = db.BeginRw(ctx); err != nil { + return err + } + if err = rwTx.ClearBucket(kv.PlainStateR); err != nil { + return err + } + if err = rwTx.ClearBucket(kv.CodeR); err != nil { + return err + } + if err = rwTx.ClearBucket(kv.PlainContractR); err != nil { + return err + } + if err = rwTx.Commit(); err != nil { + return err + } + plainStateCollectors := make([]*etl.Collector, workerCount) + codeCollectors := make([]*etl.Collector, workerCount) + plainContractCollectors := make([]*etl.Collector, workerCount) + for i := 0; i < workerCount; i++ { + 
plainStateCollectors[i] = etl.NewCollector(fmt.Sprintf("plainState %d", i), datadir, etl.NewSortableBuffer(etl.BufferOptimalSize)) + defer plainStateCollectors[i].Close() + codeCollectors[i] = etl.NewCollector(fmt.Sprintf("code %d", i), datadir, etl.NewSortableBuffer(etl.BufferOptimalSize)) + defer codeCollectors[i].Close() + plainContractCollectors[i] = etl.NewCollector(fmt.Sprintf("plainContract %d", i), datadir, etl.NewSortableBuffer(etl.BufferOptimalSize)) + defer plainContractCollectors[i].Close() + } doneCount = 0 for i := 0; i < workerCount; i++ { fillWorkers[i].ResetProgress() - go fillWorkers[i].fillAccounts() + go fillWorkers[i].fillAccounts(plainStateCollectors[i]) } for atomic.LoadUint64(&doneCount) < uint64(workerCount) { select { case <-logEvery.C: var m runtime.MemStats libcommon.ReadMemStats(&m) - sizeEstimate := rs.SizeEstimate() var p float64 for i := 0; i < workerCount; i++ { if total := fillWorkers[i].Total(); total > 0 { @@ -648,36 +747,21 @@ func Recon(genesis *core.Genesis, logger log.Logger) error { } } p *= 100.0 - log.Info("Filling accounts", "workers", workerCount, "progress", fmt.Sprintf("%.2f%%", p), "buffer", libcommon.ByteCount(sizeEstimate), + log.Info("Filling accounts", "workers", workerCount, "progress", fmt.Sprintf("%.2f%%", p), "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys), ) - if sizeEstimate >= commitThreshold { - flushStart := time.Now() - rwTx, err := db.BeginRw(ctx) - if err != nil { - return err - } - if err = rs.Flush(rwTx); err != nil { - return err - } - if err = rwTx.Commit(); err != nil { - return err - } - log.Info("Flush buffer", "duration", time.Since(flushStart)) - } } } doneCount = 0 for i := 0; i < workerCount; i++ { fillWorkers[i].ResetProgress() - go fillWorkers[i].fillStorage() + go fillWorkers[i].fillStorage(plainStateCollectors[i]) } for atomic.LoadUint64(&doneCount) < uint64(workerCount) { select { case <-logEvery.C: var m runtime.MemStats libcommon.ReadMemStats(&m) - 
sizeEstimate := rs.SizeEstimate() var p float64 for i := 0; i < workerCount; i++ { if total := fillWorkers[i].Total(); total > 0 { @@ -685,36 +769,21 @@ func Recon(genesis *core.Genesis, logger log.Logger) error { } } p *= 100.0 - log.Info("Filling storage", "workers", workerCount, "progress", fmt.Sprintf("%.2f%%", p), "buffer", libcommon.ByteCount(sizeEstimate), + log.Info("Filling storage", "workers", workerCount, "progress", fmt.Sprintf("%.2f%%", p), "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys), ) - if sizeEstimate >= commitThreshold { - flushStart := time.Now() - rwTx, err := db.BeginRw(ctx) - if err != nil { - return err - } - if err = rs.Flush(rwTx); err != nil { - return err - } - if err = rwTx.Commit(); err != nil { - return err - } - log.Info("Flush buffer", "duration", time.Since(flushStart)) - } } } doneCount = 0 for i := 0; i < workerCount; i++ { fillWorkers[i].ResetProgress() - go fillWorkers[i].fillCode() + go fillWorkers[i].fillCode(codeCollectors[i], plainContractCollectors[i]) } for atomic.LoadUint64(&doneCount) < uint64(workerCount) { select { case <-logEvery.C: var m runtime.MemStats libcommon.ReadMemStats(&m) - sizeEstimate := rs.SizeEstimate() var p float64 for i := 0; i < workerCount; i++ { if total := fillWorkers[i].Total(); total > 0 { @@ -722,38 +791,55 @@ func Recon(genesis *core.Genesis, logger log.Logger) error { } } p *= 100.0 - log.Info("Filling code", "workers", workerCount, "progress", fmt.Sprintf("%.2f%%", p), "buffer", libcommon.ByteCount(sizeEstimate), + log.Info("Filling code", "workers", workerCount, "progress", fmt.Sprintf("%.2f%%", p), "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys), ) - if sizeEstimate >= commitThreshold { - flushStart := time.Now() - rwTx, err := db.BeginRw(ctx) - if err != nil { - return err - } - if err = rs.Flush(rwTx); err != nil { - return err - } - if err = rwTx.Commit(); err != nil { - return err - } - log.Info("Flush buffer", "duration", 
time.Since(flushStart)) - } } } + // Load all collections into the main collector + for i := 0; i < workerCount; i++ { + if err = plainStateCollectors[i].Load(nil, "", func(k, v []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error { + return plainStateCollector.Collect(k, v) + }, etl.TransformArgs{}); err != nil { + return err + } + plainStateCollectors[i].Close() + if err = codeCollectors[i].Load(nil, "", func(k, v []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error { + return codeCollector.Collect(k, v) + }, etl.TransformArgs{}); err != nil { + return err + } + codeCollectors[i].Close() + if err = plainContractCollectors[i].Load(nil, "", func(k, v []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error { + return plainContractCollector.Collect(k, v) + }, etl.TransformArgs{}); err != nil { + return err + } + plainContractCollectors[i].Close() + } rwTx, err = db.BeginRw(ctx) if err != nil { return err } - if err = rs.Flush(rwTx); err != nil { + if err = plainStateCollector.Load(rwTx, kv.PlainState, etl.IdentityLoadFunc, etl.TransformArgs{}); err != nil { + return err + } + plainStateCollector.Close() + if err = codeCollector.Load(rwTx, kv.Code, etl.IdentityLoadFunc, etl.TransformArgs{}); err != nil { + return err + } + codeCollector.Close() + if err = plainContractCollector.Load(rwTx, kv.PlainContractCode, etl.IdentityLoadFunc, etl.TransformArgs{}); err != nil { return err } + plainContractCollector.Close() if err = rwTx.Commit(); err != nil { return err } if rwTx, err = db.BeginRw(ctx); err != nil { return err } + log.Info("Reconstitution complete", "duration", time.Since(startTime)) log.Info("Computing hashed state") tmpDir := filepath.Join(datadir, "tmp") if err = stagedsync.PromoteHashedStateCleanly("recon", rwTx, stagedsync.StageHashStateCfg(db, tmpDir), ctx); err != nil { @@ -765,7 +851,7 @@ func Recon(genesis *core.Genesis, logger log.Logger) error { if rwTx, err = db.BeginRw(ctx); err != nil { return err } - if _, err 
= stagedsync.RegenerateIntermediateHashes("recon", rwTx, stagedsync.StageTrieCfg(db, false /* checkRoot */, false /* saveHashesToDB */, false /* badBlockHalt */, tmpDir, blockReader, nil), common.Hash{}, make(chan struct{}, 1)); err != nil { + if _, err = stagedsync.RegenerateIntermediateHashes("recon", rwTx, stagedsync.StageTrieCfg(db, false /* checkRoot */, false /* saveHashesToDB */, false /* badBlockHalt */, tmpDir, blockReader, nil /* HeaderDownload */), common.Hash{}, make(chan struct{}, 1)); err != nil { return err } if err = rwTx.Commit(); err != nil { diff --git a/cmd/state/commands/state_recon_1.go b/cmd/state/commands/state_recon_1.go new file mode 100644 index 00000000000..fd968542373 --- /dev/null +++ b/cmd/state/commands/state_recon_1.go @@ -0,0 +1,433 @@ +package commands + +import ( + "container/heap" + "context" + "errors" + "fmt" + "os" + "os/signal" + "path" + "path/filepath" + "runtime" + "sync" + "syscall" + "time" + + libcommon "github.com/ledgerwatch/erigon-lib/common" + "github.com/ledgerwatch/erigon-lib/kv" + kv2 "github.com/ledgerwatch/erigon-lib/kv/mdbx" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/consensus" + "github.com/ledgerwatch/erigon/consensus/misc" + "github.com/ledgerwatch/erigon/core" + "github.com/ledgerwatch/erigon/core/state" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/core/vm" + "github.com/ledgerwatch/erigon/eth/ethconfig" + "github.com/ledgerwatch/erigon/eth/stagedsync" + "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/turbo/services" + "github.com/ledgerwatch/erigon/turbo/snapshotsync" + "github.com/ledgerwatch/log/v3" + "github.com/spf13/cobra" +) + +func init() { + withBlock(recon1Cmd) + withDataDir(recon1Cmd) + rootCmd.AddCommand(recon1Cmd) +} + +var recon1Cmd = &cobra.Command{ + Use: "recon1", + Short: "Exerimental command to reconstitute the state at given block", + RunE: func(cmd *cobra.Command, args []string) error { + logger := 
log.New() + return Recon1(genesis, logger) + }, +} + +type ReconWorker1 struct { + lock sync.Locker + wg *sync.WaitGroup + rs *state.ReconState1 + blockReader services.FullBlockReader + allSnapshots *snapshotsync.RoSnapshots + stateWriter *state.StateReconWriter1 + stateReader *state.StateReconReader1 + getHeader func(hash common.Hash, number uint64) *types.Header + ctx context.Context + engine consensus.Engine + txNums []uint64 + chainConfig *params.ChainConfig + logger log.Logger + genesis *core.Genesis + resultCh chan state.TxTask +} + +func NewReconWorker1(lock sync.Locker, wg *sync.WaitGroup, rs *state.ReconState1, + blockReader services.FullBlockReader, allSnapshots *snapshotsync.RoSnapshots, + txNums []uint64, chainConfig *params.ChainConfig, logger log.Logger, genesis *core.Genesis, + resultCh chan state.TxTask, +) *ReconWorker1 { + return &ReconWorker1{ + lock: lock, + wg: wg, + rs: rs, + blockReader: blockReader, + allSnapshots: allSnapshots, + ctx: context.Background(), + stateWriter: state.NewStateReconWriter1(rs), + stateReader: state.NewStateReconReader1(rs), + txNums: txNums, + chainConfig: chainConfig, + logger: logger, + genesis: genesis, + resultCh: resultCh, + } +} + +func (rw *ReconWorker1) SetTx(tx kv.Tx) { + rw.stateReader.SetTx(tx) +} + +func (rw *ReconWorker1) run() { + defer rw.wg.Done() + rw.getHeader = func(hash common.Hash, number uint64) *types.Header { + h, err := rw.blockReader.Header(rw.ctx, nil, hash, number) + if err != nil { + panic(err) + } + return h + } + rw.engine = initConsensusEngine(rw.chainConfig, rw.logger, rw.allSnapshots) + for txTask, ok := rw.rs.Schedule(); ok; txTask, ok = rw.rs.Schedule() { + rw.runTxTask(txTask) + } +} + +func (rw *ReconWorker1) runTxTask(txTask state.TxTask) { + rw.lock.Lock() + defer rw.lock.Unlock() + txTask.Error = nil + rw.stateReader.SetTxNum(txTask.TxNum) + rw.stateWriter.SetTxNum(txTask.TxNum) + rw.stateReader.ResetReadSet() + rw.stateWriter.ResetWriteSet() + rules := 
rw.chainConfig.Rules(txTask.BlockNum) + ibs := state.New(rw.stateReader) + daoForkTx := rw.chainConfig.DAOForkSupport && rw.chainConfig.DAOForkBlock != nil && rw.chainConfig.DAOForkBlock.Uint64() == txTask.BlockNum && txTask.TxIndex == -1 + var err error + if txTask.BlockNum == 0 && txTask.TxIndex == -1 { + fmt.Printf("txNum=%d, blockNum=%d, Genesis\n", txTask.TxNum, txTask.BlockNum) + // Genesis block + _, ibs, err = rw.genesis.ToBlock() + if err != nil { + panic(err) + } + } else if daoForkTx { + fmt.Printf("txNum=%d, blockNum=%d, DAO fork\n", txTask.TxNum, txTask.BlockNum) + misc.ApplyDAOHardFork(ibs) + ibs.SoftFinalise() + } else if txTask.TxIndex == -1 { + // Block initialisation + } else if txTask.Final { + if txTask.BlockNum > 0 { + fmt.Printf("txNum=%d, blockNum=%d, finalisation of the block\n", txTask.TxNum, txTask.BlockNum) + // End of block transaction in a block + if _, _, err := rw.engine.Finalize(rw.chainConfig, txTask.Header, ibs, txTask.Block.Transactions(), txTask.Block.Uncles(), nil /* receipts */, nil, nil, nil); err != nil { + panic(fmt.Errorf("finalize of block %d failed: %w", txTask.BlockNum, err)) + } + } + } else { + fmt.Printf("txNum=%d, blockNum=%d, txIndex=%d\n", txTask.TxNum, txTask.BlockNum, txTask.TxIndex) + txHash := txTask.Tx.Hash() + gp := new(core.GasPool).AddGas(txTask.Tx.GetGas()) + //fmt.Printf("txNum=%d, blockNum=%d, txIndex=%d, gas=%d, input=[%x]\n", txNum, blockNum, txIndex, txn.GetGas(), txn.GetData()) + usedGas := new(uint64) + vmConfig := vm.Config{NoReceipts: true, SkipAnalysis: core.SkipAnalysis(rw.chainConfig, txTask.BlockNum)} + contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } + ibs.Prepare(txHash, txTask.BlockHash, txTask.TxIndex) + vmConfig.SkipAnalysis = core.SkipAnalysis(rw.chainConfig, txTask.BlockNum) + blockContext := core.NewEVMBlockContext(txTask.Header, rw.getHeader, rw.engine, nil /* author */, contractHasTEVM) + vmenv := vm.NewEVM(blockContext, vm.TxContext{}, ibs, 
rw.chainConfig, vmConfig) + msg, err := txTask.Tx.AsMessage(*types.MakeSigner(rw.chainConfig, txTask.BlockNum), txTask.Header.BaseFee, rules) + if err != nil { + panic(err) + } + txContext := core.NewEVMTxContext(msg) + + // Update the evm with the new transaction context. + vmenv.Reset(txContext, ibs) + + result, err := core.ApplyMessage(vmenv, msg, gp, true /* refunds */, false /* gasBailout */) + if err != nil { + txTask.Error = err + } + // Update the state with pending changes + ibs.SoftFinalise() + *usedGas += result.UsedGas + } + // Prepare read set, write set and balanceIncrease set and send for serialisation + if txTask.Error == nil { + txTask.BalanceIncreaseSet = ibs.BalanceIncreaseSet() + for addr, bal := range txTask.BalanceIncreaseSet { + fmt.Printf("[%x]=>[%d]\n", addr, &bal) + } + if err = ibs.MakeWriteSet(rules, rw.stateWriter); err != nil { + panic(err) + } + txTask.ReadKeys, txTask.ReadVals = rw.stateReader.ReadSet() + txTask.WriteKeys, txTask.WriteVals = rw.stateWriter.WriteSet() + } + rw.resultCh <- txTask +} + +func Recon1(genesis *core.Genesis, logger log.Logger) error { + sigs := make(chan os.Signal, 1) + interruptCh := make(chan bool, 1) + signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM) + + go func() { + <-sigs + interruptCh <- true + }() + ctx := context.Background() + reconDbPath := path.Join(datadir, "recon1db") + var err error + if _, err = os.Stat(reconDbPath); err != nil { + if !errors.Is(err, os.ErrNotExist) { + return err + } + } else if err = os.RemoveAll(reconDbPath); err != nil { + return err + } + db, err := kv2.NewMDBX(logger).Path(reconDbPath).WriteMap().Open() + if err != nil { + return err + } + startTime := time.Now() + var blockReader services.FullBlockReader + var allSnapshots *snapshotsync.RoSnapshots + allSnapshots = snapshotsync.NewRoSnapshots(ethconfig.NewSnapCfg(true, false, true), path.Join(datadir, "snapshots")) + defer allSnapshots.Close() + if err := allSnapshots.Reopen(); err != nil { + return 
fmt.Errorf("reopen snapshot segments: %w", err) + } + blockReader = snapshotsync.NewBlockReaderWithSnapshots(allSnapshots) + // Compute mapping blockNum -> last TxNum in that block + txNums := make([]uint64, allSnapshots.BlocksAvailable()+1) + if err = allSnapshots.Bodies.View(func(bs []*snapshotsync.BodySegment) error { + for _, b := range bs { + if err = b.Iterate(func(blockNum, baseTxNum, txAmount uint64) { + txNums[blockNum] = baseTxNum + txAmount + }); err != nil { + return err + } + } + return nil + }); err != nil { + return fmt.Errorf("build txNum => blockNum mapping: %w", err) + } + blockNum := block + 1 + txNum := txNums[blockNum-1] + fmt.Printf("Corresponding block num = %d, txNum = %d\n", blockNum, txNum) + workerCount := runtime.NumCPU() + workCh := make(chan state.TxTask, 128) + rs := state.NewReconState1(workCh) + var lock sync.RWMutex + reconWorkers := make([]*ReconWorker1, workerCount) + roTxs := make([]kv.Tx, workerCount) + defer func() { + for i := 0; i < workerCount; i++ { + if roTxs[i] != nil { + roTxs[i].Rollback() + } + } + }() + for i := 0; i < workerCount; i++ { + roTxs[i], err = db.BeginRo(ctx) + if err != nil { + return err + } + } + var wg sync.WaitGroup + resultCh := make(chan state.TxTask, 128) + for i := 0; i < workerCount; i++ { + reconWorkers[i] = NewReconWorker1(lock.RLocker(), &wg, rs, blockReader, allSnapshots, txNums, chainConfig, logger, genesis, resultCh) + reconWorkers[i].SetTx(roTxs[i]) + } + wg.Add(workerCount) + for i := 0; i < workerCount; i++ { + go reconWorkers[i].run() + } + commitThreshold := uint64(256 * 1024 * 1024) + count := uint64(0) + rollbackCount := uint64(0) + total := txNum + prevCount := uint64(0) + prevRollbackCount := uint64(0) + prevTime := time.Now() + logEvery := time.NewTicker(logInterval) + defer logEvery.Stop() + var rws state.TxTaskQueue + heap.Init(&rws) + var outputTxNum uint64 + // Go-routine gathering results from the workers + go func() { + for outputTxNum < txNum { + select { + case txTask := 
<-resultCh: + if txTask.TxNum == outputTxNum { + // Try to apply without placing on the queue first + if txTask.Error == nil && rs.ReadsValid(txTask.ReadKeys, txTask.ReadVals) { + rs.Apply(txTask.WriteKeys, txTask.WriteVals, txTask.BalanceIncreaseSet) + rs.CommitTxNum(txTask.Sender, txTask.TxNum) + outputTxNum++ + } else { + rs.RollbackTx(txTask) + } + } else { + heap.Push(&rws, txTask) + } + for rws.Len() > 0 && rws[0].TxNum == outputTxNum { + txTask = heap.Pop(&rws).(state.TxTask) + if txTask.Error == nil && rs.ReadsValid(txTask.ReadKeys, txTask.ReadVals) { + rs.Apply(txTask.WriteKeys, txTask.WriteVals, txTask.BalanceIncreaseSet) + rs.CommitTxNum(txTask.Sender, txTask.TxNum) + outputTxNum++ + } else { + rs.RollbackTx(txTask) + } + } + case <-logEvery.C: + var m runtime.MemStats + libcommon.ReadMemStats(&m) + sizeEstimate := rs.SizeEstimate() + count = rs.DoneCount() + rollbackCount = rs.RollbackCount() + currentTime := time.Now() + interval := currentTime.Sub(prevTime) + speedTx := float64(count-prevCount) / (float64(interval) / float64(time.Second)) + progress := 100.0 * float64(count) / float64(total) + var repeatRatio float64 + if count > prevCount { + repeatRatio = 100.0 * float64(rollbackCount-prevRollbackCount) / float64(count-prevCount) + } + prevTime = currentTime + prevCount = count + prevRollbackCount = rollbackCount + log.Info("Transaction replay", "workers", workerCount, "progress", fmt.Sprintf("%.2f%%", progress), "tx/s", fmt.Sprintf("%.1f", speedTx), "repeat ratio", fmt.Sprintf("%.2f%%", repeatRatio), "buffer", libcommon.ByteCount(sizeEstimate), + "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys), + ) + if sizeEstimate >= commitThreshold { + commitStart := time.Now() + log.Info("Committing...") + err := func() error { + lock.Lock() + defer lock.Unlock() + for i := 0; i < workerCount; i++ { + roTxs[i].Rollback() + } + rwTx, err := db.BeginRw(ctx) + if err != nil { + return err + } + if err = rs.Flush(rwTx); err != nil { + 
return err + } + if err = rwTx.Commit(); err != nil { + return err + } + for i := 0; i < workerCount; i++ { + if roTxs[i], err = db.BeginRo(ctx); err != nil { + return err + } + reconWorkers[i].SetTx(roTxs[i]) + } + return nil + }() + if err != nil { + panic(err) + } + log.Info("Committed", "time", time.Since(commitStart)) + } + } + } + }() + var inputTxNum uint64 + for blockNum := uint64(0); blockNum <= block; blockNum++ { + header, err := blockReader.HeaderByNumber(ctx, nil, blockNum) + if err != nil { + panic(err) + } + blockHash := header.Hash() + b, _, err := blockReader.BlockWithSenders(ctx, nil, blockHash, blockNum) + if err != nil { + panic(err) + } + txs := b.Transactions() + for txIndex := -1; txIndex <= len(txs); txIndex++ { + txTask := state.TxTask{ + Header: header, + BlockNum: blockNum, + Block: b, + TxNum: inputTxNum, + TxIndex: txIndex, + BlockHash: blockHash, + Final: txIndex == len(txs), + } + if txIndex >= 0 && txIndex < len(txs) { + txTask.Tx = txs[txIndex] + if sender, ok := txs[txIndex].GetSender(); ok { + txTask.Sender = &sender + } + } + workCh <- txTask + inputTxNum++ + } + } + close(workCh) + wg.Wait() + for i := 0; i < workerCount; i++ { + roTxs[i].Rollback() + } + rwTx, err := db.BeginRw(ctx) + if err != nil { + return err + } + defer func() { + if rwTx != nil { + rwTx.Rollback() + } + }() + if err = rs.Flush(rwTx); err != nil { + return err + } + if err = rwTx.Commit(); err != nil { + return err + } + if rwTx, err = db.BeginRw(ctx); err != nil { + return err + } + log.Info("Transaction replay complete", "duration", time.Since(startTime)) + log.Info("Computing hashed state") + tmpDir := filepath.Join(datadir, "tmp") + if err = stagedsync.PromoteHashedStateCleanly("recon", rwTx, stagedsync.StageHashStateCfg(db, tmpDir), ctx); err != nil { + return err + } + if err = rwTx.Commit(); err != nil { + return err + } + if rwTx, err = db.BeginRw(ctx); err != nil { + return err + } + if _, err = stagedsync.RegenerateIntermediateHashes("recon", 
rwTx, stagedsync.StageTrieCfg(db, false /* checkRoot */, false /* saveHashesToDB */, false /* badBlockHalt */, tmpDir, blockReader, nil /* HeaderDownload */), common.Hash{}, make(chan struct{}, 1)); err != nil { + return err + } + if err = rwTx.Commit(); err != nil { + return err + } + return nil +} diff --git a/core/state/history_reader_nostate.go b/core/state/history_reader_nostate.go index d1e270bec3d..4b489f42bca 100644 --- a/core/state/history_reader_nostate.go +++ b/core/state/history_reader_nostate.go @@ -1,12 +1,12 @@ package state import ( + "encoding/binary" "fmt" "github.com/ledgerwatch/erigon-lib/kv" libstate "github.com/ledgerwatch/erigon-lib/state" "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/core/types/accounts" ) @@ -18,7 +18,6 @@ func (r *RequiredStateError) Error() string { return fmt.Sprintf("required state at txNum %d", r.StateTxNum) } -// Implements StateReader and StateWriter type HistoryReaderNoState struct { ac *libstate.AggregatorContext tx kv.Tx @@ -27,6 +26,7 @@ type HistoryReaderNoState struct { rs *ReconState readError bool stateTxNum uint64 + composite []byte } func NewHistoryReaderNoState(ac *libstate.AggregatorContext, rs *ReconState) *HistoryReaderNoState { @@ -56,9 +56,16 @@ func (hr *HistoryReaderNoState) ReadAccountData(address common.Address) (*accoun hr.stateTxNum = stateTxNum return nil, &RequiredStateError{StateTxNum: stateTxNum} } - enc = hr.rs.Get(kv.PlainState, address.Bytes()) + enc = hr.rs.Get(kv.PlainStateR, address.Bytes(), nil, stateTxNum) if enc == nil { - enc, err = hr.tx.GetOne(kv.PlainState, address.Bytes()) + if cap(hr.composite) < 8+20 { + hr.composite = make([]byte, 8+20) + } else if len(hr.composite) != 8+20 { + hr.composite = hr.composite[:8+20] + } + binary.BigEndian.PutUint64(hr.composite, stateTxNum) + copy(hr.composite[8:], address.Bytes()) + enc, err = hr.tx.GetOne(kv.PlainStateR, hr.composite) if err != nil { return nil, err } @@ 
-124,10 +131,19 @@ func (hr *HistoryReaderNoState) ReadAccountStorage(address common.Address, incar hr.stateTxNum = stateTxNum return nil, &RequiredStateError{StateTxNum: stateTxNum} } - compositeKey := dbutils.PlainGenerateCompositeStorageKey(address.Bytes(), FirstContractIncarnation, key.Bytes()) - enc = hr.rs.Get(kv.PlainState, compositeKey) + + enc = hr.rs.Get(kv.PlainStateR, address.Bytes(), key.Bytes(), stateTxNum) if enc == nil { - enc, err = hr.tx.GetOne(kv.PlainState, compositeKey) + if cap(hr.composite) < 8+20+8+32 { + hr.composite = make([]byte, 8+20+8+32) + } else if len(hr.composite) != 8+20+8+32 { + hr.composite = hr.composite[:8+20+8+32] + } + binary.BigEndian.PutUint64(hr.composite, stateTxNum) + copy(hr.composite[8:], address.Bytes()) + binary.BigEndian.PutUint64(hr.composite[8+20:], 1) + copy(hr.composite[8+20+8:], key.Bytes()) + enc, err = hr.tx.GetOne(kv.PlainStateR, hr.composite) if err != nil { return nil, err } @@ -157,9 +173,16 @@ func (hr *HistoryReaderNoState) ReadAccountCode(address common.Address, incarnat hr.stateTxNum = stateTxNum return nil, &RequiredStateError{StateTxNum: stateTxNum} } - enc = hr.rs.Get(kv.Code, codeHash.Bytes()) + enc = hr.rs.Get(kv.CodeR, codeHash.Bytes(), nil, stateTxNum) if enc == nil { - enc, err = hr.tx.GetOne(kv.Code, codeHash.Bytes()) + if cap(hr.composite) < 8+32 { + hr.composite = make([]byte, 8+32) + } else if len(hr.composite) != 8+32 { + hr.composite = hr.composite[:8+32] + } + binary.BigEndian.PutUint64(hr.composite, stateTxNum) + copy(hr.composite[8:], codeHash.Bytes()) + enc, err = hr.tx.GetOne(kv.CodeR, hr.composite) if err != nil { return nil, err } @@ -182,9 +205,16 @@ func (hr *HistoryReaderNoState) ReadAccountCodeSize(address common.Address, inca hr.stateTxNum = stateTxNum return 0, &RequiredStateError{StateTxNum: stateTxNum} } - enc := hr.rs.Get(kv.Code, codeHash.Bytes()) + enc := hr.rs.Get(kv.CodeR, codeHash.Bytes(), nil, stateTxNum) if enc == nil { - enc, err = hr.tx.GetOne(kv.Code, 
codeHash.Bytes()) + if cap(hr.composite) < 8+32 { + hr.composite = make([]byte, 8+32) + } else if len(hr.composite) != 8+32 { + hr.composite = hr.composite[:8+32] + } + binary.BigEndian.PutUint64(hr.composite, stateTxNum) + copy(hr.composite[8:], codeHash.Bytes()) + enc, err = hr.tx.GetOne(kv.CodeR, hr.composite) if err != nil { return 0, err } diff --git a/core/state/intra_block_state.go b/core/state/intra_block_state.go index d061ee91fe8..4c8dc91db7b 100644 --- a/core/state/intra_block_state.go +++ b/core/state/intra_block_state.go @@ -743,6 +743,24 @@ func (sdb *IntraBlockState) FinalizeTx(chainRules *params.Rules, stateWriter Sta return nil } +func (sdb *IntraBlockState) SoftFinalise() { + for addr := range sdb.journal.dirties { + _, exist := sdb.stateObjects[addr] + if !exist { + // ripeMD is 'touched' at block 1714175, in tx 0x1237f737031e40bcde4a8b7e717b2d15e3ecadfe49bb1bbc71ee9deb09c6fcf2 + // That tx goes out of gas, and although the notion of 'touched' does not exist there, the + // touch-event will still be recorded in the journal. Since ripeMD is a special snowflake, + // it will persist in the journal even though the journal is reverted. In this special circumstance, + // it may exist in `sdb.journal.dirties` but not in `sdb.stateObjects`. + // Thus, we can safely ignore it here + continue + } + sdb.stateObjectsDirty[addr] = struct{}{} + } + // Invalidate journal because reverting across transactions is not allowed. + sdb.clearJournalAndRefund() +} + // CommitBlock finalizes the state by removing the self destructed objects // and clears the journal as well as the refunds. 
func (sdb *IntraBlockState) CommitBlock(chainRules *params.Rules, stateWriter StateWriter) error { @@ -751,6 +769,20 @@ func (sdb *IntraBlockState) CommitBlock(chainRules *params.Rules, stateWriter St sdb.getStateObject(addr) } } + return sdb.MakeWriteSet(chainRules, stateWriter) +} + +func (sdb *IntraBlockState) BalanceIncreaseSet() map[common.Address]uint256.Int { + s := map[common.Address]uint256.Int{} + for addr, bi := range sdb.balanceInc { + if !bi.transferred { + s[addr] = bi.increase + } + } + return s +} + +func (sdb *IntraBlockState) MakeWriteSet(chainRules *params.Rules, stateWriter StateWriter) error { for addr := range sdb.journal.dirties { sdb.stateObjectsDirty[addr] = struct{}{} } diff --git a/core/state/recon_state_1.go b/core/state/recon_state_1.go new file mode 100644 index 00000000000..3487a7e4ab8 --- /dev/null +++ b/core/state/recon_state_1.go @@ -0,0 +1,474 @@ +package state + +import ( + "bytes" + "container/heap" + "encoding/binary" + "fmt" + "sync" + + "github.com/holiman/uint256" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/dbutils" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/core/types/accounts" +) + +// ReadWriteSet contains ReadSet, WriteSet and BalanceIncrease of a transaction, +// which is processed by a single thread that writes into the ReconState1 and +// flushes to the database +type TxTask struct { + TxNum uint64 + BlockNum uint64 + Header *types.Header + Block *types.Block + BlockHash common.Hash + Sender *common.Address + TxIndex int // -1 for block initialisation + Final bool + Tx types.Transaction + BalanceIncreaseSet map[common.Address]uint256.Int + ReadKeys map[string][][]byte + ReadVals map[string][][]byte + WriteKeys map[string][][]byte + WriteVals map[string][][]byte + Error error +} + +type TxTaskQueue []TxTask + +func (h TxTaskQueue) Len() int { + return len(h) +} + +func (h TxTaskQueue) Less(i, j int) bool { + 
return h[i].TxNum < h[j].TxNum +} + +func (h TxTaskQueue) Swap(i, j int) { + h[i], h[j] = h[j], h[i] +} + +func (h *TxTaskQueue) Push(a interface{}) { + *h = append(*h, a.(TxTask)) +} + +func (h *TxTaskQueue) Pop() interface{} { + c := *h + *h = c[:len(c)-1] + return c[len(c)-1] +} + +type ReconState1 struct { + lock sync.RWMutex + triggers map[uint64]TxTask + senderTxNums map[common.Address]uint64 + workCh chan TxTask + queue TxTaskQueue + changes map[string]map[string][]byte + sizeEstimate uint64 + rollbackCount uint64 + txsDone uint64 +} + +func NewReconState1(workCh chan TxTask) *ReconState1 { + rs := &ReconState1{ + workCh: workCh, + triggers: map[uint64]TxTask{}, + senderTxNums: map[common.Address]uint64{}, + changes: map[string]map[string][]byte{}, + } + return rs +} + +func (rs *ReconState1) put(table string, key, val []byte) { + t, ok := rs.changes[table] + if !ok { + t = map[string][]byte{} + rs.changes[table] = t + } + t[string(key)] = val + rs.sizeEstimate += uint64(len(key)) + uint64(len(val)) +} + +func (rs *ReconState1) Delete(table string, key []byte) { + rs.lock.Lock() + defer rs.lock.Unlock() + t, ok := rs.changes[table] + if !ok { + t = map[string][]byte{} + rs.changes[table] = t + } + t[string(key)] = nil + rs.sizeEstimate += uint64(len(key)) +} + +func (rs *ReconState1) Get(table string, key []byte) []byte { + rs.lock.RLock() + defer rs.lock.RUnlock() + return rs.get(table, key) +} + +func (rs *ReconState1) get(table string, key []byte) []byte { + t, ok := rs.changes[table] + if !ok { + return nil + } + return t[string(key)] +} + +func (rs *ReconState1) Flush(rwTx kv.RwTx) error { + rs.lock.Lock() + defer rs.lock.Unlock() + for table, t := range rs.changes { + for ks, val := range t { + if len(val) > 0 { + if err := rwTx.Put(table, []byte(ks), val); err != nil { + return err + } + } + } + } + rs.changes = map[string]map[string][]byte{} + rs.sizeEstimate = 0 + return nil +} + +func (rs *ReconState1) Schedule() (TxTask, bool) { + rs.lock.Lock() + 
defer rs.lock.Unlock() + for rs.queue.Len() < 16 { + txTask, ok := <-rs.workCh + if !ok { + // No more work, channel is closed + break + } + if txTask.Sender == nil { + heap.Push(&rs.queue, txTask) + } else if rs.registerSender(txTask) { + heap.Push(&rs.queue, txTask) + } + } + if rs.queue.Len() > 0 { + return heap.Pop(&rs.queue).(TxTask), true + } + return TxTask{}, false +} + +func (rs *ReconState1) registerSender(txTask TxTask) bool { + lastTxNum, deferral := rs.senderTxNums[*txTask.Sender] + if deferral { + // Transactions with the same sender have obvious data dependency, no point running it before lastTxNum + // So we add this data dependency as a trigger + //fmt.Printf("trigger[%d] sender [%x]<=%x\n", lastTxNum, *txTask.Sender, txTask.Tx.Hash()) + rs.triggers[lastTxNum] = txTask + } + //fmt.Printf("senderTxNums[%x]=%d\n", *txTask.Sender, txTask.TxNum) + rs.senderTxNums[*txTask.Sender] = txTask.TxNum + return !deferral +} + +func (rs *ReconState1) CommitTxNum(sender *common.Address, txNum uint64) { + rs.lock.Lock() + defer rs.lock.Unlock() + if triggered, ok := rs.triggers[txNum]; ok { + heap.Push(&rs.queue, triggered) + delete(rs.triggers, txNum) + } + if sender != nil { + if lastTxNum, ok := rs.senderTxNums[*sender]; ok && lastTxNum == txNum { + // This is the last transaction so far with this sender, remove + delete(rs.senderTxNums, *sender) + } + } + rs.txsDone++ +} + +func (rs *ReconState1) RollbackTx(txTask TxTask) { + rs.lock.Lock() + defer rs.lock.Unlock() + heap.Push(&rs.queue, txTask) + rs.rollbackCount++ +} + +func (rs *ReconState1) Apply(writeKeys, writeVals map[string][][]byte, balanceIncreaseSet map[common.Address]uint256.Int) { + rs.lock.Lock() + defer rs.lock.Unlock() + for table, keyList := range writeKeys { + valList := writeVals[table] + for i, key := range keyList { + val := valList[i] + rs.put(table, key, val) + } + } + for addr, increase := range balanceIncreaseSet { + enc := rs.get(kv.PlainState, addr.Bytes()) + var a accounts.Account + 
if err := a.DecodeForStorage(enc); err != nil { + panic(err) + } + a.Balance.Add(&a.Balance, &increase) + l := a.EncodingLengthForStorage() + enc = make([]byte, l) + a.EncodeForStorage(enc) + rs.put(kv.PlainState, addr.Bytes(), enc) + } +} + +func (rs *ReconState1) DoneCount() uint64 { + rs.lock.RLock() + defer rs.lock.RUnlock() + return rs.txsDone +} + +func (rs *ReconState1) RollbackCount() uint64 { + rs.lock.RLock() + defer rs.lock.RUnlock() + return rs.rollbackCount +} + +func (rs *ReconState1) SizeEstimate() uint64 { + rs.lock.RLock() + defer rs.lock.RUnlock() + return rs.sizeEstimate +} + +func (rs *ReconState1) ReadsValid(readKeys, readVals map[string][][]byte) bool { + rs.lock.RLock() + defer rs.lock.RUnlock() + for table, keyList := range readKeys { + t, ok := rs.changes[table] + if !ok { + continue + } + valList := readVals[table] + for i, key := range keyList { + val := valList[i] + if rereadVal, ok := t[string(key)]; ok { + if !bytes.Equal(val, rereadVal) { + return false + } + } + } + } + return true +} + +type StateReconWriter1 struct { + rs *ReconState1 + txNum uint64 + writeKeys map[string][][]byte + writeVals map[string][][]byte +} + +func NewStateReconWriter1(rs *ReconState1) *StateReconWriter1 { + return &StateReconWriter1{ + rs: rs, + } +} + +func (w *StateReconWriter1) SetTxNum(txNum uint64) { + w.txNum = txNum +} + +func (w *StateReconWriter1) ResetWriteSet() { + w.writeKeys = map[string][][]byte{} + w.writeVals = map[string][][]byte{} +} + +func (w *StateReconWriter1) WriteSet() (map[string][][]byte, map[string][][]byte) { + return w.writeKeys, w.writeVals +} + +func (w *StateReconWriter1) UpdateAccountData(address common.Address, original, account *accounts.Account) error { + value := make([]byte, account.EncodingLengthForStorage()) + account.EncodeForStorage(value) + //fmt.Printf("account [%x]=>{Balance: %d, Nonce: %d, Root: %x, CodeHash: %x} txNum: %d\n", address, &account.Balance, account.Nonce, account.Root, account.CodeHash, w.txNum) + 
w.writeKeys[kv.PlainState] = append(w.writeKeys[kv.PlainState], address.Bytes()) + w.writeVals[kv.PlainState] = append(w.writeVals[kv.PlainState], value) + return nil +} + +func (w *StateReconWriter1) UpdateAccountCode(address common.Address, incarnation uint64, codeHash common.Hash, code []byte) error { + w.writeKeys[kv.Code] = append(w.writeKeys[kv.Code], codeHash.Bytes()) + w.writeVals[kv.Code] = append(w.writeVals[kv.Code], code) + if len(code) > 0 { + //fmt.Printf("code [%x] => [%x] CodeHash: %x, txNum: %d\n", address, code, codeHash, w.txNum) + w.writeKeys[kv.PlainContractCode] = append(w.writeKeys[kv.PlainContractCode], dbutils.PlainGenerateStoragePrefix(address[:], incarnation)) + w.writeVals[kv.PlainContractCode] = append(w.writeVals[kv.PlainContractCode], codeHash.Bytes()) + } + return nil +} + +func (w *StateReconWriter1) DeleteAccount(address common.Address, original *accounts.Account) error { + w.writeKeys[kv.PlainState] = append(w.writeKeys[kv.PlainState], address.Bytes()) + w.writeVals[kv.PlainState] = append(w.writeVals[kv.PlainState], nil) + if original.Incarnation > 0 { + var b [8]byte + binary.BigEndian.PutUint64(b[:], original.Incarnation) + w.writeKeys[kv.IncarnationMap] = append(w.writeKeys[kv.IncarnationMap], address.Bytes()) + w.writeVals[kv.IncarnationMap] = append(w.writeVals[kv.IncarnationMap], b[:]) + } + return nil +} + +func (w *StateReconWriter1) WriteAccountStorage(address common.Address, incarnation uint64, key *common.Hash, original, value *uint256.Int) error { + if *original == *value { + return nil + } + w.writeKeys[kv.PlainState] = append(w.writeKeys[kv.PlainState], dbutils.PlainGenerateCompositeStorageKey(address.Bytes(), incarnation, key.Bytes())) + w.writeVals[kv.PlainState] = append(w.writeVals[kv.PlainState], value.Bytes()) + //fmt.Printf("storage [%x] [%x] => [%x], txNum: %d\n", address, *key, v, w.txNum) + return nil +} + +func (w *StateReconWriter1) CreateContract(address common.Address) error { + return nil +} + +type 
StateReconReader1 struct { + tx kv.Tx + txNum uint64 + trace bool + rs *ReconState1 + readError bool + stateTxNum uint64 + composite []byte + readKeys map[string][][]byte + readVals map[string][][]byte +} + +func NewStateReconReader1(rs *ReconState1) *StateReconReader1 { + return &StateReconReader1{rs: rs} +} + +func (r *StateReconReader1) SetTxNum(txNum uint64) { + r.txNum = txNum +} + +func (r *StateReconReader1) SetTx(tx kv.Tx) { + r.tx = tx +} + +func (r *StateReconReader1) ResetReadSet() { + r.readKeys = map[string][][]byte{} + r.readVals = map[string][][]byte{} +} + +func (r *StateReconReader1) ReadSet() (map[string][][]byte, map[string][][]byte) { + return r.readKeys, r.readVals +} + +func (r *StateReconReader1) SetTrace(trace bool) { + r.trace = trace +} + +func (r *StateReconReader1) ReadAccountData(address common.Address) (*accounts.Account, error) { + enc := r.rs.Get(kv.PlainState, address.Bytes()) + if enc == nil { + var err error + enc, err = r.tx.GetOne(kv.PlainState, address.Bytes()) + if err != nil { + return nil, err + } + } + r.readKeys[kv.PlainState] = append(r.readKeys[kv.PlainState], address.Bytes()) + r.readVals[kv.PlainState] = append(r.readVals[kv.PlainState], enc) + if len(enc) == 0 { + return nil, nil + } + var a accounts.Account + if err := a.DecodeForStorage(enc); err != nil { + return nil, err + } + if r.trace { + fmt.Printf("ReadAccountData [%x] => [nonce: %d, balance: %d, codeHash: %x], txNum: %d\n", address, a.Nonce, &a.Balance, a.CodeHash, r.txNum) + } + return &a, nil +} + +func (r *StateReconReader1) ReadAccountStorage(address common.Address, incarnation uint64, key *common.Hash) ([]byte, error) { + if cap(r.composite) < 20+8+32 { + r.composite = make([]byte, 20+8+32) + } else if len(r.composite) != 20+8+32 { + r.composite = r.composite[:20+8+32] + } + copy(r.composite, address.Bytes()) + binary.BigEndian.PutUint64(r.composite[20:], incarnation) + copy(r.composite[20+8:], key.Bytes()) + + enc := r.rs.Get(kv.PlainState, 
r.composite) + if enc == nil { + var err error + enc, err = r.tx.GetOne(kv.PlainState, r.composite) + if err != nil { + return nil, err + } + } + r.readKeys[kv.PlainState] = append(r.readKeys[kv.PlainState], r.composite) + r.readVals[kv.PlainState] = append(r.readVals[kv.PlainState], enc) + if r.trace { + if enc == nil { + fmt.Printf("ReadAccountStorage [%x] [%x] => [], txNum: %d\n", address, key.Bytes(), r.txNum) + } else { + fmt.Printf("ReadAccountStorage [%x] [%x] => [%x], txNum: %d\n", address, key.Bytes(), enc, r.txNum) + } + } + if enc == nil { + return nil, nil + } + return enc, nil +} + +func (r *StateReconReader1) ReadAccountCode(address common.Address, incarnation uint64, codeHash common.Hash) ([]byte, error) { + enc := r.rs.Get(kv.Code, codeHash.Bytes()) + if enc == nil { + var err error + enc, err = r.tx.GetOne(kv.Code, codeHash.Bytes()) + if err != nil { + return nil, err + } + } + r.readKeys[kv.Code] = append(r.readKeys[kv.Code], address.Bytes()) + r.readVals[kv.Code] = append(r.readVals[kv.Code], enc) + if r.trace { + fmt.Printf("ReadAccountCode [%x] => [%x], txNum: %d\n", address, enc, r.txNum) + } + return enc, nil +} + +func (r *StateReconReader1) ReadAccountCodeSize(address common.Address, incarnation uint64, codeHash common.Hash) (int, error) { + enc := r.rs.Get(kv.Code, codeHash.Bytes()) + if enc == nil { + var err error + enc, err = r.tx.GetOne(kv.Code, codeHash.Bytes()) + if err != nil { + return 0, err + } + } + r.readKeys[kv.Code] = append(r.readKeys[kv.Code], address.Bytes()) + r.readVals[kv.Code] = append(r.readVals[kv.Code], enc) + size := len(enc) + if r.trace { + fmt.Printf("ReadAccountCodeSize [%x] => [%d], txNum: %d\n", address, size, r.txNum) + } + return size, nil +} + +func (r *StateReconReader1) ReadAccountIncarnation(address common.Address) (uint64, error) { + enc := r.rs.Get(kv.IncarnationMap, address.Bytes()) + if enc == nil { + var err error + enc, err = r.tx.GetOne(kv.IncarnationMap, address.Bytes()) + if err != nil { + 
return 0, err + } + } + r.readKeys[kv.IncarnationMap] = append(r.readKeys[kv.IncarnationMap], address.Bytes()) + r.readVals[kv.IncarnationMap] = append(r.readVals[kv.IncarnationMap], enc) + if len(enc) == 0 { + return 0, nil + } + return binary.BigEndian.Uint64(enc), nil +} diff --git a/core/state/state_recon_writer.go b/core/state/state_recon_writer.go index c3893b6545e..49f2635c75f 100644 --- a/core/state/state_recon_writer.go +++ b/core/state/state_recon_writer.go @@ -3,131 +3,139 @@ package state import ( //"fmt" + "bytes" "container/heap" + "encoding/binary" + "sync" + "unsafe" + "github.com/RoaringBitmap/roaring/roaring64" + "github.com/google/btree" "github.com/holiman/uint256" + libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" libstate "github.com/ledgerwatch/erigon-lib/state" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/core/types/accounts" - "golang.org/x/exp/constraints" - "sync" ) -type theap[T constraints.Ordered] []T - -func (h theap[T]) Len() int { - return len(h) -} - -func (h theap[T]) Less(i, j int) bool { - return h[i] < h[j] -} - -func (h theap[T]) Swap(i, j int) { - h[i], h[j] = h[j], h[i] -} - -func (h *theap[T]) Push(a interface{}) { - *h = append(*h, a.(T)) +type ReconStateItem struct { + txNum uint64 // txNum where the item has been created + key1, key2 []byte + val []byte } -func (h *theap[T]) Pop() interface{} { - c := *h - *h = c[:len(c)-1] - return c[len(c)-1] +func (i ReconStateItem) Less(than btree.Item) bool { + thanItem := than.(ReconStateItem) + if i.txNum == thanItem.txNum { + c1 := bytes.Compare(i.key1, thanItem.key1) + if c1 == 0 { + c2 := bytes.Compare(i.key2, thanItem.key2) + return c2 < 0 + } + return c1 < 0 + } + return i.txNum < thanItem.txNum } // ReconState is the accumulator of changes to the state type ReconState struct { lock sync.RWMutex - workIterator roaring64.IntPeekable64 doneBitmap roaring64.Bitmap - 
triggers map[uint64][]uint64 - queue theap[uint64] - changes map[string]map[string][]byte + triggers map[uint64][]TxTask + workCh chan TxTask + queue TxTaskQueue + changes map[string]*btree.BTree // table => [] (txNum; key1; key2; val) sizeEstimate uint64 rollbackCount uint64 } -func NewReconState() *ReconState { +func NewReconState(workCh chan TxTask) *ReconState { rs := &ReconState{ - triggers: map[uint64][]uint64{}, - changes: map[string]map[string][]byte{}, + workCh: workCh, + triggers: map[uint64][]TxTask{}, + changes: map[string]*btree.BTree{}, } return rs } -func (rs *ReconState) SetWorkBitmap(workBitmap *roaring64.Bitmap) { - rs.workIterator = workBitmap.Iterator() -} - -func (rs *ReconState) Put(table string, key, val []byte) { +func (rs *ReconState) Put(table string, key1, key2, val []byte, txNum uint64) { rs.lock.Lock() defer rs.lock.Unlock() t, ok := rs.changes[table] if !ok { - t = map[string][]byte{} + t = btree.New(32) rs.changes[table] = t } - t[string(key)] = val - rs.sizeEstimate += uint64(len(key)) + uint64(len(val)) + item := ReconStateItem{key1: libcommon.Copy(key1), key2: libcommon.Copy(key2), val: libcommon.Copy(val), txNum: txNum} + t.ReplaceOrInsert(item) + rs.sizeEstimate += uint64(unsafe.Sizeof(item)) + uint64(len(key1)) + uint64(len(key2)) + uint64(len(val)) } -func (rs *ReconState) Delete(table string, key []byte) { - rs.lock.Lock() - defer rs.lock.Unlock() - t, ok := rs.changes[table] - if !ok { - t = map[string][]byte{} - rs.changes[table] = t - } - t[string(key)] = nil - rs.sizeEstimate += uint64(len(key)) -} - -func (rs *ReconState) Get(table string, key []byte) []byte { +func (rs *ReconState) Get(table string, key1, key2 []byte, txNum uint64) []byte { rs.lock.RLock() defer rs.lock.RUnlock() t, ok := rs.changes[table] if !ok { return nil } - return t[string(key)] + i := t.Get(ReconStateItem{txNum: txNum, key1: key1, key2: key2}) + if i == nil { + return nil + } + return i.(ReconStateItem).val } func (rs *ReconState) Flush(rwTx 
kv.RwTx) error { rs.lock.Lock() defer rs.lock.Unlock() for table, t := range rs.changes { - for ks, val := range t { - if len(val) == 0 { - if err := rwTx.Delete(table, []byte(ks), nil); err != nil { - return err - } + var err error + t.Ascend(func(i btree.Item) bool { + item := i.(ReconStateItem) + if len(item.val) == 0 { + return true + } + var composite []byte + if item.key2 == nil { + composite = make([]byte, 8+len(item.key1)) } else { - if err := rwTx.Put(table, []byte(ks), val); err != nil { - return err - } + composite = make([]byte, 8+len(item.key1)+8+len(item.key2)) + binary.BigEndian.PutUint64(composite[8+len(item.key1):], 1) + copy(composite[8+len(item.key1)+8:], item.key2) + } + binary.BigEndian.PutUint64(composite, item.txNum) + copy(composite[8:], item.key1) + if err = rwTx.Put(table, composite, item.val); err != nil { + return false } + return true + }) + if err != nil { + return err } + t.Clear(true) } - rs.changes = map[string]map[string][]byte{} rs.sizeEstimate = 0 return nil } -func (rs *ReconState) Schedule() (uint64, bool) { +func (rs *ReconState) Schedule() (TxTask, bool) { rs.lock.Lock() defer rs.lock.Unlock() - for rs.queue.Len() < 16 && rs.workIterator.HasNext() { - heap.Push(&rs.queue, rs.workIterator.Next()) + for rs.queue.Len() < 16 { + txTask, ok := <-rs.workCh + if !ok { + // No more work, channel is closed + break + } + heap.Push(&rs.queue, txTask) } if rs.queue.Len() > 0 { - return heap.Pop(&rs.queue).(uint64), true + return heap.Pop(&rs.queue).(TxTask), true } - return 0, false + return TxTask{}, false } func (rs *ReconState) CommitTxNum(txNum uint64) { @@ -142,14 +150,14 @@ func (rs *ReconState) CommitTxNum(txNum uint64) { rs.doneBitmap.Add(txNum) } -func (rs *ReconState) RollbackTxNum(txNum, dependency uint64) { +func (rs *ReconState) RollbackTx(txTask TxTask, dependency uint64) { rs.lock.Lock() defer rs.lock.Unlock() if rs.doneBitmap.Contains(dependency) { - heap.Push(&rs.queue, txNum) + heap.Push(&rs.queue, txTask) } else { tt, 
_ := rs.triggers[dependency] - tt = append(tt, txNum) + tt = append(tt, txTask) rs.triggers[dependency] = tt } rs.rollbackCount++ @@ -208,7 +216,7 @@ func (w *StateReconWriter) UpdateAccountData(address common.Address, original, a value := make([]byte, account.EncodingLengthForStorage()) account.EncodeForStorage(value) //fmt.Printf("account [%x]=>{Balance: %d, Nonce: %d, Root: %x, CodeHash: %x} txNum: %d\n", address, &account.Balance, account.Nonce, account.Root, account.CodeHash, w.txNum) - w.rs.Put(kv.PlainState, address[:], value) + w.rs.Put(kv.PlainStateR, address[:], nil, value, w.txNum) return nil } @@ -221,10 +229,10 @@ func (w *StateReconWriter) UpdateAccountCode(address common.Address, incarnation //fmt.Printf("no change code [%x] txNum = %d\n", address, txNum) return nil } - w.rs.Put(kv.Code, codeHash[:], code) + w.rs.Put(kv.CodeR, codeHash[:], nil, code, w.txNum) if len(code) > 0 { //fmt.Printf("code [%x] => [%x] CodeHash: %x, txNum: %d\n", address, code, codeHash, w.txNum) - w.rs.Put(kv.PlainContractCode, dbutils.PlainGenerateStoragePrefix(address[:], FirstContractIncarnation), codeHash[:]) + w.rs.Put(kv.PlainContractR, dbutils.PlainGenerateStoragePrefix(address[:], FirstContractIncarnation), nil, codeHash[:], w.txNum) } return nil } @@ -243,11 +251,9 @@ func (w *StateReconWriter) WriteAccountStorage(address common.Address, incarnati //fmt.Printf("no change storage [%x] [%x] txNum = %d\n", address, *key, txNum) return nil } - v := value.Bytes() - if len(v) != 0 { + if !value.IsZero() { //fmt.Printf("storage [%x] [%x] => [%x], txNum: %d\n", address, *key, v, w.txNum) - compositeKey := dbutils.PlainGenerateCompositeStorageKey(address.Bytes(), FirstContractIncarnation, key.Bytes()) - w.rs.Put(kv.PlainState, compositeKey, v) + w.rs.Put(kv.PlainStateR, address.Bytes(), key.Bytes(), value.Bytes(), w.txNum) } return nil } diff --git a/go.mod b/go.mod index d0af2ecc461..fd3347d9883 100644 --- a/go.mod +++ b/go.mod @@ -35,7 +35,7 @@ require ( 
github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible - github.com/ledgerwatch/erigon-lib v0.0.0-20220704045547-cdbb792be709 + github.com/ledgerwatch/erigon-lib v0.0.0-20220706054240-9e7f22667e55 github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 diff --git a/go.sum b/go.sum index f5138074a24..b6b1e9bf77a 100644 --- a/go.sum +++ b/go.sum @@ -386,8 +386,8 @@ github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3P github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220704045547-cdbb792be709 h1:vYR135oqA1gu6XCjUi60Sis9qQE64YkXNSzo4ug+7H8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220704045547-cdbb792be709/go.mod h1:7sQ5B5m54zoo7RVRVukH3YZCYVrCC+BmwDBD+9KyTrE= +github.com/ledgerwatch/erigon-lib v0.0.0-20220706054240-9e7f22667e55 h1:KOECbI1OzXn9Dwy58wHq0KMaG+4siMvxAPb3YSh+u+s= +github.com/ledgerwatch/erigon-lib v0.0.0-20220706054240-9e7f22667e55/go.mod h1:7sQ5B5m54zoo7RVRVukH3YZCYVrCC+BmwDBD+9KyTrE= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= From 423b4f68acea80038a2f7e7ec10e6bcc64c7afce Mon Sep 17 00:00:00 2001 From: hexoscott <70711990+hexoscott@users.noreply.github.com> Date: Wed, 6 Jul 2022 08:43:22 +0100 Subject: [PATCH 045/152] use headers in rpc health check (#4639) adding in unit tests for healthchecks --- cmd/rpcdaemon/health/check_peers.go | 7 +- 
cmd/rpcdaemon/health/check_synced.go | 25 ++ cmd/rpcdaemon/health/check_time.go | 35 ++ cmd/rpcdaemon/health/health.go | 116 +++++- cmd/rpcdaemon/health/health_test.go | 562 +++++++++++++++++++++++++++ cmd/rpcdaemon/health/interfaces.go | 1 + 6 files changed, 735 insertions(+), 11 deletions(-) create mode 100644 cmd/rpcdaemon/health/check_synced.go create mode 100644 cmd/rpcdaemon/health/check_time.go create mode 100644 cmd/rpcdaemon/health/health_test.go diff --git a/cmd/rpcdaemon/health/check_peers.go b/cmd/rpcdaemon/health/check_peers.go index 818152b668b..4e55a59eb9d 100644 --- a/cmd/rpcdaemon/health/check_peers.go +++ b/cmd/rpcdaemon/health/check_peers.go @@ -2,9 +2,14 @@ package health import ( "context" + "errors" "fmt" ) +var ( + errNotEnoughPeers = errors.New("not enough peers") +) + func checkMinPeers(minPeerCount uint, api NetAPI) error { if api == nil { return fmt.Errorf("no connection to the Erigon server or `net` namespace isn't enabled") @@ -16,7 +21,7 @@ func checkMinPeers(minPeerCount uint, api NetAPI) error { } if uint64(peerCount) < uint64(minPeerCount) { - return fmt.Errorf("not enough peers: %d (minimum %d))", peerCount, minPeerCount) + return fmt.Errorf("%w: %d (minimum %d)", errNotEnoughPeers, peerCount, minPeerCount) } return nil diff --git a/cmd/rpcdaemon/health/check_synced.go b/cmd/rpcdaemon/health/check_synced.go new file mode 100644 index 00000000000..af39a22b28d --- /dev/null +++ b/cmd/rpcdaemon/health/check_synced.go @@ -0,0 +1,25 @@ +package health + +import ( + "errors" + "net/http" + + "github.com/ledgerwatch/log/v3" +) + +var ( + errNotSynced = errors.New("not synced") +) + +func checkSynced(ethAPI EthAPI, r *http.Request) error { + i, err := ethAPI.Syncing(r.Context()) + if err != nil { + log.Root().Warn("unable to process synced request", "err", err.Error()) + return err + } + if i == nil || i == false { + return nil + } + + return errNotSynced +} diff --git a/cmd/rpcdaemon/health/check_time.go 
b/cmd/rpcdaemon/health/check_time.go new file mode 100644 index 00000000000..d604521656c --- /dev/null +++ b/cmd/rpcdaemon/health/check_time.go @@ -0,0 +1,35 @@ +package health + +import ( + "errors" + "fmt" + "net/http" + + "github.com/ledgerwatch/erigon/rpc" +) + +var ( + errTimestampTooOld = errors.New("timestamp too old") +) + +func checkTime( + r *http.Request, + seconds int, + ethAPI EthAPI, +) error { + i, err := ethAPI.GetBlockByNumber(r.Context(), rpc.LatestBlockNumber, false) + if err != nil { + return err + } + timestamp := 0 + if ts, ok := i["timestamp"]; ok { + if cs, ok := ts.(uint64); ok { + timestamp = int(cs) + } + } + if timestamp > seconds { + return fmt.Errorf("%w: got ts: %d, need: %d", errTimestampTooOld, timestamp, seconds) + } + + return nil +} diff --git a/cmd/rpcdaemon/health/health.go b/cmd/rpcdaemon/health/health.go index 311af85c5d9..f99ecaca39a 100644 --- a/cmd/rpcdaemon/health/health.go +++ b/cmd/rpcdaemon/health/health.go @@ -6,10 +6,13 @@ import ( "fmt" "io" "net/http" + "strconv" "strings" + "time" - "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/log/v3" + + "github.com/ledgerwatch/erigon/rpc" ) type requestBody struct { @@ -18,11 +21,17 @@ type requestBody struct { } const ( - urlPath = "/health" + urlPath = "/health" + healthHeader = "X-ERIGON-HEALTHCHECK" + synced = "synced" + minPeerCount = "min_peer_count" + checkBlock = "check_block" + maxSecondsBehind = "max_seconds_behind" ) var ( - errCheckDisabled = errors.New("error check disabled") + errCheckDisabled = errors.New("error check disabled") + errBadHeaderValue = errors.New("bad header value") ) func ProcessHealthcheckIfNeeded( @@ -36,12 +45,70 @@ func ProcessHealthcheckIfNeeded( netAPI, ethAPI := parseAPI(rpcAPI) - var errMinPeerCount = errCheckDisabled - var errCheckBlock = errCheckDisabled + headers := r.Header.Values(healthHeader) + if len(headers) != 0 { + processFromHeaders(headers, ethAPI, netAPI, w, r) + } else { + processFromBody(w, r, netAPI, ethAPI) + 
} + return true +} + +func processFromHeaders(headers []string, ethAPI EthAPI, netAPI NetAPI, w http.ResponseWriter, r *http.Request) { + var ( + errCheckSynced = errCheckDisabled + errCheckPeer = errCheckDisabled + errCheckBlock = errCheckDisabled + errCheckSeconds = errCheckDisabled + ) + + for _, header := range headers { + lHeader := strings.ToLower(header) + if lHeader == synced { + errCheckSynced = checkSynced(ethAPI, r) + } + if strings.HasPrefix(lHeader, minPeerCount) { + peers, err := strconv.Atoi(strings.TrimPrefix(lHeader, minPeerCount)) + if err != nil { + errCheckPeer = err + break + } + errCheckPeer = checkMinPeers(uint(peers), netAPI) + } + if strings.HasPrefix(lHeader, checkBlock) { + block, err := strconv.Atoi(strings.TrimPrefix(lHeader, checkBlock)) + if err != nil { + errCheckBlock = err + break + } + errCheckBlock = checkBlockNumber(rpc.BlockNumber(block), ethAPI) + } + if strings.HasPrefix(lHeader, maxSecondsBehind) { + seconds, err := strconv.Atoi(strings.TrimPrefix(lHeader, maxSecondsBehind)) + if err != nil { + errCheckSeconds = err + break + } + if seconds < 0 { + errCheckSeconds = errBadHeaderValue + break + } + now := time.Now().Unix() + errCheckSeconds = checkTime(r, int(now)-seconds, ethAPI) + } + } + + reportHealthFromHeaders(errCheckSynced, errCheckPeer, errCheckBlock, errCheckSeconds, w) +} + +func processFromBody(w http.ResponseWriter, r *http.Request, netAPI NetAPI, ethAPI EthAPI) { body, errParse := parseHealthCheckBody(r.Body) defer r.Body.Close() + var errMinPeerCount = errCheckDisabled + var errCheckBlock = errCheckDisabled + if errParse != nil { log.Root().Warn("unable to process healthcheck request", "err", errParse) } else { @@ -56,12 +123,10 @@ func ProcessHealthcheckIfNeeded( // TODO add time from the last sync cycle } - err := reportHealth(errParse, errMinPeerCount, errCheckBlock, w) + err := reportHealthFromBody(errParse, errMinPeerCount, errCheckBlock, w) if err != nil { log.Root().Warn("unable to process healthcheck 
request", "err", err) } - - return true } func parseHealthCheckBody(reader io.Reader) (requestBody, error) { @@ -80,7 +145,7 @@ func parseHealthCheckBody(reader io.Reader) (requestBody, error) { return body, nil } -func reportHealth(errParse, errMinPeerCount, errCheckBlock error, w http.ResponseWriter) error { +func reportHealthFromBody(errParse, errMinPeerCount, errCheckBlock error, w http.ResponseWriter) error { statusCode := http.StatusOK errors := make(map[string]string) @@ -99,9 +164,40 @@ func reportHealth(errParse, errMinPeerCount, errCheckBlock error, w http.Respons } errors["check_block"] = errorStringOrOK(errCheckBlock) + return writeResponse(w, errors, statusCode) +} + +func reportHealthFromHeaders(errCheckSynced, errCheckPeer, errCheckBlock, errCheckSeconds error, w http.ResponseWriter) error { + statusCode := http.StatusOK + errs := make(map[string]string) + + if shouldChangeStatusCode(errCheckSynced) { + statusCode = http.StatusInternalServerError + } + errs[synced] = errorStringOrOK(errCheckSynced) + + if shouldChangeStatusCode(errCheckPeer) { + statusCode = http.StatusInternalServerError + } + errs[minPeerCount] = errorStringOrOK(errCheckPeer) + + if shouldChangeStatusCode(errCheckBlock) { + statusCode = http.StatusInternalServerError + } + errs[checkBlock] = errorStringOrOK(errCheckBlock) + + if shouldChangeStatusCode(errCheckSeconds) { + statusCode = http.StatusInternalServerError + } + errs[maxSecondsBehind] = errorStringOrOK(errCheckSeconds) + + return writeResponse(w, errs, statusCode) +} + +func writeResponse(w http.ResponseWriter, errs map[string]string, statusCode int) error { w.WriteHeader(statusCode) - bodyJson, err := json.Marshal(errors) + bodyJson, err := json.Marshal(errs) if err != nil { return err } diff --git a/cmd/rpcdaemon/health/health_test.go b/cmd/rpcdaemon/health/health_test.go new file mode 100644 index 00000000000..e90f840127c --- /dev/null +++ b/cmd/rpcdaemon/health/health_test.go @@ -0,0 +1,562 @@ +package health + +import 
( + "context" + "encoding/json" + "errors" + "io/ioutil" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/ledgerwatch/erigon/common/hexutil" + "github.com/ledgerwatch/erigon/rpc" +) + +type netApiStub struct { + response hexutil.Uint + error error +} + +func (n *netApiStub) PeerCount(_ context.Context) (hexutil.Uint, error) { + return n.response, n.error +} + +type ethApiStub struct { + blockResult map[string]interface{} + blockError error + syncingResult interface{} + syncingError error +} + +func (e *ethApiStub) GetBlockByNumber(_ context.Context, _ rpc.BlockNumber, _ bool) (map[string]interface{}, error) { + return e.blockResult, e.blockError +} + +func (e *ethApiStub) Syncing(_ context.Context) (interface{}, error) { + return e.syncingResult, e.syncingError +} + +func TestProcessHealthcheckIfNeeded_HeadersTests(t *testing.T) { + cases := []struct { + headers []string + netApiResponse hexutil.Uint + netApiError error + ethApiBlockResult map[string]interface{} + ethApiBlockError error + ethApiSyncingResult interface{} + ethApiSyncingError error + expectedStatusCode int + expectedBody map[string]string + }{ + // 0 - sync check enabled - syncing + { + headers: []string{"synced"}, + netApiResponse: hexutil.Uint(1), + netApiError: nil, + ethApiBlockResult: make(map[string]interface{}), + ethApiBlockError: nil, + ethApiSyncingResult: false, + ethApiSyncingError: nil, + expectedStatusCode: http.StatusOK, + expectedBody: map[string]string{ + synced: "HEALTHY", + minPeerCount: "DISABLED", + checkBlock: "DISABLED", + maxSecondsBehind: "DISABLED", + }, + }, + // 1 - sync check enabled - not syncing + { + headers: []string{"synced"}, + netApiResponse: hexutil.Uint(1), + netApiError: nil, + ethApiBlockResult: make(map[string]interface{}), + ethApiBlockError: nil, + ethApiSyncingResult: struct{}{}, + ethApiSyncingError: nil, + expectedStatusCode: http.StatusInternalServerError, + expectedBody: map[string]string{ + synced: "ERROR: not 
synced", + minPeerCount: "DISABLED", + checkBlock: "DISABLED", + maxSecondsBehind: "DISABLED", + }, + }, + // 2 - sync check enabled - error checking sync + { + headers: []string{"synced"}, + netApiResponse: hexutil.Uint(1), + netApiError: nil, + ethApiBlockResult: make(map[string]interface{}), + ethApiBlockError: nil, + ethApiSyncingResult: struct{}{}, + ethApiSyncingError: errors.New("problem checking sync"), + expectedStatusCode: http.StatusInternalServerError, + expectedBody: map[string]string{ + synced: "ERROR: problem checking sync", + minPeerCount: "DISABLED", + checkBlock: "DISABLED", + maxSecondsBehind: "DISABLED", + }, + }, + // 3 - peer count enabled - good request + { + headers: []string{"min_peer_count1"}, + netApiResponse: hexutil.Uint(1), + netApiError: nil, + ethApiBlockResult: make(map[string]interface{}), + ethApiBlockError: nil, + ethApiSyncingResult: false, + ethApiSyncingError: nil, + expectedStatusCode: http.StatusOK, + expectedBody: map[string]string{ + synced: "DISABLED", + minPeerCount: "HEALTHY", + checkBlock: "DISABLED", + maxSecondsBehind: "DISABLED", + }, + }, + // 4 - peer count enabled - not enough peers + { + headers: []string{"min_peer_count10"}, + netApiResponse: hexutil.Uint(1), + netApiError: nil, + ethApiBlockResult: make(map[string]interface{}), + ethApiBlockError: nil, + ethApiSyncingResult: false, + ethApiSyncingError: nil, + expectedStatusCode: http.StatusInternalServerError, + expectedBody: map[string]string{ + synced: "DISABLED", + minPeerCount: "ERROR: not enough peers: 1 (minimum 10)", + checkBlock: "DISABLED", + maxSecondsBehind: "DISABLED", + }, + }, + // 5 - peer count enabled - error checking peers + { + headers: []string{"min_peer_count10"}, + netApiResponse: hexutil.Uint(1), + netApiError: errors.New("problem checking peers"), + ethApiBlockResult: make(map[string]interface{}), + ethApiBlockError: nil, + ethApiSyncingResult: false, + ethApiSyncingError: nil, + expectedStatusCode: http.StatusInternalServerError, + 
expectedBody: map[string]string{ + synced: "DISABLED", + minPeerCount: "ERROR: problem checking peers", + checkBlock: "DISABLED", + maxSecondsBehind: "DISABLED", + }, + }, + // 6 - peer count enabled - badly formed request + { + headers: []string{"min_peer_countABC"}, + netApiResponse: hexutil.Uint(1), + netApiError: nil, + ethApiBlockResult: make(map[string]interface{}), + ethApiBlockError: nil, + ethApiSyncingResult: false, + ethApiSyncingError: nil, + expectedStatusCode: http.StatusInternalServerError, + expectedBody: map[string]string{ + synced: "DISABLED", + minPeerCount: "ERROR: strconv.Atoi: parsing \"abc\": invalid syntax", + checkBlock: "DISABLED", + maxSecondsBehind: "DISABLED", + }, + }, + // 7 - block check - all ok + { + headers: []string{"check_block10"}, + netApiResponse: hexutil.Uint(1), + netApiError: nil, + ethApiBlockResult: map[string]interface{}{"test": struct{}{}}, + ethApiBlockError: nil, + ethApiSyncingResult: false, + ethApiSyncingError: nil, + expectedStatusCode: http.StatusOK, + expectedBody: map[string]string{ + synced: "DISABLED", + minPeerCount: "DISABLED", + checkBlock: "HEALTHY", + maxSecondsBehind: "DISABLED", + }, + }, + // 8 - block check - no block found + { + headers: []string{"check_block10"}, + netApiResponse: hexutil.Uint(1), + netApiError: nil, + ethApiBlockResult: map[string]interface{}{}, + ethApiBlockError: nil, + ethApiSyncingResult: false, + ethApiSyncingError: nil, + expectedStatusCode: http.StatusInternalServerError, + expectedBody: map[string]string{ + synced: "DISABLED", + minPeerCount: "DISABLED", + checkBlock: "ERROR: no known block with number 10 (a hex)", + maxSecondsBehind: "DISABLED", + }, + }, + // 9 - block check - error checking block + { + headers: []string{"check_block10"}, + netApiResponse: hexutil.Uint(1), + netApiError: nil, + ethApiBlockResult: map[string]interface{}{}, + ethApiBlockError: errors.New("problem checking block"), + ethApiSyncingResult: false, + ethApiSyncingError: nil, + 
expectedStatusCode: http.StatusInternalServerError, + expectedBody: map[string]string{ + synced: "DISABLED", + minPeerCount: "DISABLED", + checkBlock: "ERROR: problem checking block", + maxSecondsBehind: "DISABLED", + }, + }, + // 10 - block check - badly formed request + { + headers: []string{"check_blockABC"}, + netApiResponse: hexutil.Uint(1), + netApiError: nil, + ethApiBlockResult: map[string]interface{}{}, + ethApiBlockError: nil, + ethApiSyncingResult: false, + ethApiSyncingError: nil, + expectedStatusCode: http.StatusInternalServerError, + expectedBody: map[string]string{ + synced: "DISABLED", + minPeerCount: "DISABLED", + checkBlock: "ERROR: strconv.Atoi: parsing \"abc\": invalid syntax", + maxSecondsBehind: "DISABLED", + }, + }, + // 11 - seconds check - all ok + { + headers: []string{"max_seconds_behind60"}, + netApiResponse: hexutil.Uint(1), + netApiError: nil, + ethApiBlockResult: map[string]interface{}{ + "timestamp": time.Now().Add(1 * time.Second).Unix(), + }, + ethApiBlockError: nil, + ethApiSyncingResult: false, + ethApiSyncingError: nil, + expectedStatusCode: http.StatusOK, + expectedBody: map[string]string{ + synced: "DISABLED", + minPeerCount: "DISABLED", + checkBlock: "DISABLED", + maxSecondsBehind: "HEALTHY", + }, + }, + // 12 - seconds check - too old + { + headers: []string{"max_seconds_behind60"}, + netApiResponse: hexutil.Uint(1), + netApiError: nil, + ethApiBlockResult: map[string]interface{}{ + "timestamp": uint64(time.Now().Add(1 * time.Hour).Unix()), + }, + ethApiBlockError: nil, + ethApiSyncingResult: false, + ethApiSyncingError: nil, + expectedStatusCode: http.StatusInternalServerError, + expectedBody: map[string]string{ + synced: "DISABLED", + minPeerCount: "DISABLED", + checkBlock: "DISABLED", + maxSecondsBehind: "ERROR: timestamp too old: got ts:", + }, + }, + // 13 - seconds check - less than 0 seconds + { + headers: []string{"max_seconds_behind-1"}, + netApiResponse: hexutil.Uint(1), + netApiError: nil, + ethApiBlockResult: 
map[string]interface{}{ + "timestamp": uint64(time.Now().Add(1 * time.Hour).Unix()), + }, + ethApiBlockError: nil, + ethApiSyncingResult: false, + ethApiSyncingError: nil, + expectedStatusCode: http.StatusInternalServerError, + expectedBody: map[string]string{ + synced: "DISABLED", + minPeerCount: "DISABLED", + checkBlock: "DISABLED", + maxSecondsBehind: "ERROR: bad header value", + }, + }, + // 14 - seconds check - badly formed request + { + headers: []string{"max_seconds_behindABC"}, + netApiResponse: hexutil.Uint(1), + netApiError: nil, + ethApiBlockResult: map[string]interface{}{}, + ethApiBlockError: nil, + ethApiSyncingResult: false, + ethApiSyncingError: nil, + expectedStatusCode: http.StatusInternalServerError, + expectedBody: map[string]string{ + synced: "DISABLED", + minPeerCount: "DISABLED", + checkBlock: "DISABLED", + maxSecondsBehind: "ERROR: strconv.Atoi: parsing \"abc\": invalid syntax", + }, + }, + // 15 - all checks - report ok + { + headers: []string{"synced", "check_block10", "min_peer_count1", "max_seconds_behind60"}, + netApiResponse: hexutil.Uint(10), + netApiError: nil, + ethApiBlockResult: map[string]interface{}{ + "timestamp": time.Now().Add(1 * time.Second).Unix(), + }, + ethApiBlockError: nil, + ethApiSyncingResult: false, + ethApiSyncingError: nil, + expectedStatusCode: http.StatusOK, + expectedBody: map[string]string{ + synced: "HEALTHY", + minPeerCount: "HEALTHY", + checkBlock: "HEALTHY", + maxSecondsBehind: "HEALTHY", + }, + }, + } + + for idx, c := range cases { + w := httptest.NewRecorder() + r, err := http.NewRequest(http.MethodGet, "http://localhost:9090/health", nil) + if err != nil { + t.Errorf("%v: creating request: %v", idx, err) + } + + for _, header := range c.headers { + r.Header.Add("X-ERIGON-HEALTHCHECK", header) + } + + netAPI := rpc.API{ + Namespace: "", + Version: "", + Service: &netApiStub{ + response: c.netApiResponse, + error: c.netApiError, + }, + Public: false, + } + + ethAPI := rpc.API{ + Namespace: "", + 
Version: "", + Service: ðApiStub{ + blockResult: c.ethApiBlockResult, + blockError: c.ethApiBlockError, + syncingResult: c.ethApiSyncingResult, + syncingError: c.ethApiSyncingError, + }, + Public: false, + } + + apis := make([]rpc.API, 2) + apis[0] = netAPI + apis[1] = ethAPI + + ProcessHealthcheckIfNeeded(w, r, apis) + + result := w.Result() + if result.StatusCode != c.expectedStatusCode { + t.Errorf("%v: expected status code: %v, but got: %v", idx, c.expectedStatusCode, result.StatusCode) + } + + bodyBytes, err := ioutil.ReadAll(result.Body) + if err != nil { + t.Errorf("%v: reading response body: %s", idx, err) + } + + var body map[string]string + err = json.Unmarshal(bodyBytes, &body) + if err != nil { + t.Errorf("%v: unmarshalling the response body: %s", idx, err) + } + result.Body.Close() + + for k, v := range c.expectedBody { + val, found := body[k] + if !found { + t.Errorf("%v: expected the key: %s to be in the response body but it wasn't there", idx, k) + } + if !strings.Contains(val, v) { + t.Errorf("%v: expected the response body key: %s to contain: %s, but it contained: %s", idx, k, v, val) + } + } + } +} + +func TestProcessHealthcheckIfNeeded_RequestBody(t *testing.T) { + cases := []struct { + body string + netApiResponse hexutil.Uint + netApiError error + ethApiBlockResult map[string]interface{} + ethApiBlockError error + expectedStatusCode int + expectedBody map[string]string + }{ + // 0 - happy path + { + body: "{\"min_peer_count\": 1, \"known_block\": 123}", + netApiResponse: hexutil.Uint(1), + netApiError: nil, + ethApiBlockResult: map[string]interface{}{"test": struct{}{}}, + ethApiBlockError: nil, + expectedStatusCode: http.StatusOK, + expectedBody: map[string]string{ + "healthcheck_query": "HEALTHY", + "min_peer_count": "HEALTHY", + "check_block": "HEALTHY", + }, + }, + // 1 - bad request body + { + body: "{\"min_peer_count\" 1, \"known_block\": 123}", + netApiResponse: hexutil.Uint(1), + netApiError: nil, + ethApiBlockResult: 
map[string]interface{}{"test": struct{}{}}, + ethApiBlockError: nil, + expectedStatusCode: http.StatusInternalServerError, + expectedBody: map[string]string{ + "healthcheck_query": "ERROR:", + "min_peer_count": "DISABLED", + "check_block": "DISABLED", + }, + }, + // 2 - min peers - error from api + { + body: "{\"min_peer_count\": 1, \"known_block\": 123}", + netApiResponse: hexutil.Uint(1), + netApiError: errors.New("problem getting peers"), + ethApiBlockResult: map[string]interface{}{"test": struct{}{}}, + ethApiBlockError: nil, + expectedStatusCode: http.StatusInternalServerError, + expectedBody: map[string]string{ + "healthcheck_query": "HEALTHY", + "min_peer_count": "ERROR: problem getting peers", + "check_block": "HEALTHY", + }, + }, + // 3 - min peers - not enough peers + { + body: "{\"min_peer_count\": 10, \"known_block\": 123}", + netApiResponse: hexutil.Uint(1), + netApiError: nil, + ethApiBlockResult: map[string]interface{}{"test": struct{}{}}, + ethApiBlockError: nil, + expectedStatusCode: http.StatusInternalServerError, + expectedBody: map[string]string{ + "healthcheck_query": "HEALTHY", + "min_peer_count": "ERROR: not enough peers", + "check_block": "HEALTHY", + }, + }, + // 4 - check block - no block + { + body: "{\"min_peer_count\": 1, \"known_block\": 123}", + netApiResponse: hexutil.Uint(1), + netApiError: nil, + ethApiBlockResult: map[string]interface{}{}, + ethApiBlockError: nil, + expectedStatusCode: http.StatusInternalServerError, + expectedBody: map[string]string{ + "healthcheck_query": "HEALTHY", + "min_peer_count": "HEALTHY", + "check_block": "ERROR: no known block with number ", + }, + }, + // 5 - check block - error getting block info + { + body: "{\"min_peer_count\": 1, \"known_block\": 123}", + netApiResponse: hexutil.Uint(1), + netApiError: nil, + ethApiBlockResult: map[string]interface{}{}, + ethApiBlockError: errors.New("problem getting block"), + expectedStatusCode: http.StatusInternalServerError, + expectedBody: map[string]string{ + 
"healthcheck_query": "HEALTHY", + "min_peer_count": "HEALTHY", + "check_block": "ERROR: problem getting block", + }, + }, + } + + for idx, c := range cases { + w := httptest.NewRecorder() + r, err := http.NewRequest(http.MethodGet, "http://localhost:9090/health", nil) + if err != nil { + t.Errorf("%v: creating request: %v", idx, err) + } + + r.Body = ioutil.NopCloser(strings.NewReader(c.body)) + + netAPI := rpc.API{ + Namespace: "", + Version: "", + Service: &netApiStub{ + response: c.netApiResponse, + error: c.netApiError, + }, + Public: false, + } + + ethAPI := rpc.API{ + Namespace: "", + Version: "", + Service: ðApiStub{ + blockResult: c.ethApiBlockResult, + blockError: c.ethApiBlockError, + }, + Public: false, + } + + apis := make([]rpc.API, 2) + apis[0] = netAPI + apis[1] = ethAPI + + ProcessHealthcheckIfNeeded(w, r, apis) + + result := w.Result() + if result.StatusCode != c.expectedStatusCode { + t.Errorf("%v: expected status code: %v, but got: %v", idx, c.expectedStatusCode, result.StatusCode) + } + + bodyBytes, err := ioutil.ReadAll(result.Body) + if err != nil { + t.Errorf("%v: reading response body: %s", idx, err) + } + + var body map[string]string + err = json.Unmarshal(bodyBytes, &body) + if err != nil { + t.Errorf("%v: unmarshalling the response body: %s", idx, err) + } + result.Body.Close() + + for k, v := range c.expectedBody { + val, found := body[k] + if !found { + t.Errorf("%v: expected the key: %s to be in the response body but it wasn't there", idx, k) + } + if !strings.Contains(val, v) { + t.Errorf("%v: expected the response body key: %s to contain: %s, but it contained: %s", idx, k, v, val) + } + } + } +} diff --git a/cmd/rpcdaemon/health/interfaces.go b/cmd/rpcdaemon/health/interfaces.go index 4cf0fc6892b..c8bdca5dbf8 100644 --- a/cmd/rpcdaemon/health/interfaces.go +++ b/cmd/rpcdaemon/health/interfaces.go @@ -13,4 +13,5 @@ type NetAPI interface { type EthAPI interface { GetBlockByNumber(_ context.Context, number rpc.BlockNumber, fullTx bool) 
(map[string]interface{}, error) + Syncing(ctx context.Context) (interface{}, error) } From 45ac977b9fa88512990a09839c675cb137028ddd Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Wed, 6 Jul 2022 12:04:25 +0200 Subject: [PATCH 046/152] Revert "Small simplification of startHandlingForkChoice (#4636)" (#4649) This reverts commit cc75387d1063330b5ed8c07427c80e07da15f33d. --- eth/stagedsync/stage_headers.go | 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index ed9eddbd743..4cd6e6e63b9 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -321,7 +321,34 @@ func startHandlingForkChoice( cfg.hd.BeaconRequestList.Remove(requestId) headerNumber := header.Number.Uint64() - cfg.hd.UpdateTopSeenHeightPoS(headerNumber) + // If header is canonical, then no reorgs are required + canonicalHash, err := rawdb.ReadCanonicalHash(tx, headerNumber) + if err != nil { + log.Warn(fmt.Sprintf("[%s] Fork choice err (reading canonical hash of %d)", s.LogPrefix(), headerNumber), "err", err) + cfg.hd.BeaconRequestList.Remove(requestId) + return nil, err + } + + if headerHash == canonicalHash { + log.Info(fmt.Sprintf("[%s] Fork choice on previously known block", s.LogPrefix())) + cfg.hd.BeaconRequestList.Remove(requestId) + rawdb.WriteForkchoiceHead(tx, forkChoice.HeadBlockHash) + canonical, err := safeAndFinalizedBlocksAreCanonical(forkChoice, s, tx, cfg) + if err != nil { + log.Warn(fmt.Sprintf("[%s] Fork choice err", s.LogPrefix()), "err", err) + return nil, err + } + if canonical { + return &privateapi.PayloadStatus{ + Status: remote.EngineStatus_VALID, + LatestValidHash: headerHash, + }, nil + } else { + return &privateapi.PayloadStatus{ + CriticalError: &privateapi.InvalidForkchoiceStateErr, + }, nil + } + } if cfg.memoryOverlay && headerHash == cfg.hd.GetNextForkHash() { log.Info("Flushing 
in-memory state") @@ -345,6 +372,7 @@ func startHandlingForkChoice( } } + cfg.hd.UpdateTopSeenHeightPoS(headerNumber) forkingPoint := uint64(0) if headerNumber > 0 { parent, err := headerReader.Header(ctx, tx, header.ParentHash, headerNumber-1) From 70bf7dbc114887a7996b756276dd72acc8e22749 Mon Sep 17 00:00:00 2001 From: Enrique Jose Avila Asapche Date: Wed, 6 Jul 2022 13:25:29 +0300 Subject: [PATCH 047/152] getting amount from non canonical bucket (#4648) --- core/rawdb/accessors_chain.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go index e2100d45e77..f27f11fc96e 100644 --- a/core/rawdb/accessors_chain.go +++ b/core/rawdb/accessors_chain.go @@ -430,7 +430,7 @@ func NonCanonicalTransactions(db kv.Getter, baseTxId uint64, amount uint32) ([]t binary.BigEndian.PutUint64(txIdKey, baseTxId) i := uint32(0) - if err := db.ForAmount(kv.EthTx, txIdKey, amount, func(k, v []byte) error { + if err := db.ForAmount(kv.NonCanonicalTxs, txIdKey, amount, func(k, v []byte) error { var decodeErr error reader.Reset(v) stream.Reset(reader, 0) From f19101d33bbbcf9ceb1c02bfc4a9848d6fb1e2c3 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Wed, 6 Jul 2022 16:27:01 +0600 Subject: [PATCH 048/152] macos retry reopen torrent client (#4645) * save * save * save * save * save * save * save * save * save --- cmd/downloader/downloader/downloader.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/cmd/downloader/downloader/downloader.go b/cmd/downloader/downloader/downloader.go index ec6dd4357ec..5c8debdc526 100644 --- a/cmd/downloader/downloader/downloader.go +++ b/cmd/downloader/downloader/downloader.go @@ -210,7 +210,6 @@ func (d *Downloader) onComplete() { panic(err) } d.cfg.DataDir = snapDir - // fmt.Printf("alex1: %s\n", d.cfg.DataDir) db, c, m, torrentClient, err := openClient(d.cfg.ClientConfig) if err != nil { @@ -336,7 +335,14 @@ func openClient(cfg *torrent.ClientConfig) (db 
kv.RwDB, c storage.PieceCompletio } m = storage.NewMMapWithCompletion(snapDir, c) cfg.DefaultStorage = m - torrentClient, err = torrent.NewClient(cfg) + + for retry := 0; retry < 5; retry++ { + torrentClient, err = torrent.NewClient(cfg) + if err == nil { + break + } + time.Sleep(10 * time.Millisecond) + } if err != nil { return nil, nil, nil, nil, fmt.Errorf("torrent.NewClient: %w", err) } From d9cb87a149e67483dc1ac99ae946577416994e90 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Wed, 6 Jul 2022 16:44:06 +0600 Subject: [PATCH 049/152] RPC: Enable back json streaming for non-batch and non-websocket cases (#4647) * enable rpc streaming * enable rpc streaming --- cmd/rpcdaemon/cli/config.go | 10 ++++++---- cmd/rpcdaemon/cli/httpcfg/http_cfg.go | 1 + cmd/rpcdaemon/commands/tracing.go | 7 ++++++- cmd/rpcdaemon22/cli/config.go | 7 ++++--- cmd/rpcdaemon22/cli/httpcfg/http_cfg.go | 1 + cmd/rpcdaemon22/commands/tracing.go | 4 ++++ cmd/utils/flags.go | 21 +++++++++++++-------- node/rpcstack.go | 4 ++-- rpc/client.go | 2 +- rpc/handler.go | 17 ++++++++++++----- rpc/http.go | 7 ++++++- rpc/http_test.go | 2 +- rpc/server.go | 10 ++++++---- rpc/server_test.go | 2 +- rpc/subscription_test.go | 2 +- rpc/testservice_test.go | 2 +- rpc/websocket_test.go | 2 +- turbo/cli/default_flags.go | 1 + turbo/cli/flags.go | 4 +++- 19 files changed, 71 insertions(+), 35 deletions(-) diff --git a/cmd/rpcdaemon/cli/config.go b/cmd/rpcdaemon/cli/config.go index 59d40050e5a..d326e45911e 100644 --- a/cmd/rpcdaemon/cli/config.go +++ b/cmd/rpcdaemon/cli/config.go @@ -6,7 +6,6 @@ import ( "encoding/binary" "errors" "fmt" - "github.com/ledgerwatch/erigon/rpc/rpccfg" "net" "net/http" "os" @@ -15,6 +14,8 @@ import ( "strings" "time" + "github.com/ledgerwatch/erigon/rpc/rpccfg" + "github.com/ledgerwatch/erigon-lib/direct" "github.com/ledgerwatch/erigon-lib/gointerfaces" "github.com/ledgerwatch/erigon-lib/gointerfaces/grpcutil" @@ -79,7 +80,8 @@ func RootCommand() (*cobra.Command, *httpcfg.HttpCfg) { 
rootCmd.PersistentFlags().BoolVar(&cfg.WebsocketEnabled, "ws", false, "Enable Websockets") rootCmd.PersistentFlags().BoolVar(&cfg.WebsocketCompression, "ws.compression", false, "Enable Websocket compression (RFC 7692)") rootCmd.PersistentFlags().StringVar(&cfg.RpcAllowListFilePath, "rpc.accessList", "", "Specify granular (method-by-method) API allowlist") - rootCmd.PersistentFlags().UintVar(&cfg.RpcBatchConcurrency, "rpc.batch.concurrency", 2, "Does limit amount of goroutines to process 1 batch request. Means 1 bach request can't overload server. 1 batch still can have unlimited amount of request") + rootCmd.PersistentFlags().UintVar(&cfg.RpcBatchConcurrency, utils.RpcBatchConcurrencyFlag.Name, 2, utils.RpcBatchConcurrencyFlag.Usage) + rootCmd.PersistentFlags().BoolVar(&cfg.RpcStreamingDisable, utils.RpcStreamingDisableFlag.Name, false, utils.RpcStreamingDisableFlag.Usage) rootCmd.PersistentFlags().IntVar(&cfg.DBReadConcurrency, "db.read.concurrency", runtime.GOMAXPROCS(-1), "Does limit amount of parallel db reads") rootCmd.PersistentFlags().BoolVar(&cfg.TraceCompatibility, "trace.compat", false, "Bug for bug compatibility with OE for trace_ routines") rootCmd.PersistentFlags().StringVar(&cfg.TxPoolApiAddr, "txpool.api.addr", "", "txpool api network address, for example: 127.0.0.1:9090 (default: use value of --private.api.addr)") @@ -439,7 +441,7 @@ func StartRpcServer(ctx context.Context, cfg httpcfg.HttpCfg, rpcAPI []rpc.API) httpEndpoint := fmt.Sprintf("%s:%d", cfg.HttpListenAddress, cfg.HttpPort) fmt.Printf("TraceRequests = %t\n", cfg.TraceRequests) - srv := rpc.NewServer(cfg.RpcBatchConcurrency, cfg.TraceRequests) + srv := rpc.NewServer(cfg.RpcBatchConcurrency, cfg.TraceRequests, cfg.RpcStreamingDisable) allowListForRPC, err := parseAllowListForRPC(cfg.RpcAllowListFilePath) if err != nil { @@ -613,7 +615,7 @@ func createHandler(cfg httpcfg.HttpCfg, apiList []rpc.API, httpHandler http.Hand func createEngineListener(cfg httpcfg.HttpCfg, engineApi []rpc.API) 
(*http.Server, *rpc.Server, string, error) { engineHttpEndpoint := fmt.Sprintf("%s:%d", cfg.EngineHTTPListenAddress, cfg.EnginePort) - engineSrv := rpc.NewServer(cfg.RpcBatchConcurrency, cfg.TraceRequests) + engineSrv := rpc.NewServer(cfg.RpcBatchConcurrency, cfg.TraceRequests, true) allowListForRPC, err := parseAllowListForRPC(cfg.RpcAllowListFilePath) if err != nil { diff --git a/cmd/rpcdaemon/cli/httpcfg/http_cfg.go b/cmd/rpcdaemon/cli/httpcfg/http_cfg.go index a198722f1eb..db6cfd2004a 100644 --- a/cmd/rpcdaemon/cli/httpcfg/http_cfg.go +++ b/cmd/rpcdaemon/cli/httpcfg/http_cfg.go @@ -30,6 +30,7 @@ type HttpCfg struct { WebsocketCompression bool RpcAllowListFilePath string RpcBatchConcurrency uint + RpcStreamingDisable bool DBReadConcurrency int TraceCompatibility bool // Bug for bug compatibility for trace_ routines with OpenEthereum TxPoolApiAddr string diff --git a/cmd/rpcdaemon/commands/tracing.go b/cmd/rpcdaemon/commands/tracing.go index 3f354822407..16dfecfc3df 100644 --- a/cmd/rpcdaemon/commands/tracing.go +++ b/cmd/rpcdaemon/commands/tracing.go @@ -123,16 +123,20 @@ func (api *PrivateDebugAPIImpl) TraceTransaction(ctx context.Context, hash commo // Retrieve the transaction and assemble its EVM context blockNum, ok, err := api.txnLookup(ctx, tx, hash) if err != nil { + stream.WriteNil() return err } if !ok { + stream.WriteNil() return nil } block, err := api.blockByNumberWithSenders(tx, blockNum) if err != nil { + stream.WriteNil() return err } if block == nil { + stream.WriteNil() return nil } blockHash := block.Hash() @@ -148,12 +152,13 @@ func (api *PrivateDebugAPIImpl) TraceTransaction(ctx context.Context, hash commo if txn == nil { var borTx *types.Transaction borTx, _, _, _, err = rawdb.ReadBorTransaction(tx, hash) - if err != nil { + stream.WriteNil() return err } if borTx != nil { + stream.WriteNil() return nil } stream.WriteNil() diff --git a/cmd/rpcdaemon22/cli/config.go b/cmd/rpcdaemon22/cli/config.go index 5bce87f69f6..7f15ae9db48 100644 --- 
a/cmd/rpcdaemon22/cli/config.go +++ b/cmd/rpcdaemon22/cli/config.go @@ -81,7 +81,8 @@ func RootCommand() (*cobra.Command, *httpcfg.HttpCfg) { rootCmd.PersistentFlags().BoolVar(&cfg.WebsocketEnabled, "ws", false, "Enable Websockets") rootCmd.PersistentFlags().BoolVar(&cfg.WebsocketCompression, "ws.compression", false, "Enable Websocket compression (RFC 7692)") rootCmd.PersistentFlags().StringVar(&cfg.RpcAllowListFilePath, "rpc.accessList", "", "Specify granular (method-by-method) API allowlist") - rootCmd.PersistentFlags().UintVar(&cfg.RpcBatchConcurrency, "rpc.batch.concurrency", 2, "Does limit amount of goroutines to process 1 batch request. Means 1 bach request can't overload server. 1 batch still can have unlimited amount of request") + rootCmd.PersistentFlags().UintVar(&cfg.RpcBatchConcurrency, utils.RpcBatchConcurrencyFlag.Name, 2, utils.RpcBatchConcurrencyFlag.Usage) + rootCmd.PersistentFlags().BoolVar(&cfg.RpcStreamingDisable, utils.RpcStreamingDisableFlag.Name, false, utils.RpcStreamingDisableFlag.Usage) rootCmd.PersistentFlags().IntVar(&cfg.DBReadConcurrency, "db.read.concurrency", runtime.GOMAXPROCS(-1), "Does limit amount of parallel db reads") rootCmd.PersistentFlags().BoolVar(&cfg.TraceCompatibility, "trace.compat", false, "Bug for bug compatibility with OE for trace_ routines") rootCmd.PersistentFlags().StringVar(&cfg.TxPoolApiAddr, "txpool.api.addr", "", "txpool api network address, for example: 127.0.0.1:9090 (default: use value of --private.api.addr)") @@ -455,7 +456,7 @@ func StartRpcServer(ctx context.Context, cfg httpcfg.HttpCfg, rpcAPI []rpc.API) // register apis and create handler stack httpEndpoint := fmt.Sprintf("%s:%d", cfg.HttpListenAddress, cfg.HttpPort) - srv := rpc.NewServer(cfg.RpcBatchConcurrency, cfg.TraceRequests) + srv := rpc.NewServer(cfg.RpcBatchConcurrency, cfg.TraceRequests, cfg.RpcStreamingDisable) allowListForRPC, err := parseAllowListForRPC(cfg.RpcAllowListFilePath) if err != nil { @@ -629,7 +630,7 @@ func createHandler(cfg 
httpcfg.HttpCfg, apiList []rpc.API, httpHandler http.Hand func createEngineListener(cfg httpcfg.HttpCfg, engineApi []rpc.API) (*http.Server, *rpc.Server, string, error) { engineHttpEndpoint := fmt.Sprintf("%s:%d", cfg.EngineHTTPListenAddress, cfg.EnginePort) - engineSrv := rpc.NewServer(cfg.RpcBatchConcurrency, cfg.TraceRequests) + engineSrv := rpc.NewServer(cfg.RpcBatchConcurrency, cfg.TraceRequests, true) allowListForRPC, err := parseAllowListForRPC(cfg.RpcAllowListFilePath) if err != nil { diff --git a/cmd/rpcdaemon22/cli/httpcfg/http_cfg.go b/cmd/rpcdaemon22/cli/httpcfg/http_cfg.go index 42e15eb17d5..9cc7c72b68b 100644 --- a/cmd/rpcdaemon22/cli/httpcfg/http_cfg.go +++ b/cmd/rpcdaemon22/cli/httpcfg/http_cfg.go @@ -29,6 +29,7 @@ type HttpCfg struct { WebsocketCompression bool RpcAllowListFilePath string RpcBatchConcurrency uint + RpcStreamingDisable bool DBReadConcurrency int TraceCompatibility bool // Bug for bug compatibility for trace_ routines with OpenEthereum TxPoolApiAddr string diff --git a/cmd/rpcdaemon22/commands/tracing.go b/cmd/rpcdaemon22/commands/tracing.go index 31ddd4522cc..a63add34e34 100644 --- a/cmd/rpcdaemon22/commands/tracing.go +++ b/cmd/rpcdaemon22/commands/tracing.go @@ -118,16 +118,20 @@ func (api *PrivateDebugAPIImpl) TraceTransaction(ctx context.Context, hash commo // Retrieve the transaction and assemble its EVM context blockNum, ok, err := api.txnLookup(ctx, tx, hash) if err != nil { + stream.WriteNil() return err } if !ok { + stream.WriteNil() return nil } block, err := api.blockByNumberWithSenders(tx, blockNum) if err != nil { + stream.WriteNil() return err } if block == nil { + stream.WriteNil() return nil } blockHash := block.Hash() diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index f04ba285e6d..92b0ebf4e76 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -20,6 +20,15 @@ package utils import ( "crypto/ecdsa" "fmt" + "io" + "math/big" + "path/filepath" + "runtime" + "strconv" + "strings" + "text/tabwriter" + 
"text/template" + "github.com/c2h5oh/datasize" "github.com/ledgerwatch/erigon-lib/kv/kvcache" "github.com/ledgerwatch/erigon-lib/txpool" @@ -30,14 +39,6 @@ import ( "github.com/spf13/cobra" "github.com/spf13/pflag" "github.com/urfave/cli" - "io" - "math/big" - "path/filepath" - "runtime" - "strconv" - "strings" - "text/tabwriter" - "text/template" "github.com/ledgerwatch/erigon/eth/protocols/eth" "github.com/ledgerwatch/erigon/params/networkname" @@ -363,6 +364,10 @@ var ( Usage: "Does limit amount of goroutines to process 1 batch request. Means 1 bach request can't overload server. 1 batch still can have unlimited amount of request", Value: 2, } + RpcStreamingDisableFlag = cli.BoolFlag{ + Name: "rpc.streaming.disable", + Usage: "Erigon has enalbed json streamin for some heavy endpoints (like trace_*). It's treadoff: greatly reduce amount of RAM (in some cases from 30GB to 30mb), but it produce invalid json format if error happened in the middle of streaming (because json is not streaming-friendly format)", + } HTTPTraceFlag = cli.BoolFlag{ Name: "http.trace", Usage: "Trace HTTP requests with INFO level", diff --git a/node/rpcstack.go b/node/rpcstack.go index 5249ee1d3eb..e2ca438e382 100644 --- a/node/rpcstack.go +++ b/node/rpcstack.go @@ -265,7 +265,7 @@ func (h *httpServer) enableRPC(apis []rpc.API, config httpConfig, allowList rpc. } // Create RPC server and handler. - srv := rpc.NewServer(50, false /* traceRequests */) + srv := rpc.NewServer(50, false /* traceRequests */, true) srv.SetAllowList(allowList) if err := RegisterApisFromWhitelist(apis, config.Modules, srv, false); err != nil { return err @@ -298,7 +298,7 @@ func (h *httpServer) enableWS(apis []rpc.API, config wsConfig, allowList rpc.All } // Create RPC server and handler. 
- srv := rpc.NewServer(50, false /* traceRequests */) + srv := rpc.NewServer(50, false /* traceRequests */, true) srv.SetAllowList(allowList) if err := RegisterApisFromWhitelist(apis, config.Modules, srv, false); err != nil { return err diff --git a/rpc/client.go b/rpc/client.go index 7d5b27b1254..56262d4dc21 100644 --- a/rpc/client.go +++ b/rpc/client.go @@ -560,7 +560,7 @@ func (c *Client) dispatch(codec ServerCodec) { if op.batch { conn.handler.handleBatch(op.msgs) } else { - conn.handler.handleMsg(op.msgs[0]) + conn.handler.handleMsg(op.msgs[0], nil) } case err := <-c.readErr: diff --git a/rpc/handler.go b/rpc/handler.go index 60994b590ad..86985ea56e5 100644 --- a/rpc/handler.go +++ b/rpc/handler.go @@ -178,19 +178,26 @@ func (h *handler) handleBatch(msgs []*jsonrpcMessage) { } // handleMsg handles a single message. -func (h *handler) handleMsg(msg *jsonrpcMessage) { +func (h *handler) handleMsg(msg *jsonrpcMessage, stream *jsoniter.Stream) { if ok := h.handleImmediate(msg); ok { return } h.startCallProc(func(cp *callProc) { - stream := jsoniter.NewStream(jsoniter.ConfigDefault, nil, 4096) + needWriteStream := false + if stream == nil { + stream = jsoniter.NewStream(jsoniter.ConfigDefault, nil, 4096) + needWriteStream = true + } answer := h.handleCallMsg(cp, msg, stream) h.addSubscriptions(cp.notifiers) if answer != nil { - h.conn.writeJSON(cp.ctx, answer) - } else { - _ = stream.Flush() + buffer, _ := json.Marshal(answer) + stream.Write(json.RawMessage(buffer)) + } + if needWriteStream { h.conn.writeJSON(cp.ctx, json.RawMessage(stream.Buffer())) + } else { + stream.Write([]byte("\n")) } for _, n := range cp.notifiers { n.activate() diff --git a/rpc/http.go b/rpc/http.go index 497a99cd59b..748748bb48d 100644 --- a/rpc/http.go +++ b/rpc/http.go @@ -31,6 +31,7 @@ import ( "time" "github.com/golang-jwt/jwt/v4" + jsoniter "github.com/json-iterator/go" ) const ( @@ -222,7 +223,11 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { 
w.Header().Set("content-type", contentType) codec := newHTTPServerConn(r, w) defer codec.close() - s.serveSingleRequest(ctx, codec) + var stream *jsoniter.Stream + if !s.disableStreaming { + stream = jsoniter.NewStream(jsoniter.ConfigDefault, w, 4096) + } + s.serveSingleRequest(ctx, codec, stream) } // validateRequest returns a non-zero response code and error message if the diff --git a/rpc/http_test.go b/rpc/http_test.go index 602b84a3a88..728f90cbfc7 100644 --- a/rpc/http_test.go +++ b/rpc/http_test.go @@ -104,7 +104,7 @@ func TestHTTPResponseWithEmptyGet(t *testing.T) { func TestHTTPRespBodyUnlimited(t *testing.T) { const respLength = maxRequestContentLength * 3 - s := NewServer(50, false /* traceRequests */) + s := NewServer(50, false /* traceRequests */, true) defer s.Stop() if err := s.RegisterName("test", largeRespService{respLength}); err != nil { t.Fatal(err) diff --git a/rpc/server.go b/rpc/server.go index 1b42549cdbf..4df722629a6 100644 --- a/rpc/server.go +++ b/rpc/server.go @@ -22,6 +22,7 @@ import ( "sync/atomic" mapset "github.com/deckarep/golang-set" + jsoniter "github.com/json-iterator/go" "github.com/ledgerwatch/log/v3" ) @@ -49,12 +50,13 @@ type Server struct { codecs mapset.Set batchConcurrency uint + disableStreaming bool traceRequests bool // Whether to print requests at INFO level } // NewServer creates a new server instance with no registered handlers. 
-func NewServer(batchConcurrency uint, traceRequests bool) *Server { - server := &Server{idgen: randomIDGenerator(), codecs: mapset.NewSet(), run: 1, batchConcurrency: batchConcurrency, traceRequests: traceRequests} +func NewServer(batchConcurrency uint, traceRequests, disableStreaming bool) *Server { + server := &Server{idgen: randomIDGenerator(), codecs: mapset.NewSet(), run: 1, batchConcurrency: batchConcurrency, disableStreaming: disableStreaming, traceRequests: traceRequests} // Register the default service providing meta information about the RPC service such // as the services and methods it offers. rpcService := &RPCService{server: server} @@ -100,7 +102,7 @@ func (s *Server) ServeCodec(codec ServerCodec, options CodecOption) { // serveSingleRequest reads and processes a single RPC request from the given codec. This // is used to serve HTTP connections. Subscriptions and reverse calls are not allowed in // this mode. -func (s *Server) serveSingleRequest(ctx context.Context, codec ServerCodec) { +func (s *Server) serveSingleRequest(ctx context.Context, codec ServerCodec, stream *jsoniter.Stream) { // Don't serve if server is stopped. 
if atomic.LoadInt32(&s.run) == 0 { return @@ -120,7 +122,7 @@ func (s *Server) serveSingleRequest(ctx context.Context, codec ServerCodec) { if batch { h.handleBatch(reqs) } else { - h.handleMsg(reqs[0]) + h.handleMsg(reqs[0], stream) } } diff --git a/rpc/server_test.go b/rpc/server_test.go index c58b1168213..ed07e7f2225 100644 --- a/rpc/server_test.go +++ b/rpc/server_test.go @@ -31,7 +31,7 @@ import ( ) func TestServerRegisterName(t *testing.T) { - server := NewServer(50, false /* traceRequests */) + server := NewServer(50, false /* traceRequests */, true) service := new(testService) if err := server.RegisterName("test", service); err != nil { diff --git a/rpc/subscription_test.go b/rpc/subscription_test.go index 6b4d5d1cca7..874ab977c43 100644 --- a/rpc/subscription_test.go +++ b/rpc/subscription_test.go @@ -53,7 +53,7 @@ func TestSubscriptions(t *testing.T) { subCount = len(namespaces) notificationCount = 3 - server = NewServer(50, false /* traceRequests */) + server = NewServer(50, false /* traceRequests */, true) clientConn, serverConn = net.Pipe() out = json.NewEncoder(clientConn) in = json.NewDecoder(clientConn) diff --git a/rpc/testservice_test.go b/rpc/testservice_test.go index 1378620586d..04d2e792ef8 100644 --- a/rpc/testservice_test.go +++ b/rpc/testservice_test.go @@ -26,7 +26,7 @@ import ( ) func newTestServer() *Server { - server := NewServer(50, false /* traceRequests */) + server := NewServer(50, false /* traceRequests */, true) server.idgen = sequentialIDGenerator() if err := server.RegisterName("test", new(testService)); err != nil { panic(err) diff --git a/rpc/websocket_test.go b/rpc/websocket_test.go index 373687efa09..cd0fbb1c0b6 100644 --- a/rpc/websocket_test.go +++ b/rpc/websocket_test.go @@ -163,7 +163,7 @@ func TestClientWebsocketPing(t *testing.T) { // This checks that the websocket transport can deal with large messages. 
func TestClientWebsocketLargeMessage(t *testing.T) { var ( - srv = NewServer(50, false /* traceRequests */) + srv = NewServer(50, false /* traceRequests */, true) httpsrv = httptest.NewServer(srv.WebsocketHandler(nil, nil, false)) wsURL = "ws:" + strings.TrimPrefix(httpsrv.URL, "http:") ) diff --git a/turbo/cli/default_flags.go b/turbo/cli/default_flags.go index 3f4aa8cc07d..7c7e783b8c4 100644 --- a/turbo/cli/default_flags.go +++ b/turbo/cli/default_flags.go @@ -61,6 +61,7 @@ var DefaultFlags = []cli.Flag{ utils.HTTPTraceFlag, utils.StateCacheFlag, utils.RpcBatchConcurrencyFlag, + utils.RpcStreamingDisableFlag, utils.DBReadConcurrencyFlag, utils.RpcAccessListFlag, utils.RpcTraceCompatFlag, diff --git a/turbo/cli/flags.go b/turbo/cli/flags.go index cd58ceea21e..7dd6f18fd20 100644 --- a/turbo/cli/flags.go +++ b/turbo/cli/flags.go @@ -2,10 +2,11 @@ package cli import ( "fmt" - "github.com/ledgerwatch/erigon/rpc/rpccfg" "strings" "time" + "github.com/ledgerwatch/erigon/rpc/rpccfg" + "github.com/c2h5oh/datasize" "github.com/ledgerwatch/erigon-lib/etl" "github.com/ledgerwatch/erigon-lib/kv" @@ -344,6 +345,7 @@ func setEmbeddedRpcDaemon(ctx *cli.Context, cfg *nodecfg.Config) { WebsocketEnabled: ctx.GlobalIsSet(utils.WSEnabledFlag.Name), RpcBatchConcurrency: ctx.GlobalUint(utils.RpcBatchConcurrencyFlag.Name), + RpcStreamingDisable: ctx.GlobalBool(utils.RpcStreamingDisableFlag.Name), DBReadConcurrency: ctx.GlobalInt(utils.DBReadConcurrencyFlag.Name), RpcAllowListFilePath: ctx.GlobalString(utils.RpcAccessListFlag.Name), Gascap: ctx.GlobalUint64(utils.RpcGasCapFlag.Name), From f05401b78ece0f3751dd57da5f0f371b61222724 Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Wed, 6 Jul 2022 13:14:13 +0200 Subject: [PATCH 050/152] Don't write headBlockHash & co when FCU points to an old canonical block (#4650) --- eth/stagedsync/stage_headers.go | 25 +++++++++---------------- 1 file changed, 9 insertions(+), 16 deletions(-) diff --git 
a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 4cd6e6e63b9..9b4bde03951 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -332,22 +332,15 @@ func startHandlingForkChoice( if headerHash == canonicalHash { log.Info(fmt.Sprintf("[%s] Fork choice on previously known block", s.LogPrefix())) cfg.hd.BeaconRequestList.Remove(requestId) - rawdb.WriteForkchoiceHead(tx, forkChoice.HeadBlockHash) - canonical, err := safeAndFinalizedBlocksAreCanonical(forkChoice, s, tx, cfg) - if err != nil { - log.Warn(fmt.Sprintf("[%s] Fork choice err", s.LogPrefix()), "err", err) - return nil, err - } - if canonical { - return &privateapi.PayloadStatus{ - Status: remote.EngineStatus_VALID, - LatestValidHash: headerHash, - }, nil - } else { - return &privateapi.PayloadStatus{ - CriticalError: &privateapi.InvalidForkchoiceStateErr, - }, nil - } + // Per the Engine API spec: + // Client software MAY skip an update of the forkchoice state and MUST NOT begin a payload build process + // if forkchoiceState.headBlockHash references an ancestor of the head of canonical chain. + // In the case of such an event, client software MUST return + // {payloadStatus: {status: VALID, latestValidHash: forkchoiceState.headBlockHash, validationError: null}, payloadId: null}. 
+ return &privateapi.PayloadStatus{ + Status: remote.EngineStatus_VALID, + LatestValidHash: headerHash, + }, nil } if cfg.memoryOverlay && headerHash == cfg.hd.GetNextForkHash() { From 99208ff4dc91da8f08556cb073f46b73573bd152 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Wed, 6 Jul 2022 13:52:49 +0200 Subject: [PATCH 051/152] Fixed hive test (#4653) --- eth/stagedsync/stage_headers.go | 2 +- turbo/stages/headerdownload/header_algos.go | 10 +++++++++- turbo/stages/stageloop.go | 10 +++++++--- 3 files changed, 17 insertions(+), 5 deletions(-) diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 9b4bde03951..03007d1c799 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -268,7 +268,7 @@ func startHandlingForkChoice( headerHash := forkChoice.HeadBlockHash log.Debug(fmt.Sprintf("[%s] Handling fork choice", s.LogPrefix()), "headerHash", headerHash) if cfg.memoryOverlay { - defer cfg.hd.CleanNextForkState() + defer cfg.hd.CleanNextForkState(tx, cfg.execPayload) } currentHeadHash := rawdb.ReadHeadHeaderHash(tx) if currentHeadHash == headerHash { // no-op diff --git a/turbo/stages/headerdownload/header_algos.go b/turbo/stages/headerdownload/header_algos.go index 03bf5ef186f..c1e539b6d7e 100644 --- a/turbo/stages/headerdownload/header_algos.go +++ b/turbo/stages/headerdownload/header_algos.go @@ -1217,9 +1217,17 @@ func (hd *HeaderDownload) FlushNextForkState(tx kv.RwTx) error { return nil } -func (hd *HeaderDownload) CleanNextForkState() { +func (hd *HeaderDownload) CleanNextForkState(tx kv.RwTx, execPayload func(kv.RwTx, *types.Header, *types.RawBody, uint64, []*types.Header, []*types.RawBody) error) { hd.lock.Lock() defer hd.lock.Unlock() + sb, ok := hd.sideForksBlock[hd.nextForkHash] + // If we did not flush the fork state, then we need to notify the txpool. 
+ if hd.nextForkState != nil && hd.nextForkHash != (common.Hash{}) && ok { + hd.nextForkState.UpdateTxn(tx) + if err := execPayload(hd.nextForkState, nil, nil, sb.header.Number.Uint64()-1, nil, nil); err != nil { + log.Warn("Could not clean payload", "err", err) + } + } hd.nextForkHash = common.Hash{} hd.nextForkState = nil } diff --git a/turbo/stages/stageloop.go b/turbo/stages/stageloop.go index ef794f74009..b7e86dc4843 100644 --- a/turbo/stages/stageloop.go +++ b/turbo/stages/stageloop.go @@ -246,9 +246,6 @@ func MiningStep(ctx context.Context, kv kv.RwDB, mining *stagedsync.Sync) (err e } func StateStep(ctx context.Context, batch kv.RwTx, stateSync *stagedsync.Sync, headerReader services.FullBlockReader, header *types.Header, body *types.RawBody, unwindPoint uint64, headersChain []*types.Header, bodiesChain []*types.RawBody) (err error) { - // Setup - height := header.Number.Uint64() - hash := header.Hash() defer func() { if rec := recover(); rec != nil { @@ -282,6 +279,13 @@ func StateStep(ctx context.Context, batch kv.RwTx, stateSync *stagedsync.Sync, h } } } + // If we did not specify header or body we stop here + if header == nil || body == nil { + return nil + } + // Setup + height := header.Number.Uint64() + hash := header.Hash() // Prepare memory state for block execution if err = rawdb.WriteRawBodyIfNotExists(batch, hash, height, body); err != nil { return err From b84867f13b3f12b9c39bc5892a3c782636a5142f Mon Sep 17 00:00:00 2001 From: hexoscott <70711990+hexoscott@users.noreply.github.com> Date: Wed, 6 Jul 2022 13:22:32 +0100 Subject: [PATCH 052/152] adding health endpoint documentation for headers (#4656) --- cmd/rpcdaemon/README.md | 39 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 38 insertions(+), 1 deletion(-) diff --git a/cmd/rpcdaemon/README.md b/cmd/rpcdaemon/README.md index 8caf193d6ca..341bc7c8c14 100644 --- a/cmd/rpcdaemon/README.md +++ b/cmd/rpcdaemon/README.md @@ -72,7 +72,10 @@ it may scale well for some workloads that are heavy 
on the current state queries ### Healthcheck -Running the daemon also opens an endpoint `/health` that provides a basic health check. +There are 2 options for running healthchecks, POST request, or GET request with custom headers. Both options are available +at the `/health` endpoint. + +#### POST request If the health check is successful it returns 200 OK. @@ -107,6 +110,40 @@ Example response } ``` +#### GET with headers + +If the healthcheck is successful it will return a 200 status code. + +If the healthcheck fails for any reason a status 500 will be returned. This is true if one of the criteria requested +fails its check. + +You can set any number of values on the `X-ERIGON-HEALTHCHECK` header. Ones that are not included are skipped in the +checks. + +Available Options: +- `synced` - will check if the node has completed syncing +- `min_peer_count` - will check that the node has at least `` many peers +- `check_block` - will check that the node is at least ahead of the `` specified +- `max_seconds_behind` - will check that the node is no more than `` behind from its latest block + +Example Request +``` +curl --location --request GET 'http://localhost:8545/health' \ +--header 'X-ERIGON-HEALTHCHECK: min_peer_count1' \ +--header 'X-ERIGON-HEALTHCHECK: synced' \ +--header 'X-ERIGON-HEALTHCHECK: max_seconds_behind600' +``` + +Example Response +``` +{ + "check_block":"DISABLED", + "max_seconds_behind":"HEALTHY", + "min_peer_count":"HEALTHY", + "synced":"HEALTHY" +} +``` + ### Testing By default, the `rpcdaemon` serves data from `localhost:8545`.
You may send `curl` commands to see if things are From 71525fa85e157d7d8b4aab07541622afd9140db8 Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Wed, 6 Jul 2022 14:42:33 +0200 Subject: [PATCH 053/152] safeAndFinalizedBlocksAreCanonical -> writeForkChoiceHashes (#4655) * safeAndFinalizedBlocksAreCanonical -> writeForkChoiceHashes * fix --- eth/stagedsync/stage_headers.go | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 03007d1c799..1202e3f89b9 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -218,7 +218,7 @@ func HeadersPOS( return nil } -func safeAndFinalizedBlocksAreCanonical( +func writeForkChoiceHashes( forkChoice *engineapi.ForkChoiceMessage, s *StageState, tx kv.RwTx, @@ -229,9 +229,7 @@ func safeAndFinalizedBlocksAreCanonical( if err != nil { return false, err } - if safeIsCanonical { - rawdb.WriteForkchoiceSafe(tx, forkChoice.SafeBlockHash) - } else { + if !safeIsCanonical { log.Warn(fmt.Sprintf("[%s] Non-canonical SafeBlockHash", s.LogPrefix()), "forkChoice", forkChoice) return false, nil } @@ -242,14 +240,20 @@ func safeAndFinalizedBlocksAreCanonical( if err != nil { return false, err } - if finalizedIsCanonical { - rawdb.WriteForkchoiceFinalized(tx, forkChoice.FinalizedBlockHash) - } else { + if !finalizedIsCanonical { log.Warn(fmt.Sprintf("[%s] Non-canonical FinalizedBlockHash", s.LogPrefix()), "forkChoice", forkChoice) return false, nil } } + rawdb.WriteForkchoiceHead(tx, forkChoice.HeadBlockHash) + if forkChoice.SafeBlockHash != (common.Hash{}) { + rawdb.WriteForkchoiceSafe(tx, forkChoice.SafeBlockHash) + } + if forkChoice.FinalizedBlockHash != (common.Hash{}) { + rawdb.WriteForkchoiceFinalized(tx, forkChoice.FinalizedBlockHash) + } + return true, nil } @@ -274,8 +278,7 @@ func startHandlingForkChoice( if currentHeadHash == headerHash { // no-op 
log.Debug(fmt.Sprintf("[%s] Fork choice no-op", s.LogPrefix())) cfg.hd.BeaconRequestList.Remove(requestId) - rawdb.WriteForkchoiceHead(tx, forkChoice.HeadBlockHash) - canonical, err := safeAndFinalizedBlocksAreCanonical(forkChoice, s, tx, cfg) + canonical, err := writeForkChoiceHashes(forkChoice, s, tx, cfg) if err != nil { log.Warn(fmt.Sprintf("[%s] Fork choice err", s.LogPrefix()), "err", err) return nil, err @@ -349,8 +352,7 @@ func startHandlingForkChoice( return nil, err } cfg.hd.BeaconRequestList.Remove(requestId) - rawdb.WriteForkchoiceHead(tx, forkChoice.HeadBlockHash) - canonical, err := safeAndFinalizedBlocksAreCanonical(forkChoice, s, tx, cfg) + canonical, err := writeForkChoiceHashes(forkChoice, s, tx, cfg) if err != nil { log.Warn(fmt.Sprintf("[%s] Fork choice err", s.LogPrefix()), "err", err) return nil, err @@ -416,9 +418,8 @@ func finishHandlingForkChoice( if err := rawdb.WriteHeadHeaderHash(tx, forkChoice.HeadBlockHash); err != nil { return err } - rawdb.WriteForkchoiceHead(tx, forkChoice.HeadBlockHash) - canonical, err := safeAndFinalizedBlocksAreCanonical(forkChoice, s, tx, cfg) + canonical, err := writeForkChoiceHashes(forkChoice, s, tx, cfg) if err != nil { return err } From 329d586464334fce05a4613055c30077b4630c78 Mon Sep 17 00:00:00 2001 From: ledgerwatch Date: Wed, 6 Jul 2022 14:10:12 +0100 Subject: [PATCH 054/152] Try to reproduce and fix eth_getBlockByNumber returning nil (#4608) * Small optimisation for eth_getBlockByNumber * Option to not retrieve transactions * fixes * Check hash * Fixes * Avoid shadowing of err in BlockWithSenders * Fix test Co-authored-by: Alexey Sharp Co-authored-by: Alex Sharp --- cmd/rpctest/main.go | 11 ++++ cmd/rpctest/rpctest/bench1.go | 4 +- cmd/rpctest/rpctest/bench_blockbynumber.go | 63 +++++++++++++++++++ cmd/rpctest/rpctest/bench_debugtracecall.go | 4 +- cmd/rpctest/rpctest/bench_ethcall.go | 4 +- cmd/rpctest/rpctest/bench_traceblock.go | 2 +- cmd/rpctest/rpctest/bench_tracecall.go | 2 +- 
cmd/rpctest/rpctest/bench_tracecallmany.go | 2 +- .../rpctest/bench_tracereplaytransaction.go | 2 +- cmd/rpctest/rpctest/bench_tracetransaction.go | 4 +- cmd/rpctest/rpctest/bench_txreceipts.go | 2 +- cmd/rpctest/rpctest/request_generator.go | 6 +- cmd/rpctest/rpctest/type.go | 3 +- turbo/snapshotsync/block_reader.go | 4 +- 14 files changed, 94 insertions(+), 19 deletions(-) create mode 100644 cmd/rpctest/rpctest/bench_blockbynumber.go diff --git a/cmd/rpctest/main.go b/cmd/rpctest/main.go index 53a19fe0877..c3bb37819f6 100644 --- a/cmd/rpctest/main.go +++ b/cmd/rpctest/main.go @@ -242,6 +242,16 @@ func main() { } with(benchTraceReplayTransactionCmd, withGethUrl, withErigonUrl, withNeedCompare, withBlockNum, withRecord, withErrorFile) + var benchEthBlockByNumberCmd = &cobra.Command{ + Use: "benchBlockByNumber", + Short: "", + Long: ``, + Run: func(cmd *cobra.Command, args []string) { + rpctest.BenchEthGetBlockByNumber(erigonURL) + }, + } + with(benchEthBlockByNumberCmd, withErigonUrl) + var replayCmd = &cobra.Command{ Use: "replay", Short: "", @@ -293,6 +303,7 @@ func main() { benchTxReceiptCmd, compareAccountRange, benchTraceReplayTransactionCmd, + benchEthBlockByNumberCmd, replayCmd, ) if err := rootCmd.ExecuteContext(rootContext()); err != nil { diff --git a/cmd/rpctest/rpctest/bench1.go b/cmd/rpctest/rpctest/bench1.go index 3096e8eb1f9..065c8fe6247 100644 --- a/cmd/rpctest/rpctest/bench1.go +++ b/cmd/rpctest/rpctest/bench1.go @@ -55,7 +55,7 @@ func Bench1(erigonURL, gethURL string, needCompare bool, fullTest bool, blockFro for bn := blockFrom; bn <= blockTo; bn++ { reqGen.reqID++ var b EthBlockByNumber - res = reqGen.Erigon("eth_getBlockByNumber", reqGen.getBlockByNumber(bn), &b) + res = reqGen.Erigon("eth_getBlockByNumber", reqGen.getBlockByNumber(bn, true /* withTxs */), &b) resultsCh <- res if res.Err != nil { fmt.Printf("Could not retrieve block (Erigon) %d: %v\n", bn, res.Err) @@ -68,7 +68,7 @@ func Bench1(erigonURL, gethURL string, needCompare bool, 
fullTest bool, blockFro if needCompare { var bg EthBlockByNumber - res = reqGen.Geth("eth_getBlockByNumber", reqGen.getBlockByNumber(bn), &bg) + res = reqGen.Geth("eth_getBlockByNumber", reqGen.getBlockByNumber(bn, true /* withTxs */), &bg) if res.Err != nil { fmt.Printf("Could not retrieve block (geth) %d: %v\n", bn, res.Err) return diff --git a/cmd/rpctest/rpctest/bench_blockbynumber.go b/cmd/rpctest/rpctest/bench_blockbynumber.go new file mode 100644 index 00000000000..9ba4a06177f --- /dev/null +++ b/cmd/rpctest/rpctest/bench_blockbynumber.go @@ -0,0 +1,63 @@ +package rpctest + +import ( + "fmt" + "net/http" + "time" +) + +// BenchEthGetBlockByNumber generates lots of requests for eth_getBlockByNumber to attempt to reproduce issue where empty results are being returned +func BenchEthGetBlockByNumber(erigonURL string) { + setRoutes(erigonURL, erigonURL) + var client = &http.Client{ + Timeout: time.Second * 600, + } + var res CallResult + reqGen := &RequestGenerator{ + client: client, + } + reqGen.reqID++ + var blockNumber EthBlockNumber + res = reqGen.Erigon("eth_blockNumber", reqGen.blockNumber(), &blockNumber) + if res.Err != nil { + fmt.Printf("Could not get block number: %v\n", res.Err) + return + } + if blockNumber.Error != nil { + fmt.Printf("Error getting block number: %d %s\n", blockNumber.Error.Code, blockNumber.Error.Message) + return + } + fmt.Printf("Last block: %d\n", blockNumber.Number) + for bn := uint64(0); bn <= uint64(blockNumber.Number)/2; bn++ { + reqGen.reqID++ + res = reqGen.Erigon2("eth_getBlockByNumber", reqGen.getBlockByNumber(bn, false /* withTxs */)) + if res.Err != nil { + fmt.Printf("Could not retrieve block (Erigon) %d: %v\n", bn, res.Err) + return + } + if errVal := res.Result.Get("error"); errVal != nil { + fmt.Printf("error: %d %s", errVal.GetInt("code"), errVal.GetStringBytes("message")) + return + } + if res.Result.Get("result") == nil || res.Result.Get("result").Get("number") == nil { + fmt.Printf("empty result: %s\n", 
res.Response) + return + } + + reqGen.reqID++ + bn1 := uint64(blockNumber.Number) - bn + res = reqGen.Erigon2("eth_getBlockByNumber", reqGen.getBlockByNumber(bn1, false /* withTxs */)) + if res.Err != nil { + fmt.Printf("Could not retrieve block (Erigon) %d: %v\n", bn1, res.Err) + return + } + if errVal := res.Result.Get("error"); errVal != nil { + fmt.Printf("error: %d %s", errVal.GetInt("code"), errVal.GetStringBytes("message")) + return + } + if res.Result.Get("result") == nil || res.Result.Get("result").Get("number") == nil { + fmt.Printf("empty result: %s\n", res.Response) + return + } + } +} diff --git a/cmd/rpctest/rpctest/bench_debugtracecall.go b/cmd/rpctest/rpctest/bench_debugtracecall.go index 033a3d1fb80..4492da33f7b 100644 --- a/cmd/rpctest/rpctest/bench_debugtracecall.go +++ b/cmd/rpctest/rpctest/bench_debugtracecall.go @@ -60,7 +60,7 @@ func BenchDebugTraceCall(erigonURL, gethURL string, needCompare bool, blockFrom for bn := blockFrom; bn <= blockTo; bn++ { reqGen.reqID++ var b EthBlockByNumber - res = reqGen.Erigon("eth_getBlockByNumber", reqGen.getBlockByNumber(bn), &b) + res = reqGen.Erigon("eth_getBlockByNumber", reqGen.getBlockByNumber(bn, true /* withTxs */), &b) if res.Err != nil { fmt.Printf("Could not retrieve block (Erigon) %d: %v\n", bn, res.Err) return @@ -73,7 +73,7 @@ func BenchDebugTraceCall(erigonURL, gethURL string, needCompare bool, blockFrom if needCompare { var bg EthBlockByNumber - res = reqGen.Geth("eth_getBlockByNumber", reqGen.getBlockByNumber(bn), &bg) + res = reqGen.Geth("eth_getBlockByNumber", reqGen.getBlockByNumber(bn, true /* withTxs */), &bg) if res.Err != nil { fmt.Printf("Could not retrieve block (geth) %d: %v\n", bn, res.Err) return diff --git a/cmd/rpctest/rpctest/bench_ethcall.go b/cmd/rpctest/rpctest/bench_ethcall.go index 3358f98c591..ddf1ba53322 100644 --- a/cmd/rpctest/rpctest/bench_ethcall.go +++ b/cmd/rpctest/rpctest/bench_ethcall.go @@ -64,7 +64,7 @@ func BenchEthCall(erigonURL, gethURL string, needCompare, 
latest bool, blockFrom for bn := blockFrom; bn <= blockTo; bn++ { reqGen.reqID++ var b EthBlockByNumber - res = reqGen.Erigon("eth_getBlockByNumber", reqGen.getBlockByNumber(bn), &b) + res = reqGen.Erigon("eth_getBlockByNumber", reqGen.getBlockByNumber(bn, true /* withTxs */), &b) if res.Err != nil { fmt.Printf("Could not retrieve block (Erigon) %d: %v\n", bn, res.Err) return @@ -77,7 +77,7 @@ func BenchEthCall(erigonURL, gethURL string, needCompare, latest bool, blockFrom if needCompare { var bg EthBlockByNumber - res = reqGen.Geth("eth_getBlockByNumber", reqGen.getBlockByNumber(bn), &bg) + res = reqGen.Geth("eth_getBlockByNumber", reqGen.getBlockByNumber(bn, true /* withTxs */), &bg) if res.Err != nil { fmt.Printf("Could not retrieve block (geth) %d: %v\n", bn, res.Err) return diff --git a/cmd/rpctest/rpctest/bench_traceblock.go b/cmd/rpctest/rpctest/bench_traceblock.go index fe05c04b6f9..6e6105be1db 100644 --- a/cmd/rpctest/rpctest/bench_traceblock.go +++ b/cmd/rpctest/rpctest/bench_traceblock.go @@ -60,7 +60,7 @@ func BenchTraceBlock(erigonURL, oeURL string, needCompare bool, blockFrom uint64 for bn := blockFrom; bn <= blockTo; bn++ { reqGen.reqID++ var b EthBlockByNumber - res = reqGen.Erigon("eth_getBlockByNumber", reqGen.getBlockByNumber(bn), &b) + res = reqGen.Erigon("eth_getBlockByNumber", reqGen.getBlockByNumber(bn, true /* withTxs */), &b) if res.Err != nil { fmt.Printf("Could not retrieve block (Erigon) %d: %v\n", bn, res.Err) return diff --git a/cmd/rpctest/rpctest/bench_tracecall.go b/cmd/rpctest/rpctest/bench_tracecall.go index 2ba848b0c58..1a1202b9873 100644 --- a/cmd/rpctest/rpctest/bench_tracecall.go +++ b/cmd/rpctest/rpctest/bench_tracecall.go @@ -60,7 +60,7 @@ func BenchTraceCall(erigonURL, oeURL string, needCompare bool, blockFrom uint64, for bn := blockFrom; bn <= blockTo; bn++ { reqGen.reqID++ var b EthBlockByNumber - res = reqGen.Erigon("eth_getBlockByNumber", reqGen.getBlockByNumber(bn), &b) + res = reqGen.Erigon("eth_getBlockByNumber", 
reqGen.getBlockByNumber(bn, true /* withTxs */), &b) if res.Err != nil { fmt.Printf("Could not retrieve block (Erigon) %d: %v\n", bn, res.Err) return diff --git a/cmd/rpctest/rpctest/bench_tracecallmany.go b/cmd/rpctest/rpctest/bench_tracecallmany.go index 7035a8cadd4..9fd506958e3 100644 --- a/cmd/rpctest/rpctest/bench_tracecallmany.go +++ b/cmd/rpctest/rpctest/bench_tracecallmany.go @@ -63,7 +63,7 @@ func BenchTraceCallMany(erigonURL, oeURL string, needCompare bool, blockFrom uin for bn := blockFrom; bn <= blockTo; bn++ { reqGen.reqID++ var b EthBlockByNumber - res = reqGen.Erigon("eth_getBlockByNumber", reqGen.getBlockByNumber(bn), &b) + res = reqGen.Erigon("eth_getBlockByNumber", reqGen.getBlockByNumber(bn, true /* withTxs */), &b) if res.Err != nil { fmt.Printf("Could not retrieve block (Erigon) %d: %v\n", bn, res.Err) return diff --git a/cmd/rpctest/rpctest/bench_tracereplaytransaction.go b/cmd/rpctest/rpctest/bench_tracereplaytransaction.go index 9249474da18..82e8854f5de 100644 --- a/cmd/rpctest/rpctest/bench_tracereplaytransaction.go +++ b/cmd/rpctest/rpctest/bench_tracereplaytransaction.go @@ -46,7 +46,7 @@ func BenchTraceReplayTransaction(erigonUrl, gethUrl string, needCompare bool, bl for bn := blockFrom; bn < blockTo; bn++ { var b EthBlockByNumber - res = reqGen.Erigon("eth_getBlockByNumber", reqGen.getBlockByNumber(bn), &b) + res = reqGen.Erigon("eth_getBlockByNumber", reqGen.getBlockByNumber(bn, true /* withTxs */), &b) if res.Err != nil { fmt.Printf("retrieve block (Erigon) %d: %v", blockFrom, res.Err) return diff --git a/cmd/rpctest/rpctest/bench_tracetransaction.go b/cmd/rpctest/rpctest/bench_tracetransaction.go index 403e1b99b17..36c4d50092f 100644 --- a/cmd/rpctest/rpctest/bench_tracetransaction.go +++ b/cmd/rpctest/rpctest/bench_tracetransaction.go @@ -46,7 +46,7 @@ func BenchTraceBlockByHash(erigonUrl, gethUrl string, needCompare bool, blockFro for bn := blockFrom; bn < blockTo; bn++ { var b EthBlockByNumber - res = 
reqGen.Erigon("eth_getBlockByNumber", reqGen.getBlockByNumber(bn), &b) + res = reqGen.Erigon("eth_getBlockByNumber", reqGen.getBlockByNumber(bn, true /* withTxs */), &b) if res.Err != nil { fmt.Printf("retrieve block (Erigon) %d: %v", blockFrom, res.Err) return @@ -103,7 +103,7 @@ func BenchTraceTransaction(erigonUrl, gethUrl string, needCompare bool, blockFro for bn := blockFrom; bn < blockTo; bn++ { var b EthBlockByNumber - res = reqGen.Erigon("eth_getBlockByNumber", reqGen.getBlockByNumber(bn), &b) + res = reqGen.Erigon("eth_getBlockByNumber", reqGen.getBlockByNumber(bn, true /* withTxs */), &b) if res.Err != nil { fmt.Printf("retrieve block (Erigon) %d: %v", blockFrom, res.Err) return diff --git a/cmd/rpctest/rpctest/bench_txreceipts.go b/cmd/rpctest/rpctest/bench_txreceipts.go index 2ee7406f7d7..02013b7b9c5 100644 --- a/cmd/rpctest/rpctest/bench_txreceipts.go +++ b/cmd/rpctest/rpctest/bench_txreceipts.go @@ -61,7 +61,7 @@ func BenchTxReceipt(erigonURL, gethURL string, needCompare bool, blockFrom uint6 for bn := blockFrom; bn <= blockTo; bn++ { reqGen.reqID++ var b EthBlockByNumber - res = reqGen.Erigon("eth_getBlockByNumber", reqGen.getBlockByNumber(bn), &b) + res = reqGen.Erigon("eth_getBlockByNumber", reqGen.getBlockByNumber(bn, false /* withTxs */), &b) if res.Err != nil { fmt.Printf("Could not retrieve block (Erigon) %d: %v\n", bn, res.Err) return diff --git a/cmd/rpctest/rpctest/request_generator.go b/cmd/rpctest/rpctest/request_generator.go index 5e23c3f1a7c..484b84bd995 100644 --- a/cmd/rpctest/rpctest/request_generator.go +++ b/cmd/rpctest/rpctest/request_generator.go @@ -31,9 +31,9 @@ func (g *RequestGenerator) blockNumber() string { const template = `{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":%d}` return fmt.Sprintf(template, g.reqID) } -func (g *RequestGenerator) getBlockByNumber(blockNum uint64) string { - const template = `{"jsonrpc":"2.0","method":"eth_getBlockByNumber","params":["0x%x",true],"id":%d}` - return 
fmt.Sprintf(template, blockNum, g.reqID) +func (g *RequestGenerator) getBlockByNumber(blockNum uint64, withTxs bool) string { + const template = `{"jsonrpc":"2.0","method":"eth_getBlockByNumber","params":["0x%x",%t],"id":%d}` + return fmt.Sprintf(template, blockNum, withTxs, g.reqID) } func (g *RequestGenerator) storageRangeAt(hash common.Hash, i int, to *common.Address, nextKey common.Hash) string { diff --git a/cmd/rpctest/rpctest/type.go b/cmd/rpctest/rpctest/type.go index 29cd65e5989..2a9bbd0d4fd 100644 --- a/cmd/rpctest/rpctest/type.go +++ b/cmd/rpctest/rpctest/type.go @@ -2,6 +2,7 @@ package rpctest import ( "fmt" + "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/hexutil" "github.com/ledgerwatch/erigon/core/state" @@ -61,7 +62,7 @@ type EthBlockByNumberResult struct { type EthBlockByNumber struct { CommonResponse - Result EthBlockByNumberResult `json:"result"` + Result *EthBlockByNumberResult `json:"result"` } type StructLog struct { diff --git a/turbo/snapshotsync/block_reader.go b/turbo/snapshotsync/block_reader.go index 4fd90b22c67..4bde09ed7dc 100644 --- a/turbo/snapshotsync/block_reader.go +++ b/turbo/snapshotsync/block_reader.go @@ -427,7 +427,7 @@ func (back *BlockReaderWithSnapshots) BlockWithSenders(ctx context.Context, tx k return nil }) if err != nil { - return + return nil, nil, err } if ok && h != nil { var b *types.Body @@ -441,7 +441,7 @@ func (back *BlockReaderWithSnapshots) BlockWithSenders(ctx context.Context, tx k return nil }) if err != nil { - return + return nil, nil, err } if ok && b != nil { if txsAmount == 0 { From 01bca8f1e289e9ddf73ad6b3c9a51554cb141e23 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Thu, 7 Jul 2022 11:07:48 +0600 Subject: [PATCH 055/152] TxPool: generics btree (#4665) * save * save * save * save --- go.mod | 24 +++++++++---------- go.sum | 50 +++++++++++++++++++++------------------ turbo/stages/stageloop.go | 2 +- 3 files changed, 40 insertions(+), 36 deletions(-) diff --git a/go.mod 
b/go.mod index fd3347d9883..afd7bc38995 100644 --- a/go.mod +++ b/go.mod @@ -11,20 +11,20 @@ require ( github.com/anacrolix/torrent v1.44.0 github.com/benesch/cgosymbolizer v0.0.0-20190515212042-bec6fe6e597b github.com/btcsuite/btcd v0.22.0-beta - github.com/c2h5oh/datasize v0.0.0-20200825124411-48ed595a09d2 + github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b github.com/consensys/gnark-crypto v0.4.0 github.com/davecgh/go-spew v1.1.1 github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48 github.com/edsrzf/mmap-go v1.1.0 - github.com/emicklei/dot v0.16.0 + github.com/emicklei/dot v1.0.0 github.com/emirpasic/gods v1.18.1 github.com/fjl/gencodec v0.0.0-20191126094850-e283372f291f github.com/goccy/go-json v0.9.7 github.com/gofrs/flock v0.8.1 github.com/golang-jwt/jwt/v4 v4.4.1 github.com/golang/snappy v0.0.4 - github.com/google/btree v1.0.1 + github.com/google/btree v1.1.2 github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa github.com/gorilla/websocket v1.5.0 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 @@ -35,7 +35,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible - github.com/ledgerwatch/erigon-lib v0.0.0-20220706054240-9e7f22667e55 + github.com/ledgerwatch/erigon-lib v0.0.0-20220707042037-fcddfca502a1 github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 @@ -44,9 +44,9 @@ require ( github.com/pion/stun v0.3.5 github.com/quasilyte/go-ruleguard/dsl v0.3.21 github.com/rs/cors v1.8.2 - github.com/spf13/cobra v1.4.0 + github.com/spf13/cobra v1.5.0 github.com/spf13/pflag v1.0.5 - github.com/stretchr/testify v1.7.2 + github.com/stretchr/testify v1.8.0 github.com/tendermint/go-amino v0.14.1 github.com/tendermint/tendermint v0.31.11 github.com/torquem-ch/mdbx-go v0.24.3-0.20220614090901-342411560dde @@ 
-56,11 +56,11 @@ require ( github.com/valyala/fastjson v1.6.3 github.com/xsleonard/go-merkle v1.1.0 go.uber.org/atomic v1.9.0 - golang.org/x/crypto v0.0.0-20220507011949-2cf3adece122 - golang.org/x/exp v0.0.0-20220428152302-39d4317da171 - golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6 - golang.org/x/time v0.0.0-20220411224347-583f2d630306 + golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d + golang.org/x/exp v0.0.0-20220706164943-b4a6d9510983 + golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f + golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e + golang.org/x/time v0.0.0-20220609170525-579cf78fd858 google.golang.org/grpc v1.46.2 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0 google.golang.org/protobuf v1.28.0 @@ -90,7 +90,7 @@ require ( github.com/bits-and-blooms/bitset v1.2.0 // indirect github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91 // indirect github.com/dustin/go-humanize v1.0.0 // indirect github.com/flanglet/kanzi-go v1.9.1-0.20211212184056-72dda96261ee // indirect diff --git a/go.sum b/go.sum index b6b1e9bf77a..87fb36d618d 100644 --- a/go.sum +++ b/go.sum @@ -126,8 +126,8 @@ github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= -github.com/c2h5oh/datasize v0.0.0-20200825124411-48ed595a09d2 h1:t8KYCwSKsOEZBFELI4Pn/phbp38iJ1RRAkDFNin1aak= -github.com/c2h5oh/datasize v0.0.0-20200825124411-48ed595a09d2/go.mod 
h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M= +github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b h1:6+ZFm0flnudZzdSE0JxlhR2hKnGPcNB35BjQf4RYQDY= +github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -151,8 +151,8 @@ github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.1 h1:r/myEWzV9lfsM1tFLgDyu0atFtJ1fXn261LKYj/3DxU= -github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -179,8 +179,8 @@ github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFP github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= github.com/edsrzf/mmap-go 
v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= -github.com/emicklei/dot v0.16.0 h1:7PseyizTgeQ/aSF1eo4LcEfWlQSlzamFZpzY/nMB9EY= -github.com/emicklei/dot v0.16.0/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s= +github.com/emicklei/dot v1.0.0 h1:yyObALINBOuI1GdCRwVea2IPtGtVgh0NQgJDrE03Tqc= +github.com/emicklei/dot v1.0.0/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= @@ -268,8 +268,8 @@ github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= +github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -277,8 +277,8 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa h1:Q75Upo5UN4JbPFURXZ8nLKYUvF85dyFRop/vQ0Rv+64= github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -386,8 +386,8 @@ github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3P github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220706054240-9e7f22667e55 h1:KOECbI1OzXn9Dwy58wHq0KMaG+4siMvxAPb3YSh+u+s= -github.com/ledgerwatch/erigon-lib v0.0.0-20220706054240-9e7f22667e55/go.mod h1:7sQ5B5m54zoo7RVRVukH3YZCYVrCC+BmwDBD+9KyTrE= +github.com/ledgerwatch/erigon-lib v0.0.0-20220707042037-fcddfca502a1 h1:FmhYe6F5gorY4XFS/nIohHS30Io99u1AyatKYjOf52c= +github.com/ledgerwatch/erigon-lib v0.0.0-20220707042037-fcddfca502a1/go.mod h1:omkeXZH0obdyYjsJTabLbw6yPB66ZWXjcmJfI0zLG+M= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= @@ -583,8 +583,8 @@ github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJ github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod 
h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q= -github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= +github.com/spf13/cobra v1.5.0 h1:X+jTBEBqF0bHN+9cSMgmfuvv2VHJ9ezmFNf9Y/XstYU= +github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= @@ -593,6 +593,7 @@ github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3 github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -600,8 +601,10 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= +github.com/stretchr/testify v1.7.1/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/tendermint/go-amino v0.14.1 h1:o2WudxNfdLNBwMyl2dqOJxiro5rfrEaU0Ugs6offJMk= github.com/tendermint/go-amino v0.14.1/go.mod h1:i/UKE5Uocn+argJJBb12qTZsCDBcAYMbR92AaJVmKso= github.com/tendermint/tendermint v0.31.11 h1:TIs//4WfEAG4TOZc2eUfJPI3T8KrywXQCCPnGAaM1Wo= @@ -668,11 +671,11 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220507011949-2cf3adece122 h1:NvGWuYG8dkDHFSKksI1P9faiVJ9rayE6l0+ouWVIDs8= -golang.org/x/crypto v0.0.0-20220507011949-2cf3adece122/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d h1:sK3txAijHtOK88l68nt020reeT1ZdKLIYetKl95FzVY= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20220428152302-39d4317da171 h1:TfdoLivD44QwvssI9Sv1xwa5DcL5XQr4au4sZ2F2NV4= -golang.org/x/exp v0.0.0-20220428152302-39d4317da171/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE= +golang.org/x/exp v0.0.0-20220706164943-b4a6d9510983 h1:sUweFwmLOje8KNfXAVqGGAsmgJ/F8jJ6wBLJDt4BTKY= +golang.org/x/exp v0.0.0-20220706164943-b4a6d9510983/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= golang.org/x/lint 
v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -726,8 +729,9 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f h1:Ax0t5p6N38Ga0dThY21weqDEyz2oklo4IvDkpigvkD8= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -765,8 +769,8 @@ golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211030160813-b3129d9d1021/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220405052023-b1e9470b6e64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6 h1:nonptSpoQ4vQjyraW20DXPAglgQfVnM9ZC6MmNLMR60= 
-golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e h1:CsOuNlbOuf0mzxJIefr6Q4uAUetRUwZE4qt7VfzP+xo= +golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -777,8 +781,8 @@ golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220411224347-583f2d630306 h1:+gHMid33q6pen7kv9xvT+JRinntgeXO2AeZVd0AWD3w= -golang.org/x/time v0.0.0-20220411224347-583f2d630306/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220609170525-579cf78fd858 h1:Dpdu/EMxGMFgq0CeYMh4fazTD2vtlZRYE7wyynxJb9U= +golang.org/x/time v0.0.0-20220609170525-579cf78fd858/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/turbo/stages/stageloop.go b/turbo/stages/stageloop.go index b7e86dc4843..3fa3dc78637 100644 --- a/turbo/stages/stageloop.go +++ b/turbo/stages/stageloop.go @@ -88,7 +88,7 @@ func StageLoop( log.Error("Staged Sync", "err", err) if 
recoveryErr := hd.RecoverFromDb(db); recoveryErr != nil { - log.Error("Failed to recover header sentriesClient", "err", recoveryErr) + log.Error("Failed to recover header downloader", "err", recoveryErr) } time.Sleep(500 * time.Millisecond) // just to avoid too much similar errors in logs continue From 024061018ee7dd6b68d4d0cb7d185f66c0f8bc12 Mon Sep 17 00:00:00 2001 From: Igor Mandrigin Date: Thu, 7 Jul 2022 09:34:51 +0200 Subject: [PATCH 056/152] Fix a deadlock under RPC load (#4667) * debugs * rename logs * updated erigon lib with the attempt to fix deadlock * Update erigon-lib that fixes the deadlock * Revert "rename logs" This reverts commit 13b6ac555314ce4317de2438d5f1a5e883343865. * Revert "debugs" This reverts commit 356de6c8ef3df2a440f8ad6c18ccb63496d8256d. * revert unnecessary change * go mod tidy Co-authored-by: Igor Mandrigin --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index afd7bc38995..92826f92b17 100644 --- a/go.mod +++ b/go.mod @@ -35,7 +35,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible - github.com/ledgerwatch/erigon-lib v0.0.0-20220707042037-fcddfca502a1 + github.com/ledgerwatch/erigon-lib v0.0.0-20220707065230-95e361fa1ed7 github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 diff --git a/go.sum b/go.sum index 87fb36d618d..939cfee5aaf 100644 --- a/go.sum +++ b/go.sum @@ -386,8 +386,8 @@ github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3P github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib 
v0.0.0-20220707042037-fcddfca502a1 h1:FmhYe6F5gorY4XFS/nIohHS30Io99u1AyatKYjOf52c= -github.com/ledgerwatch/erigon-lib v0.0.0-20220707042037-fcddfca502a1/go.mod h1:omkeXZH0obdyYjsJTabLbw6yPB66ZWXjcmJfI0zLG+M= +github.com/ledgerwatch/erigon-lib v0.0.0-20220707065230-95e361fa1ed7 h1:ytnJHsVttH1NleI45f6FbP7HaratpDx4IPCK/D/aZwI= +github.com/ledgerwatch/erigon-lib v0.0.0-20220707065230-95e361fa1ed7/go.mod h1:bttvdtZXjh803u/CeMerKYnWvVvXTICWSfpcMeQNtmc= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= From 7b80744768cb7ff1f2da6c04e6ff2c4bdae2e657 Mon Sep 17 00:00:00 2001 From: Levi Aul Date: Thu, 7 Jul 2022 01:40:50 -0700 Subject: [PATCH 057/152] Ensure (fake) Bor txs + receipts are returned from all relevant RPC methods (#4663) * Ensure fake Bor txs + receipts are returned from all relevant RPC methods * Add rest of bor implementation for eth_getBlockByNumber * Use TxLookup index to find Bor txs * Fix txHash on emitted borTxs and borReceipts * Fix checks given that borTxs get registered in TxLookup; remove useless ref indirections --- cmd/rpcdaemon/commands/eth_block.go | 25 +++++++++- cmd/rpcdaemon/commands/eth_receipts.go | 59 +++++++++++++----------- cmd/rpcdaemon/commands/eth_txs.go | 47 +++++++++++++++++-- cmd/rpcdaemon/commands/tracing.go | 2 +- cmd/rpcdaemon22/commands/eth_receipts.go | 43 ++++++----------- cmd/rpcdaemon22/commands/tracing.go | 2 +- core/rawdb/bor_receipts.go | 43 ++++++++++++----- core/types/access_list_tx.go | 4 ++ core/types/dynamic_fee_tx.go | 4 ++ core/types/legacy_tx.go | 6 +++ core/types/starknet_tx.go | 4 ++ core/types/transaction.go | 1 + internal/ethapi/api.go | 32 +++++++++++-- turbo/adapter/ethapi/internal.go | 14 ++++++ 14 files changed, 208 insertions(+), 78 deletions(-) diff --git a/cmd/rpcdaemon/commands/eth_block.go 
b/cmd/rpcdaemon/commands/eth_block.go index f21c4fedf44..f3d569e8a65 100644 --- a/cmd/rpcdaemon/commands/eth_block.go +++ b/cmd/rpcdaemon/commands/eth_block.go @@ -211,7 +211,30 @@ func (api *APIImpl) GetBlockByNumber(ctx context.Context, number rpc.BlockNumber if td != nil { additionalFields["totalDifficulty"] = (*hexutil.Big)(td) } - response, err := ethapi.RPCMarshalBlock(b, true, fullTx, additionalFields) + + chainConfig, err := api.chainConfig(tx) + if err != nil { + return nil, err + } + var borTx types.Transaction + var borReceipt *types.Receipt + if chainConfig.Bor != nil { + borTx, _, _, _, err = rawdb.ReadBorTransactionWithBlockNumberAndHash(tx, b.NumberU64(), b.Hash()) + if err != nil { + return nil, err + } + if borTx != nil { + borReceipt = rawdb.ReadBorReceipt(tx, b.Hash(), b.NumberU64()) + if borReceipt != nil { + borTx, err = borTx.WithHash(borReceipt.TxHash) + if err != nil { + return nil, err + } + } + } + } + + response, err := ethapi.RPCMarshalBlockEx(b, true, fullTx, borTx, borReceipt, additionalFields) if err == nil && number == rpc.PendingBlockNumber { // Pending blocks need to nil out a few fields diff --git a/cmd/rpcdaemon/commands/eth_receipts.go b/cmd/rpcdaemon/commands/eth_receipts.go index bfd481ec08d..8b41227542a 100644 --- a/cmd/rpcdaemon/commands/eth_receipts.go +++ b/cmd/rpcdaemon/commands/eth_receipts.go @@ -251,18 +251,11 @@ func (api *APIImpl) GetTransactionReceipt(ctx context.Context, hash common.Hash) } defer tx.Rollback() - var borTx *types.Transaction - var blockHash common.Hash var blockNum uint64 var ok bool - chainConfig, err := api.chainConfig(tx) - if err != nil { - return nil, err - } - blockNum, ok, err = api.txnLookup(ctx, tx, hash) - if blockNum == 0 { + if !ok || blockNum == 0 { // It is not an ideal solution (ideal solution requires extending TxnLookupReply proto type to include bool flag indicating absense of result), // but 0 block number is used here to mean that the transaction is not found return nil, nil @@ 
-270,21 +263,6 @@ func (api *APIImpl) GetTransactionReceipt(ctx context.Context, hash common.Hash) if err != nil { return nil, err } - if !ok { - if chainConfig.Bor != nil { - var blocN uint64 - borTx, blockHash, blocN, _, err = rawdb.ReadBorTransaction(tx, hash) - if err != nil { - return nil, err - } - if borTx == nil { - return nil, nil // not error, see https://github.com/ledgerwatch/erigon/issues/1645 - } - blockNum = blocN - } else { - return nil, nil // not error, see https://github.com/ledgerwatch/erigon/issues/1645 - } - } block, err := api.blockByNumberWithSenders(tx, blockNum) if err != nil { @@ -298,10 +276,6 @@ func (api *APIImpl) GetTransactionReceipt(ctx context.Context, hash common.Hash) if err != nil { return nil, err } - if borTx != nil { - receipt := rawdb.ReadBorReceipt(tx, blockHash, blockNum) - return marshalReceipt(receipt, *borTx, cc, block, hash), nil - } var txnIndex uint64 var txn types.Transaction for idx, transaction := range block.Transactions() { @@ -313,7 +287,19 @@ func (api *APIImpl) GetTransactionReceipt(ctx context.Context, hash common.Hash) } if txn == nil { - return nil, nil + if cc.Bor == nil { + return nil, nil + } + + borTx, blockHash, _, _, err := rawdb.ReadBorTransactionWithBlockNumber(tx, blockNum) + if err != nil { + return nil, err + } + if borTx == nil { + return nil, nil + } + borReceipt := rawdb.ReadBorReceipt(tx, blockHash, blockNum) + return marshalReceipt(borReceipt, borTx, cc, block, hash), nil } receipts, err := api.getReceipts(ctx, tx, cc, block, block.Body().SendersFromTxs()) @@ -360,6 +346,23 @@ func (api *APIImpl) GetBlockReceipts(ctx context.Context, number rpc.BlockNumber result = append(result, marshalReceipt(receipt, txn, chainConfig, block, txn.Hash())) } + if chainConfig.Bor != nil { + borTx, _, _, _, err := rawdb.ReadBorTransactionWithBlockNumberAndHash(tx, blockNum, block.Hash()) + if err != nil { + return nil, err + } + if borTx != nil { + borReceipt := rawdb.ReadBorReceipt(tx, block.Hash(), 
blockNum) + borTx, err = borTx.WithHash(borReceipt.TxHash) + if err != nil { + return nil, err + } + if borReceipt != nil { + result = append(result, marshalReceipt(borReceipt, borTx, chainConfig, block, borReceipt.TxHash)) + } + } + } + return result, nil } diff --git a/cmd/rpcdaemon/commands/eth_txs.go b/cmd/rpcdaemon/commands/eth_txs.go index 507c52225b2..7e452bf2c38 100644 --- a/cmd/rpcdaemon/commands/eth_txs.go +++ b/cmd/rpcdaemon/commands/eth_txs.go @@ -61,8 +61,17 @@ func (api *APIImpl) GetTransactionByHash(ctx context.Context, hash common.Hash) // if no transaction was found then we return nil if txn == nil { - return nil, nil + if chainConfig.Bor != nil { + borTx, _, _, _, err := rawdb.ReadBorTransactionWithBlockNumberAndHash(tx, blockNum, block.Hash()) + if err != nil { + return nil, err + } + if borTx != nil { + return newRPCTransaction(borTx, blockHash, blockNum, uint64(len(block.Transactions())), baseFee), nil + } + } + return nil, nil } return newRPCTransaction(txn, blockHash, blockNum, txnIndex, baseFee), nil @@ -152,6 +161,10 @@ func (api *APIImpl) GetTransactionByBlockHashAndIndex(ctx context.Context, block return nil, err } defer tx.Rollback() + chainConfig, err := api.chainConfig(tx) + if err != nil { + return nil, err + } // https://infura.io/docs/ethereum/json-rpc/eth-getTransactionByBlockHashAndIndex block, err := api.blockByHashWithSenders(tx, blockHash) @@ -163,8 +176,20 @@ func (api *APIImpl) GetTransactionByBlockHashAndIndex(ctx context.Context, block } txs := block.Transactions() - if uint64(txIndex) >= uint64(len(txs)) { + if uint64(txIndex) > uint64(len(txs)) { return nil, nil // not error + } else if uint64(txIndex) == uint64(len(txs)) { + if chainConfig.Bor != nil { + borTx, _, _, _, err := rawdb.ReadBorTransactionWithBlockNumberAndHash(tx, block.NumberU64(), block.Hash()) + if err != nil { + return nil, err + } + if borTx != nil { + return newRPCTransaction(borTx, block.Hash(), block.NumberU64(), uint64(txIndex), block.BaseFee()), 
nil + } + } else { + return nil, nil // not error + } } return newRPCTransaction(txs[txIndex], block.Hash(), block.NumberU64(), uint64(txIndex), block.BaseFee()), nil @@ -197,6 +222,10 @@ func (api *APIImpl) GetTransactionByBlockNumberAndIndex(ctx context.Context, blo return nil, err } defer tx.Rollback() + chainConfig, err := api.chainConfig(tx) + if err != nil { + return nil, err + } // https://infura.io/docs/ethereum/json-rpc/eth-getTransactionByBlockNumberAndIndex blockNum, _, _, err := rpchelper.GetBlockNumber(rpc.BlockNumberOrHashWithNumber(blockNr), tx, api.filters) @@ -213,8 +242,20 @@ func (api *APIImpl) GetTransactionByBlockNumberAndIndex(ctx context.Context, blo } txs := block.Transactions() - if uint64(txIndex) >= uint64(len(txs)) { + if uint64(txIndex) > uint64(len(txs)) { return nil, nil // not error + } else if uint64(txIndex) == uint64(len(txs)) { + if chainConfig.Bor != nil { + borTx, _, _, _, err := rawdb.ReadBorTransactionWithBlockNumberAndHash(tx, blockNum, block.Hash()) + if err != nil { + return nil, err + } + if borTx != nil { + return newRPCTransaction(borTx, block.Hash(), block.NumberU64(), uint64(txIndex), block.BaseFee()), nil + } + } else { + return nil, nil // not error + } } return newRPCTransaction(txs[txIndex], block.Hash(), block.NumberU64(), uint64(txIndex), block.BaseFee()), nil diff --git a/cmd/rpcdaemon/commands/tracing.go b/cmd/rpcdaemon/commands/tracing.go index 16dfecfc3df..a69e3791db8 100644 --- a/cmd/rpcdaemon/commands/tracing.go +++ b/cmd/rpcdaemon/commands/tracing.go @@ -150,7 +150,7 @@ func (api *PrivateDebugAPIImpl) TraceTransaction(ctx context.Context, hash commo } } if txn == nil { - var borTx *types.Transaction + var borTx types.Transaction borTx, _, _, _, err = rawdb.ReadBorTransaction(tx, hash) if err != nil { stream.WriteNil() diff --git a/cmd/rpcdaemon22/commands/eth_receipts.go b/cmd/rpcdaemon22/commands/eth_receipts.go index f822381c54b..bdd74f1017d 100644 --- a/cmd/rpcdaemon22/commands/eth_receipts.go +++ 
b/cmd/rpcdaemon22/commands/eth_receipts.go @@ -263,18 +263,11 @@ func (api *APIImpl) GetTransactionReceipt(ctx context.Context, hash common.Hash) } defer tx.Rollback() - var borTx *types.Transaction - var blockHash common.Hash var blockNum uint64 var ok bool - chainConfig, err := api.chainConfig(tx) - if err != nil { - return nil, err - } - blockNum, ok, err = api.txnLookup(ctx, tx, hash) - if blockNum == 0 { + if !ok || blockNum == 0 { // It is not an ideal solution (ideal solution requires extending TxnLookupReply proto type to include bool flag indicating absense of result), // but 0 block number is used here to mean that the transaction is not found return nil, nil @@ -282,21 +275,6 @@ func (api *APIImpl) GetTransactionReceipt(ctx context.Context, hash common.Hash) if err != nil { return nil, err } - if !ok { - if chainConfig.Bor != nil { - var blocN uint64 - borTx, blockHash, blocN, _, err = rawdb.ReadBorTransaction(tx, hash) - if err != nil { - return nil, err - } - if borTx == nil { - return nil, nil // not error, see https://github.com/ledgerwatch/erigon/issues/1645 - } - blockNum = blocN - } else { - return nil, nil // not error, see https://github.com/ledgerwatch/erigon/issues/1645 - } - } block, err := api.blockByNumberWithSenders(tx, blockNum) if err != nil { @@ -310,10 +288,7 @@ func (api *APIImpl) GetTransactionReceipt(ctx context.Context, hash common.Hash) if err != nil { return nil, err } - if borTx != nil { - receipt := rawdb.ReadBorReceipt(tx, blockHash, blockNum) - return marshalReceipt(receipt, *borTx, cc, block, hash), nil - } + var txnIndex uint64 var txn types.Transaction for idx, transaction := range block.Transactions() { @@ -325,7 +300,19 @@ func (api *APIImpl) GetTransactionReceipt(ctx context.Context, hash common.Hash) } if txn == nil { - return nil, nil + if cc.Bor == nil { + return nil, nil + } + + borTx, blockHash, _, _, err := rawdb.ReadBorTransactionWithBlockNumber(tx, blockNum) + if err != nil { + return nil, err + } + if borTx == 
nil { + return nil, nil + } + borReceipt := rawdb.ReadBorReceipt(tx, blockHash, blockNum) + return marshalReceipt(borReceipt, borTx, cc, block, hash), nil } receipts, err := api.getReceipts(ctx, tx, cc, block, block.Body().SendersFromTxs()) diff --git a/cmd/rpcdaemon22/commands/tracing.go b/cmd/rpcdaemon22/commands/tracing.go index a63add34e34..02b56214acd 100644 --- a/cmd/rpcdaemon22/commands/tracing.go +++ b/cmd/rpcdaemon22/commands/tracing.go @@ -145,7 +145,7 @@ func (api *PrivateDebugAPIImpl) TraceTransaction(ctx context.Context, hash commo } } if txn == nil { - var borTx *types.Transaction + var borTx types.Transaction borTx, _, _, _, err = rawdb.ReadBorTransaction(tx, hash) if err != nil { diff --git a/core/rawdb/bor_receipts.go b/core/rawdb/bor_receipts.go index 123edabe76a..ed89e892d5d 100644 --- a/core/rawdb/bor_receipts.go +++ b/core/rawdb/bor_receipts.go @@ -108,13 +108,16 @@ func DeleteBorReceipt(tx kv.RwTx, hash common.Hash, number uint64) { } } +/* // ReadBorTransactionWithBlockHash retrieves a specific bor (fake) transaction by tx hash and block hash, along with // its added positional metadata. 
-func ReadBorTransactionWithBlockHash(db kv.Tx, txHash common.Hash, blockHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error) { - blockNumber := ReadHeaderNumber(db, txHash) - +func ReadBorTransactionWithBlockHash(db kv.Tx, borTxHash common.Hash, blockHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error) { + blockNumber, err := ReadTxLookupEntry(db, borTxHash) + if err != nil { + return nil, common.Hash{}, 0, 0, err + } if blockNumber == nil { - return nil, common.Hash{}, 0, 0, nil + return nil, common.Hash{}, 0, 0, errors.New("missing block number") } bodyForStorage, err := ReadStorageBody(db, blockHash, *blockNumber) @@ -125,28 +128,46 @@ func ReadBorTransactionWithBlockHash(db kv.Tx, txHash common.Hash, blockHash com var tx types.Transaction = types.NewBorTransaction() return &tx, blockHash, *blockNumber, uint64(bodyForStorage.TxAmount), nil } +*/ // ReadBorTransaction retrieves a specific bor (fake) transaction by hash, along with // its added positional metadata. -func ReadBorTransaction(db kv.Tx, hash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error) { - blockNumber := ReadHeaderNumber(db, hash) - +func ReadBorTransaction(db kv.Tx, borTxHash common.Hash) (types.Transaction, common.Hash, uint64, uint64, error) { + blockNumber, err := ReadTxLookupEntry(db, borTxHash) + if err != nil { + return nil, common.Hash{}, 0, 0, err + } if blockNumber == nil { return nil, common.Hash{}, 0, 0, errors.New("missing block number") } - blockHash, _ := ReadCanonicalHash(db, *blockNumber) + return ReadBorTransactionWithBlockNumber(db, *blockNumber) +} + +// ReadBorTransaction retrieves a specific bor (fake) transaction by block number, along with +// its added positional metadata. 
+func ReadBorTransactionWithBlockNumber(db kv.Tx, blockNumber uint64) (types.Transaction, common.Hash, uint64, uint64, error) { + blockHash, err := ReadCanonicalHash(db, blockNumber) + if err != nil { + return nil, common.Hash{}, 0, 0, err + } if blockHash == (common.Hash{}) { return nil, common.Hash{}, 0, 0, errors.New("missing block hash") } - bodyForStorage, err := ReadStorageBody(db, hash, *blockNumber) + return ReadBorTransactionWithBlockNumberAndHash(db, blockNumber, blockHash) +} + +// ReadBorTransactionWithBlockNumberAndHash retrieves a specific bor (fake) transaction by block number and block hash, along with +// its added positional metadata. +func ReadBorTransactionWithBlockNumberAndHash(db kv.Tx, blockNumber uint64, blockHash common.Hash) (types.Transaction, common.Hash, uint64, uint64, error) { + bodyForStorage, err := ReadStorageBody(db, blockHash, blockNumber) if err != nil { - return nil, common.Hash{}, 0, 0, nil + return nil, common.Hash{}, 0, 0, err } var tx types.Transaction = types.NewBorTransaction() - return &tx, blockHash, *blockNumber, uint64(bodyForStorage.TxAmount), nil + return tx, blockHash, blockNumber, uint64(bodyForStorage.TxAmount), nil } // TruncateBorReceipts removes all bor receipt for given block number or newer diff --git a/core/types/access_list_tx.go b/core/types/access_list_tx.go index ed700591cd2..538413130d4 100644 --- a/core/types/access_list_tx.go +++ b/core/types/access_list_tx.go @@ -566,6 +566,10 @@ func (tx *AccessListTx) FakeSign(address common.Address) (Transaction, error) { return cpy, nil } +func (tx *AccessListTx) WithHash(newHash common.Hash) (Transaction, error) { + return nil, errors.New("hash is immutable for AccessListTx") +} + // Hash computes the hash (but not for signatures!) 
func (tx *AccessListTx) Hash() common.Hash { if hash := tx.hash.Load(); hash != nil { diff --git a/core/types/dynamic_fee_tx.go b/core/types/dynamic_fee_tx.go index aac5967bf8d..f1eaf57235d 100644 --- a/core/types/dynamic_fee_tx.go +++ b/core/types/dynamic_fee_tx.go @@ -463,6 +463,10 @@ func (tx DynamicFeeTransaction) AsMessage(s Signer, baseFee *big.Int, rules *par return msg, err } +func (tx *DynamicFeeTransaction) WithHash(newHash common.Hash) (Transaction, error) { + return nil, errors.New("hash is immutable for DynamicFeeTransaction") +} + // Hash computes the hash (but not for signatures!) func (tx *DynamicFeeTransaction) Hash() common.Hash { if hash := tx.hash.Load(); hash != nil { diff --git a/core/types/legacy_tx.go b/core/types/legacy_tx.go index 01ba4d63d0d..f3385156d56 100644 --- a/core/types/legacy_tx.go +++ b/core/types/legacy_tx.go @@ -482,6 +482,12 @@ func (tx *LegacyTx) FakeSign(address common.Address) (Transaction, error) { return cpy, nil } +func (tx *LegacyTx) WithHash(hash common.Hash) (Transaction, error) { + cpy := tx.copy() + cpy.hash.Store(&hash) + return cpy, nil +} + // Hash computes the hash (but not for signatures!) 
func (tx *LegacyTx) Hash() common.Hash { if hash := tx.hash.Load(); hash != nil { diff --git a/core/types/starknet_tx.go b/core/types/starknet_tx.go index d90805d7254..2b6ebeb8055 100644 --- a/core/types/starknet_tx.go +++ b/core/types/starknet_tx.go @@ -170,6 +170,10 @@ func (tx StarknetTransaction) FakeSign(address common.Address) (Transaction, err panic("implement me") } +func (tx *StarknetTransaction) WithHash(newHash common.Hash) (Transaction, error) { + return nil, errors.New("hash is immutable for StarknetTransaction") +} + func (tx StarknetTransaction) Hash() common.Hash { if hash := tx.hash.Load(); hash != nil { return *hash.(*common.Hash) diff --git a/core/types/transaction.go b/core/types/transaction.go index 1934103b24c..6c8561d11a2 100644 --- a/core/types/transaction.go +++ b/core/types/transaction.go @@ -65,6 +65,7 @@ type Transaction interface { AsMessage(s Signer, baseFee *big.Int, rules *params.Rules) (Message, error) WithSignature(signer Signer, sig []byte) (Transaction, error) FakeSign(address common.Address) (Transaction, error) + WithHash(newHash common.Hash) (Transaction, error) Hash() common.Hash SigningHash(chainID *big.Int) common.Hash Size() common.StorageSize diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index 29fe64aac4b..9c198e981bd 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -283,26 +283,39 @@ func RPCMarshalHeader(head *types.Header) map[string]interface{} { // returned. When fullTx is true the returned block contains full transaction details, otherwise it will only contain // transaction hashes. 
func RPCMarshalBlock(block *types.Block, inclTx bool, fullTx bool) (map[string]interface{}, error) { + return RPCMarshalBlockEx(block, inclTx, fullTx, nil, nil) +} + +func RPCMarshalBlockEx(block *types.Block, inclTx bool, fullTx bool, borTx types.Transaction, borTxReceipt *types.Receipt) (map[string]interface{}, error) { fields := RPCMarshalHeader(block.Header()) fields["size"] = hexutil.Uint64(block.Size()) if inclTx { - formatTx := func(tx types.Transaction) (interface{}, error) { + formatTx := func(tx types.Transaction, index int) (interface{}, error) { return tx.Hash(), nil } if fullTx { - formatTx = func(tx types.Transaction) (interface{}, error) { - return newRPCTransactionFromBlockHash(block, tx.Hash()), nil + formatTx = func(tx types.Transaction, index int) (interface{}, error) { + return newRPCTransactionFromBlockAndTxGivenIndex(block, tx, uint64(index)), nil } } txs := block.Transactions() - transactions := make([]interface{}, len(txs)) + transactions := make([]interface{}, len(txs), len(txs)+1) var err error for i, tx := range txs { - if transactions[i], err = formatTx(tx); err != nil { + if transactions[i], err = formatTx(tx, i); err != nil { return nil, err } } + + if borTx != nil && borTxReceipt != nil { + if fullTx { + transactions = append(transactions, newRPCTransactionFromBlockAndTxGivenIndex(block, borTx, uint64(len(txs)))) + } else { + transactions = append(transactions, borTxReceipt.TxHash) + } + } + fields["transactions"] = transactions } uncles := block.Uncles() @@ -438,6 +451,7 @@ func newRPCPendingTransaction(tx types.Transaction) *RPCTransaction { } */ +/* // newRPCTransactionFromBlockIndex returns a transaction that will serialize to the RPC representation. 
func newRPCTransactionFromBlockIndex(b *types.Block, index uint64) *RPCTransaction { txs := b.Transactions() @@ -446,6 +460,12 @@ func newRPCTransactionFromBlockIndex(b *types.Block, index uint64) *RPCTransacti } return newRPCTransaction(txs[index], b.Hash(), b.NumberU64(), index, b.BaseFee()) } +*/ + +// newRPCTransactionFromBlockAndTxGivenIndex returns a transaction that will serialize to the RPC representation. +func newRPCTransactionFromBlockAndTxGivenIndex(b *types.Block, tx types.Transaction, index uint64) *RPCTransaction { + return newRPCTransaction(tx, b.Hash(), b.NumberU64(), index, b.BaseFee()) +} /* // newRPCRawTransactionFromBlockIndex returns the bytes of a transaction given a block and a transaction index. @@ -467,6 +487,7 @@ func newRPCRawTransactionFromBlockIndex(b *types.Block, index uint64) hexutil.By } */ +/* // newRPCTransactionFromBlockHash returns a transaction that will serialize to the RPC representation. func newRPCTransactionFromBlockHash(b *types.Block, hash common.Hash) *RPCTransaction { for idx, tx := range b.Transactions() { @@ -476,6 +497,7 @@ func newRPCTransactionFromBlockHash(b *types.Block, hash common.Hash) *RPCTransa } return nil } +*/ /* // PublicTransactionPoolAPI exposes methods for the RPC interface diff --git a/turbo/adapter/ethapi/internal.go b/turbo/adapter/ethapi/internal.go index 0be933ca509..95c81c555a6 100644 --- a/turbo/adapter/ethapi/internal.go +++ b/turbo/adapter/ethapi/internal.go @@ -44,6 +44,20 @@ func RPCMarshalBlock(b *types.Block, inclTx bool, fullTx bool, additional map[st return fields, err } +//nolint +func RPCMarshalBlockEx(b *types.Block, inclTx bool, fullTx bool, borTx types.Transaction, borReceipt *types.Receipt, additional map[string]interface{}) (map[string]interface{}, error) { + fields, err := ethapi.RPCMarshalBlockEx(b, inclTx, fullTx, borTx, borReceipt) + if err != nil { + return nil, err + } + + for k, v := range additional { + fields[k] = v + } + + return fields, err +} + //nolint type 
RPCTransaction struct { *ethapi.RPCTransaction From e13a318e0b3b97e8161d40708000d5025d066bac Mon Sep 17 00:00:00 2001 From: sudeep Date: Thu, 7 Jul 2022 17:17:00 +0530 Subject: [PATCH 058/152] evm t8n to use ExecuteBlockEphemerally api (#4642) * evm t8n tool to use ExecuteBlockEphemerally api (#4512) * fix to set V, R, S in legacy transaction * fix to dump post-execution alloc for evm t8n * close tx in evm t8n * populate current difficulty and gas used in output result - update the ExecutionResult to include corresponding info (like Difficulty/GasUsed) * initial attempt at migrating 'evm t8n' to use ExecuteBlockEphemerally * using ExecutionResult in ExecuteBlockEphemerally * bypass validations and integrate with EphemeralExecResult * fixing output of 'evm t8n' - remaining bits are "stateRoot" in results.txt and "balance" field for one account in alloc.txt (for testdata=1) * get ExecuteBlockEphemerally to accept getTracer lambda * fix build failure * test cases for evm t8n * more test cases for evm t8n * fix stateRoot computation in evm t8n * remove reward argument, as EBE itself takes care of it * final cleanups for migration to using ExecuteBlockEphemerally * change EBEforBSC to match EBE * fix linter issues * manually revert an unwanted diff * avoid calculating ReceiptHash twice * linter check * minor correction * remove unnecessary logic in EBEforBsc * fix integration tests * fix build --- accounts/abi/bind/backends/simulated.go | 5 +- cmd/evm/internal/t8ntool/execution.go | 288 +++-------------- cmd/evm/internal/t8ntool/flags.go | 7 +- cmd/evm/internal/t8ntool/gen_stenv.go | 65 ++-- cmd/evm/internal/t8ntool/transition.go | 268 ++++++++++++++-- cmd/evm/main.go | 3 +- cmd/evm/t8n_test.go | 247 ++++++++++++++ cmd/evm/testdata/1/exp.json | 45 +++ cmd/evm/testdata/10/alloc.json | 23 ++ cmd/evm/testdata/10/env.json | 12 + cmd/evm/testdata/10/exp.json | 79 +++++ cmd/evm/testdata/10/readme.md | 79 +++++ cmd/evm/testdata/10/txs.json | 70 ++++ 
cmd/evm/testdata/11/alloc.json | 25 ++ cmd/evm/testdata/11/env.json | 12 + cmd/evm/testdata/11/readme.md | 13 + cmd/evm/testdata/11/txs.json | 14 + cmd/evm/testdata/12/alloc.json | 11 + cmd/evm/testdata/12/env.json | 10 + cmd/evm/testdata/12/exp.json | 26 ++ cmd/evm/testdata/12/readme.md | 40 +++ cmd/evm/testdata/12/txs.json | 20 ++ cmd/evm/testdata/19/alloc.json | 12 + cmd/evm/testdata/19/env.json | 9 + cmd/evm/testdata/19/exp_arrowglacier.json | 24 ++ cmd/evm/testdata/19/exp_london.json | 24 ++ cmd/evm/testdata/19/readme.md | 9 + cmd/evm/testdata/19/txs.json | 1 + cmd/evm/testdata/3/exp.json | 39 +++ cmd/evm/testdata/5/exp.json | 23 ++ cmd/evm/testdata/7/exp.json | 375 ++++++++++++++++++++++ cmd/evm/testdata/8/exp.json | 68 ++++ cmd/evm/testdata/9/alloc.json | 28 +- cmd/evm/testdata/9/env.json | 15 +- cmd/evm/testdata/9/exp.json | 54 ++++ cmd/evm/testdata/9/readme.md | 75 +++++ cmd/evm/testdata/9/txs.json | 49 ++- cmd/integration/commands/state_stages.go | 3 +- cmd/rpcdaemon/commands/eth_receipts.go | 3 +- cmd/rpcdaemon22/commands/eth_receipts.go | 3 +- cmd/state/commands/erigon2.go | 2 +- cmd/state/commands/erigon22.go | 3 +- cmd/state/commands/history2.go | 2 +- cmd/state/commands/history22.go | 2 +- cmd/state/commands/opcode_tracer.go | 2 +- cmd/state/commands/state_recon.go | 3 +- cmd/state/commands/state_recon_1.go | 3 +- consensus/parlia/parlia.go | 2 +- core/blockchain.go | 209 ++++++++---- core/chain_makers.go | 4 +- core/evm.go | 4 +- core/state_processor.go | 58 +--- core/vm/logger.go | 75 +++++ eth/stagedsync/stage_execute.go | 17 +- eth/stagedsync/stage_mining_exec.go | 2 +- go.mod | 7 +- go.sum | 17 +- internal/cmdtest/test_cmd.go | 300 +++++++++++++++++ tests/state_test_util.go | 3 +- turbo/transactions/tracing.go | 3 +- 60 files changed, 2390 insertions(+), 504 deletions(-) create mode 100644 cmd/evm/t8n_test.go create mode 100644 cmd/evm/testdata/1/exp.json create mode 100644 cmd/evm/testdata/10/alloc.json create mode 100644 
cmd/evm/testdata/10/env.json create mode 100644 cmd/evm/testdata/10/exp.json create mode 100644 cmd/evm/testdata/10/readme.md create mode 100644 cmd/evm/testdata/10/txs.json create mode 100644 cmd/evm/testdata/11/alloc.json create mode 100644 cmd/evm/testdata/11/env.json create mode 100644 cmd/evm/testdata/11/readme.md create mode 100644 cmd/evm/testdata/11/txs.json create mode 100644 cmd/evm/testdata/12/alloc.json create mode 100644 cmd/evm/testdata/12/env.json create mode 100644 cmd/evm/testdata/12/exp.json create mode 100644 cmd/evm/testdata/12/readme.md create mode 100644 cmd/evm/testdata/12/txs.json create mode 100644 cmd/evm/testdata/19/alloc.json create mode 100644 cmd/evm/testdata/19/env.json create mode 100644 cmd/evm/testdata/19/exp_arrowglacier.json create mode 100644 cmd/evm/testdata/19/exp_london.json create mode 100644 cmd/evm/testdata/19/readme.md create mode 100644 cmd/evm/testdata/19/txs.json create mode 100644 cmd/evm/testdata/3/exp.json create mode 100644 cmd/evm/testdata/5/exp.json create mode 100644 cmd/evm/testdata/7/exp.json create mode 100644 cmd/evm/testdata/8/exp.json create mode 100644 cmd/evm/testdata/9/exp.json create mode 100644 cmd/evm/testdata/9/readme.md create mode 100644 internal/cmdtest/test_cmd.go diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go index a04bcb9149d..2c5753f68d5 100644 --- a/accounts/abi/bind/backends/simulated.go +++ b/accounts/abi/bind/backends/simulated.go @@ -667,7 +667,8 @@ func (b *SimulatedBackend) callContract(_ context.Context, call ethereum.CallMsg msg := callMsg{call} txContext := core.NewEVMTxContext(msg) - evmContext := core.NewEVMBlockContext(block.Header(), b.getHeader, b.m.Engine, nil, b.contractHasTEVM) + header := block.Header() + evmContext := core.NewEVMBlockContext(header, core.GetHashFn(header, b.getHeader), b.m.Engine, nil, b.contractHasTEVM) // Create a new environment which holds all relevant information // about the transaction and calling 
mechanisms. vmEnv := vm.NewEVM(evmContext, txContext, statedb, b.m.ChainConfig, vm.Config{}) @@ -696,7 +697,7 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx types.Transac b.pendingState.Prepare(tx.Hash(), common.Hash{}, len(b.pendingBlock.Transactions())) //fmt.Printf("==== Start producing block %d, header: %d\n", b.pendingBlock.NumberU64(), b.pendingHeader.Number.Uint64()) if _, _, err := core.ApplyTransaction( - b.m.ChainConfig, b.getHeader, b.m.Engine, + b.m.ChainConfig, core.GetHashFn(b.pendingHeader, b.getHeader), b.m.Engine, &b.pendingHeader.Coinbase, b.gasPool, b.pendingState, state.NewNoopWriter(), b.pendingHeader, tx, diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go index 1ef1457c2c7..0bd33bc8a43 100644 --- a/cmd/evm/internal/t8ntool/execution.go +++ b/cmd/evm/internal/t8ntool/execution.go @@ -17,29 +17,19 @@ package t8ntool import ( - "context" "encoding/binary" - "fmt" "math/big" "github.com/holiman/uint256" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/memdb" - "github.com/ledgerwatch/log/v3" - "golang.org/x/crypto/sha3" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/math" - "github.com/ledgerwatch/erigon/consensus/misc" + "github.com/ledgerwatch/erigon/consensus/ethash" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/state" - "github.com/ledgerwatch/erigon/core/systemcontracts" "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/core/vm" - "github.com/ledgerwatch/erigon/crypto" "github.com/ledgerwatch/erigon/params" - "github.com/ledgerwatch/erigon/rlp" - "github.com/ledgerwatch/erigon/turbo/trie" ) type Prestate struct { @@ -47,18 +37,6 @@ type Prestate struct { Pre core.GenesisAlloc `json:"pre"` } -// ExecutionResult contains the execution status after running a state test, any -// error that might have occurred and a dump of the final state if requested. 
-type ExecutionResult struct { - StateRoot common.Hash `json:"stateRoot"` - TxRoot common.Hash `json:"txRoot"` - ReceiptRoot common.Hash `json:"receiptRoot"` - LogsHash common.Hash `json:"logsHash"` - Bloom types.Bloom `json:"logsBloom" gencodec:"required"` - Receipts types.Receipts `json:"receipts"` - Rejected []*rejectedTx `json:"rejected,omitempty"` -} - type ommer struct { Delta uint64 `json:"delta"` Address common.Address `json:"address"` @@ -66,226 +44,36 @@ type ommer struct { //go:generate gencodec -type stEnv -field-override stEnvMarshaling -out gen_stenv.go type stEnv struct { - Coinbase common.Address `json:"currentCoinbase" gencodec:"required"` - Difficulty *big.Int `json:"currentDifficulty" gencodec:"required"` - GasLimit uint64 `json:"currentGasLimit" gencodec:"required"` - Number uint64 `json:"currentNumber" gencodec:"required"` - Timestamp uint64 `json:"currentTimestamp" gencodec:"required"` - BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` - Ommers []ommer `json:"ommers,omitempty"` - BaseFee *big.Int `json:"currentBaseFee,omitempty"` - Random *common.Hash `json:"currentRandom,omitempty"` -} - -type rejectedTx struct { - Index int `json:"index"` - Err string `json:"error"` + Coinbase common.Address `json:"currentCoinbase" gencodec:"required"` + Difficulty *big.Int `json:"currentDifficulty"` + Random *big.Int `json:"currentRandom"` + ParentDifficulty *big.Int `json:"parentDifficulty"` + GasLimit uint64 `json:"currentGasLimit" gencodec:"required"` + Number uint64 `json:"currentNumber" gencodec:"required"` + Timestamp uint64 `json:"currentTimestamp" gencodec:"required"` + ParentTimestamp uint64 `json:"parentTimestamp,omitempty"` + BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` + Ommers []ommer `json:"ommers,omitempty"` + BaseFee *big.Int `json:"currentBaseFee,omitempty"` + ParentUncleHash common.Hash `json:"parentUncleHash"` } type stEnvMarshaling struct { - Coinbase 
common.UnprefixedAddress - Difficulty *math.HexOrDecimal256 - GasLimit math.HexOrDecimal64 - Number math.HexOrDecimal64 - Timestamp math.HexOrDecimal64 - BaseFee *math.HexOrDecimal256 -} - -// Apply applies a set of transactions to a pre-state -func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, - txs types.Transactions, miningReward int64, - getTracerFn func(txIndex int, txHash common.Hash) (tracer vm.Tracer, err error)) (kv.RwDB, *ExecutionResult, error) { - - // Capture errors for BLOCKHASH operation, if we haven't been supplied the - // required blockhashes - var hashError error - getHash := func(num uint64) common.Hash { - if pre.Env.BlockHashes == nil { - hashError = fmt.Errorf("getHash(%d) invoked, no blockhashes provided", num) - return common.Hash{} - } - h, ok := pre.Env.BlockHashes[math.HexOrDecimal64(num)] - if !ok { - hashError = fmt.Errorf("getHash(%d) invoked, blockhash for that block not provided", num) - } - return h - } - db := memdb.New() - - tx, err := db.BeginRw(context.Background()) - if err != nil { - return nil, nil, err - } - defer tx.Rollback() - - var ( - rules0 = chainConfig.Rules(0) - rules1 = chainConfig.Rules(1) - rules = chainConfig.Rules(pre.Env.Number) - ibs = MakePreState(rules0, tx, pre.Pre) - signer = types.MakeSigner(chainConfig, pre.Env.Number) - gaspool = new(core.GasPool) - blockHash = common.Hash{0x13, 0x37} - rejectedTxs []*rejectedTx - includedTxs types.Transactions - gasUsed = uint64(0) - receipts = make(types.Receipts, 0) - txIndex = 0 - ) - gaspool.AddGas(pre.Env.GasLimit) - - difficulty := new(big.Int) - if pre.Env.Random == nil { - difficulty = pre.Env.Difficulty - } else { - // We are on POS hence difficulty opcode is now supplant with RANDOM - random := pre.Env.Random.Bytes() - difficulty.SetBytes(random) - } - vmContext := vm.BlockContext{ - CanTransfer: core.CanTransfer, - Transfer: core.Transfer, - Coinbase: pre.Env.Coinbase, - BlockNumber: pre.Env.Number, - ContractHasTEVM: 
func(common.Hash) (bool, error) { return false, nil }, - Time: pre.Env.Timestamp, - Difficulty: difficulty, - GasLimit: pre.Env.GasLimit, - GetHash: getHash, - } - // If currentBaseFee is defined, add it to the vmContext. - if pre.Env.BaseFee != nil { - vmContext.BaseFee = new(uint256.Int) - overflow := vmContext.BaseFee.SetFromBig(pre.Env.BaseFee) - if overflow { - return nil, nil, fmt.Errorf("pre.Env.BaseFee higher than 2^256-1") - } - } - // If DAO is supported/enabled, we need to handle it here. In geth 'proper', it's - // done in StateProcessor.Process(block, ...), right before transactions are applied. - if chainConfig.DAOForkSupport && - chainConfig.DAOForkBlock != nil && - chainConfig.DAOForkBlock.Cmp(new(big.Int).SetUint64(pre.Env.Number)) == 0 { - misc.ApplyDAOHardFork(ibs) - } - systemcontracts.UpgradeBuildInSystemContract(chainConfig, new(big.Int).SetUint64(pre.Env.Number), ibs) - - for i, txn := range txs { - msg, err := txn.AsMessage(*signer, pre.Env.BaseFee, rules) - if err != nil { - log.Warn("rejected txn", "index", i, "hash", txn.Hash(), "err", err) - rejectedTxs = append(rejectedTxs, &rejectedTx{i, err.Error()}) - continue - } - tracer, err := getTracerFn(txIndex, txn.Hash()) - if err != nil { - return nil, nil, err - } - vmConfig.Tracer = tracer - vmConfig.Debug = (tracer != nil) - ibs.Prepare(txn.Hash(), blockHash, txIndex) - txContext := core.NewEVMTxContext(msg) - snapshot := ibs.Snapshot() - evm := vm.NewEVM(vmContext, txContext, ibs, chainConfig, vmConfig) - - // (ret []byte, usedGas uint64, failed bool, err error) - msgResult, err := core.ApplyMessage(evm, msg, gaspool, true /* refunds */, false /* gasBailout */) - if err != nil { - ibs.RevertToSnapshot(snapshot) - log.Info("rejected txn", "index", i, "hash", txn.Hash(), "from", msg.From(), "err", err) - rejectedTxs = append(rejectedTxs, &rejectedTx{i, err.Error()}) - continue - } - includedTxs = append(includedTxs, txn) - if hashError != nil { - return nil, nil, 
NewError(ErrorMissingBlockhash, hashError) - } - gasUsed += msgResult.UsedGas - - // Receipt: - { - // Create a new receipt for the transaction, storing the intermediate root and - // gas used by the txn. - receipt := &types.Receipt{Type: txn.Type(), CumulativeGasUsed: gasUsed} - if msgResult.Failed() { - receipt.Status = types.ReceiptStatusFailed - } else { - receipt.Status = types.ReceiptStatusSuccessful - } - receipt.TxHash = txn.Hash() - receipt.GasUsed = msgResult.UsedGas - - // If the transaction created a contract, store the creation address in the receipt. - if msg.To() == nil { - receipt.ContractAddress = crypto.CreateAddress(evm.TxContext().Origin, txn.GetNonce()) - } - - // Set the receipt logs and create a bloom for filtering - receipt.Logs = ibs.GetLogs(txn.Hash()) - receipt.Bloom = types.CreateBloom(types.Receipts{receipt}) - // These three are non-consensus fields: - //receipt.BlockHash - //receipt.BlockNumber - receipt.TransactionIndex = uint(txIndex) - receipts = append(receipts, receipt) - } - - txIndex++ - } - // Add mining reward? - if miningReward > 0 { - // Add mining reward. The mining reward may be `0`, which only makes a difference in the cases - // where - // - the coinbase suicided, or - // - there are only 'bad' transactions, which aren't executed. 
In those cases, - // the coinbase gets no txfee, so isn't created, and thus needs to be touched - var ( - blockReward = uint256.NewInt(uint64(miningReward)) - minerReward = uint256.NewInt(0).Set(blockReward) - perOmmer = uint256.NewInt(0).Div(blockReward, uint256.NewInt(32)) - ) - for _, ommer := range pre.Env.Ommers { - // Add 1/32th for each ommer included - minerReward.Add(minerReward, perOmmer) - // Add (8-delta)/8 - reward := uint256.NewInt(8) - reward.Sub(reward, uint256.NewInt(ommer.Delta)) - reward.Mul(reward, blockReward) - reward.Div(reward, uint256.NewInt(8)) - ibs.AddBalance(ommer.Address, reward) - } - ibs.AddBalance(pre.Env.Coinbase, minerReward) - } - - // Commit block - var root common.Hash - if err = ibs.FinalizeTx(rules1, state.NewPlainStateWriter(tx, tx, 1)); err != nil { - return nil, nil, err - } - root, err = trie.CalcRoot("", tx) - if err != nil { - return nil, nil, err - } - if err = tx.Commit(); err != nil { - return nil, nil, err - } - - execRs := &ExecutionResult{ - StateRoot: root, - TxRoot: types.DeriveSha(includedTxs), - ReceiptRoot: types.DeriveSha(receipts), - Bloom: types.CreateBloom(receipts), - LogsHash: rlpHash(ibs.Logs()), - Receipts: receipts, - Rejected: rejectedTxs, - } - return db, execRs, nil + Coinbase common.UnprefixedAddress + Difficulty *math.HexOrDecimal256 + Random *math.HexOrDecimal256 + ParentDifficulty *math.HexOrDecimal256 + GasLimit math.HexOrDecimal64 + Number math.HexOrDecimal64 + Timestamp math.HexOrDecimal64 + ParentTimestamp math.HexOrDecimal64 + BaseFee *math.HexOrDecimal256 } -func MakePreState(chainRules *params.Rules, tx kv.RwTx, accounts core.GenesisAlloc) *state.IntraBlockState { +func MakePreState(chainRules *params.Rules, tx kv.RwTx, accounts core.GenesisAlloc) (*state.PlainStateReader, *state.PlainStateWriter) { var blockNr uint64 = 0 - r, _ := state.NewPlainStateReader(tx), state.NewPlainStateWriter(tx, tx, blockNr) - statedb := state.New(r) + stateReader, stateWriter := 
state.NewPlainStateReader(tx), state.NewPlainStateWriter(tx, tx, blockNr) + statedb := state.New(stateReader) //ibs for addr, a := range accounts { statedb.SetCode(addr, a.Code) statedb.SetNonce(addr, a.Nonce) @@ -299,7 +87,6 @@ func MakePreState(chainRules *params.Rules, tx kv.RwTx, accounts core.GenesisAll if len(a.Code) > 0 || len(a.Storage) > 0 { statedb.SetIncarnation(addr, state.FirstContractIncarnation) - var b [8]byte binary.BigEndian.PutUint64(b[:], state.FirstContractIncarnation) tx.Put(kv.IncarnationMap, addr[:], b[:]) @@ -312,12 +99,25 @@ func MakePreState(chainRules *params.Rules, tx kv.RwTx, accounts core.GenesisAll if err := statedb.CommitBlock(chainRules, state.NewPlainStateWriter(tx, tx, blockNr+1)); err != nil { panic(err) } - return statedb + return stateReader, stateWriter } -func rlpHash(x interface{}) (h common.Hash) { - hw := sha3.NewLegacyKeccak256() - rlp.Encode(hw, x) //nolint:errcheck - hw.Sum(h[:0]) - return h +// calcDifficulty is based on ethash.CalcDifficulty. This method is used in case +// the caller does not provide an explicit difficulty, but instead provides only +// parent timestamp + difficulty. +// Note: this method only works for ethash engine. 
+func calcDifficulty(config *params.ChainConfig, number, currentTime, parentTime uint64, + parentDifficulty *big.Int, parentUncleHash common.Hash) *big.Int { + uncleHash := parentUncleHash + if uncleHash == (common.Hash{}) { + uncleHash = types.EmptyUncleHash + } + parent := &types.Header{ + ParentHash: common.Hash{}, + UncleHash: uncleHash, + Difficulty: parentDifficulty, + Number: new(big.Int).SetUint64(number - 1), + Time: parentTime, + } + return ethash.CalcDifficulty(config, currentTime, parent.Time, parent.Difficulty, number-1, parent.UncleHash) } diff --git a/cmd/evm/internal/t8ntool/flags.go b/cmd/evm/internal/t8ntool/flags.go index 7a5da94d6f8..4a918b048fc 100644 --- a/cmd/evm/internal/t8ntool/flags.go +++ b/cmd/evm/internal/t8ntool/flags.go @@ -83,11 +83,6 @@ var ( Usage: "`stdin` or file name of where to find the transactions to apply.", Value: "txs.json", } - RewardFlag = cli.Int64Flag{ - Name: "state.reward", - Usage: "Mining reward. Set to -1 to disable", - Value: 0, - } ChainIDFlag = cli.Int64Flag{ Name: "state.chainid", Usage: "ChainID to use", @@ -103,7 +98,7 @@ var ( "\n\tSyntax (+ExtraEip)", strings.Join(tests.AvailableForks(), "\n\t "), strings.Join(vm.ActivateableEips(), ", ")), - Value: "Istanbul", + Value: "ArrowGlacier", } VerbosityFlag = cli.IntFlag{ Name: "verbosity", diff --git a/cmd/evm/internal/t8ntool/gen_stenv.go b/cmd/evm/internal/t8ntool/gen_stenv.go index 88dfc4d3cb2..677948e5927 100644 --- a/cmd/evm/internal/t8ntool/gen_stenv.go +++ b/cmd/evm/internal/t8ntool/gen_stenv.go @@ -16,41 +16,50 @@ var _ = (*stEnvMarshaling)(nil) // MarshalJSON marshals as JSON. 
func (s stEnv) MarshalJSON() ([]byte, error) { type stEnv struct { - Coinbase common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"` - Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"` - GasLimit math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"` - Number math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"` - Timestamp math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"` - BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` - Ommers []ommer `json:"ommers,omitempty"` - BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` - Random *common.Hash `json:"currentRandom,omitempty"` + Coinbase common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"` + Difficulty *math.HexOrDecimal256 `json:"currentDifficulty"` + Random *math.HexOrDecimal256 `json:"currentRandom"` + ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"` + GasLimit math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"` + Number math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"` + Timestamp math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"` + ParentTimestamp math.HexOrDecimal64 `json:"parentTimestamp,omitempty"` + BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` + Ommers []ommer `json:"ommers,omitempty"` + BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` + ParentUncleHash common.Hash `json:"parentUncleHash"` } var enc stEnv enc.Coinbase = common.UnprefixedAddress(s.Coinbase) enc.Difficulty = (*math.HexOrDecimal256)(s.Difficulty) + enc.Random = (*math.HexOrDecimal256)(s.Random) + enc.ParentDifficulty = (*math.HexOrDecimal256)(s.ParentDifficulty) enc.GasLimit = math.HexOrDecimal64(s.GasLimit) enc.Number = math.HexOrDecimal64(s.Number) enc.Timestamp = math.HexOrDecimal64(s.Timestamp) + enc.ParentTimestamp = math.HexOrDecimal64(s.ParentTimestamp) enc.BlockHashes = s.BlockHashes enc.Ommers = 
s.Ommers enc.BaseFee = (*math.HexOrDecimal256)(s.BaseFee) - enc.Random = s.Random + enc.ParentUncleHash = s.ParentUncleHash return json.Marshal(&enc) } // UnmarshalJSON unmarshals from JSON. func (s *stEnv) UnmarshalJSON(input []byte) error { type stEnv struct { - Coinbase *common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"` - Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"` - GasLimit *math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"` - Number *math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"` - Timestamp *math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"` - BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` - Ommers []ommer `json:"ommers,omitempty"` - BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` - Random *common.Hash `json:"currentRandom,omitempty"` + Coinbase *common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"` + Difficulty *math.HexOrDecimal256 `json:"currentDifficulty"` + Random *math.HexOrDecimal256 `json:"currentRandom"` + ParentDifficulty *math.HexOrDecimal256 `json:"parentDifficulty"` + GasLimit *math.HexOrDecimal64 `json:"currentGasLimit" gencodec:"required"` + Number *math.HexOrDecimal64 `json:"currentNumber" gencodec:"required"` + Timestamp *math.HexOrDecimal64 `json:"currentTimestamp" gencodec:"required"` + ParentTimestamp *math.HexOrDecimal64 `json:"parentTimestamp,omitempty"` + BlockHashes map[math.HexOrDecimal64]common.Hash `json:"blockHashes,omitempty"` + Ommers []ommer `json:"ommers,omitempty"` + BaseFee *math.HexOrDecimal256 `json:"currentBaseFee,omitempty"` + ParentUncleHash *common.Hash `json:"parentUncleHash"` } var dec stEnv if err := json.Unmarshal(input, &dec); err != nil { @@ -60,10 +69,15 @@ func (s *stEnv) UnmarshalJSON(input []byte) error { return errors.New("missing required field 'currentCoinbase' for stEnv") } s.Coinbase = common.Address(*dec.Coinbase) - if dec.Difficulty == nil 
{ - return errors.New("missing required field 'currentDifficulty' for stEnv") + if dec.Difficulty != nil { + s.Difficulty = (*big.Int)(dec.Difficulty) + } + if dec.Random != nil { + s.Random = (*big.Int)(dec.Random) + } + if dec.ParentDifficulty != nil { + s.ParentDifficulty = (*big.Int)(dec.ParentDifficulty) } - s.Difficulty = (*big.Int)(dec.Difficulty) if dec.GasLimit == nil { return errors.New("missing required field 'currentGasLimit' for stEnv") } @@ -76,6 +90,9 @@ func (s *stEnv) UnmarshalJSON(input []byte) error { return errors.New("missing required field 'currentTimestamp' for stEnv") } s.Timestamp = uint64(*dec.Timestamp) + if dec.ParentTimestamp != nil { + s.ParentTimestamp = uint64(*dec.ParentTimestamp) + } if dec.BlockHashes != nil { s.BlockHashes = dec.BlockHashes } @@ -85,8 +102,8 @@ func (s *stEnv) UnmarshalJSON(input []byte) error { if dec.BaseFee != nil { s.BaseFee = (*big.Int)(dec.BaseFee) } - if dec.Random != nil { - s.Random = dec.Random + if dec.ParentUncleHash != nil { + s.ParentUncleHash = *dec.ParentUncleHash } return nil } diff --git a/cmd/evm/internal/t8ntool/transition.go b/cmd/evm/internal/t8ntool/transition.go index a6547a7a3d6..230efb89be4 100644 --- a/cmd/evm/internal/t8ntool/transition.go +++ b/cmd/evm/internal/t8ntool/transition.go @@ -17,6 +17,7 @@ package t8ntool import ( + "context" "crypto/ecdsa" "encoding/json" "errors" @@ -27,9 +28,13 @@ import ( "path/filepath" "github.com/holiman/uint256" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/memdb" "github.com/ledgerwatch/erigon/cmd/rpcdaemon/commands" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/hexutil" + "github.com/ledgerwatch/erigon/common/math" + "github.com/ledgerwatch/erigon/consensus/ethash" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" @@ -38,6 +43,7 @@ import ( "github.com/ledgerwatch/erigon/params" 
"github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/tests" + "github.com/ledgerwatch/erigon/turbo/trie" "github.com/ledgerwatch/log/v3" "github.com/urfave/cli" @@ -67,10 +73,15 @@ func (n *NumberedError) Error() string { return fmt.Sprintf("ERROR(%d): %v", n.errorCode, n.err.Error()) } -func (n *NumberedError) Code() int { +func (n *NumberedError) ExitCode() int { return n.errorCode } +// compile-time conformance test +var ( + _ cli.ExitCoder = (*NumberedError)(nil) +) + type input struct { Alloc core.GenesisAlloc `json:"alloc,omitempty"` Env *stEnv `json:"env,omitempty"` @@ -79,16 +90,8 @@ type input struct { func Main(ctx *cli.Context) error { log.Root().SetHandler(log.LvlFilterHandler(log.LvlInfo, log.StderrHandler)) - /* - // Configure the go-ethereum logger - glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false))) - glogger.Verbosity(log.Lvl(ctx.Int(VerbosityFlag.Name))) - log.Root().SetHandler(glogger) - */ - var ( err error - tracer vm.Tracer baseDir = "" ) var getTracer func(txIndex int, txHash common.Hash) (vm.Tracer, error) @@ -162,6 +165,7 @@ func Main(ctx *cli.Context) error { return NewError(ErrorJson, fmt.Errorf("failed unmarshaling alloc-file: %v", err)) } } + prestate.Pre = inputData.Alloc // Set the block environment @@ -181,8 +185,8 @@ func Main(ctx *cli.Context) error { prestate.Env = *inputData.Env vmConfig := vm.Config{ - Tracer: tracer, - Debug: (tracer != nil), + Tracer: nil, + Debug: ctx.Bool(TraceFlag.Name), } // Construct the chainconfig var chainConfig *params.ChainConfig @@ -216,25 +220,90 @@ func Main(ctx *cli.Context) error { return NewError(ErrorJson, fmt.Errorf("failed signing transactions: %v", err)) } + eip1559 := chainConfig.IsLondon(prestate.Env.Number) // Sanity check, to not `panic` in state_transition - if chainConfig.IsLondon(prestate.Env.Number) { + if eip1559 { if prestate.Env.BaseFee == nil { return NewError(ErrorVMConfig, errors.New("EIP-1559 config but missing 
'currentBaseFee' in env section")) } } - // Run the test and aggregate the result - _, result, err1 := prestate.Apply(vmConfig, chainConfig, txs, ctx.Int64(RewardFlag.Name), getTracer) - if err1 != nil { - return err1 + // Sanity check, to not `panic` in state_transition + if prestate.Env.Random != nil && !eip1559 { + return NewError(ErrorVMConfig, errors.New("can only apply RANDOM on top of London chainrules")) + } + if env := prestate.Env; env.Difficulty == nil { + // If difficulty was not provided by caller, we need to calculate it. + switch { + case env.ParentDifficulty == nil: + return NewError(ErrorVMConfig, errors.New("currentDifficulty was not provided, and cannot be calculated due to missing parentDifficulty")) + case env.Number == 0: + return NewError(ErrorVMConfig, errors.New("currentDifficulty needs to be provided for block number 0")) + case env.Timestamp <= env.ParentTimestamp: + return NewError(ErrorVMConfig, fmt.Errorf("currentDifficulty cannot be calculated -- currentTime (%d) needs to be after parent time (%d)", + env.Timestamp, env.ParentTimestamp)) + } + prestate.Env.Difficulty = calcDifficulty(chainConfig, env.Number, env.Timestamp, + env.ParentTimestamp, env.ParentDifficulty, env.ParentUncleHash) + } + + // manufacture block from above inputs + header := NewHeader(prestate.Env, chainConfig.IsLondon(prestate.Env.Number)) + + var ommerHeaders = make([]*types.Header, len(prestate.Env.Ommers)) + header.Number.Add(header.Number, big.NewInt(int64(len(prestate.Env.Ommers)))) + for i, ommer := range prestate.Env.Ommers { + var ommerN big.Int + ommerN.SetUint64(header.Number.Uint64() - ommer.Delta) + ommerHeaders[i] = &types.Header{Coinbase: ommer.Address, Number: &ommerN} + } + block := types.NewBlock(header, txs, ommerHeaders, nil) + + var hashError error + getHash := func(num uint64) common.Hash { + if prestate.Env.BlockHashes == nil { + hashError = fmt.Errorf("getHash(%d) invoked, no blockhashes provided", num) + return common.Hash{} + } + h, ok := 
prestate.Env.BlockHashes[math.HexOrDecimal64(num)] + if !ok { + hashError = fmt.Errorf("getHash(%d) invoked, blockhash for that block not provided", num) + } + return h + } + db := memdb.New() + + tx, err := db.BeginRw(context.Background()) + if err != nil { + return err + } + + reader, writer := MakePreState(chainConfig.Rules(0), tx, prestate.Pre) + engine := ethash.NewFaker() + + result, err := core.ExecuteBlockEphemerally(chainConfig, &vmConfig, getHash, engine, block, reader, writer, nil, nil, nil, true, getTracer) + + if hashError != nil { + return NewError(ErrorMissingBlockhash, fmt.Errorf("blockhash error: %v", err)) } + + if err != nil { + return fmt.Errorf("error on EBE: %w", err) + } + + // state root calculation + root, err := CalculateStateRoot(tx) + if err != nil { + return err + } + result.StateRoot = *root + + // Dump the execution result body, _ := rlp.EncodeToBytes(txs) - // Dump the excution result collector := make(Alloc) - // TODO: Where DumpToCollector is declared? - //state.DumpToCollector(collector, false, false, false, nil, -1) + dumper := state.NewDumper(tx, prestate.Env.Number) + dumper.DumpToCollector(collector, false, false, common.Address{}, 0) return dispatchOutput(ctx, baseDir, result, collector, body) - } // txWithKey is a helper-struct, to allow us to use the types.Transaction along with @@ -261,8 +330,7 @@ func (t *txWithKey) UnmarshalJSON(input []byte) error { return err } } - gasPrice, value := uint256.NewInt(0), uint256.NewInt(0) - var overflow bool + // Now, read the transaction itself var txJson commands.RPCTransaction @@ -270,22 +338,104 @@ func (t *txWithKey) UnmarshalJSON(input []byte) error { return err } + // assemble transaction + tx, err := getTransaction(txJson) + if err != nil { + return err + } + t.tx = tx + return nil +} + +func getTransaction(txJson commands.RPCTransaction) (types.Transaction, error) { + gasPrice, value := uint256.NewInt(0), uint256.NewInt(0) + var overflow bool + var chainId *uint256.Int + if 
txJson.Value != nil { value, overflow = uint256.FromBig((*big.Int)(txJson.Value)) if overflow { - return fmt.Errorf("value field caused an overflow (uint256)") + return nil, fmt.Errorf("value field caused an overflow (uint256)") } } if txJson.GasPrice != nil { gasPrice, overflow = uint256.FromBig((*big.Int)(txJson.GasPrice)) if overflow { - return fmt.Errorf("gasPrice field caused an overflow (uint256)") + return nil, fmt.Errorf("gasPrice field caused an overflow (uint256)") } } - // assemble transaction - t.tx = types.NewTransaction(uint64(txJson.Nonce), *txJson.To, value, uint64(txJson.Gas), gasPrice, txJson.Input) - return nil + + if txJson.ChainID != nil { + chainId, overflow = uint256.FromBig((*big.Int)(txJson.ChainID)) + if overflow { + return nil, fmt.Errorf("chainId field caused an overflow (uint256)") + } + } + + switch txJson.Type { + case types.LegacyTxType, types.AccessListTxType: + var toAddr common.Address = common.Address{} + if txJson.To != nil { + toAddr = *txJson.To + } + legacyTx := types.NewTransaction(uint64(txJson.Nonce), toAddr, value, uint64(txJson.Gas), gasPrice, txJson.Input) + legacyTx.V.SetFromBig(txJson.V.ToInt()) + legacyTx.S.SetFromBig(txJson.S.ToInt()) + legacyTx.R.SetFromBig(txJson.R.ToInt()) + + if txJson.Type == types.AccessListTxType { + accessListTx := types.AccessListTx{ + LegacyTx: *legacyTx, + ChainID: chainId, + AccessList: *txJson.Accesses, + } + + return &accessListTx, nil + } else { + return legacyTx, nil + } + + case types.DynamicFeeTxType: + var tip *uint256.Int + var feeCap *uint256.Int + if txJson.Tip != nil { + tip, overflow = uint256.FromBig((*big.Int)(txJson.Tip)) + if overflow { + return nil, fmt.Errorf("maxPriorityFeePerGas field caused an overflow (uint256)") + } + } + + if txJson.FeeCap != nil { + feeCap, overflow = uint256.FromBig((*big.Int)(txJson.FeeCap)) + if overflow { + return nil, fmt.Errorf("maxFeePerGas field caused an overflow (uint256)") + } + } + + dynamicFeeTx := types.DynamicFeeTransaction{ + 
CommonTx: types.CommonTx{ + ChainID: chainId, + Nonce: uint64(txJson.Nonce), + To: txJson.To, + Value: value, + Gas: uint64(txJson.Gas), + Data: txJson.Input, + }, + Tip: tip, + FeeCap: feeCap, + AccessList: *txJson.Accesses, + } + + dynamicFeeTx.V.SetFromBig(txJson.V.ToInt()) + dynamicFeeTx.S.SetFromBig(txJson.S.ToInt()) + dynamicFeeTx.R.SetFromBig(txJson.R.ToInt()) + + return &dynamicFeeTx, nil + + default: + return nil, nil + } } // signUnsignedTransactions converts the input txs to canonical transactions. @@ -358,7 +508,7 @@ func saveFile(baseDir, filename string, data interface{}) error { // dispatchOutput writes the output data to either stderr or stdout, or to the specified // files -func dispatchOutput(ctx *cli.Context, baseDir string, result *ExecutionResult, alloc Alloc, body hexutil.Bytes) error { +func dispatchOutput(ctx *cli.Context, baseDir string, result *core.EphemeralExecResult, alloc Alloc, body hexutil.Bytes) error { stdOutObject := make(map[string]interface{}) stdErrObject := make(map[string]interface{}) dispatch := func(baseDir, fName, name string, obj interface{}) error { @@ -401,3 +551,65 @@ func dispatchOutput(ctx *cli.Context, baseDir string, result *ExecutionResult, a } return nil } + +func NewHeader(env stEnv, Eip1559 bool) *types.Header { + var header types.Header + header.UncleHash = env.ParentUncleHash + header.Coinbase = env.Coinbase + header.Difficulty = env.Difficulty + header.Number = big.NewInt(int64(env.Number)) + header.GasLimit = env.GasLimit + header.Time = env.Timestamp + header.BaseFee = env.BaseFee + header.Eip1559 = Eip1559 + + return &header +} + +func CalculateStateRoot(tx kv.RwTx) (*common.Hash, error) { + // Generate hashed state + c, err := tx.RwCursor(kv.PlainState) + if err != nil { + return nil, err + } + h := common.NewHasher() + defer common.ReturnHasherToPool(h) + for k, v, err := c.First(); k != nil; k, v, err = c.Next() { + if err != nil { + return nil, fmt.Errorf("interate over plain state: %w", err) + } + 
var newK []byte + if len(k) == common.AddressLength { + newK = make([]byte, common.HashLength) + } else { + newK = make([]byte, common.HashLength*2+common.IncarnationLength) + } + h.Sha.Reset() + //nolint:errcheck + h.Sha.Write(k[:common.AddressLength]) + //nolint:errcheck + h.Sha.Read(newK[:common.HashLength]) + if len(k) > common.AddressLength { + copy(newK[common.HashLength:], k[common.AddressLength:common.AddressLength+common.IncarnationLength]) + h.Sha.Reset() + //nolint:errcheck + h.Sha.Write(k[common.AddressLength+common.IncarnationLength:]) + //nolint:errcheck + h.Sha.Read(newK[common.HashLength+common.IncarnationLength:]) + if err = tx.Put(kv.HashedStorage, newK, common.CopyBytes(v)); err != nil { + return nil, fmt.Errorf("insert hashed key: %w", err) + } + } else { + if err = tx.Put(kv.HashedAccounts, newK, common.CopyBytes(v)); err != nil { + return nil, fmt.Errorf("insert hashed key: %w", err) + } + } + } + c.Close() + root, err := trie.CalcRoot("", tx) + if err != nil { + return nil, err + } + + return &root, nil +} diff --git a/cmd/evm/main.go b/cmd/evm/main.go index 451c0edbd8d..8d6bb1ef5f6 100644 --- a/cmd/evm/main.go +++ b/cmd/evm/main.go @@ -148,7 +148,6 @@ var stateTransitionCommand = cli.Command{ t8ntool.InputTxsFlag, t8ntool.ForknameFlag, t8ntool.ChainIDFlag, - t8ntool.RewardFlag, t8ntool.VerbosityFlag, }, } @@ -192,7 +191,7 @@ func main() { if err := app.Run(os.Args); err != nil { code := 1 if ec, ok := err.(*t8ntool.NumberedError); ok { - code = ec.Code() + code = ec.ExitCode() } fmt.Fprintln(os.Stderr, err) os.Exit(code) diff --git a/cmd/evm/t8n_test.go b/cmd/evm/t8n_test.go new file mode 100644 index 00000000000..a2868e080b8 --- /dev/null +++ b/cmd/evm/t8n_test.go @@ -0,0 +1,247 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + "reflect" + "strings" + "testing" + + "github.com/docker/docker/pkg/reexec" + "github.com/ledgerwatch/erigon/internal/cmdtest" +) + +func TestMain(m *testing.M) { + // Run the app if we've been exec'd 
as "ethkey-test" in runEthkey. + reexec.Register("evm-test", func() { + if err := app.Run(os.Args); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + os.Exit(0) + }) + // check if we have been reexec'd + if reexec.Init() { + return + } + os.Exit(m.Run()) +} + +type testT8n struct { + *cmdtest.TestCmd +} + +type t8nInput struct { + inAlloc string + inTxs string + inEnv string + stFork string +} + +func (args *t8nInput) get(base string) []string { + var out []string + if opt := args.inAlloc; opt != "" { + out = append(out, "--input.alloc") + out = append(out, fmt.Sprintf("%v/%v", base, opt)) + } + if opt := args.inTxs; opt != "" { + out = append(out, "--input.txs") + out = append(out, fmt.Sprintf("%v/%v", base, opt)) + } + if opt := args.inEnv; opt != "" { + out = append(out, "--input.env") + out = append(out, fmt.Sprintf("%v/%v", base, opt)) + } + if opt := args.stFork; opt != "" { + out = append(out, "--state.fork", opt) + } + return out +} + +type t8nOutput struct { + alloc bool + result bool + body bool +} + +func (args *t8nOutput) get() (out []string) { + if args.body { + out = append(out, "--output.body", "stdout") + } else { + out = append(out, "--output.body", "") // empty means ignore + } + if args.result { + out = append(out, "--output.result", "stdout") + } else { + out = append(out, "--output.result", "") + } + if args.alloc { + out = append(out, "--output.alloc", "stdout") + } else { + out = append(out, "--output.alloc", "") + } + return out +} + +func TestT8n(t *testing.T) { + tt := new(testT8n) + tt.TestCmd = cmdtest.NewTestCmd(t, tt) + for i, tc := range []struct { + base string + input t8nInput + output t8nOutput + expExitCode int + expOut string + }{ + { // Test exit (3) on bad config + base: "./testdata/1", + input: t8nInput{ + "alloc.json", "txs.json", "env.json", "Frontier+1346", + }, + output: t8nOutput{alloc: true, result: true}, + expExitCode: 3, + }, + { + base: "./testdata/1", + input: t8nInput{ + "alloc.json", "txs.json", 
"env.json", "Byzantium", + }, + output: t8nOutput{alloc: true, result: true}, + expOut: "exp.json", + }, + { // blockhash test + base: "./testdata/3", + input: t8nInput{ + "alloc.json", "txs.json", "env.json", "Berlin", + }, + output: t8nOutput{alloc: true, result: true}, + expOut: "exp.json", + }, + { // missing blockhash test + base: "./testdata/4", + input: t8nInput{ + "alloc.json", "txs.json", "env.json", "Berlin", + }, + expExitCode: 4, + }, + { // Uncle test + base: "./testdata/5", + input: t8nInput{ + "alloc.json", "txs.json", "env.json", "Byzantium", + }, + output: t8nOutput{alloc: true, result: true}, + expOut: "exp.json", + }, + { // Dao-transition check + base: "./testdata/7", + input: t8nInput{ + "alloc.json", "txs.json", "env.json", "HomesteadToDaoAt5", + }, + expOut: "exp.json", + output: t8nOutput{alloc: true, result: true}, + }, + { // transactions with access list + base: "./testdata/8", + input: t8nInput{ + "alloc.json", "txs.json", "env.json", "Berlin", + }, + expOut: "exp.json", + output: t8nOutput{alloc: true, result: true}, + }, + { // EIP-1559 + base: "./testdata/9", + input: t8nInput{ + "alloc.json", "txs.json", "env.json", "London", + }, + expOut: "exp.json", + output: t8nOutput{alloc: true, result: true}, + }, + { // EIP-1559 + base: "./testdata/10", + input: t8nInput{ + "alloc.json", "txs.json", "env.json", "London", + }, + expOut: "exp.json", + output: t8nOutput{alloc: true, result: true}, + }, + { // missing base fees + base: "./testdata/11", + input: t8nInput{ + "alloc.json", "txs.json", "env.json", "London", + }, + expExitCode: 3, + }, + { // EIP-1559 & gasCap + base: "./testdata/12", + input: t8nInput{ + "alloc.json", "txs.json", "env.json", "London", + }, + expOut: "exp.json", + output: t8nOutput{alloc: true, result: true}, + }, + { // Difficulty calculation on London + base: "./testdata/19", + input: t8nInput{ + "alloc.json", "txs.json", "env.json", "London", + }, + expOut: "exp_london.json", + output: t8nOutput{alloc: true, 
result: true}, + }, + { // Difficulty calculation on arrow glacier + base: "./testdata/19", + input: t8nInput{ + "alloc.json", "txs.json", "env.json", "ArrowGlacier", + }, + expOut: "exp_arrowglacier.json", + output: t8nOutput{alloc: true, result: true}, + }, + } { + + args := []string{"t8n"} + args = append(args, tc.output.get()...) + args = append(args, tc.input.get(tc.base)...) + var qArgs []string // quoted args for debugging purposes + for _, arg := range args { + if len(arg) == 0 { + qArgs = append(qArgs, `""`) + } else { + qArgs = append(qArgs, arg) + } + } + tt.Logf("args: %v\n", strings.Join(qArgs, " ")) + tt.Run("evm-test", args...) + // Compare the expected output, if provided + if tc.expOut != "" { + want, err := os.ReadFile(fmt.Sprintf("%v/%v", tc.base, tc.expOut)) + if err != nil { + t.Fatalf("test %d: could not read expected output: %v", i, err) + } + have := tt.Output() + ok, err := cmpJson(have, want) + switch { + case err != nil: + t.Fatalf("test %d, json parsing failed: %v", i, err) + case !ok: + t.Fatalf("test %d: output wrong, have \n%v\nwant\n%v\n", i, string(have), string(want)) + } + } + tt.WaitExit() + if have, want := tt.ExitStatus(), tc.expExitCode; have != want { + t.Fatalf("test %d: wrong exit code, have %d, want %d", i, have, want) + } + } +} + +// cmpJson compares the JSON in two byte slices. 
+func cmpJson(a, b []byte) (bool, error) { + var j, j2 interface{} + if err := json.Unmarshal(a, &j); err != nil { + return false, err + } + if err := json.Unmarshal(b, &j2); err != nil { + return false, err + } + + return reflect.DeepEqual(j2, j), nil +} diff --git a/cmd/evm/testdata/1/exp.json b/cmd/evm/testdata/1/exp.json new file mode 100644 index 00000000000..d8094e7aa67 --- /dev/null +++ b/cmd/evm/testdata/1/exp.json @@ -0,0 +1,45 @@ +{ + "alloc": { + "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192": { + "balance": "0xfeed1a9d", + "nonce": "0x1" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x5ffd4878be161d74", + "nonce": "0xac" + }, + "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x29a2241af62ca410" + } + }, + "result": { + "stateRoot": "0xe72f10cef9b1d32a16e2f5a8d64b25dacde99efcdea460387db527486582c3f7", + "txRoot": "0xc4761fd7b87ff2364c7c60b6c5c8d02e522e815328aaea3f20e3b7b7ef52c42d", + "receiptsRoot": "0x056b23fbba480696b65fe5a59b8f2148a1299103c4f57df839233af2cf4ca2d2", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [ + { + "root": "0x", + "status": "0x1", + "cumulativeGasUsed": "0x5208", + "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0x0557bacce3375c98d806609b8d5043072f0b6a8bae45ae5a67a00d3a1a18d673", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x5208", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "blockNumber": "0x1", + "transactionIndex": "0x0" + } + ], + "rejected": [ + { + "index": 1, + "error": "nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1" + } + ], + "currentDifficulty": "0x20000", + "gasUsed": "0x5208" + } +} diff --git a/cmd/evm/testdata/10/alloc.json b/cmd/evm/testdata/10/alloc.json new file mode 100644 index 00000000000..6e98e7513c4 --- /dev/null +++ b/cmd/evm/testdata/10/alloc.json @@ -0,0 +1,23 @@ +{ + "0x1111111111111111111111111111111111111111" : { + "balance" : "0x010000000000", + "code" : "0xfe", + "nonce" : "0x01", + "storage" : { + } + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { + "balance" : "0x010000000000", + "code" : "0x", + "nonce" : "0x01", + "storage" : { + } + }, + "0xd02d72e067e77158444ef2020ff2d325f929b363" : { + "balance" : "0x01000000000000", + "code" : "0x", + "nonce" : "0x01", + "storage" : { + } + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/10/env.json b/cmd/evm/testdata/10/env.json new file mode 100644 index 00000000000..3a82d46a774 --- /dev/null +++ b/cmd/evm/testdata/10/env.json @@ -0,0 +1,12 @@ +{ + "currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + 
"currentDifficulty" : "0x020000", + "currentNumber" : "0x01", + "currentTimestamp" : "0x079e", + "previousHash" : "0xcb23ee65a163121f640673b41788ee94633941405f95009999b502eedfbbfd4f", + "currentGasLimit" : "0x40000000", + "currentBaseFee" : "0x036b", + "blockHashes" : { + "0" : "0xcb23ee65a163121f640673b41788ee94633941405f95009999b502eedfbbfd4f" + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/10/exp.json b/cmd/evm/testdata/10/exp.json new file mode 100644 index 00000000000..5ab98860c77 --- /dev/null +++ b/cmd/evm/testdata/10/exp.json @@ -0,0 +1,79 @@ +{ + "alloc": { + "0x1111111111111111111111111111111111111111": { + "code": "0xfe", + "balance": "0x10000000000", + "nonce": "0x1" + }, + "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba": { + "balance": "0x1bc16d674ec80000" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x10000000000", + "nonce": "0x1" + }, + "0xd02d72e067e77158444ef2020ff2d325f929b363": { + "balance": "0xff5beffffc95", + "nonce": "0x4" + } + }, + "result": { + "stateRoot": "0x4b7b4d5dd6316b58407468a5d3cf0a18e42d3833911d3fccd80eb49273024ffa", + "txRoot": "0xda925f2306a52fa24c15d5cd212d736ee016415fd8dd0c45fd368de7917d64bb", + "receiptsRoot": "0x439a25f7fc424c10fb1f89800e4aa1df74156b137239d9ac3eaa7c911c353cd5", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [ + { + "type": "0x2", + "root": "0x", + "status": "0x0", + "cumulativeGasUsed": 
"0x10000001", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0x88980f6efcc5358d9c359663e7b9414722d430497637340ea056b076bc206701", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x10000001", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "blockNumber": "0x1", + "transactionIndex": "0x0" + }, + { + "type": "0x2", + "root": "0x", + "status": "0x0", + "cumulativeGasUsed": "0x20000001", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0xd7bf3886f4e2aef74d525ae072c680f3846f550254401b67cbfda4a233757582", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x10000000", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "blockNumber": "0x1", + "transactionIndex": "0x1" + }, + { + "type": "0x2", + "root": "0x", + "status": "0x0", + "cumulativeGasUsed": "0x30000001", + "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0x50308296760f01f1eeec7500e9e73cad67469249b1f59e9a9f55e6625a4923db", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x10000000", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "blockNumber": "0x1", + "transactionIndex": "0x2" + } + ], + "rejected": [ + { + "index": 3, + "error": "gas limit reached" + } + ], + "currentDifficulty": "0x20000", + "gasUsed": "0x30000001" + } +} diff --git a/cmd/evm/testdata/10/readme.md b/cmd/evm/testdata/10/readme.md new file mode 100644 index 00000000000..c34be80bb71 --- /dev/null +++ b/cmd/evm/testdata/10/readme.md @@ -0,0 +1,79 @@ +## EIP-1559 testing + +This test contains testcases for EIP-1559, which were reported by Ori as misbehaving. 
+ +``` +[user@work evm]$ dir=./testdata/10 && ./evm t8n --state.fork=London --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout --output.result=stdout 2>&1 +INFO [05-09|22:11:59.436] rejected tx index=3 hash=db07bf..ede1e8 from=0xd02d72E067e77158444ef2020Ff2d325f929B363 error="gas limit reached" +``` +Output: +```json +{ + "alloc": { + "0x1111111111111111111111111111111111111111": { + "code": "0xfe", + "balance": "0x10000000000", + "nonce": "0x1" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x10000000000", + "nonce": "0x1" + }, + "0xd02d72e067e77158444ef2020ff2d325f929b363": { + "balance": "0xff5beffffc95", + "nonce": "0x4" + } + }, + "result": { + "stateRoot": "0xf91a7ec08e4bfea88719aab34deabb000c86902360532b52afa9599d41f2bb8b", + "txRoot": "0xda925f2306a52fa24c15d5cd212d736ee016415fd8dd0c45fd368de7917d64bb", + "receiptRoot": "0x439a25f7fc424c10fb1f89800e4aa1df74156b137239d9ac3eaa7c911c353cd5", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [ + { + "type": "0x2", + "root": "0x", + "status": "0x0", + "cumulativeGasUsed": "0x10000001", + "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0x88980f6efcc5358d9c359663e7b9414722d430497637340ea056b076bc206701", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x10000001", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "transactionIndex": "0x0" + }, + { + "type": "0x2", + "root": "0x", + "status": "0x0", + "cumulativeGasUsed": "0x20000001", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0xd7bf3886f4e2aef74d525ae072c680f3846f550254401b67cbfda4a233757582", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x10000000", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "transactionIndex": "0x1" + }, + { + "type": "0x2", + "root": "0x", + "status": "0x0", + "cumulativeGasUsed": "0x30000001", + "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0x50308296760f01f1eeec7500e9e73cad67469249b1f59e9a9f55e6625a4923db", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x10000000", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "transactionIndex": "0x2" + } + ], + "rejected": [ + 3 + ] + } +} +``` diff --git a/cmd/evm/testdata/10/txs.json b/cmd/evm/testdata/10/txs.json new file mode 100644 index 00000000000..f7c9baa26da --- /dev/null +++ b/cmd/evm/testdata/10/txs.json @@ -0,0 +1,70 @@ +[ + { + "input" : "0x", + "gas" : "0x10000001", + "nonce" : "0x1", + "to" : "0x1111111111111111111111111111111111111111", + "value" : "0x0", + "v" : "0x0", + "r" : "0x7a45f00bcde9036b026cdf1628b023cd8a31a95c62b5e4dbbee2fa7debe668fb", + "s" : "0x3cc9d6f2cd00a045b0263f2d6dad7d60938d5d13d061af4969f95928aa934d4a", + "secretKey" : "0x41f6e321b31e72173f8ff2e292359e1862f24fba42fe6f97efaf641980eff298", + "chainId" : "0x1", + "type" : "0x2", + "maxFeePerGas" : "0xfa0", + "maxPriorityFeePerGas" : "0x0", + "accessList" : [ + ] + }, + { + "input" : "0x", + "gas" : "0x10000000", + "nonce" : "0x2", + "to" : "0x1111111111111111111111111111111111111111", + "value" : "0x0", + "v" : "0x0", + "r" : "0x4c564b94b0281a8210eeec2dd1fe2e16ff1c1903a8c3a1078d735d7f8208b2af", + "s" : "0x56432b2593e6de95db1cb997b7385217aca03f1615327e231734446b39f266d", + "secretKey" : "0x41f6e321b31e72173f8ff2e292359e1862f24fba42fe6f97efaf641980eff298", + 
"chainId" : "0x1", + "type" : "0x2", + "maxFeePerGas" : "0xfa0", + "maxPriorityFeePerGas" : "0x0", + "accessList" : [ + ] + }, + { + "input" : "0x", + "gas" : "0x10000000", + "nonce" : "0x3", + "to" : "0x1111111111111111111111111111111111111111", + "value" : "0x0", + "v" : "0x0", + "r" : "0x2ed2ef52f924f59d4a21e1f2a50d3b1109303ce5e32334a7ece9b46f4fbc2a57", + "s" : "0x2980257129cbd3da987226f323d50ba3975a834d165e0681f991b75615605c44", + "secretKey" : "0x41f6e321b31e72173f8ff2e292359e1862f24fba42fe6f97efaf641980eff298", + "chainId" : "0x1", + "type" : "0x2", + "maxFeePerGas" : "0xfa0", + "maxPriorityFeePerGas" : "0x0", + "accessList" : [ + ] + }, + { + "input" : "0x", + "gas" : "0x10000000", + "nonce" : "0x4", + "to" : "0x1111111111111111111111111111111111111111", + "value" : "0x0", + "v" : "0x0", + "r" : "0x5df7d7f8f8e15b36fc9f189cacb625040fad10398d08fc90812595922a2c49b2", + "s" : "0x565fc1803f77a84d754ffe3c5363ab54a8d93a06ea1bb9d4c73c73a282b35917", + "secretKey" : "0x41f6e321b31e72173f8ff2e292359e1862f24fba42fe6f97efaf641980eff298", + "chainId" : "0x1", + "type" : "0x2", + "maxFeePerGas" : "0xfa0", + "maxPriorityFeePerGas" : "0x0", + "accessList" : [ + ] + } +] \ No newline at end of file diff --git a/cmd/evm/testdata/11/alloc.json b/cmd/evm/testdata/11/alloc.json new file mode 100644 index 00000000000..86938230fa7 --- /dev/null +++ b/cmd/evm/testdata/11/alloc.json @@ -0,0 +1,25 @@ +{ + "0x0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6" : { + "balance" : "0x0de0b6b3a7640000", + "code" : "0x61ffff5060046000f3", + "nonce" : "0x01", + "storage" : { + } + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { + "balance" : "0x0de0b6b3a7640000", + "code" : "0x", + "nonce" : "0x00", + "storage" : { + "0x00" : "0x00" + } + }, + "0xb94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { + "balance" : "0x00", + "code" : "0x6001600055", + "nonce" : "0x00", + "storage" : { + } + } +} + diff --git a/cmd/evm/testdata/11/env.json b/cmd/evm/testdata/11/env.json new file mode 100644 index 
00000000000..37dedf09475 --- /dev/null +++ b/cmd/evm/testdata/11/env.json @@ -0,0 +1,12 @@ +{ + "currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + "currentDifficulty" : "0x020000", + "currentNumber" : "0x01", + "currentTimestamp" : "0x03e8", + "previousHash" : "0xfda4419b3660e99f37e536dae1ab081c180136bb38c837a93e93d9aab58553b2", + "currentGasLimit" : "0x0f4240", + "blockHashes" : { + "0" : "0xfda4419b3660e99f37e536dae1ab081c180136bb38c837a93e93d9aab58553b2" + } +} + diff --git a/cmd/evm/testdata/11/readme.md b/cmd/evm/testdata/11/readme.md new file mode 100644 index 00000000000..d499f8e99fa --- /dev/null +++ b/cmd/evm/testdata/11/readme.md @@ -0,0 +1,13 @@ +## Test missing basefee +
+In this test, the `currentBaseFee` is missing from the env portion.
+On a live blockchain, the basefee is present in the header, and verified as part of header validation.
+
+In `evm t8n`, we don't have blocks, so it needs to be added in the `env` instead.
+
+When it's missing, an error is expected.
+ +``` +dir=./testdata/11 && ./evm t8n --state.fork=London --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout --output.result=stdout 2>&1>/dev/null +ERROR(3): EIP-1559 config but missing 'currentBaseFee' in env section +``` \ No newline at end of file diff --git a/cmd/evm/testdata/11/txs.json b/cmd/evm/testdata/11/txs.json new file mode 100644 index 00000000000..c54b0a1f5b4 --- /dev/null +++ b/cmd/evm/testdata/11/txs.json @@ -0,0 +1,14 @@ +[ + { + "input" : "0x38600060013960015160005560006000f3", + "gas" : "0x61a80", + "gasPrice" : "0x1", + "nonce" : "0x0", + "value" : "0x186a0", + "v" : "0x1c", + "r" : "0x2e1391fd903387f1cc2b51df083805fb4bbb0d4710a2cdf4a044d191ff7be63e", + "s" : "0x7f10a933c42ab74927db02b1db009e923d9d2ab24ac24d63c399f2fe5d9c9b22", + "secretKey" : "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8" + } +] + diff --git a/cmd/evm/testdata/12/alloc.json b/cmd/evm/testdata/12/alloc.json new file mode 100644 index 00000000000..3ed96894fbc --- /dev/null +++ b/cmd/evm/testdata/12/alloc.json @@ -0,0 +1,11 @@ +{ + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b" : { + "balance" : "84000000", + "code" : "0x", + "nonce" : "0x00", + "storage" : { + "0x00" : "0x00" + } + } +} + diff --git a/cmd/evm/testdata/12/env.json b/cmd/evm/testdata/12/env.json new file mode 100644 index 00000000000..8ae5465369c --- /dev/null +++ b/cmd/evm/testdata/12/env.json @@ -0,0 +1,10 @@ +{ + "currentCoinbase" : "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + "currentDifficulty" : "0x020000", + "currentNumber" : "0x01", + "currentTimestamp" : "0x03e8", + "previousHash" : "0xfda4419b3660e99f37e536dae1ab081c180136bb38c837a93e93d9aab58553b2", + "currentGasLimit" : "0x0f4240", + "currentBaseFee" : "0x20" +} + diff --git a/cmd/evm/testdata/12/exp.json b/cmd/evm/testdata/12/exp.json new file mode 100644 index 00000000000..0589c867585 --- /dev/null +++ b/cmd/evm/testdata/12/exp.json @@ -0,0 +1,26 @@ +{ + "alloc": { + 
"0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba": { + "balance": "0x1bc16d674ec80000" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x501bd00" + } + }, + "result": { + "stateRoot": "0x9fd6c7f520a9e9a160c19d65b929161415bc4e86ea75e7c9cac4fe8f776cf453", + "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": null, + "rejected": [ + { + "index": 0, + "error": "insufficient funds for gas * price + value: address 0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B have 84000000 want 84000032" + } + ], + "currentDifficulty": "0x20000", + "gasUsed": "0x0" + } +} diff --git a/cmd/evm/testdata/12/readme.md b/cmd/evm/testdata/12/readme.md new file mode 100644 index 00000000000..b0177ecc24b --- /dev/null +++ b/cmd/evm/testdata/12/readme.md @@ -0,0 +1,40 @@ +## Test 1559 balance + gasCap + +This test contains an EIP-1559 consensus issue which happened on Ropsten, where +`geth` did not properly account for the value transfer while doing the check on `max_fee_per_gas * gas_limit`. 
+ +Before the issue was fixed, this invocation allowed the transaction to pass into a block: +``` +dir=./testdata/12 && ./evm t8n --state.fork=London --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout --output.result=stdout +``` + +With the fix applied, the result is: +``` +dir=./testdata/12 && ./evm t8n --state.fork=London --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout --output.result=stdout +INFO [07-21|19:03:50.276] rejected tx index=0 hash=ccc996..d83435 from=0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B error="insufficient funds for gas * price + value: address 0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B have 84000000 want 84000032" +INFO [07-21|19:03:50.276] Trie dumping started root=e05f81..6597a5 +INFO [07-21|19:03:50.276] Trie dumping complete accounts=1 elapsed="39.549µs" +{ + "alloc": { + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x501bd00" + } + }, + "result": { + "stateRoot": "0xe05f81f8244a76503ceec6f88abfcd03047a612a1001217f37d30984536597a5", + "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "receiptRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [], + "rejected": [ + { + "index": 0, + "error": "insufficient funds for gas * price + value: address 
0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B have 84000000 want 84000032" + } + ] + } +} +``` + +The transaction is rejected. \ No newline at end of file diff --git a/cmd/evm/testdata/12/txs.json b/cmd/evm/testdata/12/txs.json new file mode 100644 index 00000000000..cd683f271c7 --- /dev/null +++ b/cmd/evm/testdata/12/txs.json @@ -0,0 +1,20 @@ +[ + { + "input" : "0x", + "gas" : "0x5208", + "nonce" : "0x0", + "to" : "0x1111111111111111111111111111111111111111", + "value" : "0x20", + "v" : "0x0", + "r" : "0x0", + "s" : "0x0", + "secretKey" : "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8", + "chainId" : "0x1", + "type" : "0x2", + "maxFeePerGas" : "0xfa0", + "maxPriorityFeePerGas" : "0x20", + "accessList" : [ + ] + } +] + diff --git a/cmd/evm/testdata/19/alloc.json b/cmd/evm/testdata/19/alloc.json new file mode 100644 index 00000000000..cef1a25ff01 --- /dev/null +++ b/cmd/evm/testdata/19/alloc.json @@ -0,0 +1,12 @@ +{ + "a94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x5ffd4878be161d74", + "code": "0x", + "nonce": "0xac", + "storage": {} + }, + "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192":{ + "balance": "0xfeedbead", + "nonce" : "0x00" + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/19/env.json b/cmd/evm/testdata/19/env.json new file mode 100644 index 00000000000..0c64392aff5 --- /dev/null +++ b/cmd/evm/testdata/19/env.json @@ -0,0 +1,9 @@ +{ + "currentCoinbase": "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b", + "currentGasLimit": "0x750a163df65e8a", + "currentBaseFee": "0x500", + "currentNumber": "13000000", + "currentTimestamp": "100015", + "parentTimestamp" : "99999", + "parentDifficulty" : "0x2000000000000" +} diff --git a/cmd/evm/testdata/19/exp_arrowglacier.json b/cmd/evm/testdata/19/exp_arrowglacier.json new file mode 100644 index 00000000000..85506a07ae9 --- /dev/null +++ b/cmd/evm/testdata/19/exp_arrowglacier.json @@ -0,0 +1,24 @@ +{ + "alloc": { + "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192": { + "balance": 
"0xfeedbead" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x5ffd4878be161d74", + "nonce": "0xac" + }, + "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x1bc16d674ec80000" + } + }, + "result": { + "stateRoot": "0x374cbd5c614cb6ef173024d1c0d4e0313dafc2d7fc8f4399cf4bd1b60fc7c2ca", + "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": null, + "currentDifficulty": "0x2000000200000", + "gasUsed": "0x0" + } +} diff --git a/cmd/evm/testdata/19/exp_london.json b/cmd/evm/testdata/19/exp_london.json new file mode 100644 index 00000000000..10e11aedd5b --- /dev/null +++ b/cmd/evm/testdata/19/exp_london.json @@ -0,0 +1,24 @@ +{ + "alloc": { + "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192": { + "balance": "0xfeedbead" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x5ffd4878be161d74", + "nonce": "0xac" + }, + "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x1bc16d674ec80000" + } + }, + "result": { + "stateRoot": "0x374cbd5c614cb6ef173024d1c0d4e0313dafc2d7fc8f4399cf4bd1b60fc7c2ca", + "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "logsHash": 
"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": null, + "currentDifficulty": "0x2000080000000", + "gasUsed": "0x0" + } +} diff --git a/cmd/evm/testdata/19/readme.md b/cmd/evm/testdata/19/readme.md new file mode 100644 index 00000000000..5fae183f488 --- /dev/null +++ b/cmd/evm/testdata/19/readme.md @@ -0,0 +1,9 @@ +## Difficulty calculation + +This test shows how the `evm t8n` can be used to calculate the (ethash) difficulty, if none is provided by the caller, +this time on `ArrowGlacier` (Eip 4345). 
+ +Calculating it (with an empty set of txs) using `ArrowGlacier` rules (and no provided unclehash for the parent block): +``` +[user@work evm]$ ./evm t8n --input.alloc=./testdata/14/alloc.json --input.txs=./testdata/14/txs.json --input.env=./testdata/14/env.json --output.result=stdout --state.fork=ArrowGlacier +``` \ No newline at end of file diff --git a/cmd/evm/testdata/19/txs.json b/cmd/evm/testdata/19/txs.json new file mode 100644 index 00000000000..fe51488c706 --- /dev/null +++ b/cmd/evm/testdata/19/txs.json @@ -0,0 +1 @@ +[] diff --git a/cmd/evm/testdata/3/exp.json b/cmd/evm/testdata/3/exp.json new file mode 100644 index 00000000000..5b8b7c84ebc --- /dev/null +++ b/cmd/evm/testdata/3/exp.json @@ -0,0 +1,39 @@ +{ + "alloc": { + "0x095e7baea6a6c7c4c2dfeb977efac326af552d87": { + "code": "0x600140", + "balance": "0xde0b6b3a76586a0" + }, + "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba": { + "balance": "0x1bc16d674ec8521f" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0xde0b6b3a7622741", + "nonce": "0x1" + } + }, + "result": { + "stateRoot": "0x5aeefb3e8fe1d722455ff4b4ee76793af2c654f7f5120b79a8427d696ed01558", + "txRoot": "0x75e61774a2ff58cbe32653420256c7f44bc715715a423b0b746d5c622979af6b", + "receiptsRoot": "0xd0d26df80374a327c025d405ebadc752b1bbd089d864801ae78ab704bcad8086", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [ + { + "root": "0x", + "status": "0x1", + 
"cumulativeGasUsed": "0x521f", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x521f", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "blockNumber": "0x5", + "transactionIndex": "0x0" + } + ], + "currentDifficulty": "0x20000", + "gasUsed": "0x521f" + } +} diff --git a/cmd/evm/testdata/5/exp.json b/cmd/evm/testdata/5/exp.json new file mode 100644 index 00000000000..1815d430588 --- /dev/null +++ b/cmd/evm/testdata/5/exp.json @@ -0,0 +1,23 @@ +{ + "alloc": { + "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa": { + "balance": "0x2c3c465ca58ec000" + }, + "0xbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb": { + "balance": "0x246ddf9797668000" + }, + "0xcccccccccccccccccccccccccccccccccccccccc": { + "balance": "0x1f399b1438a10000" + } + }, + "result": { + "stateRoot": "0x5069e6c86aeba39397685cf7914a7505a78059be8c5f4d1348050ce78b348e99", + "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": null, + "currentDifficulty": "0x20000", + "gasUsed": "0x0" + } +} diff --git a/cmd/evm/testdata/7/exp.json b/cmd/evm/testdata/7/exp.json new file mode 100644 index 00000000000..e5b41ef0d21 --- /dev/null +++ b/cmd/evm/testdata/7/exp.json @@ -0,0 +1,375 @@ +{ + "alloc": { + "0x005f5cee7a43331d5a3d3eec71305925a62f34b6": { + "balance": "0x0" + }, + "0x0101f3be8ebb4bbd39a2e3b9a3639d4259832fd9": { + "balance": "0x0" + }, + "0x057b56736d32b86616a10f619859c6cd6f59092a": { + "balance": "0x0" + }, + "0x06706dd3f2c9abf0a21ddcc6941d9b86f0596936": { + "balance": "0x0" + }, + "0x0737a6b837f97f46ebade41b9bc3e1c509c85c53": { + "balance": "0x0" + }, + "0x07f5c1e1bc2c93e0402f23341973a0e043f7bf8a": { + "balance": "0x0" + }, + "0x0e0da70933f4c7849fc0d203f5d1d43b9ae4532d": { + "balance": "0x0" + }, + "0x0ff30d6de14a8224aa97b78aea5388d1c51c1f00": { + "balance": "0x0" + }, + "0x12e626b0eebfe86a56d633b9864e389b45dcb260": { + "balance": "0x0" + }, + "0x1591fc0f688c81fbeb17f5426a162a7024d430c2": { + "balance": "0x0" + }, + "0x17802f43a0137c506ba92291391a8a8f207f487d": { + "balance": "0x0" + }, + "0x1975bd06d486162d5dc297798dfc41edd5d160a7": { + "balance": "0x0" + }, + "0x1ca6abd14d30affe533b24d7a21bff4c2d5e1f3b": { + "balance": "0x0" + }, + "0x1cba23d343a983e9b5cfd19496b9a9701ada385f": { + "balance": "0x0" + }, + "0x200450f06520bdd6c527622a273333384d870efb": { + "balance": "0x0" + }, + "0x21c7fdb9ed8d291d79ffd82eb2c4356ec0d81241": { + "balance": "0x0" + }, + 
"0x23b75c2f6791eef49c69684db4c6c1f93bf49a50": { + "balance": "0x0" + }, + "0x24c4d950dfd4dd1902bbed3508144a54542bba94": { + "balance": "0x0" + }, + "0x253488078a4edf4d6f42f113d1e62836a942cf1a": { + "balance": "0x0" + }, + "0x27b137a85656544b1ccb5a0f2e561a5703c6a68f": { + "balance": "0x0" + }, + "0x2a5ed960395e2a49b1c758cef4aa15213cfd874c": { + "balance": "0x0" + }, + "0x2b3455ec7fedf16e646268bf88846bd7a2319bb2": { + "balance": "0x0" + }, + "0x2c19c7f9ae8b751e37aeb2d93a699722395ae18f": { + "balance": "0x0" + }, + "0x304a554a310c7e546dfe434669c62820b7d83490": { + "balance": "0x0" + }, + "0x319f70bab6845585f412ec7724b744fec6095c85": { + "balance": "0x0" + }, + "0x35a051a0010aba705c9008d7a7eff6fb88f6ea7b": { + "balance": "0x0" + }, + "0x3ba4d81db016dc2890c81f3acec2454bff5aada5": { + "balance": "0x0" + }, + "0x3c02a7bc0391e86d91b7d144e61c2c01a25a79c5": { + "balance": "0x0" + }, + "0x40b803a9abce16f50f36a77ba41180eb90023925": { + "balance": "0x0" + }, + "0x440c59b325d2997a134c2c7c60a8c61611212bad": { + "balance": "0x0" + }, + "0x4486a3d68fac6967006d7a517b889fd3f98c102b": { + "balance": "0x0" + }, + "0x4613f3bca5c44ea06337a9e439fbc6d42e501d0a": { + "balance": "0x0" + }, + "0x47e7aa56d6bdf3f36be34619660de61275420af8": { + "balance": "0x0" + }, + "0x4863226780fe7c0356454236d3b1c8792785748d": { + "balance": "0x0" + }, + "0x492ea3bb0f3315521c31f273e565b868fc090f17": { + "balance": "0x0" + }, + "0x4cb31628079fb14e4bc3cd5e30c2f7489b00960c": { + "balance": "0x0" + }, + "0x4deb0033bb26bc534b197e61d19e0733e5679784": { + "balance": "0x0" + }, + "0x4fa802324e929786dbda3b8820dc7834e9134a2a": { + "balance": "0x0" + }, + "0x4fd6ace747f06ece9c49699c7cabc62d02211f75": { + "balance": "0x0" + }, + "0x51e0ddd9998364a2eb38588679f0d2c42653e4a6": { + "balance": "0x0" + }, + "0x52c5317c848ba20c7504cb2c8052abd1fde29d03": { + "balance": "0x0" + }, + "0x542a9515200d14b68e934e9830d91645a980dd7a": { + "balance": "0x0" + }, + "0x5524c55fb03cf21f549444ccbecb664d0acad706": { + "balance": "0x0" + }, + 
"0x579a80d909f346fbfb1189493f521d7f48d52238": { + "balance": "0x0" + }, + "0x58b95c9a9d5d26825e70a82b6adb139d3fd829eb": { + "balance": "0x0" + }, + "0x5c6e67ccd5849c0d29219c4f95f1a7a93b3f5dc5": { + "balance": "0x0" + }, + "0x5c8536898fbb74fc7445814902fd08422eac56d0": { + "balance": "0x0" + }, + "0x5d2b2e6fcbe3b11d26b525e085ff818dae332479": { + "balance": "0x0" + }, + "0x5dc28b15dffed94048d73806ce4b7a4612a1d48f": { + "balance": "0x0" + }, + "0x5f9f3392e9f62f63b8eac0beb55541fc8627f42c": { + "balance": "0x0" + }, + "0x6131c42fa982e56929107413a9d526fd99405560": { + "balance": "0x0" + }, + "0x6231b6d0d5e77fe001c2a460bd9584fee60d409b": { + "balance": "0x0" + }, + "0x627a0a960c079c21c34f7612d5d230e01b4ad4c7": { + "balance": "0x0" + }, + "0x63ed5a272de2f6d968408b4acb9024f4cc208ebf": { + "balance": "0x0" + }, + "0x6966ab0d485353095148a2155858910e0965b6f9": { + "balance": "0x0" + }, + "0x6b0c4d41ba9ab8d8cfb5d379c69a612f2ced8ecb": { + "balance": "0x0" + }, + "0x6d87578288b6cb5549d5076a207456a1f6a63dc0": { + "balance": "0x0" + }, + "0x6f6704e5a10332af6672e50b3d9754dc460dfa4d": { + "balance": "0x0" + }, + "0x7602b46df5390e432ef1c307d4f2c9ff6d65cc97": { + "balance": "0x0" + }, + "0x779543a0491a837ca36ce8c635d6154e3c4911a6": { + "balance": "0x0" + }, + "0x77ca7b50b6cd7e2f3fa008e24ab793fd56cb15f6": { + "balance": "0x0" + }, + "0x782495b7b3355efb2833d56ecb34dc22ad7dfcc4": { + "balance": "0x0" + }, + "0x807640a13483f8ac783c557fcdf27be11ea4ac7a": { + "balance": "0x0" + }, + "0x8163e7fb499e90f8544ea62bbf80d21cd26d9efd": { + "balance": "0x0" + }, + "0x84ef4b2357079cd7a7c69fd7a37cd0609a679106": { + "balance": "0x0" + }, + "0x86af3e9626fce1957c82e88cbf04ddf3a2ed7915": { + "balance": "0x0" + }, + "0x8a8eafb1cf62bfbeb1741769dae1a9dd47996192": { + "balance": "0xfeedbead" + }, + "0x8d9edb3054ce5c5774a420ac37ebae0ac02343c6": { + "balance": "0x0" + }, + "0x914d1b8b43e92723e64fd0a06f5bdb8dd9b10c79": { + "balance": "0x0" + }, + "0x97f43a37f595ab5dd318fb46e7a155eae057317a": { + "balance": "0x0" + 
}, + "0x9aa008f65de0b923a2a4f02012ad034a5e2e2192": { + "balance": "0x0" + }, + "0x9c15b54878ba618f494b38f0ae7443db6af648ba": { + "balance": "0x0" + }, + "0x9c50426be05db97f5d64fc54bf89eff947f0a321": { + "balance": "0x0" + }, + "0x9da397b9e80755301a3b32173283a91c0ef6c87e": { + "balance": "0x0" + }, + "0x9ea779f907f0b315b364b0cfc39a0fde5b02a416": { + "balance": "0x0" + }, + "0x9f27daea7aca0aa0446220b98d028715e3bc803d": { + "balance": "0x0" + }, + "0x9fcd2deaff372a39cc679d5c5e4de7bafb0b1339": { + "balance": "0x0" + }, + "0xa2f1ccba9395d7fcb155bba8bc92db9bafaeade7": { + "balance": "0x0" + }, + "0xa3acf3a1e16b1d7c315e23510fdd7847b48234f6": { + "balance": "0x0" + }, + "0xa5dc5acd6a7968a4554d89d65e59b7fd3bff0f90": { + "balance": "0x0" + }, + "0xa82f360a8d3455c5c41366975bde739c37bfeb8a": { + "balance": "0x0" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x5ffd4878be161d74", + "nonce": "0xac" + }, + "0xac1ecab32727358dba8962a0f3b261731aad9723": { + "balance": "0x0" + }, + "0xaccc230e8a6e5be9160b8cdf2864dd2a001c28b6": { + "balance": "0x0" + }, + "0xacd87e28b0c9d1254e868b81cba4cc20d9a32225": { + "balance": "0x0" + }, + "0xadf80daec7ba8dcf15392f1ac611fff65d94f880": { + "balance": "0x0" + }, + "0xaeeb8ff27288bdabc0fa5ebb731b6f409507516c": { + "balance": "0x0" + }, + "0xb136707642a4ea12fb4bae820f03d2562ebff487": { + "balance": "0x0" + }, + "0xb2c6f0dfbb716ac562e2d85d6cb2f8d5ee87603e": { + "balance": "0x0" + }, + "0xb3fb0e5aba0e20e5c49d252dfd30e102b171a425": { + "balance": "0x0" + }, + "0xb52042c8ca3f8aa246fa79c3feaa3d959347c0ab": { + "balance": "0x0" + }, + "0xb9637156d330c0d605a791f1c31ba5890582fe1c": { + "balance": "0x0" + }, + "0xbb9bc244d798123fde783fcc1c72d3bb8c189413": { + "balance": "0x0" + }, + "0xbc07118b9ac290e4622f5e77a0853539789effbe": { + "balance": "0x0" + }, + "0xbcf899e6c7d9d5a215ab1e3444c86806fa854c76": { + "balance": "0x0" + }, + "0xbe8539bfe837b67d1282b2b1d61c3f723966f049": { + "balance": "0x0" + }, + 
"0xbf4ed7b27f1d666546e30d74d50d173d20bca754": { + "balance": "0x0" + }, + "0xc4bbd073882dd2add2424cf47d35213405b01324": { + "balance": "0x0" + }, + "0xc94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x4563918244f40000" + }, + "0xca544e5c4687d109611d0f8f928b53a25af72448": { + "balance": "0x0" + }, + "0xcbb9d3703e651b0d496cdefb8b92c25aeb2171f7": { + "balance": "0x0" + }, + "0xcc34673c6c40e791051898567a1222daf90be287": { + "balance": "0x0" + }, + "0xceaeb481747ca6c540a000c1f3641f8cef161fa7": { + "balance": "0x0" + }, + "0xd131637d5275fd1a68a3200f4ad25c71a2a9522e": { + "balance": "0x0" + }, + "0xd164b088bd9108b60d0ca3751da4bceb207b0782": { + "balance": "0x0" + }, + "0xd1ac8b1ef1b69ff51d1d401a476e7e612414f091": { + "balance": "0x0" + }, + "0xd343b217de44030afaa275f54d31a9317c7f441e": { + "balance": "0x0" + }, + "0xd4fe7bc31cedb7bfb8a345f31e668033056b2728": { + "balance": "0x0" + }, + "0xd9aef3a1e38a39c16b31d1ace71bca8ef58d315b": { + "balance": "0x0" + }, + "0xda2fef9e4a3230988ff17df2165440f37e8b1708": { + "balance": "0x0" + }, + "0xdbe9b615a3ae8709af8b93336ce9b477e4ac0940": { + "balance": "0x0" + }, + "0xe308bd1ac5fda103967359b2712dd89deffb7973": { + "balance": "0x0" + }, + "0xe4ae1efdfc53b73893af49113d8694a057b9c0d1": { + "balance": "0x0" + }, + "0xec8e57756626fdc07c63ad2eafbd28d08e7b0ca5": { + "balance": "0x0" + }, + "0xecd135fa4f61a655311e86238c92adcd779555d2": { + "balance": "0x0" + }, + "0xf0b1aa0eb660754448a7937c022e30aa692fe0c5": { + "balance": "0x0" + }, + "0xf1385fb24aad0cd7432824085e42aff90886fef5": { + "balance": "0x0" + }, + "0xf14c14075d6c4ed84b86798af0956deef67365b5": { + "balance": "0x0" + }, + "0xf4c64518ea10f995918a454158c6b61407ea345c": { + "balance": "0x0" + }, + "0xfe24cdd8648121a43a7c86d289be4dd2951ed49f": { + "balance": "0x0" + } + }, + "result": { + "stateRoot": "0xd320ae476350b8107b9b78d45d73f539cc363e7e588d8c794666515d852f6e81", + "txRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "receiptsRoot": 
"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": null, + "currentDifficulty": "0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffff020000", + "gasUsed": "0x0" + } +} diff --git a/cmd/evm/testdata/8/exp.json b/cmd/evm/testdata/8/exp.json new file mode 100644 index 00000000000..2d44c071be7 --- /dev/null +++ b/cmd/evm/testdata/8/exp.json @@ -0,0 +1,68 @@ +{ + "alloc": { + "0x000000000000000000000000000000000000aaaa": { + "code": "0x5854505854", + "balance": "0x7", + "nonce": "0x1" + }, + "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba": { + "balance": "0x1bc16d674ec94832" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0xeb7ca", + "nonce": "0x3" + } + }, + "result": { + "stateRoot": "0xb78515d83d9ad63ae2740f09f21bb6b44e9041e18b606a3ed35dd6cfd338c0bb", + "txRoot": "0xe42c488908c04b9f7d4d39614ed4093a33ff16353299672e1770b786c28a5e6f", + "receiptsRoot": "0xb207f384195fb6fb7ee7105ba963cc19e1614ce0e75809999289c6c82e7a8d97", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [ + { + "type": "0x1", + "root": "0x", + "status": "0x1", + "cumulativeGasUsed": "0x7aae", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0x26c8c6e23fa3b246f44fba53e7b5fcb55f01f1e075f2de3db9b982afd4bd3901", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x7aae", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "blockNumber": "0x1000000", + "transactionIndex": "0x0" + }, + { + "root": "0x", + "status": "0x1", + "cumulativeGasUsed": "0xdd24", + "logsBloom": 
"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0x26ea003b1188334eced68a720dbe89886cd6a477cccdf924cf1d392e2281c01b", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x6276", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "blockNumber": "0x1000000", + "transactionIndex": "0x1" + }, + { + "type": "0x1", + "root": "0x", + "status": "0x1", + "cumulativeGasUsed": "0x14832", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0x6997569ed85f1d810bc61d969cbbae12f34ce88d314ff5ef2629bc741466fca6", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x6b0e", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "blockNumber": "0x1000000", + "transactionIndex": "0x2" + } + ], + "currentDifficulty": "0x20000", + "gasUsed": "0x14832" + } +} diff --git a/cmd/evm/testdata/9/alloc.json b/cmd/evm/testdata/9/alloc.json index 
430e4242732..c14e38e8451 100644 --- a/cmd/evm/testdata/9/alloc.json +++ b/cmd/evm/testdata/9/alloc.json @@ -1,19 +1,11 @@ { - "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { - "balance": "0x100000000000000000", - "nonce": "0x00" - }, - "0x00000000000000000000000000000000b0b0face": { - "code":"0x40600052", - "storage":{}, - "balance":"0x0", - "nonce": - "0x0" - }, - "0x000000000000000000000000000000ca1100f022": { - "code":"0x60806040527f248f18b25d9b5856c092f62a7d329b239f4a0a77e6ee6c58637f56745b9803f3446040518082815260200191505060405180910390a100fea265627a7a72315820eea50cf12e938601a56dcdef0ab1446f14ba25367299eb81834af54e1672f5d864736f6c63430005110032", - "storage":{}, - "balance":"0x0", - "nonce":"0x0" - } - } \ No newline at end of file + "0x000000000000000000000000000000000000aaaa": { + "balance": "0x03", + "code": "0x58585454", + "nonce": "0x1" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x100000000000000", + "nonce": "0x00" + } +} diff --git a/cmd/evm/testdata/9/env.json b/cmd/evm/testdata/9/env.json index 479d8a3f47d..05f35191fd8 100644 --- a/cmd/evm/testdata/9/env.json +++ b/cmd/evm/testdata/9/env.json @@ -1,8 +1,9 @@ { - "currentCoinbase": "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", - "currentDifficulty": "0x20000", - "currentGasLimit": "0x1000000000", - "currentNumber": "0x1000000", - "currentTimestamp": "0x04", - "currentRandom": "0x1000000000000000000000000000000000000000000000000000000000000001" - } \ No newline at end of file + "currentCoinbase": "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + "currentDifficulty": "0x20000", + "currentGasTarget": "0x1000000000", + "currentGasLimit": "0x750a163df65e8a", + "currentBaseFee": "0x3B9ACA00", + "currentNumber": "0x1000000", + "currentTimestamp": "0x04" +} diff --git a/cmd/evm/testdata/9/exp.json b/cmd/evm/testdata/9/exp.json new file mode 100644 index 00000000000..53a1bfd4d91 --- /dev/null +++ b/cmd/evm/testdata/9/exp.json @@ -0,0 +1,54 @@ +{ + "alloc": { + 
"0x000000000000000000000000000000000000aaaa": { + "code": "0x58585454", + "balance": "0x3", + "nonce": "0x1" + }, + "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba": { + "balance": "0x1bc1c9185ca6f6e0" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0xff745ee8832120", + "nonce": "0x2" + } + }, + "result": { + "stateRoot": "0x8e0c14cca1717d764e5cd25569bdf079758d704bb8ba56a3827997842f135ad8", + "txRoot": "0xbe6c599aefbec1cfe31dbdeca4b4dd0315bf5fca0f78e10c8f869c40a42feb0d", + "receiptsRoot": "0x5fdadbccc0b40ed39f6c7aacafb08a71c468f28793027552d9d99b1aeb19d406", + "logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "receipts": [ + { + "type": "0x2", + "root": "0x", + "status": "0x1", + "cumulativeGasUsed": "0x6b70", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0xb4821e4a9122a6f9baecad99351bee6ec54fe8c3f6a737b2e6478f4963536819", + "contractAddress": 
"0x0000000000000000000000000000000000000000", + "gasUsed": "0x6b70", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "blockNumber": "0x1000000", + "transactionIndex": "0x0" + }, + { + "root": "0x", + "status": "0x1", + "cumulativeGasUsed": "0xcde4", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "logs": null, + "transactionHash": "0xa9c6c6a848b9c9a0d8bbb4df5f30394983632817dbccc738e839c8e174fa4036", + "contractAddress": "0x0000000000000000000000000000000000000000", + "gasUsed": "0x6274", + "blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "blockNumber": "0x1000000", + "transactionIndex": "0x1" + } + ], + "currentDifficulty": "0x20000", + "gasUsed": "0xcde4" + } +} diff --git a/cmd/evm/testdata/9/readme.md b/cmd/evm/testdata/9/readme.md new file mode 100644 index 00000000000..88f0f12aaaa --- /dev/null +++ b/cmd/evm/testdata/9/readme.md @@ -0,0 +1,75 @@ +## EIP-1559 testing + +This test contains testcases for EIP-1559, which uses an new transaction type and has a new block parameter. + +### Prestate + +The alloc portion contains one contract (`0x000000000000000000000000000000000000aaaa`), containing the +following code: `0x58585454`: `PC; PC; SLOAD; SLOAD`. + +Essentialy, this contract does `SLOAD(0)` and `SLOAD(1)`. + +The alloc also contains some funds on `0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b`. + +## Transactions + +There are two transactions, each invokes the contract above. + +1. 
EIP-1559 ACL-transaction, which contains the `0x0` slot for `0xaaaa` +2. Legacy transaction + +## Execution + +Running it yields: +``` +$ dir=./testdata/9 && ./evm t8n --state.fork=London --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --trace && cat trace-* | grep SLOAD +{"pc":2,"op":84,"gas":"0x48c28","gasCost":"0x834","memory":"0x","memSize":0,"stack":["0x0","0x1"],"returnStack":null,"returnD +ata":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +{"pc":3,"op":84,"gas":"0x483f4","gasCost":"0x64","memory":"0x","memSize":0,"stack":["0x0","0x0"],"returnStack":null,"returnDa +ta":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +{"pc":2,"op":84,"gas":"0x49cf4","gasCost":"0x834","memory":"0x","memSize":0,"stack":["0x0","0x1"],"returnStack":null,"returnD +ata":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +{"pc":3,"op":84,"gas":"0x494c0","gasCost":"0x834","memory":"0x","memSize":0,"stack":["0x0","0x0"],"returnStack":null,"returnD +ata":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +``` + +We can also get the post-alloc: +``` +$ dir=./testdata/9 && ./evm t8n --state.fork=London --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout +{ + "alloc": { + "0x000000000000000000000000000000000000aaaa": { + "code": "0x58585454", + "balance": "0x3", + "nonce": "0x1" + }, + "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba": { + "balance": "0xbfc02677a000" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0xff104fcfea7800", + "nonce": "0x2" + } + } +} +``` + +If we try to execute it on older rules: +``` +dir=./testdata/9 && ./evm t8n --state.fork=Berlin --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --output.alloc=stdout +ERROR(10): Failed signing transactions: ERROR(10): Tx 0: failed to sign tx: transaction type not supported +``` + +It fails, due to the `evm t8n` cannot sign them in with the given signer. 
We can bypass that, however, +by feeding it presigned transactions, located in `txs_signed.json`. + +``` +dir=./testdata/9 && ./evm t8n --state.fork=Berlin --input.alloc=$dir/alloc.json --input.txs=$dir/txs_signed.json --input.env=$dir/env.json +INFO [05-07|12:28:42.072] rejected tx index=0 hash=b4821e..536819 error="transaction type not supported" +INFO [05-07|12:28:42.072] rejected tx index=1 hash=a9c6c6..fa4036 from=0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B error="nonce too high: address 0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B, tx: 1 state: 0" +INFO [05-07|12:28:42.073] Wrote file file=alloc.json +INFO [05-07|12:28:42.073] Wrote file file=result.json +``` + +Number `0` is not applicable, and therefore number `1` has wrong nonce, and both are rejected. + diff --git a/cmd/evm/testdata/9/txs.json b/cmd/evm/testdata/9/txs.json index 7f15b0b2215..740abce079d 100644 --- a/cmd/evm/testdata/9/txs.json +++ b/cmd/evm/testdata/9/txs.json @@ -1,14 +1,37 @@ [ - { - "gasPrice":"0x80", - "nonce":"0x0", - "to":"0x000000000000000000000000000000ca1100f022", - "input": "", - "gas":"0x1312d00", - "value": "0x0", - "v": "0x0", - "r": "0x0", - "s": "0x0", - "secretKey": "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8" - } -] \ No newline at end of file + { + "gas": "0x4ef00", + "maxPriorityFeePerGas": "0x2", + "maxFeePerGas": "0x12A05F200", + "chainId": "0x1", + "input": "0x", + "nonce": "0x0", + "to": "0x000000000000000000000000000000000000aaaa", + "value": "0x0", + "type" : "0x2", + "accessList": [ + {"address": "0x000000000000000000000000000000000000aaaa", + "storageKeys": [ + "0x0000000000000000000000000000000000000000000000000000000000000000" + ] + } + ], + "v": "0x0", + "r": "0x0", + "s": "0x0", + "secretKey": "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8" + }, + { + "gas": "0x4ef00", + "gasPrice": "0x12A05F200", + "chainId": "0x1", + "input": "0x", + "nonce": "0x1", + "to": "0x000000000000000000000000000000000000aaaa", + "value": 
"0x0", + "v": "0x0", + "r": "0x0", + "s": "0x0", + "secretKey": "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8" + } +] diff --git a/cmd/integration/commands/state_stages.go b/cmd/integration/commands/state_stages.go index 1edb32fcbe7..449eaebf1fc 100644 --- a/cmd/integration/commands/state_stages.go +++ b/cmd/integration/commands/state_stages.go @@ -17,7 +17,6 @@ import ( "github.com/ledgerwatch/erigon/common/changeset" "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/common/debugprint" - "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" @@ -219,7 +218,7 @@ func syncBySmallSteps(db kv.RwDB, miningConfig params.MiningConfig, ctx context. } encoder := json.NewEncoder(w) encoder.SetIndent(" ", " ") - for _, l := range core.FormatLogs(vmConfig.Tracer.(*vm.StructLogger).StructLogs()) { + for _, l := range vm.FormatLogs(vmConfig.Tracer.(*vm.StructLogger).StructLogs()) { if err2 := encoder.Encode(l); err2 != nil { panic(err2) } diff --git a/cmd/rpcdaemon/commands/eth_receipts.go b/cmd/rpcdaemon/commands/eth_receipts.go index 8b41227542a..5ebc6fc0f9c 100644 --- a/cmd/rpcdaemon/commands/eth_receipts.go +++ b/cmd/rpcdaemon/commands/eth_receipts.go @@ -59,7 +59,8 @@ func (api *BaseAPI) getReceipts(ctx context.Context, tx kv.Tx, chainConfig *para for i, txn := range block.Transactions() { ibs.Prepare(txn.Hash(), block.Hash(), i) - receipt, _, err := core.ApplyTransaction(chainConfig, getHeader, ethashFaker, nil, gp, ibs, noopWriter, block.Header(), txn, usedGas, vm.Config{}, contractHasTEVM) + header := block.Header() + receipt, _, err := core.ApplyTransaction(chainConfig, core.GetHashFn(header, getHeader), ethashFaker, nil, gp, ibs, noopWriter, header, txn, usedGas, vm.Config{}, contractHasTEVM) if err != nil { return nil, err } diff --git a/cmd/rpcdaemon22/commands/eth_receipts.go 
b/cmd/rpcdaemon22/commands/eth_receipts.go index bdd74f1017d..a1c313a9547 100644 --- a/cmd/rpcdaemon22/commands/eth_receipts.go +++ b/cmd/rpcdaemon22/commands/eth_receipts.go @@ -56,7 +56,8 @@ func (api *BaseAPI) getReceipts(ctx context.Context, tx kv.Tx, chainConfig *para for i, txn := range block.Transactions() { ibs.Prepare(txn.Hash(), block.Hash(), i) - receipt, _, err := core.ApplyTransaction(chainConfig, getHeader, ethashFaker, nil, gp, ibs, noopWriter, block.Header(), txn, usedGas, vm.Config{}, contractHasTEVM) + header := block.Header() + receipt, _, err := core.ApplyTransaction(chainConfig, core.GetHashFn(header, getHeader), ethashFaker, nil, gp, ibs, noopWriter, header, txn, usedGas, vm.Config{}, contractHasTEVM) if err != nil { return nil, err } diff --git a/cmd/state/commands/erigon2.go b/cmd/state/commands/erigon2.go index 934f79cac63..370433ecaa3 100644 --- a/cmd/state/commands/erigon2.go +++ b/cmd/state/commands/erigon2.go @@ -412,7 +412,7 @@ func processBlock(trace bool, txNumStart uint64, rw *ReaderWrapper, ww *WriterWr daoBlock = false } ibs.Prepare(tx.Hash(), block.Hash(), i) - receipt, _, err := core.ApplyTransaction(chainConfig, getHeader, engine, nil, gp, ibs, ww, header, tx, usedGas, vmConfig, nil) + receipt, _, err := core.ApplyTransaction(chainConfig, core.GetHashFn(header, getHeader), engine, nil, gp, ibs, ww, header, tx, usedGas, vmConfig, nil) if err != nil { return 0, nil, fmt.Errorf("could not apply tx %d [%x] failed: %w", i, tx.Hash(), err) } diff --git a/cmd/state/commands/erigon22.go b/cmd/state/commands/erigon22.go index de83339c87a..0bcc8538c13 100644 --- a/cmd/state/commands/erigon22.go +++ b/cmd/state/commands/erigon22.go @@ -305,6 +305,7 @@ func processBlock22(startTxNum uint64, trace bool, txNumStart uint64, rw *Reader txNum++ // Pre-block transaction ww.w.SetTxNum(txNum) + getHashFn := core.GetHashFn(header, getHeader) for i, tx := range block.Transactions() { if txNum >= startTxNum { @@ -312,7 +313,7 @@ func 
processBlock22(startTxNum uint64, trace bool, txNumStart uint64, rw *Reader ibs.Prepare(tx.Hash(), block.Hash(), i) ct := NewCallTracer() vmConfig.Tracer = ct - receipt, _, err := core.ApplyTransaction(chainConfig, getHeader, engine, nil, gp, ibs, ww, header, tx, usedGas, vmConfig, nil) + receipt, _, err := core.ApplyTransaction(chainConfig, getHashFn, engine, nil, gp, ibs, ww, header, tx, usedGas, vmConfig, nil) if err != nil { return 0, nil, fmt.Errorf("could not apply tx %d [%x] failed: %w", i, tx.Hash(), err) } diff --git a/cmd/state/commands/history2.go b/cmd/state/commands/history2.go index 63332bcb0b0..0aa8b07ffee 100644 --- a/cmd/state/commands/history2.go +++ b/cmd/state/commands/history2.go @@ -157,7 +157,7 @@ func runHistory2(trace bool, blockNum, txNumStart uint64, hw *HistoryWrapper, ww daoBlock = false } ibs.Prepare(tx.Hash(), block.Hash(), i) - receipt, _, err := core.ApplyTransaction(chainConfig, getHeader, engine, nil, gp, ibs, ww, header, tx, usedGas, vmConfig, nil) + receipt, _, err := core.ApplyTransaction(chainConfig, core.GetHashFn(header, getHeader), engine, nil, gp, ibs, ww, header, tx, usedGas, vmConfig, nil) if err != nil { return 0, nil, fmt.Errorf("could not apply tx %d [%x] failed: %w", i, tx.Hash(), err) } diff --git a/cmd/state/commands/history22.go b/cmd/state/commands/history22.go index fd9d10f2613..a7ecf4d8ad9 100644 --- a/cmd/state/commands/history22.go +++ b/cmd/state/commands/history22.go @@ -245,7 +245,7 @@ func runHistory22(trace bool, blockNum, txNumStart uint64, hw *state.HistoryRead hw.SetTxNum(txNum) ibs := state.New(hw) ibs.Prepare(tx.Hash(), block.Hash(), i) - receipt, _, err := core.ApplyTransaction(chainConfig, getHeader, engine, nil, gp, ibs, ww, header, tx, usedGas, vmConfig, nil) + receipt, _, err := core.ApplyTransaction(chainConfig, core.GetHashFn(header, getHeader), engine, nil, gp, ibs, ww, header, tx, usedGas, vmConfig, nil) if err != nil { return 0, nil, fmt.Errorf("could not apply tx %d [%x] failed: %w", i, 
tx.Hash(), err) } diff --git a/cmd/state/commands/opcode_tracer.go b/cmd/state/commands/opcode_tracer.go index fb6e9777180..ee6a5d86102 100644 --- a/cmd/state/commands/opcode_tracer.go +++ b/cmd/state/commands/opcode_tracer.go @@ -684,7 +684,7 @@ func runBlock(engine consensus.Engine, ibs *state.IntraBlockState, txnWriter sta rules := chainConfig.Rules(block.NumberU64()) for i, tx := range block.Transactions() { ibs.Prepare(tx.Hash(), block.Hash(), i) - receipt, _, err := core.ApplyTransaction(chainConfig, getHeader, engine, nil, gp, ibs, txnWriter, header, tx, usedGas, vmConfig, contractHasTEVM) + receipt, _, err := core.ApplyTransaction(chainConfig, core.GetHashFn(header, getHeader), engine, nil, gp, ibs, txnWriter, header, tx, usedGas, vmConfig, contractHasTEVM) if err != nil { return nil, fmt.Errorf("could not apply tx %d [%x] failed: %w", i, tx.Hash(), err) } diff --git a/cmd/state/commands/state_recon.go b/cmd/state/commands/state_recon.go index 1e6b3568438..231fcca9499 100644 --- a/cmd/state/commands/state_recon.go +++ b/cmd/state/commands/state_recon.go @@ -152,7 +152,8 @@ func (rw *ReconWorker) runTxTask(txTask state.TxTask) { vmConfig := vm.Config{NoReceipts: true, SkipAnalysis: core.SkipAnalysis(rw.chainConfig, txTask.BlockNum)} contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } ibs.Prepare(txHash, txTask.BlockHash, txTask.TxIndex) - _, _, err = core.ApplyTransaction(rw.chainConfig, rw.getHeader, rw.engine, nil, gp, ibs, noop, txTask.Header, txTask.Tx, usedGas, vmConfig, contractHasTEVM) + getHashFn := core.GetHashFn(txTask.Header, rw.getHeader) + _, _, err = core.ApplyTransaction(rw.chainConfig, getHashFn, rw.engine, nil, gp, ibs, noop, txTask.Header, txTask.Tx, usedGas, vmConfig, contractHasTEVM) if err != nil { panic(fmt.Errorf("could not apply tx %d [%x] failed: %w", txTask.TxIndex, txHash, err)) } diff --git a/cmd/state/commands/state_recon_1.go b/cmd/state/commands/state_recon_1.go index fd968542373..b149406a892 
100644 --- a/cmd/state/commands/state_recon_1.go +++ b/cmd/state/commands/state_recon_1.go @@ -150,7 +150,8 @@ func (rw *ReconWorker1) runTxTask(txTask state.TxTask) { contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } ibs.Prepare(txHash, txTask.BlockHash, txTask.TxIndex) vmConfig.SkipAnalysis = core.SkipAnalysis(rw.chainConfig, txTask.BlockNum) - blockContext := core.NewEVMBlockContext(txTask.Header, rw.getHeader, rw.engine, nil /* author */, contractHasTEVM) + getHashFn := core.GetHashFn(txTask.Header, rw.getHeader) + blockContext := core.NewEVMBlockContext(txTask.Header, getHashFn, rw.engine, nil /* author */, contractHasTEVM) vmenv := vm.NewEVM(blockContext, vm.TxContext{}, ibs, rw.chainConfig, vmConfig) msg, err := txTask.Tx.AsMessage(*types.MakeSigner(rw.chainConfig, txTask.BlockNum), txTask.Header.BaseFee, rules) if err != nil { diff --git a/consensus/parlia/parlia.go b/consensus/parlia/parlia.go index 30fd185239a..731fd618121 100644 --- a/consensus/parlia/parlia.go +++ b/consensus/parlia/parlia.go @@ -1218,7 +1218,7 @@ func (p *Parlia) systemCall(from, contract common.Address, data []byte, ibs *sta ) vmConfig := vm.Config{NoReceipts: true} // Create a new context to be used in the EVM environment - blockContext := core.NewEVMBlockContext(header, nil, p, &from, nil) + blockContext := core.NewEVMBlockContext(header, core.GetHashFn(header, nil), p, &from, nil) evm := vm.NewEVM(blockContext, core.NewEVMTxContext(msg), ibs, chainConfig, vmConfig) ret, leftOverGas, err := evm.Call( vm.AccountRef(msg.From()), diff --git a/core/blockchain.go b/core/blockchain.go index b102ae22c15..6b845347df9 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -18,16 +18,17 @@ package core import ( - "encoding/json" "fmt" - "os" "time" "github.com/ledgerwatch/erigon/core/systemcontracts" + "github.com/ledgerwatch/erigon/rlp" + "golang.org/x/crypto/sha3" "golang.org/x/exp/slices" metrics2 "github.com/VictoriaMetrics/metrics" 
"github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/math" "github.com/ledgerwatch/erigon/common/u256" "github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/consensus/misc" @@ -48,12 +49,30 @@ const ( TriesInMemory = 128 ) -// ExecuteBlockEphemerally runs a block from provided stateReader and -// writes the result to the provided stateWriter +type RejectedTx struct { + Index int `json:"index" gencodec:"required"` + Err string `json:"error" gencodec:"required"` +} + +type RejectedTxs []*RejectedTx + +type EphemeralExecResult struct { + StateRoot common.Hash `json:"stateRoot"` + TxRoot common.Hash `json:"txRoot"` + ReceiptRoot common.Hash `json:"receiptsRoot"` + LogsHash common.Hash `json:"logsHash"` + Bloom types.Bloom `json:"logsBloom" gencodec:"required"` + Receipts types.Receipts `json:"receipts"` + Rejected RejectedTxs `json:"rejected,omitempty"` + Difficulty *math.HexOrDecimal256 `json:"currentDifficulty" gencodec:"required"` + GasUsed math.HexOrDecimal64 `json:"gasUsed"` + ReceiptForStorage *types.ReceiptForStorage `json:"-"` +} + func ExecuteBlockEphemerallyForBSC( chainConfig *params.ChainConfig, vmConfig *vm.Config, - getHeader func(hash common.Hash, number uint64) *types.Header, + blockHashFunc func(n uint64) common.Hash, engine consensus.Engine, block *types.Block, stateReader state.StateReader, @@ -61,16 +80,23 @@ func ExecuteBlockEphemerallyForBSC( epochReader consensus.EpochReader, chainReader consensus.ChainHeaderReader, contractHasTEVM func(codeHash common.Hash) (bool, error), -) (types.Receipts, error) { + statelessExec bool, // for usage of this API via cli tools wherein some of the validations need to be relaxed. 
+ getTracer func(txIndex int, txHash common.Hash) (vm.Tracer, error), +) (*EphemeralExecResult, error) { defer blockExecutionTimer.UpdateDuration(time.Now()) block.Uncles() ibs := state.New(stateReader) header := block.Header() - var receipts types.Receipts usedGas := new(uint64) gp := new(GasPool) gp.AddGas(block.GasLimit()) + var ( + rejectedTxs []*RejectedTx + includedTxs types.Transactions + receipts types.Receipts + ) + if !vmConfig.ReadOnly { if err := InitializeBlockExecution(engine, chainReader, epochReader, block.Header(), block.Transactions(), block.Uncles(), chainConfig, ibs); err != nil { return nil, err @@ -95,35 +121,37 @@ func ExecuteBlockEphemerallyForBSC( ibs.Prepare(tx.Hash(), block.Hash(), i) writeTrace := false if vmConfig.Debug && vmConfig.Tracer == nil { - vmConfig.Tracer = vm.NewStructLogger(&vm.LogConfig{}) + tracer, err := getTracer(i, tx.Hash()) + if err != nil { + return nil, fmt.Errorf("could not obtain tracer: %w", err) + } + vmConfig.Tracer = tracer writeTrace = true } - receipt, _, err := ApplyTransaction(chainConfig, getHeader, engine, nil, gp, ibs, noop, header, tx, usedGas, *vmConfig, contractHasTEVM) + receipt, _, err := ApplyTransaction(chainConfig, blockHashFunc, engine, nil, gp, ibs, noop, header, tx, usedGas, *vmConfig, contractHasTEVM) if writeTrace { - w, err1 := os.Create(fmt.Sprintf("txtrace_%x.txt", tx.Hash())) - if err1 != nil { - panic(err1) - } - encoder := json.NewEncoder(w) - logs := FormatLogs(vmConfig.Tracer.(*vm.StructLogger).StructLogs()) - if err2 := encoder.Encode(logs); err2 != nil { - panic(err2) - } - if err2 := w.Close(); err2 != nil { - panic(err2) + if ftracer, ok := vmConfig.Tracer.(vm.FlushableTracer); ok { + ftracer.Flush(tx) } + vmConfig.Tracer = nil } if err != nil { - return nil, fmt.Errorf("could not apply tx %d from block %d [%v]: %w", i, block.NumberU64(), tx.Hash().Hex(), err) - } - if !vmConfig.NoReceipts { - receipts = append(receipts, receipt) + if !statelessExec { + return nil, 
fmt.Errorf("could not apply tx %d from block %d [%v]: %w", i, block.NumberU64(), tx.Hash().Hex(), err) + } + rejectedTxs = append(rejectedTxs, &RejectedTx{i, err.Error()}) + } else { + includedTxs = append(includedTxs, tx) + if !vmConfig.NoReceipts { + receipts = append(receipts, receipt) + } } } var newBlock *types.Block + var receiptSha common.Hash if !vmConfig.ReadOnly { // We're doing this hack for BSC to avoid changing consensus interfaces a lot. BSC modifies txs and receipts by appending // system transactions, and they increase used gas and write cumulative gas to system receipts, that's why we need @@ -148,21 +176,26 @@ func ExecuteBlockEphemerallyForBSC( if !vmConfig.NoReceipts { receipts = outReceipts } + receiptSha = newBlock.ReceiptHash() } else { newBlock = block + receiptSha = types.DeriveSha(receipts) } if chainConfig.IsByzantium(header.Number.Uint64()) && !vmConfig.NoReceipts { - if newBlock.ReceiptHash() != block.ReceiptHash() { + if !statelessExec && receiptSha != block.ReceiptHash() { return nil, fmt.Errorf("mismatched receipt headers for block %d (%s != %s)", block.NumberU64(), newBlock.ReceiptHash().Hex(), block.Header().ReceiptHash.Hex()) } } - if newBlock.GasUsed() != header.GasUsed { + if !statelessExec && newBlock.GasUsed() != header.GasUsed { return nil, fmt.Errorf("gas used by execution: %d, in header: %d", *usedGas, header.GasUsed) } + + var bloom types.Bloom if !vmConfig.NoReceipts { - if newBlock.Bloom() != header.Bloom { - return nil, fmt.Errorf("bloom computed by execution: %x, in header: %x", newBlock.Bloom(), header.Bloom) + bloom = newBlock.Bloom() + if !statelessExec && bloom != header.Bloom { + return nil, fmt.Errorf("bloom computed by execution: %x, in header: %x", bloom, header.Bloom) } } @@ -172,7 +205,17 @@ func ExecuteBlockEphemerallyForBSC( return nil, fmt.Errorf("writing changesets for block %d failed: %w", header.Number.Uint64(), err) } - return receipts, nil + execRs := &EphemeralExecResult{ + TxRoot: 
types.DeriveSha(includedTxs), + ReceiptRoot: receiptSha, + Bloom: bloom, + Receipts: receipts, + Difficulty: (*math.HexOrDecimal256)(block.Header().Difficulty), + GasUsed: math.HexOrDecimal64(*usedGas), + Rejected: rejectedTxs, + } + + return execRs, nil } // ExecuteBlockEphemerally runs a block from provided stateReader and @@ -180,7 +223,7 @@ func ExecuteBlockEphemerallyForBSC( func ExecuteBlockEphemerally( chainConfig *params.ChainConfig, vmConfig *vm.Config, - getHeader func(hash common.Hash, number uint64) *types.Header, + blockHashFunc func(n uint64) common.Hash, engine consensus.Engine, block *types.Block, stateReader state.StateReader, @@ -188,19 +231,28 @@ func ExecuteBlockEphemerally( epochReader consensus.EpochReader, chainReader consensus.ChainHeaderReader, contractHasTEVM func(codeHash common.Hash) (bool, error), -) (types.Receipts, *types.ReceiptForStorage, error) { + statelessExec bool, // for usage of this API via cli tools wherein some of the validations need to be relaxed. 
+ getTracer func(txIndex int, txHash common.Hash) (vm.Tracer, error), +) (*EphemeralExecResult, error) { + defer blockExecutionTimer.UpdateDuration(time.Now()) block.Uncles() ibs := state.New(stateReader) header := block.Header() - var receipts types.Receipts + usedGas := new(uint64) gp := new(GasPool) gp.AddGas(block.GasLimit()) + var ( + rejectedTxs []*RejectedTx + includedTxs types.Transactions + receipts types.Receipts + ) + if !vmConfig.ReadOnly { if err := InitializeBlockExecution(engine, chainReader, epochReader, block.Header(), block.Transactions(), block.Uncles(), chainConfig, ibs); err != nil { - return nil, nil, err + return nil, err } } @@ -213,54 +265,55 @@ func ExecuteBlockEphemerally( ibs.Prepare(tx.Hash(), block.Hash(), i) writeTrace := false if vmConfig.Debug && vmConfig.Tracer == nil { - vmConfig.Tracer = vm.NewStructLogger(&vm.LogConfig{}) + tracer, err := getTracer(i, tx.Hash()) + if err != nil { + return nil, fmt.Errorf("could not obtain tracer: %w", err) + } + vmConfig.Tracer = tracer writeTrace = true } - receipt, _, err := ApplyTransaction(chainConfig, getHeader, engine, nil, gp, ibs, noop, header, tx, usedGas, *vmConfig, contractHasTEVM) + receipt, _, err := ApplyTransaction(chainConfig, blockHashFunc, engine, nil, gp, ibs, noop, header, tx, usedGas, *vmConfig, contractHasTEVM) if writeTrace { - w, err1 := os.Create(fmt.Sprintf("txtrace_%x.txt", tx.Hash())) - if err1 != nil { - panic(err1) - } - encoder := json.NewEncoder(w) - logs := FormatLogs(vmConfig.Tracer.(*vm.StructLogger).StructLogs()) - if err2 := encoder.Encode(logs); err2 != nil { - panic(err2) - } - if err2 := w.Close(); err2 != nil { - panic(err2) + if ftracer, ok := vmConfig.Tracer.(vm.FlushableTracer); ok { + ftracer.Flush(tx) } + vmConfig.Tracer = nil } if err != nil { - return nil, nil, fmt.Errorf("could not apply tx %d from block %d [%v]: %w", i, block.NumberU64(), tx.Hash().Hex(), err) - } - if !vmConfig.NoReceipts { - receipts = append(receipts, receipt) + if 
!statelessExec { + return nil, fmt.Errorf("could not apply tx %d from block %d [%v]: %w", i, block.NumberU64(), tx.Hash().Hex(), err) + } + rejectedTxs = append(rejectedTxs, &RejectedTx{i, err.Error()}) + } else { + includedTxs = append(includedTxs, tx) + if !vmConfig.NoReceipts { + receipts = append(receipts, receipt) + } } } - if chainConfig.IsByzantium(header.Number.Uint64()) && !vmConfig.NoReceipts { - receiptSha := types.DeriveSha(receipts) - if receiptSha != block.ReceiptHash() { - return nil, nil, fmt.Errorf("mismatched receipt headers for block %d", block.NumberU64()) - } + receiptSha := types.DeriveSha(receipts) + if !statelessExec && chainConfig.IsByzantium(header.Number.Uint64()) && !vmConfig.NoReceipts && receiptSha != block.ReceiptHash() { + return nil, fmt.Errorf("mismatched receipt headers for block %d", block.NumberU64()) } - if *usedGas != header.GasUsed { - return nil, nil, fmt.Errorf("gas used by execution: %d, in header: %d", *usedGas, header.GasUsed) + if !statelessExec && *usedGas != header.GasUsed { + return nil, fmt.Errorf("gas used by execution: %d, in header: %d", *usedGas, header.GasUsed) } + + var bloom types.Bloom if !vmConfig.NoReceipts { - bloom := types.CreateBloom(receipts) - if bloom != header.Bloom { - return nil, nil, fmt.Errorf("bloom computed by execution: %x, in header: %x", bloom, header.Bloom) + bloom = types.CreateBloom(receipts) + if !statelessExec && bloom != header.Bloom { + return nil, fmt.Errorf("bloom computed by execution: %x, in header: %x", bloom, header.Bloom) } } if !vmConfig.ReadOnly { txs := block.Transactions() if _, err := FinalizeBlockExecution(engine, stateReader, block.Header(), txs, block.Uncles(), stateWriter, chainConfig, ibs, receipts, epochReader, chainReader, false); err != nil { - return nil, nil, err + return nil, err } } @@ -287,7 +340,26 @@ func ExecuteBlockEphemerally( } } - return receipts, stateSyncReceipt, nil + execRs := &EphemeralExecResult{ + TxRoot: types.DeriveSha(includedTxs), + 
ReceiptRoot: receiptSha, + Bloom: bloom, + LogsHash: rlpHash(blockLogs), + Receipts: receipts, + Difficulty: (*math.HexOrDecimal256)(header.Difficulty), + GasUsed: math.HexOrDecimal64(*usedGas), + Rejected: rejectedTxs, + ReceiptForStorage: stateSyncReceipt, + } + + return execRs, nil +} + +func rlpHash(x interface{}) (h common.Hash) { + hw := sha3.NewLegacyKeccak256() + rlp.Encode(hw, x) //nolint:errcheck + hw.Sum(h[:0]) + return h } func SysCallContract(contract common.Address, data []byte, chainConfig params.ChainConfig, ibs *state.IntraBlockState, header *types.Header, engine consensus.Engine) (result []byte, err error) { @@ -308,19 +380,16 @@ func SysCallContract(contract common.Address, data []byte, chainConfig params.Ch vmConfig := vm.Config{NoReceipts: true} // Create a new context to be used in the EVM environment isBor := chainConfig.Bor != nil + var txContext vm.TxContext var author *common.Address if isBor { author = &header.Coinbase - } else { - author = &state.SystemAddress - } - blockContext := NewEVMBlockContext(header, nil, engine, author, nil) - var txContext vm.TxContext - if isBor { txContext = vm.TxContext{} } else { + author = &state.SystemAddress txContext = NewEVMTxContext(msg) } + blockContext := NewEVMBlockContext(header, GetHashFn(header, nil), engine, author, nil) evm := vm.NewEVM(blockContext, txContext, ibs, &chainConfig, vmConfig) if isBor { ret, _, err := evm.Call( @@ -364,7 +433,7 @@ func CallContract(contract common.Address, data []byte, chainConfig params.Chain return nil, fmt.Errorf("SysCallContract: %w ", err) } vmConfig := vm.Config{NoReceipts: true} - _, result, err = ApplyTransaction(&chainConfig, nil, engine, &state.SystemAddress, gp, ibs, noop, header, tx, &gasUsed, vmConfig, nil) + _, result, err = ApplyTransaction(&chainConfig, GetHashFn(header, nil), engine, &state.SystemAddress, gp, ibs, noop, header, tx, &gasUsed, vmConfig, nil) if err != nil { return result, fmt.Errorf("SysCallContract: %w ", err) } @@ -392,7 +461,7 
@@ func FinalizeBlockExecution(engine consensus.Engine, stateReader state.StateRead _, _, err = engine.Finalize(cc, header, ibs, txs, uncles, receipts, e, headerReader, syscall) } if err != nil { - return + return nil, err } var originalSystemAcc *accounts.Account diff --git a/core/chain_makers.go b/core/chain_makers.go index 24a8e4266df..17248745e79 100644 --- a/core/chain_makers.go +++ b/core/chain_makers.go @@ -112,7 +112,7 @@ func (b *BlockGen) AddTxWithChain(getHeader func(hash common.Hash, number uint64 } b.ibs.Prepare(tx.Hash(), common.Hash{}, len(b.txs)) contractHasTEVM := func(_ common.Hash) (bool, error) { return false, nil } - receipt, _, err := ApplyTransaction(b.config, getHeader, engine, &b.header.Coinbase, b.gasPool, b.ibs, state.NewNoopWriter(), b.header, tx, &b.header.GasUsed, vm.Config{}, contractHasTEVM) + receipt, _, err := ApplyTransaction(b.config, GetHashFn(b.header, getHeader), engine, &b.header.Coinbase, b.gasPool, b.ibs, state.NewNoopWriter(), b.header, tx, &b.header.GasUsed, vm.Config{}, contractHasTEVM) if err != nil { panic(err) } @@ -126,7 +126,7 @@ func (b *BlockGen) AddFailedTxWithChain(getHeader func(hash common.Hash, number } b.ibs.Prepare(tx.Hash(), common.Hash{}, len(b.txs)) contractHasTEVM := func(common.Hash) (bool, error) { return false, nil } - receipt, _, err := ApplyTransaction(b.config, getHeader, engine, &b.header.Coinbase, b.gasPool, b.ibs, state.NewNoopWriter(), b.header, tx, &b.header.GasUsed, vm.Config{}, contractHasTEVM) + receipt, _, err := ApplyTransaction(b.config, GetHashFn(b.header, getHeader), engine, &b.header.Coinbase, b.gasPool, b.ibs, state.NewNoopWriter(), b.header, tx, &b.header.GasUsed, vm.Config{}, contractHasTEVM) _ = err // accept failed transactions b.txs = append(b.txs, tx) b.receipts = append(b.receipts, receipt) diff --git a/core/evm.go b/core/evm.go index c2fa1471cd5..b89f8e5acb4 100644 --- a/core/evm.go +++ b/core/evm.go @@ -30,7 +30,7 @@ import ( ) // NewEVMBlockContext creates a new context 
for use in the EVM. -func NewEVMBlockContext(header *types.Header, getHeader func(hash common.Hash, number uint64) *types.Header, engine consensus.Engine, author *common.Address, contractHasTEVM func(contractHash common.Hash) (bool, error)) vm.BlockContext { +func NewEVMBlockContext(header *types.Header, blockHashFunc func(n uint64) common.Hash, engine consensus.Engine, author *common.Address, contractHasTEVM func(contractHash common.Hash) (bool, error)) vm.BlockContext { // If we don't have an explicit author (i.e. not mining), extract from the header var beneficiary common.Address if author == nil { @@ -71,7 +71,7 @@ func NewEVMBlockContext(header *types.Header, getHeader func(hash common.Hash, n return vm.BlockContext{ CanTransfer: CanTransfer, Transfer: transferFunc, - GetHash: GetHashFn(header, getHeader), + GetHash: blockHashFunc, Coinbase: beneficiary, BlockNumber: header.Number.Uint64(), Time: header.Time, diff --git a/core/state_processor.go b/core/state_processor.go index 066378ab5d9..c203685bb9e 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -17,10 +17,7 @@ package core import ( - "fmt" - "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/common/math" "github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" @@ -29,57 +26,6 @@ import ( "github.com/ledgerwatch/erigon/params" ) -// StructLogRes stores a structured log emitted by the EVM while replaying a -// transaction in debug mode -type StructLogRes struct { - Pc uint64 `json:"pc"` - Op string `json:"op"` - Gas uint64 `json:"gas"` - GasCost uint64 `json:"gasCost"` - Depth int `json:"depth"` - Error error `json:"error,omitempty"` - Stack *[]string `json:"stack,omitempty"` - Memory *[]string `json:"memory,omitempty"` - Storage *map[string]string `json:"storage,omitempty"` -} - -// FormatLogs formats EVM returned structured logs for json output -func FormatLogs(logs []vm.StructLog) 
[]StructLogRes { - formatted := make([]StructLogRes, len(logs)) - for index, trace := range logs { - formatted[index] = StructLogRes{ - Pc: trace.Pc, - Op: trace.Op.String(), - Gas: trace.Gas, - GasCost: trace.GasCost, - Depth: trace.Depth, - Error: trace.Err, - } - if trace.Stack != nil { - stack := make([]string, len(trace.Stack)) - for i, stackValue := range trace.Stack { - stack[i] = fmt.Sprintf("%x", math.PaddedBigBytes(stackValue, 32)) - } - formatted[index].Stack = &stack - } - if trace.Memory != nil { - memory := make([]string, 0, (len(trace.Memory)+31)/32) - for i := 0; i+32 <= len(trace.Memory); i += 32 { - memory = append(memory, fmt.Sprintf("%x", trace.Memory[i:i+32])) - } - formatted[index].Memory = &memory - } - if trace.Storage != nil { - storage := make(map[string]string) - for i, storageValue := range trace.Storage { - storage[fmt.Sprintf("%x", i)] = fmt.Sprintf("%x", storageValue) - } - formatted[index].Storage = &storage - } - } - return formatted -} - // applyTransaction attempts to apply a transaction to the given state database // and uses the input parameters for its environment. It returns the receipt // for the transaction, gas used and an error if the transaction failed, @@ -140,7 +86,7 @@ func applyTransaction(config *params.ChainConfig, gp *GasPool, statedb *state.In // and uses the input parameters for its environment. It returns the receipt // for the transaction, gas used and an error if the transaction failed, // indicating the block was invalid. 
-func ApplyTransaction(config *params.ChainConfig, getHeader func(hash common.Hash, number uint64) *types.Header, engine consensus.Engine, author *common.Address, gp *GasPool, ibs *state.IntraBlockState, stateWriter state.StateWriter, header *types.Header, tx types.Transaction, usedGas *uint64, cfg vm.Config, contractHasTEVM func(contractHash common.Hash) (bool, error)) (*types.Receipt, []byte, error) { +func ApplyTransaction(config *params.ChainConfig, blockHashFunc func(n uint64) common.Hash, engine consensus.Engine, author *common.Address, gp *GasPool, ibs *state.IntraBlockState, stateWriter state.StateWriter, header *types.Header, tx types.Transaction, usedGas *uint64, cfg vm.Config, contractHasTEVM func(contractHash common.Hash) (bool, error)) (*types.Receipt, []byte, error) { // Create a new context to be used in the EVM environment // Add addresses to access list if applicable @@ -152,7 +98,7 @@ func ApplyTransaction(config *params.ChainConfig, getHeader func(hash common.Has if tx.IsStarkNet() { vmenv = &vm.CVMAdapter{Cvm: vm.NewCVM(ibs)} } else { - blockContext := NewEVMBlockContext(header, getHeader, engine, author, contractHasTEVM) + blockContext := NewEVMBlockContext(header, blockHashFunc, engine, author, contractHasTEVM) vmenv = vm.NewEVM(blockContext, vm.TxContext{}, ibs, config, cfg) } diff --git a/core/vm/logger.go b/core/vm/logger.go index 1d50dca6c1b..b9d511b8858 100644 --- a/core/vm/logger.go +++ b/core/vm/logger.go @@ -18,10 +18,12 @@ package vm import ( "encoding/hex" + "encoding/json" "errors" "fmt" "io" "math/big" + "os" "strings" "time" @@ -128,6 +130,27 @@ type Tracer interface { CaptureAccountWrite(account common.Address) error } +// FlushableTracer is a Tracer extension whose accumulated traces has to be +// flushed once the tracing is completed. 
+type FlushableTracer interface { + Tracer + Flush(tx types.Transaction) +} + +// StructLogRes stores a structured log emitted by the EVM while replaying a +// transaction in debug mode +type StructLogRes struct { + Pc uint64 `json:"pc"` + Op string `json:"op"` + Gas uint64 `json:"gas"` + GasCost uint64 `json:"gasCost"` + Depth int `json:"depth"` + Error error `json:"error,omitempty"` + Stack *[]string `json:"stack,omitempty"` + Memory *[]string `json:"memory,omitempty"` + Storage *map[string]string `json:"storage,omitempty"` +} + // StructLogger is an EVM state logger and implements Tracer. // // StructLogger can capture state based on the given Log configuration and also keeps @@ -261,6 +284,58 @@ func (l *StructLogger) Error() error { return l.err } // Output returns the VM return value captured by the trace. func (l *StructLogger) Output() []byte { return l.output } +func (l *StructLogger) Flush(tx types.Transaction) { + w, err1 := os.Create(fmt.Sprintf("txtrace_%x.txt", tx.Hash())) + if err1 != nil { + panic(err1) + } + encoder := json.NewEncoder(w) + logs := FormatLogs(l.StructLogs()) + if err2 := encoder.Encode(logs); err2 != nil { + panic(err2) + } + if err2 := w.Close(); err2 != nil { + panic(err2) + } +} + +// FormatLogs formats EVM returned structured logs for json output +func FormatLogs(logs []StructLog) []StructLogRes { + formatted := make([]StructLogRes, len(logs)) + for index, trace := range logs { + formatted[index] = StructLogRes{ + Pc: trace.Pc, + Op: trace.Op.String(), + Gas: trace.Gas, + GasCost: trace.GasCost, + Depth: trace.Depth, + Error: trace.Err, + } + if trace.Stack != nil { + stack := make([]string, len(trace.Stack)) + for i, stackValue := range trace.Stack { + stack[i] = fmt.Sprintf("%x", math.PaddedBigBytes(stackValue, 32)) + } + formatted[index].Stack = &stack + } + if trace.Memory != nil { + memory := make([]string, 0, (len(trace.Memory)+31)/32) + for i := 0; i+32 <= len(trace.Memory); i += 32 { + memory = append(memory, 
fmt.Sprintf("%x", trace.Memory[i:i+32])) + } + formatted[index].Memory = &memory + } + if trace.Storage != nil { + storage := make(map[string]string) + for i, storageValue := range trace.Storage { + storage[fmt.Sprintf("%x", i)] = fmt.Sprintf("%x", storageValue) + } + formatted[index].Storage = &storage + } + } + return formatted +} + // WriteTrace writes a formatted trace to the given writer func WriteTrace(writer io.Writer, logs []StructLog) { for _, log := range logs { diff --git a/eth/stagedsync/stage_execute.go b/eth/stagedsync/stage_execute.go index f40a126d865..0bb8ace725d 100644 --- a/eth/stagedsync/stage_execute.go +++ b/eth/stagedsync/stage_execute.go @@ -16,6 +16,7 @@ import ( "github.com/ledgerwatch/erigon-lib/etl" "github.com/ledgerwatch/erigon-lib/kv" commonold "github.com/ledgerwatch/erigon/common" + ecom "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/changeset" "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/common/math" @@ -121,21 +122,30 @@ func executeBlock( return h } + getTracer := func(txIndex int, txHash ecom.Hash) (vm.Tracer, error) { + return vm.NewStructLogger(&vm.LogConfig{}), nil + } + callTracer := calltracer.NewCallTracer(contractHasTEVM) vmConfig.Debug = true vmConfig.Tracer = callTracer var receipts types.Receipts var stateSyncReceipt *types.ReceiptForStorage - _, isPoSa := effectiveEngine.(consensus.PoSA) + var execRs *core.EphemeralExecResult + _, isPoSa := cfg.engine.(consensus.PoSA) + getHashFn := core.GetHashFn(block.Header(), getHeader) + if isPoSa { - receipts, err = core.ExecuteBlockEphemerallyForBSC(cfg.chainConfig, &vmConfig, getHeader, effectiveEngine, block, stateReader, stateWriter, epochReader{tx: tx}, chainReader{config: cfg.chainConfig, tx: tx, blockReader: cfg.blockReader}, contractHasTEVM) + execRs, err = core.ExecuteBlockEphemerallyForBSC(cfg.chainConfig, &vmConfig, getHashFn, cfg.engine, block, stateReader, stateWriter, epochReader{tx: tx}, 
chainReader{config: cfg.chainConfig, tx: tx, blockReader: cfg.blockReader}, contractHasTEVM, false, getTracer) } else { - receipts, stateSyncReceipt, err = core.ExecuteBlockEphemerally(cfg.chainConfig, &vmConfig, getHeader, effectiveEngine, block, stateReader, stateWriter, epochReader{tx: tx}, chainReader{config: cfg.chainConfig, tx: tx, blockReader: cfg.blockReader}, contractHasTEVM) + execRs, err = core.ExecuteBlockEphemerally(cfg.chainConfig, &vmConfig, getHashFn, cfg.engine, block, stateReader, stateWriter, epochReader{tx: tx}, chainReader{config: cfg.chainConfig, tx: tx, blockReader: cfg.blockReader}, contractHasTEVM, false, getTracer) } if err != nil { return err } + receipts = execRs.Receipts + stateSyncReceipt = execRs.ReceiptForStorage if writeReceipts { if err = rawdb.AppendReceipts(tx, blockNum, receipts); err != nil { @@ -147,7 +157,6 @@ func executeBlock( return err } } - } if cfg.changeSetHook != nil { diff --git a/eth/stagedsync/stage_mining_exec.go b/eth/stagedsync/stage_mining_exec.go index 3afa9922767..a1742548088 100644 --- a/eth/stagedsync/stage_mining_exec.go +++ b/eth/stagedsync/stage_mining_exec.go @@ -180,7 +180,7 @@ func addTransactionsToMiningBlock(logPrefix string, current *MiningBlock, chainC var miningCommitTx = func(txn types.Transaction, coinbase common.Address, vmConfig *vm.Config, chainConfig params.ChainConfig, ibs *state.IntraBlockState, current *MiningBlock) ([]*types.Log, error) { snap := ibs.Snapshot() - receipt, _, err := core.ApplyTransaction(&chainConfig, getHeader, engine, &coinbase, gasPool, ibs, noop, header, txn, &header.GasUsed, *vmConfig, contractHasTEVM) + receipt, _, err := core.ApplyTransaction(&chainConfig, core.GetHashFn(header, getHeader), engine, &coinbase, gasPool, ibs, noop, header, txn, &header.GasUsed, *vmConfig, contractHasTEVM) if err != nil { ibs.RevertToSnapshot(snap) return nil, err diff --git a/go.mod b/go.mod index 92826f92b17..7087daba894 100644 --- a/go.mod +++ b/go.mod @@ -19,7 +19,8 @@ require ( 
github.com/edsrzf/mmap-go v1.1.0 github.com/emicklei/dot v1.0.0 github.com/emirpasic/gods v1.18.1 - github.com/fjl/gencodec v0.0.0-20191126094850-e283372f291f + github.com/fjl/gencodec v0.0.0-20220412091415-8bb9e558978c + github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect github.com/goccy/go-json v0.9.7 github.com/gofrs/flock v0.8.1 github.com/golang-jwt/jwt/v4 v4.4.1 @@ -92,13 +93,13 @@ require ( github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91 // indirect + github.com/docker/docker v20.10.17+incompatible github.com/dustin/go-humanize v1.0.0 // indirect github.com/flanglet/kanzi-go v1.9.1-0.20211212184056-72dda96261ee // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61 // indirect github.com/go-kit/kit v0.10.0 // indirect github.com/go-logfmt/logfmt v0.5.0 // indirect - github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect github.com/go-stack/stack v1.8.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.2 // indirect @@ -161,3 +162,5 @@ require ( modernc.org/strutil v1.1.1 // indirect modernc.org/token v1.0.0 // indirect ) + +require gotest.tools/v3 v3.3.0 // indirect diff --git a/go.sum b/go.sum index 939cfee5aaf..525743beec3 100644 --- a/go.sum +++ b/go.sum @@ -165,6 +165,8 @@ github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91 h1:Izz0+t1Z5nI16/II7vuEo/nHjodOg0p7+OiDpjX5t1E= github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= +github.com/docker/docker v20.10.17+incompatible h1:JYCuMrWaVNophQTOrMMoSwudOVEfcegoZZrleKc1xwE= +github.com/docker/docker 
v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48 h1:iZOop7pqsg+56twTopWgwCGxdB5SI2yDO8Ti7eTRliQ= github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= @@ -191,8 +193,8 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fjl/gencodec v0.0.0-20191126094850-e283372f291f h1:Y/gg/utVetS+WS6htAKCTDralkm/8hLIIUAtLFdbdQ8= -github.com/fjl/gencodec v0.0.0-20191126094850-e283372f291f/go.mod h1:q+7Z5oyy8cvKF3TakcuihvQvBHFTnXjB+7UP1e2Q+1o= +github.com/fjl/gencodec v0.0.0-20220412091415-8bb9e558978c h1:CndMRAH4JIwxbW8KYq6Q+cGWcGHz0FjGR3QqcInWcW0= +github.com/fjl/gencodec v0.0.0-20220412091415-8bb9e558978c/go.mod h1:AzA8Lj6YtixmJWL+wkKoBGsLWy9gFrAzi4g+5bCKwpY= github.com/flanglet/kanzi-go v1.9.1-0.20211212184056-72dda96261ee h1:CaVlPeoz5kJQ+cAOV+ZDdlr3J2FmKyNkGu9LY+x7cDM= github.com/flanglet/kanzi-go v1.9.1-0.20211212184056-72dda96261ee/go.mod h1:/sUSVgDcbjsisuW42GPDgaMqvJ0McZERNICnD7b1nRA= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= @@ -208,12 +210,10 @@ github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61/go.mod h1:Q0X6pkwTILD github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= github.com/glycerine/go-unsnap-stream 
v0.0.0-20181221182339-f9677308dec2/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= -github.com/glycerine/go-unsnap-stream v0.0.0-20181221182339-f9677308dec2/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= github.com/glycerine/go-unsnap-stream v0.0.0-20190901134440-81cf024a9e0a/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= github.com/glycerine/goconvey v0.0.0-20180728074245-46e3a41ad493/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= github.com/glycerine/goconvey v0.0.0-20190315024820-982ee783a72e/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= -github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo= @@ -291,7 +291,6 @@ github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORR github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20190309154008-847fc94819f9/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gopherjs/gopherjs v0.0.0-20190910122728-9d188e94fb99/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= @@ -359,7 +358,6 @@ github.com/json-iterator/go v1.1.12 
h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= @@ -383,7 +381,6 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3PYPwICLl+/9oulQauOuETfgFvhBDffs0= -github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= github.com/ledgerwatch/erigon-lib v0.0.0-20220707065230-95e361fa1ed7 h1:ytnJHsVttH1NleI45f6FbP7HaratpDx4IPCK/D/aZwI= @@ -446,7 +443,6 @@ github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQ github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= @@ -586,6 +582,7 @@ github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3 github.com/spf13/cobra v1.5.0 h1:X+jTBEBqF0bHN+9cSMgmfuvv2VHJ9ezmFNf9Y/XstYU= github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= @@ -796,12 +793,12 @@ golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191126055441-b0650ceb63d9/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools 
v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.10 h1:QjFRCZxdOhBJ/UNgnBZLbNV13DlbnK0quyivTnXJM20= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -882,6 +879,8 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.3.0 h1:MfDY1b1/0xN1CyMlQDac0ziEy9zJQd9CXBRRDHw2jJo= +gotest.tools/v3 v3.3.0/go.mod h1:Mcr9QNxkg0uMvy/YElmo4SpXgJKWgQvYrT7Kw5RzJ1A= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/internal/cmdtest/test_cmd.go b/internal/cmdtest/test_cmd.go new file mode 100644 index 00000000000..b837c9c399c --- /dev/null +++ b/internal/cmdtest/test_cmd.go @@ -0,0 +1,300 @@ +// Copyright 2017 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package cmdtest + +import ( + "bufio" + "bytes" + "fmt" + "io" + "os" + "os/exec" + "regexp" + "strings" + "sync" + "sync/atomic" + "syscall" + "testing" + "text/template" + "time" + + "github.com/docker/docker/pkg/reexec" +) + +func NewTestCmd(t *testing.T, data interface{}) *TestCmd { + return &TestCmd{T: t, Data: data} +} + +type TestCmd struct { + // For total convenience, all testing methods are available. + *testing.T + + Func template.FuncMap + Data interface{} + Cleanup func() + + cmd *exec.Cmd + stdout *bufio.Reader + stdin io.WriteCloser + stderr *testlogger + // Err will contain the process exit error or interrupt signal error + Err error +} + +var id int32 + +// Run exec's the current binary using name as argv[0] which will trigger the +// reexec init function for that name (e.g. "geth-test" in cmd/geth/run_test.go) +func (tt *TestCmd) Run(name string, args ...string) { + id := atomic.AddInt32(&id, 1) + tt.stderr = &testlogger{t: tt.T, name: fmt.Sprintf("%d", id)} + tt.cmd = &exec.Cmd{ + Path: reexec.Self(), + Args: append([]string{name}, args...), + Stderr: tt.stderr, + } + stdout, err := tt.cmd.StdoutPipe() + if err != nil { + tt.Fatal(err) + } + tt.stdout = bufio.NewReader(stdout) + if tt.stdin, err = tt.cmd.StdinPipe(); err != nil { + tt.Fatal(err) + } + if err := tt.cmd.Start(); err != nil { + tt.Fatal(err) + } +} + +// InputLine writes the given text to the child's stdin. 
+// This method can also be called from an expect template, e.g.: +// +// geth.expect(`Passphrase: {{.InputLine "password"}}`) +func (tt *TestCmd) InputLine(s string) string { + io.WriteString(tt.stdin, s+"\n") + return "" +} + +func (tt *TestCmd) SetTemplateFunc(name string, fn interface{}) { + if tt.Func == nil { + tt.Func = make(map[string]interface{}) + } + tt.Func[name] = fn +} + +// Expect runs its argument as a template, then expects the +// child process to output the result of the template within 5s. +// +// If the template starts with a newline, the newline is removed +// before matching. +func (tt *TestCmd) Expect(tplsource string) { + // Generate the expected output by running the template. + tpl := template.Must(template.New("").Funcs(tt.Func).Parse(tplsource)) + wantbuf := new(bytes.Buffer) + if err := tpl.Execute(wantbuf, tt.Data); err != nil { + panic(err) + } + // Trim exactly one newline at the beginning. This makes tests look + // much nicer because all expect strings are at column 0. + want := bytes.TrimPrefix(wantbuf.Bytes(), []byte("\n")) + if err := tt.matchExactOutput(want); err != nil { + tt.Fatal(err) + } + tt.Logf("Matched stdout text:\n%s", want) +} + +// Output reads all output from stdout, and returns the data. +func (tt *TestCmd) Output() []byte { + var buf []byte + tt.withKillTimeout(func() { buf, _ = io.ReadAll(tt.stdout) }) + return buf +} + +func (tt *TestCmd) matchExactOutput(want []byte) error { + buf := make([]byte, len(want)) + n := 0 + tt.withKillTimeout(func() { n, _ = io.ReadFull(tt.stdout, buf) }) + buf = buf[:n] + if n < len(want) || !bytes.Equal(buf, want) { + // Grab any additional buffered output in case of mismatch + // because it might help with debugging. + buf = append(buf, make([]byte, tt.stdout.Buffered())...) + tt.stdout.Read(buf[n:]) + // Find the mismatch position. 
+ for i := 0; i < n; i++ { + if want[i] != buf[i] { + return fmt.Errorf("output mismatch at ◊:\n---------------- (stdout text)\n%s◊%s\n---------------- (expected text)\n%s", + buf[:i], buf[i:n], want) + } + } + if n < len(want) { + return fmt.Errorf("not enough output, got until ◊:\n---------------- (stdout text)\n%s\n---------------- (expected text)\n%s◊%s", + buf, want[:n], want[n:]) + } + } + return nil +} + +// ExpectRegexp expects the child process to output text matching the +// given regular expression within 5s. +// +// Note that an arbitrary amount of output may be consumed by the +// regular expression. This usually means that expect cannot be used +// after ExpectRegexp. +func (tt *TestCmd) ExpectRegexp(regex string) (*regexp.Regexp, []string) { + regex = strings.TrimPrefix(regex, "\n") + var ( + re = regexp.MustCompile(regex) + rtee = &runeTee{in: tt.stdout} + matches []int + ) + tt.withKillTimeout(func() { matches = re.FindReaderSubmatchIndex(rtee) }) + output := rtee.buf.Bytes() + if matches == nil { + tt.Fatalf("Output did not match:\n---------------- (stdout text)\n%s\n---------------- (regular expression)\n%s", + output, regex) + return re, nil + } + tt.Logf("Matched stdout text:\n%s", output) + var submatches []string + for i := 0; i < len(matches); i += 2 { + submatch := string(output[matches[i]:matches[i+1]]) + submatches = append(submatches, submatch) + } + return re, submatches +} + +// ExpectExit expects the child process to exit within 5s without +// printing any additional text on stdout. 
+func (tt *TestCmd) ExpectExit() { + var output []byte + tt.withKillTimeout(func() { + output, _ = io.ReadAll(tt.stdout) + }) + tt.WaitExit() + if tt.Cleanup != nil { + tt.Cleanup() + } + if len(output) > 0 { + tt.Errorf("Unmatched stdout text:\n%s", output) + } +} + +func (tt *TestCmd) WaitExit() { + tt.Err = tt.cmd.Wait() +} + +func (tt *TestCmd) Interrupt() { + tt.Err = tt.cmd.Process.Signal(os.Interrupt) +} + +// ExitStatus exposes the process' OS exit code +// It will only return a valid value after the process has finished. +func (tt *TestCmd) ExitStatus() int { + if tt.Err != nil { + exitErr := tt.Err.(*exec.ExitError) + if exitErr != nil { + if status, ok := exitErr.Sys().(syscall.WaitStatus); ok { + return status.ExitStatus() + } + } + } + return 0 +} + +// StderrText returns any stderr output written so far. +// The returned text holds all log lines after ExpectExit has +// returned. +func (tt *TestCmd) StderrText() string { + tt.stderr.mu.Lock() + defer tt.stderr.mu.Unlock() + return tt.stderr.buf.String() +} + +func (tt *TestCmd) CloseStdin() { + tt.stdin.Close() +} + +func (tt *TestCmd) Kill() { + tt.cmd.Process.Kill() + if tt.Cleanup != nil { + tt.Cleanup() + } +} + +func (tt *TestCmd) withKillTimeout(fn func()) { + timeout := time.AfterFunc(5*time.Second, func() { + tt.Log("killing the child process (timeout)") + tt.Kill() + }) + defer timeout.Stop() + fn() +} + +// testlogger logs all written lines via t.Log and also +// collects them for later inspection. +type testlogger struct { + t *testing.T + mu sync.Mutex + buf bytes.Buffer + name string +} + +func (tl *testlogger) Write(b []byte) (n int, err error) { + lines := bytes.Split(b, []byte("\n")) + for _, line := range lines { + if len(line) > 0 { + tl.t.Logf("(stderr:%v) %s", tl.name, line) + } + } + tl.mu.Lock() + tl.buf.Write(b) + tl.mu.Unlock() + return len(b), err +} + +// runeTee collects text read through it into buf. 
+type runeTee struct { + in interface { + io.Reader + io.ByteReader + io.RuneReader + } + buf bytes.Buffer +} + +func (rtee *runeTee) Read(b []byte) (n int, err error) { + n, err = rtee.in.Read(b) + rtee.buf.Write(b[:n]) + return n, err +} + +func (rtee *runeTee) ReadRune() (r rune, size int, err error) { + r, size, err = rtee.in.ReadRune() + if err == nil { + rtee.buf.WriteRune(r) + } + return r, size, err +} + +func (rtee *runeTee) ReadByte() (b byte, err error) { + b, err = rtee.in.ReadByte() + if err == nil { + rtee.buf.WriteByte(b) + } + return b, err +} diff --git a/tests/state_test_util.go b/tests/state_test_util.go index 4989ab2af97..51e3a94079c 100644 --- a/tests/state_test_util.go +++ b/tests/state_test_util.go @@ -190,7 +190,8 @@ func (t *StateTest) RunNoVerify(rules *params.Rules, tx kv.RwTx, subtest StateSu // Prepare the EVM. txContext := core.NewEVMTxContext(msg) contractHasTEVM := func(common.Hash) (bool, error) { return false, nil } - context := core.NewEVMBlockContext(block.Header(), nil, nil, &t.json.Env.Coinbase, contractHasTEVM) + header := block.Header() + context := core.NewEVMBlockContext(header, core.GetHashFn(header, nil), nil, &t.json.Env.Coinbase, contractHasTEVM) context.GetHash = vmTestBlockHash if baseFee != nil { context.BaseFee = new(uint256.Int) diff --git a/turbo/transactions/tracing.go b/turbo/transactions/tracing.go index 3c3a2b3b557..d0d569c93fc 100644 --- a/turbo/transactions/tracing.go +++ b/turbo/transactions/tracing.go @@ -42,7 +42,8 @@ func ComputeTxEnv(ctx context.Context, block *types.Block, cfg *params.ChainConf // Recompute transactions up to the target index. 
signer := types.MakeSigner(cfg, block.NumberU64()) - BlockContext := core.NewEVMBlockContext(block.Header(), getHeader, engine, nil, contractHasTEVM) + header := block.Header() + BlockContext := core.NewEVMBlockContext(header, core.GetHashFn(header, getHeader), engine, nil, contractHasTEVM) vmenv := vm.NewEVM(BlockContext, vm.TxContext{}, statedb, cfg, vm.Config{}) rules := vmenv.ChainRules() for idx, tx := range block.Transactions() { From 9f5b4015472a9ad833e84dfe0476e83911b078ff Mon Sep 17 00:00:00 2001 From: Cory Date: Thu, 7 Jul 2022 23:11:07 -0700 Subject: [PATCH 059/152] Fix grafana startup in docker-compose (#4677) * grafana user 472:0 * Fix typo extra ~ in Makefile * Create grafana dir correctly * chown -R --- Makefile | 11 ++++++++--- docker-compose.yml | 2 +- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index 5fcc65b0ef1..34caf2ca476 100644 --- a/Makefile +++ b/Makefile @@ -67,8 +67,13 @@ ifdef XDG_DATA_HOME endif xdg_data_home_subdirs = $(xdg_data_home)/erigon $(xdg_data_home)/erigon-grafana $(xdg_data_home)/erigon-prometheus -docker-compose: validate_docker_build_args +setup_xdg_data_home: mkdir -p $(xdg_data_home_subdirs) + ls -aln $(xdg_data_home) | grep -E "472*0*erigon-grafana" || sudo chown -R 472:0 $(xdg_data_home)/erigon-grafana + @echo "✔️ xdg_data_home setup" + @ls -al $(xdg_data_home) + +docker-compose: validate_docker_build_args setup_xdg_data_home docker-compose up # debug build allows see C stack traces, run it with GOTRACEBACK=crash. You don't need debug build for C pit for profiling. To profile C code use SETCGOTRCKEBACK=1 @@ -196,7 +201,7 @@ endif ifdef DOCKER sudo usermod -aG docker $(ERIGON_USER) endif - sudo -u $(ERIGON_USER) mkdir -p ~$(ERIGON_USER_XDG_DATA_HOME) + sudo -u $(ERIGON_USER) mkdir -p $(ERIGON_USER_XDG_DATA_HOME) # create "erigon" user user_macos: @@ -206,4 +211,4 @@ user_macos: sudo dscl . -create /Users/$(ERIGON_USER) PrimaryGroupID $(ERIGON_USER_GID) sudo dscl . 
-create /Users/$(ERIGON_USER) NFSHomeDirectory /Users/$(ERIGON_USER) sudo dscl . -append /Groups/admin GroupMembership $(ERIGON_USER) - sudo -u $(ERIGON_USER) mkdir -p ~$(ERIGON_USER_XDG_DATA_HOME) + sudo -u $(ERIGON_USER) mkdir -p $(ERIGON_USER_XDG_DATA_HOME) diff --git a/docker-compose.yml b/docker-compose.yml index ff5d5ad67fa..1fed2c340c6 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -77,7 +77,7 @@ services: grafana: image: grafana/grafana:9.0.2 - user: ${DOCKER_UID}:${DOCKER_GID} # Uses erigon user from Dockerfile + user: "472:0" # required for grafana version >= 7.3 ports: [ "3000:3000" ] volumes: - ${ERIGON_GRAFANA_CONFIG:-./cmd/prometheus/grafana.ini}:/etc/grafana/grafana.ini From eec5fa4d41b0912a6d9d5652edf94a715dcef7d6 Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Fri, 8 Jul 2022 11:14:16 +0200 Subject: [PATCH 060/152] Add support for eth/67 (#4564) * Add eth/67 * Listen to eth/66 on a separate port * Fix compilation error * Fix cfg66.ListenAddr * Update erigon ports in README * Expose port 30304 in docker * P2pProtocolVersionFlag instead of second sentry * Remove "66 by default" from usage * Small comment --- README.md | 4 ++-- cmd/observer/observer/handshake.go | 10 +++++---- cmd/sentry/main.go | 11 ++++----- cmd/sentry/sentry/broadcast.go | 10 ++++----- cmd/sentry/sentry/sentry_api.go | 4 ++-- cmd/sentry/sentry/sentry_grpc_server.go | 10 +++++++-- cmd/utils/flags.go | 10 +++++++++ eth/backend.go | 12 +++++----- eth/protocols/eth/protocol.go | 30 +++++++++++++++++++++++++ node/nodecfg/defaults.go | 2 +- p2p/server.go | 12 ++++++---- turbo/cli/default_flags.go | 1 + 12 files changed, 83 insertions(+), 33 deletions(-) diff --git a/README.md b/README.md index 6b96cb38e9e..5a85d03e55a 100644 --- a/README.md +++ b/README.md @@ -393,12 +393,12 @@ Detailed explanation: [./docs/programmers_guide/db_faq.md](./docs/programmers_gu | Port | Protocol | Purpose | Expose | 
|:-----:|:---------:|:----------------------:|:-------:| -| 30303 | TCP & UDP | eth/66 peering | Public | +| 30303 | TCP & UDP | eth/66 or 67 peering | Public | | 9090 | TCP | gRPC Connections | Private | | 42069 | TCP & UDP | Snap sync (Bittorrent) | Public | | 6060 | TCP | Metrics or Pprof | Private | -Typically, 30303 and 30304 are exposed to the internet to allow incoming peering connections. 9090 is exposed only +Typically, 30303 is exposed to the internet to allow incoming peering connections. 9090 is exposed only internally for rpcdaemon or other connections, (e.g. rpcdaemon -> erigon). #### `RPC` ports diff --git a/cmd/observer/observer/handshake.go b/cmd/observer/observer/handshake.go index 6fdcd3414ab..42cde181172 100644 --- a/cmd/observer/observer/handshake.go +++ b/cmd/observer/observer/handshake.go @@ -4,6 +4,11 @@ import ( "context" "crypto/ecdsa" "fmt" + "math/big" + "net" + "strings" + "time" + "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/core/forkid" "github.com/ledgerwatch/erigon/crypto" @@ -12,10 +17,6 @@ import ( "github.com/ledgerwatch/erigon/p2p/rlpx" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rlp" - "math/big" - "net" - "strings" - "time" ) // https://github.com/ethereum/devp2p/blob/master/rlpx.md#p2p-capability @@ -240,6 +241,7 @@ func makeOurHelloMessage(myPrivateKey *ecdsa.PrivateKey) HelloMessage { {Name: eth.ProtocolName, Version: 64}, {Name: eth.ProtocolName, Version: 65}, {Name: eth.ProtocolName, Version: eth.ETH66}, + {Name: eth.ProtocolName, Version: eth.ETH67}, } return HelloMessage{ diff --git a/cmd/sentry/main.go b/cmd/sentry/main.go index 80ed49d8522..2c7f17f9e14 100644 --- a/cmd/sentry/main.go +++ b/cmd/sentry/main.go @@ -8,7 +8,6 @@ import ( "github.com/ledgerwatch/erigon/cmd/sentry/sentry" "github.com/ledgerwatch/erigon/cmd/utils" "github.com/ledgerwatch/erigon/common/paths" - "github.com/ledgerwatch/erigon/eth/protocols/eth" "github.com/ledgerwatch/erigon/internal/debug" 
"github.com/ledgerwatch/erigon/node/nodecfg/datadir" node2 "github.com/ledgerwatch/erigon/turbo/node" @@ -27,7 +26,7 @@ var ( trustedPeers []string // trusted peers discoveryDNS []string nodiscover bool // disable sentry's discovery mechanism - protocol string + protocol int netRestrict string // CIDR to restrict peering to maxPeers int maxPendPeers int @@ -45,7 +44,7 @@ func init() { rootCmd.Flags().StringSliceVar(&trustedPeers, utils.TrustedPeersFlag.Name, []string{}, utils.TrustedPeersFlag.Usage) rootCmd.Flags().StringSliceVar(&discoveryDNS, utils.DNSDiscoveryFlag.Name, []string{}, utils.DNSDiscoveryFlag.Usage) rootCmd.Flags().BoolVar(&nodiscover, utils.NoDiscoverFlag.Name, false, utils.NoDiscoverFlag.Usage) - rootCmd.Flags().StringVar(&protocol, "p2p.protocol", "eth66", "eth66") + rootCmd.Flags().IntVar(&protocol, utils.P2pProtocolVersionFlag.Name, utils.P2pProtocolVersionFlag.Value, utils.P2pProtocolVersionFlag.Usage) rootCmd.Flags().StringVar(&netRestrict, utils.NetrestrictFlag.Name, utils.NetrestrictFlag.Value, utils.NetrestrictFlag.Usage) rootCmd.Flags().IntVar(&maxPeers, utils.MaxPeersFlag.Name, utils.MaxPeersFlag.Value, utils.MaxPeersFlag.Usage) rootCmd.Flags().IntVar(&maxPendPeers, utils.MaxPendingPeersFlag.Name, utils.MaxPendingPeersFlag.Value, utils.MaxPendingPeersFlag.Usage) @@ -68,8 +67,6 @@ var rootCmd = &cobra.Command{ debug.Exit() }, RunE: func(cmd *cobra.Command, args []string) error { - p := eth.ETH66 - dirs := datadir.New(datadirCli) nodeConfig := node2.NewNodeConfig() p2pConfig, err := utils.NewP2PConfig( @@ -83,13 +80,13 @@ var rootCmd = &cobra.Command{ staticPeers, trustedPeers, uint(port), - uint(p), + uint(protocol), ) if err != nil { return err } - return sentry.Sentry(cmd.Context(), dirs, sentryAddr, discoveryDNS, p2pConfig, uint(p), healthCheck) + return sentry.Sentry(cmd.Context(), dirs, sentryAddr, discoveryDNS, p2pConfig, uint(protocol), healthCheck) }, } diff --git a/cmd/sentry/sentry/broadcast.go b/cmd/sentry/sentry/broadcast.go 
index 869fede9555..82ce42abefa 100644 --- a/cmd/sentry/sentry/broadcast.go +++ b/cmd/sentry/sentry/broadcast.go @@ -54,7 +54,7 @@ func (cs *MultiClient) PropagateNewBlockHashes(ctx context.Context, announces [] switch sentry.Protocol() { - case eth.ETH66: + case eth.ETH66, eth.ETH67: if req66 == nil { req66 = &proto_sentry.OutboundMessageData{ Id: proto_sentry.MessageId_NEW_BLOCK_HASHES_66, @@ -95,7 +95,7 @@ func (cs *MultiClient) BroadcastNewBlock(ctx context.Context, block *types.Block switch sentry.Protocol() { - case eth.ETH66: + case eth.ETH66, eth.ETH67: if req66 == nil { req66 = &proto_sentry.SendMessageToRandomPeersRequest{ MaxPeers: 1024, @@ -153,7 +153,7 @@ func (cs *MultiClient) BroadcastLocalPooledTxs(ctx context.Context, txs []common } switch sentry.Protocol() { - case eth.ETH66: + case eth.ETH66, eth.ETH67: if req66 == nil { req66 = &proto_sentry.OutboundMessageData{ Id: proto_sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES_66, @@ -213,7 +213,7 @@ func (cs *MultiClient) BroadcastRemotePooledTxs(ctx context.Context, txs []commo switch sentry.Protocol() { - case eth.ETH66: + case eth.ETH66, eth.ETH67: if req66 == nil { req66 = &proto_sentry.SendMessageToRandomPeersRequest{ MaxPeers: 1024, @@ -264,7 +264,7 @@ func (cs *MultiClient) PropagatePooledTxsToPeersList(ctx context.Context, peers for _, peer := range peers { switch sentry.Protocol() { - case eth.ETH66: + case eth.ETH66, eth.ETH67: req66 := &proto_sentry.SendMessageByIdRequest{ PeerId: peer, Data: &proto_sentry.OutboundMessageData{ diff --git a/cmd/sentry/sentry/sentry_api.go b/cmd/sentry/sentry/sentry_api.go index fbb1ed814d9..b76ca04b045 100644 --- a/cmd/sentry/sentry/sentry_api.go +++ b/cmd/sentry/sentry/sentry_api.go @@ -44,7 +44,7 @@ func (cs *MultiClient) SendBodyRequest(ctx context.Context, req *bodydownload.Bo } switch cs.sentries[i].Protocol() { - case eth.ETH66: + case eth.ETH66, eth.ETH67: //log.Info(fmt.Sprintf("Sending body request for %v", req.BlockNums)) var bytes []byte var err 
error @@ -85,7 +85,7 @@ func (cs *MultiClient) SendHeaderRequest(ctx context.Context, req *headerdownloa continue } switch cs.sentries[i].Protocol() { - case eth.ETH66: + case eth.ETH66, eth.ETH67: //log.Info(fmt.Sprintf("Sending header request {hash: %x, height: %d, length: %d}", req.Hash, req.Number, req.Length)) reqData := ð.GetBlockHeadersPacket66{ RequestId: rand.Uint64(), diff --git a/cmd/sentry/sentry/sentry_grpc_server.go b/cmd/sentry/sentry/sentry_grpc_server.go index 098c77693b6..6fe3fc4c0f7 100644 --- a/cmd/sentry/sentry/sentry_grpc_server.go +++ b/cmd/sentry/sentry/sentry_grpc_server.go @@ -357,6 +357,10 @@ func runPeer( } send(eth.ToProto[protocol][msg.Code], peerID, b) case eth.GetNodeDataMsg: + if protocol >= eth.ETH67 { + msg.Discard() + return fmt.Errorf("unexpected GetNodeDataMsg from %s in eth/%d", peerID, protocol) + } if !hasSubscribers(eth.ToProto[protocol][msg.Code]) { continue } @@ -491,7 +495,7 @@ func NewGrpcServer(ctx context.Context, dialCandidates enode.Iterator, readNodeI peersStreams: NewPeersStreams(), } - if protocol != eth.ETH66 { + if protocol != eth.ETH66 && protocol != eth.ETH67 { panic(fmt.Errorf("unexpected p2p protocol: %d", protocol)) } @@ -631,7 +635,7 @@ func (ss *GrpcServer) writePeer(logPrefix string, peerInfo *PeerInfo, msgcode ui func (ss *GrpcServer) startSync(ctx context.Context, bestHash common.Hash, peerID [64]byte) error { switch ss.Protocol.Version { - case eth.ETH66: + case eth.ETH66, eth.ETH67: b, err := rlp.EncodeToBytes(ð.GetBlockHeadersPacket66{ RequestId: rand.Uint64(), GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ @@ -805,6 +809,8 @@ func (ss *GrpcServer) HandShake(context.Context, *emptypb.Empty) (*proto_sentry. 
switch ss.Protocol.Version { case eth.ETH66: reply.Protocol = proto_sentry.Protocol_ETH66 + case eth.ETH67: + reply.Protocol = proto_sentry.Protocol_ETH67 } return reply, nil } diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 92b0ebf4e76..638c360ff2b 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -504,6 +504,11 @@ var ( Usage: "Network listening port", Value: 30303, } + P2pProtocolVersionFlag = cli.IntFlag{ + Name: "p2p.protocol", + Usage: "Version of eth p2p protocol", + Value: int(nodecfg.DefaultConfig.P2P.ProtocolVersion), + } SentryAddrFlag = cli.StringFlag{ Name: "sentry.api.addr", Usage: "comma separated sentry addresses ':,:'", @@ -850,6 +855,8 @@ func NewP2PConfig( switch protocol { case eth.ETH66: enodeDBPath = filepath.Join(dirs.Nodes, "eth66") + case eth.ETH67: + enodeDBPath = filepath.Join(dirs.Nodes, "eth67") default: return nil, fmt.Errorf("unknown protocol: %v", protocol) } @@ -910,6 +917,9 @@ func setListenAddress(ctx *cli.Context, cfg *p2p.Config) { if ctx.GlobalIsSet(ListenPortFlag.Name) { cfg.ListenAddr = fmt.Sprintf(":%d", ctx.GlobalInt(ListenPortFlag.Name)) } + if ctx.GlobalIsSet(P2pProtocolVersionFlag.Name) { + cfg.ProtocolVersion = uint(ctx.GlobalInt(P2pProtocolVersionFlag.Name)) + } if ctx.GlobalIsSet(SentryAddrFlag.Name) { cfg.SentryAddr = SplitAndTrim(ctx.GlobalString(SentryAddrFlag.Name)) } diff --git a/eth/backend.go b/eth/backend.go index 68fa281e2e8..25fbd139b41 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -235,16 +235,16 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere return res } - d66, err := setupDiscovery(backend.config.EthDiscoveryURLs) + discovery, err := setupDiscovery(backend.config.EthDiscoveryURLs) if err != nil { return nil, err } + cfg := stack.Config().P2P + cfg.NodeDatabase = filepath.Join(stack.Config().Dirs.Nodes, eth.ProtocolToString[cfg.ProtocolVersion]) + server := sentry.NewGrpcServer(backend.sentryCtx, discovery, readNodeInfo, &cfg, 
cfg.ProtocolVersion) - cfg66 := stack.Config().P2P - cfg66.NodeDatabase = filepath.Join(stack.Config().Dirs.Nodes, "eth66") - server66 := sentry.NewGrpcServer(backend.sentryCtx, d66, readNodeInfo, &cfg66, eth.ETH66) - backend.sentryServers = append(backend.sentryServers, server66) - sentries = []direct.SentryClient{direct.NewSentryClientDirect(eth.ETH66, server66)} + backend.sentryServers = append(backend.sentryServers, server) + sentries = []direct.SentryClient{direct.NewSentryClientDirect(cfg.ProtocolVersion, server)} go func() { logEvery := time.NewTicker(120 * time.Second) diff --git a/eth/protocols/eth/protocol.go b/eth/protocols/eth/protocol.go index 37569605b82..c03fb32cfa1 100644 --- a/eth/protocols/eth/protocol.go +++ b/eth/protocols/eth/protocol.go @@ -34,10 +34,12 @@ import ( // Constants to match up protocol versions and messages const ( ETH66 = 66 + ETH67 = 67 ) var ProtocolToString = map[uint]string{ ETH66: "eth66", + ETH67: "eth67", } // ProtocolName is the official short name of the `eth` protocol used during @@ -86,6 +88,20 @@ var ToProto = map[uint]map[uint64]proto_sentry.MessageId{ GetPooledTransactionsMsg: proto_sentry.MessageId_GET_POOLED_TRANSACTIONS_66, PooledTransactionsMsg: proto_sentry.MessageId_POOLED_TRANSACTIONS_66, }, + ETH67: { + GetBlockHeadersMsg: proto_sentry.MessageId_GET_BLOCK_HEADERS_66, + BlockHeadersMsg: proto_sentry.MessageId_BLOCK_HEADERS_66, + GetBlockBodiesMsg: proto_sentry.MessageId_GET_BLOCK_BODIES_66, + BlockBodiesMsg: proto_sentry.MessageId_BLOCK_BODIES_66, + GetReceiptsMsg: proto_sentry.MessageId_GET_RECEIPTS_66, + ReceiptsMsg: proto_sentry.MessageId_RECEIPTS_66, + NewBlockHashesMsg: proto_sentry.MessageId_NEW_BLOCK_HASHES_66, + NewBlockMsg: proto_sentry.MessageId_NEW_BLOCK_66, + TransactionsMsg: proto_sentry.MessageId_TRANSACTIONS_66, + NewPooledTransactionHashesMsg: proto_sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES_66, + GetPooledTransactionsMsg: proto_sentry.MessageId_GET_POOLED_TRANSACTIONS_66, + 
PooledTransactionsMsg: proto_sentry.MessageId_POOLED_TRANSACTIONS_66, + }, } var FromProto = map[uint]map[proto_sentry.MessageId]uint64{ @@ -105,6 +121,20 @@ var FromProto = map[uint]map[proto_sentry.MessageId]uint64{ proto_sentry.MessageId_GET_POOLED_TRANSACTIONS_66: GetPooledTransactionsMsg, proto_sentry.MessageId_POOLED_TRANSACTIONS_66: PooledTransactionsMsg, }, + ETH67: { + proto_sentry.MessageId_GET_BLOCK_HEADERS_66: GetBlockHeadersMsg, + proto_sentry.MessageId_BLOCK_HEADERS_66: BlockHeadersMsg, + proto_sentry.MessageId_GET_BLOCK_BODIES_66: GetBlockBodiesMsg, + proto_sentry.MessageId_BLOCK_BODIES_66: BlockBodiesMsg, + proto_sentry.MessageId_GET_RECEIPTS_66: GetReceiptsMsg, + proto_sentry.MessageId_RECEIPTS_66: ReceiptsMsg, + proto_sentry.MessageId_NEW_BLOCK_HASHES_66: NewBlockHashesMsg, + proto_sentry.MessageId_NEW_BLOCK_66: NewBlockMsg, + proto_sentry.MessageId_TRANSACTIONS_66: TransactionsMsg, + proto_sentry.MessageId_NEW_POOLED_TRANSACTION_HASHES_66: NewPooledTransactionHashesMsg, + proto_sentry.MessageId_GET_POOLED_TRANSACTIONS_66: GetPooledTransactionsMsg, + proto_sentry.MessageId_POOLED_TRANSACTIONS_66: PooledTransactionsMsg, + }, } // Packet represents a p2p message in the `eth` protocol. 
diff --git a/node/nodecfg/defaults.go b/node/nodecfg/defaults.go index d13022b93ee..a421c79f235 100644 --- a/node/nodecfg/defaults.go +++ b/node/nodecfg/defaults.go @@ -45,7 +45,7 @@ var DefaultConfig = Config{ WSModules: []string{"net", "web3"}, P2P: p2p.Config{ ListenAddr: ":30303", - ListenAddr65: ":30304", + ProtocolVersion: 66, // eth/66 by default MaxPeers: 100, MaxPendingPeers: 1000, NAT: nat.Any(), diff --git a/p2p/server.go b/p2p/server.go index b7b152b8547..d4d20274a4b 100644 --- a/p2p/server.go +++ b/p2p/server.go @@ -24,13 +24,14 @@ import ( "encoding/hex" "errors" "fmt" - "golang.org/x/sync/semaphore" "net" "sort" "sync" "sync/atomic" "time" + "golang.org/x/sync/semaphore" + "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/debug" "github.com/ledgerwatch/erigon/common/mclock" @@ -136,9 +137,12 @@ type Config struct { // If the port is zero, the operating system will pick a port. The // ListenAddr field will be updated with the actual address when // the server is started. 
- ListenAddr string - ListenAddr65 string - SentryAddr []string + ListenAddr string + + // eth/66, eth/67, etc + ProtocolVersion uint + + SentryAddr []string // If set to a non-nil value, the given NAT port mapper // is used to make the listening port available to the diff --git a/turbo/cli/default_flags.go b/turbo/cli/default_flags.go index 7c7e783b8c4..26af061f036 100644 --- a/turbo/cli/default_flags.go +++ b/turbo/cli/default_flags.go @@ -89,6 +89,7 @@ var DefaultFlags = []cli.Flag{ utils.TorrentDownloadRateFlag, utils.TorrentVerbosityFlag, utils.ListenPortFlag, + utils.P2pProtocolVersionFlag, utils.NATFlag, utils.NoDiscoverFlag, utils.DiscoveryV5Flag, From d54a0072893f4d81e59ebda98fc4327316ccceb6 Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Fri, 8 Jul 2022 12:05:57 +0200 Subject: [PATCH 061/152] Enable in-memory overlay by default (#4680) --- cmd/utils/flags.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 638c360ff2b..1ad11738fb9 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -402,7 +402,7 @@ var ( Name: "experimental.tevm", Usage: "Enables Transpiled EVM experiment", } - MemoryOverlayFlag = cli.BoolFlag{ + MemoryOverlayFlag = cli.BoolTFlag{ Name: "experimental.overlay", Usage: "Enables In-Memory Overlay for PoS", } From 225935b376a92548d3a2d85d9ba9dfae592d8106 Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Fri, 8 Jul 2022 15:18:36 +0200 Subject: [PATCH 062/152] Bump log level of some PoS messages from Trace to Debug (#4682) --- cmd/rpcdaemon/commands/engine_api.go | 4 ++-- cmd/rpcdaemon22/commands/engine_api.go | 4 ++-- eth/stagedsync/stage_headers.go | 8 ++++---- turbo/stages/headerdownload/header_algos.go | 14 +++++++------- 4 files changed, 15 insertions(+), 15 deletions(-) diff --git a/cmd/rpcdaemon/commands/engine_api.go b/cmd/rpcdaemon/commands/engine_api.go index 
e6f6e149525..a390138fc0d 100644 --- a/cmd/rpcdaemon/commands/engine_api.go +++ b/cmd/rpcdaemon/commands/engine_api.go @@ -88,7 +88,7 @@ func convertPayloadStatus(x *remote.EnginePayloadStatus) map[string]interface{} } func (e *EngineImpl) ForkchoiceUpdatedV1(ctx context.Context, forkChoiceState *ForkChoiceState, payloadAttributes *PayloadAttributes) (map[string]interface{}, error) { - log.Trace("Received ForkchoiceUpdated", "head", forkChoiceState.HeadHash, "safe", forkChoiceState.HeadHash, "finalized", forkChoiceState.FinalizedBlockHash, + log.Debug("Received ForkchoiceUpdated", "head", forkChoiceState.HeadHash, "safe", forkChoiceState.HeadHash, "finalized", forkChoiceState.FinalizedBlockHash, "build", payloadAttributes != nil) var prepareParameters *remote.EnginePayloadAttributes @@ -143,7 +143,7 @@ func (e *EngineImpl) ForkchoiceUpdatedV1(ctx context.Context, forkChoiceState *F // NewPayloadV1 processes new payloads (blocks) from the beacon chain. // See https://github.com/ethereum/execution-apis/blob/main/src/engine/specification.md#engine_newpayloadv1 func (e *EngineImpl) NewPayloadV1(ctx context.Context, payload *ExecutionPayload) (map[string]interface{}, error) { - log.Trace("Received NewPayload", "height", uint64(payload.BlockNumber), "hash", payload.BlockHash) + log.Debug("Received NewPayload", "height", uint64(payload.BlockNumber), "hash", payload.BlockHash) var baseFee *uint256.Int if payload.BaseFeePerGas != nil { diff --git a/cmd/rpcdaemon22/commands/engine_api.go b/cmd/rpcdaemon22/commands/engine_api.go index 1d7984fa91a..4c2aa76ef35 100644 --- a/cmd/rpcdaemon22/commands/engine_api.go +++ b/cmd/rpcdaemon22/commands/engine_api.go @@ -87,7 +87,7 @@ func convertPayloadStatus(x *remote.EnginePayloadStatus) map[string]interface{} } func (e *EngineImpl) ForkchoiceUpdatedV1(ctx context.Context, forkChoiceState *ForkChoiceState, payloadAttributes *PayloadAttributes) (map[string]interface{}, error) { - log.Trace("Received ForkchoiceUpdated", "head", 
forkChoiceState.HeadHash, "safe", forkChoiceState.HeadHash, "finalized", forkChoiceState.FinalizedBlockHash, + log.Debug("Received ForkchoiceUpdated", "head", forkChoiceState.HeadHash, "safe", forkChoiceState.HeadHash, "finalized", forkChoiceState.FinalizedBlockHash, "build", payloadAttributes != nil) var prepareParameters *remote.EnginePayloadAttributes @@ -125,7 +125,7 @@ func (e *EngineImpl) ForkchoiceUpdatedV1(ctx context.Context, forkChoiceState *F // NewPayloadV1 processes new payloads (blocks) from the beacon chain. // See https://github.com/ethereum/execution-apis/blob/main/src/engine/specification.md#engine_newpayloadv1 func (e *EngineImpl) NewPayloadV1(ctx context.Context, payload *ExecutionPayload) (map[string]interface{}, error) { - log.Trace("Received NewPayload", "height", uint64(payload.BlockNumber), "hash", payload.BlockHash) + log.Debug("Received NewPayload", "height", uint64(payload.BlockNumber), "hash", payload.BlockHash) var baseFee *uint256.Int if payload.BaseFeePerGas != nil { diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 1202e3f89b9..61b6d0882ec 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -461,7 +461,7 @@ func handleNewPayload( headerNumber := header.Number.Uint64() headerHash := header.Hash() - log.Trace(fmt.Sprintf("[%s] Handling new payload", s.LogPrefix()), "height", headerNumber, "hash", headerHash) + log.Debug(fmt.Sprintf("[%s] Handling new payload", s.LogPrefix()), "height", headerNumber, "hash", headerHash) cfg.hd.UpdateTopSeenHeightPoS(headerNumber) existingCanonicalHash, err := rawdb.ReadCanonicalHash(tx, headerNumber) @@ -541,9 +541,9 @@ func handleNewPayload( }, nil } - log.Trace(fmt.Sprintf("[%s] New payload begin verification", s.LogPrefix())) + log.Debug(fmt.Sprintf("[%s] New payload begin verification", s.LogPrefix())) response, success, err := verifyAndSaveNewPoSHeader(requestStatus, s, tx, cfg, header, payloadMessage.Body, headerInserter) - 
log.Trace(fmt.Sprintf("[%s] New payload verification ended", s.LogPrefix()), "success", success, "err", err) + log.Debug(fmt.Sprintf("[%s] New payload verification ended", s.LogPrefix()), "success", success, "err", err) if err != nil || !success { return response, err } @@ -656,7 +656,7 @@ func schedulePoSDownload( cfg.hd.BeaconRequestList.SetStatus(requestId, engineapi.DataWasMissing) if cfg.hd.PosStatus() != headerdownload.Idle { - log.Trace(fmt.Sprintf("[%s] Postponing PoS download since another one is in progress", s.LogPrefix()), "height", heightToDownload, "hash", hashToDownload) + log.Debug(fmt.Sprintf("[%s] Postponing PoS download since another one is in progress", s.LogPrefix()), "height", heightToDownload, "hash", hashToDownload) return } diff --git a/turbo/stages/headerdownload/header_algos.go b/turbo/stages/headerdownload/header_algos.go index c1e539b6d7e..090d09a2c81 100644 --- a/turbo/stages/headerdownload/header_algos.go +++ b/turbo/stages/headerdownload/header_algos.go @@ -420,7 +420,7 @@ func (hd *HeaderDownload) RequestMoreHeaders(currentTime time.Time) (*HeaderRequ func (hd *HeaderDownload) requestMoreHeadersForPOS(currentTime time.Time) (timeout bool, request *HeaderRequest, penalties []PenaltyItem) { anchor := hd.posAnchor if anchor == nil { - log.Trace("No PoS anchor") + log.Debug("No PoS anchor") return } @@ -626,7 +626,7 @@ func (hd *HeaderDownload) SetHeaderToDownloadPoS(hash common.Hash, height uint64 hd.lock.Lock() defer hd.lock.Unlock() - log.Trace("Set posAnchor", "blockHeight", height+1) + log.Debug("Set posAnchor", "blockHeight", height+1) hd.posAnchor = &Anchor{ parentHash: hash, blockHeight: height + 1, @@ -637,12 +637,12 @@ func (hd *HeaderDownload) ProcessHeadersPOS(csHeaders []ChainSegmentHeader, tx k if len(csHeaders) == 0 { return nil, nil } - log.Trace("Collecting...", "from", csHeaders[0].Number, "to", csHeaders[len(csHeaders)-1].Number, "len", len(csHeaders)) + log.Debug("Collecting...", "from", csHeaders[0].Number, "to", 
csHeaders[len(csHeaders)-1].Number, "len", len(csHeaders)) hd.lock.Lock() defer hd.lock.Unlock() if hd.posAnchor == nil { // May happen if peers are sending unrequested header packets after we've synced - log.Trace("posAnchor is nil") + log.Debug("posAnchor is nil") return nil, nil } @@ -673,10 +673,10 @@ func (hd *HeaderDownload) ProcessHeadersPOS(csHeaders []ChainSegmentHeader, tx k return nil, err } if hh != nil { - log.Trace("Synced", "requestId", hd.requestId) + log.Debug("Synced", "requestId", hd.requestId) if headerNumber != hh.Number.Uint64()+1 { hd.badPoSHeaders[headerHash] = header.ParentHash - return nil, fmt.Errorf("Invalid PoS segment detected: invalid block number. got %d, expected %d", headerNumber, hh.Number.Uint64()+1) + return nil, fmt.Errorf("invalid PoS segment detected: invalid block number. got %d, expected %d", headerNumber, hh.Number.Uint64()+1) } hd.posAnchor = nil hd.posStatus = Synced @@ -1429,7 +1429,7 @@ func (hd *HeaderDownload) StartPoSDownloader( if sentToPeer { // If request was actually sent to a peer, we update retry time to be 5 seconds in the future hd.UpdateRetryTime(req, currentTime, 5*time.Second /* timeout */) - log.Trace("Sent request", "height", req.Number) + log.Debug("Sent request", "height", req.Number) } } if len(penalties) > 0 { From 63f6eab6c7b076f3385d729b58e287fcb251e8b0 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Fri, 8 Jul 2022 16:48:24 +0200 Subject: [PATCH 063/152] removed envelope on txpool for PoS (#4683) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 7087daba894..a495ce75d69 100644 --- a/go.mod +++ b/go.mod @@ -36,7 +36,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible - github.com/ledgerwatch/erigon-lib v0.0.0-20220707065230-95e361fa1ed7 + github.com/ledgerwatch/erigon-lib v0.0.0-20220708142551-cf57648d7d41 
github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 diff --git a/go.sum b/go.sum index 525743beec3..fab357ab66b 100644 --- a/go.sum +++ b/go.sum @@ -383,8 +383,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3PYPwICLl+/9oulQauOuETfgFvhBDffs0= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220707065230-95e361fa1ed7 h1:ytnJHsVttH1NleI45f6FbP7HaratpDx4IPCK/D/aZwI= -github.com/ledgerwatch/erigon-lib v0.0.0-20220707065230-95e361fa1ed7/go.mod h1:bttvdtZXjh803u/CeMerKYnWvVvXTICWSfpcMeQNtmc= +github.com/ledgerwatch/erigon-lib v0.0.0-20220708142551-cf57648d7d41 h1:sTY+hHzUSFWpUiw7bUqmw1opanXeSltDBPtOzbTQLFQ= +github.com/ledgerwatch/erigon-lib v0.0.0-20220708142551-cf57648d7d41/go.mod h1:bttvdtZXjh803u/CeMerKYnWvVvXTICWSfpcMeQNtmc= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= From 8de866028a42c33ac99d77bf3c3e7f9062a9b2f1 Mon Sep 17 00:00:00 2001 From: Levi Aul Date: Fri, 8 Jul 2022 20:15:22 -0700 Subject: [PATCH 064/152] Additional bor RPC fixes (#4675) * Add borTx to GetBlockByHash; ensure borTxs have hashes; don't try to derive sender for borTxs * Surface borReceipt logs in eth_getLogs * Check for existence of borReceipt before synthesizing a borTx --- cmd/rpcdaemon/commands/eth_api.go | 24 +++++++++ cmd/rpcdaemon/commands/eth_block.go | 33 +++++++----- cmd/rpcdaemon/commands/eth_receipts.go | 51 ++++++++++-------- cmd/rpcdaemon/commands/eth_txs.go | 57 +++++++++----------- 
cmd/rpcdaemon22/commands/eth_receipts.go | 2 +- core/rawdb/bor_receipts.go | 66 +++++++++++++++++++----- core/types/access_list_tx.go | 4 -- core/types/bor_receipt.go | 44 +++++++--------- core/types/dynamic_fee_tx.go | 4 -- core/types/legacy_tx.go | 6 --- core/types/starknet_tx.go | 4 -- core/types/transaction.go | 1 - internal/ethapi/api.go | 34 ++++++++++-- turbo/adapter/ethapi/internal.go | 5 +- 14 files changed, 201 insertions(+), 134 deletions(-) diff --git a/cmd/rpcdaemon/commands/eth_api.go b/cmd/rpcdaemon/commands/eth_api.go index b5193975f74..e84ec4c1295 100644 --- a/cmd/rpcdaemon/commands/eth_api.go +++ b/cmd/rpcdaemon/commands/eth_api.go @@ -339,6 +339,30 @@ func newRPCTransaction(tx types.Transaction, blockHash common.Hash, blockNumber return result } +// newRPCBorTransaction returns a Bor transaction that will serialize to the RPC +// representation, with the given location metadata set (if available). +func newRPCBorTransaction(opaqueTx types.Transaction, txHash common.Hash, blockHash common.Hash, blockNumber uint64, index uint64, baseFee *big.Int) *RPCTransaction { + tx := opaqueTx.(*types.LegacyTx) + result := &RPCTransaction{ + Type: hexutil.Uint64(tx.Type()), + ChainID: (*hexutil.Big)(new(big.Int)), + GasPrice: (*hexutil.Big)(tx.GasPrice.ToBig()), + Gas: hexutil.Uint64(tx.GetGas()), + Hash: txHash, + Input: hexutil.Bytes(tx.GetData()), + Nonce: hexutil.Uint64(tx.GetNonce()), + From: common.Address{}, + To: tx.GetTo(), + Value: (*hexutil.Big)(tx.GetValue().ToBig()), + } + if blockHash != (common.Hash{}) { + result.BlockHash = &blockHash + result.BlockNumber = (*hexutil.Big)(new(big.Int).SetUint64(blockNumber)) + result.TransactionIndex = (*hexutil.Uint64)(&index) + } + return result +} + // newRPCPendingTransaction returns a pending transaction that will serialize to the RPC representation func newRPCPendingTransaction(tx types.Transaction, current *types.Header, config *params.ChainConfig) *RPCTransaction { var baseFee *big.Int diff --git 
a/cmd/rpcdaemon/commands/eth_block.go b/cmd/rpcdaemon/commands/eth_block.go index f3d569e8a65..a7610ede2c5 100644 --- a/cmd/rpcdaemon/commands/eth_block.go +++ b/cmd/rpcdaemon/commands/eth_block.go @@ -217,24 +217,15 @@ func (api *APIImpl) GetBlockByNumber(ctx context.Context, number rpc.BlockNumber return nil, err } var borTx types.Transaction - var borReceipt *types.Receipt + var borTxHash common.Hash if chainConfig.Bor != nil { - borTx, _, _, _, err = rawdb.ReadBorTransactionWithBlockNumberAndHash(tx, b.NumberU64(), b.Hash()) - if err != nil { - return nil, err - } + borTx, _, _, _ = rawdb.ReadBorTransactionForBlock(tx, b) if borTx != nil { - borReceipt = rawdb.ReadBorReceipt(tx, b.Hash(), b.NumberU64()) - if borReceipt != nil { - borTx, err = borTx.WithHash(borReceipt.TxHash) - if err != nil { - return nil, err - } - } + borTxHash = types.ComputeBorTxHash(b.NumberU64(), b.Hash()) } } - response, err := ethapi.RPCMarshalBlockEx(b, true, fullTx, borTx, borReceipt, additionalFields) + response, err := ethapi.RPCMarshalBlockEx(b, true, fullTx, borTx, borTxHash, additionalFields) if err == nil && number == rpc.PendingBlockNumber { // Pending blocks need to nil out a few fields @@ -280,7 +271,21 @@ func (api *APIImpl) GetBlockByHash(ctx context.Context, numberOrHash rpc.BlockNu return nil, err } additionalFields["totalDifficulty"] = (*hexutil.Big)(td) - response, err := ethapi.RPCMarshalBlock(block, true, fullTx, additionalFields) + + chainConfig, err := api.chainConfig(tx) + if err != nil { + return nil, err + } + var borTx types.Transaction + var borTxHash common.Hash + if chainConfig.Bor != nil { + borTx, _, _, _ = rawdb.ReadBorTransactionForBlock(tx, block) + if borTx != nil { + borTxHash = types.ComputeBorTxHash(block.NumberU64(), block.Hash()) + } + } + + response, err := ethapi.RPCMarshalBlockEx(block, true, fullTx, borTx, borTxHash, additionalFields) if err == nil && int64(number) == rpc.PendingBlockNumber.Int64() { // Pending blocks need to nil out a few 
fields diff --git a/cmd/rpcdaemon/commands/eth_receipts.go b/cmd/rpcdaemon/commands/eth_receipts.go index 5ebc6fc0f9c..80d80b3b9fc 100644 --- a/cmd/rpcdaemon/commands/eth_receipts.go +++ b/cmd/rpcdaemon/commands/eth_receipts.go @@ -157,6 +157,7 @@ func (api *APIImpl) GetLogs(ctx context.Context, crit filters.FilterCriteria) ([ block := uint64(iter.Next()) var logIndex uint + var txIndex uint var blockLogs []*types.Log err := tx.ForPrefix(kv.Log, dbutils.EncodeBlockNumber(block), func(k, v []byte) error { var logs types.Logs @@ -171,7 +172,7 @@ func (api *APIImpl) GetLogs(ctx context.Context, crit filters.FilterCriteria) ([ if len(filtered) == 0 { return nil } - txIndex := uint(binary.BigEndian.Uint32(k[8:])) + txIndex = uint(binary.BigEndian.Uint32(k[8:])) for _, log := range filtered { log.TxIndex = txIndex } @@ -200,6 +201,14 @@ func (api *APIImpl) GetLogs(ctx context.Context, crit filters.FilterCriteria) ([ log.TxHash = b.Transactions()[log.TxIndex].Hash() } logs = append(logs, blockLogs...) + + borLogs := rawdb.ReadBorReceiptLogs(tx, blockHash, block, txIndex+1, logIndex) + if borLogs != nil { + borLogs = filterLogs(borLogs, crit.Addresses, crit.Topics) + if len(borLogs) > 0 { + logs = append(logs, borLogs...) + } + } } return logs, nil @@ -245,7 +254,7 @@ func getTopicsBitmap(c kv.Tx, topics [][]common.Hash, from, to uint32) (*roaring } // GetTransactionReceipt implements eth_getTransactionReceipt. Returns the receipt of a transaction given the transaction's hash. 
-func (api *APIImpl) GetTransactionReceipt(ctx context.Context, hash common.Hash) (map[string]interface{}, error) { +func (api *APIImpl) GetTransactionReceipt(ctx context.Context, txnHash common.Hash) (map[string]interface{}, error) { tx, err := api.db.BeginRo(ctx) if err != nil { return nil, err @@ -255,7 +264,7 @@ func (api *APIImpl) GetTransactionReceipt(ctx context.Context, hash common.Hash) var blockNum uint64 var ok bool - blockNum, ok, err = api.txnLookup(ctx, tx, hash) + blockNum, ok, err = api.txnLookup(ctx, tx, txnHash) if !ok || blockNum == 0 { // It is not an ideal solution (ideal solution requires extending TxnLookupReply proto type to include bool flag indicating absense of result), // but 0 block number is used here to mean that the transaction is not found @@ -280,7 +289,7 @@ func (api *APIImpl) GetTransactionReceipt(ctx context.Context, hash common.Hash) var txnIndex uint64 var txn types.Transaction for idx, transaction := range block.Transactions() { - if transaction.Hash() == hash { + if transaction.Hash() == txnHash { txn = transaction txnIndex = uint64(idx) break @@ -292,7 +301,7 @@ func (api *APIImpl) GetTransactionReceipt(ctx context.Context, hash common.Hash) return nil, nil } - borTx, blockHash, _, _, err := rawdb.ReadBorTransactionWithBlockNumber(tx, blockNum) + borTx, blockHash, _, _, err := rawdb.ReadBorTransactionForBlockNumber(tx, blockNum) if err != nil { return nil, err } @@ -300,7 +309,10 @@ func (api *APIImpl) GetTransactionReceipt(ctx context.Context, hash common.Hash) return nil, nil } borReceipt := rawdb.ReadBorReceipt(tx, blockHash, blockNum) - return marshalReceipt(borReceipt, borTx, cc, block, hash), nil + if borReceipt == nil { + return nil, nil + } + return marshalReceipt(borReceipt, borTx, cc, block, txnHash, false), nil } receipts, err := api.getReceipts(ctx, tx, cc, block, block.Body().SendersFromTxs()) @@ -310,7 +322,7 @@ func (api *APIImpl) GetTransactionReceipt(ctx context.Context, hash common.Hash) if len(receipts) 
<= int(txnIndex) { return nil, fmt.Errorf("block has less receipts than expected: %d <= %d, block: %d", len(receipts), int(txnIndex), blockNum) } - return marshalReceipt(receipts[txnIndex], block.Transactions()[txnIndex], cc, block, hash), nil + return marshalReceipt(receipts[txnIndex], block.Transactions()[txnIndex], cc, block, txnHash, true), nil } // GetBlockReceipts - receipts for individual block @@ -344,22 +356,15 @@ func (api *APIImpl) GetBlockReceipts(ctx context.Context, number rpc.BlockNumber result := make([]map[string]interface{}, 0, len(receipts)) for _, receipt := range receipts { txn := block.Transactions()[receipt.TransactionIndex] - result = append(result, marshalReceipt(receipt, txn, chainConfig, block, txn.Hash())) + result = append(result, marshalReceipt(receipt, txn, chainConfig, block, txn.Hash(), true)) } if chainConfig.Bor != nil { - borTx, _, _, _, err := rawdb.ReadBorTransactionWithBlockNumberAndHash(tx, blockNum, block.Hash()) - if err != nil { - return nil, err - } + borTx, _, _, _ := rawdb.ReadBorTransactionForBlock(tx, block) if borTx != nil { borReceipt := rawdb.ReadBorReceipt(tx, block.Hash(), blockNum) - borTx, err = borTx.WithHash(borReceipt.TxHash) - if err != nil { - return nil, err - } if borReceipt != nil { - result = append(result, marshalReceipt(borReceipt, borTx, chainConfig, block, borReceipt.TxHash)) + result = append(result, marshalReceipt(borReceipt, borTx, chainConfig, block, borReceipt.TxHash, false)) } } } @@ -367,7 +372,7 @@ func (api *APIImpl) GetBlockReceipts(ctx context.Context, number rpc.BlockNumber return result, nil } -func marshalReceipt(receipt *types.Receipt, txn types.Transaction, chainConfig *params.ChainConfig, block *types.Block, hash common.Hash) map[string]interface{} { +func marshalReceipt(receipt *types.Receipt, txn types.Transaction, chainConfig *params.ChainConfig, block *types.Block, txnHash common.Hash, signed bool) map[string]interface{} { var chainId *big.Int switch t := txn.(type) { case 
*types.LegacyTx: @@ -379,13 +384,17 @@ func marshalReceipt(receipt *types.Receipt, txn types.Transaction, chainConfig * case *types.DynamicFeeTransaction: chainId = t.ChainID.ToBig() } - signer := types.LatestSignerForChainID(chainId) - from, _ := txn.Sender(*signer) + + var from common.Address + if signed { + signer := types.LatestSignerForChainID(chainId) + from, _ = txn.Sender(*signer) + } fields := map[string]interface{}{ "blockHash": receipt.BlockHash, "blockNumber": hexutil.Uint64(receipt.BlockNumber.Uint64()), - "transactionHash": hash, + "transactionHash": txnHash, "transactionIndex": hexutil.Uint64(receipt.TransactionIndex), "from": from, "to": txn.GetTo(), diff --git a/cmd/rpcdaemon/commands/eth_txs.go b/cmd/rpcdaemon/commands/eth_txs.go index 7e452bf2c38..8d99bbb7cd8 100644 --- a/cmd/rpcdaemon/commands/eth_txs.go +++ b/cmd/rpcdaemon/commands/eth_txs.go @@ -18,7 +18,7 @@ import ( ) // GetTransactionByHash implements eth_getTransactionByHash. Returns information about a transaction given the transaction's hash. 
-func (api *APIImpl) GetTransactionByHash(ctx context.Context, hash common.Hash) (*RPCTransaction, error) { +func (api *APIImpl) GetTransactionByHash(ctx context.Context, txnHash common.Hash) (*RPCTransaction, error) { tx, err := api.db.BeginRo(ctx) if err != nil { return nil, err @@ -30,7 +30,7 @@ func (api *APIImpl) GetTransactionByHash(ctx context.Context, hash common.Hash) } // https://infura.io/docs/ethereum/json-rpc/eth-getTransactionByHash - blockNum, ok, err := api.txnLookup(ctx, tx, hash) + blockNum, ok, err := api.txnLookup(ctx, tx, txnHash) if err != nil { return nil, err } @@ -46,7 +46,7 @@ func (api *APIImpl) GetTransactionByHash(ctx context.Context, hash common.Hash) var txnIndex uint64 var txn types2.Transaction for i, transaction := range block.Transactions() { - if transaction.Hash() == hash { + if transaction.Hash() == txnHash { txn = transaction txnIndex = uint64(i) break @@ -61,17 +61,14 @@ func (api *APIImpl) GetTransactionByHash(ctx context.Context, hash common.Hash) // if no transaction was found then we return nil if txn == nil { - if chainConfig.Bor != nil { - borTx, _, _, _, err := rawdb.ReadBorTransactionWithBlockNumberAndHash(tx, blockNum, block.Hash()) - if err != nil { - return nil, err - } - if borTx != nil { - return newRPCTransaction(borTx, blockHash, blockNum, uint64(len(block.Transactions())), baseFee), nil - } + if chainConfig.Bor == nil { + return nil, nil } - - return nil, nil + borTx, _, _, _ := rawdb.ReadBorTransactionForBlock(tx, block) + if borTx == nil { + return nil, nil + } + return newRPCBorTransaction(borTx, txnHash, blockHash, blockNum, uint64(len(block.Transactions())), baseFee), nil } return newRPCTransaction(txn, blockHash, blockNum, txnIndex, baseFee), nil @@ -83,7 +80,7 @@ func (api *APIImpl) GetTransactionByHash(ctx context.Context, hash common.Hash) } // No finalized transaction, try to retrieve it from the pool - reply, err := api.txPool.Transactions(ctx, &txpool.TransactionsRequest{Hashes: 
[]*types.H256{gointerfaces.ConvertHashToH256(hash)}}) + reply, err := api.txPool.Transactions(ctx, &txpool.TransactionsRequest{Hashes: []*types.H256{gointerfaces.ConvertHashToH256(txnHash)}}) if err != nil { return nil, err } @@ -179,17 +176,15 @@ func (api *APIImpl) GetTransactionByBlockHashAndIndex(ctx context.Context, block if uint64(txIndex) > uint64(len(txs)) { return nil, nil // not error } else if uint64(txIndex) == uint64(len(txs)) { - if chainConfig.Bor != nil { - borTx, _, _, _, err := rawdb.ReadBorTransactionWithBlockNumberAndHash(tx, block.NumberU64(), block.Hash()) - if err != nil { - return nil, err - } - if borTx != nil { - return newRPCTransaction(borTx, block.Hash(), block.NumberU64(), uint64(txIndex), block.BaseFee()), nil - } - } else { + if chainConfig.Bor == nil { return nil, nil // not error } + borTx, _, _, _ := rawdb.ReadBorTransactionForBlock(tx, block) + if borTx == nil { + return nil, nil // not error + } + derivedBorTxHash := types2.ComputeBorTxHash(block.NumberU64(), block.Hash()) + return newRPCBorTransaction(borTx, derivedBorTxHash, block.Hash(), block.NumberU64(), uint64(txIndex), block.BaseFee()), nil } return newRPCTransaction(txs[txIndex], block.Hash(), block.NumberU64(), uint64(txIndex), block.BaseFee()), nil @@ -245,17 +240,15 @@ func (api *APIImpl) GetTransactionByBlockNumberAndIndex(ctx context.Context, blo if uint64(txIndex) > uint64(len(txs)) { return nil, nil // not error } else if uint64(txIndex) == uint64(len(txs)) { - if chainConfig.Bor != nil { - borTx, _, _, _, err := rawdb.ReadBorTransactionWithBlockNumberAndHash(tx, blockNum, block.Hash()) - if err != nil { - return nil, err - } - if borTx != nil { - return newRPCTransaction(borTx, block.Hash(), block.NumberU64(), uint64(txIndex), block.BaseFee()), nil - } - } else { + if chainConfig.Bor == nil { return nil, nil // not error } + borTx, _, _, _ := rawdb.ReadBorTransactionForBlock(tx, block) + if borTx == nil { + return nil, nil + } + derivedBorTxHash := 
types2.ComputeBorTxHash(block.NumberU64(), block.Hash()) + return newRPCBorTransaction(borTx, derivedBorTxHash, block.Hash(), block.NumberU64(), uint64(txIndex), block.BaseFee()), nil } return newRPCTransaction(txs[txIndex], block.Hash(), block.NumberU64(), uint64(txIndex), block.BaseFee()), nil diff --git a/cmd/rpcdaemon22/commands/eth_receipts.go b/cmd/rpcdaemon22/commands/eth_receipts.go index a1c313a9547..1a72299a2f0 100644 --- a/cmd/rpcdaemon22/commands/eth_receipts.go +++ b/cmd/rpcdaemon22/commands/eth_receipts.go @@ -305,7 +305,7 @@ func (api *APIImpl) GetTransactionReceipt(ctx context.Context, hash common.Hash) return nil, nil } - borTx, blockHash, _, _, err := rawdb.ReadBorTransactionWithBlockNumber(tx, blockNum) + borTx, blockHash, _, _, err := rawdb.ReadBorTransactionForBlockNumber(tx, blockNum) if err != nil { return nil, err } diff --git a/core/rawdb/bor_receipts.go b/core/rawdb/bor_receipts.go index ed89e892d5d..48a07967b91 100644 --- a/core/rawdb/bor_receipts.go +++ b/core/rawdb/bor_receipts.go @@ -18,7 +18,7 @@ var ( // HasBorReceipt verifies the existence of all block receipt belonging // to a block. 
-func HasBorReceipts(db kv.Has, hash common.Hash, number uint64) bool { +func HasBorReceipts(db kv.Has, number uint64) bool { if has, err := db.Has(kv.BorReceipts, borReceiptKey(number)); !has || err != nil { return false } @@ -70,12 +70,6 @@ func ReadBorReceipt(db kv.Tx, hash common.Hash, number uint64) *types.Receipt { receipts = make(types.Receipts, 0) } - data := ReadStorageBodyRLP(db, hash, number) - if len(data) == 0 { - log.Error("Missing body but have bor receipt", "hash", hash, "number", number) - return nil - } - if err := types.DeriveFieldsForBorReceipt(borReceipt, hash, number, receipts); err != nil { log.Error("Failed to derive bor receipt fields", "hash", hash, "number", number, "err", err) return nil @@ -83,6 +77,22 @@ func ReadBorReceipt(db kv.Tx, hash common.Hash, number uint64) *types.Receipt { return borReceipt } +// ReadBorReceiptLogs retrieves all the bor block receipt logs belonging to a block. +// If it is unable to populate these metadata fields then nil is returned. +func ReadBorReceiptLogs(db kv.Tx, blockHash common.Hash, blockNumber uint64, txIndex uint, logIndex uint) []*types.Log { + // We're deriving many fields from the block body, retrieve beside the receipt + borReceipt := ReadRawBorReceipt(db, blockHash, blockNumber) + if borReceipt == nil { + return nil + } + + borLogs := borReceipt.Logs + + types.DeriveFieldsForBorLogs(borLogs, blockHash, blockNumber, txIndex, logIndex) + + return borLogs +} + // WriteBorReceipt stores all the bor receipt belonging to a block. 
func WriteBorReceipt(tx kv.RwTx, hash common.Hash, number uint64, borReceipt *types.ReceiptForStorage) error { // Convert the bor receipt into their storage form and serialize them @@ -130,7 +140,7 @@ func ReadBorTransactionWithBlockHash(db kv.Tx, borTxHash common.Hash, blockHash } */ -// ReadBorTransaction retrieves a specific bor (fake) transaction by hash, along with +// ReadBorTransaction returns a specific bor (fake) transaction by txn hash, along with // its added positional metadata. func ReadBorTransaction(db kv.Tx, borTxHash common.Hash) (types.Transaction, common.Hash, uint64, uint64, error) { blockNumber, err := ReadTxLookupEntry(db, borTxHash) @@ -141,12 +151,19 @@ func ReadBorTransaction(db kv.Tx, borTxHash common.Hash) (types.Transaction, com return nil, common.Hash{}, 0, 0, errors.New("missing block number") } - return ReadBorTransactionWithBlockNumber(db, *blockNumber) + return computeBorTransactionForBlockNumber(db, *blockNumber) } -// ReadBorTransaction retrieves a specific bor (fake) transaction by block number, along with +// ReadBorTransactionForBlockNumber returns a bor (fake) transaction by block number, along with // its added positional metadata. 
-func ReadBorTransactionWithBlockNumber(db kv.Tx, blockNumber uint64) (types.Transaction, common.Hash, uint64, uint64, error) { +func ReadBorTransactionForBlockNumber(db kv.Tx, blockNumber uint64) (types.Transaction, common.Hash, uint64, uint64, error) { + if !HasBorReceipts(db, blockNumber) { + return nil, common.Hash{}, 0, 0, nil + } + return computeBorTransactionForBlockNumber(db, blockNumber) +} + +func computeBorTransactionForBlockNumber(db kv.Tx, blockNumber uint64) (types.Transaction, common.Hash, uint64, uint64, error) { blockHash, err := ReadCanonicalHash(db, blockNumber) if err != nil { return nil, common.Hash{}, 0, 0, err @@ -155,12 +172,19 @@ func ReadBorTransactionWithBlockNumber(db kv.Tx, blockNumber uint64) (types.Tran return nil, common.Hash{}, 0, 0, errors.New("missing block hash") } - return ReadBorTransactionWithBlockNumberAndHash(db, blockNumber, blockHash) + return computeBorTransactionForBlockNumberAndHash(db, blockNumber, blockHash) } -// ReadBorTransactionWithBlockNumberAndHash retrieves a specific bor (fake) transaction by block number and block hash, along with +// ReadBorTransactionForBlockNumberAndHash returns a bor (fake) transaction by block number and block hash, along with // its added positional metadata. 
-func ReadBorTransactionWithBlockNumberAndHash(db kv.Tx, blockNumber uint64, blockHash common.Hash) (types.Transaction, common.Hash, uint64, uint64, error) { +func ReadBorTransactionForBlockNumberAndHash(db kv.Tx, blockNumber uint64, blockHash common.Hash) (types.Transaction, common.Hash, uint64, uint64, error) { + if !HasBorReceipts(db, blockNumber) { + return nil, common.Hash{}, 0, 0, nil + } + return computeBorTransactionForBlockNumberAndHash(db, blockNumber, blockHash) +} + +func computeBorTransactionForBlockNumberAndHash(db kv.Tx, blockNumber uint64, blockHash common.Hash) (types.Transaction, common.Hash, uint64, uint64, error) { bodyForStorage, err := ReadStorageBody(db, blockHash, blockNumber) if err != nil { return nil, common.Hash{}, 0, 0, err @@ -170,6 +194,20 @@ func ReadBorTransactionWithBlockNumberAndHash(db kv.Tx, blockNumber uint64, bloc return tx, blockHash, blockNumber, uint64(bodyForStorage.TxAmount), nil } +// ReadBorTransactionForBlock retrieves a specific bor (fake) transaction associated with a block, along with +// its added positional metadata. 
+func ReadBorTransactionForBlock(db kv.Tx, block *types.Block) (types.Transaction, common.Hash, uint64, uint64) { + if !HasBorReceipts(db, block.NumberU64()) { + return nil, common.Hash{}, 0, 0 + } + return computeBorTransactionForBlock(db, block) +} + +func computeBorTransactionForBlock(db kv.Tx, block *types.Block) (types.Transaction, common.Hash, uint64, uint64) { + var tx types.Transaction = types.NewBorTransaction() + return tx, block.Hash(), block.NumberU64(), uint64(len(block.Transactions())) +} + // TruncateBorReceipts removes all bor receipt for given block number or newer func TruncateBorReceipts(db kv.RwTx, number uint64) error { if err := db.ForEach(kv.BorReceipts, dbutils.EncodeBlockNumber(number), func(k, _ []byte) error { diff --git a/core/types/access_list_tx.go b/core/types/access_list_tx.go index 538413130d4..ed700591cd2 100644 --- a/core/types/access_list_tx.go +++ b/core/types/access_list_tx.go @@ -566,10 +566,6 @@ func (tx *AccessListTx) FakeSign(address common.Address) (Transaction, error) { return cpy, nil } -func (tx *AccessListTx) WithHash(newHash common.Hash) (Transaction, error) { - return nil, errors.New("hash is immutable for AccessListTx") -} - // Hash computes the hash (but not for signatures!) 
func (tx *AccessListTx) Hash() common.Hash { if hash := tx.hash.Load(); hash != nil { diff --git a/core/types/bor_receipt.go b/core/types/bor_receipt.go index 671fc2eb27b..4042d9916bf 100644 --- a/core/types/bor_receipt.go +++ b/core/types/bor_receipt.go @@ -10,24 +10,20 @@ import ( "github.com/ledgerwatch/erigon/crypto" ) -// TenToTheFive - To be used while sorting bor logs -// -// Sorted using ( blockNumber * (10 ** 5) + logIndex ) -const TenToTheFive uint64 = 100000 - -var ( - // SystemAddress address for system sender - SystemAddress = common.HexToAddress("0xffffFFFfFFffffffffffffffFfFFFfffFFFfFFfE") -) +const BorTxKeyPrefix string = "matic-bor-receipt-" // BorReceiptKey = num (uint64 big endian) func BorReceiptKey(number uint64) []byte { return dbutils.EncodeBlockNumber(number) } -// GetDerivedBorTxHash get derived tx hash from receipt key -func GetDerivedBorTxHash(receiptKey []byte) common.Hash { - return common.BytesToHash(crypto.Keccak256(receiptKey)) +// ComputeBorTxHash get derived tx hash from block number and hash +func ComputeBorTxHash(blockNumber uint64, blockHash common.Hash) common.Hash { + txKeyPlain := make([]byte, 0, len(BorTxKeyPrefix)+8+32) + txKeyPlain = append(txKeyPlain, BorTxKeyPrefix...) + txKeyPlain = append(txKeyPlain, BorReceiptKey(blockNumber)...) + txKeyPlain = append(txKeyPlain, blockHash.Bytes()...) + return common.BytesToHash(crypto.Keccak256(txKeyPlain)) } // NewBorTransaction create new bor transaction for bor receipt @@ -37,18 +33,15 @@ func NewBorTransaction() *LegacyTx { // DeriveFieldsForBorReceipt fills the receipts with their computed fields based on consensus // data and contextual infos like containing block and transactions. 
-func DeriveFieldsForBorReceipt(receipt *Receipt, hash common.Hash, number uint64, receipts Receipts) error { - // get derived tx hash - borPrefix := []byte("matic-bor-receipt-") - // hashing using prefix + number + hash - txHash := GetDerivedBorTxHash((append(borPrefix, append(BorReceiptKey(number), hash.Bytes()...)...))) +func DeriveFieldsForBorReceipt(receipt *Receipt, blockHash common.Hash, blockNumber uint64, receipts Receipts) error { + txHash := ComputeBorTxHash(blockNumber, blockHash) txIndex := uint(len(receipts)) // set tx hash and tx index receipt.TxHash = txHash receipt.TransactionIndex = txIndex - receipt.BlockHash = hash - receipt.BlockNumber = big.NewInt(0).SetUint64(number) + receipt.BlockHash = blockHash + receipt.BlockNumber = big.NewInt(0).SetUint64(blockNumber) logIndex := 0 for i := 0; i < len(receipts); i++ { @@ -57,8 +50,8 @@ func DeriveFieldsForBorReceipt(receipt *Receipt, hash common.Hash, number uint64 // The derived log fields can simply be set from the block and transaction for j := 0; j < len(receipt.Logs); j++ { - receipt.Logs[j].BlockNumber = number - receipt.Logs[j].BlockHash = hash + receipt.Logs[j].BlockNumber = blockNumber + receipt.Logs[j].BlockHash = blockHash receipt.Logs[j].TxHash = txHash receipt.Logs[j].TxIndex = txIndex receipt.Logs[j].Index = uint(logIndex) @@ -69,14 +62,13 @@ func DeriveFieldsForBorReceipt(receipt *Receipt, hash common.Hash, number uint64 // DeriveFieldsForBorLogs fills the receipts with their computed fields based on consensus // data and contextual infos like containing block and transactions. 
-func DeriveFieldsForBorLogs(logs []*Log, hash common.Hash, number uint64, txIndex uint, logIndex uint) { - // get derived tx hash - txHash := GetDerivedBorTxHash(BorReceiptKey(number)) +func DeriveFieldsForBorLogs(logs []*Log, blockHash common.Hash, blockNumber uint64, txIndex uint, logIndex uint) { + txHash := ComputeBorTxHash(blockNumber, blockHash) // the derived log fields can simply be set from the block and transaction for j := 0; j < len(logs); j++ { - logs[j].BlockNumber = number - logs[j].BlockHash = hash + logs[j].BlockNumber = blockNumber + logs[j].BlockHash = blockHash logs[j].TxHash = txHash logs[j].TxIndex = txIndex logs[j].Index = logIndex diff --git a/core/types/dynamic_fee_tx.go b/core/types/dynamic_fee_tx.go index f1eaf57235d..aac5967bf8d 100644 --- a/core/types/dynamic_fee_tx.go +++ b/core/types/dynamic_fee_tx.go @@ -463,10 +463,6 @@ func (tx DynamicFeeTransaction) AsMessage(s Signer, baseFee *big.Int, rules *par return msg, err } -func (tx *DynamicFeeTransaction) WithHash(newHash common.Hash) (Transaction, error) { - return nil, errors.New("hash is immutable for DynamicFeeTransaction") -} - // Hash computes the hash (but not for signatures!) func (tx *DynamicFeeTransaction) Hash() common.Hash { if hash := tx.hash.Load(); hash != nil { diff --git a/core/types/legacy_tx.go b/core/types/legacy_tx.go index f3385156d56..01ba4d63d0d 100644 --- a/core/types/legacy_tx.go +++ b/core/types/legacy_tx.go @@ -482,12 +482,6 @@ func (tx *LegacyTx) FakeSign(address common.Address) (Transaction, error) { return cpy, nil } -func (tx *LegacyTx) WithHash(hash common.Hash) (Transaction, error) { - cpy := tx.copy() - cpy.hash.Store(&hash) - return cpy, nil -} - // Hash computes the hash (but not for signatures!) 
func (tx *LegacyTx) Hash() common.Hash { if hash := tx.hash.Load(); hash != nil { diff --git a/core/types/starknet_tx.go b/core/types/starknet_tx.go index 2b6ebeb8055..d90805d7254 100644 --- a/core/types/starknet_tx.go +++ b/core/types/starknet_tx.go @@ -170,10 +170,6 @@ func (tx StarknetTransaction) FakeSign(address common.Address) (Transaction, err panic("implement me") } -func (tx *StarknetTransaction) WithHash(newHash common.Hash) (Transaction, error) { - return nil, errors.New("hash is immutable for StarknetTransaction") -} - func (tx StarknetTransaction) Hash() common.Hash { if hash := tx.hash.Load(); hash != nil { return *hash.(*common.Hash) diff --git a/core/types/transaction.go b/core/types/transaction.go index 6c8561d11a2..1934103b24c 100644 --- a/core/types/transaction.go +++ b/core/types/transaction.go @@ -65,7 +65,6 @@ type Transaction interface { AsMessage(s Signer, baseFee *big.Int, rules *params.Rules) (Message, error) WithSignature(signer Signer, sig []byte) (Transaction, error) FakeSign(address common.Address) (Transaction, error) - WithHash(newHash common.Hash) (Transaction, error) Hash() common.Hash SigningHash(chainID *big.Int) common.Hash Size() common.StorageSize diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index 9c198e981bd..85f3d6b470c 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -283,10 +283,10 @@ func RPCMarshalHeader(head *types.Header) map[string]interface{} { // returned. When fullTx is true the returned block contains full transaction details, otherwise it will only contain // transaction hashes. 
func RPCMarshalBlock(block *types.Block, inclTx bool, fullTx bool) (map[string]interface{}, error) { - return RPCMarshalBlockEx(block, inclTx, fullTx, nil, nil) + return RPCMarshalBlockEx(block, inclTx, fullTx, nil, common.Hash{}) } -func RPCMarshalBlockEx(block *types.Block, inclTx bool, fullTx bool, borTx types.Transaction, borTxReceipt *types.Receipt) (map[string]interface{}, error) { +func RPCMarshalBlockEx(block *types.Block, inclTx bool, fullTx bool, borTx types.Transaction, borTxHash common.Hash) (map[string]interface{}, error) { fields := RPCMarshalHeader(block.Header()) fields["size"] = hexutil.Uint64(block.Size()) @@ -308,11 +308,11 @@ func RPCMarshalBlockEx(block *types.Block, inclTx bool, fullTx bool, borTx types } } - if borTx != nil && borTxReceipt != nil { + if borTx != nil { if fullTx { - transactions = append(transactions, newRPCTransactionFromBlockAndTxGivenIndex(block, borTx, uint64(len(txs)))) + transactions = append(transactions, newRPCBorTransaction(borTx, borTxHash, block.Hash(), block.NumberU64(), uint64(len(txs)), block.BaseFee())) } else { - transactions = append(transactions, borTxReceipt.TxHash) + transactions = append(transactions, borTxHash) } } @@ -444,6 +444,30 @@ func newRPCTransaction(tx types.Transaction, blockHash common.Hash, blockNumber return result } +// newRPCBorTransaction returns a Bor transaction that will serialize to the RPC +// representation, with the given location metadata set (if available). 
+func newRPCBorTransaction(opaqueTx types.Transaction, txHash common.Hash, blockHash common.Hash, blockNumber uint64, index uint64, baseFee *big.Int) *RPCTransaction { + tx := opaqueTx.(*types.LegacyTx) + result := &RPCTransaction{ + Type: hexutil.Uint64(tx.Type()), + ChainID: (*hexutil.Big)(new(big.Int)), + GasPrice: (*hexutil.Big)(tx.GasPrice.ToBig()), + Gas: hexutil.Uint64(tx.GetGas()), + Hash: txHash, + Input: hexutil.Bytes(tx.GetData()), + Nonce: hexutil.Uint64(tx.GetNonce()), + From: common.Address{}, + To: tx.GetTo(), + Value: (*hexutil.Big)(tx.GetValue().ToBig()), + } + if blockHash != (common.Hash{}) { + result.BlockHash = &blockHash + result.BlockNumber = (*hexutil.Big)(new(big.Int).SetUint64(blockNumber)) + result.TransactionIndex = (*hexutil.Uint64)(&index) + } + return result +} + /* // newRPCPendingTransaction returns a pending transaction that will serialize to the RPC representation func newRPCPendingTransaction(tx types.Transaction) *RPCTransaction { diff --git a/turbo/adapter/ethapi/internal.go b/turbo/adapter/ethapi/internal.go index 95c81c555a6..12d0ce2e4ae 100644 --- a/turbo/adapter/ethapi/internal.go +++ b/turbo/adapter/ethapi/internal.go @@ -2,6 +2,7 @@ package ethapi // This file stores proxy-objects for `internal` package import ( + "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/internal/ethapi" @@ -45,8 +46,8 @@ func RPCMarshalBlock(b *types.Block, inclTx bool, fullTx bool, additional map[st } //nolint -func RPCMarshalBlockEx(b *types.Block, inclTx bool, fullTx bool, borTx types.Transaction, borReceipt *types.Receipt, additional map[string]interface{}) (map[string]interface{}, error) { - fields, err := ethapi.RPCMarshalBlockEx(b, inclTx, fullTx, borTx, borReceipt) +func RPCMarshalBlockEx(b *types.Block, inclTx bool, fullTx bool, borTx types.Transaction, borTxHash common.Hash, additional map[string]interface{}) (map[string]interface{}, 
error) { + fields, err := ethapi.RPCMarshalBlockEx(b, inclTx, fullTx, borTx, borTxHash) if err != nil { return nil, err } From 0ca3ff3858bb339b2da3ccf022c868826b9f6ba3 Mon Sep 17 00:00:00 2001 From: Cory Date: Fri, 8 Jul 2022 22:56:23 -0700 Subject: [PATCH 065/152] Fix regex (#4678) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 34caf2ca476..1cff92359d3 100644 --- a/Makefile +++ b/Makefile @@ -69,7 +69,7 @@ xdg_data_home_subdirs = $(xdg_data_home)/erigon $(xdg_data_home)/erigon-grafana setup_xdg_data_home: mkdir -p $(xdg_data_home_subdirs) - ls -aln $(xdg_data_home) | grep -E "472*0*erigon-grafana" || sudo chown -R 472:0 $(xdg_data_home)/erigon-grafana + ls -aln $(xdg_data_home) | grep -E "472.*0.*erigon-grafana" || sudo chown -R 472:0 $(xdg_data_home)/erigon-grafana @echo "✔️ xdg_data_home setup" @ls -al $(xdg_data_home) From cfc051892e511f13b81dfc08fc4e07243247219b Mon Sep 17 00:00:00 2001 From: Leonard Chinonso <36096513+leonardchinonso@users.noreply.github.com> Date: Sat, 9 Jul 2022 14:42:57 +0100 Subject: [PATCH 066/152] Fix for issue 4205 (optimize eth_getLogs) (#4662) * optimize eth_getLogs * used the implementation from api._blockReader * used the implementation from api._blockReader --- cmd/rpcdaemon/commands/eth_receipts.go | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/cmd/rpcdaemon/commands/eth_receipts.go b/cmd/rpcdaemon/commands/eth_receipts.go index 80d80b3b9fc..d418dce4352 100644 --- a/cmd/rpcdaemon/commands/eth_receipts.go +++ b/cmd/rpcdaemon/commands/eth_receipts.go @@ -155,11 +155,11 @@ func (api *APIImpl) GetLogs(ctx context.Context, crit filters.FilterCriteria) ([ return nil, err } - block := uint64(iter.Next()) + blockNumber := uint64(iter.Next()) var logIndex uint var txIndex uint var blockLogs []*types.Log - err := tx.ForPrefix(kv.Log, dbutils.EncodeBlockNumber(block), func(k, v []byte) error { + err := tx.ForPrefix(kv.Log, 
dbutils.EncodeBlockNumber(blockNumber), func(k, v []byte) error { var logs types.Logs if err := cbor.Unmarshal(&logs, bytes.NewReader(v)); err != nil { return fmt.Errorf("receipt unmarshal failed: %w", err) @@ -187,18 +187,19 @@ func (api *APIImpl) GetLogs(ctx context.Context, crit filters.FilterCriteria) ([ continue } - b, err := api.blockByNumberWithSenders(tx, block) + blockHash, err := rawdb.ReadCanonicalHash(tx, blockNumber) if err != nil { return nil, err } - if b == nil { - return nil, fmt.Errorf("block not found %d", block) + + body, err := api._blockReader.BodyWithTransactions(ctx, tx, blockHash, blockNumber) + if err != nil || body == nil { + return nil, fmt.Errorf("block not found %d", blockNumber) } - blockHash := b.Hash() for _, log := range blockLogs { - log.BlockNumber = block + log.BlockNumber = blockNumber log.BlockHash = blockHash - log.TxHash = b.Transactions()[log.TxIndex].Hash() + log.TxHash = body.Transactions[log.TxIndex].Hash() } logs = append(logs, blockLogs...) From 8a75033b9860aa019f4ef88280f52f1abbfe8536 Mon Sep 17 00:00:00 2001 From: Leonard Chinonso <36096513+leonardchinonso@users.noreply.github.com> Date: Sun, 10 Jul 2022 03:13:24 +0100 Subject: [PATCH 067/152] returned error if error object is not nil (#4685) * returned error if error object is not nil * undefined block: changed it to blockNumber --- cmd/rpcdaemon/commands/eth_receipts.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/cmd/rpcdaemon/commands/eth_receipts.go b/cmd/rpcdaemon/commands/eth_receipts.go index d418dce4352..ca43379f2ee 100644 --- a/cmd/rpcdaemon/commands/eth_receipts.go +++ b/cmd/rpcdaemon/commands/eth_receipts.go @@ -193,7 +193,10 @@ func (api *APIImpl) GetLogs(ctx context.Context, crit filters.FilterCriteria) ([ } body, err := api._blockReader.BodyWithTransactions(ctx, tx, blockHash, blockNumber) - if err != nil || body == nil { + if err != nil { + return nil, err + } + if body == nil { return nil, fmt.Errorf("block not found %d", 
blockNumber) } for _, log := range blockLogs { @@ -203,7 +206,7 @@ func (api *APIImpl) GetLogs(ctx context.Context, crit filters.FilterCriteria) ([ } logs = append(logs, blockLogs...) - borLogs := rawdb.ReadBorReceiptLogs(tx, blockHash, block, txIndex+1, logIndex) + borLogs := rawdb.ReadBorReceiptLogs(tx, blockHash, blockNumber, txIndex+1, logIndex) if borLogs != nil { borLogs = filterLogs(borLogs, crit.Addresses, crit.Topics) if len(borLogs) > 0 { From d8b2077aef1edca1693784a27b6ebe8f30684f1d Mon Sep 17 00:00:00 2001 From: nanevardanyan Date: Mon, 11 Jul 2022 15:56:00 +0400 Subject: [PATCH 068/152] eth: divide test cases and verify with exact values (#4691) --- eth/stagedsync/stage_log_index_test.go | 45 ++++++++++++++++++++++---- 1 file changed, 39 insertions(+), 6 deletions(-) diff --git a/eth/stagedsync/stage_log_index_test.go b/eth/stagedsync/stage_log_index_test.go index e43732944cd..611c8a999c4 100644 --- a/eth/stagedsync/stage_log_index_test.go +++ b/eth/stagedsync/stage_log_index_test.go @@ -90,8 +90,8 @@ func genReceipts(t *testing.T, tx kv.RwTx, blocks uint64) (map[common.Address]ui return expectAddrs, expectTopics } -func TestLogIndex(t *testing.T) { - require, tmpDir, ctx := require.New(t), t.TempDir(), context.Background() +func TestPromoteLogIndex(t *testing.T) { + require, ctx := require.New(t), context.Background() _, tx := memdb.NewTestTx(t) expectAddrs, expectTopics := genReceipts(t, tx, 100) @@ -100,6 +100,7 @@ func TestLogIndex(t *testing.T) { cfgCopy := cfg cfgCopy.bufLimit = 10 cfgCopy.flushEvery = time.Nanosecond + err := promoteLogIndex("logPrefix", tx, 0, cfgCopy, ctx) require.NoError(err) @@ -114,6 +115,20 @@ func TestLogIndex(t *testing.T) { require.NoError(err) require.Equal(expect, m.GetCardinality()) } +} + +func TestPruneLogIndex(t *testing.T) { + require, tmpDir, ctx := require.New(t), t.TempDir(), context.Background() + _, tx := memdb.NewTestTx(t) + + _, _ = genReceipts(t, tx, 100) + + cfg := StageLogIndexCfg(nil, 
prune.DefaultMode, "") + cfgCopy := cfg + cfgCopy.bufLimit = 10 + cfgCopy.flushEvery = time.Nanosecond + err := promoteLogIndex("logPrefix", tx, 0, cfgCopy, ctx) + require.NoError(err) // Mode test err = pruneLogIndex("", tx, tmpDir, 50, ctx) @@ -122,23 +137,41 @@ func TestLogIndex(t *testing.T) { { total := 0 err = tx.ForEach(kv.LogAddressIndex, nil, func(k, v []byte) error { - require.True(binary.BigEndian.Uint32(k[length.Addr:]) >= 50) + require.True(binary.BigEndian.Uint32(k[length.Addr:]) == 4294967295) total++ return nil }) require.NoError(err) - require.True(total > 0) + require.True(total == 3) } { total := 0 err = tx.ForEach(kv.LogTopicIndex, nil, func(k, v []byte) error { - require.True(binary.BigEndian.Uint32(k[length.Hash:]) >= 50) + require.True(binary.BigEndian.Uint32(k[length.Hash:]) == 4294967295) total++ return nil }) require.NoError(err) - require.True(total > 0) + require.True(total == 3) } +} + +func TestUnwindLogIndex(t *testing.T) { + require, tmpDir, ctx := require.New(t), t.TempDir(), context.Background() + _, tx := memdb.NewTestTx(t) + + expectAddrs, expectTopics := genReceipts(t, tx, 100) + + cfg := StageLogIndexCfg(nil, prune.DefaultMode, "") + cfgCopy := cfg + cfgCopy.bufLimit = 10 + cfgCopy.flushEvery = time.Nanosecond + err := promoteLogIndex("logPrefix", tx, 0, cfgCopy, ctx) + require.NoError(err) + + // Mode test + err = pruneLogIndex("", tx, tmpDir, 50, ctx) + require.NoError(err) // Unwind test err = unwindLogIndex("logPrefix", tx, 70, cfg, nil) From 51245d294f68103d2b03a580256571a85a557416 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Mon, 11 Jul 2022 15:12:21 +0200 Subject: [PATCH 069/152] Add PoS validator struct [Refactoring] (#4690) * added fork_validor struct * replaced occurences of hd with forkValidator * added engineapi.validatorFork * 32 blocks maxForkDepth * removed useless if --- eth/stagedsync/stage.go | 3 - eth/stagedsync/stage_headers.go | 20 +- turbo/engineapi/fork_validator.go | 209 ++++++++++++++++++ 
turbo/stages/headerdownload/header_algos.go | 152 ------------- .../headerdownload/header_data_struct.go | 10 - turbo/stages/mock_sentry.go | 2 +- turbo/stages/stageloop.go | 7 +- 7 files changed, 224 insertions(+), 179 deletions(-) create mode 100644 turbo/engineapi/fork_validator.go diff --git a/eth/stagedsync/stage.go b/eth/stagedsync/stage.go index 9ed5212aa52..5224e3c80c5 100644 --- a/eth/stagedsync/stage.go +++ b/eth/stagedsync/stage.go @@ -9,13 +9,10 @@ import ( libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/common" - "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/log/v3" ) -type ExecutePayloadFunc func(batch kv.RwTx, header *types.Header, body *types.RawBody, unwindPoint uint64, headersChain []*types.Header, bodiesChain []*types.RawBody) error - // ExecFunc is the execution function for the stage to move forward. // * state - is the current state of the stage and contains stage data. // * unwinder - if the stage needs to cause unwinding, `unwinder` methods can be used. 
diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 61b6d0882ec..4e9fb14c4f4 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -55,7 +55,7 @@ type HeadersCfg struct { snapshotDownloader proto_downloader.DownloaderClient blockReader services.FullBlockReader dbEventNotifier snapshotsync.DBEventNotifier - execPayload ExecutePayloadFunc + forkValidator *engineapi.ForkValidator notifications *Notifications } @@ -76,7 +76,7 @@ func StageHeadersCfg( tmpdir string, dbEventNotifier snapshotsync.DBEventNotifier, notifications *Notifications, - execPayload ExecutePayloadFunc) HeadersCfg { + forkValidator *engineapi.ForkValidator) HeadersCfg { return HeadersCfg{ db: db, hd: headerDownload, @@ -92,7 +92,7 @@ func StageHeadersCfg( snapshotDownloader: snapshotDownloader, blockReader: blockReader, dbEventNotifier: dbEventNotifier, - execPayload: execPayload, + forkValidator: forkValidator, notifications: notifications, memoryOverlay: memoryOverlay, } @@ -272,8 +272,9 @@ func startHandlingForkChoice( headerHash := forkChoice.HeadBlockHash log.Debug(fmt.Sprintf("[%s] Handling fork choice", s.LogPrefix()), "headerHash", headerHash) if cfg.memoryOverlay { - defer cfg.hd.CleanNextForkState(tx, cfg.execPayload) + defer cfg.forkValidator.Clear(tx) } + currentHeadHash := rawdb.ReadHeadHeaderHash(tx) if currentHeadHash == headerHash { // no-op log.Debug(fmt.Sprintf("[%s] Fork choice no-op", s.LogPrefix())) @@ -346,9 +347,9 @@ func startHandlingForkChoice( }, nil } - if cfg.memoryOverlay && headerHash == cfg.hd.GetNextForkHash() { + if cfg.memoryOverlay && headerHash == cfg.forkValidator.ExtendingForkHeadHash() { log.Info("Flushing in-memory state") - if err := cfg.hd.FlushNextForkState(tx); err != nil { + if err := cfg.forkValidator.FlushExtendingFork(tx); err != nil { return nil, err } cfg.hd.BeaconRequestList.Remove(requestId) @@ -587,7 +588,7 @@ func verifyAndSaveNewPoSHeader( // Side chain or something weird // 
TODO(yperbasis): considered non-canonical because some missing headers were downloaded but not canonized // Or it's not a problem because forkChoice is updated frequently? - status, latestValidHash, validationError, criticalError := cfg.hd.ValidatePayload(tx, header, body, cfg.chainConfig.TerminalTotalDifficulty, false, cfg.execPayload) + status, latestValidHash, validationError, criticalError := cfg.forkValidator.ValidatePayload(tx, header, body, false) if criticalError != nil { return &privateapi.PayloadStatus{CriticalError: criticalError}, false, criticalError } @@ -600,11 +601,10 @@ func verifyAndSaveNewPoSHeader( LatestValidHash: latestValidHash, ValidationError: validationError, }, success, nil - } - if cfg.memoryOverlay && (cfg.hd.GetNextForkHash() == (common.Hash{}) || header.ParentHash == cfg.hd.GetNextForkHash()) { - status, latestValidHash, validationError, criticalError := cfg.hd.ValidatePayload(tx, header, body, cfg.chainConfig.TerminalTotalDifficulty, true, cfg.execPayload) + if cfg.memoryOverlay && (cfg.forkValidator.ExtendingForkHeadHash() == (common.Hash{}) || header.ParentHash == cfg.forkValidator.ExtendingForkHeadHash()) { + status, latestValidHash, validationError, criticalError := cfg.forkValidator.ValidatePayload(tx, header, body, true) if criticalError != nil { return &privateapi.PayloadStatus{CriticalError: criticalError}, false, criticalError } diff --git a/turbo/engineapi/fork_validator.go b/turbo/engineapi/fork_validator.go new file mode 100644 index 00000000000..316dc1e51c3 --- /dev/null +++ b/turbo/engineapi/fork_validator.go @@ -0,0 +1,209 @@ +/* + Copyright 2022 Erigon contributors + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package engineapi + +import ( + "fmt" + + "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/memdb" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/log/v3" +) + +// the maximum point from the current head, past which side forks are not validated anymore. +const maxForkDepth = 32 // 32 slots is the duration of an epoch thus there cannot be side forks in PoS deeper than 32 blocks from head. + +type validatePayloadFunc func(kv.RwTx, *types.Header, *types.RawBody, uint64, []*types.Header, []*types.RawBody) error + +// Fork segment is a side fork segment and repressent a full side fork block. +type forkSegment struct { + header *types.Header + body *types.RawBody +} + +type ForkValidator struct { + // Hash => side fork block, any block saved into this map is considered valid. + // blocks saved are required to have at most distance maxForkDepth from the head. + // if we miss a segment, we only accept the block and give up on full validation. + sideForksBlock map[common.Hash]forkSegment + // current memory batch containing chain head that extend canonical fork. + extendingFork *memdb.MemoryMutation + // hash of chain head that extend canonical fork. + extendingForkHeadHash common.Hash + // this is the function we use to perform payload validation. 
+ validatePayload validatePayloadFunc +} + +// abs64 is a utility method that given an int64, it returns its absolute value in uint64. +func abs64(n int64) uint64 { + if n < 0 { + return uint64(-n) + } + return uint64(n) +} + +func NewForkValidatorMock() *ForkValidator { + return &ForkValidator{ + sideForksBlock: make(map[common.Hash]forkSegment), + } +} + +func NewForkValidator(validatePayload validatePayloadFunc) *ForkValidator { + return &ForkValidator{ + sideForksBlock: make(map[common.Hash]forkSegment), + validatePayload: validatePayload, + } +} + +// ExtendingForkHeadHash return the fork head hash of the fork that extends the canonical chain. +func (fv *ForkValidator) ExtendingForkHeadHash() common.Hash { + return fv.extendingForkHeadHash +} + +// FlushExtendingFork flush the current extending fork if fcu chooses its head hash as the its forkchoice. +func (fv *ForkValidator) FlushExtendingFork(tx kv.RwTx) error { + // Flush changes to db. + if err := fv.extendingFork.Flush(tx); err != nil { + return err + } + // Clean extending fork data + fv.extendingFork.Rollback() + fv.extendingForkHeadHash = common.Hash{} + fv.extendingFork = nil + return nil +} + +// ValidatePayload returns whether a payload is valid or invalid, or if cannot be determined, it will be accepted. +// if the payload extend the canonical chain, then we stack it in extendingFork without any unwind. +// if the payload is a fork then we unwind to the point where the fork meet the canonical chain and we check if it is valid or not from there. +// if for any reasons none of the action above can be performed due to lack of information, we accept the payload and avoid validation. 
+func (fv *ForkValidator) ValidatePayload(tx kv.RwTx, header *types.Header, body *types.RawBody, extendCanonical bool) (status remote.EngineStatus, latestValidHash common.Hash, validationError error, criticalError error) { + if fv.validatePayload == nil { + status = remote.EngineStatus_ACCEPTED + return + } + currentHeight := rawdb.ReadCurrentBlockNumber(tx) + if currentHeight == nil { + criticalError = fmt.Errorf("could not read block number.") + return + } + defer fv.clean(*currentHeight) + + if extendCanonical { + // If the new block extends the canonical chain we update extendingFork. + if fv.extendingFork == nil { + fv.extendingFork = memdb.NewMemoryBatch(tx) + } else { + fv.extendingFork.UpdateTxn(tx) + } + // Update fork head hash. + fv.extendingForkHeadHash = header.Hash() + // Let's assemble the side fork chain if we have others building. + validationError = fv.validatePayload(fv.extendingFork, header, body, 0, nil, nil) + if validationError != nil { + status = remote.EngineStatus_INVALID + latestValidHash = header.ParentHash + return + } + status = remote.EngineStatus_VALID + latestValidHash = header.Hash() + fv.sideForksBlock[latestValidHash] = forkSegment{header, body} + return + } + // If the block is stored within the side fork it means it was already validated. + if _, ok := fv.sideForksBlock[header.Hash()]; ok { + status = remote.EngineStatus_VALID + latestValidHash = header.Hash() + return + } + + // if the block is not in range of maxForkDepth from head then we do not validate it. 
+ if abs64(int64(*currentHeight)-header.Number.Int64()) > maxForkDepth { + status = remote.EngineStatus_ACCEPTED + return + } + // Let's assemble the side fork backwards + var foundCanonical bool + currentHash := header.ParentHash + foundCanonical, criticalError = rawdb.IsCanonicalHash(tx, currentHash) + if criticalError != nil { + return + } + + var bodiesChain []*types.RawBody + var headersChain []*types.Header + unwindPoint := header.Number.Uint64() - 1 + for !foundCanonical { + var sb forkSegment + var ok bool + if sb, ok = fv.sideForksBlock[currentHash]; !ok { + // We miss some components so we did not check validity. + status = remote.EngineStatus_ACCEPTED + return + } + headersChain = append(headersChain, sb.header) + bodiesChain = append(bodiesChain, sb.body) + currentHash = sb.header.ParentHash + foundCanonical, criticalError = rawdb.IsCanonicalHash(tx, currentHash) + if criticalError != nil { + return + } + unwindPoint = sb.header.Number.Uint64() - 1 + } + status = remote.EngineStatus_VALID + // if it is not canonical we validate it as a side fork. + batch := memdb.NewMemoryBatch(tx) + defer batch.Close() + validationError = fv.validatePayload(batch, header, body, unwindPoint, headersChain, bodiesChain) + latestValidHash = header.Hash() + if validationError != nil { + latestValidHash = header.ParentHash + status = remote.EngineStatus_INVALID + return + } + fv.sideForksBlock[header.Hash()] = forkSegment{header, body} + return +} + +// Clear wipes out current extending fork data, this method is called after fcu is called, +// because fcu decides what the head is and after the call is done all the non-chosed forks are +// to be considered obsolete. +func (fv *ForkValidator) Clear(tx kv.RwTx) { + sb, ok := fv.sideForksBlock[fv.extendingForkHeadHash] + // If we did not flush the fork state, then we need to notify the txpool through unwind. 
+ if fv.extendingFork != nil && fv.extendingForkHeadHash != (common.Hash{}) && ok { + fv.extendingFork.UpdateTxn(tx) + // this will call unwind of extending fork to notify txpool of reverting transactions. + if err := fv.validatePayload(fv.extendingFork, nil, nil, sb.header.Number.Uint64()-1, nil, nil); err != nil { + log.Warn("Could not clean payload", "err", err) + } + fv.extendingFork.Rollback() + } + // Clean all data relative to txpool + fv.extendingForkHeadHash = common.Hash{} + fv.extendingFork = nil +} + +// clean wipes out all outdated sideforks whose distance exceed the height of the head. +func (fv *ForkValidator) clean(currentHeight uint64) { + for hash, sb := range fv.sideForksBlock { + if abs64(int64(currentHeight)-sb.header.Number.Int64()) > maxForkDepth { + delete(fv.sideForksBlock, hash) + } + } +} diff --git a/turbo/stages/headerdownload/header_algos.go b/turbo/stages/headerdownload/header_algos.go index 090d09a2c81..131b3b641ee 100644 --- a/turbo/stages/headerdownload/header_algos.go +++ b/turbo/stages/headerdownload/header_algos.go @@ -15,9 +15,7 @@ import ( "time" "github.com/ledgerwatch/erigon-lib/etl" - "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon-lib/kv/memdb" "github.com/ledgerwatch/erigon/turbo/services" "github.com/ledgerwatch/log/v3" "golang.org/x/exp/slices" @@ -1088,156 +1086,6 @@ func (hd *HeaderDownload) SetHeadersCollector(collector *etl.Collector) { hd.headersCollector = collector } -func abs64(n int64) uint64 { - if n < 0 { - return uint64(-n) - } - return uint64(n) -} - -func (hd *HeaderDownload) ValidatePayload(tx kv.RwTx, header *types.Header, body *types.RawBody, terminalTotalDifficulty *big.Int, store bool, execPayload func(kv.RwTx, *types.Header, *types.RawBody, uint64, []*types.Header, []*types.RawBody) error) (status remote.EngineStatus, latestValidHash common.Hash, validationError error, criticalError error) { - hd.lock.Lock() - defer 
hd.lock.Unlock() - maxDepth := uint64(16) - - currentHeight := rawdb.ReadCurrentBlockNumber(tx) - if currentHeight == nil { - criticalError = fmt.Errorf("could not read block number.") - return - } - - isAncestorPosBlock, criticalError := rawdb.Transitioned(tx, header.Number.Uint64()-1, terminalTotalDifficulty) - if criticalError != nil { - return - } - if store { - // If it is a continuation of the canonical chain we can stack it up. - if hd.nextForkState == nil { - hd.nextForkState = memdb.NewMemoryBatch(tx) - } else { - hd.nextForkState.UpdateTxn(tx) - } - hd.nextForkHash = header.Hash() - hd.lock.Unlock() - // Let's assemble the side fork chain if we have others building. - validationError = execPayload(hd.nextForkState, header, body, 0, nil, nil) - hd.lock.Lock() - if validationError != nil { - status = remote.EngineStatus_INVALID - if isAncestorPosBlock { - latestValidHash = header.ParentHash - } - return - } - status = remote.EngineStatus_VALID - latestValidHash = header.Hash() - hd.sideForksBlock[latestValidHash] = sideForkBlock{header, body} - hd.cleanupOutdateSideForks(*currentHeight, maxDepth) - return - } - // If the block is stored within the side fork it means it was already validated. - if _, ok := hd.sideForksBlock[header.Hash()]; ok { - status = remote.EngineStatus_VALID - latestValidHash = header.Hash() - return - } - - // if the block is not in range of MAX_DEPTH from head then we do not validate it. 
- if abs64(int64(*currentHeight)-header.Number.Int64()) > maxDepth { - status = remote.EngineStatus_ACCEPTED - return - } - // Let's assemble the side fork backwards - var foundCanonical bool - currentHash := header.ParentHash - foundCanonical, criticalError = rawdb.IsCanonicalHash(tx, currentHash) - if criticalError != nil { - return - } - - var bodiesChain []*types.RawBody - var headersChain []*types.Header - unwindPoint := header.Number.Uint64() - 1 - for !foundCanonical { - var sb sideForkBlock - var ok bool - if sb, ok = hd.sideForksBlock[currentHash]; !ok { - // We miss some components so we did not check validity. - status = remote.EngineStatus_ACCEPTED - return - } - headersChain = append(headersChain, sb.header) - bodiesChain = append(bodiesChain, sb.body) - currentHash = sb.header.ParentHash - foundCanonical, criticalError = rawdb.IsCanonicalHash(tx, currentHash) - if criticalError != nil { - return - } - unwindPoint = sb.header.Number.Uint64() - 1 - } - status = remote.EngineStatus_VALID - // if it is not canonical we validate it as a side fork. 
- batch := memdb.NewMemoryBatch(tx) - defer batch.Close() - hd.lock.Unlock() - validationError = execPayload(batch, header, body, unwindPoint, headersChain, bodiesChain) - hd.lock.Lock() - latestValidHash = header.Hash() - if validationError != nil { - if isAncestorPosBlock { - latestValidHash = header.ParentHash - } - status = remote.EngineStatus_INVALID - return - } - hd.sideForksBlock[header.Hash()] = sideForkBlock{header, body} - // After the we finished executing, we clean up old forks - hd.cleanupOutdateSideForks(*currentHeight, maxDepth) - return -} - -func (hd *HeaderDownload) cleanupOutdateSideForks(currentHeight uint64, maxDepth uint64) { - for hash, sb := range hd.sideForksBlock { - if abs64(int64(currentHeight)-sb.header.Number.Int64()) > maxDepth { - delete(hd.sideForksBlock, hash) - } - } -} - -func (hd *HeaderDownload) FlushNextForkState(tx kv.RwTx) error { - hd.lock.Lock() - defer hd.lock.Unlock() - if err := hd.nextForkState.Flush(tx); err != nil { - return err - } - - hd.nextForkState.Close() - hd.nextForkHash = common.Hash{} - hd.nextForkState = nil - return nil -} - -func (hd *HeaderDownload) CleanNextForkState(tx kv.RwTx, execPayload func(kv.RwTx, *types.Header, *types.RawBody, uint64, []*types.Header, []*types.RawBody) error) { - hd.lock.Lock() - defer hd.lock.Unlock() - sb, ok := hd.sideForksBlock[hd.nextForkHash] - // If we did not flush the fork state, then we need to notify the txpool. 
- if hd.nextForkState != nil && hd.nextForkHash != (common.Hash{}) && ok { - hd.nextForkState.UpdateTxn(tx) - if err := execPayload(hd.nextForkState, nil, nil, sb.header.Number.Uint64()-1, nil, nil); err != nil { - log.Warn("Could not clean payload", "err", err) - } - } - hd.nextForkHash = common.Hash{} - hd.nextForkState = nil -} - -func (hd *HeaderDownload) GetNextForkHash() common.Hash { - hd.lock.Lock() - defer hd.lock.Unlock() - return hd.nextForkHash -} - func (hd *HeaderDownload) SetPOSSync(posSync bool) { hd.lock.Lock() defer hd.lock.Unlock() diff --git a/turbo/stages/headerdownload/header_data_struct.go b/turbo/stages/headerdownload/header_data_struct.go index f2ed7797a62..1a097fcf28b 100644 --- a/turbo/stages/headerdownload/header_data_struct.go +++ b/turbo/stages/headerdownload/header_data_struct.go @@ -9,7 +9,6 @@ import ( lru "github.com/hashicorp/golang-lru" "github.com/ledgerwatch/erigon-lib/etl" - "github.com/ledgerwatch/erigon-lib/kv/memdb" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/core/types" @@ -272,16 +271,10 @@ type Stats struct { RespMaxBlock uint64 } -type sideForkBlock struct { - header *types.Header - body *types.RawBody -} - type HeaderDownload struct { badHeaders map[common.Hash]struct{} anchors map[common.Hash]*Anchor // Mapping from parentHash to collection of anchors links map[common.Hash]*Link // Links by header hash - sideForksBlock map[common.Hash]sideForkBlock engine consensus.Engine insertQueue InsertQueue // Priority queue of non-persisted links that need to be verified and can be inserted seenAnnounces *SeenAnnounces // External announcement hashes, after header verification if hash is in this set - will broadcast it further @@ -322,8 +315,6 @@ type HeaderDownload struct { unsettledHeadHeight uint64 // Height of unsettledForkChoice.headBlockHash posDownloaderTip common.Hash // See https://hackmd.io/GDc0maGsQeKfP8o2C7L52w badPoSHeaders 
map[common.Hash]common.Hash // Invalid Tip -> Last Valid Ancestor - nextForkState *memdb.MemoryMutation // The db state of the next fork. - nextForkHash common.Hash // Hash of the next fork } // HeaderRecord encapsulates two forms of the same header - raw RLP encoding (to avoid duplicated decodings and encodings), and parsed value types.Header @@ -355,7 +346,6 @@ func NewHeaderDownload( PayloadStatusCh: make(chan privateapi.PayloadStatus, 1), headerReader: headerReader, badPoSHeaders: make(map[common.Hash]common.Hash), - sideForksBlock: make(map[common.Hash]sideForkBlock), } heap.Init(&hd.persistedLinkQueue) heap.Init(&hd.linkQueue) diff --git a/turbo/stages/mock_sentry.go b/turbo/stages/mock_sentry.go index b9fd0f8e133..97bc68a3da5 100644 --- a/turbo/stages/mock_sentry.go +++ b/turbo/stages/mock_sentry.go @@ -309,7 +309,7 @@ func MockWithEverything(t *testing.T, gspec *core.Genesis, key *ecdsa.PrivateKey mock.Sync = stagedsync.New( stagedsync.DefaultStages(mock.Ctx, prune, - stagedsync.StageHeadersCfg(mock.DB, mock.sentriesClient.Hd, mock.sentriesClient.Bd, *mock.ChainConfig, sendHeaderRequest, propagateNewBlockHashes, penalize, cfg.BatchSize, false, false, allSnapshots, snapshotsDownloader, blockReader, mock.tmpdir, mock.Notifications.Events, mock.Notifications, nil), + stagedsync.StageHeadersCfg(mock.DB, mock.sentriesClient.Hd, mock.sentriesClient.Bd, *mock.ChainConfig, sendHeaderRequest, propagateNewBlockHashes, penalize, cfg.BatchSize, false, false, allSnapshots, snapshotsDownloader, blockReader, mock.tmpdir, mock.Notifications.Events, mock.Notifications, engineapi.NewForkValidatorMock()), stagedsync.StageCumulativeIndexCfg(mock.DB), stagedsync.StageBlockHashesCfg(mock.DB, mock.tmpdir, mock.ChainConfig), stagedsync.StageBodiesCfg( diff --git a/turbo/stages/stageloop.go b/turbo/stages/stageloop.go index 3fa3dc78637..cab355167f5 100644 --- a/turbo/stages/stageloop.go +++ b/turbo/stages/stageloop.go @@ -25,6 +25,7 @@ import ( 
"github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/ethdb/privateapi" "github.com/ledgerwatch/erigon/p2p" + "github.com/ledgerwatch/erigon/turbo/engineapi" "github.com/ledgerwatch/erigon/turbo/services" "github.com/ledgerwatch/erigon/turbo/snapshotsync" "github.com/ledgerwatch/erigon/turbo/stages/headerdownload" @@ -88,7 +89,7 @@ func StageLoop( log.Error("Staged Sync", "err", err) if recoveryErr := hd.RecoverFromDb(db); recoveryErr != nil { - log.Error("Failed to recover header downloader", "err", recoveryErr) + log.Error("Failed to recover header sentriesClient", "err", recoveryErr) } time.Sleep(500 * time.Millisecond) // just to avoid too much similar errors in logs continue @@ -331,7 +332,7 @@ func NewStagedSync( snapDownloader proto_downloader.DownloaderClient, snapshots *snapshotsync.RoSnapshots, headCh chan *types.Block, - execPayload stagedsync.ExecutePayloadFunc, + execPayload func(kv.RwTx, *types.Header, *types.RawBody, uint64, []*types.Header, []*types.RawBody) error, ) (*stagedsync.Sync, error) { var blockReader services.FullBlockReader if cfg.Snapshot.Enabled { @@ -364,7 +365,7 @@ func NewStagedSync( tmpdir, notifications.Events, notifications, - execPayload), + engineapi.NewForkValidator(execPayload)), stagedsync.StageCumulativeIndexCfg(db), stagedsync.StageBlockHashesCfg(db, tmpdir, controlServer.ChainConfig), stagedsync.StageBodiesCfg( From 9637b25a429a07668fca0ac2f4e9bfcb3f2859b0 Mon Sep 17 00:00:00 2001 From: Artem Tsebrovskiy Date: Mon, 11 Jul 2022 15:58:08 +0100 Subject: [PATCH 070/152] [erigon2] print trie variant derived from trie instead switch log (#4672) * [erigon2] print trie variant derived from trie instead switch log * updated erigon-lib version * bumped erigon-lib version --- cmd/state/commands/erigon2.go | 7 ++++--- go.mod | 2 +- go.sum | 4 ++-- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/cmd/state/commands/erigon2.go b/cmd/state/commands/erigon2.go index 370433ecaa3..79027e5b5d5 
100644 --- a/cmd/state/commands/erigon2.go +++ b/cmd/state/commands/erigon2.go @@ -149,17 +149,18 @@ func Erigon2(genesis *core.Genesis, chainConfig *params.ChainConfig, logger log. var trieVariant commitment.TrieVariant switch commitmentTrie { case "bin": - logger.Info("using Binary Patricia Hashed Trie for commitments") trieVariant = commitment.VariantBinPatriciaTrie blockRootMismatchExpected = true case "hex": fallthrough default: - logger.Info("using Hex Patricia Hashed Trie for commitments") trieVariant = commitment.VariantHexPatriciaTrie } - agg, err3 := aggregator.NewAggregator(aggPath, unwindLimit, aggregationStep, changesets, commitments, 100_000_000, commitment.InitializeTrie(trieVariant), rwTx) + trie := commitment.InitializeTrie(trieVariant) + logger.Info("commitment trie initialized", "variant", trie.Variant()) + + agg, err3 := aggregator.NewAggregator(aggPath, unwindLimit, aggregationStep, changesets, commitments, 100_000_000, trie, rwTx) if err3 != nil { return fmt.Errorf("create aggregator: %w", err3) } diff --git a/go.mod b/go.mod index a495ce75d69..7975a5a71a4 100644 --- a/go.mod +++ b/go.mod @@ -36,7 +36,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible - github.com/ledgerwatch/erigon-lib v0.0.0-20220708142551-cf57648d7d41 + github.com/ledgerwatch/erigon-lib v0.0.0-20220710110825-21c6baf2871c github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 diff --git a/go.sum b/go.sum index fab357ab66b..ee8469d1f66 100644 --- a/go.sum +++ b/go.sum @@ -383,8 +383,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3PYPwICLl+/9oulQauOuETfgFvhBDffs0= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod 
h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220708142551-cf57648d7d41 h1:sTY+hHzUSFWpUiw7bUqmw1opanXeSltDBPtOzbTQLFQ= -github.com/ledgerwatch/erigon-lib v0.0.0-20220708142551-cf57648d7d41/go.mod h1:bttvdtZXjh803u/CeMerKYnWvVvXTICWSfpcMeQNtmc= +github.com/ledgerwatch/erigon-lib v0.0.0-20220710110825-21c6baf2871c h1:xcHZhKSsUezVVGiqMxhe2qlkoedgkCAyx1Zi+bY9Pxs= +github.com/ledgerwatch/erigon-lib v0.0.0-20220710110825-21c6baf2871c/go.mod h1:bttvdtZXjh803u/CeMerKYnWvVvXTICWSfpcMeQNtmc= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= From e0845f229b3c55d0dae55505ac5703d2ebaca93c Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Tue, 12 Jul 2022 10:33:43 +0200 Subject: [PATCH 071/152] Don't feed invalid headers into DB (#4693) * Reduce code duplication in verifyAndSaveNewPoSHeader * Don't feed invalid headers into DB * remove superfluous comment --- eth/stagedsync/stage_headers.go | 38 +++++++++++---------------------- 1 file changed, 12 insertions(+), 26 deletions(-) diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 4e9fb14c4f4..c5bf40d0920 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -579,39 +579,21 @@ func verifyAndSaveNewPoSHeader( }, false, nil } - if err := headerInserter.FeedHeaderPoS(tx, header, headerHash); err != nil { - return nil, false, err - } - currentHeadHash := rawdb.ReadHeadHeaderHash(tx) - if currentHeadHash != header.ParentHash { - // Side chain or something weird - // TODO(yperbasis): considered non-canonical because some missing headers were downloaded but not canonized - // Or it's not a problem because forkChoice is updated frequently? 
- status, latestValidHash, validationError, criticalError := cfg.forkValidator.ValidatePayload(tx, header, body, false) - if criticalError != nil { - return &privateapi.PayloadStatus{CriticalError: criticalError}, false, criticalError - } - if validationError != nil { - cfg.hd.ReportBadHeaderPoS(headerHash, latestValidHash) - } - success = status == remote.EngineStatus_VALID || status == remote.EngineStatus_ACCEPTED - return &privateapi.PayloadStatus{ - Status: status, - LatestValidHash: latestValidHash, - ValidationError: validationError, - }, success, nil - } + canExtendCanonical := header.ParentHash == currentHeadHash + canExtendInMemory := cfg.memoryOverlay && (cfg.forkValidator.ExtendingForkHeadHash() == (common.Hash{}) || header.ParentHash == cfg.forkValidator.ExtendingForkHeadHash()) - if cfg.memoryOverlay && (cfg.forkValidator.ExtendingForkHeadHash() == (common.Hash{}) || header.ParentHash == cfg.forkValidator.ExtendingForkHeadHash()) { - status, latestValidHash, validationError, criticalError := cfg.forkValidator.ValidatePayload(tx, header, body, true) + if canExtendInMemory || !canExtendCanonical { + status, latestValidHash, validationError, criticalError := cfg.forkValidator.ValidatePayload(tx, header, body, canExtendCanonical) if criticalError != nil { return &privateapi.PayloadStatus{CriticalError: criticalError}, false, criticalError } - if validationError != nil { + success = validationError == nil + if !success { cfg.hd.ReportBadHeaderPoS(headerHash, latestValidHash) + } else if err := headerInserter.FeedHeaderPoS(tx, header, headerHash); err != nil { + return nil, false, err } - success = status == remote.EngineStatus_VALID || status == remote.EngineStatus_ACCEPTED return &privateapi.PayloadStatus{ Status: status, LatestValidHash: latestValidHash, @@ -620,6 +602,10 @@ func verifyAndSaveNewPoSHeader( } // OK, we're on the canonical chain + if err := headerInserter.FeedHeaderPoS(tx, header, headerHash); err != nil { + return nil, false, err + } + if 
requestStatus == engineapi.New { cfg.hd.SetPendingPayloadHash(headerHash) } From b161c27ac318e5689656929ee032aea5b1c0350d Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Tue, 12 Jul 2022 10:39:57 +0200 Subject: [PATCH 072/152] optimized one db read (#4694) --- eth/backend.go | 9 +++++---- eth/stagedsync/stage_finish.go | 25 +++++++++++++++---------- turbo/engineapi/fork_validator.go | 28 +++++++++++++++------------- turbo/stages/mock_sentry.go | 4 ++-- turbo/stages/stageloop.go | 6 +++--- 5 files changed, 40 insertions(+), 32 deletions(-) diff --git a/eth/backend.go b/eth/backend.go index 25fbd139b41..293f3989309 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -30,6 +30,7 @@ import ( "time" "github.com/ledgerwatch/erigon/eth/ethconsensusconfig" + "github.com/ledgerwatch/erigon/turbo/engineapi" "github.com/ledgerwatch/erigon/turbo/services" "google.golang.org/protobuf/types/known/emptypb" @@ -135,8 +136,8 @@ type Ethereum struct { txPool2Send *txpool2.Send txPool2GrpcServer txpool_proto.TxpoolServer notifyMiningAboutNewTxs chan struct{} - - downloader *downloader.Downloader + forkValidator *engineapi.ForkValidator + downloader *downloader.Downloader } // New creates a new Ethereum object (including the @@ -507,8 +508,8 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere if config.Ethstats != "" { headCh = make(chan *types.Block, 1) } - - backend.stagedSync, err = stages2.NewStagedSync(backend.sentryCtx, backend.log, backend.chainDB, stack.Config().P2P, *config, backend.sentriesClient, tmpdir, backend.notifications, backend.downloaderClient, allSnapshots, headCh, inMemoryExecution) + backend.forkValidator = engineapi.NewForkValidator(currentBlock.NumberU64(), inMemoryExecution) + backend.stagedSync, err = stages2.NewStagedSync(backend.sentryCtx, backend.log, backend.chainDB, stack.Config().P2P, *config, backend.sentriesClient, tmpdir, backend.notifications, backend.downloaderClient, allSnapshots, headCh, backend.forkValidator) 
if err != nil { return nil, err } diff --git a/eth/stagedsync/stage_finish.go b/eth/stagedsync/stage_finish.go index 2c6ad964399..c12ef0f410b 100644 --- a/eth/stagedsync/stage_finish.go +++ b/eth/stagedsync/stage_finish.go @@ -18,22 +18,25 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/ethdb/cbor" "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/turbo/engineapi" "github.com/ledgerwatch/log/v3" ) type FinishCfg struct { - db kv.RwDB - tmpDir string - log log.Logger - headCh chan *types.Block + db kv.RwDB + tmpDir string + log log.Logger + headCh chan *types.Block + forkValidator *engineapi.ForkValidator } -func StageFinishCfg(db kv.RwDB, tmpDir string, logger log.Logger, headCh chan *types.Block) FinishCfg { +func StageFinishCfg(db kv.RwDB, tmpDir string, logger log.Logger, headCh chan *types.Block, forkValidator *engineapi.ForkValidator) FinishCfg { return FinishCfg{ - db: db, - log: logger, - tmpDir: tmpDir, - headCh: headCh, + db: db, + log: logger, + tmpDir: tmpDir, + headCh: headCh, + forkValidator: forkValidator, } } @@ -56,12 +59,14 @@ func FinishForward(s *StageState, tx kv.RwTx, cfg FinishCfg, initialCycle bool) if executionAt <= s.BlockNumber { return nil } - rawdb.WriteHeadBlockHash(tx, rawdb.ReadHeadHeaderHash(tx)) err = s.Update(tx, executionAt) if err != nil { return err } + if cfg.forkValidator != nil { + cfg.forkValidator.NotifyCurrentHeight(executionAt) + } if initialCycle { if err := params.SetErigonVersion(tx, params.VersionKeyFinished); err != nil { diff --git a/turbo/engineapi/fork_validator.go b/turbo/engineapi/fork_validator.go index 316dc1e51c3..96969af2908 100644 --- a/turbo/engineapi/fork_validator.go +++ b/turbo/engineapi/fork_validator.go @@ -14,8 +14,6 @@ package engineapi import ( - "fmt" - "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/memdb" @@ -47,6 +45,8 @@ type ForkValidator struct { 
extendingForkHeadHash common.Hash // this is the function we use to perform payload validation. validatePayload validatePayloadFunc + // this is the current point where we processed the chain so far. + currentHeight uint64 } // abs64 is a utility method that given an int64, it returns its absolute value in uint64. @@ -57,16 +57,18 @@ func abs64(n int64) uint64 { return uint64(n) } -func NewForkValidatorMock() *ForkValidator { +func NewForkValidatorMock(currentHeight uint64) *ForkValidator { return &ForkValidator{ sideForksBlock: make(map[common.Hash]forkSegment), + currentHeight: currentHeight, } } -func NewForkValidator(validatePayload validatePayloadFunc) *ForkValidator { +func NewForkValidator(currentHeight uint64, validatePayload validatePayloadFunc) *ForkValidator { return &ForkValidator{ sideForksBlock: make(map[common.Hash]forkSegment), validatePayload: validatePayload, + currentHeight: currentHeight, } } @@ -75,6 +77,11 @@ func (fv *ForkValidator) ExtendingForkHeadHash() common.Hash { return fv.extendingForkHeadHash } +// NotifyCurrentHeight is to be called at the end of the stage cycle and repressent the last processed block. +func (fv *ForkValidator) NotifyCurrentHeight(currentHeight uint64) { + fv.currentHeight = currentHeight +} + // FlushExtendingFork flush the current extending fork if fcu chooses its head hash as the its forkchoice. func (fv *ForkValidator) FlushExtendingFork(tx kv.RwTx) error { // Flush changes to db. @@ -97,12 +104,7 @@ func (fv *ForkValidator) ValidatePayload(tx kv.RwTx, header *types.Header, body status = remote.EngineStatus_ACCEPTED return } - currentHeight := rawdb.ReadCurrentBlockNumber(tx) - if currentHeight == nil { - criticalError = fmt.Errorf("could not read block number.") - return - } - defer fv.clean(*currentHeight) + defer fv.clean() if extendCanonical { // If the new block extends the canonical chain we update extendingFork. 
@@ -133,7 +135,7 @@ func (fv *ForkValidator) ValidatePayload(tx kv.RwTx, header *types.Header, body } // if the block is not in range of maxForkDepth from head then we do not validate it. - if abs64(int64(*currentHeight)-header.Number.Int64()) > maxForkDepth { + if abs64(int64(fv.currentHeight)-header.Number.Int64()) > maxForkDepth { status = remote.EngineStatus_ACCEPTED return } @@ -200,9 +202,9 @@ func (fv *ForkValidator) Clear(tx kv.RwTx) { } // clean wipes out all outdated sideforks whose distance exceed the height of the head. -func (fv *ForkValidator) clean(currentHeight uint64) { +func (fv *ForkValidator) clean() { for hash, sb := range fv.sideForksBlock { - if abs64(int64(currentHeight)-sb.header.Number.Int64()) > maxForkDepth { + if abs64(int64(fv.currentHeight)-sb.header.Number.Int64()) > maxForkDepth { delete(fv.sideForksBlock, hash) } } diff --git a/turbo/stages/mock_sentry.go b/turbo/stages/mock_sentry.go index 97bc68a3da5..4a3cd2e28bf 100644 --- a/turbo/stages/mock_sentry.go +++ b/turbo/stages/mock_sentry.go @@ -309,7 +309,7 @@ func MockWithEverything(t *testing.T, gspec *core.Genesis, key *ecdsa.PrivateKey mock.Sync = stagedsync.New( stagedsync.DefaultStages(mock.Ctx, prune, - stagedsync.StageHeadersCfg(mock.DB, mock.sentriesClient.Hd, mock.sentriesClient.Bd, *mock.ChainConfig, sendHeaderRequest, propagateNewBlockHashes, penalize, cfg.BatchSize, false, false, allSnapshots, snapshotsDownloader, blockReader, mock.tmpdir, mock.Notifications.Events, mock.Notifications, engineapi.NewForkValidatorMock()), + stagedsync.StageHeadersCfg(mock.DB, mock.sentriesClient.Hd, mock.sentriesClient.Bd, *mock.ChainConfig, sendHeaderRequest, propagateNewBlockHashes, penalize, cfg.BatchSize, false, false, allSnapshots, snapshotsDownloader, blockReader, mock.tmpdir, mock.Notifications.Events, mock.Notifications, engineapi.NewForkValidatorMock(1)), stagedsync.StageCumulativeIndexCfg(mock.DB), stagedsync.StageBlockHashesCfg(mock.DB, mock.tmpdir, mock.ChainConfig), 
stagedsync.StageBodiesCfg( @@ -348,7 +348,7 @@ func MockWithEverything(t *testing.T, gspec *core.Genesis, key *ecdsa.PrivateKey stagedsync.StageLogIndexCfg(mock.DB, prune, mock.tmpdir), stagedsync.StageCallTracesCfg(mock.DB, prune, 0, mock.tmpdir), stagedsync.StageTxLookupCfg(mock.DB, prune, mock.tmpdir, allSnapshots, isBor), - stagedsync.StageFinishCfg(mock.DB, mock.tmpdir, mock.Log, nil), true), + stagedsync.StageFinishCfg(mock.DB, mock.tmpdir, mock.Log, nil, nil), true), stagedsync.DefaultUnwindOrder, stagedsync.DefaultPruneOrder, ) diff --git a/turbo/stages/stageloop.go b/turbo/stages/stageloop.go index cab355167f5..5f1cc0a48b2 100644 --- a/turbo/stages/stageloop.go +++ b/turbo/stages/stageloop.go @@ -332,7 +332,7 @@ func NewStagedSync( snapDownloader proto_downloader.DownloaderClient, snapshots *snapshotsync.RoSnapshots, headCh chan *types.Block, - execPayload func(kv.RwTx, *types.Header, *types.RawBody, uint64, []*types.Header, []*types.RawBody) error, + forkValidator *engineapi.ForkValidator, ) (*stagedsync.Sync, error) { var blockReader services.FullBlockReader if cfg.Snapshot.Enabled { @@ -365,7 +365,7 @@ func NewStagedSync( tmpdir, notifications.Events, notifications, - engineapi.NewForkValidator(execPayload)), + forkValidator), stagedsync.StageCumulativeIndexCfg(db), stagedsync.StageBlockHashesCfg(db, tmpdir, controlServer.ChainConfig), stagedsync.StageBodiesCfg( @@ -404,7 +404,7 @@ func NewStagedSync( stagedsync.StageLogIndexCfg(db, cfg.Prune, tmpdir), stagedsync.StageCallTracesCfg(db, cfg.Prune, 0, tmpdir), stagedsync.StageTxLookupCfg(db, cfg.Prune, tmpdir, snapshots, isBor), - stagedsync.StageFinishCfg(db, tmpdir, logger, headCh), runInTestMode), + stagedsync.StageFinishCfg(db, tmpdir, logger, headCh, forkValidator), runInTestMode), stagedsync.DefaultUnwindOrder, stagedsync.DefaultPruneOrder, ), nil From 6b6b74e034c22eac161307ec9e4d78f8b9a711cc Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Tue, 12 Jul 2022 15:25:32 +0200 Subject: [PATCH 073/152] 
removed code duplication (#4697) --- turbo/engineapi/fork_validator.go | 39 ++++++++++++++----------------- 1 file changed, 17 insertions(+), 22 deletions(-) diff --git a/turbo/engineapi/fork_validator.go b/turbo/engineapi/fork_validator.go index 96969af2908..e65db857423 100644 --- a/turbo/engineapi/fork_validator.go +++ b/turbo/engineapi/fork_validator.go @@ -115,17 +115,7 @@ func (fv *ForkValidator) ValidatePayload(tx kv.RwTx, header *types.Header, body } // Update fork head hash. fv.extendingForkHeadHash = header.Hash() - // Let's assemble the side fork chain if we have others building. - validationError = fv.validatePayload(fv.extendingFork, header, body, 0, nil, nil) - if validationError != nil { - status = remote.EngineStatus_INVALID - latestValidHash = header.ParentHash - return - } - status = remote.EngineStatus_VALID - latestValidHash = header.Hash() - fv.sideForksBlock[latestValidHash] = forkSegment{header, body} - return + return fv.validateAndStorePayload(fv.extendingFork, header, body, 0, nil, nil) } // If the block is stored within the side fork it means it was already validated. if _, ok := fv.sideForksBlock[header.Hash()]; ok { @@ -167,19 +157,10 @@ func (fv *ForkValidator) ValidatePayload(tx kv.RwTx, header *types.Header, body } unwindPoint = sb.header.Number.Uint64() - 1 } - status = remote.EngineStatus_VALID - // if it is not canonical we validate it as a side fork. + // if it is not canonical we validate it in memory and discard it aferwards. 
batch := memdb.NewMemoryBatch(tx) defer batch.Close() - validationError = fv.validatePayload(batch, header, body, unwindPoint, headersChain, bodiesChain) - latestValidHash = header.Hash() - if validationError != nil { - latestValidHash = header.ParentHash - status = remote.EngineStatus_INVALID - return - } - fv.sideForksBlock[header.Hash()] = forkSegment{header, body} - return + return fv.validateAndStorePayload(batch, header, body, unwindPoint, headersChain, bodiesChain) } // Clear wipes out current extending fork data, this method is called after fcu is called, @@ -201,6 +182,20 @@ func (fv *ForkValidator) Clear(tx kv.RwTx) { fv.extendingFork = nil } +// validateAndStorePayload validate and store a payload fork chain if such chain results valid. +func (fv *ForkValidator) validateAndStorePayload(tx kv.RwTx, header *types.Header, body *types.RawBody, unwindPoint uint64, headersChain []*types.Header, bodiesChain []*types.RawBody) (status remote.EngineStatus, latestValidHash common.Hash, validationError error, criticalError error) { + validationError = fv.validatePayload(tx, header, body, unwindPoint, headersChain, bodiesChain) + latestValidHash = header.Hash() + if validationError != nil { + latestValidHash = header.ParentHash + status = remote.EngineStatus_INVALID + return + } + status = remote.EngineStatus_VALID + fv.sideForksBlock[header.Hash()] = forkSegment{header, body} + return +} + // clean wipes out all outdated sideforks whose distance exceed the height of the head. 
func (fv *ForkValidator) clean() { for hash, sb := range fv.sideForksBlock { From cd5ef32f379ff51043b7ce1b0c3c471e18b99459 Mon Sep 17 00:00:00 2001 From: primal_concrete_sledge Date: Tue, 12 Jul 2022 19:21:52 +0400 Subject: [PATCH 074/152] Add config for Gnosis Chain (#4671) * Draft: gnosis-chain * Fix Gnosis allocation Co-authored-by: yperbasis --- cmd/hack/hack.go | 2 + cmd/utils/flags.go | 2 + consensus/aura/consensusconfig/embed.go | 5 +++ consensus/aura/consensusconfig/poagnosis.json | 25 +++++++++++ core/allocs/gnosis.json | 14 +++++++ core/genesis.go | 22 ++++++++++ params/bootnodes.go | 42 +++++++++++++++++++ params/chainspecs/gnosis.json | 21 ++++++++++ params/config.go | 20 ++++++++- params/networkname/network_name.go | 2 + 10 files changed, 153 insertions(+), 2 deletions(-) create mode 100644 consensus/aura/consensusconfig/poagnosis.json create mode 100644 core/allocs/gnosis.json create mode 100644 params/chainspecs/gnosis.json diff --git a/cmd/hack/hack.go b/cmd/hack/hack.go index 8434f6b3ebe..4f03dc3802c 100644 --- a/cmd/hack/hack.go +++ b/cmd/hack/hack.go @@ -1019,6 +1019,8 @@ func chainConfig(name string) error { chainConfig = params.MumbaiChainConfig case "bor-mainnet": chainConfig = params.BorMainnetChainConfig + case "gnosis": + chainConfig = params.GnosisChainConfig default: return fmt.Errorf("unknown name: %s", name) } diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 1ad11738fb9..583e84a3598 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -1086,6 +1086,8 @@ func DataDirForNetwork(datadir string, network string) string { return filepath.Join(datadir, "bor-devnet") case networkname.SepoliaChainName: return filepath.Join(datadir, "sepolia") + case networkname.GnosisChainName: + return filepath.Join(datadir, "gnosis") default: return datadir } diff --git a/consensus/aura/consensusconfig/embed.go b/consensus/aura/consensusconfig/embed.go index 93eaae5bcaa..d26647b3205 100644 --- a/consensus/aura/consensusconfig/embed.go +++ 
b/consensus/aura/consensusconfig/embed.go @@ -9,10 +9,15 @@ import ( //go:embed poasokol.json var Sokol []byte +//go:embed poagnosis.json +var Gnosis []byte + func GetConfigByChain(chainName string) []byte { switch chainName { case networkname.SokolChainName: return Sokol + case networkname.GnosisChainName: + return Gnosis default: return Sokol } diff --git a/consensus/aura/consensusconfig/poagnosis.json b/consensus/aura/consensusconfig/poagnosis.json new file mode 100644 index 00000000000..e5474089ee5 --- /dev/null +++ b/consensus/aura/consensusconfig/poagnosis.json @@ -0,0 +1,25 @@ +{ + "stepDuration": 5, + "blockReward": "0x0", + "maximumUncleCountTransition": 0, + "maximumUncleCount": 0, + "validators": { + "multi": { + "0": { + "safeContract": "0xcace5b3c29211740e595850e80478416ee77ca21" + }, + "1300": { + "safeContract": "0x22e1229a2c5b95a60983b5577f745a603284f535" + }, + "9186425": { + "safeContract": "0xB87BE9f7196F2AE084Ca1DE6af5264292976e013" + } + } + }, + "blockRewardContractAddress": "0x867305d19606aadba405ce534e303d0e225f9556", + "blockRewardContractTransition": 1310, + "randomnessContractAddress": { + "9186425": "0x5870b0527DeDB1cFBD9534343Feda1a41Ce47766" + } +} + diff --git a/core/allocs/gnosis.json b/core/allocs/gnosis.json new file mode 100644 index 00000000000..ddb896259ff --- /dev/null +++ b/core/allocs/gnosis.json @@ -0,0 +1,14 @@ +{ + "0x0000000000000000000000000000000000000001": { + "balance": "0x1" + }, + "0x0000000000000000000000000000000000000002": { + "balance": "0x1" + }, + "0x0000000000000000000000000000000000000003": { + "balance": "0x1" + }, + "0x0000000000000000000000000000000000000004": { + "balance": "0x1" + } +} diff --git a/core/genesis.go b/core/genesis.go index c298a5e6de7..eb424e16e40 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -718,6 +718,26 @@ func DefaultBorDevnetGenesisBlock() *Genesis { } } +func DefaultGnosisGenesisBlock() *Genesis { + sealRlp, err := rlp.EncodeToBytes([][]byte{ + common.FromHex(""), + 
common.FromHex("0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"), + }) + if err != nil { + panic(err) + } + return &Genesis{ + Config: params.GnosisChainConfig, + Timestamp: 0x0, //1558348305, + SealRlp: sealRlp, + GasLimit: 0x989680, + Difficulty: big.NewInt(0x20000), + //Mixhash: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"), + //Coinbase: common.HexToAddress("0x0000000000000000000000000000000000000000"), + Alloc: readPrealloc("allocs/gnosis.json"), + } +} + // Pre-calculated version of: // DevnetSignPrivateKey = crypto.HexToECDSA(sha256.Sum256([]byte("erigon devnet key"))) // DevnetEtherbase=crypto.PubkeyToAddress(DevnetSignPrivateKey.PublicKey) @@ -798,6 +818,8 @@ func DefaultGenesisBlockByChainName(chain string) *Genesis { return DefaultBorDevnetGenesisBlock() case networkname.KilnDevnetChainName: return DefaultKilnDevnetGenesisBlock() + case networkname.GnosisChainName: + return DefaultGnosisGenesisBlock() default: return nil } diff --git a/params/bootnodes.go b/params/bootnodes.go index c11a73ae409..e32b4ff3b6d 100644 --- a/params/bootnodes.go +++ b/params/bootnodes.go @@ -209,6 +209,46 @@ var BorMainnetBootnodes = []string{ "enode://88116f4295f5a31538ae409e4d44ad40d22e44ee9342869e7d68bdec55b0f83c1530355ce8b41fbec0928a7d75a5745d528450d30aec92066ab6ba1ee351d710@159.203.9.164:30303", } +var GnosisBootnodes = []string{ + "enode://172fd36d5ff1bf9db202e0646c90719cec55507a1fa231ce955f6882d2a9295e65841c885d94d23a020347e3169889cef0718eba5a5f5f58dd185f3d5fa0e9b7@147.28.151.154:30303", + "enode://ee4eb9844cbd8f684a734b839b4931e37124358cb17c632e7d5a44fd9ce6d457c4a126a914ea1366670d186b9236820c1e9b2cb2d3584eaf6c540fc10d77f00d@147.28.151.146:30303", + "enode://86f5849a24b158cef9c0f9b03554c918cd20f9e3397b63ae2744d6392a28beb9e0373299cfc4446d9f8b97c1598bc161fe7c6b8e84929f5dec8ea6865e9e7414@145.40.69.162:30303", + 
"enode://b6ba2a508682143d3159f36b9ce96cf62c711c2e9d47763fe846602a62acb15dc20112fac40c488ce42ce2c9ad28635255a9e47688c1a8f0611b5a38706fd771@147.28.147.242:30303", + "enode://a20c13b1712d32028a277958346a5c29350e8a3e32d40de43a62cb35baa99f96f274960591e46be5a643be7ee77a15d6a4963170460156a77abcf500f0ba0ff0@104.237.150.151:30303", + "enode://f372b16932a4ee5b6be947556bcca1cf57e498267dd78a7a643a87514a0a5ef4f112cb6934aab5775d3e8940ba535e8f53dfa704e162a72970de61e6ef9fd9aa@45.79.158.26:30303", + "enode://fe9720c93e6335b8cacffa10df594c8c166208fa4be8dcb9275788e54111ae88899022fa9c358f2d0029ee57c2223a7fbc97eb06b294422d35ef796a49d87bff@94.237.98.201:30303", + "enode://6d12181aa8527251dd8f9d37a2ff7eadb46f2a90c69f2282352ac7889b105d6b5787a532facee656b29599ee1ee51eb5b1eb01d2a17190e32a6cbe6dfc996828@45.79.158.8:30303", + "enode://389a625160876776946bfea5a6ce4f4c761bd2062cf8e45e510da77595399ee50a802060868d9bf4580431fc2248cadc9ce61826b3513e090bd1cdd4ce11a9d8@161.97.172.191:30303", + "enode://d086bfbe0d15e841e403695c151920459261dd5d5f259858b32727e2bc64d92f48bdad3cc0120703dbb0e2abd2f51c0459cff517bfa16e683ca27018d82a6dcf@66.175.215.67:30303", + "enode://540a0bc258ba93e6fafc238f49eca0a2032b5d40b79c077dc9b9a304fd636af4167b638eb0f80aa455f5da1cf49b76c881f651ed301a1e28d6855a8a3fbe21a0@167.71.174.1:30303", + "enode://1ceea9d3fb22247edf85102f1e78cd31c2f330ace6ac2789c82766a232d56d7c3c1b2aeb504f2f761de754da22e7dfc5bdde3b00b8b98d1d003f6dd81612f8b3@66.175.211.178:30303", + "enode://4175e9ffd9ac9819c9c596a60b7748cc1c1846cf7e8db47ee97f8aa57e42e8cb3dad4201726498d34af94505b2c1429f9d3a508594c521080e8cd1a3fa24a3d5@37.120.245.155:30303", + "enode://b5eba653df9c583238ea238ca7cadf5d2746f1b4da81a8cfb2c1b600f62fe37df000fa5b5292059f74faa6bbf5af01b3b81f99284a1fbd5c0971d74db7dc4a34@66.175.213.149:30303", + "enode://2a8b64e96da7e67e525f04a157a0016b886dfb2d02553501abd0326b7b061aace7e92ad87c1225dc7d261b05e7886391552c812b386451597bff4b040eb43bec@173.212.236.163:30303", + 
"enode://5f187ae73b07db889c91e8955619b1e6b799696a4b7aab306f7c500a2ec4c7f66dfbea8ac28bde65100653a2fe84b96f2079aed2d61871e06b5a1e32c0f021af@45.79.150.105:30303", + "enode://22e442bf7b7fa9bb5960344ef5aa907b920a931bce83e14620b0378318b7eb73753f7245f0a8252590a71ad9568bcfbb9e5f03bd88ab3feb9c1096227bb8704a@168.119.136.44:30303", + "enode://ef98bc3c9195b9f27312ac646edb6d0096b04c983f93864c30b8f2b20c699ec974a7066cfae090832679e497d23655e0e315a2c96da4a27d75cc4693e6335bba@192.155.90.129:30303", + "enode://de1ab49beedda656976a8fcb01f91ebdb474178fd46e5ce87cf22f0eb90bc3a6721d619ec90c4a6453770a2c24a4bdbc4ef8b8111ffd49f491eb84016a3842d6@54.217.41.94:30303", + "enode://ba04a77c7c8ac0fdd325de91536c33bce3b71095de563aafc72e6ac4111ebf093570c7bdba48b06cc83b0af5596f72fc32563547160e067fcb45a1786b8f7150@45.56.105.53:30303", + "enode://56510b2d296000427e56eb0016d8454998c16347ec2c4ffed84cb82a996707de40ca2b9ea13c8796b4230b4c19ce46f844720ded93135d710b7bbc7352a061d3@50.35.89.213:30304", + "enode://a68b3f3f58ea56dcc70450d371bf0b83363d74cbfdb5f982be00536ae3168aa679c7e7e93bd9ffe34b59527173d73e0ebed0a105c095af2ee16bd1cc66103c80@69.164.215.62:30303", + "enode://e8c7a0db430429bb374c981438c0dbd95e565088a483388aa46d8377a3bd62f02cd83d7e2c7e5fc77606141bfef29d23d4285a7c1d9b7e743cf3029314506df7@80.240.16.221:30303", + "enode://80c8f6f27f80ba91830002a8ca64771f6baf440fd134e88fbecae3a67c8bc58722d624cecbd6439e1a2d28fbd0297d489fdaa40b10c2f3e07fee1913d52b3e30@45.79.185.92:30303", + "enode://da2449aaba873c40c6daf764de55f4b9eae24c4738daec893ef95b6ada96463c6b9624f8e376e1073d21dd820c5bb361e14575121b09bbd7735b6b556ee1b768@67.205.176.117:30303", + "enode://481e43a8e30cdfecfe3159dde960d9e65347c3e8c64dcedea87922df875e4d47a813f53c012920b6754e43cde47302cdfb120fd409b6aa2b47c48e391494c7f5@173.255.233.100:30303", + "enode://90b0a0e74a9a1ad258531b4ceec25587d8b52ff2cfb36206a34bf6ba1a8d21b2abd20da13260102508a2ac67afbeb2d2ab7a5e9d6bea3bce845cd81e655585cc@45.77.110.159:30303", + 
"enode://6012c883efeee664847a48784459980176a22f31bc98c2aae30011ad7ef0b44011364a0a9ae5eb056db1f052cf3556757bd97485677bbaf1781b131e43204971@69.164.222.63:30303", + "enode://5bc43a57273eb4012b59ce268f986cbeeb5f0f878aa25e3d2d71d9b7ff64029a9dd25a84303f80820a78d83ff3a2c570988d0fc68a17d355a98c20c0784aa14d@8.9.5.108:30303", + "enode://89e046a4f10c64265941789b2e3be900adf5132ced13756aeea126cf59b516445ed8053b600aa764860f1aad552f4f4f3b4250c59b3b8a84ead3d3527c005606@172.104.24.215:30303", + "enode://ab7f6c633ba2dc54795dfd2c739ba7d964f499541c0b8d8ba9d275bd3df1b789470a21a921a469fa515a3dfccc96a434a3fd016a169d88d0043fc6744f34288e@67.205.180.17:30303", + "enode://6674773f7aac78d5527fa90c847dcbca198de4081306406a8fec5c15f7a2e141362344041291dd10d0aafa7706a3d8f21a08b6f6834a5b1aab9cccd8ca35ccee@143.110.226.15:30303", + "enode://0caa2d84aef00d0bc5de6cf9db3e736da245d882ec8f91e201b3e1635960e62cbb2f8bfc57e679ff3e1d53da2773e31df624a56b2f457ecb51d09fdf9970c86b@167.99.4.175:30303", + "enode://7aa4c137b1ec078f2df3c17552e23c7213662819132821ed3aaa42f0212cb889dbb21211f9c5912c68fce577ab7fc99b0a47c0cb469ec0ad29c0acd9ce297659@45.33.84.107:30303", + "enode://e026b1a68e8a19106d14effc0df66050c494e10a6b8a4e9f6fd196d95306d7062d129a8c9510ffdbeaf3fe0154b884c116a0e77aec876c368e507de3420fba05@149.28.32.225:30303", + "enode://0a978bd436b850f61e31778fbbeb3e0182f91bb98a30c073674c741c182611e71842333c098d3db5108f06cd589c3a8341172e34be0421fa66d82f0dd83d8ae1@51.81.244.170:30303", + "enode://75f05df1e5a3094ed2c9df36f122b95852206c52288f777982503946d5b273c7ffd8bb06ad60a0df7a31510906d4090c7bd5fd9bcb04a5b4ac1825a2b7212f32@45.63.18.245:30303", +} + const dnsPrefix = "enrtree://AKA3AM6LPBYEUDMVNU3BSVQJ5AD45Y7YPOHJLEF6W26QOE4VTUDPE@" // KnownDNSNetwork returns the address of a public DNS-based node list for the given @@ -259,6 +299,8 @@ func BootnodeURLsOfChain(chain string) []string { return MumbaiBootnodes case networkname.BorMainnetChainName: return BorMainnetBootnodes + case networkname.GnosisChainName: + return 
GnosisBootnodes default: return []string{} } diff --git a/params/chainspecs/gnosis.json b/params/chainspecs/gnosis.json new file mode 100644 index 00000000000..9a0567a029e --- /dev/null +++ b/params/chainspecs/gnosis.json @@ -0,0 +1,21 @@ +{ + "ChainName": "gnosis", + "chainId": 100, + "consensus": "aura", + "homesteadBlock": 0, + "eip150Block": 0, + "eip150Hash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "eip155Block": 0, + "byzantiumBlock": 0, + "constantinopleBlock": 1604400, + "petersburgBlock": 2508800, + "istanbulBlock": 7298030, + "berlinBlock": 16101500, + "londonBlock": 19040000, + "terminalBlockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "aura": { + "DBPath": "", + "InMemory": false, + "Etherbase": "0x0000000000000000000000000000000000000000" + } +} diff --git a/params/config.go b/params/config.go index 3c7f8dffb5a..faf6fa6fa61 100644 --- a/params/config.go +++ b/params/config.go @@ -74,6 +74,7 @@ var ( MumbaiGenesisHash = common.HexToHash("0x7b66506a9ebdbf30d32b43c5f15a3b1216269a1ec3a75aa3182b86176a2b1ca7") BorMainnetGenesisHash = common.HexToHash("0xa9c28ce2141b56c474f1dc504bee9b01eb1bd7d1a507580d5519d4437a97de1b") BorDevnetGenesisHash = common.HexToHash("0x5a06b25b0c6530708ea0b98a3409290e39dce6be7f558493aeb6e4b99a172a87") + GnosisGenesisHash = common.HexToHash("0x4f1dd23188aab3a76b463e4af801b52b1248ef073c648cbdc4c9333d3da79756") ) var ( @@ -165,6 +166,8 @@ var ( BorDevnetChainConfig = readChainSpec("chainspecs/bor-devnet.json") + GnosisChainConfig = readChainSpec("chainspecs/gnosis.json") + CliqueSnapshot = NewSnapshotConfig(10, 1024, 16384, true, "") TestChainConfig = &ChainConfig{ @@ -278,6 +281,11 @@ type CliqueConfig struct { Epoch uint64 `json:"epoch"` // Epoch length to reset votes and checkpoint } +// String implements the stringer interface, returning the consensus engine details. 
+func (c *CliqueConfig) String() string { + return "clique" +} + // AuRaConfig is the consensus engine configs for proof-of-authority based sealing. type AuRaConfig struct { DBPath string @@ -286,8 +294,8 @@ type AuRaConfig struct { } // String implements the stringer interface, returning the consensus engine details. -func (c *CliqueConfig) String() string { - return "clique" +func (c *AuRaConfig) String() string { + return "aura" } type ParliaConfig struct { @@ -378,6 +386,8 @@ func (c *ChainConfig) String() string { engine = c.Parlia case c.Bor != nil: engine = c.Bor + case c.Aura != nil: + engine = c.Aura default: engine = "unknown" } @@ -820,6 +830,8 @@ func ChainConfigByChainName(chain string) *ChainConfig { return BorMainnetChainConfig case networkname.BorDevnetChainName: return BorDevnetChainConfig + case networkname.GnosisChainName: + return GnosisChainConfig default: return nil } @@ -855,6 +867,8 @@ func GenesisHashByChainName(chain string) *common.Hash { return &BorMainnetGenesisHash case networkname.BorDevnetChainName: return &BorDevnetGenesisHash + case networkname.GnosisChainName: + return &GnosisGenesisHash default: return nil } @@ -888,6 +902,8 @@ func ChainConfigByGenesisHash(genesisHash common.Hash) *ChainConfig { return MumbaiChainConfig case genesisHash == BorMainnetGenesisHash: return BorMainnetChainConfig + case genesisHash == GnosisGenesisHash: + return GnosisChainConfig default: return nil } diff --git a/params/networkname/network_name.go b/params/networkname/network_name.go index 958bf698b1d..0bb2eab7ebb 100644 --- a/params/networkname/network_name.go +++ b/params/networkname/network_name.go @@ -16,6 +16,7 @@ const ( MumbaiChainName = "mumbai" BorMainnetChainName = "bor-mainnet" BorDevnetChainName = "bor-devnet" + GnosisChainName = "gnosis" ) var All = []string{ @@ -34,4 +35,5 @@ var All = []string{ MumbaiChainName, BorMainnetChainName, BorDevnetChainName, + GnosisChainName, } From 991d65c0d3054dc7d5037479d5d48d7b623e9eda Mon Sep 17 
00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Wed, 13 Jul 2022 08:37:01 +0100 Subject: [PATCH 075/152] Remove old Gnosis boot nodes (#4699) --- params/bootnodes.go | 29 ----------------------------- 1 file changed, 29 deletions(-) diff --git a/params/bootnodes.go b/params/bootnodes.go index e32b4ff3b6d..66614bf8801 100644 --- a/params/bootnodes.go +++ b/params/bootnodes.go @@ -214,39 +214,10 @@ var GnosisBootnodes = []string{ "enode://ee4eb9844cbd8f684a734b839b4931e37124358cb17c632e7d5a44fd9ce6d457c4a126a914ea1366670d186b9236820c1e9b2cb2d3584eaf6c540fc10d77f00d@147.28.151.146:30303", "enode://86f5849a24b158cef9c0f9b03554c918cd20f9e3397b63ae2744d6392a28beb9e0373299cfc4446d9f8b97c1598bc161fe7c6b8e84929f5dec8ea6865e9e7414@145.40.69.162:30303", "enode://b6ba2a508682143d3159f36b9ce96cf62c711c2e9d47763fe846602a62acb15dc20112fac40c488ce42ce2c9ad28635255a9e47688c1a8f0611b5a38706fd771@147.28.147.242:30303", - "enode://a20c13b1712d32028a277958346a5c29350e8a3e32d40de43a62cb35baa99f96f274960591e46be5a643be7ee77a15d6a4963170460156a77abcf500f0ba0ff0@104.237.150.151:30303", - "enode://f372b16932a4ee5b6be947556bcca1cf57e498267dd78a7a643a87514a0a5ef4f112cb6934aab5775d3e8940ba535e8f53dfa704e162a72970de61e6ef9fd9aa@45.79.158.26:30303", - "enode://fe9720c93e6335b8cacffa10df594c8c166208fa4be8dcb9275788e54111ae88899022fa9c358f2d0029ee57c2223a7fbc97eb06b294422d35ef796a49d87bff@94.237.98.201:30303", - "enode://6d12181aa8527251dd8f9d37a2ff7eadb46f2a90c69f2282352ac7889b105d6b5787a532facee656b29599ee1ee51eb5b1eb01d2a17190e32a6cbe6dfc996828@45.79.158.8:30303", "enode://389a625160876776946bfea5a6ce4f4c761bd2062cf8e45e510da77595399ee50a802060868d9bf4580431fc2248cadc9ce61826b3513e090bd1cdd4ce11a9d8@161.97.172.191:30303", - "enode://d086bfbe0d15e841e403695c151920459261dd5d5f259858b32727e2bc64d92f48bdad3cc0120703dbb0e2abd2f51c0459cff517bfa16e683ca27018d82a6dcf@66.175.215.67:30303", 
"enode://540a0bc258ba93e6fafc238f49eca0a2032b5d40b79c077dc9b9a304fd636af4167b638eb0f80aa455f5da1cf49b76c881f651ed301a1e28d6855a8a3fbe21a0@167.71.174.1:30303", - "enode://1ceea9d3fb22247edf85102f1e78cd31c2f330ace6ac2789c82766a232d56d7c3c1b2aeb504f2f761de754da22e7dfc5bdde3b00b8b98d1d003f6dd81612f8b3@66.175.211.178:30303", "enode://4175e9ffd9ac9819c9c596a60b7748cc1c1846cf7e8db47ee97f8aa57e42e8cb3dad4201726498d34af94505b2c1429f9d3a508594c521080e8cd1a3fa24a3d5@37.120.245.155:30303", - "enode://b5eba653df9c583238ea238ca7cadf5d2746f1b4da81a8cfb2c1b600f62fe37df000fa5b5292059f74faa6bbf5af01b3b81f99284a1fbd5c0971d74db7dc4a34@66.175.213.149:30303", - "enode://2a8b64e96da7e67e525f04a157a0016b886dfb2d02553501abd0326b7b061aace7e92ad87c1225dc7d261b05e7886391552c812b386451597bff4b040eb43bec@173.212.236.163:30303", - "enode://5f187ae73b07db889c91e8955619b1e6b799696a4b7aab306f7c500a2ec4c7f66dfbea8ac28bde65100653a2fe84b96f2079aed2d61871e06b5a1e32c0f021af@45.79.150.105:30303", "enode://22e442bf7b7fa9bb5960344ef5aa907b920a931bce83e14620b0378318b7eb73753f7245f0a8252590a71ad9568bcfbb9e5f03bd88ab3feb9c1096227bb8704a@168.119.136.44:30303", - "enode://ef98bc3c9195b9f27312ac646edb6d0096b04c983f93864c30b8f2b20c699ec974a7066cfae090832679e497d23655e0e315a2c96da4a27d75cc4693e6335bba@192.155.90.129:30303", - "enode://de1ab49beedda656976a8fcb01f91ebdb474178fd46e5ce87cf22f0eb90bc3a6721d619ec90c4a6453770a2c24a4bdbc4ef8b8111ffd49f491eb84016a3842d6@54.217.41.94:30303", - "enode://ba04a77c7c8ac0fdd325de91536c33bce3b71095de563aafc72e6ac4111ebf093570c7bdba48b06cc83b0af5596f72fc32563547160e067fcb45a1786b8f7150@45.56.105.53:30303", - "enode://56510b2d296000427e56eb0016d8454998c16347ec2c4ffed84cb82a996707de40ca2b9ea13c8796b4230b4c19ce46f844720ded93135d710b7bbc7352a061d3@50.35.89.213:30304", - "enode://a68b3f3f58ea56dcc70450d371bf0b83363d74cbfdb5f982be00536ae3168aa679c7e7e93bd9ffe34b59527173d73e0ebed0a105c095af2ee16bd1cc66103c80@69.164.215.62:30303", - 
"enode://e8c7a0db430429bb374c981438c0dbd95e565088a483388aa46d8377a3bd62f02cd83d7e2c7e5fc77606141bfef29d23d4285a7c1d9b7e743cf3029314506df7@80.240.16.221:30303", - "enode://80c8f6f27f80ba91830002a8ca64771f6baf440fd134e88fbecae3a67c8bc58722d624cecbd6439e1a2d28fbd0297d489fdaa40b10c2f3e07fee1913d52b3e30@45.79.185.92:30303", - "enode://da2449aaba873c40c6daf764de55f4b9eae24c4738daec893ef95b6ada96463c6b9624f8e376e1073d21dd820c5bb361e14575121b09bbd7735b6b556ee1b768@67.205.176.117:30303", - "enode://481e43a8e30cdfecfe3159dde960d9e65347c3e8c64dcedea87922df875e4d47a813f53c012920b6754e43cde47302cdfb120fd409b6aa2b47c48e391494c7f5@173.255.233.100:30303", - "enode://90b0a0e74a9a1ad258531b4ceec25587d8b52ff2cfb36206a34bf6ba1a8d21b2abd20da13260102508a2ac67afbeb2d2ab7a5e9d6bea3bce845cd81e655585cc@45.77.110.159:30303", - "enode://6012c883efeee664847a48784459980176a22f31bc98c2aae30011ad7ef0b44011364a0a9ae5eb056db1f052cf3556757bd97485677bbaf1781b131e43204971@69.164.222.63:30303", - "enode://5bc43a57273eb4012b59ce268f986cbeeb5f0f878aa25e3d2d71d9b7ff64029a9dd25a84303f80820a78d83ff3a2c570988d0fc68a17d355a98c20c0784aa14d@8.9.5.108:30303", - "enode://89e046a4f10c64265941789b2e3be900adf5132ced13756aeea126cf59b516445ed8053b600aa764860f1aad552f4f4f3b4250c59b3b8a84ead3d3527c005606@172.104.24.215:30303", - "enode://ab7f6c633ba2dc54795dfd2c739ba7d964f499541c0b8d8ba9d275bd3df1b789470a21a921a469fa515a3dfccc96a434a3fd016a169d88d0043fc6744f34288e@67.205.180.17:30303", - "enode://6674773f7aac78d5527fa90c847dcbca198de4081306406a8fec5c15f7a2e141362344041291dd10d0aafa7706a3d8f21a08b6f6834a5b1aab9cccd8ca35ccee@143.110.226.15:30303", - "enode://0caa2d84aef00d0bc5de6cf9db3e736da245d882ec8f91e201b3e1635960e62cbb2f8bfc57e679ff3e1d53da2773e31df624a56b2f457ecb51d09fdf9970c86b@167.99.4.175:30303", - "enode://7aa4c137b1ec078f2df3c17552e23c7213662819132821ed3aaa42f0212cb889dbb21211f9c5912c68fce577ab7fc99b0a47c0cb469ec0ad29c0acd9ce297659@45.33.84.107:30303", - 
"enode://e026b1a68e8a19106d14effc0df66050c494e10a6b8a4e9f6fd196d95306d7062d129a8c9510ffdbeaf3fe0154b884c116a0e77aec876c368e507de3420fba05@149.28.32.225:30303", - "enode://0a978bd436b850f61e31778fbbeb3e0182f91bb98a30c073674c741c182611e71842333c098d3db5108f06cd589c3a8341172e34be0421fa66d82f0dd83d8ae1@51.81.244.170:30303", - "enode://75f05df1e5a3094ed2c9df36f122b95852206c52288f777982503946d5b273c7ffd8bb06ad60a0df7a31510906d4090c7bd5fd9bcb04a5b4ac1825a2b7212f32@45.63.18.245:30303", } const dnsPrefix = "enrtree://AKA3AM6LPBYEUDMVNU3BSVQJ5AD45Y7YPOHJLEF6W26QOE4VTUDPE@" From ed42f98a986bf5cabd03da12df7a0a83384f3b17 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Wed, 13 Jul 2022 14:45:00 +0700 Subject: [PATCH 076/152] backward compatibility: use default UID=1000 GID=1000 (#4702) * backward compatibility: use default UID=1000 GID=1000 * backward compatibility: use default UID=1000 GID=1000 --- .env.example | 4 ++-- docker-compose.yml | 12 ++++++------ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.env.example b/.env.example index 78ad26cbc5e..29bcc7c987f 100644 --- a/.env.example +++ b/.env.example @@ -2,5 +2,5 @@ ERIGON_USER=erigon # UID, GID of user inside docker process which must exist also on host OS -DOCKER_UID=3473 # random number [1001, 10000] chosen arbitrarily for example -DOCKER_GID=3473 # can choose any valid #. 1000 tends to be taken by first user +DOCKER_UID=1000 # random number [1001, 10000] chosen arbitrarily for example +DOCKER_GID=1000 # can choose any valid #. 1000 tends to be taken by first user diff --git a/docker-compose.yml b/docker-compose.yml index 1fed2c340c6..40cf128e883 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -25,11 +25,11 @@ services: erigon: image: thorax/erigon:${TAG:-latest} build: - args: - UID: ${DOCKER_UID} - GID: ${DOCKER_GID} - context: . - user: "${DOCKER_UID}:${DOCKER_GID}" + args: + UID: ${DOCKER_UID:1000} + GID: ${DOCKER_GID:1000} + context: . 
+ user: "${DOCKER_UID:1000}:${DOCKER_GID:1000}" command: | erigon ${ERIGON_FLAGS-} --private.api.addr=0.0.0.0:9090 --sentry.api.addr=sentry:9091 --downloader.api.addr=downloader:9093 --txpool.disable @@ -67,7 +67,7 @@ services: prometheus: image: prom/prometheus:v2.36.2 - user: ${DOCKER_UID}:${DOCKER_GID} # Uses erigon user from Dockerfile + user: ${DOCKER_UID:1000}:${DOCKER_GID:1000} # Uses erigon user from Dockerfile command: --log.level=warn --config.file=/etc/prometheus/prometheus.yml --storage.tsdb.path=/prometheus --storage.tsdb.retention.time=150d --web.console.libraries=/usr/share/prometheus/console_libraries --web.console.templates=/usr/share/prometheus/consoles ports: [ "9090:9090" ] volumes: From dd1a5944a90dbb791c0d61dff5fd5161c83d2679 Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Wed, 13 Jul 2022 11:07:21 +0100 Subject: [PATCH 077/152] Better logging for invalid PoS headers (#4703) --- eth/stagedsync/stage_headers.go | 15 +++++++++++---- turbo/stages/stageloop.go | 1 + 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index c5bf40d0920..10d93bebda8 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -482,11 +482,15 @@ func handleNewPayload( } bad, lastValidHash := cfg.hd.IsBadHeaderPoS(headerHash) - if !bad { + if bad { + log.Warn(fmt.Sprintf("[%s] Previously known bad block", s.LogPrefix()), "height", headerNumber, "hash", headerHash) + } else { bad, lastValidHash = cfg.hd.IsBadHeaderPoS(header.ParentHash) + if bad { + log.Warn(fmt.Sprintf("[%s] Previously known bad parent", s.LogPrefix()), "height", headerNumber, "hash", headerHash, "parentHash", header.ParentHash) + } } if bad { - log.Info(fmt.Sprintf("[%s] Previously known bad block", s.LogPrefix()), "height", headerNumber, "hash", headerHash) cfg.hd.BeaconRequestList.Remove(requestId) cfg.hd.ReportBadHeaderPoS(headerHash, 
lastValidHash) return &privateapi.PayloadStatus{ @@ -508,7 +512,8 @@ func handleNewPayload( return &privateapi.PayloadStatus{Status: remote.EngineStatus_SYNCING}, nil } - if header.Number.Uint64() != parent.Number.Uint64()+1 { + if headerNumber != parent.Number.Uint64()+1 { + log.Warn(fmt.Sprintf("[%s] Invalid block number", s.LogPrefix()), "headerNumber", headerNumber, "parentNumber", parent.Number.Uint64()) cfg.hd.BeaconRequestList.Remove(requestId) cfg.hd.ReportBadHeaderPoS(headerHash, header.ParentHash) return &privateapi.PayloadStatus{ @@ -522,6 +527,7 @@ func handleNewPayload( for _, tx := range payloadMessage.Body.Transactions { if types.TypedTransactionMarshalledAsRlpString(tx) { + log.Warn(fmt.Sprintf("[%s] typed txn marshalled as RLP string", s.LogPrefix()), "tx", common.Bytes2Hex(tx)) cfg.hd.ReportBadHeaderPoS(headerHash, header.ParentHash) return &privateapi.PayloadStatus{ Status: remote.EngineStatus_INVALID, @@ -533,7 +539,7 @@ func handleNewPayload( transactions, err := types.DecodeTransactions(payloadMessage.Body.Transactions) if err != nil { - log.Warn("Error during Beacon transaction decoding", "err", err.Error()) + log.Warn(fmt.Sprintf("[%s] Error during Beacon transaction decoding", s.LogPrefix()), "err", err.Error()) cfg.hd.ReportBadHeaderPoS(headerHash, header.ParentHash) return &privateapi.PayloadStatus{ Status: remote.EngineStatus_INVALID, @@ -590,6 +596,7 @@ func verifyAndSaveNewPoSHeader( } success = validationError == nil if !success { + log.Warn("Verification failed for header", "hash", headerHash, "height", headerNumber, "err", validationError) cfg.hd.ReportBadHeaderPoS(headerHash, latestValidHash) } else if err := headerInserter.FeedHeaderPoS(tx, header, headerHash); err != nil { return nil, false, err diff --git a/turbo/stages/stageloop.go b/turbo/stages/stageloop.go index 5f1cc0a48b2..36ae3fca7f7 100644 --- a/turbo/stages/stageloop.go +++ b/turbo/stages/stageloop.go @@ -47,6 +47,7 @@ func SendPayloadStatus(hd 
*headerdownload.HeaderDownload, headBlockHash common.H if headBlockHash == pendingPayloadHash { status = remote.EngineStatus_VALID } else { + log.Warn("Failed to execute pending payload", "pendingPayload", pendingPayloadHash, "headBlock", headBlockHash) status = remote.EngineStatus_INVALID } hd.PayloadStatusCh <- privateapi.PayloadStatus{ From 36586e52a10b3d61bbc789e52b7a53b23f0f455b Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Wed, 13 Jul 2022 17:39:20 +0700 Subject: [PATCH 078/152] Torrent: add fsync after piece download (#4700) --- cmd/downloader/downloader/downloader.go | 34 ++++-- .../downloader/downloader_grpc_server.go | 20 +++- cmd/downloader/downloader/util.go | 1 + go.mod | 51 +++++---- go.sum | 106 ++++++++++-------- 5 files changed, 128 insertions(+), 84 deletions(-) diff --git a/cmd/downloader/downloader/downloader.go b/cmd/downloader/downloader/downloader.go index 5c8debdc526..a6c341b6db6 100644 --- a/cmd/downloader/downloader/downloader.go +++ b/cmd/downloader/downloader/downloader.go @@ -110,14 +110,15 @@ func (d *Downloader) SnapDir() string { } func (d *Downloader) ReCalcStats(interval time.Duration) { + //Call this methods outside of `statsLock` critical section, because they have own locks with contention + torrents := d.torrentClient.Torrents() + connStats := d.torrentClient.ConnStats() + peers := make(map[torrent.PeerID]struct{}, 16) + d.statsLock.Lock() defer d.statsLock.Unlock() prevStats, stats := d.stats, d.stats - peers := make(map[torrent.PeerID]struct{}, 16) - torrents := d.torrentClient.Torrents() - connStats := d.torrentClient.ConnStats() - stats.Completed = true stats.BytesDownload = uint64(connStats.BytesReadUsefulIntendedData.Int64()) stats.BytesUpload = uint64(connStats.BytesWrittenData.Int64()) @@ -262,6 +263,8 @@ func (d *Downloader) verify() error { } func (d *Downloader) addSegments() error { + logEvery := time.NewTicker(20 * time.Second) + defer logEvery.Stop() if err := BuildTorrentFilesIfNeed(context.Background(), 
d.cfg.DataDir); err != nil { return err } @@ -269,12 +272,27 @@ func (d *Downloader) addSegments() error { if err != nil { return fmt.Errorf("seedableSegmentFiles: %w", err) } + wg := &sync.WaitGroup{} + i := atomic.NewInt64(0) for _, f := range files { - _, err := AddSegment(f, d.cfg.DataDir, d.torrentClient) - if err != nil { - return err - } + wg.Add(1) + go func(f string) { + defer wg.Done() + _, err := AddSegment(f, d.cfg.DataDir, d.torrentClient) + if err != nil { + log.Warn("[snapshots] AddSegment", "err", err) + return + } + + i.Inc() + select { + case <-logEvery.C: + log.Info("[snpshots] initializing", "files", fmt.Sprintf("%s/%d", i.String(), len(files))) + default: + } + }(f) } + wg.Wait() return nil } diff --git a/cmd/downloader/downloader/downloader_grpc_server.go b/cmd/downloader/downloader/downloader_grpc_server.go index 8ff4757f58c..c75516aa439 100644 --- a/cmd/downloader/downloader/downloader_grpc_server.go +++ b/cmd/downloader/downloader/downloader_grpc_server.go @@ -28,25 +28,33 @@ type GrpcServer struct { // Download - create new .torrent ONLY if initialSync, everything else Erigon can generate by itself func (s *GrpcServer) Download(ctx context.Context, request *proto_downloader.DownloadRequest) (*emptypb.Empty, error) { + logEvery := time.NewTicker(20 * time.Second) + defer logEvery.Stop() + torrentClient := s.d.Torrent() mi := &metainfo.MetaInfo{AnnounceList: Trackers} - for _, it := range request.Items { + for i, it := range request.Items { if it.TorrentHash == nil { // seed new snapshot if err := BuildTorrentFileIfNeed(it.Path, s.d.SnapDir()); err != nil { return nil, err } } + hash := Proto2InfoHash(it.TorrentHash) + if _, ok := torrentClient.Torrent(hash); ok { + continue + } + ok, err := AddSegment(it.Path, s.d.SnapDir(), torrentClient) if err != nil { return nil, fmt.Errorf("AddSegment: %w", err) } - if ok { - continue + select { + case <-logEvery.C: + log.Info("[snpshots] initializing", "files", fmt.Sprintf("%d/%d", i, 
len(request.Items))) + default: } - - hash := Proto2InfoHash(it.TorrentHash) - if _, ok := torrentClient.Torrent(hash); ok { + if ok { continue } diff --git a/cmd/downloader/downloader/util.go b/cmd/downloader/downloader/util.go index 905dfa101d5..d057250434e 100644 --- a/cmd/downloader/downloader/util.go +++ b/cmd/downloader/downloader/util.go @@ -310,6 +310,7 @@ func AddTorrentFile(torrentFilePath string, torrentClient *torrent.Client) (*tor ts.ChunkSize = 0 } + ts.DisallowDataDownload = true t, _, err := torrentClient.AddTorrentSpec(ts) if err != nil { return nil, err diff --git a/go.mod b/go.mod index 7975a5a71a4..ab125c696e3 100644 --- a/go.mod +++ b/go.mod @@ -7,8 +7,8 @@ require ( github.com/VictoriaMetrics/fastcache v1.10.0 github.com/VictoriaMetrics/metrics v1.18.1 github.com/anacrolix/go-libutp v1.2.0 - github.com/anacrolix/log v0.13.1 - github.com/anacrolix/torrent v1.44.0 + github.com/anacrolix/log v0.13.2-0.20220427063716-a4894bb521c6 + github.com/anacrolix/torrent v1.46.1-0.20220713100403-caa9400c52fe github.com/benesch/cgosymbolizer v0.0.0-20190515212042-bec6fe6e597b github.com/btcsuite/btcd v0.22.0-beta github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b @@ -74,21 +74,21 @@ require ( crawshaw.io/sqlite v0.3.3-0.20210127221821-98b1f83c5508 // indirect github.com/ajwerner/btree v0.0.0-20211221152037-f427b3e689c0 // indirect github.com/anacrolix/chansync v0.3.0 // indirect - github.com/anacrolix/dht/v2 v2.16.2-0.20220311024416-dd658f18fd51 // indirect + github.com/anacrolix/dht/v2 v2.18.0 // indirect github.com/anacrolix/envpprof v1.2.1 // indirect - github.com/anacrolix/generics v0.0.0-20220510042907-b50562b436ec // indirect + github.com/anacrolix/generics v0.0.0-20220618083756-f99e35403a60 // indirect github.com/anacrolix/missinggo v1.3.0 // indirect github.com/anacrolix/missinggo/perf v1.0.0 // indirect github.com/anacrolix/missinggo/v2 v2.7.0 // indirect github.com/anacrolix/mmsg v1.0.0 // indirect - github.com/anacrolix/multiless 
v0.2.1-0.20211218050420-533661eef5dc // indirect - github.com/anacrolix/stm v0.3.0 // indirect + github.com/anacrolix/multiless v0.3.0 // indirect + github.com/anacrolix/stm v0.4.0 // indirect github.com/anacrolix/sync v0.4.0 // indirect github.com/anacrolix/upnp v0.1.3-0.20220123035249-922794e51c96 // indirect github.com/anacrolix/utp v0.1.0 // indirect github.com/bahlo/generic-list-go v0.2.0 // indirect github.com/benbjohnson/immutable v0.3.0 // indirect - github.com/bits-and-blooms/bitset v1.2.0 // indirect + github.com/bits-and-blooms/bitset v1.2.2 // indirect github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect @@ -96,7 +96,7 @@ require ( github.com/docker/docker v20.10.17+incompatible github.com/dustin/go-humanize v1.0.0 // indirect github.com/flanglet/kanzi-go v1.9.1-0.20211212184056-72dda96261ee // indirect - github.com/fsnotify/fsnotify v1.5.1 // indirect + github.com/fsnotify/fsnotify v1.5.4 // indirect github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61 // indirect github.com/go-kit/kit v0.10.0 // indirect github.com/go-logfmt/logfmt v0.5.0 // indirect @@ -110,7 +110,7 @@ require ( github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/kr/text v0.2.0 // indirect - github.com/lispad/go-generics-tools v1.0.0 // indirect + github.com/lispad/go-generics-tools v1.1.0 // indirect github.com/magiconair/properties v1.8.6 // indirect github.com/mattn/go-colorable v0.1.11 // indirect github.com/mattn/go-isatty v0.0.14 // indirect @@ -118,37 +118,37 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mschoch/smat v0.2.0 // indirect github.com/pion/datachannel v1.5.2 // indirect - github.com/pion/dtls/v2 v2.1.2 // indirect - github.com/pion/ice/v2 v2.1.20 // indirect - github.com/pion/interceptor v0.1.7 // indirect + github.com/pion/dtls/v2 
v2.1.5 // indirect + github.com/pion/ice/v2 v2.2.6 // indirect + github.com/pion/interceptor v0.1.11 // indirect github.com/pion/logging v0.2.2 // indirect github.com/pion/mdns v0.0.5 // indirect github.com/pion/randutil v0.1.0 // indirect github.com/pion/rtcp v1.2.9 // indirect - github.com/pion/rtp v1.7.4 // indirect + github.com/pion/rtp v1.7.13 // indirect github.com/pion/sctp v1.8.2 // indirect - github.com/pion/sdp/v3 v3.0.4 // indirect - github.com/pion/srtp/v2 v2.0.5 // indirect - github.com/pion/transport v0.13.0 // indirect - github.com/pion/turn/v2 v2.0.6 // indirect + github.com/pion/sdp/v3 v3.0.5 // indirect + github.com/pion/srtp/v2 v2.0.9 // indirect + github.com/pion/transport v0.13.1 // indirect + github.com/pion/turn/v2 v2.0.8 // indirect github.com/pion/udp v0.1.1 // indirect - github.com/pion/webrtc/v3 v3.1.24-0.20220208053747-94262c1b2b38 // indirect + github.com/pion/webrtc/v3 v3.1.42 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect - github.com/rogpeppe/go-internal v1.8.0 // indirect - github.com/rs/dnscache v0.0.0-20210201191234-295bba877686 // indirect + github.com/rogpeppe/go-internal v1.8.1 // indirect + github.com/rs/dnscache v0.0.0-20211102005908-e0241e321417 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect - github.com/tidwall/btree v0.7.2-0.20211211132910-4215444137fc // indirect + github.com/tidwall/btree v1.3.1 // indirect github.com/valyala/fastrand v1.1.0 // indirect github.com/valyala/histogram v1.2.0 // indirect go.etcd.io/bbolt v1.3.6 // indirect golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect - golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4 // indirect + golang.org/x/net v0.0.0-20220630215102-69896b714898 // indirect golang.org/x/text v0.3.7 // indirect golang.org/x/tools v0.1.10 // indirect - 
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect + golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect google.golang.org/genproto v0.0.0-20200825200019-8632dd797987 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect @@ -163,4 +163,7 @@ require ( modernc.org/token v1.0.0 // indirect ) -require gotest.tools/v3 v3.3.0 // indirect +require ( + github.com/alecthomas/atomic v0.1.0-alpha2 // indirect + gotest.tools/v3 v3.3.0 // indirect +) diff --git a/go.sum b/go.sum index ee8469d1f66..b687d0fb068 100644 --- a/go.sum +++ b/go.sum @@ -24,6 +24,10 @@ github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBA github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/ajwerner/btree v0.0.0-20211221152037-f427b3e689c0 h1:byYvvbfSo3+9efR4IeReh77gVs4PnNDR3AMOE9NJ7a0= github.com/ajwerner/btree v0.0.0-20211221152037-f427b3e689c0/go.mod h1:q37NoqncT41qKc048STsifIt69LfUJ8SrWWcz/yam5k= +github.com/alecthomas/assert/v2 v2.0.0-alpha3 h1:pcHeMvQ3OMstAWgaeaXIAL8uzB9xMm2zlxt+/4ml8lk= +github.com/alecthomas/atomic v0.1.0-alpha2 h1:dqwXmax66gXvHhsOS4pGPZKqYOlTkapELkLb3MNdlH8= +github.com/alecthomas/atomic v0.1.0-alpha2/go.mod h1:zD6QGEyw49HIq19caJDc2NMXAy8rNi9ROrxtMXATfyI= +github.com/alecthomas/repr v0.0.0-20210801044451-80ca428c5142 h1:8Uy0oSf5co/NZXje7U1z8Mpep++QJOldL2hs/sBQf48= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -32,23 +36,23 @@ github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod 
h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/anacrolix/chansync v0.3.0 h1:lRu9tbeuw3wl+PhMu/r+JJCRu5ArFXIluOgdF0ao6/U= github.com/anacrolix/chansync v0.3.0/go.mod h1:DZsatdsdXxD0WiwcGl0nJVwyjCKMDv+knl1q2iBjA2k= -github.com/anacrolix/dht/v2 v2.16.2-0.20220311024416-dd658f18fd51 h1:issCwqC43gQ7n0gg9rn0EeVYXnQMI7vlnWub4oidtlU= -github.com/anacrolix/dht/v2 v2.16.2-0.20220311024416-dd658f18fd51/go.mod h1:osiyaNrMLG9dw7wUtVMaII/NdCjlXeHjUcYzXnmop68= +github.com/anacrolix/dht/v2 v2.18.0 h1:btjVjzjKqO5nKGbJHJ2UmuwiRx+EgX3e+OCHC9+WRz8= +github.com/anacrolix/dht/v2 v2.18.0/go.mod h1:mxrSeP/LIY429SgWMO9o6UdjBjB8ZjBh6HHCmd8Ly1g= github.com/anacrolix/envpprof v0.0.0-20180404065416-323002cec2fa/go.mod h1:KgHhUaQMc8cC0+cEflSgCFNFbKwi5h54gqtVn8yhP7c= github.com/anacrolix/envpprof v1.0.0/go.mod h1:KgHhUaQMc8cC0+cEflSgCFNFbKwi5h54gqtVn8yhP7c= github.com/anacrolix/envpprof v1.1.0/go.mod h1:My7T5oSqVfEn4MD4Meczkw/f5lSIndGAKu/0SM/rkf4= github.com/anacrolix/envpprof v1.2.1 h1:25TJe6t/i0AfzzldiGFKCpD+s+dk8lONBcacJZB2rdE= github.com/anacrolix/envpprof v1.2.1/go.mod h1:My7T5oSqVfEn4MD4Meczkw/f5lSIndGAKu/0SM/rkf4= -github.com/anacrolix/generics v0.0.0-20220510042907-b50562b436ec h1:OnHX2MpnlLBBee06jcpL6eBppz766BnquXnZKH6iGgI= -github.com/anacrolix/generics v0.0.0-20220510042907-b50562b436ec/go.mod h1:ff2rHB/joTV03aMSSn/AZNnaIpUw0h3njetGsaXcMy8= +github.com/anacrolix/generics v0.0.0-20220618083756-f99e35403a60 h1:k4/h2B1gGF+PJGyGHxs8nmHHt1pzWXZWBj6jn4OBlRc= +github.com/anacrolix/generics v0.0.0-20220618083756-f99e35403a60/go.mod h1:ff2rHB/joTV03aMSSn/AZNnaIpUw0h3njetGsaXcMy8= github.com/anacrolix/go-libutp v1.2.0 h1:sjxoB+/ARiKUR7IK/6wLWyADIBqGmu1fm0xo+8Yy7u0= github.com/anacrolix/go-libutp v1.2.0/go.mod h1:RrJ3KcaDcf9Jqp33YL5V/5CBEc6xMc7aJL8wXfuWL50= github.com/anacrolix/log v0.3.0/go.mod h1:lWvLTqzAnCWPJA08T2HCstZi0L1y2Wyvm3FJgwU9jwU= github.com/anacrolix/log v0.6.0/go.mod h1:lWvLTqzAnCWPJA08T2HCstZi0L1y2Wyvm3FJgwU9jwU= github.com/anacrolix/log v0.10.0/go.mod 
h1:s5yBP/j046fm9odtUTbHOfDUq/zh1W8OkPpJtnX0oQI= github.com/anacrolix/log v0.10.1-0.20220123034749-3920702c17f8/go.mod h1:GmnE2c0nvz8pOIPUSC9Rawgefy1sDXqposC2wgtBZE4= -github.com/anacrolix/log v0.13.1 h1:BmVwTdxHd5VcNrLylgKwph4P4wf+5VvPgOK4yi91fTY= -github.com/anacrolix/log v0.13.1/go.mod h1:D4+CvN8SnruK6zIFS/xPoRJmtvtnxs+CSfDQ+BFxZ68= +github.com/anacrolix/log v0.13.2-0.20220427063716-a4894bb521c6 h1:WH/Xcok0GpNID/NUV80CfTwUYXdbhR3pX/DXboxGhNI= +github.com/anacrolix/log v0.13.2-0.20220427063716-a4894bb521c6/go.mod h1:D4+CvN8SnruK6zIFS/xPoRJmtvtnxs+CSfDQ+BFxZ68= github.com/anacrolix/lsan v0.0.0-20211126052245-807000409a62 h1:P04VG6Td13FHMgS5ZBcJX23NPC/fiC4cp9bXwYujdYM= github.com/anacrolix/lsan v0.0.0-20211126052245-807000409a62/go.mod h1:66cFKPCO7Sl4vbFnAaSq7e4OXtdMhRSBagJGWgmpJbM= github.com/anacrolix/missinggo v0.0.0-20180725070939-60ef2fbf63df/go.mod h1:kwGiTUTZ0+p4vAz3VbAI5a30t2YbvemcmspjKwrAz5s= @@ -67,11 +71,11 @@ github.com/anacrolix/missinggo/v2 v2.7.0/go.mod h1:2IZIvmRTizALNYFYXsPR7ofXPzJgy github.com/anacrolix/mmsg v0.0.0-20180515031531-a4a3ba1fc8bb/go.mod h1:x2/ErsYUmT77kezS63+wzZp8E3byYB0gzirM/WMBLfw= github.com/anacrolix/mmsg v1.0.0 h1:btC7YLjOn29aTUAExJiVUhQOuf/8rhm+/nWCMAnL3Hg= github.com/anacrolix/mmsg v1.0.0/go.mod h1:x8kRaJY/dCrY9Al0PEcj1mb/uFHwP6GCJ9fLl4thEPc= -github.com/anacrolix/multiless v0.2.1-0.20211218050420-533661eef5dc h1:K047jUtd0Xv4SEpv/5DoBgDvj4ZNpT1SOVtMlFpRrh0= -github.com/anacrolix/multiless v0.2.1-0.20211218050420-533661eef5dc/go.mod h1:TrCLEZfIDbMVfLoQt5tOoiBS/uq4y8+ojuEVVvTNPX4= +github.com/anacrolix/multiless v0.3.0 h1:5Bu0DZncjE4e06b9r1Ap2tUY4Au0NToBP5RpuEngSis= +github.com/anacrolix/multiless v0.3.0/go.mod h1:TrCLEZfIDbMVfLoQt5tOoiBS/uq4y8+ojuEVVvTNPX4= github.com/anacrolix/stm v0.2.0/go.mod h1:zoVQRvSiGjGoTmbM0vSLIiaKjWtNPeTvXUSdJQA4hsg= -github.com/anacrolix/stm v0.3.0 h1:peQncJSNJtk1YBrFbW0DLKYqll+sa0kOk8EvXRcO+wA= -github.com/anacrolix/stm v0.3.0/go.mod h1:spImf/rXwiAUoYYJK1YCZeWkpaHZ3kzjGFjwK5OStfU= 
+github.com/anacrolix/stm v0.4.0 h1:tOGvuFwaBjeu1u9X1eIh9TX8OEedEiEQ1se1FjhFnXY= +github.com/anacrolix/stm v0.4.0/go.mod h1:GCkwqWoAsP7RfLW+jw+Z0ovrt2OO7wRzcTtFYMYY5t8= github.com/anacrolix/sync v0.0.0-20180808010631-44578de4e778/go.mod h1:s735Etp3joe/voe2sdaXLcqDdJSay1O0OPnM0ystjqk= github.com/anacrolix/sync v0.3.0/go.mod h1:BbecHL6jDSExojhNtgTFSBcdGerzNc64tz3DCOj/I0g= github.com/anacrolix/sync v0.4.0 h1:T+MdO/u87ir/ijWsTFsPYw5jVm0SMm4kVpg8t4KF38o= @@ -79,8 +83,8 @@ github.com/anacrolix/sync v0.4.0/go.mod h1:BbecHL6jDSExojhNtgTFSBcdGerzNc64tz3DC github.com/anacrolix/tagflag v0.0.0-20180109131632-2146c8d41bf0/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw= github.com/anacrolix/tagflag v1.0.0/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw= github.com/anacrolix/tagflag v1.1.0/go.mod h1:Scxs9CV10NQatSmbyjqmqmeQNwGzlNe0CMUMIxqHIG8= -github.com/anacrolix/torrent v1.44.0 h1:Yl58hCsX+4O7me5oUWQphg0G46bs22hJWLdEYAq250w= -github.com/anacrolix/torrent v1.44.0/go.mod h1:SsvA8hlN3q1gC1Pf+fJ7QrfWI+5DumO6tEl4bqf+D2U= +github.com/anacrolix/torrent v1.46.1-0.20220713100403-caa9400c52fe h1:Unun5w67tVuGWK8Z5ERcHFCYOeOVgeWUIgHmp7Z8Apw= +github.com/anacrolix/torrent v1.46.1-0.20220713100403-caa9400c52fe/go.mod h1:/XEFAKfEx08Eng4vZRqnthqPB7ZqTH6DSDEHzGpDJRk= github.com/anacrolix/upnp v0.1.3-0.20220123035249-922794e51c96 h1:QAVZ3pN/J4/UziniAhJR2OZ9Ox5kOY2053tBbbqUPYA= github.com/anacrolix/upnp v0.1.3-0.20220123035249-922794e51c96/go.mod h1:Wa6n8cYIdaG35x15aH3Zy6d03f7P728QfdcDeD/IEOs= github.com/anacrolix/utp v0.1.0 h1:FOpQOmIwYsnENnz7tAGohA+r6iXpRjrq8ssKSre2Cp4= @@ -106,8 +110,9 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bits-and-blooms/bitset v1.2.0 
h1:Kn4yilvwNtMACtf1eYDlG8H77R07mZSPbMjLyS07ChA= github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= +github.com/bits-and-blooms/bitset v1.2.2 h1:J5gbX05GpMdBjCvQ9MteIg2KKDExr7DrgK+Yc15FvIk= +github.com/bits-and-blooms/bitset v1.2.2/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/bradfitz/iter v0.0.0-20140124041915-454541ec3da2/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo= github.com/bradfitz/iter v0.0.0-20190303215204-33e6a9893b0c/go.mod h1:PyRFw1Lt2wKX4ZVSQ2mk+PeDa1rxyObEDlApuIsUKuo= github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 h1:GKTyiRCL6zVf5wWaqKnf+7Qs6GbEPfd4iMOitWzXJx8= @@ -200,11 +205,12 @@ github.com/flanglet/kanzi-go v1.9.1-0.20211212184056-72dda96261ee/go.mod h1:/sUS github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= github.com/frankban/quicktest v1.9.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq9vcPtJmFl7Y= -github.com/frankban/quicktest v1.14.0 h1:+cqqvzZV87b4adx/5ayVOaYZ2CrvM4ejQvUdBzPPUss= +github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= +github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61 h1:IZqZOB2fydHte3kUgxrzK5E1fW7RQGeDwE8F/ZZnUYc= github.com/garslo/gogen v0.0.0-20170306192744-1d203ffc1f61/go.mod 
h1:Q0X6pkwTILDlzrGEckF6HKjXe48EgsY/l7K7vhY4MW8= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -325,6 +331,7 @@ github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= github.com/holiman/uint256 v1.2.0 h1:gpSYcPLWGv4sG43I2mVLiDZCNDh/EpGjSk8tmtxitHM= github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= @@ -391,8 +398,8 @@ github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf github.com/ledgerwatch/secp256k1 v1.0.0/go.mod h1:SPmqJFciiF/Q0mPt2jVs2dTr/1TZBTIA+kPMmKgBAak= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/lispad/go-generics-tools v1.0.0 h1:pPtt9QERE5sGQfwvLakKe2sjhcNKKsDSnr3939ofj1E= -github.com/lispad/go-generics-tools v1.0.0/go.mod h1:stn7X24ZIyFvaSyttafq3VlJzGJJJkUtLYdbgi/gopM= +github.com/lispad/go-generics-tools v1.1.0 h1:mbSgcxdFVmpoyso1X/MJHXbSbSL3dD+qhRryyxk+/XY= +github.com/lispad/go-generics-tools v1.1.0/go.mod h1:2csd1EJljo/gy5qG4khXol7ivCPptNjG5Uv2X8MgK84= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= @@ -475,44 +482,43 @@ 
github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0 github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pion/datachannel v1.5.2 h1:piB93s8LGmbECrpO84DnkIVWasRMk3IimbcXkTQLE6E= github.com/pion/datachannel v1.5.2/go.mod h1:FTGQWaHrdCwIJ1rw6xBIfZVkslikjShim5yr05XFuCQ= -github.com/pion/dtls/v2 v2.1.1/go.mod h1:qG3gA7ZPZemBqpEFqRKyURYdKEwFZQCGb7gv9T3ON3Y= -github.com/pion/dtls/v2 v2.1.2 h1:22Q1Jk9L++Yo7BIf9130MonNPfPVb+YgdYLeyQotuAA= -github.com/pion/dtls/v2 v2.1.2/go.mod h1:o6+WvyLDAlXF7YiPB/RlskRoeK+/JtuaZa5emwQcWus= -github.com/pion/ice/v2 v2.1.20 h1:xpxXyX5b4WjCh/D905gzBeW/hbJxMEPx2ptVfrhVE6M= -github.com/pion/ice/v2 v2.1.20/go.mod h1:hEAldRzBhTtAfvlU1V/2/nLCMvveQWFKPNCop+63/Iw= -github.com/pion/interceptor v0.1.7 h1:HThW0tIIKT9RRoDWGURe8rlZVOx0fJHxBHpA0ej0+bo= -github.com/pion/interceptor v0.1.7/go.mod h1:Lh3JSl/cbJ2wP8I3ccrjh1K/deRGRn3UlSPuOTiHb6U= +github.com/pion/dtls/v2 v2.1.3/go.mod h1:o6+WvyLDAlXF7YiPB/RlskRoeK+/JtuaZa5emwQcWus= +github.com/pion/dtls/v2 v2.1.5 h1:jlh2vtIyUBShchoTDqpCCqiYCyRFJ/lvf/gQ8TALs+c= +github.com/pion/dtls/v2 v2.1.5/go.mod h1:BqCE7xPZbPSubGasRoDFJeTsyJtdD1FanJYL0JGheqY= +github.com/pion/ice/v2 v2.2.6 h1:R/vaLlI1J2gCx141L5PEwtuGAGcyS6e7E0hDeJFq5Ig= +github.com/pion/ice/v2 v2.2.6/go.mod h1:SWuHiOGP17lGromHTFadUe1EuPgFh/oCU6FCMZHooVE= +github.com/pion/interceptor v0.1.11 h1:00U6OlqxA3FFB50HSg25J/8cWi7P6FbSzw4eFn24Bvs= +github.com/pion/interceptor v0.1.11/go.mod h1:tbtKjZY14awXd7Bq0mmWvgtHB5MDaRN7HV3OZ/uy7s8= github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY= github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms= github.com/pion/mdns v0.0.5 h1:Q2oj/JB3NqfzY9xGZ1fPzZzK7sDSD8rZPOvcIQ10BCw= github.com/pion/mdns v0.0.5/go.mod h1:UgssrvdD3mxpi8tMxAXbsppL3vJ4Jipw1mTCW+al01g= github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA= github.com/pion/randutil v0.1.0/go.mod 
h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8= -github.com/pion/rtcp v1.2.6/go.mod h1:52rMNPWFsjr39z9B9MhnkqhPLoeHTv1aN63o/42bWE0= github.com/pion/rtcp v1.2.9 h1:1ujStwg++IOLIEoOiIQ2s+qBuJ1VN81KW+9pMPsif+U= github.com/pion/rtcp v1.2.9/go.mod h1:qVPhiCzAm4D/rxb6XzKeyZiQK69yJpbUDJSF7TgrqNo= -github.com/pion/rtp v1.7.0/go.mod h1:bDb5n+BFZxXx0Ea7E5qe+klMuqiBrP+w8XSjiWtCUko= -github.com/pion/rtp v1.7.4 h1:4dMbjb1SuynU5OpA3kz1zHK+u+eOCQjW3MAeVHf1ODA= -github.com/pion/rtp v1.7.4/go.mod h1:bDb5n+BFZxXx0Ea7E5qe+klMuqiBrP+w8XSjiWtCUko= +github.com/pion/rtp v1.7.13 h1:qcHwlmtiI50t1XivvoawdCGTP4Uiypzfrsap+bijcoA= +github.com/pion/rtp v1.7.13/go.mod h1:bDb5n+BFZxXx0Ea7E5qe+klMuqiBrP+w8XSjiWtCUko= github.com/pion/sctp v1.8.0/go.mod h1:xFe9cLMZ5Vj6eOzpyiKjT9SwGM4KpK/8Jbw5//jc+0s= github.com/pion/sctp v1.8.2 h1:yBBCIrUMJ4yFICL3RIvR4eh/H2BTTvlligmSTy+3kiA= github.com/pion/sctp v1.8.2/go.mod h1:xFe9cLMZ5Vj6eOzpyiKjT9SwGM4KpK/8Jbw5//jc+0s= -github.com/pion/sdp/v3 v3.0.4 h1:2Kf+dgrzJflNCSw3TV5v2VLeI0s/qkzy2r5jlR0wzf8= -github.com/pion/sdp/v3 v3.0.4/go.mod h1:bNiSknmJE0HYBprTHXKPQ3+JjacTv5uap92ueJZKsRk= -github.com/pion/srtp/v2 v2.0.5 h1:ks3wcTvIUE/GHndO3FAvROQ9opy0uLELpwHJaQ1yqhQ= -github.com/pion/srtp/v2 v2.0.5/go.mod h1:8k6AJlal740mrZ6WYxc4Dg6qDqqhxoRG2GSjlUhDF0A= +github.com/pion/sdp/v3 v3.0.5 h1:ouvI7IgGl+V4CrqskVtr3AaTrPvPisEOxwgpdktctkU= +github.com/pion/sdp/v3 v3.0.5/go.mod h1:iiFWFpQO8Fy3S5ldclBkpXqmWy02ns78NOKoLLL0YQw= +github.com/pion/srtp/v2 v2.0.9 h1:JJq3jClmDFBPX/F5roEb0U19jSU7eUhyDqR/NZ34EKQ= +github.com/pion/srtp/v2 v2.0.9/go.mod h1:5TtM9yw6lsH0ppNCehB/EjEUli7VkUgKSPJqWVqbhQ4= github.com/pion/stun v0.3.5 h1:uLUCBCkQby4S1cf6CGuR9QrVOKcvUwFeemaC865QHDg= github.com/pion/stun v0.3.5/go.mod h1:gDMim+47EeEtfWogA37n6qXZS88L5V6LqFcf+DZA2UA= github.com/pion/transport v0.12.2/go.mod h1:N3+vZQD9HlDP5GWkZ85LohxNsDcNgofQmyL6ojX5d8Q= github.com/pion/transport v0.12.3/go.mod h1:OViWW9SP2peE/HbwBvARicmAVnesphkNkCVZIWJ6q9A= -github.com/pion/transport v0.13.0 
h1:KWTA5ZrQogizzYwPEciGtHPLwpAjE91FgXnyu+Hv2uY= github.com/pion/transport v0.13.0/go.mod h1:yxm9uXpK9bpBBWkITk13cLo1y5/ur5VQpG22ny6EP7g= -github.com/pion/turn/v2 v2.0.6 h1:AsXjSPR6Im15DMTB39NlfdTY9BQfieANPBjdg/aVNwY= -github.com/pion/turn/v2 v2.0.6/go.mod h1:+y7xl719J8bAEVpSXBXvTxStjJv3hbz9YFflvkpcGPw= +github.com/pion/transport v0.13.1 h1:/UH5yLeQtwm2VZIPjxwnNFxjS4DFhyLfS4GlfuKUzfA= +github.com/pion/transport v0.13.1/go.mod h1:EBxbqzyv+ZrmDb82XswEE0BjfQFtuw1Nu6sjnjWCsGg= +github.com/pion/turn/v2 v2.0.8 h1:KEstL92OUN3k5k8qxsXHpr7WWfrdp7iJZHx99ud8muw= +github.com/pion/turn/v2 v2.0.8/go.mod h1:+y7xl719J8bAEVpSXBXvTxStjJv3hbz9YFflvkpcGPw= github.com/pion/udp v0.1.1 h1:8UAPvyqmsxK8oOjloDk4wUt63TzFe9WEJkg5lChlj7o= github.com/pion/udp v0.1.1/go.mod h1:6AFo+CMdKQm7UiA0eUPA8/eVCTx8jBIITLZHc9DWX5M= -github.com/pion/webrtc/v3 v3.1.24-0.20220208053747-94262c1b2b38 h1:+IEql+S+YAj3S5e7Ftl/u4xPcZGG0WwLFsyFj6NRTz4= -github.com/pion/webrtc/v3 v3.1.24-0.20220208053747-94262c1b2b38/go.mod h1:L5S/oAhL0Fzt/rnftVQRrP80/j5jygY7XRZzWwFx6P4= +github.com/pion/webrtc/v3 v3.1.42 h1:wJEQFIXVanptnQcHOLTuIo4AtGB2+mG2x4OhIhnITOA= +github.com/pion/webrtc/v3 v3.1.42/go.mod h1:ffD9DulDrPxyWvDPUIPAOSAWx9GUlOExiJPf7cCcMLA= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -551,12 +557,13 @@ github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6So github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8= github.com/rogpeppe/go-internal 
v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= +github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg= +github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= -github.com/rs/dnscache v0.0.0-20210201191234-295bba877686 h1:IJ6Df0uxPDtNoByV0KkzVKNseWvZFCNM/S9UoyOMCSI= -github.com/rs/dnscache v0.0.0-20210201191234-295bba877686/go.mod h1:qe5TWALJ8/a1Lqznoc5BDHpYX/8HU60Hm2AwRmqzxqA= +github.com/rs/dnscache v0.0.0-20211102005908-e0241e321417 h1:Lt9DzQALzHoDwMBGJ6v8ObDPR0dzr2a6sXTB1Fq7IHs= +github.com/rs/dnscache v0.0.0-20211102005908-e0241e321417/go.mod h1:qe5TWALJ8/a1Lqznoc5BDHpYX/8HU60Hm2AwRmqzxqA= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -606,8 +613,8 @@ github.com/tendermint/go-amino v0.14.1 h1:o2WudxNfdLNBwMyl2dqOJxiro5rfrEaU0Ugs6o github.com/tendermint/go-amino v0.14.1/go.mod h1:i/UKE5Uocn+argJJBb12qTZsCDBcAYMbR92AaJVmKso= github.com/tendermint/tendermint v0.31.11 h1:TIs//4WfEAG4TOZc2eUfJPI3T8KrywXQCCPnGAaM1Wo= github.com/tendermint/tendermint v0.31.11/go.mod h1:ymcPyWblXCplCPQjbOYbrF1fWnpslATMVqiGgWbZrlc= -github.com/tidwall/btree v0.7.2-0.20211211132910-4215444137fc h1:THtJVe/QBctKEe8kjnXwt7RAlvHNtUjFJOEmgZkN05w= -github.com/tidwall/btree v0.7.2-0.20211211132910-4215444137fc/go.mod h1:LGm8L/DZjPLmeWGjv5kFrY8dL4uVhMmzmmLYmsObdKE= +github.com/tidwall/btree v1.3.1 h1:636+tdVDs8Hjcf35Di260W2xCW4KuoXOKyk9QWOvCpA= +github.com/tidwall/btree v1.3.1/go.mod h1:LGm8L/DZjPLmeWGjv5kFrY8dL4uVhMmzmmLYmsObdKE= github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= 
github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tinylib/msgp v1.1.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= @@ -666,8 +673,9 @@ golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220516162934-403b01795ae8/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d h1:sK3txAijHtOK88l68nt020reeT1ZdKLIYetKl95FzVY= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -712,10 +720,12 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211201190559-0a0e4e1bb54c/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod 
h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4 h1:HVyaeDAYux4pnY+D/SiwmLOR36ewZ4iGQIIrtnuCjFA= +golang.org/x/net v0.0.0-20220401154927-543a649e0bdd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220531201128-c960675eff93/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220630215102-69896b714898 h1:K7wO6V1IrczY9QOQ2WkVpw4JQSwCd52UsxVEirZUfiw= +golang.org/x/net v0.0.0-20220630215102-69896b714898/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -766,6 +776,9 @@ golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211030160813-b3129d9d1021/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220405052023-b1e9470b6e64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220608164250-635b8c9b7f68/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e h1:CsOuNlbOuf0mzxJIefr6Q4uAUetRUwZE4qt7VfzP+xo= golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term 
v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -804,8 +817,9 @@ golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f h1:uF6paiQQebLeSXkrTqHqz0MXhXXS1KgF41eUdBNvxK0= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= From 8c27879a6b45b4bc5d21c96e8e0ce475a85a9a48 Mon Sep 17 00:00:00 2001 From: Zachinquarantine Date: Wed, 13 Jul 2022 06:40:39 -0400 Subject: [PATCH 079/152] Remove NewKeyedTransactor function (#4472) * accounts/abi: remove NewKeyedTransactor function * Remove "imported and not used" package * Update state.go * Update statedb_insert_chain_transaction_test.go * Update statedb_chain_test.go * Update database_test.go * lint fixes Co-authored-by: awskii --- accounts/abi/bind/auth.go | 24 --------- cmd/pics/state.go | 15 ++++-- core/state/database_test.go | 51 ++++++++++--------- tests/statedb_chain_test.go | 5 +- .../statedb_insert_chain_transaction_test.go | 6 ++- 5 files changed, 47 insertions(+), 54 deletions(-) diff --git a/accounts/abi/bind/auth.go 
b/accounts/abi/bind/auth.go index 2fa2c150736..d2dda5a15c4 100644 --- a/accounts/abi/bind/auth.go +++ b/accounts/abi/bind/auth.go @@ -24,7 +24,6 @@ import ( "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/crypto" - "github.com/ledgerwatch/log/v3" ) // ErrNoChainID is returned whenever the user failed to specify a chain id. @@ -33,29 +32,6 @@ var ErrNoChainID = errors.New("no chain id specified") // ErrNotAuthorized is returned when an account is not properly unlocked. var ErrNotAuthorized = errors.New("not authorized to sign this account") -// NewKeyedTransactor is a utility method to easily create a transaction signer -// from a single private key. -// -// Deprecated: Use NewKeyedTransactorWithChainID instead. -func NewKeyedTransactor(key *ecdsa.PrivateKey) *TransactOpts { - log.Warn("WARNING: NewKeyedTransactor has been deprecated in favour of NewKeyedTransactorWithChainID") - keyAddr := crypto.PubkeyToAddress(key.PublicKey) - signer := types.LatestSignerForChainID(nil) - return &TransactOpts{ - From: keyAddr, - Signer: func(address common.Address, tx types.Transaction) (types.Transaction, error) { - if address != keyAddr { - return nil, ErrNotAuthorized - } - signature, err := crypto.Sign(tx.SigningHash(nil).Bytes(), key) - if err != nil { - return nil, err - } - return tx.WithSignature(*signer, signature) - }, - } -} - // NewKeyedTransactorWithChainID is a utility method to easily create a transaction signer // from a single private key. 
func NewKeyedTransactorWithChainID(key *ecdsa.PrivateKey, chainID *big.Int) (*TransactOpts, error) { diff --git a/cmd/pics/state.go b/cmd/pics/state.go index 75cd63cd42a..fd91148acc2 100644 --- a/cmd/pics/state.go +++ b/cmd/pics/state.go @@ -288,9 +288,18 @@ func initialState1() error { contractBackend := backends.NewSimulatedBackendWithConfig(gspec.Alloc, gspec.Config, gspec.GasLimit) defer contractBackend.Close() - transactOpts := bind.NewKeyedTransactor(key) - transactOpts1 := bind.NewKeyedTransactor(key1) - transactOpts2 := bind.NewKeyedTransactor(key2) + transactOpts, err := bind.NewKeyedTransactorWithChainID(key, m.ChainConfig.ChainID) + if err != nil { + panic(err) + } + transactOpts1, err := bind.NewKeyedTransactorWithChainID(key1, m.ChainConfig.ChainID) + if err != nil { + panic(err) + } + transactOpts2, err := bind.NewKeyedTransactorWithChainID(key2, m.ChainConfig.ChainID) + if err != nil { + panic(err) + } var tokenContract *contracts.Token // We generate the blocks without plainstant because it's not supported in core.GenerateChain diff --git a/core/state/database_test.go b/core/state/database_test.go index b29b3a966a0..c7f69c8806e 100644 --- a/core/state/database_test.go +++ b/core/state/database_test.go @@ -72,12 +72,12 @@ func TestCreate2Revive(t *testing.T) { contractBackend := backends.NewSimulatedBackendWithConfig(gspec.Alloc, gspec.Config, gspec.GasLimit) defer contractBackend.Close() - transactOpts := bind.NewKeyedTransactor(key) + transactOpts, err := bind.NewKeyedTransactorWithChainID(key, m.ChainConfig.ChainID) + require.NoError(t, err) transactOpts.GasLimit = 1000000 var contractAddress common.Address var revive *contracts.Revive - var err error // Change this address whenever you make any changes in the code of the revive contract in // contracts/revive.sol var create2address = common.HexToAddress("e70fd65144383e1189bd710b1e23b61e26315ff4") @@ -263,12 +263,13 @@ func TestCreate2Polymorth(t *testing.T) { contractBackend := 
backends.NewSimulatedBackendWithConfig(gspec.Alloc, gspec.Config, gspec.GasLimit) defer contractBackend.Close() - transactOpts := bind.NewKeyedTransactor(key) + transactOpts, err := bind.NewKeyedTransactorWithChainID(key, m.ChainConfig.ChainID) + require.NoError(t, err) transactOpts.GasLimit = 1000000 var contractAddress common.Address var poly *contracts.Poly - var err error + // Change this address whenever you make any changes in the code of the poly contract in // contracts/poly.sol var create2address = common.HexToAddress("c66aa74c220476f244b7f45897a124d1a01ca8a8") @@ -509,12 +510,12 @@ func TestReorgOverSelfDestruct(t *testing.T) { contractBackend := backends.NewSimulatedBackendWithConfig(gspec.Alloc, gspec.Config, gspec.GasLimit) defer contractBackend.Close() - transactOpts := bind.NewKeyedTransactor(key) + transactOpts, err := bind.NewKeyedTransactorWithChainID(key, m.ChainConfig.ChainID) + require.NoError(t, err) transactOpts.GasLimit = 1000000 var contractAddress common.Address var selfDestruct *contracts.Selfdestruct - var err error // Here we generate 3 blocks, two of which (the one with "Change" invocation and "Destruct" invocation will be reverted during the reorg) chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 3, func(i int, block *core.BlockGen) { @@ -549,7 +550,8 @@ func TestReorgOverSelfDestruct(t *testing.T) { // Create a longer chain, with 4 blocks (with higher total difficulty) that reverts the change of stroage self-destruction of the contract contractBackendLonger := backends.NewSimulatedBackendWithConfig(gspec.Alloc, gspec.Config, gspec.GasLimit) defer contractBackendLonger.Close() - transactOptsLonger := bind.NewKeyedTransactor(key) + transactOptsLonger, err := bind.NewKeyedTransactorWithChainID(key, m.ChainConfig.ChainID) + require.NoError(t, err) transactOptsLonger.GasLimit = 1000000 longerChain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 4, func(i int, block *core.BlockGen) { @@ 
-658,12 +660,12 @@ func TestReorgOverStateChange(t *testing.T) { contractBackend := backends.NewSimulatedBackendWithConfig(gspec.Alloc, gspec.Config, gspec.GasLimit) defer contractBackend.Close() - transactOpts := bind.NewKeyedTransactor(key) + transactOpts, err := bind.NewKeyedTransactorWithChainID(key, m.ChainConfig.ChainID) + require.NoError(t, err) transactOpts.GasLimit = 1000000 var contractAddress common.Address var selfDestruct *contracts.Selfdestruct - var err error // Here we generate 3 blocks, two of which (the one with "Change" invocation and "Destruct" invocation will be reverted during the reorg) chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 2, func(i int, block *core.BlockGen) { @@ -692,7 +694,8 @@ func TestReorgOverStateChange(t *testing.T) { // Create a longer chain, with 4 blocks (with higher total difficulty) that reverts the change of stroage self-destruction of the contract contractBackendLonger := backends.NewSimulatedBackendWithConfig(gspec.Alloc, gspec.Config, gspec.GasLimit) defer contractBackendLonger.Close() - transactOptsLonger := bind.NewKeyedTransactor(key) + transactOptsLonger, err := bind.NewKeyedTransactorWithChainID(key, m.ChainConfig.ChainID) + require.NoError(t, err) transactOptsLonger.GasLimit = 1000000 longerChain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 3, func(i int, block *core.BlockGen) { var tx types.Transaction @@ -813,10 +816,8 @@ func TestCreateOnExistingStorage(t *testing.T) { contractBackend := backends.NewSimulatedBackendWithConfig(gspec.Alloc, gspec.Config, gspec.GasLimit) defer contractBackend.Close() - transactOpts, err := bind.NewKeyedTransactorWithChainID(key, gspec.Config.ChainID) - if err != nil { - t.Fatal(err) - } + transactOpts, err := bind.NewKeyedTransactorWithChainID(key, m.ChainConfig.ChainID) + require.NoError(t, err) transactOpts.GasLimit = 1000000 var contractAddress common.Address @@ -943,12 +944,12 @@ func TestEip2200Gas(t *testing.T) { 
contractBackend := backends.NewSimulatedBackendWithConfig(gspec.Alloc, gspec.Config, gspec.GasLimit) defer contractBackend.Close() - transactOpts := bind.NewKeyedTransactor(key) + transactOpts, err := bind.NewKeyedTransactorWithChainID(key, m.ChainConfig.ChainID) + require.NoError(t, err) transactOpts.GasLimit = 1000000 var contractAddress common.Address var selfDestruct *contracts.Selfdestruct - var err error // Here we generate 1 block with 2 transactions, first creates a contract with some initial values in the // It activates the SSTORE pricing rules specific to EIP-2200 (istanbul) @@ -1035,12 +1036,12 @@ func TestWrongIncarnation(t *testing.T) { contractBackend := backends.NewSimulatedBackendWithConfig(gspec.Alloc, gspec.Config, gspec.GasLimit) defer contractBackend.Close() - transactOpts := bind.NewKeyedTransactor(key) + transactOpts, err := bind.NewKeyedTransactorWithChainID(key, m.ChainConfig.ChainID) + require.NoError(t, err) transactOpts.GasLimit = 1000000 var contractAddress common.Address var changer *contracts.Changer - var err error chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 2, func(i int, block *core.BlockGen) { var tx types.Transaction @@ -1151,9 +1152,9 @@ func TestWrongIncarnation2(t *testing.T) { contractBackend := backends.NewSimulatedBackendWithConfig(gspec.Alloc, gspec.Config, gspec.GasLimit) defer contractBackend.Close() - transactOpts := bind.NewKeyedTransactor(key) + transactOpts, err := bind.NewKeyedTransactorWithChainID(key, m.ChainConfig.ChainID) + require.NoError(t, err) transactOpts.GasLimit = 1000000 - var err error var contractAddress common.Address @@ -1190,7 +1191,8 @@ func TestWrongIncarnation2(t *testing.T) { // Create a longer chain, with 4 blocks (with higher total difficulty) that reverts the change of stroage self-destruction of the contract contractBackendLonger := backends.NewSimulatedBackendWithConfig(gspec.Alloc, gspec.Config, gspec.GasLimit) - transactOptsLonger := 
bind.NewKeyedTransactor(key) + transactOptsLonger, err := bind.NewKeyedTransactorWithChainID(key, m.ChainConfig.ChainID) + require.NoError(t, err) transactOptsLonger.GasLimit = 1000000 longerChain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 3, func(i int, block *core.BlockGen) { var tx types.Transaction @@ -1402,13 +1404,13 @@ func TestRecreateAndRewind(t *testing.T) { m := stages.MockWithGenesis(t, gspec, key) contractBackend := backends.NewSimulatedBackendWithConfig(gspec.Alloc, gspec.Config, gspec.GasLimit) defer contractBackend.Close() - transactOpts := bind.NewKeyedTransactor(key) + transactOpts, err := bind.NewKeyedTransactorWithChainID(key, m.ChainConfig.ChainID) + require.NoError(t, err) transactOpts.GasLimit = 1000000 var revive *contracts.Revive2 var phoenix *contracts.Phoenix var reviveAddress common.Address var phoenixAddress common.Address - var err error chain, err1 := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 4, func(i int, block *core.BlockGen) { var tx types.Transaction @@ -1470,7 +1472,8 @@ func TestRecreateAndRewind(t *testing.T) { contractBackendLonger := backends.NewSimulatedBackendWithConfig(gspec.Alloc, gspec.Config, gspec.GasLimit) defer contractBackendLonger.Close() - transactOptsLonger := bind.NewKeyedTransactor(key) + transactOptsLonger, err := bind.NewKeyedTransactorWithChainID(key, m.ChainConfig.ChainID) + require.NoError(t, err) transactOptsLonger.GasLimit = 1000000 longerChain, err1 := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 5, func(i int, block *core.BlockGen) { var tx types.Transaction diff --git a/tests/statedb_chain_test.go b/tests/statedb_chain_test.go index a578c46e23f..b15b49c03c5 100644 --- a/tests/statedb_chain_test.go +++ b/tests/statedb_chain_test.go @@ -22,6 +22,7 @@ import ( "github.com/holiman/uint256" "github.com/ledgerwatch/erigon/ethdb/olddb" + "github.com/stretchr/testify/require" "github.com/ledgerwatch/erigon/accounts/abi/bind" 
"github.com/ledgerwatch/erigon/accounts/abi/bind/backends" @@ -64,11 +65,11 @@ func TestSelfDestructReceive(t *testing.T) { contractBackend := backends.NewSimulatedBackendWithConfig(gspec.Alloc, gspec.Config, gspec.GasLimit) defer contractBackend.Close() - transactOpts := bind.NewKeyedTransactor(key) + transactOpts, err := bind.NewKeyedTransactorWithChainID(key, m.ChainConfig.ChainID) + require.NoError(t, err) var contractAddress common.Address var selfDestructorContract *contracts.SelfDestructor - var err error // There are two blocks // First block deploys a contract, then makes it self-destruct, and then sends 1 wei to the address of the contract, diff --git a/tests/statedb_insert_chain_transaction_test.go b/tests/statedb_insert_chain_transaction_test.go index 0d228d5d630..2a99d518a61 100644 --- a/tests/statedb_insert_chain_transaction_test.go +++ b/tests/statedb_insert_chain_transaction_test.go @@ -705,7 +705,11 @@ func getGenesis(funds ...*big.Int) initialData { for _, key := range keys { addr := crypto.PubkeyToAddress(key.PublicKey) addresses = append(addresses, addr) - transactOpts = append(transactOpts, bind.NewKeyedTransactor(key)) + to, err := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1)) + if err != nil { + panic(err) + } + transactOpts = append(transactOpts, to) allocs[addr] = core.GenesisAccount{Balance: accountFunds} } From af58e427f182af2916e4a113395230a8ea3d295d Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Wed, 13 Jul 2022 18:54:43 +0100 Subject: [PATCH 080/152] Ensure no in-memory execution when --experimental.overlay=false (#4708) * Ensure no in-memory execution when --experimental.overlay=false * bump log level * canExtendInMemory -> canExtendFork * forgot to FeedHeaderPoS * small simplification --- eth/stagedsync/stage_headers.go | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 
10d93bebda8..498ae67ec20 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -587,9 +587,9 @@ func verifyAndSaveNewPoSHeader( currentHeadHash := rawdb.ReadHeadHeaderHash(tx) canExtendCanonical := header.ParentHash == currentHeadHash - canExtendInMemory := cfg.memoryOverlay && (cfg.forkValidator.ExtendingForkHeadHash() == (common.Hash{}) || header.ParentHash == cfg.forkValidator.ExtendingForkHeadHash()) + canExtendFork := cfg.forkValidator.ExtendingForkHeadHash() == (common.Hash{}) || header.ParentHash == cfg.forkValidator.ExtendingForkHeadHash() - if canExtendInMemory || !canExtendCanonical { + if cfg.memoryOverlay && (canExtendFork || !canExtendCanonical) { status, latestValidHash, validationError, criticalError := cfg.forkValidator.ValidatePayload(tx, header, body, canExtendCanonical) if criticalError != nil { return &privateapi.PayloadStatus{CriticalError: criticalError}, false, criticalError @@ -608,11 +608,16 @@ func verifyAndSaveNewPoSHeader( }, success, nil } - // OK, we're on the canonical chain if err := headerInserter.FeedHeaderPoS(tx, header, headerHash); err != nil { return nil, false, err } + if !canExtendCanonical { + log.Info("Side chain or something weird", "parentHash", header.ParentHash, "currentHead", currentHeadHash) + return &privateapi.PayloadStatus{Status: remote.EngineStatus_ACCEPTED}, true, nil + } + + // OK, we're on the canonical chain if requestStatus == engineapi.New { cfg.hd.SetPendingPayloadHash(headerHash) } From af661a9459d997f09b8f514daa25e46b00dce9b8 Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Thu, 14 Jul 2022 10:03:15 +0100 Subject: [PATCH 081/152] Fix canExtendCanonical when some headers are downloaded (#4709) * Fix canExtendCanonical when some headers are downloaded * Restore original logic for forkValidator.ValidatePayload * Check FCU status --- eth/stagedsync/stage_headers.go | 58 +++++++++++++++++++++----------- 
turbo/stages/sentry_mock_test.go | 3 ++ 2 files changed, 41 insertions(+), 20 deletions(-) diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 498ae67ec20..8dfbb1551b5 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -193,7 +193,7 @@ func HeadersPOS( var payloadStatus *privateapi.PayloadStatus var err error if forkChoiceInsteadOfNewPayload { - payloadStatus, err = startHandlingForkChoice(forkChoiceMessage, requestStatus, requestId, s, u, ctx, tx, cfg, headerInserter, cfg.blockReader) + payloadStatus, err = startHandlingForkChoice(forkChoiceMessage, requestStatus, requestId, s, u, ctx, tx, cfg, headerInserter) } else { payloadStatus, err = handleNewPayload(payloadMessage, requestStatus, requestId, s, ctx, tx, cfg, headerInserter) } @@ -267,7 +267,6 @@ func startHandlingForkChoice( tx kv.RwTx, cfg HeadersCfg, headerInserter *headerdownload.HeaderInserter, - headerReader services.HeaderReader, ) (*privateapi.PayloadStatus, error) { headerHash := forkChoice.HeadBlockHash log.Debug(fmt.Sprintf("[%s] Handling fork choice", s.LogPrefix()), "headerHash", headerHash) @@ -307,7 +306,7 @@ func startHandlingForkChoice( } // Header itself may already be in the snapshots, if CL starts off at much earlier state than Erigon - header, err := headerReader.HeaderByHash(ctx, tx, headerHash) + header, err := cfg.blockReader.HeaderByHash(ctx, tx, headerHash) if err != nil { log.Warn(fmt.Sprintf("[%s] Fork choice err (reading header by hash %x)", s.LogPrefix(), headerHash), "err", err) cfg.hd.BeaconRequestList.Remove(requestId) @@ -369,16 +368,9 @@ func startHandlingForkChoice( } cfg.hd.UpdateTopSeenHeightPoS(headerNumber) - forkingPoint := uint64(0) - if headerNumber > 0 { - parent, err := headerReader.Header(ctx, tx, header.ParentHash, headerNumber-1) - if err != nil { - return nil, err - } - forkingPoint, err = headerInserter.ForkingPoint(tx, header, parent) - if err != nil { - return nil, err - } + forkingPoint, 
err := forkingPoint(ctx, tx, headerInserter, cfg.blockReader, header) + if err != nil { + return nil, err } log.Info(fmt.Sprintf("[%s] Fork choice re-org", s.LogPrefix()), "headerNumber", headerNumber, "forkingPoint", forkingPoint) @@ -549,7 +541,7 @@ func handleNewPayload( } log.Debug(fmt.Sprintf("[%s] New payload begin verification", s.LogPrefix())) - response, success, err := verifyAndSaveNewPoSHeader(requestStatus, s, tx, cfg, header, payloadMessage.Body, headerInserter) + response, success, err := verifyAndSaveNewPoSHeader(requestStatus, s, ctx, tx, cfg, header, payloadMessage.Body, headerInserter) log.Debug(fmt.Sprintf("[%s] New payload verification ended", s.LogPrefix()), "success", success, "err", err) if err != nil || !success { return response, err @@ -566,6 +558,7 @@ func handleNewPayload( func verifyAndSaveNewPoSHeader( requestStatus engineapi.RequestStatus, s *StageState, + ctx context.Context, tx kv.RwTx, cfg HeadersCfg, header *types.Header, @@ -586,17 +579,24 @@ func verifyAndSaveNewPoSHeader( } currentHeadHash := rawdb.ReadHeadHeaderHash(tx) - canExtendCanonical := header.ParentHash == currentHeadHash + + forkingPoint, err := forkingPoint(ctx, tx, headerInserter, cfg.blockReader, header) + if err != nil { + return nil, false, err + } + forkingHash, err := cfg.blockReader.CanonicalHash(ctx, tx, forkingPoint) + + canExtendCanonical := forkingHash == currentHeadHash canExtendFork := cfg.forkValidator.ExtendingForkHeadHash() == (common.Hash{}) || header.ParentHash == cfg.forkValidator.ExtendingForkHeadHash() - if cfg.memoryOverlay && (canExtendFork || !canExtendCanonical) { - status, latestValidHash, validationError, criticalError := cfg.forkValidator.ValidatePayload(tx, header, body, canExtendCanonical) + if cfg.memoryOverlay && (canExtendFork || header.ParentHash != currentHeadHash) { + status, latestValidHash, validationError, criticalError := cfg.forkValidator.ValidatePayload(tx, header, body, header.ParentHash == currentHeadHash /* extendCanonical 
*/) if criticalError != nil { - return &privateapi.PayloadStatus{CriticalError: criticalError}, false, criticalError + return nil, false, criticalError } success = validationError == nil if !success { - log.Warn("Verification failed for header", "hash", headerHash, "height", headerNumber, "err", validationError) + log.Warn("Validation failed for header", "hash", headerHash, "height", headerNumber, "err", validationError) cfg.hd.ReportBadHeaderPoS(headerHash, latestValidHash) } else if err := headerInserter.FeedHeaderPoS(tx, header, headerHash); err != nil { return nil, false, err @@ -613,7 +613,7 @@ func verifyAndSaveNewPoSHeader( } if !canExtendCanonical { - log.Info("Side chain or something weird", "parentHash", header.ParentHash, "currentHead", currentHeadHash) + log.Info("Side chain", "parentHash", header.ParentHash, "currentHead", currentHeadHash) return &privateapi.PayloadStatus{Status: remote.EngineStatus_ACCEPTED}, true, nil } @@ -708,6 +708,24 @@ func verifyAndSaveDownloadedPoSHeaders(tx kv.RwTx, cfg HeadersCfg, headerInserte cfg.hd.SetPosStatus(headerdownload.Idle) } +func forkingPoint( + ctx context.Context, + tx kv.RwTx, + headerInserter *headerdownload.HeaderInserter, + headerReader services.HeaderReader, + header *types.Header, +) (uint64, error) { + headerNumber := header.Number.Uint64() + if headerNumber == 0 { + return 0, nil + } + parent, err := headerReader.Header(ctx, tx, header.ParentHash, headerNumber-1) + if err != nil { + return 0, err + } + return headerInserter.ForkingPoint(tx, header, parent) +} + // HeadersPOW progresses Headers stage for Proof-of-Work headers func HeadersPOW( s *StageState, diff --git a/turbo/stages/sentry_mock_test.go b/turbo/stages/sentry_mock_test.go index f0a9c136efa..9ab7015e06b 100644 --- a/turbo/stages/sentry_mock_test.go +++ b/turbo/stages/sentry_mock_test.go @@ -605,6 +605,7 @@ func TestPoSDownloader(t *testing.T) { headBlockHash, err = stages.StageLoopStep(m.Ctx, m.DB, m.Sync, 0, m.Notifications, false, 
m.UpdateHead, nil) require.NoError(t, err) stages.SendPayloadStatus(m.HeaderDownload(), headBlockHash, err) + assert.Equal(t, chain.TopBlock.Hash(), headBlockHash) // Point forkChoice to the head forkChoiceMessage := engineapi.ForkChoiceMessage{ @@ -617,6 +618,8 @@ func TestPoSDownloader(t *testing.T) { require.NoError(t, err) stages.SendPayloadStatus(m.HeaderDownload(), headBlockHash, err) + payloadStatus = m.ReceivePayloadStatus() + assert.Equal(t, remote.EngineStatus_VALID, payloadStatus.Status) assert.Equal(t, chain.TopBlock.Hash(), headBlockHash) } From 211dbfbb7505c93285a9839dbf4dae9b8b46f359 Mon Sep 17 00:00:00 2001 From: Max Revitt Date: Thu, 14 Jul 2022 11:01:57 +0100 Subject: [PATCH 082/152] fix(#4543): BeginRo use semaphore (erigon-lib bump) (#4712) --- cmd/rpcdaemon/cli/config.go | 3 ++- cmd/rpcdaemon22/cli/config.go | 3 ++- go.mod | 2 +- go.sum | 4 ++-- 4 files changed, 7 insertions(+), 5 deletions(-) diff --git a/cmd/rpcdaemon/cli/config.go b/cmd/rpcdaemon/cli/config.go index d326e45911e..bd79d8a8da0 100644 --- a/cmd/rpcdaemon/cli/config.go +++ b/cmd/rpcdaemon/cli/config.go @@ -46,6 +46,7 @@ import ( "github.com/ledgerwatch/erigon/turbo/snapshotsync/snap" "github.com/ledgerwatch/log/v3" "github.com/spf13/cobra" + "golang.org/x/sync/semaphore" "google.golang.org/grpc" grpcHealth "google.golang.org/grpc/health" "google.golang.org/grpc/health/grpc_health_v1" @@ -255,7 +256,7 @@ func RemoteServices(ctx context.Context, cfg httpcfg.HttpCfg, logger log.Logger, if cfg.WithDatadir { var rwKv kv.RwDB log.Trace("Creating chain db", "path", cfg.Dirs.Chaindata) - limiter := make(chan struct{}, cfg.DBReadConcurrency) + limiter := semaphore.NewWeighted(int64(cfg.DBReadConcurrency)) rwKv, err = kv2.NewMDBX(logger).RoTxsLimiter(limiter).Path(cfg.Dirs.Chaindata).Readonly().Open() if err != nil { return nil, nil, nil, nil, nil, nil, nil, nil, ff, err diff --git a/cmd/rpcdaemon22/cli/config.go b/cmd/rpcdaemon22/cli/config.go index 7f15ae9db48..abe200dfd3c 100644 --- 
a/cmd/rpcdaemon22/cli/config.go +++ b/cmd/rpcdaemon22/cli/config.go @@ -17,6 +17,7 @@ import ( "github.com/ledgerwatch/erigon/internal/debug" "github.com/ledgerwatch/erigon/node/nodecfg/datadir" "github.com/ledgerwatch/erigon/rpc/rpccfg" + "golang.org/x/sync/semaphore" "github.com/ledgerwatch/erigon-lib/direct" "github.com/ledgerwatch/erigon-lib/gointerfaces" @@ -253,7 +254,7 @@ func RemoteServices(ctx context.Context, cfg httpcfg.HttpCfg, logger log.Logger, if cfg.WithDatadir { var rwKv kv.RwDB log.Trace("Creating chain db", "path", cfg.Dirs.Chaindata) - limiter := make(chan struct{}, cfg.DBReadConcurrency) + limiter := semaphore.NewWeighted(int64(cfg.DBReadConcurrency)) rwKv, err = kv2.NewMDBX(logger).RoTxsLimiter(limiter).Path(cfg.Dirs.Chaindata).Readonly().Open() if err != nil { return nil, nil, nil, nil, nil, nil, nil, nil, ff, nil, nil, err diff --git a/go.mod b/go.mod index ab125c696e3..b8e7bbe9d08 100644 --- a/go.mod +++ b/go.mod @@ -36,7 +36,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible - github.com/ledgerwatch/erigon-lib v0.0.0-20220710110825-21c6baf2871c + github.com/ledgerwatch/erigon-lib v0.0.0-20220713123745-d629e31df75e github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 diff --git a/go.sum b/go.sum index b687d0fb068..87deff96b61 100644 --- a/go.sum +++ b/go.sum @@ -390,8 +390,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3PYPwICLl+/9oulQauOuETfgFvhBDffs0= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220710110825-21c6baf2871c h1:xcHZhKSsUezVVGiqMxhe2qlkoedgkCAyx1Zi+bY9Pxs= 
-github.com/ledgerwatch/erigon-lib v0.0.0-20220710110825-21c6baf2871c/go.mod h1:bttvdtZXjh803u/CeMerKYnWvVvXTICWSfpcMeQNtmc= +github.com/ledgerwatch/erigon-lib v0.0.0-20220713123745-d629e31df75e h1:lU3YEzEKf55d3Sd363FFwxMIHch7/59Xi4PLG4MHWcg= +github.com/ledgerwatch/erigon-lib v0.0.0-20220713123745-d629e31df75e/go.mod h1:lrUxxrH85rkNMGFT7K4aloNMOf7jG+bVYAHhmyi7oaU= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= From 07e00b878c1ea38d39bf39f824c6bff433c94800 Mon Sep 17 00:00:00 2001 From: Max Revitt Date: Thu, 14 Jul 2022 11:30:50 +0100 Subject: [PATCH 083/152] use nested datadir/network path for db supporting legacy (#4713) Co-authored-by: Scott Fairclough --- cmd/utils/flags.go | 48 ++++++++++++++++++++++++++++++++-------------- 1 file changed, 34 insertions(+), 14 deletions(-) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 583e84a3598..0e8e5a941a4 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -22,6 +22,7 @@ import ( "fmt" "io" "math/big" + "os" "path/filepath" "runtime" "strconv" @@ -32,14 +33,15 @@ import ( "github.com/c2h5oh/datasize" "github.com/ledgerwatch/erigon-lib/kv/kvcache" "github.com/ledgerwatch/erigon-lib/txpool" - "github.com/ledgerwatch/erigon/cmd/downloader/downloader/downloadercfg" - "github.com/ledgerwatch/erigon/node/nodecfg" - "github.com/ledgerwatch/erigon/node/nodecfg/datadir" "github.com/ledgerwatch/log/v3" "github.com/spf13/cobra" "github.com/spf13/pflag" "github.com/urfave/cli" + "github.com/ledgerwatch/erigon/cmd/downloader/downloader/downloadercfg" + "github.com/ledgerwatch/erigon/node/nodecfg" + "github.com/ledgerwatch/erigon/node/nodecfg/datadir" + "github.com/ledgerwatch/erigon/eth/protocols/eth" "github.com/ledgerwatch/erigon/params/networkname" @@ -1069,30 +1071,48 @@ func 
DataDirForNetwork(datadir string, network string) string { case networkname.DevChainName: return "" // unless explicitly requested, use memory databases case networkname.RinkebyChainName: - return filepath.Join(datadir, "rinkeby") + return networkDataDirCheckingLegacy(datadir, "rinkeby") case networkname.GoerliChainName: - filepath.Join(datadir, "goerli") + return networkDataDirCheckingLegacy(datadir, "goerli") case networkname.KilnDevnetChainName: - filepath.Join(datadir, "kiln-devnet") + return networkDataDirCheckingLegacy(datadir, "kiln-devnet") case networkname.SokolChainName: - return filepath.Join(datadir, "sokol") + return networkDataDirCheckingLegacy(datadir, "sokol") case networkname.FermionChainName: - return filepath.Join(datadir, "fermion") + return networkDataDirCheckingLegacy(datadir, "fermion") case networkname.MumbaiChainName: - return filepath.Join(datadir, "mumbai") + return networkDataDirCheckingLegacy(datadir, "mumbai") case networkname.BorMainnetChainName: - return filepath.Join(datadir, "bor-mainnet") + return networkDataDirCheckingLegacy(datadir, "bor-mainnet") case networkname.BorDevnetChainName: - return filepath.Join(datadir, "bor-devnet") + return networkDataDirCheckingLegacy(datadir, "bor-devnet") case networkname.SepoliaChainName: - return filepath.Join(datadir, "sepolia") + return networkDataDirCheckingLegacy(datadir, "sepolia") case networkname.GnosisChainName: - return filepath.Join(datadir, "gnosis") + return networkDataDirCheckingLegacy(datadir, "gnosis") + default: return datadir } +} + +// networkDataDirCheckingLegacy checks if the datadir for the network already exists and uses that if found. 
+// if not checks for a LOCK file at the root of the datadir and uses this if found +// or by default assume a fresh node and to use the nested directory for the network +func networkDataDirCheckingLegacy(datadir, network string) string { + anticipated := filepath.Join(datadir, network) + + if _, err := os.Stat(anticipated); !os.IsNotExist(err) { + return anticipated + } + + legacyLockFile := filepath.Join(datadir, "LOCK") + if _, err := os.Stat(legacyLockFile); !os.IsNotExist(err) { + log.Info("Using legacy datadir") + return datadir + } - return datadir + return anticipated } func setDataDir(ctx *cli.Context, cfg *nodecfg.Config) { From 793ffcce3b19f02e69e37cf7b60837e1798d6f26 Mon Sep 17 00:00:00 2001 From: Enrique Jose Avila Asapche Date: Thu, 14 Jul 2022 17:22:46 +0300 Subject: [PATCH 084/152] separated interrupt logic (#4714) --- eth/stagedsync/stage_headers.go | 39 +++++++++++++++++++-------------- 1 file changed, 23 insertions(+), 16 deletions(-) diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 8dfbb1551b5..5db5c099c43 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -164,16 +164,12 @@ func HeadersPOS( cfg.hd.SetHeaderReader(&chainReader{config: &cfg.chainConfig, tx: tx, blockReader: cfg.blockReader}) headerInserter := headerdownload.NewHeaderInserter(s.LogPrefix(), nil, s.BlockNumber, cfg.blockReader) - if interrupt != engineapi.None { - if interrupt == engineapi.Stopping { - cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{CriticalError: errors.New("server is stopping")} - } - if interrupt == engineapi.Synced { - verifyAndSaveDownloadedPoSHeaders(tx, cfg, headerInserter) - } - if !useExternalTx { - return tx.Commit() - } + interrupted, err := handleInterrupt(interrupt, cfg, tx, headerInserter, useExternalTx) + if err != nil { + return err + } + + if interrupted { return nil } @@ -181,20 +177,15 @@ func HeadersPOS( requestStatus := requestWithStatus.Status // Decide what kind of 
action we need to take place - var payloadMessage *engineapi.PayloadMessage forkChoiceMessage, forkChoiceInsteadOfNewPayload := request.(*engineapi.ForkChoiceMessage) - if !forkChoiceInsteadOfNewPayload { - payloadMessage = request.(*engineapi.PayloadMessage) - } - cfg.hd.ClearPendingPayloadHash() cfg.hd.SetPendingPayloadStatus(nil) var payloadStatus *privateapi.PayloadStatus - var err error if forkChoiceInsteadOfNewPayload { payloadStatus, err = startHandlingForkChoice(forkChoiceMessage, requestStatus, requestId, s, u, ctx, tx, cfg, headerInserter) } else { + payloadMessage := request.(*engineapi.PayloadMessage) payloadStatus, err = handleNewPayload(payloadMessage, requestStatus, requestId, s, ctx, tx, cfg, headerInserter) } @@ -726,6 +717,22 @@ func forkingPoint( return headerInserter.ForkingPoint(tx, header, parent) } +func handleInterrupt(interrupt engineapi.Interrupt, cfg HeadersCfg, tx kv.RwTx, headerInserter *headerdownload.HeaderInserter, useExternalTx bool) (bool, error) { + if interrupt != engineapi.None { + if interrupt == engineapi.Stopping { + cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{CriticalError: errors.New("server is stopping")} + } + if interrupt == engineapi.Synced { + verifyAndSaveDownloadedPoSHeaders(tx, cfg, headerInserter) + } + if !useExternalTx { + return true, tx.Commit() + } + return true, nil + } + return false, nil +} + // HeadersPOW progresses Headers stage for Proof-of-Work headers func HeadersPOW( s *StageState, From 759e77c71bb01d678ffa959992588cdb7a50e25b Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Thu, 14 Jul 2022 19:08:33 +0200 Subject: [PATCH 085/152] mod (#4717) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index b8e7bbe9d08..afa34d5579d 100644 --- a/go.mod +++ b/go.mod @@ -36,7 +36,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible - 
github.com/ledgerwatch/erigon-lib v0.0.0-20220713123745-d629e31df75e + github.com/ledgerwatch/erigon-lib v0.0.0-20220714160525-21aa65c1c383 github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 diff --git a/go.sum b/go.sum index 87deff96b61..d59d34e8091 100644 --- a/go.sum +++ b/go.sum @@ -390,8 +390,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3PYPwICLl+/9oulQauOuETfgFvhBDffs0= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220713123745-d629e31df75e h1:lU3YEzEKf55d3Sd363FFwxMIHch7/59Xi4PLG4MHWcg= -github.com/ledgerwatch/erigon-lib v0.0.0-20220713123745-d629e31df75e/go.mod h1:lrUxxrH85rkNMGFT7K4aloNMOf7jG+bVYAHhmyi7oaU= +github.com/ledgerwatch/erigon-lib v0.0.0-20220714160525-21aa65c1c383 h1:1EE1EIsDHok6NrzqQjGqkCj47APObiqFbgv+s7GJMrk= +github.com/ledgerwatch/erigon-lib v0.0.0-20220714160525-21aa65c1c383/go.mod h1:lrUxxrH85rkNMGFT7K4aloNMOf7jG+bVYAHhmyi7oaU= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= From f18a5b08643261eba5fd2c2efdd1609c244aa81d Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Fri, 15 Jul 2022 13:17:07 +0700 Subject: [PATCH 086/152] integration to pass mdbx.Accede flag (#4719) --- cmd/integration/commands/root.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cmd/integration/commands/root.go b/cmd/integration/commands/root.go index 5db26bfeacc..0bef08f0553 100644 --- a/cmd/integration/commands/root.go +++ b/cmd/integration/commands/root.go @@ -11,6 +11,7 @@ 
import ( "github.com/ledgerwatch/erigon/migrations" "github.com/ledgerwatch/log/v3" "github.com/spf13/cobra" + "github.com/torquem-ch/mdbx-go/mdbx" ) var rootCmd = &cobra.Command{ @@ -46,6 +47,9 @@ func dbCfg(label kv.Label, logger log.Logger, path string) kv2.MdbxOpts { } func openDB(opts kv2.MdbxOpts, applyMigrations bool) kv.RwDB { + // integration tool don't intent to create db, then easiest way to open db - it's pass mdbx.Accede flag, which allow + // to read all options from DB, instead of overriding them + opts = opts.Flags(func(f uint) uint { return f | mdbx.Accede }) db := opts.MustOpen() if applyMigrations { migrator := migrations.NewMigrator(opts.GetLabel()) From 7b57e26d84bb495d1a51fe12f7cfe0ccd245f191 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Fri, 15 Jul 2022 13:57:44 +0700 Subject: [PATCH 087/152] grafana: up security fix version #4721 --- docker-compose.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index 40cf128e883..b5d77b58d1b 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -66,7 +66,7 @@ services: prometheus: - image: prom/prometheus:v2.36.2 + image: prom/prometheus:v2.37.0 user: ${DOCKER_UID:1000}:${DOCKER_GID:1000} # Uses erigon user from Dockerfile command: --log.level=warn --config.file=/etc/prometheus/prometheus.yml --storage.tsdb.path=/prometheus --storage.tsdb.retention.time=150d --web.console.libraries=/usr/share/prometheus/console_libraries --web.console.templates=/usr/share/prometheus/consoles ports: [ "9090:9090" ] @@ -76,7 +76,7 @@ services: restart: unless-stopped grafana: - image: grafana/grafana:9.0.2 + image: grafana/grafana:9.0.3 user: "472:0" # required for grafana version >= 7.3 ports: [ "3000:3000" ] volumes: From 92e2311eb3333dad6eb7a7673f68aeb85aaa63f9 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Fri, 15 Jul 2022 14:39:27 +0700 Subject: [PATCH 088/152] docker compose: fix interpolation format #4722 Open --- docker-compose.yml | 10 
+++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index b5d77b58d1b..0c6cf567c46 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -20,16 +20,16 @@ x-erigon-service: &default-erigon-service volumes_from: [ erigon ] restart: unless-stopped mem_swappiness: 0 + user: ${DOCKER_UID:-1000}:${DOCKER_GID:-1000} services: erigon: image: thorax/erigon:${TAG:-latest} build: args: - UID: ${DOCKER_UID:1000} - GID: ${DOCKER_GID:1000} + UID: ${DOCKER_UID:-1000} + GID: ${DOCKER_GID:-1000} context: . - user: "${DOCKER_UID:1000}:${DOCKER_GID:1000}" command: | erigon ${ERIGON_FLAGS-} --private.api.addr=0.0.0.0:9090 --sentry.api.addr=sentry:9091 --downloader.api.addr=downloader:9093 --txpool.disable @@ -67,7 +67,7 @@ services: prometheus: image: prom/prometheus:v2.37.0 - user: ${DOCKER_UID:1000}:${DOCKER_GID:1000} # Uses erigon user from Dockerfile + user: ${DOCKER_UID:-1000}:${DOCKER_GID:-1000} # Uses erigon user from Dockerfile command: --log.level=warn --config.file=/etc/prometheus/prometheus.yml --storage.tsdb.path=/prometheus --storage.tsdb.retention.time=150d --web.console.libraries=/usr/share/prometheus/console_libraries --web.console.templates=/usr/share/prometheus/consoles ports: [ "9090:9090" ] volumes: @@ -77,7 +77,7 @@ services: grafana: image: grafana/grafana:9.0.3 - user: "472:0" # required for grafana version >= 7.3 + user: 472:0 # required for grafana version >= 7.3 ports: [ "3000:3000" ] volumes: - ${ERIGON_GRAFANA_CONFIG:-./cmd/prometheus/grafana.ini}:/etc/grafana/grafana.ini From e8f83db20805f45ff9f883af5a5c26a727e098ab Mon Sep 17 00:00:00 2001 From: Max Revitt Date: Fri, 15 Jul 2022 13:56:35 +0100 Subject: [PATCH 089/152] Gas api unit tests (#4715) * gas price initial unit tests * tweak(makefile): gas price test timeout increase increase test timeout in Makefile to 50s from 30s to cater for increased test time unit testing gas price logic. 
Co-authored-by: Scott Fairclough --- Makefile | 2 +- cmd/rpcdaemon/commands/eth_system_test.go | 93 +++++++++++++++++++++++ 2 files changed, 94 insertions(+), 1 deletion(-) create mode 100644 cmd/rpcdaemon/commands/eth_system_test.go diff --git a/Makefile b/Makefile index 1cff92359d3..3546d3e065e 100644 --- a/Makefile +++ b/Makefile @@ -127,7 +127,7 @@ db-tools: git-submodules @echo "Run \"$(GOBIN)/mdbx_stat -h\" to get info about mdbx db file." test: - $(GOTEST) --timeout 30s + $(GOTEST) --timeout 50s test-integration: $(GOTEST) --timeout 30m -tags $(BUILD_TAGS),integration diff --git a/cmd/rpcdaemon/commands/eth_system_test.go b/cmd/rpcdaemon/commands/eth_system_test.go new file mode 100644 index 00000000000..5163590e88e --- /dev/null +++ b/cmd/rpcdaemon/commands/eth_system_test.go @@ -0,0 +1,93 @@ +package commands + +import ( + "context" + "math" + "math/big" + "testing" + + "github.com/holiman/uint256" + "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/erigon-lib/kv/kvcache" + + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/core" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/crypto" + "github.com/ledgerwatch/erigon/params" + "github.com/ledgerwatch/erigon/turbo/snapshotsync" + "github.com/ledgerwatch/erigon/turbo/stages" +) + +func TestGasPrice(t *testing.T) { + + cases := []struct { + description string + chainSize int + expectedPrice *big.Int + }{ + { + description: "standard settings 60 blocks", + chainSize: 60, + expectedPrice: big.NewInt(params.GWei * int64(36)), + }, + { + description: "standard settings 30 blocks", + chainSize: 30, + expectedPrice: big.NewInt(params.GWei * int64(18)), + }, + } + + for _, testCase := range cases { + t.Run(testCase.description, func(t *testing.T) { + db := createGasPriceTestKV(t, testCase.chainSize) + defer db.Close() + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + base := NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), 
false) + eth := NewEthAPI(base, db, nil, nil, nil, 5000000) + + ctx := context.Background() + result, err := eth.GasPrice(ctx) + if err != nil { + t.Fatalf("error getting gas price: %s", err) + } + + if testCase.expectedPrice.Cmp(result.ToInt()) != 0 { + t.Fatalf("gas price mismatch, want %d, got %d", testCase.expectedPrice, result.ToInt()) + } + }) + } + +} + +func createGasPriceTestKV(t *testing.T, chainSize int) kv.RwDB { + var ( + key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + addr = crypto.PubkeyToAddress(key.PublicKey) + gspec = &core.Genesis{ + Config: params.TestChainConfig, + Alloc: core.GenesisAlloc{addr: {Balance: big.NewInt(math.MaxInt64)}}, + } + signer = types.LatestSigner(gspec.Config) + ) + m := stages.MockWithGenesis(t, gspec, key) + + // Generate testing blocks + chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, chainSize, func(i int, b *core.BlockGen) { + b.SetCoinbase(common.Address{1}) + tx, txErr := types.SignTx(types.NewTransaction(b.TxNonce(addr), common.HexToAddress("deadbeef"), uint256.NewInt(100), 21000, uint256.NewInt(uint64(int64(i+1)*params.GWei)), nil), *signer, key) + if txErr != nil { + t.Fatalf("failed to create tx: %v", txErr) + } + b.AddTx(tx) + }, false) + if err != nil { + t.Error(err) + } + // Construct testing chain + if err = m.InsertChain(chain); err != nil { + t.Error(err) + } + + return m.DB +} From b6440eea1e2d5c8eb36ab176e275e0b0c12cb99d Mon Sep 17 00:00:00 2001 From: Levi Aul Date: Fri, 15 Jul 2022 07:04:23 -0700 Subject: [PATCH 090/152] Add erigon_getBalanceChangesInBlock RPC endpoint (#4609) * Add eth_getBalanceChangesInBlock RPC endpoint * Fix lints * added assertion for one test * moved balance change api from eth to erigon Co-authored-by: fatemebagherii --- cmd/rpcdaemon/commands/erigon_api.go | 2 + cmd/rpcdaemon/commands/erigon_block.go | 68 ++++++++++++++++++++++++++ cmd/rpcdaemon/commands/eth_api_test.go | 25 ++++++++++ 3 files changed, 
95 insertions(+) diff --git a/cmd/rpcdaemon/commands/erigon_api.go b/cmd/rpcdaemon/commands/erigon_api.go index f976cf31f05..87781938642 100644 --- a/cmd/rpcdaemon/commands/erigon_api.go +++ b/cmd/rpcdaemon/commands/erigon_api.go @@ -5,6 +5,7 @@ import ( "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/hexutil" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/p2p" "github.com/ledgerwatch/erigon/rpc" @@ -20,6 +21,7 @@ type ErigonAPI interface { GetHeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) GetHeaderByHash(_ context.Context, hash common.Hash) (*types.Header, error) GetBlockByTimestamp(ctx context.Context, timeStamp rpc.Timestamp, fullTx bool) (map[string]interface{}, error) + GetBalanceChangesInBlock(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (map[common.Address]*hexutil.Big, error) // Receipt related (see ./erigon_receipts.go) GetLogsByHash(ctx context.Context, hash common.Hash) ([][]*types.Log, error) diff --git a/cmd/rpcdaemon/commands/erigon_block.go b/cmd/rpcdaemon/commands/erigon_block.go index 7a5bf1bda1d..e56cba2a1d3 100644 --- a/cmd/rpcdaemon/commands/erigon_block.go +++ b/cmd/rpcdaemon/commands/erigon_block.go @@ -1,15 +1,21 @@ package commands import ( + "bytes" "context" "errors" "fmt" "sort" + "github.com/holiman/uint256" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/changeset" + "github.com/ledgerwatch/erigon/common/dbutils" + "github.com/ledgerwatch/erigon/common/hexutil" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/core/types/accounts" "github.com/ledgerwatch/erigon/internal/ethapi" "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/turbo/rpchelper" @@ -168,3 +174,65 @@ func buildBlockResponse(db kv.Tx, blockNum uint64, fullTx bool) 
(map[string]inte } return response, err } + +func (api *ErigonImpl) GetBalanceChangesInBlock(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (map[common.Address]*hexutil.Big, error) { + tx, err := api.db.BeginRo(ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + + blockNumber, _, _, err := rpchelper.GetBlockNumber(blockNrOrHash, tx, api.filters) + if err != nil { + return nil, err + } + + c, err := tx.Cursor(kv.AccountChangeSet) + if err != nil { + return nil, err + } + defer c.Close() + + startkey := dbutils.EncodeBlockNumber(blockNumber) + + decodeFn := changeset.Mapper[kv.AccountChangeSet].Decode + + balancesMapping := make(map[common.Address]*hexutil.Big) + + newReader, err := rpchelper.CreateStateReader(ctx, tx, blockNrOrHash, api.filters, api.stateCache) + if err != nil { + return nil, err + } + + for dbKey, dbValue, _ := c.Seek(startkey); bytes.Equal(dbKey, startkey) && dbKey != nil; dbKey, dbValue, _ = c.Next() { + _, addressBytes, v, err := decodeFn(dbKey, dbValue) + if err != nil { + return nil, err + } + + var oldAcc accounts.Account + if err = oldAcc.DecodeForStorage(v); err != nil { + return nil, err + } + oldBalance := oldAcc.Balance + + address := common.BytesToAddress(addressBytes) + + newAcc, err := newReader.ReadAccountData(address) + if err != nil { + return nil, err + } + + newBalance := uint256.NewInt(0) + if newAcc != nil { + newBalance = &newAcc.Balance + } + + if !oldBalance.Eq(newBalance) { + newBalanceDesc := (*hexutil.Big)(newBalance.ToBig()) + balancesMapping[address] = newBalanceDesc + } + } + + return balancesMapping, nil +} diff --git a/cmd/rpcdaemon/commands/eth_api_test.go b/cmd/rpcdaemon/commands/eth_api_test.go index cc940c43e66..043f620db7b 100644 --- a/cmd/rpcdaemon/commands/eth_api_test.go +++ b/cmd/rpcdaemon/commands/eth_api_test.go @@ -5,6 +5,8 @@ import ( "fmt" "testing" + "github.com/holiman/uint256" + "github.com/ledgerwatch/erigon/common/hexutil" "github.com/ledgerwatch/erigon/core" 
"github.com/ledgerwatch/erigon/internal/ethapi" "github.com/ledgerwatch/erigon/rpc" @@ -16,6 +18,29 @@ import ( "github.com/ledgerwatch/erigon/common" ) +func TestGetBalanceChangesInBlock(t *testing.T) { + assert := assert.New(t) + myBlockNum := rpc.BlockNumberOrHashWithNumber(0) + + db := rpcdaemontest.CreateTestKV(t) + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + api := NewErigonAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil) + balances, err := api.GetBalanceChangesInBlock(context.Background(), myBlockNum) + if err != nil { + t.Errorf("calling GetBalanceChangesInBlock resulted in an error: %v", err) + } + expected := map[common.Address]*hexutil.Big{ + common.HexToAddress("0x0D3ab14BBaD3D99F4203bd7a11aCB94882050E7e"): (*hexutil.Big)(uint256.NewInt(200000000000000000).ToBig()), + common.HexToAddress("0x703c4b2bD70c169f5717101CaeE543299Fc946C7"): (*hexutil.Big)(uint256.NewInt(300000000000000000).ToBig()), + common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): (*hexutil.Big)(uint256.NewInt(9000000000000000000).ToBig()), + } + assert.Equal(len(expected), len(balances)) + for i := range balances { + assert.Contains(expected, i, "%s is not expected to be present in the output.", i) + assert.Equal(balances[i], expected[i], "the value for %s is expected to be %v, but got %v.", i, expected[i], balances[i]) + } +} + func TestGetTransactionReceipt(t *testing.T) { db := rpcdaemontest.CreateTestKV(t) stateCache := kvcache.New(kvcache.DefaultCoherentConfig) From e04401491fa310ec7bc54d0cc530cbaded6d70e8 Mon Sep 17 00:00:00 2001 From: Enrique Jose Avila Asapche Date: Sat, 16 Jul 2022 11:06:26 +0300 Subject: [PATCH 091/152] checking if we build torrent file (#4723) * checking if we build torrent file * only if torrentHash != nil * clearer separation of scenario * refactored Download * comments * ops * not using magnet with empty hash * moved log to top * ops * logs * log warns * bumped up log lvl * log --- 
.../downloader/downloader_grpc_server.go | 112 ++++++++++++------ 1 file changed, 78 insertions(+), 34 deletions(-) diff --git a/cmd/downloader/downloader/downloader_grpc_server.go b/cmd/downloader/downloader/downloader_grpc_server.go index c75516aa439..062d086c219 100644 --- a/cmd/downloader/downloader/downloader_grpc_server.go +++ b/cmd/downloader/downloader/downloader_grpc_server.go @@ -5,6 +5,7 @@ import ( "fmt" "time" + "github.com/anacrolix/torrent" "github.com/anacrolix/torrent/metainfo" "github.com/ledgerwatch/erigon-lib/gointerfaces" proto_downloader "github.com/ledgerwatch/erigon-lib/gointerfaces/downloader" @@ -32,49 +33,33 @@ func (s *GrpcServer) Download(ctx context.Context, request *proto_downloader.Dow defer logEvery.Stop() torrentClient := s.d.Torrent() - mi := &metainfo.MetaInfo{AnnounceList: Trackers} + snapDir := s.d.SnapDir() for i, it := range request.Items { - if it.TorrentHash == nil { // seed new snapshot - if err := BuildTorrentFileIfNeed(it.Path, s.d.SnapDir()); err != nil { - return nil, err - } - } - - hash := Proto2InfoHash(it.TorrentHash) - if _, ok := torrentClient.Torrent(hash); ok { - continue - } - - ok, err := AddSegment(it.Path, s.d.SnapDir(), torrentClient) - if err != nil { - return nil, fmt.Errorf("AddSegment: %w", err) - } select { case <-logEvery.C: - log.Info("[snpshots] initializing", "files", fmt.Sprintf("%d/%d", i, len(request.Items))) + log.Info("[snapshots] initializing", "files", fmt.Sprintf("%d/%d", i, len(request.Items))) default: } - if ok { - continue - } - magnet := mi.Magnet(&hash, nil) - go func(magnetUrl string) { - t, err := torrentClient.AddMagnet(magnetUrl) + if it.TorrentHash == nil { + // if we dont have the torrent hash then we seed a new snapshot + log.Info("[snapshots] seeding a new snapshot") + ok, err := seedNewSnapshot(it, torrentClient, snapDir) if err != nil { - log.Warn("[downloader] add magnet link", "err", err) - return + return nil, err } - t.DisallowDataDownload() - t.AllowDataUpload() - 
<-t.GotInfo() - - mi := t.Metainfo() - if err := CreateTorrentFileIfNotExists(s.d.SnapDir(), t.Info(), &mi); err != nil { - log.Warn("[downloader] create torrent file", "err", err) - return + if ok { + log.Debug("[snapshots] already have both seg and torrent file") + } else { + log.Warn("[snapshots] didn't get the seg or the torrent file") } - }(magnet.String()) + continue + } + + _, err := createMagnetLinkWithInfoHash(it.TorrentHash, torrentClient, snapDir) + if err != nil { + return nil, err + } } s.d.ReCalcStats(10 * time.Second) // immediately call ReCalc to set stat.Complete flag return &emptypb.Empty{}, nil @@ -110,3 +95,62 @@ func (s *GrpcServer) Stats(ctx context.Context, request *proto_downloader.StatsR func Proto2InfoHash(in *prototypes.H160) metainfo.Hash { return gointerfaces.ConvertH160toAddress(in) } + +// decides what we do depending on wether we have the .seg file or the .torrent file +// have .torrent no .seg => get .seg file from .torrent +// have .seg no .torrent => get .torrent from .seg +func seedNewSnapshot(it *proto_downloader.DownloadItem, torrentClient *torrent.Client, snapDir string) (bool, error) { + // if we dont have the torrent file we build it if we have the .seg file + if err := BuildTorrentFileIfNeed(it.Path, snapDir); err != nil { + return false, err + } + + // we add the .seg file we have and create the .torrent file if we dont have it + ok, err := AddSegment(it.Path, snapDir, torrentClient) + if err != nil { + return false, fmt.Errorf("AddSegment: %w", err) + } + + // torrent file does exist and seg + if !ok { + return false, nil + } + + // we skip the item in for loop since we build the seg and torrent file here + return true, nil +} + +// we dont have .seg or .torrent so we get them through the torrent hash +func createMagnetLinkWithInfoHash(hash *prototypes.H160, torrentClient *torrent.Client, snapDir string) (bool, error) { + mi := &metainfo.MetaInfo{AnnounceList: Trackers} + if hash == nil { + return false, nil + } + 
infoHash := Proto2InfoHash(hash) + log.Debug("[downloader] downloading torrent and seg file", "hash", infoHash) + + if _, ok := torrentClient.Torrent(infoHash); ok { + log.Debug("[downloader] torrent client related to hash found", "hash", infoHash) + return true, nil + } + + magnet := mi.Magnet(&infoHash, nil) + go func(magnetUrl string) { + t, err := torrentClient.AddMagnet(magnetUrl) + if err != nil { + log.Warn("[downloader] add magnet link", "err", err) + return + } + t.DisallowDataDownload() + t.AllowDataUpload() + <-t.GotInfo() + + mi := t.Metainfo() + if err := CreateTorrentFileIfNotExists(snapDir, t.Info(), &mi); err != nil { + log.Warn("[downloader] create torrent file", "err", err) + return + } + }(magnet.String()) + log.Debug("[downloader] downloaded both seg and torrent files", "hash", infoHash) + return false, nil +} From 8a754cd252d00ee33184582814bd1bc7b72a217e Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Sun, 17 Jul 2022 02:02:53 +0200 Subject: [PATCH 092/152] Added PoS download validation when applicable (#4728) * added incomplete version of PoS download validation * fixed stuff --- eth/stagedsync/default_stages.go | 16 ++++++++-------- eth/stagedsync/stage_headers.go | 25 ++++++++++++++++++++++--- eth/stagedsync/sync.go | 20 ++++++++++++++++++++ turbo/engineapi/fork_validator.go | 27 +++++++++++++++++++++++++-- turbo/stages/stageloop.go | 23 +++++++++++++---------- 5 files changed, 88 insertions(+), 23 deletions(-) diff --git a/eth/stagedsync/default_stages.go b/eth/stagedsync/default_stages.go index 359f7aed023..869fc7331fe 100644 --- a/eth/stagedsync/default_stages.go +++ b/eth/stagedsync/default_stages.go @@ -242,23 +242,23 @@ func StateStages(ctx context.Context, headers HeadersCfg, bodies BodiesCfg, bloc }, }, { - ID: stages.BlockHashes, - Description: "Write block hashes", + ID: stages.Bodies, + Description: "Download block bodies", Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { - 
return SpawnBlockHashStage(s, tx, blockHashCfg, ctx) + return BodiesForward(s, u, ctx, tx, bodies, false, false) }, Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { - return UnwindBlockHashStage(u, tx, blockHashCfg, ctx) + return UnwindBodiesStage(u, tx, bodies, ctx) }, }, { - ID: stages.Bodies, - Description: "Download block bodies", + ID: stages.BlockHashes, + Description: "Write block hashes", Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { - return nil + return SpawnBlockHashStage(s, tx, blockHashCfg, ctx) }, Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { - return UnwindBodiesStage(u, tx, bodies, ctx) + return UnwindBlockHashStage(u, tx, blockHashCfg, ctx) }, }, { diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 5db5c099c43..bf6ad576a4c 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -666,16 +666,32 @@ func schedulePoSDownload( func verifyAndSaveDownloadedPoSHeaders(tx kv.RwTx, cfg HeadersCfg, headerInserter *headerdownload.HeaderInserter) { var lastValidHash common.Hash - + var badChainError error headerLoadFunc := func(key, value []byte, _ etl.CurrentTableReader, _ etl.LoadNextFunc) error { var h types.Header if err := rlp.DecodeBytes(value, &h); err != nil { return err } + if badChainError != nil { + cfg.hd.ReportBadHeaderPoS(h.Hash(), lastValidHash) + return nil + } lastValidHash = h.ParentHash if err := cfg.hd.VerifyHeader(&h); err != nil { log.Warn("Verification failed for header", "hash", h.Hash(), "height", h.Number.Uint64(), "err", err) - return err + badChainError = err + cfg.hd.ReportBadHeaderPoS(h.Hash(), lastValidHash) + return nil + } + // Validate state if possible (bodies will be retrieved through body download) + _, _, validationError, criticalError := cfg.forkValidator.ValidatePayload(tx, &h, nil, false) + if criticalError != nil { + return criticalError + 
} + if validationError != nil { + badChainError = validationError + cfg.hd.ReportBadHeaderPoS(h.Hash(), lastValidHash) + return nil } return headerInserter.FeedHeaderPoS(tx, &h, h.Hash()) } @@ -686,7 +702,10 @@ func verifyAndSaveDownloadedPoSHeaders(tx kv.RwTx, cfg HeadersCfg, headerInserte }, }) - if err != nil { + if err != nil || badChainError != nil { + if err == nil { + err = badChainError + } log.Warn("Removing beacon request due to", "err", err, "requestId", cfg.hd.RequestId()) cfg.hd.BeaconRequestList.Remove(cfg.hd.RequestId()) cfg.hd.ReportBadHeaderPoS(cfg.hd.PoSDownloaderTip(), lastValidHash) diff --git a/eth/stagedsync/sync.go b/eth/stagedsync/sync.go index 5b26d418a39..2fd46f1c1fb 100644 --- a/eth/stagedsync/sync.go +++ b/eth/stagedsync/sync.go @@ -189,6 +189,26 @@ func (s *Sync) StageState(stage stages.SyncStage, tx kv.Tx, db kv.RoDB) (*StageS return &StageState{s, stage, blockNum}, nil } +func (s *Sync) RunUnwind(db kv.RwDB, tx kv.RwTx) error { + if s.unwindPoint == nil { + return nil + } + for j := 0; j < len(s.unwindOrder); j++ { + if s.unwindOrder[j] == nil || s.unwindOrder[j].Disabled || s.unwindOrder[j].Unwind == nil { + continue + } + if err := s.unwindStage(false, s.unwindOrder[j], db, tx); err != nil { + return err + } + } + s.prevUnwindPoint = s.unwindPoint + s.unwindPoint = nil + s.badBlock = common.Hash{} + if err := s.SetCurrentStage(s.stages[0].ID); err != nil { + return err + } + return nil +} func (s *Sync) Run(db kv.RwDB, tx kv.RwTx, firstCycle bool) error { s.prevUnwindPoint = nil s.timings = s.timings[:0] diff --git a/turbo/engineapi/fork_validator.go b/turbo/engineapi/fork_validator.go index e65db857423..91b87b31087 100644 --- a/turbo/engineapi/fork_validator.go +++ b/turbo/engineapi/fork_validator.go @@ -14,12 +14,15 @@ package engineapi import ( + "bytes" + "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/memdb" 
"github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/log/v3" ) @@ -177,7 +180,6 @@ func (fv *ForkValidator) Clear(tx kv.RwTx) { } fv.extendingFork.Rollback() } - // Clean all data relative to txpool fv.extendingForkHeadHash = common.Hash{} fv.extendingFork = nil } @@ -191,8 +193,29 @@ func (fv *ForkValidator) validateAndStorePayload(tx kv.RwTx, header *types.Heade status = remote.EngineStatus_INVALID return } + // If we do not have the body we can recover it from the batch. + if body == nil { + var bodyWithTxs *types.Body + bodyWithTxs, criticalError = rawdb.ReadBodyWithTransactions(tx, header.Hash(), header.Number.Uint64()) + if criticalError != nil { + return + } + var encodedTxs [][]byte + buf := bytes.NewBuffer(nil) + for _, tx := range bodyWithTxs.Transactions { + buf.Reset() + if criticalError = rlp.Encode(buf, tx); criticalError != nil { + return + } + encodedTxs = append(encodedTxs, common.CopyBytes(buf.Bytes())) + } + fv.sideForksBlock[header.Hash()] = forkSegment{header, &types.RawBody{ + Transactions: encodedTxs, + }} + } else { + fv.sideForksBlock[header.Hash()] = forkSegment{header, body} + } status = remote.EngineStatus_VALID - fv.sideForksBlock[header.Hash()] = forkSegment{header, body} return } diff --git a/turbo/stages/stageloop.go b/turbo/stages/stageloop.go index 36ae3fca7f7..8a0caa5451f 100644 --- a/turbo/stages/stageloop.go +++ b/turbo/stages/stageloop.go @@ -259,7 +259,7 @@ func StateStep(ctx context.Context, batch kv.RwTx, stateSync *stagedsync.Sync, h if unwindPoint > 0 { // Run it through the unwind stateSync.UnwindTo(unwindPoint, common.Hash{}) - if err = stateSync.Run(nil, batch, false); err != nil { + if err = stateSync.RunUnwind(nil, batch); err != nil { return err } // Once we unwond we can start constructing the chain (assumption: len(headersChain) == len(bodiesChain)) @@ -282,17 +282,13 @@ func 
StateStep(ctx context.Context, batch kv.RwTx, stateSync *stagedsync.Sync, h } } // If we did not specify header or body we stop here - if header == nil || body == nil { + if header == nil { return nil } // Setup height := header.Number.Uint64() hash := header.Hash() // Prepare memory state for block execution - if err = rawdb.WriteRawBodyIfNotExists(batch, hash, height, body); err != nil { - return err - } - rawdb.WriteHeader(batch, header) if err = rawdb.WriteHeaderNumber(batch, hash, height); err != nil { return err @@ -309,11 +305,18 @@ func StateStep(ctx context.Context, batch kv.RwTx, stateSync *stagedsync.Sync, h if err = stages.SaveStageProgress(batch, stages.Headers, height); err != nil { return err } - - if err = stages.SaveStageProgress(batch, stages.Bodies, height); err != nil { - return err + if body != nil { + if err = stages.SaveStageProgress(batch, stages.Bodies, height); err != nil { + return err + } + if err = rawdb.WriteRawBodyIfNotExists(batch, hash, height, body); err != nil { + return err + } + } else { + if err = stages.SaveStageProgress(batch, stages.Bodies, height-1); err != nil { + return err + } } - // Run state sync if err = stateSync.Run(nil, batch, false); err != nil { return err From fb9f19334935fde694ed567c30af4baa2aa0786b Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Mon, 18 Jul 2022 03:04:02 +0200 Subject: [PATCH 093/152] fixed Two Block PoW Re-org to Higher-Height Chain (#4730) Co-authored-by: giuliorebuffo --- eth/stagedsync/stage_headers.go | 11 ++++++++++ turbo/engineapi/fork_validator.go | 7 +++++++ turbo/stages/stageloop.go | 35 ++++++++++++++++--------------- 3 files changed, 36 insertions(+), 17 deletions(-) diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index bf6ad576a4c..3f2996bf937 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -667,6 +667,8 @@ func schedulePoSDownload( func verifyAndSaveDownloadedPoSHeaders(tx kv.RwTx, cfg HeadersCfg, 
headerInserter *headerdownload.HeaderInserter) { var lastValidHash common.Hash var badChainError error + var foundPow bool + headerLoadFunc := func(key, value []byte, _ etl.CurrentTableReader, _ etl.LoadNextFunc) error { var h types.Header if err := rlp.DecodeBytes(value, &h); err != nil { @@ -683,6 +685,15 @@ func verifyAndSaveDownloadedPoSHeaders(tx kv.RwTx, cfg HeadersCfg, headerInserte cfg.hd.ReportBadHeaderPoS(h.Hash(), lastValidHash) return nil } + // If we are in PoW range then block validation is not required anymore. + if foundPow { + return headerInserter.FeedHeaderPoS(tx, &h, h.Hash()) + } + + foundPow = h.Difficulty.Cmp(common.Big0) != 0 + if foundPow { + return headerInserter.FeedHeaderPoS(tx, &h, h.Hash()) + } // Validate state if possible (bodies will be retrieved through body download) _, _, validationError, criticalError := cfg.forkValidator.ValidatePayload(tx, &h, nil, false) if criticalError != nil { diff --git a/turbo/engineapi/fork_validator.go b/turbo/engineapi/fork_validator.go index 91b87b31087..ae83190cd4b 100644 --- a/turbo/engineapi/fork_validator.go +++ b/turbo/engineapi/fork_validator.go @@ -15,6 +15,7 @@ package engineapi import ( "bytes" + "fmt" "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" "github.com/ledgerwatch/erigon-lib/kv" @@ -130,6 +131,7 @@ func (fv *ForkValidator) ValidatePayload(tx kv.RwTx, header *types.Header, body // if the block is not in range of maxForkDepth from head then we do not validate it. 
if abs64(int64(fv.currentHeight)-header.Number.Int64()) > maxForkDepth { status = remote.EngineStatus_ACCEPTED + fmt.Println("not in range") return } // Let's assemble the side fork backwards @@ -137,6 +139,7 @@ func (fv *ForkValidator) ValidatePayload(tx kv.RwTx, header *types.Header, body currentHash := header.ParentHash foundCanonical, criticalError = rawdb.IsCanonicalHash(tx, currentHash) if criticalError != nil { + fmt.Println("critical") return } @@ -160,6 +163,10 @@ func (fv *ForkValidator) ValidatePayload(tx kv.RwTx, header *types.Header, body } unwindPoint = sb.header.Number.Uint64() - 1 } + // Do not set an unwind point if we are already there. + if unwindPoint == fv.currentHeight { + unwindPoint = 0 + } // if it is not canonical we validate it in memory and discard it aferwards. batch := memdb.NewMemoryBatch(tx) defer batch.Close() diff --git a/turbo/stages/stageloop.go b/turbo/stages/stageloop.go index 8a0caa5451f..5280dc8b7e3 100644 --- a/turbo/stages/stageloop.go +++ b/turbo/stages/stageloop.go @@ -262,25 +262,26 @@ func StateStep(ctx context.Context, batch kv.RwTx, stateSync *stagedsync.Sync, h if err = stateSync.RunUnwind(nil, batch); err != nil { return err } - // Once we unwond we can start constructing the chain (assumption: len(headersChain) == len(bodiesChain)) - for i := range headersChain { - currentHeader := headersChain[i] - currentBody := bodiesChain[i] - currentHeight := headersChain[i].Number.Uint64() - currentHash := headersChain[i].Hash() - // Prepare memory state for block execution - if err = rawdb.WriteRawBodyIfNotExists(batch, currentHash, currentHeight, currentBody); err != nil { - return err - } - rawdb.WriteHeader(batch, currentHeader) - if err = rawdb.WriteHeaderNumber(batch, currentHash, currentHeight); err != nil { - return err - } - if err = rawdb.WriteCanonicalHash(batch, currentHash, currentHeight); err != nil { - return err - } + } + // Once we unwond we can start constructing the chain (assumption: len(headersChain) == 
len(bodiesChain)) + for i := range headersChain { + currentHeader := headersChain[i] + currentBody := bodiesChain[i] + currentHeight := headersChain[i].Number.Uint64() + currentHash := headersChain[i].Hash() + // Prepare memory state for block execution + if err = rawdb.WriteRawBodyIfNotExists(batch, currentHash, currentHeight, currentBody); err != nil { + return err + } + rawdb.WriteHeader(batch, currentHeader) + if err = rawdb.WriteHeaderNumber(batch, currentHash, currentHeight); err != nil { + return err + } + if err = rawdb.WriteCanonicalHash(batch, currentHash, currentHeight); err != nil { + return err } } + // If we did not specify header or body we stop here if header == nil { return nil From ac9b7d8cc2dc75deb92de740dfce464552301c8e Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Mon, 18 Jul 2022 12:19:46 +0700 Subject: [PATCH 094/152] commitment: generic btree #4731 --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index afa34d5579d..6f1ffe0d4d0 100644 --- a/go.mod +++ b/go.mod @@ -36,7 +36,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible - github.com/ledgerwatch/erigon-lib v0.0.0-20220714160525-21aa65c1c383 + github.com/ledgerwatch/erigon-lib v0.0.0-20220718042200-78323471184c github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 diff --git a/go.sum b/go.sum index d59d34e8091..55948da44b2 100644 --- a/go.sum +++ b/go.sum @@ -390,8 +390,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3PYPwICLl+/9oulQauOuETfgFvhBDffs0= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= 
-github.com/ledgerwatch/erigon-lib v0.0.0-20220714160525-21aa65c1c383 h1:1EE1EIsDHok6NrzqQjGqkCj47APObiqFbgv+s7GJMrk= -github.com/ledgerwatch/erigon-lib v0.0.0-20220714160525-21aa65c1c383/go.mod h1:lrUxxrH85rkNMGFT7K4aloNMOf7jG+bVYAHhmyi7oaU= +github.com/ledgerwatch/erigon-lib v0.0.0-20220718042200-78323471184c h1:jkzM2nkZ+FNstxqaH8cq6PKskFSnilJ5QmRo49SI+o4= +github.com/ledgerwatch/erigon-lib v0.0.0-20220718042200-78323471184c/go.mod h1:lrUxxrH85rkNMGFT7K4aloNMOf7jG+bVYAHhmyi7oaU= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= From 13bf5c30c11c54188a85c9b5915156852619eccf Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Mon, 18 Jul 2022 14:38:01 +0700 Subject: [PATCH 095/152] db migration fix: it was able run with delay #4732 --- migrations/reset_blocks.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/migrations/reset_blocks.go b/migrations/reset_blocks.go index 16256bf3267..30a187a1237 100644 --- a/migrations/reset_blocks.go +++ b/migrations/reset_blocks.go @@ -30,14 +30,14 @@ var resetBlocks = Migration{ if err := BeforeCommit(tx, nil, true); err != nil { return err } - return + return tx.Commit() } genesisBlock := rawdb.ReadHeaderByNumber(tx, 0) if genesisBlock == nil { if err := BeforeCommit(tx, nil, true); err != nil { return err } - return nil + return tx.Commit() } chainConfig, err := rawdb.ReadChainConfig(tx, genesisBlock.Hash()) if err != nil { From 15ca3d25c1a0d707d18c87bc5f55c9fddab375aa Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Mon, 18 Jul 2022 15:36:53 +0700 Subject: [PATCH 096/152] snapshots: mainnet to 15m #4733 --- turbo/snapshotsync/snapshothashes/erigon-snapshots | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/turbo/snapshotsync/snapshothashes/erigon-snapshots 
b/turbo/snapshotsync/snapshothashes/erigon-snapshots index 9cd91d0b377..d90ddcf7257 160000 --- a/turbo/snapshotsync/snapshothashes/erigon-snapshots +++ b/turbo/snapshotsync/snapshothashes/erigon-snapshots @@ -1 +1 @@ -Subproject commit 9cd91d0b377149102613f6bec46f28429aa3c761 +Subproject commit d90ddcf72579066b48d631fc5a84dcfbbf2bac49 From b7acf6c108d2a1205634e9b8c7c756d788d7e782 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Mon, 18 Jul 2022 17:12:13 +0700 Subject: [PATCH 097/152] compressor: generic sort (#4734) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 6f1ffe0d4d0..d6b6f4d3c58 100644 --- a/go.mod +++ b/go.mod @@ -36,7 +36,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible - github.com/ledgerwatch/erigon-lib v0.0.0-20220718042200-78323471184c + github.com/ledgerwatch/erigon-lib v0.0.0-20220718054733-b645e40aa475 github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 diff --git a/go.sum b/go.sum index 55948da44b2..19dabbc596a 100644 --- a/go.sum +++ b/go.sum @@ -390,8 +390,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3PYPwICLl+/9oulQauOuETfgFvhBDffs0= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220718042200-78323471184c h1:jkzM2nkZ+FNstxqaH8cq6PKskFSnilJ5QmRo49SI+o4= -github.com/ledgerwatch/erigon-lib v0.0.0-20220718042200-78323471184c/go.mod h1:lrUxxrH85rkNMGFT7K4aloNMOf7jG+bVYAHhmyi7oaU= +github.com/ledgerwatch/erigon-lib v0.0.0-20220718054733-b645e40aa475 h1:WULehvYiLzt/pXBZBMEXNMC8w4S0PrgM0UC7r3J2Z1M= 
+github.com/ledgerwatch/erigon-lib v0.0.0-20220718054733-b645e40aa475/go.mod h1:lrUxxrH85rkNMGFT7K4aloNMOf7jG+bVYAHhmyi7oaU= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= From 8dceb6fe8270bb0d47d4c4d3f5413f17e1255978 Mon Sep 17 00:00:00 2001 From: Enrique Jose Avila Asapche Date: Mon, 18 Jul 2022 17:42:20 +0300 Subject: [PATCH 098/152] Auto download snapshots (#4729) * refactored request download * keeping track of missing snapshots * using slice mergeRange * request snapshots on reopen * passing arguments * passed in var * Revert "passed in var" This reverts commit 90478978dfa9f2a6dd5b1b051fc1d3f9e5f7a9c5. * Revert "passing arguments" This reverts commit 1e39c4152003796f6ff0bcfc188512d4a43bd18d. * Revert "request snapshots on reopen" This reverts commit d40212b973bc15db2b25cc1b0abb22051a3debb1. 
* added downloadRequest ; * downloading missing headers at start up * there shouldnt be an error anymore * not using nil; ; --- eth/stagedsync/stage_headers.go | 24 +++-- turbo/snapshotsync/block_snapshots.go | 109 +++++++++++++++------ turbo/snapshotsync/block_snapshots_test.go | 4 +- 3 files changed, 96 insertions(+), 41 deletions(-) diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 3f2996bf937..688a94972cf 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -17,7 +17,6 @@ import ( proto_downloader "github.com/ledgerwatch/erigon-lib/gointerfaces/downloader" "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" "github.com/ledgerwatch/erigon-lib/kv" - "github.com/ledgerwatch/erigon/cmd/downloader/downloadergrpc" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/core/rawdb" @@ -1333,11 +1332,20 @@ func WaitForDownloader(ctx context.Context, cfg HeadersCfg, tx kv.RwTx) error { return err } dbEmpty := len(snInDB) == 0 + var missingSnapshots []snapshotsync.MergeRange + if !dbEmpty { + _, missingSnapshots, err = snapshotsync.Segments(cfg.snapshots.Dir()) + if err != nil { + return err + } + } // send all hashes to the Downloader service preverified := snapshothashes.KnownConfig(cfg.chainConfig.ChainName).Preverified - req := &proto_downloader.DownloadRequest{Items: make([]*proto_downloader.DownloadItem, 0, len(preverified))} i := 0 + var downloadRequest []snapshotsync.DownloadRequest + // build all download requests + // builds preverified snapshots request for _, p := range preverified { _, has := snInDB[p.Name] if !dbEmpty && !has { @@ -1346,13 +1354,15 @@ func WaitForDownloader(ctx context.Context, cfg HeadersCfg, tx kv.RwTx) error { if dbEmpty { snInDB[p.Name] = p.Hash } - - req.Items = append(req.Items, &proto_downloader.DownloadItem{ - TorrentHash: downloadergrpc.String2Proto(p.Hash), - Path: p.Name, - }) + downloadRequest = 
append(downloadRequest, snapshotsync.NewDownloadRequest(nil, p.Name, p.Hash)) i++ } + // builds missing snapshots request + for _, r := range missingSnapshots { + downloadRequest = append(downloadRequest, snapshotsync.NewDownloadRequest(&r, "", "")) + } + req := snapshotsync.BuildProtoRequest(downloadRequest) + log.Info("[Snapshots] Fetching torrent files metadata") for { select { diff --git a/turbo/snapshotsync/block_snapshots.go b/turbo/snapshotsync/block_snapshots.go index 373079f3b6a..6f645bb7a21 100644 --- a/turbo/snapshotsync/block_snapshots.go +++ b/turbo/snapshotsync/block_snapshots.go @@ -24,6 +24,7 @@ import ( "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/recsplit" types2 "github.com/ledgerwatch/erigon-lib/types" + "github.com/ledgerwatch/erigon/cmd/downloader/downloadergrpc" "github.com/ledgerwatch/erigon/cmd/hack/tool" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/dbutils" @@ -40,6 +41,12 @@ import ( "golang.org/x/exp/slices" ) +type DownloadRequest struct { + ranges *MergeRange + path string + torrentHash string +} + type HeaderSegment struct { seg *compress.Decompressor // value: first_byte_of_header_hash + header_rlp idxHeaderHash *recsplit.Index // header_hash -> headers_segment_offset @@ -404,7 +411,7 @@ func (s *RoSnapshots) Reopen() error { s.Txs.lock.Lock() defer s.Txs.lock.Unlock() s.closeSegmentsLocked() - files, err := segments(s.dir) + files, _, err := Segments(s.dir) if err != nil { return err } @@ -499,7 +506,7 @@ func (s *RoSnapshots) ReopenSegments() error { s.Txs.lock.Lock() defer s.Txs.lock.Unlock() s.closeSegmentsLocked() - files, err := segments(s.dir) + files, _, err := Segments(s.dir) if err != nil { return err } @@ -786,19 +793,20 @@ func BuildIndices(ctx context.Context, s *RoSnapshots, chainID uint256.Int, tmpD return nil } -func noGaps(in []snap.FileInfo) (out []snap.FileInfo, err error) { +func noGaps(in []snap.FileInfo) (out []snap.FileInfo, missingSnapshots 
[]MergeRange) { var prevTo uint64 for _, f := range in { if f.To <= prevTo { continue } if f.From != prevTo { // no gaps - return nil, fmt.Errorf("%w: from %d to %d", snap.ErrSnapshotMissed, prevTo, f.From) + missingSnapshots = append(missingSnapshots, MergeRange{prevTo, f.From}) + continue } prevTo = f.To out = append(out, f) } - return out, nil + return out, missingSnapshots } func allTypeOfSegmentsMustExist(dir string, in []snap.FileInfo) (res []snap.FileInfo) { @@ -846,10 +854,10 @@ func noOverlaps(in []snap.FileInfo) (res []snap.FileInfo) { return res } -func segments(dir string) (res []snap.FileInfo, err error) { +func Segments(dir string) (res []snap.FileInfo, missingSnapshots []MergeRange, err error) { list, err := snap.Segments(dir) if err != nil { - return nil, err + return nil, missingSnapshots, err } for _, f := range list { if f.T != snap.Headers { @@ -857,7 +865,8 @@ func segments(dir string) (res []snap.FileInfo, err error) { } res = append(res, f) } - return noGaps(noOverlaps(allTypeOfSegmentsMustExist(dir, res))) + res, missingSnapshots = noGaps(noOverlaps(allTypeOfSegmentsMustExist(dir, res))) + return res, missingSnapshots, nil } func chooseSegmentEnd(from, to, blocksPerFile uint64) uint64 { @@ -1003,24 +1012,8 @@ func retireBlocks(ctx context.Context, blockFrom, blockTo uint64, chainID uint25 if err := snapshots.Reopen(); err != nil { return fmt.Errorf("Reopen: %w", err) } - // start seed large .seg of large size - req := &proto_downloader.DownloadRequest{Items: make([]*proto_downloader.DownloadItem, 0, len(snap.AllSnapshotTypes))} - for _, r := range ranges { - if r.to-r.from != snap.DEFAULT_SEGMENT_SIZE { - continue - } - for _, t := range snap.AllSnapshotTypes { - req.Items = append(req.Items, &proto_downloader.DownloadItem{ - Path: snap.SegmentFileName(r.from, r.to, t), - }) - } - } - if len(req.Items) > 0 && downloader != nil { - if _, err := downloader.Download(ctx, req); err != nil { - return err - } - } - return nil + + return 
RequestSnapshotDownload(ctx, ranges, downloader) } func DumpBlocks(ctx context.Context, blockFrom, blockTo, blocksPerFile uint64, tmpDir, snapDir string, chainDB kv.RoDB, workers int, lvl log.Lvl) error { @@ -1725,13 +1718,13 @@ func NewMerger(tmpDir string, workers int, lvl log.Lvl, chainID uint256.Int, not return &Merger{tmpDir: tmpDir, workers: workers, lvl: lvl, chainID: chainID, notifier: notifier} } -type mergeRange struct { +type MergeRange struct { from, to uint64 } -func (r mergeRange) String() string { return fmt.Sprintf("%dk-%dk", r.from/1000, r.to/1000) } +func (r MergeRange) String() string { return fmt.Sprintf("%dk-%dk", r.from/1000, r.to/1000) } -func (*Merger) FindMergeRanges(snapshots *RoSnapshots) (res []mergeRange) { +func (*Merger) FindMergeRanges(snapshots *RoSnapshots) (res []MergeRange) { for i := len(snapshots.Headers.segments) - 1; i > 0; i-- { sn := snapshots.Headers.segments[i] if sn.To-sn.From >= snap.DEFAULT_SEGMENT_SIZE { // is complete .seg @@ -1746,14 +1739,14 @@ func (*Merger) FindMergeRanges(snapshots *RoSnapshots) (res []mergeRange) { break } aggFrom := sn.To - span - res = append(res, mergeRange{from: aggFrom, to: sn.To}) + res = append(res, MergeRange{from: aggFrom, to: sn.To}) for snapshots.Headers.segments[i].From > aggFrom { i-- } break } } - slices.SortFunc(res, func(i, j mergeRange) bool { return i.from < j.from }) + slices.SortFunc(res, func(i, j MergeRange) bool { return i.from < j.from }) return res } func (m *Merger) filesByRange(snapshots *RoSnapshots, from, to uint64) (toMergeHeaders, toMergeBodies, toMergeTxs []string, err error) { @@ -1781,7 +1774,7 @@ func (m *Merger) filesByRange(snapshots *RoSnapshots, from, to uint64) (toMergeH } // Merge does merge segments in given ranges -func (m *Merger) Merge(ctx context.Context, snapshots *RoSnapshots, mergeRanges []mergeRange, snapDir string, doIndex bool) error { +func (m *Merger) Merge(ctx context.Context, snapshots *RoSnapshots, mergeRanges []MergeRange, snapDir 
string, doIndex bool) error { if len(mergeRanges) == 0 { return nil } @@ -1944,3 +1937,55 @@ func assertSegment(segmentFile string) { panic(err) } } + +func NewDownloadRequest(ranges *MergeRange, path string, torrentHash string) DownloadRequest { + return DownloadRequest{ + ranges: ranges, + path: path, + torrentHash: torrentHash, + } +} + +// builds the snapshots download request and downloads them +func RequestSnapshotDownload(ctx context.Context, ranges []MergeRange, downloader proto_downloader.DownloaderClient) error { + // start seed large .seg of large size + var downloadRequest []DownloadRequest + for _, r := range ranges { + downloadRequest = append(downloadRequest, NewDownloadRequest(&r, "", "")) + } + req := BuildProtoRequest(downloadRequest) + if len(req.Items) > 0 && downloader != nil { + if _, err := downloader.Download(ctx, req); err != nil { + return err + } + } + return nil +} + +func BuildProtoRequest(downloadRequest []DownloadRequest) *proto_downloader.DownloadRequest { + req := &proto_downloader.DownloadRequest{Items: make([]*proto_downloader.DownloadItem, 0, len(snap.AllSnapshotTypes))} + for _, r := range downloadRequest { + if r.path != "" { + if r.torrentHash != "" { + req.Items = append(req.Items, &proto_downloader.DownloadItem{ + TorrentHash: downloadergrpc.String2Proto(r.torrentHash), + Path: r.path, + }) + } else { + req.Items = append(req.Items, &proto_downloader.DownloadItem{ + Path: r.path, + }) + } + } else { + if r.ranges.to-r.ranges.from != snap.DEFAULT_SEGMENT_SIZE { + continue + } + for _, t := range snap.AllSnapshotTypes { + req.Items = append(req.Items, &proto_downloader.DownloadItem{ + Path: snap.SegmentFileName(r.ranges.from, r.ranges.to, t), + }) + } + } + } + return req +} diff --git a/turbo/snapshotsync/block_snapshots_test.go b/turbo/snapshotsync/block_snapshots_test.go index f3c82864fd8..a41757048a3 100644 --- a/turbo/snapshotsync/block_snapshots_test.go +++ b/turbo/snapshotsync/block_snapshots_test.go @@ -148,8 +148,8 @@ 
func TestOpenAllSnapshot(t *testing.T) { createFile(500_000, 1_000_000, snap.Transactions) s = NewRoSnapshots(cfg, dir) err = s.Reopen() - require.Error(err) - require.Equal(0, len(s.Headers.segments)) //because, no gaps are allowed (expect snapshots from block 0) + require.NoError(err) + require.Equal(0, len(s.Headers.segments)) s.Close() createFile(0, 500_000, snap.Bodies) From 58628999793ed22891f30bfe15c09a7b96b7b271 Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Mon, 18 Jul 2022 19:03:38 +0200 Subject: [PATCH 099/152] Fix index out of range in (*Accumulator) ChangeStorage (#4738) --- turbo/shards/state_change_accumulator.go | 1 + 1 file changed, 1 insertion(+) diff --git a/turbo/shards/state_change_accumulator.go b/turbo/shards/state_change_accumulator.go index dc7372c9537..ad5a5d89b39 100644 --- a/turbo/shards/state_change_accumulator.go +++ b/turbo/shards/state_change_accumulator.go @@ -105,6 +105,7 @@ func (a *Accumulator) DeleteAccount(address common.Address) { accountChange.Code = nil accountChange.StorageChanges = nil accountChange.Action = remote.Action_REMOVE + delete(a.storageChangeIndex, address) } // ChangeCode adds code to the latest change From 59dda485673c509ae3996ec82feaed072480a217 Mon Sep 17 00:00:00 2001 From: nanevardanyan Date: Tue, 19 Jul 2022 05:11:37 +0400 Subject: [PATCH 100/152] eth: replace maps with etl.Collectors (#4707) * WIP: eth: replace maps with etl.Collectors * WIP: eth: replace maps with etl.Collectors in pruneOldLogChunks * WIP: eth: use appendBuffer to avoid duplicates * WIP: eth: replace with oldestEntrySortableBuffer --- eth/stagedsync/stage_log_index.go | 50 ++++++++++++++++--------------- 1 file changed, 26 insertions(+), 24 deletions(-) diff --git a/eth/stagedsync/stage_log_index.go b/eth/stagedsync/stage_log_index.go index 3febf9d97a4..5b8a897471b 100644 --- a/eth/stagedsync/stage_log_index.go +++ b/eth/stagedsync/stage_log_index.go @@ -332,40 +332,35 @@ func 
truncateBitmaps(tx kv.RwTx, bucket string, inMem map[string]struct{}, to ui return nil } -func pruneOldLogChunks(tx kv.RwTx, bucket string, inMem map[string]struct{}, pruneTo uint64, logPrefix string, ctx context.Context) error { +func pruneOldLogChunks(tx kv.RwTx, bucket string, inMem *etl.Collector, pruneTo uint64, ctx context.Context) error { logEvery := time.NewTicker(logInterval) defer logEvery.Stop() - keys := make([]string, 0, len(inMem)) - for k := range inMem { - keys = append(keys, k) - } - slices.Sort(keys) + c, err := tx.RwCursor(bucket) if err != nil { return err } defer c.Close() - for _, kS := range keys { - seek := []byte(kS) - for k, _, err := c.Seek(seek); k != nil; k, _, err = c.Next() { + + if err := inMem.Load(tx, bucket, func(key, v []byte, table etl.CurrentTableReader, next etl.LoadNextFunc) error { + for k, _, err := c.Seek(key); k != nil; k, _, err = c.Next() { if err != nil { return err } - blockNum := uint64(binary.BigEndian.Uint32(k[len(seek):])) - if !bytes.HasPrefix(k, seek) || blockNum >= pruneTo { + blockNum := uint64(binary.BigEndian.Uint32(k[len(key):])) + if !bytes.HasPrefix(k, key) || blockNum >= pruneTo { break } - select { - case <-logEvery.C: - log.Info(fmt.Sprintf("[%s]", logPrefix), "table", kv.AccountsHistory, "block", blockNum) - case <-ctx.Done(): - return libcommon.ErrStopped - default: - } + if err = c.DeleteCurrent(); err != nil { return fmt.Errorf("failed delete, block=%d: %w", blockNum, err) } } + return nil + }, etl.TransformArgs{ + Quit: ctx.Done(), + }); err != nil { + return err } return nil } @@ -405,8 +400,11 @@ func pruneLogIndex(logPrefix string, tx kv.RwTx, tmpDir string, pruneTo uint64, logEvery := time.NewTicker(logInterval) defer logEvery.Stop() - topics := map[string]struct{}{} - addrs := map[string]struct{}{} + bufferSize := etl.BufferOptimalSize + topics := etl.NewCollector(logPrefix, tmpDir, etl.NewOldestEntryBuffer(bufferSize)) + defer topics.Close() + addrs := etl.NewCollector(logPrefix, tmpDir, 
etl.NewOldestEntryBuffer(bufferSize)) + defer addrs.Close() reader := bytes.NewReader(nil) { @@ -440,17 +438,21 @@ func pruneLogIndex(logPrefix string, tx kv.RwTx, tmpDir string, pruneTo uint64, for _, l := range logs { for _, topic := range l.Topics { - topics[string(topic.Bytes())] = struct{}{} + if err := topics.Collect(topic.Bytes(), nil); err != nil { + return err + } + } + if err := addrs.Collect(l.Address.Bytes(), nil); err != nil { + return err } - addrs[string(l.Address.Bytes())] = struct{}{} } } } - if err := pruneOldLogChunks(tx, kv.LogTopicIndex, topics, pruneTo, logPrefix, ctx); err != nil { + if err := pruneOldLogChunks(tx, kv.LogTopicIndex, topics, pruneTo, ctx); err != nil { return err } - if err := pruneOldLogChunks(tx, kv.LogAddressIndex, addrs, pruneTo, logPrefix, ctx); err != nil { + if err := pruneOldLogChunks(tx, kv.LogAddressIndex, addrs, pruneTo, ctx); err != nil { return err } return nil From 5d68f610bc3ab1b711479d11743c92c23d919083 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Tue, 19 Jul 2022 10:40:02 +0700 Subject: [PATCH 101/152] mdbx: use OS pagesize by default (but > 4Kb, and < 64Kb) #4743 --- cmd/utils/flags.go | 5 +++-- go.mod | 2 +- go.sum | 4 ++-- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 0e8e5a941a4..d2bcb6eea9c 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -31,6 +31,7 @@ import ( "text/template" "github.com/c2h5oh/datasize" + "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/kvcache" "github.com/ledgerwatch/erigon-lib/txpool" "github.com/ledgerwatch/log/v3" @@ -700,8 +701,8 @@ var ( } DbPageSizeFlag = cli.StringFlag{ Name: "db.pagesize", - Usage: "set mdbx pagesize on db creation: must be power of 2 and '256b <= pagesize <= 64kb' ", - Value: "4kb", + Usage: "set mdbx pagesize on db creation: must be power of 2 and '256b <= pagesize <= 64kb'. 
default: equal to OperationSystem's pageSize", + Value: datasize.ByteSize(kv.DefaultPageSize()).String(), } HealthCheckFlag = cli.BoolFlag{ diff --git a/go.mod b/go.mod index d6b6f4d3c58..08d8d2ea972 100644 --- a/go.mod +++ b/go.mod @@ -36,7 +36,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible - github.com/ledgerwatch/erigon-lib v0.0.0-20220718054733-b645e40aa475 + github.com/ledgerwatch/erigon-lib v0.0.0-20220719032653-b4e402231b6e github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 diff --git a/go.sum b/go.sum index 19dabbc596a..5feb0e6cca0 100644 --- a/go.sum +++ b/go.sum @@ -390,8 +390,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3PYPwICLl+/9oulQauOuETfgFvhBDffs0= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220718054733-b645e40aa475 h1:WULehvYiLzt/pXBZBMEXNMC8w4S0PrgM0UC7r3J2Z1M= -github.com/ledgerwatch/erigon-lib v0.0.0-20220718054733-b645e40aa475/go.mod h1:lrUxxrH85rkNMGFT7K4aloNMOf7jG+bVYAHhmyi7oaU= +github.com/ledgerwatch/erigon-lib v0.0.0-20220719032653-b4e402231b6e h1:5UltJUvO6qSku8+OLxnC9ynCHNrZ7JfVOoLdpM5aq3k= +github.com/ledgerwatch/erigon-lib v0.0.0-20220719032653-b4e402231b6e/go.mod h1:lrUxxrH85rkNMGFT7K4aloNMOf7jG+bVYAHhmyi7oaU= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= From c7da7a6d90d6981c03f81632a8ebda9a638b909a Mon Sep 17 00:00:00 2001 From: michaelscheung 
Date: Mon, 18 Jul 2022 20:48:22 -0700 Subject: [PATCH 102/152] Support block parameter for integration stage_log_index (#4740) * Support block parameter for integration stage_log_index * Add logPrefix * Skip stage_log_index if endBlock < startBlock Co-authored-by: michaelscheung --- cmd/integration/commands/stages.go | 2 +- eth/stagedsync/default_stages.go | 2 +- eth/stagedsync/stage_log_index.go | 28 +++++++++++++++++++++----- eth/stagedsync/stage_log_index_test.go | 6 +++--- 4 files changed, 28 insertions(+), 10 deletions(-) diff --git a/cmd/integration/commands/stages.go b/cmd/integration/commands/stages.go index 45a51f3344f..d3cb77845d3 100644 --- a/cmd/integration/commands/stages.go +++ b/cmd/integration/commands/stages.go @@ -850,7 +850,7 @@ func stageLogIndex(db kv.RwDB, ctx context.Context) error { return err } } else { - if err := stagedsync.SpawnLogIndex(s, tx, cfg, ctx); err != nil { + if err := stagedsync.SpawnLogIndex(s, tx, cfg, ctx, block); err != nil { return err } } diff --git a/eth/stagedsync/default_stages.go b/eth/stagedsync/default_stages.go index 869fc7331fe..df37c6210e8 100644 --- a/eth/stagedsync/default_stages.go +++ b/eth/stagedsync/default_stages.go @@ -177,7 +177,7 @@ func DefaultStages(ctx context.Context, sm prune.Mode, headers HeadersCfg, cumul ID: stages.LogIndex, Description: "Generate receipt logs index", Forward: func(firstCycle bool, badBlockUnwind bool, s *StageState, u Unwinder, tx kv.RwTx) error { - return SpawnLogIndex(s, tx, logIndex, ctx) + return SpawnLogIndex(s, tx, logIndex, ctx, 0) }, Unwind: func(firstCycle bool, u *UnwindState, s *StageState, tx kv.RwTx) error { return UnwindLogIndex(u, s, tx, logIndex, ctx) diff --git a/eth/stagedsync/stage_log_index.go b/eth/stagedsync/stage_log_index.go index 5b8a897471b..43c78a1fae5 100644 --- a/eth/stagedsync/stage_log_index.go +++ b/eth/stagedsync/stage_log_index.go @@ -45,7 +45,7 @@ func StageLogIndexCfg(db kv.RwDB, prune prune.Mode, tmpDir string) LogIndexCfg { } } -func 
SpawnLogIndex(s *StageState, tx kv.RwTx, cfg LogIndexCfg, ctx context.Context) error { +func SpawnLogIndex(s *StageState, tx kv.RwTx, cfg LogIndexCfg, ctx context.Context, prematureEndBlock uint64) error { useExternalTx := tx != nil if !useExternalTx { var err error @@ -61,7 +61,15 @@ func SpawnLogIndex(s *StageState, tx kv.RwTx, cfg LogIndexCfg, ctx context.Conte if err != nil { return fmt.Errorf("getting last executed block: %w", err) } - if endBlock == s.BlockNumber { + // if prematureEndBlock is nonzero and less than the latest executed block, + // then we only run the log index stage until prematureEndBlock + if prematureEndBlock != 0 && prematureEndBlock < endBlock { + endBlock = prematureEndBlock + } + // It is possible that prematureEndBlock < s.BlockNumber, + // in which case it is important that we skip this stage, + // or else we could overwrite stage_at with prematureEndBlock + if endBlock <= s.BlockNumber { return nil } @@ -73,8 +81,7 @@ func SpawnLogIndex(s *StageState, tx kv.RwTx, cfg LogIndexCfg, ctx context.Conte if startBlock > 0 { startBlock++ } - - if err = promoteLogIndex(logPrefix, tx, startBlock, cfg, ctx); err != nil { + if err = promoteLogIndex(logPrefix, tx, startBlock, endBlock, cfg, ctx); err != nil { return err } if err = s.Update(tx, endBlock); err != nil { @@ -90,7 +97,7 @@ func SpawnLogIndex(s *StageState, tx kv.RwTx, cfg LogIndexCfg, ctx context.Conte return nil } -func promoteLogIndex(logPrefix string, tx kv.RwTx, start uint64, cfg LogIndexCfg, ctx context.Context) error { +func promoteLogIndex(logPrefix string, tx kv.RwTx, start uint64, endBlock uint64, cfg LogIndexCfg, ctx context.Context) error { quit := ctx.Done() logEvery := time.NewTicker(30 * time.Second) defer logEvery.Stop() @@ -112,6 +119,10 @@ func promoteLogIndex(logPrefix string, tx kv.RwTx, start uint64, cfg LogIndexCfg reader := bytes.NewReader(nil) + if endBlock != 0 { + log.Info(fmt.Sprintf("[%s] Running from blocks %d to %d", logPrefix, start, endBlock), 
"endBlock", endBlock) + } + for k, v, err := logs.Seek(dbutils.LogKey(start, 0)); k != nil; k, v, err = logs.Next() { if err != nil { return err @@ -122,6 +133,13 @@ func promoteLogIndex(logPrefix string, tx kv.RwTx, start uint64, cfg LogIndexCfg } blockNum := binary.BigEndian.Uint64(k[:8]) + // if endBlock is positive, we only run the stage up until endBlock + // if endBlock is zero, we run the stage for all available blocks + if endBlock != 0 && blockNum > endBlock { + log.Info(fmt.Sprintf("[%s] Reached user-specified end block", logPrefix), "endBlock", endBlock) + break + } + select { default: case <-logEvery.C: diff --git a/eth/stagedsync/stage_log_index_test.go b/eth/stagedsync/stage_log_index_test.go index 611c8a999c4..5f09221812a 100644 --- a/eth/stagedsync/stage_log_index_test.go +++ b/eth/stagedsync/stage_log_index_test.go @@ -101,7 +101,7 @@ func TestPromoteLogIndex(t *testing.T) { cfgCopy.bufLimit = 10 cfgCopy.flushEvery = time.Nanosecond - err := promoteLogIndex("logPrefix", tx, 0, cfgCopy, ctx) + err := promoteLogIndex("logPrefix", tx, 0, 0, cfgCopy, ctx) require.NoError(err) // Check indices GetCardinality (in how many blocks they meet) @@ -127,7 +127,7 @@ func TestPruneLogIndex(t *testing.T) { cfgCopy := cfg cfgCopy.bufLimit = 10 cfgCopy.flushEvery = time.Nanosecond - err := promoteLogIndex("logPrefix", tx, 0, cfgCopy, ctx) + err := promoteLogIndex("logPrefix", tx, 0, 0, cfgCopy, ctx) require.NoError(err) // Mode test @@ -166,7 +166,7 @@ func TestUnwindLogIndex(t *testing.T) { cfgCopy := cfg cfgCopy.bufLimit = 10 cfgCopy.flushEvery = time.Nanosecond - err := promoteLogIndex("logPrefix", tx, 0, cfgCopy, ctx) + err := promoteLogIndex("logPrefix", tx, 0, 0, cfgCopy, ctx) require.NoError(err) // Mode test From c9306ab8d0a4076b7f91b30df652a49adf7ef48b Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Tue, 19 Jul 2022 10:54:44 +0700 Subject: [PATCH 103/152] disable asserts in devel (#4746) * save * save --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/Makefile b/Makefile index 3546d3e065e..492d6191edb 100644 --- a/Makefile +++ b/Makefile @@ -17,7 +17,7 @@ DOCKER_TAG ?= thorax/erigon:latest # Pipe error below to /dev/null since Makefile structure kind of expects # Go to be available, but with docker it's not strictly necessary CGO_CFLAGS := $(shell $(GO) env CGO_CFLAGS 2>/dev/null) # don't lose default -CGO_CFLAGS += -DMDBX_FORCE_ASSERTIONS=1 # Enable MDBX's asserts by default in 'devel' branch and disable in 'stable' +CGO_CFLAGS += -DMDBX_FORCE_ASSERTIONS=0 # Enable MDBX's asserts by default in 'devel' branch and disable in releases CGO_CFLAGS := CGO_CFLAGS="$(CGO_CFLAGS)" DBG_CGO_CFLAGS += -DMDBX_DEBUG=1 From bda2697bcca0692a63f0d7147ead3c43da21f391 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Tue, 19 Jul 2022 10:56:14 +0700 Subject: [PATCH 104/152] linter version up #4745 --- .github/workflows/ci.yml | 4 +--- Makefile | 2 +- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e5512bcee86..402b8c7cfde 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -53,9 +53,7 @@ jobs: if: runner.os == 'Linux' uses: golangci/golangci-lint-action@v3 with: - version: v1.46 - skip-pkg-cache: true - skip-build-cache: true + version: v1.47 - name: Test run: make test diff --git a/Makefile b/Makefile index 492d6191edb..1bcdbe10a37 100644 --- a/Makefile +++ b/Makefile @@ -141,7 +141,7 @@ lintci: lintci-deps: rm -f ./build/bin/golangci-lint - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b ./build/bin v1.46.2 + curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b ./build/bin v1.47.0 clean: go clean -cache From b70abd7aafde5fd983887a3f5911b7f666b1ddc2 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Tue, 19 Jul 2022 13:45:53 +0700 Subject: [PATCH 105/152] Grpc up v48 #532 * save * save --- go.mod | 8 ++++---- go.sum | 16 
++++++++-------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/go.mod b/go.mod index 08d8d2ea972..ca35ad108d6 100644 --- a/go.mod +++ b/go.mod @@ -36,7 +36,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible - github.com/ledgerwatch/erigon-lib v0.0.0-20220719032653-b4e402231b6e + github.com/ledgerwatch/erigon-lib v0.0.0-20220719040828-9ceeeac385ad github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 @@ -58,11 +58,11 @@ require ( github.com/xsleonard/go-merkle v1.1.0 go.uber.org/atomic v1.9.0 golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d - golang.org/x/exp v0.0.0-20220706164943-b4a6d9510983 + golang.org/x/exp v0.0.0-20220713135740-79cabaa25d75 golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f - golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e + golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 golang.org/x/time v0.0.0-20220609170525-579cf78fd858 - google.golang.org/grpc v1.46.2 + google.golang.org/grpc v1.48.0 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0 google.golang.org/protobuf v1.28.0 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c diff --git a/go.sum b/go.sum index 5feb0e6cca0..9e844b35822 100644 --- a/go.sum +++ b/go.sum @@ -390,8 +390,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3PYPwICLl+/9oulQauOuETfgFvhBDffs0= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220719032653-b4e402231b6e h1:5UltJUvO6qSku8+OLxnC9ynCHNrZ7JfVOoLdpM5aq3k= -github.com/ledgerwatch/erigon-lib v0.0.0-20220719032653-b4e402231b6e/go.mod 
h1:lrUxxrH85rkNMGFT7K4aloNMOf7jG+bVYAHhmyi7oaU= +github.com/ledgerwatch/erigon-lib v0.0.0-20220719040828-9ceeeac385ad h1:fdAdq41F6zH39l6FgsfezXZElEFzl80fXqnB7gKWCTE= +github.com/ledgerwatch/erigon-lib v0.0.0-20220719040828-9ceeeac385ad/go.mod h1:KXCwHR5gW/dv9naTlrx4Du8Wzj6H3ndTBC+vw3hnyWU= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= @@ -679,8 +679,8 @@ golang.org/x/crypto v0.0.0-20220516162934-403b01795ae8/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d h1:sK3txAijHtOK88l68nt020reeT1ZdKLIYetKl95FzVY= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20220706164943-b4a6d9510983 h1:sUweFwmLOje8KNfXAVqGGAsmgJ/F8jJ6wBLJDt4BTKY= -golang.org/x/exp v0.0.0-20220706164943-b4a6d9510983/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= +golang.org/x/exp v0.0.0-20220713135740-79cabaa25d75 h1:x03zeu7B2B11ySp+daztnwM5oBJ/8wGUSqrwcw9L0RA= +golang.org/x/exp v0.0.0-20220713135740-79cabaa25d75/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -779,8 +779,8 @@ golang.org/x/sys v0.0.0-20220405052023-b1e9470b6e64/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220608164250-635b8c9b7f68/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e h1:CsOuNlbOuf0mzxJIefr6Q4uAUetRUwZE4qt7VfzP+xo= -golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 h1:0A+M6Uqn+Eje4kHMK80dtF3JCXC4ykBgQG4Fe06QRhQ= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -848,8 +848,8 @@ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.46.2 h1:u+MLGgVf7vRdjEYZ8wDFhAVNmhkbJ5hmrA1LMWK1CAQ= -google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.48.0 h1:rQOsyJ/8+ufEDJd/Gdsz7HG220Mh9HAhFHRGnIjda0w= +google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0 h1:TLkBREm4nIsEcexnCjgQd5GQWaHcqMzwQV0TX9pq8S0= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0/go.mod h1:DNq5QpG7LJqD2AamLZ7zvKE0DEpVl2BSEVjFycAAjRY= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= From ab28089583570217cea4e6cc389b429d69f1d558 Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin 
<34320705+yperbasis@users.noreply.github.com> Date: Tue, 19 Jul 2022 11:11:08 +0200 Subject: [PATCH 106/152] Still fixing index out of range in (*Accumulator) ChangeStorage (#4751) --- turbo/shards/state_change_accumulator.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/turbo/shards/state_change_accumulator.go b/turbo/shards/state_change_accumulator.go index ad5a5d89b39..f2743020658 100644 --- a/turbo/shards/state_change_accumulator.go +++ b/turbo/shards/state_change_accumulator.go @@ -74,6 +74,7 @@ func (a *Accumulator) ChangeAccount(address common.Address, incarnation uint64, i = len(a.latestChange.Changes) a.latestChange.Changes = append(a.latestChange.Changes, &remote.AccountChange{Address: gointerfaces.ConvertAddressToH160(address)}) a.accountChangeIndex[address] = i + delete(a.storageChangeIndex, address) } accountChange := a.latestChange.Changes[i] switch accountChange.Action { @@ -116,6 +117,7 @@ func (a *Accumulator) ChangeCode(address common.Address, incarnation uint64, cod i = len(a.latestChange.Changes) a.latestChange.Changes = append(a.latestChange.Changes, &remote.AccountChange{Address: gointerfaces.ConvertAddressToH160(address), Action: remote.Action_CODE}) a.accountChangeIndex[address] = i + delete(a.storageChangeIndex, address) } accountChange := a.latestChange.Changes[i] switch accountChange.Action { @@ -137,6 +139,7 @@ func (a *Accumulator) ChangeStorage(address common.Address, incarnation uint64, i = len(a.latestChange.Changes) a.latestChange.Changes = append(a.latestChange.Changes, &remote.AccountChange{Address: gointerfaces.ConvertAddressToH160(address), Action: remote.Action_STORAGE}) a.accountChangeIndex[address] = i + delete(a.storageChangeIndex, address) } accountChange := a.latestChange.Changes[i] if accountChange.Action == remote.Action_REMOVE { From d3b424c9f6e935f1e9a9c0580b9f18aca24e6f66 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Tue, 19 Jul 2022 16:53:18 +0700 Subject: [PATCH 107/152] Mdbx: GC BigFoot (#4750) --- 
cmd/downloader/downloader/downloadercfg/logger.go | 3 +++ go.mod | 4 ++-- go.sum | 8 ++++---- libmdbx | 2 +- turbo/snapshotsync/snapshothashes/erigon-snapshots | 2 +- 5 files changed, 11 insertions(+), 8 deletions(-) diff --git a/cmd/downloader/downloader/downloadercfg/logger.go b/cmd/downloader/downloader/downloadercfg/logger.go index 7c71fa81e45..27989595f8e 100644 --- a/cmd/downloader/downloader/downloadercfg/logger.go +++ b/cmd/downloader/downloader/downloadercfg/logger.go @@ -74,6 +74,9 @@ func (b adapterHandler) Handle(r lg.Record) { if strings.Contains(str, "being sole dirtier of piece") { // suppress useless errors break } + if strings.Contains(str, "requested chunk too long") { // suppress useless errors + break + } log.Warn(str) case lg.Error: diff --git a/go.mod b/go.mod index ca35ad108d6..efcd1b91f37 100644 --- a/go.mod +++ b/go.mod @@ -36,7 +36,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible - github.com/ledgerwatch/erigon-lib v0.0.0-20220719040828-9ceeeac385ad + github.com/ledgerwatch/erigon-lib v0.0.0-20220719082624-745b9b6b98dc github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 @@ -50,7 +50,7 @@ require ( github.com/stretchr/testify v1.8.0 github.com/tendermint/go-amino v0.14.1 github.com/tendermint/tendermint v0.31.11 - github.com/torquem-ch/mdbx-go v0.24.3-0.20220614090901-342411560dde + github.com/torquem-ch/mdbx-go v0.25.0 github.com/ugorji/go/codec v1.1.13 github.com/ugorji/go/codec/codecgen v1.1.13 github.com/urfave/cli v1.22.9 diff --git a/go.sum b/go.sum index 9e844b35822..985b44713ef 100644 --- a/go.sum +++ b/go.sum @@ -390,8 +390,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3PYPwICLl+/9oulQauOuETfgFvhBDffs0= github.com/leanovate/gopter 
v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220719040828-9ceeeac385ad h1:fdAdq41F6zH39l6FgsfezXZElEFzl80fXqnB7gKWCTE= -github.com/ledgerwatch/erigon-lib v0.0.0-20220719040828-9ceeeac385ad/go.mod h1:KXCwHR5gW/dv9naTlrx4Du8Wzj6H3ndTBC+vw3hnyWU= +github.com/ledgerwatch/erigon-lib v0.0.0-20220719082624-745b9b6b98dc h1:5opLy9YqL26YvSNGKxHcJO4X/R7Q3FU4ajp7jhbZPBE= +github.com/ledgerwatch/erigon-lib v0.0.0-20220719082624-745b9b6b98dc/go.mod h1:8wlgUF6YVdB3fjGg9VbQshirfJvi1h+qoHDYrPqAHoE= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= @@ -619,8 +619,8 @@ github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDW github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tinylib/msgp v1.1.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/torquem-ch/mdbx-go v0.24.3-0.20220614090901-342411560dde h1:1nzKGldWC9T0ApRfV0jzH28DaBy1Yg5+rmjSiJ/G0dI= -github.com/torquem-ch/mdbx-go v0.24.3-0.20220614090901-342411560dde/go.mod h1:T2fsoJDVppxfAPTLd1svUgH1kpPmeXdPESmroSHcL1E= +github.com/torquem-ch/mdbx-go v0.25.0 h1:k66O6GrqyAsXNn4tF87Q+ba4840aplv6O8Ph0FR1PCY= +github.com/torquem-ch/mdbx-go v0.25.0/go.mod h1:T2fsoJDVppxfAPTLd1svUgH1kpPmeXdPESmroSHcL1E= github.com/ugorji/go v1.1.13/go.mod h1:jxau1n+/wyTGLQoCkjok9r5zFa/FxT6eI5HiHKQszjc= github.com/ugorji/go/codec v1.1.13 h1:013LbFhocBoIqgHeIHKlV4JWYhqogATYWZhIcH0WHn4= github.com/ugorji/go/codec v1.1.13/go.mod h1:oNVt3Dq+FO91WNQ/9JnHKQP2QJxTzoN7wCBFCq1OeuU= diff --git a/libmdbx 
b/libmdbx index 5d2eb580fdd..0018164fef0 160000 --- a/libmdbx +++ b/libmdbx @@ -1 +1 @@ -Subproject commit 5d2eb580fdd61ccacf00aa93d7ee42e8e53afc8e +Subproject commit 0018164fef048b68dd84d503fde95dab5fdea94b diff --git a/turbo/snapshotsync/snapshothashes/erigon-snapshots b/turbo/snapshotsync/snapshothashes/erigon-snapshots index d90ddcf7257..7e85e4d0028 160000 --- a/turbo/snapshotsync/snapshothashes/erigon-snapshots +++ b/turbo/snapshotsync/snapshothashes/erigon-snapshots @@ -1 +1 @@ -Subproject commit d90ddcf72579066b48d631fc5a84dcfbbf2bac49 +Subproject commit 7e85e4d0028c27f747d97f65ac0b8c252a050b39 From e768227d38a317b5d41692567929d8228787994b Mon Sep 17 00:00:00 2001 From: Enrique Jose Avila Asapche Date: Tue, 19 Jul 2022 15:27:54 +0300 Subject: [PATCH 108/152] Merge range (#4749) * added merge range into segments * got rid of missing snapshot errors * reusing RequestSnapshotDownload * sleep out of download * ops * warning if we are missing snapshots --- eth/stagedsync/stage_headers.go | 7 +- turbo/snapshotsync/block_reader.go | 6 +- turbo/snapshotsync/block_snapshots.go | 107 ++++++++++----------- turbo/snapshotsync/block_snapshots_test.go | 4 +- turbo/snapshotsync/snap/files.go | 2 - 5 files changed, 63 insertions(+), 63 deletions(-) diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 688a94972cf..30a38f385d8 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -1340,6 +1340,10 @@ func WaitForDownloader(ctx context.Context, cfg HeadersCfg, tx kv.RwTx) error { } } + if len(missingSnapshots) > 0 { + log.Warn("[Snapshots] downloading missing snapshots") + } + // send all hashes to the Downloader service preverified := snapshothashes.KnownConfig(cfg.chainConfig.ChainName).Preverified i := 0 @@ -1361,7 +1365,6 @@ func WaitForDownloader(ctx context.Context, cfg HeadersCfg, tx kv.RwTx) error { for _, r := range missingSnapshots { downloadRequest = append(downloadRequest, 
snapshotsync.NewDownloadRequest(&r, "", "")) } - req := snapshotsync.BuildProtoRequest(downloadRequest) log.Info("[Snapshots] Fetching torrent files metadata") for { @@ -1370,7 +1373,7 @@ func WaitForDownloader(ctx context.Context, cfg HeadersCfg, tx kv.RwTx) error { return ctx.Err() default: } - if _, err := cfg.snapshotDownloader.Download(ctx, req); err != nil { + if err := snapshotsync.RequestSnapshotDownload(ctx, downloadRequest, cfg.snapshotDownloader); err != nil { log.Error("[Snapshots] call downloader", "err", err) time.Sleep(10 * time.Second) continue diff --git a/turbo/snapshotsync/block_reader.go b/turbo/snapshotsync/block_reader.go index 4bde09ed7dc..babb6a0037e 100644 --- a/turbo/snapshotsync/block_reader.go +++ b/turbo/snapshotsync/block_reader.go @@ -516,7 +516,7 @@ func (back *BlockReaderWithSnapshots) headerFromSnapshot(blockHeight uint64, sn func (back *BlockReaderWithSnapshots) headerFromSnapshotByHash(hash common.Hash, sn *HeaderSegment, buf []byte) (*types.Header, error) { defer func() { if rec := recover(); rec != nil { - panic(fmt.Errorf("%+v, snapshot: %d-%d, trace: %s", rec, sn.From, sn.To, dbg.Stack())) + panic(fmt.Errorf("%+v, snapshot: %d-%d, trace: %s", rec, sn.ranges.from, sn.ranges.to, dbg.Stack())) } }() // avoid crash because Erigon's core does many things @@ -564,7 +564,7 @@ func (back *BlockReaderWithSnapshots) bodyFromSnapshot(blockHeight uint64, sn *B func (back *BlockReaderWithSnapshots) bodyForStorageFromSnapshot(blockHeight uint64, sn *BodySegment, buf []byte) (*types.BodyForStorage, []byte, error) { defer func() { if rec := recover(); rec != nil { - panic(fmt.Errorf("%+v, snapshot: %d-%d, trace: %s", rec, sn.From, sn.To, dbg.Stack())) + panic(fmt.Errorf("%+v, snapshot: %d-%d, trace: %s", rec, sn.ranges.from, sn.ranges.to, dbg.Stack())) } }() // avoid crash because Erigon's core does many things @@ -597,7 +597,7 @@ func (back *BlockReaderWithSnapshots) bodyForStorageFromSnapshot(blockHeight uin func (back 
*BlockReaderWithSnapshots) txsFromSnapshot(baseTxnID uint64, txsAmount uint32, txsSeg *TxnSegment, buf []byte) (txs []types.Transaction, senders []common.Address, err error) { defer func() { if rec := recover(); rec != nil { - panic(fmt.Errorf("%+v, snapshot: %d-%d, trace: %s", rec, txsSeg.From, txsSeg.To, dbg.Stack())) + panic(fmt.Errorf("%+v, snapshot: %d-%d, trace: %s", rec, txsSeg.ranges.from, txsSeg.ranges.to, dbg.Stack())) } }() // avoid crash because Erigon's core does many things diff --git a/turbo/snapshotsync/block_snapshots.go b/turbo/snapshotsync/block_snapshots.go index 6f645bb7a21..8f7ea07da1f 100644 --- a/turbo/snapshotsync/block_snapshots.go +++ b/turbo/snapshotsync/block_snapshots.go @@ -50,20 +50,20 @@ type DownloadRequest struct { type HeaderSegment struct { seg *compress.Decompressor // value: first_byte_of_header_hash + header_rlp idxHeaderHash *recsplit.Index // header_hash -> headers_segment_offset - From, To uint64 + ranges MergeRange } type BodySegment struct { seg *compress.Decompressor // value: rlp(types.BodyForStorage) idxBodyNumber *recsplit.Index // block_num_u64 -> bodies_segment_offset - From, To uint64 + ranges MergeRange } type TxnSegment struct { Seg *compress.Decompressor // value: first_byte_of_transaction_hash + sender_address + transaction_rlp IdxTxnHash *recsplit.Index // transaction_hash -> transactions_segment_offset IdxTxnHash2BlockNum *recsplit.Index // transaction_hash -> block_number - From, To uint64 + ranges MergeRange } func (sn *HeaderSegment) close() { @@ -79,12 +79,12 @@ func (sn *HeaderSegment) close() { func (sn *HeaderSegment) reopen(dir string) (err error) { sn.close() - fileName := snap.SegmentFileName(sn.From, sn.To, snap.Headers) + fileName := snap.SegmentFileName(sn.ranges.from, sn.ranges.to, snap.Headers) sn.seg, err = compress.NewDecompressor(path.Join(dir, fileName)) if err != nil { return err } - sn.idxHeaderHash, err = recsplit.OpenIndex(path.Join(dir, snap.IdxFileName(sn.From, sn.To, 
snap.Headers.String()))) + sn.idxHeaderHash, err = recsplit.OpenIndex(path.Join(dir, snap.IdxFileName(sn.ranges.from, sn.ranges.to, snap.Headers.String()))) if err != nil { return err } @@ -104,12 +104,12 @@ func (sn *BodySegment) close() { func (sn *BodySegment) reopen(dir string) (err error) { sn.close() - fileName := snap.SegmentFileName(sn.From, sn.To, snap.Bodies) + fileName := snap.SegmentFileName(sn.ranges.from, sn.ranges.to, snap.Bodies) sn.seg, err = compress.NewDecompressor(path.Join(dir, fileName)) if err != nil { return err } - sn.idxBodyNumber, err = recsplit.OpenIndex(path.Join(dir, snap.IdxFileName(sn.From, sn.To, snap.Bodies.String()))) + sn.idxBodyNumber, err = recsplit.OpenIndex(path.Join(dir, snap.IdxFileName(sn.ranges.from, sn.ranges.to, snap.Bodies.String()))) if err != nil { return err } @@ -148,16 +148,16 @@ func (sn *TxnSegment) close() { } func (sn *TxnSegment) reopen(dir string) (err error) { sn.close() - fileName := snap.SegmentFileName(sn.From, sn.To, snap.Transactions) + fileName := snap.SegmentFileName(sn.ranges.from, sn.ranges.to, snap.Transactions) sn.Seg, err = compress.NewDecompressor(path.Join(dir, fileName)) if err != nil { return err } - sn.IdxTxnHash, err = recsplit.OpenIndex(path.Join(dir, snap.IdxFileName(sn.From, sn.To, snap.Transactions.String()))) + sn.IdxTxnHash, err = recsplit.OpenIndex(path.Join(dir, snap.IdxFileName(sn.ranges.from, sn.ranges.to, snap.Transactions.String()))) if err != nil { return err } - sn.IdxTxnHash2BlockNum, err = recsplit.OpenIndex(path.Join(dir, snap.IdxFileName(sn.From, sn.To, snap.Transactions2Block.String()))) + sn.IdxTxnHash2BlockNum, err = recsplit.OpenIndex(path.Join(dir, snap.IdxFileName(sn.ranges.from, sn.ranges.to, snap.Transactions2Block.String()))) if err != nil { return err } @@ -194,7 +194,7 @@ func (s *headerSegments) ViewSegment(blockNum uint64, f func(sn *HeaderSegment) s.lock.RLock() defer s.lock.RUnlock() for _, seg := range s.segments { - if !(blockNum >= seg.From && blockNum < 
seg.To) { + if !(blockNum >= seg.ranges.from && blockNum < seg.ranges.to) { continue } return true, f(seg) @@ -232,7 +232,7 @@ func (s *bodySegments) ViewSegment(blockNum uint64, f func(*BodySegment) error) s.lock.RLock() defer s.lock.RUnlock() for _, seg := range s.segments { - if !(blockNum >= seg.From && blockNum < seg.To) { + if !(blockNum >= seg.ranges.from && blockNum < seg.ranges.to) { continue } return true, f(seg) @@ -270,7 +270,7 @@ func (s *txnSegments) ViewSegment(blockNum uint64, f func(*TxnSegment) error) (f s.lock.RLock() defer s.lock.RUnlock() for _, seg := range s.segments { - if !(blockNum >= seg.From && blockNum < seg.To) { + if !(blockNum >= seg.ranges.from && blockNum < seg.ranges.to) { continue } return true, f(seg) @@ -323,7 +323,7 @@ func (s *RoSnapshots) idxAvailability() uint64 { if seg.idxHeaderHash == nil { continue } - headers = seg.To - 1 + headers = seg.ranges.to - 1 break } for i := len(s.Bodies.segments) - 1; i >= 0; i-- { @@ -331,7 +331,7 @@ func (s *RoSnapshots) idxAvailability() uint64 { if seg.idxBodyNumber == nil { continue } - bodies = seg.To - 1 + bodies = seg.ranges.to - 1 break } @@ -340,7 +340,7 @@ func (s *RoSnapshots) idxAvailability() uint64 { if seg.IdxTxnHash == nil || seg.IdxTxnHash2BlockNum == nil { continue } - txs = seg.To - 1 + txs = seg.ranges.to - 1 break } return cmp.Min(headers, cmp.Min(bodies, txs)) @@ -390,7 +390,7 @@ func (s *RoSnapshots) AsyncOpenAll(ctx context.Context) { return default: } - if err := s.Reopen(); err != nil && !errors.Is(err, os.ErrNotExist) && !errors.Is(err, snap.ErrSnapshotMissed) { + if err := s.Reopen(); err != nil && !errors.Is(err, os.ErrNotExist) { log.Error("AsyncOpenAll", "err", err) } time.Sleep(15 * time.Second) @@ -422,7 +422,7 @@ func (s *RoSnapshots) Reopen() error { s.Txs.segments = s.Txs.segments[:0] for _, f := range files { { - seg := &BodySegment{From: f.From, To: f.To} + seg := &BodySegment{ranges: MergeRange{f.From, f.To}} fileName := snap.SegmentFileName(f.From, 
f.To, snap.Bodies) seg.seg, err = compress.NewDecompressor(path.Join(s.dir, fileName)) if err != nil { @@ -434,7 +434,7 @@ func (s *RoSnapshots) Reopen() error { s.Bodies.segments = append(s.Bodies.segments, seg) } { - seg := &HeaderSegment{From: f.From, To: f.To} + seg := &HeaderSegment{ranges: MergeRange{f.From, f.To}} fileName := snap.SegmentFileName(f.From, f.To, snap.Headers) seg.seg, err = compress.NewDecompressor(path.Join(s.dir, fileName)) if err != nil { @@ -446,7 +446,7 @@ func (s *RoSnapshots) Reopen() error { s.Headers.segments = append(s.Headers.segments, seg) } { - seg := &TxnSegment{From: f.From, To: f.To} + seg := &TxnSegment{ranges: MergeRange{f.From, f.To}} fileName := snap.SegmentFileName(f.From, f.To, snap.Transactions) seg.Seg, err = compress.NewDecompressor(path.Join(s.dir, fileName)) if err != nil { @@ -471,23 +471,23 @@ func (s *RoSnapshots) Reopen() error { s.segmentsReady.Store(true) for _, sn := range s.Headers.segments { - sn.idxHeaderHash, err = recsplit.OpenIndex(path.Join(s.dir, snap.IdxFileName(sn.From, sn.To, snap.Headers.String()))) + sn.idxHeaderHash, err = recsplit.OpenIndex(path.Join(s.dir, snap.IdxFileName(sn.ranges.from, sn.ranges.to, snap.Headers.String()))) if err != nil && !errors.Is(err, os.ErrNotExist) { return err } } for _, sn := range s.Bodies.segments { - sn.idxBodyNumber, err = recsplit.OpenIndex(path.Join(s.dir, snap.IdxFileName(sn.From, sn.To, snap.Bodies.String()))) + sn.idxBodyNumber, err = recsplit.OpenIndex(path.Join(s.dir, snap.IdxFileName(sn.ranges.from, sn.ranges.to, snap.Bodies.String()))) if err != nil && !errors.Is(err, os.ErrNotExist) { return err } } for _, sn := range s.Txs.segments { - sn.IdxTxnHash, err = recsplit.OpenIndex(path.Join(s.dir, snap.IdxFileName(sn.From, sn.To, snap.Transactions.String()))) + sn.IdxTxnHash, err = recsplit.OpenIndex(path.Join(s.dir, snap.IdxFileName(sn.ranges.from, sn.ranges.to, snap.Transactions.String()))) if err != nil && !errors.Is(err, os.ErrNotExist) { return err } - 
sn.IdxTxnHash2BlockNum, err = recsplit.OpenIndex(path.Join(s.dir, snap.IdxFileName(sn.From, sn.To, snap.Transactions2Block.String()))) + sn.IdxTxnHash2BlockNum, err = recsplit.OpenIndex(path.Join(s.dir, snap.IdxFileName(sn.ranges.from, sn.ranges.to, snap.Transactions2Block.String()))) if err != nil && !errors.Is(err, os.ErrNotExist) { return err } @@ -517,7 +517,7 @@ func (s *RoSnapshots) ReopenSegments() error { var segmentsMaxSet bool for _, f := range files { { - seg := &BodySegment{From: f.From, To: f.To} + seg := &BodySegment{ranges: MergeRange{f.From, f.To}} fileName := snap.SegmentFileName(f.From, f.To, snap.Bodies) seg.seg, err = compress.NewDecompressor(path.Join(s.dir, fileName)) if err != nil { @@ -529,7 +529,7 @@ func (s *RoSnapshots) ReopenSegments() error { s.Bodies.segments = append(s.Bodies.segments, seg) } { - seg := &HeaderSegment{From: f.From, To: f.To} + seg := &HeaderSegment{ranges: MergeRange{f.From, f.To}} fileName := snap.SegmentFileName(f.From, f.To, snap.Headers) seg.seg, err = compress.NewDecompressor(path.Join(s.dir, fileName)) if err != nil { @@ -541,7 +541,7 @@ func (s *RoSnapshots) ReopenSegments() error { s.Headers.segments = append(s.Headers.segments, seg) } { - seg := &TxnSegment{From: f.From, To: f.To} + seg := &TxnSegment{ranges: MergeRange{f.From, f.To}} fileName := snap.SegmentFileName(f.From, f.To, snap.Transactions) seg.Seg, err = compress.NewDecompressor(path.Join(s.dir, fileName)) if err != nil { @@ -600,15 +600,15 @@ func (s *RoSnapshots) PrintDebug() { fmt.Printf("sn: %d, %d\n", s.segmentsMax.Load(), s.idxMax.Load()) fmt.Println(" == Snapshots, Header") for _, sn := range s.Headers.segments { - fmt.Printf("%d, %t\n", sn.From, sn.idxHeaderHash == nil) + fmt.Printf("%d, %t\n", sn.ranges.from, sn.idxHeaderHash == nil) } fmt.Println(" == Snapshots, Body") for _, sn := range s.Bodies.segments { - fmt.Printf("%d, %t\n", sn.From, sn.idxBodyNumber == nil) + fmt.Printf("%d, %t\n", sn.ranges.from, sn.idxBodyNumber == nil) } 
fmt.Println(" == Snapshots, Txs") for _, sn := range s.Txs.segments { - fmt.Printf("%d, %t, %t\n", sn.From, sn.IdxTxnHash == nil, sn.IdxTxnHash2BlockNum == nil) + fmt.Printf("%d, %t, %t\n", sn.ranges.from, sn.IdxTxnHash == nil, sn.IdxTxnHash2BlockNum == nil) } } func (s *RoSnapshots) ViewHeaders(blockNum uint64, f func(sn *HeaderSegment) error) (found bool, err error) { @@ -639,7 +639,7 @@ func BuildIndices(ctx context.Context, s *RoSnapshots, chainID uint256.Int, tmpD errs := make(chan error, len(segments)*2) workersCh := make(chan struct{}, workers) for _, sn := range segments { - if sn.From < from { + if sn.ranges.from < from { continue } @@ -667,7 +667,7 @@ func BuildIndices(ctx context.Context, s *RoSnapshots, chainID uint256.Int, tmpD default: } - }(sn.From, sn.To) + }(sn.ranges.from, sn.ranges.to) } go func() { wg.Wait() @@ -688,7 +688,7 @@ func BuildIndices(ctx context.Context, s *RoSnapshots, chainID uint256.Int, tmpD errs := make(chan error, len(segments)*2) workersCh := make(chan struct{}, workers) for _, sn := range segments { - if sn.From < from { + if sn.ranges.from < from { continue } @@ -716,7 +716,7 @@ func BuildIndices(ctx context.Context, s *RoSnapshots, chainID uint256.Int, tmpD default: } - }(sn.From, sn.To) + }(sn.ranges.from, sn.ranges.to) } go func() { wg.Wait() @@ -741,7 +741,7 @@ func BuildIndices(ctx context.Context, s *RoSnapshots, chainID uint256.Int, tmpD errs := make(chan error, len(segments)*2) workersCh := make(chan struct{}, workers) for i, sn := range segments { - if sn.From < from { + if sn.ranges.from < from { continue } @@ -772,7 +772,7 @@ func BuildIndices(ctx context.Context, s *RoSnapshots, chainID uint256.Int, tmpD default: } - }(sn.From, sn.To) + }(sn.ranges.from, sn.ranges.to) } go func() { wg.Wait() @@ -1013,7 +1013,12 @@ func retireBlocks(ctx context.Context, blockFrom, blockTo uint64, chainID uint25 return fmt.Errorf("Reopen: %w", err) } - return RequestSnapshotDownload(ctx, ranges, downloader) + var downloadRequest 
[]DownloadRequest + for _, r := range ranges { + downloadRequest = append(downloadRequest, NewDownloadRequest(&r, "", "")) + } + + return RequestSnapshotDownload(ctx, downloadRequest, downloader) } func DumpBlocks(ctx context.Context, blockFrom, blockTo, blocksPerFile uint64, tmpDir, snapDir string, chainDB kv.RoDB, workers int, lvl log.Lvl) error { @@ -1727,20 +1732,20 @@ func (r MergeRange) String() string { return fmt.Sprintf("%dk-%dk", r.from/1000, func (*Merger) FindMergeRanges(snapshots *RoSnapshots) (res []MergeRange) { for i := len(snapshots.Headers.segments) - 1; i > 0; i-- { sn := snapshots.Headers.segments[i] - if sn.To-sn.From >= snap.DEFAULT_SEGMENT_SIZE { // is complete .seg + if sn.ranges.to-sn.ranges.from >= snap.DEFAULT_SEGMENT_SIZE { // is complete .seg continue } for _, span := range []uint64{500_000, 100_000, 10_000} { - if sn.To%span != 0 { + if sn.ranges.to%span != 0 { continue } - if sn.To-sn.From == span { + if sn.ranges.to-sn.ranges.from == span { break } - aggFrom := sn.To - span - res = append(res, MergeRange{from: aggFrom, to: sn.To}) - for snapshots.Headers.segments[i].From > aggFrom { + aggFrom := sn.ranges.to - span + res = append(res, MergeRange{from: aggFrom, to: sn.ranges.to}) + for snapshots.Headers.segments[i].ranges.from > aggFrom { i-- } break @@ -1754,10 +1759,10 @@ func (m *Merger) filesByRange(snapshots *RoSnapshots, from, to uint64) (toMergeH return snapshots.Bodies.View(func(bSegments []*BodySegment) error { return snapshots.Txs.View(func(tSegments []*TxnSegment) error { for i, sn := range hSegments { - if sn.From < from { + if sn.ranges.from < from { continue } - if sn.To > to { + if sn.ranges.to > to { break } @@ -1947,17 +1952,11 @@ func NewDownloadRequest(ranges *MergeRange, path string, torrentHash string) Dow } // builds the snapshots download request and downloads them -func RequestSnapshotDownload(ctx context.Context, ranges []MergeRange, downloader proto_downloader.DownloaderClient) error { +func 
RequestSnapshotDownload(ctx context.Context, downloadRequest []DownloadRequest, downloader proto_downloader.DownloaderClient) error { // start seed large .seg of large size - var downloadRequest []DownloadRequest - for _, r := range ranges { - downloadRequest = append(downloadRequest, NewDownloadRequest(&r, "", "")) - } req := BuildProtoRequest(downloadRequest) - if len(req.Items) > 0 && downloader != nil { - if _, err := downloader.Download(ctx, req); err != nil { - return err - } + if _, err := downloader.Download(ctx, req); err != nil { + return err } return nil } diff --git a/turbo/snapshotsync/block_snapshots_test.go b/turbo/snapshotsync/block_snapshots_test.go index a41757048a3..65a90ba54ad 100644 --- a/turbo/snapshotsync/block_snapshots_test.go +++ b/turbo/snapshotsync/block_snapshots_test.go @@ -163,14 +163,14 @@ func TestOpenAllSnapshot(t *testing.T) { require.Equal(2, len(s.Headers.segments)) ok, err := s.ViewTxs(10, func(sn *TxnSegment) error { - require.Equal(int(sn.To), 500_000) + require.Equal(int(sn.ranges.to), 500_000) return nil }) require.NoError(err) require.True(ok) ok, err = s.ViewTxs(500_000, func(sn *TxnSegment) error { - require.Equal(int(sn.To), 1_000_000) // [from:to) + require.Equal(int(sn.ranges.to), 1_000_000) // [from:to) return nil }) require.NoError(err) diff --git a/turbo/snapshotsync/snap/files.go b/turbo/snapshotsync/snap/files.go index fa009192ce0..90f0badda58 100644 --- a/turbo/snapshotsync/snap/files.go +++ b/turbo/snapshotsync/snap/files.go @@ -167,8 +167,6 @@ func TmpFiles(dir string) (res []string, err error) { return res, nil } -var ErrSnapshotMissed = fmt.Errorf("snapshot missed") - // ParseDir - reading dir ( func ParseDir(dir string) (res []FileInfo, err error) { files, err := os.ReadDir(dir) From 1110c350338d566612b0027c07140e35d448f442 Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Tue, 19 Jul 2022 16:03:35 +0200 Subject: [PATCH 109/152] 
=?UTF-8?q?G=C3=B6rli=20Terminal=20Total=20Difficu?= =?UTF-8?q?lty=20(#4752)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- params/chainspecs/goerli.json | 1 + 1 file changed, 1 insertion(+) diff --git a/params/chainspecs/goerli.json b/params/chainspecs/goerli.json index 7596abd2be2..16a8cfede28 100644 --- a/params/chainspecs/goerli.json +++ b/params/chainspecs/goerli.json @@ -13,6 +13,7 @@ "istanbulBlock": 1561651, "berlinBlock": 4460644, "londonBlock": 5062605, + "terminalTotalDifficulty": 10790000, "terminalBlockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", "clique": { "period": 15, From d0399012c262d24adbaef26a15dbef53cfd3e38b Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Tue, 19 Jul 2022 16:31:30 +0200 Subject: [PATCH 110/152] Fix txn removal in PendingPool (#4754) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index efcd1b91f37..438ff4300a5 100644 --- a/go.mod +++ b/go.mod @@ -36,7 +36,7 @@ require ( github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible - github.com/ledgerwatch/erigon-lib v0.0.0-20220719082624-745b9b6b98dc + github.com/ledgerwatch/erigon-lib v0.0.0-20220719140506-af5355ee9286 github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 diff --git a/go.sum b/go.sum index 985b44713ef..8905269c01d 100644 --- a/go.sum +++ b/go.sum @@ -390,8 +390,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3PYPwICLl+/9oulQauOuETfgFvhBDffs0= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod 
h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220719082624-745b9b6b98dc h1:5opLy9YqL26YvSNGKxHcJO4X/R7Q3FU4ajp7jhbZPBE= -github.com/ledgerwatch/erigon-lib v0.0.0-20220719082624-745b9b6b98dc/go.mod h1:8wlgUF6YVdB3fjGg9VbQshirfJvi1h+qoHDYrPqAHoE= +github.com/ledgerwatch/erigon-lib v0.0.0-20220719140506-af5355ee9286 h1:AMu0iTB2BlgeBTxJvAa7amzz6WmyX5xxnLOF2LFhkTs= +github.com/ledgerwatch/erigon-lib v0.0.0-20220719140506-af5355ee9286/go.mod h1:8wlgUF6YVdB3fjGg9VbQshirfJvi1h+qoHDYrPqAHoE= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= From d4f865d725050e7e5fb18aec8fed31c1d0398f7b Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Tue, 19 Jul 2022 22:31:15 +0200 Subject: [PATCH 111/152] Added proper cleanup when we get notified of new height (#4753) * added proper cleanup when we get notified of new height * added extra cleanup * removed bad if condition * fixed hive tests Co-authored-by: giuliorebuffo --- eth/stagedsync/stage_headers.go | 15 +++++++++------ turbo/engineapi/fork_validator.go | 25 ++++++++++++++++++++++--- 2 files changed, 31 insertions(+), 9 deletions(-) diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 30a38f385d8..33b4c16ff8e 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -258,11 +258,11 @@ func startHandlingForkChoice( cfg HeadersCfg, headerInserter *headerdownload.HeaderInserter, ) (*privateapi.PayloadStatus, error) { - headerHash := forkChoice.HeadBlockHash - log.Debug(fmt.Sprintf("[%s] Handling fork choice", s.LogPrefix()), "headerHash", headerHash) if cfg.memoryOverlay { - defer cfg.forkValidator.Clear(tx) + defer cfg.forkValidator.ClearWithUnwind(tx) } + headerHash := forkChoice.HeadBlockHash + 
log.Debug(fmt.Sprintf("[%s] Handling fork choice", s.LogPrefix()), "headerHash", headerHash) currentHeadHash := rawdb.ReadHeadHeaderHash(tx) if currentHeadHash == headerHash { // no-op @@ -577,10 +577,11 @@ func verifyAndSaveNewPoSHeader( forkingHash, err := cfg.blockReader.CanonicalHash(ctx, tx, forkingPoint) canExtendCanonical := forkingHash == currentHeadHash - canExtendFork := cfg.forkValidator.ExtendingForkHeadHash() == (common.Hash{}) || header.ParentHash == cfg.forkValidator.ExtendingForkHeadHash() - if cfg.memoryOverlay && (canExtendFork || header.ParentHash != currentHeadHash) { - status, latestValidHash, validationError, criticalError := cfg.forkValidator.ValidatePayload(tx, header, body, header.ParentHash == currentHeadHash /* extendCanonical */) + if cfg.memoryOverlay { + extendingHash := cfg.forkValidator.ExtendingForkHeadHash() + extendCanonical := (extendingHash == common.Hash{} && header.ParentHash == currentHeadHash) || extendingHash == header.ParentHash + status, latestValidHash, validationError, criticalError := cfg.forkValidator.ValidatePayload(tx, header, body, extendCanonical) if criticalError != nil { return nil, false, criticalError } @@ -664,6 +665,8 @@ func schedulePoSDownload( } func verifyAndSaveDownloadedPoSHeaders(tx kv.RwTx, cfg HeadersCfg, headerInserter *headerdownload.HeaderInserter) { + defer cfg.forkValidator.Clear() + var lastValidHash common.Hash var badChainError error var foundPow bool diff --git a/turbo/engineapi/fork_validator.go b/turbo/engineapi/fork_validator.go index ae83190cd4b..7baaba9c052 100644 --- a/turbo/engineapi/fork_validator.go +++ b/turbo/engineapi/fork_validator.go @@ -84,6 +84,12 @@ func (fv *ForkValidator) ExtendingForkHeadHash() common.Hash { // NotifyCurrentHeight is to be called at the end of the stage cycle and repressent the last processed block. 
func (fv *ForkValidator) NotifyCurrentHeight(currentHeight uint64) { fv.currentHeight = currentHeight + // If the head changed,e previous assumptions on head are incorrect now. + if fv.extendingFork != nil { + fv.extendingFork.Rollback() + } + fv.extendingFork = nil + fv.extendingForkHeadHash = common.Hash{} } // FlushExtendingFork flush the current extending fork if fcu chooses its head hash as the its forkchoice. @@ -176,7 +182,16 @@ func (fv *ForkValidator) ValidatePayload(tx kv.RwTx, header *types.Header, body // Clear wipes out current extending fork data, this method is called after fcu is called, // because fcu decides what the head is and after the call is done all the non-chosed forks are // to be considered obsolete. -func (fv *ForkValidator) Clear(tx kv.RwTx) { +func (fv *ForkValidator) Clear() { + if fv.extendingFork != nil { + fv.extendingFork.Rollback() + } + fv.extendingForkHeadHash = common.Hash{} + fv.extendingFork = nil +} + +// Clear wipes out current extending fork data and notify txpool. +func (fv *ForkValidator) ClearWithUnwind(tx kv.RwTx) { sb, ok := fv.sideForksBlock[fv.extendingForkHeadHash] // If we did not flush the fork state, then we need to notify the txpool through unwind. if fv.extendingFork != nil && fv.extendingForkHeadHash != (common.Hash{}) && ok { @@ -187,8 +202,7 @@ func (fv *ForkValidator) Clear(tx kv.RwTx) { } fv.extendingFork.Rollback() } - fv.extendingForkHeadHash = common.Hash{} - fv.extendingFork = nil + fv.Clear() } // validateAndStorePayload validate and store a payload fork chain if such chain results valid. @@ -198,6 +212,11 @@ func (fv *ForkValidator) validateAndStorePayload(tx kv.RwTx, header *types.Heade if validationError != nil { latestValidHash = header.ParentHash status = remote.EngineStatus_INVALID + if fv.extendingFork != nil { + fv.extendingFork.Rollback() + fv.extendingFork = nil + } + fv.extendingForkHeadHash = common.Hash{} return } // If we do not have the body we can recover it from the batch. 
From 5805d963ea5c352d4b4baac0b2c966bf1f70b056 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Wed, 20 Jul 2022 09:34:12 +0700 Subject: [PATCH 112/152] erigon-snapshot: convert from git sumbodule to golang package (#4760) --- .gitmodules | 3 - eth/stagedsync/stage_headers.go | 4 +- eth/stagedsync/stage_senders.go | 6 +- go.mod | 6 +- go.sum | 2 + turbo/snapshotsync/block_snapshots.go | 4 +- turbo/snapshotsync/block_snapshots_test.go | 4 +- turbo/snapshotsync/snap/files.go | 4 +- turbo/snapshotsync/snapcfg/util.go | 112 ++++++++++++++++ turbo/snapshotsync/snapshothashes/embed.go | 123 ------------------ .../snapshothashes/erigon-snapshots | 1 - 11 files changed, 130 insertions(+), 139 deletions(-) create mode 100644 turbo/snapshotsync/snapcfg/util.go delete mode 100644 turbo/snapshotsync/snapshothashes/embed.go delete mode 160000 turbo/snapshotsync/snapshothashes/erigon-snapshots diff --git a/.gitmodules b/.gitmodules index e7ceb250819..e1a0db9182e 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,9 +1,6 @@ [submodule "tests"] path = tests/testdata url = https://github.com/ethereum/tests -[submodule "turbo/snapshotsync/snapshothashes/erigon-snapshots"] - path = turbo/snapshotsync/snapshothashes/erigon-snapshots - url = https://github.com/ledgerwatch/erigon-snapshot.git [submodule "cmd/downloader/trackers/trackerslist"] path = cmd/downloader/trackers/trackerslist url = https://github.com/ngosang/trackerslist.git diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 33b4c16ff8e..4b3a5224348 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -27,7 +27,7 @@ import ( "github.com/ledgerwatch/erigon/turbo/engineapi" "github.com/ledgerwatch/erigon/turbo/services" "github.com/ledgerwatch/erigon/turbo/snapshotsync" - "github.com/ledgerwatch/erigon/turbo/snapshotsync/snapshothashes" + "github.com/ledgerwatch/erigon/turbo/snapshotsync/snapcfg" "github.com/ledgerwatch/erigon/turbo/stages/bodydownload" 
"github.com/ledgerwatch/erigon/turbo/stages/headerdownload" "github.com/ledgerwatch/log/v3" @@ -1348,7 +1348,7 @@ func WaitForDownloader(ctx context.Context, cfg HeadersCfg, tx kv.RwTx) error { } // send all hashes to the Downloader service - preverified := snapshothashes.KnownConfig(cfg.chainConfig.ChainName).Preverified + preverified := snapcfg.KnownCfg(cfg.chainConfig.ChainName).Preverified i := 0 var downloadRequest []snapshotsync.DownloadRequest // build all download requests diff --git a/eth/stagedsync/stage_senders.go b/eth/stagedsync/stage_senders.go index ae01802da06..22f6c5ce513 100644 --- a/eth/stagedsync/stage_senders.go +++ b/eth/stagedsync/stage_senders.go @@ -24,7 +24,7 @@ import ( "github.com/ledgerwatch/erigon/ethdb/prune" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/turbo/snapshotsync" - "github.com/ledgerwatch/erigon/turbo/snapshotsync/snapshothashes" + "github.com/ledgerwatch/erigon/turbo/snapshotsync/snapcfg" "github.com/ledgerwatch/erigon/turbo/stages/headerdownload" "github.com/ledgerwatch/log/v3" "github.com/ledgerwatch/secp256k1" @@ -42,7 +42,7 @@ type SendersCfg struct { prune prune.Mode chainConfig *params.ChainConfig blockRetire *snapshotsync.BlockRetire - snapshotHashesCfg *snapshothashes.Config + snapshotHashesCfg *snapcfg.Cfg hd *headerdownload.HeaderDownload } @@ -62,7 +62,7 @@ func StageSendersCfg(db kv.RwDB, chainCfg *params.ChainConfig, badBlockHalt bool chainConfig: chainCfg, prune: prune, blockRetire: br, - snapshotHashesCfg: snapshothashes.KnownConfig(chainCfg.ChainName), + snapshotHashesCfg: snapcfg.KnownCfg(chainCfg.ChainName), hd: hd, } } diff --git a/go.mod b/go.mod index 438ff4300a5..4ca64f2642a 100644 --- a/go.mod +++ b/go.mod @@ -2,6 +2,11 @@ module github.com/ledgerwatch/erigon go 1.18 +require ( + github.com/ledgerwatch/erigon-lib v0.0.0-20220719140506-af5355ee9286 + github.com/ledgerwatch/erigon-snapshot v1.0.0 +) + require ( github.com/RoaringBitmap/roaring v1.2.1 
github.com/VictoriaMetrics/fastcache v1.10.0 @@ -36,7 +41,6 @@ require ( github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible - github.com/ledgerwatch/erigon-lib v0.0.0-20220719140506-af5355ee9286 github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 diff --git a/go.sum b/go.sum index 8905269c01d..120e46a2e14 100644 --- a/go.sum +++ b/go.sum @@ -392,6 +392,8 @@ github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7 github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= github.com/ledgerwatch/erigon-lib v0.0.0-20220719140506-af5355ee9286 h1:AMu0iTB2BlgeBTxJvAa7amzz6WmyX5xxnLOF2LFhkTs= github.com/ledgerwatch/erigon-lib v0.0.0-20220719140506-af5355ee9286/go.mod h1:8wlgUF6YVdB3fjGg9VbQshirfJvi1h+qoHDYrPqAHoE= +github.com/ledgerwatch/erigon-snapshot v1.0.0 h1:bp/7xoPdM5lK7LFdqEMH008RZmqxMZV0RUVEQiWs7v4= +github.com/ledgerwatch/erigon-snapshot v1.0.0/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= diff --git a/turbo/snapshotsync/block_snapshots.go b/turbo/snapshotsync/block_snapshots.go index 8f7ea07da1f..144f4c206e4 100644 --- a/turbo/snapshotsync/block_snapshots.go +++ b/turbo/snapshotsync/block_snapshots.go @@ -35,7 +35,7 @@ import ( "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/turbo/snapshotsync/snap" - "github.com/ledgerwatch/erigon/turbo/snapshotsync/snapshothashes" + "github.com/ledgerwatch/erigon/turbo/snapshotsync/snapcfg" "github.com/ledgerwatch/log/v3" "go.uber.org/atomic" "golang.org/x/exp/slices" @@ -309,7 +309,7 
@@ func (s *RoSnapshots) IndicesMax() uint64 { return s.idxMax.Load() } func (s *RoSnapshots) SegmentsMax() uint64 { return s.segmentsMax.Load() } func (s *RoSnapshots) BlocksAvailable() uint64 { return cmp.Min(s.segmentsMax.Load(), s.idxMax.Load()) } -func (s *RoSnapshots) EnsureExpectedBlocksAreAvailable(cfg *snapshothashes.Config) error { +func (s *RoSnapshots) EnsureExpectedBlocksAreAvailable(cfg *snapcfg.Cfg) error { if s.BlocksAvailable() < cfg.ExpectBlocks { return fmt.Errorf("app must wait until all expected snapshots are available. Expected: %d, Available: %d", cfg.ExpectBlocks, s.BlocksAvailable()) } diff --git a/turbo/snapshotsync/block_snapshots_test.go b/turbo/snapshotsync/block_snapshots_test.go index 65a90ba54ad..fb1c8f8ab83 100644 --- a/turbo/snapshotsync/block_snapshots_test.go +++ b/turbo/snapshotsync/block_snapshots_test.go @@ -14,7 +14,7 @@ import ( "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/params/networkname" "github.com/ledgerwatch/erigon/turbo/snapshotsync/snap" - "github.com/ledgerwatch/erigon/turbo/snapshotsync/snapshothashes" + "github.com/ledgerwatch/erigon/turbo/snapshotsync/snapcfg" "github.com/ledgerwatch/log/v3" "github.com/stretchr/testify/require" ) @@ -127,7 +127,7 @@ func TestCanRetire(t *testing.T) { } func TestOpenAllSnapshot(t *testing.T) { dir, require := t.TempDir(), require.New(t) - chainSnapshotCfg := snapshothashes.KnownConfig(networkname.MainnetChainName) + chainSnapshotCfg := snapcfg.KnownCfg(networkname.MainnetChainName) chainSnapshotCfg.ExpectBlocks = math.MaxUint64 cfg := ethconfig.Snapshot{Enabled: true} createFile := func(from, to uint64, name snap.Type) { createTestSegmentFile(t, from, to, name, dir) } diff --git a/turbo/snapshotsync/snap/files.go b/turbo/snapshotsync/snap/files.go index 90f0badda58..3c24290c21b 100644 --- a/turbo/snapshotsync/snap/files.go +++ b/turbo/snapshotsync/snap/files.go @@ -10,7 +10,7 @@ import ( "strings" "github.com/ledgerwatch/erigon/common" - 
"github.com/ledgerwatch/erigon/turbo/snapshotsync/snapshothashes" + "github.com/ledgerwatch/erigon/turbo/snapshotsync/snapcfg" "golang.org/x/exp/slices" ) @@ -214,7 +214,7 @@ func ParseDir(dir string) (res []FileInfo, err error) { } func RemoveNonPreverifiedFiles(chainName, snapDir string) error { - preverified := snapshothashes.KnownConfig(chainName).Preverified + preverified := snapcfg.KnownCfg(chainName).Preverified keep := map[string]struct{}{} for _, p := range preverified { ext := filepath.Ext(p.Name) diff --git a/turbo/snapshotsync/snapcfg/util.go b/turbo/snapshotsync/snapcfg/util.go new file mode 100644 index 00000000000..bf7e007b4a8 --- /dev/null +++ b/turbo/snapshotsync/snapcfg/util.go @@ -0,0 +1,112 @@ +package snapcfg + +import ( + _ "embed" + "path/filepath" + "strconv" + "strings" + + snapshothashes "github.com/ledgerwatch/erigon-snapshot" + "github.com/ledgerwatch/erigon/params/networkname" + "github.com/pelletier/go-toml/v2" + "golang.org/x/exp/slices" +) + +var Mainnet = fromToml(snapshothashes.Mainnet) + +var Goerli = fromToml(snapshothashes.Goerli) + +var Bsc = fromToml(snapshothashes.Bsc) + +var Ropsten = fromToml(snapshothashes.Ropsten) + +var Mumbai = fromToml(snapshothashes.Mumbai) + +var BorMainnet = fromToml(snapshothashes.BorMainnet) + +type PreverifiedItem struct { + Name string + Hash string +} +type Preverified []PreverifiedItem +type preverified map[string]string + +func fromToml(in []byte) (out Preverified) { + var outMap preverified + if err := toml.Unmarshal(in, &outMap); err != nil { + panic(err) + } + return doSort(outMap) +} +func doSort(in preverified) Preverified { + out := make(Preverified, 0, len(in)) + for k, v := range in { + out = append(out, PreverifiedItem{k, v}) + } + slices.SortFunc(out, func(i, j PreverifiedItem) bool { return i.Name < j.Name }) + return out +} + +var ( + MainnetChainSnapshotCfg = newCfg(Mainnet) + GoerliChainSnapshotCfg = newCfg(Goerli) + BscChainSnapshotCfg = newCfg(Bsc) + RopstenChainSnapshotCfg = 
newCfg(Ropsten) + MumbaiChainSnapshotCfg = newCfg(Mumbai) + BorMainnetChainSnapshotCfg = newCfg(BorMainnet) +) + +func newCfg(preverified Preverified) *Cfg { + return &Cfg{ExpectBlocks: maxBlockNum(preverified), Preverified: preverified} +} + +func maxBlockNum(preverified Preverified) uint64 { + max := uint64(0) + for _, p := range preverified { + _, fileName := filepath.Split(p.Name) + ext := filepath.Ext(fileName) + if ext != ".seg" { + continue + } + onlyName := fileName[:len(fileName)-len(ext)] + parts := strings.Split(onlyName, "-") + if parts[0] != "v1" { + panic("not implemented") + } + if parts[3] != "headers" { + continue + } + to, err := strconv.ParseUint(parts[2], 10, 64) + if err != nil { + panic(err) + } + if max < to { + max = to + } + } + if max == 0 { // to prevent underflow + return 0 + } + return max*1_000 - 1 +} + +type Cfg struct { + ExpectBlocks uint64 + Preverified Preverified +} + +var KnownCfgs = map[string]*Cfg{ + networkname.MainnetChainName: MainnetChainSnapshotCfg, + networkname.GoerliChainName: GoerliChainSnapshotCfg, + networkname.BSCChainName: BscChainSnapshotCfg, + networkname.RopstenChainName: RopstenChainSnapshotCfg, + networkname.MumbaiChainName: MumbaiChainSnapshotCfg, + networkname.BorMainnetChainName: BorMainnetChainSnapshotCfg, +} + +func KnownCfg(networkName string) *Cfg { + if c, ok := KnownCfgs[networkName]; ok { + return c + } + return newCfg(Preverified{}) +} diff --git a/turbo/snapshotsync/snapshothashes/embed.go b/turbo/snapshotsync/snapshothashes/embed.go deleted file mode 100644 index 8949a45ee45..00000000000 --- a/turbo/snapshotsync/snapshothashes/embed.go +++ /dev/null @@ -1,123 +0,0 @@ -package snapshothashes - -import ( - _ "embed" - "path/filepath" - "strconv" - "strings" - - "github.com/ledgerwatch/erigon/params/networkname" - "github.com/pelletier/go-toml/v2" - "golang.org/x/exp/slices" -) - -//go:embed erigon-snapshots/mainnet.toml -var mainnet []byte -var Mainnet = fromToml(mainnet) - -//go:embed 
erigon-snapshots/goerli.toml -var goerli []byte -var Goerli = fromToml(goerli) - -//go:embed erigon-snapshots/bsc.toml -var bsc []byte -var Bsc = fromToml(bsc) - -//go:embed erigon-snapshots/ropsten.toml -var ropsten []byte -var Ropsten = fromToml(ropsten) - -//go:embed erigon-snapshots/mumbai.toml -var mumbai []byte -var Mumbai = fromToml(mumbai) - -//go:embed erigon-snapshots/bor-mainnet.toml -var borMainnet []byte -var BorMainnet = fromToml(borMainnet) - -type PreverifiedItem struct { - Name string - Hash string -} -type Preverified []PreverifiedItem -type preverified map[string]string - -func fromToml(in []byte) (out Preverified) { - var outMap preverified - if err := toml.Unmarshal(in, &outMap); err != nil { - panic(err) - } - return doSort(outMap) -} -func doSort(in preverified) Preverified { - out := make(Preverified, 0, len(in)) - for k, v := range in { - out = append(out, PreverifiedItem{k, v}) - } - slices.SortFunc(out, func(i, j PreverifiedItem) bool { return i.Name < j.Name }) - return out -} - -var ( - MainnetChainSnapshotConfig = newConfig(Mainnet) - GoerliChainSnapshotConfig = newConfig(Goerli) - BscChainSnapshotConfig = newConfig(Bsc) - RopstenChainSnapshotConfig = newConfig(Ropsten) - MumbaiChainSnapshotConfig = newConfig(Mumbai) - BorMainnetChainSnapshotConfig = newConfig(BorMainnet) -) - -func newConfig(preverified Preverified) *Config { - return &Config{ExpectBlocks: maxBlockNum(preverified), Preverified: preverified} -} - -func maxBlockNum(preverified Preverified) uint64 { - max := uint64(0) - for _, p := range preverified { - _, fileName := filepath.Split(p.Name) - ext := filepath.Ext(fileName) - if ext != ".seg" { - continue - } - onlyName := fileName[:len(fileName)-len(ext)] - parts := strings.Split(onlyName, "-") - if parts[0] != "v1" { - panic("not implemented") - } - if parts[3] != "headers" { - continue - } - to, err := strconv.ParseUint(parts[2], 10, 64) - if err != nil { - panic(err) - } - if max < to { - max = to - } - } - if max == 0 
{ // to prevent underflow - return 0 - } - return max*1_000 - 1 -} - -type Config struct { - ExpectBlocks uint64 - Preverified Preverified -} - -var KnownConfigs map[string]*Config = map[string]*Config{ - networkname.MainnetChainName: MainnetChainSnapshotConfig, - networkname.GoerliChainName: GoerliChainSnapshotConfig, - networkname.BSCChainName: BscChainSnapshotConfig, - networkname.RopstenChainName: RopstenChainSnapshotConfig, - networkname.MumbaiChainName: MumbaiChainSnapshotConfig, - networkname.BorMainnetChainName: BorMainnetChainSnapshotConfig, -} - -func KnownConfig(networkName string) *Config { - if c, ok := KnownConfigs[networkName]; ok { - return c - } - return newConfig(Preverified{}) -} diff --git a/turbo/snapshotsync/snapshothashes/erigon-snapshots b/turbo/snapshotsync/snapshothashes/erigon-snapshots deleted file mode 160000 index 7e85e4d0028..00000000000 --- a/turbo/snapshotsync/snapshothashes/erigon-snapshots +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 7e85e4d0028c27f747d97f65ac0b8c252a050b39 From 1ecacde3a90f0e9da31bbafa66c19e97acc90068 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Wed, 20 Jul 2022 09:47:58 +0700 Subject: [PATCH 113/152] trackerslist: convert from git submodule to go package (#4761) --- .gitmodules | 4 -- cmd/downloader/trackers/embed.go | 29 +++------ cmd/downloader/trackers/trackerslist | 1 - go.mod | 5 +- go.sum | 2 + .../parallelcompress/decompress.go | 62 ------------------- 6 files changed, 14 insertions(+), 89 deletions(-) delete mode 160000 cmd/downloader/trackers/trackerslist delete mode 100644 turbo/snapshotsync/parallelcompress/decompress.go diff --git a/.gitmodules b/.gitmodules index e1a0db9182e..ae94b08f852 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,10 +1,6 @@ [submodule "tests"] path = tests/testdata url = https://github.com/ethereum/tests -[submodule "cmd/downloader/trackers/trackerslist"] - path = cmd/downloader/trackers/trackerslist - url = https://github.com/ngosang/trackerslist.git - [submodule 
"libmdbx"] path = libmdbx url = https://github.com/torquem-ch/libmdbx.git diff --git a/cmd/downloader/trackers/embed.go b/cmd/downloader/trackers/embed.go index 694f2eee40e..bf179e756aa 100644 --- a/cmd/downloader/trackers/embed.go +++ b/cmd/downloader/trackers/embed.go @@ -2,29 +2,18 @@ package trackers import ( "bufio" - _ "embed" "strings" -) - -//go:embed trackerslist/trackers_best.txt -var best string -var Best = split(best) - -//go:embed trackerslist/trackers_all_https.txt -var https string -var Https = split(https) -//go:embed trackerslist/trackers_all_http.txt -var http string -var Http = split(http) - -//go:embed trackerslist/trackers_all_udp.txt -var udp string -var Udp = split(udp) + "github.com/ledgerwatch/trackerslist" +) -//go:embed trackerslist/trackers_all_ws.txt -var ws string -var Ws = split(ws) +var ( + Best = split(trackerslist.Best) + Https = split(trackerslist.Https) + Http = split(trackerslist.Http) + Udp = split(trackerslist.Udp) + Ws = split(trackerslist.Ws) +) func split(txt string) (lines []string) { sc := bufio.NewScanner(strings.NewReader(txt)) diff --git a/cmd/downloader/trackers/trackerslist b/cmd/downloader/trackers/trackerslist deleted file mode 160000 index 17f277f3762..00000000000 --- a/cmd/downloader/trackers/trackerslist +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 17f277f376286f5a99db386421897d5f82031f57 diff --git a/go.mod b/go.mod index 4ca64f2642a..47404e9aac6 100644 --- a/go.mod +++ b/go.mod @@ -5,6 +5,9 @@ go 1.18 require ( github.com/ledgerwatch/erigon-lib v0.0.0-20220719140506-af5355ee9286 github.com/ledgerwatch/erigon-snapshot v1.0.0 + github.com/ledgerwatch/log/v3 v3.4.1 + github.com/ledgerwatch/secp256k1 v1.0.0 + github.com/ledgerwatch/trackerslist v1.0.0 ) require ( @@ -41,8 +44,6 @@ require ( github.com/json-iterator/go v1.1.12 github.com/julienschmidt/httprouter v1.3.0 github.com/kevinburke/go-bindata v3.21.0+incompatible - github.com/ledgerwatch/log/v3 v3.4.1 - github.com/ledgerwatch/secp256k1 v1.0.0 
github.com/nxadm/tail v1.4.9-0.20211216163028-4472660a31a6 github.com/pelletier/go-toml v1.9.5 github.com/pelletier/go-toml/v2 v2.0.2 diff --git a/go.sum b/go.sum index 120e46a2e14..22e34193ee1 100644 --- a/go.sum +++ b/go.sum @@ -398,6 +398,8 @@ github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY github.com/ledgerwatch/log/v3 v3.4.1/go.mod h1:VXcz6Ssn6XEeU92dCMc39/g1F0OYAjw1Mt+dGP5DjXY= github.com/ledgerwatch/secp256k1 v1.0.0 h1:Usvz87YoTG0uePIV8woOof5cQnLXGYa162rFf3YnwaQ= github.com/ledgerwatch/secp256k1 v1.0.0/go.mod h1:SPmqJFciiF/Q0mPt2jVs2dTr/1TZBTIA+kPMmKgBAak= +github.com/ledgerwatch/trackerslist v1.0.0 h1:6gnQu93WCTL4jPcdmc8UEmw56Cb8IFQHLGnevfIeLwo= +github.com/ledgerwatch/trackerslist v1.0.0/go.mod h1:pCC+eEw8izNcnBBiSwvIq8kKsxDLInAafSW275jqFrg= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lispad/go-generics-tools v1.1.0 h1:mbSgcxdFVmpoyso1X/MJHXbSbSL3dD+qhRryyxk+/XY= diff --git a/turbo/snapshotsync/parallelcompress/decompress.go b/turbo/snapshotsync/parallelcompress/decompress.go deleted file mode 100644 index 481717b2ab9..00000000000 --- a/turbo/snapshotsync/parallelcompress/decompress.go +++ /dev/null @@ -1,62 +0,0 @@ -package parallelcompress - -import ( - "bufio" - "encoding/binary" - "fmt" - "os" - "time" - - "github.com/ledgerwatch/erigon-lib/compress" - "github.com/ledgerwatch/erigon-lib/etl" - "github.com/ledgerwatch/log/v3" -) - -func Decompress(logPrefix, segFilePath, datFilePath string) error { - d, err := compress.NewDecompressor(segFilePath) - if err != nil { - return err - } - defer d.Close() - logEvery := time.NewTicker(20 * time.Second) - defer logEvery.Stop() - var df *os.File - if df, err = os.Create(datFilePath); err != nil { - return err - } - dw := bufio.NewWriterSize(df, etl.BufIOSize) - var 
word = make([]byte, 0, 256) - numBuf := make([]byte, binary.MaxVarintLen64) - var decodeTime time.Duration - g := d.MakeGetter() - start := time.Now() - wc := 0 - for g.HasNext() { - word, _ = g.Next(word[:0]) - decodeTime += time.Since(start) - n := binary.PutUvarint(numBuf, uint64(len(word))) - if _, e := dw.Write(numBuf[:n]); e != nil { - return e - } - if len(word) > 0 { - if _, e := dw.Write(word); e != nil { - return e - } - } - wc++ - select { - default: - case <-logEvery.C: - log.Info(fmt.Sprintf("[%s] Decompress", logPrefix), "millions", wc/1_000_000) - } - start = time.Now() - } - log.Info(fmt.Sprintf("[%s] Average decoding time", logPrefix), "per word", time.Duration(int64(decodeTime)/int64(wc))) - if err = dw.Flush(); err != nil { - return err - } - if err = df.Close(); err != nil { - return err - } - return nil -} From 2706b01cea4616ce199af19b8caf58b4b24a4363 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Wed, 20 Jul 2022 17:23:12 +0700 Subject: [PATCH 114/152] go mod (#4762) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 1bcdbe10a37..061abf375a5 100644 --- a/Makefile +++ b/Makefile @@ -80,7 +80,7 @@ docker-compose: validate_docker_build_args setup_xdg_data_home dbg: $(GO_DBG_BUILD) -o $(GOBIN)/ ./cmd/... 
-%.cmd: git-submodules +%.cmd: @# Note: $* is replaced by the command name @echo "Building $*" @cd ./cmd/$* && $(GOBUILD) -o $(GOBIN)/$* From f8c37be3b2f9ff5d594a29fbe33137b9d99ce37b Mon Sep 17 00:00:00 2001 From: Andrea Lanfranchi Date: Wed, 20 Jul 2022 13:42:33 +0200 Subject: [PATCH 115/152] Only test and db-tools require submodule update (#4765) --- wmake.ps1 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/wmake.ps1 b/wmake.ps1 index 0771cbbe230..b66e7842ea9 100644 --- a/wmake.ps1 +++ b/wmake.ps1 @@ -443,7 +443,7 @@ Write-Host @" "@ -if (!$WnoSubmoduleUpdate -and $BuildTargets[0] -ne "clean") { +if (!$WnoSubmoduleUpdate -and $BuildTargets[0] -ne "clean" -and ($BuildTargets.Contains("test") -or $BuildTargets.Contains("db-tools"))) { Write-Host " Updating git submodules ..." Invoke-Expression -Command "git.exe submodule update --init --recursive --force --quiet" if (!($?)) { From 00769e3dff674fab1911c1cb33366711a2163529 Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Wed, 20 Jul 2022 15:16:20 +0200 Subject: [PATCH 116/152] Fix MDBX compilation on macOS (#4767) --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 47404e9aac6..8015c7659e0 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/ledgerwatch/erigon go 1.18 require ( - github.com/ledgerwatch/erigon-lib v0.0.0-20220719140506-af5355ee9286 + github.com/ledgerwatch/erigon-lib v0.0.0-20220720105945-114da7eca320 github.com/ledgerwatch/erigon-snapshot v1.0.0 github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 @@ -55,7 +55,7 @@ require ( github.com/stretchr/testify v1.8.0 github.com/tendermint/go-amino v0.14.1 github.com/tendermint/tendermint v0.31.11 - github.com/torquem-ch/mdbx-go v0.25.0 + github.com/torquem-ch/mdbx-go v0.25.1-0.20220720103744-b96489e94ece github.com/ugorji/go/codec v1.1.13 github.com/ugorji/go/codec/codecgen v1.1.13 
github.com/urfave/cli v1.22.9 diff --git a/go.sum b/go.sum index 22e34193ee1..eeda3cf9e5c 100644 --- a/go.sum +++ b/go.sum @@ -390,8 +390,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3PYPwICLl+/9oulQauOuETfgFvhBDffs0= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220719140506-af5355ee9286 h1:AMu0iTB2BlgeBTxJvAa7amzz6WmyX5xxnLOF2LFhkTs= -github.com/ledgerwatch/erigon-lib v0.0.0-20220719140506-af5355ee9286/go.mod h1:8wlgUF6YVdB3fjGg9VbQshirfJvi1h+qoHDYrPqAHoE= +github.com/ledgerwatch/erigon-lib v0.0.0-20220720105945-114da7eca320 h1:Wd2XPRsa/oVXz2j3a0554Ct8qAUS2IwZeTxdaCWzqbY= +github.com/ledgerwatch/erigon-lib v0.0.0-20220720105945-114da7eca320/go.mod h1:mq8M03qcnaqXZ/yjNuWoyZQ5V8r5JbXw5JYmy4WNUZQ= github.com/ledgerwatch/erigon-snapshot v1.0.0 h1:bp/7xoPdM5lK7LFdqEMH008RZmqxMZV0RUVEQiWs7v4= github.com/ledgerwatch/erigon-snapshot v1.0.0/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= @@ -623,8 +623,8 @@ github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDW github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tinylib/msgp v1.1.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/torquem-ch/mdbx-go v0.25.0 h1:k66O6GrqyAsXNn4tF87Q+ba4840aplv6O8Ph0FR1PCY= -github.com/torquem-ch/mdbx-go v0.25.0/go.mod h1:T2fsoJDVppxfAPTLd1svUgH1kpPmeXdPESmroSHcL1E= +github.com/torquem-ch/mdbx-go v0.25.1-0.20220720103744-b96489e94ece h1:jwLF5BKBWPb00kMfRmSHJl0Hwe52HonOVpNkBJZR+XI= 
+github.com/torquem-ch/mdbx-go v0.25.1-0.20220720103744-b96489e94ece/go.mod h1:T2fsoJDVppxfAPTLd1svUgH1kpPmeXdPESmroSHcL1E= github.com/ugorji/go v1.1.13/go.mod h1:jxau1n+/wyTGLQoCkjok9r5zFa/FxT6eI5HiHKQszjc= github.com/ugorji/go/codec v1.1.13 h1:013LbFhocBoIqgHeIHKlV4JWYhqogATYWZhIcH0WHn4= github.com/ugorji/go/codec v1.1.13/go.mod h1:oNVt3Dq+FO91WNQ/9JnHKQP2QJxTzoN7wCBFCq1OeuU= From 9e8f625c533d967287035c1172e0e7b4941fa28b Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Wed, 20 Jul 2022 17:28:58 +0200 Subject: [PATCH 117/152] Fix txn removal in PendingPool (#4770) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 8015c7659e0..c5023b793da 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/ledgerwatch/erigon go 1.18 require ( - github.com/ledgerwatch/erigon-lib v0.0.0-20220720105945-114da7eca320 + github.com/ledgerwatch/erigon-lib v0.0.0-20220720144911-046e4165b52a github.com/ledgerwatch/erigon-snapshot v1.0.0 github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 diff --git a/go.sum b/go.sum index eeda3cf9e5c..11bce9fb5ec 100644 --- a/go.sum +++ b/go.sum @@ -390,8 +390,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3PYPwICLl+/9oulQauOuETfgFvhBDffs0= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220720105945-114da7eca320 h1:Wd2XPRsa/oVXz2j3a0554Ct8qAUS2IwZeTxdaCWzqbY= -github.com/ledgerwatch/erigon-lib v0.0.0-20220720105945-114da7eca320/go.mod h1:mq8M03qcnaqXZ/yjNuWoyZQ5V8r5JbXw5JYmy4WNUZQ= +github.com/ledgerwatch/erigon-lib v0.0.0-20220720144911-046e4165b52a h1:fRjDLDbieEy48O5BvMf1+ib8loZMA3nSiRtjxbuIsYw= 
+github.com/ledgerwatch/erigon-lib v0.0.0-20220720144911-046e4165b52a/go.mod h1:mq8M03qcnaqXZ/yjNuWoyZQ5V8r5JbXw5JYmy4WNUZQ= github.com/ledgerwatch/erigon-snapshot v1.0.0 h1:bp/7xoPdM5lK7LFdqEMH008RZmqxMZV0RUVEQiWs7v4= github.com/ledgerwatch/erigon-snapshot v1.0.0/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= From 73b028a5fd553519c24c8a36ce2bd8c96467f889 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Wed, 20 Jul 2022 18:16:42 +0200 Subject: [PATCH 118/152] better payload cleanup (#4772) Co-authored-by: giuliorebuffo --- eth/stagedsync/stage_headers.go | 2 +- turbo/engineapi/fork_validator.go | 96 ++++++++++++++++++++++++++++++- 2 files changed, 94 insertions(+), 4 deletions(-) diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 4b3a5224348..19a44499c1f 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -259,7 +259,7 @@ func startHandlingForkChoice( headerInserter *headerdownload.HeaderInserter, ) (*privateapi.PayloadStatus, error) { if cfg.memoryOverlay { - defer cfg.forkValidator.ClearWithUnwind(tx) + defer cfg.forkValidator.ClearWithUnwind(tx, cfg.notifications.Accumulator, cfg.notifications.StateChangesConsumer) } headerHash := forkChoice.HeadBlockHash log.Debug(fmt.Sprintf("[%s] Handling fork choice", s.LogPrefix()), "headerHash", headerHash) diff --git a/turbo/engineapi/fork_validator.go b/turbo/engineapi/fork_validator.go index 7baaba9c052..a1b4f03bbb6 100644 --- a/turbo/engineapi/fork_validator.go +++ b/turbo/engineapi/fork_validator.go @@ -15,15 +15,22 @@ package engineapi import ( "bytes" + "context" + "encoding/binary" "fmt" + "github.com/ledgerwatch/erigon-lib/common/length" "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/memdb" "github.com/ledgerwatch/erigon/common" + 
"github.com/ledgerwatch/erigon/common/changeset" + "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/core/types/accounts" "github.com/ledgerwatch/erigon/rlp" + "github.com/ledgerwatch/erigon/turbo/shards" "github.com/ledgerwatch/log/v3" ) @@ -81,6 +88,89 @@ func (fv *ForkValidator) ExtendingForkHeadHash() common.Hash { return fv.extendingForkHeadHash } +func (fv *ForkValidator) rewindAccumulator(to uint64, accumulator *shards.Accumulator, c shards.StateChangeConsumer) error { + hash, err := rawdb.ReadCanonicalHash(fv.extendingFork, to) + if err != nil { + return fmt.Errorf("read canonical hash of unwind point: %w", err) + } + header := rawdb.ReadHeader(fv.extendingFork, hash, to) + if header == nil { + return fmt.Errorf("could not find header for block: %d", to) + } + + txs, err := rawdb.RawTransactionsRange(fv.extendingFork, to, to+1) + if err != nil { + return err + } + // Start the changes + accumulator.StartChange(to, hash, txs, true) + accChangesCursor, err := fv.extendingFork.CursorDupSort(kv.AccountChangeSet) + if err != nil { + return err + } + defer accChangesCursor.Close() + + storageChangesCursor, err := fv.extendingFork.CursorDupSort(kv.StorageChangeSet) + if err != nil { + return err + } + defer storageChangesCursor.Close() + + startingKey := dbutils.EncodeBlockNumber(to) + // Unwind notifications on accounts + for k, v, err := accChangesCursor.Seek(startingKey); k != nil; k, v, err = accChangesCursor.Next() { + if err != nil { + return err + } + _, dbKey, dbValue, err := changeset.FromDBFormat(k, v) + if err != nil { + return err + } + if len(dbValue) > 0 { + var acc accounts.Account + if err := acc.DecodeForStorage(dbValue); err != nil { + return err + } + // Fetch the code hash + var address common.Address + copy(address[:], dbKey) + if acc.Incarnation > 0 && acc.IsEmptyCodeHash() { + if codeHash, err2 := 
fv.extendingFork.GetOne(kv.PlainContractCode, dbutils.PlainGenerateStoragePrefix(address[:], acc.Incarnation)); err2 == nil { + copy(acc.CodeHash[:], codeHash) + } + } + + newV := make([]byte, acc.EncodingLengthForStorage()) + acc.EncodeForStorage(newV) + accumulator.ChangeAccount(address, acc.Incarnation, newV) + } else { + var address common.Address + copy(address[:], dbKey) + accumulator.DeleteAccount(address) + } + } + // Unwind notifications on storage + for k, v, err := storageChangesCursor.Seek(startingKey); k != nil; k, v, err = accChangesCursor.Next() { + if err != nil { + return err + } + _, dbKey, dbValue, err := changeset.FromDBFormat(k, v) + if err != nil { + return err + } + var address common.Address + var incarnation uint64 + var location common.Hash + copy(address[:], dbKey[:length.Addr]) + incarnation = binary.BigEndian.Uint64(dbKey[length.Addr:]) + copy(location[:], dbKey[length.Addr+length.Incarnation:]) + accumulator.ChangeStorage(address, incarnation, location, common.CopyBytes(dbValue)) + } + accumulator.SendAndReset(context.Background(), c, header.BaseFee.Uint64(), header.GasLimit) + log.Info("Transaction pool notified of discard side fork.") + return nil +} + // NotifyCurrentHeight is to be called at the end of the stage cycle and repressent the last processed block. func (fv *ForkValidator) NotifyCurrentHeight(currentHeight uint64) { fv.currentHeight = currentHeight @@ -191,14 +281,14 @@ func (fv *ForkValidator) Clear() { } // Clear wipes out current extending fork data and notify txpool. -func (fv *ForkValidator) ClearWithUnwind(tx kv.RwTx) { +func (fv *ForkValidator) ClearWithUnwind(tx kv.RwTx, accumulator *shards.Accumulator, c shards.StateChangeConsumer) { sb, ok := fv.sideForksBlock[fv.extendingForkHeadHash] // If we did not flush the fork state, then we need to notify the txpool through unwind. 
if fv.extendingFork != nil && fv.extendingForkHeadHash != (common.Hash{}) && ok { fv.extendingFork.UpdateTxn(tx) // this will call unwind of extending fork to notify txpool of reverting transactions. - if err := fv.validatePayload(fv.extendingFork, nil, nil, sb.header.Number.Uint64()-1, nil, nil); err != nil { - log.Warn("Could not clean payload", "err", err) + if err := fv.rewindAccumulator(sb.header.Number.Uint64()-1, accumulator, c); err != nil { + log.Warn("could not notify txpool of invalid side fork", "err", err) } fv.extendingFork.Rollback() } From 7573a410692ff1499b68ae3c895f752d920f7ff4 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Thu, 21 Jul 2022 03:47:37 +0200 Subject: [PATCH 119/152] fixed accumulator nil case (#4773) Co-authored-by: giuliorebuffo --- turbo/engineapi/fork_validator.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/turbo/engineapi/fork_validator.go b/turbo/engineapi/fork_validator.go index a1b4f03bbb6..7e6853725ec 100644 --- a/turbo/engineapi/fork_validator.go +++ b/turbo/engineapi/fork_validator.go @@ -284,7 +284,7 @@ func (fv *ForkValidator) Clear() { func (fv *ForkValidator) ClearWithUnwind(tx kv.RwTx, accumulator *shards.Accumulator, c shards.StateChangeConsumer) { sb, ok := fv.sideForksBlock[fv.extendingForkHeadHash] // If we did not flush the fork state, then we need to notify the txpool through unwind. - if fv.extendingFork != nil && fv.extendingForkHeadHash != (common.Hash{}) && ok { + if fv.extendingFork != nil && accumulator != nil && fv.extendingForkHeadHash != (common.Hash{}) && ok { fv.extendingFork.UpdateTxn(tx) // this will call unwind of extending fork to notify txpool of reverting transactions. 
if err := fv.rewindAccumulator(sb.header.Number.Uint64()-1, accumulator, c); err != nil { From 1becfc509bba7eeb24c74aab8b64650b82b52a81 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Thu, 21 Jul 2022 04:06:26 +0200 Subject: [PATCH 120/152] extra reset before starting change in rewind side fork (#4774) * extra reset before starting change * extra reset before starting change Co-authored-by: giuliorebuffo --- turbo/engineapi/fork_validator.go | 1 + 1 file changed, 1 insertion(+) diff --git a/turbo/engineapi/fork_validator.go b/turbo/engineapi/fork_validator.go index 7e6853725ec..7fc99f5d1ae 100644 --- a/turbo/engineapi/fork_validator.go +++ b/turbo/engineapi/fork_validator.go @@ -103,6 +103,7 @@ func (fv *ForkValidator) rewindAccumulator(to uint64, accumulator *shards.Accumu return err } // Start the changes + accumulator.Reset(0) accumulator.StartChange(to, hash, txs, true) accChangesCursor, err := fv.extendingFork.CursorDupSort(kv.AccountChangeSet) if err != nil { From 770d7cf8bd6052f06d544e0218607539b8861eb9 Mon Sep 17 00:00:00 2001 From: Max Revitt Date: Thu, 21 Jul 2022 13:47:36 +0100 Subject: [PATCH 121/152] Hive CI output parse (#4737) * feat(ci): run hive tests as part of CI * feat(ci): add hive test runs and output parse * feat(ci): parse hive output for forked repos --- .github/workflows/ci.yml | 17 ++++++++++++-- .github/workflows/hive-results.yml | 37 ++++++++++++++++++++++++++++++ 2 files changed, 52 insertions(+), 2 deletions(-) create mode 100644 .github/workflows/hive-results.yml diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 402b8c7cfde..af6a1c08223 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -104,8 +104,21 @@ jobs: fetch-depth: 0 # fetch git tags for "git describe" - name: make docker - run: DOCKER_UID=$(id -u) DOCKER_GID=$(id -g) make docker + run: DOCKER_TAG=thorax/erigon:ci-$GITHUB_SHA DOCKER_UID=$(id -u) DOCKER_GID=$(id -g) make docker # check with root permissions, should be cached from 
previous build - name: sudo make docker - run: sudo DOCKER_UID=$(id -u) DOCKER_GID=$(id -g) make docker + run: sudo DOCKER_TAG=thorax/erigon:ci-$GITHUB_SHA DOCKER_UID=$(id -u) DOCKER_GID=$(id -g) make docker + + - name: run hive + run: sudo mkdir /results && docker run --rm -v /var/run/docker.sock:/var/run/docker.sock -v ${{ github.workspace }}:/work gatewayfm/hive:latest --sim ethereum/sync --results-root=/work/results --client erigon_ci-$GITHUB_SHA --docker.output --loglevel 5 + + - name: parse hive output + run: docker run --rm -v /var/run/docker.sock:/var/run/docker.sock -v ${{ github.workspace }}:/work --entrypoint /app/hivecioutput gatewayfm/hive:latest --resultsdir=/work/results --outdir=/work/results + + - name: archive hive results + uses: actions/upload-artifact@v3 + if: always() + with: + name: hive-ci-output + path: results/*.xml diff --git a/.github/workflows/hive-results.yml b/.github/workflows/hive-results.yml new file mode 100644 index 00000000000..c67dc5fcfab --- /dev/null +++ b/.github/workflows/hive-results.yml @@ -0,0 +1,37 @@ +name: Hive results + +on: + workflow_run: + workflows: ["Continuous integration", "ci"] + types: + - completed + +jobs: + hive-results: + name: Hive results + runs-on: ubuntu-latest + if: github.event.workflow_run.conclusion != 'skipped' + + steps: + - name: Download and extract artifacts + env: + GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} + run: | + mkdir -p artifacts && cd artifacts + + artifacts_url=${{ github.event.workflow_run.artifacts_url }} + + gh api "$artifacts_url" -q '.artifacts[] | [.name, .archive_download_url] | @tsv' | while read artifact + do + IFS=$'\t' read name url <<< "$artifact" + gh api $url > "$name.zip" + unzip -d "$name" "$name.zip" + done + + - name: Publish hive test results + uses: EnricoMi/publish-unit-test-result-action@v1 + with: + commit: ${{ github.event.workflow_run.head_sha }} + event_file: artifacts/Event File/event.json + event_name: ${{ github.event.workflow_run.event }} + files: 
"artifacts/**/*.xml" \ No newline at end of file From 42e59618a7e933c617e3cb50821e7ed5c454534f Mon Sep 17 00:00:00 2001 From: Igor Mandrigin Date: Thu, 21 Jul 2022 14:50:03 +0200 Subject: [PATCH 122/152] run engine tests on Hive --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index af6a1c08223..12ba6dac513 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -111,7 +111,7 @@ jobs: run: sudo DOCKER_TAG=thorax/erigon:ci-$GITHUB_SHA DOCKER_UID=$(id -u) DOCKER_GID=$(id -g) make docker - name: run hive - run: sudo mkdir /results && docker run --rm -v /var/run/docker.sock:/var/run/docker.sock -v ${{ github.workspace }}:/work gatewayfm/hive:latest --sim ethereum/sync --results-root=/work/results --client erigon_ci-$GITHUB_SHA --docker.output --loglevel 5 + run: sudo mkdir /results && docker run --rm -v /var/run/docker.sock:/var/run/docker.sock -v ${{ github.workspace }}:/work gatewayfm/hive:latest --sim ethereum/engine --results-root=/work/results --client erigon_ci-$GITHUB_SHA --docker.output --loglevel 5 - name: parse hive output run: docker run --rm -v /var/run/docker.sock:/var/run/docker.sock -v ${{ github.workspace }}:/work --entrypoint /app/hivecioutput gatewayfm/hive:latest --resultsdir=/work/results --outdir=/work/results From 6060b87840c7698af4aec9835c403a817e547418 Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Thu, 21 Jul 2022 19:40:00 +0200 Subject: [PATCH 123/152] Fix binary vs raw confusion for PoS transaction (#4781) * Replace PayloadMessage with Block * RawTransactions -> BinaryTransactions for clarity * add a log warning --- core/types/transaction.go | 7 +++--- eth/stagedsync/stage_headers.go | 42 +++++++------------------------ ethdb/privateapi/ethbackend.go | 43 ++++++++++++++++++++++---------- turbo/engineapi/request_list.go | 10 ++------ turbo/stages/mock_sentry.go | 2 +- 
turbo/stages/sentry_mock_test.go | 17 ++++--------- 6 files changed, 51 insertions(+), 70 deletions(-) diff --git a/core/types/transaction.go b/core/types/transaction.go index 1934103b24c..d5310d1866b 100644 --- a/core/types/transaction.go +++ b/core/types/transaction.go @@ -98,13 +98,14 @@ type TransactionMisc struct { from atomic.Value } -type RawTransactions [][]byte +// RLP-marshalled legacy transactions and binary-marshalled (not wrapped into an RLP string) typed (EIP-2718) transactions +type BinaryTransactions [][]byte -func (t RawTransactions) Len() int { +func (t BinaryTransactions) Len() int { return len(t) } -func (t RawTransactions) EncodeIndex(i int, w *bytes.Buffer) { +func (t BinaryTransactions) EncodeIndex(i int, w *bytes.Buffer) { w.Write(t[i]) } diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index 19a44499c1f..e1c316e7eb9 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -184,7 +184,7 @@ func HeadersPOS( if forkChoiceInsteadOfNewPayload { payloadStatus, err = startHandlingForkChoice(forkChoiceMessage, requestStatus, requestId, s, u, ctx, tx, cfg, headerInserter) } else { - payloadMessage := request.(*engineapi.PayloadMessage) + payloadMessage := request.(*types.Block) payloadStatus, err = handleNewPayload(payloadMessage, requestStatus, requestId, s, ctx, tx, cfg, headerInserter) } @@ -431,7 +431,7 @@ func finishHandlingForkChoice( } func handleNewPayload( - payloadMessage *engineapi.PayloadMessage, + block *types.Block, requestStatus engineapi.RequestStatus, requestId int, s *StageState, @@ -440,9 +440,9 @@ func handleNewPayload( cfg HeadersCfg, headerInserter *headerdownload.HeaderInserter, ) (*privateapi.PayloadStatus, error) { - header := payloadMessage.Header + header := block.Header() headerNumber := header.Number.Uint64() - headerHash := header.Hash() + headerHash := block.Hash() log.Debug(fmt.Sprintf("[%s] Handling new payload", s.LogPrefix()), "height", headerNumber, 
"hash", headerHash) cfg.hd.UpdateTopSeenHeightPoS(headerNumber) @@ -507,38 +507,14 @@ func handleNewPayload( cfg.hd.BeaconRequestList.Remove(requestId) - for _, tx := range payloadMessage.Body.Transactions { - if types.TypedTransactionMarshalledAsRlpString(tx) { - log.Warn(fmt.Sprintf("[%s] typed txn marshalled as RLP string", s.LogPrefix()), "tx", common.Bytes2Hex(tx)) - cfg.hd.ReportBadHeaderPoS(headerHash, header.ParentHash) - return &privateapi.PayloadStatus{ - Status: remote.EngineStatus_INVALID, - LatestValidHash: header.ParentHash, - ValidationError: errors.New("typed txn marshalled as RLP string"), - }, nil - } - } - - transactions, err := types.DecodeTransactions(payloadMessage.Body.Transactions) - if err != nil { - log.Warn(fmt.Sprintf("[%s] Error during Beacon transaction decoding", s.LogPrefix()), "err", err.Error()) - cfg.hd.ReportBadHeaderPoS(headerHash, header.ParentHash) - return &privateapi.PayloadStatus{ - Status: remote.EngineStatus_INVALID, - LatestValidHash: header.ParentHash, - ValidationError: err, - }, nil - } - log.Debug(fmt.Sprintf("[%s] New payload begin verification", s.LogPrefix())) - response, success, err := verifyAndSaveNewPoSHeader(requestStatus, s, ctx, tx, cfg, header, payloadMessage.Body, headerInserter) + response, success, err := verifyAndSaveNewPoSHeader(requestStatus, s, ctx, tx, cfg, block, headerInserter) log.Debug(fmt.Sprintf("[%s] New payload verification ended", s.LogPrefix()), "success", success, "err", err) if err != nil || !success { return response, err } if cfg.bodyDownload != nil { - block := types.NewBlockFromStorage(headerHash, header, transactions, nil) cfg.bodyDownload.AddToPrefetch(block) } @@ -551,12 +527,12 @@ func verifyAndSaveNewPoSHeader( ctx context.Context, tx kv.RwTx, cfg HeadersCfg, - header *types.Header, - body *types.RawBody, + block *types.Block, headerInserter *headerdownload.HeaderInserter, ) (response *privateapi.PayloadStatus, success bool, err error) { + header := block.Header() headerNumber 
:= header.Number.Uint64() - headerHash := header.Hash() + headerHash := block.Hash() if verificationErr := cfg.hd.VerifyHeader(header); verificationErr != nil { log.Warn("Verification failed for header", "hash", headerHash, "height", headerNumber, "err", verificationErr) @@ -581,7 +557,7 @@ func verifyAndSaveNewPoSHeader( if cfg.memoryOverlay { extendingHash := cfg.forkValidator.ExtendingForkHeadHash() extendCanonical := (extendingHash == common.Hash{} && header.ParentHash == currentHeadHash) || extendingHash == header.ParentHash - status, latestValidHash, validationError, criticalError := cfg.forkValidator.ValidatePayload(tx, header, body, extendCanonical) + status, latestValidHash, validationError, criticalError := cfg.forkValidator.ValidatePayload(tx, header, block.RawBody(), extendCanonical) if criticalError != nil { return nil, false, criticalError } diff --git a/ethdb/privateapi/ethbackend.go b/ethdb/privateapi/ethbackend.go index 5925ad29009..f4abc963acf 100644 --- a/ethdb/privateapi/ethbackend.go +++ b/ethdb/privateapi/ethbackend.go @@ -303,10 +303,37 @@ func (s *EthBackendServer) EngineNewPayloadV1(ctx context.Context, req *types2.E Difficulty: serenity.SerenityDifficulty, Nonce: serenity.SerenityNonce, ReceiptHash: gointerfaces.ConvertH256ToHash(req.ReceiptRoot), - TxHash: types.DeriveSha(types.RawTransactions(req.Transactions)), + TxHash: types.DeriveSha(types.BinaryTransactions(req.Transactions)), } blockHash := gointerfaces.ConvertH256ToHash(req.BlockHash) + if header.Hash() != blockHash { + log.Error("[NewPayload] invalid block hash", "stated", common.Hash(blockHash), "actual", header.Hash()) + return &remote.EnginePayloadStatus{Status: remote.EngineStatus_INVALID_BLOCK_HASH}, nil + } + + for _, txn := range req.Transactions { + if types.TypedTransactionMarshalledAsRlpString(txn) { + log.Warn("[NewPayload] typed txn marshalled as RLP string", "txn", common.Bytes2Hex(txn)) + return &remote.EnginePayloadStatus{ + Status: remote.EngineStatus_INVALID, + 
LatestValidHash: nil, + ValidationError: "typed txn marshalled as RLP string", + }, nil + } + } + + transactions, err := types.DecodeTransactions(req.Transactions) + if err != nil { + log.Warn("[NewPayload] failed to decode transactions", "err", err) + return &remote.EnginePayloadStatus{ + Status: remote.EngineStatus_INVALID, + LatestValidHash: nil, + ValidationError: err.Error(), + }, nil + } + block := types.NewBlockFromStorage(blockHash, &header, transactions, nil) + tx, err := s.db.BeginRo(ctx) if err != nil { return nil, err @@ -322,6 +349,7 @@ func (s *EthBackendServer) EngineNewPayloadV1(ctx context.Context, req *types2.E return &remote.EnginePayloadStatus{Status: remote.EngineStatus_INVALID, LatestValidHash: gointerfaces.ConvertHashToH256(common.Hash{})}, nil } tx.Rollback() + // If another payload is already commissioned then we just reply with syncing if s.stageLoopIsBusy() { // We are still syncing a commissioned payload @@ -333,11 +361,6 @@ func (s *EthBackendServer) EngineNewPayloadV1(ctx context.Context, req *types2.E return &remote.EnginePayloadStatus{Status: remote.EngineStatus_SYNCING}, nil } - if header.Hash() != blockHash { - log.Error("[NewPayload] invalid block hash", "stated", common.Hash(blockHash), "actual", header.Hash()) - return &remote.EnginePayloadStatus{Status: remote.EngineStatus_INVALID_BLOCK_HASH}, nil - } - // Lock the thread (We modify shared resources). 
log.Debug("[NewPayload] acquiring lock") s.lock.Lock() @@ -345,13 +368,7 @@ func (s *EthBackendServer) EngineNewPayloadV1(ctx context.Context, req *types2.E log.Debug("[NewPayload] lock acquired") log.Debug("[NewPayload] sending block", "height", header.Number, "hash", common.Hash(blockHash)) - s.requestList.AddPayloadRequest(&engineapi.PayloadMessage{ - Header: &header, - Body: &types.RawBody{ - Transactions: req.Transactions, - Uncles: nil, - }, - }) + s.requestList.AddPayloadRequest(block) payloadStatus := <-s.statusCh log.Debug("[NewPayload] got reply", "payloadStatus", payloadStatus) diff --git a/turbo/engineapi/request_list.go b/turbo/engineapi/request_list.go index e66084c5fc9..11a2bc0ba13 100644 --- a/turbo/engineapi/request_list.go +++ b/turbo/engineapi/request_list.go @@ -10,12 +10,6 @@ import ( "github.com/ledgerwatch/erigon/core/types" ) -// The message we are going to send to the stage sync in NewPayload -type PayloadMessage struct { - Header *types.Header - Body *types.RawBody -} - // The message we are going to send to the stage sync in ForkchoiceUpdated type ForkChoiceMessage struct { HeadBlockHash common.Hash @@ -31,7 +25,7 @@ const ( // RequestStatus values ) type RequestWithStatus struct { - Message interface{} // *PayloadMessage or *ForkChoiceMessage + Message interface{} // *Block or *ForkChoiceMessage Status RequestStatus } @@ -59,7 +53,7 @@ func NewRequestList() *RequestList { return rl } -func (rl *RequestList) AddPayloadRequest(message *PayloadMessage) { +func (rl *RequestList) AddPayloadRequest(message *types.Block) { rl.syncCond.L.Lock() defer rl.syncCond.L.Unlock() diff --git a/turbo/stages/mock_sentry.go b/turbo/stages/mock_sentry.go index 4a3cd2e28bf..774e98fab39 100644 --- a/turbo/stages/mock_sentry.go +++ b/turbo/stages/mock_sentry.go @@ -522,7 +522,7 @@ func (ms *MockSentry) InsertChain(chain *core.ChainPack) error { return nil } -func (ms *MockSentry) SendPayloadRequest(message *engineapi.PayloadMessage) { +func (ms *MockSentry) 
SendPayloadRequest(message *types.Block) { ms.sentriesClient.Hd.BeaconRequestList.AddPayloadRequest(message) } diff --git a/turbo/stages/sentry_mock_test.go b/turbo/stages/sentry_mock_test.go index 9ab7015e06b..bd8e552c1be 100644 --- a/turbo/stages/sentry_mock_test.go +++ b/turbo/stages/sentry_mock_test.go @@ -571,12 +571,8 @@ func TestPoSDownloader(t *testing.T) { }, false /* intermediateHashes */) require.NoError(t, err) - // Send a payload with missing parent - payloadMessage := engineapi.PayloadMessage{ - Header: chain.TopBlock.Header(), - Body: chain.TopBlock.RawBody(), - } - m.SendPayloadRequest(&payloadMessage) + // Send a payload whose parent isn't downloaded yet + m.SendPayloadRequest(chain.TopBlock) headBlockHash, err := stages.StageLoopStep(m.Ctx, m.DB, m.Sync, 0, m.Notifications, true, m.UpdateHead, nil) require.NoError(t, err) stages.SendPayloadStatus(m.HeaderDownload(), headBlockHash, err) @@ -640,12 +636,9 @@ func TestPoSSyncWithInvalidHeader(t *testing.T) { invalidTip := chain.TopBlock.Header() invalidTip.ParentHash = invalidParent.Hash() - // Send a payload with missing parent - payloadMessage := engineapi.PayloadMessage{ - Header: invalidTip, - Body: chain.TopBlock.RawBody(), - } - m.SendPayloadRequest(&payloadMessage) + // Send a payload with the parent missing + payloadMessage := types.NewBlockFromStorage(invalidTip.Hash(), invalidTip, chain.TopBlock.Transactions(), nil) + m.SendPayloadRequest(payloadMessage) headBlockHash, err := stages.StageLoopStep(m.Ctx, m.DB, m.Sync, 0, m.Notifications, true, m.UpdateHead, nil) require.NoError(t, err) stages.SendPayloadStatus(m.HeaderDownload(), headBlockHash, err) From 66758c79607e40bd96d1ccdeb0f57bd4b2d24524 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Fri, 22 Jul 2022 13:44:42 +0700 Subject: [PATCH 124/152] RetireBlocks: less arguments (#4785) * save * save --- cmd/state/commands/history22.go | 3 +- cmd/state/commands/state_recon.go | 3 +- core/state/state_recon_writer.go | 2 +- 
core/vm/lightclient/iavl/proof_path.go | 2 +- eth/stagedsync/stage_headers.go | 2 +- eth/stagedsync/stage_senders.go | 4 +- rpc/handler.go | 5 +- turbo/app/snapshots.go | 4 +- turbo/snapshotsync/block_snapshots.go | 72 +++++++++++--------------- 9 files changed, 37 insertions(+), 60 deletions(-) diff --git a/cmd/state/commands/history22.go b/cmd/state/commands/history22.go index a7ecf4d8ad9..02890f1f3c3 100644 --- a/cmd/state/commands/history22.go +++ b/cmd/state/commands/history22.go @@ -130,8 +130,7 @@ func History22(genesis *core.Genesis, logger log.Logger) error { prevTime := time.Now() var blockReader services.FullBlockReader - var allSnapshots *snapshotsync.RoSnapshots - allSnapshots = snapshotsync.NewRoSnapshots(ethconfig.NewSnapCfg(true, false, true), path.Join(datadir, "snapshots")) + allSnapshots := snapshotsync.NewRoSnapshots(ethconfig.NewSnapCfg(true, false, true), path.Join(datadir, "snapshots")) defer allSnapshots.Close() if err := allSnapshots.Reopen(); err != nil { return fmt.Errorf("reopen snapshot segments: %w", err) diff --git a/cmd/state/commands/state_recon.go b/cmd/state/commands/state_recon.go index 231fcca9499..db5ea046249 100644 --- a/cmd/state/commands/state_recon.go +++ b/cmd/state/commands/state_recon.go @@ -376,8 +376,7 @@ func Recon(genesis *core.Genesis, logger log.Logger) error { return err } var blockReader services.FullBlockReader - var allSnapshots *snapshotsync.RoSnapshots - allSnapshots = snapshotsync.NewRoSnapshots(ethconfig.NewSnapCfg(true, false, true), path.Join(datadir, "snapshots")) + allSnapshots := snapshotsync.NewRoSnapshots(ethconfig.NewSnapCfg(true, false, true), path.Join(datadir, "snapshots")) defer allSnapshots.Close() if err := allSnapshots.Reopen(); err != nil { return fmt.Errorf("reopen snapshot segments: %w", err) diff --git a/core/state/state_recon_writer.go b/core/state/state_recon_writer.go index 49f2635c75f..0d1c8e8e1ef 100644 --- a/core/state/state_recon_writer.go +++ b/core/state/state_recon_writer.go @@ 
-156,7 +156,7 @@ func (rs *ReconState) RollbackTx(txTask TxTask, dependency uint64) { if rs.doneBitmap.Contains(dependency) { heap.Push(&rs.queue, txTask) } else { - tt, _ := rs.triggers[dependency] + tt := rs.triggers[dependency] tt = append(tt, txTask) rs.triggers[dependency] = tt } diff --git a/core/vm/lightclient/iavl/proof_path.go b/core/vm/lightclient/iavl/proof_path.go index de366f33813..5b2609654bb 100644 --- a/core/vm/lightclient/iavl/proof_path.go +++ b/core/vm/lightclient/iavl/proof_path.go @@ -118,7 +118,7 @@ func (pl PathToLeaf) isRightmost() bool { } func (pl PathToLeaf) isEmpty() bool { - return pl == nil || len(pl) == 0 + return len(pl) == 0 } func (pl PathToLeaf) dropRoot() PathToLeaf { diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index e1c316e7eb9..b982d87c421 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -1311,7 +1311,7 @@ func WaitForDownloader(ctx context.Context, cfg HeadersCfg, tx kv.RwTx) error { return err } dbEmpty := len(snInDB) == 0 - var missingSnapshots []snapshotsync.MergeRange + var missingSnapshots []snapshotsync.Range if !dbEmpty { _, missingSnapshots, err = snapshotsync.Segments(cfg.snapshots.Dir()) if err != nil { diff --git a/eth/stagedsync/stage_senders.go b/eth/stagedsync/stage_senders.go index 22f6c5ce513..0a9db4af808 100644 --- a/eth/stagedsync/stage_senders.go +++ b/eth/stagedsync/stage_senders.go @@ -9,7 +9,6 @@ import ( "sync" "time" - "github.com/holiman/uint256" libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/common/cmp" "github.com/ledgerwatch/erigon-lib/common/length" @@ -430,8 +429,7 @@ func retireBlocksInSingleBackgroundThread(s *PruneState, cfg SendersCfg, ctx con } } - chainID, _ := uint256.FromBig(cfg.chainConfig.ChainID) - cfg.blockRetire.RetireBlocksInBackground(ctx, s.ForwardProgress, *chainID, log.LvlInfo) + cfg.blockRetire.RetireBlocksInBackground(ctx, s.ForwardProgress, log.LvlInfo) return nil } 
diff --git a/rpc/handler.go b/rpc/handler.go index 86985ea56e5..be73dbe7167 100644 --- a/rpc/handler.go +++ b/rpc/handler.go @@ -383,10 +383,7 @@ func (h *handler) handleCallMsg(ctx *callProc, msg *jsonrpcMessage, stream *json func (h *handler) isMethodAllowedByGranularControl(method string) bool { _, isForbidden := h.forbiddenList[method] if len(h.allowList) == 0 { - if isForbidden { - return false - } - return true + return !isForbidden } _, ok := h.allowList[method] diff --git a/turbo/app/snapshots.go b/turbo/app/snapshots.go index fd87d640159..b9c73f302d7 100644 --- a/turbo/app/snapshots.go +++ b/turbo/app/snapshots.go @@ -244,8 +244,6 @@ func doRetireCommand(cliCtx *cli.Context) error { defer chainDB.Close() cfg := ethconfig.NewSnapCfg(true, true, true) - chainConfig := tool.ChainConfigFromDB(chainDB) - chainID, _ := uint256.FromBig(chainConfig.ChainID) snapshots := snapshotsync.NewRoSnapshots(cfg, dirs.Snap) if err := snapshots.Reopen(); err != nil { return err @@ -256,7 +254,7 @@ func doRetireCommand(cliCtx *cli.Context) error { log.Info("Params", "from", from, "to", to, "every", every) for i := from; i < to; i += every { - if err := br.RetireBlocks(ctx, i, i+every, *chainID, log.LvlInfo); err != nil { + if err := br.RetireBlocks(ctx, i, i+every, log.LvlInfo); err != nil { panic(err) } if err := chainDB.Update(ctx, func(tx kv.RwTx) error { diff --git a/turbo/snapshotsync/block_snapshots.go b/turbo/snapshotsync/block_snapshots.go index 144f4c206e4..fc7850eca7a 100644 --- a/turbo/snapshotsync/block_snapshots.go +++ b/turbo/snapshotsync/block_snapshots.go @@ -42,7 +42,7 @@ import ( ) type DownloadRequest struct { - ranges *MergeRange + ranges *Range path string torrentHash string } @@ -50,20 +50,20 @@ type DownloadRequest struct { type HeaderSegment struct { seg *compress.Decompressor // value: first_byte_of_header_hash + header_rlp idxHeaderHash *recsplit.Index // header_hash -> headers_segment_offset - ranges MergeRange + ranges Range } type BodySegment 
struct { seg *compress.Decompressor // value: rlp(types.BodyForStorage) idxBodyNumber *recsplit.Index // block_num_u64 -> bodies_segment_offset - ranges MergeRange + ranges Range } type TxnSegment struct { Seg *compress.Decompressor // value: first_byte_of_transaction_hash + sender_address + transaction_rlp IdxTxnHash *recsplit.Index // transaction_hash -> transactions_segment_offset IdxTxnHash2BlockNum *recsplit.Index // transaction_hash -> block_number - ranges MergeRange + ranges Range } func (sn *HeaderSegment) close() { @@ -382,22 +382,6 @@ func (s *RoSnapshots) ReopenSomeIndices(types ...snap.Type) (err error) { return nil } -func (s *RoSnapshots) AsyncOpenAll(ctx context.Context) { - go func() { - for !s.segmentsReady.Load() || !s.indicesReady.Load() { - select { - case <-ctx.Done(): - return - default: - } - if err := s.Reopen(); err != nil && !errors.Is(err, os.ErrNotExist) { - log.Error("AsyncOpenAll", "err", err) - } - time.Sleep(15 * time.Second) - } - }() -} - // OptimisticReopen - optimistically open snapshots (ignoring error), useful at App startup because: // - user must be able: delete any snapshot file and Erigon will self-heal by re-downloading // - RPC return Nil for historical blocks if snapshots are not open @@ -422,7 +406,7 @@ func (s *RoSnapshots) Reopen() error { s.Txs.segments = s.Txs.segments[:0] for _, f := range files { { - seg := &BodySegment{ranges: MergeRange{f.From, f.To}} + seg := &BodySegment{ranges: Range{f.From, f.To}} fileName := snap.SegmentFileName(f.From, f.To, snap.Bodies) seg.seg, err = compress.NewDecompressor(path.Join(s.dir, fileName)) if err != nil { @@ -434,7 +418,7 @@ func (s *RoSnapshots) Reopen() error { s.Bodies.segments = append(s.Bodies.segments, seg) } { - seg := &HeaderSegment{ranges: MergeRange{f.From, f.To}} + seg := &HeaderSegment{ranges: Range{f.From, f.To}} fileName := snap.SegmentFileName(f.From, f.To, snap.Headers) seg.seg, err = compress.NewDecompressor(path.Join(s.dir, fileName)) if err != nil { @@ 
-446,7 +430,7 @@ func (s *RoSnapshots) Reopen() error { s.Headers.segments = append(s.Headers.segments, seg) } { - seg := &TxnSegment{ranges: MergeRange{f.From, f.To}} + seg := &TxnSegment{ranges: Range{f.From, f.To}} fileName := snap.SegmentFileName(f.From, f.To, snap.Transactions) seg.Seg, err = compress.NewDecompressor(path.Join(s.dir, fileName)) if err != nil { @@ -517,7 +501,7 @@ func (s *RoSnapshots) ReopenSegments() error { var segmentsMaxSet bool for _, f := range files { { - seg := &BodySegment{ranges: MergeRange{f.From, f.To}} + seg := &BodySegment{ranges: Range{f.From, f.To}} fileName := snap.SegmentFileName(f.From, f.To, snap.Bodies) seg.seg, err = compress.NewDecompressor(path.Join(s.dir, fileName)) if err != nil { @@ -529,7 +513,7 @@ func (s *RoSnapshots) ReopenSegments() error { s.Bodies.segments = append(s.Bodies.segments, seg) } { - seg := &HeaderSegment{ranges: MergeRange{f.From, f.To}} + seg := &HeaderSegment{ranges: Range{f.From, f.To}} fileName := snap.SegmentFileName(f.From, f.To, snap.Headers) seg.seg, err = compress.NewDecompressor(path.Join(s.dir, fileName)) if err != nil { @@ -541,7 +525,7 @@ func (s *RoSnapshots) ReopenSegments() error { s.Headers.segments = append(s.Headers.segments, seg) } { - seg := &TxnSegment{ranges: MergeRange{f.From, f.To}} + seg := &TxnSegment{ranges: Range{f.From, f.To}} fileName := snap.SegmentFileName(f.From, f.To, snap.Transactions) seg.Seg, err = compress.NewDecompressor(path.Join(s.dir, fileName)) if err != nil { @@ -793,14 +777,14 @@ func BuildIndices(ctx context.Context, s *RoSnapshots, chainID uint256.Int, tmpD return nil } -func noGaps(in []snap.FileInfo) (out []snap.FileInfo, missingSnapshots []MergeRange) { +func noGaps(in []snap.FileInfo) (out []snap.FileInfo, missingSnapshots []Range) { var prevTo uint64 for _, f := range in { if f.To <= prevTo { continue } if f.From != prevTo { // no gaps - missingSnapshots = append(missingSnapshots, MergeRange{prevTo, f.From}) + missingSnapshots = 
append(missingSnapshots, Range{prevTo, f.From}) continue } prevTo = f.To @@ -854,7 +838,7 @@ func noOverlaps(in []snap.FileInfo) (res []snap.FileInfo) { return res } -func Segments(dir string) (res []snap.FileInfo, missingSnapshots []MergeRange, err error) { +func Segments(dir string) (res []snap.FileInfo, missingSnapshots []Range, err error) { list, err := snap.Segments(dir) if err != nil { return nil, missingSnapshots, err @@ -944,10 +928,12 @@ func CanDeleteTo(curBlockNum uint64, snapshots *RoSnapshots) (blockTo uint64) { hardLimit := (curBlockNum/1_000)*1_000 - params.FullImmutabilityThreshold return cmp.Min(hardLimit, snapshots.BlocksAvailable()+1) } -func (br *BlockRetire) RetireBlocks(ctx context.Context, blockFrom, blockTo uint64, chainID uint256.Int, lvl log.Lvl) error { - return retireBlocks(ctx, blockFrom, blockTo, chainID, br.tmpDir, br.snapshots, br.db, br.workers, br.downloader, lvl, br.notifier) +func (br *BlockRetire) RetireBlocks(ctx context.Context, blockFrom, blockTo uint64, lvl log.Lvl) error { + chainConfig := tool.ChainConfigFromDB(br.db) + chainID, _ := uint256.FromBig(chainConfig.ChainID) + return retireBlocks(ctx, blockFrom, blockTo, *chainID, br.tmpDir, br.snapshots, br.db, br.workers, br.downloader, lvl, br.notifier) } -func (br *BlockRetire) RetireBlocksInBackground(ctx context.Context, forwardProgress uint64, chainID uint256.Int, lvl log.Lvl) { +func (br *BlockRetire) RetireBlocksInBackground(ctx context.Context, forwardProgress uint64, lvl log.Lvl) { if br.working.Load() { // go-routine is still working return @@ -968,7 +954,7 @@ func (br *BlockRetire) RetireBlocksInBackground(ctx context.Context, forwardProg return } - err := br.RetireBlocks(ctx, blockFrom, blockTo, chainID, lvl) + err := br.RetireBlocks(ctx, blockFrom, blockTo, lvl) br.result = &BlockRetireResult{ BlockFrom: blockFrom, BlockTo: blockTo, @@ -988,7 +974,7 @@ func retireBlocks(ctx context.Context, blockFrom, blockTo uint64, chainID uint25 return fmt.Errorf("DumpBlocks: 
%w", err) } if err := snapshots.Reopen(); err != nil { - return fmt.Errorf("Reopen: %w", err) + return fmt.Errorf("reopen: %w", err) } idxWorkers := workers if idxWorkers > 4 { @@ -998,7 +984,7 @@ func retireBlocks(ctx context.Context, blockFrom, blockTo uint64, chainID uint25 return err } if err := snapshots.Reopen(); err != nil { - return fmt.Errorf("Reopen: %w", err) + return fmt.Errorf("reopen: %w", err) } merger := NewMerger(tmpDir, workers, lvl, chainID, notifier) ranges := merger.FindMergeRanges(snapshots) @@ -1010,7 +996,7 @@ func retireBlocks(ctx context.Context, blockFrom, blockTo uint64, chainID uint25 return err } if err := snapshots.Reopen(); err != nil { - return fmt.Errorf("Reopen: %w", err) + return fmt.Errorf("reopen: %w", err) } var downloadRequest []DownloadRequest @@ -1723,13 +1709,13 @@ func NewMerger(tmpDir string, workers int, lvl log.Lvl, chainID uint256.Int, not return &Merger{tmpDir: tmpDir, workers: workers, lvl: lvl, chainID: chainID, notifier: notifier} } -type MergeRange struct { +type Range struct { from, to uint64 } -func (r MergeRange) String() string { return fmt.Sprintf("%dk-%dk", r.from/1000, r.to/1000) } +func (r Range) String() string { return fmt.Sprintf("%dk-%dk", r.from/1000, r.to/1000) } -func (*Merger) FindMergeRanges(snapshots *RoSnapshots) (res []MergeRange) { +func (*Merger) FindMergeRanges(snapshots *RoSnapshots) (res []Range) { for i := len(snapshots.Headers.segments) - 1; i > 0; i-- { sn := snapshots.Headers.segments[i] if sn.ranges.to-sn.ranges.from >= snap.DEFAULT_SEGMENT_SIZE { // is complete .seg @@ -1744,14 +1730,14 @@ func (*Merger) FindMergeRanges(snapshots *RoSnapshots) (res []MergeRange) { break } aggFrom := sn.ranges.to - span - res = append(res, MergeRange{from: aggFrom, to: sn.ranges.to}) + res = append(res, Range{from: aggFrom, to: sn.ranges.to}) for snapshots.Headers.segments[i].ranges.from > aggFrom { i-- } break } } - slices.SortFunc(res, func(i, j MergeRange) bool { return i.from < j.from }) + 
slices.SortFunc(res, func(i, j Range) bool { return i.from < j.from }) return res } func (m *Merger) filesByRange(snapshots *RoSnapshots, from, to uint64) (toMergeHeaders, toMergeBodies, toMergeTxs []string, err error) { @@ -1779,7 +1765,7 @@ func (m *Merger) filesByRange(snapshots *RoSnapshots, from, to uint64) (toMergeH } // Merge does merge segments in given ranges -func (m *Merger) Merge(ctx context.Context, snapshots *RoSnapshots, mergeRanges []MergeRange, snapDir string, doIndex bool) error { +func (m *Merger) Merge(ctx context.Context, snapshots *RoSnapshots, mergeRanges []Range, snapDir string, doIndex bool) error { if len(mergeRanges) == 0 { return nil } @@ -1943,7 +1929,7 @@ func assertSegment(segmentFile string) { } } -func NewDownloadRequest(ranges *MergeRange, path string, torrentHash string) DownloadRequest { +func NewDownloadRequest(ranges *Range, path string, torrentHash string) DownloadRequest { return DownloadRequest{ ranges: ranges, path: path, From 1d378b6618cf20ca48425e0805c85970d8ca07d2 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Fri, 22 Jul 2022 09:18:19 +0200 Subject: [PATCH 125/152] Filter out bad tx with wrong chain id during block building phase. 
(#4783) * filter out bad tx with wrong chain id * report bad txs Co-authored-by: giuliorebuffo --- eth/stagedsync/stage_mining_create_block.go | 3 +++ go.mod | 2 +- go.sum | 4 ++-- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/eth/stagedsync/stage_mining_create_block.go b/eth/stagedsync/stage_mining_create_block.go index ec6b3532624..f148f2ef811 100644 --- a/eth/stagedsync/stage_mining_create_block.go +++ b/eth/stagedsync/stage_mining_create_block.go @@ -141,6 +141,9 @@ func SpawnMiningCreateBlockStage(s *StageState, tx kv.RwTx, cfg MiningCreateBloc if err != nil { return err } + if transaction.GetChainID().ToBig().Cmp(cfg.chainConfig.ChainID) != 0 { + continue + } txs = append(txs, transaction) } var sender common.Address diff --git a/go.mod b/go.mod index c5023b793da..f96580dbf77 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/ledgerwatch/erigon go 1.18 require ( - github.com/ledgerwatch/erigon-lib v0.0.0-20220720144911-046e4165b52a + github.com/ledgerwatch/erigon-lib v0.0.0-20220721212928-1331bb661a22 github.com/ledgerwatch/erigon-snapshot v1.0.0 github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 diff --git a/go.sum b/go.sum index 11bce9fb5ec..3447e0747c9 100644 --- a/go.sum +++ b/go.sum @@ -390,8 +390,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3PYPwICLl+/9oulQauOuETfgFvhBDffs0= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220720144911-046e4165b52a h1:fRjDLDbieEy48O5BvMf1+ib8loZMA3nSiRtjxbuIsYw= -github.com/ledgerwatch/erigon-lib v0.0.0-20220720144911-046e4165b52a/go.mod h1:mq8M03qcnaqXZ/yjNuWoyZQ5V8r5JbXw5JYmy4WNUZQ= +github.com/ledgerwatch/erigon-lib v0.0.0-20220721212928-1331bb661a22 
h1:cGxOEtCnkVzX+RcGQbQHiDuV8dQHnGqcwTFl9q8Hnkg= +github.com/ledgerwatch/erigon-lib v0.0.0-20220721212928-1331bb661a22/go.mod h1:mq8M03qcnaqXZ/yjNuWoyZQ5V8r5JbXw5JYmy4WNUZQ= github.com/ledgerwatch/erigon-snapshot v1.0.0 h1:bp/7xoPdM5lK7LFdqEMH008RZmqxMZV0RUVEQiWs7v4= github.com/ledgerwatch/erigon-snapshot v1.0.0/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= From a1777accd83814120def6b38150c2e381c957992 Mon Sep 17 00:00:00 2001 From: Artem Tsebrovskiy Date: Fri, 22 Jul 2022 09:47:33 +0100 Subject: [PATCH 126/152] fixed passing of raw byte slices to tx processing (#4782) --- cmd/sentry/sentry/sentry_multi_client.go | 2 +- turbo/stages/bodydownload/body_algos.go | 18 +++++++++++++++--- turbo/stages/bodydownload/body_data_struct.go | 4 ++-- 3 files changed, 18 insertions(+), 6 deletions(-) diff --git a/cmd/sentry/sentry/sentry_multi_client.go b/cmd/sentry/sentry/sentry_multi_client.go index 1359398c43c..5abd7cb853a 100644 --- a/cmd/sentry/sentry/sentry_multi_client.go +++ b/cmd/sentry/sentry/sentry_multi_client.go @@ -500,7 +500,7 @@ func (cs *MultiClient) blockBodies66(inreq *proto_sentry.InboundMessage, _ direc return fmt.Errorf("decode BlockBodiesPacket66: %w", err) } txs, uncles := request.BlockRawBodiesPacket.Unpack() - cs.Bd.DeliverBodies(txs, uncles, uint64(len(inreq.Data)), ConvertH512ToPeerID(inreq.PeerId)) + cs.Bd.DeliverBodies(&txs, &uncles, uint64(len(inreq.Data)), ConvertH512ToPeerID(inreq.PeerId)) return nil } diff --git a/turbo/stages/bodydownload/body_algos.go b/turbo/stages/bodydownload/body_algos.go index c2d392c379b..edb406bcc51 100644 --- a/turbo/stages/bodydownload/body_algos.go +++ b/turbo/stages/bodydownload/body_algos.go @@ -9,6 +9,8 @@ import ( "github.com/holiman/uint256" "github.com/ledgerwatch/erigon-lib/common/dbg" "github.com/ledgerwatch/erigon-lib/kv" + "github.com/ledgerwatch/log/v3" + "github.com/ledgerwatch/erigon/common" 
"github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/core/rawdb" @@ -17,7 +19,6 @@ import ( "github.com/ledgerwatch/erigon/turbo/adapter" "github.com/ledgerwatch/erigon/turbo/services" "github.com/ledgerwatch/erigon/turbo/stages/headerdownload" - "github.com/ledgerwatch/log/v3" ) const BlockBufferSize = 128 @@ -195,7 +196,7 @@ func (bd *BodyDownload) RequestSent(bodyReq *BodyRequest, timeWithTimeout uint64 } // DeliverBodies takes the block body received from a peer and adds it to the various data structures -func (bd *BodyDownload) DeliverBodies(txs [][][]byte, uncles [][]*types.Header, lenOfP2PMsg uint64, peerID [64]byte) { +func (bd *BodyDownload) DeliverBodies(txs *[][][]byte, uncles *[][]*types.Header, lenOfP2PMsg uint64, peerID [64]byte) { bd.deliveryCh <- Delivery{txs: txs, uncles: uncles, lenOfP2PMessage: lenOfP2PMsg, peerID: peerID} select { @@ -240,8 +241,19 @@ Loop: break Loop } + if delivery.txs == nil { + log.Warn("nil transactions delivered", "peer_id", delivery.peerID, "p2p_msg_len", delivery.lenOfP2PMessage) + } + if delivery.uncles == nil { + log.Warn("nil uncles delivered", "peer_id", delivery.peerID, "p2p_msg_len", delivery.lenOfP2PMessage) + } + if delivery.txs == nil || delivery.uncles == nil { + log.Debug("delivery body processing has been skipped due to nil tx|data") + continue + } + reqMap := make(map[uint64]*BodyRequest) - txs, uncles, lenOfP2PMessage, _ := delivery.txs, delivery.uncles, delivery.lenOfP2PMessage, delivery.peerID + txs, uncles, lenOfP2PMessage, _ := *delivery.txs, *delivery.uncles, delivery.lenOfP2PMessage, delivery.peerID var delivered, undelivered int for i := range txs { diff --git a/turbo/stages/bodydownload/body_data_struct.go b/turbo/stages/bodydownload/body_data_struct.go index 56995ea9c4e..eedbf1c3ea0 100644 --- a/turbo/stages/bodydownload/body_data_struct.go +++ b/turbo/stages/bodydownload/body_data_struct.go @@ -14,8 +14,8 @@ const MaxBodiesInRequest = 1024 type Delivery struct { peerID [64]byte 
- txs [][][]byte - uncles [][]*types.Header + txs *[][][]byte + uncles *[][]*types.Header lenOfP2PMessage uint64 } From 46a8c531ced28dd6e0b55020305aa7045b059ca1 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Fri, 22 Jul 2022 11:07:58 +0200 Subject: [PATCH 127/152] Optimized PoS header downloader (#4775) * optimized PoS header downloader * removed println * comments * ops * Restore schedulePoSDownload params + simplify Co-authored-by: giuliorebuffo Co-authored-by: yperbasis --- eth/stagedsync/stage_headers.go | 9 +++------ turbo/engineapi/fork_validator.go | 1 - turbo/stages/headerdownload/header_algos.go | 2 +- 3 files changed, 4 insertions(+), 8 deletions(-) diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index b982d87c421..e8a4b7f863b 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -305,9 +305,8 @@ func startHandlingForkChoice( if header == nil { log.Info(fmt.Sprintf("[%s] Fork choice missing header with hash %x", s.LogPrefix(), headerHash)) - hashToDownload := headerHash cfg.hd.SetPoSDownloaderTip(headerHash) - schedulePoSDownload(requestId, hashToDownload, 0 /* header height is unknown, setting to 0 */, s, cfg) + schedulePoSDownload(requestId, headerHash, 0 /* header height is unknown, setting to 0 */, s, cfg) return &privateapi.PayloadStatus{Status: remote.EngineStatus_SYNCING}, nil } @@ -487,10 +486,8 @@ func handleNewPayload( } if parent == nil { log.Info(fmt.Sprintf("[%s] New payload missing parent", s.LogPrefix())) - hashToDownload := header.ParentHash - heightToDownload := headerNumber - 1 cfg.hd.SetPoSDownloaderTip(headerHash) - schedulePoSDownload(requestId, hashToDownload, heightToDownload, s, cfg) + schedulePoSDownload(requestId, header.ParentHash, headerNumber-1, s, cfg) return &privateapi.PayloadStatus{Status: remote.EngineStatus_SYNCING}, nil } @@ -629,7 +626,7 @@ func schedulePoSDownload( cfg.hd.SetRequestId(requestId) cfg.hd.SetHeaderToDownloadPoS(hashToDownload, 
heightToDownload) - cfg.hd.SetPOSSync(true) // This needs to be called afrer SetHeaderToDownloadPOS because SetHeaderToDownloadPOS sets `posAnchor` member field which is used by ProcessHeadersPOS + cfg.hd.SetPOSSync(true) // This needs to be called after SetHeaderToDownloadPOS because SetHeaderToDownloadPOS sets `posAnchor` member field which is used by ProcessHeadersPOS //nolint headerCollector := etl.NewCollector(s.LogPrefix(), cfg.tmpdir, etl.NewSortableBuffer(etl.BufferOptimalSize)) diff --git a/turbo/engineapi/fork_validator.go b/turbo/engineapi/fork_validator.go index 7fc99f5d1ae..a1ae9a1b85c 100644 --- a/turbo/engineapi/fork_validator.go +++ b/turbo/engineapi/fork_validator.go @@ -228,7 +228,6 @@ func (fv *ForkValidator) ValidatePayload(tx kv.RwTx, header *types.Header, body // if the block is not in range of maxForkDepth from head then we do not validate it. if abs64(int64(fv.currentHeight)-header.Number.Int64()) > maxForkDepth { status = remote.EngineStatus_ACCEPTED - fmt.Println("not in range") return } // Let's assemble the side fork backwards diff --git a/turbo/stages/headerdownload/header_algos.go b/turbo/stages/headerdownload/header_algos.go index 131b3b641ee..1f9d9f83808 100644 --- a/turbo/stages/headerdownload/header_algos.go +++ b/turbo/stages/headerdownload/header_algos.go @@ -437,7 +437,7 @@ func (hd *HeaderDownload) requestMoreHeadersForPOS(currentTime time.Time) (timeo request = &HeaderRequest{ Anchor: anchor, Hash: anchor.parentHash, - Number: 0, // Since posAnchor may be an estimate, do not specify it here + Number: anchor.blockHeight - 1, Length: 192, Skip: 0, Reverse: true, From cd8b10f57e5f0a25a40cc2df312c2560494f1a1f Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Fri, 22 Jul 2022 16:54:05 +0700 Subject: [PATCH 128/152] snapshot merger: smaller interface (#4786) * save * save * save --- .github/workflows/ci.yml | 2 +- turbo/app/snapshots.go | 8 +- turbo/snapshotsync/block_snapshots.go | 144 ++++++++++----------- 
turbo/snapshotsync/block_snapshots_test.go | 4 +- 4 files changed, 76 insertions(+), 82 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 12ba6dac513..018f9934933 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -116,7 +116,7 @@ jobs: - name: parse hive output run: docker run --rm -v /var/run/docker.sock:/var/run/docker.sock -v ${{ github.workspace }}:/work --entrypoint /app/hivecioutput gatewayfm/hive:latest --resultsdir=/work/results --outdir=/work/results - - name: archive hive results + - name: archive hive results uses: actions/upload-artifact@v3 if: always() with: diff --git a/turbo/app/snapshots.go b/turbo/app/snapshots.go index b9c73f302d7..13a1155c629 100644 --- a/turbo/app/snapshots.go +++ b/turbo/app/snapshots.go @@ -130,13 +130,7 @@ func doIndicesCommand(cliCtx *cli.Context) error { if rebuild { cfg := ethconfig.NewSnapCfg(true, true, false) - workers := runtime.GOMAXPROCS(-1) - 1 - if workers < 1 { - workers = 1 - } - if workers > 4 { - workers = 4 - } + workers := cmp.InRange(1, 4, runtime.GOMAXPROCS(-1)-1) if err := rebuildIndices(ctx, chainDB, cfg, dirs, from, workers); err != nil { log.Error("Error", "err", err) } diff --git a/turbo/snapshotsync/block_snapshots.go b/turbo/snapshotsync/block_snapshots.go index fc7850eca7a..1ba201f4cc0 100644 --- a/turbo/snapshotsync/block_snapshots.go +++ b/turbo/snapshotsync/block_snapshots.go @@ -482,6 +482,17 @@ func (s *RoSnapshots) Reopen() error { return nil } + +func (s *RoSnapshots) Ranges() (ranges []Range) { + _ = s.Headers.View(func(segments []*HeaderSegment) error { + for _, sn := range segments { + ranges = append(ranges, sn.ranges) + } + return nil + }) + return ranges +} + func (s *RoSnapshots) ReopenSegments() error { s.Headers.lock.Lock() defer s.Headers.lock.Unlock() @@ -614,6 +625,25 @@ func (s *RoSnapshots) ViewTxs(blockNum uint64, f func(sn *TxnSegment) error) (fo return s.Txs.ViewSegment(blockNum, f) } +func buildIdx(ctx context.Context, 
sn snap.FileInfo, chainID uint256.Int, tmpDir string, lvl log.Lvl) error { + switch sn.T { + case snap.Headers: + if err := HeadersIdx(ctx, sn.Path, sn.From, tmpDir, lvl); err != nil { + return err + } + case snap.Bodies: + if err := BodiesIdx(ctx, sn.Path, sn.From, tmpDir, lvl); err != nil { + return err + } + case snap.Transactions: + dir, _ := filepath.Split(sn.Path) + if err := TransactionsIdx(ctx, chainID, sn.From, sn.To, dir, tmpDir, lvl); err != nil { + return err + } + } + return nil +} + func BuildIndices(ctx context.Context, s *RoSnapshots, chainID uint256.Int, tmpDir string, from uint64, workers int, lvl log.Lvl) error { log.Log(lvl, "[snapshots] Build indices", "from", from) logEvery := time.NewTicker(20 * time.Second) @@ -987,11 +1017,11 @@ func retireBlocks(ctx context.Context, blockFrom, blockTo uint64, chainID uint25 return fmt.Errorf("reopen: %w", err) } merger := NewMerger(tmpDir, workers, lvl, chainID, notifier) - ranges := merger.FindMergeRanges(snapshots) - if len(ranges) == 0 { + rangesToMerge := merger.FindMergeRanges(snapshots.Ranges()) + if len(rangesToMerge) == 0 { return nil } - err := merger.Merge(ctx, snapshots, ranges, snapshots.Dir(), true) + err := merger.Merge(ctx, snapshots, rangesToMerge, snapshots.Dir(), true) if err != nil { return err } @@ -1000,7 +1030,7 @@ func retireBlocks(ctx context.Context, blockFrom, blockTo uint64, chainID uint25 } var downloadRequest []DownloadRequest - for _, r := range ranges { + for _, r := range rangesToMerge { downloadRequest = append(downloadRequest, NewDownloadRequest(&r, "", "")) } @@ -1019,18 +1049,18 @@ func DumpBlocks(ctx context.Context, blockFrom, blockTo, blocksPerFile uint64, t return nil } func dumpBlocksRange(ctx context.Context, blockFrom, blockTo uint64, tmpDir, snapDir string, chainDB kv.RoDB, workers int, lvl log.Lvl) error { - segmentFile := filepath.Join(snapDir, snap.SegmentFileName(blockFrom, blockTo, snap.Headers)) - if err := DumpHeaders(ctx, chainDB, segmentFile, tmpDir, 
blockFrom, blockTo, workers, lvl); err != nil { + f, _ := snap.ParseFileName(snapDir, snap.SegmentFileName(blockFrom, blockTo, snap.Headers)) + if err := DumpHeaders(ctx, chainDB, f.Path, tmpDir, blockFrom, blockTo, workers, lvl); err != nil { return fmt.Errorf("DumpHeaders: %w", err) } - segmentFile = filepath.Join(snapDir, snap.SegmentFileName(blockFrom, blockTo, snap.Bodies)) - if err := DumpBodies(ctx, chainDB, segmentFile, tmpDir, blockFrom, blockTo, workers, lvl); err != nil { + f, _ = snap.ParseFileName(snapDir, snap.SegmentFileName(blockFrom, blockTo, snap.Bodies)) + if err := DumpBodies(ctx, chainDB, f.Path, tmpDir, blockFrom, blockTo, workers, lvl); err != nil { return fmt.Errorf("DumpBodies: %w", err) } - segmentFile = filepath.Join(snapDir, snap.SegmentFileName(blockFrom, blockTo, snap.Transactions)) - if _, err := DumpTxs(ctx, chainDB, segmentFile, tmpDir, blockFrom, blockTo, workers, lvl); err != nil { + f, _ = snap.ParseFileName(snapDir, snap.SegmentFileName(blockFrom, blockTo, snap.Transactions)) + if _, err := DumpTxs(ctx, chainDB, f.Path, tmpDir, blockFrom, blockTo, workers, lvl); err != nil { return fmt.Errorf("DumpTxs: %w", err) } @@ -1048,7 +1078,7 @@ func DumpTxs(ctx context.Context, db kv.RoDB, segmentFile, tmpDir string, blockF chainConfig := tool.ChainConfigFromDB(db) chainID, _ := uint256.FromBig(chainConfig.ChainID) - f, err := compress.NewCompressor(ctx, "Transactions", segmentFile, tmpDir, compress.MinPatternScore, workers, lvl) + f, err := compress.NewCompressor(ctx, "Snapshots Txs", segmentFile, tmpDir, compress.MinPatternScore, workers, lvl) if err != nil { return 0, fmt.Errorf("NewCompressor: %w, %s", err, segmentFile) } @@ -1222,7 +1252,7 @@ func DumpHeaders(ctx context.Context, db kv.RoDB, segmentFilePath, tmpDir string logEvery := time.NewTicker(20 * time.Second) defer logEvery.Stop() - f, err := compress.NewCompressor(ctx, "Headers", segmentFilePath, tmpDir, compress.MinPatternScore, workers, lvl) + f, err := 
compress.NewCompressor(ctx, "Snapshots Headers", segmentFilePath, tmpDir, compress.MinPatternScore, workers, lvl) if err != nil { return err } @@ -1285,7 +1315,7 @@ func DumpBodies(ctx context.Context, db kv.RoDB, segmentFilePath, tmpDir string, logEvery := time.NewTicker(20 * time.Second) defer logEvery.Stop() - f, err := compress.NewCompressor(ctx, "Bodies", segmentFilePath, tmpDir, compress.MinPatternScore, workers, lvl) + f, err := compress.NewCompressor(ctx, "Snapshots Bodies", segmentFilePath, tmpDir, compress.MinPatternScore, workers, lvl) if err != nil { return err } @@ -1715,33 +1745,35 @@ type Range struct { func (r Range) String() string { return fmt.Sprintf("%dk-%dk", r.from/1000, r.to/1000) } -func (*Merger) FindMergeRanges(snapshots *RoSnapshots) (res []Range) { - for i := len(snapshots.Headers.segments) - 1; i > 0; i-- { - sn := snapshots.Headers.segments[i] - if sn.ranges.to-sn.ranges.from >= snap.DEFAULT_SEGMENT_SIZE { // is complete .seg +func (*Merger) FindMergeRanges(currentRanges []Range) (toMerge []Range) { + for i := len(currentRanges) - 1; i > 0; i-- { + r := currentRanges[i] + if r.to-r.from >= snap.DEFAULT_SEGMENT_SIZE { // is complete .seg continue } for _, span := range []uint64{500_000, 100_000, 10_000} { - if sn.ranges.to%span != 0 { + if r.to%span != 0 { continue } - if sn.ranges.to-sn.ranges.from == span { + if r.to-r.from == span { break } - aggFrom := sn.ranges.to - span - res = append(res, Range{from: aggFrom, to: sn.ranges.to}) - for snapshots.Headers.segments[i].ranges.from > aggFrom { + aggFrom := r.to - span + toMerge = append(toMerge, Range{from: aggFrom, to: r.to}) + for currentRanges[i].from > aggFrom { i-- } break } } - slices.SortFunc(res, func(i, j Range) bool { return i.from < j.from }) - return res + slices.SortFunc(toMerge, func(i, j Range) bool { return i.from < j.from }) + return toMerge } -func (m *Merger) filesByRange(snapshots *RoSnapshots, from, to uint64) (toMergeHeaders, toMergeBodies, toMergeTxs []string, err 
error) { - err = snapshots.Headers.View(func(hSegments []*HeaderSegment) error { + +func (m *Merger) filesByRange(snapshots *RoSnapshots, from, to uint64) (map[snap.Type][]string, error) { + toMerge := map[snap.Type][]string{} + err := snapshots.Headers.View(func(hSegments []*HeaderSegment) error { return snapshots.Bodies.View(func(bSegments []*BodySegment) error { return snapshots.Txs.View(func(tSegments []*TxnSegment) error { for i, sn := range hSegments { @@ -1751,17 +1783,16 @@ func (m *Merger) filesByRange(snapshots *RoSnapshots, from, to uint64) (toMergeH if sn.ranges.to > to { break } - - toMergeHeaders = append(toMergeHeaders, hSegments[i].seg.FilePath()) - toMergeBodies = append(toMergeBodies, bSegments[i].seg.FilePath()) - toMergeTxs = append(toMergeTxs, tSegments[i].Seg.FilePath()) + toMerge[snap.Headers] = append(toMerge[snap.Headers], hSegments[i].seg.FilePath()) + toMerge[snap.Bodies] = append(toMerge[snap.Bodies], bSegments[i].seg.FilePath()) + toMerge[snap.Transactions] = append(toMerge[snap.Transactions], tSegments[i].Seg.FilePath()) } return nil }) }) }) - return + return toMerge, err } // Merge does merge segments in given ranges @@ -1773,42 +1804,18 @@ func (m *Merger) Merge(ctx context.Context, snapshots *RoSnapshots, mergeRanges defer logEvery.Stop() log.Log(m.lvl, "[snapshots] Merge segments", "ranges", fmt.Sprintf("%v", mergeRanges)) for _, r := range mergeRanges { - toMergeHeaders, toMergeBodies, toMergeTxs, err := m.filesByRange(snapshots, r.from, r.to) + toMerge, err := m.filesByRange(snapshots, r.from, r.to) if err != nil { return err } - { - segFilePath := filepath.Join(snapDir, snap.SegmentFileName(r.from, r.to, snap.Bodies)) - if err := m.merge(ctx, toMergeBodies, segFilePath, logEvery); err != nil { - return fmt.Errorf("mergeByAppendSegments: %w", err) - } - if doIndex { - if err := BodiesIdx(ctx, segFilePath, r.from, m.tmpDir, m.lvl); err != nil { - return fmt.Errorf("BodiesIdx: %w", err) - } - } - } - - { - segFilePath := 
filepath.Join(snapDir, snap.SegmentFileName(r.from, r.to, snap.Headers)) - if err := m.merge(ctx, toMergeHeaders, segFilePath, logEvery); err != nil { - return fmt.Errorf("mergeByAppendSegments: %w", err) - } - if doIndex { - if err := HeadersIdx(ctx, segFilePath, r.from, m.tmpDir, m.lvl); err != nil { - return fmt.Errorf("HeadersIdx: %w", err) - } - } - } - - { - segFilePath := filepath.Join(snapDir, snap.SegmentFileName(r.from, r.to, snap.Transactions)) - if err := m.merge(ctx, toMergeTxs, segFilePath, logEvery); err != nil { + for _, t := range snap.AllSnapshotTypes { + f, _ := snap.ParseFileName(snapDir, snap.SegmentFileName(r.from, r.to, t)) + if err := m.merge(ctx, toMerge[t], f.Path, logEvery); err != nil { return fmt.Errorf("mergeByAppendSegments: %w", err) } if doIndex { - if err := TransactionsIdx(ctx, m.chainID, r.from, r.to, snapDir, m.tmpDir, m.lvl); err != nil { - return fmt.Errorf("TransactionsIdx: %w", err) + if err := buildIdx(ctx, f, m.chainID, m.tmpDir, m.lvl); err != nil { + return err } } } @@ -1820,17 +1827,10 @@ func (m *Merger) Merge(ctx context.Context, snapshots *RoSnapshots, mergeRanges m.notifier.OnNewSnapshot() time.Sleep(1 * time.Second) // i working on blocking API - to ensure client does not use } - - if err := m.removeOldFiles(toMergeHeaders, snapDir); err != nil { - return err - } - - if err := m.removeOldFiles(toMergeBodies, snapDir); err != nil { - return err - } - - if err := m.removeOldFiles(toMergeTxs, snapDir); err != nil { - return err + for _, t := range snap.AllSnapshotTypes { + if err := m.removeOldFiles(toMerge[t], snapDir); err != nil { + return err + } } } log.Log(m.lvl, "[snapshots] Merge done", "from", mergeRanges[0].from) diff --git a/turbo/snapshotsync/block_snapshots_test.go b/turbo/snapshotsync/block_snapshots_test.go index fb1c8f8ab83..bcb75e7695a 100644 --- a/turbo/snapshotsync/block_snapshots_test.go +++ b/turbo/snapshotsync/block_snapshots_test.go @@ -77,7 +77,7 @@ func TestMergeSnapshots(t *testing.T) { { 
merger := NewMerger(dir, 1, log.LvlInfo, uint256.Int{}, nil) - ranges := merger.FindMergeRanges(s) + ranges := merger.FindMergeRanges(s.Ranges()) require.True(len(ranges) > 0) err := merger.Merge(context.Background(), s, ranges, s.Dir(), false) require.NoError(err) @@ -92,7 +92,7 @@ func TestMergeSnapshots(t *testing.T) { { merger := NewMerger(dir, 1, log.LvlInfo, uint256.Int{}, nil) - ranges := merger.FindMergeRanges(s) + ranges := merger.FindMergeRanges(s.Ranges()) require.True(len(ranges) == 0) err := merger.Merge(context.Background(), s, ranges, s.Dir(), false) require.NoError(err) From d2bbf22e3d4dc4b845d881d08a52cd7231a433ff Mon Sep 17 00:00:00 2001 From: Max Revitt Date: Fri, 22 Jul 2022 13:26:05 +0100 Subject: [PATCH 129/152] hive - run on successful CI (#4791) * feat(ci): run hive tests as part of CI * feat(ci): hive CI tidy up (#2) run hive on successful CI only run on non-draft PR only --- .github/workflows/ci.yml | 13 +++++++++-- .github/workflows/hive-results.yml | 2 +- .github/workflows/hive.yml | 35 ++++++++++++++++++++++++++++++ 3 files changed, 47 insertions(+), 3 deletions(-) create mode 100644 .github/workflows/hive.yml diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 018f9934933..26a80271b30 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,4 +1,4 @@ -name: Continuous integration +name: CI on: push: branches: @@ -10,8 +10,15 @@ on: - devel - alpha - stable + types: + - opened + - reopened + - synchronize + - ready_for_review + jobs: tests: + if: ${{ github.event_name == 'push' || !github.event.pull_request.draft }} strategy: matrix: os: [ ubuntu-20.04, macos-11 ] # list of os: https://github.com/actions/virtual-environments @@ -59,6 +66,7 @@ jobs: run: make test tests-windows: + if: ${{ github.event_name == 'push' || !github.event.pull_request.draft }} strategy: matrix: os: [ windows-2022 ] @@ -97,6 +105,7 @@ jobs: run: .\wmake.ps1 test docker: + if: ${{ github.event_name == 'push' || 
!github.event.pull_request.draft }} runs-on: ubuntu-20.04 steps: - uses: actions/checkout@v3 @@ -121,4 +130,4 @@ jobs: if: always() with: name: hive-ci-output - path: results/*.xml + path: results/*.xml diff --git a/.github/workflows/hive-results.yml b/.github/workflows/hive-results.yml index c67dc5fcfab..b23dc2ce478 100644 --- a/.github/workflows/hive-results.yml +++ b/.github/workflows/hive-results.yml @@ -2,7 +2,7 @@ name: Hive results on: workflow_run: - workflows: ["Continuous integration", "ci"] + workflows: ["Hive"] types: - completed diff --git a/.github/workflows/hive.yml b/.github/workflows/hive.yml new file mode 100644 index 00000000000..3705a1fe9ca --- /dev/null +++ b/.github/workflows/hive.yml @@ -0,0 +1,35 @@ +name: Hive +on: + workflow_run: + workflows: ["CI"] + types: + - completed + +jobs: + hive: + runs-on: ubuntu-20.04 + if: ${{ github.event.workflow_run.conclusion == 'success' }} + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 # fetch git tags for "git describe" + + - name: build erigon image + run: DOCKER_TAG=thorax/erigon:ci-$GITHUB_SHA DOCKER_UID=$(id -u) DOCKER_GID=$(id -g) make docker + + # check with root permissions, should be cached from previous build + - name: build erigon image (root permissions) + run: sudo DOCKER_TAG=thorax/erigon:ci-$GITHUB_SHA DOCKER_UID=$(id -u) DOCKER_GID=$(id -g) make docker + + - name: run hive + run: sudo mkdir /results && docker run --rm -v /var/run/docker.sock:/var/run/docker.sock -v ${{ github.workspace }}:/work gatewayfm/hive:latest --sim ethereum/engine --results-root=/work/results --client erigon_ci-$GITHUB_SHA --docker.output --loglevel 5 + + - name: parse hive output + run: docker run --rm -v /var/run/docker.sock:/var/run/docker.sock -v ${{ github.workspace }}:/work --entrypoint /app/hivecioutput gatewayfm/hive:latest --resultsdir=/work/results --outdir=/work/results + + - name: archive hive results + uses: actions/upload-artifact@v3 + if: always() + with: + name: hive-ci-output + 
path: results/*.xml From d6001225e7c9e13eb5a9ab34d12d60403c861df6 Mon Sep 17 00:00:00 2001 From: Max Revitt Date: Fri, 22 Jul 2022 13:26:18 +0100 Subject: [PATCH 130/152] feat(makefile): add documentation and coverage command (#4792) --- .gitignore | 1 + Makefile | 35 ++++++++++++++++++++++++++++++++--- README.md | 2 ++ 3 files changed, 35 insertions(+), 3 deletions(-) diff --git a/.gitignore b/.gitignore index c86d201a6e4..1e70d76a79c 100644 --- a/.gitignore +++ b/.gitignore @@ -76,4 +76,5 @@ go.work docker-compose.*.yml .env +coverage.out diff --git a/Makefile b/Makefile index 061abf375a5..7866cf895a1 100644 --- a/Makefile +++ b/Makefile @@ -33,12 +33,14 @@ GOTEST = GODEBUG=cgocheck=0 $(GO) test $(GO_FLAGS) ./... -p 2 default: all +## go-version: print and verify go version go-version: @if [ $(shell $(GO) version | cut -c 16-17) -lt 18 ]; then \ echo "minimum required Golang version is 1.18"; \ exit 1 ;\ fi +## validate_docker_build_args: ensure docker build args are valid validate_docker_build_args: @echo "Docker build args:" @echo " DOCKER_UID: $(DOCKER_UID)" @@ -51,6 +53,7 @@ validate_docker_build_args: fi @echo "✔️ host OS user exists: $(shell id -nu $(DOCKER_UID))" +## docker: validate, update submodules and build with docker docker: validate_docker_build_args git-submodules DOCKER_BUILDKIT=1 $(DOCKER) build -t ${DOCKER_TAG} \ --build-arg "BUILD_DATE=$(shell date -Iseconds)" \ @@ -67,16 +70,18 @@ ifdef XDG_DATA_HOME endif xdg_data_home_subdirs = $(xdg_data_home)/erigon $(xdg_data_home)/erigon-grafana $(xdg_data_home)/erigon-prometheus +## setup_xdg_data_home: TODO setup_xdg_data_home: mkdir -p $(xdg_data_home_subdirs) ls -aln $(xdg_data_home) | grep -E "472.*0.*erigon-grafana" || sudo chown -R 472:0 $(xdg_data_home)/erigon-grafana @echo "✔️ xdg_data_home setup" @ls -al $(xdg_data_home) +## docker-compose: validate build args, setup xdg data home, and run docker-compose up docker-compose: validate_docker_build_args setup_xdg_data_home docker-compose up -# 
debug build allows see C stack traces, run it with GOTRACEBACK=crash. You don't need debug build for C pit for profiling. To profile C code use SETCGOTRCKEBACK=1 +## dbg debug build allows see C stack traces, run it with GOTRACEBACK=crash. You don't need debug build for C pit for profiling. To profile C code use SETCGOTRCKEBACK=1 dbg: $(GO_DBG_BUILD) -o $(GOBIN)/ ./cmd/... @@ -86,8 +91,10 @@ dbg: @cd ./cmd/$* && $(GOBUILD) -o $(GOBIN)/$* @echo "Run \"$(GOBIN)/$*\" to launch $*." +## geth: run erigon (TODO: remove?) geth: erigon +## erigon: build erigon erigon: go-version erigon.cmd @rm -f $(GOBIN)/tg # Remove old binary to prevent confusion where users still use it because of the scripts @@ -108,8 +115,10 @@ COMMANDS += txpool # build each command using %.cmd rule $(COMMANDS): %: %.cmd +## all: run erigon with all commands all: erigon $(COMMANDS) +## db-tools: build db tools db-tools: git-submodules @echo "Building db-tools" @@ -126,23 +135,29 @@ db-tools: git-submodules cp libmdbx/mdbx_stat $(GOBIN) @echo "Run \"$(GOBIN)/mdbx_stat -h\" to get info about mdbx db file." 
+## test: run unit tests with a 50s timeout test: $(GOTEST) --timeout 50s +## test-integration: run integration tests with a 30m timeout test-integration: $(GOTEST) --timeout 30m -tags $(BUILD_TAGS),integration +## lint: run golangci-lint with .golangci.yml config file lint: @./build/bin/golangci-lint run --config ./.golangci.yml +## lintci: run golangci-lint (additionally outputs message before run) lintci: @echo "--> Running linter for code" @./build/bin/golangci-lint run --config ./.golangci.yml +## lintci-deps: (re)installs golangci-lint to build/bin/golangci-lint lintci-deps: rm -f ./build/bin/golangci-lint curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b ./build/bin v1.47.0 +## clean: cleans the go cache, build dir, libmdbx db dir clean: go clean -cache rm -fr build/* @@ -151,6 +166,7 @@ clean: # The devtools target installs tools required for 'go generate'. # You need to put $GOBIN (or $GOPATH/bin) in your PATH to use 'go generate'. +## devtools: installs dev tools (and checks for npm installation etc.) devtools: # Notice! If you adding new binary - add it also to cmd/hack/binary-deps/main.go file $(GOBUILD) -o $(GOBIN)/go-bindata github.com/kevinburke/go-bindata/go-bindata @@ -165,16 +181,20 @@ devtools: @type "solc" 2> /dev/null || echo 'Please install solc' @type "protoc" 2> /dev/null || echo 'Please install protoc' +## bindings: generate test contracts and core contracts bindings: PATH=$(GOBIN):$(PATH) go generate ./tests/contracts/ PATH=$(GOBIN):$(PATH) go generate ./core/state/contracts/ +## prometheus: run prometheus and grafana with docker-compose prometheus: docker-compose up prometheus grafana +## escape: run escape path={path} to check for memory leaks e.g. 
run escape path=cmd/erigon escape: cd $(path) && go test -gcflags "-m -m" -run none -bench=BenchmarkJumpdest* -benchmem -memprofile mem.out +## git-submodules: update git submodules git-submodules: @[ -d ".git" ] || (echo "Not a git repository" && exit 1) @echo "Updating git submodules" @@ -189,7 +209,7 @@ ERIGON_USER_UID ?= 3473 ERIGON_USER_GID ?= 3473 ERIGON_USER_XDG_DATA_HOME ?= ~$(ERIGON_USER)/.local/share -# create "erigon" user +## user_linux: create "erigon" user (Linux) user_linux: ifdef DOCKER sudo groupadd -f docker @@ -203,7 +223,7 @@ ifdef DOCKER endif sudo -u $(ERIGON_USER) mkdir -p $(ERIGON_USER_XDG_DATA_HOME) -# create "erigon" user +## user_macos: create "erigon" user (MacOS) user_macos: sudo dscl . -create /Users/$(ERIGON_USER) sudo dscl . -create /Users/$(ERIGON_USER) UserShell /bin/bash @@ -212,3 +232,12 @@ user_macos: sudo dscl . -create /Users/$(ERIGON_USER) NFSHomeDirectory /Users/$(ERIGON_USER) sudo dscl . -append /Groups/admin GroupMembership $(ERIGON_USER) sudo -u $(ERIGON_USER) mkdir -p $(ERIGON_USER_XDG_DATA_HOME) + +## coverage: run code coverage report and output total coverage % +coverage: + @go test -coverprofile=coverage.out ./... > /dev/null 2>&1 && go tool cover -func coverage.out | grep total | awk '{print substr($$3, 1, length($$3)-1)}' + +## help: print commands help +help : Makefile + @sed -n 's/^##//p' $< + diff --git a/README.md b/README.md index 5a85d03e55a..5fdff12f27a 100644 --- a/README.md +++ b/README.md @@ -77,6 +77,8 @@ Use `--datadir` to choose where to store data. Use `--chain=bor-mainnet` for Polygon Mainnet and `--chain=mumbai` for Polygon Mumbai. +Running `make help` will list and describe the convenience commands available in the [Makefile](./Makefile) + ### Modularity Erigon by default is "all in one binary" solution, but it's possible start TxPool as separated processes. 
From 37ba45a627a5c2d905557d068726e33d6b1e93ea Mon Sep 17 00:00:00 2001 From: Max Revitt Date: Fri, 22 Jul 2022 16:09:50 +0100 Subject: [PATCH 131/152] feat(ci): badges for hive and code coverage on devel (#4793) * feat(ci): badges for hive and code coverage on devel * feat(ci): hive CI tidy up (#2) run hive on successful CI only run on non-draft PR only --- .github/workflows/ci.yml | 36 ++++++++++++++++++- .github/workflows/coverage.yml | 55 ++++++++++++++++++++++++++++++ .github/workflows/hive-results.yml | 46 ++++++++++++++++++++++--- .github/workflows/hive.yml | 35 ------------------- README.md | 4 +++ 5 files changed, 136 insertions(+), 40 deletions(-) create mode 100644 .github/workflows/coverage.yml delete mode 100644 .github/workflows/hive.yml diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 26a80271b30..9781d2e3441 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -119,6 +119,25 @@ jobs: - name: sudo make docker run: sudo DOCKER_TAG=thorax/erigon:ci-$GITHUB_SHA DOCKER_UID=$(id -u) DOCKER_GID=$(id -g) make docker + hive: + needs: + - tests + - tests-windows + - docker + runs-on: ubuntu-20.04 + if: ${{ github.event_name == 'push' || !github.event.pull_request.draft }} + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 # fetch git tags for "git describe" + + - name: build erigon image + run: DOCKER_TAG=thorax/erigon:ci-$GITHUB_SHA DOCKER_UID=$(id -u) DOCKER_GID=$(id -g) make docker + + # check with root permissions, should be cached from previous build + - name: build erigon image (root permissions) + run: sudo DOCKER_TAG=thorax/erigon:ci-$GITHUB_SHA DOCKER_UID=$(id -u) DOCKER_GID=$(id -g) make docker + - name: run hive run: sudo mkdir /results && docker run --rm -v /var/run/docker.sock:/var/run/docker.sock -v ${{ github.workspace }}:/work gatewayfm/hive:latest --sim ethereum/engine --results-root=/work/results --client erigon_ci-$GITHUB_SHA --docker.output --loglevel 5 @@ -130,4 +149,19 @@ jobs: if: 
always() with: name: hive-ci-output - path: results/*.xml + path: results/*.xml + + event_file: + needs: + - tests + - tests-windows + - docker + name: archive event file + runs-on: ubuntu-latest + if: ${{ github.event_name == 'push' || !github.event.pull_request.draft }} + steps: + - name: upload + uses: actions/upload-artifact@v2 + with: + name: event file + path: ${{ github.event_path }} diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml new file mode 100644 index 00000000000..72deaea1d53 --- /dev/null +++ b/.github/workflows/coverage.yml @@ -0,0 +1,55 @@ +name: Coverage +on: + push: + branches: + - devel + +jobs: + coverage: + runs-on: ubuntu-20.04 + + steps: + - uses: actions/checkout@v3 + - run: git submodule update --init --recursive --force + + - uses: actions/setup-go@v3 + with: + go-version: 1.18.x + + - name: install dependencies on Linux + if: runner.os == 'Linux' + run: sudo apt update && sudo apt install build-essential + + - name: run coverage + run: echo "COVERAGE=$(make coverage)" >> $GITHUB_ENV + + - name: set badge color + shell: bash + run: | + if [ ${{ env.COVERAGE }} -lt 40 ] + then + echo "BADGE_COLOR=800000" >> $GITHUB_ENV + elif [ ${{ env.COVERAGE }} -lt 75 ] + then + echo "BADGE_COLOR=696969" >> $GITHUB_ENV + else + echo "BADGE_COLOR=31c653" >> $GITHUB_ENV + fi + + - name: create badge + uses: emibcn/badge-action@d6f51ff11b5c3382b3b88689ae2d6db22d9737d1 + with: + label: Coverage + status: ${{ env.COVERAGE }} + color: ${{ env.BADGE_COLOR }} + path: badge.svg + + - name: upload badge to gist + if: > + github.event_name == 'workflow_run' && github.event.workflow_run.head_branch == 'devel' || + github.event_name != 'workflow_run' && github.ref == 'refs/heads/devel' + uses: andymckay/append-gist-action@1fbfbbce708a39bd45846f0955ed5521f2099c6d + with: + token: ${{ secrets.GIST_TOKEN }} + gistURL: https://gist.githubusercontent.com/revittm/ee38e9beb22353eef6b88f2ad6ed7aa9 + file: badge.svg \ No newline at end of file 
diff --git a/.github/workflows/hive-results.yml b/.github/workflows/hive-results.yml index b23dc2ce478..8147393e090 100644 --- a/.github/workflows/hive-results.yml +++ b/.github/workflows/hive-results.yml @@ -2,7 +2,7 @@ name: Hive results on: workflow_run: - workflows: ["Hive"] + workflows: ["CI"] types: - completed @@ -12,8 +12,13 @@ jobs: runs-on: ubuntu-latest if: github.event.workflow_run.conclusion != 'skipped' + permissions: + checks: write + pull-requests: write+ + actions: read + steps: - - name: Download and extract artifacts + - name: download and extract artifacts env: GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} run: | @@ -28,10 +33,43 @@ jobs: unzip -d "$name" "$name.zip" done - - name: Publish hive test results + - name: publish hive test results uses: EnricoMi/publish-unit-test-result-action@v1 with: commit: ${{ github.event.workflow_run.head_sha }} event_file: artifacts/Event File/event.json event_name: ${{ github.event.workflow_run.event }} - files: "artifacts/**/*.xml" \ No newline at end of file + files: "artifacts/**/*.xml" + + - name: set badge color + shell: bash + run: | + case ${{ fromJSON( steps.test-results.outputs.json ).conclusion }} in + success) + echo "BADGE_COLOR=31c653" >> $GITHUB_ENV + ;; + failure) + echo "BADGE_COLOR=800000" >> $GITHUB_ENV + ;; + neutral) + echo "BADGE_COLOR=696969" >> $GITHUB_ENV + ;; + esac + + - name: create badge + uses: emibcn/badge-action@d6f51ff11b5c3382b3b88689ae2d6db22d9737d1 + with: + label: Hive + status: '${{ fromJSON( steps.test-results.outputs.json ).formatted.stats.tests }} tests, ${{ fromJSON( steps.test-results.outputs.json ).formatted.stats.runs }} runs: ${{ fromJSON( steps.test-results.outputs.json ).conclusion }}' + color: ${{ env.BADGE_COLOR }} + path: badge.svg + + - name: upload badge to gist + if: > + github.event_name == 'workflow_run' && github.event.workflow_run.head_branch == 'devel' || + github.event_name != 'workflow_run' && github.ref == 'refs/heads/devel' + uses: 
andymckay/append-gist-action@1fbfbbce708a39bd45846f0955ed5521f2099c6d + with: + token: ${{ secrets.GIST_TOKEN }} + gistURL: https://gist.githubusercontent.com/revittm/dc492845ba6eb694e6c7279224634b20 + file: badge.svg \ No newline at end of file diff --git a/.github/workflows/hive.yml b/.github/workflows/hive.yml deleted file mode 100644 index 3705a1fe9ca..00000000000 --- a/.github/workflows/hive.yml +++ /dev/null @@ -1,35 +0,0 @@ -name: Hive -on: - workflow_run: - workflows: ["CI"] - types: - - completed - -jobs: - hive: - runs-on: ubuntu-20.04 - if: ${{ github.event.workflow_run.conclusion == 'success' }} - steps: - - uses: actions/checkout@v3 - with: - fetch-depth: 0 # fetch git tags for "git describe" - - - name: build erigon image - run: DOCKER_TAG=thorax/erigon:ci-$GITHUB_SHA DOCKER_UID=$(id -u) DOCKER_GID=$(id -g) make docker - - # check with root permissions, should be cached from previous build - - name: build erigon image (root permissions) - run: sudo DOCKER_TAG=thorax/erigon:ci-$GITHUB_SHA DOCKER_UID=$(id -u) DOCKER_GID=$(id -g) make docker - - - name: run hive - run: sudo mkdir /results && docker run --rm -v /var/run/docker.sock:/var/run/docker.sock -v ${{ github.workspace }}:/work gatewayfm/hive:latest --sim ethereum/engine --results-root=/work/results --client erigon_ci-$GITHUB_SHA --docker.output --loglevel 5 - - - name: parse hive output - run: docker run --rm -v /var/run/docker.sock:/var/run/docker.sock -v ${{ github.workspace }}:/work --entrypoint /app/hivecioutput gatewayfm/hive:latest --resultsdir=/work/results --outdir=/work/results - - - name: archive hive results - uses: actions/upload-artifact@v3 - if: always() - with: - name: hive-ci-output - path: results/*.xml diff --git a/README.md b/README.md index 5fdff12f27a..760f5fdb034 100644 --- a/README.md +++ b/README.md @@ -4,6 +4,10 @@ Erigon is an implementation of Ethereum (aka "Ethereum client"), on the efficien ![Build 
status](https://github.com/ledgerwatch/erigon/actions/workflows/ci.yml/badge.svg) +![Coverage](https://gist.githubusercontent.com/revittm/ee38e9beb22353eef6b88f2ad6ed7aa9/raw/19f7838ede42d896370aff17753346a01fc5d4ad/badge.svg) + +![Hive](https://gist.githubusercontent.com/revittm/dc492845ba6eb694e6c7279224634b20/raw/51f5a73aa31c5ae2f199969321161bcb9abc5c10/badge.svg) + - [System Requirements](#system-requirements) From 0d979e18aa996c5df728810c0125b4e674407d16 Mon Sep 17 00:00:00 2001 From: Max Revitt Date: Fri, 22 Jul 2022 16:18:45 +0100 Subject: [PATCH 132/152] fix(readme): latest badge.svg gist links (#4795) --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 760f5fdb034..9f2810c1a1c 100644 --- a/README.md +++ b/README.md @@ -4,9 +4,9 @@ Erigon is an implementation of Ethereum (aka "Ethereum client"), on the efficien ![Build status](https://github.com/ledgerwatch/erigon/actions/workflows/ci.yml/badge.svg) -![Coverage](https://gist.githubusercontent.com/revittm/ee38e9beb22353eef6b88f2ad6ed7aa9/raw/19f7838ede42d896370aff17753346a01fc5d4ad/badge.svg) +![Coverage](https://gist.githubusercontent.com/revittm/ee38e9beb22353eef6b88f2ad6ed7aa9/raw/badge.svg) -![Hive](https://gist.githubusercontent.com/revittm/dc492845ba6eb694e6c7279224634b20/raw/51f5a73aa31c5ae2f199969321161bcb9abc5c10/badge.svg) +![Hive](https://gist.githubusercontent.com/revittm/dc492845ba6eb694e6c7279224634b20/raw/badge.svg) From 01641e3900b21460a018c7a4542e72d4303400a9 Mon Sep 17 00:00:00 2001 From: Max Revitt Date: Fri, 22 Jul 2022 17:05:08 +0100 Subject: [PATCH 133/152] fix(ci): hive results workflow syntax (#4796) --- .github/workflows/hive-results.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/hive-results.yml b/.github/workflows/hive-results.yml index 8147393e090..d6191e38449 100644 --- a/.github/workflows/hive-results.yml +++ b/.github/workflows/hive-results.yml @@ -14,7 +14,7 @@ jobs: 
permissions: checks: write - pull-requests: write+ + pull-requests: write actions: read steps: From dd4bae789b990ed315ea6c607a8c910600f87337 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Sat, 23 Jul 2022 09:33:45 +0700 Subject: [PATCH 134/152] Pool: parse rlp chain id for non-legacy transactions --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index f96580dbf77..cb13e6b794f 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/ledgerwatch/erigon go 1.18 require ( - github.com/ledgerwatch/erigon-lib v0.0.0-20220721212928-1331bb661a22 + github.com/ledgerwatch/erigon-lib v0.0.0-20220723021732-85f70d75cea8 github.com/ledgerwatch/erigon-snapshot v1.0.0 github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 diff --git a/go.sum b/go.sum index 3447e0747c9..716a031501d 100644 --- a/go.sum +++ b/go.sum @@ -390,8 +390,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3PYPwICLl+/9oulQauOuETfgFvhBDffs0= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220721212928-1331bb661a22 h1:cGxOEtCnkVzX+RcGQbQHiDuV8dQHnGqcwTFl9q8Hnkg= -github.com/ledgerwatch/erigon-lib v0.0.0-20220721212928-1331bb661a22/go.mod h1:mq8M03qcnaqXZ/yjNuWoyZQ5V8r5JbXw5JYmy4WNUZQ= +github.com/ledgerwatch/erigon-lib v0.0.0-20220723021732-85f70d75cea8 h1:rP2lzEZfJsakfZGDr7yaNzuuLZ7zDZw6LMEU/cTl+8o= +github.com/ledgerwatch/erigon-lib v0.0.0-20220723021732-85f70d75cea8/go.mod h1:mq8M03qcnaqXZ/yjNuWoyZQ5V8r5JbXw5JYmy4WNUZQ= github.com/ledgerwatch/erigon-snapshot v1.0.0 h1:bp/7xoPdM5lK7LFdqEMH008RZmqxMZV0RUVEQiWs7v4= github.com/ledgerwatch/erigon-snapshot v1.0.0/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= 
github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= From 6a759a34f81ae9b551022d963630149fca7d4a78 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Sat, 23 Jul 2022 10:06:35 +0700 Subject: [PATCH 135/152] pool: metrics (#4800) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index cb13e6b794f..56c600a4d8b 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/ledgerwatch/erigon go 1.18 require ( - github.com/ledgerwatch/erigon-lib v0.0.0-20220723021732-85f70d75cea8 + github.com/ledgerwatch/erigon-lib v0.0.0-20220723030450-59aa1c78c72f github.com/ledgerwatch/erigon-snapshot v1.0.0 github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 diff --git a/go.sum b/go.sum index 716a031501d..8d9b8062a76 100644 --- a/go.sum +++ b/go.sum @@ -390,8 +390,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3PYPwICLl+/9oulQauOuETfgFvhBDffs0= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220723021732-85f70d75cea8 h1:rP2lzEZfJsakfZGDr7yaNzuuLZ7zDZw6LMEU/cTl+8o= -github.com/ledgerwatch/erigon-lib v0.0.0-20220723021732-85f70d75cea8/go.mod h1:mq8M03qcnaqXZ/yjNuWoyZQ5V8r5JbXw5JYmy4WNUZQ= +github.com/ledgerwatch/erigon-lib v0.0.0-20220723030450-59aa1c78c72f h1:fLIc9erXDu+CmtjZdiDX3cfNMIfhkolkKop+LQFwW2c= +github.com/ledgerwatch/erigon-lib v0.0.0-20220723030450-59aa1c78c72f/go.mod h1:mq8M03qcnaqXZ/yjNuWoyZQ5V8r5JbXw5JYmy4WNUZQ= github.com/ledgerwatch/erigon-snapshot v1.0.0 h1:bp/7xoPdM5lK7LFdqEMH008RZmqxMZV0RUVEQiWs7v4= github.com/ledgerwatch/erigon-snapshot v1.0.0/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= github.com/ledgerwatch/log/v3 v3.4.1 
h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= From 62873649d9764f184a511f0dcd73a6a131bd1f7f Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Sat, 23 Jul 2022 10:09:24 +0700 Subject: [PATCH 136/152] lint up 47.2 (#4801) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 7866cf895a1..c666796d954 100644 --- a/Makefile +++ b/Makefile @@ -155,7 +155,7 @@ lintci: ## lintci-deps: (re)installs golangci-lint to build/bin/golangci-lint lintci-deps: rm -f ./build/bin/golangci-lint - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b ./build/bin v1.47.0 + curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b ./build/bin v1.47.2 ## clean: cleans the go cache, build dir, libmdbx db dir clean: From 95f0338ddb0e8f0f05be31f11551b4c5dc302ed8 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Sat, 23 Jul 2022 10:13:13 +0700 Subject: [PATCH 137/152] Pool: parse rlp chain id for non-legacy transactions #4802 --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 56c600a4d8b..c733dbefb05 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/ledgerwatch/erigon go 1.18 require ( - github.com/ledgerwatch/erigon-lib v0.0.0-20220723030450-59aa1c78c72f + github.com/ledgerwatch/erigon-lib v0.0.0-20220723031125-6f7794e88b5e github.com/ledgerwatch/erigon-snapshot v1.0.0 github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 diff --git a/go.sum b/go.sum index 8d9b8062a76..38d9a79d686 100644 --- a/go.sum +++ b/go.sum @@ -390,8 +390,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3PYPwICLl+/9oulQauOuETfgFvhBDffs0= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod 
h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220723030450-59aa1c78c72f h1:fLIc9erXDu+CmtjZdiDX3cfNMIfhkolkKop+LQFwW2c= -github.com/ledgerwatch/erigon-lib v0.0.0-20220723030450-59aa1c78c72f/go.mod h1:mq8M03qcnaqXZ/yjNuWoyZQ5V8r5JbXw5JYmy4WNUZQ= +github.com/ledgerwatch/erigon-lib v0.0.0-20220723031125-6f7794e88b5e h1:4tZnz9FCTIalm6VtGXBZX713Y+lcHqpMK6L3wP7OSHY= +github.com/ledgerwatch/erigon-lib v0.0.0-20220723031125-6f7794e88b5e/go.mod h1:mq8M03qcnaqXZ/yjNuWoyZQ5V8r5JbXw5JYmy4WNUZQ= github.com/ledgerwatch/erigon-snapshot v1.0.0 h1:bp/7xoPdM5lK7LFdqEMH008RZmqxMZV0RUVEQiWs7v4= github.com/ledgerwatch/erigon-snapshot v1.0.0/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= From 68e35417fc134022edada387796201af36bfe77f Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Sat, 23 Jul 2022 11:09:16 +0700 Subject: [PATCH 138/152] RetireBlocks: encapsulate delete logic --- core/rawdb/accessors_chain.go | 65 ++++++++++++++++++++++++ eth/stagedsync/stage.go | 71 --------------------------- eth/stagedsync/stage_execute.go | 10 ++-- eth/stagedsync/stage_senders.go | 24 +++------ turbo/app/snapshots.go | 39 ++++++--------- turbo/snapshotsync/block_snapshots.go | 20 ++++++++ 6 files changed, 113 insertions(+), 116 deletions(-) diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go index f27f11fc96e..b4403770621 100644 --- a/core/rawdb/accessors_chain.go +++ b/core/rawdb/accessors_chain.go @@ -25,6 +25,7 @@ import ( "math/big" "time" + common2 "github.com/ledgerwatch/erigon-lib/common" libcommon "github.com/ledgerwatch/erigon-lib/common/cmp" "github.com/ledgerwatch/erigon-lib/common/dbg" "github.com/ledgerwatch/erigon-lib/kv" @@ -1643,3 +1644,67 @@ func WriteSnapshots(tx kv.RwTx, list map[string]string) error { } return nil } + +// PruneTable has `limit` parameter to avoid too large data deletes per one sync cycle - better delete 
by small portions to reduce db.FreeList size +func PruneTable(tx kv.RwTx, table string, pruneTo uint64, ctx context.Context, limit int) error { + c, err := tx.RwCursor(table) + + if err != nil { + return fmt.Errorf("failed to create cursor for pruning %w", err) + } + defer c.Close() + + i := 0 + for k, _, err := c.First(); k != nil; k, _, err = c.Next() { + if err != nil { + return err + } + i++ + if i > limit { + break + } + + blockNum := binary.BigEndian.Uint64(k) + if blockNum >= pruneTo { + break + } + select { + case <-ctx.Done(): + return common2.ErrStopped + default: + } + if err = c.DeleteCurrent(); err != nil { + return fmt.Errorf("failed to remove for block %d: %w", blockNum, err) + } + } + return nil +} + +func PruneTableDupSort(tx kv.RwTx, table string, logPrefix string, pruneTo uint64, logEvery *time.Ticker, ctx context.Context) error { + c, err := tx.RwCursorDupSort(table) + if err != nil { + return fmt.Errorf("failed to create cursor for pruning %w", err) + } + defer c.Close() + + for k, _, err := c.First(); k != nil; k, _, err = c.NextNoDup() { + if err != nil { + return fmt.Errorf("failed to move %s cleanup cursor: %w", table, err) + } + blockNum := binary.BigEndian.Uint64(k) + if blockNum >= pruneTo { + break + } + select { + case <-logEvery.C: + log.Info(fmt.Sprintf("[%s]", logPrefix), "table", table, "block", blockNum) + case <-ctx.Done(): + return common2.ErrStopped + default: + } + if err = c.DeleteCurrentDuplicates(); err != nil { + return fmt.Errorf("failed to remove for block %d: %w", blockNum, err) + } + } + return nil +} diff --git a/eth/stagedsync/stage.go b/eth/stagedsync/stage.go index 5224e3c80c5..3565fcdb7ca 100644 --- a/eth/stagedsync/stage.go +++ b/eth/stagedsync/stage.go @@ -1,16 +1,9 @@ package stagedsync import ( - "context" - "encoding/binary" - "fmt" - "time" - - libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/common" 
"github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/log/v3" ) // ExecFunc is the execution function for the stage to move forward. @@ -108,67 +101,3 @@ func (s *PruneState) Done(db kv.Putter) error { func (s *PruneState) DoneAt(db kv.Putter, blockNum uint64) error { return stages.SaveStagePruneProgress(db, s.ID, blockNum) } - -// PruneTable has `limit` parameter to avoid too large data deletes per one sync cycle - better delete by small portions to reduce db.FreeList size -func PruneTable(tx kv.RwTx, table string, pruneTo uint64, ctx context.Context, limit int) error { - c, err := tx.RwCursor(table) - - if err != nil { - return fmt.Errorf("failed to create cursor for pruning %w", err) - } - defer c.Close() - - i := 0 - for k, _, err := c.First(); k != nil; k, _, err = c.Next() { - if err != nil { - return err - } - i++ - if i > limit { - break - } - - blockNum := binary.BigEndian.Uint64(k) - if blockNum >= pruneTo { - break - } - select { - case <-ctx.Done(): - return libcommon.ErrStopped - default: - } - if err = c.DeleteCurrent(); err != nil { - return fmt.Errorf("failed to remove for block %d: %w", blockNum, err) - } - } - return nil -} - -func PruneTableDupSort(tx kv.RwTx, table string, logPrefix string, pruneTo uint64, logEvery *time.Ticker, ctx context.Context) error { - c, err := tx.RwCursorDupSort(table) - if err != nil { - return fmt.Errorf("failed to create cursor for pruning %w", err) - } - defer c.Close() - - for k, _, err := c.First(); k != nil; k, _, err = c.NextNoDup() { - if err != nil { - return fmt.Errorf("failed to move %s cleanup cursor: %w", table, err) - } - blockNum := binary.BigEndian.Uint64(k) - if blockNum >= pruneTo { - break - } - select { - case <-logEvery.C: - log.Info(fmt.Sprintf("[%s]", logPrefix), "table", table, "block", blockNum) - case <-ctx.Done(): - return libcommon.ErrStopped - default: - } - if err = c.DeleteCurrentDuplicates(); err != nil { - return fmt.Errorf("failed to remove for block %d: %w", 
blockNum, err) - } - } - return nil -} diff --git a/eth/stagedsync/stage_execute.go b/eth/stagedsync/stage_execute.go index 0bb8ace725d..47856806474 100644 --- a/eth/stagedsync/stage_execute.go +++ b/eth/stagedsync/stage_execute.go @@ -599,25 +599,25 @@ func PruneExecutionStage(s *PruneState, tx kv.RwTx, cfg ExecuteBlockCfg, ctx con defer logEvery.Stop() if cfg.prune.History.Enabled() { - if err = PruneTableDupSort(tx, kv.AccountChangeSet, logPrefix, cfg.prune.History.PruneTo(s.ForwardProgress), logEvery, ctx); err != nil { + if err = rawdb.PruneTableDupSort(tx, kv.AccountChangeSet, logPrefix, cfg.prune.History.PruneTo(s.ForwardProgress), logEvery, ctx); err != nil { return err } - if err = PruneTableDupSort(tx, kv.StorageChangeSet, logPrefix, cfg.prune.History.PruneTo(s.ForwardProgress), logEvery, ctx); err != nil { + if err = rawdb.PruneTableDupSort(tx, kv.StorageChangeSet, logPrefix, cfg.prune.History.PruneTo(s.ForwardProgress), logEvery, ctx); err != nil { return err } } if cfg.prune.Receipts.Enabled() { - if err = PruneTable(tx, kv.Receipts, cfg.prune.Receipts.PruneTo(s.ForwardProgress), ctx, math.MaxInt32); err != nil { + if err = rawdb.PruneTable(tx, kv.Receipts, cfg.prune.Receipts.PruneTo(s.ForwardProgress), ctx, math.MaxInt32); err != nil { return err } // LogIndex.Prune will read everything what not pruned here - if err = PruneTable(tx, kv.Log, cfg.prune.Receipts.PruneTo(s.ForwardProgress), ctx, math.MaxInt32); err != nil { + if err = rawdb.PruneTable(tx, kv.Log, cfg.prune.Receipts.PruneTo(s.ForwardProgress), ctx, math.MaxInt32); err != nil { return err } } if cfg.prune.CallTraces.Enabled() { - if err = PruneTableDupSort(tx, kv.CallTraceSet, logPrefix, cfg.prune.CallTraces.PruneTo(s.ForwardProgress), logEvery, ctx); err != nil { + if err = rawdb.PruneTableDupSort(tx, kv.CallTraceSet, logPrefix, cfg.prune.CallTraces.PruneTo(s.ForwardProgress), logEvery, ctx); err != nil { return err } } diff --git a/eth/stagedsync/stage_senders.go 
b/eth/stagedsync/stage_senders.go index 0a9db4af808..a3f6b4ee47d 100644 --- a/eth/stagedsync/stage_senders.go +++ b/eth/stagedsync/stage_senders.go @@ -386,26 +386,18 @@ func PruneSendersStage(s *PruneState, tx kv.RwTx, cfg SendersCfg, ctx context.Co defer tx.Rollback() } + sn := cfg.blockRetire.Snapshots() // With snapsync - can prune old data only after snapshot for this data created: CanDeleteTo() - if cfg.blockRetire.Snapshots() != nil && cfg.blockRetire.Snapshots().Cfg().Enabled { - if cfg.blockRetire.Snapshots().Cfg().Produce { - if !cfg.blockRetire.Snapshots().Cfg().KeepBlocks { - canDeleteTo := snapshotsync.CanDeleteTo(s.ForwardProgress, cfg.blockRetire.Snapshots()) - if _, _, err := rawdb.DeleteAncientBlocks(tx, canDeleteTo, 100); err != nil { - return nil - } - if err = PruneTable(tx, kv.Senders, canDeleteTo, ctx, 100); err != nil { - return err - } - } - - if err := retireBlocksInSingleBackgroundThread(s, cfg, ctx); err != nil { - return fmt.Errorf("retireBlocksInSingleBackgroundThread: %w", err) - } + if sn != nil && sn.Cfg().Enabled && sn.Cfg().Produce { + if err := cfg.blockRetire.PruneAncientBlocks(tx); err != nil { + return err + } + if err := retireBlocksInSingleBackgroundThread(s, cfg, ctx); err != nil { + return fmt.Errorf("retireBlocksInSingleBackgroundThread: %w", err) } } else if cfg.prune.TxIndex.Enabled() { to := cfg.prune.TxIndex.PruneTo(s.ForwardProgress) - if err = PruneTable(tx, kv.Senders, to, ctx, 1_000); err != nil { + if err = rawdb.PruneTable(tx, kv.Senders, to, ctx, 1_000); err != nil { return err } } diff --git a/turbo/app/snapshots.go b/turbo/app/snapshots.go index 13a1155c629..92cda81a06f 100644 --- a/turbo/app/snapshots.go +++ b/turbo/app/snapshots.go @@ -21,9 +21,7 @@ import ( "github.com/ledgerwatch/erigon-lib/kv/mdbx" "github.com/ledgerwatch/erigon/cmd/hack/tool" "github.com/ledgerwatch/erigon/cmd/utils" - "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/eth/ethconfig" - 
"github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/internal/debug" "github.com/ledgerwatch/erigon/node/nodecfg/datadir" "github.com/ledgerwatch/erigon/params" @@ -234,8 +232,8 @@ func doRetireCommand(cliCtx *cli.Context) error { to := cliCtx.Uint64(SnapshotToFlag.Name) every := cliCtx.Uint64(SnapshotEveryFlag.Name) - chainDB := mdbx.NewMDBX(log.New()).Label(kv.ChainDB).Path(dirs.Chaindata).MustOpen() - defer chainDB.Close() + db := mdbx.NewMDBX(log.New()).Label(kv.ChainDB).Path(dirs.Chaindata).MustOpen() + defer db.Close() cfg := ethconfig.NewSnapCfg(true, true, true) snapshots := snapshotsync.NewRoSnapshots(cfg, dirs.Snap) @@ -244,21 +242,17 @@ func doRetireCommand(cliCtx *cli.Context) error { } workers := cmp.Max(1, runtime.GOMAXPROCS(-1)-1) - br := snapshotsync.NewBlockRetire(workers, dirs.Tmp, snapshots, chainDB, nil, nil) + br := snapshotsync.NewBlockRetire(workers, dirs.Tmp, snapshots, db, nil, nil) log.Info("Params", "from", from, "to", to, "every", every) for i := from; i < to; i += every { if err := br.RetireBlocks(ctx, i, i+every, log.LvlInfo); err != nil { panic(err) } - if err := chainDB.Update(ctx, func(tx kv.RwTx) error { - progress, _ := stages.GetStageProgress(tx, stages.Headers) - canDeleteTo := snapshotsync.CanDeleteTo(progress, br.Snapshots()) - deletedFrom, deletedTo, err := rawdb.DeleteAncientBlocks(tx, canDeleteTo, 100) - if err != nil { - return nil + if err := db.Update(ctx, func(tx kv.RwTx) error { + if err := br.PruneAncientBlocks(tx); err != nil { + return err } - log.Info("Deleted blocks", "from", deletedFrom, "to", deletedTo) return nil }); err != nil { return err @@ -283,17 +277,17 @@ func doSnapshotCommand(cliCtx *cli.Context) error { dir.MustExist(filepath.Join(dirs.Snap, "db")) // this folder will be checked on existance - to understand that snapshots are ready dir.MustExist(dirs.Tmp) - chainDB := mdbx.NewMDBX(log.New()).Label(kv.ChainDB).Path(dirs.Chaindata).Readonly().MustOpen() - defer 
chainDB.Close() + db := mdbx.NewMDBX(log.New()).Label(kv.ChainDB).Path(dirs.Chaindata).Readonly().MustOpen() + defer db.Close() - if err := snapshotBlocks(ctx, chainDB, fromBlock, toBlock, segmentSize, dirs.Snap, dirs.Tmp); err != nil { + if err := snapshotBlocks(ctx, db, fromBlock, toBlock, segmentSize, dirs.Snap, dirs.Tmp); err != nil { log.Error("Error", "err", err) } return nil } -func rebuildIndices(ctx context.Context, chainDB kv.RoDB, cfg ethconfig.Snapshot, dirs datadir.Dirs, from uint64, workers int) error { - chainConfig := tool.ChainConfigFromDB(chainDB) +func rebuildIndices(ctx context.Context, db kv.RoDB, cfg ethconfig.Snapshot, dirs datadir.Dirs, from uint64, workers int) error { + chainConfig := tool.ChainConfigFromDB(db) chainID, _ := uint256.FromBig(chainConfig.ChainID) allSnapshots := snapshotsync.NewRoSnapshots(cfg, dirs.Snap) @@ -306,7 +300,7 @@ func rebuildIndices(ctx context.Context, chainDB kv.RoDB, cfg ethconfig.Snapshot return nil } -func snapshotBlocks(ctx context.Context, chainDB kv.RoDB, fromBlock, toBlock, blocksPerFile uint64, snapDir, tmpDir string) error { +func snapshotBlocks(ctx context.Context, db kv.RoDB, fromBlock, toBlock, blocksPerFile uint64, snapDir, tmpDir string) error { var last uint64 if toBlock > 0 { @@ -331,7 +325,7 @@ func snapshotBlocks(ctx context.Context, chainDB kv.RoDB, fromBlock, toBlock, bl return last, nil } - if err := chainDB.View(context.Background(), func(tx kv.Tx) (err error) { + if err := db.View(context.Background(), func(tx kv.Tx) (err error) { last, err = lastChunk(tx, blocksPerFile) return err }); err != nil { @@ -340,11 +334,8 @@ func snapshotBlocks(ctx context.Context, chainDB kv.RoDB, fromBlock, toBlock, bl } log.Info("Last body number", "last", last) - workers := runtime.GOMAXPROCS(-1) - 1 - if workers < 1 { - workers = 1 - } - if err := snapshotsync.DumpBlocks(ctx, fromBlock, last, blocksPerFile, tmpDir, snapDir, chainDB, workers, log.LvlInfo); err != nil { + workers := cmp.Max(1, 
runtime.GOMAXPROCS(-1)-1) + if err := snapshotsync.DumpBlocks(ctx, fromBlock, last, blocksPerFile, tmpDir, snapDir, db, workers, log.LvlInfo); err != nil { return fmt.Errorf("DumpBlocks: %w", err) } return nil diff --git a/turbo/snapshotsync/block_snapshots.go b/turbo/snapshotsync/block_snapshots.go index 1ba201f4cc0..c23c6da9068 100644 --- a/turbo/snapshotsync/block_snapshots.go +++ b/turbo/snapshotsync/block_snapshots.go @@ -32,6 +32,7 @@ import ( "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/crypto" "github.com/ledgerwatch/erigon/eth/ethconfig" + "github.com/ledgerwatch/erigon/eth/stagedsync/stages" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/turbo/snapshotsync/snap" @@ -963,6 +964,25 @@ func (br *BlockRetire) RetireBlocks(ctx context.Context, blockFrom, blockTo uint chainID, _ := uint256.FromBig(chainConfig.ChainID) return retireBlocks(ctx, blockFrom, blockTo, *chainID, br.tmpDir, br.snapshots, br.db, br.workers, br.downloader, lvl, br.notifier) } + +func (br *BlockRetire) PruneAncientBlocks(tx kv.RwTx) error { + if !br.snapshots.cfg.KeepBlocks { + return nil + } + currentProgress, err := stages.GetStageProgress(tx, stages.Senders) + if err != nil { + return err + } + canDeleteTo := CanDeleteTo(currentProgress, br.snapshots) + if _, _, err := rawdb.DeleteAncientBlocks(tx, canDeleteTo, 100); err != nil { + return nil + } + if err := rawdb.PruneTable(tx, kv.Senders, canDeleteTo, context.Background(), 100); err != nil { + return err + } + return nil +} + func (br *BlockRetire) RetireBlocksInBackground(ctx context.Context, forwardProgress uint64, lvl log.Lvl) { if br.working.Load() { // go-routine is still working From d2389a1f26915e47deab2bde686f721406b89600 Mon Sep 17 00:00:00 2001 From: Andrew Ashikhmin <34320705+yperbasis@users.noreply.github.com> Date: Sat, 23 Jul 2022 18:02:31 +0200 Subject: [PATCH 139/152] Sepolia MergeNetsplit block (#4804) --- 
core/forkid/forkid_test.go | 10 ++++++++++ params/chainspecs/sepolia.json | 2 +- params/config.go | 4 +++- 3 files changed, 14 insertions(+), 2 deletions(-) diff --git a/core/forkid/forkid_test.go b/core/forkid/forkid_test.go index f29b109985c..c2c97de8239 100644 --- a/core/forkid/forkid_test.go +++ b/core/forkid/forkid_test.go @@ -135,6 +135,16 @@ func TestCreation(t *testing.T) { {6000000, ID{Hash: checksumToBytes(0xB8C6299D), Next: 0}}, // Future London block }, }, + // Sepolia test cases + { + params.SepoliaChainConfig, + params.SepoliaGenesisHash, + []testcase{ + {0, ID{Hash: checksumToBytes(0xfe3366e7), Next: 1735371}}, // Unsynced, last Frontier, Homestead, Tangerine, Spurious, Byzantium, Constantinople, Petersburg, Istanbul, Berlin and first London block + {1735370, ID{Hash: checksumToBytes(0xfe3366e7), Next: 1735371}}, // Last pre-MergeNetsplit block + {1735371, ID{Hash: checksumToBytes(0xb96cbd13), Next: 0}}, // First MergeNetsplit block + }, + }, } for i, tt := range tests { for j, ttt := range tt.cases { diff --git a/params/chainspecs/sepolia.json b/params/chainspecs/sepolia.json index ec4f7538390..628a7440ded 100644 --- a/params/chainspecs/sepolia.json +++ b/params/chainspecs/sepolia.json @@ -15,6 +15,6 @@ "berlinBlock": 0, "londonBlock": 0, "terminalTotalDifficulty": 17000000000000000, - "terminalBlockHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "mergeNetsplitBlock": 1735371, "ethash": {} } diff --git a/params/config.go b/params/config.go index faf6fa6fa61..200af283c7f 100644 --- a/params/config.go +++ b/params/config.go @@ -405,7 +405,7 @@ func (c *ChainConfig) String() string { ) } - return fmt.Sprintf("{ChainID: %v, Homestead: %v, DAO: %v, DAO Support: %v, Tangerine Whistle: %v, Spurious Dragon: %v, Byzantium: %v, Constantinople: %v, Petersburg: %v, Istanbul: %v, Muir Glacier: %v, Berlin: %v, London: %v, Arrow Glacier: %v, Gray Glacier: %v, Terminal Total Difficulty: %v, Engine: %v}", + return 
fmt.Sprintf("{ChainID: %v, Homestead: %v, DAO: %v, DAO Support: %v, Tangerine Whistle: %v, Spurious Dragon: %v, Byzantium: %v, Constantinople: %v, Petersburg: %v, Istanbul: %v, Muir Glacier: %v, Berlin: %v, London: %v, Arrow Glacier: %v, Gray Glacier: %v, Terminal Total Difficulty: %v, Merge Netsplit: %v, Engine: %v}", c.ChainID, c.HomesteadBlock, c.DAOForkBlock, @@ -422,6 +422,7 @@ func (c *ChainConfig) String() string { c.ArrowGlacierBlock, c.GrayGlacierBlock, c.TerminalTotalDifficulty, + c.MergeNetsplitBlock, engine, ) } @@ -615,6 +616,7 @@ func (c *ChainConfig) CheckConfigForkOrder() error { {name: "londonBlock", block: c.LondonBlock}, {name: "arrowGlacierBlock", block: c.ArrowGlacierBlock, optional: true}, {name: "grayGlacierBlock", block: c.GrayGlacierBlock, optional: true}, + {name: "mergeNetsplitBlock", block: c.MergeNetsplitBlock, optional: true}, } { if lastFork.name != "" { // Next one must be higher number From 1cb6be02a546b72907d35c7b3a3435f3bc3b09dc Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Sat, 23 Jul 2022 18:57:23 +0200 Subject: [PATCH 140/152] Avoid constantly triggering stageloop when using Engine API (#4797) * avoid constantly triggering stageloop when using Engine API * fix lint + test * fixed comments * ops * little fixes here and there Co-authored-by: giuliorebuffo --- cmd/rpcdaemon/commands/eth_subscribe_test.go | 2 +- cmd/rpcdaemon/rpcdaemontest/test_util.go | 2 +- cmd/rpcdaemon22/rpcdaemontest/test_util.go | 2 +- eth/backend.go | 3 +- eth/stagedsync/stage_headers.go | 108 ++-------- ethdb/privateapi/engine_test.go | 33 ++-- ethdb/privateapi/ethbackend.go | 187 +++++++++++++----- turbo/engineapi/request_list.go | 11 ++ turbo/stages/headerdownload/header_algos.go | 5 +- .../headerdownload/header_data_struct.go | 23 ++- turbo/stages/mock_sentry.go | 2 +- turbo/stages/sentry_mock_test.go | 9 +- turbo/stages/stageloop.go | 7 +- 13 files changed, 208 insertions(+), 186 deletions(-) diff --git 
a/cmd/rpcdaemon/commands/eth_subscribe_test.go b/cmd/rpcdaemon/commands/eth_subscribe_test.go index b7c8531e3d4..24f38220c45 100644 --- a/cmd/rpcdaemon/commands/eth_subscribe_test.go +++ b/cmd/rpcdaemon/commands/eth_subscribe_test.go @@ -40,7 +40,7 @@ func TestEthSubscribe(t *testing.T) { m.ReceiveWg.Wait() // Wait for all messages to be processed before we proceeed ctx := context.Background() - backendServer := privateapi.NewEthBackendServer(ctx, nil, m.DB, m.Notifications.Events, snapshotsync.NewBlockReader(), nil, nil, nil, nil, false) + backendServer := privateapi.NewEthBackendServer(ctx, nil, m.DB, m.Notifications.Events, snapshotsync.NewBlockReader(), nil, nil, nil, false) backendClient := direct.NewEthBackendClientDirect(backendServer) backend := rpcservices.NewRemoteBackend(backendClient, m.DB, snapshotsync.NewBlockReader()) ff := rpchelper.New(ctx, backend, nil, nil, func() {}) diff --git a/cmd/rpcdaemon/rpcdaemontest/test_util.go b/cmd/rpcdaemon/rpcdaemontest/test_util.go index ee53c28a891..f54a8429b29 100644 --- a/cmd/rpcdaemon/rpcdaemontest/test_util.go +++ b/cmd/rpcdaemon/rpcdaemontest/test_util.go @@ -292,7 +292,7 @@ func CreateTestGrpcConn(t *testing.T, m *stages.MockSentry) (context.Context, *g ethashApi := apis[1].Service.(*ethash.API) server := grpc.NewServer() - remote.RegisterETHBACKENDServer(server, privateapi.NewEthBackendServer(ctx, nil, m.DB, m.Notifications.Events, snapshotsync.NewBlockReader(), nil, nil, nil, nil, false)) + remote.RegisterETHBACKENDServer(server, privateapi.NewEthBackendServer(ctx, nil, m.DB, m.Notifications.Events, snapshotsync.NewBlockReader(), nil, nil, nil, false)) txpool.RegisterTxpoolServer(server, m.TxPoolGrpcServer) txpool.RegisterMiningServer(server, privateapi.NewMiningServer(ctx, &IsMiningMock{}, ethashApi)) starknet.RegisterCAIROVMServer(server, &starknet.UnimplementedCAIROVMServer{}) diff --git a/cmd/rpcdaemon22/rpcdaemontest/test_util.go b/cmd/rpcdaemon22/rpcdaemontest/test_util.go index 
ad73c3faad4..a2d6c1eb143 100644 --- a/cmd/rpcdaemon22/rpcdaemontest/test_util.go +++ b/cmd/rpcdaemon22/rpcdaemontest/test_util.go @@ -293,7 +293,7 @@ func CreateTestGrpcConn(t *testing.T, m *stages.MockSentry) (context.Context, *g ethashApi := apis[1].Service.(*ethash.API) server := grpc.NewServer() - remote.RegisterETHBACKENDServer(server, privateapi.NewEthBackendServer(ctx, nil, m.DB, m.Notifications.Events, snapshotsync.NewBlockReader(), nil, nil, nil, nil, false)) + remote.RegisterETHBACKENDServer(server, privateapi.NewEthBackendServer(ctx, nil, m.DB, m.Notifications.Events, snapshotsync.NewBlockReader(), nil, nil, nil, false)) txpool.RegisterTxpoolServer(server, m.TxPoolGrpcServer) txpool.RegisterMiningServer(server, privateapi.NewMiningServer(ctx, &IsMiningMock{}, ethashApi)) starknet.RegisterCAIROVMServer(server, &starknet.UnimplementedCAIROVMServer{}) diff --git a/eth/backend.go b/eth/backend.go index 293f3989309..89cd9332846 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -411,8 +411,7 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere // Initialize ethbackend ethBackendRPC := privateapi.NewEthBackendServer(ctx, backend, backend.chainDB, backend.notifications.Events, - blockReader, chainConfig, backend.sentriesClient.Hd.BeaconRequestList, backend.sentriesClient.Hd.PayloadStatusCh, - assembleBlockPOS, config.Miner.EnabledPOS) + blockReader, chainConfig, assembleBlockPOS, backend.sentriesClient.Hd, config.Miner.EnabledPOS) miningRPC = privateapi.NewMiningServer(ctx, backend, ethashApi) if stack.Config().PrivateApiAddr != "" { diff --git a/eth/stagedsync/stage_headers.go b/eth/stagedsync/stage_headers.go index e8a4b7f863b..8e96b5b9bf6 100644 --- a/eth/stagedsync/stage_headers.go +++ b/eth/stagedsync/stage_headers.go @@ -180,7 +180,7 @@ func HeadersPOS( cfg.hd.ClearPendingPayloadHash() cfg.hd.SetPendingPayloadStatus(nil) - var payloadStatus *privateapi.PayloadStatus + var payloadStatus *engineapi.PayloadStatus if 
forkChoiceInsteadOfNewPayload { payloadStatus, err = startHandlingForkChoice(forkChoiceMessage, requestStatus, requestId, s, u, ctx, tx, cfg, headerInserter) } else { @@ -190,7 +190,7 @@ func HeadersPOS( if err != nil { if requestStatus == engineapi.New { - cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{CriticalError: err} + cfg.hd.PayloadStatusCh <- engineapi.PayloadStatus{CriticalError: err} } return err } @@ -257,7 +257,7 @@ func startHandlingForkChoice( tx kv.RwTx, cfg HeadersCfg, headerInserter *headerdownload.HeaderInserter, -) (*privateapi.PayloadStatus, error) { +) (*engineapi.PayloadStatus, error) { if cfg.memoryOverlay { defer cfg.forkValidator.ClearWithUnwind(tx, cfg.notifications.Accumulator, cfg.notifications.StateChangesConsumer) } @@ -274,27 +274,17 @@ func startHandlingForkChoice( return nil, err } if canonical { - return &privateapi.PayloadStatus{ + return &engineapi.PayloadStatus{ Status: remote.EngineStatus_VALID, LatestValidHash: currentHeadHash, }, nil } else { - return &privateapi.PayloadStatus{ + return &engineapi.PayloadStatus{ CriticalError: &privateapi.InvalidForkchoiceStateErr, }, nil } } - bad, lastValidHash := cfg.hd.IsBadHeaderPoS(headerHash) - if bad { - log.Warn(fmt.Sprintf("[%s] Fork choice bad head block", s.LogPrefix()), "headerHash", headerHash) - cfg.hd.BeaconRequestList.Remove(requestId) - return &privateapi.PayloadStatus{ - Status: remote.EngineStatus_INVALID, - LatestValidHash: lastValidHash, - }, nil - } - // Header itself may already be in the snapshots, if CL starts off at much earlier state than Erigon header, err := cfg.blockReader.HeaderByHash(ctx, tx, headerHash) if err != nil { @@ -307,33 +297,12 @@ func startHandlingForkChoice( log.Info(fmt.Sprintf("[%s] Fork choice missing header with hash %x", s.LogPrefix(), headerHash)) cfg.hd.SetPoSDownloaderTip(headerHash) schedulePoSDownload(requestId, headerHash, 0 /* header height is unknown, setting to 0 */, s, cfg) - return &privateapi.PayloadStatus{Status: 
remote.EngineStatus_SYNCING}, nil + return &engineapi.PayloadStatus{Status: remote.EngineStatus_SYNCING}, nil } cfg.hd.BeaconRequestList.Remove(requestId) headerNumber := header.Number.Uint64() - // If header is canonical, then no reorgs are required - canonicalHash, err := rawdb.ReadCanonicalHash(tx, headerNumber) - if err != nil { - log.Warn(fmt.Sprintf("[%s] Fork choice err (reading canonical hash of %d)", s.LogPrefix(), headerNumber), "err", err) - cfg.hd.BeaconRequestList.Remove(requestId) - return nil, err - } - - if headerHash == canonicalHash { - log.Info(fmt.Sprintf("[%s] Fork choice on previously known block", s.LogPrefix())) - cfg.hd.BeaconRequestList.Remove(requestId) - // Per the Engine API spec: - // Client software MAY skip an update of the forkchoice state and MUST NOT begin a payload build process - // if forkchoiceState.headBlockHash references an ancestor of the head of canonical chain. - // In the case of such an event, client software MUST return - // {payloadStatus: {status: VALID, latestValidHash: forkchoiceState.headBlockHash, validationError: null}, payloadId: null}. - return &privateapi.PayloadStatus{ - Status: remote.EngineStatus_VALID, - LatestValidHash: headerHash, - }, nil - } if cfg.memoryOverlay && headerHash == cfg.forkValidator.ExtendingForkHeadHash() { log.Info("Flushing in-memory state") @@ -350,7 +319,7 @@ func startHandlingForkChoice( cfg.hd.SetPendingPayloadHash(headerHash) return nil, nil } else { - return &privateapi.PayloadStatus{ + return &engineapi.PayloadStatus{ CriticalError: &privateapi.InvalidForkchoiceStateErr, }, nil } @@ -369,7 +338,7 @@ func startHandlingForkChoice( // TODO(yperbasis): what if some bodies are missing and we have to download them? 
cfg.hd.SetPendingPayloadHash(headerHash) } else { - cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{Status: remote.EngineStatus_SYNCING} + cfg.hd.PayloadStatusCh <- engineapi.PayloadStatus{Status: remote.EngineStatus_SYNCING} } } @@ -418,7 +387,7 @@ func finishHandlingForkChoice( if !canonical { if cfg.hd.GetPendingPayloadHash() != (common.Hash{}) { - cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{ + cfg.hd.PayloadStatusCh <- engineapi.PayloadStatus{ CriticalError: &privateapi.InvalidForkchoiceStateErr, } } @@ -438,7 +407,7 @@ func handleNewPayload( tx kv.RwTx, cfg HeadersCfg, headerInserter *headerdownload.HeaderInserter, -) (*privateapi.PayloadStatus, error) { +) (*engineapi.PayloadStatus, error) { header := block.Header() headerNumber := header.Number.Uint64() headerHash := block.Hash() @@ -446,40 +415,6 @@ func handleNewPayload( log.Debug(fmt.Sprintf("[%s] Handling new payload", s.LogPrefix()), "height", headerNumber, "hash", headerHash) cfg.hd.UpdateTopSeenHeightPoS(headerNumber) - existingCanonicalHash, err := rawdb.ReadCanonicalHash(tx, headerNumber) - if err != nil { - log.Warn(fmt.Sprintf("[%s] New payload err", s.LogPrefix()), "err", err) - cfg.hd.BeaconRequestList.Remove(requestId) - return nil, err - } - - if existingCanonicalHash != (common.Hash{}) && headerHash == existingCanonicalHash { - log.Info(fmt.Sprintf("[%s] New payload: previously received valid header %d", s.LogPrefix(), headerNumber)) - cfg.hd.BeaconRequestList.Remove(requestId) - return &privateapi.PayloadStatus{ - Status: remote.EngineStatus_VALID, - LatestValidHash: headerHash, - }, nil - } - - bad, lastValidHash := cfg.hd.IsBadHeaderPoS(headerHash) - if bad { - log.Warn(fmt.Sprintf("[%s] Previously known bad block", s.LogPrefix()), "height", headerNumber, "hash", headerHash) - } else { - bad, lastValidHash = cfg.hd.IsBadHeaderPoS(header.ParentHash) - if bad { - log.Warn(fmt.Sprintf("[%s] Previously known bad parent", s.LogPrefix()), "height", headerNumber, "hash", headerHash, 
"parentHash", header.ParentHash) - } - } - if bad { - cfg.hd.BeaconRequestList.Remove(requestId) - cfg.hd.ReportBadHeaderPoS(headerHash, lastValidHash) - return &privateapi.PayloadStatus{ - Status: remote.EngineStatus_INVALID, - LatestValidHash: lastValidHash, - }, nil - } - parent, err := cfg.blockReader.HeaderByHash(ctx, tx, header.ParentHash) if err != nil { return nil, err @@ -488,18 +423,7 @@ func handleNewPayload( log.Info(fmt.Sprintf("[%s] New payload missing parent", s.LogPrefix())) cfg.hd.SetPoSDownloaderTip(headerHash) schedulePoSDownload(requestId, header.ParentHash, headerNumber-1, s, cfg) - return &privateapi.PayloadStatus{Status: remote.EngineStatus_SYNCING}, nil - } - - if headerNumber != parent.Number.Uint64()+1 { - log.Warn(fmt.Sprintf("[%s] Invalid block number", s.LogPrefix()), "headerNumber", headerNumber, "parentNumber", parent.Number.Uint64()) - cfg.hd.BeaconRequestList.Remove(requestId) - cfg.hd.ReportBadHeaderPoS(headerHash, header.ParentHash) - return &privateapi.PayloadStatus{ - Status: remote.EngineStatus_INVALID, - LatestValidHash: header.ParentHash, - ValidationError: errors.New("invalid block number"), - }, nil + return &engineapi.PayloadStatus{Status: remote.EngineStatus_SYNCING}, nil } cfg.hd.BeaconRequestList.Remove(requestId) @@ -526,7 +450,7 @@ func verifyAndSaveNewPoSHeader( cfg HeadersCfg, block *types.Block, headerInserter *headerdownload.HeaderInserter, -) (response *privateapi.PayloadStatus, success bool, err error) { +) (response *engineapi.PayloadStatus, success bool, err error) { header := block.Header() headerNumber := header.Number.Uint64() headerHash := block.Hash() @@ -534,7 +458,7 @@ func verifyAndSaveNewPoSHeader( if verificationErr := cfg.hd.VerifyHeader(header); verificationErr != nil { log.Warn("Verification failed for header", "hash", headerHash, "height", headerNumber, "err", verificationErr) cfg.hd.ReportBadHeaderPoS(headerHash, header.ParentHash) - return &privateapi.PayloadStatus{ + return 
&engineapi.PayloadStatus{ Status: remote.EngineStatus_INVALID, LatestValidHash: header.ParentHash, ValidationError: verificationErr, @@ -565,7 +489,7 @@ func verifyAndSaveNewPoSHeader( } else if err := headerInserter.FeedHeaderPoS(tx, header, headerHash); err != nil { return nil, false, err } - return &privateapi.PayloadStatus{ + return &engineapi.PayloadStatus{ Status: status, LatestValidHash: latestValidHash, ValidationError: validationError, @@ -578,7 +502,7 @@ func verifyAndSaveNewPoSHeader( if !canExtendCanonical { log.Info("Side chain", "parentHash", header.ParentHash, "currentHead", currentHeadHash) - return &privateapi.PayloadStatus{Status: remote.EngineStatus_ACCEPTED}, true, nil + return &engineapi.PayloadStatus{Status: remote.EngineStatus_ACCEPTED}, true, nil } // OK, we're on the canonical chain @@ -725,7 +649,7 @@ func forkingPoint( func handleInterrupt(interrupt engineapi.Interrupt, cfg HeadersCfg, tx kv.RwTx, headerInserter *headerdownload.HeaderInserter, useExternalTx bool) (bool, error) { if interrupt != engineapi.None { if interrupt == engineapi.Stopping { - cfg.hd.PayloadStatusCh <- privateapi.PayloadStatus{CriticalError: errors.New("server is stopping")} + cfg.hd.PayloadStatusCh <- engineapi.PayloadStatus{CriticalError: errors.New("server is stopping")} } if interrupt == engineapi.Synced { verifyAndSaveDownloadedPoSHeaders(tx, cfg, headerInserter) diff --git a/ethdb/privateapi/engine_test.go b/ethdb/privateapi/engine_test.go index 42903819ad9..74b0eab1efa 100644 --- a/ethdb/privateapi/engine_test.go +++ b/ethdb/privateapi/engine_test.go @@ -13,6 +13,7 @@ import ( "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/turbo/engineapi" + "github.com/ledgerwatch/erigon/turbo/stages/headerdownload" "github.com/stretchr/testify/require" ) @@ -89,11 +90,10 @@ func TestMockDownloadRequest(t *testing.T) { require := require.New(t) makeTestDb(ctx, db) - beaconRequestList := 
engineapi.NewRequestList() - statusCh := make(chan PayloadStatus) + hd := headerdownload.NewHeaderDownload(0, 0, nil, nil) events := NewEvents() - backend := NewEthBackendServer(ctx, nil, db, events, nil, ¶ms.ChainConfig{TerminalTotalDifficulty: common.Big1}, beaconRequestList, statusCh, nil, false) + backend := NewEthBackendServer(ctx, nil, db, events, nil, ¶ms.ChainConfig{TerminalTotalDifficulty: common.Big1}, nil, hd, false) var err error var reply *remote.EnginePayloadStatus @@ -104,8 +104,8 @@ func TestMockDownloadRequest(t *testing.T) { done <- true }() - beaconRequestList.WaitForRequest(true) - statusCh <- PayloadStatus{Status: remote.EngineStatus_SYNCING} + hd.BeaconRequestList.WaitForRequest(true) + hd.PayloadStatusCh <- engineapi.PayloadStatus{Status: remote.EngineStatus_SYNCING} <-done require.NoError(err) require.Equal(reply.Status, remote.EngineStatus_SYNCING) @@ -148,11 +148,10 @@ func TestMockValidExecution(t *testing.T) { makeTestDb(ctx, db) - beaconRequestList := engineapi.NewRequestList() - statusCh := make(chan PayloadStatus) + hd := headerdownload.NewHeaderDownload(0, 0, nil, nil) events := NewEvents() - backend := NewEthBackendServer(ctx, nil, db, events, nil, ¶ms.ChainConfig{TerminalTotalDifficulty: common.Big1}, beaconRequestList, statusCh, nil, false) + backend := NewEthBackendServer(ctx, nil, db, events, nil, ¶ms.ChainConfig{TerminalTotalDifficulty: common.Big1}, nil, hd, false) var err error var reply *remote.EnginePayloadStatus @@ -163,9 +162,9 @@ func TestMockValidExecution(t *testing.T) { done <- true }() - beaconRequestList.WaitForRequest(true) + hd.BeaconRequestList.WaitForRequest(true) - statusCh <- PayloadStatus{ + hd.PayloadStatusCh <- engineapi.PayloadStatus{ Status: remote.EngineStatus_VALID, LatestValidHash: payload3Hash, } @@ -184,11 +183,10 @@ func TestMockInvalidExecution(t *testing.T) { makeTestDb(ctx, db) - beaconRequestList := engineapi.NewRequestList() - statusCh := make(chan PayloadStatus) + hd := 
headerdownload.NewHeaderDownload(0, 0, nil, nil) events := NewEvents() - backend := NewEthBackendServer(ctx, nil, db, events, nil, ¶ms.ChainConfig{TerminalTotalDifficulty: common.Big1}, beaconRequestList, statusCh, nil, false) + backend := NewEthBackendServer(ctx, nil, db, events, nil, ¶ms.ChainConfig{TerminalTotalDifficulty: common.Big1}, nil, hd, false) var err error var reply *remote.EnginePayloadStatus @@ -199,9 +197,9 @@ func TestMockInvalidExecution(t *testing.T) { done <- true }() - beaconRequestList.WaitForRequest(true) + hd.BeaconRequestList.WaitForRequest(true) // Simulate invalid status - statusCh <- PayloadStatus{ + hd.PayloadStatusCh <- engineapi.PayloadStatus{ Status: remote.EngineStatus_INVALID, LatestValidHash: startingHeadHash, } @@ -220,11 +218,10 @@ func TestNoTTD(t *testing.T) { makeTestDb(ctx, db) - beaconRequestList := engineapi.NewRequestList() - statusCh := make(chan PayloadStatus) + hd := headerdownload.NewHeaderDownload(0, 0, nil, nil) events := NewEvents() - backend := NewEthBackendServer(ctx, nil, db, events, nil, ¶ms.ChainConfig{}, beaconRequestList, statusCh, nil, false) + backend := NewEthBackendServer(ctx, nil, db, events, nil, ¶ms.ChainConfig{}, nil, hd, false) var err error diff --git a/ethdb/privateapi/ethbackend.go b/ethdb/privateapi/ethbackend.go index f4abc963acf..6e1c22b2dfd 100644 --- a/ethdb/privateapi/ethbackend.go +++ b/ethdb/privateapi/ethbackend.go @@ -24,6 +24,7 @@ import ( "github.com/ledgerwatch/erigon/turbo/builder" "github.com/ledgerwatch/erigon/turbo/engineapi" "github.com/ledgerwatch/erigon/turbo/services" + "github.com/ledgerwatch/erigon/turbo/stages/headerdownload" "github.com/ledgerwatch/log/v3" "golang.org/x/exp/slices" "google.golang.org/protobuf/types/known/emptypb" @@ -55,14 +56,12 @@ type EthBackendServer struct { // Block proposing for proof-of-stake payloadId uint64 builders map[uint64]*builder.BlockBuilder - // Send Beacon Chain requests to staged sync - requestList *engineapi.RequestList - // Replies 
to newPayload & forkchoice requests - statusCh <-chan PayloadStatus + builderFunc builder.BlockBuilderFunc proposing bool lock sync.Mutex // Engine API is asynchronous, we want to avoid CL to call different APIs at the same time logsFilter *LogsFilterAggregator + hd *headerdownload.HeaderDownload } type EthBackend interface { @@ -73,23 +72,12 @@ type EthBackend interface { Peers(ctx context.Context) (*remote.PeersReply, error) } -// This is the status of a newly execute block. -// Hash: Block hash -// Status: block's status -type PayloadStatus struct { - Status remote.EngineStatus - LatestValidHash common.Hash - ValidationError error - CriticalError error -} - func NewEthBackendServer(ctx context.Context, eth EthBackend, db kv.RwDB, events *Events, blockReader services.BlockAndTxnReader, - config *params.ChainConfig, requestList *engineapi.RequestList, statusCh <-chan PayloadStatus, - builderFunc builder.BlockBuilderFunc, proposing bool, + config *params.ChainConfig, builderFunc builder.BlockBuilderFunc, hd *headerdownload.HeaderDownload, proposing bool, ) *EthBackendServer { s := &EthBackendServer{ctx: ctx, eth: eth, events: events, db: db, blockReader: blockReader, config: config, - requestList: requestList, statusCh: statusCh, builders: make(map[uint64]*builder.BlockBuilder), - builderFunc: builderFunc, proposing: proposing, logsFilter: NewLogsFilterAggregator(events), + builders: make(map[uint64]*builder.BlockBuilder), + builderFunc: builderFunc, proposing: proposing, logsFilter: NewLogsFilterAggregator(events), hd: hd, } ch, clean := s.events.AddLogsSubscription() @@ -244,7 +232,7 @@ func (s *EthBackendServer) Block(ctx context.Context, req *remote.BlockRequest) return &remote.BlockReply{BlockRlp: blockRlp, Senders: sendersBytes}, nil } -func convertPayloadStatus(payloadStatus *PayloadStatus) *remote.EnginePayloadStatus { +func convertPayloadStatus(payloadStatus *engineapi.PayloadStatus) *remote.EnginePayloadStatus { reply := remote.EnginePayloadStatus{Status: 
payloadStatus.Status} if payloadStatus.LatestValidHash != (common.Hash{}) { reply.LatestValidHash = gointerfaces.ConvertHashToH256(payloadStatus.LatestValidHash) @@ -257,7 +245,7 @@ func convertPayloadStatus(payloadStatus *PayloadStatus) *remote.EnginePayloadSta func (s *EthBackendServer) stageLoopIsBusy() bool { for i := 0; i < 20; i++ { - if !s.requestList.IsWaiting() { + if !s.hd.BeaconRequestList.IsWaiting() { // This might happen, for example, in the following scenario: // 1) CL sends NewPayload and immediately after that ForkChoiceUpdated. // 2) We happily process NewPayload and stage loop is at the end. @@ -269,7 +257,7 @@ func (s *EthBackendServer) stageLoopIsBusy() bool { time.Sleep(5 * time.Millisecond) } } - return !s.requestList.IsWaiting() + return !s.hd.BeaconRequestList.IsWaiting() } // EngineNewPayloadV1 validates and possibly executes payload @@ -344,12 +332,21 @@ func (s *EthBackendServer) EngineNewPayloadV1(ctx context.Context, req *types2.E if err != nil { return nil, err } + + tx.Rollback() + if parentTd != nil && parentTd.Cmp(s.config.TerminalTotalDifficulty) < 0 { log.Warn("[NewPayload] TTD not reached yet", "height", header.Number, "hash", common.Hash(blockHash)) return &remote.EnginePayloadStatus{Status: remote.EngineStatus_INVALID, LatestValidHash: gointerfaces.ConvertHashToH256(common.Hash{})}, nil } - tx.Rollback() + possibleStatus, err := s.getPayloadStatusFromHashIfPossible(blockHash, req.BlockNumber, header.ParentHash, true) + if err != nil { + return nil, err + } + if possibleStatus != nil { + return convertPayloadStatus(possibleStatus), nil + } // If another payload is already commissioned then we just reply with syncing if s.stageLoopIsBusy() { // We are still syncing a commissioned payload @@ -360,17 +357,13 @@ func (s *EthBackendServer) EngineNewPayloadV1(ctx context.Context, req *types2.E log.Debug("[NewPayload] stage loop is busy") return &remote.EnginePayloadStatus{Status: remote.EngineStatus_SYNCING}, nil } - - // Lock the 
thread (We modify shared resources). - log.Debug("[NewPayload] acquiring lock") s.lock.Lock() defer s.lock.Unlock() - log.Debug("[NewPayload] lock acquired") log.Debug("[NewPayload] sending block", "height", header.Number, "hash", common.Hash(blockHash)) - s.requestList.AddPayloadRequest(block) + s.hd.BeaconRequestList.AddPayloadRequest(block) - payloadStatus := <-s.statusCh + payloadStatus := <-s.hd.PayloadStatusCh log.Debug("[NewPayload] got reply", "payloadStatus", payloadStatus) if payloadStatus.CriticalError != nil { @@ -380,6 +373,100 @@ func (s *EthBackendServer) EngineNewPayloadV1(ctx context.Context, req *types2.E return convertPayloadStatus(&payloadStatus), nil } +// Check if we can make out a status from the payload hash/head hash. +func (s *EthBackendServer) getPayloadStatusFromHashIfPossible(blockHash common.Hash, blockNumber uint64, parentHash common.Hash, newPayload bool) (*engineapi.PayloadStatus, error) { + if s.hd == nil { + return nil, nil + } + var prefix string + if newPayload { + prefix = "NewPayload" + } else { + prefix = "ForkChoiceUpdated" + } + tx, err := s.db.BeginRo(s.ctx) + if err != nil { + return nil, err + } + defer tx.Rollback() + + header, err := rawdb.ReadHeaderByHash(tx, blockHash) + if err != nil { + return nil, err + } + var parent *types.Header + if newPayload { + parent, err = rawdb.ReadHeaderByHash(tx, parentHash) + } + if err != nil { + return nil, err + } + + var canonicalHash common.Hash + if header != nil { + canonicalHash, err = rawdb.ReadCanonicalHash(tx, header.Number.Uint64()) + } + if err != nil { + return nil, err + } + + if newPayload && parent != nil && blockNumber != parent.Number.Uint64()+1 { + log.Warn(fmt.Sprintf("[%s] Invalid block number", prefix), "headerNumber", blockNumber, "parentNumber", parent.Number.Uint64()) + s.hd.ReportBadHeaderPoS(blockHash, parent.Hash()) + return &engineapi.PayloadStatus{ + Status: remote.EngineStatus_INVALID, + LatestValidHash: parent.Hash(), + ValidationError: 
errors.New("invalid block number"), + }, nil + } + // Check if we already determined if the hash is attributed to a previously received invalid header. + bad, lastValidHash := s.hd.IsBadHeaderPoS(blockHash) + if bad { + log.Warn(fmt.Sprintf("[%s] Previously known bad block", prefix), "hash", blockHash) + } else if newPayload { + bad, lastValidHash = s.hd.IsBadHeaderPoS(parentHash) + if bad { + log.Warn(fmt.Sprintf("[%s] Previously known bad block", prefix), "hash", blockHash, "parentHash", parentHash) + } + } + if bad { + s.hd.ReportBadHeaderPoS(blockHash, lastValidHash) + return &engineapi.PayloadStatus{Status: remote.EngineStatus_INVALID, LatestValidHash: lastValidHash}, nil + } + + // If header is already validated or has a missing parent, you can either return VALID or SYNCING. + if newPayload { + if header != nil && canonicalHash == blockHash { + return &engineapi.PayloadStatus{Status: remote.EngineStatus_VALID, LatestValidHash: blockHash}, nil + } + + if parent == nil && s.hd.PosStatus() == headerdownload.Syncing { + return &engineapi.PayloadStatus{Status: remote.EngineStatus_SYNCING}, nil + } + + return nil, nil + } + + if header == nil { + if s.hd.PosStatus() == headerdownload.Syncing { + return &engineapi.PayloadStatus{Status: remote.EngineStatus_SYNCING}, nil + + } + return nil, nil + } + + headHash := rawdb.ReadHeadBlockHash(tx) + if err != nil { + return nil, err + } + + if blockHash != headHash && canonicalHash == blockHash { + return &engineapi.PayloadStatus{Status: remote.EngineStatus_VALID, LatestValidHash: blockHash}, nil + } + + return nil, nil +} + // EngineGetPayloadV1 retrieves previously assembled payload (Validators only) func (s *EthBackendServer) EngineGetPayloadV1(ctx context.Context, req *remote.EngineGetPayloadRequest) (*types2.ExecutionPayload, error) { if !s.proposing { @@ -451,6 +538,7 @@ func (s *EthBackendServer) EngineForkChoiceUpdatedV1(ctx context.Context, req *r return nil, err } defer tx1.Rollback() + td, err := 
rawdb.ReadTdByHash(tx1, forkChoice.HeadBlockHash) tx1.Rollback() if err != nil { @@ -463,31 +551,38 @@ func (s *EthBackendServer) EngineForkChoiceUpdatedV1(ctx context.Context, req *r }, nil } - if s.stageLoopIsBusy() { - log.Debug("[ForkChoiceUpdated] stage loop is busy") - return &remote.EngineForkChoiceUpdatedReply{ - PayloadStatus: &remote.EnginePayloadStatus{Status: remote.EngineStatus_SYNCING}, - }, nil + status, err := s.getPayloadStatusFromHashIfPossible(forkChoice.HeadBlockHash, 0, common.Hash{}, false) + if err != nil { + return nil, err } + if status == nil { + if s.stageLoopIsBusy() { + log.Debug("[ForkChoiceUpdated] stage loop is busy") + return &remote.EngineForkChoiceUpdatedReply{ + PayloadStatus: &remote.EnginePayloadStatus{Status: remote.EngineStatus_SYNCING}, + }, nil + } + s.lock.Lock() + defer s.lock.Unlock() - log.Debug("[ForkChoiceUpdated] acquiring lock") - s.lock.Lock() - defer s.lock.Unlock() - log.Debug("[ForkChoiceUpdated] lock acquired") - - log.Debug("[ForkChoiceUpdated] sending forkChoiceMessage", "head", forkChoice.HeadBlockHash) - s.requestList.AddForkChoiceRequest(&forkChoice) + log.Debug("[ForkChoiceUpdated] sending forkChoiceMessage", "head", forkChoice.HeadBlockHash) + s.hd.BeaconRequestList.AddForkChoiceRequest(&forkChoice) - status := <-s.statusCh - log.Debug("[ForkChoiceUpdated] got reply", "payloadStatus", status) + statusRef := <-s.hd.PayloadStatusCh + status = &statusRef + log.Debug("[ForkChoiceUpdated] got reply", "payloadStatus", status) - if status.CriticalError != nil { - return nil, status.CriticalError + if status.CriticalError != nil { + return nil, status.CriticalError + } + } else { + s.lock.Lock() + defer s.lock.Unlock() } // No need for payload building if req.PayloadAttributes == nil || status.Status != remote.EngineStatus_VALID { - return &remote.EngineForkChoiceUpdatedReply{PayloadStatus: convertPayloadStatus(&status)}, nil + return &remote.EngineForkChoiceUpdatedReply{PayloadStatus: 
convertPayloadStatus(status)}, nil } if !s.proposing { @@ -514,7 +609,7 @@ func (s *EthBackendServer) EngineForkChoiceUpdatedV1(ctx context.Context, req *r log.Warn("Skipping payload building because forkchoiceState.headBlockHash is not the head of the canonical chain", "forkChoice.HeadBlockHash", forkChoice.HeadBlockHash, "headHeader.Hash", headHeader.Hash()) - return &remote.EngineForkChoiceUpdatedReply{PayloadStatus: convertPayloadStatus(&status)}, nil + return &remote.EngineForkChoiceUpdatedReply{PayloadStatus: convertPayloadStatus(status)}, nil } if headHeader.Time >= req.PayloadAttributes.Timestamp { diff --git a/turbo/engineapi/request_list.go b/turbo/engineapi/request_list.go index 11a2bc0ba13..455a38825c0 100644 --- a/turbo/engineapi/request_list.go +++ b/turbo/engineapi/request_list.go @@ -6,10 +6,21 @@ import ( "github.com/emirpasic/gods/maps/treemap" + "github.com/ledgerwatch/erigon-lib/gointerfaces/remote" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/core/types" ) +// This is the status of a newly execute block. 
+// Hash: Block hash +// Status: block's status +type PayloadStatus struct { + Status remote.EngineStatus + LatestValidHash common.Hash + ValidationError error + CriticalError error +} + // The message we are going to send to the stage sync in ForkchoiceUpdated type ForkChoiceMessage struct { HeadBlockHash common.Hash diff --git a/turbo/stages/headerdownload/header_algos.go b/turbo/stages/headerdownload/header_algos.go index 1f9d9f83808..53cea0779bc 100644 --- a/turbo/stages/headerdownload/header_algos.go +++ b/turbo/stages/headerdownload/header_algos.go @@ -26,7 +26,6 @@ import ( "github.com/ledgerwatch/erigon/core/rawdb" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/ethdb/privateapi" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/turbo/engineapi" @@ -1134,13 +1133,13 @@ func (hd *HeaderDownload) ClearPendingPayloadHash() { hd.pendingPayloadHash = common.Hash{} } -func (hd *HeaderDownload) GetPendingPayloadStatus() *privateapi.PayloadStatus { +func (hd *HeaderDownload) GetPendingPayloadStatus() *engineapi.PayloadStatus { hd.lock.RLock() defer hd.lock.RUnlock() return hd.pendingPayloadStatus } -func (hd *HeaderDownload) SetPendingPayloadStatus(response *privateapi.PayloadStatus) { +func (hd *HeaderDownload) SetPendingPayloadStatus(response *engineapi.PayloadStatus) { hd.lock.Lock() defer hd.lock.Unlock() hd.pendingPayloadStatus = response diff --git a/turbo/stages/headerdownload/header_data_struct.go b/turbo/stages/headerdownload/header_data_struct.go index 1a097fcf28b..a858f1554fa 100644 --- a/turbo/stages/headerdownload/header_data_struct.go +++ b/turbo/stages/headerdownload/header_data_struct.go @@ -12,7 +12,6 @@ import ( "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/consensus" "github.com/ledgerwatch/erigon/core/types" - "github.com/ledgerwatch/erigon/ethdb/privateapi" 
"github.com/ledgerwatch/erigon/rlp" "github.com/ledgerwatch/erigon/turbo/engineapi" "github.com/ledgerwatch/erigon/turbo/services" @@ -305,16 +304,16 @@ type HeaderDownload struct { requestId int posAnchor *Anchor posStatus SyncStatus - posSync bool // Whether the chain is syncing in the PoS mode - headersCollector *etl.Collector // ETL collector for headers - BeaconRequestList *engineapi.RequestList // Requests from ethbackend to staged sync - PayloadStatusCh chan privateapi.PayloadStatus // Responses (validation/execution status) - pendingPayloadHash common.Hash // Header whose status we still should send to PayloadStatusCh - pendingPayloadStatus *privateapi.PayloadStatus // Alternatively, there can be an already prepared response to send to PayloadStatusCh - unsettledForkChoice *engineapi.ForkChoiceMessage // Forkchoice to process after unwind - unsettledHeadHeight uint64 // Height of unsettledForkChoice.headBlockHash - posDownloaderTip common.Hash // See https://hackmd.io/GDc0maGsQeKfP8o2C7L52w - badPoSHeaders map[common.Hash]common.Hash // Invalid Tip -> Last Valid Ancestor + posSync bool // Whether the chain is syncing in the PoS mode + headersCollector *etl.Collector // ETL collector for headers + BeaconRequestList *engineapi.RequestList // Requests from ethbackend to staged sync + PayloadStatusCh chan engineapi.PayloadStatus // Responses (validation/execution status) + pendingPayloadHash common.Hash // Header whose status we still should send to PayloadStatusCh + pendingPayloadStatus *engineapi.PayloadStatus // Alternatively, there can be an already prepared response to send to PayloadStatusCh + unsettledForkChoice *engineapi.ForkChoiceMessage // Forkchoice to process after unwind + unsettledHeadHeight uint64 // Height of unsettledForkChoice.headBlockHash + posDownloaderTip common.Hash // See https://hackmd.io/GDc0maGsQeKfP8o2C7L52w + badPoSHeaders map[common.Hash]common.Hash // Invalid Tip -> Last Valid Ancestor } // HeaderRecord encapsulates two forms of 
the same header - raw RLP encoding (to avoid duplicated decodings and encodings), and parsed value types.Header @@ -343,7 +342,7 @@ func NewHeaderDownload( DeliveryNotify: make(chan struct{}, 1), QuitPoWMining: make(chan struct{}), BeaconRequestList: engineapi.NewRequestList(), - PayloadStatusCh: make(chan privateapi.PayloadStatus, 1), + PayloadStatusCh: make(chan engineapi.PayloadStatus, 1), headerReader: headerReader, badPoSHeaders: make(map[common.Hash]common.Hash), } diff --git a/turbo/stages/mock_sentry.go b/turbo/stages/mock_sentry.go index 774e98fab39..134bdc48ff2 100644 --- a/turbo/stages/mock_sentry.go +++ b/turbo/stages/mock_sentry.go @@ -530,7 +530,7 @@ func (ms *MockSentry) SendForkChoiceRequest(message *engineapi.ForkChoiceMessage ms.sentriesClient.Hd.BeaconRequestList.AddForkChoiceRequest(message) } -func (ms *MockSentry) ReceivePayloadStatus() privateapi.PayloadStatus { +func (ms *MockSentry) ReceivePayloadStatus() engineapi.PayloadStatus { return <-ms.sentriesClient.Hd.PayloadStatusCh } diff --git a/turbo/stages/sentry_mock_test.go b/turbo/stages/sentry_mock_test.go index bd8e552c1be..6e72276dea8 100644 --- a/turbo/stages/sentry_mock_test.go +++ b/turbo/stages/sentry_mock_test.go @@ -669,11 +669,10 @@ func TestPoSSyncWithInvalidHeader(t *testing.T) { FinalizedBlockHash: invalidTip.Hash(), } m.SendForkChoiceRequest(&forkChoiceMessage) - headBlockHash, err = stages.StageLoopStep(m.Ctx, m.DB, m.Sync, 0, m.Notifications, false, m.UpdateHead, nil) + _, err = stages.StageLoopStep(m.Ctx, m.DB, m.Sync, 0, m.Notifications, false, m.UpdateHead, nil) require.NoError(t, err) - stages.SendPayloadStatus(m.HeaderDownload(), headBlockHash, err) - payloadStatus2 := m.ReceivePayloadStatus() - require.Equal(t, remote.EngineStatus_INVALID, payloadStatus2.Status) - assert.Equal(t, lastValidHeader.Hash(), payloadStatus2.LatestValidHash) + bad, lastValidHash := m.HeaderDownload().IsBadHeaderPoS(invalidTip.Hash()) + assert.True(t, bad) + assert.Equal(t, lastValidHash, 
lastValidHeader.Hash()) } diff --git a/turbo/stages/stageloop.go b/turbo/stages/stageloop.go index 5280dc8b7e3..e2552990af9 100644 --- a/turbo/stages/stageloop.go +++ b/turbo/stages/stageloop.go @@ -23,7 +23,6 @@ import ( "github.com/ledgerwatch/erigon/eth/ethconfig" "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/eth/stagedsync/stages" - "github.com/ledgerwatch/erigon/ethdb/privateapi" "github.com/ledgerwatch/erigon/p2p" "github.com/ledgerwatch/erigon/turbo/engineapi" "github.com/ledgerwatch/erigon/turbo/services" @@ -35,13 +34,13 @@ import ( func SendPayloadStatus(hd *headerdownload.HeaderDownload, headBlockHash common.Hash, err error) { if pendingPayloadStatus := hd.GetPendingPayloadStatus(); pendingPayloadStatus != nil { if err != nil { - hd.PayloadStatusCh <- privateapi.PayloadStatus{CriticalError: err} + hd.PayloadStatusCh <- engineapi.PayloadStatus{CriticalError: err} } else { hd.PayloadStatusCh <- *pendingPayloadStatus } } else if pendingPayloadHash := hd.GetPendingPayloadHash(); pendingPayloadHash != (common.Hash{}) { if err != nil { - hd.PayloadStatusCh <- privateapi.PayloadStatus{CriticalError: err} + hd.PayloadStatusCh <- engineapi.PayloadStatus{CriticalError: err} } else { var status remote.EngineStatus if headBlockHash == pendingPayloadHash { @@ -50,7 +49,7 @@ func SendPayloadStatus(hd *headerdownload.HeaderDownload, headBlockHash common.H log.Warn("Failed to execute pending payload", "pendingPayload", pendingPayloadHash, "headBlock", headBlockHash) status = remote.EngineStatus_INVALID } - hd.PayloadStatusCh <- privateapi.PayloadStatus{ + hd.PayloadStatusCh <- engineapi.PayloadStatus{ Status: status, LatestValidHash: headBlockHash, } From 81d106bc9df901cf9c5a2cbae6aa695437ecd367 Mon Sep 17 00:00:00 2001 From: ledgerwatch Date: Sat, 23 Jul 2022 18:39:08 +0100 Subject: [PATCH 141/152] Experiment in parallel execution (#4652) * Restructure tx execution * fixes * Fixes and traces * Tracing * More tracing * Drain the result 
channel * Intermediate * more efficient parallel exec * Sorted buffer * Fix results size * fix for the recon * Fix compilation * Sort keys in Write and Read sets, fix compilation in rpcdaemon22 * Update to latest erigon-lib * Update to erigon-lib * Remove go.mod replace * Update erigon-lib * Update to erigon-lib main * Fix lint Co-authored-by: Alexey Sharp Co-authored-by: Alex Sharp --- cmd/rpcdaemon22/commands/eth_receipts.go | 12 +- cmd/rpcdaemon22/commands/trace_filtering.go | 7 +- cmd/state/commands/erigon22.go | 12 +- cmd/state/commands/history22.go | 2 +- cmd/state/commands/replay_tx.go | 195 +++++++++++ cmd/state/commands/state_recon.go | 61 ++-- cmd/state/commands/state_recon_1.go | 242 ++++++++++---- core/state/history_reader_22.go | 15 +- core/state/intra_block_state.go | 2 +- core/state/recon_state_1.go | 345 +++++++++++++------- go.mod | 2 +- go.sum | 4 +- 12 files changed, 656 insertions(+), 243 deletions(-) create mode 100644 cmd/state/commands/replay_tx.go diff --git a/cmd/rpcdaemon22/commands/eth_receipts.go b/cmd/rpcdaemon22/commands/eth_receipts.go index 1a72299a2f0..0dd4d4d6efa 100644 --- a/cmd/rpcdaemon22/commands/eth_receipts.go +++ b/cmd/rpcdaemon22/commands/eth_receipts.go @@ -128,7 +128,9 @@ func (api *APIImpl) GetLogs(ctx context.Context, crit filters.FilterCriteria) ([ txNumbers := roaring64.New() txNumbers.AddRange(fromTxNum, toTxNum) // [min,max) - topicsBitmap, err := getTopicsBitmap(api._agg, tx, crit.Topics, fromTxNum, toTxNum) + ac := api._agg.MakeContext() + + topicsBitmap, err := getTopicsBitmap(ac, tx, crit.Topics, fromTxNum, toTxNum) if err != nil { return nil, err } @@ -139,7 +141,7 @@ func (api *APIImpl) GetLogs(ctx context.Context, crit filters.FilterCriteria) ([ var addrBitmap *roaring64.Bitmap for _, addr := range crit.Addresses { var bitmapForORing roaring64.Bitmap - it := api._agg.LogAddrIterator(addr.Bytes(), fromTxNum, toTxNum, nil) + it := ac.LogAddrIterator(addr.Bytes(), fromTxNum, toTxNum, nil) for it.HasNext() { 
bitmapForORing.Add(it.Next()) } @@ -162,7 +164,7 @@ func (api *APIImpl) GetLogs(ctx context.Context, crit filters.FilterCriteria) ([ var lastHeader *types.Header var lastSigner *types.Signer var lastRules *params.Rules - stateReader := state.NewHistoryReader22(api._agg, nil /* ReadIndices */) + stateReader := state.NewHistoryReader22(ac, nil /* ReadIndices */) iter := txNumbers.Iterator() for iter.HasNext() { txNum := iter.Next() @@ -233,12 +235,12 @@ func (api *APIImpl) GetLogs(ctx context.Context, crit filters.FilterCriteria) ([ // {{}, {B}} matches any topic in first position AND B in second position // {{A}, {B}} matches topic A in first position AND B in second position // {{A, B}, {C, D}} matches topic (A OR B) in first position AND (C OR D) in second position -func getTopicsBitmap(a *libstate.Aggregator, c kv.Tx, topics [][]common.Hash, from, to uint64) (*roaring64.Bitmap, error) { +func getTopicsBitmap(ac *libstate.AggregatorContext, c kv.Tx, topics [][]common.Hash, from, to uint64) (*roaring64.Bitmap, error) { var result *roaring64.Bitmap for _, sub := range topics { var bitmapForORing roaring64.Bitmap for _, topic := range sub { - it := a.LogTopicIterator(topic.Bytes(), from, to, nil) + it := ac.LogTopicIterator(topic.Bytes(), from, to, nil) for it.HasNext() { bitmapForORing.Add(it.Next()) } diff --git a/cmd/rpcdaemon22/commands/trace_filtering.go b/cmd/rpcdaemon22/commands/trace_filtering.go index 7dd35347407..fc456811666 100644 --- a/cmd/rpcdaemon22/commands/trace_filtering.go +++ b/cmd/rpcdaemon22/commands/trace_filtering.go @@ -253,10 +253,11 @@ func (api *TraceAPIImpl) Filter(ctx context.Context, req TraceFilterRequest, str allTxs roaring64.Bitmap txsTo roaring64.Bitmap ) + ac := api._agg.MakeContext() for _, addr := range req.FromAddress { if addr != nil { - it := api._agg.TraceFromIterator(addr.Bytes(), fromTxNum, toTxNum, nil) + it := ac.TraceFromIterator(addr.Bytes(), fromTxNum, toTxNum, nil) for it.HasNext() { allTxs.Add(it.Next()) } @@ -266,7 
+267,7 @@ func (api *TraceAPIImpl) Filter(ctx context.Context, req TraceFilterRequest, str for _, addr := range req.ToAddress { if addr != nil { - it := api._agg.TraceToIterator(addr.Bytes(), fromTxNum, toTxNum, nil) + it := ac.TraceToIterator(addr.Bytes(), fromTxNum, toTxNum, nil) for it.HasNext() { txsTo.Add(it.Next()) } @@ -319,7 +320,7 @@ func (api *TraceAPIImpl) Filter(ctx context.Context, req TraceFilterRequest, str var lastHeader *types.Header var lastSigner *types.Signer var lastRules *params.Rules - stateReader := state.NewHistoryReader22(api._agg, nil /* ReadIndices */) + stateReader := state.NewHistoryReader22(ac, nil /* ReadIndices */) noop := state.NewNoopWriter() for it.HasNext() { txNum := uint64(it.Next()) diff --git a/cmd/state/commands/erigon22.go b/cmd/state/commands/erigon22.go index 0bcc8538c13..3e0d8202121 100644 --- a/cmd/state/commands/erigon22.go +++ b/cmd/state/commands/erigon22.go @@ -171,7 +171,7 @@ func Erigon22(genesis *core.Genesis, chainConfig *params.ChainConfig, logger log } return h } - readWrapper := &ReaderWrapper22{r: agg, roTx: rwTx} + readWrapper := &ReaderWrapper22{ac: agg.MakeContext(), roTx: rwTx} writeWrapper := &WriterWrapper22{w: agg} for !interrupt { @@ -396,7 +396,7 @@ func processBlock22(startTxNum uint64, trace bool, txNumStart uint64, rw *Reader // Implements StateReader and StateWriter type ReaderWrapper22 struct { roTx kv.Tx - r *libstate.Aggregator + ac *libstate.AggregatorContext blockNum uint64 } @@ -406,7 +406,7 @@ type WriterWrapper22 struct { } func (rw *ReaderWrapper22) ReadAccountData(address common.Address) (*accounts.Account, error) { - enc, err := rw.r.ReadAccountData(address.Bytes(), rw.roTx) + enc, err := rw.ac.ReadAccountData(address.Bytes(), rw.roTx) if err != nil { return nil, err } @@ -444,7 +444,7 @@ func (rw *ReaderWrapper22) ReadAccountData(address common.Address) (*accounts.Ac } func (rw *ReaderWrapper22) ReadAccountStorage(address common.Address, incarnation uint64, key *common.Hash) 
([]byte, error) { - enc, err := rw.r.ReadAccountStorage(address.Bytes(), key.Bytes(), rw.roTx) + enc, err := rw.ac.ReadAccountStorage(address.Bytes(), key.Bytes(), rw.roTx) if err != nil { return nil, err } @@ -458,11 +458,11 @@ func (rw *ReaderWrapper22) ReadAccountStorage(address common.Address, incarnatio } func (rw *ReaderWrapper22) ReadAccountCode(address common.Address, incarnation uint64, codeHash common.Hash) ([]byte, error) { - return rw.r.ReadAccountCode(address.Bytes(), rw.roTx) + return rw.ac.ReadAccountCode(address.Bytes(), rw.roTx) } func (rw *ReaderWrapper22) ReadAccountCodeSize(address common.Address, incarnation uint64, codeHash common.Hash) (int, error) { - return rw.r.ReadAccountCodeSize(address.Bytes(), rw.roTx) + return rw.ac.ReadAccountCodeSize(address.Bytes(), rw.roTx) } func (rw *ReaderWrapper22) ReadAccountIncarnation(address common.Address) (uint64, error) { diff --git a/cmd/state/commands/history22.go b/cmd/state/commands/history22.go index 02890f1f3c3..d1976cd7f75 100644 --- a/cmd/state/commands/history22.go +++ b/cmd/state/commands/history22.go @@ -136,6 +136,7 @@ func History22(genesis *core.Genesis, logger log.Logger) error { return fmt.Errorf("reopen snapshot segments: %w", err) } blockReader = snapshotsync.NewBlockReaderWithSnapshots(allSnapshots) + readWrapper := state.NewHistoryReader22(h.MakeContext(), ri) for !interrupt { select { @@ -169,7 +170,6 @@ func History22(genesis *core.Genesis, logger log.Logger) error { txNum += uint64(len(b.Transactions())) + 2 // Pre and Post block transaction continue } - readWrapper := state.NewHistoryReader22(h, ri) if traceBlock != 0 { readWrapper.SetTrace(blockNum == uint64(traceBlock)) } diff --git a/cmd/state/commands/replay_tx.go b/cmd/state/commands/replay_tx.go new file mode 100644 index 00000000000..e1c5552592f --- /dev/null +++ b/cmd/state/commands/replay_tx.go @@ -0,0 +1,195 @@ +package commands + +import ( + "context" + "fmt" + "path" + "path/filepath" + "sort" + + 
"github.com/ledgerwatch/erigon-lib/kv/memdb" + libstate "github.com/ledgerwatch/erigon-lib/state" + "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/core" + "github.com/ledgerwatch/erigon/core/state" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/core/vm" + "github.com/ledgerwatch/erigon/eth/ethconfig" + "github.com/ledgerwatch/erigon/turbo/services" + "github.com/ledgerwatch/erigon/turbo/snapshotsync" + "github.com/ledgerwatch/log/v3" + "github.com/spf13/cobra" +) + +var txhash string +var txnum uint64 + +func init() { + withDataDir(replayTxCmd) + rootCmd.AddCommand(replayTxCmd) + replayTxCmd.Flags().StringVar(&txhash, "txhash", "", "hash of the transaction to replay") + replayTxCmd.Flags().Uint64Var(&txnum, "txnum", 0, "tx num for replay") +} + +var replayTxCmd = &cobra.Command{ + Use: "replaytx", + Short: "Experimental command to replay a given transaction using only history", + RunE: func(cmd *cobra.Command, args []string) error { + return ReplayTx(genesis) + }, +} + +func ReplayTx(genesis *core.Genesis) error { + var blockReader services.FullBlockReader + var allSnapshots *snapshotsync.RoSnapshots + allSnapshots = snapshotsync.NewRoSnapshots(ethconfig.NewSnapCfg(true, false, true), path.Join(datadir, "snapshots")) + defer allSnapshots.Close() + if err := allSnapshots.Reopen(); err != nil { + return fmt.Errorf("reopen snapshot segments: %w", err) + } + blockReader = snapshotsync.NewBlockReaderWithSnapshots(allSnapshots) + // Compute mapping blockNum -> last TxNum in that block + txNums := make([]uint64, allSnapshots.BlocksAvailable()+1) + if err := allSnapshots.Bodies.View(func(bs []*snapshotsync.BodySegment) error { + for _, b := range bs { + if err := b.Iterate(func(blockNum, baseTxNum, txAmount uint64) { + txNums[blockNum] = baseTxNum + txAmount + }); err != nil { + return err + } + } + return nil + }); err != nil { + return fmt.Errorf("build txNum => blockNum mapping: %w", err) + } + ctx := 
context.Background() + var txNum uint64 + if txhash != "" { + txnHash := common.HexToHash(txhash) + fmt.Printf("Tx hash = [%x]\n", txnHash) + db := memdb.New() + roTx, err := db.BeginRo(ctx) + if err != nil { + return err + } + defer roTx.Rollback() + bn, ok, err := blockReader.TxnLookup(ctx, roTx, txnHash) + if err != nil { + return err + } + if !ok { + return fmt.Errorf("transaction not found") + } + fmt.Printf("Found in block %d\n", bn) + var header *types.Header + if header, err = blockReader.HeaderByNumber(ctx, nil, bn); err != nil { + return err + } + blockHash := header.Hash() + b, _, err := blockReader.BlockWithSenders(ctx, nil, blockHash, bn) + if err != nil { + return err + } + txs := b.Transactions() + var txIndex int + for txIndex = 0; txIndex < len(txs); txIndex++ { + if txs[txIndex].Hash() == txnHash { + fmt.Printf("txIndex = %d\n", txIndex) + break + } + } + txNum = txNums[bn-1] + 1 + uint64(txIndex) + } else { + txNum = txnum + } + fmt.Printf("txNum = %d\n", txNum) + aggPath := filepath.Join(datadir, "erigon23") + agg, err := libstate.NewAggregator(aggPath, AggregationStep) + if err != nil { + return fmt.Errorf("create history: %w", err) + } + defer agg.Close() + ac := agg.MakeContext() + workCh := make(chan state.TxTask) + rs := state.NewReconState(workCh) + if err = replayTxNum(ctx, allSnapshots, blockReader, txNum, txNums, rs, ac); err != nil { + return err + } + return nil +} + +func replayTxNum(ctx context.Context, allSnapshots *snapshotsync.RoSnapshots, blockReader services.FullBlockReader, + txNum uint64, txNums []uint64, rs *state.ReconState, ac *libstate.AggregatorContext, +) error { + bn := uint64(sort.Search(len(txNums), func(i int) bool { + return txNums[i] > txNum + })) + txIndex := int(txNum - txNums[bn-1] - 1) + fmt.Printf("bn=%d, txIndex=%d\n", bn, txIndex) + var header *types.Header + var err error + if header, err = blockReader.HeaderByNumber(ctx, nil, bn); err != nil { + return err + } + blockHash := header.Hash() + b, _, err := 
blockReader.BlockWithSenders(ctx, nil, blockHash, bn) + if err != nil { + return err + } + txn := b.Transactions()[txIndex] + stateWriter := state.NewStateReconWriter(ac, rs) + stateReader := state.NewHistoryReaderNoState(ac, rs) + stateReader.SetTxNum(txNum) + stateWriter.SetTxNum(txNum) + noop := state.NewNoopWriter() + rules := chainConfig.Rules(bn) + for { + stateReader.ResetError() + ibs := state.New(stateReader) + gp := new(core.GasPool).AddGas(txn.GetGas()) + //fmt.Printf("txNum=%d, blockNum=%d, txIndex=%d, gas=%d, input=[%x]\n", txNum, blockNum, txIndex, txn.GetGas(), txn.GetData()) + vmConfig := vm.Config{NoReceipts: true, SkipAnalysis: core.SkipAnalysis(chainConfig, bn)} + contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } + getHeader := func(hash common.Hash, number uint64) *types.Header { + h, err := blockReader.Header(ctx, nil, hash, number) + if err != nil { + panic(err) + } + return h + } + getHashFn := core.GetHashFn(header, getHeader) + logger := log.New() + engine := initConsensusEngine(chainConfig, logger, allSnapshots) + txnHash := txn.Hash() + blockContext := core.NewEVMBlockContext(header, getHashFn, engine, nil /* author */, contractHasTEVM) + ibs.Prepare(txnHash, blockHash, txIndex) + msg, err := txn.AsMessage(*types.MakeSigner(chainConfig, bn), header.BaseFee, rules) + if err != nil { + return err + } + txContext := core.NewEVMTxContext(msg) + vmenv := vm.NewEVM(blockContext, txContext, ibs, chainConfig, vmConfig) + + _, err = core.ApplyMessage(vmenv, msg, gp, true /* refunds */, false /* gasBailout */) + if err != nil { + return fmt.Errorf("could not apply tx %d [%x] failed: %w", txIndex, txnHash, err) + } + if err = ibs.FinalizeTx(rules, noop); err != nil { + return err + } + if dependency, ok := stateReader.ReadError(); ok { + fmt.Printf("dependency %d on %d\n", txNum, dependency) + if err = replayTxNum(ctx, allSnapshots, blockReader, dependency, txNums, rs, ac); err != nil { + return err + } + } else { 
+ if err = ibs.CommitBlock(rules, stateWriter); err != nil { + return err + } + break + } + } + rs.CommitTxNum(txNum) + fmt.Printf("commited %d\n", txNum) + return nil +} diff --git a/cmd/state/commands/state_recon.go b/cmd/state/commands/state_recon.go index db5ea046249..12a214838c4 100644 --- a/cmd/state/commands/state_recon.go +++ b/cmd/state/commands/state_recon.go @@ -131,9 +131,7 @@ func (rw *ReconWorker) runTxTask(txTask state.TxTask) { } else if daoForkTx { //fmt.Printf("txNum=%d, blockNum=%d, DAO fork\n", txNum, blockNum) misc.ApplyDAOHardFork(ibs) - if err := ibs.FinalizeTx(rules, noop); err != nil { - panic(err) - } + ibs.SoftFinalise() } else if txTask.Final { if txTask.BlockNum > 0 { //fmt.Printf("txNum=%d, blockNum=%d, finalisation of the block\n", txNum, blockNum) @@ -147,16 +145,25 @@ func (rw *ReconWorker) runTxTask(txTask state.TxTask) { } else { txHash := txTask.Tx.Hash() gp := new(core.GasPool).AddGas(txTask.Tx.GetGas()) - //fmt.Printf("txNum=%d, blockNum=%d, txIndex=%d, gas=%d, input=[%x]\n", txNum, blockNum, txIndex, txn.GetGas(), txn.GetData()) - usedGas := new(uint64) vmConfig := vm.Config{NoReceipts: true, SkipAnalysis: core.SkipAnalysis(rw.chainConfig, txTask.BlockNum)} contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } - ibs.Prepare(txHash, txTask.BlockHash, txTask.TxIndex) getHashFn := core.GetHashFn(txTask.Header, rw.getHeader) - _, _, err = core.ApplyTransaction(rw.chainConfig, getHashFn, rw.engine, nil, gp, ibs, noop, txTask.Header, txTask.Tx, usedGas, vmConfig, contractHasTEVM) + blockContext := core.NewEVMBlockContext(txTask.Header, getHashFn, rw.engine, nil /* author */, contractHasTEVM) + ibs.Prepare(txHash, txTask.BlockHash, txTask.TxIndex) + msg, err := txTask.Tx.AsMessage(*types.MakeSigner(rw.chainConfig, txTask.BlockNum), txTask.Header.BaseFee, rules) + if err != nil { + panic(err) + } + txContext := core.NewEVMTxContext(msg) + vmenv := vm.NewEVM(blockContext, txContext, ibs, rw.chainConfig, 
vmConfig) + //fmt.Printf("txNum=%d, blockNum=%d, txIndex=%d, evm=%p\n", txTask.TxNum, txTask.BlockNum, txTask.TxIndex, vmenv) + _, err = core.ApplyMessage(vmenv, msg, gp, true /* refunds */, false /* gasBailout */) if err != nil { panic(fmt.Errorf("could not apply tx %d [%x] failed: %w", txTask.TxIndex, txHash, err)) } + if err = ibs.FinalizeTx(rules, noop); err != nil { + panic(err) + } } if dependency, ok := rw.stateReader.ReadError(); ok { //fmt.Printf("rollback %d\n", txNum) @@ -259,12 +266,8 @@ func (fw *FillWorker) fillStorage(plainStateCollector *etl.Collector) { fw.currentKey = key compositeKey := dbutils.PlainGenerateCompositeStorageKey(key[:20], state.FirstContractIncarnation, key[20:]) if len(val) > 0 { - if len(val) > 1 || val[0] != 0 { - if err := plainStateCollector.Collect(compositeKey, val); err != nil { - panic(err) - } - } else { - fmt.Printf("Storage [%x] => [%x]\n", compositeKey, val) + if err := plainStateCollector.Collect(compositeKey, val); err != nil { + panic(err) } //fmt.Printf("Storage [%x] => [%x]\n", compositeKey, val) } @@ -283,19 +286,15 @@ func (fw *FillWorker) fillCode(codeCollector, plainContractCollector *etl.Collec fw.currentKey = key compositeKey := dbutils.PlainGenerateStoragePrefix(key, state.FirstContractIncarnation) if len(val) > 0 { - if len(val) > 1 || val[0] != 0 { - codeHash, err := common.HashData(val) - if err != nil { - panic(err) - } - if err = codeCollector.Collect(codeHash[:], val); err != nil { - panic(err) - } - if err = plainContractCollector.Collect(compositeKey, codeHash[:]); err != nil { - panic(err) - } - } else { - fmt.Printf("Code [%x] => [%x]\n", compositeKey, val) + codeHash, err := common.HashData(val) + if err != nil { + panic(err) + } + if err = codeCollector.Collect(codeHash[:], val); err != nil { + panic(err) + } + if err = plainContractCollector.Collect(compositeKey, codeHash[:]); err != nil { + panic(err) } //fmt.Printf("Code [%x] => [%x]\n", compositeKey, val) } @@ -600,9 +599,9 @@ func 
Recon(genesis *core.Genesis, logger log.Logger) error { } }() var inputTxNum uint64 + var header *types.Header for bn := uint64(0); bn < blockNum; bn++ { - header, err := blockReader.HeaderByNumber(ctx, nil, bn) - if err != nil { + if header, err = blockReader.HeaderByNumber(ctx, nil, bn); err != nil { panic(err) } blockHash := header.Hash() @@ -851,11 +850,15 @@ func Recon(genesis *core.Genesis, logger log.Logger) error { if rwTx, err = db.BeginRw(ctx); err != nil { return err } - if _, err = stagedsync.RegenerateIntermediateHashes("recon", rwTx, stagedsync.StageTrieCfg(db, false /* checkRoot */, false /* saveHashesToDB */, false /* badBlockHalt */, tmpDir, blockReader, nil /* HeaderDownload */), common.Hash{}, make(chan struct{}, 1)); err != nil { + var rootHash common.Hash + if rootHash, err = stagedsync.RegenerateIntermediateHashes("recon", rwTx, stagedsync.StageTrieCfg(db, false /* checkRoot */, false /* saveHashesToDB */, false /* badBlockHalt */, tmpDir, blockReader, nil /* HeaderDownload */), common.Hash{}, make(chan struct{}, 1)); err != nil { return err } if err = rwTx.Commit(); err != nil { return err } + if rootHash != header.Root { + log.Error("Incorrect root hash", "expected", fmt.Sprintf("%x", header.Root)) + } return nil } diff --git a/cmd/state/commands/state_recon_1.go b/cmd/state/commands/state_recon_1.go index b149406a892..0ae69fe40f6 100644 --- a/cmd/state/commands/state_recon_1.go +++ b/cmd/state/commands/state_recon_1.go @@ -11,6 +11,7 @@ import ( "path/filepath" "runtime" "sync" + "sync/atomic" "syscall" "time" @@ -31,6 +32,7 @@ import ( "github.com/ledgerwatch/erigon/turbo/snapshotsync" "github.com/ledgerwatch/log/v3" "github.com/spf13/cobra" + "golang.org/x/sync/semaphore" ) func init() { @@ -103,11 +105,12 @@ func (rw *ReconWorker1) run() { } rw.engine = initConsensusEngine(rw.chainConfig, rw.logger, rw.allSnapshots) for txTask, ok := rw.rs.Schedule(); ok; txTask, ok = rw.rs.Schedule() { - rw.runTxTask(txTask) + rw.runTxTask(&txTask) + 
rw.resultCh <- txTask // Needs to have outside of the lock } } -func (rw *ReconWorker1) runTxTask(txTask state.TxTask) { +func (rw *ReconWorker1) runTxTask(txTask *state.TxTask) { rw.lock.Lock() defer rw.lock.Unlock() txTask.Error = nil @@ -115,74 +118,103 @@ func (rw *ReconWorker1) runTxTask(txTask state.TxTask) { rw.stateWriter.SetTxNum(txTask.TxNum) rw.stateReader.ResetReadSet() rw.stateWriter.ResetWriteSet() - rules := rw.chainConfig.Rules(txTask.BlockNum) ibs := state.New(rw.stateReader) daoForkTx := rw.chainConfig.DAOForkSupport && rw.chainConfig.DAOForkBlock != nil && rw.chainConfig.DAOForkBlock.Uint64() == txTask.BlockNum && txTask.TxIndex == -1 var err error if txTask.BlockNum == 0 && txTask.TxIndex == -1 { - fmt.Printf("txNum=%d, blockNum=%d, Genesis\n", txTask.TxNum, txTask.BlockNum) + //fmt.Printf("txNum=%d, blockNum=%d, Genesis\n", txTask.TxNum, txTask.BlockNum) // Genesis block _, ibs, err = rw.genesis.ToBlock() if err != nil { panic(err) } } else if daoForkTx { - fmt.Printf("txNum=%d, blockNum=%d, DAO fork\n", txTask.TxNum, txTask.BlockNum) + //fmt.Printf("txNum=%d, blockNum=%d, DAO fork\n", txTask.TxNum, txTask.BlockNum) misc.ApplyDAOHardFork(ibs) ibs.SoftFinalise() } else if txTask.TxIndex == -1 { // Block initialisation } else if txTask.Final { if txTask.BlockNum > 0 { - fmt.Printf("txNum=%d, blockNum=%d, finalisation of the block\n", txTask.TxNum, txTask.BlockNum) + //fmt.Printf("txNum=%d, blockNum=%d, finalisation of the block\n", txTask.TxNum, txTask.BlockNum) // End of block transaction in a block if _, _, err := rw.engine.Finalize(rw.chainConfig, txTask.Header, ibs, txTask.Block.Transactions(), txTask.Block.Uncles(), nil /* receipts */, nil, nil, nil); err != nil { panic(fmt.Errorf("finalize of block %d failed: %w", txTask.BlockNum, err)) } } } else { - fmt.Printf("txNum=%d, blockNum=%d, txIndex=%d\n", txTask.TxNum, txTask.BlockNum, txTask.TxIndex) + //fmt.Printf("txNum=%d, blockNum=%d, txIndex=%d\n", txTask.TxNum, txTask.BlockNum, 
txTask.TxIndex) txHash := txTask.Tx.Hash() gp := new(core.GasPool).AddGas(txTask.Tx.GetGas()) - //fmt.Printf("txNum=%d, blockNum=%d, txIndex=%d, gas=%d, input=[%x]\n", txNum, blockNum, txIndex, txn.GetGas(), txn.GetData()) - usedGas := new(uint64) vmConfig := vm.Config{NoReceipts: true, SkipAnalysis: core.SkipAnalysis(rw.chainConfig, txTask.BlockNum)} contractHasTEVM := func(contractHash common.Hash) (bool, error) { return false, nil } ibs.Prepare(txHash, txTask.BlockHash, txTask.TxIndex) - vmConfig.SkipAnalysis = core.SkipAnalysis(rw.chainConfig, txTask.BlockNum) getHashFn := core.GetHashFn(txTask.Header, rw.getHeader) blockContext := core.NewEVMBlockContext(txTask.Header, getHashFn, rw.engine, nil /* author */, contractHasTEVM) - vmenv := vm.NewEVM(blockContext, vm.TxContext{}, ibs, rw.chainConfig, vmConfig) - msg, err := txTask.Tx.AsMessage(*types.MakeSigner(rw.chainConfig, txTask.BlockNum), txTask.Header.BaseFee, rules) + msg, err := txTask.Tx.AsMessage(*types.MakeSigner(rw.chainConfig, txTask.BlockNum), txTask.Header.BaseFee, txTask.Rules) if err != nil { panic(err) } txContext := core.NewEVMTxContext(msg) - - // Update the evm with the new transaction context. 
- vmenv.Reset(txContext, ibs) - - result, err := core.ApplyMessage(vmenv, msg, gp, true /* refunds */, false /* gasBailout */) - if err != nil { + vmenv := vm.NewEVM(blockContext, txContext, ibs, rw.chainConfig, vmConfig) + if _, err = core.ApplyMessage(vmenv, msg, gp, true /* refunds */, false /* gasBailout */); err != nil { txTask.Error = err + //fmt.Printf("error=%v\n", err) } // Update the state with pending changes ibs.SoftFinalise() - *usedGas += result.UsedGas } // Prepare read set, write set and balanceIncrease set and send for serialisation if txTask.Error == nil { txTask.BalanceIncreaseSet = ibs.BalanceIncreaseSet() - for addr, bal := range txTask.BalanceIncreaseSet { - fmt.Printf("[%x]=>[%d]\n", addr, &bal) - } - if err = ibs.MakeWriteSet(rules, rw.stateWriter); err != nil { + //for addr, bal := range txTask.BalanceIncreaseSet { + // fmt.Printf("[%x]=>[%d]\n", addr, &bal) + //} + if err = ibs.MakeWriteSet(txTask.Rules, rw.stateWriter); err != nil { panic(err) } - txTask.ReadKeys, txTask.ReadVals = rw.stateReader.ReadSet() - txTask.WriteKeys, txTask.WriteVals = rw.stateWriter.WriteSet() + txTask.ReadLists = rw.stateReader.ReadSet() + txTask.WriteLists = rw.stateWriter.WriteSet() + size := (20 + 32) * len(txTask.BalanceIncreaseSet) + for _, list := range txTask.ReadLists { + for _, b := range list.Keys { + size += len(b) + } + for _, b := range list.Vals { + size += len(b) + } + } + for _, list := range txTask.WriteLists { + for _, b := range list.Keys { + size += len(b) + } + for _, b := range list.Vals { + size += len(b) + } + } + txTask.ResultsSize = int64(size) + } +} + +func processResultQueue(rws *state.TxTaskQueue, outputTxNum *uint64, rs *state.ReconState1, applyTx kv.Tx, + triggerCount *uint64, outputBlockNum *uint64, repeatCount *uint64, resultsSize *int64) { + for rws.Len() > 0 && (*rws)[0].TxNum == *outputTxNum { + txTask := heap.Pop(rws).(state.TxTask) + atomic.AddInt64(resultsSize, -txTask.ResultsSize) + if txTask.Error == nil && 
rs.ReadsValid(txTask.ReadLists) { + if err := rs.Apply(txTask.Rules.IsSpuriousDragon, applyTx, txTask); err != nil { + panic(err) + } + *triggerCount += rs.CommitTxNum(txTask.Sender, txTask.TxNum) + *outputTxNum++ + *outputBlockNum = txTask.BlockNum + //fmt.Printf("Applied %d block %d txIndex %d\n", txTask.TxNum, txTask.BlockNum, txTask.TxIndex) + } else { + rs.AddWork(txTask) + *repeatCount++ + //fmt.Printf("Rolled back %d block %d txIndex %d\n", txTask.TxNum, txTask.BlockNum, txTask.TxIndex) + } } - rw.resultCh <- txTask } func Recon1(genesis *core.Genesis, logger log.Logger) error { @@ -204,7 +236,8 @@ func Recon1(genesis *core.Genesis, logger log.Logger) error { } else if err = os.RemoveAll(reconDbPath); err != nil { return err } - db, err := kv2.NewMDBX(logger).Path(reconDbPath).WriteMap().Open() + limiter := semaphore.NewWeighted(int64(runtime.NumCPU() + 1)) + db, err := kv2.NewMDBX(logger).Path(reconDbPath).RoTxsLimiter(limiter).Open() if err != nil { return err } @@ -236,9 +269,18 @@ func Recon1(genesis *core.Genesis, logger log.Logger) error { fmt.Printf("Corresponding block num = %d, txNum = %d\n", blockNum, txNum) workerCount := runtime.NumCPU() workCh := make(chan state.TxTask, 128) - rs := state.NewReconState1(workCh) + rs := state.NewReconState1() var lock sync.RWMutex reconWorkers := make([]*ReconWorker1, workerCount) + var applyTx kv.Tx + defer func() { + if applyTx != nil { + applyTx.Rollback() + } + }() + if applyTx, err = db.BeginRo(ctx); err != nil { + return err + } roTxs := make([]kv.Tx, workerCount) defer func() { for i := 0; i < workerCount; i++ { @@ -263,71 +305,118 @@ func Recon1(genesis *core.Genesis, logger log.Logger) error { for i := 0; i < workerCount; i++ { go reconWorkers[i].run() } - commitThreshold := uint64(256 * 1024 * 1024) + commitThreshold := uint64(1024 * 1024 * 1024) + resultsThreshold := int64(1024 * 1024 * 1024) count := uint64(0) - rollbackCount := uint64(0) + repeatCount := uint64(0) + triggerCount := uint64(0) total := 
txNum prevCount := uint64(0) - prevRollbackCount := uint64(0) + prevRepeatCount := uint64(0) + //prevTriggerCount := uint64(0) + resultsSize := int64(0) prevTime := time.Now() logEvery := time.NewTicker(logInterval) defer logEvery.Stop() var rws state.TxTaskQueue + var rwsLock sync.Mutex + rwsReceiveCond := sync.NewCond(&rwsLock) heap.Init(&rws) var outputTxNum uint64 + var inputBlockNum, outputBlockNum uint64 + var prevOutputBlockNum uint64 // Go-routine gathering results from the workers go func() { + defer rs.Finish() for outputTxNum < txNum { select { case txTask := <-resultCh: - if txTask.TxNum == outputTxNum { - // Try to apply without placing on the queue first - if txTask.Error == nil && rs.ReadsValid(txTask.ReadKeys, txTask.ReadVals) { - rs.Apply(txTask.WriteKeys, txTask.WriteVals, txTask.BalanceIncreaseSet) - rs.CommitTxNum(txTask.Sender, txTask.TxNum) - outputTxNum++ - } else { - rs.RollbackTx(txTask) - } - } else { + //fmt.Printf("Saved %d block %d txIndex %d\n", txTask.TxNum, txTask.BlockNum, txTask.TxIndex) + func() { + rwsLock.Lock() + defer rwsLock.Unlock() + atomic.AddInt64(&resultsSize, txTask.ResultsSize) heap.Push(&rws, txTask) - } - for rws.Len() > 0 && rws[0].TxNum == outputTxNum { - txTask = heap.Pop(&rws).(state.TxTask) - if txTask.Error == nil && rs.ReadsValid(txTask.ReadKeys, txTask.ReadVals) { - rs.Apply(txTask.WriteKeys, txTask.WriteVals, txTask.BalanceIncreaseSet) - rs.CommitTxNum(txTask.Sender, txTask.TxNum) - outputTxNum++ - } else { - rs.RollbackTx(txTask) - } - } + processResultQueue(&rws, &outputTxNum, rs, applyTx, &triggerCount, &outputBlockNum, &repeatCount, &resultsSize) + rwsReceiveCond.Signal() + }() case <-logEvery.C: var m runtime.MemStats libcommon.ReadMemStats(&m) sizeEstimate := rs.SizeEstimate() count = rs.DoneCount() - rollbackCount = rs.RollbackCount() currentTime := time.Now() interval := currentTime.Sub(prevTime) speedTx := float64(count-prevCount) / (float64(interval) / float64(time.Second)) + speedBlock := 
float64(outputBlockNum-prevOutputBlockNum) / (float64(interval) / float64(time.Second)) progress := 100.0 * float64(count) / float64(total) var repeatRatio float64 if count > prevCount { - repeatRatio = 100.0 * float64(rollbackCount-prevRollbackCount) / float64(count-prevCount) + repeatRatio = 100.0 * float64(repeatCount-prevRepeatCount) / float64(count-prevCount) } - prevTime = currentTime - prevCount = count - prevRollbackCount = rollbackCount - log.Info("Transaction replay", "workers", workerCount, "progress", fmt.Sprintf("%.2f%%", progress), "tx/s", fmt.Sprintf("%.1f", speedTx), "repeat ratio", fmt.Sprintf("%.2f%%", repeatRatio), "buffer", libcommon.ByteCount(sizeEstimate), + log.Info("Transaction replay", + //"workers", workerCount, + "at block", outputBlockNum, + "input block", atomic.LoadUint64(&inputBlockNum), + "progress", fmt.Sprintf("%.2f%%", progress), + "blk/s", fmt.Sprintf("%.1f", speedBlock), + "tx/s", fmt.Sprintf("%.1f", speedTx), + //"repeats", repeatCount-prevRepeatCount, + //"triggered", triggerCount-prevTriggerCount, + "result queue", rws.Len(), + "results size", libcommon.ByteCount(uint64(atomic.LoadInt64(&resultsSize))), + "repeat ratio", fmt.Sprintf("%.2f%%", repeatRatio), + "buffer", libcommon.ByteCount(sizeEstimate), "alloc", libcommon.ByteCount(m.Alloc), "sys", libcommon.ByteCount(m.Sys), ) + prevTime = currentTime + prevCount = count + prevOutputBlockNum = outputBlockNum + prevRepeatCount = repeatCount + //prevTriggerCount = triggerCount if sizeEstimate >= commitThreshold { commitStart := time.Now() log.Info("Committing...") err := func() error { - lock.Lock() + rwsLock.Lock() + defer rwsLock.Unlock() + // Drain results (and process) channel because read sets do not carry over + for { + var drained bool + for !drained { + select { + case txTask := <-resultCh: + atomic.AddInt64(&resultsSize, txTask.ResultsSize) + heap.Push(&rws, txTask) + default: + drained = true + } + } + processResultQueue(&rws, &outputTxNum, rs, applyTx, &triggerCount, 
&outputBlockNum, &repeatCount, &resultsSize) + if rws.Len() == 0 { + break + } + } + rwsReceiveCond.Signal() + lock.Lock() // This is to prevent workers from starting work on any new txTask defer lock.Unlock() + // Drain results channel because read sets do not carry over + var drained bool + for !drained { + select { + case txTask := <-resultCh: + rs.AddWork(txTask) + default: + drained = true + } + } + // Drain results queue as well + for rws.Len() > 0 { + txTask := heap.Pop(&rws).(state.TxTask) + atomic.AddInt64(&resultsSize, -txTask.ResultsSize) + rs.AddWork(txTask) + } + applyTx.Rollback() for i := 0; i < workerCount; i++ { roTxs[i].Rollback() } @@ -341,6 +430,9 @@ func Recon1(genesis *core.Genesis, logger log.Logger) error { if err = rwTx.Commit(); err != nil { return err } + if applyTx, err = db.BeginRo(ctx); err != nil { + return err + } for i := 0; i < workerCount; i++ { if roTxs[i], err = db.BeginRo(ctx); err != nil { return err @@ -358,21 +450,32 @@ func Recon1(genesis *core.Genesis, logger log.Logger) error { } }() var inputTxNum uint64 + var header *types.Header for blockNum := uint64(0); blockNum <= block; blockNum++ { - header, err := blockReader.HeaderByNumber(ctx, nil, blockNum) - if err != nil { - panic(err) + atomic.StoreUint64(&inputBlockNum, blockNum) + rules := chainConfig.Rules(blockNum) + if header, err = blockReader.HeaderByNumber(ctx, nil, blockNum); err != nil { + return err } blockHash := header.Hash() b, _, err := blockReader.BlockWithSenders(ctx, nil, blockHash, blockNum) if err != nil { - panic(err) + return err } txs := b.Transactions() for txIndex := -1; txIndex <= len(txs); txIndex++ { + // Do not oversend, wait for the result heap to go under certain size + func() { + rwsLock.Lock() + defer rwsLock.Unlock() + for rws.Len() > 128 || atomic.LoadInt64(&resultsSize) >= resultsThreshold || rs.SizeEstimate() >= commitThreshold { + rwsReceiveCond.Wait() + } + }() txTask := state.TxTask{ Header: header, BlockNum: blockNum, + Rules: rules, 
Block: b, TxNum: inputTxNum, TxIndex: txIndex, @@ -384,13 +487,18 @@ func Recon1(genesis *core.Genesis, logger log.Logger) error { if sender, ok := txs[txIndex].GetSender(); ok { txTask.Sender = &sender } + if ok := rs.RegisterSender(txTask); ok { + rs.AddWork(txTask) + } + } else { + rs.AddWork(txTask) } - workCh <- txTask inputTxNum++ } } close(workCh) wg.Wait() + applyTx.Rollback() for i := 0; i < workerCount; i++ { roTxs[i].Rollback() } @@ -424,11 +532,15 @@ func Recon1(genesis *core.Genesis, logger log.Logger) error { if rwTx, err = db.BeginRw(ctx); err != nil { return err } - if _, err = stagedsync.RegenerateIntermediateHashes("recon", rwTx, stagedsync.StageTrieCfg(db, false /* checkRoot */, false /* saveHashesToDB */, false /* badBlockHalt */, tmpDir, blockReader, nil /* HeaderDownload */), common.Hash{}, make(chan struct{}, 1)); err != nil { + var rootHash common.Hash + if rootHash, err = stagedsync.RegenerateIntermediateHashes("recon", rwTx, stagedsync.StageTrieCfg(db, false /* checkRoot */, false /* saveHashesToDB */, false /* badBlockHalt */, tmpDir, blockReader, nil /* HeaderDownload */), common.Hash{}, make(chan struct{}, 1)); err != nil { return err } if err = rwTx.Commit(); err != nil { return err } + if rootHash != header.Root { + log.Error("Incorrect root hash", "expected", fmt.Sprintf("%x", header.Root)) + } return nil } diff --git a/core/state/history_reader_22.go b/core/state/history_reader_22.go index be89d3bf43b..d0844f868de 100644 --- a/core/state/history_reader_22.go +++ b/core/state/history_reader_22.go @@ -21,14 +21,14 @@ func bytesToUint64(buf []byte) (x uint64) { // Implements StateReader and StateWriter type HistoryReader22 struct { - a *libstate.Aggregator + ac *libstate.AggregatorContext ri *libstate.ReadIndices txNum uint64 trace bool } -func NewHistoryReader22(a *libstate.Aggregator, ri *libstate.ReadIndices) *HistoryReader22 { - return &HistoryReader22{a: a, ri: ri} +func NewHistoryReader22(ac *libstate.AggregatorContext, ri 
*libstate.ReadIndices) *HistoryReader22 { + return &HistoryReader22{ac: ac, ri: ri} } func (hr *HistoryReader22) SetTx(tx kv.RwTx) { @@ -37,7 +37,6 @@ func (hr *HistoryReader22) SetTx(tx kv.RwTx) { func (hr *HistoryReader22) SetTxNum(txNum uint64) { hr.txNum = txNum - hr.a.SetTxNum(txNum) if hr.ri != nil { hr.ri.SetTxNum(txNum) } @@ -57,7 +56,7 @@ func (hr *HistoryReader22) ReadAccountData(address common.Address) (*accounts.Ac return nil, err } } - enc, err := hr.a.ReadAccountDataBeforeTxNum(address.Bytes(), hr.txNum, nil /* roTx */) + enc, err := hr.ac.ReadAccountDataBeforeTxNum(address.Bytes(), hr.txNum, nil /* roTx */) if err != nil { return nil, err } @@ -108,7 +107,7 @@ func (hr *HistoryReader22) ReadAccountStorage(address common.Address, incarnatio return nil, err } } - enc, err := hr.a.ReadAccountStorageBeforeTxNum(address.Bytes(), key.Bytes(), hr.txNum, nil /* roTx */) + enc, err := hr.ac.ReadAccountStorageBeforeTxNum(address.Bytes(), key.Bytes(), hr.txNum, nil /* roTx */) if err != nil { return nil, err } @@ -131,7 +130,7 @@ func (hr *HistoryReader22) ReadAccountCode(address common.Address, incarnation u return nil, err } } - enc, err := hr.a.ReadAccountCodeBeforeTxNum(address.Bytes(), hr.txNum, nil /* roTx */) + enc, err := hr.ac.ReadAccountCodeBeforeTxNum(address.Bytes(), hr.txNum, nil /* roTx */) if err != nil { return nil, err } @@ -147,7 +146,7 @@ func (hr *HistoryReader22) ReadAccountCodeSize(address common.Address, incarnati return 0, err } } - size, err := hr.a.ReadAccountCodeSizeBeforeTxNum(address.Bytes(), hr.txNum, nil /* roTx */) + size, err := hr.ac.ReadAccountCodeSizeBeforeTxNum(address.Bytes(), hr.txNum, nil /* roTx */) if err != nil { return 0, err } diff --git a/core/state/intra_block_state.go b/core/state/intra_block_state.go index 4c8dc91db7b..ba4859c9103 100644 --- a/core/state/intra_block_state.go +++ b/core/state/intra_block_state.go @@ -172,7 +172,7 @@ func (sdb *IntraBlockState) AddRefund(gas uint64) { func (sdb *IntraBlockState) 
SubRefund(gas uint64) { sdb.journal.append(refundChange{prev: sdb.refund}) if gas > sdb.refund { - panic("Refund counter below zero") + sdb.setErrorUnsafe(fmt.Errorf("Refund counter below zero")) } sdb.refund -= gas } diff --git a/core/state/recon_state_1.go b/core/state/recon_state_1.go index 3487a7e4ab8..7572a3a61cf 100644 --- a/core/state/recon_state_1.go +++ b/core/state/recon_state_1.go @@ -5,14 +5,19 @@ import ( "container/heap" "encoding/binary" "fmt" + "sort" "sync" + "unsafe" + "github.com/google/btree" "github.com/holiman/uint256" + libcommon "github.com/ledgerwatch/erigon-lib/common" "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon/common" "github.com/ledgerwatch/erigon/common/dbutils" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/core/types/accounts" + "github.com/ledgerwatch/erigon/params" ) // ReadWriteSet contains ReadSet, WriteSet and BalanceIncrease of a transaction, @@ -21,6 +26,7 @@ import ( type TxTask struct { TxNum uint64 BlockNum uint64 + Rules *params.Rules Header *types.Header Block *types.Block BlockHash common.Hash @@ -29,10 +35,9 @@ type TxTask struct { Final bool Tx types.Transaction BalanceIncreaseSet map[common.Address]uint256.Int - ReadKeys map[string][][]byte - ReadVals map[string][][]byte - WriteKeys map[string][][]byte - WriteVals map[string][][]byte + ReadLists map[string]*KvList + WriteLists map[string]*KvList + ResultsSize int64 Error error } @@ -60,48 +65,50 @@ func (h *TxTaskQueue) Pop() interface{} { return c[len(c)-1] } +const CodeSizeTable = "CodeSize" + type ReconState1 struct { - lock sync.RWMutex - triggers map[uint64]TxTask - senderTxNums map[common.Address]uint64 - workCh chan TxTask - queue TxTaskQueue - changes map[string]map[string][]byte - sizeEstimate uint64 - rollbackCount uint64 - txsDone uint64 -} - -func NewReconState1(workCh chan TxTask) *ReconState1 { + lock sync.RWMutex + receiveWork *sync.Cond + triggers map[uint64]TxTask + senderTxNums 
map[common.Address]uint64 + triggerLock sync.RWMutex + queue TxTaskQueue + queueLock sync.Mutex + changes map[string]*btree.BTreeG[ReconStateItem1] + sizeEstimate uint64 + txsDone uint64 + finished bool +} + +type ReconStateItem1 struct { + key []byte + val []byte +} + +func reconStateItem1Less(i, j ReconStateItem1) bool { + return bytes.Compare(i.key, j.key) < 0 +} + +func NewReconState1() *ReconState1 { rs := &ReconState1{ - workCh: workCh, triggers: map[uint64]TxTask{}, senderTxNums: map[common.Address]uint64{}, - changes: map[string]map[string][]byte{}, + changes: map[string]*btree.BTreeG[ReconStateItem1]{}, } + rs.receiveWork = sync.NewCond(&rs.queueLock) return rs } func (rs *ReconState1) put(table string, key, val []byte) { t, ok := rs.changes[table] if !ok { - t = map[string][]byte{} - rs.changes[table] = t - } - t[string(key)] = val - rs.sizeEstimate += uint64(len(key)) + uint64(len(val)) -} - -func (rs *ReconState1) Delete(table string, key []byte) { - rs.lock.Lock() - defer rs.lock.Unlock() - t, ok := rs.changes[table] - if !ok { - t = map[string][]byte{} + t = btree.NewG[ReconStateItem1](32, reconStateItem1Less) rs.changes[table] = t } - t[string(key)] = nil - rs.sizeEstimate += uint64(len(key)) + item := ReconStateItem1{key: libcommon.Copy(key), val: libcommon.Copy(val)} + t.ReplaceOrInsert(item) + rs.sizeEstimate += uint64(unsafe.Sizeof(item)) + uint64(len(key)) + uint64(len(val)) } func (rs *ReconState1) Get(table string, key []byte) []byte { @@ -115,40 +122,45 @@ func (rs *ReconState1) get(table string, key []byte) []byte { if !ok { return nil } - return t[string(key)] + if i, ok := t.Get(ReconStateItem1{key: key}); ok { + return i.val + } + return nil } func (rs *ReconState1) Flush(rwTx kv.RwTx) error { rs.lock.Lock() defer rs.lock.Unlock() for table, t := range rs.changes { - for ks, val := range t { - if len(val) > 0 { - if err := rwTx.Put(table, []byte(ks), val); err != nil { - return err + var err error + t.Ascend(func(item ReconStateItem1) 
bool { + if len(item.val) == 0 { + if err = rwTx.Delete(table, item.key, nil); err != nil { + return false + } + //fmt.Printf("Flush [%x]=>\n", ks) + } else { + if err = rwTx.Put(table, item.key, item.val); err != nil { + return false } + //fmt.Printf("Flush [%x]=>[%x]\n", ks, val) } + return true + }) + if err != nil { + return err } + t.Clear(true) } - rs.changes = map[string]map[string][]byte{} rs.sizeEstimate = 0 return nil } func (rs *ReconState1) Schedule() (TxTask, bool) { - rs.lock.Lock() - defer rs.lock.Unlock() - for rs.queue.Len() < 16 { - txTask, ok := <-rs.workCh - if !ok { - // No more work, channel is closed - break - } - if txTask.Sender == nil { - heap.Push(&rs.queue, txTask) - } else if rs.registerSender(txTask) { - heap.Push(&rs.queue, txTask) - } + rs.queueLock.Lock() + defer rs.queueLock.Unlock() + for !rs.finished && rs.queue.Len() == 0 { + rs.receiveWork.Wait() } if rs.queue.Len() > 0 { return heap.Pop(&rs.queue).(TxTask), true @@ -156,7 +168,9 @@ func (rs *ReconState1) Schedule() (TxTask, bool) { return TxTask{}, false } -func (rs *ReconState1) registerSender(txTask TxTask) bool { +func (rs *ReconState1) RegisterSender(txTask TxTask) bool { + rs.triggerLock.Lock() + defer rs.triggerLock.Unlock() lastTxNum, deferral := rs.senderTxNums[*txTask.Sender] if deferral { // Transactions with the same sender have obvious data dependency, no point running it before lastTxNum @@ -169,11 +183,16 @@ func (rs *ReconState1) registerSender(txTask TxTask) bool { return !deferral } -func (rs *ReconState1) CommitTxNum(sender *common.Address, txNum uint64) { - rs.lock.Lock() - defer rs.lock.Unlock() +func (rs *ReconState1) CommitTxNum(sender *common.Address, txNum uint64) uint64 { + rs.queueLock.Lock() + defer rs.queueLock.Unlock() + rs.triggerLock.Lock() + defer rs.triggerLock.Unlock() + count := uint64(0) if triggered, ok := rs.triggers[txNum]; ok { heap.Push(&rs.queue, triggered) + rs.receiveWork.Signal() + count++ delete(rs.triggers, txNum) } if sender != 
nil { @@ -183,37 +202,65 @@ func (rs *ReconState1) CommitTxNum(sender *common.Address, txNum uint64) { } } rs.txsDone++ + return count } -func (rs *ReconState1) RollbackTx(txTask TxTask) { - rs.lock.Lock() - defer rs.lock.Unlock() +func (rs *ReconState1) AddWork(txTask TxTask) { + txTask.BalanceIncreaseSet = nil + txTask.ReadLists = nil + txTask.WriteLists = nil + txTask.ResultsSize = 0 + rs.queueLock.Lock() + defer rs.queueLock.Unlock() heap.Push(&rs.queue, txTask) - rs.rollbackCount++ + rs.receiveWork.Signal() } -func (rs *ReconState1) Apply(writeKeys, writeVals map[string][][]byte, balanceIncreaseSet map[common.Address]uint256.Int) { +func (rs *ReconState1) Finish() { + rs.queueLock.Lock() + defer rs.queueLock.Unlock() + rs.finished = true + rs.receiveWork.Broadcast() +} + +func (rs *ReconState1) Apply(emptyRemoval bool, roTx kv.Tx, txTask TxTask) error { rs.lock.Lock() defer rs.lock.Unlock() - for table, keyList := range writeKeys { - valList := writeVals[table] - for i, key := range keyList { - val := valList[i] - rs.put(table, key, val) + if txTask.WriteLists != nil { + for table, list := range txTask.WriteLists { + for i, key := range list.Keys { + val := list.Vals[i] + rs.put(table, key, val) + } } } - for addr, increase := range balanceIncreaseSet { + for addr, increase := range txTask.BalanceIncreaseSet { + //if increase.IsZero() { + // continue + //} enc := rs.get(kv.PlainState, addr.Bytes()) + if enc == nil { + var err error + enc, err = roTx.GetOne(kv.PlainState, addr.Bytes()) + if err != nil { + return err + } + } var a accounts.Account if err := a.DecodeForStorage(enc); err != nil { - panic(err) + return err } a.Balance.Add(&a.Balance, &increase) - l := a.EncodingLengthForStorage() - enc = make([]byte, l) - a.EncodeForStorage(enc) + if emptyRemoval && a.Nonce == 0 && a.Balance.IsZero() && a.IsEmptyCodeHash() { + enc = []byte{} + } else { + l := a.EncodingLengthForStorage() + enc = make([]byte, l) + a.EncodeForStorage(enc) + } rs.put(kv.PlainState, 
addr.Bytes(), enc) } + return nil } func (rs *ReconState1) DoneCount() uint64 { @@ -222,49 +269,80 @@ func (rs *ReconState1) DoneCount() uint64 { return rs.txsDone } -func (rs *ReconState1) RollbackCount() uint64 { - rs.lock.RLock() - defer rs.lock.RUnlock() - return rs.rollbackCount -} - func (rs *ReconState1) SizeEstimate() uint64 { rs.lock.RLock() defer rs.lock.RUnlock() return rs.sizeEstimate } -func (rs *ReconState1) ReadsValid(readKeys, readVals map[string][][]byte) bool { +func (rs *ReconState1) ReadsValid(readLists map[string]*KvList) bool { rs.lock.RLock() defer rs.lock.RUnlock() - for table, keyList := range readKeys { - t, ok := rs.changes[table] + //fmt.Printf("ValidReads\n") + for table, list := range readLists { + //fmt.Printf("Table %s\n", table) + var t *btree.BTreeG[ReconStateItem1] + var ok bool + if table == CodeSizeTable { + t, ok = rs.changes[kv.Code] + } else { + t, ok = rs.changes[table] + } if !ok { continue } - valList := readVals[table] - for i, key := range keyList { - val := valList[i] - if rereadVal, ok := t[string(key)]; ok { - if !bytes.Equal(val, rereadVal) { + for i, key := range list.Keys { + val := list.Vals[i] + if item, ok := t.Get(ReconStateItem1{key: key}); ok { + //fmt.Printf("key [%x] => [%x] vs [%x]\n", key, val, rereadVal) + if table == CodeSizeTable { + if binary.BigEndian.Uint64(val) != uint64(len(item.val)) { + return false + } + } else if !bytes.Equal(val, item.val) { return false } + } else { + //fmt.Printf("key [%x] => [%x] not present in changes\n", key, val) } } } return true } +// KvList sort.Interface to sort write list by keys +type KvList struct { + Keys, Vals [][]byte +} + +func (l KvList) Len() int { + return len(l.Keys) +} + +func (l KvList) Less(i, j int) bool { + return bytes.Compare(l.Keys[i], l.Keys[j]) < 0 +} + +func (l *KvList) Swap(i, j int) { + l.Keys[i], l.Keys[j] = l.Keys[j], l.Keys[i] + l.Vals[i], l.Vals[j] = l.Vals[j], l.Vals[i] +} + type StateReconWriter1 struct { - rs *ReconState1 - txNum 
uint64 - writeKeys map[string][][]byte - writeVals map[string][][]byte + rs *ReconState1 + txNum uint64 + writeLists map[string]*KvList } func NewStateReconWriter1(rs *ReconState1) *StateReconWriter1 { return &StateReconWriter1{ rs: rs, + writeLists: map[string]*KvList{ + kv.PlainState: {}, + kv.Code: {}, + kv.PlainContractCode: {}, + kv.IncarnationMap: {}, + }, } } @@ -273,42 +351,49 @@ func (w *StateReconWriter1) SetTxNum(txNum uint64) { } func (w *StateReconWriter1) ResetWriteSet() { - w.writeKeys = map[string][][]byte{} - w.writeVals = map[string][][]byte{} + w.writeLists = map[string]*KvList{ + kv.PlainState: {}, + kv.Code: {}, + kv.PlainContractCode: {}, + kv.IncarnationMap: {}, + } } -func (w *StateReconWriter1) WriteSet() (map[string][][]byte, map[string][][]byte) { - return w.writeKeys, w.writeVals +func (w *StateReconWriter1) WriteSet() map[string]*KvList { + for _, list := range w.writeLists { + sort.Sort(list) + } + return w.writeLists } func (w *StateReconWriter1) UpdateAccountData(address common.Address, original, account *accounts.Account) error { value := make([]byte, account.EncodingLengthForStorage()) account.EncodeForStorage(value) //fmt.Printf("account [%x]=>{Balance: %d, Nonce: %d, Root: %x, CodeHash: %x} txNum: %d\n", address, &account.Balance, account.Nonce, account.Root, account.CodeHash, w.txNum) - w.writeKeys[kv.PlainState] = append(w.writeKeys[kv.PlainState], address.Bytes()) - w.writeVals[kv.PlainState] = append(w.writeVals[kv.PlainState], value) + w.writeLists[kv.PlainState].Keys = append(w.writeLists[kv.PlainState].Keys, address.Bytes()) + w.writeLists[kv.PlainState].Vals = append(w.writeLists[kv.PlainState].Vals, value) return nil } func (w *StateReconWriter1) UpdateAccountCode(address common.Address, incarnation uint64, codeHash common.Hash, code []byte) error { - w.writeKeys[kv.Code] = append(w.writeKeys[kv.Code], codeHash.Bytes()) - w.writeVals[kv.Code] = append(w.writeVals[kv.Code], code) + w.writeLists[kv.Code].Keys = 
append(w.writeLists[kv.Code].Keys, codeHash.Bytes()) + w.writeLists[kv.Code].Vals = append(w.writeLists[kv.Code].Vals, code) if len(code) > 0 { //fmt.Printf("code [%x] => [%x] CodeHash: %x, txNum: %d\n", address, code, codeHash, w.txNum) - w.writeKeys[kv.PlainContractCode] = append(w.writeKeys[kv.PlainContractCode], dbutils.PlainGenerateStoragePrefix(address[:], incarnation)) - w.writeVals[kv.PlainContractCode] = append(w.writeVals[kv.PlainContractCode], codeHash.Bytes()) + w.writeLists[kv.PlainContractCode].Keys = append(w.writeLists[kv.PlainContractCode].Keys, dbutils.PlainGenerateStoragePrefix(address[:], incarnation)) + w.writeLists[kv.PlainContractCode].Vals = append(w.writeLists[kv.PlainContractCode].Vals, codeHash.Bytes()) } return nil } func (w *StateReconWriter1) DeleteAccount(address common.Address, original *accounts.Account) error { - w.writeKeys[kv.PlainState] = append(w.writeKeys[kv.PlainState], address.Bytes()) - w.writeVals[kv.PlainState] = append(w.writeVals[kv.PlainState], nil) + w.writeLists[kv.PlainState].Keys = append(w.writeLists[kv.PlainState].Keys, address.Bytes()) + w.writeLists[kv.PlainState].Vals = append(w.writeLists[kv.PlainState].Vals, []byte{}) if original.Incarnation > 0 { var b [8]byte binary.BigEndian.PutUint64(b[:], original.Incarnation) - w.writeKeys[kv.IncarnationMap] = append(w.writeKeys[kv.IncarnationMap], address.Bytes()) - w.writeVals[kv.IncarnationMap] = append(w.writeVals[kv.IncarnationMap], b[:]) + w.writeLists[kv.IncarnationMap].Keys = append(w.writeLists[kv.IncarnationMap].Keys, address.Bytes()) + w.writeLists[kv.IncarnationMap].Vals = append(w.writeLists[kv.IncarnationMap].Vals, b[:]) } return nil } @@ -317,8 +402,8 @@ func (w *StateReconWriter1) WriteAccountStorage(address common.Address, incarnat if *original == *value { return nil } - w.writeKeys[kv.PlainState] = append(w.writeKeys[kv.PlainState], dbutils.PlainGenerateCompositeStorageKey(address.Bytes(), incarnation, key.Bytes())) - w.writeVals[kv.PlainState] = 
append(w.writeVals[kv.PlainState], value.Bytes()) + w.writeLists[kv.PlainState].Keys = append(w.writeLists[kv.PlainState].Keys, dbutils.PlainGenerateCompositeStorageKey(address.Bytes(), incarnation, key.Bytes())) + w.writeLists[kv.PlainState].Vals = append(w.writeLists[kv.PlainState].Vals, value.Bytes()) //fmt.Printf("storage [%x] [%x] => [%x], txNum: %d\n", address, *key, v, w.txNum) return nil } @@ -335,12 +420,19 @@ type StateReconReader1 struct { readError bool stateTxNum uint64 composite []byte - readKeys map[string][][]byte - readVals map[string][][]byte + readLists map[string]*KvList } func NewStateReconReader1(rs *ReconState1) *StateReconReader1 { - return &StateReconReader1{rs: rs} + return &StateReconReader1{ + rs: rs, + readLists: map[string]*KvList{ + kv.PlainState: {}, + kv.Code: {}, + CodeSizeTable: {}, + kv.IncarnationMap: {}, + }, + } } func (r *StateReconReader1) SetTxNum(txNum uint64) { @@ -352,12 +444,19 @@ func (r *StateReconReader1) SetTx(tx kv.Tx) { } func (r *StateReconReader1) ResetReadSet() { - r.readKeys = map[string][][]byte{} - r.readVals = map[string][][]byte{} + r.readLists = map[string]*KvList{ + kv.PlainState: {}, + kv.Code: {}, + CodeSizeTable: {}, + kv.IncarnationMap: {}, + } } -func (r *StateReconReader1) ReadSet() (map[string][][]byte, map[string][][]byte) { - return r.readKeys, r.readVals +func (r *StateReconReader1) ReadSet() map[string]*KvList { + for _, list := range r.readLists { + sort.Sort(list) + } + return r.readLists } func (r *StateReconReader1) SetTrace(trace bool) { @@ -373,8 +472,8 @@ func (r *StateReconReader1) ReadAccountData(address common.Address) (*accounts.A return nil, err } } - r.readKeys[kv.PlainState] = append(r.readKeys[kv.PlainState], address.Bytes()) - r.readVals[kv.PlainState] = append(r.readVals[kv.PlainState], enc) + r.readLists[kv.PlainState].Keys = append(r.readLists[kv.PlainState].Keys, address.Bytes()) + r.readLists[kv.PlainState].Vals = append(r.readLists[kv.PlainState].Vals, 
common.CopyBytes(enc)) if len(enc) == 0 { return nil, nil } @@ -406,8 +505,8 @@ func (r *StateReconReader1) ReadAccountStorage(address common.Address, incarnati return nil, err } } - r.readKeys[kv.PlainState] = append(r.readKeys[kv.PlainState], r.composite) - r.readVals[kv.PlainState] = append(r.readVals[kv.PlainState], enc) + r.readLists[kv.PlainState].Keys = append(r.readLists[kv.PlainState].Keys, common.CopyBytes(r.composite)) + r.readLists[kv.PlainState].Vals = append(r.readLists[kv.PlainState].Vals, common.CopyBytes(enc)) if r.trace { if enc == nil { fmt.Printf("ReadAccountStorage [%x] [%x] => [], txNum: %d\n", address, key.Bytes(), r.txNum) @@ -430,8 +529,8 @@ func (r *StateReconReader1) ReadAccountCode(address common.Address, incarnation return nil, err } } - r.readKeys[kv.Code] = append(r.readKeys[kv.Code], address.Bytes()) - r.readVals[kv.Code] = append(r.readVals[kv.Code], enc) + r.readLists[kv.Code].Keys = append(r.readLists[kv.Code].Keys, address.Bytes()) + r.readLists[kv.Code].Vals = append(r.readLists[kv.Code].Vals, common.CopyBytes(enc)) if r.trace { fmt.Printf("ReadAccountCode [%x] => [%x], txNum: %d\n", address, enc, r.txNum) } @@ -447,8 +546,10 @@ func (r *StateReconReader1) ReadAccountCodeSize(address common.Address, incarnat return 0, err } } - r.readKeys[kv.Code] = append(r.readKeys[kv.Code], address.Bytes()) - r.readVals[kv.Code] = append(r.readVals[kv.Code], enc) + var sizebuf [8]byte + binary.BigEndian.PutUint64(sizebuf[:], uint64(len(enc))) + r.readLists[CodeSizeTable].Keys = append(r.readLists[CodeSizeTable].Keys, address.Bytes()) + r.readLists[CodeSizeTable].Vals = append(r.readLists[CodeSizeTable].Vals, sizebuf[:]) size := len(enc) if r.trace { fmt.Printf("ReadAccountCodeSize [%x] => [%d], txNum: %d\n", address, size, r.txNum) @@ -465,8 +566,8 @@ func (r *StateReconReader1) ReadAccountIncarnation(address common.Address) (uint return 0, err } } - r.readKeys[kv.IncarnationMap] = append(r.readKeys[kv.IncarnationMap], address.Bytes()) - 
r.readVals[kv.IncarnationMap] = append(r.readVals[kv.IncarnationMap], enc) + r.readLists[kv.IncarnationMap].Keys = append(r.readLists[kv.IncarnationMap].Keys, address.Bytes()) + r.readLists[kv.IncarnationMap].Vals = append(r.readLists[kv.IncarnationMap].Vals, common.CopyBytes(enc)) if len(enc) == 0 { return 0, nil } diff --git a/go.mod b/go.mod index c733dbefb05..f24623b10a0 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/ledgerwatch/erigon go 1.18 require ( - github.com/ledgerwatch/erigon-lib v0.0.0-20220723031125-6f7794e88b5e + github.com/ledgerwatch/erigon-lib v0.0.0-20220723080652-596d10ea2e13 github.com/ledgerwatch/erigon-snapshot v1.0.0 github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 diff --git a/go.sum b/go.sum index 38d9a79d686..1ea3734179d 100644 --- a/go.sum +++ b/go.sum @@ -390,8 +390,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3PYPwICLl+/9oulQauOuETfgFvhBDffs0= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220723031125-6f7794e88b5e h1:4tZnz9FCTIalm6VtGXBZX713Y+lcHqpMK6L3wP7OSHY= -github.com/ledgerwatch/erigon-lib v0.0.0-20220723031125-6f7794e88b5e/go.mod h1:mq8M03qcnaqXZ/yjNuWoyZQ5V8r5JbXw5JYmy4WNUZQ= +github.com/ledgerwatch/erigon-lib v0.0.0-20220723080652-596d10ea2e13 h1:GsmPUJO6xeifKSxxnG+BUwGEFggljkchaYm/HomvIQs= +github.com/ledgerwatch/erigon-lib v0.0.0-20220723080652-596d10ea2e13/go.mod h1:mq8M03qcnaqXZ/yjNuWoyZQ5V8r5JbXw5JYmy4WNUZQ= github.com/ledgerwatch/erigon-snapshot v1.0.0 h1:bp/7xoPdM5lK7LFdqEMH008RZmqxMZV0RUVEQiWs7v4= github.com/ledgerwatch/erigon-snapshot v1.0.0/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc= From 
4c0ab19bc672acfbe97f903abf961d4ccd2cdc47 Mon Sep 17 00:00:00 2001 From: banteg <4562643+banteg@users.noreply.github.com> Date: Sun, 24 Jul 2022 09:21:31 +0400 Subject: [PATCH 142/152] fix(vmtrace): return value pushed by smod (#4806) --- cmd/rpcdaemon/commands/trace_adhoc.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/rpcdaemon/commands/trace_adhoc.go b/cmd/rpcdaemon/commands/trace_adhoc.go index 2493edf0daa..c7f958e30f1 100644 --- a/cmd/rpcdaemon/commands/trace_adhoc.go +++ b/cmd/rpcdaemon/commands/trace_adhoc.go @@ -448,7 +448,7 @@ func (ot *OeTracer) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost vm.ADD, vm.EXP, vm.CALLER, vm.SHA3, vm.SUB, vm.ADDRESS, vm.GAS, vm.MUL, vm.RETURNDATASIZE, vm.NOT, vm.SHR, vm.SHL, vm.EXTCODESIZE, vm.SLT, vm.OR, vm.NUMBER, vm.PC, vm.TIMESTAMP, vm.BALANCE, vm.SELFBALANCE, vm.MULMOD, vm.ADDMOD, vm.BASEFEE, vm.BLOCKHASH, vm.BYTE, vm.XOR, vm.ORIGIN, vm.CODESIZE, vm.MOD, vm.SIGNEXTEND, vm.GASLIMIT, vm.DIFFICULTY, vm.SGT, vm.GASPRICE, - vm.MSIZE, vm.EXTCODEHASH: + vm.MSIZE, vm.EXTCODEHASH, vm.SMOD: showStack = 1 } for i := showStack - 1; i >= 0; i-- { From 1533bea3f68b357d35b904c2393fbb4c467158cf Mon Sep 17 00:00:00 2001 From: banteg <4562643+banteg@users.noreply.github.com> Date: Sun, 24 Jul 2022 11:56:37 +0400 Subject: [PATCH 143/152] fix(vmtrace): missing pushes (#4808) * fix(vmtrace): add chainid stack value * fix(vmtrace): add coinbase stack value --- cmd/rpcdaemon/commands/trace_adhoc.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/rpcdaemon/commands/trace_adhoc.go b/cmd/rpcdaemon/commands/trace_adhoc.go index c7f958e30f1..b180f26951f 100644 --- a/cmd/rpcdaemon/commands/trace_adhoc.go +++ b/cmd/rpcdaemon/commands/trace_adhoc.go @@ -448,7 +448,7 @@ func (ot *OeTracer) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost vm.ADD, vm.EXP, vm.CALLER, vm.SHA3, vm.SUB, vm.ADDRESS, vm.GAS, vm.MUL, vm.RETURNDATASIZE, vm.NOT, vm.SHR, vm.SHL, vm.EXTCODESIZE, vm.SLT, vm.OR, 
vm.NUMBER, vm.PC, vm.TIMESTAMP, vm.BALANCE, vm.SELFBALANCE, vm.MULMOD, vm.ADDMOD, vm.BASEFEE, vm.BLOCKHASH, vm.BYTE, vm.XOR, vm.ORIGIN, vm.CODESIZE, vm.MOD, vm.SIGNEXTEND, vm.GASLIMIT, vm.DIFFICULTY, vm.SGT, vm.GASPRICE, - vm.MSIZE, vm.EXTCODEHASH, vm.SMOD: + vm.MSIZE, vm.EXTCODEHASH, vm.SMOD, vm.CHAINID, vm.COINBASE: showStack = 1 } for i := showStack - 1; i >= 0; i-- { From e85796a38c6f25943bb7969e32c68269c1dcb705 Mon Sep 17 00:00:00 2001 From: dmitriyselivanov Date: Sun, 24 Jul 2022 11:50:00 +0300 Subject: [PATCH 144/152] rpcdaemon: added test for eth_call in case of a pruned block (#4776) --- cmd/rpcdaemon/commands/eth_call_test.go | 118 ++++++++++++++++++++++++ 1 file changed, 118 insertions(+) diff --git a/cmd/rpcdaemon/commands/eth_call_test.go b/cmd/rpcdaemon/commands/eth_call_test.go index fc204365a4f..6812cca6889 100644 --- a/cmd/rpcdaemon/commands/eth_call_test.go +++ b/cmd/rpcdaemon/commands/eth_call_test.go @@ -3,14 +3,28 @@ package commands import ( "context" "fmt" + "math/big" "testing" + "time" + + "github.com/holiman/uint256" + "github.com/stretchr/testify/assert" "github.com/ledgerwatch/erigon-lib/gointerfaces/txpool" + "github.com/ledgerwatch/erigon-lib/kv" "github.com/ledgerwatch/erigon-lib/kv/kvcache" "github.com/ledgerwatch/erigon/cmd/rpcdaemon/rpcdaemontest" "github.com/ledgerwatch/erigon/common" + "github.com/ledgerwatch/erigon/common/hexutil" + "github.com/ledgerwatch/erigon/common/math" + "github.com/ledgerwatch/erigon/core" "github.com/ledgerwatch/erigon/core/rawdb" + "github.com/ledgerwatch/erigon/core/state" + "github.com/ledgerwatch/erigon/core/types" + "github.com/ledgerwatch/erigon/crypto" + "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/internal/ethapi" + "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rpc" "github.com/ledgerwatch/erigon/turbo/rpchelper" "github.com/ledgerwatch/erigon/turbo/snapshotsync" @@ -50,6 +64,29 @@ func TestEthCallNonCanonical(t *testing.T) { } } +func 
TestEthCallToPrunedBlock(t *testing.T) { + pruneTo := uint64(3) + ethCallBlockNumber := rpc.BlockNumber(2) + + db, bankAddress, contractAddress := chainWithDeployedContract(t) + + prune(t, db, pruneTo) + + stateCache := kvcache.New(kvcache.DefaultCoherentConfig) + api := NewEthAPI(NewBaseApi(nil, stateCache, snapshotsync.NewBlockReader(), false), db, nil, nil, nil, 5000000) + + callData := hexutil.MustDecode("0x2e64cec1") + callDataBytes := hexutil.Bytes(callData) + + if _, err := api.Call(context.Background(), ethapi.CallArgs{ + From: &bankAddress, + To: &contractAddress, + Data: &callDataBytes, + }, rpc.BlockNumberOrHashWithNumber(ethCallBlockNumber), nil); err != nil { + t.Errorf("unexpected error: %v", err) + } +} + func TestGetBlockByTimestampLatestTime(t *testing.T) { ctx := context.Background() db := rpcdaemontest.CreateTestKV(t) @@ -261,3 +298,84 @@ func TestGetBlockByTimestamp(t *testing.T) { t.Errorf("Retrieved the wrong block.\nexpected block hash: %s expected timestamp: %d\nblock hash retrieved: %s timestamp retrieved: %d", response["hash"], response["timestamp"], block["hash"], block["timestamp"]) } } + +func chainWithDeployedContract(t *testing.T) (kv.RwDB, common.Address, common.Address) { + var ( + signer = types.LatestSignerForChainID(nil) + bankKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + bankAddress = crypto.PubkeyToAddress(bankKey.PublicKey) + bankFunds = big.NewInt(1e9) + contract = 
hexutil.MustDecode("0x608060405234801561001057600080fd5b50610150806100206000396000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c80632e64cec11461003b5780636057361d14610059575b600080fd5b610043610075565b60405161005091906100d9565b60405180910390f35b610073600480360381019061006e919061009d565b61007e565b005b60008054905090565b8060008190555050565b60008135905061009781610103565b92915050565b6000602082840312156100b3576100b26100fe565b5b60006100c184828501610088565b91505092915050565b6100d3816100f4565b82525050565b60006020820190506100ee60008301846100ca565b92915050565b6000819050919050565b600080fd5b61010c816100f4565b811461011757600080fd5b5056fea26469706673582212209a159a4f3847890f10bfb87871a61eba91c5dbf5ee3cf6398207e292eee22a1664736f6c63430008070033") + gspec = &core.Genesis{ + Config: params.AllEthashProtocolChanges, + Alloc: core.GenesisAlloc{bankAddress: {Balance: bankFunds}}, + } + ) + m := stages.MockWithGenesis(t, gspec, bankKey) + db := m.DB + + var contractAddr common.Address + + chain, err := core.GenerateChain(m.ChainConfig, m.Genesis, m.Engine, m.DB, 2, func(i int, block *core.BlockGen) { + nonce := block.TxNonce(bankAddress) + switch i { + case 0: + tx, err := types.SignTx(types.NewContractCreation(nonce, new(uint256.Int), 1e6, new(uint256.Int), contract), *signer, bankKey) + assert.NoError(t, err) + block.AddTx(tx) + contractAddr = crypto.CreateAddress(bankAddress, nonce) + case 1: + txn, err := types.SignTx(types.NewTransaction(nonce, contractAddr, new(uint256.Int), 90000, new(uint256.Int), nil), *signer, bankKey) + assert.NoError(t, err) + block.AddTx(txn) + } + }, false /* intermediateHashes */) + if err != nil { + t.Fatalf("generate blocks: %v", err) + } + + err = m.InsertChain(chain) + assert.NoError(t, err) + + tx, err := db.BeginRo(context.Background()) + if err != nil { + t.Fatalf("read only db tx to read state: %v", err) + } + defer tx.Rollback() + + st := state.New(state.NewPlainState(tx, 1)) + assert.NoError(t, err) + assert.False(t, 
st.Exist(contractAddr), "Contract should not exist at block #1") + + st = state.New(state.NewPlainState(tx, 2)) + assert.NoError(t, err) + assert.True(t, st.Exist(contractAddr), "Contract should exist at block #2") + + return db, bankAddress, contractAddr +} + +func prune(t *testing.T, db kv.RwDB, pruneTo uint64) { + ctx := context.Background() + tx, err := db.BeginRw(ctx) + assert.NoError(t, err) + + logEvery := time.NewTicker(20 * time.Second) + + err = stagedsync.PruneTableDupSort(tx, kv.AccountChangeSet, "", pruneTo, logEvery, ctx) + assert.NoError(t, err) + + err = stagedsync.PruneTableDupSort(tx, kv.StorageChangeSet, "", pruneTo, logEvery, ctx) + assert.NoError(t, err) + + err = stagedsync.PruneTable(tx, kv.Receipts, pruneTo, ctx, math.MaxInt32) + assert.NoError(t, err) + + err = stagedsync.PruneTable(tx, kv.Log, pruneTo, ctx, math.MaxInt32) + assert.NoError(t, err) + + err = stagedsync.PruneTableDupSort(tx, kv.CallTraceSet, "", pruneTo, logEvery, ctx) + assert.NoError(t, err) + + err = tx.Commit() + assert.NoError(t, err) +} From 648175717590fd76fa10afbe8894f2f691f62a73 Mon Sep 17 00:00:00 2001 From: bgelb Date: Sun, 24 Jul 2022 01:50:43 -0700 Subject: [PATCH 145/152] fix regressions in trace_call and eth_createAccessList introduced by PR #3517 (#4807) --- cmd/rpcdaemon/commands/eth_call.go | 2 +- cmd/rpcdaemon/commands/trace_adhoc.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/rpcdaemon/commands/eth_call.go b/cmd/rpcdaemon/commands/eth_call.go index 714c0ecc195..e32d7d814bb 100644 --- a/cmd/rpcdaemon/commands/eth_call.go +++ b/cmd/rpcdaemon/commands/eth_call.go @@ -320,7 +320,7 @@ func (api *APIImpl) CreateAccessList(ctx context.Context, args ethapi.CallArgs, } stateReader = state.NewCachedReader2(cacheView, tx) } else { - stateReader = state.NewPlainState(tx, blockNumber) + stateReader = state.NewPlainState(tx, blockNumber+1) } header := block.Header() diff --git a/cmd/rpcdaemon/commands/trace_adhoc.go 
b/cmd/rpcdaemon/commands/trace_adhoc.go index b180f26951f..acb42089017 100644 --- a/cmd/rpcdaemon/commands/trace_adhoc.go +++ b/cmd/rpcdaemon/commands/trace_adhoc.go @@ -879,7 +879,7 @@ func (api *TraceAPIImpl) Call(ctx context.Context, args TraceCallParam, traceTyp } stateReader = state.NewCachedReader2(cacheView, tx) } else { - stateReader = state.NewPlainState(tx, blockNumber) + stateReader = state.NewPlainState(tx, blockNumber+1) } ibs := state.New(stateReader) From 6f53d1ef4d71d467c9bc641ff5bf5c2228154bf2 Mon Sep 17 00:00:00 2001 From: ledgerwatch Date: Sun, 24 Jul 2022 10:44:52 +0100 Subject: [PATCH 146/152] Fix test compilation error (#4809) Co-authored-by: Alexey Sharp --- cmd/rpcdaemon/commands/eth_call_test.go | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/cmd/rpcdaemon/commands/eth_call_test.go b/cmd/rpcdaemon/commands/eth_call_test.go index 6812cca6889..7f20bc8daca 100644 --- a/cmd/rpcdaemon/commands/eth_call_test.go +++ b/cmd/rpcdaemon/commands/eth_call_test.go @@ -22,7 +22,6 @@ import ( "github.com/ledgerwatch/erigon/core/state" "github.com/ledgerwatch/erigon/core/types" "github.com/ledgerwatch/erigon/crypto" - "github.com/ledgerwatch/erigon/eth/stagedsync" "github.com/ledgerwatch/erigon/internal/ethapi" "github.com/ledgerwatch/erigon/params" "github.com/ledgerwatch/erigon/rpc" @@ -361,19 +360,19 @@ func prune(t *testing.T, db kv.RwDB, pruneTo uint64) { logEvery := time.NewTicker(20 * time.Second) - err = stagedsync.PruneTableDupSort(tx, kv.AccountChangeSet, "", pruneTo, logEvery, ctx) + err = rawdb.PruneTableDupSort(tx, kv.AccountChangeSet, "", pruneTo, logEvery, ctx) assert.NoError(t, err) - err = stagedsync.PruneTableDupSort(tx, kv.StorageChangeSet, "", pruneTo, logEvery, ctx) + err = rawdb.PruneTableDupSort(tx, kv.StorageChangeSet, "", pruneTo, logEvery, ctx) assert.NoError(t, err) - err = stagedsync.PruneTable(tx, kv.Receipts, pruneTo, ctx, math.MaxInt32) + err = rawdb.PruneTable(tx, kv.Receipts, pruneTo, ctx, 
math.MaxInt32) assert.NoError(t, err) - err = stagedsync.PruneTable(tx, kv.Log, pruneTo, ctx, math.MaxInt32) + err = rawdb.PruneTable(tx, kv.Log, pruneTo, ctx, math.MaxInt32) assert.NoError(t, err) - err = stagedsync.PruneTableDupSort(tx, kv.CallTraceSet, "", pruneTo, logEvery, ctx) + err = rawdb.PruneTableDupSort(tx, kv.CallTraceSet, "", pruneTo, logEvery, ctx) assert.NoError(t, err) err = tx.Commit() From a3727463974072dc8c09d599e2320f2e66982a22 Mon Sep 17 00:00:00 2001 From: Giulio rebuffo Date: Sun, 24 Jul 2022 16:20:08 +0200 Subject: [PATCH 147/152] Move some checks out enginePayload and fcu (#4805) * made in refactoring * test Co-authored-by: giuliorebuffo --- ethdb/privateapi/engine_test.go | 2 +- ethdb/privateapi/ethbackend.go | 74 +++++++++++---------------------- 2 files changed, 26 insertions(+), 50 deletions(-) diff --git a/ethdb/privateapi/engine_test.go b/ethdb/privateapi/engine_test.go index 74b0eab1efa..499dd602890 100644 --- a/ethdb/privateapi/engine_test.go +++ b/ethdb/privateapi/engine_test.go @@ -230,7 +230,7 @@ func TestNoTTD(t *testing.T) { go func() { _, err = backend.EngineNewPayloadV1(ctx, &types2.ExecutionPayload{ ParentHash: gointerfaces.ConvertHashToH256(common.HexToHash("0x2")), - BlockHash: gointerfaces.ConvertHashToH256(common.HexToHash("0x3")), + BlockHash: gointerfaces.ConvertHashToH256(common.HexToHash("0xe6a580606b065e08034dcd6eea026cfdcbd3b41918d98b41cb9bf797d0c27033")), ReceiptRoot: gointerfaces.ConvertHashToH256(common.HexToHash("0x4")), StateRoot: gointerfaces.ConvertHashToH256(common.HexToHash("0x4")), PrevRandao: gointerfaces.ConvertHashToH256(common.HexToHash("0x0b3")), diff --git a/ethdb/privateapi/ethbackend.go b/ethdb/privateapi/ethbackend.go index 6e1c22b2dfd..e9c2e448113 100644 --- a/ethdb/privateapi/ethbackend.go +++ b/ethdb/privateapi/ethbackend.go @@ -234,7 +234,7 @@ func (s *EthBackendServer) Block(ctx context.Context, req *remote.BlockRequest) func convertPayloadStatus(payloadStatus *engineapi.PayloadStatus) 
*remote.EnginePayloadStatus { reply := remote.EnginePayloadStatus{Status: payloadStatus.Status} - if payloadStatus.LatestValidHash != (common.Hash{}) { + if payloadStatus.Status != remote.EngineStatus_SYNCING { reply.LatestValidHash = gointerfaces.ConvertHashToH256(payloadStatus.LatestValidHash) } if payloadStatus.ValidationError != nil { @@ -262,11 +262,6 @@ func (s *EthBackendServer) stageLoopIsBusy() bool { // EngineNewPayloadV1 validates and possibly executes payload func (s *EthBackendServer) EngineNewPayloadV1(ctx context.Context, req *types2.ExecutionPayload) (*remote.EnginePayloadStatus, error) { - if s.config.TerminalTotalDifficulty == nil { - log.Error("[NewPayload] not a proof-of-stake chain") - return nil, fmt.Errorf("not a proof-of-stake chain") - } - var baseFee *big.Int eip1559 := false @@ -322,24 +317,6 @@ func (s *EthBackendServer) EngineNewPayloadV1(ctx context.Context, req *types2.E } block := types.NewBlockFromStorage(blockHash, &header, transactions, nil) - tx, err := s.db.BeginRo(ctx) - if err != nil { - return nil, err - } - defer tx.Rollback() - - parentTd, err := rawdb.ReadTd(tx, header.ParentHash, req.BlockNumber-1) - if err != nil { - return nil, err - } - - tx.Rollback() - - if parentTd != nil && parentTd.Cmp(s.config.TerminalTotalDifficulty) < 0 { - log.Warn("[NewPayload] TTD not reached yet", "height", header.Number, "hash", common.Hash(blockHash)) - return &remote.EnginePayloadStatus{Status: remote.EngineStatus_INVALID, LatestValidHash: gointerfaces.ConvertHashToH256(common.Hash{})}, nil - } - possibleStatus, err := s.getPayloadStatusFromHashIfPossible(blockHash, req.BlockNumber, header.ParentHash, true) if err != nil { return nil, err @@ -375,15 +352,22 @@ func (s *EthBackendServer) EngineNewPayloadV1(ctx context.Context, req *types2.E // Check if we can make out a status from the payload hash/head hash. 
func (s *EthBackendServer) getPayloadStatusFromHashIfPossible(blockHash common.Hash, blockNumber uint64, parentHash common.Hash, newPayload bool) (*engineapi.PayloadStatus, error) { - if s.hd == nil { - return nil, nil - } + // Determine which prefix to use for logs var prefix string if newPayload { prefix = "NewPayload" } else { prefix = "ForkChoiceUpdated" } + if s.config.TerminalTotalDifficulty == nil { + log.Error(fmt.Sprintf("[%s] not a proof-of-stake chain", prefix)) + return nil, fmt.Errorf("not a proof-of-stake chain") + } + + if s.hd == nil { + return nil, nil + } + tx, err := s.db.BeginRo(s.ctx) if err != nil { return nil, err @@ -394,13 +378,27 @@ func (s *EthBackendServer) getPayloadStatusFromHashIfPossible(blockHash common.H if err != nil { return nil, err } + // Retrieve parent and total difficulty. var parent *types.Header + var td *big.Int if newPayload { + // Obtain TD parent, err = rawdb.ReadHeaderByHash(tx, parentHash) + if err != nil { + return nil, err + } + td, err = rawdb.ReadTdByHash(tx, parentHash) + } else { + td, err = rawdb.ReadTdByHash(tx, blockHash) } if err != nil { return nil, err } + // Check if we already reached TTD. 
+ if td != nil && td.Cmp(s.config.TerminalTotalDifficulty) < 0 { + log.Warn(fmt.Sprintf("[%s] TTD not reached yet", prefix), "hash", common.Hash(blockHash)) + return &engineapi.PayloadStatus{Status: remote.EngineStatus_INVALID, LatestValidHash: common.Hash{}}, nil + } var canonicalHash common.Hash if header != nil { @@ -523,34 +521,12 @@ func (s *EthBackendServer) EngineGetPayloadV1(ctx context.Context, req *remote.E // EngineForkChoiceUpdatedV1 either states new block head or request the assembling of a new block func (s *EthBackendServer) EngineForkChoiceUpdatedV1(ctx context.Context, req *remote.EngineForkChoiceUpdatedRequest) (*remote.EngineForkChoiceUpdatedReply, error) { - if s.config.TerminalTotalDifficulty == nil { - return nil, fmt.Errorf("not a proof-of-stake chain") - } - forkChoice := engineapi.ForkChoiceMessage{ HeadBlockHash: gointerfaces.ConvertH256ToHash(req.ForkchoiceState.HeadBlockHash), SafeBlockHash: gointerfaces.ConvertH256ToHash(req.ForkchoiceState.SafeBlockHash), FinalizedBlockHash: gointerfaces.ConvertH256ToHash(req.ForkchoiceState.FinalizedBlockHash), } - tx1, err := s.db.BeginRo(ctx) - if err != nil { - return nil, err - } - defer tx1.Rollback() - - td, err := rawdb.ReadTdByHash(tx1, forkChoice.HeadBlockHash) - tx1.Rollback() - if err != nil { - return nil, err - } - if td != nil && td.Cmp(s.config.TerminalTotalDifficulty) < 0 { - log.Warn("[ForkChoiceUpdated] TTD not reached yet", "forkChoice", forkChoice) - return &remote.EngineForkChoiceUpdatedReply{ - PayloadStatus: &remote.EnginePayloadStatus{Status: remote.EngineStatus_INVALID, LatestValidHash: gointerfaces.ConvertHashToH256(common.Hash{})}, - }, nil - } - status, err := s.getPayloadStatusFromHashIfPossible(forkChoice.HeadBlockHash, 0, common.Hash{}, false) if err != nil { return nil, err From 7826a33b876d6681c4ae52f93e00d9b9cf7dc4e0 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Mon, 25 Jul 2022 11:26:42 +0700 Subject: [PATCH 148/152] save (#4817)afix "grafana user format" #4817 
--- docker-compose.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker-compose.yml b/docker-compose.yml index 0c6cf567c46..1d5567d8817 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -77,7 +77,7 @@ services: grafana: image: grafana/grafana:9.0.3 - user: 472:0 # required for grafana version >= 7.3 + user: "472:0" # required for grafana version >= 7.3 ports: [ "3000:3000" ] volumes: - ${ERIGON_GRAFANA_CONFIG:-./cmd/prometheus/grafana.ini}:/etc/grafana/grafana.ini From b231856c1cb38a53efe7b1a957a7d137d2f691e4 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Mon, 25 Jul 2022 11:29:34 +0700 Subject: [PATCH 149/152] avoid sudo in makefile #4818 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index c666796d954..360f3d0bb0f 100644 --- a/Makefile +++ b/Makefile @@ -73,7 +73,7 @@ xdg_data_home_subdirs = $(xdg_data_home)/erigon $(xdg_data_home)/erigon-grafana ## setup_xdg_data_home: TODO setup_xdg_data_home: mkdir -p $(xdg_data_home_subdirs) - ls -aln $(xdg_data_home) | grep -E "472.*0.*erigon-grafana" || sudo chown -R 472:0 $(xdg_data_home)/erigon-grafana + ls -aln $(xdg_data_home) | grep -E "472.*0.*erigon-grafana" || chown -R 472:0 $(xdg_data_home)/erigon-grafana @echo "✔️ xdg_data_home setup" @ls -al $(xdg_data_home) From 9e371fef5ce22c8be559c630cc3c415e89848db1 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Mon, 25 Jul 2022 11:31:57 +0700 Subject: [PATCH 150/152] remove only etl-tmp content, but not dir itself #4816 --- eth/backend.go | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/eth/backend.go b/eth/backend.go index 89cd9332846..ef965703603 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -149,7 +149,7 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere } tmpdir := stack.Config().Dirs.Tmp - if err := os.RemoveAll(tmpdir); err != nil { // clean it on startup + if err := RemoveContents(tmpdir); err != nil { 
// clean it on startup return nil, fmt.Errorf("clean tmp dir: %s, %w", tmpdir, err) } @@ -912,3 +912,23 @@ func (s *Ethereum) SentryCtx() context.Context { func (s *Ethereum) SentryControlServer() *sentry.MultiClient { return s.sentriesClient } + +// RemoveContents is like os.RemoveAll, but preserve dir itself +func RemoveContents(dir string) error { + d, err := os.Open(dir) + if err != nil { + return err + } + defer d.Close() + names, err := d.Readdirnames(-1) + if err != nil { + return err + } + for _, name := range names { + err = os.RemoveAll(filepath.Join(dir, name)) + if err != nil { + return err + } + } + return nil +} From b20f7ecdd12039bde65d596ed98f7113ee2b73fe Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Mon, 25 Jul 2022 12:49:29 +0700 Subject: [PATCH 151/152] docker_hub_default_pid (#4819) --- hooks/build | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hooks/build b/hooks/build index c242d1ab85f..8598f66b14c 100755 --- a/hooks/build +++ b/hooks/build @@ -10,9 +10,9 @@ set -o pipefail # fail if anything in pipe fails # $(id -u) and $(id -g) will be 0 # # so we need to specify the erigon user uid/gid in the image -# choose 3473 matching defaults in .env.example +# choose 1000 matching defaults in .env.example DOCKER_FLAGS="-t ${IMAGE_NAME}" \ -DOCKER_UID=3473 \ -DOCKER_GID=3473 \ +DOCKER_UID=1000 \ +DOCKER_GID=1000 \ GIT_TAG=$(git describe --tags '--match=v*' --dirty) \ make docker From 6faf337b27bad35c0ccb323478567b6d502d12a8 Mon Sep 17 00:00:00 2001 From: Alex Sharov Date: Mon, 25 Jul 2022 13:06:56 +0700 Subject: [PATCH 152/152] pool: allow non-parsable txs in db, skip them with warning --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index f24623b10a0..a3a7d1d77e7 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/ledgerwatch/erigon go 1.18 require ( - github.com/ledgerwatch/erigon-lib v0.0.0-20220723080652-596d10ea2e13 + 
github.com/ledgerwatch/erigon-lib v0.0.0-20220725060110-41265c634d13 github.com/ledgerwatch/erigon-snapshot v1.0.0 github.com/ledgerwatch/log/v3 v3.4.1 github.com/ledgerwatch/secp256k1 v1.0.0 diff --git a/go.sum b/go.sum index 1ea3734179d..aff84e1bf89 100644 --- a/go.sum +++ b/go.sum @@ -390,8 +390,8 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170224010052-a616ab194758 h1:0D5M2HQSGD3PYPwICLl+/9oulQauOuETfgFvhBDffs0= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= -github.com/ledgerwatch/erigon-lib v0.0.0-20220723080652-596d10ea2e13 h1:GsmPUJO6xeifKSxxnG+BUwGEFggljkchaYm/HomvIQs= -github.com/ledgerwatch/erigon-lib v0.0.0-20220723080652-596d10ea2e13/go.mod h1:mq8M03qcnaqXZ/yjNuWoyZQ5V8r5JbXw5JYmy4WNUZQ= +github.com/ledgerwatch/erigon-lib v0.0.0-20220725060110-41265c634d13 h1:wms8ybhc0kbaOro5eO0wj+yLVRj21W0ocszsusFV+lY= +github.com/ledgerwatch/erigon-lib v0.0.0-20220725060110-41265c634d13/go.mod h1:mq8M03qcnaqXZ/yjNuWoyZQ5V8r5JbXw5JYmy4WNUZQ= github.com/ledgerwatch/erigon-snapshot v1.0.0 h1:bp/7xoPdM5lK7LFdqEMH008RZmqxMZV0RUVEQiWs7v4= github.com/ledgerwatch/erigon-snapshot v1.0.0/go.mod h1:3AuPxZc85jkehh/HA9h8gabv5MSi3kb/ddtzBsTVJFo= github.com/ledgerwatch/log/v3 v3.4.1 h1:/xGwlVulXnsO9Uq+tzaExc8OWmXXHU0dnLalpbnY5Bc=