
Commit

Merge pull request #2368 from ledgerwatch/stable-2021-07-03
Stable 2021 07 03
mandrigin committed Jul 14, 2021
2 parents f3d5f83 + 84b9ba1 commit 36ae9ec
Showing 133 changed files with 3,643 additions and 1,500 deletions.
19 changes: 0 additions & 19 deletions .github/dependabot.yml

This file was deleted.

2 changes: 2 additions & 0 deletions .gitignore
@@ -68,3 +68,5 @@ docker-compose.dev.yml

/ethdb/*.fail

libmdbx/build/*
tests/testdata/*
29 changes: 9 additions & 20 deletions Dockerfile
@@ -1,31 +1,20 @@
FROM golang:1.16-alpine3.13 as builder

ARG git_commit
ENV GIT_COMMIT=$git_commit

ARG git_branch
ENV GIT_BRANCH=$git_branch

ARG git_tag
ENV GIT_TAG=$git_tag

# for linters to avoid warnings. we won't use linters in Docker anyway
ENV LATEST_COMMIT="undefined"
FROM docker.io/library/golang:1.16-alpine3.13 as builder

RUN apk --no-cache add make gcc g++ linux-headers git bash ca-certificates libgcc libstdc++

WORKDIR /app
ADD . .

# next 2 lines helping utilize docker cache
COPY go.mod go.sum ./
RUN go mod download
RUN make erigon rpcdaemon integration sentry

ADD . .
RUN make all
FROM docker.io/library/alpine:3.13

FROM alpine:3.13
RUN mkdir -p /var/lib/erigon
VOLUME /var/lib/erigon

RUN apk add --no-cache ca-certificates libgcc libstdc++ tzdata
COPY --from=builder /app/build/bin/* /usr/local/bin/

EXPOSE 8545 8546 30303 30303/udp 8080 9090 6060
WORKDIR /var/lib/erigon

EXPOSE 8545 8546 30303 30303/udp 30304 30304/udp 8080 9090 6060
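
For context, a minimal build-and-run sketch for the slimmed-down Dockerfile above; the `erigon:local` tag, volume name, and published ports are illustrative, and the binary name is passed explicitly because no ENTRYPOINT appears in the hunk shown:
```
# Sketch only: build the image from this Dockerfile and run Erigon from it.
docker build -t erigon:local .

docker run -d --name erigon \
  -p 30303:30303 -p 30303:30303/udp -p 8545:8545 \
  -v erigon-data:/var/lib/erigon \
  erigon:local erigon --datadir=/var/lib/erigon
```
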
28 changes: 13 additions & 15 deletions README.md
@@ -177,33 +177,27 @@ it can run from a snapshot of a database for read-only calls.

#### **For local DB**

This is only possible if RPC daemon runs on the same computer as Erigon. This mode of operation uses shared memory access to the database of Erigon, which is reported to have better performance than accessing via TCP socket (see "For remote DB" section below).
This is only possible if RPC daemon runs on the same computer as Erigon. This mode uses shared memory access to the database of Erigon, which has better performance than accessing via TCP socket (see "For remote DB" section below). Provide both `--datadir` and `--private.api.addr` options:
```
> make erigon
> ./build/bin/erigon --private.api.addr=localhost:9090
> make rpcdaemon
> ./build/bin/rpcdaemon --datadir ~/Library/Erigon/ --http.api=eth,debug,net
> ./build/bin/rpcdaemon --datadir=<your_data_dir> --private.api.addr=localhost:9090 --http.api=eth,erigon,web3,net,debug,trace,txpool,shh
```

In this mode, some RPC API methods do not work. Please see the "For dual mode" section below on how to fix that.

#### **For remote DB**

This works regardless of whether RPC daemon is on the same computer as Erigon, or on a different one. They use a TCP socket connection to pass data between them. To use this mode, run Erigon in one terminal window:

```
> make erigon
> ./build/bin/erigon --private.api.addr=localhost:9090
```

Run RPC daemon
```
> ./build/bin/rpcdaemon --private.api.addr=localhost:9090 --http.api=eth,debug,net
> make rpcdaemon
> ./build/bin/rpcdaemon --private.api.addr=localhost:9090 --http.api=eth,erigon,web3,net,debug,trace,txpool,shh
```

**gRPC ports**: `9090` erigon, `9091` sentry, `9092` consensus engine, `9093` snapshot downloader, `9094` TxPool

**For dual mode**

If both `--datadir` and `--private.api.addr` options are used for RPC daemon, it works in a "dual" mode. This only works when RPC daemon is on the same computer as Erigon. In this mode, most data transfer from Erigon to RPC daemon happens via shared memory; only certain things (like new header notifications) happen via a TCP socket.
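
Putting the two flags together, a minimal dual-mode launch sketch using the same commands shown in the sections above (the datadir placeholder is as in the earlier example):
```
> ./build/bin/erigon --private.api.addr=localhost:9090
> ./build/bin/rpcdaemon --datadir=<your_data_dir> --private.api.addr=localhost:9090 --http.api=eth,erigon,web3,net,debug,trace,txpool,shh
```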

Supported JSON-RPC calls ([eth](./cmd/rpcdaemon/commands/eth_api.go), [debug](./cmd/rpcdaemon/commands/debug_api.go), [net](./cmd/rpcdaemon/commands/net_api.go), [web3](./cmd/rpcdaemon/commands/web3_api.go)):

For details on the implementation status of each command, [see this table](./cmd/rpcdaemon/README.md#rpc-implementation-status).
@@ -312,6 +306,10 @@ If genesis sync passed, then it's fine to run multiple Erigon on same Disk.
Please read https://github.com/ledgerwatch/erigon/issues/1516#issuecomment-811958891
In short: network disks are bad for block execution, because block execution reads data from the DB in a non-parallel, non-batched way.

### rpcdaemon "Dual-Mode" does not work with Docker Container
### rpcdaemon "Local-Mode" does not work with Docker Container

Running rpcdaemon in "Dual-Mode" (including the `--datadir` flag) generally results in better performance for RPC calls, however, this does not work when running erigon and rpcdaemon in separate containers. For the absolute best performance bare metal is recommended at this time.
Running rpcdaemon in "Local-Mode" (including the `--datadir` flag) generally results in better performance for
RPC calls; however, this does not work when erigon and rpcdaemon run in separate containers with the datadir mounted as a volume.
It works fine if erigon and rpcdaemon run in the same container.
For the absolute best performance, bare metal is recommended at this time.
Please help us configure the Docker volume driver to support MMAP files with POSIX file locks (MDBX).
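
A sketch of the same-container workaround described above, assuming an image built from the Dockerfile in this commit (the tag and volume name are placeholders):
```
# Start erigon; the named volume holds the MDBX datadir.
docker run -d --name erigon -p 8545:8545 -v erigon-data:/var/lib/erigon \
  erigon:local erigon --datadir=/var/lib/erigon --private.api.addr=localhost:9090

# Run rpcdaemon inside the same container so it can open the datadir directly.
docker exec -d erigon rpcdaemon --datadir=/var/lib/erigon \
  --private.api.addr=localhost:9090 --http.api=eth,erigon,web3,net,debug,trace,txpool,shh
```
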
9 changes: 6 additions & 3 deletions accounts/abi/bind/backends/simulated.go
@@ -244,8 +244,8 @@ func (b *SimulatedBackend) TransactionReceipt(ctx context.Context, txHash common
return nil, err
}
defer tx.Rollback()
receipt, _, _, _ := rawdb.ReadReceipt(tx, txHash)
return receipt, nil
receipt, _, _, _, err := rawdb.ReadReceipt(tx, txHash)
return receipt, err
}

// TransactionByHash checks the pool of pending transactions in addition to the
@@ -266,7 +266,10 @@ func (b *SimulatedBackend) TransactionByHash(ctx context.Context, txHash common.
if txn != nil {
return txn, true, nil
}
txn, _, _, _ = rawdb.ReadTransaction(tx, txHash)
txn, _, _, _, err = rawdb.ReadTransaction(tx, txHash)
if err != nil {
return nil, false, err
}
if txn != nil {
return txn, false, nil
}
2 changes: 1 addition & 1 deletion cmd/hack/hack.go
@@ -2321,7 +2321,7 @@ func runBlock(ibs *state.IntraBlockState, txnWriter state.StateWriter, blockWrit

if !vmConfig.ReadOnly {
// Finalize the block, applying any consensus engine specific extras (e.g. block rewards)
if _, err := engine.FinalizeAndAssemble(chainConfig, header, ibs, block.Transactions(), block.Uncles(), receipts, nil); err != nil {
if _, err := engine.FinalizeAndAssemble(chainConfig, header, ibs, block.Transactions(), block.Uncles(), receipts, nil, nil); err != nil {
return nil, fmt.Errorf("finalize of block %d failed: %v", block.NumberU64(), err)
}

3 changes: 3 additions & 0 deletions cmd/integration/commands/reset_state.go
@@ -173,6 +173,9 @@ func resetExec(tx ethdb.RwTx, g *core.Genesis) error {
if err := tx.(ethdb.BucketMigrator).ClearBucket(dbutils.CallTraceSet); err != nil {
return err
}
if err := tx.(ethdb.BucketMigrator).ClearBucket(dbutils.Epoch); err != nil {
return err
}
if err := stages.SaveStageProgress(tx, stages.Execution, 0); err != nil {
return err
}
6 changes: 3 additions & 3 deletions cmd/integration/commands/snapshot_check.go
@@ -104,7 +104,7 @@ var cmdSnapshotCheck = &cobra.Command{
}

func snapshotCheck(ctx context.Context, db ethdb.RwKV, isNew bool, tmpDir string) (err error) {
_, engine, chainConfig, vmConfig, _, sync, _, _, _ := newSync(ctx, db)
_, engine, chainConfig, vmConfig, _, sync, _, _ := newSync(ctx, db, nil)

var snapshotBlock uint64 = 11_000_000
var lastBlockHeaderNumber, blockNum uint64
@@ -247,8 +247,8 @@ func snapshotCheck(ctx context.Context, db ethdb.RwKV, isNew bool, tmpDir string
log.Info("Stage4", "progress", stage4.BlockNumber)

err = stagedsync.SpawnExecuteBlocksStage(stage4, sync, tx, blockNumber, ch,
stagedsync.StageExecuteBlocksCfg(db, false, false, false, 0, batchSize, nil, chainConfig, engine, vmConfig, tmpDir), nil,
)
stagedsync.StageExecuteBlocksCfg(db, false, false, false, 0, batchSize, nil, chainConfig, engine, vmConfig, nil, false, tmpDir),
false)
if err != nil {
return fmt.Errorf("execution err %w", err)
}
57 changes: 33 additions & 24 deletions cmd/integration/commands/stages.go
@@ -16,7 +16,6 @@ import (
"github.com/ledgerwatch/erigon/consensus"
"github.com/ledgerwatch/erigon/consensus/ethash"
"github.com/ledgerwatch/erigon/core"
"github.com/ledgerwatch/erigon/core/types"
"github.com/ledgerwatch/erigon/core/vm"
"github.com/ledgerwatch/erigon/eth/ethconfig"
"github.com/ledgerwatch/erigon/eth/fetcher"
@@ -367,7 +366,7 @@ func stageBodies(db ethdb.RwKV, ctx context.Context) error {

func stageSenders(db ethdb.RwKV, ctx context.Context) error {
tmpdir := path.Join(datadir, etl.TmpDirName)
_, _, chainConfig, _, _, sync, _, _, _ := newSync(ctx, db)
_, _, chainConfig, _, _, sync, _, _ := newSync(ctx, db, nil)

tx, err := db.BeginRw(ctx)
if err != nil {
@@ -408,7 +407,7 @@ func stageSenders(db ethdb.RwKV, ctx context.Context) error {
}

func stageExec(db ethdb.RwKV, ctx context.Context) error {
sm, engine, chainConfig, vmConfig, _, sync, _, _, _ := newSync(ctx, db)
sm, engine, chainConfig, vmConfig, _, sync, _, _ := newSync(ctx, db, nil)

if reset {
genesis, _ := byChain()
@@ -436,25 +435,25 @@ func stageExec(db ethdb.RwKV, ctx context.Context) error {

log.Info("Stage4", "progress", execStage.BlockNumber)
ch := ctx.Done()
cfg := stagedsync.StageExecuteBlocksCfg(db, sm.Receipts, sm.CallTraces, sm.TEVM, 0, batchSize, nil, chainConfig, engine, vmConfig, tmpDBPath)
cfg := stagedsync.StageExecuteBlocksCfg(db, sm.Receipts, sm.CallTraces, sm.TEVM, 0, batchSize, nil, chainConfig, engine, vmConfig, nil, false, tmpDBPath)
if unwind > 0 {
u := &stagedsync.UnwindState{Stage: stages.Execution, UnwindPoint: execStage.BlockNumber - unwind}
err := stagedsync.UnwindExecutionStage(u, execStage, nil, ch, cfg, nil)
err := stagedsync.UnwindExecutionStage(u, execStage, nil, ch, cfg, false)
if err != nil {
return err
}
return nil
}

err := stagedsync.SpawnExecuteBlocksStage(execStage, sync, nil, block, ch, cfg, nil)
err := stagedsync.SpawnExecuteBlocksStage(execStage, sync, nil, block, ch, cfg, false)
if err != nil {
return err
}
return nil
}

func stageTrie(db ethdb.RwKV, ctx context.Context) error {
_, _, _, _, _, sync, _, _, _ := newSync(ctx, db)
_, _, _, _, _, sync, _, _ := newSync(ctx, db, nil)
tmpdir := path.Join(datadir, etl.TmpDirName)

if reset {
@@ -497,7 +496,7 @@ func stageTrie(db ethdb.RwKV, ctx context.Context) error {
func stageHashState(db ethdb.RwKV, ctx context.Context) error {
tmpdir := path.Join(datadir, etl.TmpDirName)

_, _, _, _, _, sync, _, _, _ := newSync(ctx, db)
_, _, _, _, _, sync, _, _ := newSync(ctx, db, nil)

tx, err := db.BeginRw(ctx)
if err != nil {
@@ -537,7 +536,7 @@ func stageHashState(db ethdb.RwKV, ctx context.Context) error {
func stageLogIndex(db ethdb.RwKV, ctx context.Context) error {
tmpdir := path.Join(datadir, etl.TmpDirName)

_, _, _, _, _, sync, _, _, _ := newSync(ctx, db)
_, _, _, _, _, sync, _, _ := newSync(ctx, db, nil)
tx, err := db.BeginRw(ctx)
if err != nil {
return err
@@ -576,7 +575,7 @@ func stageLogIndex(db ethdb.RwKV, ctx context.Context) error {
func stageCallTraces(kv ethdb.RwKV, ctx context.Context) error {
tmpdir := path.Join(datadir, etl.TmpDirName)

_, engine, chainConfig, _, _, sync, _, _, _ := newSync(ctx, kv)
_, engine, chainConfig, _, _, sync, _, _ := newSync(ctx, kv, nil)
tx, err := kv.BeginRw(ctx)
if err != nil {
return err
@@ -620,7 +619,7 @@ func stageCallTraces(kv ethdb.RwKV, ctx context.Context) error {

func stageHistory(db ethdb.RwKV, ctx context.Context) error {
tmpdir := path.Join(datadir, etl.TmpDirName)
_, _, _, _, _, sync, _, _, _ := newSync(ctx, db)
_, _, _, _, _, sync, _, _ := newSync(ctx, db, nil)
tx, err := db.BeginRw(ctx)
if err != nil {
return err
@@ -667,7 +666,7 @@ func stageHistory(db ethdb.RwKV, ctx context.Context) error {
func stageTxLookup(db ethdb.RwKV, ctx context.Context) error {
tmpdir := path.Join(datadir, etl.TmpDirName)

_, _, _, _, _, sync, _, _, _ := newSync(ctx, db)
_, _, _, _, _, sync, _, _ := newSync(ctx, db, nil)

tx, err := db.BeginRw(ctx)
if err != nil {
@@ -758,7 +757,7 @@ func byChain() (*core.Genesis, *params.ChainConfig) {
return genesis, chainConfig
}

func newSync(ctx context.Context, db ethdb.RwKV) (ethdb.StorageMode, consensus.Engine, *params.ChainConfig, *vm.Config, *core.TxPool, *stagedsync.State, *stagedsync.StagedSync, chan *types.Block, chan *types.Block) {
func newSync(ctx context.Context, db ethdb.RwKV, miningConfig *params.MiningConfig) (ethdb.StorageMode, consensus.Engine, *params.ChainConfig, *vm.Config, *core.TxPool, *stagedsync.State, *stagedsync.State, stagedsync.MiningState) {
tmpdir := path.Join(datadir, etl.TmpDirName)
snapshotDir = path.Join(datadir, "erigon", "snapshot")

@@ -796,7 +795,6 @@ func newSync(ctx context.Context, db ethdb.RwKV) (ethdb.StorageMode, consensus.E

var batchSize datasize.ByteSize
must(batchSize.UnmarshalText([]byte(batchSizeStr)))
bodyDownloadTimeoutSeconds := 30 // TODO: convert to duration, make configurable

blockDownloaderWindow := 65536
downloadServer, err := download.NewControlServer(db, "", chainConfig, genesisBlock.Hash(), engine, 1, nil, blockDownloaderWindow)
@@ -814,35 +812,46 @@ func newSync(ctx context.Context, db ethdb.RwKV) (ethdb.StorageMode, consensus.E
}

txPoolP2PServer.TxFetcher = fetcher.NewTxFetcher(txPool.Has, txPool.AddRemotes, fetchTx)
st, err := stages2.NewStagedSync2(context.Background(), db, sm, batchSize,
bodyDownloadTimeoutSeconds,

cfg := ethconfig.Defaults
cfg.StorageMode = sm
cfg.BatchSize = batchSize
if miningConfig != nil {
cfg.Miner = *miningConfig
}

st, err := stages2.NewStagedSync2(context.Background(), db, cfg,
downloadServer,
tmpdir,
snapshotDir,
txPool,
txPoolP2PServer,
nil, nil, nil,
)
if err != nil {
panic(err)
}
pendingResultCh := make(chan *types.Block, 1)
miningResultCh := make(chan *types.Block, 1)
miner := stagedsync.NewMiningState(&cfg.Miner)

stMining := stagedsync.New(
stagedsync.MiningStages(
stagedsync.StageMiningCreateBlockCfg(db, ethconfig.Defaults.Miner, *chainConfig, engine, txPool, tmpdir),
stagedsync.StageMiningExecCfg(db, ethconfig.Defaults.Miner, events, *chainConfig, engine, &vm.Config{}, tmpdir),
stagedsync.StageMiningCreateBlockCfg(db, miner, *chainConfig, engine, txPool, tmpdir),
stagedsync.StageMiningExecCfg(db, miner, events, *chainConfig, engine, &vm.Config{}, tmpdir),
stagedsync.StageHashStateCfg(db, tmpdir),
stagedsync.StageTrieCfg(db, false, true, tmpdir),
stagedsync.StageMiningFinishCfg(db, *chainConfig, engine, pendingResultCh, miningResultCh, nil),
stagedsync.StageMiningFinishCfg(db, *chainConfig, engine, miner, ctx.Done()),
),
stagedsync.MiningUnwindOrder(),
stagedsync.OptionalParameters{},
)

var sync *stagedsync.State
var miningSync *stagedsync.State
if err := db.View(context.Background(), func(tx ethdb.Tx) (err error) {
sync, err = st.Prepare(vmConfig, nil, tx, sm, ctx.Done(), false, nil, nil)
sync, err = st.Prepare(nil, tx, ctx.Done(), false)
if err != nil {
return nil
}
miningSync, err = stMining.Prepare(nil, tx, ctx.Done(), false)
if err != nil {
return nil
}
@@ -851,7 +860,7 @@ func newSync(ctx context.Context, db ethdb.RwKV) (ethdb.StorageMode, consensus.E
panic(err)
}

return sm, engine, chainConfig, vmConfig, txPool, sync, stMining, pendingResultCh, miningResultCh
return sm, engine, chainConfig, vmConfig, txPool, sync, miningSync, miner
}

func progress(tx ethdb.KVGetter, stage stages.SyncStage) uint64 {