implement server kv.Snapsthos() method #4831

Merged · 2 commits · Jul 26, 2022
4 changes: 2 additions & 2 deletions cmd/rpcdaemon/cli/config.go
@@ -215,7 +215,7 @@ func checkDbCompatibility(ctx context.Context, db kv.RoDB) error {
return nil
}

- func EmbeddedServices(ctx context.Context, erigonDB kv.RoDB, stateCacheCfg kvcache.CoherentConfig, blockReader services.FullBlockReader, ethBackendServer remote.ETHBACKENDServer,
+ func EmbeddedServices(ctx context.Context, erigonDB kv.RoDB, stateCacheCfg kvcache.CoherentConfig, blockReader services.FullBlockReader, snapshots remotedbserver.Snapsthots, ethBackendServer remote.ETHBACKENDServer,
txPoolServer txpool.TxpoolServer, miningServer txpool.MiningServer,
) (
eth rpchelper.ApiBackend, txPool txpool.TxpoolClient, mining txpool.MiningClient, starknet *rpcservices.StarknetService, stateCache kvcache.Cache, ff *rpchelper.Filters, err error,
@@ -225,7 +225,7 @@ func EmbeddedServices(ctx context.Context, erigonDB kv.RoDB, stateCacheCfg kvcac
} else {
stateCache = kvcache.NewDummy()
}
- kvRPC := remotedbserver.NewKvServer(ctx, erigonDB)
+ kvRPC := remotedbserver.NewKvServer(ctx, erigonDB, snapshots)
stateDiffClient := direct.NewStateDiffClientDirect(kvRPC)
subscribeToStateChangesLoop(ctx, stateDiffClient, stateCache)

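Note: the remotedbserver package itself is not part of this diff, so the exact definition of the new Snapsthots parameter type (spelled here exactly as in the PR) is not visible. Judging from how it is used, with RoSnapshots gaining a Files() method later in this PR and being passed in as this argument, a minimal sketch of the interface could look like the following; the single-method shape is an assumption, not the confirmed definition.

package remotedbserver

// Snapsthots is a read-only view of the snapshot store that the KV gRPC server
// needs in order to report which snapshot segment files the node serves.
// Sketch only; the real interface may carry more methods.
type Snapsthots interface {
	Files() []string
}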
4 changes: 2 additions & 2 deletions cmd/rpcdaemon22/cli/config.go
@@ -210,7 +210,7 @@ func checkDbCompatibility(ctx context.Context, db kv.RoDB) error {
return nil
}

- func EmbeddedServices(ctx context.Context, erigonDB kv.RoDB, stateCacheCfg kvcache.CoherentConfig, blockReader services.FullBlockReader, ethBackendServer remote.ETHBACKENDServer,
+ func EmbeddedServices(ctx context.Context, erigonDB kv.RoDB, stateCacheCfg kvcache.CoherentConfig, blockReader services.FullBlockReader, snapshots remotedbserver.Snapsthots, ethBackendServer remote.ETHBACKENDServer,
txPoolServer txpool.TxpoolServer, miningServer txpool.MiningServer,
) (
eth rpchelper.ApiBackend, txPool txpool.TxpoolClient, mining txpool.MiningClient, starknet *rpcservices.StarknetService, stateCache kvcache.Cache, ff *rpchelper.Filters, err error,
@@ -220,7 +220,7 @@ func EmbeddedServices(ctx context.Context, erigonDB kv.RoDB, stateCacheCfg kvcac
} else {
stateCache = kvcache.NewDummy()
}
- kvRPC := remotedbserver.NewKvServer(ctx, erigonDB)
+ kvRPC := remotedbserver.NewKvServer(ctx, erigonDB, snapshots)
stateDiffClient := direct.NewStateDiffClientDirect(kvRPC)
subscribeToStateChangesLoop(ctx, stateDiffClient, stateCache)

75 changes: 38 additions & 37 deletions eth/backend.go
@@ -192,11 +192,37 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere
params.ApplyBinanceSmartChainParams()
}

+ if err := chainKv.Update(context.Background(), func(tx kv.RwTx) error {
+ if err = stagedsync.UpdateMetrics(tx); err != nil {
+ return err
+ }
+
+ config.Prune, err = prune.EnsureNotChanged(tx, config.Prune)
+ if err != nil {
+ return err
+ }
+ isCorrectSync, useSnapshots, err := snap.EnsureNotChanged(tx, config.Snapshot)
+ if err != nil {
+ return err
+ }
+ // if we are in the incorrect syncmode then we change it to the appropriate one
+ if !isCorrectSync {
+ log.Warn("Incorrect snapshot enablement", "got", config.Sync.UseSnapshots, "change_to", useSnapshots)
+ config.Sync.UseSnapshots = useSnapshots
+ config.Snapshot.Enabled = ethconfig.UseSnapshotsByChainName(chainConfig.ChainName) && useSnapshots
+ }
+ log.Info("Effective", "prune_flags", config.Prune.String(), "snapshot_flags", config.Snapshot.String())
+
+ return nil
+ }); err != nil {
+ return nil, err
+ }

ctx, ctxCancel := context.WithCancel(context.Background())
log.Info("Using snapshots", "on", config.Snapshot.Enabled)

// kv_remote architecture does blocks on stream.Send - means current architecture require unlimited amount of txs to provide good throughput
//limiter := make(chan struct{}, kv.ReadersLimit)
- kvRPC := remotedbserver.NewKvServer(ctx, chainKv) // mdbx.NewMDBX(logger).RoTxsLimiter(limiter).Readonly().Path(filepath.Join(stack.Config().DataDir, "chaindata")).Label(kv.ChainDB).MustOpen())
backend := &Ethereum{
sentryCtx: ctx,
sentryCancel: ctxCancel,
@@ -210,11 +236,18 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere
waitForStageLoopStop: make(chan struct{}),
waitForMiningStop: make(chan struct{}),
notifications: &stagedsync.Notifications{
- Events: privateapi.NewEvents(),
- Accumulator: shards.NewAccumulator(chainConfig),
- StateChangesConsumer: kvRPC,
+ Events: privateapi.NewEvents(),
+ Accumulator: shards.NewAccumulator(chainConfig),
},
}
+ blockReader, allSnapshots, err := backend.setUpBlockReader(ctx, config.Snapshot.Enabled, config)
+ if err != nil {
+ return nil, err
+ }
+
+ kvRPC := remotedbserver.NewKvServer(ctx, chainKv, allSnapshots)
+ backend.notifications.StateChangesConsumer = kvRPC
+
backend.gasPrice, _ = uint256.FromBig(config.Miner.GasPrice)

var sentries []direct.SentryClient
@@ -285,38 +318,6 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere
}

log.Info("Initialising Ethereum protocol", "network", config.NetworkID)
log.Info("Using snapshots", "on", config.Snapshot.Enabled)

if err := chainKv.Update(context.Background(), func(tx kv.RwTx) error {
if err = stagedsync.UpdateMetrics(tx); err != nil {
return err
}

config.Prune, err = prune.EnsureNotChanged(tx, config.Prune)
if err != nil {
return err
}
isCorrectSync, useSnapshots, err := snap.EnsureNotChanged(tx, config.Snapshot)
if err != nil {
return err
}
// if we are in the incorrect syncmode then we change it to the appropriate one
if !isCorrectSync {
log.Warn("Incorrect snapshot enablement", "got", config.Sync.UseSnapshots, "change_to", useSnapshots)
config.Sync.UseSnapshots = useSnapshots
config.Snapshot.Enabled = ethconfig.UseSnapshotsByChainName(chainConfig.ChainName) && useSnapshots
}
log.Info("Effective", "prune_flags", config.Prune.String(), "snapshot_flags", config.Snapshot.String())

return nil
}); err != nil {
return nil, err
}

blockReader, allSnapshots, err := backend.setUpBlockReader(ctx, config.Snapshot.Enabled, config)
if err != nil {
return nil, err
}
backend.engine = ethconsensusconfig.CreateConsensusEngine(chainConfig, logger, consensusConfig, config.Miner.Notify, config.Miner.Noverify, config.HeimdallURL, config.WithoutHeimdall, stack.DataDir(), allSnapshots)

backend.sentriesClient, err = sentry.NewMultiClient(
@@ -548,7 +549,7 @@ func New(stack *node.Node, config *ethconfig.Config, logger log.Logger) (*Ethere
httpRpcCfg := stack.Config().Http
if httpRpcCfg.Enabled {
ethRpcClient, txPoolRpcClient, miningRpcClient, starkNetRpcClient, stateCache, ff, err := cli.EmbeddedServices(
- ctx, chainKv, httpRpcCfg.StateCache, blockReader,
+ ctx, chainKv, httpRpcCfg.StateCache, blockReader, allSnapshots,
ethBackendRPC,
backend.txPool2GrpcServer,
miningRPC,
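With the reordering above, allSnapshots exists before the KV server is constructed, so NewKvServer can hold a reference to it and expose the snapshot file list over gRPC, which is what the PR title refers to. The kv_server.go change itself is not included in this excerpt; the sketch below only illustrates the presumed wiring. Everything except NewKvServer, Files and kv.RoDB is a placeholder name, and the real gRPC request/reply types are not shown here.

package remotedbserver

import (
	"context"

	"github.com/ledgerwatch/erigon-lib/kv"
)

// snapshotFiles stands in for the Snapsthots interface sketched earlier (assumed shape).
type snapshotFiles interface {
	Files() []string
}

// KvServer sketch: keeps the DB handle plus the snapshot store handed in by the caller.
type KvServer struct {
	kv        kv.RoDB
	snapshots snapshotFiles // may be nil, e.g. in tests (see mock_sentry.go below)
}

func NewKvServer(_ context.Context, db kv.RoDB, snapshots snapshotFiles) *KvServer {
	return &KvServer{kv: db, snapshots: snapshots}
}

// Snapshots reports the snapshot segment files this node serves.
func (s *KvServer) Snapshots(_ context.Context) []string {
	if s.snapshots == nil { // callers such as mock_sentry pass nil
		return nil
	}
	return s.snapshots.Files()
}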
1 change: 1 addition & 0 deletions turbo/snapshotsync/block_snapshots.go
@@ -302,6 +302,7 @@ func NewRoSnapshots(cfg ethconfig.Snapshot, snapDir string) *RoSnapshots {
return &RoSnapshots{dir: snapDir, cfg: cfg, Headers: &headerSegments{}, Bodies: &bodySegments{}, Txs: &txnSegments{}}
}

+ func (s *RoSnapshots) Files() []string { return []string{} }
func (s *RoSnapshots) Cfg() ethconfig.Snapshot { return s.cfg }
func (s *RoSnapshots) Dir() string { return s.dir }
func (s *RoSnapshots) SegmentsReady() bool { return s.segmentsReady.Load() }
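Files() is added here as a stub that returns an empty slice; the actual enumeration of downloaded segment files is not part of this excerpt. Its purpose is presumably to let *RoSnapshots satisfy the snapshots parameter of NewKvServer. Assuming the Snapsthots interface requires only Files(), that relationship can be pinned down with a compile-time assertion such as:

// Hypothetical compile-time check (assumes Snapsthots requires only Files()):
var _ remotedbserver.Snapsthots = (*snapshotsync.RoSnapshots)(nil)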
2 changes: 1 addition & 1 deletion turbo/stages/mock_sentry.go
@@ -199,7 +199,7 @@ func MockWithEverything(t *testing.T, gspec *core.Genesis, key *ecdsa.PrivateKey
db := memdb.New()
ctx, ctxCancel := context.WithCancel(context.Background())

- erigonGrpcServeer := remotedbserver.NewKvServer(ctx, db)
+ erigonGrpcServeer := remotedbserver.NewKvServer(ctx, db, nil)
mock := &MockSentry{
Ctx: ctx, cancel: ctxCancel, DB: db,
t: t,
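The in-memory test harness passes nil for the snapshots argument, which suggests the dependency is optional: the server has to tolerate a nil snapshot store, for example by reporting an empty file list, as sketched after the backend.go diff above. A hypothetical test-side call would be:

// memdb and the nil snapshots argument mirror the mock_sentry change above;
// nil simply means "no snapshot files to report" in this sketch.
db := memdb.New()
kvSrv := remotedbserver.NewKvServer(ctx, db, nil)
_ = kvSrv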