ForkID logic for an old batch and fix error handling in execute batch #1803

Merged 8 commits on Mar 16, 2023
4 changes: 2 additions & 2 deletions cmd/run.go
@@ -97,7 +97,7 @@ func start(cliCtx *cli.Context) error {
log.Infof("Chain ID read from POE SC = %v", l2ChainID)

ctx := context.Background()
-st := newState(ctx, c, l2ChainID, currentForkID, forkIDIntervals, stateSqlDB)
+st := newState(ctx, c, l2ChainID, forkIDIntervals, stateSqlDB)

ethTxManagerStorage, err := ethtxmanager.NewPostgresStorage(c.StateDB)
if err != nil {
@@ -274,7 +274,7 @@ func waitSignal(cancelFuncs []context.CancelFunc) {
}
}

-func newState(ctx context.Context, c *config.Config, l2ChainID uint64, currentForkID uint64, forkIDIntervals []state.ForkIDInterval, sqlDB *pgxpool.Pool) *state.State {
+func newState(ctx context.Context, c *config.Config, l2ChainID uint64, forkIDIntervals []state.ForkIDInterval, sqlDB *pgxpool.Pool) *state.State {
stateDb := state.NewPostgresStorage(sqlDB)
executorClient, _, _ := executor.NewExecutorClient(ctx, c.Executor)
stateDBClient, _, _ := merkletree.NewMTDBServiceClient(ctx, c.MTClient)
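The single currentForkID argument disappears from newState: with fork ID intervals in place, the fork to apply is a property of each batch rather than of the node as a whole. A minimal sketch of that per-batch lookup, assuming an interval shape like the one exercised in TestGetForks further down (forkIDForBatch is a hypothetical helper, not the node's API):

```go
package main

import (
	"fmt"
	"math"
)

// ForkIDInterval mirrors the fields visible in TestGetForks below; the lookup
// helper is illustrative only.
type ForkIDInterval struct {
	FromBatchNumber uint64
	ToBatchNumber   uint64
	ForkID          uint64
	Version         string
}

// forkIDForBatch returns the fork ID whose interval contains batchNumber.
func forkIDForBatch(intervals []ForkIDInterval, batchNumber uint64) uint64 {
	for _, in := range intervals {
		if batchNumber >= in.FromBatchNumber && batchNumber <= in.ToBatchNumber {
			return in.ForkID
		}
	}
	return 0 // unknown: no interval covers this batch
}

func main() {
	intervals := []ForkIDInterval{
		{FromBatchNumber: 0, ToBatchNumber: 100, ForkID: 1, Version: "v1"},
		{FromBatchNumber: 101, ToBatchNumber: math.MaxUint64, ForkID: 2, Version: "v2"},
	}
	fmt.Println(forkIDForBatch(intervals, 42))  // 1
	fmt.Println(forkIDForBatch(intervals, 500)) // 2
}
```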
48 changes: 42 additions & 6 deletions etherman/etherman.go
@@ -86,6 +86,8 @@ const (
TrustedVerifyBatchOrder EventOrder = "TrustedVerifyBatch"
// SequenceForceBatchesOrder identifies a SequenceForceBatches event
SequenceForceBatchesOrder EventOrder = "SequenceForceBatches"
// ForkIDsOrder identifies an updateZkevmVersion event
ForkIDsOrder EventOrder = "forkIDs"
)

type ethereumClient interface {
@@ -333,30 +335,64 @@ func (etherMan *Client) processEvent(ctx context.Context, vLog types.Log, blocks
log.Debug("EmergencyStateDeactivated event detected")
return nil
case updateZkEVMVersionSignatureHash:
log.Debug("UpdateZkEVMVersion event detected")
return nil
return etherMan.updateZkevmVersion(ctx, vLog, blocks, blocksOrder)
}
log.Warn("Event not registered: ", vLog)
return nil
}

func (etherMan *Client) updateZkevmVersion(ctx context.Context, vLog types.Log, blocks *[]Block, blocksOrder *map[common.Hash][]Order) error {
log.Debug("UpdateZkEVMVersion event detected")
zkevmVersion, err := etherMan.PoE.ParseUpdateZkEVMVersion(vLog)
if err != nil {
log.Error("error parsing UpdateZkEVMVersion event. Error: ", err)
return err
}
fork := ForkID{
BatchNumber: zkevmVersion.NumBatch,
ForkID: zkevmVersion.ForkID,
Version: zkevmVersion.Version,
}
if len(*blocks) == 0 || ((*blocks)[len(*blocks)-1].BlockHash != vLog.BlockHash || (*blocks)[len(*blocks)-1].BlockNumber != vLog.BlockNumber) {
fullBlock, err := etherMan.EthClient.BlockByHash(ctx, vLog.BlockHash)
if err != nil {
return fmt.Errorf("error getting hashParent. BlockNumber: %d. Error: %w", vLog.BlockNumber, err)
}
t := time.Unix(int64(fullBlock.Time()), 0)
block := prepareBlock(vLog, t, fullBlock)
block.ForkIDs = append(block.ForkIDs, fork)
*blocks = append(*blocks, block)
} else if (*blocks)[len(*blocks)-1].BlockHash == vLog.BlockHash && (*blocks)[len(*blocks)-1].BlockNumber == vLog.BlockNumber {
(*blocks)[len(*blocks)-1].ForkIDs = append((*blocks)[len(*blocks)-1].ForkIDs, fork)
} else {
log.Error("Error processing updateZkevmVersion event. BlockHash:", vLog.BlockHash, ". BlockNumber: ", vLog.BlockNumber)
return fmt.Errorf("error processing updateZkevmVersion event")
}
or := Order{
Name: ForkIDsOrder,
Pos: len((*blocks)[len(*blocks)-1].ForkIDs) - 1,
}
(*blocksOrder)[(*blocks)[len(*blocks)-1].BlockHash] = append((*blocksOrder)[(*blocks)[len(*blocks)-1].BlockHash], or)
return nil
}

func (etherMan *Client) updateGlobalExitRootEvent(ctx context.Context, vLog types.Log, blocks *[]Block, blocksOrder *map[common.Hash][]Order) error {
log.Debug("UpdateGlobalExitRoot event detected")
globalExitRoot, err := etherMan.GlobalExitRootManager.ParseUpdateGlobalExitRoot(vLog)
if err != nil {
return err
}
-fullBlock, err := etherMan.EthClient.BlockByHash(ctx, vLog.BlockHash)
-if err != nil {
-return fmt.Errorf("error getting hashParent. BlockNumber: %d. Error: %w", vLog.BlockNumber, err)
-}
var gExitRoot GlobalExitRoot
gExitRoot.MainnetExitRoot = common.BytesToHash(globalExitRoot.MainnetExitRoot[:])
gExitRoot.RollupExitRoot = common.BytesToHash(globalExitRoot.RollupExitRoot[:])
gExitRoot.BlockNumber = vLog.BlockNumber
gExitRoot.GlobalExitRoot = hash(globalExitRoot.MainnetExitRoot, globalExitRoot.RollupExitRoot)

+if len(*blocks) == 0 || ((*blocks)[len(*blocks)-1].BlockHash != vLog.BlockHash || (*blocks)[len(*blocks)-1].BlockNumber != vLog.BlockNumber) {
+fullBlock, err := etherMan.EthClient.BlockByHash(ctx, vLog.BlockHash)
+if err != nil {
+return fmt.Errorf("error getting hashParent. BlockNumber: %d. Error: %w", vLog.BlockNumber, err)
+}
t := time.Unix(int64(fullBlock.Time()), 0)
block := prepareBlock(vLog, t, fullBlock)
block.GlobalExitRoots = append(block.GlobalExitRoots, gExitRoot)
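The new updateZkevmVersion handler follows the same accumulation pattern as the other event handlers: an event that lands in the same L1 block as the last collected Block is appended to it, otherwise a new Block entry is created (fetching the full block for its timestamp), and the event's index is recorded in blocksOrder under the block hash so the synchronizer can replay events in order. A stripped-down sketch of that grouping with simplified, hypothetical types (no RPC call):

```go
package main

import "fmt"

// Simplified stand-ins for the etherman types; field names follow the diff
// above, but this is an illustrative sketch, not the package's implementation.
type ForkID struct {
	BatchNumber uint64
	ForkID      uint64
	Version     string
}

type Block struct {
	BlockNumber uint64
	BlockHash   string
	ForkIDs     []ForkID
}

type Order struct {
	Name string
	Pos  int
}

// addForkIDEvent appends the event to the last collected block when it belongs
// to the same L1 block, otherwise starts a new Block entry, and records the
// element's position under the block hash (mirroring updateZkevmVersion).
func addForkIDEvent(blocks *[]Block, order map[string][]Order, blockNum uint64, blockHash string, fork ForkID) {
	n := len(*blocks)
	if n == 0 || (*blocks)[n-1].BlockHash != blockHash || (*blocks)[n-1].BlockNumber != blockNum {
		*blocks = append(*blocks, Block{BlockNumber: blockNum, BlockHash: blockHash, ForkIDs: []ForkID{fork}})
	} else {
		(*blocks)[n-1].ForkIDs = append((*blocks)[n-1].ForkIDs, fork)
	}
	last := (*blocks)[len(*blocks)-1]
	order[blockHash] = append(order[blockHash], Order{Name: "forkIDs", Pos: len(last.ForkIDs) - 1})
}

func main() {
	var blocks []Block
	order := map[string][]Order{}
	addForkIDEvent(&blocks, order, 1, "0xaa", ForkID{BatchNumber: 0, ForkID: 1, Version: "v1"})
	addForkIDEvent(&blocks, order, 1, "0xaa", ForkID{BatchNumber: 10, ForkID: 2, Version: "v2"})
	fmt.Printf("%+v\n%+v\n", blocks, order)
}
```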
110 changes: 65 additions & 45 deletions etherman/etherman_test.go
@@ -75,10 +75,10 @@ func TestGEREvent(t *testing.T) {
finalBlockNumber := finalBlock.NumberU64()
blocks, _, err := etherman.GetRollupInfoByBlockRange(ctx, initBlock.NumberU64(), &finalBlockNumber)
require.NoError(t, err)
-
-assert.Equal(t, uint64(2), blocks[0].GlobalExitRoots[0].BlockNumber)
-assert.NotEqual(t, common.Hash{}, blocks[0].GlobalExitRoots[0].MainnetExitRoot)
-assert.Equal(t, common.Hash{}, blocks[0].GlobalExitRoots[0].RollupExitRoot)
+t.Log("Blocks: ", blocks)
+assert.Equal(t, uint64(2), blocks[1].GlobalExitRoots[0].BlockNumber)
+assert.NotEqual(t, common.Hash{}, blocks[1].GlobalExitRoots[0].MainnetExitRoot)
+assert.Equal(t, common.Hash{}, blocks[1].GlobalExitRoots[0].RollupExitRoot)
}

func TestForcedBatchEvent(t *testing.T) {
@@ -107,13 +107,14 @@ func TestForcedBatchEvent(t *testing.T) {
finalBlockNumber := finalBlock.NumberU64()
blocks, _, err := etherman.GetRollupInfoByBlockRange(ctx, initBlock.NumberU64(), &finalBlockNumber)
require.NoError(t, err)
-assert.Equal(t, uint64(2), blocks[0].BlockNumber)
-assert.Equal(t, uint64(2), blocks[0].ForcedBatches[0].BlockNumber)
-assert.NotEqual(t, common.Hash{}, blocks[0].ForcedBatches[0].GlobalExitRoot)
-assert.NotEqual(t, time.Time{}, blocks[0].ForcedBatches[0].ForcedAt)
-assert.Equal(t, uint64(1), blocks[0].ForcedBatches[0].ForcedBatchNumber)
-assert.Equal(t, rawTxs, hex.EncodeToString(blocks[0].ForcedBatches[0].RawTxsData))
-assert.Equal(t, auth.From, blocks[0].ForcedBatches[0].Sequencer)
+t.Log("Blocks: ", blocks)
+assert.Equal(t, uint64(2), blocks[1].BlockNumber)
+assert.Equal(t, uint64(2), blocks[1].ForcedBatches[0].BlockNumber)
+assert.NotEqual(t, common.Hash{}, blocks[1].ForcedBatches[0].GlobalExitRoot)
+assert.NotEqual(t, time.Time{}, blocks[1].ForcedBatches[0].ForcedAt)
+assert.Equal(t, uint64(1), blocks[1].ForcedBatches[0].ForcedBatchNumber)
+assert.Equal(t, rawTxs, hex.EncodeToString(blocks[1].ForcedBatches[0].RawTxsData))
+assert.Equal(t, auth.From, blocks[1].ForcedBatches[0].Sequencer)
}

func TestSequencedBatchesEvent(t *testing.T) {
@@ -152,11 +153,12 @@ func TestSequencedBatchesEvent(t *testing.T) {
currentBlockNumber := currentBlock.NumberU64()
blocks, _, err := etherman.GetRollupInfoByBlockRange(ctx, initBlock.NumberU64(), &currentBlockNumber)
require.NoError(t, err)
+t.Log("Blocks: ", blocks)
var sequences []polygonzkevm.PolygonZkEVMBatchData
sequences = append(sequences, polygonzkevm.PolygonZkEVMBatchData{
GlobalExitRoot: ger,
Timestamp: currentBlock.Time(),
-MinForcedTimestamp: uint64(blocks[1].ForcedBatches[0].ForcedAt.Unix()),
+MinForcedTimestamp: uint64(blocks[2].ForcedBatches[0].ForcedAt.Unix()),
Transactions: common.Hex2Bytes(rawTxs),
})
sequences = append(sequences, polygonzkevm.PolygonZkEVMBatchData{
@@ -177,15 +179,16 @@
finalBlockNumber := finalBlock.NumberU64()
blocks, order, err := etherman.GetRollupInfoByBlockRange(ctx, initBlock.NumberU64(), &finalBlockNumber)
require.NoError(t, err)
-assert.Equal(t, 3, len(blocks))
-assert.Equal(t, 1, len(blocks[2].SequencedBatches))
-assert.Equal(t, common.Hex2Bytes(rawTxs), blocks[2].SequencedBatches[0][1].Transactions)
-assert.Equal(t, currentBlock.Time(), blocks[2].SequencedBatches[0][0].Timestamp)
-assert.Equal(t, ger, blocks[2].SequencedBatches[0][0].GlobalExitRoot)
-assert.Equal(t, auth.From, blocks[2].SequencedBatches[0][0].Coinbase)
-assert.Equal(t, auth.From, blocks[2].SequencedBatches[0][0].SequencerAddr)
-assert.Equal(t, currentBlock.Time(), blocks[2].SequencedBatches[0][0].MinForcedTimestamp)
-assert.Equal(t, 0, order[blocks[2].BlockHash][0].Pos)
+t.Log("Blocks: ", blocks)
+assert.Equal(t, 4, len(blocks))
+assert.Equal(t, 1, len(blocks[3].SequencedBatches))
+assert.Equal(t, common.Hex2Bytes(rawTxs), blocks[3].SequencedBatches[0][1].Transactions)
+assert.Equal(t, currentBlock.Time(), blocks[3].SequencedBatches[0][0].Timestamp)
+assert.Equal(t, ger, blocks[3].SequencedBatches[0][0].GlobalExitRoot)
+assert.Equal(t, auth.From, blocks[3].SequencedBatches[0][0].Coinbase)
+assert.Equal(t, auth.From, blocks[3].SequencedBatches[0][0].SequencerAddr)
+assert.Equal(t, currentBlock.Time(), blocks[3].SequencedBatches[0][0].MinForcedTimestamp)
+assert.Equal(t, 0, order[blocks[3].BlockHash][0].Pos)
}

func TestVerifyBatchEvent(t *testing.T) {
@@ -223,15 +226,15 @@ func TestVerifyBatchEvent(t *testing.T) {
finalBlockNumber := finalBlock.NumberU64()
blocks, order, err := etherman.GetRollupInfoByBlockRange(ctx, initBlock.NumberU64(), &finalBlockNumber)
require.NoError(t, err)
-
-assert.Equal(t, uint64(3), blocks[1].BlockNumber)
-assert.Equal(t, uint64(1), blocks[1].VerifiedBatches[0].BatchNumber)
-assert.NotEqual(t, common.Address{}, blocks[1].VerifiedBatches[0].Aggregator)
-assert.NotEqual(t, common.Hash{}, blocks[1].VerifiedBatches[0].TxHash)
-assert.Equal(t, GlobalExitRootsOrder, order[blocks[1].BlockHash][0].Name)
-assert.Equal(t, TrustedVerifyBatchOrder, order[blocks[1].BlockHash][1].Name)
-assert.Equal(t, 0, order[blocks[1].BlockHash][0].Pos)
-assert.Equal(t, 0, order[blocks[1].BlockHash][1].Pos)
+t.Log("Blocks: ", blocks)
+assert.Equal(t, uint64(3), blocks[2].BlockNumber)
+assert.Equal(t, uint64(1), blocks[2].VerifiedBatches[0].BatchNumber)
+assert.NotEqual(t, common.Address{}, blocks[2].VerifiedBatches[0].Aggregator)
+assert.NotEqual(t, common.Hash{}, blocks[2].VerifiedBatches[0].TxHash)
+assert.Equal(t, GlobalExitRootsOrder, order[blocks[2].BlockHash][0].Name)
+assert.Equal(t, TrustedVerifyBatchOrder, order[blocks[2].BlockHash][1].Name)
+assert.Equal(t, 0, order[blocks[2].BlockHash][0].Pos)
+assert.Equal(t, 0, order[blocks[2].BlockHash][1].Pos)
}

func TestSequenceForceBatchesEvent(t *testing.T) {
@@ -262,11 +265,12 @@ func TestSequenceForceBatchesEvent(t *testing.T) {
finalBlockNumber := finalBlock.NumberU64()
blocks, _, err := etherman.GetRollupInfoByBlockRange(ctx, initBlock.NumberU64(), &finalBlockNumber)
require.NoError(t, err)
+t.Log("Blocks: ", blocks)

forceBatchData := polygonzkevm.PolygonZkEVMForcedBatchData{
-Transactions: blocks[0].ForcedBatches[0].RawTxsData,
-GlobalExitRoot: blocks[0].ForcedBatches[0].GlobalExitRoot,
-MinForcedTimestamp: uint64(blocks[0].ForcedBatches[0].ForcedAt.Unix()),
+Transactions: blocks[1].ForcedBatches[0].RawTxsData,
+GlobalExitRoot: blocks[1].ForcedBatches[0].GlobalExitRoot,
+MinForcedTimestamp: uint64(blocks[1].ForcedBatches[0].ForcedAt.Unix()),
}
_, err = etherman.PoE.SequenceForceBatches(auth, []polygonzkevm.PolygonZkEVMForcedBatchData{forceBatchData})
require.NoError(t, err)
@@ -278,10 +282,11 @@ func TestSequenceForceBatchesEvent(t *testing.T) {
finalBlockNumber = finalBlock.NumberU64()
blocks, order, err := etherman.GetRollupInfoByBlockRange(ctx, initBlock.NumberU64(), &finalBlockNumber)
require.NoError(t, err)
-assert.Equal(t, uint64(4), blocks[1].BlockNumber)
-assert.Equal(t, uint64(1), blocks[1].SequencedForceBatches[0][0].BatchNumber)
-assert.Equal(t, uint64(20), blocks[1].SequencedForceBatches[0][0].MinForcedTimestamp)
-assert.Equal(t, 0, order[blocks[1].BlockHash][0].Pos)
+t.Log("Blocks: ", blocks)
+assert.Equal(t, uint64(4), blocks[2].BlockNumber)
+assert.Equal(t, uint64(1), blocks[2].SequencedForceBatches[0][0].BatchNumber)
+assert.Equal(t, uint64(20), blocks[2].SequencedForceBatches[0][0].MinForcedTimestamp)
+assert.Equal(t, 0, order[blocks[2].BlockHash][0].Pos)
}

func TestSendSequences(t *testing.T) {
@@ -326,14 +331,15 @@ func TestSendSequences(t *testing.T) {
finalBlockNumber := finalBlock.NumberU64()
blocks, order, err := etherman.GetRollupInfoByBlockRange(ctx, initBlock.NumberU64(), &finalBlockNumber)
require.NoError(t, err)
-assert.Equal(t, 2, len(blocks))
-assert.Equal(t, 1, len(blocks[1].SequencedBatches))
-assert.Equal(t, currentBlock.Time()-1, blocks[1].SequencedBatches[0][0].Timestamp)
-assert.Equal(t, ger, blocks[1].SequencedBatches[0][0].GlobalExitRoot)
-assert.Equal(t, auth.From, blocks[1].SequencedBatches[0][0].Coinbase)
-assert.Equal(t, auth.From, blocks[1].SequencedBatches[0][0].SequencerAddr)
-assert.Equal(t, uint64(0), blocks[1].SequencedBatches[0][0].MinForcedTimestamp)
-assert.Equal(t, 0, order[blocks[1].BlockHash][0].Pos)
+t.Log("Blocks: ", blocks)
+assert.Equal(t, 3, len(blocks))
+assert.Equal(t, 1, len(blocks[2].SequencedBatches))
+assert.Equal(t, currentBlock.Time()-1, blocks[2].SequencedBatches[0][0].Timestamp)
+assert.Equal(t, ger, blocks[2].SequencedBatches[0][0].GlobalExitRoot)
+assert.Equal(t, auth.From, blocks[2].SequencedBatches[0][0].Coinbase)
+assert.Equal(t, auth.From, blocks[2].SequencedBatches[0][0].SequencerAddr)
+assert.Equal(t, uint64(0), blocks[2].SequencedBatches[0][0].MinForcedTimestamp)
+assert.Equal(t, 0, order[blocks[2].BlockHash][0].Pos)
}

func TestGasPrice(t *testing.T) {
@@ -399,4 +405,18 @@ func TestGetForks(t *testing.T) {
assert.Equal(t, uint64(0), forks[0].FromBatchNumber)
assert.Equal(t, uint64(math.MaxUint64), forks[0].ToBatchNumber)
assert.Equal(t, "v1", forks[0].Version)
// Now read the event
finalBlock, err := etherman.EthClient.BlockByNumber(ctx, nil)
require.NoError(t, err)
finalBlockNumber := finalBlock.NumberU64()
blocks, order, err := etherman.GetRollupInfoByBlockRange(ctx, 0, &finalBlockNumber)
require.NoError(t, err)
t.Logf("Blocks: %+v", blocks)
assert.Equal(t, 1, len(blocks))
assert.Equal(t, 1, len(blocks[0].ForkIDs))
assert.Equal(t, 0, order[blocks[0].BlockHash][0].Pos)
assert.Equal(t, ForkIDsOrder, order[blocks[0].BlockHash][0].Name)
assert.Equal(t, uint64(0), blocks[0].ForkIDs[0].BatchNumber)
assert.Equal(t, uint64(1), blocks[0].ForkIDs[0].ForkID)
assert.Equal(t, "v1", blocks[0].ForkIDs[0].Version)
}
8 changes: 8 additions & 0 deletions etherman/types.go
@@ -17,6 +17,7 @@ type Block struct {
SequencedBatches [][]SequencedBatch
VerifiedBatches []VerifiedBatch
SequencedForceBatches [][]SequencedForceBatch
ForkIDs []ForkID
ReceivedAt time.Time
}

@@ -66,3 +67,10 @@ type SequencedForceBatch struct {
Nonce uint64
polygonzkevm.PolygonZkEVMForcedBatchData
}

// ForkID is a struct to track the ForkID event.
type ForkID struct {
BatchNumber uint64
ForkID uint64
Version string
}

Review comment (Contributor): What do you think about merging this with state/forkid.go? Maybe moving everything to the state or the etherman. (IMO this is more related to state.)

Reply (Contributor, PR author): I see your point, but the problem is that the state.ForkID struct doesn't fit very well with the needs of the event. The synchronizer translates the info received from L1 into the info required by the state. We do the same thing with other structs, such as etherman.SequencedBatch versus state.Batch and state.VirtualBatch.
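As the review exchange above notes, etherman.ForkID only carries what the L1 event provides, and the synchronizer is expected to translate it into whatever the state needs. A hedged sketch of one possible translation into batch-number intervals of the shape seen in TestGetForks (applyForkIDEvent and the exact boundary handling are assumptions, not the node's code):

```go
package main

import (
	"fmt"
	"math"
)

// Event-side shape, following etherman.ForkID above.
type ForkID struct {
	BatchNumber uint64
	ForkID      uint64
	Version     string
}

// State-side shape, following the interval fields seen in TestGetForks.
type ForkIDInterval struct {
	FromBatchNumber uint64
	ToBatchNumber   uint64
	ForkID          uint64
	Version         string
}

// applyForkIDEvent closes the currently open interval at the event's batch
// number and opens a new one for the announced fork.
func applyForkIDEvent(intervals []ForkIDInterval, ev ForkID) []ForkIDInterval {
	if n := len(intervals); n > 0 && ev.BatchNumber > 0 {
		intervals[n-1].ToBatchNumber = ev.BatchNumber - 1
	}
	return append(intervals, ForkIDInterval{
		FromBatchNumber: ev.BatchNumber,
		ToBatchNumber:   math.MaxUint64,
		ForkID:          ev.ForkID,
		Version:         ev.Version,
	})
}

func main() {
	intervals := []ForkIDInterval{{FromBatchNumber: 0, ToBatchNumber: math.MaxUint64, ForkID: 1, Version: "v1"}}
	intervals = applyForkIDEvent(intervals, ForkID{BatchNumber: 5000, ForkID: 2, Version: "v2"})
	fmt.Printf("%+v\n", intervals)
}
```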
47 changes: 44 additions & 3 deletions state/pgstatestorage.go
@@ -105,15 +105,43 @@ func (p *PostgresStorage) Reset(ctx context.Context, blockNumber uint64, dbTx pg
return nil
}

// ResetForkID resets the state to reprocess the newer batches with the correct forkID
func (p *PostgresStorage) ResetForkID(ctx context.Context, batchNumber, forkID uint64, version string, dbTx pgx.Tx) error {
e := p.getExecQuerier(dbTx)
const resetVirtualStateSQL = "delete from state.block where block_num >=(select min(block_num) from state.virtual_batch where batch_num >= $1)"
if _, err := e.Exec(ctx, resetVirtualStateSQL, batchNumber); err != nil {
return err
}
err := p.ResetTrustedState(ctx, batchNumber-1, dbTx)
if err != nil {
return err
}
reorg := TrustedReorg{
BatchNumber: batchNumber,
Reason: fmt.Sprintf("New ForkID: %d. Version: %s", forkID, version),
}
err = p.AddTrustedReorg(ctx, &reorg, dbTx)
if err != nil {
return err
}

// Delete proofs for higher batches
const deleteProofsSQL = "delete from state.proof where batch_num >= $1 or (batch_num <= $1 and batch_num_final >= $1)"
if _, err := e.Exec(ctx, deleteProofsSQL, batchNumber); err != nil {
return err
}

return nil
}

// ResetTrustedState removes the batches with number greater than the given one
// from the database.
-func (p *PostgresStorage) ResetTrustedState(ctx context.Context, batchNum uint64, dbTx pgx.Tx) error {
+func (p *PostgresStorage) ResetTrustedState(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) error {
const resetTrustedStateSQL = "DELETE FROM state.batch WHERE batch_num > $1"
e := p.getExecQuerier(dbTx)
-if _, err := e.Exec(ctx, resetTrustedStateSQL, batchNum); err != nil {
+if _, err := e.Exec(ctx, resetTrustedStateSQL, batchNumber); err != nil {
return err
}
// TODO Find a way to put txs in the pool again
return nil
}

@@ -2239,6 +2267,19 @@ func (p *PostgresStorage) CountReorgs(ctx context.Context, dbTx pgx.Tx) (uint64,
return count, nil
}

// GetForkIDTrustedReorgCount returns how many trusted reorgs have been recorded for the given forkID and version
func (p *PostgresStorage) GetForkIDTrustedReorgCount(ctx context.Context, forkID uint64, version string, dbTx pgx.Tx) (uint64, error) {
const forkIDTrustedReorgSQL = "SELECT COUNT(*) FROM state.trusted_reorg WHERE reason=$1"

var count uint64
q := p.getExecQuerier(dbTx)
err := q.QueryRow(ctx, forkIDTrustedReorgSQL, fmt.Sprintf("New ForkID: %d. Version: %s", forkID, version)).Scan(&count)
if err != nil {
return 0, err
}
return count, nil
}

// GetReorgedTransactions returns the transactions that were reorged
func (p *PostgresStorage) GetReorgedTransactions(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) ([]*types.Transaction, error) {
const getReorgedTransactionsSql = "SELECT encoded FROM state.transaction t INNER JOIN state.l2block b ON t.l2_block_num = b.block_num WHERE b.batch_num >= $1 ORDER BY l2_block_num ASC"
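Taken together, ResetForkID and GetForkIDTrustedReorgCount give a caller what it needs when a forkID event arrives for a batch range that has already been synced: check whether this particular fork reset was already recorded as a trusted reorg, and if not, roll the state back so the affected batches are reprocessed under the correct forkID. A hypothetical caller sketch (interface and function names are illustrative, and the dbTx parameter is dropped to keep it self-contained):

```go
package main

import (
	"context"
	"fmt"
)

// forkIDStorage stands in for the two storage methods added in this file.
type forkIDStorage interface {
	GetForkIDTrustedReorgCount(ctx context.Context, forkID uint64, version string) (uint64, error)
	ResetForkID(ctx context.Context, batchNumber, forkID uint64, version string) error
}

// handleOldBatchForkID sketches what a synchronizer might do when a forkID
// event points at a batch range that has already been synced.
func handleOldBatchForkID(ctx context.Context, st forkIDStorage, lastSyncedBatch, eventBatch, forkID uint64, version string) error {
	if eventBatch >= lastSyncedBatch {
		return nil // the fork only affects batches that have not been processed yet
	}
	count, err := st.GetForkIDTrustedReorgCount(ctx, forkID, version)
	if err != nil {
		return err
	}
	if count > 0 {
		return nil // this fork reset was already recorded as a trusted reorg
	}
	// Roll the state back so the affected batches are reprocessed with the
	// correct forkID (whether the boundary is eventBatch or eventBatch+1
	// depends on the event's exact semantics).
	return st.ResetForkID(ctx, eventBatch+1, forkID, version)
}

type fakeStore struct{ reorgs uint64 }

func (f *fakeStore) GetForkIDTrustedReorgCount(context.Context, uint64, string) (uint64, error) {
	return f.reorgs, nil
}

func (f *fakeStore) ResetForkID(_ context.Context, batchNumber, forkID uint64, version string) error {
	fmt.Printf("reset from batch %d for forkID %d (%s)\n", batchNumber, forkID, version)
	return nil
}

func main() {
	_ = handleOldBatchForkID(context.Background(), &fakeStore{}, 5000, 1000, 2, "v2")
}
```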
15 changes: 11 additions & 4 deletions state/state.go
@@ -107,6 +107,12 @@ func (s *State) PrepareWebSocket() {
go s.handleEvents()
}

// UpdateForkIDIntervals updates the forkID intervals
func (s *State) UpdateForkIDIntervals(intervals []ForkIDInterval) {
log.Infof("Updating forkIDs. Setting %d forkIDs", len(intervals))
s.cfg.ForkIDIntervals = intervals
}

// BeginStateTransaction starts a state transaction
func (s *State) BeginStateTransaction(ctx context.Context) (pgx.Tx, error) {
tx, err := s.Begin(ctx)
@@ -559,10 +565,11 @@ func (s *State) ExecuteBatch(ctx context.Context, batch Batch, updateMerkleTree

processBatchResponse, err := s.executorClient.ProcessBatch(ctx, processBatchRequest)
if err != nil {
-if processBatchResponse.Error != executor.EXECUTOR_ERROR_NO_ERROR {
-err = executor.ExecutorErr(processBatchResponse.Error)
-s.LogExecutorError(processBatchResponse.Error, processBatchRequest)
-}
+log.Error("error executing batch: ", err)
+return nil, err
+} else if processBatchResponse != nil && processBatchResponse.Error != executor.EXECUTOR_ERROR_NO_ERROR {
+err = executor.ExecutorErr(processBatchResponse.Error)
+s.LogExecutorError(processBatchResponse.Error, processBatchRequest)
}

return processBatchResponse, err
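The error-handling change above addresses two problems in the old code: when the gRPC call itself failed, processBatchResponse was typically nil, so reading processBatchResponse.Error could panic, and the transport error itself was never logged or surfaced. The fix returns the transport error immediately and only maps executor error codes when a response actually came back. A self-contained illustration of the pattern with stand-in types (not the node's real executor client):

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// Minimal stand-ins to illustrate the "transport error first, payload error
// second" pattern; names and error codes are illustrative only.
type processBatchResponse struct {
	Error int // 0 means no executor error
}

type executorClient interface {
	ProcessBatch(ctx context.Context, req string) (*processBatchResponse, error)
}

func executeBatch(ctx context.Context, c executorClient, req string) (*processBatchResponse, error) {
	resp, err := c.ProcessBatch(ctx, req)
	if err != nil {
		// The RPC itself failed: resp is usually nil here, so it must not be
		// dereferenced (the bug this PR fixes). Surface the transport error.
		return nil, fmt.Errorf("error executing batch: %w", err)
	}
	if resp != nil && resp.Error != 0 {
		// The call succeeded but the executor reported a batch-level error.
		err = fmt.Errorf("executor error code %d", resp.Error)
	}
	return resp, err
}

type failingClient struct{}

func (failingClient) ProcessBatch(context.Context, string) (*processBatchResponse, error) {
	return nil, errors.New("connection refused")
}

func main() {
	_, err := executeBatch(context.Background(), failingClient{}, "batch-1")
	fmt.Println(err) // error executing batch: connection refused (no panic)
}
```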