Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion common/database/db.go
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ func (g *gormLogger) Error(_ context.Context, msg string, data ...interface{}) {
func (g *gormLogger) Trace(_ context.Context, begin time.Time, fc func() (string, int64), err error) {
elapsed := time.Since(begin)
sql, rowsAffected := fc()
g.gethLogger.Debug("gorm", "line", utils.FileWithLineNum(), "cost", elapsed, "sql", sql, "rowsAffected", rowsAffected, "err", err)
g.gethLogger.Trace("gorm", "line", utils.FileWithLineNum(), "cost", elapsed, "sql", sql, "rowsAffected", rowsAffected, "err", err)
}

// InitDB init the db handler
Expand Down
2 changes: 1 addition & 1 deletion common/version/version.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ import (
"runtime/debug"
)

var tag = "v4.5.15"
var tag = "v4.5.16"

var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {
Expand Down
28 changes: 22 additions & 6 deletions rollup/internal/controller/relayer/l2_relayer.go
Original file line number Diff line number Diff line change
Expand Up @@ -326,16 +326,24 @@ func (r *Layer2Relayer) ProcessPendingBatches() {

// if backlog outgrows max size, force-submit enough oldest batches
backlogCount, err := r.batchOrm.GetFailedAndPendingBatchesCount(r.ctx)
r.metrics.rollupL2RelayerBacklogCounts.Set(float64(backlogCount))

if err != nil {
log.Error("Failed to fetch pending L2 batches", "err", err)
return
}

var forceSubmit bool

oldestBatchTimestamp := dbBatches[0].CreatedAt
startChunk, err := r.chunkOrm.GetChunkByIndex(r.ctx, dbBatches[0].StartChunkIndex)
oldestBlockTimestamp := time.Unix(int64(startChunk.StartBlockTime), 0)
if err != nil {
log.Error("failed to get first chunk", "err", err, "batch index", dbBatches[0].Index, "chunk index", dbBatches[0].StartChunkIndex)
return
}

// if the batch with the oldest index is too old, we force submit all batches that we have so far in the next step
if r.cfg.BatchSubmission.TimeoutSec > 0 && time.Since(oldestBatchTimestamp) > time.Duration(r.cfg.BatchSubmission.TimeoutSec)*time.Second {
if r.cfg.BatchSubmission.TimeoutSec > 0 && time.Since(oldestBlockTimestamp) > time.Duration(r.cfg.BatchSubmission.TimeoutSec)*time.Second {
forceSubmit = true
}

Expand All @@ -346,10 +354,12 @@ func (r *Layer2Relayer) ProcessPendingBatches() {

if !forceSubmit {
// check if we should skip submitting the batch based on the fee target
skip, err := r.skipSubmitByFee(oldestBatchTimestamp)
skip, err := r.skipSubmitByFee(oldestBlockTimestamp, r.metrics)
// return if not hitting target price
if skip {
log.Debug("Skipping batch submission", "reason", err)
log.Debug("Skipping batch submission", "first batch index", dbBatches[0].Index, "backlog count", backlogCount, "reason", err)
log.Debug("first batch index", dbBatches[0].Index)
log.Debug("backlog count", backlogCount)
return
}
if err != nil {
Expand Down Expand Up @@ -432,7 +442,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
}

if forceSubmit {
log.Info("Forcing submission of batches due to timeout", "batch index", batchesToSubmit[0].Batch.Index, "created at", batchesToSubmit[0].Batch.CreatedAt)
log.Info("Forcing submission of batches due to timeout", "batch index", batchesToSubmit[0].Batch.Index, "first block created at", oldestBlockTimestamp)
}

// We have at least 1 batch to commit
Expand Down Expand Up @@ -497,6 +507,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
r.metrics.rollupL2RelayerCommitThroughput.Add(float64(totalGasUsed))
r.metrics.rollupL2RelayerProcessPendingBatchSuccessTotal.Add(float64(len(batchesToSubmit)))
r.metrics.rollupL2RelayerProcessBatchesPerTxCount.Set(float64(len(batchesToSubmit)))
r.metrics.rollupL2RelayerCommitLatency.Set(time.Since(oldestBlockTimestamp).Seconds())

log.Info("Sent the commitBatches tx to layer1", "batches count", len(batchesToSubmit), "start index", firstBatch.Index, "start hash", firstBatch.Hash, "end index", lastBatch.Index, "end hash", lastBatch.Hash, "tx hash", txHash.String())
}
Expand Down Expand Up @@ -1079,7 +1090,7 @@ func calculateTargetPrice(windowSec uint64, strategy StrategyParams, firstTime t
// skipSubmitByFee returns (true, nil) when submission should be skipped right now
// because the blob-fee is above target and the timeout window hasn't yet elapsed.
// Otherwise returns (false, err)
func (r *Layer2Relayer) skipSubmitByFee(oldest time.Time) (bool, error) {
func (r *Layer2Relayer) skipSubmitByFee(oldest time.Time, metrics *l2RelayerMetrics) (bool, error) {
windowSec := uint64(r.cfg.BatchSubmission.TimeoutSec)

hist, err := r.fetchBlobFeeHistory(windowSec)
Expand All @@ -1094,6 +1105,11 @@ func (r *Layer2Relayer) skipSubmitByFee(oldest time.Time) (bool, error) {
target := calculateTargetPrice(windowSec, r.batchStrategy, oldest, hist)
current := hist[len(hist)-1]

currentFloat, _ := current.Float64()
targetFloat, _ := target.Float64()
metrics.rollupL2RelayerCurrentBlobPrice.Set(currentFloat)
metrics.rollupL2RelayerTargetBlobPrice.Set(targetFloat)

// if current fee > target and still inside the timeout window, skip
if current.Cmp(target) > 0 && time.Since(oldest) < time.Duration(windowSec)*time.Second {
return true, fmt.Errorf(
Expand Down
21 changes: 21 additions & 0 deletions rollup/internal/controller/relayer/l2_relayer_metrics.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,11 @@ type l2RelayerMetrics struct {

rollupL2RelayerCommitBlockHeight prometheus.Gauge
rollupL2RelayerCommitThroughput prometheus.Counter

rollupL2RelayerCurrentBlobPrice prometheus.Gauge
rollupL2RelayerTargetBlobPrice prometheus.Gauge
rollupL2RelayerCommitLatency prometheus.Gauge
rollupL2RelayerBacklogCounts prometheus.Gauge
}

var (
Expand Down Expand Up @@ -104,6 +109,22 @@ func initL2RelayerMetrics(reg prometheus.Registerer) *l2RelayerMetrics {
Name: "rollup_l2_relayer_commit_throughput",
Help: "The cumulative gas used in blocks committed by the L2 relayer",
}),
rollupL2RelayerTargetBlobPrice: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_l2_relayer_target_blob_price",
Help: "The target blob price for the L2 relayer's submission strategy",
}),
rollupL2RelayerCurrentBlobPrice: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_l2_relayer_current_blob_price",
Help: "The current blob price",
}),
rollupL2RelayerCommitLatency: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_l2_relayer_commit_latency",
Help: "The latency of the commit measured from oldest blocktime",
}),
rollupL2RelayerBacklogCounts: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_l2_relayer_backlog_counts",
Help: "The number of pending batches in the backlog",
}),
}
})
return l2RelayerMetric
Expand Down
Loading