diff --git a/Makefile b/Makefile index afced62bf..68cc21ace 100644 --- a/Makefile +++ b/Makefile @@ -129,7 +129,7 @@ actors-gen: .PHONY: tasks-gen tasks-gen: - go run ./chain/indexer/tablegen + go run ./chain/indexer/tasktype/tablegen go fmt ./... .PHONY: itest diff --git a/chain/tipset.go b/chain/cache/tipset.go similarity index 98% rename from chain/tipset.go rename to chain/cache/tipset.go index 9978df829..41ca373f9 100644 --- a/chain/tipset.go +++ b/chain/cache/tipset.go @@ -1,4 +1,4 @@ -package chain +package cache import ( "context" @@ -6,12 +6,9 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/chain/types" - logging "github.com/ipfs/go-log/v2" "golang.org/x/xerrors" ) -var log = logging.Logger("lily/chain") - var ( ErrCacheEmpty = errors.New("cache empty") ErrAddOutOfOrder = errors.New("added tipset height lower than current head") diff --git a/chain/tipset_test.go b/chain/cache/tipset_test.go similarity index 99% rename from chain/tipset_test.go rename to chain/cache/tipset_test.go index e7223c9df..1441c1c75 100644 --- a/chain/tipset_test.go +++ b/chain/cache/tipset_test.go @@ -1,4 +1,4 @@ -package chain +package cache import ( "context" diff --git a/chain/export/export.go b/chain/export/export.go index 992c2ca23..8967ed85a 100644 --- a/chain/export/export.go +++ b/chain/export/export.go @@ -17,7 +17,7 @@ import ( "gopkg.in/cheggaaa/pb.v1" ) -var log = logging.Logger("lily/chain") +var log = logging.Logger("lily/chain/export") type ChainExporter struct { store blockstore.Blockstore // blockstore chain is exported from diff --git a/chain/fill.go b/chain/fill.go deleted file mode 100644 index bdfac8a33..000000000 --- a/chain/fill.go +++ /dev/null @@ -1,155 +0,0 @@ -package chain - -import ( - "context" - "sort" - "time" - - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/chain/types" - "github.com/go-pg/pg/v10" - logging "github.com/ipfs/go-log/v2" - "golang.org/x/xerrors" - - "github.com/filecoin-project/lily/chain/datasource" - "github.com/filecoin-project/lily/chain/indexer" - "github.com/filecoin-project/lily/lens" - "github.com/filecoin-project/lily/model/visor" - "github.com/filecoin-project/lily/storage" -) - -var fillLog = logging.Logger("lily/chain/fill") - -type GapFiller struct { - DB *storage.Database - node lens.API - name string - minHeight, maxHeight uint64 - tasks []string - done chan struct{} -} - -func NewGapFiller(node lens.API, db *storage.Database, name string, minHeight, maxHeight uint64, tasks []string) *GapFiller { - return &GapFiller{ - DB: db, - node: node, - name: name, - maxHeight: maxHeight, - minHeight: minHeight, - tasks: tasks, - } -} - -func (g *GapFiller) Run(ctx context.Context) error { - // init the done channel for each run since jobs may be started and stopped. 
- g.done = make(chan struct{}) - defer close(g.done) - - gaps, heights, err := g.consolidateGaps(ctx) - if err != nil { - return err - } - fillStart := time.Now() - fillLog.Infow("gap fill start", "start", fillStart.String(), "total_epoch_gaps", len(gaps), "from", g.minHeight, "to", g.maxHeight, "task", g.tasks, "reporter", g.name) - - taskAPI, err := datasource.NewDataSource(g.node) - if err != nil { - return err - } - for _, height := range heights { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - runStart := time.Now() - index, err := indexer.NewManager(taskAPI, g.DB, g.name, gaps[height]) - if err != nil { - return err - } - - fillLog.Infow("filling gap", "height", heights, "reporter", g.name) - ts, err := g.node.ChainGetTipSetByHeight(ctx, abi.ChainEpoch(height), types.EmptyTSK) - if err != nil { - return err - } - fillLog.Infof("got tipset for height %d, tipset height %d", heights, ts.Height()) - if success, err := index.TipSet(ctx, ts); err != nil { - fillLog.Errorw("fill indexing encountered fatal error", "height", height, "tipset", ts.Key().String(), "error", err, "tasks", gaps[height], "reporter", g.name) - return err - } else if !success { - fillLog.Errorw("fill indexing failed to successfully index tipset, skipping fill for tipset, gap remains", "height", height, "tipset", ts.Key().String(), "tasks", gaps[height], "reporter", g.name) - continue - } - fillLog.Infow("fill success", "epoch", ts.Height(), "tasks_filled", gaps[height], "duration", time.Since(runStart), "reporter", g.name) - - if err := g.setGapsFilled(ctx, height, gaps[height]...); err != nil { - return err - } - } - fillLog.Infow("gap fill complete", "duration", time.Since(fillStart), "total_epoch_gaps", len(gaps), "from", g.minHeight, "to", g.maxHeight, "task", g.tasks, "reporter", g.name) - - return nil -} - -func (g *GapFiller) Done() <-chan struct{} { - return g.done -} - -// returns a map of heights to missing tasks, and a list of heights to iterate the map in order with. -func (g *GapFiller) consolidateGaps(ctx context.Context) (map[int64][]string, []int64, error) { - gaps, err := g.queryGaps(ctx) - if err != nil { - return nil, nil, err - } - // used to walk gaps in order, should help optimize some caching. - heights := make([]int64, 0, len(gaps)) - out := make(map[int64][]string) - for _, gap := range gaps { - if _, ok := out[gap.Height]; !ok { - heights = append(heights, gap.Height) - } - out[gap.Height] = append(out[gap.Height], gap.Task) - } - sort.Slice(heights, func(i, j int) bool { - return heights[i] < heights[j] - }) - return out, heights, nil -} - -func (g *GapFiller) queryGaps(ctx context.Context) ([]*visor.GapReport, error) { - var out []*visor.GapReport - if len(g.tasks) != 0 { - if err := g.DB.AsORM().ModelContext(ctx, &out). - Order("height desc"). - Where("status = ?", "GAP"). - Where("task = ANY (?)", pg.Array(g.tasks)). - Where("height >= ?", g.minHeight). - Where("height <= ?", g.maxHeight). - Select(); err != nil { - return nil, xerrors.Errorf("querying gap reports: %w", err) - } - } else { - if err := g.DB.AsORM().ModelContext(ctx, &out). - Order("height desc"). - Where("status = ?", "GAP"). - Where("height >= ?", g.minHeight). - Where("height <= ?", g.maxHeight). - Select(); err != nil { - return nil, xerrors.Errorf("querying gap reports: %w", err) - } - } - return out, nil -} - -// mark all gaps at height as filled. 
-func (g *GapFiller) setGapsFilled(ctx context.Context, height int64, tasks ...string) error { - if _, err := g.DB.AsORM().ModelContext(ctx, &visor.GapReport{}). - Set("status = 'FILLED'"). - Where("height = ?", height). - Where("task = ANY (?)", pg.Array(tasks)). - Update(); err != nil { - return err - } - return nil -} diff --git a/chain/gap/fill.go b/chain/gap/fill.go new file mode 100644 index 000000000..32258e944 --- /dev/null +++ b/chain/gap/fill.go @@ -0,0 +1,98 @@ +package gap + +import ( + "context" + "time" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/types" + logging "github.com/ipfs/go-log/v2" + + "github.com/filecoin-project/lily/chain/datasource" + "github.com/filecoin-project/lily/chain/indexer" + "github.com/filecoin-project/lily/chain/indexer/integrated" + "github.com/filecoin-project/lily/lens" + "github.com/filecoin-project/lily/storage" +) + +var log = logging.Logger("lily/chain/gap") + +type Filler struct { + DB *storage.Database + node lens.API + name string + minHeight, maxHeight uint64 + tasks []string + done chan struct{} +} + +func NewFiller(node lens.API, db *storage.Database, name string, minHeight, maxHeight uint64, tasks []string) *Filler { + return &Filler{ + DB: db, + node: node, + name: name, + maxHeight: maxHeight, + minHeight: minHeight, + tasks: tasks, + } +} + +func (g *Filler) Run(ctx context.Context) error { + // init the done channel for each run since jobs may be started and stopped. + g.done = make(chan struct{}) + defer close(g.done) + + gaps, heights, err := g.DB.ConsolidateGaps(ctx, g.minHeight, g.maxHeight, g.tasks...) + if err != nil { + return err + } + fillStart := time.Now() + log.Infow("gap fill start", "start", fillStart.String(), "total_epoch_gaps", len(gaps), "from", g.minHeight, "to", g.maxHeight, "task", g.tasks, "reporter", g.name) + + taskAPI, err := datasource.NewDataSource(g.node) + if err != nil { + return err + } + + index, err := integrated.NewManager(taskAPI, g.DB, g.name) + if err != nil { + return err + } + + for _, height := range heights { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + runStart := time.Now() + + log.Infow("filling gap", "height", height, "reporter", g.name) + + ts, err := g.node.ChainGetTipSetByHeight(ctx, abi.ChainEpoch(height), types.EmptyTSK) + if err != nil { + return err + } + + log.Infof("got tipset for height %d, tipset height %d", height, ts.Height()) + if success, err := index.TipSet(ctx, ts, indexer.WithTasks(gaps[height])); err != nil { + log.Errorw("fill indexing encountered fatal error", "height", height, "tipset", ts.Key().String(), "error", err, "tasks", gaps[height], "reporter", g.name) + return err + } else if !success { + log.Errorw("fill indexing failed to successfully index tipset, skipping fill for tipset, gap remains", "height", height, "tipset", ts.Key().String(), "tasks", gaps[height], "reporter", g.name) + continue + } + log.Infow("fill success", "epoch", ts.Height(), "tasks_filled", gaps[height], "duration", time.Since(runStart), "reporter", g.name) + + if err := g.DB.SetGapsFilled(ctx, height, gaps[height]...); err != nil { + return err + } + } + log.Infow("gap fill complete", "duration", time.Since(fillStart), "total_epoch_gaps", len(gaps), "from", g.minHeight, "to", g.maxHeight, "task", g.tasks, "reporter", g.name) + + return nil +} + +func (g *Filler) Done() <-chan struct{} { + return g.done +} diff --git a/chain/find.go b/chain/gap/find.go similarity index 85% rename from chain/find.go rename to
chain/gap/find.go index bd2491943..3cbf0a713 100644 --- a/chain/find.go +++ b/chain/gap/find.go @@ -1,4 +1,4 @@ -package chain +package gap import ( "context" @@ -12,7 +12,7 @@ import ( "github.com/filecoin-project/lily/storage" ) -type GapIndexer struct { +type Finder struct { DB *storage.Database node lens.API name string @@ -21,8 +21,8 @@ type GapIndexer struct { done chan struct{} } -func NewGapIndexer(node lens.API, db *storage.Database, name string, minHeight, maxHeight uint64, tasks []string) *GapIndexer { - return &GapIndexer{ +func NewFinder(node lens.API, db *storage.Database, name string, minHeight, maxHeight uint64, tasks []string) *Finder { + return &Finder{ DB: db, node: node, name: name, @@ -38,7 +38,7 @@ type TaskHeight struct { Status string } -func (g *GapIndexer) Find(ctx context.Context) (visor.GapReportList, error) { +func (g *Finder) Find(ctx context.Context) (visor.GapReportList, error) { log.Debug("finding task epoch gaps") start := time.Now() var result []TaskHeight @@ -80,7 +80,7 @@ SELECT * FROM gap_find(?,?,?,?,?); return out, nil } -func (g *GapIndexer) Run(ctx context.Context) error { +func (g *Finder) Run(ctx context.Context) error { // init the done channel for each run since jobs may be started and stopped. g.done = make(chan struct{}) defer close(g.done) @@ -101,6 +101,6 @@ func (g *GapIndexer) Run(ctx context.Context) error { return g.DB.PersistBatch(ctx, gaps) } -func (g *GapIndexer) Done() <-chan struct{} { +func (g *Finder) Done() <-chan struct{} { return g.done } diff --git a/chain/find_test.go b/chain/gap/find_test.go similarity index 76% rename from chain/find_test.go rename to chain/gap/find_test.go index 2106e93fb..e5f0143fa 100644 --- a/chain/find_test.go +++ b/chain/gap/find_test.go @@ -1,4 +1,4 @@ -package chain +package gap import ( "context" @@ -12,7 +12,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/filecoin-project/lily/chain/indexer" + "github.com/filecoin-project/lily/chain/indexer/tasktype" "github.com/filecoin-project/lily/model/visor" "github.com/filecoin-project/lily/storage" "github.com/filecoin-project/lily/testutil" @@ -40,27 +40,27 @@ func TestFind(t *testing.T) { truncate(t, db) gapHeight := int64(1) pre := NewPREditor(t, db, t.Name()) - pre.initialize(maxHeight, indexer.AllTableTasks...) + pre.initialize(maxHeight, tasktype.AllTableTasks...) pre.deleteEpochStatus(gapHeight, visor.ProcessingStatusOK) strg, err := storage.NewDatabaseFromDB(ctx, db, "public") require.NoError(t, err, "NewDatabaseFromDB") - actual, err := NewGapIndexer(nil, strg, t.Name(), minHeight, maxHeight, indexer.AllTableTasks).Find(ctx) + actual, err := NewFinder(nil, strg, t.Name(), minHeight, maxHeight, tasktype.AllTableTasks).Find(ctx) require.NoError(t, err) - expected := makeGapReportList(gapHeight, indexer.AllTableTasks...) + expected := makeGapReportList(gapHeight, tasktype.AllTableTasks...) assertGapReportsEqual(t, expected, actual) }) t.Run("gap all tasks at epoch 1 4 5", func(t *testing.T) { truncate(t, db) gapHeights := []int64{1, 4, 5} - gapTasks := indexer.AllTableTasks + gapTasks := tasktype.AllTableTasks pre := NewPREditor(t, db, t.Name()) pre.truncate() - pre.initialize(maxHeight, indexer.AllTableTasks...) + pre.initialize(maxHeight, tasktype.AllTableTasks...) 
var expected visor.GapReportList for _, height := range gapHeights { @@ -71,7 +71,7 @@ func TestFind(t *testing.T) { strg, err := storage.NewDatabaseFromDB(ctx, db, "public") require.NoError(t, err, "NewDatabaseFromDB") - actual, err := NewGapIndexer(nil, strg, t.Name(), minHeight, maxHeight, gapTasks).Find(ctx) + actual, err := NewFinder(nil, strg, t.Name(), minHeight, maxHeight, gapTasks).Find(ctx) require.NoError(t, err) assertGapReportsEqual(t, expected, actual) @@ -80,16 +80,16 @@ func TestFind(t *testing.T) { t.Run("gap at epoch 2 for miner and init task", func(t *testing.T) { truncate(t, db) gapHeight := int64(2) - gapTasks := []string{indexer.MinerInfo, indexer.IdAddress} + gapTasks := []string{tasktype.MinerInfo, tasktype.IdAddress} pre := NewPREditor(t, db, t.Name()) - pre.initialize(maxHeight, indexer.AllTableTasks...) + pre.initialize(maxHeight, tasktype.AllTableTasks...) pre.deleteEpochStatus(gapHeight, visor.ProcessingStatusOK, WithTasks(gapTasks...)) strg, err := storage.NewDatabaseFromDB(ctx, db, "public") require.NoError(t, err, "NewDatabaseFromDB") - actual, err := NewGapIndexer(nil, strg, t.Name(), minHeight, maxHeight, indexer.AllTableTasks).Find(ctx) + actual, err := NewFinder(nil, strg, t.Name(), minHeight, maxHeight, tasktype.AllTableTasks).Find(ctx) require.NoError(t, err) expected := makeGapReportList(gapHeight, gapTasks...) @@ -99,57 +99,57 @@ func TestFind(t *testing.T) { t.Run("gap at epoch 2 for miner and init task epoch 10 blocks messages market", func(t *testing.T) { truncate(t, db) pre := NewPREditor(t, db, t.Name()) - pre.initialize(maxHeight, indexer.AllTableTasks...) - pre.deleteEpochStatus(2, visor.ProcessingStatusOK, WithTasks(indexer.MinerInfo, indexer.IdAddress)) - pre.deleteEpochStatus(10, visor.ProcessingStatusOK, WithTasks(indexer.BlockHeader, indexer.Message, indexer.MarketDealProposal)) + pre.initialize(maxHeight, tasktype.AllTableTasks...) + pre.deleteEpochStatus(2, visor.ProcessingStatusOK, WithTasks(tasktype.MinerInfo, tasktype.IdAddress)) + pre.deleteEpochStatus(10, visor.ProcessingStatusOK, WithTasks(tasktype.BlockHeader, tasktype.Message, tasktype.MarketDealProposal)) strg, err := storage.NewDatabaseFromDB(ctx, db, "public") require.NoError(t, err, "NewDatabaseFromDB") - actual, err := NewGapIndexer(nil, strg, t.Name(), minHeight, maxHeight, indexer.AllTableTasks).Find(ctx) + actual, err := NewFinder(nil, strg, t.Name(), minHeight, maxHeight, tasktype.AllTableTasks).Find(ctx) require.NoError(t, err) - expected := makeGapReportList(2, indexer.MinerInfo, indexer.IdAddress) - expected = append(expected, makeGapReportList(10, indexer.BlockHeader, indexer.Message, indexer.MarketDealProposal)...) + expected := makeGapReportList(2, tasktype.MinerInfo, tasktype.IdAddress) + expected = append(expected, makeGapReportList(10, tasktype.BlockHeader, tasktype.Message, tasktype.MarketDealProposal)...) assertGapReportsEqual(t, expected, actual) }) t.Run("skip all tasks at epoch 1 and miner task at epoch 5", func(t *testing.T) { truncate(t, db) pre := NewPREditor(t, db, t.Name()) - pre.initialize(maxHeight, indexer.AllTableTasks...) + pre.initialize(maxHeight, tasktype.AllTableTasks...) 
pre.updateEpochStatus(1, visor.ProcessingStatusSkip) - pre.updateEpochStatus(5, visor.ProcessingStatusSkip, WithTasks(indexer.MinerInfo)) + pre.updateEpochStatus(5, visor.ProcessingStatusSkip, WithTasks(tasktype.MinerInfo)) strg, err := storage.NewDatabaseFromDB(ctx, db, "public") require.NoError(t, err, "NewDatabaseFromDB") - actual, err := NewGapIndexer(nil, strg, t.Name(), minHeight, maxHeight, indexer.AllTableTasks).Find(ctx) + actual, err := NewFinder(nil, strg, t.Name(), minHeight, maxHeight, tasktype.AllTableTasks).Find(ctx) require.NoError(t, err) - expected := makeGapReportList(1, indexer.AllTableTasks...) - expected = append(expected, makeGapReportList(5, indexer.MinerInfo)...) + expected := makeGapReportList(1, tasktype.AllTableTasks...) + expected = append(expected, makeGapReportList(5, tasktype.MinerInfo)...) assertGapReportsEqual(t, expected, actual) }) t.Run("gap at epoch 2 for miner and init task, miner errors in 8, all errors in 9", func(t *testing.T) { truncate(t, db) pre := NewPREditor(t, db, t.Name()) - pre.initialize(maxHeight, indexer.AllTableTasks...) + pre.initialize(maxHeight, tasktype.AllTableTasks...) - pre.deleteEpochStatus(2, visor.ProcessingStatusOK, WithTasks(indexer.MinerInfo, indexer.IdAddress)) - pre.updateEpochStatus(8, visor.ProcessingStatusError, WithTasks(indexer.MinerInfo)) + pre.deleteEpochStatus(2, visor.ProcessingStatusOK, WithTasks(tasktype.MinerInfo, tasktype.IdAddress)) + pre.updateEpochStatus(8, visor.ProcessingStatusError, WithTasks(tasktype.MinerInfo)) pre.updateEpochStatus(9, visor.ProcessingStatusError) strg, err := storage.NewDatabaseFromDB(ctx, db, "public") require.NoError(t, err, "NewDatabaseFromDB") - actual, err := NewGapIndexer(nil, strg, t.Name(), minHeight, maxHeight, indexer.AllTableTasks).Find(ctx) + actual, err := NewFinder(nil, strg, t.Name(), minHeight, maxHeight, tasktype.AllTableTasks).Find(ctx) require.NoError(t, err) - expected := makeGapReportList(2, indexer.MinerInfo, indexer.IdAddress) - expected = append(expected, makeGapReportList(8, indexer.MinerInfo)...) - expected = append(expected, makeGapReportList(9, indexer.AllTableTasks...)...) + expected := makeGapReportList(2, tasktype.MinerInfo, tasktype.IdAddress) + expected = append(expected, makeGapReportList(8, tasktype.MinerInfo)...) + expected = append(expected, makeGapReportList(9, tasktype.AllTableTasks...)...) assertGapReportsEqual(t, expected, actual) }) @@ -158,36 +158,36 @@ func TestFind(t *testing.T) { truncate(t, db) pre1 := NewPREditor(t, db, "reporter1") pre2 := NewPREditor(t, db, "reporter2") - pre1.initialize(maxHeight, indexer.AllTableTasks...) - pre2.initialize(maxHeight, indexer.AllTableTasks...) - pre1.deleteEpochStatus(2, visor.ProcessingStatusOK, WithTasks(indexer.MinerInfo, indexer.IdAddress)) - pre2.deleteEpochStatus(2, visor.ProcessingStatusOK, WithTasks(indexer.MinerInfo, indexer.IdAddress)) + pre1.initialize(maxHeight, tasktype.AllTableTasks...) + pre2.initialize(maxHeight, tasktype.AllTableTasks...) 
+ pre1.deleteEpochStatus(2, visor.ProcessingStatusOK, WithTasks(tasktype.MinerInfo, tasktype.IdAddress)) + pre2.deleteEpochStatus(2, visor.ProcessingStatusOK, WithTasks(tasktype.MinerInfo, tasktype.IdAddress)) strg, err := storage.NewDatabaseFromDB(ctx, db, "public") require.NoError(t, err, "NewDatabaseFromDB") - actual, err := NewGapIndexer(nil, strg, t.Name(), minHeight, maxHeight, indexer.AllTableTasks).Find(ctx) + actual, err := NewFinder(nil, strg, t.Name(), minHeight, maxHeight, tasktype.AllTableTasks).Find(ctx) require.NoError(t, err) - expected := makeGapReportList(2, indexer.MinerInfo, indexer.IdAddress) + expected := makeGapReportList(2, tasktype.MinerInfo, tasktype.IdAddress) assertGapReportsEqual(t, expected, actual) }) t.Run("(sub task indexer, full reports table) gap at epoch 2 for messages and init task", func(t *testing.T) { truncate(t, db) - gapTasks := []string{indexer.Message, indexer.IdAddress} - monitoringTasks := append(gapTasks, []string{indexer.BlockHeader, indexer.ChainEconomics}...) + gapTasks := []string{tasktype.Message, tasktype.IdAddress} + monitoringTasks := append(gapTasks, []string{tasktype.BlockHeader, tasktype.ChainEconomics}...) pre := NewPREditor(t, db, t.Name()) - pre.initialize(maxHeight, indexer.AllTableTasks...) + pre.initialize(maxHeight, tasktype.AllTableTasks...) pre.deleteEpochStatus(2, visor.ProcessingStatusOK, WithTasks(gapTasks...)) strg, err := storage.NewDatabaseFromDB(ctx, db, "public") require.NoError(t, err, "NewDatabaseFromDB") // tasks to find gaps in - actual, err := NewGapIndexer(nil, strg, t.Name(), minHeight, maxHeight, monitoringTasks).Find(ctx) + actual, err := NewFinder(nil, strg, t.Name(), minHeight, maxHeight, monitoringTasks).Find(ctx) require.NoError(t, err) expected := makeGapReportList(2, gapTasks...) @@ -198,8 +198,8 @@ func TestFind(t *testing.T) { truncate(t, db) // tasks to create gaps for - gapTasks := []string{indexer.Message, indexer.IdAddress} - monitoringTasks := append(gapTasks, []string{indexer.BlockHeader, indexer.ChainEconomics}...) + gapTasks := []string{tasktype.Message, tasktype.IdAddress} + monitoringTasks := append(gapTasks, []string{tasktype.BlockHeader, tasktype.ChainEconomics}...) pre := NewPREditor(t, db, t.Name()) pre.initialize(maxHeight, monitoringTasks...) @@ -208,7 +208,7 @@ func TestFind(t *testing.T) { strg, err := storage.NewDatabaseFromDB(ctx, db, "public") require.NoError(t, err, "NewDatabaseFromDB") - actual, err := NewGapIndexer(nil, strg, t.Name(), minHeight, maxHeight, monitoringTasks).Find(ctx) + actual, err := NewFinder(nil, strg, t.Name(), minHeight, maxHeight, monitoringTasks).Find(ctx) require.NoError(t, err) expected := makeGapReportList(2, gapTasks...) @@ -219,16 +219,16 @@ func TestFind(t *testing.T) { truncate(t, db) pre := NewPREditor(t, db, t.Name()) - pre.initialize(maxHeight, indexer.AllTableTasks...) - pre.updateEpochStatus(2, visor.ProcessingStatusError, WithTasks(indexer.AllTableTasks...)) + pre.initialize(maxHeight, tasktype.AllTableTasks...) + pre.updateEpochStatus(2, visor.ProcessingStatusError, WithTasks(tasktype.AllTableTasks...)) strg, err := storage.NewDatabaseFromDB(ctx, db, "public") require.NoError(t, err, "NewDatabaseFromDB") - actual, err := NewGapIndexer(nil, strg, t.Name(), minHeight, maxHeight, indexer.AllTableTasks).Find(ctx) + actual, err := NewFinder(nil, strg, t.Name(), minHeight, maxHeight, tasktype.AllTableTasks).Find(ctx) require.NoError(t, err) - expected := makeGapReportList(2, indexer.AllTableTasks...) 
+ expected := makeGapReportList(2, tasktype.AllTableTasks...) assertGapReportsEqual(t, expected, actual) }) @@ -236,14 +236,14 @@ truncate(t, db) pre1 := NewPREditor(t, db, "reporter1") - pre1.initialize(maxHeight, indexer.AllTableTasks...) + pre1.initialize(maxHeight, tasktype.AllTableTasks...) pre2 := NewPREditor(t, db, "reporter2") - pre2.insertEpochStatus(2, visor.ProcessingStatusError, WithTasks(indexer.IdAddress, indexer.MinerInfo)) + pre2.insertEpochStatus(2, visor.ProcessingStatusError, WithTasks(tasktype.IdAddress, tasktype.MinerInfo)) strg, err := storage.NewDatabaseFromDB(ctx, db, "public") require.NoError(t, err, "NewDatabaseFromDB") - actual, err := NewGapIndexer(nil, strg, t.Name(), minHeight, maxHeight, indexer.AllTableTasks).Find(ctx) + actual, err := NewFinder(nil, strg, t.Name(), minHeight, maxHeight, tasktype.AllTableTasks).Find(ctx) require.NoError(t, err) // expect nothing since tasks have an OK status despite the error @@ -254,7 +254,7 @@ func TestFind(t *testing.T) { truncate(t, db) pre1 := NewPREditor(t, db, "reporter1") - pre1.initialize(maxHeight, indexer.AllTableTasks...) + pre1.initialize(maxHeight, tasktype.AllTableTasks...) pre1.updateEpochStatus(2, visor.ProcessingStatusSkip) pre2 := NewPREditor(t, db, "reporter2") @@ -263,7 +263,7 @@ func TestFind(t *testing.T) { strg, err := storage.NewDatabaseFromDB(ctx, db, "public") require.NoError(t, err, "NewDatabaseFromDB") - actual, err := NewGapIndexer(nil, strg, t.Name(), minHeight, maxHeight, indexer.AllTableTasks).Find(ctx) + actual, err := NewFinder(nil, strg, t.Name(), minHeight, maxHeight, tasktype.AllTableTasks).Find(ctx) require.NoError(t, err) // no gaps should be found since the epoch has OK's for all tasks; the SKIPS are ignored. @@ -274,7 +274,7 @@ func TestFind(t *testing.T) { truncate(t, db) pre1 := NewPREditor(t, db, "reporter1") - pre1.initialize(maxHeight, indexer.AllTableTasks...) + pre1.initialize(maxHeight, tasktype.AllTableTasks...) pre1.updateEpochStatus(2, visor.ProcessingStatusSkip) pre1.updateEpochStatus(8, visor.ProcessingStatusSkip) @@ -289,7 +289,7 @@ func TestFind(t *testing.T) { strg, err := storage.NewDatabaseFromDB(ctx, db, "public") require.NoError(t, err, "NewDatabaseFromDB") - actual, err := NewGapIndexer(nil, strg, t.Name(), minHeight, maxHeight, indexer.AllTableTasks).Find(ctx) + actual, err := NewFinder(nil, strg, t.Name(), minHeight, maxHeight, tasktype.AllTableTasks).Find(ctx) require.NoError(t, err) // no gaps should be found since the epoch has OK's for all tasks; the SKIPS and ERRORs are ignored. @@ -300,24 +300,24 @@ func TestFind(t *testing.T) { truncate(t, db) pre1 := NewPREditor(t, db, "reporter1") - pre1.initialize(maxHeight, indexer.AllTableTasks...) + pre1.initialize(maxHeight, tasktype.AllTableTasks...) pre1.updateEpochStatus(2, visor.ProcessingStatusSkip) pre1.updateEpochStatus(8, visor.ProcessingStatusSkip) pre2 := NewPREditor(t, db, "reporter2") - pre2.initialize(maxHeight, indexer.AllTableTasks...) + pre2.initialize(maxHeight, tasktype.AllTableTasks...) pre2.updateEpochStatus(2, visor.ProcessingStatusError) pre2.updateEpochStatus(8, visor.ProcessingStatusError) pre3 := NewPREditor(t, db, "reporter3") - pre3.initialize(maxHeight, indexer.AllTableTasks...) + pre3.initialize(maxHeight, tasktype.AllTableTasks...)
pre3.updateEpochStatus(2, visor.ProcessingStatusOK) pre3.updateEpochStatus(8, visor.ProcessingStatusOK) strg, err := storage.NewDatabaseFromDB(ctx, db, "public") require.NoError(t, err, "NewDatabaseFromDB") - actual, err := NewGapIndexer(nil, strg, t.Name(), minHeight, maxHeight, indexer.AllTableTasks).Find(ctx) + actual, err := NewFinder(nil, strg, t.Name(), minHeight, maxHeight, tasktype.AllTableTasks).Find(ctx) require.NoError(t, err) // no gaps should be found since the epoch has OK's for all tasks; the SKIPS and ERRORs are ignored. @@ -328,7 +328,7 @@ func TestFind(t *testing.T) { truncate(t, db) pre1 := NewPREditor(t, db, "reporter1") - pre1.initialize(maxHeight, indexer.AllTableTasks...) + pre1.initialize(maxHeight, tasktype.AllTableTasks...) pre1.updateEpochStatus(2, visor.ProcessingStatusSkip) pre2 := NewPREditor(t, db, "reporter2") @@ -337,10 +337,10 @@ func TestFind(t *testing.T) { strg, err := storage.NewDatabaseFromDB(ctx, db, "public") require.NoError(t, err, "NewDatabaseFromDB") - actual, err := NewGapIndexer(nil, strg, t.Name(), minHeight, maxHeight, indexer.AllTableTasks).Find(ctx) + actual, err := NewFinder(nil, strg, t.Name(), minHeight, maxHeight, tasktype.AllTableTasks).Find(ctx) require.NoError(t, err) - expected := makeGapReportList(2, indexer.AllTableTasks...) + expected := makeGapReportList(2, tasktype.AllTableTasks...) assertGapReportsEqual(t, expected, actual) }) @@ -348,17 +348,17 @@ func TestFind(t *testing.T) { truncate(t, db) pre1 := NewPREditor(t, db, "reporter1") - pre1.initialize(maxHeight, indexer.AllTableTasks...) + pre1.initialize(maxHeight, tasktype.AllTableTasks...) pre1.updateEpochStatus(2, visor.ProcessingStatusInfo, WithStatusInformation(visor.ProcessingStatusInformationNullRound)) pre1.updateEpochStatus(3, visor.ProcessingStatusInfo, WithStatusInformation("not the permitted null round")) strg, err := storage.NewDatabaseFromDB(ctx, db, "public") require.NoError(t, err, "NewDatabaseFromDB") - actual, err := NewGapIndexer(nil, strg, t.Name(), minHeight, maxHeight, indexer.AllTableTasks).Find(ctx) + actual, err := NewFinder(nil, strg, t.Name(), minHeight, maxHeight, tasktype.AllTableTasks).Find(ctx) require.NoError(t, err) - expected := makeGapReportList(3, indexer.AllTableTasks...) + expected := makeGapReportList(3, tasktype.AllTableTasks...) 
assertGapReportsEqual(t, expected, actual) }) } @@ -495,7 +495,7 @@ func (e *PREditor) updateEpochStatus(epoch int64, status string, opts ...PREdito q := &PREditorQuery{ epoch: epoch, status: status, - tasks: indexer.AllTableTasks, + tasks: tasktype.AllTableTasks, } for _, opt := range opts { opt(q) @@ -516,7 +516,7 @@ func (e *PREditor) insertEpochStatus(epoch int64, status string, opts ...PREdito q := &PREditorQuery{ epoch: epoch, status: status, - tasks: indexer.AllTableTasks, + tasks: tasktype.AllTableTasks, } for _, opt := range opts { opt(q) @@ -535,7 +535,7 @@ func (e *PREditor) deleteEpochStatus(epoch int64, status string, opts ...PREdito q := &PREditorQuery{ epoch: epoch, status: status, - tasks: indexer.AllTableTasks, + tasks: tasktype.AllTableTasks, } for _, opt := range opts { opt(q) diff --git a/chain/gap/notify.go b/chain/gap/notify.go new file mode 100644 index 000000000..0f141923d --- /dev/null +++ b/chain/gap/notify.go @@ -0,0 +1,72 @@ +package gap + +import ( + "context" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/types" + + "github.com/filecoin-project/lily/chain/indexer" + "github.com/filecoin-project/lily/chain/indexer/distributed" + "github.com/filecoin-project/lily/chain/indexer/distributed/queue" + "github.com/filecoin-project/lily/lens" + "github.com/filecoin-project/lily/storage" +) + +type Notifier struct { + DB *storage.Database + queue *queue.AsynQ + node lens.API + name string + minHeight, maxHeight uint64 + tasks []string + done chan struct{} +} + +func NewNotifier(node lens.API, db *storage.Database, queue *queue.AsynQ, name string, minHeight, maxHeight uint64, tasks []string) *Notifier { + return &Notifier{ + DB: db, + queue: queue, + node: node, + name: name, + maxHeight: maxHeight, + minHeight: minHeight, + tasks: tasks, + } +} + +func (g *Notifier) Run(ctx context.Context) error { + // init the done channel for each run since jobs may be started and stopped. + g.done = make(chan struct{}) + defer close(g.done) + + gaps, heights, err := g.DB.ConsolidateGaps(ctx, g.minHeight, g.maxHeight, g.tasks...) + if err != nil { + return err + } + + idx := distributed.NewTipSetIndexer(g.queue) + + for _, height := range heights { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + ts, err := g.node.ChainGetTipSetByHeight(ctx, abi.ChainEpoch(height), types.EmptyTSK) + if err != nil { + return err + } + + if _, err := idx.TipSet(ctx, ts, indexer.WithIndexerType(indexer.Fill), indexer.WithTasks(gaps[height])); err != nil { + return err + } + } + + return nil +} + +func (g *Notifier) Done() <-chan struct{} { + return g.done +} diff --git a/chain/indexer/distributed/catalog.go b/chain/indexer/distributed/catalog.go new file mode 100644 index 000000000..bd234d63a --- /dev/null +++ b/chain/indexer/distributed/catalog.go @@ -0,0 +1,54 @@ +package distributed + +import ( + logging "github.com/ipfs/go-log/v2" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lily/config" +) + +var log = logging.Logger("lily/distributed") + +// NewCatalog returns a Catalog configured with the values specified in config.QueueConfig. Error is non-nill if +// config.QueueConfig contains a duplicate queue name. 
+func NewCatalog(cfg config.QueueConfig) (*Catalog, error) { + c := &Catalog{ + queues: map[string]config.AsynqRedisConfig{}, + } + + for name, nc := range cfg.Asynq { + if _, exists := c.queues[name]; exists { + return nil, xerrors.Errorf("duplicate queue name: %q", name) + } + log.Debugw("registering queue", "name", name, "type", "redis") + + c.queues[name] = config.AsynqRedisConfig{ + Network: nc.Network, + Addr: nc.Addr, + Username: nc.Username, + Password: nc.Password, + DB: nc.DB, + PoolSize: nc.PoolSize, + } + } + return c, nil +} + +// Catalog contains a map of queue names to their configurations. Catalog is used to configure the distributed indexer. +type Catalog struct { + queues map[string]config.AsynqRedisConfig +} + +// AsynqConfig returns a config.AsynqRedisConfig by `name`. And error is returned if name is empty or if a +// config.AsynqRedisConfig doesn't exist for `name`. +func (c *Catalog) AsynqConfig(name string) (config.AsynqRedisConfig, error) { + if name == "" { + return config.AsynqRedisConfig{}, xerrors.Errorf("queue config name required") + } + + n, exists := c.queues[name] + if !exists { + return config.AsynqRedisConfig{}, xerrors.Errorf("unknown queue: %q", name) + } + return n, nil +} diff --git a/chain/indexer/distributed/queue/client.go b/chain/indexer/distributed/queue/client.go new file mode 100644 index 000000000..9fe6cbb57 --- /dev/null +++ b/chain/indexer/distributed/queue/client.go @@ -0,0 +1,65 @@ +package queue + +import ( + "context" + + "github.com/filecoin-project/lotus/chain/types" + "github.com/hibiken/asynq" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + + "github.com/filecoin-project/lily/chain/indexer" + "github.com/filecoin-project/lily/chain/indexer/distributed" + "github.com/filecoin-project/lily/chain/indexer/distributed/queue/tasks" + "github.com/filecoin-project/lily/config" +) + +var _ distributed.Queue = (*AsynQ)(nil) + +type AsynQ struct { + c *asynq.Client +} + +func NewAsynq(cfg config.AsynqRedisConfig) *AsynQ { + asynqClient := asynq.NewClient(asynq.RedisClientOpt{ + Network: cfg.Network, + Addr: cfg.Addr, + Username: cfg.Username, + Password: cfg.Password, + DB: cfg.DB, + PoolSize: cfg.PoolSize, + }) + + return &AsynQ{c: asynqClient} +} + +func (r *AsynQ) EnqueueTipSet(ctx context.Context, ts *types.TipSet, indexType indexer.IndexerType, taskNames ...string) error { + ctx, span := otel.Tracer("").Start(ctx, "AsnyQ.EnqueueTipSet") + defer span.End() + + var task *asynq.Task + var err error + if indexType == indexer.Fill { + task, err = tasks.NewGapFillTipSetTask(ctx, ts, taskNames) + if err != nil { + return err + } + } else { + task, err = tasks.NewIndexTipSetTask(ctx, ts, taskNames) + if err != nil { + return err + } + } + + if span.IsRecording() { + span.SetAttributes(attribute.String("task_type", task.Type()), attribute.StringSlice("tasks", taskNames), attribute.String("index_type", indexType.String())) + } + + _, err = r.c.EnqueueContext(ctx, task, asynq.Queue(indexType.String())) + if err != nil { + return err + } + + return nil + +} diff --git a/chain/indexer/distributed/queue/tasks/gapfill.go b/chain/indexer/distributed/queue/tasks/gapfill.go new file mode 100644 index 000000000..d8e9ee7ba --- /dev/null +++ b/chain/indexer/distributed/queue/tasks/gapfill.go @@ -0,0 +1,84 @@ +package tasks + +import ( + "context" + "encoding/json" + "strings" + + "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/chain/types" + "github.com/hibiken/asynq" + "go.opentelemetry.io/otel/trace" 
+ "golang.org/x/xerrors" + + "github.com/filecoin-project/lily/chain/indexer" + "github.com/filecoin-project/lily/chain/indexer/distributed/queue/tracing" + "github.com/filecoin-project/lily/storage" +) + +const ( + TypeGapFillTipSet = "tipset:gapfill" +) + +type GapFillTipSetPayload struct { + TipSet *types.TipSet + Tasks []string + TraceCarrier *tracing.TraceCarrier `json:",omitempty"` +} + +// HasTraceCarrier returns true iff payload contains a trace. +func (g *GapFillTipSetPayload) HasTraceCarrier() bool { + return !(g.TraceCarrier == nil) +} + +func NewGapFillTipSetTask(ctx context.Context, ts *types.TipSet, tasks []string) (*asynq.Task, error) { + payload, err := json.Marshal(GapFillTipSetPayload{TipSet: ts, Tasks: tasks, TraceCarrier: tracing.NewTraceCarrier(trace.SpanFromContext(ctx).SpanContext())}) + if err != nil { + return nil, err + } + return asynq.NewTask(TypeGapFillTipSet, payload), nil +} + +type AsynqGapFillTipSetTaskHandler struct { + indexer indexer.Indexer + db *storage.Database +} + +func NewGapFillHandler(indexer indexer.Indexer, db *storage.Database) *AsynqGapFillTipSetTaskHandler { + return &AsynqGapFillTipSetTaskHandler{indexer: indexer, db: db} +} + +func (gh *AsynqGapFillTipSetTaskHandler) HandleGapFillTipSetTask(ctx context.Context, t *asynq.Task) error { + var p GapFillTipSetPayload + if err := json.Unmarshal(t.Payload(), &p); err != nil { + return err + } + log.Infow("gap fill tipset", "tipset", p.TipSet.String(), "height", p.TipSet.Height(), "tasks", p.Tasks) + + if p.HasTraceCarrier() { + if sc := p.TraceCarrier.AsSpanContext(); sc.IsValid() { + ctx = trace.ContextWithRemoteSpanContext(ctx, sc) + } + } + + success, err := gh.indexer.TipSet(ctx, p.TipSet, indexer.WithTasks(p.Tasks)) + if err != nil { + if strings.Contains(err.Error(), blockstore.ErrNotFound.Error()) { + // return SkipRetry to prevent the task from being retried since nodes do not contain the block + // TODO: later, reschedule task in "backfill" queue with lily nodes capable of syncing the required data. 
+ return xerrors.Errorf("indexing tipset for gap fill tipset.(height) %s.(%d): Error %s : %w", p.TipSet.Key().String(), p.TipSet.Height(), err, asynq.SkipRetry) + } else { + return err + } + } + if !success { + log.Errorw("failed to gap fill task successfully", "height", p.TipSet.Height(), "tipset", p.TipSet.Key().String()) + return xerrors.Errorf("gap filling tipset.(height) %s.(%d)", p.TipSet.Key(), p.TipSet.Height()) + } else { + if err := gh.db.SetGapsFilled(ctx, int64(p.TipSet.Height()), p.Tasks...); err != nil { + log.Errorw("failed to mark gap as filled", "error", err) + return err + } + } + return nil +} diff --git a/chain/indexer/distributed/queue/tasks/index.go b/chain/indexer/distributed/queue/tasks/index.go new file mode 100644 index 000000000..05fa413db --- /dev/null +++ b/chain/indexer/distributed/queue/tasks/index.go @@ -0,0 +1,80 @@ +package tasks + +import ( + "context" + "encoding/json" + "strings" + + "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/chain/types" + "github.com/hibiken/asynq" + logging "github.com/ipfs/go-log/v2" + "go.opentelemetry.io/otel/trace" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lily/chain/indexer" + "github.com/filecoin-project/lily/chain/indexer/distributed/queue/tracing" +) + +var log = logging.Logger("lily/queue/tasks") + +const ( + TypeIndexTipSet = "tipset:index" +) + +type IndexTipSetPayload struct { + TipSet *types.TipSet + Tasks []string + TraceCarrier *tracing.TraceCarrier `json:",omitempty"` +} + +// HasTraceCarrier returns true iff payload contains a trace. +func (i *IndexTipSetPayload) HasTraceCarrier() bool { + return !(i.TraceCarrier == nil) +} + +func NewIndexTipSetTask(ctx context.Context, ts *types.TipSet, tasks []string) (*asynq.Task, error) { + payload, err := json.Marshal(IndexTipSetPayload{TipSet: ts, Tasks: tasks, TraceCarrier: tracing.NewTraceCarrier(trace.SpanFromContext(ctx).SpanContext())}) + if err != nil { + return nil, err + } + return asynq.NewTask(TypeIndexTipSet, payload), nil +} + +type AsynqTipSetTaskHandler struct { + indexer indexer.Indexer +} + +func NewIndexHandler(i indexer.Indexer) *AsynqTipSetTaskHandler { + return &AsynqTipSetTaskHandler{indexer: i} +} + +func (ih *AsynqTipSetTaskHandler) HandleIndexTipSetTask(ctx context.Context, t *asynq.Task) error { + var p IndexTipSetPayload + if err := json.Unmarshal(t.Payload(), &p); err != nil { + return err + } + log.Infow("indexing tipset", "tipset", p.TipSet.String(), "height", p.TipSet.Height(), "tasks", p.Tasks) + + if p.HasTraceCarrier() { + if sc := p.TraceCarrier.AsSpanContext(); sc.IsValid() { + ctx = trace.ContextWithRemoteSpanContext(ctx, sc) + } + } + + success, err := ih.indexer.TipSet(ctx, p.TipSet, indexer.WithTasks(p.Tasks)) + if err != nil { + return err + } + if !success { + log.Errorw("failed to index tipset successfully", "height", p.TipSet.Height(), "tipset", p.TipSet.Key().String()) + return xerrors.Errorf("indexing tipset.(height) %s.(%d)", p.TipSet.Key().String(), p.TipSet.Height()) + } + if strings.Contains(err.Error(), blockstore.ErrNotFound.Error()) { + log.Errorw("failed to index tipset", "height", p.TipSet.Height(), "tipset", p.TipSet.Key().String(), "error", err) + // return SkipRetry to prevent the task from being retried since nodes do not contain the block + // TODO: later, reschedule task in "backfill" queue with lily nodes capable of syncing the required data. 
+ return xerrors.Errorf("indexing tipset.(height) %s.(%d): Error %s : %w", p.TipSet.Key().String(), p.TipSet.Height(), err, asynq.SkipRetry) + } + return nil +} diff --git a/chain/indexer/distributed/queue/tracing/carrier.go b/chain/indexer/distributed/queue/tracing/carrier.go new file mode 100644 index 000000000..caf106460 --- /dev/null +++ b/chain/indexer/distributed/queue/tracing/carrier.go @@ -0,0 +1,74 @@ +package tracing + +import ( + "encoding/json" + + "go.opentelemetry.io/otel/trace" +) + +/* +TraceCarrier is required to Marshal and Unmarshall trace.SpanContext across RPC boundaries since the +OpenTelemetry spec dictates trace.SpanContext must be read only, and therefore does not implement a +JSON unmarshaller. +Context: https://github.com/open-telemetry/opentelemetry-go/issues/1927#issuecomment-842663910 +*/ + +// NewTraceCarrier accepts a trace.SpanContext and returns a TraceCarrier. +func NewTraceCarrier(sc trace.SpanContext) *TraceCarrier { + if sc.IsValid() { + return &TraceCarrier{ + TraceID: sc.TraceID(), + SpanID: sc.SpanID(), + Remote: sc.IsRemote(), + } + } + return nil +} + +// TraceCarrier is a wrapper that allows trace.SpanContext's to be round-tripped through JSON. +type TraceCarrier struct { + TraceID trace.TraceID `json:"traceID"` + SpanID trace.SpanID `json:"spanID"` + Remote bool `json:"remote"` +} + +//MarshalJSON converts TraceCarrier to a trace.SpanContext and marshals it to JSON. +func (c *TraceCarrier) MarshalJSON() ([]byte, error) { + return c.AsSpanContext().MarshalJSON() +} + +// UnmarshalJSON unmarshalls a serialized trace.SpanContext to a TraceCarrier. +func (c *TraceCarrier) UnmarshalJSON(b []byte) error { + var data traceCarrierInfo + if err := json.Unmarshal(b, &data); err != nil { + return err + } + var err error + c.TraceID, err = trace.TraceIDFromHex(data.TraceID) + if err != nil { + return err + } + c.SpanID, err = trace.SpanIDFromHex(data.SpanID) + if err != nil { + return err + } + c.Remote = data.Remote + + return nil +} + +// AsSpanContext converts TraceCarrier to a trace.SpanContext. +func (c *TraceCarrier) AsSpanContext() trace.SpanContext { + return trace.NewSpanContext(trace.SpanContextConfig{ + TraceID: c.TraceID, + SpanID: c.SpanID, + Remote: c.Remote, + }) +} + +// carrierInfo is a helper used to deserialize a SpanContext from JSON. 
diff --git a/chain/indexer/distributed/queue/worker.go b/chain/indexer/distributed/queue/worker.go new file mode 100644 index 000000000..310b8c8c0 --- /dev/null +++ b/chain/indexer/distributed/queue/worker.go @@ -0,0 +1,108 @@ +package queue + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/hibiken/asynq" + logging "github.com/ipfs/go-log/v2" + "go.opentelemetry.io/otel/trace" + + "github.com/filecoin-project/lily/chain/indexer" + "github.com/filecoin-project/lily/chain/indexer/distributed/queue/tasks" + "github.com/filecoin-project/lily/config" + "github.com/filecoin-project/lily/storage" +) + +var log = logging.Logger("lily/asynq") + +type AsynqWorker struct { + name string + concurrency int + cfg config.AsynqRedisConfig + mux *asynq.ServeMux + done chan struct{} +} + +func NewAsynqWorker(i indexer.Indexer, db *storage.Database, name string, concurrency int, cfg config.AsynqRedisConfig) *AsynqWorker { + mux := asynq.NewServeMux() + mux.HandleFunc(tasks.TypeIndexTipSet, tasks.NewIndexHandler(i).HandleIndexTipSetTask) + mux.HandleFunc(tasks.TypeGapFillTipSet, tasks.NewGapFillHandler(i, db).HandleGapFillTipSetTask) + return &AsynqWorker{ + name: name, + concurrency: concurrency, + cfg: cfg, + mux: mux, + } +} + +func (t *AsynqWorker) Run(ctx context.Context) error { + t.done = make(chan struct{}) + defer close(t.done) + + srv := asynq.NewServer( + asynq.RedisClientOpt{ + Network: t.cfg.Network, + Addr: t.cfg.Addr, + Username: t.cfg.Username, + Password: t.cfg.Password, + DB: t.cfg.DB, + PoolSize: t.cfg.PoolSize, + }, + asynq.Config{ + Concurrency: t.concurrency, + Logger: log.With("process", fmt.Sprintf("AsynqWorker-%s", t.name)), + LogLevel: asynq.DebugLevel, + Queues: map[string]int{ + indexer.Watch.String(): 6, + indexer.Walk.String(): 2, + indexer.Index.String(): 1, + indexer.Fill.String(): 1, + }, + StrictPriority: false, + ErrorHandler: &WorkerErrorHandler{}, + }, + ) + go func() { + <-ctx.Done() + srv.Shutdown() + }() + return srv.Run(t.mux) +} + +func (t *AsynqWorker) Done() <-chan struct{} { + return t.done +} + +type WorkerErrorHandler struct { +} + +func (w *WorkerErrorHandler) HandleError(ctx context.Context, task *asynq.Task, err error) { + switch task.Type() { + case tasks.TypeIndexTipSet: + var p tasks.IndexTipSetPayload + if err := json.Unmarshal(task.Payload(), &p); err != nil { + log.Errorw("failed to decode task type (developer error?)", "error", err) + // the payload is unusable; bail out before dereferencing it. + return + } + if p.HasTraceCarrier() { + if sc := p.TraceCarrier.AsSpanContext(); sc.IsValid() { + ctx = trace.ContextWithRemoteSpanContext(ctx, sc) + trace.SpanFromContext(ctx).RecordError(err) + } + } + log.Errorw("task failed", "type", task.Type(), "tipset", p.TipSet.Key().String(), "height", p.TipSet.Height(), "tasks", p.Tasks, "error", err) + case tasks.TypeGapFillTipSet: + var p tasks.GapFillTipSetPayload + if err := json.Unmarshal(task.Payload(), &p); err != nil { + log.Errorw("failed to decode task type (developer error?)", "error", err) + // the payload is unusable; bail out before dereferencing it. + return + } + if p.HasTraceCarrier() { + if sc := p.TraceCarrier.AsSpanContext(); sc.IsValid() { + ctx = trace.ContextWithRemoteSpanContext(ctx, sc) + trace.SpanFromContext(ctx).RecordError(err) + } + } + log.Errorw("task failed", "type", task.Type(), "tipset", p.TipSet.Key().String(), "height", p.TipSet.Height(), "tasks", p.Tasks, "error", err) + } +} diff --git a/chain/indexer/distributed/tipset.go b/chain/indexer/distributed/tipset.go new file mode 100644 index 000000000..9e7b4d20e --- /dev/null +++
b/chain/indexer/distributed/tipset.go @@ -0,0 +1,39 @@ +package distributed + +import ( + "context" + + "github.com/filecoin-project/lotus/chain/types" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lily/chain/indexer" +) + +var _ indexer.Indexer = (*TipSetIndexer)(nil) + +type Queue interface { + EnqueueTipSet(ctx context.Context, ts *types.TipSet, indexType indexer.IndexerType, tasks ...string) error +} + +type TipSetIndexer struct { + q Queue +} + +func NewTipSetIndexer(q Queue) *TipSetIndexer { + return &TipSetIndexer{q: q} +} + +func (t *TipSetIndexer) TipSet(ctx context.Context, ts *types.TipSet, opts ...indexer.Option) (bool, error) { + o, err := indexer.ConstructOptions(opts...) + if err != nil { + return false, err + } + if o.IndexType == indexer.Undefined { + return false, xerrors.Errorf("indexer type required") + } + log.Infow("index tipset", "height", ts.Height(), "type", o.IndexType.String(), "tasks", o.Tasks) + if err := t.q.EnqueueTipSet(ctx, ts, o.IndexType, o.Tasks...); err != nil { + return false, err + } + return true, nil +} diff --git a/chain/indexer/exporter.go b/chain/indexer/exporter.go index c70840884..5e7f2ee6e 100644 --- a/chain/indexer/exporter.go +++ b/chain/indexer/exporter.go @@ -18,7 +18,7 @@ import ( "github.com/filecoin-project/lily/model" ) -var mdlLog = logging.Logger("lily/index/exporter") +var log = logging.Logger("lily/index/exporter") func NewModelExporter(name string) *ModelExporter { return &ModelExporter{ @@ -66,7 +66,7 @@ func (me *ModelExporter) ExportResult(ctx context.Context, strg model.Storage, h stats.Record(ctx, metrics.PersistFailure.M(1)) return xerrors.Errorf("persist result (%s.%T): %w", res.Name, res.Model, err) } - mdlLog.Infow("model data persisted", "height", height, "task", res.Name, "duration", time.Since(start), "reporter", me.name) + log.Infow("model data persisted", "height", height, "task", res.Name, "duration", time.Since(start), "reporter", me.name) return nil }) } diff --git a/chain/indexer/integrated/builder.go b/chain/indexer/integrated/builder.go new file mode 100644 index 000000000..ded780eb7 --- /dev/null +++ b/chain/indexer/integrated/builder.go @@ -0,0 +1,40 @@ +package integrated + +import "github.com/filecoin-project/lily/tasks" + +func NewBuilder(node tasks.DataSource, name string) *Builder { + return &Builder{api: node, name: name} +} + +type Builder struct { + options []func(ti *TipSetIndexer) + api tasks.DataSource + name string +} + +func (b *Builder) add(cb func(ti *TipSetIndexer)) { + b.options = append(b.options, cb) +} + +func (b *Builder) WithTasks(tasks []string) *Builder { + b.add(func(ti *TipSetIndexer) { + ti.taskNames = tasks + }) + return b +} + +func (b *Builder) Build() (*TipSetIndexer, error) { + ti := &TipSetIndexer{ + name: b.name, + node: b.api, + } + + for _, opt := range b.options { + opt(ti) + } + + if err := ti.init(); err != nil { + return nil, err + } + return ti, nil +} diff --git a/chain/indexer/manager.go b/chain/indexer/integrated/manager.go similarity index 75% rename from chain/indexer/manager.go rename to chain/indexer/integrated/manager.go index 6990dbb27..701e2dc42 100644 --- a/chain/indexer/manager.go +++ b/chain/indexer/integrated/manager.go @@ -1,4 +1,4 @@ -package indexer +package integrated import ( "context" @@ -8,6 +8,7 @@ import ( logging "github.com/ipfs/go-log/v2" "go.opentelemetry.io/otel" + "github.com/filecoin-project/lily/chain/indexer" "github.com/filecoin-project/lily/model" visormodel "github.com/filecoin-project/lily/model/visor" 
"github.com/filecoin-project/lily/tasks" @@ -15,22 +16,18 @@ import ( var log = logging.Logger("lily/index/manager") -type Indexer interface { - TipSet(ctx context.Context, ts *types.TipSet) (chan *Result, chan error, error) -} - type Exporter interface { - ExportResult(ctx context.Context, strg model.Storage, height int64, m []*ModelResult) error + ExportResult(ctx context.Context, strg model.Storage, height int64, m []*indexer.ModelResult) error } // Manager manages the execution of an Indexer. It may be used to index TipSets both serially or in parallel. type Manager struct { - api tasks.DataSource - storage model.Storage - indexer Indexer - exporter Exporter - window time.Duration - name string + api tasks.DataSource + storage model.Storage + indexBuilder *Builder + exporter Exporter + window time.Duration + name string } type ManagerOpt func(i *Manager) @@ -44,24 +41,8 @@ func WithWindow(w time.Duration) ManagerOpt { } } -// WithExporter overrides the Manager's default Exporter with the provided Exporter. -// An Exporter is used to export the results of the Manager's Indexer. -func WithExporter(e Exporter) ManagerOpt { - return func(m *Manager) { - m.exporter = e - } -} - -// WithIndexer overrides the Manager's default Indexer with the provided Indexer. -// An Indexer is used to collect state from a tipset. -func WithIndexer(i Indexer) ManagerOpt { - return func(m *Manager) { - m.indexer = i - } -} - // NewManager returns a default Manager. Any provided ManagerOpt's will override Manager's default values. -func NewManager(api tasks.DataSource, strg model.Storage, name string, tasks []string, opts ...ManagerOpt) (*Manager, error) { +func NewManager(api tasks.DataSource, strg model.Storage, name string, opts ...ManagerOpt) (*Manager, error) { im := &Manager{ api: api, storage: strg, @@ -73,16 +54,10 @@ func NewManager(api tasks.DataSource, strg model.Storage, name string, tasks []s opt(im) } - if im.indexer == nil { - var err error - im.indexer, err = NewTipSetIndexer(api, name, tasks) - if err != nil { - return nil, err - } - } + im.indexBuilder = NewBuilder(api, name) if im.exporter == nil { - im.exporter = NewModelExporter(name) + im.exporter = indexer.NewModelExporter(name) } return im, nil } @@ -90,7 +65,11 @@ func NewManager(api tasks.DataSource, strg model.Storage, name string, tasks []s // TipSet synchronously indexes and persists `ts`. TipSet returns an error if the Manager's Indexer encounters a // fatal error. TipSet returns false if one or more of the Indexer's tasks complete with a status `ERROR` or `SKIPPED`, else returns true. // Upon cancellation of `ctx` TipSet will persist all incomplete tasks with status `SKIPPED` before returning. -func (i *Manager) TipSet(ctx context.Context, ts *types.TipSet) (bool, error) { +func (i *Manager) TipSet(ctx context.Context, ts *types.TipSet, options ...indexer.Option) (bool, error) { + opts, err := indexer.ConstructOptions(options...) + if err != nil { + return false, err + } ctx, span := otel.Tracer("").Start(ctx, "Manager.TipSet") defer span.End() lg := log.With("height", ts.Height(), "reporter", i.name) @@ -108,8 +87,13 @@ func (i *Manager) TipSet(ctx context.Context, ts *types.TipSet) (bool, error) { } defer cancel() + idxer, err := i.indexBuilder.WithTasks(opts.Tasks).Build() + if err != nil { + return false, err + } + // asynchronously begin indexing tipset `ts`, returning results as they become avaiable. 
- taskResults, taskErrors, err := i.indexer.TipSet(procCtx, ts) + taskResults, taskErrors, err := idxer.TipSet(procCtx, ts) // indexer suffered fatal error, abort. if err != nil { return false, err @@ -120,7 +104,7 @@ func (i *Manager) TipSet(ctx context.Context, ts *types.TipSet) (bool, error) { return true, nil } - var modelResults []*ModelResult + var modelResults []*indexer.ModelResult success := true // collect all the results, recording if any of the tasks were skipped or errored for res := range taskResults { @@ -138,7 +122,7 @@ func (i *Manager) TipSet(ctx context.Context, ts *types.TipSet) (bool, error) { lg.Infow("task success", "task", res.Name, "status", report.Status, "duration", report.CompletedAt.Sub(report.StartedAt)) } } - modelResults = append(modelResults, &ModelResult{ + modelResults = append(modelResults, &indexer.ModelResult{ Name: res.Name, Model: model.PersistableList{res.Report, res.Data}, }) diff --git a/chain/indexer/processor/builder.go b/chain/indexer/integrated/processor/builder.go similarity index 100% rename from chain/indexer/processor/builder.go rename to chain/indexer/integrated/processor/builder.go diff --git a/chain/indexer/processor/state.go b/chain/indexer/integrated/processor/state.go similarity index 100% rename from chain/indexer/processor/state.go rename to chain/indexer/integrated/processor/state.go diff --git a/chain/indexer/tipset.go b/chain/indexer/integrated/tipset.go similarity index 56% rename from chain/indexer/tipset.go rename to chain/indexer/integrated/tipset.go index 3599ec97c..1a3ce9773 100644 --- a/chain/indexer/tipset.go +++ b/chain/indexer/integrated/tipset.go @@ -1,10 +1,18 @@ -package indexer +package integrated import ( "context" "time" + "github.com/ipfs/go-cid" + "go.opencensus.io/stats" + "go.opencensus.io/tag" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "golang.org/x/xerrors" + "github.com/filecoin-project/lotus/chain/types" + saminer1 "github.com/filecoin-project/specs-actors/actors/builtin/miner" saminer2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" saminer3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner" @@ -12,13 +20,6 @@ import ( saminer5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" saminer6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/miner" saminer7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/miner" - "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log/v2" - "go.opencensus.io/stats" - "go.opencensus.io/tag" - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/attribute" - "golang.org/x/xerrors" init_ "github.com/filecoin-project/lily/chain/actors/builtin/init" "github.com/filecoin-project/lily/chain/actors/builtin/market" @@ -27,26 +28,27 @@ import ( "github.com/filecoin-project/lily/chain/actors/builtin/power" "github.com/filecoin-project/lily/chain/actors/builtin/reward" "github.com/filecoin-project/lily/chain/actors/builtin/verifreg" - "github.com/filecoin-project/lily/chain/indexer/processor" + "github.com/filecoin-project/lily/chain/indexer/integrated/processor" + "github.com/filecoin-project/lily/chain/indexer/tasktype" "github.com/filecoin-project/lily/metrics" "github.com/filecoin-project/lily/model" visormodel "github.com/filecoin-project/lily/model/visor" - "github.com/filecoin-project/lily/tasks" + taskapi "github.com/filecoin-project/lily/tasks" "github.com/filecoin-project/lily/tasks/actorstate" - init_2 "github.com/filecoin-project/lily/tasks/actorstate/init_" - market2 
"github.com/filecoin-project/lily/tasks/actorstate/market" - miner2 "github.com/filecoin-project/lily/tasks/actorstate/miner" - multisig2 "github.com/filecoin-project/lily/tasks/actorstate/multisig" - power2 "github.com/filecoin-project/lily/tasks/actorstate/power" - "github.com/filecoin-project/lily/tasks/actorstate/raw" - reward2 "github.com/filecoin-project/lily/tasks/actorstate/reward" - verifreg2 "github.com/filecoin-project/lily/tasks/actorstate/verifreg" + inittask "github.com/filecoin-project/lily/tasks/actorstate/init_" + markettask "github.com/filecoin-project/lily/tasks/actorstate/market" + minertask "github.com/filecoin-project/lily/tasks/actorstate/miner" + multisigtask "github.com/filecoin-project/lily/tasks/actorstate/multisig" + powertask "github.com/filecoin-project/lily/tasks/actorstate/power" + rawtask "github.com/filecoin-project/lily/tasks/actorstate/raw" + rewardtask "github.com/filecoin-project/lily/tasks/actorstate/reward" + verifregtask "github.com/filecoin-project/lily/tasks/actorstate/verifreg" "github.com/filecoin-project/lily/tasks/blocks/drand" "github.com/filecoin-project/lily/tasks/blocks/headers" "github.com/filecoin-project/lily/tasks/blocks/parents" "github.com/filecoin-project/lily/tasks/chaineconomics" "github.com/filecoin-project/lily/tasks/consensus" - "github.com/filecoin-project/lily/tasks/indexer" + indexTask "github.com/filecoin-project/lily/tasks/indexer" "github.com/filecoin-project/lily/tasks/messageexecutions/internal_message" "github.com/filecoin-project/lily/tasks/messageexecutions/internal_parsed_message" "github.com/filecoin-project/lily/tasks/messages/block_message" @@ -58,48 +60,37 @@ import ( "github.com/filecoin-project/lily/tasks/msapprovals" ) -var tsLog = logging.Logger("lily/index/tipset") - -// TipSetIndexer waits for tipsets and persists their block data into a database. +// TipSetIndexer extracts block, message and actor state data from a tipset and persists it to storage. Extraction +// and persistence are concurrent. Extraction of the a tipset can proceed while data from the previous extraction is +// being persisted. The indexer may be given a time window in which to complete data extraction. The name of the +// indexer is used as the reporter in the visor_processing_reports table. type TipSetIndexer struct { - name string - node tasks.DataSource - tasks []string + name string + node taskapi.DataSource + taskNames []string procBuilder *processor.Builder } -type TipSetIndexerOpt func(t *TipSetIndexer) - -// NewTipSetIndexer extracts block, message and actor state data from a tipset and persists it to storage. Extraction -// and persistence are concurrent. Extraction of the a tipset can proceed while data from the previous extraction is -// being persisted. The indexer may be given a time window in which to complete data extraction. The name of the -// indexer is used as the reporter in the visor_processing_reports table. -func NewTipSetIndexer(node tasks.DataSource, name string, tasks []string, options ...TipSetIndexerOpt) (*TipSetIndexer, error) { +func (ti *TipSetIndexer) init() error { var indexerTasks []string - for _, task := range tasks { - if tables, found := TaskLookup[task]; found { + for _, taskName := range ti.taskNames { + if tables, found := tasktype.TaskLookup[taskName]; found { // if this is a task look up its corresponding tables indexerTasks = append(indexerTasks, tables...) 
- } else if _, found := TableLookup[task]; found { + } else if _, found := tasktype.TableLookup[taskName]; found { // it's not a task, maybe it's a table, if it is added to task list, else this is an unknown task - indexerTasks = append(indexerTasks, task) + indexerTasks = append(indexerTasks, taskName) } else { - return nil, xerrors.Errorf("unknown task: %s", task) + return xerrors.Errorf("unknown task: %s", taskName) } } - tsi := &TipSetIndexer{ - name: name, - node: node, - tasks: indexerTasks, - } - tipsetProcessors := map[string]processor.TipSetProcessor{} tipsetsProcessors := map[string]processor.TipSetsProcessor{} actorProcessors := map[string]processor.ActorProcessor{} reportProcessors := map[string]processor.ReportProcessor{ - "builtin": indexer.NewTask(node), + "builtin": indexTask.NewTask(ti.node), } for _, t := range indexerTasks { @@ -107,182 +98,184 @@ func NewTipSetIndexer(node tasks.DataSource, name string, tasks []string, option // // miners // - case MinerCurrentDeadlineInfo: - actorProcessors[t] = actorstate.NewTask(node, actorstate.NewTypedActorExtractorMap( - miner.AllCodes(), miner2.DeadlineInfoExtractor{}, + case tasktype.MinerCurrentDeadlineInfo: + actorProcessors[t] = actorstate.NewTask(ti.node, actorstate.NewTypedActorExtractorMap( + miner.AllCodes(), minertask.DeadlineInfoExtractor{}, )) - case MinerFeeDebt: - actorProcessors[t] = actorstate.NewTask(node, actorstate.NewTypedActorExtractorMap( - miner.AllCodes(), miner2.FeeDebtExtractor{}, + case tasktype.MinerFeeDebt: + actorProcessors[t] = actorstate.NewTask(ti.node, actorstate.NewTypedActorExtractorMap( + miner.AllCodes(), minertask.FeeDebtExtractor{}, )) - case MinerInfo: - actorProcessors[t] = actorstate.NewTask(node, actorstate.NewTypedActorExtractorMap( - miner.AllCodes(), miner2.InfoExtractor{}, + case tasktype.MinerInfo: + actorProcessors[t] = actorstate.NewTask(ti.node, actorstate.NewTypedActorExtractorMap( + miner.AllCodes(), minertask.InfoExtractor{}, )) - case MinerLockedFund: - actorProcessors[t] = actorstate.NewTask(node, actorstate.NewTypedActorExtractorMap( - miner.AllCodes(), miner2.InfoExtractor{}, + case tasktype.MinerLockedFund: + actorProcessors[t] = actorstate.NewTask(ti.node, actorstate.NewTypedActorExtractorMap( + miner.AllCodes(), minertask.InfoExtractor{}, )) - case MinerPreCommitInfo: - actorProcessors[t] = actorstate.NewTask(node, actorstate.NewTypedActorExtractorMap( - miner.AllCodes(), miner2.PreCommitInfoExtractor{}, + case tasktype.MinerPreCommitInfo: + actorProcessors[t] = actorstate.NewTask(ti.node, actorstate.NewTypedActorExtractorMap( + miner.AllCodes(), minertask.PreCommitInfoExtractor{}, )) - case MinerSectorDeal: - actorProcessors[t] = actorstate.NewTask(node, actorstate.NewTypedActorExtractorMap( - miner.AllCodes(), miner2.SectorDealsExtractor{}, + case tasktype.MinerSectorDeal: + actorProcessors[t] = actorstate.NewTask(ti.node, actorstate.NewTypedActorExtractorMap( + miner.AllCodes(), minertask.SectorDealsExtractor{}, )) - case MinerSectorEvent: - actorProcessors[t] = actorstate.NewTask(node, actorstate.NewTypedActorExtractorMap( - miner.AllCodes(), miner2.SectorEventsExtractor{}, + case tasktype.MinerSectorEvent: + actorProcessors[t] = actorstate.NewTask(ti.node, actorstate.NewTypedActorExtractorMap( + miner.AllCodes(), minertask.SectorEventsExtractor{}, )) - case MinerSectorPost: - actorProcessors[t] = actorstate.NewTask(node, actorstate.NewTypedActorExtractorMap( - miner.AllCodes(), miner2.PoStExtractor{}, + case tasktype.MinerSectorPost: + actorProcessors[t] = 
actorstate.NewTask(ti.node, actorstate.NewTypedActorExtractorMap( + miner.AllCodes(), minertask.PoStExtractor{}, )) - case MinerSectorInfoV1_6: - actorProcessors[t] = actorstate.NewTask(node, actorstate.NewCustomTypedActorExtractorMap( + case tasktype.MinerSectorInfoV1_6: + actorProcessors[t] = actorstate.NewTask(ti.node, actorstate.NewCustomTypedActorExtractorMap( map[cid.Cid][]actorstate.ActorStateExtractor{ - saminer1.Actor{}.Code(): {miner2.SectorInfoExtractor{}}, - saminer2.Actor{}.Code(): {miner2.SectorInfoExtractor{}}, - saminer3.Actor{}.Code(): {miner2.SectorInfoExtractor{}}, - saminer4.Actor{}.Code(): {miner2.SectorInfoExtractor{}}, - saminer5.Actor{}.Code(): {miner2.SectorInfoExtractor{}}, - saminer6.Actor{}.Code(): {miner2.SectorInfoExtractor{}}, + saminer1.Actor{}.Code(): {minertask.SectorInfoExtractor{}}, + saminer2.Actor{}.Code(): {minertask.SectorInfoExtractor{}}, + saminer3.Actor{}.Code(): {minertask.SectorInfoExtractor{}}, + saminer4.Actor{}.Code(): {minertask.SectorInfoExtractor{}}, + saminer5.Actor{}.Code(): {minertask.SectorInfoExtractor{}}, + saminer6.Actor{}.Code(): {minertask.SectorInfoExtractor{}}, }, )) - case MinerSectorInfoV7: - actorProcessors[t] = actorstate.NewTask(node, actorstate.NewCustomTypedActorExtractorMap( + case tasktype.MinerSectorInfoV7: + actorProcessors[t] = actorstate.NewTask(ti.node, actorstate.NewCustomTypedActorExtractorMap( map[cid.Cid][]actorstate.ActorStateExtractor{ - saminer7.Actor{}.Code(): {miner2.V7SectorInfoExtractor{}}, + saminer7.Actor{}.Code(): {minertask.V7SectorInfoExtractor{}}, }, )) // // Power // - case PowerActorClaim: - actorProcessors[t] = actorstate.NewTask(node, actorstate.NewTypedActorExtractorMap( + case tasktype.PowerActorClaim: + actorProcessors[t] = actorstate.NewTask(ti.node, actorstate.NewTypedActorExtractorMap( power.AllCodes(), - power2.ClaimedPowerExtractor{}, + powertask.ClaimedPowerExtractor{}, )) - case ChainPower: - actorProcessors[t] = actorstate.NewTask(node, actorstate.NewTypedActorExtractorMap( + case tasktype.ChainPower: + actorProcessors[t] = actorstate.NewTask(ti.node, actorstate.NewTypedActorExtractorMap( power.AllCodes(), - power2.ChainPowerExtractor{}, + powertask.ChainPowerExtractor{}, )) // // Reward // - case ChainReward: - actorProcessors[t] = actorstate.NewTask(node, actorstate.NewTypedActorExtractorMap( + case tasktype.ChainReward: + actorProcessors[t] = actorstate.NewTask(ti.node, actorstate.NewTypedActorExtractorMap( reward.AllCodes(), - reward2.RewardExtractor{}, + rewardtask.RewardExtractor{}, )) // // Init // - case IdAddress: - actorProcessors[t] = actorstate.NewTask(node, actorstate.NewTypedActorExtractorMap( + case tasktype.IdAddress: + actorProcessors[t] = actorstate.NewTask(ti.node, actorstate.NewTypedActorExtractorMap( init_.AllCodes(), - init_2.InitExtractor{}, + inittask.InitExtractor{}, )) // // Market // - case MarketDealState: - actorProcessors[t] = actorstate.NewTask(node, actorstate.NewTypedActorExtractorMap( + case tasktype.MarketDealState: + actorProcessors[t] = actorstate.NewTask(ti.node, actorstate.NewTypedActorExtractorMap( market.AllCodes(), - market2.DealStateExtractor{}, + markettask.DealStateExtractor{}, )) - case MarketDealProposal: - actorProcessors[t] = actorstate.NewTask(node, actorstate.NewTypedActorExtractorMap( + case tasktype.MarketDealProposal: + actorProcessors[t] = actorstate.NewTask(ti.node, actorstate.NewTypedActorExtractorMap( market.AllCodes(), - market2.DealProposalExtractor{}, + markettask.DealProposalExtractor{}, )) // // Multisig // - case 
MultisigTransaction: - actorProcessors[t] = actorstate.NewTask(node, actorstate.NewTypedActorExtractorMap( + case tasktype.MultisigTransaction: + actorProcessors[t] = actorstate.NewTask(ti.node, actorstate.NewTypedActorExtractorMap( multisig.AllCodes(), - multisig2.MultiSigActorExtractor{}, + multisigtask.MultiSigActorExtractor{}, )) // // Verified Registry // - case VerifiedRegistryVerifier: - actorProcessors[t] = actorstate.NewTask(node, actorstate.NewTypedActorExtractorMap(verifreg.AllCodes(), - verifreg2.VerifierExtractor{}, + case tasktype.VerifiedRegistryVerifier: + actorProcessors[t] = actorstate.NewTask(ti.node, actorstate.NewTypedActorExtractorMap(verifreg.AllCodes(), + verifregtask.VerifierExtractor{}, )) - case VerifiedRegistryVerifiedClient: - actorProcessors[t] = actorstate.NewTask(node, actorstate.NewTypedActorExtractorMap(verifreg.AllCodes(), - verifreg2.ClientExtractor{}, + case tasktype.VerifiedRegistryVerifiedClient: + actorProcessors[t] = actorstate.NewTask(ti.node, actorstate.NewTypedActorExtractorMap(verifreg.AllCodes(), + verifregtask.ClientExtractor{}, )) // // Raw Actors // - case Actor: + case tasktype.Actor: rae := &actorstate.RawActorExtractorMap{} - rae.Register(&raw.RawActorExtractor{}) - actorProcessors[t] = actorstate.NewTask(node, rae) - case ActorState: + rae.Register(&rawtask.RawActorExtractor{}) + actorProcessors[t] = actorstate.NewTask(ti.node, rae) + case tasktype.ActorState: rae := &actorstate.RawActorExtractorMap{} - rae.Register(&raw.RawActorStateExtractor{}) - actorProcessors[t] = actorstate.NewTask(node, rae) - - case Message: - tipsetsProcessors[t] = message.NewTask(node) - case GasOutputs: - tipsetsProcessors[t] = gas_output.NewTask(node) - case BlockMessage: - tipsetsProcessors[t] = block_message.NewTask(node) - case ParsedMessage: - tipsetsProcessors[t] = parsed_message.NewTask(node) - case Receipt: - tipsetsProcessors[t] = receipt.NewTask(node) - case InternalMessage: - tipsetsProcessors[t] = internal_message.NewTask(node) - case InternalParsedMessage: - tipsetsProcessors[t] = internal_parsed_message.NewTask(node) - case MessageGasEconomy: - tipsetsProcessors[t] = gas_economy.NewTask(node) - - case MultisigApproval: - tipsetsProcessors[t] = msapprovals.NewTask(node) - - case BlockHeader: + rae.Register(&rawtask.RawActorStateExtractor{}) + actorProcessors[t] = actorstate.NewTask(ti.node, rae) + + // + // Messages + // + case tasktype.Message: + tipsetsProcessors[t] = message.NewTask(ti.node) + case tasktype.GasOutputs: + tipsetsProcessors[t] = gas_output.NewTask(ti.node) + case tasktype.BlockMessage: + tipsetsProcessors[t] = block_message.NewTask(ti.node) + case tasktype.ParsedMessage: + tipsetsProcessors[t] = parsed_message.NewTask(ti.node) + case tasktype.Receipt: + tipsetsProcessors[t] = receipt.NewTask(ti.node) + case tasktype.InternalMessage: + tipsetsProcessors[t] = internal_message.NewTask(ti.node) + case tasktype.InternalParsedMessage: + tipsetsProcessors[t] = internal_parsed_message.NewTask(ti.node) + case tasktype.MessageGasEconomy: + tipsetsProcessors[t] = gas_economy.NewTask(ti.node) + case tasktype.MultisigApproval: + tipsetsProcessors[t] = msapprovals.NewTask(ti.node) + + // + // Blocks + // + case tasktype.BlockHeader: tipsetProcessors[t] = headers.NewTask() - case BlockParent: + case tasktype.BlockParent: tipsetProcessors[t] = parents.NewTask() - case DrandBlockEntrie: + case tasktype.DrandBlockEntrie: tipsetProcessors[t] = drand.NewTask() - case ChainEconomics: - tipsetProcessors[t] = chaineconomics.NewTask(node) - case 
ChainConsensus:
- tipsetProcessors[t] = consensus.NewTask(node)
+
+ case tasktype.ChainEconomics:
+ tipsetProcessors[t] = chaineconomics.NewTask(ti.node)
+ case tasktype.ChainConsensus:
+ tipsetProcessors[t] = consensus.NewTask(ti.node)
default:
- return nil, xerrors.Errorf("unknown task: %s", t)
+ return xerrors.Errorf("unknown task: %s", t)
}
}
- tsi.procBuilder = processor.NewBuilder(node, name).
+ ti.procBuilder = processor.NewBuilder(ti.node, ti.name).
WithTipSetProcessors(tipsetProcessors).
WithTipSetsProcessors(tipsetsProcessors).
WithActorProcessors(actorProcessors).
WithBuiltinProcessors(reportProcessors)
- for _, opt := range options {
- opt(tsi)
- }
-
- return tsi, nil
+ return nil
}
type Result struct {
@@ -323,11 +316,11 @@ func (t *TipSetIndexer) TipSet(ctx context.Context, ts *types.TipSet) (chan *Res
attribute.String("executed_tipset", executed.String()),
attribute.Int64("executed_height", int64(executed.Height())),
attribute.String("name", t.name),
- attribute.StringSlice("tasks", t.tasks),
+ attribute.StringSlice("tasks", t.taskNames),
)
}
- tsLog.Infow("index", "reporter", t.name, "current", current.Height(), "executed", executed.Height())
+ log.Infow("index", "reporter", t.name, "current", current.Height(), "executed", executed.Height())
stateResults, taskNames := t.procBuilder.Build().State(ctx, current, executed)
// build list of executing tasks, used below to label incomplete tasks as skipped.
@@ -375,7 +368,7 @@ func (t *TipSetIndexer) TipSet(ctx context.Context, ts *types.TipSet) (chan *Res
// received a result
default:
- llt := tsLog.With("height", current.Height(), "task", res.Task, "reporter", t.name)
+ llt := log.With("height", current.Height(), "task", res.Task, "reporter", t.name)
// Was there a fatal error?
if res.Error != nil {
diff --git a/chain/indexer/interface.go b/chain/indexer/interface.go
new file mode 100644
index 000000000..ae152acdd
--- /dev/null
+++ b/chain/indexer/interface.go
@@ -0,0 +1,116 @@
+package indexer
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "github.com/filecoin-project/lotus/chain/types"
+ "golang.org/x/xerrors"
+)
+
+// Option specifies the index processing behavior. The interface allows implementations of the Indexer interface
+// to be configured independently without changing the declaration of the Indexer.TipSet method.
+type Option interface {
+ // String returns a string representation of the option.
+ String() string
+
+ // Type describes the type of the option.
+ Type() OptionType
+
+ // Value returns a value used to create this option.
+ Value() interface{}
+}
+
+type OptionType int
+
+const (
+ IndexTypeOpt OptionType = iota
+ TasksOpt
+)
+
+type (
+ indexTypeOption int
+ tasksTypeOption []string
+)
+
+// WithTasks returns an Option that specifies the tasks to be indexed.
+// It is used by both the distributed and integrated indexers.
+func WithTasks(tasks []string) Option {
+ return tasksTypeOption(tasks)
+}
+
+func (t tasksTypeOption) String() string { return fmt.Sprintf("Tasks(%s)", strings.Join(t, ",")) }
+func (t tasksTypeOption) Type() OptionType { return TasksOpt }
+func (t tasksTypeOption) Value() interface{} { return []string(t) }
+
+type IndexerType int
+
+func (i IndexerType) String() string {
+ switch i {
+ case Undefined:
+ return "undefined"
+ case Watch:
+ return "watch"
+ case Walk:
+ return "walk"
+ case Index:
+ return "index"
+ case Fill:
+ return "fill"
+ default:
+ panic(fmt.Sprintf("developer error unknown indexer type: %d", i))
+ }
+}
+
+// WithIndexerType returns an Option that specifies the type of index operation being performed.
+// It is used by the distributed indexer to determine priority of the TipSet being indexed.
+func WithIndexerType(it IndexerType) Option {
+ return indexTypeOption(it)
+}
+
+const (
+ Undefined IndexerType = iota
+ Watch
+ Walk
+ Index
+ Fill
+)
+
+func (o indexTypeOption) String() string { return fmt.Sprintf("IndexerType(%d)", o) }
+func (o indexTypeOption) Type() OptionType { return IndexTypeOpt }
+func (o indexTypeOption) Value() interface{} { return IndexerType(o) }
+
+// IndexerOptions are used by implementations of the Indexer interface for configuration.
+type IndexerOptions struct {
+ IndexType IndexerType
+ Tasks []string
+}
+
+// ConstructOptions returns an IndexerOptions struct that may be used to configure implementations of the Indexer interface.
+func ConstructOptions(opts ...Option) (IndexerOptions, error) {
+ res := IndexerOptions{
+ IndexType: Undefined,
+ }
+
+ for _, opt := range opts {
+ switch o := opt.(type) {
+ case indexTypeOption:
+ res.IndexType = IndexerType(o)
+ case tasksTypeOption:
+ res.Tasks = []string(o)
+ if len(res.Tasks) == 0 {
+ return IndexerOptions{}, xerrors.Errorf("tasks options cannot be empty")
+ }
+ default:
+ }
+ }
+ return res, nil
+}
+
+// Indexer is implemented by types that index TipSets.
+type Indexer interface {
+ // TipSet indexes a TipSet. The returned error is non-nil if a fatal error is encountered. True is returned if the
+ // TipSet is indexed successfully, false is returned if the TipSet was only partially indexed.
+ TipSet(ctx context.Context, ts *types.TipSet, opts ...Option) (bool, error)
+}
diff --git a/chain/indexer/table_tasks.go b/chain/indexer/tasktype/table_tasks.go
similarity index 99%
rename from chain/indexer/table_tasks.go
rename to chain/indexer/tasktype/table_tasks.go
index 1a74346fa..deccd34b9 100644
--- a/chain/indexer/table_tasks.go
+++ b/chain/indexer/tasktype/table_tasks.go
@@ -1,5 +1,5 @@
// Code generate by: `make tasks-gen`. DO NOT EDIT.
-package indexer
+package tasktype
const (
BlockHeader = "block_header"
diff --git a/chain/indexer/table_tasks.go.template b/chain/indexer/tasktype/table_tasks.go.template
similarity index 97%
rename from chain/indexer/table_tasks.go.template
rename to chain/indexer/tasktype/table_tasks.go.template
index 21ed2c348..41b23ee53 100644
--- a/chain/indexer/table_tasks.go.template
+++ b/chain/indexer/tasktype/table_tasks.go.template
@@ -1,5 +1,5 @@
// Code generate by: `make tasks-gen`. DO NOT EDIT.
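A self-contained sketch of the option round-trip defined above; the output comments assume the String methods shown in this file and the relocated tasktype constants:

    package main

    import (
    	"fmt"

    	"github.com/filecoin-project/lily/chain/indexer"
    	"github.com/filecoin-project/lily/chain/indexer/tasktype"
    )

    func main() {
    	// IndexType defaults to Undefined when no WithIndexerType option is given.
    	opts, err := indexer.ConstructOptions(
    		indexer.WithIndexerType(indexer.Watch),
    		indexer.WithTasks([]string{tasktype.BlockHeader}),
    	)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(opts.IndexType) // watch
    	fmt.Println(opts.Tasks)     // [block_header]

    	// An empty task list is rejected, so misconfigured jobs fail fast.
    	if _, err := indexer.ConstructOptions(indexer.WithTasks(nil)); err != nil {
    		fmt.Println(err) // tasks options cannot be empty
    	}
    }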
-package indexer +package tasktype const ( {{- range $idx, $tn := .tableNames}} diff --git a/chain/indexer/tablegen/generator/gen.go b/chain/indexer/tasktype/tablegen/generator/gen.go similarity index 98% rename from chain/indexer/tablegen/generator/gen.go rename to chain/indexer/tasktype/tablegen/generator/gen.go index 9a3f3624b..a136756ef 100644 --- a/chain/indexer/tablegen/generator/gen.go +++ b/chain/indexer/tasktype/tablegen/generator/gen.go @@ -18,7 +18,7 @@ import ( ) func Gen() error { - taskDir := "./chain/indexer" + taskDir := "./chain/indexer/tasktype" rf, err := ioutil.ReadFile(filepath.Join(taskDir, "table_tasks.go.template")) if err != nil { return xerrors.Errorf("loading registry template: %w", err) diff --git a/chain/indexer/tablegen/main.go b/chain/indexer/tasktype/tablegen/main.go similarity index 55% rename from chain/indexer/tablegen/main.go rename to chain/indexer/tasktype/tablegen/main.go index c4122bb59..2a6aa1b9e 100644 --- a/chain/indexer/tablegen/main.go +++ b/chain/indexer/tasktype/tablegen/main.go @@ -1,7 +1,7 @@ package main import ( - "github.com/filecoin-project/lily/chain/indexer/tablegen/generator" + "github.com/filecoin-project/lily/chain/indexer/tasktype/tablegen/generator" ) func main() { diff --git a/chain/indexer/tasks.go b/chain/indexer/tasktype/tasks.go similarity index 99% rename from chain/indexer/tasks.go rename to chain/indexer/tasktype/tasks.go index fe75652bb..75cfd9e24 100644 --- a/chain/indexer/tasks.go +++ b/chain/indexer/tasktype/tasks.go @@ -1,4 +1,4 @@ -package indexer +package tasktype const ( ActorStatesRawTask = "actorstatesraw" // task that only extracts raw actor state diff --git a/chain/walker.go b/chain/walk/walker.go similarity index 88% rename from chain/walker.go rename to chain/walk/walker.go index aa23057ed..66fe92962 100644 --- a/chain/walker.go +++ b/chain/walk/walker.go @@ -1,10 +1,11 @@ -package chain +package walk import ( "context" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/chain/types" + logging "github.com/ipfs/go-log/v2" "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" "golang.org/x/xerrors" @@ -13,11 +14,14 @@ import ( "github.com/filecoin-project/lily/lens" ) -func NewWalker(obs *indexer.Manager, node lens.API, name string, minHeight, maxHeight int64) *Walker { +var log = logging.Logger("lily/chain/walk") + +func NewWalker(obs indexer.Indexer, node lens.API, name string, tasks []string, minHeight, maxHeight int64) *Walker { return &Walker{ node: node, obs: obs, name: name, + tasks: tasks, minHeight: minHeight, maxHeight: maxHeight, } @@ -26,8 +30,9 @@ func NewWalker(obs *indexer.Manager, node lens.API, name string, minHeight, maxH // Walker is a job that indexes blocks by walking the chain history. 
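Since the task and table registries now live in chain/indexer/tasktype, the expansion performed by TipSetIndexer.init earlier in this change can be summarized as follows; this is an illustrative condensation under the assumption that TaskLookup maps group names to table lists and TableLookup holds the concrete table names, as the calls above suggest:

    package example

    import (
    	"golang.org/x/xerrors"

    	"github.com/filecoin-project/lily/chain/indexer/tasktype"
    )

    func expandTasks(taskNames []string) ([]string, error) {
    	var tableTasks []string
    	for _, name := range taskNames {
    		if tables, found := tasktype.TaskLookup[name]; found {
    			// a task group (e.g. "blocks") expands to all of its member tables
    			tableTasks = append(tableTasks, tables...)
    		} else if _, found := tasktype.TableLookup[name]; found {
    			// already a concrete table name (e.g. "block_header"); keep as-is
    			tableTasks = append(tableTasks, name)
    		} else {
    			return nil, xerrors.Errorf("unknown task: %s", name)
    		}
    	}
    	return tableTasks, nil
    }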
type Walker struct {
node lens.API
- obs *indexer.Manager
+ obs indexer.Indexer
name string
+ tasks []string
minHeight int64 // limit persisting to tipsets equal to or above this height
maxHeight int64 // limit persisting to tipsets equal to or below this height
done chan struct{}
@@ -91,7 +96,7 @@ func (c *Walker) WalkChain(ctx context.Context, node lens.API, ts *types.TipSet)
default:
}
log.Infow("walk tipset", "height", ts.Height(), "reporter", c.name)
- if success, err := c.obs.TipSet(ctx, ts); err != nil {
+ if success, err := c.obs.TipSet(ctx, ts, indexer.WithIndexerType(indexer.Walk), indexer.WithTasks(c.tasks)); err != nil {
span.RecordError(err)
return xerrors.Errorf("notify tipset: %w", err)
} else if !success {
diff --git a/chain/walker_test.go b/chain/walk/walker_test.go
similarity index 86%
rename from chain/walker_test.go
rename to chain/walk/walker_test.go
index 339c37360..b3cac13b5 100644
--- a/chain/walker_test.go
+++ b/chain/walk/walker_test.go
@@ -1,4 +1,4 @@
-package chain
+package walk
import (
"context"
@@ -13,7 +13,8 @@ import (
"github.com/filecoin-project/lily/chain/actors/builtin"
"github.com/filecoin-project/lily/chain/datasource"
- "github.com/filecoin-project/lily/chain/indexer"
+ "github.com/filecoin-project/lily/chain/indexer/integrated"
+ "github.com/filecoin-project/lily/chain/indexer/tasktype"
"github.com/filecoin-project/lily/model/blocks"
"github.com/filecoin-project/lily/storage"
"github.com/filecoin-project/lily/testutil"
@@ -32,7 +33,7 @@ func TestWalker(t *testing.T) {
defer func() { require.NoError(t, cleanup()) }()
t.Logf("truncating database tables")
- err = truncateBlockTables(t, db)
+ err = testutil.TruncateBlockTables(t, db)
require.NoError(t, err, "truncating tables")
t.Logf("preparing chain")
@@ -48,7 +49,7 @@ func TestWalker(t *testing.T) {
t.Logf("collecting chain blocks from tipset before head")
- bhs, err := collectBlockHeaders(nodeAPI, head)
+ bhs, err := testutil.CollectBlockHeaders(nodeAPI, head)
require.NoError(t, err, "collect chain blocks")
cids := bhs.Cids()
@@ -58,12 +59,15 @@ func TestWalker(t *testing.T) {
require.NoError(t, err, "NewDatabaseFromDB")
logging.SetAllLoggers(logging.LevelInfo)
+
taskAPI, err := datasource.NewDataSource(nodeAPI)
require.NoError(t, err)
- im, err := indexer.NewManager(taskAPI, strg, t.Name(), []string{indexer.BlocksTask}, indexer.WithWindow(builtin.EpochDurationSeconds*time.Second))
+
+ im, err := integrated.NewManager(taskAPI, strg, t.Name(), integrated.WithWindow(builtin.EpochDurationSeconds*time.Second))
require.NoError(t, err, "NewManager")
+
t.Logf("initializing indexer")
- idx := NewWalker(im, nodeAPI, t.Name(), 0, int64(head.Height()))
+ idx := NewWalker(im, nodeAPI, t.Name(), []string{tasktype.BlocksTask}, 0, int64(head.Height()))
t.Logf("indexing chain")
err = idx.WalkChain(ctx, nodeAPI, head)
diff --git a/chain/watch/observer.go b/chain/watch/observer.go
new file mode 100644
index 000000000..2a4d8abac
--- /dev/null
+++ b/chain/watch/observer.go
@@ -0,0 +1,155 @@
+package watch
+
+import (
+ "context"
+ "sync"
+
+ "github.com/filecoin-project/lotus/chain/events"
+ "github.com/filecoin-project/lotus/chain/types"
+)
+
+// A HeadEvent is a notification of a change at the head of the chain
+type HeadEvent struct {
+ Type string
+ TipSet *types.TipSet
+}
+
+// Constants for HeadEvent types
+const (
+ // HeadEventRevert indicates that the event signals a reversion of a tipset from the chain
+ HeadEventRevert = "revert"
+
+ // HeadEventApply indicates that the event signals the application of a tipset to the chain
+ HeadEventApply = "apply"
+
+ // HeadEventCurrent indicates that the event signals the current known head tipset
+ HeadEventCurrent = "current"
+)
+
+var _ events.TipSetObserver = (*TipSetObserver)(nil)
+
+type TipSetObserver struct {
+ mu sync.Mutex // protects following fields
+ events chan *HeadEvent // created lazily, closed by first cancel call
+ err error // set to non-nil by the first cancel call
+
+ // size of the buffer to maintain for events. Using a buffer reduces chance
+ // that the emitter of events will block when sending to this notifier.
+ bufferSize int
+}
+
+func (h *TipSetObserver) eventsCh() chan *HeadEvent {
+ // caller must hold mu
+ if h.events == nil {
+ h.events = make(chan *HeadEvent, h.bufferSize)
+ }
+ return h.events
+}
+
+func (h *TipSetObserver) HeadEvents() <-chan *HeadEvent {
+ h.mu.Lock()
+ ev := h.eventsCh()
+ h.mu.Unlock()
+ return ev
+}
+
+func (h *TipSetObserver) Err() error {
+ h.mu.Lock()
+ err := h.err
+ h.mu.Unlock()
+ return err
+}
+
+func (h *TipSetObserver) Cancel(err error) {
+ h.mu.Lock()
+ if h.err != nil {
+ h.mu.Unlock()
+ return
+ }
+ h.err = err
+ if h.events == nil {
+ h.events = make(chan *HeadEvent, h.bufferSize)
+ }
+ close(h.events)
+ h.mu.Unlock()
+}
+
+func (h *TipSetObserver) SetCurrent(ctx context.Context, ts *types.TipSet) error {
+ h.mu.Lock()
+ if h.err != nil {
+ err := h.err
+ h.mu.Unlock()
+ return err
+ }
+ ev := h.eventsCh()
+ h.mu.Unlock()
+
+ // This is imprecise since it's inherently racy but good enough to emit
+ // a warning that the event may block the sender
+ if len(ev) == cap(ev) {
+ log.Warnw("head notifier buffer at capacity", "queued", len(ev))
+ }
+
+ log.Debugw("head notifier setting head", "tipset", ts.Key().String())
+ ev <- &HeadEvent{
+ Type: HeadEventCurrent,
+ TipSet: ts,
+ }
+ return nil
+}
+
+func (h *TipSetObserver) Apply(ctx context.Context, from, to *types.TipSet) error {
+ h.mu.Lock()
+ if h.err != nil {
+ err := h.err
+ h.mu.Unlock()
+ return err
+ }
+ ev := h.eventsCh()
+ h.mu.Unlock()
+
+ // This is imprecise since it's inherently racy but good enough to emit
+ // a warning that the event may block the sender
+ if len(ev) == cap(ev) {
+ log.Warnw("head notifier buffer at capacity", "queued", len(ev))
+ }
+
+ log.Debugw("head notifier apply", "to", to.Key().String(), "from", from.Key().String())
+ select {
+ case ev <- &HeadEvent{
+ Type: HeadEventApply,
+ TipSet: to,
+ }:
+ default:
+ log.Errorw("head notifier event channel blocked dropping apply event", "to", to.Key().String(), "from", from.Key().String())
+ }
+ return nil
+}
+
+func (h *TipSetObserver) Revert(ctx context.Context, from, to *types.TipSet) error {
+ h.mu.Lock()
+ if h.err != nil {
+ err := h.err
+ h.mu.Unlock()
+ return err
+ }
+ ev := h.eventsCh()
+ h.mu.Unlock()
+
+ // This is imprecise since it's inherently racy but good enough to emit
+ // a warning that the event may block the sender
+ if len(ev) == cap(ev) {
+ log.Warnw("head notifier buffer at capacity", "queued", len(ev))
+ }
+
+ log.Debugw("head notifier revert", "to", to.Key().String(), "from", from.Key().String())
+ select {
+ case ev <- &HeadEvent{
+ Type: HeadEventRevert,
+ TipSet: from,
+ }:
+ default:
+ log.Errorw("head notifier event channel blocked dropping revert event", "to", to.Key().String(), "from", from.Key().String())
+ }
+ return nil
+}
diff --git a/chain/watch/watcher.go b/chain/watch/watcher.go
new file mode 100644
index 000000000..1484874e7
--- /dev/null
+++ b/chain/watch/watcher.go
@@ -0,0 +1,269 @@
+package watch
+
+import (
"context" + "errors" + "sync" + "sync/atomic" + + "github.com/filecoin-project/lotus/chain/events" + "github.com/filecoin-project/lotus/chain/types" + "github.com/gammazero/workerpool" + logging "github.com/ipfs/go-log/v2" + "go.opencensus.io/stats" + "go.opentelemetry.io/otel" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lily/chain/cache" + "github.com/filecoin-project/lily/chain/indexer" + "github.com/filecoin-project/lily/chain/indexer/tasktype" + "github.com/filecoin-project/lily/metrics" +) + +var log = logging.Logger("lily/chain/watch") + +type WatcherAPI interface { + Observe(obs events.TipSetObserver) *types.TipSet + //Unregister(obs events.TipSetObserver) bool + ChainGetTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) +} + +type WatcherOpt func(w *Watcher) + +func WithTasks(tasks ...string) WatcherOpt { + return func(w *Watcher) { + w.tasks = tasks + } +} + +func WithConfidence(c int) WatcherOpt { + return func(w *Watcher) { + w.confidence = c + } +} + +func WithConcurrentWorkers(p int) WatcherOpt { + return func(w *Watcher) { + w.poolSize = p + } +} + +func WithBufferSize(b int) WatcherOpt { + return func(w *Watcher) { + w.bufferSize = b + } +} + +// Watcher is a task that indexes blocks by following the chain head. +type Watcher struct { + // required + api WatcherAPI + name string + + // options with defaults + confidence int // size of tipset cache + bufferSize int // size of the buffer for incoming tipset notifications. + poolSize int + tasks []string + + // created internally + done chan struct{} + indexer indexer.Indexer + cache *cache.TipSetCache // caches tipsets for possible reversion + pool *workerpool.WorkerPool // used for async tipset indexing + tsObserver *TipSetObserver + + // metric tracking + active int64 // must be accessed using atomic operations, updated automatically. + + // error handling + fatalMu sync.Mutex + fatal error +} + +var ( + WatcherDefaultBufferSize = 5 + WatcherDefaultConfidence = 1 + WatcherDefaultConcurrentWorkers = 1 + WatcherDefaultTasks = tasktype.AllTableTasks +) + +// NewWatcher creates a new Watcher. confidence sets the number of tipsets that will be held +// in a cache awaiting possible reversion. Tipsets will be written to the database when they are evicted from +// the cache due to incoming later tipsets. +func NewWatcher(api WatcherAPI, indexer indexer.Indexer, name string, opts ...WatcherOpt) *Watcher { + w := &Watcher{ + api: api, + name: name, + indexer: indexer, + + bufferSize: WatcherDefaultBufferSize, + confidence: WatcherDefaultConfidence, + poolSize: WatcherDefaultConcurrentWorkers, + tasks: WatcherDefaultTasks, + } + + for _, opt := range opts { + opt(w) + } + return w +} + +func (c *Watcher) init(ctx context.Context) error { + c.done = make(chan struct{}) + c.pool = workerpool.New(c.poolSize) + + c.tsObserver = &TipSetObserver{bufferSize: c.bufferSize} + head := c.api.Observe(c.tsObserver) + if err := c.tsObserver.SetCurrent(ctx, head); err != nil { + return err + } + + c.cache = cache.NewTipSetCache(c.confidence) + if err := c.cache.Warm(ctx, head, c.api.ChainGetTipSet); err != nil { + return err + } + + return nil +} + +func (c *Watcher) close() { + // ensure we clear the fatal error after shut down, this allows the watcher to be restarted without reinitializing its state. + c.setFatalError(nil) + // ensure we shut down the pool when the watcher stops. + c.pool.Stop() + // ensure we reset the tipset cache to avoid process stale state if watcher is restarted. 
+ c.cache.Reset() + // unregister the observer + // TODO https://github.com/filecoin-project/lotus/pull/8441 + //c.api.Unregister(notifier) + // close channel to signal completion + close(c.done) +} + +// Run starts following the chain head and blocks until the context is done or +// an error occurs. +func (c *Watcher) Run(ctx context.Context) error { + if err := c.init(ctx); err != nil { + return err + } + defer c.close() + + for { + select { + case <-ctx.Done(): + return ctx.Err() + case he, ok := <-c.tsObserver.HeadEvents(): + if !ok { + return c.tsObserver.Err() + } + if he != nil && he.TipSet != nil { + metrics.RecordCount(ctx, metrics.WatchHeight, int(he.TipSet.Height())) + } + + if err := c.index(ctx, he); err != nil { + return xerrors.Errorf("index: %w", err) + } + } + } + +} + +func (c *Watcher) Done() <-chan struct{} { + return c.done +} + +func (c *Watcher) index(ctx context.Context, he *HeadEvent) error { + switch he.Type { + case HeadEventCurrent: + err := c.cache.SetCurrent(he.TipSet) + if err != nil { + log.Errorw("tipset cache set current", "error", err.Error(), "reporter", c.name) + } + + // If we have a zero confidence window then we need to notify every tipset we see + if c.confidence == 0 { + if err := c.indexTipSetAsync(ctx, he.TipSet); err != nil { + return xerrors.Errorf("notify tipset: %w", err) + } + } + case HeadEventApply: + tail, err := c.cache.Add(he.TipSet) + if err != nil { + log.Errorw("tipset cache add", "error", err.Error(), "reporter", c.name) + } + + // Send the tipset that fell out of the confidence window to the observer + if tail != nil { + if err := c.indexTipSetAsync(ctx, tail); err != nil { + return xerrors.Errorf("notify tipset: %w", err) + } + } + + case HeadEventRevert: + err := c.cache.Revert(he.TipSet) + if err != nil { + if errors.Is(err, cache.ErrEmptyRevert) { + // The chain is unwinding but our cache is empty. This probably means we have already processed + // the tipset being reverted and may process it again or an alternate heaviest tipset for this height. 
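The confidence window above works because the tipset cache only releases a tipset once newer heads push it out: Watcher.index hands the indexer the evicted tail, never the live head (unless confidence is zero). A rough sketch of that flow under the assumption that cache.TipSetCache behaves as the calls in this file suggest; newHeads and indexAsync are hypothetical stand-ins:

    package example

    import (
    	"github.com/filecoin-project/lotus/chain/types"

    	"github.com/filecoin-project/lily/chain/cache"
    )

    // drainHeads stands in for the apply path of Watcher.index: each applied
    // head goes into the cache, and only the evicted tail is indexed.
    func drainHeads(newHeads []*types.TipSet, indexAsync func(*types.TipSet)) error {
    	c := cache.NewTipSetCache(2) // confidence of two tipsets
    	for _, ts := range newHeads {
    		tail, err := c.Add(ts)
    		if err != nil {
    			return err
    		}
    		if tail != nil {
    			// tail has fallen out of the confidence window and is safe to index
    			indexAsync(tail)
    		}
    	}
    	return nil
    }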
+ metrics.RecordInc(ctx, metrics.TipSetCacheEmptyRevert) + } + log.Errorw("tipset cache revert", "error", err.Error(), "reporter", c.name) + } + } + + metrics.RecordCount(ctx, metrics.TipSetCacheSize, c.cache.Size()) + metrics.RecordCount(ctx, metrics.TipSetCacheDepth, c.cache.Len()) + + log.Debugw("tipset cache", "height", c.cache.Height(), "tail_height", c.cache.TailHeight(), "length", c.cache.Len(), "reporter", c.name) + + return nil +} + +// indexTipSetAsync is called when a new tipset has been discovered +func (c *Watcher) indexTipSetAsync(ctx context.Context, ts *types.TipSet) error { + if err := c.fatalError(); err != nil { + return err + } + + stats.Record(ctx, metrics.WatcherActiveWorkers.M(c.active)) + stats.Record(ctx, metrics.WatcherWaitingWorkers.M(int64(c.pool.WaitingQueueSize()))) + if c.pool.WaitingQueueSize() > c.pool.Size() { + log.Warnw("queuing worker in watcher pool", "waiting", c.pool.WaitingQueueSize(), "reporter", c.name) + } + log.Infow("submitting tipset for async indexing", "height", ts.Height(), "active", c.active, "reporter", c.name) + + ctx, span := otel.Tracer("").Start(ctx, "Watcher.indexTipSetAsync") + c.pool.Submit(func() { + atomic.AddInt64(&c.active, 1) + defer func() { + atomic.AddInt64(&c.active, -1) + span.End() + }() + + ts := ts + success, err := c.indexer.TipSet(ctx, ts, indexer.WithIndexerType(indexer.Watch), indexer.WithTasks(c.tasks)) + if err != nil { + log.Errorw("watcher suffered fatal error", "error", err, "height", ts.Height(), "tipset", ts.Key().String(), "reporter", c.name) + c.setFatalError(err) + return + } + if !success { + log.Warnw("watcher failed to fully index tipset", "height", ts.Height(), "tipset", ts.Key().String(), "reporter", c.name) + } + }) + return nil +} + +func (c *Watcher) setFatalError(err error) { + c.fatalMu.Lock() + c.fatal = err + c.fatalMu.Unlock() +} + +func (c *Watcher) fatalError() error { + c.fatalMu.Lock() + out := c.fatal + c.fatalMu.Unlock() + return out +} diff --git a/chain/watcher_test.go b/chain/watch/watcher_test.go similarity index 71% rename from chain/watcher_test.go rename to chain/watch/watcher_test.go index fc6d1099e..e5b815574 100644 --- a/chain/watcher_test.go +++ b/chain/watch/watcher_test.go @@ -1,4 +1,4 @@ -package chain +package watch import ( "context" @@ -9,7 +9,6 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/lotus/build" - types "github.com/filecoin-project/lotus/chain/types" itestkit "github.com/filecoin-project/lotus/itests/kit" "github.com/filecoin-project/specs-actors/actors/builtin/miner" "github.com/filecoin-project/specs-actors/actors/builtin/power" @@ -21,9 +20,10 @@ import ( "github.com/stretchr/testify/require" "github.com/filecoin-project/lily/chain/actors/builtin" + "github.com/filecoin-project/lily/chain/cache" "github.com/filecoin-project/lily/chain/datasource" - "github.com/filecoin-project/lily/chain/indexer" - "github.com/filecoin-project/lily/lens" + "github.com/filecoin-project/lily/chain/indexer/integrated" + "github.com/filecoin-project/lily/chain/indexer/tasktype" "github.com/filecoin-project/lily/model/blocks" "github.com/filecoin-project/lily/storage" "github.com/filecoin-project/lily/testutil" @@ -58,7 +58,7 @@ func TestWatcher(t *testing.T) { defer func() { require.NoError(t, cleanup()) }() t.Logf("truncating database tables") - err = truncateBlockTables(t, db) + err = testutil.TruncateBlockTables(t, db) require.NoError(t, err, "truncating tables") t.Logf("preparing 
chain") @@ -71,10 +71,11 @@ func TestWatcher(t *testing.T) { taskAPI, err := datasource.NewDataSource(nodeAPI) require.NoError(t, err) - im, err := indexer.NewManager(taskAPI, strg, t.Name(), []string{indexer.BlocksTask}, indexer.WithWindow(builtin.EpochDurationSeconds*time.Second)) + im, err := integrated.NewManager(taskAPI, strg, t.Name(), integrated.WithWindow(builtin.EpochDurationSeconds*time.Second)) require.NoError(t, err, "NewManager") t.Logf("initializing indexer") - idx := NewWatcher(nil, im, t.Name(), 0, 1, 5) + idx := NewWatcher(nil, im, t.Name(), WithConfidence(0), WithConcurrentWorkers(1), WithBufferSize(5), WithTasks(tasktype.BlocksTask)) + idx.cache = cache.NewTipSetCache(0) // the watchers worker pool and cache are initialized in its Run method, since we don't call that here initialize them now. idx.pool = workerpool.New(1) @@ -85,7 +86,7 @@ func TestWatcher(t *testing.T) { t.Logf("mining first block") bm.MineUntilBlock(ctx, full, nil) first := <-newHeads - var bhs blockHeaderList + var bhs testutil.BlockHeaderList for _, head := range first { bhs = append(bhs, head.Val.Blocks()...) } @@ -163,67 +164,3 @@ func TestWatcher(t *testing.T) { } }) } - -type blockHeaderList []*types.BlockHeader - -func (b blockHeaderList) Cids() []string { - var cids []string - for _, bh := range b { - cids = append(cids, bh.Cid().String()) - } - return cids -} - -func (b blockHeaderList) Rounds() []uint64 { - var rounds []uint64 - for _, bh := range b { - for _, ent := range bh.BeaconEntries { - rounds = append(rounds, ent.Round) - } - } - - return rounds -} - -// collectBlockHeaders walks the chain to collect blocks that should be indexed -func collectBlockHeaders(n lens.API, ts *types.TipSet) (blockHeaderList, error) { - blocks := ts.Blocks() - - for _, bh := range ts.Blocks() { - if bh.Height < 2 { - continue - } - - parent, err := n.ChainGetTipSet(context.TODO(), types.NewTipSetKey(bh.Parents...)) - if err != nil { - return nil, err - } - - pblocks, err := collectBlockHeaders(n, parent) - if err != nil { - return nil, err - } - blocks = append(blocks, pblocks...) - - } - return blocks, nil -} - -// truncateBlockTables ensures the indexing tables are empty -func truncateBlockTables(tb testing.TB, db *pg.DB) error { - _, err := db.Exec(`TRUNCATE TABLE block_headers`) - require.NoError(tb, err, "block_headers") - - _, err = db.Exec(`TRUNCATE TABLE block_parents`) - require.NoError(tb, err, "block_parents") - - _, err = db.Exec(`TRUNCATE TABLE drand_block_entries`) - require.NoError(tb, err, "drand_block_entries") - - return nil -} - -type NullHeadNotifier struct{} - -func (NullHeadNotifier) HeadEvents() <-chan *HeadEvent { return nil } -func (NullHeadNotifier) Err() error { return nil } diff --git a/chain/watcher.go b/chain/watcher.go deleted file mode 100644 index a79ab0750..000000000 --- a/chain/watcher.go +++ /dev/null @@ -1,368 +0,0 @@ -package chain - -import ( - "context" - "errors" - "sync" - "sync/atomic" - - "github.com/filecoin-project/lotus/chain/events" - "github.com/filecoin-project/lotus/chain/types" - "github.com/gammazero/workerpool" - "go.opencensus.io/stats" - "go.opentelemetry.io/otel" - "golang.org/x/xerrors" - - "github.com/filecoin-project/lily/chain/indexer" - "github.com/filecoin-project/lily/metrics" -) - -type WatcherAPI interface { - Observe(obs events.TipSetObserver) *types.TipSet - //Unregister(obs events.TipSetObserver) bool - ChainGetTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) -} - -// NewWatcher creates a new Watcher. 
confidence sets the number of tipsets that will be held -// in a cache awaiting possible reversion. Tipsets will be written to the database when they are evicted from -// the cache due to incoming later tipsets. -func NewWatcher(api WatcherAPI, indexer *indexer.Manager, name string, confidence int, poolSize int, bufferSize int) *Watcher { - return &Watcher{ - api: api, - bufferSize: bufferSize, - indexer: indexer, - name: name, - confidence: confidence, - cache: NewTipSetCache(confidence), - poolSize: poolSize, - } -} - -// Watcher is a task that indexes blocks by following the chain head. -type Watcher struct { - api WatcherAPI - indexer *indexer.Manager - name string - confidence int // size of tipset cache - bufferSize int // size of the buffer for incoming tipset notifications. - cache *TipSetCache // caches tipsets for possible reversion - done chan struct{} - - // used for async tipset indexing - poolSize int - pool *workerpool.WorkerPool - active int64 // must be accessed using atomic operations, updated automatically. - - fatalMu sync.Mutex - fatal error -} - -// Run starts following the chain head and blocks until the context is done or -// an error occurs. -func (c *Watcher) Run(ctx context.Context) error { - // init the done channel for each run since jobs may be started and stopped. - c.done = make(chan struct{}) - - // create a worker pool with workers to index tipsets as they become avaiable. - c.pool = workerpool.New(c.poolSize) - - // create a tipset notifier, register it to observe tipset applications and set its current head. - notifier := &TipSetObserver{bufferSize: c.bufferSize} - head := c.api.Observe(notifier) - if err := notifier.SetCurrent(ctx, head); err != nil { - return err - } - - // warm the tipset cache with confidence tipsets - if err := c.cache.Warm(ctx, head, c.api.ChainGetTipSet); err != nil { - return err - } - - defer func() { - // ensure we clear the fatal error after shut down, this allows the watcher to be restarted without reinitializing its state. - c.setFatalError(nil) - // ensure we shut down the pool when the watcher stops. - c.pool.Stop() - // ensure we reset the tipset cache to avoid process stale state if watcher is restarted. 
- c.cache.Reset() - // unregister the observer - // TODO https://github.com/filecoin-project/lotus/pull/8441 - //c.api.Unregister(notifier) - close(c.done) - }() - - for { - select { - case <-ctx.Done(): - return ctx.Err() - case he, ok := <-notifier.HeadEvents(): - if !ok { - return notifier.Err() - } - if he != nil && he.TipSet != nil { - metrics.RecordCount(ctx, metrics.WatchHeight, int(he.TipSet.Height())) - } - - if err := c.index(ctx, he); err != nil { - return xerrors.Errorf("index: %w", err) - } - } - } -} - -func (c *Watcher) Done() <-chan struct{} { - return c.done -} - -func (c *Watcher) index(ctx context.Context, he *HeadEvent) error { - switch he.Type { - case HeadEventCurrent: - err := c.cache.SetCurrent(he.TipSet) - if err != nil { - log.Errorw("tipset cache set current", "error", err.Error(), "reporter", c.name) - } - - // If we have a zero confidence window then we need to notify every tipset we see - if c.confidence == 0 { - if err := c.indexTipSetAsync(ctx, he.TipSet); err != nil { - return xerrors.Errorf("notify tipset: %w", err) - } - } - case HeadEventApply: - tail, err := c.cache.Add(he.TipSet) - if err != nil { - log.Errorw("tipset cache add", "error", err.Error(), "reporter", c.name) - } - - // Send the tipset that fell out of the confidence window to the observer - if tail != nil { - if err := c.indexTipSetAsync(ctx, tail); err != nil { - return xerrors.Errorf("notify tipset: %w", err) - } - } - - case HeadEventRevert: - err := c.cache.Revert(he.TipSet) - if err != nil { - if errors.Is(err, ErrEmptyRevert) { - // The chain is unwinding but our cache is empty. This probably means we have already processed - // the tipset being reverted and may process it again or an alternate heaviest tipset for this height. - metrics.RecordInc(ctx, metrics.TipSetCacheEmptyRevert) - } - log.Errorw("tipset cache revert", "error", err.Error(), "reporter", c.name) - } - } - - metrics.RecordCount(ctx, metrics.TipSetCacheSize, c.cache.Size()) - metrics.RecordCount(ctx, metrics.TipSetCacheDepth, c.cache.Len()) - - log.Debugw("tipset cache", "height", c.cache.Height(), "tail_height", c.cache.TailHeight(), "length", c.cache.Len(), "reporter", c.name) - - return nil -} - -// indexTipSetAsync is called when a new tipset has been discovered -func (c *Watcher) indexTipSetAsync(ctx context.Context, ts *types.TipSet) error { - if err := c.fatalError(); err != nil { - return err - } - - stats.Record(ctx, metrics.WatcherActiveWorkers.M(c.active)) - stats.Record(ctx, metrics.WatcherWaitingWorkers.M(int64(c.pool.WaitingQueueSize()))) - if c.pool.WaitingQueueSize() > c.pool.Size() { - log.Warnw("queuing worker in watcher pool", "waiting", c.pool.WaitingQueueSize(), "reporter", c.name) - } - log.Infow("submitting tipset for async indexing", "height", ts.Height(), "active", c.active, "reporter", c.name) - - ctx, span := otel.Tracer("").Start(ctx, "Watcher.indexTipSetAsync") - c.pool.Submit(func() { - atomic.AddInt64(&c.active, 1) - defer func() { - atomic.AddInt64(&c.active, -1) - span.End() - }() - - ts := ts - success, err := c.indexer.TipSet(ctx, ts) - if err != nil { - log.Errorw("watcher suffered fatal error", "error", err, "height", ts.Height(), "tipset", ts.Key().String(), "reporter", c.name) - c.setFatalError(err) - return - } - if !success { - log.Warnw("watcher failed to fully index tipset", "height", ts.Height(), "tipset", ts.Key().String(), "reporter", c.name) - } - }) - return nil -} - -func (c *Watcher) setFatalError(err error) { - c.fatalMu.Lock() - c.fatal = err - c.fatalMu.Unlock() -} 
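Both the deleted watcher here and its replacement in chain/watch rely on the same non-blocking notification idiom: a buffered channel sent to via select with a default branch, so a slow consumer causes dropped events rather than a blocked chain-events callback. The idiom in isolation, with a simplified event type standing in for HeadEvent:

    package example

    // event is a simplified stand-in for the HeadEvent type used by the observer.
    type event struct{ kind string }

    // tryNotify delivers ev without ever blocking the caller and reports whether
    // the event was accepted or dropped because the buffer was full.
    func tryNotify(ch chan event, ev event) bool {
    	select {
    	case ch <- ev:
    		return true
    	default:
    		// buffer full: drop rather than stall the chain-events goroutine
    		return false
    	}
    }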
- -func (c *Watcher) fatalError() error { - c.fatalMu.Lock() - out := c.fatal - c.fatalMu.Unlock() - return out -} - -// A HeadNotifier reports tipset events that occur at the head of the chain -type HeadNotifier interface { - // HeadEvents returns a channel that receives head events. It may be closed - // by the sender of the events, in which case Err will return a non-nil error - // explaining why. HeadEvents may return nil if this implementation will never - // notify any events. - HeadEvents() <-chan *HeadEvent - - // Err returns the reason for the closing of the HeadEvents channel. - Err() error -} - -// A HeadEvent is a notification of a change at the head of the chain -type HeadEvent struct { - Type string - TipSet *types.TipSet -} - -// Constants for HeadEvent types -const ( - // HeadEventRevert indicates that the event signals a reversion of a tipset from the chain - HeadEventRevert = "revert" - - // HeadEventRevert indicates that the event signals the application of a tipset to the chain - HeadEventApply = "apply" - - // HeadEventRevert indicates that the event signals the current known head tipset - HeadEventCurrent = "current" -) - -var _ events.TipSetObserver = (*TipSetObserver)(nil) - -type TipSetObserver struct { - mu sync.Mutex // protects following fields - events chan *HeadEvent // created lazily, closed by first cancel call - err error // set to non-nil by the first cancel call - - // size of the buffer to maintain for events. Using a buffer reduces chance - // that the emitter of events will block when sending to this notifier. - bufferSize int -} - -func (h *TipSetObserver) eventsCh() chan *HeadEvent { - // caller must hold mu - if h.events == nil { - h.events = make(chan *HeadEvent, h.bufferSize) - } - return h.events -} - -func (h *TipSetObserver) HeadEvents() <-chan *HeadEvent { - h.mu.Lock() - ev := h.eventsCh() - h.mu.Unlock() - return ev -} - -func (h *TipSetObserver) Err() error { - h.mu.Lock() - err := h.err - h.mu.Unlock() - return err -} - -func (h *TipSetObserver) Cancel(err error) { - h.mu.Lock() - if h.err != nil { - h.mu.Unlock() - return - } - h.err = err - if h.events == nil { - h.events = make(chan *HeadEvent, h.bufferSize) - } - close(h.events) - h.mu.Unlock() -} - -func (h *TipSetObserver) SetCurrent(ctx context.Context, ts *types.TipSet) error { - h.mu.Lock() - if h.err != nil { - err := h.err - h.mu.Unlock() - return err - } - ev := h.eventsCh() - h.mu.Unlock() - - // This is imprecise since it's inherently racy but good enough to emit - // a warning that the event may block the sender - if len(ev) == cap(ev) { - log.Warnw("head notifier buffer at capacity", "queued", len(ev)) - } - - log.Debugw("head notifier setting head", "tipset", ts.Key().String()) - ev <- &HeadEvent{ - Type: HeadEventCurrent, - TipSet: ts, - } - return nil -} - -func (h *TipSetObserver) Apply(ctx context.Context, from, to *types.TipSet) error { - h.mu.Lock() - if h.err != nil { - err := h.err - h.mu.Unlock() - return err - } - ev := h.eventsCh() - h.mu.Unlock() - - // This is imprecise since it's inherently racy but good enough to emit - // a warning that the event may block the sender - if len(ev) == cap(ev) { - log.Warnw("head notifier buffer at capacity", "queued", len(ev)) - } - - log.Debugw("head notifier apply", "to", to.Key().String(), "from", from.Key().String()) - select { - case ev <- &HeadEvent{ - Type: HeadEventApply, - TipSet: to, - }: - default: - log.Errorw("head notifier event channel blocked dropping apply event", "to", to.Key().String(), "from", 
from.Key().String()) - } - return nil -} - -func (h *TipSetObserver) Revert(ctx context.Context, from, to *types.TipSet) error { - h.mu.Lock() - if h.err != nil { - err := h.err - h.mu.Unlock() - return err - } - ev := h.eventsCh() - h.mu.Unlock() - - // This is imprecise since it's inherently racy but good enough to emit - // a warning that the event may block the sender - if len(ev) == cap(ev) { - log.Warnw("head notifier buffer at capacity", "queued", len(ev)) - } - - log.Debugw("head notifier revert", "to", to.Key().String(), "from", from.Key().String()) - select { - case ev <- &HeadEvent{ - Type: HeadEventRevert, - TipSet: from, - }: - default: - log.Errorw("head notifier event channel blocked dropping revert event", "to", to.Key().String(), "from", from.Key().String()) - } - return nil -} diff --git a/commands/daemon.go b/commands/daemon.go index d88aa5947..25bcab1c9 100644 --- a/commands/daemon.go +++ b/commands/daemon.go @@ -21,6 +21,7 @@ import ( "github.com/urfave/cli/v2" "golang.org/x/xerrors" + "github.com/filecoin-project/lily/chain/indexer/distributed" "github.com/filecoin-project/lily/commands/util" "github.com/filecoin-project/lily/config" "github.com/filecoin-project/lily/lens/lily" @@ -244,6 +245,7 @@ Note that jobs are not persisted between restarts of the daemon. See node.Override(new(*events.Events), modules.NewEvents), node.Override(new(*schedule.Scheduler), schedule.NewSchedulerDaemon), node.Override(new(*storage.Catalog), modules.NewStorageCatalog), + node.Override(new(*distributed.Catalog), modules.NewQueueCatalog), node.Override(new(*lutil.CacheConfig), modules.CacheConfig(cacheFlags.BlockstoreCacheSize, cacheFlags.StatestoreCacheSize)), // End Injection diff --git a/commands/gap.go b/commands/gap.go index 6524db214..1f2e7a135 100644 --- a/commands/gap.go +++ b/commands/gap.go @@ -10,8 +10,9 @@ import ( "github.com/urfave/cli/v2" "golang.org/x/xerrors" - "github.com/filecoin-project/lily/chain/indexer" + "github.com/filecoin-project/lily/chain/indexer/tasktype" "github.com/filecoin-project/lily/lens/lily" + "github.com/filecoin-project/lily/schedule" ) type gapOps struct { @@ -22,6 +23,7 @@ type gapOps struct { name string from uint64 to uint64 + queue string } var gapFlags gapOps @@ -88,6 +90,13 @@ var GapFillCmd = &cli.Command{ Destination: &gapFlags.from, Required: true, }, + &cli.StringFlag{ + Name: "queue", + Usage: "Name of queue that fill will write missing tipset tasks to.", + EnvVars: []string{"LILY_FILL_QUEUE"}, + Value: "", + Destination: &gapFlags.queue, + }, }, Before: func(cctx *cli.Context) error { from, to := gapFlags.from, gapFlags.to @@ -100,17 +109,11 @@ var GapFillCmd = &cli.Command{ Action: func(cctx *cli.Context) error { ctx := lotuscli.ReqContext(cctx) - api, closer, err := GetAPI(ctx, gapFlags.apiAddr, gapFlags.apiToken) - if err != nil { - return err - } - defer closer() - - var tasks []string + var taskList []string if gapFlags.tasks == "" { - tasks = indexer.AllTableTasks + taskList = tasktype.AllTableTasks } else { - tasks = strings.Split(gapFlags.tasks, ",") + taskList = strings.Split(gapFlags.tasks, ",") } fillName := fmt.Sprintf("fill_%d", time.Now().Unix()) @@ -118,19 +121,44 @@ var GapFillCmd = &cli.Command{ fillName = gapFlags.name } - res, err := api.LilyGapFill(ctx, &lily.LilyGapFillConfig{ - RestartOnFailure: false, - RestartOnCompletion: false, - RestartDelay: 0, - Storage: gapFlags.storage, - Name: fillName, - Tasks: tasks, - To: gapFlags.to, - From: gapFlags.from, - }) + api, closer, err := GetAPI(ctx, gapFlags.apiAddr, 
gapFlags.apiToken) if err != nil { return err } + defer closer() + + var res *schedule.JobSubmitResult + if gapFlags.queue == "" { + res, err = api.LilyGapFill(ctx, &lily.LilyGapFillConfig{ + RestartOnFailure: false, + RestartOnCompletion: false, + RestartDelay: 0, + Storage: gapFlags.storage, + Name: fillName, + Tasks: taskList, + To: gapFlags.to, + From: gapFlags.from, + }) + if err != nil { + return err + } + } else { + res, err = api.LilyGapFillNotify(ctx, &lily.LilyGapFillNotifyConfig{ + RestartOnFailure: false, + RestartOnCompletion: false, + RestartDelay: 0, + Storage: gapFlags.storage, + Name: fillName, + Tasks: taskList, + To: gapFlags.to, + From: gapFlags.from, + Queue: gapFlags.queue, + }) + if err != nil { + return err + } + } + if err := printNewJob(os.Stdout, res); err != nil { return err } @@ -201,11 +229,11 @@ var GapFindCmd = &cli.Command{ findName = gapFlags.name } - var tasks []string + var taskList []string if gapFlags.tasks == "" { - tasks = indexer.AllTableTasks + taskList = tasktype.AllTableTasks } else { - tasks = strings.Split(gapFlags.tasks, ",") + taskList = strings.Split(gapFlags.tasks, ",") } res, err := api.LilyGapFind(ctx, &lily.LilyGapFindConfig{ @@ -213,7 +241,7 @@ var GapFindCmd = &cli.Command{ RestartOnCompletion: false, RestartDelay: 0, Storage: gapFlags.storage, - Tasks: tasks, + Tasks: taskList, Name: findName, To: gapFlags.to, From: gapFlags.from, diff --git a/commands/help.go b/commands/help.go index 81493b92b..8751bf735 100644 --- a/commands/help.go +++ b/commands/help.go @@ -7,7 +7,7 @@ import ( "github.com/urfave/cli/v2" "golang.org/x/xerrors" - "github.com/filecoin-project/lily/chain/indexer" + "github.com/filecoin-project/lily/chain/indexer/tasktype" ) var HelpModelsListCmd = &cli.Command{ @@ -15,8 +15,8 @@ var HelpModelsListCmd = &cli.Command{ Action: func(cctx *cli.Context) error { // initialize tabwriter t := table.NewWriter() t.AppendHeader(table.Row{"Model", "Description"}) - for _, m := range indexer.AllTableTasks { - comment := indexer.TableComment[m] + for _, m := range tasktype.AllTableTasks { + comment := tasktype.TableComment[m] t.AppendRow(table.Row{m, comment}) t.AppendSeparator() } @@ -32,16 +32,16 @@ var HelpModelsDescribeCmd = &cli.Command{ return xerrors.Errorf("model name required, run `lily help models-list`, to see all available models") } mname := cctx.Args().First() - if _, found := indexer.TableLookup[mname]; !found { + if _, found := tasktype.TableLookup[mname]; !found { return xerrors.Errorf("model %s doesn't exist", mname) } - modelFields := indexer.TableFieldComments[mname] + modelFields := tasktype.TableFieldComments[mname] t := table.NewWriter() t.AppendHeader(table.Row{"Fields", "Description"}) t.SortBy([]table.SortBy{ {Name: "Fields", Mode: table.Asc}}) - t.SetCaption(indexer.TableComment[mname]) + t.SetCaption(tasktype.TableComment[mname]) for field, comment := range modelFields { t.AppendRow(table.Row{field, comment}) t.AppendSeparator() diff --git a/commands/index.go b/commands/index.go index 0c2fd982f..72173b2ab 100644 --- a/commands/index.go +++ b/commands/index.go @@ -14,7 +14,7 @@ import ( "golang.org/x/xerrors" "github.com/filecoin-project/lily/chain/actors/builtin" - "github.com/filecoin-project/lily/chain/indexer" + "github.com/filecoin-project/lily/chain/indexer/tasktype" "github.com/filecoin-project/lily/lens/lily" ) @@ -25,6 +25,7 @@ type indexOps struct { apiToken string name string window time.Duration + queue string } var indexFlags indexOps @@ -50,17 +51,9 @@ var IndexTipSetCmd = &cli.Command{ 
return xerrors.Errorf("failed to parse tipset key: %w", err) } - tasks := strings.Split(indexFlags.tasks, ",") + taskList := strings.Split(indexFlags.tasks, ",") if indexFlags.tasks == "*" { - tasks = indexer.AllTableTasks - } - - cfg := &lily.LilyIndexConfig{ - TipSet: tsk, - Name: indexName, - Tasks: tasks, - Storage: indexFlags.storage, - Window: indexFlags.window, + taskList = tasktype.AllTableTasks } api, closer, err := GetAPI(ctx, indexFlags.apiAddr, indexFlags.apiToken) @@ -69,9 +62,31 @@ var IndexTipSetCmd = &cli.Command{ } defer closer() - _, err = api.LilyIndex(ctx, cfg) - if err != nil { - return err + if indexFlags.queue == "" { + cfg := &lily.LilyIndexConfig{ + TipSet: tsk, + Name: indexName, + Tasks: taskList, + Storage: indexFlags.storage, + Window: indexFlags.window, + } + + _, err = api.LilyIndex(ctx, cfg) + if err != nil { + return err + } + } else { + cfg := &lily.LilyIndexNotifyConfig{ + TipSet: tsk, + Name: indexName, + Tasks: taskList, + Queue: indexFlags.queue, + } + + _, err = api.LilyIndexNotify(ctx, cfg) + if err != nil { + return err + } } return nil @@ -94,16 +109,22 @@ var IndexHeightCmd = &cli.Command{ return xerrors.Errorf("height argument required") } - api, closer, err := GetAPI(ctx, indexFlags.apiAddr, indexFlags.apiToken) + height, err := strconv.ParseInt(cctx.Args().First(), 10, 46) if err != nil { return err } - defer closer() - height, err := strconv.ParseInt(cctx.Args().First(), 10, 46) + taskList := strings.Split(indexFlags.tasks, ",") + if indexFlags.tasks == "*" { + taskList = tasktype.AllTableTasks + } + + api, closer, err := GetAPI(ctx, indexFlags.apiAddr, indexFlags.apiToken) if err != nil { return err } + defer closer() + ts, err := api.ChainGetTipSetByHeight(ctx, abi.ChainEpoch(height), types.EmptyTSK) if err != nil { return err @@ -113,22 +134,31 @@ var IndexHeightCmd = &cli.Command{ log.Warnf("height (%d) is null round, indexing height %d", height, ts.Height()) } - tasks := strings.Split(indexFlags.tasks, ",") - if indexFlags.tasks == "*" { - tasks = indexer.AllTableTasks - } - - cfg := &lily.LilyIndexConfig{ - TipSet: ts.Key(), - Name: indexName, - Tasks: tasks, - Storage: indexFlags.storage, - Window: indexFlags.window, - } - - _, err = api.LilyIndex(ctx, cfg) - if err != nil { - return err + if indexFlags.queue == "" { + cfg := &lily.LilyIndexConfig{ + TipSet: ts.Key(), + Name: indexName, + Tasks: taskList, + Storage: indexFlags.storage, + Window: indexFlags.window, + } + + _, err = api.LilyIndex(ctx, cfg) + if err != nil { + return err + } + } else { + cfg := &lily.LilyIndexNotifyConfig{ + TipSet: ts.Key(), + Name: indexName, + Tasks: taskList, + Queue: indexFlags.queue, + } + + _, err = api.LilyIndexNotify(ctx, cfg) + if err != nil { + return err + } } return nil @@ -180,6 +210,13 @@ var IndexCmd = &cli.Command{ Value: builtin.EpochDurationSeconds * time.Second, Destination: &indexFlags.window, }, + &cli.StringFlag{ + Name: "queue", + Usage: "Name of queue that index will write tipsets to. If empty the node will index tipsets locally. 
If populated the node will write the tipset to the queue for tipset-workers to consume", + EnvVars: []string{"LILY_INDEX_QUEUE"}, + Value: "", + Destination: &indexFlags.queue, + }, }, Subcommands: []*cli.Command{ IndexTipSetCmd, diff --git a/commands/setup.go b/commands/setup.go index 9a0f19505..d5e4f4ec1 100644 --- a/commands/setup.go +++ b/commands/setup.go @@ -1,16 +1,19 @@ package commands import ( - octrace "go.opencensus.io/trace" - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/bridge/opencensus" "net/http" "net/http/pprof" "strings" "time" + "github.com/hibiken/asynq" + octrace "go.opencensus.io/trace" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/bridge/opencensus" + "contrib.go.opencensus.io/exporter/prometheus" lotusmetrics "github.com/filecoin-project/lotus/metrics" + asynqmetrics "github.com/hibiken/asynq/x/metrics" logging "github.com/ipfs/go-log/v2" metricsprom "github.com/ipfs/go-metrics-prometheus" _ "github.com/lib/pq" @@ -44,6 +47,11 @@ var VisorTracingFlags VisorTracingOpts type VisorMetricOpts struct { PrometheusPort string + RedisNetwork string + RedisAddr string + RedisUsername string + RedisPassword string + RedisDB int } var VisorMetricFlags VisorMetricOpts @@ -78,7 +86,6 @@ func setupMetrics(flags VisorMetricOpts) error { registry := prom.NewRegistry() goCollector := collectors.NewGoCollector() procCollector := collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}) - registry.MustRegister(goCollector, procCollector) pe, err := prometheus.NewExporter(prometheus.Options{ Namespace: "visor", Registry: registry, @@ -87,6 +94,17 @@ func setupMetrics(flags VisorMetricOpts) error { return err } + inspector := asynq.NewInspector(asynq.RedisClientOpt{ + Addr: flags.RedisAddr, + DB: flags.RedisDB, + Password: flags.RedisPassword, + Username: flags.RedisUsername, + }) + registry.MustRegister( + goCollector, + procCollector, + asynqmetrics.NewQueueMetricsCollector(inspector), + ) // register prometheus with opencensus view.RegisterExporter(pe) view.SetReportingPeriod(2 * time.Second) diff --git a/commands/walk.go b/commands/walk.go index b6fefd7bd..075c98250 100644 --- a/commands/walk.go +++ b/commands/walk.go @@ -11,9 +11,9 @@ import ( "golang.org/x/xerrors" "github.com/filecoin-project/lily/chain/actors/builtin" + "github.com/filecoin-project/lily/chain/indexer/tasktype" "github.com/filecoin-project/lily/lens/lily" - - "github.com/filecoin-project/lily/chain/indexer" + "github.com/filecoin-project/lily/schedule" ) type walkOps struct { @@ -26,6 +26,7 @@ type walkOps struct { apiToken string name string workers int + queue string } var walkFlags walkOps @@ -96,6 +97,13 @@ var WalkCmd = &cli.Command{ Value: 1, Destination: &walkFlags.workers, }, + &cli.StringFlag{ + Name: "queue", + Usage: "Name of queue that walked will write tipsets to. If empty the node will walk and index tipsets locally. 
If populated the node will write tipsets to the queue for tipset-workers to consume", + EnvVars: []string{"LILY_WALK_QUEUE"}, + Value: "", + Destination: &walkFlags.queue, + }, }, Before: func(cctx *cli.Context) error { from, to := walkFlags.from, walkFlags.to @@ -113,22 +121,9 @@ var WalkCmd = &cli.Command{ walkName = walkFlags.name } - tasks := strings.Split(walkFlags.tasks, ",") + taskList := strings.Split(walkFlags.tasks, ",") if walkFlags.tasks == "*" { - tasks = indexer.AllTableTasks - } - - cfg := &lily.LilyWalkConfig{ - Name: walkName, - Tasks: tasks, - Window: walkFlags.window, - From: walkFlags.from, - To: walkFlags.to, - RestartDelay: 0, - RestartOnCompletion: false, - RestartOnFailure: false, - Storage: walkFlags.storage, - Workers: walkFlags.workers, + taskList = tasktype.AllTableTasks } api, closer, err := GetAPI(ctx, walkFlags.apiAddr, walkFlags.apiToken) @@ -137,13 +132,47 @@ var WalkCmd = &cli.Command{ } defer closer() - res, err := api.LilyWalk(ctx, cfg) - if err != nil { - return err + var res *schedule.JobSubmitResult + if walkFlags.queue == "" { + cfg := &lily.LilyWalkConfig{ + Name: walkName, + Tasks: taskList, + Window: walkFlags.window, + From: walkFlags.from, + To: walkFlags.to, + RestartDelay: 0, + RestartOnCompletion: false, + RestartOnFailure: false, + Storage: walkFlags.storage, + Workers: walkFlags.workers, + } + + res, err = api.LilyWalk(ctx, cfg) + if err != nil { + return err + } + } else { + cfg := &lily.LilyWalkNotifyConfig{ + Name: walkName, + Tasks: taskList, + From: walkFlags.from, + To: walkFlags.to, + RestartDelay: 0, + RestartOnCompletion: false, + RestartOnFailure: false, + Queue: walkFlags.queue, + } + + res, err = api.LilyWalkNotify(ctx, cfg) + if err != nil { + return err + } } + if err := printNewJob(os.Stdout, res); err != nil { return err } + return nil }, } diff --git a/commands/watch.go b/commands/watch.go index 6efca16e7..daad55c9b 100644 --- a/commands/watch.go +++ b/commands/watch.go @@ -10,8 +10,9 @@ import ( "github.com/filecoin-project/specs-actors/actors/builtin" "github.com/urfave/cli/v2" - "github.com/filecoin-project/lily/chain/indexer" + "github.com/filecoin-project/lily/chain/indexer/tasktype" "github.com/filecoin-project/lily/lens/lily" + "github.com/filecoin-project/lily/schedule" ) type watchOps struct { @@ -24,6 +25,7 @@ type watchOps struct { name string workers int bufferSize int + queue string } var watchFlags watchOps @@ -94,6 +96,13 @@ var WatchCmd = &cli.Command{ Value: "", Destination: &watchFlags.name, }, + &cli.StringFlag{ + Name: "queue", + Usage: "Name of queue that watcher will write tipsets to. If empty the node will watch and index tipsets locally. 
If populated the node will write tipsets to the queue for tipset-workers to consume", + EnvVars: []string{"LILY_WATCH_QUEUE"}, + Value: "", + Destination: &watchFlags.queue, + }, }, Action: func(cctx *cli.Context) error { ctx := lotuscli.ReqContext(cctx) @@ -103,22 +112,9 @@ var WatchCmd = &cli.Command{ watchName = watchFlags.name } - tasks := strings.Split(watchFlags.tasks, ",") + taskList := strings.Split(watchFlags.tasks, ",") if watchFlags.tasks == "*" { - tasks = indexer.AllTableTasks - } - - cfg := &lily.LilyWatchConfig{ - Name: watchName, - Tasks: tasks, - Window: watchFlags.window, - Confidence: watchFlags.confidence, - RestartDelay: 0, - RestartOnCompletion: false, - RestartOnFailure: true, - Storage: watchFlags.storage, - Workers: watchFlags.workers, - BufferSize: watchFlags.bufferSize, + taskList = tasktype.AllTableTasks } api, closer, err := GetAPI(ctx, watchFlags.apiAddr, watchFlags.apiToken) @@ -127,9 +123,41 @@ var WatchCmd = &cli.Command{ } defer closer() - res, err := api.LilyWatch(ctx, cfg) - if err != nil { - return err + var res *schedule.JobSubmitResult + if watchFlags.queue == "" { + cfg := &lily.LilyWatchConfig{ + Name: watchName, + Tasks: taskList, + Window: watchFlags.window, + Confidence: watchFlags.confidence, + RestartDelay: 0, + RestartOnCompletion: false, + RestartOnFailure: true, + Storage: watchFlags.storage, + Workers: watchFlags.workers, + BufferSize: watchFlags.bufferSize, + } + + res, err = api.LilyWatch(ctx, cfg) + if err != nil { + return err + } + } else { + cfg := &lily.LilyWatchNotifyConfig{ + Name: watchName, + Tasks: taskList, + Confidence: watchFlags.confidence, + RestartDelay: 0, + RestartOnCompletion: false, + RestartOnFailure: true, + BufferSize: watchFlags.bufferSize, + Queue: watchFlags.queue, + } + + res, err = api.LilyWatchNotify(ctx, cfg) + if err != nil { + return err + } } if err := printNewJob(os.Stdout, res); err != nil { return err diff --git a/commands/worker.go b/commands/worker.go new file mode 100644 index 000000000..4afc77a0d --- /dev/null +++ b/commands/worker.go @@ -0,0 +1,98 @@ +package commands + +import ( + "os" + + lotuscli "github.com/filecoin-project/lotus/cli" + "github.com/urfave/cli/v2" + + "github.com/filecoin-project/lily/lens/lily" +) + +var WorkerCmd = &cli.Command{ + Name: "worker-start", + Subcommands: []*cli.Command{ + TipSetWorkerCmd, + }, +} + +var tipsetWorkerFlags struct { + queue string + name string + storage string + concurrency int +} + +var TipSetWorkerCmd = &cli.Command{ + Name: "tipset-processor", + Flags: flagSet( + clientAPIFlagSet, + []cli.Flag{ + &cli.IntFlag{ + Name: "concurrency", + Usage: "Concurrency sets the maximum number of concurrent processing of tasks. 
If set to a zero or negative value it will be set to the number of CPUs usable by the current process.", + Value: 1, + Destination: &tipsetWorkerFlags.concurrency, + }, + &cli.StringFlag{ + Name: "storage", + Usage: "Name of storage that results will be written to.", + EnvVars: []string{"LILY_STORAGE"}, + Value: "", + Destination: &tipsetWorkerFlags.storage, + }, + &cli.StringFlag{ + Name: "name", + Usage: "Name of job for easy identification later.", + EnvVars: []string{"LILY_JOB_NAME"}, + Value: "", + Destination: &tipsetWorkerFlags.name, + }, + &cli.StringFlag{ + Name: "queue", + Usage: "Name of queue worker will consume work from.", + EnvVars: []string{"LILY_TSWORKER_QUEUE"}, + Value: "", + Destination: &tipsetWorkerFlags.queue, + }, + }, + ), + Action: func(cctx *cli.Context) error { + ctx := lotuscli.ReqContext(cctx) + + api, closer, err := GetAPI(ctx, clientAPIFlags.apiAddr, clientAPIFlags.apiToken) + if err != nil { + return err + } + defer closer() + + if tipsetWorkerFlags.name == "" { + id, err := api.ID(ctx) + if err != nil { + return err + } + tipsetWorkerFlags.name = id.ShortString() + } + + cfg := &lily.LilyTipSetWorkerConfig{ + Concurrency: tipsetWorkerFlags.concurrency, + Storage: tipsetWorkerFlags.storage, + Name: tipsetWorkerFlags.name, + RestartOnFailure: true, + RestartOnCompletion: false, + RestartDelay: 0, + Queue: tipsetWorkerFlags.queue, + } + + res, err := api.StartTipSetWorker(ctx, cfg) + if err != nil { + return err + } + + if err := printNewJob(os.Stdout, res); err != nil { + return err + } + + return nil + }, +} diff --git a/config/config.go b/config/config.go index ccbe59ea5..cab00333c 100644 --- a/config/config.go +++ b/config/config.go @@ -19,6 +19,7 @@ type Conf struct { Client config.Client Chainstore config.Chainstore Storage StorageConf + Queue QueueConfig } type StorageConf struct { @@ -42,6 +43,30 @@ type FileStorageConf struct { FilePattern string // pattern to use for filenames written in the path specified } +type QueueConfig struct { + Asynq map[string]AsynqRedisConfig +} + +type AsynqRedisConfig struct { + // Network type to use, either tcp or unix. + // Default is tcp. + Network string + // Redis server address in "host:port" format. + Addr string + // Username to authenticate the current connection when Redis ACLs are used. + // See: https://redis.io/commands/auth. + Username string + // Password to authenticate the current connection. + // See: https://redis.io/commands/auth. + Password string + // Redis DB to select after connecting to a server. + // See: https://redis.io/commands/select. + DB int + // Maximum number of socket connections. + // Default is 10 connections per every CPU as reported by runtime.NumCPU. 
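+	// A zero value keeps the go-redis client default. These fields mirror
+	// asynq.RedisClientOpt, which lily uses when constructing queue clients.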
+ PoolSize int +} + func DefaultConf() *Conf { return &Conf{ Common: config.Common{ @@ -107,6 +132,18 @@ func SampleConf() *Conf { }, }, } + cfg.Queue = QueueConfig{ + Asynq: map[string]AsynqRedisConfig{ + "Asynq1": { + Network: "tcp", + Addr: "127.0.0.1:6379", + Username: "", + Password: "", + DB: 0, + PoolSize: 0, + }, + }, + } return &cfg } diff --git a/go.mod b/go.mod index 6fc9106e3..8bd4210d6 100644 --- a/go.mod +++ b/go.mod @@ -62,7 +62,12 @@ require ( require k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 -require github.com/jedib0t/go-pretty/v6 v6.2.7 +require ( + github.com/hibiken/asynq v0.23.0 + github.com/hibiken/asynq/x v0.0.0-20220413130846-5c723f597e01 + github.com/jedib0t/go-pretty/v6 v6.2.7 + go.opentelemetry.io/otel/trace v1.3.0 +) require ( github.com/DataDog/zstd v1.4.1 // indirect @@ -97,6 +102,7 @@ require ( github.com/dgraph-io/badger/v2 v2.2007.3 // indirect github.com/dgraph-io/ristretto v0.1.0 // indirect github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 // indirect + github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/docker/go-units v0.4.0 // indirect github.com/drand/drand v1.3.0 // indirect github.com/drand/kyber v1.1.7 // indirect @@ -141,6 +147,7 @@ require ( github.com/go-logr/stdr v1.2.0 // indirect github.com/go-ole/go-ole v1.2.5 // indirect github.com/go-pg/zerochecker v0.2.0 // indirect + github.com/go-redis/redis/v8 v8.11.4 // indirect github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect github.com/godbus/dbus/v5 v5.0.4 // indirect github.com/gogo/protobuf v1.3.2 // indirect @@ -296,12 +303,14 @@ require ( github.com/raulk/go-watchdog v1.2.0 // indirect github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect github.com/rivo/uniseg v0.2.0 // indirect + github.com/robfig/cron/v3 v3.0.1 // indirect github.com/rs/cors v1.7.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/shirou/gopsutil v2.18.12+incompatible // indirect github.com/sirupsen/logrus v1.8.1 // indirect github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect + github.com/spf13/cast v1.3.1 // indirect github.com/stretchr/objx v0.2.0 // indirect github.com/syndtr/goleveldb v1.0.0 // indirect github.com/tj/go-spin v1.1.0 // indirect @@ -326,7 +335,6 @@ require ( github.com/zondax/ledger-go v0.12.1 // indirect go.opentelemetry.io/otel/metric v0.25.0 // indirect go.opentelemetry.io/otel/sdk/export/metric v0.25.0 // indirect - go.opentelemetry.io/otel/trace v1.3.0 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/dig v1.12.0 // indirect go4.org v0.0.0-20200411211856-f5505b9728dd // indirect @@ -336,6 +344,7 @@ require ( golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 // indirect golang.org/x/sys v0.0.0-20211209171907-798191bca915 // indirect golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf // indirect + golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect golang.org/x/tools v0.1.7 // indirect google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4 // indirect google.golang.org/grpc v1.40.0 // indirect diff --git a/go.sum b/go.sum index 929ef3e49..801bee103 100644 --- a/go.sum +++ b/go.sum @@ -247,6 +247,8 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 
h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/drand/bls12-381 v0.3.2/go.mod h1:dtcLgPtYT38L3NO6mPDYH0nbpc5tjPassDqiniuAt4Y= @@ -479,6 +481,9 @@ github.com/go-pg/pg/v10 v10.10.6 h1:1vNtPZ4Z9dWUw/TjJwOfFUbF5nEq1IkR6yG8Mq/Iwso= github.com/go-pg/pg/v10 v10.10.6/go.mod h1:GLmFXufrElQHf5uzM3BQlcfwV3nsgnHue5uzjQ6Nqxg= github.com/go-pg/zerochecker v0.2.0 h1:pp7f72c3DobMWOb2ErtZsnrPaSvHd2W4o9//8HtF4mU= github.com/go-pg/zerochecker v0.2.0/go.mod h1:NJZ4wKL0NmTtz0GKCoJ8kym6Xn/EQzXRl2OnAe7MmDo= +github.com/go-redis/redis/v8 v8.11.2/go.mod h1:DLomh7y2e3ggQXQLd1YgmvIfecPJoFl7WU5SOQ/r06M= +github.com/go-redis/redis/v8 v8.11.4 h1:kHoYkfZP6+pe04aFTnhDH6GDROa5yJdHJVNxV3F46Tg= +github.com/go-redis/redis/v8 v8.11.4/go.mod h1:2Z2wHZXdQpCDXEGzqMockDpNyYvi2l4Pxt6RJr792+w= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= @@ -666,6 +671,11 @@ github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2p github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= +github.com/hibiken/asynq v0.21.0/go.mod h1:tyc63ojaW8SJ5SBm8mvI4DDONsguP5HE85EEl4Qr5Ig= +github.com/hibiken/asynq v0.23.0 h1:kmKkNFgqiXBatC8oz94Mer6uvKoGn4STlIVDV5wnKyE= +github.com/hibiken/asynq v0.23.0/go.mod h1:K70jPVx+CAmmQrXot7Dru0D52EO7ob4BIun3ri5z1Qw= +github.com/hibiken/asynq/x v0.0.0-20220413130846-5c723f597e01 h1:J7DifsxqICyQiH/vpb0x0Vo6jO5TLGfgNCvpzkAG4aY= +github.com/hibiken/asynq/x v0.0.0-20220413130846-5c723f597e01/go.mod h1:peQOc7fHI/j0XGdqjIWhTFjQnWryhtkzyzb30oUYdwU= github.com/hodgesds/perf-utils v0.0.8/go.mod h1:F6TfvsbtrF88i++hou29dTXlI2sfsJv+gRZDtmTJkAs= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= @@ -1665,6 +1675,7 @@ github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0 github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg= github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= @@ -1675,8 +1686,10 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J github.com/onsi/gomega v1.9.0/go.mod 
h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= -github.com/onsi/gomega v1.13.0 h1:7lLHu94wT9Ij0o6EWWclhu0aOh32VxhkwEJvzuWPeak= +github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48= github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= +github.com/onsi/gomega v1.16.0 h1:6gjqkI8iiRHMvdccRJM8rVKjCWk6ZIm6FTm3ddIe4/c= +github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333/go.mod h1:Ag6rSXkHIckQmjFBCweJEEt1mrTPBv8b9W4aU/NQWfI= github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0= @@ -1790,6 +1803,8 @@ github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qq github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= +github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= @@ -1869,6 +1884,8 @@ github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2 github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -2083,6 +2100,7 @@ go.uber.org/dig v1.12.0 h1:l1GQeZpEbss0/M4l/ZotuBndCrkMdjnygzgcuOjAdaY= go.uber.org/dig v1.12.0/go.mod h1:X34SnWGr8Fyla9zQNO2GSO2D+TIuqB14OS8JhYocIyw= go.uber.org/fx v1.15.0 h1:kcfBpAm98n0ksanyyZLFE/Q3T7yPi13Ge2liu3TxR+A= go.uber.org/fx v1.15.0/go.mod h1:jI3RazQUhGv5KkpZIRv+kuP4CcgX3fnc0qX8bLnzbx8= +go.uber.org/goleak v0.10.0/go.mod h1:VCZuO8V8mFPlL0F5J5GK1rtHV3DrFcQ1R8ryq7FK0aI= go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= @@ -2260,6 +2278,7 @@ golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201022231255-08b38378de70/go.mod 
h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= @@ -2423,6 +2442,7 @@ golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/itests/node.go b/itests/node.go index f140671d2..0c1e8f77e 100644 --- a/itests/node.go +++ b/itests/node.go @@ -2,14 +2,10 @@ package itests import ( "context" - "github.com/filecoin-project/lily/commands" - "github.com/filecoin-project/lily/commands/util" - "github.com/filecoin-project/lily/config" - "github.com/filecoin-project/lily/lens/lily" - "github.com/filecoin-project/lily/lens/lily/modules" - lutil "github.com/filecoin-project/lily/lens/util" - "github.com/filecoin-project/lily/schedule" - "github.com/filecoin-project/lily/storage" + "io/fs" + "io/ioutil" + "testing" + "github.com/filecoin-project/lotus/chain/events" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/lib/peermgr" @@ -19,9 +15,16 @@ import ( "github.com/filecoin-project/lotus/node/repo" "github.com/multiformats/go-multiaddr" "github.com/stretchr/testify/require" - "io/fs" - "io/ioutil" - "testing" + + "github.com/filecoin-project/lily/chain/indexer/distributed" + "github.com/filecoin-project/lily/commands" + "github.com/filecoin-project/lily/commands/util" + "github.com/filecoin-project/lily/config" + "github.com/filecoin-project/lily/lens/lily" + "github.com/filecoin-project/lily/lens/lily/modules" + lutil "github.com/filecoin-project/lily/lens/util" + "github.com/filecoin-project/lily/schedule" + "github.com/filecoin-project/lily/storage" ) type TestNodeConfig struct { @@ -65,6 +68,7 @@ func NewTestNode(t testing.TB, ctx context.Context, cfg *TestNodeConfig) (lily.L node.Override(new(*events.Events), modules.NewEvents), node.Override(new(*schedule.Scheduler), schedule.NewSchedulerDaemon), node.Override(new(*storage.Catalog), modules.NewStorageCatalog), + node.Override(new(*distributed.Catalog), modules.NewQueueCatalog), // End Injection node.Override(new(dtypes.Bootstrapper), false), diff --git a/itests/validators.go b/itests/validators.go index 5b8d5198a..ee04b401b 100644 --- a/itests/validators.go +++ b/itests/validators.go @@ -11,7 +11,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - 
"github.com/filecoin-project/lily/chain/indexer" + "github.com/filecoin-project/lily/chain/indexer/tasktype" "github.com/filecoin-project/lily/lens/lily" "github.com/filecoin-project/lily/model/actors/common" "github.com/filecoin-project/lily/model/blocks" @@ -21,17 +21,17 @@ import ( ) var TaskModels = map[string][]string{ - indexer.MessagesTask: {"receipts", "block_messages"}, - indexer.BlocksTask: {"block_headers", "block_parents", "drand_block_entries"}, - indexer.ChainConsensusTask: {"chain_consensus"}, - indexer.ActorStatesRawTask: {"actors", "actor_states"}, + tasktype.MessagesTask: {"receipts", "block_messages"}, + tasktype.BlocksTask: {"block_headers", "block_parents", "drand_block_entries"}, + tasktype.ChainConsensusTask: {"chain_consensus"}, + tasktype.ActorStatesRawTask: {"actors", "actor_states"}, } var TaskValidators = map[string][]interface{}{ - indexer.MessagesTask: {BlockMessagesValidator{}, ReceiptsValidator{}}, - indexer.BlocksTask: {BlockHeaderValidator{}, BlockParentsValidator{}, DrandBlockEntriesValidator{}}, - indexer.ChainConsensusTask: {ChainConsensusValidator{}}, - indexer.ActorStatesRawTask: {ActorStatesValidator{}, ActorValidator{}}, + tasktype.MessagesTask: {BlockMessagesValidator{}, ReceiptsValidator{}}, + tasktype.BlocksTask: {BlockHeaderValidator{}, BlockParentsValidator{}, DrandBlockEntriesValidator{}}, + tasktype.ChainConsensusTask: {ChainConsensusValidator{}}, + tasktype.ActorStatesRawTask: {ActorStatesValidator{}, ActorValidator{}}, } type TipSetStateValidator interface { diff --git a/itests/vector_test.go b/itests/vector_test.go index 0bc603669..8ce3c00d8 100644 --- a/itests/vector_test.go +++ b/itests/vector_test.go @@ -12,7 +12,7 @@ import ( logging "github.com/ipfs/go-log/v2" "github.com/stretchr/testify/require" - "github.com/filecoin-project/lily/chain/indexer" + "github.com/filecoin-project/lily/chain/indexer/tasktype" tstorage "github.com/filecoin-project/lily/storage/testing" ) @@ -33,7 +33,7 @@ func TestCalibrationVector(t *testing.T) { tvb := NewVectorWalkValidatorBuilder(vf). WithDatabase(strg). WithRange(vf.From, vf.To). - WithTasks(indexer.ActorStatesRawTask, indexer.BlocksTask, indexer.MessagesTask, indexer.ChainConsensusTask) + WithTasks(tasktype.ActorStatesRawTask, tasktype.BlocksTask, tasktype.MessagesTask, tasktype.ChainConsensusTask) vw := tvb.Build(ctx, t) stop := vw.Run(ctx) diff --git a/lens/interface.go b/lens/interface.go index d750e6a87..056b074fe 100644 --- a/lens/interface.go +++ b/lens/interface.go @@ -22,7 +22,6 @@ type API interface { GetExecutedAndBlockMessagesForTipset(ctx context.Context, ts, pts *types.TipSet) (*TipSetMessages, error) GetMessageExecutionsForTipSet(ctx context.Context, ts, pts *types.TipSet) ([]*MessageExecution, error) } - type StoreAPI interface { // TODO this should be the lotus store not the specs-actors store. 
Store() adt.Store diff --git a/lens/lily/api.go b/lens/lily/api.go index fa3ffb4f4..5ca37ac46 100644 --- a/lens/lily/api.go +++ b/lens/lily/api.go @@ -24,6 +24,10 @@ type LilyAPI interface { LilyWalk(ctx context.Context, cfg *LilyWalkConfig) (*schedule.JobSubmitResult, error) LilySurvey(ctx context.Context, cfg *LilySurveyConfig) (*schedule.JobSubmitResult, error) + LilyIndexNotify(ctx context.Context, cfg *LilyIndexNotifyConfig) (interface{}, error) + LilyWatchNotify(ctx context.Context, cfg *LilyWatchNotifyConfig) (*schedule.JobSubmitResult, error) + LilyWalkNotify(ctx context.Context, cfg *LilyWalkNotifyConfig) (*schedule.JobSubmitResult, error) + LilyJobStart(ctx context.Context, ID schedule.JobID) error LilyJobStop(ctx context.Context, ID schedule.JobID) error LilyJobWait(ctx context.Context, ID schedule.JobID) (*schedule.JobListResult, error) @@ -31,6 +35,7 @@ type LilyAPI interface { LilyGapFind(ctx context.Context, cfg *LilyGapFindConfig) (*schedule.JobSubmitResult, error) LilyGapFill(ctx context.Context, cfg *LilyGapFillConfig) (*schedule.JobSubmitResult, error) + LilyGapFillNotify(ctx context.Context, cfg *LilyGapFillNotifyConfig) (*schedule.JobSubmitResult, error) // SyncState returns the current status of the chain sync system. SyncState(context.Context) (*api.SyncState, error) //perm:read @@ -64,6 +69,8 @@ type LilyAPI interface { NetPubsubScores(context.Context) ([]api.PubsubScore, error) NetAgentVersion(ctx context.Context, p peer.ID) (string, error) NetPeerInfo(context.Context, peer.ID) (*api.ExtendedPeerInfo, error) + + StartTipSetWorker(ctx context.Context, cfg *LilyTipSetWorkerConfig) (*schedule.JobSubmitResult, error) } type LilyIndexConfig struct { @@ -74,6 +81,13 @@ type LilyIndexConfig struct { Window time.Duration } +type LilyIndexNotifyConfig struct { + TipSet types.TipSetKey + Name string + Tasks []string + Queue string +} + type LilyWatchConfig struct { Name string Tasks []string @@ -87,6 +101,17 @@ type LilyWatchConfig struct { BufferSize int // number of tipsets to buffer from notifier service } +type LilyWatchNotifyConfig struct { + Name string + Tasks []string + Confidence int + RestartOnFailure bool + RestartOnCompletion bool + RestartDelay time.Duration + BufferSize int // number of tipsets to buffer from notifier service + Queue string +} + type LilyWalkConfig struct { From int64 To int64 @@ -100,6 +125,17 @@ type LilyWalkConfig struct { Workers int // number of indexing jobs that can run in parallel } +type LilyWalkNotifyConfig struct { + From int64 + To int64 + Name string + Tasks []string + RestartOnFailure bool + RestartOnCompletion bool + RestartDelay time.Duration + Queue string +} + type LilyGapFindConfig struct { RestartOnFailure bool RestartOnCompletion bool @@ -122,6 +158,18 @@ type LilyGapFillConfig struct { Tasks []string // name of tasks to fill gaps for } +type LilyGapFillNotifyConfig struct { + RestartOnFailure bool + RestartOnCompletion bool + RestartDelay time.Duration + Storage string // name of storage system to use, cannot be empty and must be Database storage. + Name string + To uint64 + From uint64 + Tasks []string // name of tasks to fill gaps for + Queue string +} + type LilySurveyConfig struct { Name string Tasks []string @@ -131,3 +179,19 @@ type LilySurveyConfig struct { RestartDelay time.Duration Storage string // name of storage system to use, may be empty } + +type LilyTipSetWorkerConfig struct { + Queue string + + // Concurrency sets the maximum number of concurrent processing of tasks. 
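+	// Each unit of concurrency is one worker goroutine consuming tasks from the queue.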
+ // If set to a zero or negative value, NewServer will overwrite the value + // to the number of CPUs usable by the current process. + Concurrency int + // Storage sets the name of storage system to use, may be empty + Storage string + // Name sets the job name + Name string + RestartOnFailure bool + RestartOnCompletion bool + RestartDelay time.Duration +} diff --git a/lens/lily/impl.go b/lens/lily/impl.go index 877a29204..f9a8a0094 100644 --- a/lens/lily/impl.go +++ b/lens/lily/impl.go @@ -22,11 +22,15 @@ import ( "golang.org/x/xerrors" "github.com/filecoin-project/lily/chain/datasource" + "github.com/filecoin-project/lily/chain/gap" "github.com/filecoin-project/lily/chain/indexer" - "github.com/filecoin-project/lily/lens/lily/modules" - - "github.com/filecoin-project/lily/chain" + "github.com/filecoin-project/lily/chain/indexer/distributed" + "github.com/filecoin-project/lily/chain/indexer/distributed/queue" + "github.com/filecoin-project/lily/chain/indexer/integrated" + "github.com/filecoin-project/lily/chain/walk" + "github.com/filecoin-project/lily/chain/watch" "github.com/filecoin-project/lily/lens" + "github.com/filecoin-project/lily/lens/lily/modules" "github.com/filecoin-project/lily/lens/util" "github.com/filecoin-project/lily/network" "github.com/filecoin-project/lily/schedule" @@ -43,11 +47,15 @@ type LilyNodeAPI struct { full.StateAPI full.SyncAPI common.CommonAPI - Events *events.Events - Scheduler *schedule.Scheduler + Events *events.Events + Scheduler *schedule.Scheduler + + ExecMonitor stmgr.ExecMonitor + CacheConfig *util.CacheConfig + StorageCatalog *storage.Catalog - ExecMonitor stmgr.ExecMonitor - CacheConfig *util.CacheConfig + QueueCatalog *distributed.Catalog + actorStore adt.Store actorStoreInit sync.Once } @@ -65,6 +73,55 @@ func (m *LilyNodeAPI) ChainGetTipSetAfterHeight(ctx context.Context, epoch abi.C return m.ChainAPI.Chain.GetTipsetByHeight(ctx, epoch, ts, false) } +func (m *LilyNodeAPI) StartTipSetWorker(_ context.Context, cfg *LilyTipSetWorkerConfig) (*schedule.JobSubmitResult, error) { + ctx := context.Background() + log.Infow("starting TipSetWorker", "name", cfg.Name) + md := storage.Metadata{ + JobName: cfg.Name, + } + + // create a database connection for this watch, ensure its pingable, and run migrations if needed/configured to. 
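+	// The connection receives the models produced by tasks this worker consumes from the queue.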
+ strg, err := m.StorageCatalog.Connect(ctx, cfg.Storage, md) + if err != nil { + return nil, err + } + + qcfg, err := m.QueueCatalog.AsynqConfig(cfg.Queue) + if err != nil { + return nil, err + } + + taskAPI, err := datasource.NewDataSource(m) + if err != nil { + return nil, err + } + + im, err := integrated.NewManager(taskAPI, strg, cfg.Name) + if err != nil { + return nil, err + } + + db, err := m.StorageCatalog.ConnectAsDatabase(ctx, cfg.Storage, md) + if err != nil { + return nil, err + } + + res := m.Scheduler.Submit(&schedule.JobConfig{ + Name: cfg.Name, + Type: "tipset-worker", + Params: map[string]string{ + "queue": cfg.Queue, + "storage": cfg.Storage, + "concurrency": strconv.Itoa(cfg.Concurrency), + }, + Job: queue.NewAsynqWorker(im, db, cfg.Name, 1, qcfg), + RestartOnFailure: cfg.RestartOnFailure, + RestartOnCompletion: cfg.RestartOnCompletion, + RestartDelay: cfg.RestartDelay, + }) + return res, nil +} + func (m *LilyNodeAPI) LilyIndex(_ context.Context, cfg *LilyIndexConfig) (interface{}, error) { md := storage.Metadata{ JobName: cfg.Name, @@ -84,7 +141,7 @@ func (m *LilyNodeAPI) LilyIndex(_ context.Context, cfg *LilyIndexConfig) (interf } // instantiate an indexer to extract block, message, and actor state data from observed tipsets and persists it to the storage. - im, err := indexer.NewManager(taskAPI, strg, cfg.Name, cfg.Tasks, indexer.WithWindow(cfg.Window)) + im, err := integrated.NewManager(taskAPI, strg, cfg.Name, integrated.WithWindow(cfg.Window)) if err != nil { return nil, err } @@ -94,10 +151,28 @@ func (m *LilyNodeAPI) LilyIndex(_ context.Context, cfg *LilyIndexConfig) (interf return nil, err } - success, err := im.TipSet(ctx, ts) + success, err := im.TipSet(ctx, ts, indexer.WithTasks(cfg.Tasks)) return success, err +} + +func (m *LilyNodeAPI) LilyIndexNotify(_ context.Context, cfg *LilyIndexNotifyConfig) (interface{}, error) { + // the context's passed to these methods live for the duration of the clients request, so make a new one. + ctx := context.Background() + + qcfg, err := m.QueueCatalog.AsynqConfig(cfg.Queue) + if err != nil { + return nil, err + } + ts, err := m.ChainGetTipSet(ctx, cfg.TipSet) + if err != nil { + return nil, err + } + + idx := distributed.NewTipSetIndexer(queue.NewAsynq(qcfg)) + + return idx.TipSet(ctx, ts, indexer.WithIndexerType(indexer.Index), indexer.WithTasks(cfg.Tasks)) } type watcherAPIWrapper struct { @@ -113,6 +188,11 @@ func (m *LilyNodeAPI) LilyWatch(_ context.Context, cfg *LilyWatchConfig) (*sched JobName: cfg.Name, } + wapi := &watcherAPIWrapper{ + Events: m.Events, + ChainModuleAPI: m.ChainModuleAPI, + } + // create a database connection for this watch, ensure its pingable, and run migrations if needed/configured to. strg, err := m.StorageCatalog.Connect(ctx, cfg.Storage, md) if err != nil { @@ -125,11 +205,18 @@ func (m *LilyNodeAPI) LilyWatch(_ context.Context, cfg *LilyWatchConfig) (*sched } // instantiate an indexer to extract block, message, and actor state data from observed tipsets and persists it to the storage. 
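	// tasks are now bound to the job via watch.WithTasks below, rather than fixed at manager construction.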
- im, err := indexer.NewManager(taskAPI, strg, cfg.Name, cfg.Tasks, indexer.WithWindow(cfg.Window)) + idx, err := integrated.NewManager(taskAPI, strg, cfg.Name, integrated.WithWindow(cfg.Window)) if err != nil { return nil, err } + watchJob := watch.NewWatcher(wapi, idx, cfg.Name, + watch.WithTasks(cfg.Tasks...), + watch.WithConfidence(cfg.Confidence), + watch.WithConcurrentWorkers(cfg.Workers), + watch.WithBufferSize(cfg.BufferSize), + ) + res := m.Scheduler.Submit(&schedule.JobConfig{ Name: cfg.Name, Type: "watch", @@ -140,11 +227,8 @@ func (m *LilyNodeAPI) LilyWatch(_ context.Context, cfg *LilyWatchConfig) (*sched "worker": strconv.Itoa(cfg.Workers), "buffer": strconv.Itoa(cfg.BufferSize), }, - Tasks: cfg.Tasks, - Job: chain.NewWatcher(&watcherAPIWrapper{ - Events: m.Events, - ChainModuleAPI: m.ChainModuleAPI, - }, im, cfg.Name, cfg.Confidence, cfg.Workers, cfg.BufferSize), + Tasks: cfg.Tasks, + Job: watchJob, RestartOnFailure: cfg.RestartOnFailure, RestartOnCompletion: cfg.RestartOnCompletion, RestartDelay: cfg.RestartDelay, @@ -153,6 +237,42 @@ func (m *LilyNodeAPI) LilyWatch(_ context.Context, cfg *LilyWatchConfig) (*sched return res, nil } +func (m *LilyNodeAPI) LilyWatchNotify(_ context.Context, cfg *LilyWatchNotifyConfig) (*schedule.JobSubmitResult, error) { + wapi := &watcherAPIWrapper{ + Events: m.Events, + ChainModuleAPI: m.ChainModuleAPI, + } + + qcfg, err := m.QueueCatalog.AsynqConfig(cfg.Queue) + if err != nil { + return nil, err + } + idx := distributed.NewTipSetIndexer(queue.NewAsynq(qcfg)) + + watchJob := watch.NewWatcher(wapi, idx, cfg.Name, + watch.WithTasks(cfg.Tasks...), + watch.WithConfidence(cfg.Confidence), + watch.WithBufferSize(cfg.BufferSize), + ) + + res := m.Scheduler.Submit(&schedule.JobConfig{ + Name: cfg.Name, + Type: "watch-notify", + Params: map[string]string{ + "confidence": strconv.Itoa(cfg.Confidence), + "buffer": strconv.Itoa(cfg.BufferSize), + "queue": cfg.Queue, + }, + Tasks: cfg.Tasks, + Job: watchJob, + RestartOnFailure: cfg.RestartOnFailure, + RestartOnCompletion: cfg.RestartOnCompletion, + RestartDelay: cfg.RestartDelay, + }) + + return res, err +} + func (m *LilyNodeAPI) LilyWalk(_ context.Context, cfg *LilyWalkConfig) (*schedule.JobSubmitResult, error) { // the context's passed to these methods live for the duration of the clients request, so make a new one. ctx := context.Background() @@ -173,7 +293,7 @@ func (m *LilyNodeAPI) LilyWalk(_ context.Context, cfg *LilyWalkConfig) (*schedul } // instantiate an indexer to extract block, message, and actor state data from observed tipsets and persists it to the storage. 
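	// here the task list is handed to walk.NewWalker below instead of being baked into the manager.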
- im, err := indexer.NewManager(taskAPI, strg, cfg.Name, cfg.Tasks, indexer.WithWindow(cfg.Window)) + idx, err := integrated.NewManager(taskAPI, strg, cfg.Name, integrated.WithWindow(cfg.Window)) if err != nil { return nil, err } @@ -188,7 +308,32 @@ func (m *LilyNodeAPI) LilyWalk(_ context.Context, cfg *LilyWalkConfig) (*schedul "storage": cfg.Storage, }, Tasks: cfg.Tasks, - Job: chain.NewWalker(im, m, cfg.Name, cfg.From, cfg.To), + Job: walk.NewWalker(idx, m, cfg.Name, cfg.Tasks, cfg.From, cfg.To), + RestartOnFailure: cfg.RestartOnFailure, + RestartOnCompletion: cfg.RestartOnCompletion, + RestartDelay: cfg.RestartDelay, + }) + + return res, nil +} + +func (m *LilyNodeAPI) LilyWalkNotify(_ context.Context, cfg *LilyWalkNotifyConfig) (*schedule.JobSubmitResult, error) { + qcfg, err := m.QueueCatalog.AsynqConfig(cfg.Queue) + if err != nil { + return nil, err + } + idx := distributed.NewTipSetIndexer(queue.NewAsynq(qcfg)) + + res := m.Scheduler.Submit(&schedule.JobConfig{ + Name: cfg.Name, + Type: "walk-notify", + Params: map[string]string{ + "minHeight": fmt.Sprintf("%d", cfg.From), + "maxHeight": fmt.Sprintf("%d", cfg.To), + "queue": cfg.Queue, + }, + Tasks: cfg.Tasks, + Job: walk.NewWalker(idx, m, cfg.Name, cfg.Tasks, cfg.From, cfg.To), RestartOnFailure: cfg.RestartOnFailure, RestartOnCompletion: cfg.RestartOnCompletion, RestartDelay: cfg.RestartDelay, @@ -213,14 +358,14 @@ func (m *LilyNodeAPI) LilyGapFind(_ context.Context, cfg *LilyGapFindConfig) (*s res := m.Scheduler.Submit(&schedule.JobConfig{ Name: cfg.Name, - Type: "Find", + Type: "find", Tasks: cfg.Tasks, Params: map[string]string{ "minHeight": fmt.Sprintf("%d", cfg.From), "maxHeight": fmt.Sprintf("%d", cfg.To), "storage": cfg.Storage, }, - Job: chain.NewGapIndexer(m, db, cfg.Name, cfg.From, cfg.To, cfg.Tasks), + Job: gap.NewFinder(m, db, cfg.Name, cfg.From, cfg.To, cfg.Tasks), RestartOnFailure: cfg.RestartOnFailure, RestartOnCompletion: cfg.RestartOnCompletion, RestartDelay: cfg.RestartDelay, @@ -245,14 +390,51 @@ func (m *LilyNodeAPI) LilyGapFill(_ context.Context, cfg *LilyGapFillConfig) (*s res := m.Scheduler.Submit(&schedule.JobConfig{ Name: cfg.Name, - Type: "Fill", + Type: "fill", + Params: map[string]string{ + "minHeight": fmt.Sprintf("%d", cfg.From), + "maxHeight": fmt.Sprintf("%d", cfg.To), + "storage": cfg.Storage, + }, + Tasks: cfg.Tasks, + Job: gap.NewFiller(m, db, cfg.Name, cfg.From, cfg.To, cfg.Tasks), + RestartOnFailure: cfg.RestartOnFailure, + RestartOnCompletion: cfg.RestartOnCompletion, + RestartDelay: cfg.RestartDelay, + }) + + return res, nil +} + +func (m *LilyNodeAPI) LilyGapFillNotify(_ context.Context, cfg *LilyGapFillNotifyConfig) (*schedule.JobSubmitResult, error) { + // the context's passed to these methods live for the duration of the clients request, so make a new one. + ctx := context.Background() + + md := storage.Metadata{ + JobName: cfg.Name, + } + + qcfg, err := m.QueueCatalog.AsynqConfig(cfg.Queue) + if err != nil { + return nil, err + } + + // create a database connection for this watch, ensure its pingable, and run migrations if needed/configured to. 
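+	// The notifier only reads gap reports from this database; the fill work itself is enqueued for tipset-workers.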
+ db, err := m.StorageCatalog.ConnectAsDatabase(ctx, cfg.Storage, md) + if err != nil { + return nil, err + } + res := m.Scheduler.Submit(&schedule.JobConfig{ + Name: cfg.Name, + Type: "fill-notify", Params: map[string]string{ "minHeight": fmt.Sprintf("%d", cfg.From), "maxHeight": fmt.Sprintf("%d", cfg.To), "storage": cfg.Storage, + "queue": cfg.Queue, }, Tasks: cfg.Tasks, - Job: chain.NewGapFiller(m, db, cfg.Name, cfg.From, cfg.To, cfg.Tasks), + Job: gap.NewNotifier(m, db, queue.NewAsynq(qcfg), cfg.Name, cfg.From, cfg.To, cfg.Tasks), RestartOnFailure: cfg.RestartOnFailure, RestartOnCompletion: cfg.RestartOnCompletion, RestartDelay: cfg.RestartDelay, diff --git a/lens/lily/modules/config.go b/lens/lily/modules/config.go new file mode 100644 index 000000000..79fcaf899 --- /dev/null +++ b/lens/lily/modules/config.go @@ -0,0 +1,25 @@ +package modules + +import ( + "github.com/filecoin-project/lotus/node/modules/helpers" + "go.uber.org/fx" + + "github.com/filecoin-project/lily/chain/indexer/distributed" + + "github.com/filecoin-project/lily/config" + "github.com/filecoin-project/lily/storage" +) + +func NewStorageCatalog(mctx helpers.MetricsCtx, lc fx.Lifecycle, cfg *config.Conf) (*storage.Catalog, error) { + return storage.NewCatalog(cfg.Storage) +} + +func LoadConf(path string) func(mctx helpers.MetricsCtx, lc fx.Lifecycle) (*config.Conf, error) { + return func(mctx helpers.MetricsCtx, lc fx.Lifecycle) (*config.Conf, error) { + return config.FromFile(path) + } +} + +func NewQueueCatalog(mctx helpers.MetricsCtx, lc fx.Lifecycle, cfg *config.Conf) (*distributed.Catalog, error) { + return distributed.NewCatalog(cfg.Queue) +} diff --git a/lens/lily/modules/events.go b/lens/lily/modules/events.go index 7b07ba015..e5825fce5 100644 --- a/lens/lily/modules/events.go +++ b/lens/lily/modules/events.go @@ -5,9 +5,6 @@ import ( "github.com/filecoin-project/lotus/node/impl/full" "github.com/filecoin-project/lotus/node/modules/helpers" "go.uber.org/fx" - - "github.com/filecoin-project/lily/config" - "github.com/filecoin-project/lily/storage" ) func NewEvents(mctx helpers.MetricsCtx, lc fx.Lifecycle, chainAPI full.ChainModuleAPI, stateAPI full.StateModuleAPI) (*events.Events, error) { @@ -21,13 +18,3 @@ func NewEvents(mctx helpers.MetricsCtx, lc fx.Lifecycle, chainAPI full.ChainModu return events.NewEventsWithConfidence(mctx, api, 10) } - -func NewStorageCatalog(mctx helpers.MetricsCtx, lc fx.Lifecycle, cfg *config.Conf) (*storage.Catalog, error) { - return storage.NewCatalog(cfg.Storage) -} - -func LoadConf(path string) func(mctx helpers.MetricsCtx, lc fx.Lifecycle) (*config.Conf, error) { - return func(mctx helpers.MetricsCtx, lc fx.Lifecycle) (*config.Conf, error) { - return config.FromFile(path) - } -} diff --git a/lens/lily/struct.go b/lens/lily/struct.go index 1a03ec216..ce0ef608f 100644 --- a/lens/lily/struct.go +++ b/lens/lily/struct.go @@ -34,6 +34,11 @@ type LilyAPIStruct struct { LilyWalk func(context.Context, *LilyWalkConfig) (*schedule.JobSubmitResult, error) `perm:"read"` LilySurvey func(context.Context, *LilySurveyConfig) (*schedule.JobSubmitResult, error) `perm:"read"` + LilyIndexNotify func(ctx context.Context, config *LilyIndexNotifyConfig) (interface{}, error) `perm:"read"` + LilyWatchNotify func(ctx context.Context, config *LilyWatchNotifyConfig) (*schedule.JobSubmitResult, error) `perm:"read"` + LilyWalkNotify func(ctx context.Context, config *LilyWalkNotifyConfig) (*schedule.JobSubmitResult, error) `perm:"read"` + LilyGapFillNotify func(ctx context.Context, config 
*LilyGapFillNotifyConfig) (*schedule.JobSubmitResult, error) `perm:"read"` + LilyJobStart func(ctx context.Context, ID schedule.JobID) error `perm:"read"` LilyJobStop func(ctx context.Context, ID schedule.JobID) error `perm:"read"` LilyJobWait func(ctx context.Context, ID schedule.JobID) (*schedule.JobListResult, error) `perm:"read"` @@ -70,6 +75,8 @@ type LilyAPIStruct struct { NetPubsubScores func(context.Context) ([]api.PubsubScore, error) `perm:"read"` NetAgentVersion func(ctx context.Context, p peer.ID) (string, error) `perm:"read"` NetPeerInfo func(context.Context, peer.ID) (*api.ExtendedPeerInfo, error) `perm:"read"` + + StartTipSetWorker func(ctx context.Context, cfg *LilyTipSetWorkerConfig) (*schedule.JobSubmitResult, error) `perm:"read"` } } @@ -216,3 +223,23 @@ func (s *LilyAPIStruct) NetAgentVersion(ctx context.Context, p peer.ID) (string, func (s *LilyAPIStruct) NetPeerInfo(ctx context.Context, p peer.ID) (*api.ExtendedPeerInfo, error) { return s.Internal.NetPeerInfo(ctx, p) } + +func (s *LilyAPIStruct) StartTipSetWorker(ctx context.Context, cfg *LilyTipSetWorkerConfig) (*schedule.JobSubmitResult, error) { + return s.Internal.StartTipSetWorker(ctx, cfg) +} + +func (s *LilyAPIStruct) LilyIndexNotify(ctx context.Context, cfg *LilyIndexNotifyConfig) (interface{}, error) { + return s.Internal.LilyIndexNotify(ctx, cfg) +} + +func (s *LilyAPIStruct) LilyWatchNotify(ctx context.Context, cfg *LilyWatchNotifyConfig) (*schedule.JobSubmitResult, error) { + return s.Internal.LilyWatchNotify(ctx, cfg) +} + +func (s *LilyAPIStruct) LilyWalkNotify(ctx context.Context, cfg *LilyWalkNotifyConfig) (*schedule.JobSubmitResult, error) { + return s.Internal.LilyWalkNotify(ctx, cfg) +} + +func (s *LilyAPIStruct) LilyGapFillNotify(ctx context.Context, cfg *LilyGapFillNotifyConfig) (*schedule.JobSubmitResult, error) { + return s.Internal.LilyGapFillNotify(ctx, cfg) +} diff --git a/main.go b/main.go index a2754efb2..6a95888c2 100644 --- a/main.go +++ b/main.go @@ -121,6 +121,37 @@ func main() { Value: ":9991", Destination: &commands.VisorMetricFlags.PrometheusPort, }, + &cli.StringFlag{ + Name: "redis-addr", + EnvVars: []string{"LILY_REDIS_ADDR"}, + Usage: `Redis server address in "host:port" format`, + Value: "127.0.0.1:6379", + Destination: &commands.VisorMetricFlags.RedisAddr, + }, + + &cli.StringFlag{ + Name: "redis-username", + EnvVars: []string{"LILY_REDIS_USERNAME"}, + Usage: `Username to authenticate the current connection when redis ACLs are used.`, + Value: "", + Destination: &commands.VisorMetricFlags.RedisUsername, + }, + + &cli.StringFlag{ + Name: "redis-password", + EnvVars: []string{"LILY_REDIS_PASSWORD"}, + Usage: `Password to authenticate the current connection`, + Value: "", + Destination: &commands.VisorMetricFlags.RedisPassword, + }, + + &cli.IntFlag{ + Name: "redis-db", + EnvVars: []string{"LILY_REDIS_DB"}, + Usage: `Redis DB to select after connection to server`, + Value: 0, + Destination: &commands.VisorMetricFlags.RedisDB, + }, }, HideHelp: true, Metadata: commands.Metadata(), @@ -142,6 +173,7 @@ func main() { commands.WaitApiCmd, commands.WalkCmd, commands.WatchCmd, + commands.WorkerCmd, }, } app.Setup() diff --git a/storage/sql.go b/storage/sql.go index 5160cb3c1..695e2a03e 100644 --- a/storage/sql.go +++ b/storage/sql.go @@ -28,6 +28,7 @@ import ( "github.com/filecoin-project/lily/model/derived" "github.com/filecoin-project/lily/model/messages" "github.com/filecoin-project/lily/model/msapprovals" + "github.com/filecoin-project/lily/model/visor" 
"github.com/filecoin-project/lily/schemas" ) @@ -489,3 +490,61 @@ func GenerateUpsertStrings(model interface{}) (string, string) { } return conflict.String(), upsert.String() } + +// returns a map of heights to missing tasks, and a list of heights to iterate the map in order with. +func (d *Database) ConsolidateGaps(ctx context.Context, minHeight, maxHeight uint64, tasks ...string) (map[int64][]string, []int64, error) { + gaps, err := d.QueryGaps(ctx, minHeight, maxHeight, tasks...) + if err != nil { + return nil, nil, err + } + // used to walk gaps in order, should help optimize some caching. + heights := make([]int64, 0, len(gaps)) + out := make(map[int64][]string) + for _, gap := range gaps { + if _, ok := out[gap.Height]; !ok { + heights = append(heights, gap.Height) + } + out[gap.Height] = append(out[gap.Height], gap.Task) + } + sort.Slice(heights, func(i, j int) bool { + return heights[i] < heights[j] + }) + return out, heights, nil +} + +func (d *Database) QueryGaps(ctx context.Context, minHeight, maxHeight uint64, tasks ...string) ([]*visor.GapReport, error) { + var out []*visor.GapReport + if len(tasks) != 0 { + if err := d.AsORM().ModelContext(ctx, &out). + Order("height desc"). + Where("status = ?", "GAP"). + Where("task = ANY (?)", pg.Array(tasks)). + Where("height >= ?", minHeight). + Where("height <= ?", maxHeight). + Select(); err != nil { + return nil, xerrors.Errorf("querying gap reports: %w", err) + } + } else { + if err := d.AsORM().ModelContext(ctx, &out). + Order("height desc"). + Where("status = ?", "GAP"). + Where("height >= ?", minHeight). + Where("height <= ?", maxHeight). + Select(); err != nil { + return nil, xerrors.Errorf("querying gap reports: %w", err) + } + } + return out, nil +} + +// mark all gaps at height as filled. +func (d *Database) SetGapsFilled(ctx context.Context, height int64, tasks ...string) error { + if _, err := d.AsORM().ModelContext(ctx, &visor.GapReport{}). + Set("status = 'FILLED'"). + Where("height = ?", height). + Where("task = ANY (?)", pg.Array(tasks)). 
+ Update(); err != nil { + return err + } + return nil +} diff --git a/testutil/db.go b/testutil/db.go index ca437cc3c..8e8f476db 100644 --- a/testutil/db.go +++ b/testutil/db.go @@ -90,3 +90,17 @@ func tryTestDatabaseLock(ctx context.Context, db *pg.DB) func(context.Context) ( return acquired, err } } + +// TruncateBlockTables ensures the indexing tables are empty +func TruncateBlockTables(tb testing.TB, db *pg.DB) error { + _, err := db.Exec(`TRUNCATE TABLE block_headers`) + require.NoError(tb, err, "block_headers") + + _, err = db.Exec(`TRUNCATE TABLE block_parents`) + require.NoError(tb, err, "block_parents") + + _, err = db.Exec(`TRUNCATE TABLE drand_block_entries`) + require.NoError(tb, err, "drand_block_entries") + + return nil +} diff --git a/testutil/filtypes.go b/testutil/filtypes.go index 6686a5833..cf0eaeecc 100644 --- a/testutil/filtypes.go +++ b/testutil/filtypes.go @@ -1,6 +1,7 @@ package testutil import ( + "context" "testing" "github.com/filecoin-project/go-state-types/abi" @@ -8,6 +9,8 @@ import ( "github.com/filecoin-project/lotus/chain/types" tutils "github.com/filecoin-project/specs-actors/support/testing" "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lily/lens" ) func FakeTipset(t testing.TB) *types.TipSet { @@ -31,3 +34,48 @@ func FakeBlockHeader(t testing.TB, height int64, stateRoot cid.Cid) *types.Block Timestamp: 0, } } + +type BlockHeaderList []*types.BlockHeader + +func (b BlockHeaderList) Cids() []string { + var cids []string + for _, bh := range b { + cids = append(cids, bh.Cid().String()) + } + return cids +} + +func (b BlockHeaderList) Rounds() []uint64 { + var rounds []uint64 + for _, bh := range b { + for _, ent := range bh.BeaconEntries { + rounds = append(rounds, ent.Round) + } + } + + return rounds +} + +// CollectBlockHeaders walks the chain to collect blocks that should be indexed +func CollectBlockHeaders(n lens.API, ts *types.TipSet) (BlockHeaderList, error) { + blocks := ts.Blocks() + + for _, bh := range ts.Blocks() { + if bh.Height < 2 { + continue + } + + parent, err := n.ChainGetTipSet(context.TODO(), types.NewTipSetKey(bh.Parents...)) + if err != nil { + return nil, err + } + + pblocks, err := CollectBlockHeaders(n, parent) + if err != nil { + return nil, err + } + blocks = append(blocks, pblocks...) + + } + return blocks, nil +}
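
End-to-end, the queue support above is wired together through the config file and two commands. A minimal sketch, assuming the TOML layout implied by SampleConf (queue name "Asynq1", a Redis instance on localhost) and a hypothetical storage connection named "db1":

    [Queue.Asynq.Asynq1]
    Network = "tcp"
    Addr = "127.0.0.1:6379"
    Username = ""
    Password = ""
    DB = 0
    PoolSize = 0

    # producer: watch the chain and push observed tipsets onto the queue
    lily watch --queue=Asynq1

    # consumer: pull tipsets from the queue, index them, persist results to storage
    lily worker-start tipset-processor --queue=Asynq1 --storage=db1 --concurrency=4

Note that in notify mode the producer never touches storage; only the tipset-processor needs --storage, which is why LilyWatchNotifyConfig and LilyWalkNotifyConfig carry no Storage field.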