
Commit

Merge pull request #410 from oasisprotocol/andrew7234/ci-e2e
Andrew7234/ci e2e
Andrew7234 committed May 19, 2023
2 parents e8be16c + 1c8e4e6 commit 9ae106d
Showing 104 changed files with 19,356 additions and 65 deletions.
29 changes: 29 additions & 0 deletions .github/workflows/ci-test.yaml
@@ -113,3 +113,32 @@ jobs:
            /tmp/indexer_db.sql
            /tmp/*.log
        if: success() || failure() # but not if job is manually cancelled

  test-e2e-regression:
    runs-on: ubuntu-20.04
    steps:
      - name: Checkout code
        uses: actions/checkout@v3
      - name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version: 1.19
      - name: Autogenerate Go code
        run: |
          go install github.com/deepmap/oapi-codegen/cmd/oapi-codegen@v1.12
          make codegen-go
      - name: Build Go
        run: |
          make oasis-indexer
      - name: Start db
        run: |
          make postgres
      - name: Run e2e regression tests
        run: |
          make test-e2e-regression
      - uses: actions/upload-artifact@v3
        with:
          name: Actual indexer api responses
          path: |
            tests/e2e_regression/actual
        if: failure() # but not if success or job is manually cancelled
3 changes: 1 addition & 2 deletions .gitignore
@@ -10,8 +10,7 @@ docker/node/etc/genesis.json

# Testing data
tests/e2e/testnet/net-runner
tests/e2e_regression/expected
tests/e2e_regression/actual
tests/e2e_regression/actual/*

# Log output.
**/*.log
7 changes: 4 additions & 3 deletions Makefile
@@ -60,11 +60,12 @@ test-e2e:

fill-cache-for-e2e-regression: oasis-indexer
	cp tests/e2e_regression/e2e_config.yml /tmp/indexer_fill_e2e_regression_cache.yml
	sed -E -i='' 's/query_on_cache_miss: false/query_on_cache_miss: true/g' /tmp/indexer_fill_e2e_regression_cache.yml
	sed -i -E 's/query_on_cache_miss: false/query_on_cache_miss: true/g' /tmp/indexer_fill_e2e_regression_cache.yml
	./oasis-indexer --config /tmp/indexer_fill_e2e_regression_cache.yml analyze

# Run the api tests locally, assuming the environment is set up with an oasis-node that is
# accessible as specified in the config file.
test-e2e-regression: export TZ=UTC
test-e2e-regression: oasis-indexer
	./oasis-indexer --config tests/e2e_regression/e2e_config.yml analyze
	@$(ECHO) "$(CYAN)*** Indexer finished; starting api tests...$(OFF)"
@@ -117,9 +118,9 @@ postgres:
		-e POSTGRES_PASSWORD=password \
		-e POSTGRES_DB=indexer \
		-d postgres -c log_statement=all
	@sleep 1 # Experimentally enough for postgres to start accepting connections
	@sleep 3 # Experimentally enough for postgres to start accepting connections
	# Create a read-only user to mimic the production environment.
	docker exec -it indexer-postgres psql -U rwuser indexer -c "CREATE ROLE indexer_readonly; CREATE USER api WITH PASSWORD 'password' IN ROLE indexer_readonly;"
	docker exec indexer-postgres psql -U rwuser indexer -c "CREATE ROLE indexer_readonly; CREATE USER api WITH PASSWORD 'password' IN ROLE indexer_readonly;"

# Attach to the local DB from "make postgres"
psql:
59 changes: 39 additions & 20 deletions analyzer/block/block.go
@@ -7,6 +7,7 @@ package block
import (
	"context"
	"fmt"
	"math"
	"time"

	"github.com/jackc/pgx/v5"
@@ -40,8 +41,6 @@ type BlockProcessor interface {
	//
	// The implementation must commit processed blocks (update the chain.processed_blocks record with processed_time timestamp).
	ProcessBlock(ctx context.Context, height uint64) error
	// SourceLatestBlockHeight returns the latest block height available in the source storage.
	SourceLatestBlockHeight(ctx context.Context) (uint64, error)
}

var _ analyzer.Analyzer = (*blockBasedAnalyzer)(nil)
@@ -165,6 +164,15 @@ func (b *blockBasedAnalyzer) Start(ctx context.Context) {
		return
	}

	// The default max block height that the indexer will process. This value is not
	// indicative of the maximum height the Oasis blockchain can reach; rather it
	// is set to golang's maximum int64 value for convenience.
	var to uint64 = math.MaxInt64
	// Clamp the latest block height to the configured range.
	if b.config.To != 0 {
		to = b.config.To
	}

	// Start processing blocks.
	backoff, err := util.NewBackoff(
		100*time.Millisecond,
@@ -192,23 +200,6 @@ func (b *blockBasedAnalyzer) Start(ctx context.Context) {
		}
		batchCtx, batchCtxCancel = context.WithTimeout(ctx, lockExpiryMinutes*time.Minute)

		var to uint64
		// Get the latest available block on the source.
		latestBlockHeight, err := b.processor.SourceLatestBlockHeight(ctx)
		if err != nil {
			b.logger.Error("failed to query latest block height on source",
				"err", err,
			)
			backoff.Failure()
			continue
		}
		to = latestBlockHeight

		// Clamp the latest block height to the configured range.
		if b.config.To != 0 && b.config.To < latestBlockHeight {
			to = b.config.To
		}

		// Pick a batch of blocks to process.
		b.logger.Info("picking a batch of blocks to process", "from", b.config.From, "to", to)
		heights, err := b.fetchBatchForProcessing(ctx, b.config.From, to)
@@ -223,13 +214,41 @@ func (b *blockBasedAnalyzer) Start(ctx context.Context) {
		// Process blocks.
		b.logger.Debug("picked blocks for processing", "heights", heights)
		for _, height := range heights {
			// If running in slow-sync, we are likely at the tip of the chain and are picking up
			// blocks that are not yet available. In this case, wait before processing every block,
			// so that the backoff mechanism can tweak the per-block wait time as needed.
			//
			// Note: If the batch size is greater than 50, the time required to process the blocks
			// in the batch will exceed the current lock expiry of 5min. The analyzer will terminate
			// the batch early and attempt to refresh the locks for a new batch.
			if b.slowSync {
				select {
				case <-time.After(backoff.Timeout()):
					// Process the next block
				case <-batchCtx.Done():
					b.logger.Info("batch locks expiring; refreshing batch")
					break
				case <-ctx.Done():
					batchCtxCancel()
					b.logger.Warn("shutting down block analyzer", "reason", ctx.Err())
					return
				}
			}
			b.logger.Info("processing block", "height", height)

			bCtx, cancel := context.WithTimeout(batchCtx, processBlockTimeout)
			if err := b.processor.ProcessBlock(bCtx, height); err != nil {
				cancel()
				backoff.Failure()
				b.logger.Error("error processing block", "height", height, "err", err)

				if err == analyzer.ErrOutOfRange {
					b.logger.Info("no data available; will retry",
						"height", height,
						"retry_interval_ms", backoff.Timeout().Milliseconds(),
					)
				} else {
					b.logger.Error("error processing block", "height", height, "err", err)
				}

				// If running in slow-sync, stop processing the batch on error so that
				// the blocks are always processed in order.
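
The slow-sync wait added above is the core of the change: at the tip of the chain, the analyzer now paces itself instead of repeatedly requesting blocks that do not exist yet, and it abandons the batch when its locks are about to expire. Below is a simplified, self-contained sketch of that wait pattern; it is not the analyzer's actual code: the fixed 100ms interval stands in for util.Backoff's adaptive timeout, and processBatch is a hypothetical name.

package main

import (
	"context"
	"fmt"
	"time"
)

// processBatch waits before each block so that a backoff mechanism could adapt
// the per-block delay, and stops early when either the batch lock or the
// analyzer itself is shutting down.
func processBatch(ctx, batchCtx context.Context, heights []uint64) {
	const interval = 100 * time.Millisecond // stand-in for backoff.Timeout()
	for _, height := range heights {
		select {
		case <-time.After(interval):
			// Proceed with the next block.
		case <-batchCtx.Done():
			fmt.Println("batch locks expiring; refreshing batch")
			return
		case <-ctx.Done():
			fmt.Println("shutting down block analyzer:", ctx.Err())
			return
		}
		fmt.Println("processing block", height)
	}
}

func main() {
	ctx := context.Background()
	// Stands in for the 5-minute lock expiry in the real analyzer.
	batchCtx, cancel := context.WithTimeout(ctx, time.Second)
	defer cancel()
	processBatch(ctx, batchCtx, []uint64{8_048_956, 8_048_957, 8_048_958})
}

As in the diff, the batch context bounds how long one batch may hold its locks, while the outer context handles analyzer shutdown.
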
5 changes: 0 additions & 5 deletions analyzer/block/block_test.go
@@ -73,11 +73,6 @@ func (m *mockProcessor) ProcessBlock(ctx context.Context, height uint64) error {
	return nil
}

// SourceLatestBlockHeight implements block.BlockProcessor.
func (m *mockProcessor) SourceLatestBlockHeight(ctx context.Context) (uint64, error) {
	return m.latestBlockHeight, nil
}

var _ block.BlockProcessor = (*mockProcessor)(nil)

func setupDB(t *testing.T) *postgres.Client {
11 changes: 4 additions & 7 deletions analyzer/consensus/consensus.go
@@ -175,22 +175,19 @@ func (m *processor) processGenesis(ctx context.Context, chainContext string) err
	return nil
}

// Implements BlockProcessor interface.
func (m *processor) SourceLatestBlockHeight(ctx context.Context) (uint64, error) {
	h, err := m.source.LatestBlockHeight(ctx)
	return uint64(h), err
}

// Implements BlockProcessor interface.
func (m *processor) ProcessBlock(ctx context.Context, uheight uint64) error {
	if uheight >= math.MaxInt64 {
	if uheight > math.MaxInt64 {
		return fmt.Errorf("height %d is too large", uheight)
	}
	height := int64(uheight)

	// Fetch all data.
	data, err := m.source.AllData(ctx, height)
	if err != nil {
		if strings.Contains(err.Error(), fmt.Sprintf("%d must be less than or equal to the current blockchain height", height)) {
			return analyzer.ErrOutOfRange
		}
		return err
	}

9 changes: 4 additions & 5 deletions analyzer/runtime/runtime.go
@@ -3,6 +3,7 @@ package runtime
import (
	"context"
	"fmt"
	"strings"

	sdkConfig "github.com/oasisprotocol/oasis-sdk/client-sdk/go/config"
	"github.com/oasisprotocol/oasis-sdk/client-sdk/go/modules/rewards"
@@ -94,6 +95,9 @@ func (m *processor) ProcessBlock(ctx context.Context, round uint64) error {
	// Fetch all data.
	data, err := m.source.AllData(ctx, round)
	if err != nil {
		if strings.Contains(err.Error(), "roothash: block not found") {
			return analyzer.ErrOutOfRange
		}
		return err
	}

@@ -254,8 +258,3 @@ func (m *processor) queueDbUpdates(batch *storage.QueryBatch, data *BlockData) {
		batch.Queue(queries.RuntimeEVMTokenBalanceAnalysisInsert, m.runtime, key.TokenAddress, key.AccountAddress, data.Header.Round)
	}
}

// Implements BlockProcessor interface.
func (m *processor) SourceLatestBlockHeight(ctx context.Context) (uint64, error) {
	return m.source.LatestBlockHeight(ctx)
}
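
Both the consensus and runtime analyzers now translate the node's "height/round not yet available" errors into the analyzer.ErrOutOfRange sentinel by matching the error text, which the block analyzer treats as "no data yet, back off and retry" rather than a hard failure. The following standalone sketch illustrates only that mapping pattern; errOutOfRange and mapNodeError are hypothetical stand-ins for analyzer.ErrOutOfRange and the inline checks in the diff.

package main

import (
	"errors"
	"fmt"
	"strings"
)

// errOutOfRange mirrors the role of analyzer.ErrOutOfRange: a sentinel the
// caller can compare against to decide "no data at this height yet".
var errOutOfRange = errors.New("block out of range")

// mapNodeError recognizes the node's "height too high" error purely by its
// text and converts it into the sentinel; any other error passes through.
func mapNodeError(err error, height int64) error {
	if err != nil && strings.Contains(err.Error(),
		fmt.Sprintf("%d must be less than or equal to the current blockchain height", height)) {
		return errOutOfRange
	}
	return err
}

func main() {
	tooHigh := errors.New("rpc error: 8049057 must be less than or equal to the current blockchain height")
	fmt.Println(mapNodeError(tooHigh, 8049057) == errOutOfRange)                          // true
	fmt.Println(mapNodeError(errors.New("connection refused"), 8049057) == errOutOfRange) // false
}
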
18 changes: 16 additions & 2 deletions cmd/api/api.go
@@ -4,6 +4,7 @@ package api
import (
	"net/http"
	"os"
	"strings"
	"time"

	"github.com/go-chi/chi/v5"
@@ -36,6 +37,20 @@ var (
	}
)

// specFileServer is a wrapper around `http.FileServer` that
// serves files from `rootDir`, and also hardcodes the MIME type for
// YAML files to `application/x-yaml`. The latter is a hack to
// make the HTTP headers independent of the OS's MIME type database.
type specFileServer struct{ rootDir string }

func (srv specFileServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if strings.HasSuffix(r.URL.Path, ".yaml") || strings.HasSuffix(r.URL.Path, ".yml") {
		w.Header().Set("Content-Type", "application/x-yaml")
	}
	// "api/spec" is the local path from which we serve the files.
	http.FileServer(http.Dir(srv.rootDir)).ServeHTTP(w, r)
}

func runServer(cmd *cobra.Command, args []string) {
	// Initialize config.
	cfg, err := config.InitConfig(configFile)
@@ -118,8 +133,7 @@ func (s *Service) Start() {
	// Routes to static files (openapi spec).
	staticFileRouter := chi.NewRouter()
	staticFileRouter.Route("/v1/spec", func(r chi.Router) {
		specServer := http.FileServer(http.Dir("api/spec"))
		r.Handle("/*", http.StripPrefix("/v1/spec", specServer))
		r.Handle("/*", http.StripPrefix("/v1/spec", specFileServer{rootDir: "api/spec"}))
	})

	// A "strict handler" that handles the great majority of requests.
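
The specFileServer wrapper above exists solely so that .yaml/.yml spec files are always served with Content-Type: application/x-yaml, independent of the host's MIME database. A minimal sketch of how that behavior could be exercised with net/http/httptest follows; the test name and the throwaway spec file are hypothetical and not part of this commit, and the sketch assumes it lives in the same cmd/api package as specFileServer.

package api

import (
	"net/http"
	"net/http/httptest"
	"os"
	"path/filepath"
	"testing"
)

func TestSpecFileServerYAMLContentType(t *testing.T) {
	// Serve a throwaway YAML file from a temporary directory.
	dir := t.TempDir()
	if err := os.WriteFile(filepath.Join(dir, "spec.yaml"), []byte("openapi: 3.0.0\n"), 0o600); err != nil {
		t.Fatal(err)
	}
	srv := specFileServer{rootDir: dir}

	rec := httptest.NewRecorder()
	req := httptest.NewRequest(http.MethodGet, "/spec.yaml", nil)
	srv.ServeHTTP(rec, req)

	// The wrapper sets the header before delegating to http.FileServer, so
	// ServeContent keeps it instead of consulting the OS MIME database.
	if got := rec.Header().Get("Content-Type"); got != "application/x-yaml" {
		t.Fatalf("unexpected Content-Type: %q", got)
	}
}
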
17 changes: 9 additions & 8 deletions tests/e2e_regression/e2e_config.yml
@@ -1,24 +1,25 @@
analysis:
  source:
    cache:
      cache_dir: tests/e2e_regression/data
      query_on_cache_miss: true
      cache_dir: tests/e2e_regression/rpc-cache
      query_on_cache_miss: false
    chain_name: mainnet
    nodes:
      damask:
        rpc: unix:/tmp/node.sock
      default:
        rpc: unix:/tmp/node.sock
    fast_startup: true
  analyzers:
    metadata_registry:
      interval: 5m
    aggregate_stats:
      tx_volume_interval: 5m
    # metadata_registry:
    #   interval: 5m
    # aggregate_stats:
    #   tx_volume_interval: 5m
    consensus:
      from: 8_048_956 # Damask genesis
      to: 8_049_056 # 100 blocks; fast enough for early testing
    emerald:
      from: 1_003_298 # round at Damask genesis
      to: 1_003_398
      to: 1_003_398
    # sapphire:
    #   from: 0 # first round in Damask
storage:
10 changes: 10 additions & 0 deletions tests/e2e_regression/expected/account.body
@@ -0,0 +1,10 @@
{
  "address": "oasis1qp0302fv0gz858azasg663ax2epakk5fcssgza7j",
  "allowances": [],
  "available": "744704363502",
  "debonding": "0",
  "debonding_delegations_balance": "0",
  "delegations_balance": "0",
  "escrow": "0",
  "nonce": 5
}
6 changes: 6 additions & 0 deletions tests/e2e_regression/expected/account.headers
@@ -0,0 +1,6 @@
HTTP/1.1 200 OK
Content-Type: application/json
Vary: Origin
Date: UNINTERESTING
Content-Length: UNINTERESTING
