From 70a32755a8cb9ea3eadbf1cc4fe3e33279ccf3b4 Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Fri, 10 Oct 2025 16:08:13 -0700 Subject: [PATCH 01/56] wip: support damm v2 --- api/v1_coin.go | 176 ++-- api/v1_coins.go | 11 + config/config.go | 1 + ddl/functions/calculate_artist_coin_fees.sql | 55 ++ ddl/functions/handle_damm_v2_pool.sql | 21 + ddl/migrations/0169_damm_and_positions.sql | 137 ++++ solana/indexer/backfill.go | 2 +- solana/indexer/damm_v2.go | 761 ++++++++++++++++++ solana/indexer/dbc.go | 208 +++++ solana/indexer/processor.go | 49 +- solana/indexer/solana_indexer.go | 35 +- solana/indexer/subscription.go | 5 +- solana/indexer/unprocessed_transactions.go | 26 +- solana/indexer/utils.go | 16 +- .../spl/programs/meteora_damm_v2/accounts.go | 13 + solana/spl/programs/meteora_damm_v2/client.go | 113 +++ .../programs/meteora_damm_v2/client_test.go | 24 + .../programs/meteora_damm_v2/instruction.go | 9 + solana/spl/programs/meteora_damm_v2/types.go | 131 +++ .../programs/meteora_damm_v2/types_test.go | 46 ++ .../spl/programs/meteora_damm_v2/uint256le.go | 49 ++ solana/spl/programs/meteora_damm_v2/utils.go | 27 + .../programs/meteora_dbc/MigrationDammV2.go | 107 +++ solana/spl/programs/meteora_dbc/client.go | 2 - .../spl/programs/meteora_dbc/instruction.go | 120 +++ 25 files changed, 1972 insertions(+), 172 deletions(-) create mode 100644 ddl/functions/calculate_artist_coin_fees.sql create mode 100644 ddl/functions/handle_damm_v2_pool.sql create mode 100644 ddl/migrations/0169_damm_and_positions.sql create mode 100644 solana/indexer/damm_v2.go create mode 100644 solana/indexer/dbc.go create mode 100644 solana/spl/programs/meteora_damm_v2/accounts.go create mode 100644 solana/spl/programs/meteora_damm_v2/client.go create mode 100644 solana/spl/programs/meteora_damm_v2/client_test.go create mode 100644 solana/spl/programs/meteora_damm_v2/instruction.go create mode 100644 solana/spl/programs/meteora_damm_v2/types.go create mode 100644 solana/spl/programs/meteora_damm_v2/types_test.go create mode 100644 solana/spl/programs/meteora_damm_v2/uint256le.go create mode 100644 solana/spl/programs/meteora_damm_v2/utils.go create mode 100644 solana/spl/programs/meteora_dbc/MigrationDammV2.go create mode 100644 solana/spl/programs/meteora_dbc/instruction.go diff --git a/api/v1_coin.go b/api/v1_coin.go index 998d6e9d..f0d1b1db 100644 --- a/api/v1_coin.go +++ b/api/v1_coin.go @@ -5,15 +5,7 @@ import ( "github.com/jackc/pgx/v5" ) -func (app *ApiServer) v1Coin(c *fiber.Ctx) error { - mint := c.Params("mint") - if mint == "" { - return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{ - "error": "mint parameter is required", - }) - } - - sql := ` +const sharedSql = ` SELECT artist_coins.name, artist_coins.mint, @@ -28,46 +20,46 @@ func (app *ApiServer) v1Coin(c *fiber.Ctx) error { artist_coins.link_4, artist_coins.has_discord, artist_coins.created_at, - artist_coins.updated_at as coin_updated_at, - COALESCE(artist_coin_stats.market_cap, 0) as market_cap, - COALESCE(artist_coin_stats.fdv, 0) as fdv, - COALESCE(artist_coin_stats.liquidity, 0) as liquidity, - COALESCE(artist_coin_stats.last_trade_unix_time, 0) as last_trade_unix_time, - COALESCE(artist_coin_stats.last_trade_human_time, '') as last_trade_human_time, - COALESCE(artist_coin_stats.price, 0) as price, - COALESCE(artist_coin_stats.history_24h_price, 0) as history_24h_price, - COALESCE(artist_coin_stats.price_change_24h_percent, 0) as price_change_24h_percent, - 
COALESCE(artist_coin_stats.unique_wallet_24h, 0) as unique_wallet_24h, - COALESCE(artist_coin_stats.unique_wallet_history_24h, 0) as unique_wallet_history_24h, - COALESCE(artist_coin_stats.unique_wallet_24h_change_percent, 0) as unique_wallet_24h_change_percent, - COALESCE(artist_coin_stats.total_supply, 0) as total_supply, - COALESCE(artist_coin_stats.circulating_supply, 0) as circulating_supply, - COALESCE(artist_coin_stats.holder, 0) as holder, - COALESCE(artist_coin_stats.trade_24h, 0) as trade_24h, - COALESCE(artist_coin_stats.trade_history_24h, 0) as trade_history_24h, - COALESCE(artist_coin_stats.trade_24h_change_percent, 0) as trade_24h_change_percent, - COALESCE(artist_coin_stats.sell_24h, 0) as sell_24h, - COALESCE(artist_coin_stats.sell_history_24h, 0) as sell_history_24h, - COALESCE(artist_coin_stats.sell_24h_change_percent, 0) as sell_24h_change_percent, - COALESCE(artist_coin_stats.buy_24h, 0) as buy_24h, - COALESCE(artist_coin_stats.buy_history_24h, 0) as buy_history_24h, - COALESCE(artist_coin_stats.buy_24h_change_percent, 0) as buy_24h_change_percent, - COALESCE(artist_coin_stats.v_24h, 0) as v_24h, - COALESCE(artist_coin_stats.v_24h_usd, 0) as v_24h_usd, - COALESCE(artist_coin_stats.v_history_24h, 0) as v_history_24h, - COALESCE(artist_coin_stats.v_history_24h_usd, 0) as v_history_24h_usd, - COALESCE(artist_coin_stats.v_24h_change_percent, 0) as v_24h_change_percent, - COALESCE(artist_coin_stats.v_buy_24h, 0) as v_buy_24h, - COALESCE(artist_coin_stats.v_buy_24h_usd, 0) as v_buy_24h_usd, - COALESCE(artist_coin_stats.v_buy_history_24h, 0) as v_buy_history_24h, - COALESCE(artist_coin_stats.v_buy_history_24h_usd, 0) as v_buy_history_24h_usd, - COALESCE(artist_coin_stats.v_buy_24h_change_percent, 0) as v_buy_24h_change_percent, - COALESCE(artist_coin_stats.v_sell_24h, 0) as v_sell_24h, - COALESCE(artist_coin_stats.v_sell_24h_usd, 0) as v_sell_24h_usd, - COALESCE(artist_coin_stats.v_sell_history_24h, 0) as v_sell_history_24h, - COALESCE(artist_coin_stats.v_sell_history_24h_usd, 0) as v_sell_history_24h_usd, - COALESCE(artist_coin_stats.v_sell_24h_change_percent, 0) as v_sell_24h_change_percent, - COALESCE(artist_coin_stats.number_markets, 0) as number_markets, + artist_coins.updated_at AS coin_updated_at, + COALESCE(artist_coin_stats.market_cap, 0) AS market_cap, + COALESCE(artist_coin_stats.fdv, 0) AS fdv, + COALESCE(artist_coin_stats.liquidity, 0) AS liquidity, + COALESCE(artist_coin_stats.last_trade_unix_time, 0) AS last_trade_unix_time, + COALESCE(artist_coin_stats.last_trade_human_time, '') AS last_trade_human_time, + COALESCE(artist_coin_stats.price, 0) AS price, + COALESCE(artist_coin_stats.history_24h_price, 0) AS history_24h_price, + COALESCE(artist_coin_stats.price_change_24h_percent, 0) AS price_change_24h_percent, + COALESCE(artist_coin_stats.unique_wallet_24h, 0) AS unique_wallet_24h, + COALESCE(artist_coin_stats.unique_wallet_history_24h, 0) AS unique_wallet_history_24h, + COALESCE(artist_coin_stats.unique_wallet_24h_change_percent, 0) AS unique_wallet_24h_change_percent, + COALESCE(artist_coin_stats.total_supply, 0) AS total_supply, + COALESCE(artist_coin_stats.circulating_supply, 0) AS circulating_supply, + COALESCE(artist_coin_stats.holder, 0) AS holder, + COALESCE(artist_coin_stats.trade_24h, 0) AS trade_24h, + COALESCE(artist_coin_stats.trade_history_24h, 0) AS trade_history_24h, + COALESCE(artist_coin_stats.trade_24h_change_percent, 0) AS trade_24h_change_percent, + COALESCE(artist_coin_stats.sell_24h, 0) AS sell_24h, + 
COALESCE(artist_coin_stats.sell_history_24h, 0) AS sell_history_24h, + COALESCE(artist_coin_stats.sell_24h_change_percent, 0) AS sell_24h_change_percent, + COALESCE(artist_coin_stats.buy_24h, 0) AS buy_24h, + COALESCE(artist_coin_stats.buy_history_24h, 0) AS buy_history_24h, + COALESCE(artist_coin_stats.buy_24h_change_percent, 0) AS buy_24h_change_percent, + COALESCE(artist_coin_stats.v_24h, 0) AS v_24h, + COALESCE(artist_coin_stats.v_24h_usd, 0) AS v_24h_usd, + COALESCE(artist_coin_stats.v_history_24h, 0) AS v_history_24h, + COALESCE(artist_coin_stats.v_history_24h_usd, 0) AS v_history_24h_usd, + COALESCE(artist_coin_stats.v_24h_change_percent, 0) AS v_24h_change_percent, + COALESCE(artist_coin_stats.v_buy_24h, 0) AS v_buy_24h, + COALESCE(artist_coin_stats.v_buy_24h_usd, 0) AS v_buy_24h_usd, + COALESCE(artist_coin_stats.v_buy_history_24h, 0) AS v_buy_history_24h, + COALESCE(artist_coin_stats.v_buy_history_24h_usd, 0) AS v_buy_history_24h_usd, + COALESCE(artist_coin_stats.v_buy_24h_change_percent, 0) AS v_buy_24h_change_percent, + COALESCE(artist_coin_stats.v_sell_24h, 0) AS v_sell_24h, + COALESCE(artist_coin_stats.v_sell_24h_usd, 0) AS v_sell_24h_usd, + COALESCE(artist_coin_stats.v_sell_history_24h, 0) AS v_sell_history_24h, + COALESCE(artist_coin_stats.v_sell_history_24h_usd, 0) AS v_sell_history_24h_usd, + COALESCE(artist_coin_stats.v_sell_24h_change_percent, 0) AS v_sell_24h_change_percent, + COALESCE(artist_coin_stats.number_markets, 0) AS number_markets, JSON_BUILD_OBJECT( 'address', COALESCE(artist_coin_pools.address, ''), 'price', COALESCE(artist_coin_pools.price, 0), @@ -78,12 +70,25 @@ func (app *ApiServer) v1Coin(c *fiber.Ctx) error { 'totalTradingQuoteFee', COALESCE(artist_coin_pools.total_trading_quote_fee, 0), 'creatorWalletAddress', COALESCE(artist_coin_pools.creator_wallet_address, '') ) AS dynamic_bonding_curve, - COALESCE(artist_coin_stats.updated_at, artist_coins.created_at) as updated_at + ROW_TO_JSON(calculate_artist_coin_fees(artist_coins.mint)) AS artist_fees, + COALESCE(artist_coin_stats.updated_at, artist_coins.created_at) AS updated_at FROM artist_coins LEFT JOIN artist_coin_stats ON artist_coin_stats.mint = artist_coins.mint LEFT JOIN artist_coin_pools ON artist_coin_pools.base_mint = artist_coins.mint +` + +func (app *ApiServer) v1Coin(c *fiber.Ctx) error { + mint := c.Params("mint") + if mint == "" { + return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{ + "error": "mint parameter is required", + }) + } + + sql := ` + ` + sharedSql + ` WHERE artist_coins.mint = @mint LIMIT 1 ` @@ -114,76 +119,7 @@ func (app *ApiServer) v1CoinByTicker(c *fiber.Ctx) error { } sql := ` - SELECT - artist_coins.name, - artist_coins.mint, - artist_coins.ticker, - artist_coins.decimals, - artist_coins.user_id, - artist_coins.logo_uri, - artist_coins.description, - artist_coins.link_1, - artist_coins.link_2, - artist_coins.link_3, - artist_coins.link_4, - artist_coins.has_discord, - artist_coins.created_at, - artist_coins.updated_at as coin_updated_at, - COALESCE(artist_coin_stats.market_cap, 0) as market_cap, - COALESCE(artist_coin_stats.fdv, 0) as fdv, - COALESCE(artist_coin_stats.liquidity, 0) as liquidity, - COALESCE(artist_coin_stats.last_trade_unix_time, 0) as last_trade_unix_time, - COALESCE(artist_coin_stats.last_trade_human_time, '') as last_trade_human_time, - COALESCE(artist_coin_stats.price, 0) as price, - COALESCE(artist_coin_stats.history_24h_price, 0) as history_24h_price, - COALESCE(artist_coin_stats.price_change_24h_percent, 0) as price_change_24h_percent, - 
COALESCE(artist_coin_stats.unique_wallet_24h, 0) as unique_wallet_24h, - COALESCE(artist_coin_stats.unique_wallet_history_24h, 0) as unique_wallet_history_24h, - COALESCE(artist_coin_stats.unique_wallet_24h_change_percent, 0) as unique_wallet_24h_change_percent, - COALESCE(artist_coin_stats.total_supply, 0) as total_supply, - COALESCE(artist_coin_stats.circulating_supply, 0) as circulating_supply, - COALESCE(artist_coin_stats.holder, 0) as holder, - COALESCE(artist_coin_stats.trade_24h, 0) as trade_24h, - COALESCE(artist_coin_stats.trade_history_24h, 0) as trade_history_24h, - COALESCE(artist_coin_stats.trade_24h_change_percent, 0) as trade_24h_change_percent, - COALESCE(artist_coin_stats.sell_24h, 0) as sell_24h, - COALESCE(artist_coin_stats.sell_history_24h, 0) as sell_history_24h, - COALESCE(artist_coin_stats.sell_24h_change_percent, 0) as sell_24h_change_percent, - COALESCE(artist_coin_stats.buy_24h, 0) as buy_24h, - COALESCE(artist_coin_stats.buy_history_24h, 0) as buy_history_24h, - COALESCE(artist_coin_stats.buy_24h_change_percent, 0) as buy_24h_change_percent, - COALESCE(artist_coin_stats.v_24h, 0) as v_24h, - COALESCE(artist_coin_stats.v_24h_usd, 0) as v_24h_usd, - COALESCE(artist_coin_stats.v_history_24h, 0) as v_history_24h, - COALESCE(artist_coin_stats.v_history_24h_usd, 0) as v_history_24h_usd, - COALESCE(artist_coin_stats.v_24h_change_percent, 0) as v_24h_change_percent, - COALESCE(artist_coin_stats.v_buy_24h, 0) as v_buy_24h, - COALESCE(artist_coin_stats.v_buy_24h_usd, 0) as v_buy_24h_usd, - COALESCE(artist_coin_stats.v_buy_history_24h, 0) as v_buy_history_24h, - COALESCE(artist_coin_stats.v_buy_history_24h_usd, 0) as v_buy_history_24h_usd, - COALESCE(artist_coin_stats.v_buy_24h_change_percent, 0) as v_buy_24h_change_percent, - COALESCE(artist_coin_stats.v_sell_24h, 0) as v_sell_24h, - COALESCE(artist_coin_stats.v_sell_24h_usd, 0) as v_sell_24h_usd, - COALESCE(artist_coin_stats.v_sell_history_24h, 0) as v_sell_history_24h, - COALESCE(artist_coin_stats.v_sell_history_24h_usd, 0) as v_sell_history_24h_usd, - COALESCE(artist_coin_stats.v_sell_24h_change_percent, 0) as v_sell_24h_change_percent, - COALESCE(artist_coin_stats.number_markets, 0) as number_markets, - JSON_BUILD_OBJECT( - 'address', COALESCE(artist_coin_pools.address, ''), - 'price', COALESCE(artist_coin_pools.price, 0), - 'priceUSD', COALESCE(artist_coin_pools.price_usd, 0), - 'curveProgress', COALESCE(artist_coin_pools.curve_progress, 0), - 'isMigrated', COALESCE(artist_coin_pools.is_migrated, false), - 'creatorQuoteFee', COALESCE(artist_coin_pools.creator_quote_fee, 0), - 'totalTradingQuoteFee', COALESCE(artist_coin_pools.total_trading_quote_fee, 0), - 'creatorWalletAddress', COALESCE(artist_coin_pools.creator_wallet_address, '') - ) AS dynamic_bonding_curve, - COALESCE(artist_coin_stats.updated_at, artist_coins.created_at) as updated_at - FROM artist_coins - LEFT JOIN artist_coin_stats - ON artist_coin_stats.mint = artist_coins.mint - LEFT JOIN artist_coin_pools - ON artist_coin_pools.base_mint = artist_coins.mint + ` + sharedSql + ` WHERE artist_coins.ticker = @ticker LIMIT 1 ` diff --git a/api/v1_coins.go b/api/v1_coins.go index 763a9d23..dae90d7d 100644 --- a/api/v1_coins.go +++ b/api/v1_coins.go @@ -9,6 +9,15 @@ import ( "github.com/jackc/pgx/v5" ) +type ArtistCoinFees struct { + UnclaimedDbcFees float64 `json:"unclaimed_dbc_fees" db:"unclaimed_dbc_fees"` + TotalDbcFees float64 `json:"total_dbc_fees" db:"total_dbc_fees"` + UnclaimedDammV2Fees float64 `json:"unclaimed_damm_v2_fees" db:"unclaimed_damm_v2_fees"` 
+ TotalDammV2Fees float64 `json:"total_damm_v2_fees" db:"total_damm_v2_fees"` + UnclaimedFees float64 `json:"unclaimed_fees" db:"unclaimed_fees"` + TotalFees float64 `json:"total_fees" db:"total_fees"` +} + type ArtistCoin struct { Name string `json:"name"` Ticker string `json:"ticker"` @@ -65,6 +74,7 @@ type ArtistCoin struct { VSell24hChangePercent float64 `json:"vSell24hChangePercent" db:"v_sell_24h_change_percent"` NumberMarkets int `json:"numberMarkets" db:"number_markets"` DynamicBondingCurve *DynamicBondingCurveInsights `json:"dynamicBondingCurve" db:"dynamic_bonding_curve"` + ArtistFees *ArtistCoinFees `json:"artistFees" db:"artist_fees"` UpdatedAt time.Time `json:"updatedAt" db:"updated_at"` } @@ -189,6 +199,7 @@ func (app *ApiServer) v1Coins(c *fiber.Ctx) error { 'totalTradingQuoteFee', COALESCE(artist_coin_pools.total_trading_quote_fee, 0), 'creatorWalletAddress', COALESCE(artist_coin_pools.creator_wallet_address, '') ) AS dynamic_bonding_curve, + ROW_TO_JSON(calculate_artist_coin_fees(artist_coins.mint)) AS artist_fees, COALESCE(artist_coin_stats.updated_at, artist_coins.created_at) as updated_at FROM artist_coins LEFT JOIN artist_coin_stats diff --git a/config/config.go b/config/config.go index 821e0a27..10f24cd0 100644 --- a/config/config.go +++ b/config/config.go @@ -91,6 +91,7 @@ func init() { Cfg.AudiusdChainID = core_config.DevAcdcChainID Cfg.AudiusdEntityManagerAddress = core_config.DevAcdcAddress + Cfg.SolanaIndexerRetryInterval = 10 * time.Second case "stage": fallthrough case "staging": diff --git a/ddl/functions/calculate_artist_coin_fees.sql b/ddl/functions/calculate_artist_coin_fees.sql new file mode 100644 index 00000000..0c5c3dd4 --- /dev/null +++ b/ddl/functions/calculate_artist_coin_fees.sql @@ -0,0 +1,55 @@ +BEGIN; +DROP FUNCTION IF EXISTS calculate_artist_coin_fees(TEXT); +CREATE OR REPLACE FUNCTION calculate_artist_coin_fees(artist_coin_mint TEXT) +RETURNS TABLE ( + unclaimed_dbc_fees NUMERIC, + total_dbc_fees NUMERIC, + unclaimed_damm_v2_fees NUMERIC, + total_damm_v2_fees NUMERIC, + unclaimed_fees NUMERIC, + total_fees NUMERIC +) LANGUAGE sql AS $function$ + WITH + damm_fees AS ( + SELECT + pool.token_a_mint AS mint, + ( + pool.fee_b_per_liquidity + * ( + position.unlocked_liquidity + position.vested_liquidity + position.permanent_locked_liquidity + ) + / POWER (2, 128) + + position.fee_b_pending + ) AS total_damm_v2_fees, + ( + (pool.fee_b_per_liquidity - position.fee_b_per_token_checkpoint) + * ( + position.unlocked_liquidity + position.vested_liquidity + position.permanent_locked_liquidity + ) + / POWER (2, 128) + + position.fee_b_pending + ) AS unclaimed_damm_v2_fees + FROM sol_meteora_damm_v2_pools pool + JOIN sol_meteora_dbc_migrations migration ON migration.base_mint = pool.token_a_mint + JOIN sol_meteora_damm_v2_positions position ON position.address = migration.first_position + WHERE pool.token_a_mint = artist_coin_mint + ), + dbc_fees AS ( + SELECT + base_mint AS mint, + total_trading_quote_fee / 2 AS total_dbc_fees, + creator_quote_fee / 2 AS unclaimed_dbc_fees + FROM artist_coin_pools + WHERE base_mint = artist_coin_mint + ) + SELECT + FLOOR(COALESCE(dbc_fees.unclaimed_dbc_fees, 0)) AS unclaimed_dbc_fees, + FLOOR(COALESCE(dbc_fees.total_dbc_fees, 0)) AS total_dbc_fees, + FLOOR(COALESCE(damm_fees.unclaimed_damm_v2_fees, 0)) AS unclaimed_damm_v2_fees, + FLOOR(COALESCE(damm_fees.total_damm_v2_fees, 0)) AS total_damm_v2_fees, + FLOOR(COALESCE(dbc_fees.unclaimed_dbc_fees, 0) + COALESCE(damm_fees.unclaimed_damm_v2_fees, 0)) AS unclaimed_fees, + 
FLOOR(COALESCE(dbc_fees.total_dbc_fees, 0) + COALESCE(damm_fees.total_damm_v2_fees, 0)) AS total_fees + FROM dbc_fees + FULL OUTER JOIN damm_fees USING (mint); +$function$; +COMMIT; \ No newline at end of file diff --git a/ddl/functions/handle_damm_v2_pool.sql b/ddl/functions/handle_damm_v2_pool.sql new file mode 100644 index 00000000..b5515b48 --- /dev/null +++ b/ddl/functions/handle_damm_v2_pool.sql @@ -0,0 +1,21 @@ +CREATE OR REPLACE FUNCTION handle_meteora_dbc_migrations() +RETURNS trigger AS $$ +BEGIN + PERFORM pg_notify('meteora_dbc_migration', json_build_object('operation', TG_OP)::text); + RETURN NEW; + EXCEPTION + WHEN OTHERS THEN + RAISE WARNING 'An error occurred in %: %', TG_NAME, SQLERRM; + RETURN NULL; +END; +$$ LANGUAGE plpgsql; + +DO $$ +BEGIN + CREATE TRIGGER on_meteora_dbc_migrations + AFTER INSERT OR DELETE ON sol_meteora_dbc_migrations + FOR EACH ROW EXECUTE FUNCTION handle_meteora_dbc_migrations(); +EXCEPTION + WHEN others THEN NULL; -- Ignore if trigger already exists +END $$; +COMMENT ON TRIGGER on_meteora_dbc_migrations ON sol_meteora_dbc_migrations IS 'Notifies when a DBC pool migrates to a DAMM V2 pool.' \ No newline at end of file diff --git a/ddl/migrations/0169_damm_and_positions.sql b/ddl/migrations/0169_damm_and_positions.sql new file mode 100644 index 00000000..6bb0ddae --- /dev/null +++ b/ddl/migrations/0169_damm_and_positions.sql @@ -0,0 +1,137 @@ +CREATE TABLE IF NOT EXISTS sol_meteora_dbc_migrations ( + signature TEXT NOT NULL, + instruction_index INT NOT NULL, + slot BIGINT NOT NULL, + dbc_pool TEXT NOT NULL, + migration_metadata TEXT NOT NULL, + config TEXT NOT NULL, + dbc_pool_authority TEXT NOT NULL, + damm_v2_pool TEXT NOT NULL, + first_position_nft_mint TEXT NOT NULL, + first_position_nft_account TEXT NOT NULL, + first_position TEXT NOT NULL, + second_position_nft_mint TEXT NOT NULL, + second_position_nft_account TEXT NOT NULL, + second_position TEXT NOT NULL, + damm_pool_authority TEXT NOT NULL, + base_mint TEXT NOT NULL, + quote_mint TEXT NOT NULL, + created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + PRIMARY KEY (signature, instruction_index) +); +CREATE INDEX IF NOT EXISTS sol_meteora_dbc_migrations_base_mint_idx ON sol_meteora_dbc_migrations(base_mint); +COMMENT ON TABLE sol_meteora_dbc_migrations IS 'Tracks migrations from DBC pools to DAMM V2 pools.'; +COMMENT ON INDEX sol_meteora_dbc_migrations_base_mint_idx IS 'Used for finding artist positions by base_mint.'; + +CREATE TABLE IF NOT EXISTS sol_meteora_damm_v2_pools ( + address TEXT PRIMARY KEY, + token_a_mint TEXT NOT NULL, + token_b_mint TEXT NOT NULL, + token_a_vault TEXT NOT NULL, + token_b_vault TEXT NOT NULL, + whitelisted_vault TEXT NOT NULL, + partner TEXT NOT NULL, + liquidity NUMERIC NOT NULL, + protocol_a_fee BIGINT NOT NULL, + protocol_b_fee BIGINT NOT NULL, + partner_a_fee BIGINT NOT NULL, + partner_b_fee BIGINT NOT NULL, + sqrt_min_price NUMERIC NOT NULL, + sqrt_max_price NUMERIC NOT NULL, + sqrt_price NUMERIC NOT NULL, + activation_point BIGINT NOT NULL, + activation_type SMALLINT NOT NULL, + pool_status SMALLINT NOT NULL, + token_a_flag SMALLINT NOT NULL, + token_b_flag SMALLINT NOT NULL, + collect_fee_mode SMALLINT NOT NULL, + pool_type SMALLINT NOT NULL, + fee_a_per_liquidity BIGINT NOT NULL, + fee_b_per_liquidity BIGINT NOT NULL, + permanent_lock_liquidity NUMERIC NOT NULL, + creator TEXT NOT NULL, + created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP NOT NULL DEFAULT 
CURRENT_TIMESTAMP +); +COMMENT ON TABLE sol_meteora_damm_v2_pools IS 'Tracks DAMM V2 pool state. Join with sol_meteora_damm_v2_pool_metrics, sol_meteora_damm_v2_pool_fees, sol_meteora_damm_v2_pool_base_fees, and sol_meteora_damm_v2_pool_dynamic_fees for full pool state.'; + +CREATE TABLE IF NOT EXISTS sol_meteora_damm_v2_pool_metrics ( + pool TEXT PRIMARY KEY REFERENCES sol_meteora_damm_v2_pools(address) ON DELETE CASCADE, + total_lp_a_fee NUMERIC NOT NULL, + total_lp_b_fee NUMERIC NOT NULL, + total_protocol_a_fee NUMERIC NOT NULL, + total_protocol_b_fee NUMERIC NOT NULL, + total_partner_a_fee NUMERIC NOT NULL, + total_partner_b_fee NUMERIC NOT NULL, + total_position BIGINT NOT NULL, + created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP +); +COMMENT ON TABLE sol_meteora_damm_v2_pool_metrics IS 'Tracks aggregated metrics for DAMM V2 pools. A slice of the DAMM V2 pool state.'; + +CREATE TABLE IF NOT EXISTS sol_meteora_damm_v2_pool_fees ( + pool TEXT PRIMARY KEY REFERENCES sol_meteora_damm_v2_pools(address) ON DELETE CASCADE, + protocol_fee_percent SMALLINT NOT NULL, + partner_fee_percent SMALLINT NOT NULL, + referral_fee_percent SMALLINT NOT NULL, + created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP +); +COMMENT ON TABLE sol_meteora_damm_v2_pool_fees IS 'Tracks fee configuration for DAMM V2 pools. A slice of the DAMM V2 pool state.'; + +CREATE TABLE IF NOT EXISTS sol_meteora_damm_v2_pool_base_fees ( + pool TEXT PRIMARY KEY REFERENCES sol_meteora_damm_v2_pools(address) ON DELETE CASCADE, + cliff_fee_numerator BIGINT NOT NULL, + fee_scheduler_mode SMALLINT NOT NULL, + number_of_period SMALLINT NOT NULL, + period_frequency BIGINT NOT NULL, + reduction_factor BIGINT NOT NULL, + created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP +); +COMMENT ON TABLE sol_meteora_damm_v2_pool_base_fees IS 'Tracks base fee configuration for DAMM V2 pools. A slice of the DAMM V2 pool state.'; + +CREATE TABLE IF NOT EXISTS sol_meteora_damm_v2_pool_dynamic_fees ( + pool TEXT PRIMARY KEY REFERENCES sol_meteora_damm_v2_pools(address) ON DELETE CASCADE, + initialized SMALLINT NOT NULL, + max_volatility_accumulator INTEGER NOT NULL, + variable_fee_control INTEGER NOT NULL, + bin_step SMALLINT NOT NULL, + filter_period SMALLINT NOT NULL, + decay_period SMALLINT NOT NULL, + reduction_factor SMALLINT NOT NULL, + last_update_timestamp BIGINT NOT NULL, + bin_step_u128 NUMERIC NOT NULL, + sqrt_price_reference NUMERIC NOT NULL, + volatility_accumulator NUMERIC NOT NULL, + volatility_reference NUMERIC NOT NULL, + created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP +); +COMMENT ON TABLE sol_meteora_damm_v2_pool_dynamic_fees IS 'Tracks dynamic fee configuration for DAMM V2 pools. 
A slice of the DAMM V2 pool state.'; + +CREATE TABLE IF NOT EXISTS sol_meteora_damm_v2_positions ( + address TEXT PRIMARY KEY, + pool TEXT NOT NULL REFERENCES sol_meteora_damm_v2_pools(address) ON DELETE CASCADE, + nft_mint TEXT NOT NULL, + fee_a_per_token_checkpoint BIGINT NOT NULL, + fee_b_per_token_checkpoint BIGINT NOT NULL, + fee_a_pending BIGINT NOT NULL, + fee_b_pending BIGINT NOT NULL, + unlocked_liquidity NUMERIC NOT NULL, + vested_liquidity NUMERIC NOT NULL, + permanent_locked_liquidity NUMERIC NOT NULL, + created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP +); +COMMENT ON TABLE sol_meteora_damm_v2_positions IS 'Tracks DAMM V2 positions representing a claim to the liquidity and associated fees in a DAMM V2 pool. Join with sol_meteora_damm_v2_position_metrics for full position state.'; + +CREATE TABLE IF NOT EXISTS sol_meteora_damm_v2_position_metrics ( + position TEXT PRIMARY KEY REFERENCES sol_meteora_damm_v2_positions(address) ON DELETE CASCADE, + total_claimed_a_fee BIGINT NOT NULL, + total_claimed_b_fee BIGINT NOT NULL, + created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP +); +COMMENT ON TABLE sol_meteora_damm_v2_position_metrics IS 'Tracks aggregated metrics for DAMM V2 positions. A slice of the DAMM V2 position state.'; \ No newline at end of file diff --git a/solana/indexer/backfill.go b/solana/indexer/backfill.go index fca052dd..0535fd68 100644 --- a/solana/indexer/backfill.go +++ b/solana/indexer/backfill.go @@ -105,7 +105,7 @@ func (s *SolanaIndexer) backfillAddressTransactions(ctx context.Context, address } opts.Before = before - res, err := withRetries(func() ([]*rpc.TransactionSignature, error) { + res, err := withRetriesResult(func() ([]*rpc.TransactionSignature, error) { return s.rpcClient.GetSignaturesForAddressWithOpts(ctx, address, &opts) }, 5, time.Second*1) if err != nil { diff --git a/solana/indexer/damm_v2.go b/solana/indexer/damm_v2.go new file mode 100644 index 00000000..dc381e6e --- /dev/null +++ b/solana/indexer/damm_v2.go @@ -0,0 +1,761 @@ +package indexer + +import ( + "context" + "fmt" + + "api.audius.co/database" + "api.audius.co/solana/spl/programs/meteora_damm_v2" + bin "github.com/gagliardetto/binary" + "github.com/gagliardetto/solana-go" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgconn" + pb "github.com/rpcpool/yellowstone-grpc/examples/golang/proto" + "go.uber.org/zap" +) + +type notificationCallback func(ctx context.Context, notification *pgconn.Notification) + +type DammV2Indexer struct { + pool database.DbPool + grpcConfig GrpcConfig + logger *zap.Logger +} + +const MAX_DAMM_V2_POOLS_PER_SUBSCRIPTION = 10000 +const DAMM_V2_POOL_SUBSCRIPTION_KEY = "dammV2Pools" +const DBC__MIGRATION_NOTIFICATION_NAME = "meteora_dbc_migration" + +func (d *DammV2Indexer) Start(ctx context.Context) { + // To ensure only one subscription task is running at a time, keep track of + // the last cancel function and call it on the next notification. 
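+	// For example: inserting (or deleting) a sol_meteora_dbc_migrations row fires
+	// the meteora_dbc_migration NOTIFY (see ddl/functions/handle_damm_v2_pool.sql),
+	// which makes handleNotif below close the current gRPC streams and resubscribe
+	// so the newly migrated pool's accounts are watched as well.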
+	var lastCancel context.CancelFunc
+
+	// Ensure all gRPC clients are closed on shutdown
+	var grpcClients []GrpcClient
+	defer (func() {
+		for _, client := range grpcClients {
+			client.Close()
+		}
+	})()
+
+	handleNotif := func(ctx context.Context, notification *pgconn.Notification) {
+		// Cancel the previous task if it exists
+		subCtx, cancel := context.WithCancel(ctx)
+		if lastCancel != nil {
+			lastCancel()
+		}
+		for _, client := range grpcClients {
+			client.Close()
+		}
+		clients, err := subscribeToDammV2Pools(subCtx, d.pool, d.grpcConfig, d.logger)
+		grpcClients = clients
+		if err != nil {
+			// Release the new context before bailing so it is not leaked
+			cancel()
+			d.logger.Error("failed to resubscribe to DAMM V2 pools", zap.Error(err))
+			return
+		}
+		lastCancel = cancel
+	}
+
+	// Set up the initial subscription
+	clients, err := subscribeToDammV2Pools(ctx, d.pool, d.grpcConfig, d.logger)
+	if err != nil {
+		d.logger.Error("failed to subscribe to DAMM V2 pools", zap.Error(err))
+		return
+	}
+	grpcClients = clients
+
+	// Watch for new pools to be added
+	err = watchPgNotification(ctx, d.pool, DBC__MIGRATION_NOTIFICATION_NAME, handleNotif, d.logger)
+	if err != nil {
+		d.logger.Error("failed to watch for DAMM V2 pool changes", zap.Error(err))
+		return
+	}
+
+	// Block until shutdown instead of spinning on a busy loop
+	<-ctx.Done()
+	d.logger.Info("received shutdown signal, stopping DAMM V2 indexer")
+}
+
+func subscribeToDammV2Pools(ctx context.Context, db database.DBTX, grpcConfig GrpcConfig, logger *zap.Logger) ([]GrpcClient, error) {
+	done := false
+	page := 0
+	pageSize := MAX_DAMM_V2_POOLS_PER_SUBSCRIPTION
+	total := 0
+	grpcClients := make([]GrpcClient, 0)
+	for !done {
+		dammV2Pools, err := getWatchedDammV2Pools(ctx, db, pageSize, page*pageSize)
+		if err != nil {
+			return nil, fmt.Errorf("failed to get watched DAMM V2 pools: %w", err)
+		}
+		if len(dammV2Pools) == 0 {
+			logger.Info("no DAMM V2 pools to subscribe to")
+			return grpcClients, nil
+		}
+		total += len(dammV2Pools)
+
+		logger.Debug("subscribing to DAMM V2 pools...", zap.Int("numPools", len(dammV2Pools)))
+		subscription := makeDammV2SubscriptionRequest(dammV2Pools)
+
+		handleMessage := func(ctx context.Context, msg *pb.SubscribeUpdate) {
+			handleDammV2Message(ctx, db, msg, logger)
+		}
+
+		grpcClient := NewGrpcClient(grpcConfig)
+		err = grpcClient.Subscribe(ctx, subscription, handleMessage, func(err error) {
+			logger.Error("error in DAMM V2 subscription", zap.Error(err))
+		})
+		if err != nil {
+			return nil, fmt.Errorf("failed to subscribe to DAMM V2 pools: %w", err)
+		}
+		grpcClients = append(grpcClients, grpcClient)
+
+		if len(dammV2Pools) < pageSize {
+			done = true
+		}
+		page++
+	}
+	logger.Info("subscribed to DAMM V2 pools", zap.Int("numPools", total))
+	return grpcClients, nil
+}
+
+func watchPgNotification(ctx context.Context, pool database.DbPool, notification string, callback notificationCallback, logger *zap.Logger) error {
+	if logger == nil {
+		logger = zap.NewNop()
+	}
+
+	childLogger := logger.With(zap.String("notification", notification))
+
+	conn, err := pool.Acquire(ctx)
+	if err != nil {
+		return fmt.Errorf("failed to acquire database connection: %w", err)
+	}
+
+	rawConn := conn.Conn()
+	_, err = rawConn.Exec(ctx, fmt.Sprintf(`LISTEN %s`, notification))
+	if err != nil {
+		return fmt.Errorf("failed to listen for %s changes: %w", notification, err)
+	}
+
+	go func() {
+		defer func() {
+			if rawConn != nil && !rawConn.PgConn().IsClosed() && ctx.Err() != nil {
+				_, _ = rawConn.Exec(ctx, fmt.Sprintf(`UNLISTEN %s`, notification))
+			}
+			childLogger.Info("received shutdown signal, stopping 
notification watcher") + conn.Release() + }() + for { + select { + case <-ctx.Done(): + return + default: + } + + notif, err := rawConn.WaitForNotification(ctx) + if err != nil { + childLogger.Error("failed waiting for notification", zap.Error(err)) + } + if notif == nil { + childLogger.Warn("received nil notification, continuing to wait for notifications") + continue + } + callback(ctx, notif) + } + }() + return nil +} + +func makeDammV2SubscriptionRequest(dammV2Pools []string) *pb.SubscribeRequest { + commitment := pb.CommitmentLevel_CONFIRMED + subscription := &pb.SubscribeRequest{ + Commitment: &commitment, + } + + // Listen for slot updates for checkpointing + subscription.Slots = make(map[string]*pb.SubscribeRequestFilterSlots) + subscription.Slots["checkpoints"] = &pb.SubscribeRequestFilterSlots{} + + // fromSlot := uint64(372380625) + // subscription.FromSlot = &fromSlot + + subscription.Accounts = make(map[string]*pb.SubscribeRequestFilterAccounts) + + // Listen to all watched pools + accountFilter := pb.SubscribeRequestFilterAccounts{ + Owner: []string{meteora_damm_v2.ProgramID.String()}, + Account: dammV2Pools, + } + subscription.Accounts[DAMM_V2_POOL_SUBSCRIPTION_KEY] = &accountFilter + + // Listen to all positions for each pool + for _, pool := range dammV2Pools { + accountFilter := pb.SubscribeRequestFilterAccounts{ + Owner: []string{meteora_damm_v2.ProgramID.String()}, + Filters: []*pb.SubscribeRequestFilterAccountsFilter{ + { + Filter: &pb.SubscribeRequestFilterAccountsFilter_Memcmp{ + Memcmp: &pb.SubscribeRequestFilterAccountsFilterMemcmp{ + Offset: 8, // Offset of the pool field in the position account (after discriminator) + Data: &pb.SubscribeRequestFilterAccountsFilterMemcmp_Base58{ + Base58: pool, + }, + }, + }, + }, + { + Filter: &pb.SubscribeRequestFilterAccountsFilter_Datasize{ + Datasize: 408, // byte size of a Position account + }, + }, + }, + } + subscription.Accounts[pool] = &accountFilter + } + + return subscription +} + +func handleDammV2Message(ctx context.Context, db database.DBTX, msg *pb.SubscribeUpdate, logger *zap.Logger) { + accUpdate := msg.GetAccount() + if accUpdate != nil { + if msg.Filters[0] == DAMM_V2_POOL_SUBSCRIPTION_KEY { + err := processDammV2PoolUpdate(ctx, db, accUpdate) + if err != nil { + logger.Error("failed to process DAMM V2 pool update", zap.Error(err)) + } else { + logger.Debug("processed DAMM V2 pool update", zap.String("account", solana.PublicKeyFromBytes(accUpdate.Account.Pubkey).String())) + } + } else { + err := processDammV2PositionUpdate(ctx, db, accUpdate) + if err != nil { + logger.Error("failed to process DAMM V2 position update", zap.Error(err)) + } else { + logger.Debug("processed DAMM V2 position update", zap.String("account", solana.PublicKeyFromBytes(accUpdate.Account.Pubkey).String())) + } + } + + } +} + +func processDammV2PoolUpdate( + ctx context.Context, + db database.DBTX, + update *pb.SubscribeUpdateAccount, +) error { + account := solana.PublicKeyFromBytes(update.Account.Pubkey) + var pool meteora_damm_v2.Pool + err := bin.NewBorshDecoder(update.Account.Data).Decode(&pool) + if err != nil { + return err + } + err = upsertDammV2Pool(ctx, db, account, &pool) + if err != nil { + return err + } + err = upsertDammV2PoolMetrics(ctx, db, account, &pool.Metrics) + if err != nil { + return err + } + err = upsertDammV2PoolFees(ctx, db, account, &pool.PoolFees) + if err != nil { + return err + } + err = upsertDammV2PoolBaseFee(ctx, db, account, &pool.PoolFees.BaseFee) + if err != nil { + return err + } + err = 
upsertDammV2PoolDynamicFee(ctx, db, account, &pool.PoolFees.DynamicFee) + if err != nil { + return err + } + return nil +} + +func processDammV2PositionUpdate( + ctx context.Context, + db database.DBTX, + update *pb.SubscribeUpdateAccount, +) error { + account := solana.PublicKeyFromBytes(update.Account.Pubkey) + var position meteora_damm_v2.PositionState + err := bin.NewBorshDecoder(update.Account.Data).Decode(&position) + if err != nil { + return err + } + err = upsertDammV2Position(ctx, db, account, &position) + if err != nil { + return err + } + err = upsertDammV2PositionMetrics(ctx, db, account, &position.Metrics) + if err != nil { + return err + } + return nil +} + +func getWatchedDammV2Pools(ctx context.Context, db database.DBTX, limit int, offset int) ([]string, error) { + sql := ` + SELECT damm_v2_pool + FROM sol_meteora_dbc_migrations + LIMIT @limit OFFSET @offset + ;` + rows, err := db.Query(ctx, sql, pgx.NamedArgs{ + "limit": limit, + "offset": offset, + }) + if err != nil { + return nil, err + } + defer rows.Close() + + var pools []string + for rows.Next() { + var address string + if err := rows.Scan(&address); err != nil { + return nil, err + } + pools = append(pools, address) + } + return pools, nil +} + +func upsertDammV2Pool( + ctx context.Context, + db database.DBTX, + account solana.PublicKey, + pool *meteora_damm_v2.Pool, +) error { + sqlPool := ` + INSERT INTO sol_meteora_damm_v2_pools ( + address, + token_a_mint, + token_b_mint, + token_a_vault, + token_b_vault, + whitelisted_vault, + partner, + liquidity, + protocol_a_fee, + protocol_b_fee, + partner_a_fee, + partner_b_fee, + sqrt_min_price, + sqrt_max_price, + sqrt_price, + activation_point, + activation_type, + pool_status, + token_a_flag, + token_b_flag, + collect_fee_mode, + pool_type, + fee_a_per_liquidity, + fee_b_per_liquidity, + permanent_lock_liquidity, + creator, + created_at, + updated_at + ) VALUES ( + @address, + @token_a_mint, + @token_b_mint, + @token_a_vault, + @token_b_vault, + @whitelisted_vault, + @partner, + @liquidity, + @protocol_a_fee, + @protocol_b_fee, + @partner_a_fee, + @partner_b_fee, + @sqrt_min_price, + @sqrt_max_price, + @sqrt_price, + @activation_point, + @activation_type, + @pool_status, + @token_a_flag, + @token_b_flag, + @collect_fee_mode, + @pool_type, + @fee_a_per_liquidity, + @fee_b_per_liquidity, + @permanent_lock_liquidity, + @creator, + NOW(), + NOW() + ) + ON CONFLICT (address) DO UPDATE SET + token_a_mint = EXCLUDED.token_a_mint, + token_b_mint = EXCLUDED.token_b_mint, + token_a_vault = EXCLUDED.token_a_vault, + token_b_vault = EXCLUDED.token_b_vault, + whitelisted_vault = EXCLUDED.whitelisted_vault, + partner = EXCLUDED.partner, + liquidity = EXCLUDED.liquidity, + protocol_a_fee = EXCLUDED.protocol_a_fee, + protocol_b_fee = EXCLUDED.protocol_b_fee, + partner_a_fee = EXCLUDED.partner_a_fee, + partner_b_fee = EXCLUDED.partner_b_fee, + sqrt_min_price = EXCLUDED.sqrt_min_price, + sqrt_max_price = EXCLUDED.sqrt_max_price, + sqrt_price = EXCLUDED.sqrt_price, + activation_point = EXCLUDED.activation_point, + activation_type = EXCLUDED.activation_type, + pool_status = EXCLUDED.pool_status, + token_a_flag = EXCLUDED.token_a_flag, + token_b_flag = EXCLUDED.token_b_flag, + collect_fee_mode = EXCLUDED.collect_fee_mode, + pool_type = EXCLUDED.pool_type, + fee_a_per_liquidity = EXCLUDED.fee_a_per_liquidity, + fee_b_per_liquidity = EXCLUDED.fee_b_per_liquidity, + permanent_lock_liquidity = EXCLUDED.permanent_lock_liquidity, + creator = EXCLUDED.creator, + updated_at = NOW() + ` + args := 
pgx.NamedArgs{ + "address": account.String(), + "token_a_mint": pool.TokenAMint.String(), + "token_b_mint": pool.TokenBMint.String(), + "token_a_vault": pool.TokenAVault.String(), + "token_b_vault": pool.TokenBVault.String(), + "whitelisted_vault": pool.WhitelistedVault.String(), + "partner": pool.Partner.String(), + "liquidity": pool.Liquidity.String(), + "protocol_a_fee": pool.Metrics.TotalProtocolAFee, + "protocol_b_fee": pool.Metrics.TotalProtocolBFee, + "partner_a_fee": pool.Metrics.TotalPartnerAFee, + "partner_b_fee": pool.Metrics.TotalPartnerBFee, + "sqrt_min_price": pool.SqrtMinPrice.BigInt(), + "sqrt_max_price": pool.SqrtMaxPrice.BigInt(), + "sqrt_price": pool.SqrtPrice.BigInt(), + "activation_point": pool.ActivationPoint, + "activation_type": pool.ActivationType, + "pool_status": pool.PoolStatus, + "token_a_flag": pool.TokenAFlag, + "token_b_flag": pool.TokenBFlag, + "collect_fee_mode": pool.CollectFeeMode, + "pool_type": pool.PoolType, + "fee_a_per_liquidity": pool.FeeAPerLiquidity, + "fee_b_per_liquidity": pool.FeeBPerLiquidity, + "permanent_lock_liquidity": pool.PermanentLockLiquidity.BigInt(), + "creator": pool.Creator.String(), + } + _, err := db.Exec(ctx, sqlPool, args) + + return err +} + +func upsertDammV2PoolMetrics( + ctx context.Context, + db database.DBTX, + account solana.PublicKey, + metrics *meteora_damm_v2.PoolMetrics, +) error { + sqlMetrics := ` + INSERT INTO sol_meteora_damm_v2_pool_metrics ( + pool, + total_lp_a_fee, + total_lp_b_fee, + total_protocol_a_fee, + total_protocol_b_fee, + total_partner_a_fee, + total_partner_b_fee, + total_position, + created_at, + updated_at + ) VALUES ( + @pool, + @total_lp_a_fee, + @total_lp_b_fee, + @total_protocol_a_fee, + @total_protocol_b_fee, + @total_partner_a_fee, + @total_partner_b_fee, + @total_position, + NOW(), + NOW() + ) + ON CONFLICT (pool) DO UPDATE SET + total_lp_a_fee = EXCLUDED.total_lp_a_fee, + total_lp_b_fee = EXCLUDED.total_lp_b_fee, + total_protocol_a_fee = EXCLUDED.total_protocol_a_fee, + total_protocol_b_fee = EXCLUDED.total_protocol_b_fee, + total_partner_a_fee = EXCLUDED.total_partner_a_fee, + total_partner_b_fee = EXCLUDED.total_partner_b_fee, + total_position = EXCLUDED.total_position, + updated_at = NOW() + ` + + _, err := db.Exec(ctx, sqlMetrics, pgx.NamedArgs{ + "pool": account.String(), + "total_lp_a_fee": metrics.TotalLpAFee, + "total_lp_b_fee": metrics.TotalLpBFee, + "total_protocol_a_fee": metrics.TotalProtocolAFee, + "total_protocol_b_fee": metrics.TotalProtocolBFee, + "total_partner_a_fee": metrics.TotalPartnerAFee, + "total_partner_b_fee": metrics.TotalPartnerBFee, + "total_position": metrics.TotalPosition, + }) + return err +} + +func upsertDammV2PoolFees( + ctx context.Context, + db database.DBTX, + account solana.PublicKey, + fees *meteora_damm_v2.PoolFeesStruct, +) error { + sqlFees := ` + INSERT INTO sol_meteora_damm_v2_pool_fees ( + pool, + partner_fee_percent, + protocol_fee_percent, + referral_fee_percent, + created_at, + updated_at + ) VALUES ( + @pool, + @partner_fee_percent, + @protocol_fee_percent, + @referral_fee_percent, + NOW(), + NOW() + ) + ON CONFLICT (pool) DO UPDATE SET + partner_fee_percent = EXCLUDED.partner_fee_percent, + protocol_fee_percent = EXCLUDED.protocol_fee_percent, + referral_fee_percent = EXCLUDED.referral_fee_percent, + updated_at = NOW() + ` + + _, err := db.Exec(ctx, sqlFees, pgx.NamedArgs{ + "pool": account.String(), + "partner_fee_percent": fees.PartnerFeePercent, + "protocol_fee_percent": fees.ProtocolFeePercent, + "referral_fee_percent": 
fees.ReferralFeePercent, + }) + return err +} + +func upsertDammV2PoolBaseFee( + ctx context.Context, + db database.DBTX, + account solana.PublicKey, + baseFee *meteora_damm_v2.BaseFeeStruct, +) error { + sqlBaseFee := ` + INSERT INTO sol_meteora_damm_v2_pool_base_fees ( + pool, + cliff_fee_numerator, + fee_scheduler_mode, + number_of_period, + period_frequency, + reduction_factor, + created_at, + updated_at + ) VALUES ( + @pool, + @cliff_fee_numerator, + @fee_scheduler_mode, + @number_of_period, + @period_frequency, + @reduction_factor, + NOW(), + NOW() + ) + ON CONFLICT (pool) DO UPDATE SET + cliff_fee_numerator = EXCLUDED.cliff_fee_numerator, + fee_scheduler_mode = EXCLUDED.fee_scheduler_mode, + number_of_period = EXCLUDED.number_of_period, + period_frequency = EXCLUDED.period_frequency, + reduction_factor = EXCLUDED.reduction_factor, + updated_at = NOW() + ` + + _, err := db.Exec(ctx, sqlBaseFee, pgx.NamedArgs{ + "pool": account.String(), + "cliff_fee_numerator": baseFee.CliffFeeNumerator, + "fee_scheduler_mode": baseFee.FeeSchedulerMode, + "number_of_period": baseFee.NumberOfPeriod, + "period_frequency": baseFee.PeriodFrequency, + "reduction_factor": baseFee.ReductionFactor, + }) + return err +} + +func upsertDammV2PoolDynamicFee( + ctx context.Context, + db database.DBTX, + account solana.PublicKey, + dynamicFee *meteora_damm_v2.DynamicFeeStruct, +) error { + sqlDynamicFee := ` + INSERT INTO sol_meteora_damm_v2_pool_dynamic_fees ( + pool, + initialized, + max_volatility_accumulator, + variable_fee_control, + bin_step, + filter_period, + decay_period, + reduction_factor, + last_update_timestamp, + bin_step_u128, + sqrt_price_reference, + volatility_accumulator, + volatility_reference, + created_at, + updated_at + ) VALUES ( + @pool, + @initialized, + @max_volatility_accumulator, + @variable_fee_control, + @bin_step, + @filter_period, + @decay_period, + @reduction_factor, + @last_update_timestamp, + @bin_step_u128, + @sqrt_price_reference, + @volatility_accumulator, + @volatility_reference, + NOW(), + NOW() + ) + ON CONFLICT (pool) DO UPDATE SET + initialized = EXCLUDED.initialized, + max_volatility_accumulator = EXCLUDED.max_volatility_accumulator, + variable_fee_control = EXCLUDED.variable_fee_control, + bin_step = EXCLUDED.bin_step, + filter_period = EXCLUDED.filter_period, + decay_period = EXCLUDED.decay_period, + reduction_factor = EXCLUDED.reduction_factor, + last_update_timestamp = EXCLUDED.last_update_timestamp, + bin_step_u128 = EXCLUDED.bin_step_u128, + sqrt_price_reference = EXCLUDED.sqrt_price_reference, + volatility_accumulator = EXCLUDED.volatility_accumulator, + volatility_reference = EXCLUDED.volatility_reference, + updated_at = NOW() + ` + + _, err := db.Exec(ctx, sqlDynamicFee, pgx.NamedArgs{ + "pool": account.String(), + "initialized": dynamicFee.Initialized, + "max_volatility_accumulator": dynamicFee.MaxVolatilityAccumulator, + "variable_fee_control": dynamicFee.VariableFeeControl, + "bin_step": dynamicFee.BinStep, + "filter_period": dynamicFee.FilterPeriod, + "decay_period": dynamicFee.DecayPeriod, + "reduction_factor": dynamicFee.ReductionFactor, + "last_update_timestamp": dynamicFee.LastUpdateTimestamp, + "bin_step_u128": dynamicFee.BinStepU128, + "sqrt_price_reference": dynamicFee.SqrtPriceReference, + "volatility_accumulator": dynamicFee.VolatilityAccumulator, + "volatility_reference": dynamicFee.VolatilityReference, + }) + return err +} + +func upsertDammV2Position( + ctx context.Context, + db database.DBTX, + account solana.PublicKey, + position 
*meteora_damm_v2.PositionState, +) error { + sql := ` + INSERT INTO sol_meteora_damm_v2_positions ( + address, + pool, + nft_mint, + fee_a_per_token_checkpoint, + fee_b_per_token_checkpoint, + fee_a_pending, + fee_b_pending, + unlocked_liquidity, + vested_liquidity, + permanent_locked_liquidity, + updated_at, + created_at + ) VALUES ( + @address, + @pool, + @nft_mint, + @fee_a_per_token_checkpoint, + @fee_b_per_token_checkpoint, + @fee_a_pending, + @fee_b_pending, + @unlocked_liquidity, + @vested_liquidity, + @permanent_locked_liquidity, + NOW(), + NOW() + ) + ON CONFLICT (address) DO UPDATE SET + pool = EXCLUDED.pool, + nft_mint = EXCLUDED.nft_mint, + fee_a_per_token_checkpoint = EXCLUDED.fee_a_per_token_checkpoint, + fee_b_per_token_checkpoint = EXCLUDED.fee_b_per_token_checkpoint, + fee_a_pending = EXCLUDED.fee_a_pending, + fee_b_pending = EXCLUDED.fee_b_pending, + unlocked_liquidity = EXCLUDED.unlocked_liquidity, + vested_liquidity = EXCLUDED.vested_liquidity, + permanent_locked_liquidity = EXCLUDED.permanent_locked_liquidity, + updated_at = NOW() + ` + + _, err := db.Exec(ctx, sql, pgx.NamedArgs{ + "address": account.String(), + "pool": position.Pool.String(), + "nft_mint": position.NftMint.String(), + "fee_a_per_token_checkpoint": position.FeeAPerTokenCheckpoint, + "fee_b_per_token_checkpoint": position.FeeBPerTokenCheckpoint, + "fee_a_pending": position.FeeAPending, + "fee_b_pending": position.FeeBPending, + "unlocked_liquidity": position.UnlockedLiquidity.BigInt(), + "vested_liquidity": position.VestedLiquidity.BigInt(), + "permanent_locked_liquidity": position.PermanentLockedLiquidity.BigInt(), + }) + return err +} + +func upsertDammV2PositionMetrics( + ctx context.Context, + db database.DBTX, + account solana.PublicKey, + metrics *meteora_damm_v2.PositionMetrics, +) error { + sql := ` + INSERT INTO sol_meteora_damm_v2_position_metrics ( + position, + total_claimed_a_fee, + total_claimed_b_fee, + created_at, + updated_at + ) VALUES ( + @position, + @total_claimed_a_fee, + @total_claimed_b_fee, + NOW(), + NOW() + ) + ON CONFLICT (position) DO UPDATE SET + total_claimed_a_fee = EXCLUDED.total_claimed_a_fee, + total_claimed_b_fee = EXCLUDED.total_claimed_b_fee, + updated_at = NOW() + ` + + _, err := db.Exec(ctx, sql, pgx.NamedArgs{ + "position": account.String(), + "total_claimed_a_fee": metrics.TotalClaimedAFee, + "total_claimed_b_fee": metrics.TotalClaimedBFee, + }) + return err +} diff --git a/solana/indexer/dbc.go b/solana/indexer/dbc.go new file mode 100644 index 00000000..b1be576e --- /dev/null +++ b/solana/indexer/dbc.go @@ -0,0 +1,208 @@ +package indexer + +import ( + "context" + "fmt" + "strings" + "time" + + "api.audius.co/database" + "api.audius.co/solana/spl/programs/meteora_damm_v2" + "api.audius.co/solana/spl/programs/meteora_dbc" + "github.com/gagliardetto/solana-go" + "github.com/jackc/pgx/v5" + "go.uber.org/zap" +) + +func processDbcInstruction( + ctx context.Context, + db database.DBTX, + rpcClient RpcClient, + slot uint64, + tx *solana.Transaction, + instructionIndex int, + instruction solana.CompiledInstruction, + signature string, + instLogger *zap.Logger, +) error { + accounts, err := instruction.ResolveInstructionAccounts(&tx.Message) + if err != nil { + return fmt.Errorf("error resolving instruction accounts %d: %w", instructionIndex, err) + } + + inst, err := meteora_dbc.DecodeInstruction(accounts, []byte(instruction.Data)) + if err != nil { + // Ignore unknown instruction types. + // Not all DBC instruction types are implemented yet. 
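+		// (gagliardetto/binary reports an unregistered discriminator with a
+		// "no known type for type" error, which the strings.Contains check
+		// below matches on.)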
+ // See: solana/spl/programs/meteora_dbc/instruction.go + // See: https://github.com/gagliardetto/binary/blob/v0.8.0/variant.go#L315 + if strings.Contains(err.Error(), "no known type for type") { + return nil // ignore unknown instruction types + } + return fmt.Errorf("error decoding meteora_dbc instruction %d: %w", instructionIndex, err) + } + + switch inst.TypeID { + case meteora_dbc.InstructionImplDef.TypeID(meteora_dbc.Instruction_MigrationDammV2): + { + if migrationInst, ok := inst.Impl.(*meteora_dbc.MigrationDammV2); ok { + err := insertDbcMigration(ctx, db, dbcMigrationRow{ + signature: signature, + instructionIndex: instructionIndex, + slot: slot, + dbcPool: migrationInst.GetVirtualPool().PublicKey.String(), + migrationMetadata: migrationInst.GetMigrationMetadata().PublicKey.String(), + config: migrationInst.GetConfig().PublicKey.String(), + dbcPoolAuthority: migrationInst.GetPoolAuthority().PublicKey.String(), + dammV2Pool: migrationInst.GetPool().PublicKey.String(), + firstPositionNftMint: migrationInst.GetFirstPositionNftMint().PublicKey.String(), + firstPositionNftAccount: migrationInst.GetFirstPositionNftAccount().PublicKey.String(), + firstPosition: migrationInst.GetFirstPosition().PublicKey.String(), + secondPositionNftMint: migrationInst.GetSecondPositionNftMint().PublicKey.String(), + secondPositionNftAccount: migrationInst.GetSecondPositionNftAccount().PublicKey.String(), + secondPosition: migrationInst.GetSecondPosition().PublicKey.String(), + dammPoolAuthority: migrationInst.GetPoolAuthority().PublicKey.String(), + baseMint: migrationInst.GetBaseMint().PublicKey.String(), + quoteMint: migrationInst.GetQuoteMint().PublicKey.String(), + }) + if err != nil { + return fmt.Errorf("failed to insert dbc migration at instruction %d: %w", instructionIndex, err) + } + instLogger.Info("dbc migrationDammV2", + zap.String("mint", migrationInst.GetBaseMint().PublicKey.String()), + zap.String("dbcPool", migrationInst.GetVirtualPool().PublicKey.String()), + zap.String("dammV2Pool", migrationInst.GetPool().PublicKey.String()), + ) + + // Also index the pool and positions + + var dammPool meteora_damm_v2.Pool + err = withRetries(func() error { + return rpcClient.GetAccountDataBorshInto(ctx, migrationInst.GetPool().PublicKey, &dammPool) + }, 5, time.Second*1) + if err != nil { + return fmt.Errorf("failed to get damm v2 pool account data after retries: %w", err) + } else { + err = upsertDammV2Pool(ctx, db, migrationInst.GetPool().PublicKey, &dammPool) + if err != nil { + return fmt.Errorf("failed to upsert damm v2 pool: %w", err) + } + } + + var firstPosition meteora_damm_v2.PositionState + err = withRetries(func() error { + return rpcClient.GetAccountDataBorshInto(ctx, migrationInst.GetFirstPosition().PublicKey, &firstPosition) + }, 5, time.Second*1) + if err != nil { + return fmt.Errorf("failed to get first damm v2 position account data: %w", err) + } else { + err = upsertDammV2Position(ctx, db, migrationInst.GetFirstPosition().PublicKey, &firstPosition) + if err != nil { + return fmt.Errorf("failed to upsert first damm v2 position: %w", err) + } + } + + var secondPosition meteora_damm_v2.PositionState + err = withRetries(func() error { + return rpcClient.GetAccountDataBorshInto(ctx, migrationInst.GetSecondPosition().PublicKey, &secondPosition) + }, 5, time.Second*1) + if err != nil { + return fmt.Errorf("failed to get second damm v2 position account data: %w", err) + } else { + err = upsertDammV2Position(ctx, db, migrationInst.GetSecondPosition().PublicKey, &secondPosition) + if err != 
nil { + return fmt.Errorf("failed to upsert second damm v2 position: %w", err) + } + } + } + } + } + return nil +} + +type dbcMigrationRow struct { + signature string + instructionIndex int + slot uint64 + dbcPool string + migrationMetadata string + config string + dbcPoolAuthority string + dammV2Pool string + firstPositionNftMint string + firstPositionNftAccount string + firstPosition string + secondPositionNftMint string + secondPositionNftAccount string + secondPosition string + dammPoolAuthority string + baseMint string + quoteMint string +} + +func insertDbcMigration(ctx context.Context, db database.DBTX, row dbcMigrationRow) error { + sql := ` + INSERT INTO sol_meteora_dbc_migrations ( + signature, + instruction_index, + slot, + dbc_pool, + migration_metadata, + config, + dbc_pool_authority, + damm_v2_pool, + first_position_nft_mint, + first_position_nft_account, + first_position, + second_position_nft_mint, + second_position_nft_account, + second_position, + damm_pool_authority, + base_mint, + quote_mint, + created_at, + updated_at + ) VALUES ( + @signature, + @instructionIndex, + @slot, + @dbcPool, + @migrationMetadata, + @config, + @dbcPoolAuthority, + @dammV2Pool, + @firstPositionNftMint, + @firstPositionNftAccount, + @firstPosition, + @secondPositionNftMint, + @secondPositionNftAccount, + @secondPosition, + @dammPoolAuthority, + @baseMint, + @quoteMint, + NOW(), + NOW() + ) + ON CONFLICT DO NOTHING + ` + _, err := db.Exec(ctx, sql, pgx.NamedArgs{ + "signature": row.signature, + "instructionIndex": row.instructionIndex, + "slot": row.slot, + "dbcPool": row.dbcPool, + "migrationMetadata": row.migrationMetadata, + "config": row.config, + "dbcPoolAuthority": row.dbcPoolAuthority, + "dammV2Pool": row.dammV2Pool, + "firstPositionNftMint": row.firstPositionNftMint, + "firstPositionNftAccount": row.firstPositionNftAccount, + "firstPosition": row.firstPosition, + "secondPositionNftMint": row.secondPositionNftMint, + "secondPositionNftAccount": row.secondPositionNftAccount, + "secondPosition": row.secondPosition, + "dammPoolAuthority": row.dammPoolAuthority, + "baseMint": row.baseMint, + "quoteMint": row.quoteMint, + }) + return err +} diff --git a/solana/indexer/processor.go b/solana/indexer/processor.go index aecd5be8..c59ddfd4 100644 --- a/solana/indexer/processor.go +++ b/solana/indexer/processor.go @@ -8,6 +8,7 @@ import ( "api.audius.co/config" "api.audius.co/database" "api.audius.co/solana/spl/programs/claimable_tokens" + "api.audius.co/solana/spl/programs/meteora_dbc" "api.audius.co/solana/spl/programs/payment_router" "api.audius.co/solana/spl/programs/reward_manager" "github.com/gagliardetto/solana-go" @@ -61,32 +62,33 @@ func (p *DefaultProcessor) ProcessSignature(ctx context.Context, slot uint64, tx // Check if the transaction is in the cache if p.transactionCache != nil { - if _, ok := p.transactionCache.Get(txSig); ok { + if res, ok := p.transactionCache.Get(txSig); ok { logger.Debug("cache hit") - // If we hit the cache, it's already been processed - return nil + txRes = res } else { logger.Debug("cache miss") } } - // If the transaction is not in the cache, fetch it from the RPC - res, err := withRetries(func() (*rpc.GetTransactionResult, error) { - return p.rpcClient.GetTransaction( - ctx, - txSig, - &rpc.GetTransactionOpts{ - Commitment: rpc.CommitmentConfirmed, - MaxSupportedTransactionVersion: &rpc.MaxSupportedTransactionVersion0, - }, - ) - }, 5, 1*time.Second) - if err != nil { - return fmt.Errorf("failed to get transaction: %w", err) - } - if p.transactionCache != 
nil { - p.transactionCache.Set(txSig, res) - txRes = res + if txRes == nil { + // If the transaction is not in the cache, fetch it from the RPC + res, err := withRetriesResult(func() (*rpc.GetTransactionResult, error) { + return p.rpcClient.GetTransaction( + ctx, + txSig, + &rpc.GetTransactionOpts{ + Commitment: rpc.CommitmentConfirmed, + MaxSupportedTransactionVersion: &rpc.MaxSupportedTransactionVersion0, + }, + ) + }, 5, 1*time.Second) + if err != nil { + return fmt.Errorf("failed to get transaction: %w", err) + } + if p.transactionCache != nil { + p.transactionCache.Set(txSig, res) + txRes = res + } } tx, err := txRes.Transaction.GetTransaction() @@ -175,6 +177,13 @@ func (p *DefaultProcessor) ProcessTransaction( return fmt.Errorf("error processing payment_router instruction %d: %w", instructionIndex, err) } } + case meteora_dbc.ProgramID: + { + err := processDbcInstruction(ctx, p.pool, p.rpcClient, slot, tx, instructionIndex, instruction, signature, instLogger) + if err != nil { + return fmt.Errorf("error processing meteora_dbc instruction %d: %w", instructionIndex, err) + } + } } } diff --git a/solana/indexer/solana_indexer.go b/solana/indexer/solana_indexer.go index f2edef52..4a174da6 100644 --- a/solana/indexer/solana_indexer.go +++ b/solana/indexer/solana_indexer.go @@ -3,11 +3,9 @@ package indexer import ( "context" "fmt" - "time" "api.audius.co/config" "api.audius.co/database" - "api.audius.co/jobs" "api.audius.co/logging" "github.com/gagliardetto/solana-go" "github.com/gagliardetto/solana-go/rpc" @@ -43,6 +41,8 @@ type SolanaIndexer struct { pool database.DbPool workerCount int32 + dammV2Indexer *DammV2Indexer + checkpointId string logger *zap.Logger @@ -77,6 +77,16 @@ func New(config config.Config) *SolanaIndexer { MaxReconnectAttempts: 5, }) + dammV2Indexer := &DammV2Indexer{ + pool: pool, + grpcConfig: GrpcConfig{ + Server: config.SolanaConfig.GrpcProvider, + ApiToken: config.SolanaConfig.GrpcToken, + MaxReconnectAttempts: 5, + }, + logger: logger, + } + s := &SolanaIndexer{ rpcClient: rpcClient, grpcClient: grpcClient, @@ -84,6 +94,9 @@ func New(config config.Config) *SolanaIndexer { config: config, pool: pool, workerCount: workerCount, + + dammV2Indexer: dammV2Indexer, + processor: NewDefaultProcessor( rpcClient, pool, @@ -97,15 +110,17 @@ func New(config config.Config) *SolanaIndexer { func (s *SolanaIndexer) Start(ctx context.Context) error { go s.ScheduleRetries(ctx, s.config.SolanaIndexerRetryInterval) - statsJob := jobs.NewCoinStatsJob(s.config, s.pool) - statsCtx := context.WithoutCancel(ctx) - statsJob.ScheduleEvery(statsCtx, 5*time.Minute) - go statsJob.Run(statsCtx) + // statsJob := jobs.NewCoinStatsJob(s.config, s.pool) + // statsCtx := context.WithoutCancel(ctx) + // statsJob.ScheduleEvery(statsCtx, 5*time.Minute) + // go statsJob.Run(statsCtx) + + // dbcJob := jobs.NewCoinDBCJob(s.config, s.pool) + // dbcCtx := context.WithoutCancel(ctx) + // dbcJob.ScheduleEvery(dbcCtx, 5*time.Minute) + // go dbcJob.Run(dbcCtx) - dbcJob := jobs.NewCoinDBCJob(s.config, s.pool) - dbcCtx := context.WithoutCancel(ctx) - dbcJob.ScheduleEvery(dbcCtx, 5*time.Minute) - go dbcJob.Run(dbcCtx) + go s.dammV2Indexer.Start(ctx) err := s.Subscribe(ctx) if err != nil { diff --git a/solana/indexer/subscription.go b/solana/indexer/subscription.go index 7a3c2976..50729566 100644 --- a/solana/indexer/subscription.go +++ b/solana/indexer/subscription.go @@ -103,7 +103,7 @@ func (s *SolanaIndexer) Subscribe(ctx context.Context) error { return fmt.Errorf("failed to get last indexed slot: %w", err) } 
- latestSlot, err := withRetries(func() (uint64, error) { + latestSlot, err := withRetriesResult(func() (uint64, error) { return s.rpcClient.GetSlot(ctx, "confirmed") }, 5, time.Second*2) if err != nil { @@ -221,7 +221,7 @@ func buildSubscriptionRequest(mintAddresses []string, dbcPoolConfigs []string) ( for _, config := range dbcPoolConfigs { dbcFilter := pb.SubscribeRequestFilterAccounts{ - Owner: []string{meteora_dbc.DbcProgramID.String()}, + Owner: []string{meteora_dbc.ProgramID.String()}, Filters: []*pb.SubscribeRequestFilterAccountsFilter{ { Filter: &pb.SubscribeRequestFilterAccountsFilter_Memcmp{ @@ -266,7 +266,6 @@ func (s *SolanaIndexer) handleMessage(ctx context.Context, msg *pb.SubscribeUpda if slotUpdate := msg.GetSlot(); slotUpdate != nil && slotUpdate.Slot > 0 { // only update every 10 slots to reduce db load and write latency if slotUpdate.Slot%10 == 0 { - s.logger.Debug("slot update", zap.Uint64("slot", slotUpdate.Slot)) err := updateCheckpoint(ctx, s.pool, s.checkpointId, slotUpdate.Slot) if err != nil { logger.Error("failed to update slot checkpoint", zap.Error(err)) diff --git a/solana/indexer/unprocessed_transactions.go b/solana/indexer/unprocessed_transactions.go index 7e6b90c3..d44900a7 100644 --- a/solana/indexer/unprocessed_transactions.go +++ b/solana/indexer/unprocessed_transactions.go @@ -15,28 +15,24 @@ func (s *SolanaIndexer) ScheduleRetries(ctx context.Context, interval time.Durat ticker := time.NewTicker(interval) defer ticker.Stop() - go func() { - for { - select { - case <-ctx.Done(): - s.logger.Info("context cancelled, stopping retry ticker") - return - case <-ticker.C: - err := s.RetryUnprocessedTransactions(ctx) - if err != nil { - s.logger.Error("failed to retry unprocessed transactions", zap.Error(err)) - } + for { + select { + case <-ctx.Done(): + s.logger.Info("context cancelled, stopping retry ticker") + return + case <-ticker.C: + err := s.RetryUnprocessedTransactions(ctx) + if err != nil { + s.logger.Error("failed to retry unprocessed transactions", zap.Error(err)) } } - }() + } } func (s *SolanaIndexer) RetryUnprocessedTransactions(ctx context.Context) error { limit := 100 offset := 0 - logger := s.logger.With( - zap.String("indexerSource", "retryUnprocessedTransactions"), - ) + logger := s.logger.Named("RetryUnprocessedTransactions") count := 0 start := time.Now() logger.Debug("starting retry of unprocessed transactions...") diff --git a/solana/indexer/utils.go b/solana/indexer/utils.go index f2be0dad..3716f440 100644 --- a/solana/indexer/utils.go +++ b/solana/indexer/utils.go @@ -9,7 +9,21 @@ import ( "github.com/jackc/pgx/v5" ) -func withRetries[T any](f func() (T, error), maxRetries int, interval time.Duration) (T, error) { +func withRetries(f func() error, maxRetries int, interval time.Duration) error { + err := f() + retries := 0 + for err != nil && retries < maxRetries { + time.Sleep(interval) + err = f() + retries++ + } + if err != nil { + return fmt.Errorf("retry failed: %w", err) + } + return nil +} + +func withRetriesResult[T any](f func() (T, error), maxRetries int, interval time.Duration) (T, error) { result, err := f() retries := 0 for err != nil && retries < maxRetries { diff --git a/solana/spl/programs/meteora_damm_v2/accounts.go b/solana/spl/programs/meteora_damm_v2/accounts.go new file mode 100644 index 00000000..21c54f0a --- /dev/null +++ b/solana/spl/programs/meteora_damm_v2/accounts.go @@ -0,0 +1,13 @@ +package meteora_damm_v2 + +import "github.com/gagliardetto/solana-go" + +// Derives the position PDA from a position NFT mint 
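For context: a DAMM v2 position account lives at a program-derived address (PDA) computed from the constant seed "position" and the position NFT mint, under the DAMM v2 program id. A standalone sketch of that derivation using the same gagliardetto/solana-go API; the sample NFT mint is the one decoded in the test fixtures later in this patch. The function that follows wraps exactly this call.

    package main

    import (
        "fmt"

        "github.com/gagliardetto/solana-go"
    )

    func main() {
        // DAMM v2 program id (see instruction.go in this package).
        programID := solana.MustPublicKeyFromBase58("cpamdpZCGKUy5JxQXB4dcpGPiikHawvSWAd6mEn1sGG")

        // Sample position NFT mint; any mint public key works as input.
        nftMint := solana.MustPublicKeyFromBase58("A87b7M7UnQCicj6Ui7ktCL9CoN9xnnLbp3bezoDS26uX")

        // FindProgramAddress searches bump seeds from 255 downward until the
        // resulting address falls off the ed25519 curve.
        seeds := [][]byte{[]byte("position"), nftMint.Bytes()}
        pda, bump, err := solana.FindProgramAddress(seeds, programID)
        if err != nil {
            panic(err)
        }
        fmt.Println(pda, bump)
    }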
+func DerivePositionPDA(positionNft solana.PublicKey) (solana.PublicKey, error) { + seeds := [][]byte{[]byte("position"), positionNft.Bytes()} + address, _, err := solana.FindProgramAddress(seeds, ProgramID) + if err != nil { + return solana.PublicKey{}, err + } + return address, nil +} diff --git a/solana/spl/programs/meteora_damm_v2/client.go b/solana/spl/programs/meteora_damm_v2/client.go new file mode 100644 index 00000000..0775ede5 --- /dev/null +++ b/solana/spl/programs/meteora_damm_v2/client.go @@ -0,0 +1,113 @@ +package meteora_damm_v2 + +import ( + "context" + "math/big" + "sort" + + bin "github.com/gagliardetto/binary" + "github.com/gagliardetto/solana-go" + "github.com/gagliardetto/solana-go/programs/token" + "github.com/gagliardetto/solana-go/rpc" + "go.uber.org/zap" +) + +type RpcClient interface { + GetAccountDataBorshInto(ctx context.Context, account solana.PublicKey, out interface{}) error +} + +type Client struct { + client *rpc.Client + logger *zap.Logger +} + +func NewClient( + client *rpc.Client, + logger *zap.Logger, +) *Client { + return &Client{ + client: client, + logger: logger, + } +} + +// Gets the current Pool state. +func (c *Client) GetPool(ctx context.Context, account solana.PublicKey) (*Pool, error) { + var pool Pool + err := c.client.GetAccountDataBorshInto(ctx, account, &pool) + if err != nil { + return nil, err + } + return &pool, nil +} + +// Gets a position by its address. +func (c *Client) GetPosition(ctx context.Context, account solana.PublicKey) (*PositionState, error) { + var position PositionState + err := c.client.GetAccountDataBorshInto(ctx, account, &position) + if err != nil { + return nil, err + } + return &position, nil +} + +// Gets all position NFTs held by a wallet. +func (c *Client) GetPositionNFTs(ctx context.Context, owner solana.PublicKey) ([]solana.PublicKey, error) { + accounts, err := c.client.GetTokenAccountsByOwner(ctx, owner, &rpc.GetTokenAccountsConfig{ + ProgramId: &solana.Token2022ProgramID, + }, &rpc.GetTokenAccountsOpts{}) + if err != nil { + return nil, err + } + + var positionNFTs []solana.PublicKey + for _, acc := range accounts.Value { + data := token.Account{} + bin.NewBorshDecoder(acc.Account.Data.GetBinary()).Decode(&data) + if data.Amount == uint64(1) { + positionNFTs = append(positionNFTs, data.Mint) + } + } + + return positionNFTs, nil +} + +// Gets all the positions by an owner, sorted by total liquidity (descending). 
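A position's total liquidity is the sum of three u128 buckets: unlocked, vested, and permanently locked. The function that follows compares those sums with math/big, which cannot overflow; a condensed sketch of the same descending comparator, with plain big.Ints standing in for the decoded bin.Uint128 fields:

    package main

    import (
        "fmt"
        "math/big"
        "sort"
    )

    // total sums the three liquidity buckets of a position.
    func total(unlocked, vested, permanentLocked *big.Int) *big.Int {
        t := new(big.Int).Add(unlocked, vested)
        return t.Add(t, permanentLocked)
    }

    func main() {
        totals := []*big.Int{
            total(big.NewInt(25), big.NewInt(10), big.NewInt(5)),
            total(big.NewInt(100), big.NewInt(0), big.NewInt(50)),
        }
        // Descending: i sorts before j when totals[i] > totals[j].
        sort.Slice(totals, func(i, j int) bool {
            return totals[i].Cmp(totals[j]) > 0
        })
        fmt.Println(totals) // [150 40]
    }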
+func (c *Client) GetPositionsByOwner(ctx context.Context, owner solana.PublicKey) ([]*PositionState, error) { + positionNFTs, err := c.GetPositionNFTs(ctx, owner) + if err != nil { + return nil, err + } + + var positions []*PositionState + for _, nft := range positionNFTs { + pda, err := DerivePositionPDA(nft) + if err != nil { + return nil, err + } + position, err := c.GetPosition(ctx, pda) + if err != nil { + return nil, err + } + positions = append(positions, position) + } + + // Sort positions by total liquidity + sort.Slice(positions, func(i, j int) bool { + vestedLiquidity := (&big.Int{}).SetBytes(positions[i].VestedLiquidity.Bytes()) + permanentLockedLiquidity := (&big.Int{}).SetBytes(positions[i].PermanentLockedLiquidity.Bytes()) + unlockedLiquidity := (&big.Int{}).SetBytes(positions[i].UnlockedLiquidity.Bytes()) + totalLiquidityI := (&big.Int{}).Add(vestedLiquidity, permanentLockedLiquidity) + totalLiquidityI.Add(totalLiquidityI, unlockedLiquidity) + + vestedLiquidity = (&big.Int{}).SetBytes(positions[j].VestedLiquidity.Bytes()) + permanentLockedLiquidity = (&big.Int{}).SetBytes(positions[j].PermanentLockedLiquidity.Bytes()) + unlockedLiquidity = (&big.Int{}).SetBytes(positions[j].UnlockedLiquidity.Bytes()) + totalLiquidityJ := (&big.Int{}).Add(vestedLiquidity, permanentLockedLiquidity) + totalLiquidityJ.Add(totalLiquidityJ, unlockedLiquidity) + + return totalLiquidityJ.Cmp(totalLiquidityI) < 0 + }) + + return positions, nil +} diff --git a/solana/spl/programs/meteora_damm_v2/client_test.go b/solana/spl/programs/meteora_damm_v2/client_test.go new file mode 100644 index 00000000..12f1266a --- /dev/null +++ b/solana/spl/programs/meteora_damm_v2/client_test.go @@ -0,0 +1,24 @@ +package meteora_damm_v2_test + +import ( + "context" + "testing" + + "api.audius.co/solana/spl/programs/meteora_damm_v2" + "github.com/gagliardetto/solana-go" + "github.com/gagliardetto/solana-go/rpc" + "github.com/stretchr/testify/require" + "go.uber.org/zap" +) + +func TestActualFetch(t *testing.T) { + ctx := context.Background() + client := meteora_damm_v2.NewClient(rpc.New(rpc.MainNetBeta_RPC), zap.NewNop()) + + owner, err := solana.PublicKeyFromBase58("EF1zneAqA2mwjkD3Lj7sQnMhR2uorGqEHXNtAWfGdCu2") + require.NoError(t, err) + + positions, err := client.GetPositionsByOwner(ctx, owner) + require.NoError(t, err) + require.Greater(t, len(positions), 0) +} diff --git a/solana/spl/programs/meteora_damm_v2/instruction.go b/solana/spl/programs/meteora_damm_v2/instruction.go new file mode 100644 index 00000000..3e6d1d5d --- /dev/null +++ b/solana/spl/programs/meteora_damm_v2/instruction.go @@ -0,0 +1,9 @@ +package meteora_damm_v2 + +import "github.com/gagliardetto/solana-go" + +var ProgramID = solana.MustPublicKeyFromBase58("cpamdpZCGKUy5JxQXB4dcpGPiikHawvSWAd6mEn1sGG") + +func SetProgramID(pubkey solana.PublicKey) { + ProgramID = pubkey +} diff --git a/solana/spl/programs/meteora_damm_v2/types.go b/solana/spl/programs/meteora_damm_v2/types.go new file mode 100644 index 00000000..65cd22ee --- /dev/null +++ b/solana/spl/programs/meteora_damm_v2/types.go @@ -0,0 +1,131 @@ +package meteora_damm_v2 + +import ( + bin "github.com/gagliardetto/binary" + "github.com/gagliardetto/solana-go" +) + +type BaseFeeStruct struct { + CliffFeeNumerator uint64 + FeeSchedulerMode uint8 + Padding0 [5]uint8 + NumberOfPeriod uint16 + PeriodFrequency uint64 + ReductionFactor uint64 + Padding1 uint64 +} + +type DynamicFeeStruct struct { + Initialized uint8 + Padding [7]uint8 + MaxVolatilityAccumulator uint32 + VariableFeeControl uint32 + 
BinStep uint16 + FilterPeriod uint16 + DecayPeriod uint16 + ReductionFactor uint16 + LastUpdateTimestamp uint64 + BinStepU128 bin.Uint128 + SqrtPriceReference bin.Uint128 + VolatilityAccumulator bin.Uint128 + VolatilityReference bin.Uint128 +} + +type PoolFeesStruct struct { + BaseFee BaseFeeStruct + ProtocolFeePercent uint8 + PartnerFeePercent uint8 + ReferralFeePercent uint8 + Padding0 [5]uint8 + DynamicFee DynamicFeeStruct + Padding1 [2]uint64 +} + +type PoolMetrics struct { + TotalLpAFee bin.Uint128 + TotalLpBFee bin.Uint128 + TotalProtocolAFee uint64 + TotalProtocolBFee uint64 + TotalPartnerAFee uint64 + TotalPartnerBFee uint64 + TotalPosition uint64 + Padding uint64 +} + +type RewardInfo struct { + Initialized uint8 + RewardTokenFlag uint8 + Padding0 [6]uint8 + Padding1 [8]uint8 + Mint solana.PublicKey + Vault solana.PublicKey + Funder solana.PublicKey + RewardDuration uint64 + RewardDurationEnd uint64 + RewardRate bin.Uint128 + RewardPerTokenStored [32]uint8 + LastUpdateTime uint64 + CumulativeSecondsWithEmptyLiquidity uint64 +} + +type Pool struct { + Discriminator [8]uint8 + PoolFees PoolFeesStruct + TokenAMint solana.PublicKey + TokenBMint solana.PublicKey + TokenAVault solana.PublicKey + TokenBVault solana.PublicKey + WhitelistedVault solana.PublicKey + Partner solana.PublicKey + Liquidity bin.Uint128 + Padding bin.Uint128 + ProtocolAFee uint64 + ProtocolBFee uint64 + PartnerAFee uint64 + PartnerBFee uint64 + SqrtMinPrice bin.Uint128 + SqrtMaxPrice bin.Uint128 + SqrtPrice bin.Uint128 + ActivationPoint uint64 + ActivationType uint8 + PoolStatus uint8 + TokenAFlag uint8 + TokenBFlag uint8 + CollectFeeMode uint8 + PoolType uint8 + Padding0 [2]uint8 + FeeAPerLiquidity Uint256LE + FeeBPerLiquidity Uint256LE + PermanentLockLiquidity bin.Uint128 + Metrics PoolMetrics + Creator solana.PublicKey + Padding1 [6]uint64 + RewardInfos [2]RewardInfo +} + +type PositionMetrics struct { + TotalClaimedAFee uint64 + TotalClaimedBFee uint64 +} + +type UserRewardInfo struct { + RewardPerTokenCheckpoint [32]uint8 + RewardPendings uint64 + TotalClaimedRewards uint64 +} + +type PositionState struct { + Discriminator [8]uint8 + Pool solana.PublicKey + NftMint solana.PublicKey + FeeAPerTokenCheckpoint Uint256LE + FeeBPerTokenCheckpoint Uint256LE + FeeAPending uint64 + FeeBPending uint64 + UnlockedLiquidity bin.Uint128 + VestedLiquidity bin.Uint128 + PermanentLockedLiquidity bin.Uint128 + Metrics PositionMetrics + RewardInfos [2]UserRewardInfo + Padding [6]bin.Uint128 +} diff --git a/solana/spl/programs/meteora_damm_v2/types_test.go b/solana/spl/programs/meteora_damm_v2/types_test.go new file mode 100644 index 00000000..a08b148c --- /dev/null +++ b/solana/spl/programs/meteora_damm_v2/types_test.go @@ -0,0 +1,46 @@ +package meteora_damm_v2 + +import ( + "encoding/base64" + "testing" + + bin "github.com/gagliardetto/binary" + "github.com/test-go/testify/assert" + "github.com/test-go/testify/require" +) + +func TestDecodingPool(t *testing.T) { + // Example data from mainnet + // Source: https://explorer.solana.com/address/D9iJqMbgQJLFt5PAAiTJTMNsMAMueukzoe1EK2r1g3WH + data := 
"8ZptBBGxbbyAlpgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUAAAAAAABAAAAAAAAAGCk3AC8AwAAAQAKAHgAiBPGROhoAAAAAMsQx7q4jQYAAAAAAAAAAAChIqYBNRzVAQAAAAAAAAAA4CICAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACOkzYTTyijBQphnsA7NYukXXDff56Bp/GJdn5GamlMZ7/DPMLnXBSHbMN5KDkE9JB3ZpESJXuzrf82mLYCJJQHm/3HSrkp1wPzAbe6y0uFypnr4Yeci2kPU8TWr9TEmTY/2aZknQDPJED5N2M3ytBL5gl4lD8TdKznaJkMDHT44AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAANpjaB9yhrzIBnGeLCtQogFXJDv8lmixFSC8U4Q+3NsISFBg5M0NQHAAaMJHGBEGAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFiinY8UAAAAAAAAAAAAAAAAAAAAAAAAAFA7AQABAAAAAAAAAAAAAACbV2lOqRpchLHE/v8AAAAAIiTN1Ql11QEAAAAAAAAAAIhx5mgAAAAAAQAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAJE91kBWow0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAASFBg5M0NQHAAaMJHGBEGAAAAAAAAAAAAAAAAAAAAAAASYim9UgAAAAAAAAAAAAAAAAAAAAAAAABYop2PFAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAAAAAAAAAAAA2mNoH3KGvMgGcZ4sK1CiAVckO/yWaLEVILxThD7c2wgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=" + bytes, err := base64.StdEncoding.DecodeString(data) + require.NoError(t, err) + + var pool Pool + err = bin.NewBorshDecoder(bytes).Decode(&pool) + require.NoError(t, err) + + assert.Equal(t, int(10000000), int(pool.PoolFees.BaseFee.CliffFeeNumerator)) + assert.Equal(t, "bnWKPK7YTUJTe3A3HTGEJrUEoAddRgRjWSwf7MwxMP3", pool.TokenAMint.String()) + assert.Equal(t, "9LzCMqDgTKYz9Drzqnpgee3SGa89up3a247ypMj2xrqM", pool.TokenBMint.String()) + assert.Equal(t, "31500505798829827035928817465053256", pool.Liquidity.String()) + assert.Equal(t, "3838765547535761", pool.FeeBPerLiquidity.String()) + assert.Equal(t, int(1759932808), int(pool.ActivationPoint)) + // assert.Equal(t, "", pool.FeeBPerLiquidity.String()) +} + +func TestDecodingPositionState(t *testing.T) { + // Example data from mainnet + // Source: https://explorer.solana.com/address/5bYLydDXt1K5zroychcbrVbhGRUpheXdq5w41uccazPB + data := "qryP5HpA99C0h5iaMb9or5qzYmaPKH7cBpP1GTyw5pa9SMlEQMuk4oeLsnqCTyioPLOFt664lEHr2woSYFq4Z3N6xFLWwGDSAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADUszHGm5oNAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACQoMPLmBiA4ADThI4wIAwAAAAAAAAAAABGmGkQpAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" + bytes, err := base64.StdEncoding.DecodeString(data) + require.NoError(t, err) + + var position PositionState + err = bin.NewBorshDecoder(bytes).Decode(&position) + require.NoError(t, err) + + assert.Equal(t, "D9iJqMbgQJLFt5PAAiTJTMNsMAMueukzoe1EK2r1g3WH", position.Pool.String()) + assert.Equal(t, "A87b7M7UnQCicj6Ui7ktCL9CoN9xnnLbp3bezoDS26uX", position.NftMint.String()) + assert.Equal(t, "15750252899414913517964408732526628", position.PermanentLockedLiquidity.String()) +} diff --git a/solana/spl/programs/meteora_damm_v2/uint256le.go b/solana/spl/programs/meteora_damm_v2/uint256le.go new file mode 100644 index 
00000000..523cb206
--- /dev/null
+++ b/solana/spl/programs/meteora_damm_v2/uint256le.go
@@ -0,0 +1,49 @@
+package meteora_damm_v2
+
+import (
+	"database/sql/driver"
+	"fmt"
+	"math/big"
+
+	bin "github.com/gagliardetto/binary"
+)
+
+// Struct wrapper for a little-endian uint256 (as big.Int).
+// Implements Borsh serialization and the database/sql driver.Valuer interface.
+type Uint256LE struct {
+	big.Int
+}
+
+func (i *Uint256LE) UnmarshalWithDecoder(decoder *bin.Decoder) error {
+	var b [32]byte
+	err := decoder.Decode(&b)
+	if err != nil {
+		return err
+	}
+	i.SetBytes(reverseBytes(b[:]))
+	return nil
+}
+
+func (i Uint256LE) MarshalWithEncoder(encoder *bin.Encoder) error {
+	b := i.Bytes()
+	if len(b) > 32 {
+		return fmt.Errorf("Uint256LE: integer too large to encode")
+	}
+	padded := make([]byte, 32)
+	copy(padded[32-len(b):], b)
+	_, err := encoder.Write(reverseBytes(padded))
+	return err
+}
+
+func (i Uint256LE) Value() (driver.Value, error) {
+	return i.String(), nil
+}
+
+// reverseBytes reverses a byte slice to match TypeScript Buffer.reverse() behavior
+func reverseBytes(b []byte) []byte {
+	reversed := make([]byte, len(b))
+	for i, j := 0, len(b)-1; i < len(b); i, j = i+1, j-1 {
+		reversed[i] = b[j]
+	}
+	return reversed
+}
diff --git a/solana/spl/programs/meteora_damm_v2/utils.go b/solana/spl/programs/meteora_damm_v2/utils.go
new file mode 100644
index 00000000..771cdb39
--- /dev/null
+++ b/solana/spl/programs/meteora_damm_v2/utils.go
@@ -0,0 +1,27 @@
+package meteora_damm_v2
+
+import (
+	"math/big"
+)
+
+const LIQUIDITY_SCALE = 128
+
+func GetUnclaimedFees(pool *Pool, position *PositionState) (*big.Int, *big.Int) {
+	totalPositionLiquidity := big.NewInt(0).Add(
+		big.NewInt(0).Add(position.UnlockedLiquidity.BigInt(), position.VestedLiquidity.BigInt()),
+		position.PermanentLockedLiquidity.BigInt(),
+	)
+
+	feeA := big.NewInt(0).Sub(&pool.FeeAPerLiquidity.Int, &position.FeeAPerTokenCheckpoint.Int)
+	feeB := big.NewInt(0).Sub(&pool.FeeBPerLiquidity.Int, &position.FeeBPerTokenCheckpoint.Int)
+
+	feeA.Mul(feeA, totalPositionLiquidity)
+	feeB.Mul(feeB, totalPositionLiquidity)
+	feeA.Rsh(feeA, LIQUIDITY_SCALE)
+	feeB.Rsh(feeB, LIQUIDITY_SCALE)
+
+	feeA.Add(feeA, big.NewInt(0).SetUint64(position.FeeAPending))
+	feeB.Add(feeB, big.NewInt(0).SetUint64(position.FeeBPending))
+
+	return feeA, feeB
+}
diff --git a/solana/spl/programs/meteora_dbc/MigrationDammV2.go b/solana/spl/programs/meteora_dbc/MigrationDammV2.go
new file mode 100644
index 00000000..5e6c24aa
--- /dev/null
+++ b/solana/spl/programs/meteora_dbc/MigrationDammV2.go
@@ -0,0 +1,107 @@
+package meteora_dbc
+
+import "github.com/gagliardetto/solana-go"
+
+type MigrationDammV2 struct {
+	solana.AccountMetaSlice `bin:"-" borsh_skip:"true"`
+}
+
+func (inst *MigrationDammV2) GetVirtualPool() *solana.AccountMeta {
+	return inst.AccountMetaSlice.Get(0)
+}
+
+func (inst *MigrationDammV2) GetMigrationMetadata() *solana.AccountMeta {
+	return inst.AccountMetaSlice.Get(1)
+}
+
+func (inst *MigrationDammV2) GetConfig() *solana.AccountMeta {
+	return inst.AccountMetaSlice.Get(2)
+}
+
+func (inst *MigrationDammV2) GetPoolAuthority() *solana.AccountMeta {
+	return inst.AccountMetaSlice.Get(3)
+}
+
+func (inst *MigrationDammV2) GetPool() *solana.AccountMeta {
+	return inst.AccountMetaSlice.Get(4)
+}
+
+func (inst *MigrationDammV2) GetFirstPositionNftMint() *solana.AccountMeta {
+	return inst.AccountMetaSlice.Get(5)
+}
+
+func (inst *MigrationDammV2) GetFirstPositionNftAccount() *solana.AccountMeta {
+	return inst.AccountMetaSlice.Get(6)
+}
+
+func (inst 
*MigrationDammV2) GetFirstPosition() *solana.AccountMeta { + return inst.AccountMetaSlice.Get(7) +} + +func (inst *MigrationDammV2) GetSecondPositionNftMint() *solana.AccountMeta { + return inst.AccountMetaSlice.Get(8) +} + +func (inst *MigrationDammV2) GetSecondPositionNftAccount() *solana.AccountMeta { + return inst.AccountMetaSlice.Get(9) +} + +func (inst *MigrationDammV2) GetSecondPosition() *solana.AccountMeta { + return inst.AccountMetaSlice.Get(10) +} + +func (inst *MigrationDammV2) GetDammPoolAuthority() *solana.AccountMeta { + return inst.AccountMetaSlice.Get(11) +} + +func (inst *MigrationDammV2) GetAmmProgram() *solana.AccountMeta { + return inst.AccountMetaSlice.Get(12) +} + +func (inst *MigrationDammV2) GetBaseMint() *solana.AccountMeta { + return inst.AccountMetaSlice.Get(13) +} + +func (inst *MigrationDammV2) GetQuoteMint() *solana.AccountMeta { + return inst.AccountMetaSlice.Get(14) +} + +func (inst *MigrationDammV2) GetTokenAVault() *solana.AccountMeta { + return inst.AccountMetaSlice.Get(15) +} + +func (inst *MigrationDammV2) GetTokenBVault() *solana.AccountMeta { + return inst.AccountMetaSlice.Get(16) +} + +func (inst *MigrationDammV2) GetBaseVault() *solana.AccountMeta { + return inst.AccountMetaSlice.Get(17) +} + +func (inst *MigrationDammV2) GetQuoteVault() *solana.AccountMeta { + return inst.AccountMetaSlice.Get(18) +} + +func (inst *MigrationDammV2) GetPayer() *solana.AccountMeta { + return inst.AccountMetaSlice.Get(19) +} + +func (inst *MigrationDammV2) GetTokenBaseProgram() *solana.AccountMeta { + return inst.AccountMetaSlice.Get(20) +} + +func (inst *MigrationDammV2) GetTokenQuoteProgram() *solana.AccountMeta { + return inst.AccountMetaSlice.Get(21) +} + +func (inst *MigrationDammV2) GetToken2022Program() *solana.AccountMeta { + return inst.AccountMetaSlice.Get(22) +} + +func (inst *MigrationDammV2) GetDammEventAuthority() *solana.AccountMeta { + return inst.AccountMetaSlice.Get(23) +} + +func (inst *MigrationDammV2) GetSystemProgram() *solana.AccountMeta { + return inst.AccountMetaSlice.Get(24) +} diff --git a/solana/spl/programs/meteora_dbc/client.go b/solana/spl/programs/meteora_dbc/client.go index 4e854c7e..824bbfe6 100644 --- a/solana/spl/programs/meteora_dbc/client.go +++ b/solana/spl/programs/meteora_dbc/client.go @@ -7,8 +7,6 @@ import ( "go.uber.org/zap" ) -var DbcProgramID = solana.MustPublicKeyFromBase58("dbcij3LWUppWqq96dh6gJWwBifmcGfLSB5D4DuSMaqN") - type RpcClient interface { GetAccountDataBorshInto(ctx context.Context, account solana.PublicKey, out interface{}) error } diff --git a/solana/spl/programs/meteora_dbc/instruction.go b/solana/spl/programs/meteora_dbc/instruction.go new file mode 100644 index 00000000..fe1baa0b --- /dev/null +++ b/solana/spl/programs/meteora_dbc/instruction.go @@ -0,0 +1,120 @@ +package meteora_dbc + +import ( + "bytes" + "fmt" + + "github.com/davecgh/go-spew/spew" + "github.com/gagliardetto/solana-go" + "github.com/gagliardetto/solana-go/text" + "github.com/gagliardetto/treeout" + + bin "github.com/gagliardetto/binary" +) + +const ( + Instruction_MigrationDammV2 = "migration_damm_v2" +) + +var ProgramID = solana.MustPublicKeyFromBase58("dbcij3LWUppWqq96dh6gJWwBifmcGfLSB5D4DuSMaqN") + +type Instruction struct { + bin.BaseVariant +} + +func init() { + solana.RegisterInstructionDecoder(ProgramID, registryDecodeInstruction) +} + +func SetProgramID(pubkey solana.PublicKey) { + ProgramID = pubkey +} + +func DecodeInstruction(accounts []*solana.AccountMeta, data []byte) (*Instruction, error) { + inst := new(Instruction) + if 
err := bin.NewBorshDecoder(data).Decode(inst); err != nil { + return nil, fmt.Errorf("unable to decode instruction: %w", err) + } + if v, ok := inst.Impl.(solana.AccountsSettable); ok { + err := v.SetAccounts(accounts) + if err != nil { + return nil, fmt.Errorf("unable to set accounts for instruction: %w", err) + } + } + return inst, nil +} + +func registryDecodeInstruction(accounts []*solana.AccountMeta, data []byte) (interface{}, error) { + inst, err := DecodeInstruction(accounts, data) + if err != nil { + return nil, err + } + return inst, nil +} + +var ( + _ solana.Instruction = (*Instruction)(nil) + _ text.TextEncodable = (*Instruction)(nil) + _ bin.BinaryUnmarshaler = (*Instruction)(nil) + _ bin.BinaryMarshaler = (*Instruction)(nil) + _ text.EncodableToTree = (*Instruction)(nil) +) + +// ----- solana.Instruction Implementation ----- + +func (inst *Instruction) ProgramID() solana.PublicKey { + return ProgramID +} + +func (inst *Instruction) Accounts() (out []*solana.AccountMeta) { + return inst.Impl.(solana.AccountsGettable).GetAccounts() +} + +func (inst *Instruction) Data() ([]byte, error) { + buf := new(bytes.Buffer) + if err := bin.NewBorshEncoder(buf).Encode(inst); err != nil { + return nil, fmt.Errorf("unable to encode instruction: %w", err) + } + return buf.Bytes(), nil +} + +// ----- text.TextEncodable Implementation ----- + +func (inst *Instruction) TextEncode(encoder *text.Encoder, option *text.Option) error { + return encoder.Encode(inst.Impl, option) +} + +// ----- text.EncodableToTree Implementation ----- + +func (inst *Instruction) EncodeToTree(parent treeout.Branches) { + if enToTree, ok := inst.Impl.(text.EncodableToTree); ok { + enToTree.EncodeToTree(parent) + } else { + parent.Child(spew.Sdump(inst)) + } +} + +// ----- bin.BinaryUnmarshaler Implementation ----- + +var InstructionImplDef = bin.NewVariantDefinition( + bin.AnchorTypeIDEncoding, + []bin.VariantType{ + { + Name: Instruction_MigrationDammV2, Type: (*MigrationDammV2)(nil), + }, + }, +) + +func (inst *Instruction) UnmarshalWithDecoder(decoder *bin.Decoder) error { + return inst.BaseVariant.UnmarshalBinaryVariant(decoder, InstructionImplDef) +} + +// ----- bin.BinaryMarshaler Implementation ----- + +func (inst Instruction) MarshalWithEncoder(encoder *bin.Encoder) error { + err := encoder.WriteBytes(inst.TypeID.Bytes(), false) + if err != nil { + return fmt.Errorf("unable to write variant type: %w", err) + } + return encoder.Encode(inst.Impl) +} From 88681cc2b5ec2cabf98914bad00db3837b22adca Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Mon, 13 Oct 2025 10:29:42 -0700 Subject: [PATCH 02/56] add slot to remove race conditions, move pgNotify watcher --- ddl/functions/calculate_artist_coin_fees.sql | 3 + ddl/migrations/0169_damm_and_positions.sql | 6 ++ solana/indexer/damm_v2.go | 101 +++++++++---------- solana/indexer/dbc.go | 6 +- solana/indexer/utils.go | 51 ++++++++++ 5 files changed, 108 insertions(+), 59 deletions(-) diff --git a/ddl/functions/calculate_artist_coin_fees.sql b/ddl/functions/calculate_artist_coin_fees.sql index 0c5c3dd4..7cdd1114 100644 --- a/ddl/functions/calculate_artist_coin_fees.sql +++ b/ddl/functions/calculate_artist_coin_fees.sql @@ -11,6 +11,9 @@ RETURNS TABLE ( ) LANGUAGE sql AS $function$ WITH damm_fees AS ( + -- fee = totalLiquidity * feePerTokenStore + -- precision: (totalLiquidity * feePerTokenStore) >> 128 + -- See: 
https://github.com/MeteoraAg/damm-v2-sdk/blob/70d1af59689039a1dc700dee8f741db48024d02d/src/helpers/utils.ts#L190-L191 SELECT pool.token_a_mint AS mint, ( diff --git a/ddl/migrations/0169_damm_and_positions.sql b/ddl/migrations/0169_damm_and_positions.sql index 6bb0ddae..73f3399e 100644 --- a/ddl/migrations/0169_damm_and_positions.sql +++ b/ddl/migrations/0169_damm_and_positions.sql @@ -58,6 +58,7 @@ COMMENT ON TABLE sol_meteora_damm_v2_pools IS 'Tracks DAMM V2 pool state. Join w CREATE TABLE IF NOT EXISTS sol_meteora_damm_v2_pool_metrics ( pool TEXT PRIMARY KEY REFERENCES sol_meteora_damm_v2_pools(address) ON DELETE CASCADE, + slot BIGINT NOT NULL, total_lp_a_fee NUMERIC NOT NULL, total_lp_b_fee NUMERIC NOT NULL, total_protocol_a_fee NUMERIC NOT NULL, @@ -72,6 +73,7 @@ COMMENT ON TABLE sol_meteora_damm_v2_pool_metrics IS 'Tracks aggregated metrics CREATE TABLE IF NOT EXISTS sol_meteora_damm_v2_pool_fees ( pool TEXT PRIMARY KEY REFERENCES sol_meteora_damm_v2_pools(address) ON DELETE CASCADE, + slot BIGINT NOT NULL, protocol_fee_percent SMALLINT NOT NULL, partner_fee_percent SMALLINT NOT NULL, referral_fee_percent SMALLINT NOT NULL, @@ -82,6 +84,7 @@ COMMENT ON TABLE sol_meteora_damm_v2_pool_fees IS 'Tracks fee configuration for CREATE TABLE IF NOT EXISTS sol_meteora_damm_v2_pool_base_fees ( pool TEXT PRIMARY KEY REFERENCES sol_meteora_damm_v2_pools(address) ON DELETE CASCADE, + slot BIGINT NOT NULL, cliff_fee_numerator BIGINT NOT NULL, fee_scheduler_mode SMALLINT NOT NULL, number_of_period SMALLINT NOT NULL, @@ -94,6 +97,7 @@ COMMENT ON TABLE sol_meteora_damm_v2_pool_base_fees IS 'Tracks base fee configur CREATE TABLE IF NOT EXISTS sol_meteora_damm_v2_pool_dynamic_fees ( pool TEXT PRIMARY KEY REFERENCES sol_meteora_damm_v2_pools(address) ON DELETE CASCADE, + slot BIGINT NOT NULL, initialized SMALLINT NOT NULL, max_volatility_accumulator INTEGER NOT NULL, variable_fee_control INTEGER NOT NULL, @@ -113,6 +117,7 @@ COMMENT ON TABLE sol_meteora_damm_v2_pool_dynamic_fees IS 'Tracks dynamic fee co CREATE TABLE IF NOT EXISTS sol_meteora_damm_v2_positions ( address TEXT PRIMARY KEY, + slot BIGINT NOT NULL, pool TEXT NOT NULL REFERENCES sol_meteora_damm_v2_pools(address) ON DELETE CASCADE, nft_mint TEXT NOT NULL, fee_a_per_token_checkpoint BIGINT NOT NULL, @@ -129,6 +134,7 @@ COMMENT ON TABLE sol_meteora_damm_v2_positions IS 'Tracks DAMM V2 positions repr CREATE TABLE IF NOT EXISTS sol_meteora_damm_v2_position_metrics ( position TEXT PRIMARY KEY REFERENCES sol_meteora_damm_v2_positions(address) ON DELETE CASCADE, + slot BIGINT NOT NULL, total_claimed_a_fee BIGINT NOT NULL, total_claimed_b_fee BIGINT NOT NULL, created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, diff --git a/solana/indexer/damm_v2.go b/solana/indexer/damm_v2.go index dc381e6e..617b5e64 100644 --- a/solana/indexer/damm_v2.go +++ b/solana/indexer/damm_v2.go @@ -14,8 +14,6 @@ import ( "go.uber.org/zap" ) -type notificationCallback func(ctx context.Context, notification *pgconn.Notification) - type DammV2Indexer struct { pool database.DbPool grpcConfig GrpcConfig @@ -124,53 +122,6 @@ func subscribeToDammV2Pools(ctx context.Context, db database.DBTX, grpcConfig Gr return grpcClients, nil } -func watchPgNotification(ctx context.Context, pool database.DbPool, notification string, callback notificationCallback, logger *zap.Logger) error { - if logger == nil { - logger = zap.NewNop() - } - - childLogger := logger.With(zap.String("notification", notification)) - - conn, err := pool.Acquire(ctx) - if err != nil { - return fmt.Errorf("failed 
to acquire database connection: %w", err) - } - - rawConn := conn.Conn() - _, err = rawConn.Exec(ctx, fmt.Sprintf(`LISTEN %s`, notification)) - if err != nil { - return fmt.Errorf("failed to listen for %s changes: %w", notification, err) - } - - go func() { - defer func() { - if rawConn != nil && !rawConn.PgConn().IsClosed() && ctx.Err() != nil { - _, _ = rawConn.Exec(ctx, fmt.Sprintf(`UNLISTEN %s`, notification)) - } - childLogger.Info("received shutdown signal, stopping notification watcher") - conn.Release() - }() - for { - select { - case <-ctx.Done(): - return - default: - } - - notif, err := rawConn.WaitForNotification(ctx) - if err != nil { - childLogger.Error("failed waiting for notification", zap.Error(err)) - } - if notif == nil { - childLogger.Warn("received nil notification, continuing to wait for notifications") - continue - } - callback(ctx, notif) - } - }() - return nil -} - func makeDammV2SubscriptionRequest(dammV2Pools []string) *pb.SubscribeRequest { commitment := pb.CommitmentLevel_CONFIRMED subscription := &pb.SubscribeRequest{ @@ -254,23 +205,23 @@ func processDammV2PoolUpdate( if err != nil { return err } - err = upsertDammV2Pool(ctx, db, account, &pool) + err = upsertDammV2Pool(ctx, db, update.Slot, account, &pool) if err != nil { return err } - err = upsertDammV2PoolMetrics(ctx, db, account, &pool.Metrics) + err = upsertDammV2PoolMetrics(ctx, db, update.Slot, account, &pool.Metrics) if err != nil { return err } - err = upsertDammV2PoolFees(ctx, db, account, &pool.PoolFees) + err = upsertDammV2PoolFees(ctx, db, update.Slot, account, &pool.PoolFees) if err != nil { return err } - err = upsertDammV2PoolBaseFee(ctx, db, account, &pool.PoolFees.BaseFee) + err = upsertDammV2PoolBaseFee(ctx, db, update.Slot, account, &pool.PoolFees.BaseFee) if err != nil { return err } - err = upsertDammV2PoolDynamicFee(ctx, db, account, &pool.PoolFees.DynamicFee) + err = upsertDammV2PoolDynamicFee(ctx, db, update.Slot, account, &pool.PoolFees.DynamicFee) if err != nil { return err } @@ -288,11 +239,11 @@ func processDammV2PositionUpdate( if err != nil { return err } - err = upsertDammV2Position(ctx, db, account, &position) + err = upsertDammV2Position(ctx, db, update.Slot, account, &position) if err != nil { return err } - err = upsertDammV2PositionMetrics(ctx, db, account, &position.Metrics) + err = upsertDammV2PositionMetrics(ctx, db, update.Slot, account, &position.Metrics) if err != nil { return err } @@ -328,12 +279,14 @@ func getWatchedDammV2Pools(ctx context.Context, db database.DBTX, limit int, off func upsertDammV2Pool( ctx context.Context, db database.DBTX, + slot uint64, account solana.PublicKey, pool *meteora_damm_v2.Pool, ) error { sqlPool := ` INSERT INTO sol_meteora_damm_v2_pools ( address, + slot, token_a_mint, token_b_mint, token_a_vault, @@ -363,6 +316,7 @@ func upsertDammV2Pool( updated_at ) VALUES ( @address, + @slot, @token_a_mint, @token_b_mint, @token_a_vault, @@ -392,6 +346,7 @@ func upsertDammV2Pool( NOW() ) ON CONFLICT (address) DO UPDATE SET + slot = EXCLUDED.slot, token_a_mint = EXCLUDED.token_a_mint, token_b_mint = EXCLUDED.token_b_mint, token_a_vault = EXCLUDED.token_a_vault, @@ -418,9 +373,11 @@ func upsertDammV2Pool( permanent_lock_liquidity = EXCLUDED.permanent_lock_liquidity, creator = EXCLUDED.creator, updated_at = NOW() + WHERE EXCLUDED.slot > sol_meteora_damm_v2_pools.slot ` args := pgx.NamedArgs{ "address": account.String(), + "slot": slot, "token_a_mint": pool.TokenAMint.String(), "token_b_mint": pool.TokenBMint.String(), "token_a_vault": 
pool.TokenAVault.String(), @@ -455,12 +412,14 @@ func upsertDammV2Pool( func upsertDammV2PoolMetrics( ctx context.Context, db database.DBTX, + slot uint64, account solana.PublicKey, metrics *meteora_damm_v2.PoolMetrics, ) error { sqlMetrics := ` INSERT INTO sol_meteora_damm_v2_pool_metrics ( pool, + slot, total_lp_a_fee, total_lp_b_fee, total_protocol_a_fee, @@ -472,6 +431,7 @@ func upsertDammV2PoolMetrics( updated_at ) VALUES ( @pool, + @slot, @total_lp_a_fee, @total_lp_b_fee, @total_protocol_a_fee, @@ -483,6 +443,7 @@ func upsertDammV2PoolMetrics( NOW() ) ON CONFLICT (pool) DO UPDATE SET + slot = EXCLUDED.slot, total_lp_a_fee = EXCLUDED.total_lp_a_fee, total_lp_b_fee = EXCLUDED.total_lp_b_fee, total_protocol_a_fee = EXCLUDED.total_protocol_a_fee, @@ -491,10 +452,12 @@ func upsertDammV2PoolMetrics( total_partner_b_fee = EXCLUDED.total_partner_b_fee, total_position = EXCLUDED.total_position, updated_at = NOW() + WHERE EXCLUDED.slot > sol_meteora_damm_v2_pool_metrics.slot ` _, err := db.Exec(ctx, sqlMetrics, pgx.NamedArgs{ "pool": account.String(), + "slot": slot, "total_lp_a_fee": metrics.TotalLpAFee, "total_lp_b_fee": metrics.TotalLpBFee, "total_protocol_a_fee": metrics.TotalProtocolAFee, @@ -509,12 +472,14 @@ func upsertDammV2PoolMetrics( func upsertDammV2PoolFees( ctx context.Context, db database.DBTX, + slot uint64, account solana.PublicKey, fees *meteora_damm_v2.PoolFeesStruct, ) error { sqlFees := ` INSERT INTO sol_meteora_damm_v2_pool_fees ( pool, + slot, partner_fee_percent, protocol_fee_percent, referral_fee_percent, @@ -522,6 +487,7 @@ func upsertDammV2PoolFees( updated_at ) VALUES ( @pool, + @slot, @partner_fee_percent, @protocol_fee_percent, @referral_fee_percent, @@ -529,10 +495,12 @@ func upsertDammV2PoolFees( NOW() ) ON CONFLICT (pool) DO UPDATE SET + slot = EXCLUDED.slot, partner_fee_percent = EXCLUDED.partner_fee_percent, protocol_fee_percent = EXCLUDED.protocol_fee_percent, referral_fee_percent = EXCLUDED.referral_fee_percent, updated_at = NOW() + WHERE EXCLUDED.slot > sol_meteora_damm_v2_pool_fees.slot ` _, err := db.Exec(ctx, sqlFees, pgx.NamedArgs{ @@ -547,12 +515,14 @@ func upsertDammV2PoolFees( func upsertDammV2PoolBaseFee( ctx context.Context, db database.DBTX, + slot uint64, account solana.PublicKey, baseFee *meteora_damm_v2.BaseFeeStruct, ) error { sqlBaseFee := ` INSERT INTO sol_meteora_damm_v2_pool_base_fees ( pool, + slot, cliff_fee_numerator, fee_scheduler_mode, number_of_period, @@ -562,6 +532,7 @@ func upsertDammV2PoolBaseFee( updated_at ) VALUES ( @pool, + @slot, @cliff_fee_numerator, @fee_scheduler_mode, @number_of_period, @@ -571,16 +542,19 @@ func upsertDammV2PoolBaseFee( NOW() ) ON CONFLICT (pool) DO UPDATE SET + slot = EXCLUDED.slot, cliff_fee_numerator = EXCLUDED.cliff_fee_numerator, fee_scheduler_mode = EXCLUDED.fee_scheduler_mode, number_of_period = EXCLUDED.number_of_period, period_frequency = EXCLUDED.period_frequency, reduction_factor = EXCLUDED.reduction_factor, updated_at = NOW() + WHERE EXCLUDED.slot > sol_meteora_damm_v2_pool_base_fees.slot ` _, err := db.Exec(ctx, sqlBaseFee, pgx.NamedArgs{ "pool": account.String(), + "slot": slot, "cliff_fee_numerator": baseFee.CliffFeeNumerator, "fee_scheduler_mode": baseFee.FeeSchedulerMode, "number_of_period": baseFee.NumberOfPeriod, @@ -593,12 +567,14 @@ func upsertDammV2PoolBaseFee( func upsertDammV2PoolDynamicFee( ctx context.Context, db database.DBTX, + slot uint64, account solana.PublicKey, dynamicFee *meteora_damm_v2.DynamicFeeStruct, ) error { sqlDynamicFee := ` INSERT INTO 
sol_meteora_damm_v2_pool_dynamic_fees ( pool, + slot, initialized, max_volatility_accumulator, variable_fee_control, @@ -615,6 +591,7 @@ func upsertDammV2PoolDynamicFee( updated_at ) VALUES ( @pool, + @slot, @initialized, @max_volatility_accumulator, @variable_fee_control, @@ -631,6 +608,7 @@ func upsertDammV2PoolDynamicFee( NOW() ) ON CONFLICT (pool) DO UPDATE SET + slot = EXCLUDED.slot, initialized = EXCLUDED.initialized, max_volatility_accumulator = EXCLUDED.max_volatility_accumulator, variable_fee_control = EXCLUDED.variable_fee_control, @@ -644,6 +622,7 @@ func upsertDammV2PoolDynamicFee( volatility_accumulator = EXCLUDED.volatility_accumulator, volatility_reference = EXCLUDED.volatility_reference, updated_at = NOW() + WHERE EXCLUDED.slot > sol_meteora_damm_v2_pool_dynamic_fees.slot ` _, err := db.Exec(ctx, sqlDynamicFee, pgx.NamedArgs{ @@ -667,12 +646,14 @@ func upsertDammV2PoolDynamicFee( func upsertDammV2Position( ctx context.Context, db database.DBTX, + slot uint64, account solana.PublicKey, position *meteora_damm_v2.PositionState, ) error { sql := ` INSERT INTO sol_meteora_damm_v2_positions ( address, + slot, pool, nft_mint, fee_a_per_token_checkpoint, @@ -686,6 +667,7 @@ func upsertDammV2Position( created_at ) VALUES ( @address, + @slot, @pool, @nft_mint, @fee_a_per_token_checkpoint, @@ -699,6 +681,7 @@ func upsertDammV2Position( NOW() ) ON CONFLICT (address) DO UPDATE SET + slot = EXCLUDED.slot, pool = EXCLUDED.pool, nft_mint = EXCLUDED.nft_mint, fee_a_per_token_checkpoint = EXCLUDED.fee_a_per_token_checkpoint, @@ -709,6 +692,7 @@ func upsertDammV2Position( vested_liquidity = EXCLUDED.vested_liquidity, permanent_locked_liquidity = EXCLUDED.permanent_locked_liquidity, updated_at = NOW() + WHERE EXCLUDED.slot > sol_meteora_damm_v2_positions.slot ` _, err := db.Exec(ctx, sql, pgx.NamedArgs{ @@ -729,27 +713,32 @@ func upsertDammV2Position( func upsertDammV2PositionMetrics( ctx context.Context, db database.DBTX, + slot uint64, account solana.PublicKey, metrics *meteora_damm_v2.PositionMetrics, ) error { sql := ` INSERT INTO sol_meteora_damm_v2_position_metrics ( position, + slot, total_claimed_a_fee, total_claimed_b_fee, created_at, updated_at ) VALUES ( @position, + @slot, @total_claimed_a_fee, @total_claimed_b_fee, NOW(), NOW() ) ON CONFLICT (position) DO UPDATE SET + slot = EXCLUDED.slot, total_claimed_a_fee = EXCLUDED.total_claimed_a_fee, total_claimed_b_fee = EXCLUDED.total_claimed_b_fee, updated_at = NOW() + WHERE EXCLUDED.slot > sol_meteora_damm_v2_position_metrics.slot ` _, err := db.Exec(ctx, sql, pgx.NamedArgs{ diff --git a/solana/indexer/dbc.go b/solana/indexer/dbc.go index b1be576e..4460344b 100644 --- a/solana/indexer/dbc.go +++ b/solana/indexer/dbc.go @@ -83,7 +83,7 @@ func processDbcInstruction( if err != nil { return fmt.Errorf("failed to get damm v2 pool account data after retries: %w", err) } else { - err = upsertDammV2Pool(ctx, db, migrationInst.GetPool().PublicKey, &dammPool) + err = upsertDammV2Pool(ctx, db, slot, migrationInst.GetPool().PublicKey, &dammPool) if err != nil { return fmt.Errorf("failed to upsert damm v2 pool: %w", err) } @@ -96,7 +96,7 @@ func processDbcInstruction( if err != nil { return fmt.Errorf("failed to get first damm v2 position account data: %w", err) } else { - err = upsertDammV2Position(ctx, db, migrationInst.GetFirstPosition().PublicKey, &firstPosition) + err = upsertDammV2Position(ctx, db, slot, migrationInst.GetFirstPosition().PublicKey, &firstPosition) if err != nil { return fmt.Errorf("failed to upsert first damm v2 position: %w", 
err) } @@ -109,7 +109,7 @@ func processDbcInstruction( if err != nil { return fmt.Errorf("failed to get second damm v2 position account data: %w", err) } else { - err = upsertDammV2Position(ctx, db, migrationInst.GetSecondPosition().PublicKey, &secondPosition) + err = upsertDammV2Position(ctx, db, slot, migrationInst.GetSecondPosition().PublicKey, &secondPosition) if err != nil { return fmt.Errorf("failed to upsert second damm v2 position: %w", err) } diff --git a/solana/indexer/utils.go b/solana/indexer/utils.go index 3716f440..625536f3 100644 --- a/solana/indexer/utils.go +++ b/solana/indexer/utils.go @@ -7,6 +7,8 @@ import ( "api.audius.co/database" "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgconn" + "go.uber.org/zap" ) func withRetries(f func() error, maxRetries int, interval time.Duration) error { @@ -59,3 +61,52 @@ func getArtistCoins(ctx context.Context, db database.DBTX, forceRefresh bool) ([ mintsCache = mintAddresses return mintAddresses, nil } + +type notificationCallback func(ctx context.Context, notification *pgconn.Notification) + +func watchPgNotification(ctx context.Context, pool database.DbPool, notification string, callback notificationCallback, logger *zap.Logger) error { + if logger == nil { + logger = zap.NewNop() + } + + childLogger := logger.With(zap.String("notification", notification)) + + conn, err := pool.Acquire(ctx) + if err != nil { + return fmt.Errorf("failed to acquire database connection: %w", err) + } + + rawConn := conn.Conn() + _, err = rawConn.Exec(ctx, fmt.Sprintf(`LISTEN %s`, notification)) + if err != nil { + return fmt.Errorf("failed to listen for %s changes: %w", notification, err) + } + + go func() { + defer func() { + if rawConn != nil && !rawConn.PgConn().IsClosed() && ctx.Err() != nil { + _, _ = rawConn.Exec(ctx, fmt.Sprintf(`UNLISTEN %s`, notification)) + } + childLogger.Info("received shutdown signal, stopping notification watcher") + conn.Release() + }() + for { + select { + case <-ctx.Done(): + return + default: + } + + notif, err := rawConn.WaitForNotification(ctx) + if err != nil { + childLogger.Error("failed waiting for notification", zap.Error(err)) + } + if notif == nil { + childLogger.Warn("received nil notification, continuing to wait for notifications") + continue + } + callback(ctx, notif) + } + }() + return nil +} From 8add5704d2c1db087fe3d0667417aea30109c7af Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Mon, 13 Oct 2025 10:31:48 -0700 Subject: [PATCH 03/56] typo --- solana/indexer/damm_v2.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/solana/indexer/damm_v2.go b/solana/indexer/damm_v2.go index 617b5e64..8da7e7de 100644 --- a/solana/indexer/damm_v2.go +++ b/solana/indexer/damm_v2.go @@ -22,7 +22,7 @@ type DammV2Indexer struct { const MAX_DAMM_V2_POOLS_PER_SUBSCRIPTION = 10000 const DAMM_V2_POOL_SUBSCRIPTION_KEY = "dammV2Pools" -const DBC__MIGRATION_NOTIFICATION_NAME = "meteora_dbc_migration" +const DBC_MIGRATION_NOTIFICATION_NAME = "meteora_dbc_migration" func (d *DammV2Indexer) Start(ctx context.Context) { // To ensure only one subscription task is running at a time, keep track of @@ -64,7 +64,7 @@ func (d *DammV2Indexer) Start(ctx context.Context) { grpcClients = clients // Watch for new pools to be added - err = watchPgNotification(ctx, d.pool, DBC__MIGRATION_NOTIFICATION_NAME, handleNotif, d.logger) + err = watchPgNotification(ctx, d.pool, DBC_MIGRATION_NOTIFICATION_NAME, handleNotif, d.logger) if err != nil { d.logger.Error("failed to 
watch for DAMM V2 pool changes", zap.Error(err)) return From b3b792faaf4704218ed44bd4a3de60c49d0c90fa Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Mon, 13 Oct 2025 12:25:14 -0700 Subject: [PATCH 04/56] remove unused --- .../spl/programs/meteora_damm_v2/accounts.go | 13 -- solana/spl/programs/meteora_damm_v2/client.go | 113 ------------------ .../programs/meteora_damm_v2/client_test.go | 24 ---- solana/spl/programs/meteora_damm_v2/utils.go | 27 ----- 4 files changed, 177 deletions(-) delete mode 100644 solana/spl/programs/meteora_damm_v2/accounts.go delete mode 100644 solana/spl/programs/meteora_damm_v2/client.go delete mode 100644 solana/spl/programs/meteora_damm_v2/client_test.go delete mode 100644 solana/spl/programs/meteora_damm_v2/utils.go diff --git a/solana/spl/programs/meteora_damm_v2/accounts.go b/solana/spl/programs/meteora_damm_v2/accounts.go deleted file mode 100644 index 21c54f0a..00000000 --- a/solana/spl/programs/meteora_damm_v2/accounts.go +++ /dev/null @@ -1,13 +0,0 @@ -package meteora_damm_v2 - -import "github.com/gagliardetto/solana-go" - -// Derives the position PDA from a position NFT mint -func DerivePositionPDA(positionNft solana.PublicKey) (solana.PublicKey, error) { - seeds := [][]byte{[]byte("position"), positionNft.Bytes()} - address, _, err := solana.FindProgramAddress(seeds, ProgramID) - if err != nil { - return solana.PublicKey{}, err - } - return address, nil -} diff --git a/solana/spl/programs/meteora_damm_v2/client.go b/solana/spl/programs/meteora_damm_v2/client.go deleted file mode 100644 index 0775ede5..00000000 --- a/solana/spl/programs/meteora_damm_v2/client.go +++ /dev/null @@ -1,113 +0,0 @@ -package meteora_damm_v2 - -import ( - "context" - "math/big" - "sort" - - bin "github.com/gagliardetto/binary" - "github.com/gagliardetto/solana-go" - "github.com/gagliardetto/solana-go/programs/token" - "github.com/gagliardetto/solana-go/rpc" - "go.uber.org/zap" -) - -type RpcClient interface { - GetAccountDataBorshInto(ctx context.Context, account solana.PublicKey, out interface{}) error -} - -type Client struct { - client *rpc.Client - logger *zap.Logger -} - -func NewClient( - client *rpc.Client, - logger *zap.Logger, -) *Client { - return &Client{ - client: client, - logger: logger, - } -} - -// Gets the current Pool state. -func (c *Client) GetPool(ctx context.Context, account solana.PublicKey) (*Pool, error) { - var pool Pool - err := c.client.GetAccountDataBorshInto(ctx, account, &pool) - if err != nil { - return nil, err - } - return &pool, nil -} - -// Gets a position by its address. -func (c *Client) GetPosition(ctx context.Context, account solana.PublicKey) (*PositionState, error) { - var position PositionState - err := c.client.GetAccountDataBorshInto(ctx, account, &position) - if err != nil { - return nil, err - } - return &position, nil -} - -// Gets all position NFTs held by a wallet. 
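The helper being removed here identified position NFTs by listing the owner's Token-2022 token accounts and keeping the mints held with an amount of exactly 1. A condensed sketch of that filter, using the same solana-go calls the removed client used, but with the Borsh decode error checked (the removed code silently ignored it):

    package example

    import (
        "context"

        bin "github.com/gagliardetto/binary"
        "github.com/gagliardetto/solana-go"
        "github.com/gagliardetto/solana-go/programs/token"
        "github.com/gagliardetto/solana-go/rpc"
    )

    // positionNFTs returns the mints of likely position NFTs held by owner.
    func positionNFTs(ctx context.Context, client *rpc.Client, owner solana.PublicKey) ([]solana.PublicKey, error) {
        // Position NFTs are minted under Token-2022, so scope the query to it.
        accounts, err := client.GetTokenAccountsByOwner(ctx, owner, &rpc.GetTokenAccountsConfig{
            ProgramId: &solana.Token2022ProgramID,
        }, &rpc.GetTokenAccountsOpts{})
        if err != nil {
            return nil, err
        }

        var mints []solana.PublicKey
        for _, acc := range accounts.Value {
            var data token.Account
            if err := bin.NewBorshDecoder(acc.Account.Data.GetBinary()).Decode(&data); err != nil {
                continue // not a plain token account; skip it
            }
            if data.Amount == 1 { // an NFT is held with amount exactly 1
                mints = append(mints, data.Mint)
            }
        }
        return mints, nil
    }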
-func (c *Client) GetPositionNFTs(ctx context.Context, owner solana.PublicKey) ([]solana.PublicKey, error) { - accounts, err := c.client.GetTokenAccountsByOwner(ctx, owner, &rpc.GetTokenAccountsConfig{ - ProgramId: &solana.Token2022ProgramID, - }, &rpc.GetTokenAccountsOpts{}) - if err != nil { - return nil, err - } - - var positionNFTs []solana.PublicKey - for _, acc := range accounts.Value { - data := token.Account{} - bin.NewBorshDecoder(acc.Account.Data.GetBinary()).Decode(&data) - if data.Amount == uint64(1) { - positionNFTs = append(positionNFTs, data.Mint) - } - } - - return positionNFTs, nil -} - -// Gets all the positions by an owner, sorted by total liquidity (descending). -func (c *Client) GetPositionsByOwner(ctx context.Context, owner solana.PublicKey) ([]*PositionState, error) { - positionNFTs, err := c.GetPositionNFTs(ctx, owner) - if err != nil { - return nil, err - } - - var positions []*PositionState - for _, nft := range positionNFTs { - pda, err := DerivePositionPDA(nft) - if err != nil { - return nil, err - } - position, err := c.GetPosition(ctx, pda) - if err != nil { - return nil, err - } - positions = append(positions, position) - } - - // Sort positions by total liquidity - sort.Slice(positions, func(i, j int) bool { - vestedLiquidity := (&big.Int{}).SetBytes(positions[i].VestedLiquidity.Bytes()) - permanentLockedLiquidity := (&big.Int{}).SetBytes(positions[i].PermanentLockedLiquidity.Bytes()) - unlockedLiquidity := (&big.Int{}).SetBytes(positions[i].UnlockedLiquidity.Bytes()) - totalLiquidityI := (&big.Int{}).Add(vestedLiquidity, permanentLockedLiquidity) - totalLiquidityI.Add(totalLiquidityI, unlockedLiquidity) - - vestedLiquidity = (&big.Int{}).SetBytes(positions[j].VestedLiquidity.Bytes()) - permanentLockedLiquidity = (&big.Int{}).SetBytes(positions[j].PermanentLockedLiquidity.Bytes()) - unlockedLiquidity = (&big.Int{}).SetBytes(positions[j].UnlockedLiquidity.Bytes()) - totalLiquidityJ := (&big.Int{}).Add(vestedLiquidity, permanentLockedLiquidity) - totalLiquidityJ.Add(totalLiquidityJ, unlockedLiquidity) - - return totalLiquidityJ.Cmp(totalLiquidityI) < 0 - }) - - return positions, nil -} diff --git a/solana/spl/programs/meteora_damm_v2/client_test.go b/solana/spl/programs/meteora_damm_v2/client_test.go deleted file mode 100644 index 12f1266a..00000000 --- a/solana/spl/programs/meteora_damm_v2/client_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package meteora_damm_v2_test - -import ( - "context" - "testing" - - "api.audius.co/solana/spl/programs/meteora_damm_v2" - "github.com/gagliardetto/solana-go" - "github.com/gagliardetto/solana-go/rpc" - "github.com/stretchr/testify/require" - "go.uber.org/zap" -) - -func TestActualFetch(t *testing.T) { - ctx := context.Background() - client := meteora_damm_v2.NewClient(rpc.New(rpc.MainNetBeta_RPC), zap.NewNop()) - - owner, err := solana.PublicKeyFromBase58("EF1zneAqA2mwjkD3Lj7sQnMhR2uorGqEHXNtAWfGdCu2") - require.NoError(t, err) - - positions, err := client.GetPositionsByOwner(ctx, owner) - require.NoError(t, err) - require.Greater(t, len(positions), 0) -} diff --git a/solana/spl/programs/meteora_damm_v2/utils.go b/solana/spl/programs/meteora_damm_v2/utils.go deleted file mode 100644 index 771cdb39..00000000 --- a/solana/spl/programs/meteora_damm_v2/utils.go +++ /dev/null @@ -1,27 +0,0 @@ -package meteora_damm_v2 - -import ( - "math/big" -) - -const LIQUIDITY_SCALE = 128 - -func GetUnclaimedFees(pool *Pool, position *PositionState) (*big.Int, *big.Int) { - totalPositionLiquidity := big.NewInt(0).Add( - 
big.NewInt(0).Add(position.UnlockedLiquidity.BigInt(), position.VestedLiquidity.BigInt()), - position.PermanentLockedLiquidity.BigInt(), - ) - - feeA := big.NewInt(0).Sub(&pool.FeeAPerLiquidity.Int, &position.FeeAPerTokenCheckpoint.Int) - feeB := big.NewInt(0).Sub(&pool.FeeBPerLiquidity.Int, &position.FeeBPerTokenCheckpoint.Int) - - feeA.Mul(feeA, totalPositionLiquidity) - feeB.Mul(feeB, totalPositionLiquidity) - feeA.Rsh(feeA, LIQUIDITY_SCALE) - feeB.Rsh(feeB, LIQUIDITY_SCALE) - - feeA.Add(feeA, big.NewInt(0).SetUint64(position.FeeAPending)) - feeB.Add(feeB, big.NewInt(0).SetUint64(position.FeeBPending)) - - return feeA, feeB -} From 65e1f522c1419442ec63d38a865ea681246ff980 Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Tue, 14 Oct 2025 08:35:18 -0700 Subject: [PATCH 05/56] Create TokenIndexer --- ddl/migrations/0170_sol_retry_queue.sql | 17 + solana/indexer/balance_changes.go | 6 +- solana/indexer/checkpoints.go | 69 +- .../{damm_v2.go => damm_v2_indexer.go} | 125 ++-- solana/indexer/processor.go | 104 ++- solana/indexer/retry_queue.go | 102 +++ solana/indexer/solana_indexer.go | 154 ++++- solana/indexer/subscription.go | 622 +++++++++--------- solana/indexer/subscription_test.go | 450 ++++++------- solana/indexer/token_indexer.go | 281 ++++++++ solana/indexer/utils.go | 90 ++- 11 files changed, 1321 insertions(+), 699 deletions(-) create mode 100644 ddl/migrations/0170_sol_retry_queue.sql rename solana/indexer/{damm_v2.go => damm_v2_indexer.go} (86%) create mode 100644 solana/indexer/retry_queue.go create mode 100644 solana/indexer/token_indexer.go diff --git a/ddl/migrations/0170_sol_retry_queue.sql b/ddl/migrations/0170_sol_retry_queue.sql new file mode 100644 index 00000000..e0372c21 --- /dev/null +++ b/ddl/migrations/0170_sol_retry_queue.sql @@ -0,0 +1,17 @@ +CREATE TABLE IF NOT EXISTS sol_retry_queue ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + indexer TEXT NOT NULL, + update JSONB NOT NULL, + error TEXT NOT NULL, + created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP +); +COMMENT ON TABLE sol_retry_queue IS 'Queue for retrying failed indexer updates.'; +COMMENT ON COLUMN sol_retry_queue.indexer IS 'The name of the indexer that failed (e.g., token_indexer, damm_v2_indexer).'; +COMMENT ON COLUMN sol_retry_queue.update IS 'The JSONB update data that failed to process.'; +COMMENT ON COLUMN sol_retry_queue.error IS 'The error message from the failure.'; +COMMENT ON COLUMN sol_retry_queue.created_at IS 'The timestamp when the retry entry was created.'; +COMMENT ON COLUMN sol_retry_queue.updated_at IS 'The timestamp when the retry entry was last updated.'; + +ALTER TABLE sol_slot_checkpoints ADD COLUMN IF NOT EXISTS name TEXT; +COMMENT ON COLUMN sol_slot_checkpoints.name IS 'The name of the indexer this checkpoint is for (e.g., token_indexer, damm_v2_indexer).'; \ No newline at end of file diff --git a/solana/indexer/balance_changes.go b/solana/indexer/balance_changes.go index 1e971f0a..c6c9f88b 100644 --- a/solana/indexer/balance_changes.go +++ b/solana/indexer/balance_changes.go @@ -22,13 +22,9 @@ func processBalanceChanges( meta *rpc.TransactionMeta, tx *solana.Transaction, blockTime time.Time, + trackedMints []string, txLogger *zap.Logger, ) error { - trackedMints, err := getArtistCoins(ctx, db, false) - if err != nil { - return fmt.Errorf("failed to get artist coins: %w", err) - } - balanceChanges, err := extractBalanceChanges(meta, tx, trackedMints) if err != nil 
{ return fmt.Errorf("failed to extract token balance changes: %w", err) diff --git a/solana/indexer/checkpoints.go b/solana/indexer/checkpoints.go index f5229bb1..63f228fe 100644 --- a/solana/indexer/checkpoints.go +++ b/solana/indexer/checkpoints.go @@ -6,12 +6,61 @@ import ( "encoding/hex" "encoding/json" "fmt" + "time" "api.audius.co/database" "github.com/jackc/pgx/v5" pb "github.com/rpcpool/yellowstone-grpc/examples/golang/proto" + "go.uber.org/zap" ) +func ensureCheckpoint( + ctx context.Context, + name string, + db database.DBTX, + rpcClient RpcClient, + subscription *pb.SubscribeRequest, + logger *zap.Logger, +) (string, uint64, error) { + lastIndexedSlot, err := getCheckpointSlot(ctx, db, name, subscription) + if err != nil { + return "", 0, fmt.Errorf("failed to get last indexed slot: %w", err) + } + + latestSlot, err := withRetriesResult(func() (uint64, error) { + return rpcClient.GetSlot(ctx, "confirmed") + }, 5, time.Second*2) + if err != nil { + return "", 0, fmt.Errorf("failed to get slot: %w", err) + } + + var fromSlot uint64 + minimumSlot := uint64(0) + if latestSlot > MAX_SLOT_GAP { + minimumSlot = latestSlot - MAX_SLOT_GAP + } + + if lastIndexedSlot > minimumSlot { + // Existing subscription reconnecting, continue from last indexed slot + fromSlot = lastIndexedSlot + } else if lastIndexedSlot == 0 { + // New subscription, continue from latest slot - 100 + fromSlot = latestSlot - 100 // start 100 slots back to be safe + logger.Warn("no last indexed slot found, starting from most recent slot (less 100 for safety) and skipping backfill", zap.Uint64("fromSlot", fromSlot)) + } else { + // Existing subscription that's too old, continue from as far back as possible + fromSlot = minimumSlot + logger.Warn("last indexed slot is too old, starting from minimum slot", zap.Uint64("fromSlot", fromSlot), zap.Uint64("toSlot", lastIndexedSlot)) + } + + checkpointId, err := insertCheckpointStart(ctx, db, name, fromSlot, subscription) + if err != nil { + return "", 0, fmt.Errorf("failed to start checkpoint: %w", err) + } + + return checkpointId, fromSlot, nil +} + func insertBackfillCheckpoint(ctx context.Context, db database.DBTX, fromSlot uint64, toSlot uint64, address string) (string, error) { obj := map[string]string{ "type": "backfill", @@ -44,7 +93,13 @@ func insertBackfillCheckpoint(ctx context.Context, db database.DBTX, fromSlot ui return checkpointId, nil } -func insertCheckpointStart(ctx context.Context, db database.DBTX, fromSlot uint64, subscription *pb.SubscribeRequest) (string, error) { +func insertCheckpointStart( + ctx context.Context, + db database.DBTX, + name string, + fromSlot uint64, + subscription *pb.SubscribeRequest, +) (string, error) { subscriptionJson, err := json.Marshal(subscription) if err != nil { return "", fmt.Errorf("failed to marshal subscription request: %w", err) @@ -55,10 +110,11 @@ func insertCheckpointStart(ctx context.Context, db database.DBTX, fromSlot uint6 var checkpointId string err = db.QueryRow(ctx, ` - INSERT INTO sol_slot_checkpoints (from_slot, to_slot, subscription, subscription_hash) - VALUES (@from_slot, @to_slot, @subscription, @subscription_hash) + INSERT INTO sol_slot_checkpoints (name, from_slot, to_slot, subscription, subscription_hash) + VALUES (@name, @from_slot, @to_slot, @subscription, @subscription_hash) RETURNING id; `, pgx.NamedArgs{ + "name": name, "from_slot": fromSlot, "to_slot": fromSlot, "subscription": string(subscriptionJson), @@ -86,7 +142,7 @@ func updateCheckpoint(ctx context.Context, db database.DBTX, id string, slot 
uin return err } -func getCheckpointSlot(ctx context.Context, db database.DBTX, subscription *pb.SubscribeRequest) (uint64, error) { +func getCheckpointSlot(ctx context.Context, db database.DBTX, name string, subscription *pb.SubscribeRequest) (uint64, error) { subscriptionJson, err := json.Marshal(subscription) if err != nil { return 0, fmt.Errorf("failed to marshal subscription request: %w", err) @@ -98,12 +154,13 @@ func getCheckpointSlot(ctx context.Context, db database.DBTX, subscription *pb.S sql := ` SELECT COALESCE(MAX(to_slot), 0) FROM sol_slot_checkpoints - WHERE subscription_hash = @subscription_hash + WHERE name = @name + AND subscription_hash = @subscription_hash LIMIT 1; ` var lastIndexedSlot uint64 - err = db.QueryRow(ctx, sql, pgx.NamedArgs{"subscription_hash": subscriptionHash}).Scan(&lastIndexedSlot) + err = db.QueryRow(ctx, sql, pgx.NamedArgs{"name": name, "subscription_hash": subscriptionHash}).Scan(&lastIndexedSlot) if err != nil && err != pgx.ErrNoRows { return 0, fmt.Errorf("failed to scan last slot: %w", err) } diff --git a/solana/indexer/damm_v2.go b/solana/indexer/damm_v2_indexer.go similarity index 86% rename from solana/indexer/damm_v2.go rename to solana/indexer/damm_v2_indexer.go index 8da7e7de..ce15e055 100644 --- a/solana/indexer/damm_v2.go +++ b/solana/indexer/damm_v2_indexer.go @@ -17,9 +17,11 @@ import ( type DammV2Indexer struct { pool database.DbPool grpcConfig GrpcConfig + rpcClient RpcClient logger *zap.Logger } +const DAMM_V2_INDEXER_NAME = "damm_v2" const MAX_DAMM_V2_POOLS_PER_SUBSCRIPTION = 10000 const DAMM_V2_POOL_SUBSCRIPTION_KEY = "dammV2Pools" const DBC_MIGRATION_NOTIFICATION_NAME = "meteora_dbc_migration" @@ -37,26 +39,33 @@ func (d *DammV2Indexer) Start(ctx context.Context) { } })() + // On notification, cancel the previous subscription task (if any) and start a new one handleNotif := func(ctx context.Context, notification *pgconn.Notification) { - // Cancel the previous task if it exists subCtx, cancel := context.WithCancel(ctx) + + // Cancel previous subscription task if lastCancel != nil { lastCancel() } + + // Close previous gRPC clients for _, client := range grpcClients { client.Close() } - clients, err := subscribeToDammV2Pools(subCtx, d.pool, d.grpcConfig, d.logger) + + // Resubscribe to all DAMM V2 pools + clients, err := d.subscribeToDammV2Pools(subCtx) grpcClients = clients if err != nil { d.logger.Error("failed to resubscribe to DAMM V2 pools", zap.Error(err)) return } + lastCancel = cancel } // Setup initial subscription - clients, err := subscribeToDammV2Pools(ctx, d.pool, d.grpcConfig, d.logger) + clients, err := d.subscribeToDammV2Pools(ctx) if err != nil { d.logger.Error("failed to subscribe to DAMM V2 pools", zap.Error(err)) return @@ -70,6 +79,7 @@ func (d *DammV2Indexer) Start(ctx context.Context) { return } + // Wait for shutdown for { select { case <-ctx.Done(): @@ -80,33 +90,79 @@ func (d *DammV2Indexer) Start(ctx context.Context) { } } -func subscribeToDammV2Pools(ctx context.Context, db database.DBTX, grpcConfig GrpcConfig, logger *zap.Logger) ([]GrpcClient, error) { +// Handles a single update message from the gRPC subscription +func (d *DammV2Indexer) HandleUpdate(ctx context.Context, msg *pb.SubscribeUpdate) error { + // Handle slot updates + slotUpdate := msg.GetSlot() + if slotUpdate != nil { + // only update every 10 slots to reduce db load and write latency + if slotUpdate.Slot%10 == 0 { + // Use the filter as the checkpoint ID + checkpointId := msg.Filters[0] + + err := updateCheckpoint(ctx, d.pool, 
checkpointId, slotUpdate.Slot) + if err != nil { + d.logger.Error("failed to update slot checkpoint", zap.Error(err)) + } + } + } + + // Handle DAMM V2 account updates + accUpdate := msg.GetAccount() + if accUpdate != nil { + if msg.Filters[0] == DAMM_V2_POOL_SUBSCRIPTION_KEY { + err := processDammV2PoolUpdate(ctx, d.pool, accUpdate) + if err != nil { + return fmt.Errorf("failed to process DAMM V2 pool update: %w", err) + } + d.logger.Debug("processed DAMM V2 pool update", zap.String("account", solana.PublicKeyFromBytes(accUpdate.Account.Pubkey).String())) + } else { + err := processDammV2PositionUpdate(ctx, d.pool, accUpdate) + if err != nil { + return fmt.Errorf("failed to process DAMM V2 position update: %w", err) + } + d.logger.Debug("processed DAMM V2 position update", zap.String("account", solana.PublicKeyFromBytes(accUpdate.Account.Pubkey).String())) + } + } + return nil +} + +func (d *DammV2Indexer) subscribeToDammV2Pools(ctx context.Context) ([]GrpcClient, error) { done := false page := 0 pageSize := MAX_DAMM_V2_POOLS_PER_SUBSCRIPTION total := 0 grpcClients := make([]GrpcClient, 0) for !done { - dammV2Pools, err := getWatchedDammV2Pools(ctx, db, pageSize, page*pageSize) + dammV2Pools, err := getWatchedDammV2Pools(ctx, d.pool, pageSize, page*pageSize) if err != nil { return nil, fmt.Errorf("failed to get watched DAMM V2 pools: %w", err) } if len(dammV2Pools) == 0 { - logger.Info("no DAMM V2 pools to subscribe to") + d.logger.Info("no DAMM V2 pools to subscribe to") return grpcClients, nil } total += len(dammV2Pools) - logger.Debug("subscribing to DAMM V2 pools....", zap.Int("numPools", len(dammV2Pools))) - subscription := makeDammV2SubscriptionRequest(dammV2Pools) + d.logger.Debug("subscribing to DAMM V2 pools....", zap.Int("numPools", len(dammV2Pools))) + subscription := d.makeDammV2SubscriptionRequest(ctx, dammV2Pools) + // Handle each message from the subscription handleMessage := func(ctx context.Context, msg *pb.SubscribeUpdate) { - handleDammV2Message(ctx, db, msg, logger) + err := d.HandleUpdate(ctx, msg) + if err != nil { + d.logger.Error("failed to handle DAMM V2 update", zap.Error(err)) + + // Add messages that failed to process to the retry queue + if err := addToRetryQueue(ctx, d.pool, DAMM_V2_INDEXER_NAME, msg, err.Error()); err != nil { + d.logger.Error("failed to add to retry queue", zap.Error(err)) + } + } } - grpcClient := NewGrpcClient(grpcConfig) + grpcClient := NewGrpcClient(d.grpcConfig) err = grpcClient.Subscribe(ctx, subscription, handleMessage, func(err error) { - logger.Error("error in DAMM V2 subscription", zap.Error(err)) + d.logger.Error("error in DAMM V2 subscription", zap.Error(err)) }) if err != nil { return nil, fmt.Errorf("failed to subscribe to DAMM V2 pools: %w", err) @@ -118,26 +174,18 @@ func subscribeToDammV2Pools(ctx context.Context, db database.DBTX, grpcConfig Gr } page++ } - logger.Info("subscribed to DAMM V2 pools", zap.Int("numPools", total)) + d.logger.Info("subscribed to DAMM V2 pools", zap.Int("numPools", total)) return grpcClients, nil } -func makeDammV2SubscriptionRequest(dammV2Pools []string) *pb.SubscribeRequest { +func (d *DammV2Indexer) makeDammV2SubscriptionRequest(ctx context.Context, dammV2Pools []string) *pb.SubscribeRequest { commitment := pb.CommitmentLevel_CONFIRMED subscription := &pb.SubscribeRequest{ Commitment: &commitment, } - // Listen for slot updates for checkpointing - subscription.Slots = make(map[string]*pb.SubscribeRequestFilterSlots) - subscription.Slots["checkpoints"] = &pb.SubscribeRequestFilterSlots{} - - 
// fromSlot := uint64(372380625) - // subscription.FromSlot = &fromSlot - - subscription.Accounts = make(map[string]*pb.SubscribeRequestFilterAccounts) - // Listen to all watched pools + subscription.Accounts = make(map[string]*pb.SubscribeRequestFilterAccounts) accountFilter := pb.SubscribeRequestFilterAccounts{ Owner: []string{meteora_damm_v2.ProgramID.String()}, Account: dammV2Pools, @@ -169,29 +217,20 @@ func makeDammV2SubscriptionRequest(dammV2Pools []string) *pb.SubscribeRequest { subscription.Accounts[pool] = &accountFilter } - return subscription -} + // Ensure this subscription has a checkpoint + checkpointId, fromSlot, err := ensureCheckpoint(ctx, DAMM_V2_INDEXER_NAME, d.pool, d.rpcClient, subscription, d.logger) + if err != nil { + d.logger.Error("failed to ensure checkpoint", zap.Error(err)) + } -func handleDammV2Message(ctx context.Context, db database.DBTX, msg *pb.SubscribeUpdate, logger *zap.Logger) { - accUpdate := msg.GetAccount() - if accUpdate != nil { - if msg.Filters[0] == DAMM_V2_POOL_SUBSCRIPTION_KEY { - err := processDammV2PoolUpdate(ctx, db, accUpdate) - if err != nil { - logger.Error("failed to process DAMM V2 pool update", zap.Error(err)) - } else { - logger.Debug("processed DAMM V2 pool update", zap.String("account", solana.PublicKeyFromBytes(accUpdate.Account.Pubkey).String())) - } - } else { - err := processDammV2PositionUpdate(ctx, db, accUpdate) - if err != nil { - logger.Error("failed to process DAMM V2 position update", zap.Error(err)) - } else { - logger.Debug("processed DAMM V2 position update", zap.String("account", solana.PublicKeyFromBytes(accUpdate.Account.Pubkey).String())) - } - } + // Set the from slot for the subscription + subscription.FromSlot = &fromSlot - } + // Listen for slots for making checkpoints + subscription.Slots = make(map[string]*pb.SubscribeRequestFilterSlots) + subscription.Slots[checkpointId] = &pb.SubscribeRequestFilterSlots{} + + return subscription } func processDammV2PoolUpdate( diff --git a/solana/indexer/processor.go b/solana/indexer/processor.go index c59ddfd4..c8b50e16 100644 --- a/solana/indexer/processor.go +++ b/solana/indexer/processor.go @@ -7,10 +7,6 @@ import ( "api.audius.co/config" "api.audius.co/database" - "api.audius.co/solana/spl/programs/claimable_tokens" - "api.audius.co/solana/spl/programs/meteora_dbc" - "api.audius.co/solana/spl/programs/payment_router" - "api.audius.co/solana/spl/programs/reward_manager" "github.com/gagliardetto/solana-go" "github.com/gagliardetto/solana-go/rpc" "github.com/maypok86/otter" @@ -117,12 +113,12 @@ func (p *DefaultProcessor) ProcessTransaction( if meta == nil { return fmt.Errorf("missing tx meta") } - if logger == nil { - logger = zap.NewNop() - } - txLogger := logger.With( - zap.String("signature", tx.Signatures[0].String()), - ) + // if logger == nil { + // logger = zap.NewNop() + // } + // txLogger := logger.With( + // zap.String("signature", tx.Signatures[0].String()), + // ) // Resolve address lookup tables addressTables := make(map[solana.PublicKey]solana.PublicKeySlice) @@ -142,50 +138,50 @@ func (p *DefaultProcessor) ProcessTransaction( } tx.Message.SetAddressTables(addressTables) - signature := tx.Signatures[0].String() - - err := processBalanceChanges(ctx, p.pool, slot, meta, tx, blockTime, txLogger) - if err != nil { - return fmt.Errorf("failed to process balance changes: %w", err) - } - - for instructionIndex, instruction := range tx.Message.Instructions { - programId := tx.Message.AccountKeys[instruction.ProgramIDIndex] - instLogger := txLogger.With( - 
zap.String("programId", programId.String()), - zap.Int("instructionIndex", instructionIndex), - ) - switch programId { - case claimable_tokens.ProgramID: - { - err := processClaimableTokensInstruction(ctx, p.pool, slot, tx, instructionIndex, instruction, signature, instLogger) - if err != nil { - return fmt.Errorf("error processing claimable_tokens instruction %d: %w", instructionIndex, err) - } - } - case reward_manager.ProgramID: - { - err := processRewardManagerInstruction(ctx, p.pool, slot, tx, instructionIndex, instruction, signature, instLogger) - if err != nil { - return fmt.Errorf("error processing reward_manager instruction %d: %w", instructionIndex, err) - } - } - case payment_router.ProgramID: - { - err := processPaymentRouterInstruction(ctx, p.pool, slot, tx, instructionIndex, instruction, signature, blockTime, p.config, instLogger) - if err != nil { - return fmt.Errorf("error processing payment_router instruction %d: %w", instructionIndex, err) - } - } - case meteora_dbc.ProgramID: - { - err := processDbcInstruction(ctx, p.pool, p.rpcClient, slot, tx, instructionIndex, instruction, signature, instLogger) - if err != nil { - return fmt.Errorf("error processing meteora_dbc instruction %d: %w", instructionIndex, err) - } - } - } - } + // signature := tx.Signatures[0].String() + + // err := processBalanceChanges(ctx, p.pool, slot, meta, tx, blockTime, txLogger) + // if err != nil { + // return fmt.Errorf("failed to process balance changes: %w", err) + // } + + // for instructionIndex, instruction := range tx.Message.Instructions { + // programId := tx.Message.AccountKeys[instruction.ProgramIDIndex] + // instLogger := txLogger.With( + // zap.String("programId", programId.String()), + // zap.Int("instructionIndex", instructionIndex), + // ) + // switch programId { + // case claimable_tokens.ProgramID: + // { + // err := processClaimableTokensInstruction(ctx, p.pool, slot, tx, instructionIndex, instruction, signature, instLogger) + // if err != nil { + // return fmt.Errorf("error processing claimable_tokens instruction %d: %w", instructionIndex, err) + // } + // } + // case reward_manager.ProgramID: + // { + // err := processRewardManagerInstruction(ctx, p.pool, slot, tx, instructionIndex, instruction, signature, instLogger) + // if err != nil { + // return fmt.Errorf("error processing reward_manager instruction %d: %w", instructionIndex, err) + // } + // } + // case payment_router.ProgramID: + // { + // err := processPaymentRouterInstruction(ctx, p.pool, slot, tx, instructionIndex, instruction, signature, blockTime, p.config, instLogger) + // if err != nil { + // return fmt.Errorf("error processing payment_router instruction %d: %w", instructionIndex, err) + // } + // } + // case meteora_dbc.ProgramID: + // { + // err := processDbcInstruction(ctx, p.pool, p.rpcClient, slot, tx, instructionIndex, instruction, signature, instLogger) + // if err != nil { + // return fmt.Errorf("error processing meteora_dbc instruction %d: %w", instructionIndex, err) + // } + // } + // } + // } return nil } diff --git a/solana/indexer/retry_queue.go b/solana/indexer/retry_queue.go new file mode 100644 index 00000000..06e42290 --- /dev/null +++ b/solana/indexer/retry_queue.go @@ -0,0 +1,102 @@ +package indexer + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "api.audius.co/database" + "github.com/jackc/pgx/v5" + "google.golang.org/protobuf/encoding/protojson" + + pb "github.com/rpcpool/yellowstone-grpc/examples/golang/proto" +) + +type retryQueueItem struct { + ID string + Indexer string + 
Update    retryQueueUpdate
+	Error     string
+	CreatedAt time.Time
+	UpdatedAt time.Time
+}
+
+type retryQueueUpdate struct {
+	*pb.SubscribeUpdate
+}
+
+var (
+	_ json.Marshaler   = (*retryQueueUpdate)(nil)
+	_ json.Unmarshaler = (*retryQueueUpdate)(nil)
+)
+
+func (r retryQueueUpdate) MarshalJSON() ([]byte, error) {
+	if r.SubscribeUpdate == nil {
+		return []byte("{}"), nil
+	}
+	return protojson.Marshal(r.SubscribeUpdate)
+}
+
+func (r *retryQueueUpdate) UnmarshalJSON(data []byte) error {
+	if r.SubscribeUpdate == nil {
+		r.SubscribeUpdate = &pb.SubscribeUpdate{}
+	}
+	return protojson.Unmarshal(data, r.SubscribeUpdate)
+}
+
+func getRetryQueue(ctx context.Context, db database.DBTX, limit, offset int) ([]retryQueueItem, error) {
+	sql := `SELECT id, indexer, update, error, created_at, updated_at
+		FROM sol_retry_queue
+		ORDER BY created_at ASC
+		LIMIT @limit OFFSET @offset`
+
+	rows, err := db.Query(ctx, sql, pgx.NamedArgs{
+		"limit":  limit,
+		"offset": offset,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("failed to query retry queue: %w", err)
+	}
+
+	// CollectRows yields an empty slice when there are no rows
+	items, err := pgx.CollectRows(rows, pgx.RowToStructByName[retryQueueItem])
+	if err != nil {
+		return nil, fmt.Errorf("failed to collect retry queue items: %w", err)
+	}
+	return items, nil
+}
+
+func addToRetryQueue(ctx context.Context, db database.DBTX, indexer string, update *pb.SubscribeUpdate, errorMessage string) error {
+	sql := `
+		INSERT INTO sol_retry_queue (indexer, update, error)
+		VALUES (@indexer, @update, @error)
+		ON CONFLICT (id) DO UPDATE SET error = @error, updated_at = NOW()
+	;`
+	_, err := db.Exec(ctx, sql, pgx.NamedArgs{
+		"indexer": indexer,
+		"update":  retryQueueUpdate{update},
+		"error":   errorMessage,
+	})
+	if err != nil {
+		return fmt.Errorf("failed to insert into retry queue: %w", err)
+	}
+	return nil
+}
+
+func deleteFromRetryQueue(ctx context.Context, db database.DBTX, id string) error {
+	sql := `DELETE FROM sol_retry_queue WHERE id = @id;`
+	_, err := db.Exec(ctx, sql, pgx.NamedArgs{
+		"id": id,
+	})
+	if err != nil {
+		return fmt.Errorf("failed to delete from retry queue: %w", err)
+	}
+	return nil
+}
diff --git a/solana/indexer/solana_indexer.go b/solana/indexer/solana_indexer.go
index 4a174da6..a926b932 100644
--- a/solana/indexer/solana_indexer.go
+++ b/solana/indexer/solana_indexer.go
@@ -3,13 +3,16 @@ package indexer
 import (
 	"context"
 	"fmt"
+	"time"
 
 	"api.audius.co/config"
 	"api.audius.co/database"
+	"api.audius.co/jobs"
 	"api.audius.co/logging"
 	"github.com/gagliardetto/solana-go"
 	"github.com/gagliardetto/solana-go/rpc"
 	"github.com/jackc/pgx/v5/pgxpool"
+	"github.com/maypok86/otter"
 	pb "github.com/rpcpool/yellowstone-grpc/examples/golang/proto"
 	"go.uber.org/zap"
 )
@@ -32,6 +35,8 @@ type GrpcClient interface {
 	Close()
 }
 
+// LaserStream from Helius only keeps the last ~3000 slots, so cap how far
+// back a subscription may resume from.
+const MAX_SLOT_GAP = 2500
+
 type SolanaIndexer struct {
 	rpcClient  RpcClient
 	grpcClient GrpcClient
@@ -42,6 +47,7 @@ type SolanaIndexer struct {
 	workerCount int32
 
 	dammV2Indexer *DammV2Indexer
+	tokenIndexer  *TokenIndexer
 
 	checkpointId string
 
@@ -71,69 +77,153 @@ func New(config config.Config) *SolanaIndexer {
 		panic(fmt.Errorf("error connecting to database: %w", err))
 	}
 
-	grpcClient := NewGrpcClient(GrpcConfig{
+	grpcConfig := GrpcConfig{
 		Server:               config.SolanaConfig.GrpcProvider,
 		ApiToken:             config.SolanaConfig.GrpcToken,
 		MaxReconnectAttempts: 5,
-	})
+	}
+
+	transactionCache, err 
:= otter.MustBuilder[solana.Signature, *rpc.GetTransactionResult](50).
+		WithTTL(30 * time.Second).
+		CollectStats().
+		Build()
+	if err != nil {
+		panic(fmt.Errorf("failed to create transaction cache: %w", err))
+	}
 
 	dammV2Indexer := &DammV2Indexer{
-		pool: pool,
-		grpcConfig: GrpcConfig{
-			Server:               config.SolanaConfig.GrpcProvider,
-			ApiToken:             config.SolanaConfig.GrpcToken,
-			MaxReconnectAttempts: 5,
-		},
-		logger: logger,
+		pool:       pool,
+		grpcConfig: grpcConfig,
+		rpcClient:  rpcClient,
+		logger:     logger.Named("DammV2Indexer"),
+	}
+
+	tokenIndexer := &TokenIndexer{
+		pool:             pool,
+		grpcConfig:       grpcConfig,
+		rpcClient:        rpcClient,
+		logger:           logger.Named("TokenIndexer"),
+		transactionCache: &transactionCache,
 	}
 
 	s := &SolanaIndexer{
 		rpcClient: rpcClient,
-		grpcClient: grpcClient,
 		logger:    logger,
 		config:    config,
 		pool:      pool,
 
 		workerCount: workerCount,
 
 		dammV2Indexer: dammV2Indexer,
-
-		processor: NewDefaultProcessor(
-			rpcClient,
-			pool,
-			config,
-		),
+		tokenIndexer:  tokenIndexer,
 	}
 
 	return s
 }
 
 func (s *SolanaIndexer) Start(ctx context.Context) error {
-	go s.ScheduleRetries(ctx, s.config.SolanaIndexerRetryInterval)
+	go s.ScheduleProcessRetryQueue(ctx, s.config.SolanaIndexerRetryInterval)
 
-	// statsJob := jobs.NewCoinStatsJob(s.config, s.pool)
-	// statsCtx := context.WithoutCancel(ctx)
-	// statsJob.ScheduleEvery(statsCtx, 5*time.Minute)
-	// go statsJob.Run(statsCtx)
+	statsJob := jobs.NewCoinStatsJob(s.config, s.pool)
+	statsCtx := context.WithoutCancel(ctx)
+	statsJob.ScheduleEvery(statsCtx, 5*time.Minute)
+	go statsJob.Run(statsCtx)
 
-	// dbcJob := jobs.NewCoinDBCJob(s.config, s.pool)
-	// dbcCtx := context.WithoutCancel(ctx)
-	// dbcJob.ScheduleEvery(dbcCtx, 5*time.Minute)
-	// go dbcJob.Run(dbcCtx)
+	dbcJob := jobs.NewCoinDBCJob(s.config, s.pool)
+	dbcCtx := context.WithoutCancel(ctx)
+	dbcJob.ScheduleEvery(dbcCtx, 5*time.Minute)
+	go dbcJob.Run(dbcCtx)
 
+	go s.tokenIndexer.Start(ctx)
 	go s.dammV2Indexer.Start(ctx)
 
-	err := s.Subscribe(ctx)
-	if err != nil {
-		return fmt.Errorf("failed to subscribe: %w", err)
-	}
+	// Block until shutdown; waiting on the context avoids busy-looping
+	<-ctx.Done()
+	s.logger.Info("received shutdown signal, stopping solana indexer")
+	return nil
+}
 
+func (s *SolanaIndexer) ScheduleProcessRetryQueue(ctx context.Context, interval time.Duration) {
+	ticker := time.NewTicker(interval)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-ctx.Done():
+			s.logger.Info("context cancelled, stopping retry ticker")
+			return
+		case <-ticker.C:
+			err := s.ProcessRetryQueue(ctx)
+			if err != nil {
+				s.logger.Error("failed to process retry queue", zap.Error(err))
+			}
+		}
+	}
+}
+
+func (s *SolanaIndexer) ProcessRetryQueue(ctx context.Context) error {
+	limit := 100
+	offset := 0
+	logger := s.logger.Named("RetryQueue")
+	count := 0
+	start := time.Now()
+	for {
+		queue, err := getRetryQueue(ctx, s.pool, limit, offset)
+		if err != nil {
+			return fmt.Errorf("failed to fetch retry queue: %w", err)
+		}
+		if len(queue) == 0 {
+			break
+		}
+
+		for _, item := range queue {
+			// Indexer names must match those passed to addToRetryQueue
+			// (a literal "dammv2" would never match DAMM_V2_INDEXER_NAME)
+			switch item.Indexer {
+			case TOKEN_INDEXER_NAME:
+				err := s.tokenIndexer.HandleUpdate(ctx, item.Update.SubscribeUpdate)
+				if err != nil {
+					logger.Error("failed to retry token update", zap.Error(err))
+					// Leave the item in place and skip past it on the next page
+					offset++
+				} else {
+					err = deleteFromRetryQueue(ctx, s.pool, item.ID)
+					if err != nil {
+						logger.Error("failed to delete from retry queue", zap.Error(err))
+					}
+				}
+			case DAMM_V2_INDEXER_NAME:
+				err := s.dammV2Indexer.HandleUpdate(ctx, item.Update.SubscribeUpdate)
+				if err != nil {
+					logger.Error("failed to retry DAMM V2 update", zap.Error(err))
+					offset++
+ } else { + err = deleteFromRetryQueue(ctx, s.pool, item.ID) + if err != nil { + logger.Error("failed to delete from retry queue", zap.Error(err)) + } + } + default: + logger.Warn("unknown indexer in retry queue", zap.String("indexer", item.Indexer)) + } + count++ + } + } + + if count == 0 { + logger.Debug("no unprocessed transactions to retry") + return nil + } + + logger.Info("finished processing retry queue", + zap.Int("count", count), + zap.Int("failed", offset), + zap.Duration("duration", time.Since(start)), + ) return nil } func (s *SolanaIndexer) Close() { - if p, ok := s.processor.(*DefaultProcessor); ok { - p.ReportCacheStats(s.logger) - } - s.grpcClient.Close() s.pool.Close() } diff --git a/solana/indexer/subscription.go b/solana/indexer/subscription.go index 50729566..94bc9387 100644 --- a/solana/indexer/subscription.go +++ b/solana/indexer/subscription.go @@ -1,313 +1,313 @@ package indexer -import ( - "context" - "encoding/json" - "fmt" - "strings" - "time" - - "api.audius.co/jobs" - "api.audius.co/logging" - "api.audius.co/solana/spl/programs/meteora_dbc" - bin "github.com/gagliardetto/binary" - "github.com/gagliardetto/solana-go" - pb "github.com/rpcpool/yellowstone-grpc/examples/golang/proto" - "go.uber.org/zap" -) - -// LaserStream from Helius only keeps the last 3000 slots. -// Subtract 10 slots to be sure that the subscription doesn't fail. -var MAX_SLOT_GAP = uint64(2990) - -// Used to find graduation progress of DBC pools -const AUDIO_DECIMALS = 8 - -type artistCoinsChangedNotification struct { - Operation string `json:"operation"` - NewMint string `json:"new_mint"` - OldMint string `json:"old_mint"` -} - -func (s *SolanaIndexer) Subscribe(ctx context.Context) error { - // Set up workers to process updates concurrently - msgChan := make(chan *pb.SubscribeUpdate, 3000) - for i := range s.workerCount { - go func(workerId int32) { - for msg := range msgChan { - s.handleMessage(ctx, msg) - } - }(i) - } - defer close(msgChan) - - // On a new message, queue the message to the worker pool - onMessage := func(ctx context.Context, msg *pb.SubscribeUpdate) { - select { - case <-ctx.Done(): - s.logger.Warn("subscription context cancelled, stopping message processing") - return - case msgChan <- msg: - } - } - - // Flush the logger every 15 seconds to ensure logs are written out - go logging.SyncOnTicks(ctx, s.logger, time.Second*15) - - // Acquire a connection to the database and listen for artist coins changes - conn, err := s.pool.Acquire(ctx) - if err != nil { - return fmt.Errorf("failed to acquire database connection: %w", err) - } - defer conn.Release() - - rawConn := conn.Conn() - _, err = rawConn.Exec(ctx, `LISTEN artist_coins_changed`) - if err != nil { - return fmt.Errorf("failed to listen for artist coins changes: %w", err) - } - - // Log when we receive a shutdown signal - defer func() { - s.logger.Info("received shutdown signal, stopping subscription") - }() - - // Loop to reset subscription when the artist coins notification is received - for { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - coins, err := getArtistCoins(ctx, s.pool, true) - if err != nil { - return fmt.Errorf("failed to get artist coins: %w", err) - } - - var dbcPoolConfigs []string - for _, config := range s.config.SolanaConfig.DbcPoolConfigs { - dbcPoolConfigs = append(dbcPoolConfigs, config.String()) - } - - subscription, err := buildSubscriptionRequest(coins, dbcPoolConfigs) - if err != nil { - return fmt.Errorf("failed to create subscription: %w", err) - } - - // Check if 
a backfill is needed with the new subscription - // and find the slot to continue from. - - lastIndexedSlot, err := getCheckpointSlot(ctx, s.pool, subscription) - if err != nil { - return fmt.Errorf("failed to get last indexed slot: %w", err) - } - - latestSlot, err := withRetriesResult(func() (uint64, error) { - return s.rpcClient.GetSlot(ctx, "confirmed") - }, 5, time.Second*2) - if err != nil { - return fmt.Errorf("failed to get slot: %w", err) - } - - var fromSlot uint64 - minimumSlot := uint64(0) - if latestSlot > MAX_SLOT_GAP { - minimumSlot = latestSlot - MAX_SLOT_GAP - } - if lastIndexedSlot > minimumSlot { - fromSlot = lastIndexedSlot - } else { - if lastIndexedSlot == 0 { - fromSlot = latestSlot - 100 // start 100 slots back to be safe - s.logger.Warn("no last indexed slot found, starting from most recent slot (less 100 for safety) and skipping backfill", zap.Uint64("fromSlot", fromSlot)) - } else { - fromSlot = minimumSlot - s.logger.Warn("last indexed slot is too old, starting from minimum slot and backfilling", zap.Uint64("fromSlot", fromSlot), zap.Uint64("toSlot", lastIndexedSlot)) - go func(fromSlot, toSlot uint64) { - err := s.Backfill(ctx, fromSlot, toSlot) - if err != nil { - s.logger.Error("failed to backfill", zap.Uint64("fromSlot", fromSlot), zap.Uint64("toSlot", toSlot), zap.Error(err)) - } - }(lastIndexedSlot, fromSlot) - } - } - - s.checkpointId, err = insertCheckpointStart(ctx, s.pool, fromSlot, subscription) - if err != nil { - return fmt.Errorf("failed to start checkpoint: %w", err) - } - - subscription.FromSlot = &fromSlot - - subCtx, cancel := context.WithCancel(ctx) - defer cancel() - - if err := s.grpcClient.Subscribe(subCtx, subscription, onMessage, s.onError); err != nil { - return fmt.Errorf("failed to subscribe to gRPC server: %w", err) - } - - s.logger.Info("Solana indexer subscribed and listening...", zap.Uint64("fromSlot", fromSlot)) - - for { - notif, err := rawConn.WaitForNotification(ctx) - if err != nil { - return fmt.Errorf("failed to wait for notification: %w", err) - } - - if notif == nil { - s.logger.Warn("received nil notification, continuing to wait for artist_coins changes") - continue - } - if strings.HasPrefix(notif.Channel, "artist_coins_changed") { - var notifData artistCoinsChangedNotification - err := json.Unmarshal([]byte(notif.Payload), ¬ifData) - if err != nil { - s.logger.Error("failed to unmarshal artist_coins changed notification", zap.Error(err)) - continue - } - if notifData.Operation != "INSERT" && notifData.Operation != "DELETE" { - // ignore updates - only care if mints are added or removed - continue - } - s.logger.Info("artist_coins changed, re-starting subscription", - zap.String("oldMint", notifData.OldMint), - zap.String("newMint", notifData.NewMint), - zap.String("operation", notifData.Operation)) - cancel() - s.grpcClient.Close() - <-subCtx.Done() - break - } - } - } -} - -func buildSubscriptionRequest(mintAddresses []string, dbcPoolConfigs []string) (*pb.SubscribeRequest, error) { - commitment := pb.CommitmentLevel_CONFIRMED - subscription := &pb.SubscribeRequest{ - Commitment: &commitment, - } - - // Listen for slots for making checkpoints - subscription.Slots = make(map[string]*pb.SubscribeRequestFilterSlots) - subscription.Slots["checkpoints"] = &pb.SubscribeRequestFilterSlots{} - - // Listen to all the token accounts for the mints we care about - subscription.Accounts = make(map[string]*pb.SubscribeRequestFilterAccounts) - for _, mint := range mintAddresses { - accountFilter := pb.SubscribeRequestFilterAccounts{ 
- Owner: []string{solana.TokenProgramID.String()}, - Filters: []*pb.SubscribeRequestFilterAccountsFilter{ - { - Filter: &pb.SubscribeRequestFilterAccountsFilter_TokenAccountState{ - TokenAccountState: true, - }, - }, - { - Filter: &pb.SubscribeRequestFilterAccountsFilter_Memcmp{ - Memcmp: &pb.SubscribeRequestFilterAccountsFilterMemcmp{ - Offset: 0, - Data: &pb.SubscribeRequestFilterAccountsFilterMemcmp_Base58{ - Base58: mint, - }, - }, - }, - }, - }, - } - subscription.Accounts[mint] = &accountFilter - } - - for _, config := range dbcPoolConfigs { - dbcFilter := pb.SubscribeRequestFilterAccounts{ - Owner: []string{meteora_dbc.ProgramID.String()}, - Filters: []*pb.SubscribeRequestFilterAccountsFilter{ - { - Filter: &pb.SubscribeRequestFilterAccountsFilter_Memcmp{ - Memcmp: &pb.SubscribeRequestFilterAccountsFilterMemcmp{ - // Config is at byte offset 72 in the account data - // see Pool struct in meteora_dbc/state.go - Offset: 72, - Data: &pb.SubscribeRequestFilterAccountsFilterMemcmp_Base58{ - Base58: config, - }, - }, - }, - }, - }, - } - subscription.Accounts[config] = &dbcFilter - } - - // Listen to all the Audius programs for transactions (currently redundant) - // programs := []string{ - // claimable_tokens.ProgramID.String(), - // reward_manager.ProgramID.String(), - // payment_router.ProgramID.String(), - // } - // vote := false - // failed := false - // subscription.Transactions = make(map[string]*pb.SubscribeRequestFilterTransactions) - // transactionFilter := pb.SubscribeRequestFilterTransactions{ - // Vote: &vote, - // Failed: &failed, - // AccountInclude: programs, - // } - // subscription.Transactions["audiusPrograms"] = &transactionFilter - - return subscription, nil -} - -// Handles a message from the gRPC subscription. -func (s *SolanaIndexer) handleMessage(ctx context.Context, msg *pb.SubscribeUpdate) { - logger := s.logger.With(zap.String("indexerSource", "grpc")) - - if slotUpdate := msg.GetSlot(); slotUpdate != nil && slotUpdate.Slot > 0 { - // only update every 10 slots to reduce db load and write latency - if slotUpdate.Slot%10 == 0 { - err := updateCheckpoint(ctx, s.pool, s.checkpointId, slotUpdate.Slot) - if err != nil { - logger.Error("failed to update slot checkpoint", zap.Error(err)) - } - } - } - - accUpdate := msg.GetAccount() - if accUpdate != nil { - for _, filterName := range msg.Filters { - for _, config := range s.config.SolanaConfig.DbcPoolConfigs { - if filterName == config.String() { - account := solana.PublicKeyFromBytes([]byte(accUpdate.Account.Pubkey)) - logger.Info("Updating DBC pool", zap.String("pool", account.String()), zap.String("config", config.String())) - var pool meteora_dbc.Pool - err := bin.NewBinDecoder(accUpdate.Account.Data).Decode(&pool) - - dbcClient := meteora_dbc.NewClient(s.rpcClient, logger) - poolConfig, err := dbcClient.GetPoolConfig(ctx, pool.Config) - if err != nil || poolConfig == nil { - logger.Error("failed to get DBC pool config", zap.String("pool", account.String()), zap.String("config", config.String()), zap.Error(err)) - continue - } - jobs.NewCoinDBCJob(s.config, s.pool).UpsertPool(ctx, account, pool, *poolConfig) - - if err != nil { - logger.Error("failed to update DBC pool", zap.String("pool", account.String()), zap.Error(err)) - } - } - } - } - txSig := solana.SignatureFromBytes(accUpdate.Account.TxnSignature) - err := s.processor.ProcessSignature(ctx, accUpdate.Slot, txSig, logger) - if err != nil { - logger.Error("failed to process signature", zap.Error(err)) - if insertErr := insertUnprocessedTransaction(ctx, 
s.pool, txSig.String(), accUpdate.Slot, err.Error()); insertErr != nil { - logger.Error("failed to insert unprocessed transaction", zap.Error(insertErr)) - } - } - } -} - -func (s *SolanaIndexer) onError(err error) { - s.logger.Error("error in solana indexer", zap.Error(err)) -} +// import ( +// "context" +// "encoding/json" +// "fmt" +// "strings" +// "time" + +// "api.audius.co/jobs" +// "api.audius.co/logging" +// "api.audius.co/solana/spl/programs/meteora_dbc" +// bin "github.com/gagliardetto/binary" +// "github.com/gagliardetto/solana-go" +// pb "github.com/rpcpool/yellowstone-grpc/examples/golang/proto" +// "go.uber.org/zap" +// ) + +// // LaserStream from Helius only keeps the last 3000 slots. +// // Subtract 10 slots to be sure that the subscription doesn't fail. +// var MAX_SLOT_GAP = uint64(2990) + +// // Used to find graduation progress of DBC pools +// const AUDIO_DECIMALS = 8 + +// type artistCoinsChangedNotification struct { +// Operation string `json:"operation"` +// NewMint string `json:"new_mint"` +// OldMint string `json:"old_mint"` +// } + +// func (s *SolanaIndexer) Subscribe(ctx context.Context) error { +// // Set up workers to process updates concurrently +// msgChan := make(chan *pb.SubscribeUpdate, 3000) +// for i := range s.workerCount { +// go func(workerId int32) { +// for msg := range msgChan { +// s.handleMessage(ctx, msg) +// } +// }(i) +// } +// defer close(msgChan) + +// // On a new message, queue the message to the worker pool +// onMessage := func(ctx context.Context, msg *pb.SubscribeUpdate) { +// select { +// case <-ctx.Done(): +// s.logger.Warn("subscription context cancelled, stopping message processing") +// return +// case msgChan <- msg: +// } +// } + +// // Flush the logger every 15 seconds to ensure logs are written out +// go logging.SyncOnTicks(ctx, s.logger, time.Second*15) + +// // Acquire a connection to the database and listen for artist coins changes +// conn, err := s.pool.Acquire(ctx) +// if err != nil { +// return fmt.Errorf("failed to acquire database connection: %w", err) +// } +// defer conn.Release() + +// rawConn := conn.Conn() +// _, err = rawConn.Exec(ctx, `LISTEN artist_coins_changed`) +// if err != nil { +// return fmt.Errorf("failed to listen for artist coins changes: %w", err) +// } + +// // Log when we receive a shutdown signal +// defer func() { +// s.logger.Info("received shutdown signal, stopping subscription") +// }() + +// // Loop to reset subscription when the artist coins notification is received +// for { +// select { +// case <-ctx.Done(): +// return ctx.Err() +// default: +// } + +// coins, err := getArtistCoins(ctx, s.pool, 0, 100) +// if err != nil { +// return fmt.Errorf("failed to get artist coins: %w", err) +// } + +// var dbcPoolConfigs []string +// for _, config := range s.config.SolanaConfig.DbcPoolConfigs { +// dbcPoolConfigs = append(dbcPoolConfigs, config.String()) +// } + +// subscription, err := buildSubscriptionRequest(coins, dbcPoolConfigs) +// if err != nil { +// return fmt.Errorf("failed to create subscription: %w", err) +// } + +// // Check if a backfill is needed with the new subscription +// // and find the slot to continue from. 
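+// // NOTE: this checkpoint/backfill bookkeeping has been extracted into
+// // ensureCheckpoint (checkpoints.go), which the per-indexer subscription
+// // builders now call instead.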
+ +// lastIndexedSlot, err := getCheckpointSlot(ctx, s.pool, subscription) +// if err != nil { +// return fmt.Errorf("failed to get last indexed slot: %w", err) +// } + +// latestSlot, err := withRetriesResult(func() (uint64, error) { +// return s.rpcClient.GetSlot(ctx, "confirmed") +// }, 5, time.Second*2) +// if err != nil { +// return fmt.Errorf("failed to get slot: %w", err) +// } + +// var fromSlot uint64 +// minimumSlot := uint64(0) +// if latestSlot > MAX_SLOT_GAP { +// minimumSlot = latestSlot - MAX_SLOT_GAP +// } +// if lastIndexedSlot > minimumSlot { +// fromSlot = lastIndexedSlot +// } else { +// if lastIndexedSlot == 0 { +// fromSlot = latestSlot - 100 // start 100 slots back to be safe +// s.logger.Warn("no last indexed slot found, starting from most recent slot (less 100 for safety) and skipping backfill", zap.Uint64("fromSlot", fromSlot)) +// } else { +// fromSlot = minimumSlot +// s.logger.Warn("last indexed slot is too old, starting from minimum slot and backfilling", zap.Uint64("fromSlot", fromSlot), zap.Uint64("toSlot", lastIndexedSlot)) +// go func(fromSlot, toSlot uint64) { +// err := s.Backfill(ctx, fromSlot, toSlot) +// if err != nil { +// s.logger.Error("failed to backfill", zap.Uint64("fromSlot", fromSlot), zap.Uint64("toSlot", toSlot), zap.Error(err)) +// } +// }(lastIndexedSlot, fromSlot) +// } +// } + +// s.checkpointId, err = insertCheckpointStart(ctx, s.pool, fromSlot, subscription) +// if err != nil { +// return fmt.Errorf("failed to start checkpoint: %w", err) +// } + +// subscription.FromSlot = &fromSlot + +// subCtx, cancel := context.WithCancel(ctx) +// defer cancel() + +// if err := s.grpcClient.Subscribe(subCtx, subscription, onMessage, s.onError); err != nil { +// return fmt.Errorf("failed to subscribe to gRPC server: %w", err) +// } + +// s.logger.Info("Solana indexer subscribed and listening...", zap.Uint64("fromSlot", fromSlot)) + +// for { +// notif, err := rawConn.WaitForNotification(ctx) +// if err != nil { +// return fmt.Errorf("failed to wait for notification: %w", err) +// } + +// if notif == nil { +// s.logger.Warn("received nil notification, continuing to wait for artist_coins changes") +// continue +// } +// if strings.HasPrefix(notif.Channel, "artist_coins_changed") { +// var notifData artistCoinsChangedNotification +// err := json.Unmarshal([]byte(notif.Payload), ¬ifData) +// if err != nil { +// s.logger.Error("failed to unmarshal artist_coins changed notification", zap.Error(err)) +// continue +// } +// if notifData.Operation != "INSERT" && notifData.Operation != "DELETE" { +// // ignore updates - only care if mints are added or removed +// continue +// } +// s.logger.Info("artist_coins changed, re-starting subscription", +// zap.String("oldMint", notifData.OldMint), +// zap.String("newMint", notifData.NewMint), +// zap.String("operation", notifData.Operation)) +// cancel() +// s.grpcClient.Close() +// <-subCtx.Done() +// break +// } +// } +// } +// } + +// func buildSubscriptionRequest(mintAddresses []string, dbcPoolConfigs []string) (*pb.SubscribeRequest, error) { +// commitment := pb.CommitmentLevel_CONFIRMED +// subscription := &pb.SubscribeRequest{ +// Commitment: &commitment, +// } + +// // Listen for slots for making checkpoints +// subscription.Slots = make(map[string]*pb.SubscribeRequestFilterSlots) +// subscription.Slots["checkpoints"] = &pb.SubscribeRequestFilterSlots{} + +// // Listen to all the token accounts for the mints we care about +// subscription.Accounts = make(map[string]*pb.SubscribeRequestFilterAccounts) +// for 
_, mint := range mintAddresses { +// accountFilter := pb.SubscribeRequestFilterAccounts{ +// Owner: []string{solana.TokenProgramID.String()}, +// Filters: []*pb.SubscribeRequestFilterAccountsFilter{ +// { +// Filter: &pb.SubscribeRequestFilterAccountsFilter_TokenAccountState{ +// TokenAccountState: true, +// }, +// }, +// { +// Filter: &pb.SubscribeRequestFilterAccountsFilter_Memcmp{ +// Memcmp: &pb.SubscribeRequestFilterAccountsFilterMemcmp{ +// Offset: 0, +// Data: &pb.SubscribeRequestFilterAccountsFilterMemcmp_Base58{ +// Base58: mint, +// }, +// }, +// }, +// }, +// }, +// } +// subscription.Accounts[mint] = &accountFilter +// } + +// for _, config := range dbcPoolConfigs { +// dbcFilter := pb.SubscribeRequestFilterAccounts{ +// Owner: []string{meteora_dbc.ProgramID.String()}, +// Filters: []*pb.SubscribeRequestFilterAccountsFilter{ +// { +// Filter: &pb.SubscribeRequestFilterAccountsFilter_Memcmp{ +// Memcmp: &pb.SubscribeRequestFilterAccountsFilterMemcmp{ +// // Config is at byte offset 72 in the account data +// // see Pool struct in meteora_dbc/state.go +// Offset: 72, +// Data: &pb.SubscribeRequestFilterAccountsFilterMemcmp_Base58{ +// Base58: config, +// }, +// }, +// }, +// }, +// }, +// } +// subscription.Accounts[config] = &dbcFilter +// } + +// // Listen to all the Audius programs for transactions (currently redundant) +// // programs := []string{ +// // claimable_tokens.ProgramID.String(), +// // reward_manager.ProgramID.String(), +// // payment_router.ProgramID.String(), +// // } +// // vote := false +// // failed := false +// // subscription.Transactions = make(map[string]*pb.SubscribeRequestFilterTransactions) +// // transactionFilter := pb.SubscribeRequestFilterTransactions{ +// // Vote: &vote, +// // Failed: &failed, +// // AccountInclude: programs, +// // } +// // subscription.Transactions["audiusPrograms"] = &transactionFilter + +// return subscription, nil +// } + +// // Handles a message from the gRPC subscription. 
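+// // NOTE: superseded by the per-indexer HandleUpdate methods
+// // (TokenIndexer.HandleUpdate, DammV2Indexer.HandleUpdate), which push failed
+// // updates onto sol_retry_queue via addToRetryQueue rather than the
+// // unprocessed-transactions table.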
+// func (s *SolanaIndexer) handleMessage(ctx context.Context, msg *pb.SubscribeUpdate) { +// logger := s.logger.With(zap.String("indexerSource", "grpc")) + +// if slotUpdate := msg.GetSlot(); slotUpdate != nil && slotUpdate.Slot > 0 { +// // only update every 10 slots to reduce db load and write latency +// if slotUpdate.Slot%10 == 0 { +// err := updateCheckpoint(ctx, s.pool, s.checkpointId, slotUpdate.Slot) +// if err != nil { +// logger.Error("failed to update slot checkpoint", zap.Error(err)) +// } +// } +// } + +// accUpdate := msg.GetAccount() +// if accUpdate != nil { +// for _, filterName := range msg.Filters { +// for _, config := range s.config.SolanaConfig.DbcPoolConfigs { +// if filterName == config.String() { +// account := solana.PublicKeyFromBytes([]byte(accUpdate.Account.Pubkey)) +// logger.Info("Updating DBC pool", zap.String("pool", account.String()), zap.String("config", config.String())) +// var pool meteora_dbc.Pool +// err := bin.NewBinDecoder(accUpdate.Account.Data).Decode(&pool) + +// dbcClient := meteora_dbc.NewClient(s.rpcClient, logger) +// poolConfig, err := dbcClient.GetPoolConfig(ctx, pool.Config) +// if err != nil || poolConfig == nil { +// logger.Error("failed to get DBC pool config", zap.String("pool", account.String()), zap.String("config", config.String()), zap.Error(err)) +// continue +// } +// jobs.NewCoinDBCJob(s.config, s.pool).UpsertPool(ctx, account, pool, *poolConfig) + +// if err != nil { +// logger.Error("failed to update DBC pool", zap.String("pool", account.String()), zap.Error(err)) +// } +// } +// } +// } +// txSig := solana.SignatureFromBytes(accUpdate.Account.TxnSignature) +// err := s.processor.ProcessSignature(ctx, accUpdate.Slot, txSig, logger) +// if err != nil { +// logger.Error("failed to process signature", zap.Error(err)) +// if insertErr := insertUnprocessedTransaction(ctx, s.pool, txSig.String(), accUpdate.Slot, err.Error()); insertErr != nil { +// logger.Error("failed to insert unprocessed transaction", zap.Error(insertErr)) +// } +// } +// } +// } + +// func (s *SolanaIndexer) onError(err error) { +// s.logger.Error("error in solana indexer", zap.Error(err)) +// } diff --git a/solana/indexer/subscription_test.go b/solana/indexer/subscription_test.go index 307e8fa0..8c94edda 100644 --- a/solana/indexer/subscription_test.go +++ b/solana/indexer/subscription_test.go @@ -1,227 +1,227 @@ package indexer -import ( - "context" - "errors" - "testing" - "time" - - "api.audius.co/database" - "github.com/gagliardetto/solana-go" - "github.com/gagliardetto/solana-go/rpc" - pb "github.com/rpcpool/yellowstone-grpc/examples/golang/proto" - "github.com/stretchr/testify/mock" - "github.com/test-go/testify/assert" - "github.com/test-go/testify/require" - "go.uber.org/zap" -) - -type mockGrpcClient struct { - mock.Mock -} - -func (m *mockGrpcClient) Subscribe( - ctx context.Context, - subRequest *pb.SubscribeRequest, - dataCallback DataCallback, - errorCallback ErrorCallback, -) error { - args := m.Called(ctx, subRequest, dataCallback, errorCallback) - return args.Error(0) -} - -func (m *mockGrpcClient) Close() { - m.Called() -} - -type mockRpcClient struct { - mock.Mock -} - -func (m *mockRpcClient) GetBlockWithOpts(ctx context.Context, slot uint64, opts *rpc.GetBlockOpts) (*rpc.GetBlockResult, error) { - args := m.Called(ctx, slot, opts) - return args.Get(0).(*rpc.GetBlockResult), args.Error(1) -} - -func (m *mockRpcClient) GetSlot(ctx context.Context, commitment rpc.CommitmentType) (uint64, error) { - args := m.Called(ctx, commitment) - return 
args.Get(0).(uint64), args.Error(1) -} - -func (m *mockRpcClient) GetSignaturesForAddressWithOpts(ctx context.Context, address solana.PublicKey, opts *rpc.GetSignaturesForAddressOpts) ([]*rpc.TransactionSignature, error) { - args := m.Called(ctx, address, opts) - return args.Get(0).([]*rpc.TransactionSignature), args.Error(1) -} - -func (m *mockRpcClient) GetTransaction(ctx context.Context, signature solana.Signature, opts *rpc.GetTransactionOpts) (*rpc.GetTransactionResult, error) { - args := m.Called(ctx, signature, opts) - return args.Get(0).(*rpc.GetTransactionResult), args.Error(1) -} - -func (m *mockRpcClient) GetAccountDataBorshInto(ctx context.Context, account solana.PublicKey, out interface{}) error { - args := m.Called(ctx, account, out) - return args.Error(0) -} - -// Tests that the subscription is made for the artist coins in the database -// and is updated as new artist coins are added and removed. -func TestSubscription(t *testing.T) { - pool := database.CreateTestDatabase(t, "test_solana_indexer") - - mint1 := "4k3Dyjzvzp8eXQ2f1b6d5c7g8f9h1j2k3l4m5n6o7p8q9r0s1t2u3v4w5x6y7z8" - mint2 := "9zL1k3Dyjzvzp8eXQ2f1b6d5c7g8f9h1j2k3l4m5n6o7p8q9r0s1t2u3v4w5x6y7z8" - - database.Seed(pool, database.FixtureMap{ - "artist_coins": { - { - "user_id": 1, - "mint": mint1, - "ticker": "TEST", - "decimals": 8, - }, - }, - }) - - grpcMock := &mockGrpcClient{} - - // Initial subscription should include the artist coin in the database. - grpcMock.On("Subscribe", - mock.Anything, - mock.MatchedBy(func(req *pb.SubscribeRequest) bool { - for _, account := range req.Accounts { - for _, filter := range account.Filters { - if f, ok := filter.Filter.(*pb.SubscribeRequestFilterAccountsFilter_Memcmp); ok { - if f.Memcmp.GetBase58() == mint1 { - return true - } - } - } - } - return false - }), - mock.Anything, - mock.Anything, - ).Return(nil) - - // After inserting a new artist coin, the subscription should be updated to include it. - grpcMock.On("Subscribe", - mock.Anything, - mock.MatchedBy(func(req *pb.SubscribeRequest) bool { - foundFirst := false - foundSecond := false - for _, account := range req.Accounts { - for _, filter := range account.Filters { - if f, ok := filter.Filter.(*pb.SubscribeRequestFilterAccountsFilter_Memcmp); ok { - if f.Memcmp.GetBase58() == mint1 { - foundFirst = true - } - if f.Memcmp.GetBase58() == mint2 { - foundSecond = true - } - } - } - } - return foundFirst && foundSecond - }), - mock.Anything, - mock.Anything, - ).Return(nil) - - // After removing artist coins, the subscription should not include the removed mints - grpcMock.On("Subscribe", - mock.Anything, - mock.MatchedBy(func(req *pb.SubscribeRequest) bool { - for _, account := range req.Accounts { - for _, filter := range account.Filters { - if f, ok := filter.Filter.(*pb.SubscribeRequestFilterAccountsFilter_Memcmp); ok { - if f.Memcmp.GetBase58() == mint1 { - return false - } - if f.Memcmp.GetBase58() == mint2 { - return false - } - } - } - } - return true - }), - mock.Anything, - mock.Anything, - ).Return(nil) - - grpcMock.On("Close").Return() - - rpcMock := &mockRpcClient{} - rpcMock.On("GetSlot", mock.Anything, mock.Anything). 
- Return(uint64(100), nil) - - s := &SolanaIndexer{ - grpcClient: grpcMock, - rpcClient: rpcMock, - pool: pool, - logger: zap.NewNop(), - } - - ctx, cancel := context.WithCancel(context.Background()) - - done := make(chan error, 1) - go func() { - done <- s.Subscribe(ctx) - }() - - time.Sleep(200 * time.Millisecond) - - _, err := pool.Exec(ctx, ` - INSERT INTO artist_coins (user_id, mint, ticker, decimals) - VALUES ($1, $2, $3, $4) - `, 1, mint2, "TEST2", 9) - if err != nil { - t.Fatalf("failed to insert new artist coin: %v", err) - } - - time.Sleep(200 * time.Millisecond) - - _, err = pool.Exec(ctx, "DELETE FROM artist_coins") - if err != nil { - t.Fatalf("failed to delete artist coins: %v", err) - } - - time.Sleep(200 * time.Millisecond) - - cancel() - - err = <-done - assert.True(t, errors.Is(err, context.Canceled), err.Error()) - grpcMock.AssertExpectations(t) -} - -func TestSubscription_Unprocessed(t *testing.T) { - pool := database.CreateTestDatabase(t, "test_solana_indexer") - processor := &mockProcessor{} - - processor.On("ProcessSignature", mock.Anything, mock.Anything, mock.Anything, mock.Anything). - Return(errors.New("test error")) - - s := &SolanaIndexer{ - processor: processor, - pool: pool, - logger: zap.NewNop(), - } - - signature := solana.MustSignatureFromBase58("58sUxCqs2sbErrZhH1A1YcFrYpK35Ph2AHpySxkCcRkeer1bJmfyCRKxQ7qeR26AA1qEnDb58KJwviDJXGqkAStQ") - - s.handleMessage(t.Context(), &pb.SubscribeUpdate{ - UpdateOneof: &pb.SubscribeUpdate_Account{ - Account: &pb.SubscribeUpdateAccount{ - Account: &pb.SubscribeUpdateAccountInfo{ - TxnSignature: signature[:], - }, - }, - }, - }) - - unprocessedTxs, err := getUnprocessedTransactions(t.Context(), pool, 100, 0) - require.NoError(t, err, "failed to get unprocessed transactions") - assert.Len(t, unprocessedTxs, 1, "expected one unprocessed transaction") - assert.Equal(t, "58sUxCqs2sbErrZhH1A1YcFrYpK35Ph2AHpySxkCcRkeer1bJmfyCRKxQ7qeR26AA1qEnDb58KJwviDJXGqkAStQ", unprocessedTxs[0].Signature, "unexpected unprocessed transaction") -} +// import ( +// "context" +// "errors" +// "testing" +// "time" + +// "api.audius.co/database" +// "github.com/gagliardetto/solana-go" +// "github.com/gagliardetto/solana-go/rpc" +// pb "github.com/rpcpool/yellowstone-grpc/examples/golang/proto" +// "github.com/stretchr/testify/mock" +// "github.com/test-go/testify/assert" +// "github.com/test-go/testify/require" +// "go.uber.org/zap" +// ) + +// type mockGrpcClient struct { +// mock.Mock +// } + +// func (m *mockGrpcClient) Subscribe( +// ctx context.Context, +// subRequest *pb.SubscribeRequest, +// dataCallback DataCallback, +// errorCallback ErrorCallback, +// ) error { +// args := m.Called(ctx, subRequest, dataCallback, errorCallback) +// return args.Error(0) +// } + +// func (m *mockGrpcClient) Close() { +// m.Called() +// } + +// type mockRpcClient struct { +// mock.Mock +// } + +// func (m *mockRpcClient) GetBlockWithOpts(ctx context.Context, slot uint64, opts *rpc.GetBlockOpts) (*rpc.GetBlockResult, error) { +// args := m.Called(ctx, slot, opts) +// return args.Get(0).(*rpc.GetBlockResult), args.Error(1) +// } + +// func (m *mockRpcClient) GetSlot(ctx context.Context, commitment rpc.CommitmentType) (uint64, error) { +// args := m.Called(ctx, commitment) +// return args.Get(0).(uint64), args.Error(1) +// } + +// func (m *mockRpcClient) GetSignaturesForAddressWithOpts(ctx context.Context, address solana.PublicKey, opts *rpc.GetSignaturesForAddressOpts) ([]*rpc.TransactionSignature, error) { +// args := m.Called(ctx, address, opts) +// return 
args.Get(0).([]*rpc.TransactionSignature), args.Error(1) +// } + +// func (m *mockRpcClient) GetTransaction(ctx context.Context, signature solana.Signature, opts *rpc.GetTransactionOpts) (*rpc.GetTransactionResult, error) { +// args := m.Called(ctx, signature, opts) +// return args.Get(0).(*rpc.GetTransactionResult), args.Error(1) +// } + +// func (m *mockRpcClient) GetAccountDataBorshInto(ctx context.Context, account solana.PublicKey, out interface{}) error { +// args := m.Called(ctx, account, out) +// return args.Error(0) +// } + +// // Tests that the subscription is made for the artist coins in the database +// // and is updated as new artist coins are added and removed. +// func TestSubscription(t *testing.T) { +// pool := database.CreateTestDatabase(t, "test_solana_indexer") + +// mint1 := "4k3Dyjzvzp8eXQ2f1b6d5c7g8f9h1j2k3l4m5n6o7p8q9r0s1t2u3v4w5x6y7z8" +// mint2 := "9zL1k3Dyjzvzp8eXQ2f1b6d5c7g8f9h1j2k3l4m5n6o7p8q9r0s1t2u3v4w5x6y7z8" + +// database.Seed(pool, database.FixtureMap{ +// "artist_coins": { +// { +// "user_id": 1, +// "mint": mint1, +// "ticker": "TEST", +// "decimals": 8, +// }, +// }, +// }) + +// grpcMock := &mockGrpcClient{} + +// // Initial subscription should include the artist coin in the database. +// grpcMock.On("Subscribe", +// mock.Anything, +// mock.MatchedBy(func(req *pb.SubscribeRequest) bool { +// for _, account := range req.Accounts { +// for _, filter := range account.Filters { +// if f, ok := filter.Filter.(*pb.SubscribeRequestFilterAccountsFilter_Memcmp); ok { +// if f.Memcmp.GetBase58() == mint1 { +// return true +// } +// } +// } +// } +// return false +// }), +// mock.Anything, +// mock.Anything, +// ).Return(nil) + +// // After inserting a new artist coin, the subscription should be updated to include it. +// grpcMock.On("Subscribe", +// mock.Anything, +// mock.MatchedBy(func(req *pb.SubscribeRequest) bool { +// foundFirst := false +// foundSecond := false +// for _, account := range req.Accounts { +// for _, filter := range account.Filters { +// if f, ok := filter.Filter.(*pb.SubscribeRequestFilterAccountsFilter_Memcmp); ok { +// if f.Memcmp.GetBase58() == mint1 { +// foundFirst = true +// } +// if f.Memcmp.GetBase58() == mint2 { +// foundSecond = true +// } +// } +// } +// } +// return foundFirst && foundSecond +// }), +// mock.Anything, +// mock.Anything, +// ).Return(nil) + +// // After removing artist coins, the subscription should not include the removed mints +// grpcMock.On("Subscribe", +// mock.Anything, +// mock.MatchedBy(func(req *pb.SubscribeRequest) bool { +// for _, account := range req.Accounts { +// for _, filter := range account.Filters { +// if f, ok := filter.Filter.(*pb.SubscribeRequestFilterAccountsFilter_Memcmp); ok { +// if f.Memcmp.GetBase58() == mint1 { +// return false +// } +// if f.Memcmp.GetBase58() == mint2 { +// return false +// } +// } +// } +// } +// return true +// }), +// mock.Anything, +// mock.Anything, +// ).Return(nil) + +// grpcMock.On("Close").Return() + +// rpcMock := &mockRpcClient{} +// rpcMock.On("GetSlot", mock.Anything, mock.Anything). 
+// Return(uint64(100), nil) + +// s := &SolanaIndexer{ +// grpcClient: grpcMock, +// rpcClient: rpcMock, +// pool: pool, +// logger: zap.NewNop(), +// } + +// ctx, cancel := context.WithCancel(context.Background()) + +// done := make(chan error, 1) +// go func() { +// done <- s.Subscribe(ctx) +// }() + +// time.Sleep(200 * time.Millisecond) + +// _, err := pool.Exec(ctx, ` +// INSERT INTO artist_coins (user_id, mint, ticker, decimals) +// VALUES ($1, $2, $3, $4) +// `, 1, mint2, "TEST2", 9) +// if err != nil { +// t.Fatalf("failed to insert new artist coin: %v", err) +// } + +// time.Sleep(200 * time.Millisecond) + +// _, err = pool.Exec(ctx, "DELETE FROM artist_coins") +// if err != nil { +// t.Fatalf("failed to delete artist coins: %v", err) +// } + +// time.Sleep(200 * time.Millisecond) + +// cancel() + +// err = <-done +// assert.True(t, errors.Is(err, context.Canceled), err.Error()) +// grpcMock.AssertExpectations(t) +// } + +// func TestSubscription_Unprocessed(t *testing.T) { +// pool := database.CreateTestDatabase(t, "test_solana_indexer") +// processor := &mockProcessor{} + +// processor.On("ProcessSignature", mock.Anything, mock.Anything, mock.Anything, mock.Anything). +// Return(errors.New("test error")) + +// s := &SolanaIndexer{ +// processor: processor, +// pool: pool, +// logger: zap.NewNop(), +// } + +// signature := solana.MustSignatureFromBase58("58sUxCqs2sbErrZhH1A1YcFrYpK35Ph2AHpySxkCcRkeer1bJmfyCRKxQ7qeR26AA1qEnDb58KJwviDJXGqkAStQ") + +// s.handleMessage(t.Context(), &pb.SubscribeUpdate{ +// UpdateOneof: &pb.SubscribeUpdate_Account{ +// Account: &pb.SubscribeUpdateAccount{ +// Account: &pb.SubscribeUpdateAccountInfo{ +// TxnSignature: signature[:], +// }, +// }, +// }, +// }) + +// unprocessedTxs, err := getUnprocessedTransactions(t.Context(), pool, 100, 0) +// require.NoError(t, err, "failed to get unprocessed transactions") +// assert.Len(t, unprocessedTxs, 1, "expected one unprocessed transaction") +// assert.Equal(t, "58sUxCqs2sbErrZhH1A1YcFrYpK35Ph2AHpySxkCcRkeer1bJmfyCRKxQ7qeR26AA1qEnDb58KJwviDJXGqkAStQ", unprocessedTxs[0].Signature, "unexpected unprocessed transaction") +// } diff --git a/solana/indexer/token_indexer.go b/solana/indexer/token_indexer.go new file mode 100644 index 00000000..9caf34ed --- /dev/null +++ b/solana/indexer/token_indexer.go @@ -0,0 +1,281 @@ +package indexer + +import ( + "context" + "fmt" + + "api.audius.co/database" + "github.com/gagliardetto/solana-go" + "github.com/gagliardetto/solana-go/rpc" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgconn" + "github.com/maypok86/otter" + pb "github.com/rpcpool/yellowstone-grpc/examples/golang/proto" + "go.uber.org/zap" +) + +type TokenIndexer struct { + pool database.DbPool + grpcConfig GrpcConfig + rpcClient RpcClient + + logger *zap.Logger + + // Shared cache for recently fetched transactions + transactionCache *otter.Cache[solana.Signature, *rpc.GetTransactionResult] +} + +const TOKEN_INDEXER_NAME = "token" +const ARTIST_COIN_NOTIFICATION_NAME = "artist_coins_changed" +const MAX_ARTIST_COIN_MINTS_PER_SUBSCRIPTION = 10000 +const WORKER_CHANNEL_SIZE = 3000 +const WORKER_COUNT = 50 + +func (t *TokenIndexer) Start(ctx context.Context) { + // To ensure only one subscription task is running at a time, keep track of + // the last cancel function and call it on the next notification. 
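+	// A sketch of the intended hand-off (the real logic lives in handleNotif below):
+	//
+	//	subCtx, cancel := context.WithCancel(ctx)
+	//	if lastCancel != nil {
+	//		lastCancel() // stop the previous subscription task
+	//	}
+	//	lastCancel = cancel
+	//
+	// Storing cancel before any early return (rather than only after a successful
+	// resubscribe) would also avoid leaking subCtx when resubscription fails.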
+	var lastCancel context.CancelFunc
+
+	// Set up a worker pool for handling messages to keep up with high throughput
+	workerChan := make(chan *pb.SubscribeUpdate, WORKER_CHANNEL_SIZE)
+	for i := range WORKER_COUNT {
+		go func(workerID int) {
+			for updateMessage := range workerChan {
+				err := t.HandleUpdate(ctx, updateMessage)
+				if err != nil {
+					t.logger.Error("failed to handle token update", zap.Int("workerID", workerID), zap.Error(err))
+
+					// Add messages that failed to process to the retry queue
+					if err := addToRetryQueue(ctx, t.pool, TOKEN_INDEXER_NAME, updateMessage, err.Error()); err != nil {
+						t.logger.Error("failed to add to retry queue", zap.Error(err))
+					}
+				}
+			}
+		}(i)
+	}
+
+	// Ensure all gRPC clients are closed on shutdown and that the workers are closed
+	var grpcClients []GrpcClient
+	defer (func() {
+		for _, client := range grpcClients {
+			client.Close()
+		}
+		close(workerChan)
+	})()
+
+	// Post messages to the worker pool
+	handleUpdate := func(ctx context.Context, message *pb.SubscribeUpdate) {
+		select {
+		case <-ctx.Done():
+			t.logger.Warn("context cancelled, not handling update")
+			return
+		case workerChan <- message:
+		}
+	}
+
+	// On notification, cancel the previous subscription task (if any) and start a new one
+	handleNotif := func(ctx context.Context, notification *pgconn.Notification) {
+		subCtx, cancel := context.WithCancel(ctx)
+
+		// Cancel previous subscription task
+		if lastCancel != nil {
+			lastCancel()
+		}
+
+		// Close previous gRPC clients
+		for _, client := range grpcClients {
+			client.Close()
+		}
+
+		// Resubscribe to all artist coins
+		// TODO: Optimize this to only add/remove new coins instead of resubscribing to all
+		clients, err := t.subscribeToArtistCoins(subCtx, handleUpdate)
+		grpcClients = clients
+		if err != nil {
+			t.logger.Error("failed to resubscribe to artist coins", zap.Error(err))
+			// Release the new context so it isn't leaked
+			cancel()
+			return
+		}
+
+		lastCancel = cancel
+	}
+
+	// Initial subscription to all artist coins
+	clients, err := t.subscribeToArtistCoins(ctx, handleUpdate)
+	if err != nil {
+		t.logger.Error("failed to subscribe to artist coins", zap.Error(err))
+		return
+	}
+	grpcClients = clients
+
+	// Watch for new coins to be added
+	err = watchPgNotification(ctx, t.pool, ARTIST_COIN_NOTIFICATION_NAME, handleNotif, t.logger)
+	if err != nil {
+		t.logger.Error("failed to watch for artist coin changes", zap.Error(err))
+		return
+	}
+
+	// Block until shutdown rather than spinning on ctx.Done()
+	<-ctx.Done()
+	t.logger.Info("received shutdown signal, stopping artist coin indexer")
+}
+
+// Handles a single update message from the gRPC subscription
+func (t *TokenIndexer) HandleUpdate(ctx context.Context, msg *pb.SubscribeUpdate) error {
+	// Handle slot updates
+	slotUpdate := msg.GetSlot()
+	if slotUpdate != nil {
+		// only update every 10 slots to reduce db load and write latency
+		if slotUpdate.Slot%10 == 0 && len(msg.Filters) > 0 {
+			// Use the filter as the checkpoint ID
+			checkpointId := msg.Filters[0]
+
+			err := updateCheckpoint(ctx, t.pool, checkpointId, slotUpdate.Slot)
+			if err != nil {
+				t.logger.Error("failed to update slot checkpoint", zap.Error(err))
+			}
+		}
+	}
+
+	// Handle balance changes
+	accUpdate := msg.GetAccount()
+	if accUpdate != nil {
+		txSig := solana.SignatureFromBytes(accUpdate.Account.TxnSignature)
+
+		// Fetch the transaction details
+		txRes, err := fetchTransactionWithCache(ctx, t.transactionCache, t.rpcClient, txSig)
+		if err != nil {
+			return fmt.Errorf("failed to fetch transaction: %w", err)
+		}
+
+		// Decode the transaction
+		tx, err :=
txRes.Transaction.GetTransaction() + if err != nil { + return fmt.Errorf("failed to decode transaction: %w", err) + } + + // Add the lookup table accounts to the message accounts + tx = resolveLookupTables(ctx, t.rpcClient, tx, txRes.Meta) + + // Extract the mints we're tracking using the subscription's filters + trackedMints := msg.Filters + + err = processBalanceChanges(ctx, t.pool, accUpdate.Slot, txRes.Meta, tx, txRes.BlockTime.Time(), trackedMints, t.logger) + if err != nil { + return fmt.Errorf("failed to process balance changes: %w", err) + } + } + return nil +} + +func (t *TokenIndexer) subscribeToArtistCoins(ctx context.Context, handleUpdate func(ctx context.Context, message *pb.SubscribeUpdate)) ([]GrpcClient, error) { + done := false + page := 0 + pageSize := MAX_ARTIST_COIN_MINTS_PER_SUBSCRIPTION + grpcClients := make([]GrpcClient, 0) + total := 0 + for !done { + mints, err := getArtistCoins(ctx, t.pool, pageSize, page*pageSize) + if err != nil { + return nil, fmt.Errorf("failed to get artist coins: %w", err) + } + if len(mints) == 0 { + t.logger.Info("no more artist coins to subscribe to, exiting") + return grpcClients, nil + } + total += len(mints) + t.logger.Debug("subscribing to artist coins...", zap.Int("numCoins", len(mints))) + subscription, err := t.makeMintSubscriptionRequest(ctx, mints) + if err != nil { + return nil, fmt.Errorf("failed to make mint subscription request: %w", err) + } + + grpcClient := NewGrpcClient(t.grpcConfig) + err = grpcClient.Subscribe(ctx, subscription, handleUpdate, func(err error) { + t.logger.Error("error in token subscription", zap.Error(err)) + }) + if err != nil { + return nil, fmt.Errorf("failed to subscribe to artist coins: %w", err) + } + grpcClients = append(grpcClients, grpcClient) + + if len(mints) < pageSize { + done = true + } + page++ + } + t.logger.Info("subscribed to artist coins", zap.Int("numCoins", total)) + return grpcClients, nil +} + +func (t *TokenIndexer) makeMintSubscriptionRequest(ctx context.Context, mintAddresses []string) (*pb.SubscribeRequest, error) { + commitment := pb.CommitmentLevel_CONFIRMED + subscription := &pb.SubscribeRequest{ + Commitment: &commitment, + } + + // Listen to all the token accounts for the mints we care about + subscription.Accounts = make(map[string]*pb.SubscribeRequestFilterAccounts) + for _, mint := range mintAddresses { + accountFilter := pb.SubscribeRequestFilterAccounts{ + Owner: []string{solana.TokenProgramID.String()}, + Filters: []*pb.SubscribeRequestFilterAccountsFilter{ + { + Filter: &pb.SubscribeRequestFilterAccountsFilter_TokenAccountState{ + TokenAccountState: true, + }, + }, + { + Filter: &pb.SubscribeRequestFilterAccountsFilter_Memcmp{ + Memcmp: &pb.SubscribeRequestFilterAccountsFilterMemcmp{ + Offset: 0, // Mint is at offset 0 + Data: &pb.SubscribeRequestFilterAccountsFilterMemcmp_Base58{ + Base58: mint, + }, + }, + }, + }, + }, + } + subscription.Accounts[mint] = &accountFilter + } + + // Ensure this subscription has a checkpoint + checkpointId, fromSlot, err := ensureCheckpoint(ctx, TOKEN_INDEXER_NAME, t.pool, t.rpcClient, subscription, t.logger) + if err != nil { + return nil, fmt.Errorf("failed to set from slot: %w", err) + } + + // Set the from slot for the subscription + subscription.FromSlot = &fromSlot + + // Listen for slots for making checkpoints + subscription.Slots = make(map[string]*pb.SubscribeRequestFilterSlots) + subscription.Slots[checkpointId] = &pb.SubscribeRequestFilterSlots{} + + return subscription, nil +} + +func getArtistCoins(ctx context.Context, db 
database.DBTX, limit int, offset int) ([]string, error) {
+	// Order by mint so that LIMIT/OFFSET pagination is stable across pages
+	sqlMints := `SELECT mint FROM artist_coins ORDER BY mint LIMIT @limit OFFSET @offset`
+	rows, err := db.Query(ctx, sqlMints, pgx.NamedArgs{
+		"limit":  limit,
+		"offset": offset,
+	})
+	if err != nil {
+		if err == pgx.ErrNoRows {
+			return nil, nil // No mints found, return empty slice
+		}
+		return nil, fmt.Errorf("failed to query mints: %w", err)
+	}
+	mintAddresses, err := pgx.CollectRows(rows, pgx.RowTo[string])
+	if err != nil {
+		return nil, fmt.Errorf("failed to collect mints: %w", err)
+	}
+	return mintAddresses, nil
+}
diff --git a/solana/indexer/utils.go b/solana/indexer/utils.go
index 625536f3..7944b569 100644
--- a/solana/indexer/utils.go
+++ b/solana/indexer/utils.go
@@ -6,8 +6,10 @@ import (
 	"time"
 
 	"api.audius.co/database"
-	"github.com/jackc/pgx/v5"
+	"github.com/gagliardetto/solana-go"
+	"github.com/gagliardetto/solana-go/rpc"
 	"github.com/jackc/pgx/v5/pgconn"
+	"github.com/maypok86/otter"
 	"go.uber.org/zap"
 )
 
@@ -40,28 +42,6 @@ func withRetriesResult[T any](f func() (T, error), maxRetries int, interval time
 	return result, nil
 }
 
-var mintsCache []string
-
-func getArtistCoins(ctx context.Context, db database.DBTX, forceRefresh bool) ([]string, error) {
-	if !forceRefresh && mintsCache != nil {
-		return mintsCache, nil
-	}
-	sqlMints := `SELECT mint FROM artist_coins`
-	rows, err := db.Query(ctx, sqlMints)
-	if err != nil {
-		if err == pgx.ErrNoRows {
-			return nil, nil // No mints found, return empty slice
-		}
-		return nil, fmt.Errorf("failed to query mints: %w", err)
-	}
-	mintAddresses, err := pgx.CollectRows(rows, pgx.RowTo[string])
-	if err != nil {
-		return nil, fmt.Errorf("failed to collect mints: %w", err)
-	}
-	mintsCache = mintAddresses
-	return mintAddresses, nil
-}
-
 type notificationCallback func(ctx context.Context, notification *pgconn.Notification)
 
 func watchPgNotification(ctx context.Context, pool database.DbPool, notification string, callback notificationCallback, logger *zap.Logger) error {
@@ -110,3 +90,67 @@ func watchPgNotification(ctx context.Context, pool database.DbPool, notification
 	}()
 	return nil
 }
+
+// Gets a transaction from a cache or fetches it from the RPC. Handles retries.
+func fetchTransactionWithCache(
+	ctx context.Context,
+	transactionCache *otter.Cache[solana.Signature,
+		*rpc.GetTransactionResult],
+	rpcClient RpcClient,
+	signature solana.Signature,
+) (*rpc.GetTransactionResult, error) {
+	// Check if the transaction is in the cache
+	if transactionCache != nil {
+		if res, ok := transactionCache.Get(signature); ok {
+			return res, nil
+		}
+	}
+
+	// If the transaction is not in the cache, fetch it from the RPC
+	res, err := withRetriesResult(func() (*rpc.GetTransactionResult, error) {
+		return rpcClient.GetTransaction(
+			ctx,
+			signature,
+			&rpc.GetTransactionOpts{
+				Commitment:                     rpc.CommitmentConfirmed,
+				MaxSupportedTransactionVersion: &rpc.MaxSupportedTransactionVersion0,
+			},
+		)
+	}, 5, 1*time.Second)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get transaction: %w", err)
+	}
+
+	// Store the fetched transaction in the cache
+	if transactionCache != nil {
+		transactionCache.Set(signature, res)
+	}
+
+	return res, nil
+}
+
+// Resolves address lookup tables in the given transaction using the provided metadata.
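+// A v0 transaction message stores only (table address, index) pairs for keys
+// loaded via lookup tables; the concrete addresses arrive separately in
+// meta.LoadedAddresses as two lists (writable and readonly), each ordered by
+// lookup. Rebuilding a sparse 256-entry table per lookup (a table holds at
+// most 256 addresses, since indexes are u8) lets SetAddressTables resolve
+// each index back to its concrete address.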
+func resolveLookupTables( + ctx context.Context, + rpcClient RpcClient, + tx *solana.Transaction, + meta *rpc.TransactionMeta, +) *solana.Transaction { + addressTables := make(map[solana.PublicKey]solana.PublicKeySlice) + writablePos := 0 + readonlyPos := 0 + for _, lu := range tx.Message.AddressTableLookups { + addresses := make(solana.PublicKeySlice, 256) + for _, idx := range lu.WritableIndexes { + addresses[idx] = meta.LoadedAddresses.Writable[writablePos] + writablePos += 1 + } + for _, idx := range lu.ReadonlyIndexes { + addresses[idx] = meta.LoadedAddresses.ReadOnly[readonlyPos] + readonlyPos += 1 + } + addressTables[lu.AccountKey] = addresses + } + tx.Message.SetAddressTables(addressTables) + return tx +} From f8bf089d40435baac0d5c9346ffdb28913c86a0b Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Tue, 14 Oct 2025 08:53:33 -0700 Subject: [PATCH 06/56] removals --- ddl/migrations/0170_sol_retry_queue.sql | 4 +- solana/indexer/db_insert_test.go | 4 +- solana/indexer/processor.go | 199 ------------- solana/indexer/subscription.go | 313 --------------------- solana/indexer/unprocessed_transactions.go | 121 -------- 5 files changed, 5 insertions(+), 636 deletions(-) delete mode 100644 solana/indexer/processor.go delete mode 100644 solana/indexer/subscription.go delete mode 100644 solana/indexer/unprocessed_transactions.go diff --git a/ddl/migrations/0170_sol_retry_queue.sql b/ddl/migrations/0170_sol_retry_queue.sql index e0372c21..2184e0ac 100644 --- a/ddl/migrations/0170_sol_retry_queue.sql +++ b/ddl/migrations/0170_sol_retry_queue.sql @@ -14,4 +14,6 @@ COMMENT ON COLUMN sol_retry_queue.created_at IS 'The timestamp when the retry en COMMENT ON COLUMN sol_retry_queue.updated_at IS 'The timestamp when the retry entry was last updated.'; ALTER TABLE sol_slot_checkpoints ADD COLUMN IF NOT EXISTS name TEXT; -COMMENT ON COLUMN sol_slot_checkpoints.name IS 'The name of the indexer this checkpoint is for (e.g., token_indexer, damm_v2_indexer).'; \ No newline at end of file +COMMENT ON COLUMN sol_slot_checkpoints.name IS 'The name of the indexer this checkpoint is for (e.g., token_indexer, damm_v2_indexer).'; + +DROP TABLE IF EXISTS sol_unprocessed_txs; \ No newline at end of file diff --git a/solana/indexer/db_insert_test.go b/solana/indexer/db_insert_test.go index 35b87131..f381c366 100644 --- a/solana/indexer/db_insert_test.go +++ b/solana/indexer/db_insert_test.go @@ -94,14 +94,14 @@ func TestInserts(t *testing.T) { assert.NoError(t, err, "failed to insert reward disbursement") req := proto.SubscribeRequest{} - id, err := insertCheckpointStart(t.Context(), pool, 100, &req) + id, err := insertCheckpointStart(t.Context(), pool, "backfill", 100, &req) assert.NoError(t, err, "failed to insert checkpoint start") assert.NotEmpty(t, id, "checkpoint ID should not be empty") err = updateCheckpoint(t.Context(), pool, id, 201) assert.NoError(t, err, "failed to update checkpoint") - slot, err := getCheckpointSlot(t.Context(), pool, &req) + slot, err := getCheckpointSlot(t.Context(), pool, "backfill", &req) assert.NoError(t, err, "failed to get checkpoint slot") assert.Equal(t, uint64(201), slot, "checkpoint slot should match updated value") diff --git a/solana/indexer/processor.go b/solana/indexer/processor.go deleted file mode 100644 index c8b50e16..00000000 --- a/solana/indexer/processor.go +++ /dev/null @@ -1,199 +0,0 @@ -package indexer - -import ( - "context" - "fmt" - "time" - - "api.audius.co/config" - "api.audius.co/database" - 
"github.com/gagliardetto/solana-go" - "github.com/gagliardetto/solana-go/rpc" - "github.com/maypok86/otter" - "go.uber.org/zap" -) - -type Processor interface { - ProcessSignature(ctx context.Context, slot uint64, txSig solana.Signature, logger *zap.Logger) error - ProcessTransaction( - ctx context.Context, - slot uint64, - meta *rpc.TransactionMeta, - tx *solana.Transaction, - blockTime time.Time, - logger *zap.Logger, - ) error -} - -type DefaultProcessor struct { - rpcClient RpcClient - pool database.DbPool - config config.Config - transactionCache *otter.Cache[solana.Signature, *rpc.GetTransactionResult] -} - -func NewDefaultProcessor( - rpcClient RpcClient, - pool database.DbPool, - config config.Config, -) *DefaultProcessor { - cache, err := otter.MustBuilder[solana.Signature, *rpc.GetTransactionResult](50). - WithTTL(30 * time.Second). - CollectStats(). - Build() - - if err != nil { - panic(fmt.Errorf("failed to create transaction cache: %w", err)) - } - return &DefaultProcessor{ - rpcClient: rpcClient, - pool: pool, - config: config, - transactionCache: &cache, - } -} - -func (p *DefaultProcessor) ProcessSignature(ctx context.Context, slot uint64, txSig solana.Signature, logger *zap.Logger) error { - var txRes *rpc.GetTransactionResult - - // Check if the transaction is in the cache - if p.transactionCache != nil { - if res, ok := p.transactionCache.Get(txSig); ok { - logger.Debug("cache hit") - txRes = res - } else { - logger.Debug("cache miss") - } - } - - if txRes == nil { - // If the transaction is not in the cache, fetch it from the RPC - res, err := withRetriesResult(func() (*rpc.GetTransactionResult, error) { - return p.rpcClient.GetTransaction( - ctx, - txSig, - &rpc.GetTransactionOpts{ - Commitment: rpc.CommitmentConfirmed, - MaxSupportedTransactionVersion: &rpc.MaxSupportedTransactionVersion0, - }, - ) - }, 5, 1*time.Second) - if err != nil { - return fmt.Errorf("failed to get transaction: %w", err) - } - if p.transactionCache != nil { - p.transactionCache.Set(txSig, res) - txRes = res - } - } - - tx, err := txRes.Transaction.GetTransaction() - if err != nil { - return fmt.Errorf("failed to decode transaction: %w", err) - } - - err = p.ProcessTransaction(ctx, slot, txRes.Meta, tx, txRes.BlockTime.Time(), logger) - if err != nil { - return fmt.Errorf("failed to process transaction: %w", err) - } - return nil -} - -func (p *DefaultProcessor) ProcessTransaction( - ctx context.Context, - slot uint64, - meta *rpc.TransactionMeta, - tx *solana.Transaction, - blockTime time.Time, - logger *zap.Logger, -) error { - if tx == nil { - return fmt.Errorf("no transaction to process") - } - if meta == nil { - return fmt.Errorf("missing tx meta") - } - // if logger == nil { - // logger = zap.NewNop() - // } - // txLogger := logger.With( - // zap.String("signature", tx.Signatures[0].String()), - // ) - - // Resolve address lookup tables - addressTables := make(map[solana.PublicKey]solana.PublicKeySlice) - writablePos := 0 - readonlyPos := 0 - for _, lu := range tx.Message.AddressTableLookups { - addresses := make(solana.PublicKeySlice, 256) - for _, idx := range lu.WritableIndexes { - addresses[idx] = meta.LoadedAddresses.Writable[writablePos] - writablePos += 1 - } - for _, idx := range lu.ReadonlyIndexes { - addresses[idx] = meta.LoadedAddresses.ReadOnly[readonlyPos] - readonlyPos += 1 - } - addressTables[lu.AccountKey] = addresses - } - tx.Message.SetAddressTables(addressTables) - - // signature := tx.Signatures[0].String() - - // err := processBalanceChanges(ctx, p.pool, slot, meta, 
tx, blockTime, txLogger) - // if err != nil { - // return fmt.Errorf("failed to process balance changes: %w", err) - // } - - // for instructionIndex, instruction := range tx.Message.Instructions { - // programId := tx.Message.AccountKeys[instruction.ProgramIDIndex] - // instLogger := txLogger.With( - // zap.String("programId", programId.String()), - // zap.Int("instructionIndex", instructionIndex), - // ) - // switch programId { - // case claimable_tokens.ProgramID: - // { - // err := processClaimableTokensInstruction(ctx, p.pool, slot, tx, instructionIndex, instruction, signature, instLogger) - // if err != nil { - // return fmt.Errorf("error processing claimable_tokens instruction %d: %w", instructionIndex, err) - // } - // } - // case reward_manager.ProgramID: - // { - // err := processRewardManagerInstruction(ctx, p.pool, slot, tx, instructionIndex, instruction, signature, instLogger) - // if err != nil { - // return fmt.Errorf("error processing reward_manager instruction %d: %w", instructionIndex, err) - // } - // } - // case payment_router.ProgramID: - // { - // err := processPaymentRouterInstruction(ctx, p.pool, slot, tx, instructionIndex, instruction, signature, blockTime, p.config, instLogger) - // if err != nil { - // return fmt.Errorf("error processing payment_router instruction %d: %w", instructionIndex, err) - // } - // } - // case meteora_dbc.ProgramID: - // { - // err := processDbcInstruction(ctx, p.pool, p.rpcClient, slot, tx, instructionIndex, instruction, signature, instLogger) - // if err != nil { - // return fmt.Errorf("error processing meteora_dbc instruction %d: %w", instructionIndex, err) - // } - // } - // } - // } - - return nil -} - -func (p *DefaultProcessor) ReportCacheStats(logger *zap.Logger) { - stats := p.transactionCache.Stats() - logger.Info("transaction cache stats", - zap.Int64("hits", stats.Hits()), - zap.Int64("misses", stats.Misses()), - zap.Int64("evictions", stats.EvictedCount()), - zap.Int64("evictionCost", stats.EvictedCost()), - zap.Int64("rejectedSets", stats.RejectedSets()), - zap.Float64("ratio", stats.Ratio()), - ) -} diff --git a/solana/indexer/subscription.go b/solana/indexer/subscription.go deleted file mode 100644 index 94bc9387..00000000 --- a/solana/indexer/subscription.go +++ /dev/null @@ -1,313 +0,0 @@ -package indexer - -// import ( -// "context" -// "encoding/json" -// "fmt" -// "strings" -// "time" - -// "api.audius.co/jobs" -// "api.audius.co/logging" -// "api.audius.co/solana/spl/programs/meteora_dbc" -// bin "github.com/gagliardetto/binary" -// "github.com/gagliardetto/solana-go" -// pb "github.com/rpcpool/yellowstone-grpc/examples/golang/proto" -// "go.uber.org/zap" -// ) - -// // LaserStream from Helius only keeps the last 3000 slots. -// // Subtract 10 slots to be sure that the subscription doesn't fail. 
-// var MAX_SLOT_GAP = uint64(2990) - -// // Used to find graduation progress of DBC pools -// const AUDIO_DECIMALS = 8 - -// type artistCoinsChangedNotification struct { -// Operation string `json:"operation"` -// NewMint string `json:"new_mint"` -// OldMint string `json:"old_mint"` -// } - -// func (s *SolanaIndexer) Subscribe(ctx context.Context) error { -// // Set up workers to process updates concurrently -// msgChan := make(chan *pb.SubscribeUpdate, 3000) -// for i := range s.workerCount { -// go func(workerId int32) { -// for msg := range msgChan { -// s.handleMessage(ctx, msg) -// } -// }(i) -// } -// defer close(msgChan) - -// // On a new message, queue the message to the worker pool -// onMessage := func(ctx context.Context, msg *pb.SubscribeUpdate) { -// select { -// case <-ctx.Done(): -// s.logger.Warn("subscription context cancelled, stopping message processing") -// return -// case msgChan <- msg: -// } -// } - -// // Flush the logger every 15 seconds to ensure logs are written out -// go logging.SyncOnTicks(ctx, s.logger, time.Second*15) - -// // Acquire a connection to the database and listen for artist coins changes -// conn, err := s.pool.Acquire(ctx) -// if err != nil { -// return fmt.Errorf("failed to acquire database connection: %w", err) -// } -// defer conn.Release() - -// rawConn := conn.Conn() -// _, err = rawConn.Exec(ctx, `LISTEN artist_coins_changed`) -// if err != nil { -// return fmt.Errorf("failed to listen for artist coins changes: %w", err) -// } - -// // Log when we receive a shutdown signal -// defer func() { -// s.logger.Info("received shutdown signal, stopping subscription") -// }() - -// // Loop to reset subscription when the artist coins notification is received -// for { -// select { -// case <-ctx.Done(): -// return ctx.Err() -// default: -// } - -// coins, err := getArtistCoins(ctx, s.pool, 0, 100) -// if err != nil { -// return fmt.Errorf("failed to get artist coins: %w", err) -// } - -// var dbcPoolConfigs []string -// for _, config := range s.config.SolanaConfig.DbcPoolConfigs { -// dbcPoolConfigs = append(dbcPoolConfigs, config.String()) -// } - -// subscription, err := buildSubscriptionRequest(coins, dbcPoolConfigs) -// if err != nil { -// return fmt.Errorf("failed to create subscription: %w", err) -// } - -// // Check if a backfill is needed with the new subscription -// // and find the slot to continue from. 
- -// lastIndexedSlot, err := getCheckpointSlot(ctx, s.pool, subscription) -// if err != nil { -// return fmt.Errorf("failed to get last indexed slot: %w", err) -// } - -// latestSlot, err := withRetriesResult(func() (uint64, error) { -// return s.rpcClient.GetSlot(ctx, "confirmed") -// }, 5, time.Second*2) -// if err != nil { -// return fmt.Errorf("failed to get slot: %w", err) -// } - -// var fromSlot uint64 -// minimumSlot := uint64(0) -// if latestSlot > MAX_SLOT_GAP { -// minimumSlot = latestSlot - MAX_SLOT_GAP -// } -// if lastIndexedSlot > minimumSlot { -// fromSlot = lastIndexedSlot -// } else { -// if lastIndexedSlot == 0 { -// fromSlot = latestSlot - 100 // start 100 slots back to be safe -// s.logger.Warn("no last indexed slot found, starting from most recent slot (less 100 for safety) and skipping backfill", zap.Uint64("fromSlot", fromSlot)) -// } else { -// fromSlot = minimumSlot -// s.logger.Warn("last indexed slot is too old, starting from minimum slot and backfilling", zap.Uint64("fromSlot", fromSlot), zap.Uint64("toSlot", lastIndexedSlot)) -// go func(fromSlot, toSlot uint64) { -// err := s.Backfill(ctx, fromSlot, toSlot) -// if err != nil { -// s.logger.Error("failed to backfill", zap.Uint64("fromSlot", fromSlot), zap.Uint64("toSlot", toSlot), zap.Error(err)) -// } -// }(lastIndexedSlot, fromSlot) -// } -// } - -// s.checkpointId, err = insertCheckpointStart(ctx, s.pool, fromSlot, subscription) -// if err != nil { -// return fmt.Errorf("failed to start checkpoint: %w", err) -// } - -// subscription.FromSlot = &fromSlot - -// subCtx, cancel := context.WithCancel(ctx) -// defer cancel() - -// if err := s.grpcClient.Subscribe(subCtx, subscription, onMessage, s.onError); err != nil { -// return fmt.Errorf("failed to subscribe to gRPC server: %w", err) -// } - -// s.logger.Info("Solana indexer subscribed and listening...", zap.Uint64("fromSlot", fromSlot)) - -// for { -// notif, err := rawConn.WaitForNotification(ctx) -// if err != nil { -// return fmt.Errorf("failed to wait for notification: %w", err) -// } - -// if notif == nil { -// s.logger.Warn("received nil notification, continuing to wait for artist_coins changes") -// continue -// } -// if strings.HasPrefix(notif.Channel, "artist_coins_changed") { -// var notifData artistCoinsChangedNotification -// err := json.Unmarshal([]byte(notif.Payload), ¬ifData) -// if err != nil { -// s.logger.Error("failed to unmarshal artist_coins changed notification", zap.Error(err)) -// continue -// } -// if notifData.Operation != "INSERT" && notifData.Operation != "DELETE" { -// // ignore updates - only care if mints are added or removed -// continue -// } -// s.logger.Info("artist_coins changed, re-starting subscription", -// zap.String("oldMint", notifData.OldMint), -// zap.String("newMint", notifData.NewMint), -// zap.String("operation", notifData.Operation)) -// cancel() -// s.grpcClient.Close() -// <-subCtx.Done() -// break -// } -// } -// } -// } - -// func buildSubscriptionRequest(mintAddresses []string, dbcPoolConfigs []string) (*pb.SubscribeRequest, error) { -// commitment := pb.CommitmentLevel_CONFIRMED -// subscription := &pb.SubscribeRequest{ -// Commitment: &commitment, -// } - -// // Listen for slots for making checkpoints -// subscription.Slots = make(map[string]*pb.SubscribeRequestFilterSlots) -// subscription.Slots["checkpoints"] = &pb.SubscribeRequestFilterSlots{} - -// // Listen to all the token accounts for the mints we care about -// subscription.Accounts = make(map[string]*pb.SubscribeRequestFilterAccounts) -// for 
_, mint := range mintAddresses { -// accountFilter := pb.SubscribeRequestFilterAccounts{ -// Owner: []string{solana.TokenProgramID.String()}, -// Filters: []*pb.SubscribeRequestFilterAccountsFilter{ -// { -// Filter: &pb.SubscribeRequestFilterAccountsFilter_TokenAccountState{ -// TokenAccountState: true, -// }, -// }, -// { -// Filter: &pb.SubscribeRequestFilterAccountsFilter_Memcmp{ -// Memcmp: &pb.SubscribeRequestFilterAccountsFilterMemcmp{ -// Offset: 0, -// Data: &pb.SubscribeRequestFilterAccountsFilterMemcmp_Base58{ -// Base58: mint, -// }, -// }, -// }, -// }, -// }, -// } -// subscription.Accounts[mint] = &accountFilter -// } - -// for _, config := range dbcPoolConfigs { -// dbcFilter := pb.SubscribeRequestFilterAccounts{ -// Owner: []string{meteora_dbc.ProgramID.String()}, -// Filters: []*pb.SubscribeRequestFilterAccountsFilter{ -// { -// Filter: &pb.SubscribeRequestFilterAccountsFilter_Memcmp{ -// Memcmp: &pb.SubscribeRequestFilterAccountsFilterMemcmp{ -// // Config is at byte offset 72 in the account data -// // see Pool struct in meteora_dbc/state.go -// Offset: 72, -// Data: &pb.SubscribeRequestFilterAccountsFilterMemcmp_Base58{ -// Base58: config, -// }, -// }, -// }, -// }, -// }, -// } -// subscription.Accounts[config] = &dbcFilter -// } - -// // Listen to all the Audius programs for transactions (currently redundant) -// // programs := []string{ -// // claimable_tokens.ProgramID.String(), -// // reward_manager.ProgramID.String(), -// // payment_router.ProgramID.String(), -// // } -// // vote := false -// // failed := false -// // subscription.Transactions = make(map[string]*pb.SubscribeRequestFilterTransactions) -// // transactionFilter := pb.SubscribeRequestFilterTransactions{ -// // Vote: &vote, -// // Failed: &failed, -// // AccountInclude: programs, -// // } -// // subscription.Transactions["audiusPrograms"] = &transactionFilter - -// return subscription, nil -// } - -// // Handles a message from the gRPC subscription. 
-// func (s *SolanaIndexer) handleMessage(ctx context.Context, msg *pb.SubscribeUpdate) { -// logger := s.logger.With(zap.String("indexerSource", "grpc")) - -// if slotUpdate := msg.GetSlot(); slotUpdate != nil && slotUpdate.Slot > 0 { -// // only update every 10 slots to reduce db load and write latency -// if slotUpdate.Slot%10 == 0 { -// err := updateCheckpoint(ctx, s.pool, s.checkpointId, slotUpdate.Slot) -// if err != nil { -// logger.Error("failed to update slot checkpoint", zap.Error(err)) -// } -// } -// } - -// accUpdate := msg.GetAccount() -// if accUpdate != nil { -// for _, filterName := range msg.Filters { -// for _, config := range s.config.SolanaConfig.DbcPoolConfigs { -// if filterName == config.String() { -// account := solana.PublicKeyFromBytes([]byte(accUpdate.Account.Pubkey)) -// logger.Info("Updating DBC pool", zap.String("pool", account.String()), zap.String("config", config.String())) -// var pool meteora_dbc.Pool -// err := bin.NewBinDecoder(accUpdate.Account.Data).Decode(&pool) - -// dbcClient := meteora_dbc.NewClient(s.rpcClient, logger) -// poolConfig, err := dbcClient.GetPoolConfig(ctx, pool.Config) -// if err != nil || poolConfig == nil { -// logger.Error("failed to get DBC pool config", zap.String("pool", account.String()), zap.String("config", config.String()), zap.Error(err)) -// continue -// } -// jobs.NewCoinDBCJob(s.config, s.pool).UpsertPool(ctx, account, pool, *poolConfig) - -// if err != nil { -// logger.Error("failed to update DBC pool", zap.String("pool", account.String()), zap.Error(err)) -// } -// } -// } -// } -// txSig := solana.SignatureFromBytes(accUpdate.Account.TxnSignature) -// err := s.processor.ProcessSignature(ctx, accUpdate.Slot, txSig, logger) -// if err != nil { -// logger.Error("failed to process signature", zap.Error(err)) -// if insertErr := insertUnprocessedTransaction(ctx, s.pool, txSig.String(), accUpdate.Slot, err.Error()); insertErr != nil { -// logger.Error("failed to insert unprocessed transaction", zap.Error(insertErr)) -// } -// } -// } -// } - -// func (s *SolanaIndexer) onError(err error) { -// s.logger.Error("error in solana indexer", zap.Error(err)) -// } diff --git a/solana/indexer/unprocessed_transactions.go b/solana/indexer/unprocessed_transactions.go deleted file mode 100644 index d44900a7..00000000 --- a/solana/indexer/unprocessed_transactions.go +++ /dev/null @@ -1,121 +0,0 @@ -package indexer - -import ( - "context" - "fmt" - "time" - - "api.audius.co/database" - "github.com/gagliardetto/solana-go" - "github.com/jackc/pgx/v5" - "go.uber.org/zap" -) - -func (s *SolanaIndexer) ScheduleRetries(ctx context.Context, interval time.Duration) { - ticker := time.NewTicker(interval) - defer ticker.Stop() - - for { - select { - case <-ctx.Done(): - s.logger.Info("context cancelled, stopping retry ticker") - return - case <-ticker.C: - err := s.RetryUnprocessedTransactions(ctx) - if err != nil { - s.logger.Error("failed to retry unprocessed transactions", zap.Error(err)) - } - } - } -} - -func (s *SolanaIndexer) RetryUnprocessedTransactions(ctx context.Context) error { - limit := 100 - offset := 0 - logger := s.logger.Named("RetryUnprocessedTransactions") - count := 0 - start := time.Now() - logger.Debug("starting retry of unprocessed transactions...") - for { - failedTxs, err := getUnprocessedTransactions(ctx, s.pool, limit, offset) - if err != nil { - return fmt.Errorf("failed to fetch unprocessed transactions: %w", err) - } - if len(failedTxs) == 0 { - break - } - - for _, tx := range failedTxs { - count++ - err = 
s.processor.ProcessSignature(ctx, tx.Slot, solana.MustSignatureFromBase58(tx.Signature), logger) - if err != nil { - logger.Error("failed to process transaction", zap.String("signature", tx.Signature), zap.Error(err)) - offset++ - continue - } - logger.Debug("successfully processed transaction", zap.String("signature", tx.Signature)) - deleteUnprocessedTransaction(ctx, s.pool, tx.Signature) - } - } - if count == 0 { - logger.Debug("no unprocessed transactions to retry") - return nil - } - logger.Info("finished retry of unprocessed transactions", - zap.Int("count", count), - zap.Int("failed", offset), - zap.Duration("duration", time.Since(start)), - ) - return nil -} - -type unprocessedTransaction struct { - Signature string - Slot uint64 -} - -func getUnprocessedTransactions(ctx context.Context, db database.DBTX, limit, offset int) ([]unprocessedTransaction, error) { - sql := `SELECT signature, slot FROM sol_unprocessed_txs LIMIT @limit OFFSET @offset;` - rows, err := db.Query(ctx, sql, pgx.NamedArgs{ - "limit": limit, - "offset": offset, - }) - if err != nil { - if err == pgx.ErrNoRows { - return nil, nil - } - return nil, fmt.Errorf("failed to query unprocessed transactions: %w", err) - } - signatures, err := pgx.CollectRows(rows, pgx.RowToStructByName[unprocessedTransaction]) - if err != nil { - return nil, fmt.Errorf("failed to collect unprocessed transaction signatures: %w", err) - } - return signatures, nil -} - -func insertUnprocessedTransaction(ctx context.Context, db database.DBTX, signature string, slot uint64, errorMessage string) error { - sql := ` - INSERT INTO sol_unprocessed_txs (signature, slot, error_message) VALUES (@signature, @slot, @error_message) - ON CONFLICT (signature) DO UPDATE SET error_message = @error_message, updated_at = NOW() - ;` - _, err := db.Exec(ctx, sql, pgx.NamedArgs{ - "signature": signature, - "slot": slot, - "error_message": errorMessage, - }) - if err != nil { - return fmt.Errorf("failed to insert unprocessed transaction: %w", err) - } - return nil -} - -func deleteUnprocessedTransaction(ctx context.Context, db database.DBTX, signature string) error { - sql := `DELETE FROM sol_unprocessed_txs WHERE signature = @signature;` - _, err := db.Exec(ctx, sql, pgx.NamedArgs{ - "signature": signature, - }) - if err != nil { - return fmt.Errorf("failed to delete unprocessed transaction: %w", err) - } - return nil -} From bc781b2cbd74797b5239d1b9e9b92bf8203c1fdc Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Tue, 14 Oct 2025 11:30:40 -0700 Subject: [PATCH 07/56] Move indexers to own packages --- solana/indexer/backfill.go | 13 +- solana/indexer/backfill_test.go | 8 +- solana/indexer/{ => common}/checkpoints.go | 16 +- solana/indexer/common/checkpoints_test.go | 30 + solana/indexer/{ => common}/grpc_client.go | 8 +- solana/indexer/{ => common}/retry_queue.go | 8 +- solana/indexer/common/types.go | 33 ++ solana/indexer/{ => common}/utils.go | 14 +- .../indexer.go} | 50 +- solana/indexer/dbc.go | 77 ++- .../indexer/{ => program}/claimable_tokens.go | 2 +- solana/indexer/program/db_insert_test.go | 78 +++ solana/indexer/program/indexer.go | 170 ++++++ solana/indexer/{ => program}/memos.go | 2 +- solana/indexer/{ => program}/memos_test.go | 2 +- .../indexer/{ => program}/payment_router.go | 2 +- .../indexer/{ => program}/reward_manager.go | 2 +- .../{ => program}/validate_purchase.go | 2 +- .../{ => program}/validate_purchase_test.go | 4 +- solana/indexer/solana_indexer.go | 91 ++- solana/indexer/{ => 
token}/balance_changes.go | 2 +- .../balance_changes_test.go} | 111 +--- .../{token_indexer.go => token/indexer.go} | 52 +- .../indexer/unprocessed_transactions_test.go | 166 +++--- sql/01_schema.sql | 539 +++++++++++++++++- sql/02_test_template.sql | 5 +- 26 files changed, 1136 insertions(+), 351 deletions(-) rename solana/indexer/{ => common}/checkpoints.go (92%) create mode 100644 solana/indexer/common/checkpoints_test.go rename solana/indexer/{ => common}/grpc_client.go (98%) rename solana/indexer/{ => common}/retry_queue.go (91%) create mode 100644 solana/indexer/common/types.go rename solana/indexer/{ => common}/utils.go (91%) rename solana/indexer/{damm_v2_indexer.go => damm_v2/indexer.go} (93%) rename solana/indexer/{ => program}/claimable_tokens.go (99%) create mode 100644 solana/indexer/program/db_insert_test.go create mode 100644 solana/indexer/program/indexer.go rename solana/indexer/{ => program}/memos.go (99%) rename solana/indexer/{ => program}/memos_test.go (99%) rename solana/indexer/{ => program}/payment_router.go (99%) rename solana/indexer/{ => program}/reward_manager.go (99%) rename solana/indexer/{ => program}/validate_purchase.go (99%) rename solana/indexer/{ => program}/validate_purchase_test.go (97%) rename solana/indexer/{ => token}/balance_changes.go (99%) rename solana/indexer/{db_insert_test.go => token/balance_changes_test.go} (77%) rename solana/indexer/{token_indexer.go => token/indexer.go} (81%) diff --git a/solana/indexer/backfill.go b/solana/indexer/backfill.go index 0535fd68..385f0e55 100644 --- a/solana/indexer/backfill.go +++ b/solana/indexer/backfill.go @@ -7,6 +7,7 @@ import ( "time" "api.audius.co/database" + "api.audius.co/solana/indexer/common" "api.audius.co/solana/spl/programs/claimable_tokens" "api.audius.co/solana/spl/programs/payment_router" "api.audius.co/solana/spl/programs/reward_manager" @@ -105,7 +106,7 @@ func (s *SolanaIndexer) backfillAddressTransactions(ctx context.Context, address } opts.Before = before - res, err := withRetriesResult(func() ([]*rpc.TransactionSignature, error) { + res, err := common.WithRetriesResult(func() ([]*rpc.TransactionSignature, error) { return s.rpcClient.GetSignaturesForAddressWithOpts(ctx, address, &opts) }, 5, time.Second*1) if err != nil { @@ -166,10 +167,10 @@ func (s *SolanaIndexer) backfillAddressTransactions(ctx context.Context, address continue } - err = s.processor.ProcessSignature(ctx, sig.Slot, sig.Signature, logger) - if err != nil { - logger.Error("failed to process signature", zap.Error(err)) - } + // err = s.processor.ProcessSignature(ctx, sig.Slot, sig.Signature, logger) + // if err != nil { + // logger.Error("failed to process signature", zap.Error(err)) + // } lastIndexedSig = sig.Signature @@ -182,7 +183,7 @@ func (s *SolanaIndexer) backfillAddressTransactions(ctx context.Context, address zap.Int("count", len(res)), ) } - checkpoint, err := insertBackfillCheckpoint(ctx, s.pool, fromSlot, toSlot, address.String()) + checkpoint, err := common.InsertBackfillCheckpoint(ctx, s.pool, fromSlot, toSlot, address.String()) if err != nil { logger.Error("failed to insert backfill checkpoint", zap.Error(err)) } diff --git a/solana/indexer/backfill_test.go b/solana/indexer/backfill_test.go index 4090f93d..2a7f71df 100644 --- a/solana/indexer/backfill_test.go +++ b/solana/indexer/backfill_test.go @@ -223,8 +223,8 @@ func TestBackfillContinue(t *testing.T) { s := &SolanaIndexer{ rpcClient: rpcFake, pool: poolMock, - processor: processorMock, - logger: zap.NewNop(), + // processor: processorMock, + 
logger: zap.NewNop(), } err = s.Backfill(context.Background(), 100, 200) @@ -385,8 +385,8 @@ func TestBackfillFresh(t *testing.T) { s := &SolanaIndexer{ rpcClient: rpcFake, pool: poolMock, - processor: processorMock, - logger: zap.NewNop(), + // processor: processorMock, + logger: zap.NewNop(), } err = s.Backfill(context.Background(), 100, 200) diff --git a/solana/indexer/checkpoints.go b/solana/indexer/common/checkpoints.go similarity index 92% rename from solana/indexer/checkpoints.go rename to solana/indexer/common/checkpoints.go index 63f228fe..fe7698ba 100644 --- a/solana/indexer/checkpoints.go +++ b/solana/indexer/common/checkpoints.go @@ -1,4 +1,4 @@ -package indexer +package common import ( "context" @@ -14,7 +14,9 @@ import ( "go.uber.org/zap" ) -func ensureCheckpoint( +const MAX_SLOT_GAP = 2500 + +func EnsureCheckpoint( ctx context.Context, name string, db database.DBTX, @@ -22,12 +24,12 @@ func ensureCheckpoint( subscription *pb.SubscribeRequest, logger *zap.Logger, ) (string, uint64, error) { - lastIndexedSlot, err := getCheckpointSlot(ctx, db, name, subscription) + lastIndexedSlot, err := GetCheckpointSlot(ctx, db, name, subscription) if err != nil { return "", 0, fmt.Errorf("failed to get last indexed slot: %w", err) } - latestSlot, err := withRetriesResult(func() (uint64, error) { + latestSlot, err := WithRetriesResult(func() (uint64, error) { return rpcClient.GetSlot(ctx, "confirmed") }, 5, time.Second*2) if err != nil { @@ -61,7 +63,7 @@ func ensureCheckpoint( return checkpointId, fromSlot, nil } -func insertBackfillCheckpoint(ctx context.Context, db database.DBTX, fromSlot uint64, toSlot uint64, address string) (string, error) { +func InsertBackfillCheckpoint(ctx context.Context, db database.DBTX, fromSlot uint64, toSlot uint64, address string) (string, error) { obj := map[string]string{ "type": "backfill", "address": address, @@ -128,7 +130,7 @@ func insertCheckpointStart( return checkpointId, nil } -func updateCheckpoint(ctx context.Context, db database.DBTX, id string, slot uint64) error { +func UpdateCheckpoint(ctx context.Context, db database.DBTX, id string, slot uint64) error { _, err := db.Exec(ctx, ` UPDATE sol_slot_checkpoints SET to_slot = @to_slot, @@ -142,7 +144,7 @@ func updateCheckpoint(ctx context.Context, db database.DBTX, id string, slot uin return err } -func getCheckpointSlot(ctx context.Context, db database.DBTX, name string, subscription *pb.SubscribeRequest) (uint64, error) { +func GetCheckpointSlot(ctx context.Context, db database.DBTX, name string, subscription *pb.SubscribeRequest) (uint64, error) { subscriptionJson, err := json.Marshal(subscription) if err != nil { return 0, fmt.Errorf("failed to marshal subscription request: %w", err) diff --git a/solana/indexer/common/checkpoints_test.go b/solana/indexer/common/checkpoints_test.go new file mode 100644 index 00000000..fab3be23 --- /dev/null +++ b/solana/indexer/common/checkpoints_test.go @@ -0,0 +1,30 @@ +package common + +import ( + "testing" + + "api.audius.co/database" + "github.com/rpcpool/yellowstone-grpc/examples/golang/proto" + "github.com/test-go/testify/assert" +) + +func TestCheckpoints(t *testing.T) { + pool := database.CreateTestDatabase(t, "test_solana_indexer_common") + defer pool.Close() + + req := proto.SubscribeRequest{} + id, err := insertCheckpointStart(t.Context(), pool, "backfill", 100, &req) + assert.NoError(t, err, "failed to insert checkpoint start") + assert.NotEmpty(t, id, "checkpoint ID should not be empty") + + err = UpdateCheckpoint(t.Context(), pool, id, 201) + 
assert.NoError(t, err, "failed to update checkpoint") + + slot, err := GetCheckpointSlot(t.Context(), pool, "backfill", &req) + assert.NoError(t, err, "failed to get checkpoint slot") + assert.Equal(t, uint64(201), slot, "checkpoint slot should match updated value") + + id2, err := InsertBackfillCheckpoint(t.Context(), pool, 100, 200, "foo") + assert.NoError(t, err, "failed to insert backfill checkpoint") + assert.NotEmpty(t, id2, "backfill checkpoint ID should not be empty") +} diff --git a/solana/indexer/grpc_client.go b/solana/indexer/common/grpc_client.go similarity index 98% rename from solana/indexer/grpc_client.go rename to solana/indexer/common/grpc_client.go index 82298484..e025e1b0 100644 --- a/solana/indexer/grpc_client.go +++ b/solana/indexer/common/grpc_client.go @@ -1,4 +1,4 @@ -package indexer +package common import ( "context" @@ -46,12 +46,6 @@ type DefaultGrpcClient struct { hasInternalSlotSub bool } -type GrpcConfig struct { - Server string - ApiToken string - MaxReconnectAttempts int -} - // Creates a new gRPC client. func NewGrpcClient(config GrpcConfig) *DefaultGrpcClient { return &DefaultGrpcClient{ diff --git a/solana/indexer/retry_queue.go b/solana/indexer/common/retry_queue.go similarity index 91% rename from solana/indexer/retry_queue.go rename to solana/indexer/common/retry_queue.go index 06e42290..86854f42 100644 --- a/solana/indexer/retry_queue.go +++ b/solana/indexer/common/retry_queue.go @@ -1,4 +1,4 @@ -package indexer +package common import ( "context" @@ -48,7 +48,7 @@ func (r *retryQueueUpdate) UnmarshalJSON(data []byte) error { return protojson.Unmarshal(data, r.SubscribeUpdate) } -func getRetryQueue(ctx context.Context, db database.DBTX, limit, offset int) ([]retryQueueItem, error) { +func GetRetryQueue(ctx context.Context, db database.DBTX, limit, offset int) ([]retryQueueItem, error) { sql := `SELECT id, indexer, update, error, created_at, updated_at FROM sol_retry_queue ORDER BY created_at ASC @@ -73,7 +73,7 @@ func getRetryQueue(ctx context.Context, db database.DBTX, limit, offset int) ([] return items, nil } -func addToRetryQueue(ctx context.Context, db database.DBTX, indexer string, update *pb.SubscribeUpdate, errorMessage string) error { +func AddToRetryQueue(ctx context.Context, db database.DBTX, indexer string, update *pb.SubscribeUpdate, errorMessage string) error { sql := ` INSERT INTO sol_retry_queue (indexer, update, error) VALUES (@indexer, @update, @error) @@ -90,7 +90,7 @@ func addToRetryQueue(ctx context.Context, db database.DBTX, indexer string, upda return nil } -func deleteFromRetryQueue(ctx context.Context, db database.DBTX, id string) error { +func DeleteFromRetryQueue(ctx context.Context, db database.DBTX, id string) error { sql := `DELETE FROM sol_retry_queue WHERE id = @id;` _, err := db.Exec(ctx, sql, pgx.NamedArgs{ "id": id, diff --git a/solana/indexer/common/types.go b/solana/indexer/common/types.go new file mode 100644 index 00000000..04b0ccf9 --- /dev/null +++ b/solana/indexer/common/types.go @@ -0,0 +1,33 @@ +package common + +import ( + "context" + + "github.com/gagliardetto/solana-go" + "github.com/gagliardetto/solana-go/rpc" + pb "github.com/rpcpool/yellowstone-grpc/examples/golang/proto" +) + +type RpcClient interface { + GetBlockWithOpts(context.Context, uint64, *rpc.GetBlockOpts) (*rpc.GetBlockResult, error) + GetSlot(context.Context, rpc.CommitmentType) (uint64, error) + GetSignaturesForAddressWithOpts(context.Context, solana.PublicKey, *rpc.GetSignaturesForAddressOpts) ([]*rpc.TransactionSignature, error) + 
GetTransaction(context.Context, solana.Signature, *rpc.GetTransactionOpts) (*rpc.GetTransactionResult, error) + GetAccountDataBorshInto(ctx context.Context, account solana.PublicKey, out interface{}) error +} + +type GrpcClient interface { + Subscribe( + ctx context.Context, + subRequest *pb.SubscribeRequest, + dataCallback DataCallback, + errorCallback ErrorCallback, + ) error + Close() +} + +type GrpcConfig struct { + Server string + ApiToken string + MaxReconnectAttempts int +} diff --git a/solana/indexer/utils.go b/solana/indexer/common/utils.go similarity index 91% rename from solana/indexer/utils.go rename to solana/indexer/common/utils.go index 7944b569..8dd15521 100644 --- a/solana/indexer/utils.go +++ b/solana/indexer/common/utils.go @@ -1,4 +1,4 @@ -package indexer +package common import ( "context" @@ -13,7 +13,7 @@ import ( "go.uber.org/zap" ) -func withRetries(f func() error, maxRetries int, interval time.Duration) error { +func WithRetries(f func() error, maxRetries int, interval time.Duration) error { err := f() retries := 0 for err != nil && retries < maxRetries { @@ -27,7 +27,7 @@ func withRetries(f func() error, maxRetries int, interval time.Duration) error { return nil } -func withRetriesResult[T any](f func() (T, error), maxRetries int, interval time.Duration) (T, error) { +func WithRetriesResult[T any](f func() (T, error), maxRetries int, interval time.Duration) (T, error) { result, err := f() retries := 0 for err != nil && retries < maxRetries { @@ -44,7 +44,7 @@ func withRetriesResult[T any](f func() (T, error), maxRetries int, interval time type notificationCallback func(ctx context.Context, notification *pgconn.Notification) -func watchPgNotification(ctx context.Context, pool database.DbPool, notification string, callback notificationCallback, logger *zap.Logger) error { +func WatchPgNotification(ctx context.Context, pool database.DbPool, notification string, callback notificationCallback, logger *zap.Logger) error { if logger == nil { logger = zap.NewNop() } @@ -92,7 +92,7 @@ func watchPgNotification(ctx context.Context, pool database.DbPool, notification } // Gets a transaction from a cache or fetches it from the RPC. Handles retries. -func fetchTransactionWithCache( +func FetchTransactionWithCache( ctx context.Context, transactionCache *otter.Cache[solana.Signature, *rpc.GetTransactionResult], @@ -107,7 +107,7 @@ func fetchTransactionWithCache( } // If the transaction is not in the cache, fetch it from the RPC - res, err := withRetriesResult(func() (*rpc.GetTransactionResult, error) { + res, err := WithRetriesResult(func() (*rpc.GetTransactionResult, error) { return rpcClient.GetTransaction( ctx, signature, @@ -130,7 +130,7 @@ func fetchTransactionWithCache( } // Resolves address lookup tables in the given transaction using the provided metadata. 
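+// The transaction is modified in place and returned for convenience; callers
+// typically pass tx and meta straight from a GetTransaction result.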
-func resolveLookupTables( +func ResolveLookupTables( ctx context.Context, rpcClient RpcClient, tx *solana.Transaction, diff --git a/solana/indexer/damm_v2_indexer.go b/solana/indexer/damm_v2/indexer.go similarity index 93% rename from solana/indexer/damm_v2_indexer.go rename to solana/indexer/damm_v2/indexer.go index ce15e055..c585e6ad 100644 --- a/solana/indexer/damm_v2_indexer.go +++ b/solana/indexer/damm_v2/indexer.go @@ -1,10 +1,11 @@ -package indexer +package damm_v2 import ( "context" "fmt" "api.audius.co/database" + "api.audius.co/solana/indexer/common" "api.audius.co/solana/spl/programs/meteora_damm_v2" bin "github.com/gagliardetto/binary" "github.com/gagliardetto/solana-go" @@ -14,10 +15,10 @@ import ( "go.uber.org/zap" ) -type DammV2Indexer struct { +type Indexer struct { pool database.DbPool - grpcConfig GrpcConfig - rpcClient RpcClient + grpcConfig common.GrpcConfig + rpcClient common.RpcClient logger *zap.Logger } @@ -26,13 +27,27 @@ const MAX_DAMM_V2_POOLS_PER_SUBSCRIPTION = 10000 const DAMM_V2_POOL_SUBSCRIPTION_KEY = "dammV2Pools" const DBC_MIGRATION_NOTIFICATION_NAME = "meteora_dbc_migration" -func (d *DammV2Indexer) Start(ctx context.Context) { +func New( + config common.GrpcConfig, + rpcClient common.RpcClient, + pool database.DbPool, + logger *zap.Logger, +) *Indexer { + return &Indexer{ + pool: pool, + grpcConfig: config, + rpcClient: rpcClient, + logger: logger.Named("DammV2Indexer"), + } +} + +func (d *Indexer) Start(ctx context.Context) { // To ensure only one subscription task is running at a time, keep track of // the last cancel function and call it on the next notification. var lastCancel context.CancelFunc // Ensure all gRPC clients are closed on shutdown - var grpcClients []GrpcClient + var grpcClients []common.GrpcClient defer (func() { for _, client := range grpcClients { client.Close() @@ -58,6 +73,7 @@ func (d *DammV2Indexer) Start(ctx context.Context) { grpcClients = clients if err != nil { d.logger.Error("failed to resubscribe to DAMM V2 pools", zap.Error(err)) + cancel() return } @@ -73,7 +89,7 @@ func (d *DammV2Indexer) Start(ctx context.Context) { grpcClients = clients // Watch for new pools to be added - err = watchPgNotification(ctx, d.pool, DBC_MIGRATION_NOTIFICATION_NAME, handleNotif, d.logger) + err = common.WatchPgNotification(ctx, d.pool, DBC_MIGRATION_NOTIFICATION_NAME, handleNotif, d.logger) if err != nil { d.logger.Error("failed to watch for DAMM V2 pool changes", zap.Error(err)) return @@ -91,7 +107,7 @@ func (d *DammV2Indexer) Start(ctx context.Context) { } // Handles a single update message from the gRPC subscription -func (d *DammV2Indexer) HandleUpdate(ctx context.Context, msg *pb.SubscribeUpdate) error { +func (d *Indexer) HandleUpdate(ctx context.Context, msg *pb.SubscribeUpdate) error { // Handle slot updates slotUpdate := msg.GetSlot() if slotUpdate != nil { @@ -100,7 +116,7 @@ func (d *DammV2Indexer) HandleUpdate(ctx context.Context, msg *pb.SubscribeUpdat // Use the filter as the checkpoint ID checkpointId := msg.Filters[0] - err := updateCheckpoint(ctx, d.pool, checkpointId, slotUpdate.Slot) + err := common.UpdateCheckpoint(ctx, d.pool, checkpointId, slotUpdate.Slot) if err != nil { d.logger.Error("failed to update slot checkpoint", zap.Error(err)) } @@ -127,12 +143,12 @@ func (d *DammV2Indexer) HandleUpdate(ctx context.Context, msg *pb.SubscribeUpdat return nil } -func (d *DammV2Indexer) subscribeToDammV2Pools(ctx context.Context) ([]GrpcClient, error) { +func (d *Indexer) subscribeToDammV2Pools(ctx context.Context) 
([]common.GrpcClient, error) { done := false page := 0 pageSize := MAX_DAMM_V2_POOLS_PER_SUBSCRIPTION total := 0 - grpcClients := make([]GrpcClient, 0) + grpcClients := make([]common.GrpcClient, 0) for !done { dammV2Pools, err := getWatchedDammV2Pools(ctx, d.pool, pageSize, page*pageSize) if err != nil { @@ -154,13 +170,13 @@ func (d *DammV2Indexer) subscribeToDammV2Pools(ctx context.Context) ([]GrpcClien d.logger.Error("failed to handle DAMM V2 update", zap.Error(err)) // Add messages that failed to process to the retry queue - if err := addToRetryQueue(ctx, d.pool, DAMM_V2_INDEXER_NAME, msg, err.Error()); err != nil { + if err := common.AddToRetryQueue(ctx, d.pool, DAMM_V2_INDEXER_NAME, msg, err.Error()); err != nil { d.logger.Error("failed to add to retry queue", zap.Error(err)) } } } - grpcClient := NewGrpcClient(d.grpcConfig) + grpcClient := common.NewGrpcClient(d.grpcConfig) err = grpcClient.Subscribe(ctx, subscription, handleMessage, func(err error) { d.logger.Error("error in DAMM V2 subscription", zap.Error(err)) }) @@ -178,7 +194,7 @@ func (d *DammV2Indexer) subscribeToDammV2Pools(ctx context.Context) ([]GrpcClien return grpcClients, nil } -func (d *DammV2Indexer) makeDammV2SubscriptionRequest(ctx context.Context, dammV2Pools []string) *pb.SubscribeRequest { +func (d *Indexer) makeDammV2SubscriptionRequest(ctx context.Context, dammV2Pools []string) *pb.SubscribeRequest { commitment := pb.CommitmentLevel_CONFIRMED subscription := &pb.SubscribeRequest{ Commitment: &commitment, @@ -218,7 +234,7 @@ func (d *DammV2Indexer) makeDammV2SubscriptionRequest(ctx context.Context, dammV } // Ensure this subscription has a checkpoint - checkpointId, fromSlot, err := ensureCheckpoint(ctx, DAMM_V2_INDEXER_NAME, d.pool, d.rpcClient, subscription, d.logger) + checkpointId, fromSlot, err := common.EnsureCheckpoint(ctx, DAMM_V2_INDEXER_NAME, d.pool, d.rpcClient, subscription, d.logger) if err != nil { d.logger.Error("failed to ensure checkpoint", zap.Error(err)) } @@ -544,6 +560,7 @@ func upsertDammV2PoolFees( _, err := db.Exec(ctx, sqlFees, pgx.NamedArgs{ "pool": account.String(), + "slot": slot, "partner_fee_percent": fees.PartnerFeePercent, "protocol_fee_percent": fees.ProtocolFeePercent, "referral_fee_percent": fees.ReferralFeePercent, @@ -666,6 +683,7 @@ func upsertDammV2PoolDynamicFee( _, err := db.Exec(ctx, sqlDynamicFee, pgx.NamedArgs{ "pool": account.String(), + "slot": slot, "initialized": dynamicFee.Initialized, "max_volatility_accumulator": dynamicFee.MaxVolatilityAccumulator, "variable_fee_control": dynamicFee.VariableFeeControl, @@ -736,6 +754,7 @@ func upsertDammV2Position( _, err := db.Exec(ctx, sql, pgx.NamedArgs{ "address": account.String(), + "slot": slot, "pool": position.Pool.String(), "nft_mint": position.NftMint.String(), "fee_a_per_token_checkpoint": position.FeeAPerTokenCheckpoint, @@ -782,6 +801,7 @@ func upsertDammV2PositionMetrics( _, err := db.Exec(ctx, sql, pgx.NamedArgs{ "position": account.String(), + "slot": slot, "total_claimed_a_fee": metrics.TotalClaimedAFee, "total_claimed_b_fee": metrics.TotalClaimedBFee, }) diff --git a/solana/indexer/dbc.go b/solana/indexer/dbc.go index 4460344b..1bf732eb 100644 --- a/solana/indexer/dbc.go +++ b/solana/indexer/dbc.go @@ -4,10 +4,9 @@ import ( "context" "fmt" "strings" - "time" "api.audius.co/database" - "api.audius.co/solana/spl/programs/meteora_damm_v2" + "api.audius.co/solana/indexer/common" "api.audius.co/solana/spl/programs/meteora_dbc" "github.com/gagliardetto/solana-go" "github.com/jackc/pgx/v5" @@ -17,7 +16,7 @@ import ( 
func processDbcInstruction( ctx context.Context, db database.DBTX, - rpcClient RpcClient, + rpcClient common.RpcClient, slot uint64, tx *solana.Transaction, instructionIndex int, @@ -76,44 +75,44 @@ func processDbcInstruction( // Also index the pool and positions - var dammPool meteora_damm_v2.Pool - err = withRetries(func() error { - return rpcClient.GetAccountDataBorshInto(ctx, migrationInst.GetPool().PublicKey, &dammPool) - }, 5, time.Second*1) - if err != nil { - return fmt.Errorf("failed to get damm v2 pool account data after retries: %w", err) - } else { - err = upsertDammV2Pool(ctx, db, slot, migrationInst.GetPool().PublicKey, &dammPool) - if err != nil { - return fmt.Errorf("failed to upsert damm v2 pool: %w", err) - } - } + // var dammPool meteora_damm_v2.Pool + // err = common.WithRetries(func() error { + // return rpcClient.GetAccountDataBorshInto(ctx, migrationInst.GetPool().PublicKey, &dammPool) + // }, 5, time.Second*1) + // if err != nil { + // return fmt.Errorf("failed to get damm v2 pool account data after retries: %w", err) + // } else { + // err = upsertDammV2Pool(ctx, db, slot, migrationInst.GetPool().PublicKey, &dammPool) + // if err != nil { + // return fmt.Errorf("failed to upsert damm v2 pool: %w", err) + // } + // } - var firstPosition meteora_damm_v2.PositionState - err = withRetries(func() error { - return rpcClient.GetAccountDataBorshInto(ctx, migrationInst.GetFirstPosition().PublicKey, &firstPosition) - }, 5, time.Second*1) - if err != nil { - return fmt.Errorf("failed to get first damm v2 position account data: %w", err) - } else { - err = upsertDammV2Position(ctx, db, slot, migrationInst.GetFirstPosition().PublicKey, &firstPosition) - if err != nil { - return fmt.Errorf("failed to upsert first damm v2 position: %w", err) - } - } + // var firstPosition meteora_damm_v2.PositionState + // err = common.WithRetries(func() error { + // return rpcClient.GetAccountDataBorshInto(ctx, migrationInst.GetFirstPosition().PublicKey, &firstPosition) + // }, 5, time.Second*1) + // if err != nil { + // return fmt.Errorf("failed to get first damm v2 position account data: %w", err) + // } else { + // err = upsertDammV2Position(ctx, db, slot, migrationInst.GetFirstPosition().PublicKey, &firstPosition) + // if err != nil { + // return fmt.Errorf("failed to upsert first damm v2 position: %w", err) + // } + // } - var secondPosition meteora_damm_v2.PositionState - err = withRetries(func() error { - return rpcClient.GetAccountDataBorshInto(ctx, migrationInst.GetSecondPosition().PublicKey, &secondPosition) - }, 5, time.Second*1) - if err != nil { - return fmt.Errorf("failed to get second damm v2 position account data: %w", err) - } else { - err = upsertDammV2Position(ctx, db, slot, migrationInst.GetSecondPosition().PublicKey, &secondPosition) - if err != nil { - return fmt.Errorf("failed to upsert second damm v2 position: %w", err) - } - } + // var secondPosition meteora_damm_v2.PositionState + // err = common.WithRetries(func() error { + // return rpcClient.GetAccountDataBorshInto(ctx, migrationInst.GetSecondPosition().PublicKey, &secondPosition) + // }, 5, time.Second*1) + // if err != nil { + // return fmt.Errorf("failed to get second damm v2 position account data: %w", err) + // } else { + // err = upsertDammV2Position(ctx, db, slot, migrationInst.GetSecondPosition().PublicKey, &secondPosition) + // if err != nil { + // return fmt.Errorf("failed to upsert second damm v2 position: %w", err) + // } + // } } } } diff --git a/solana/indexer/claimable_tokens.go 
b/solana/indexer/program/claimable_tokens.go similarity index 99% rename from solana/indexer/claimable_tokens.go rename to solana/indexer/program/claimable_tokens.go index ec5cfd66..60eda8d2 100644 --- a/solana/indexer/claimable_tokens.go +++ b/solana/indexer/program/claimable_tokens.go @@ -1,4 +1,4 @@ -package indexer +package program import ( "context" diff --git a/solana/indexer/program/db_insert_test.go b/solana/indexer/program/db_insert_test.go new file mode 100644 index 00000000..2335673c --- /dev/null +++ b/solana/indexer/program/db_insert_test.go @@ -0,0 +1,78 @@ +package program + +import ( + "testing" + + "api.audius.co/database" + "github.com/test-go/testify/assert" +) + +// Ensures the database matches the expected schema for the inserts +func TestInserts(t *testing.T) { + pool := database.CreateTestDatabase(t, "test_solana_indexer_program") + defer pool.Close() + + err := insertClaimableAccount(t.Context(), pool, claimableAccountsRow{ + signature: "signature2", + instructionIndex: 0, + slot: 12345, + mint: "mint2", + ethereumAddress: "0x1234567890abcdef1234567890abcdef", + account: "account2", + }) + assert.NoError(t, err, "failed to insert claimable account") + + err = insertClaimableAccountTransfer(t.Context(), pool, claimableAccountTransfersRow{ + signature: "signature3", + instructionIndex: 0, + amount: 1000, + slot: 12345, + fromAccount: "fromAccount2", + toAccount: "toAccount2", + senderEthAddress: "0xabcdef1234567890abcdef1234567890", + }) + assert.NoError(t, err, "failed to insert claimable account transfer") + + err = insertPayment(t.Context(), pool, paymentRow{ + signature: "signature4", + instructionIndex: 0, + amount: 5000, + slot: 12345, + routeIndex: 0, + toAccount: "toAccount3", + }) + assert.NoError(t, err, "failed to insert payment router transaction") + + err = insertPurchase(t.Context(), pool, purchaseRow{ + signature: "signature5", + instructionIndex: 0, + amount: 10000, + slot: 12345, + fromAccount: "fromAccount3", + parsedPurchaseMemo: parsedPurchaseMemo{ + ContentId: 123, + ContentType: "track", + ValidAfterBlocknumber: 12345678, + BuyerUserId: 1, + AccessType: "stream", + }, + parsedLocationMemo: parsedLocationMemo{ + City: "San Francisco", + Country: "USA", + Region: "California", + }, + isValid: nil, + }) + assert.NoError(t, err, "failed to insert purchase") + + err = insertRewardDisbursement(t.Context(), pool, rewardDisbursementsRow{ + signature: "signature6", + instructionIndex: 0, + amount: 2000, + slot: 12345, + userBank: "userBank1", + challengeId: "challenge1", + specifier: "specifier1", + }) + assert.NoError(t, err, "failed to insert reward disbursement") +} diff --git a/solana/indexer/program/indexer.go b/solana/indexer/program/indexer.go new file mode 100644 index 00000000..72579533 --- /dev/null +++ b/solana/indexer/program/indexer.go @@ -0,0 +1,170 @@ +package program + +import ( + "context" + "encoding/json" + "fmt" + + "api.audius.co/config" + "api.audius.co/database" + "api.audius.co/solana/indexer/common" + "github.com/gagliardetto/solana-go" + "github.com/gagliardetto/solana-go/rpc" + pb "github.com/rpcpool/yellowstone-grpc/examples/golang/proto" + "go.uber.org/zap" +) + +const NAME = "program" + +type Indexer struct { + pool database.DbPool + grpcConfig common.GrpcConfig + rpcClient common.RpcClient + config config.SolanaConfig + logger *zap.Logger +} + +func New( + grpcConfig common.GrpcConfig, + rpcClient common.RpcClient, + pool database.DbPool, + config config.SolanaConfig, + logger *zap.Logger, +) *Indexer { + return &Indexer{ + 
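// Dependency injection: each collaborator is passed in rather than
// constructed here, so tests can substitute fakes. Per the rest of this
// patch: pool backs the checkpoint and retry-queue tables, grpcConfig
// configures the Yellowstone gRPC subscription, rpcClient is handed to
// common.EnsureCheckpoint, and config supplies the program IDs (reward
// manager, payment router, claimable tokens) used as transaction filters.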
pool: pool, + grpcConfig: grpcConfig, + rpcClient: rpcClient, + config: config, + logger: logger.Named("ProgramIndexer"), + } +} + +func (i *Indexer) Start(ctx context.Context) { + client, err := i.subscribe(ctx) + if err != nil { + i.logger.Fatal("failed to start subscription", zap.Error(err)) + } + defer client.Close() + + i.logger.Info("subscribed") + + // Wait for shutdown + for { + select { + case <-ctx.Done(): + i.logger.Info("received shutdown signal, stopping indexer") + return + default: + } + } +} + +func (i *Indexer) subscribe(ctx context.Context) (common.GrpcClient, error) { + programIds := []string{ + i.config.RewardManagerProgramID.String(), + i.config.PaymentRouterProgramID.String(), + i.config.ClaimableTokensProgramID.String(), + } + + subscription := i.makeSubscriptionRequest(ctx, programIds) + + handleMessage := func(ctx context.Context, update *pb.SubscribeUpdate) { + err := i.HandleUpdate(ctx, update) + if err != nil { + i.logger.Error("failed to handle update", zap.Error(err)) + + // Add messages that failed to process to the retry queue + if err := common.AddToRetryQueue(ctx, i.pool, NAME, update, err.Error()); err != nil { + i.logger.Error("failed to add to retry queue", zap.Error(err)) + } + } + } + + client := common.NewGrpcClient(i.grpcConfig) + err := client.Subscribe(ctx, subscription, handleMessage, func(err error) { + i.logger.Error("subscription error", zap.Error(err)) + }) + if err != nil { + return nil, fmt.Errorf("failed to start subscription: %w", err) + } + return client, nil +} + +func (i *Indexer) makeSubscriptionRequest(ctx context.Context, programIds []string) *pb.SubscribeRequest { + commitment := pb.CommitmentLevel_CONFIRMED + subscription := &pb.SubscribeRequest{ + Commitment: &commitment, + } + + // Filter to only the relevant program IDs + subscription.Transactions = make(map[string]*pb.SubscribeRequestFilterTransactions) + subscription.Transactions[NAME] = &pb.SubscribeRequestFilterTransactions{ + AccountInclude: programIds, + } + + // Ensure this subscription has a checkpoint + checkpointId, fromSlot, err := common.EnsureCheckpoint(ctx, NAME, i.pool, i.rpcClient, subscription, i.logger) + if err != nil { + i.logger.Error("failed to ensure checkpoint", zap.Error(err)) + } + + // Set the from slot for the subscription + subscription.FromSlot = &fromSlot + + // Listen for slots for making checkpoints + subscription.Slots = make(map[string]*pb.SubscribeRequestFilterSlots) + subscription.Slots[checkpointId] = &pb.SubscribeRequestFilterSlots{} + + return subscription +} + +func (i *Indexer) HandleUpdate(ctx context.Context, msg *pb.SubscribeUpdate) error { + // Handle slot updates + slotUpdate := msg.GetSlot() + if slotUpdate != nil { + // only update every 10 slots to reduce db load and write latency + if slotUpdate.Slot%10 == 0 { + // Use the filter as the checkpoint ID + checkpointId := msg.Filters[0] + + err := common.UpdateCheckpoint(ctx, i.pool, checkpointId, slotUpdate.Slot) + if err != nil { + i.logger.Error("failed to update slot checkpoint", zap.Error(err)) + } + } + } + + // Handle transaction updates + txUpdate := msg.GetTransaction() + if txUpdate != nil { + i.logger.Debug("processing transaction...", zap.String("signature", string(txUpdate.Transaction.Transaction.Signatures[0])), zap.Uint64("slot", txUpdate.Slot)) + + bytes, err := json.Marshal(txUpdate.Transaction.Transaction) + if err != nil { + return fmt.Errorf("failed to marshal transaction: %w", err) + } + + var tx solana.Transaction + err = json.Unmarshal(bytes, &tx) + if err != 
nil { + return fmt.Errorf("failed to unmarshal transaction: %w", err) + } + + metaJson, err := json.Marshal(txUpdate.Transaction.Meta) + if err != nil { + return fmt.Errorf("failed to marshal transaction meta: %w", err) + } + + var meta rpc.TransactionMeta + err = json.Unmarshal(metaJson, &meta) + if err != nil { + return fmt.Errorf("failed to unmarshal transaction meta: %w", err) + } + + tx = *common.ResolveLookupTables(ctx, i.rpcClient, &tx, &meta) + + } + + return nil +} diff --git a/solana/indexer/memos.go b/solana/indexer/program/memos.go similarity index 99% rename from solana/indexer/memos.go rename to solana/indexer/program/memos.go index eaf8a788..f4e7f969 100644 --- a/solana/indexer/memos.go +++ b/solana/indexer/program/memos.go @@ -1,4 +1,4 @@ -package indexer +package program import ( "encoding/json" diff --git a/solana/indexer/memos_test.go b/solana/indexer/program/memos_test.go similarity index 99% rename from solana/indexer/memos_test.go rename to solana/indexer/program/memos_test.go index 6c69d79b..0ffb6a5c 100644 --- a/solana/indexer/memos_test.go +++ b/solana/indexer/program/memos_test.go @@ -1,4 +1,4 @@ -package indexer +package program import ( "testing" diff --git a/solana/indexer/payment_router.go b/solana/indexer/program/payment_router.go similarity index 99% rename from solana/indexer/payment_router.go rename to solana/indexer/program/payment_router.go index 8f344d52..a975607a 100644 --- a/solana/indexer/payment_router.go +++ b/solana/indexer/program/payment_router.go @@ -1,4 +1,4 @@ -package indexer +package program import ( "context" diff --git a/solana/indexer/reward_manager.go b/solana/indexer/program/reward_manager.go similarity index 99% rename from solana/indexer/reward_manager.go rename to solana/indexer/program/reward_manager.go index 460e160c..ae083880 100644 --- a/solana/indexer/reward_manager.go +++ b/solana/indexer/program/reward_manager.go @@ -1,4 +1,4 @@ -package indexer +package program import ( "context" diff --git a/solana/indexer/validate_purchase.go b/solana/indexer/program/validate_purchase.go similarity index 99% rename from solana/indexer/validate_purchase.go rename to solana/indexer/program/validate_purchase.go index 2c8f66d9..a3842bf9 100644 --- a/solana/indexer/validate_purchase.go +++ b/solana/indexer/program/validate_purchase.go @@ -1,4 +1,4 @@ -package indexer +package program import ( "context" diff --git a/solana/indexer/validate_purchase_test.go b/solana/indexer/program/validate_purchase_test.go similarity index 97% rename from solana/indexer/validate_purchase_test.go rename to solana/indexer/program/validate_purchase_test.go index 66fb024a..7006a3fd 100644 --- a/solana/indexer/validate_purchase_test.go +++ b/solana/indexer/program/validate_purchase_test.go @@ -1,4 +1,4 @@ -package indexer +package program import ( "strconv" @@ -17,7 +17,7 @@ func TestPurchaseValidation(t *testing.T) { ctx := t.Context() - pool := database.CreateTestDatabase(t, "test_solana_indexer") + pool := database.CreateTestDatabase(t, "test_solana_indexer_program") sellerUserId := 1 priceCents := 100 diff --git a/solana/indexer/solana_indexer.go b/solana/indexer/solana_indexer.go index a926b932..4ec7e816 100644 --- a/solana/indexer/solana_indexer.go +++ b/solana/indexer/solana_indexer.go @@ -7,47 +7,29 @@ import ( "api.audius.co/config" "api.audius.co/database" - "api.audius.co/jobs" "api.audius.co/logging" + "api.audius.co/solana/indexer/common" + "api.audius.co/solana/indexer/damm_v2" + "api.audius.co/solana/indexer/program" + 
"api.audius.co/solana/indexer/token" "github.com/gagliardetto/solana-go" "github.com/gagliardetto/solana-go/rpc" "github.com/jackc/pgx/v5/pgxpool" "github.com/maypok86/otter" - pb "github.com/rpcpool/yellowstone-grpc/examples/golang/proto" "go.uber.org/zap" ) -type RpcClient interface { - GetBlockWithOpts(context.Context, uint64, *rpc.GetBlockOpts) (*rpc.GetBlockResult, error) - GetSlot(context.Context, rpc.CommitmentType) (uint64, error) - GetSignaturesForAddressWithOpts(context.Context, solana.PublicKey, *rpc.GetSignaturesForAddressOpts) ([]*rpc.TransactionSignature, error) - GetTransaction(context.Context, solana.Signature, *rpc.GetTransactionOpts) (*rpc.GetTransactionResult, error) - GetAccountDataBorshInto(ctx context.Context, account solana.PublicKey, out interface{}) error -} - -type GrpcClient interface { - Subscribe( - ctx context.Context, - subRequest *pb.SubscribeRequest, - dataCallback DataCallback, - errorCallback ErrorCallback, - ) error - Close() -} - -const MAX_SLOT_GAP = 2500 - type SolanaIndexer struct { - rpcClient RpcClient - grpcClient GrpcClient - processor Processor + rpcClient common.RpcClient + grpcClient common.GrpcClient config config.Config pool database.DbPool workerCount int32 - dammV2Indexer *DammV2Indexer - tokenIndexer *TokenIndexer + dammV2Indexer *damm_v2.Indexer + tokenIndexer *token.Indexer + programIndexer *program.Indexer checkpointId string @@ -77,7 +59,7 @@ func New(config config.Config) *SolanaIndexer { panic(fmt.Errorf("error connecting to database: %w", err)) } - grpcConfig := GrpcConfig{ + grpcConfig := common.GrpcConfig{ Server: config.SolanaConfig.GrpcProvider, ApiToken: config.SolanaConfig.GrpcToken, MaxReconnectAttempts: 5, @@ -92,20 +74,13 @@ func New(config config.Config) *SolanaIndexer { panic(fmt.Errorf("failed to create transaction cache: %w", err)) } - dammV2Indexer := &DammV2Indexer{ - pool: pool, - grpcConfig: grpcConfig, - rpcClient: rpcClient, - logger: logger.Named("DammV2Indexer"), - } - - tokenIndexer := &TokenIndexer{ - pool: pool, - grpcConfig: grpcConfig, - rpcClient: rpcClient, - logger: logger.Named("TokenIndexer"), - transactionCache: &transactionCache, - } + dammV2Indexer := damm_v2.New(grpcConfig, rpcClient, pool, logger) + tokenIndexer := token.New( + grpcConfig, rpcClient, pool, &transactionCache, logger, + ) + programIndexer := program.New( + grpcConfig, rpcClient, pool, config.SolanaConfig, logger, + ) s := &SolanaIndexer{ rpcClient: rpcClient, @@ -114,8 +89,9 @@ func New(config config.Config) *SolanaIndexer { pool: pool, workerCount: workerCount, - dammV2Indexer: dammV2Indexer, - tokenIndexer: tokenIndexer, + dammV2Indexer: dammV2Indexer, + tokenIndexer: tokenIndexer, + programIndexer: programIndexer, } return s @@ -124,18 +100,19 @@ func New(config config.Config) *SolanaIndexer { func (s *SolanaIndexer) Start(ctx context.Context) error { go s.ScheduleProcessRetryQueue(ctx, s.config.SolanaIndexerRetryInterval) - statsJob := jobs.NewCoinStatsJob(s.config, s.pool) - statsCtx := context.WithoutCancel(ctx) - statsJob.ScheduleEvery(statsCtx, 5*time.Minute) - go statsJob.Run(statsCtx) + // statsJob := jobs.NewCoinStatsJob(s.config, s.pool) + // statsCtx := context.WithoutCancel(ctx) + // statsJob.ScheduleEvery(statsCtx, 5*time.Minute) + // go statsJob.Run(statsCtx) - dbcJob := jobs.NewCoinDBCJob(s.config, s.pool) - dbcCtx := context.WithoutCancel(ctx) - dbcJob.ScheduleEvery(dbcCtx, 5*time.Minute) - go dbcJob.Run(dbcCtx) + // dbcJob := jobs.NewCoinDBCJob(s.config, s.pool) + // dbcCtx := context.WithoutCancel(ctx) + // 
dbcJob.ScheduleEvery(dbcCtx, 5*time.Minute) + // go dbcJob.Run(dbcCtx) - go s.tokenIndexer.Start(ctx) - go s.dammV2Indexer.Start(ctx) + // go s.tokenIndexer.Start(ctx) + // go s.dammV2Indexer.Start(ctx) + go s.programIndexer.Start(ctx) for { select { @@ -172,7 +149,7 @@ func (s *SolanaIndexer) ProcessRetryQueue(ctx context.Context) error { count := 0 start := time.Now() for { - queue, err := getRetryQueue(ctx, s.pool, limit, offset) + queue, err := common.GetRetryQueue(ctx, s.pool, limit, offset) if err != nil { return fmt.Errorf("failed to fetch retry queue: %w", err) } @@ -188,7 +165,7 @@ func (s *SolanaIndexer) ProcessRetryQueue(ctx context.Context) error { logger.Error("failed to retry token_indexer", zap.Error(err)) offset++ } else { - err = deleteFromRetryQueue(ctx, s.pool, item.ID) + err = common.DeleteFromRetryQueue(ctx, s.pool, item.ID) if err != nil { logger.Error("failed to delete from retry queue", zap.Error(err)) } @@ -199,7 +176,7 @@ func (s *SolanaIndexer) ProcessRetryQueue(ctx context.Context) error { logger.Error("failed to retry damm_v2_indexer", zap.Error(err)) offset++ } else { - err = deleteFromRetryQueue(ctx, s.pool, item.ID) + err = common.DeleteFromRetryQueue(ctx, s.pool, item.ID) if err != nil { logger.Error("failed to delete from retry queue", zap.Error(err)) } diff --git a/solana/indexer/balance_changes.go b/solana/indexer/token/balance_changes.go similarity index 99% rename from solana/indexer/balance_changes.go rename to solana/indexer/token/balance_changes.go index c6c9f88b..08e3b30a 100644 --- a/solana/indexer/balance_changes.go +++ b/solana/indexer/token/balance_changes.go @@ -1,4 +1,4 @@ -package indexer +package token import ( "context" diff --git a/solana/indexer/db_insert_test.go b/solana/indexer/token/balance_changes_test.go similarity index 77% rename from solana/indexer/db_insert_test.go rename to solana/indexer/token/balance_changes_test.go index f381c366..af0aafd8 100644 --- a/solana/indexer/db_insert_test.go +++ b/solana/indexer/token/balance_changes_test.go @@ -1,17 +1,16 @@ -package indexer +package token import ( "testing" "time" "api.audius.co/database" - "github.com/rpcpool/yellowstone-grpc/examples/golang/proto" + "github.com/jackc/pgx/v5" "github.com/test-go/testify/assert" ) -// Ensures the database matches the expected schema for the inserts -func TestInserts(t *testing.T) { - pool := database.CreateTestDatabase(t, "test_solana_indexer") +func TestInsertBalanceChange(t *testing.T) { + pool := database.CreateTestDatabase(t, "test_solana_indexer_token") defer pool.Close() err := insertBalanceChange(t.Context(), pool, balanceChangeRow{ @@ -29,89 +28,10 @@ func TestInserts(t *testing.T) { }) assert.NoError(t, err, "failed to insert balance change") - err = insertClaimableAccount(t.Context(), pool, claimableAccountsRow{ - signature: "signature2", - instructionIndex: 0, - slot: 12345, - mint: "mint2", - ethereumAddress: "0x1234567890abcdef1234567890abcdef", - account: "account2", - }) - assert.NoError(t, err, "failed to insert claimable account") - - err = insertClaimableAccountTransfer(t.Context(), pool, claimableAccountTransfersRow{ - signature: "signature3", - instructionIndex: 0, - amount: 1000, - slot: 12345, - fromAccount: "fromAccount2", - toAccount: "toAccount2", - senderEthAddress: "0xabcdef1234567890abcdef1234567890", - }) - assert.NoError(t, err, "failed to insert claimable account transfer") - - err = insertPayment(t.Context(), pool, paymentRow{ - signature: "signature4", - instructionIndex: 0, - amount: 5000, - slot: 12345, - 
routeIndex: 0, - toAccount: "toAccount3", - }) - assert.NoError(t, err, "failed to insert payment router transaction") - - err = insertPurchase(t.Context(), pool, purchaseRow{ - signature: "signature5", - instructionIndex: 0, - amount: 10000, - slot: 12345, - fromAccount: "fromAccount3", - parsedPurchaseMemo: parsedPurchaseMemo{ - ContentId: 123, - ContentType: "track", - ValidAfterBlocknumber: 12345678, - BuyerUserId: 1, - AccessType: "stream", - }, - parsedLocationMemo: parsedLocationMemo{ - City: "San Francisco", - Country: "USA", - Region: "California", - }, - isValid: nil, - }) - assert.NoError(t, err, "failed to insert purchase") - - err = insertRewardDisbursement(t.Context(), pool, rewardDisbursementsRow{ - signature: "signature6", - instructionIndex: 0, - amount: 2000, - slot: 12345, - userBank: "userBank1", - challengeId: "challenge1", - specifier: "specifier1", - }) - assert.NoError(t, err, "failed to insert reward disbursement") - - req := proto.SubscribeRequest{} - id, err := insertCheckpointStart(t.Context(), pool, "backfill", 100, &req) - assert.NoError(t, err, "failed to insert checkpoint start") - assert.NotEmpty(t, id, "checkpoint ID should not be empty") - - err = updateCheckpoint(t.Context(), pool, id, 201) - assert.NoError(t, err, "failed to update checkpoint") - - slot, err := getCheckpointSlot(t.Context(), pool, "backfill", &req) - assert.NoError(t, err, "failed to get checkpoint slot") - assert.Equal(t, uint64(201), slot, "checkpoint slot should match updated value") - - id2, err := insertBackfillCheckpoint(t.Context(), pool, 100, 200, "foo") - assert.NoError(t, err, "failed to insert backfill checkpoint") - assert.NotEmpty(t, id2, "backfill checkpoint ID should not be empty") } func TestInsertBalanceChangeTriggers(t *testing.T) { - pool := database.CreateTestDatabase(t, "test_solana_indexer") + pool := database.CreateTestDatabase(t, "test_solana_indexer_token") defer pool.Close() database.Seed(pool, database.FixtureMap{ @@ -419,13 +339,20 @@ func TestInsertBalanceChangeTriggers(t *testing.T) { assert.Equal(t, int64(0), userBalance, "user balance should not be updated yet for claimable account 2") // Now insert the claimable account and verify the user balance is updated - err = insertClaimableAccount(t.Context(), pool, claimableAccountsRow{ - signature: "signature7", - instructionIndex: 0, - slot: 10008, - mint: "mint2", - ethereumAddress: "0x1234567890abcdef1234567890abcdef", - account: "claimable-account-2", + sql := ` + INSERT INTO sol_claimable_accounts + (signature, instruction_index, slot, mint, ethereum_address, account) + VALUES + (@signature, @instructionIndex, @slot, @mint, @ethereumAddress, @account) + ON CONFLICT DO NOTHING + ;` + _, err = pool.Exec(t.Context(), sql, pgx.NamedArgs{ + "signature": "signature7", + "instructionIndex": 0, + "slot": 10008, + "mint": "mint2", + "ethereumAddress": "0x1234567890abcdef1234567890abcdef", + "account": "claimable-account-2", }) assert.NoError(t, err, "failed to insert claimable account for claimable account 2") err = pool.QueryRow(t.Context(), diff --git a/solana/indexer/token_indexer.go b/solana/indexer/token/indexer.go similarity index 81% rename from solana/indexer/token_indexer.go rename to solana/indexer/token/indexer.go index 9caf34ed..39a09ff8 100644 --- a/solana/indexer/token_indexer.go +++ b/solana/indexer/token/indexer.go @@ -1,10 +1,11 @@ -package indexer +package token import ( "context" "fmt" "api.audius.co/database" + "api.audius.co/solana/indexer/common" "github.com/gagliardetto/solana-go" 
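// (solana-go and the rpc package below supply the types for the shared
// transaction cache, which maps solana.Signature to
// *rpc.GetTransactionResult. One transaction can touch several tracked
// token accounts and therefore arrive as several account updates, so
// caching GetTransaction results avoids refetching the same transaction
// for each update — an inference from this patch, not documented behavior.)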
"github.com/gagliardetto/solana-go/rpc" "github.com/jackc/pgx/v5" @@ -14,10 +15,10 @@ import ( "go.uber.org/zap" ) -type TokenIndexer struct { +type Indexer struct { pool database.DbPool - grpcConfig GrpcConfig - rpcClient RpcClient + grpcConfig common.GrpcConfig + rpcClient common.RpcClient logger *zap.Logger @@ -31,7 +32,23 @@ const MAX_ARTIST_COIN_MINTS_PER_SUBSCRIPTION = 10000 const WORKER_CHANNEL_SIZE = 3000 const WORKER_COUNT = 50 -func (t *TokenIndexer) Start(ctx context.Context) { +func New( + config common.GrpcConfig, + rpcClient common.RpcClient, + pool database.DbPool, + transactionCache *otter.Cache[solana.Signature, *rpc.GetTransactionResult], + logger *zap.Logger, +) *Indexer { + return &Indexer{ + pool: pool, + grpcConfig: config, + rpcClient: rpcClient, + transactionCache: transactionCache, + logger: logger.Named("TokenIndexer"), + } +} + +func (t *Indexer) Start(ctx context.Context) { // To ensure only one subscription task is running at a time, keep track of // the last cancel function and call it on the next notification. var lastCancel context.CancelFunc @@ -46,7 +63,7 @@ func (t *TokenIndexer) Start(ctx context.Context) { t.logger.Error("failed to handle token update", zap.Int("workerID", workerID), zap.Error(err)) // Add messages that failed to process to the retry queue - if err := addToRetryQueue(ctx, t.pool, TOKEN_INDEXER_NAME, updateMessage, err.Error()); err != nil { + if err := common.AddToRetryQueue(ctx, t.pool, TOKEN_INDEXER_NAME, updateMessage, err.Error()); err != nil { t.logger.Error("failed to add to retry queue", zap.Error(err)) } } @@ -55,7 +72,7 @@ func (t *TokenIndexer) Start(ctx context.Context) { } // Ensure all gRPC clients are closed on shutdown and that the workers are closed - var grpcClients []GrpcClient + var grpcClients []common.GrpcClient defer (func() { for _, client := range grpcClients { client.Close() @@ -93,6 +110,7 @@ func (t *TokenIndexer) Start(ctx context.Context) { grpcClients = clients if err != nil { t.logger.Error("failed to resubscribe to artist coins", zap.Error(err)) + cancel() return } @@ -108,7 +126,7 @@ func (t *TokenIndexer) Start(ctx context.Context) { grpcClients = clients // Watch for new coins to be added - err = watchPgNotification(ctx, t.pool, ARTIST_COIN_NOTIFICATION_NAME, handleNotif, t.logger) + err = common.WatchPgNotification(ctx, t.pool, ARTIST_COIN_NOTIFICATION_NAME, handleNotif, t.logger) if err != nil { t.logger.Error("failed to watch for artist coin changes", zap.Error(err)) return @@ -126,7 +144,7 @@ func (t *TokenIndexer) Start(ctx context.Context) { } // Handles a single update message from the gRPC subscription -func (t *TokenIndexer) HandleUpdate(ctx context.Context, msg *pb.SubscribeUpdate) error { +func (t *Indexer) HandleUpdate(ctx context.Context, msg *pb.SubscribeUpdate) error { // Handle slot updates slotUpdate := msg.GetSlot() if slotUpdate != nil { @@ -135,7 +153,7 @@ func (t *TokenIndexer) HandleUpdate(ctx context.Context, msg *pb.SubscribeUpdate // Use the filter as the checkpoint ID checkpointId := msg.Filters[0] - err := updateCheckpoint(ctx, t.pool, checkpointId, slotUpdate.Slot) + err := common.UpdateCheckpoint(ctx, t.pool, checkpointId, slotUpdate.Slot) if err != nil { t.logger.Error("failed to update slot checkpoint", zap.Error(err)) } @@ -148,7 +166,7 @@ func (t *TokenIndexer) HandleUpdate(ctx context.Context, msg *pb.SubscribeUpdate txSig := solana.SignatureFromBytes(accUpdate.Account.TxnSignature) // Fetch the transaction details - txRes, err := fetchTransactionWithCache(ctx, 
t.transactionCache, t.rpcClient, txSig) + txRes, err := common.FetchTransactionWithCache(ctx, t.transactionCache, t.rpcClient, txSig) if err != nil { return fmt.Errorf("failed to fetch transaction: %w", err) } @@ -160,7 +178,7 @@ func (t *TokenIndexer) HandleUpdate(ctx context.Context, msg *pb.SubscribeUpdate } // Add the lookup table accounts to the message accounts - tx = resolveLookupTables(ctx, t.rpcClient, tx, txRes.Meta) + tx = common.ResolveLookupTables(ctx, t.rpcClient, tx, txRes.Meta) // Extract the mints we're tracking using the subscription's filters trackedMints := msg.Filters @@ -173,11 +191,11 @@ func (t *TokenIndexer) HandleUpdate(ctx context.Context, msg *pb.SubscribeUpdate return nil } -func (t *TokenIndexer) subscribeToArtistCoins(ctx context.Context, handleUpdate func(ctx context.Context, message *pb.SubscribeUpdate)) ([]GrpcClient, error) { +func (t *Indexer) subscribeToArtistCoins(ctx context.Context, handleUpdate func(ctx context.Context, message *pb.SubscribeUpdate)) ([]common.GrpcClient, error) { done := false page := 0 pageSize := MAX_ARTIST_COIN_MINTS_PER_SUBSCRIPTION - grpcClients := make([]GrpcClient, 0) + grpcClients := make([]common.GrpcClient, 0) total := 0 for !done { mints, err := getArtistCoins(ctx, t.pool, pageSize, page*pageSize) @@ -195,7 +213,7 @@ func (t *TokenIndexer) subscribeToArtistCoins(ctx context.Context, handleUpdate return nil, fmt.Errorf("failed to make mint subscription request: %w", err) } - grpcClient := NewGrpcClient(t.grpcConfig) + grpcClient := common.NewGrpcClient(t.grpcConfig) err = grpcClient.Subscribe(ctx, subscription, handleUpdate, func(err error) { t.logger.Error("error in token subscription", zap.Error(err)) }) @@ -213,7 +231,7 @@ func (t *TokenIndexer) subscribeToArtistCoins(ctx context.Context, handleUpdate return grpcClients, nil } -func (t *TokenIndexer) makeMintSubscriptionRequest(ctx context.Context, mintAddresses []string) (*pb.SubscribeRequest, error) { +func (t *Indexer) makeMintSubscriptionRequest(ctx context.Context, mintAddresses []string) (*pb.SubscribeRequest, error) { commitment := pb.CommitmentLevel_CONFIRMED subscription := &pb.SubscribeRequest{ Commitment: &commitment, @@ -246,7 +264,7 @@ func (t *TokenIndexer) makeMintSubscriptionRequest(ctx context.Context, mintAddr } // Ensure this subscription has a checkpoint - checkpointId, fromSlot, err := ensureCheckpoint(ctx, TOKEN_INDEXER_NAME, t.pool, t.rpcClient, subscription, t.logger) + checkpointId, fromSlot, err := common.EnsureCheckpoint(ctx, TOKEN_INDEXER_NAME, t.pool, t.rpcClient, subscription, t.logger) if err != nil { return nil, fmt.Errorf("failed to set from slot: %w", err) } diff --git a/solana/indexer/unprocessed_transactions_test.go b/solana/indexer/unprocessed_transactions_test.go index db4f4eef..b6c41df4 100644 --- a/solana/indexer/unprocessed_transactions_test.go +++ b/solana/indexer/unprocessed_transactions_test.go @@ -1,85 +1,85 @@ package indexer -import ( - "errors" - "strconv" - "testing" - - "api.audius.co/database" - "github.com/gagliardetto/solana-go" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - "github.com/test-go/testify/assert" - "go.uber.org/zap" -) - -func TestUnprocessedTransactions(t *testing.T) { - ctx := t.Context() - pool := database.CreateTestDatabase(t, "test_solana_indexer") - defer pool.Close() - - // Insert a test unprocessed transaction - signature := "test_signature" - errorMessage := "test error message" - err := insertUnprocessedTransaction(ctx, pool, signature, 0, errorMessage) - 
require.NoError(t, err) - - // Verify the transaction was inserted - res, err := getUnprocessedTransactions(ctx, pool, 10, 0) - require.NoError(t, err) - assert.Len(t, res, 1) - assert.Equal(t, signature, res[0].Signature) - - // Delete the unprocessed transaction - err = deleteUnprocessedTransaction(ctx, pool, signature) - require.NoError(t, err) - - // Verify the transaction was deleted - res, err = getUnprocessedTransactions(ctx, pool, 10, 0) - require.NoError(t, err) - assert.Len(t, res, 0) -} - -func TestRetryUnprocessedTransactions(t *testing.T) { - ctx := t.Context() - pool := database.CreateTestDatabase(t, "test_solana_indexer") - defer pool.Close() - - unprocessedTransactionsCount := 543 - processor := &mockProcessor{} - - var failingSigBytes [64]byte - copy(failingSigBytes[:], []byte("test_signature_73")) - failingSig := solana.SignatureFromBytes(failingSigBytes[:]) - - // Mock the processor to fail on a specific signature - processor.On("ProcessSignature", ctx, mock.Anything, failingSig, mock.Anything). - Return(errors.New("fake failure")).Times(1) - - // Everything else should succeed - processor.On("ProcessSignature", ctx, mock.Anything, mock.Anything, mock.Anything, mock.Anything). - Return(nil).Times(unprocessedTransactionsCount - 1) - - s := &SolanaIndexer{ - processor: processor, - pool: pool, - logger: zap.NewNop(), - } - - for i := range unprocessedTransactionsCount { - var sigBytes [64]byte - copy(sigBytes[:], []byte("test_signature_"+strconv.FormatInt(int64(i), 10))) - signature := solana.SignatureFromBytes(sigBytes[:]) - insertUnprocessedTransaction(ctx, pool, signature.String(), 0, "test error message") - } - - err := s.RetryUnprocessedTransactions(ctx) - require.NoError(t, err) - processor.AssertNumberOfCalls(t, "ProcessSignature", unprocessedTransactionsCount) - - // Verify all transactions but #73 were processed - unprocessedTxs, err := getUnprocessedTransactions(ctx, pool, 100, 0) - require.NoError(t, err) - assert.Len(t, unprocessedTxs, 1, "expected a single unprocessed transaction after retry") - assert.Equal(t, failingSig.String(), unprocessedTxs[0].Signature, "expected the failing transaction to remain unprocessed") -} +// import ( +// "errors" +// "strconv" +// "testing" + +// "api.audius.co/database" +// "github.com/gagliardetto/solana-go" +// "github.com/stretchr/testify/mock" +// "github.com/stretchr/testify/require" +// "github.com/test-go/testify/assert" +// "go.uber.org/zap" +// ) + +// func TestUnprocessedTransactions(t *testing.T) { +// ctx := t.Context() +// pool := database.CreateTestDatabase(t, "test_solana_indexer") +// defer pool.Close() + +// // Insert a test unprocessed transaction +// signature := "test_signature" +// errorMessage := "test error message" +// err := insertUnprocessedTransaction(ctx, pool, signature, 0, errorMessage) +// require.NoError(t, err) + +// // Verify the transaction was inserted +// res, err := getUnprocessedTransactions(ctx, pool, 10, 0) +// require.NoError(t, err) +// assert.Len(t, res, 1) +// assert.Equal(t, signature, res[0].Signature) + +// // Delete the unprocessed transaction +// err = deleteUnprocessedTransaction(ctx, pool, signature) +// require.NoError(t, err) + +// // Verify the transaction was deleted +// res, err = getUnprocessedTransactions(ctx, pool, 10, 0) +// require.NoError(t, err) +// assert.Len(t, res, 0) +// } + +// func TestRetryUnprocessedTransactions(t *testing.T) { +// ctx := t.Context() +// pool := database.CreateTestDatabase(t, "test_solana_indexer") +// defer pool.Close() + +// 
unprocessedTransactionsCount := 543 +// processor := &mockProcessor{} + +// var failingSigBytes [64]byte +// copy(failingSigBytes[:], []byte("test_signature_73")) +// failingSig := solana.SignatureFromBytes(failingSigBytes[:]) + +// // Mock the processor to fail on a specific signature +// processor.On("ProcessSignature", ctx, mock.Anything, failingSig, mock.Anything). +// Return(errors.New("fake failure")).Times(1) + +// // Everything else should succeed +// processor.On("ProcessSignature", ctx, mock.Anything, mock.Anything, mock.Anything, mock.Anything). +// Return(nil).Times(unprocessedTransactionsCount - 1) + +// s := &SolanaIndexer{ +// processor: processor, +// pool: pool, +// logger: zap.NewNop(), +// } + +// for i := range unprocessedTransactionsCount { +// var sigBytes [64]byte +// copy(sigBytes[:], []byte("test_signature_"+strconv.FormatInt(int64(i), 10))) +// signature := solana.SignatureFromBytes(sigBytes[:]) +// insertUnprocessedTransaction(ctx, pool, signature.String(), 0, "test error message") +// } + +// err := s.RetryUnprocessedTransactions(ctx) +// require.NoError(t, err) +// processor.AssertNumberOfCalls(t, "ProcessSignature", unprocessedTransactionsCount) + +// // Verify all transactions but #73 were processed +// unprocessedTxs, err := getUnprocessedTransactions(ctx, pool, 100, 0) +// require.NoError(t, err) +// assert.Len(t, unprocessedTxs, 1, "expected a single unprocessed transaction after retry") +// assert.Equal(t, failingSig.String(), unprocessedTxs[0].Signature, "expected the failing transaction to remain unprocessed") +// } diff --git a/sql/01_schema.sql b/sql/01_schema.sql index d0f254ed..8b1e526e 100644 --- a/sql/01_schema.sql +++ b/sql/01_schema.sql @@ -3,8 +3,8 @@ -- --- Dumped from database version 17.6 (Debian 17.6-2.pgdg13+1) --- Dumped by pg_dump version 17.6 (Debian 17.6-2.pgdg13+1) +-- Dumped from database version 17.6 (Debian 17.6-1.pgdg13+1) +-- Dumped by pg_dump version 17.6 (Debian 17.6-1.pgdg13+1) SET statement_timeout = 0; SET lock_timeout = 0; @@ -978,6 +978,61 @@ END $$; +-- +-- Name: calculate_artist_coin_fees(text); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION public.calculate_artist_coin_fees(artist_coin_mint text) RETURNS TABLE(unclaimed_dbc_fees numeric, total_dbc_fees numeric, unclaimed_damm_v2_fees numeric, total_damm_v2_fees numeric, unclaimed_fees numeric, total_fees numeric) + LANGUAGE sql + AS $$ + WITH + damm_fees AS ( + -- fee = totalLiquidity * feePerTokenStore + -- precision: (totalLiquidity * feePerTokenStore) >> 128 + -- See: https://github.com/MeteoraAg/damm-v2-sdk/blob/70d1af59689039a1dc700dee8f741db48024d02d/src/helpers/utils.ts#L190-L191 + SELECT + pool.token_a_mint AS mint, + ( + pool.fee_b_per_liquidity + * ( + position.unlocked_liquidity + position.vested_liquidity + position.permanent_locked_liquidity + ) + / POWER (2, 128) + + position.fee_b_pending + ) AS total_damm_v2_fees, + ( + (pool.fee_b_per_liquidity - position.fee_b_per_token_checkpoint) + * ( + position.unlocked_liquidity + position.vested_liquidity + position.permanent_locked_liquidity + ) + / POWER (2, 128) + + position.fee_b_pending + ) AS unclaimed_damm_v2_fees + FROM sol_meteora_damm_v2_pools pool + JOIN sol_meteora_dbc_migrations migration ON migration.base_mint = pool.token_a_mint + JOIN sol_meteora_damm_v2_positions position ON position.address = migration.first_position + WHERE pool.token_a_mint = artist_coin_mint + ), + dbc_fees AS ( + SELECT + base_mint AS mint, + total_trading_quote_fee / 2 AS total_dbc_fees, + creator_quote_fee 
/ 2 AS unclaimed_dbc_fees + FROM artist_coin_pools + WHERE base_mint = artist_coin_mint + ) + SELECT + FLOOR(COALESCE(dbc_fees.unclaimed_dbc_fees, 0)) AS unclaimed_dbc_fees, + FLOOR(COALESCE(dbc_fees.total_dbc_fees, 0)) AS total_dbc_fees, + FLOOR(COALESCE(damm_fees.unclaimed_damm_v2_fees, 0)) AS unclaimed_damm_v2_fees, + FLOOR(COALESCE(damm_fees.total_damm_v2_fees, 0)) AS total_damm_v2_fees, + FLOOR(COALESCE(dbc_fees.unclaimed_dbc_fees, 0) + COALESCE(damm_fees.unclaimed_damm_v2_fees, 0)) AS unclaimed_fees, + FLOOR(COALESCE(dbc_fees.total_dbc_fees, 0) + COALESCE(damm_fees.total_damm_v2_fees, 0)) AS total_fees + FROM dbc_fees + FULL OUTER JOIN damm_fees USING (mint); +$$; + + -- -- Name: chat_allowed(integer, integer); Type: FUNCTION; Schema: public; Owner: - -- @@ -2358,6 +2413,24 @@ end; $$; +-- +-- Name: handle_meteora_dbc_migrations(); Type: FUNCTION; Schema: public; Owner: - +-- + +CREATE FUNCTION public.handle_meteora_dbc_migrations() RETURNS trigger + LANGUAGE plpgsql + AS $$ +BEGIN + PERFORM pg_notify('meteora_dbc_migration', json_build_object('operation', TG_OP)::text); + RETURN NEW; + EXCEPTION + WHEN OTHERS THEN + RAISE WARNING 'An error occurred in %: %', TG_NAME, SQLERRM; + RETURN NULL; +END; +$$; + + -- -- Name: handle_on_user_challenge(); Type: FUNCTION; Schema: public; Owner: - -- @@ -7092,6 +7165,239 @@ CREATE TABLE public.sol_claimable_accounts ( COMMENT ON TABLE public.sol_claimable_accounts IS 'Stores claimable tokens program Create instructions for tracked mints.'; +-- +-- Name: sol_keypairs; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.sol_keypairs ( + public_key character varying NOT NULL, + private_key bytea NOT NULL +); + + +-- +-- Name: sol_meteora_damm_v2_pool_base_fees; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.sol_meteora_damm_v2_pool_base_fees ( + pool text NOT NULL, + cliff_fee_numerator bigint NOT NULL, + fee_scheduler_mode smallint NOT NULL, + number_of_period smallint NOT NULL, + period_frequency bigint NOT NULL, + reduction_factor bigint NOT NULL, + created_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, + updated_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL +); + + +-- +-- Name: TABLE sol_meteora_damm_v2_pool_base_fees; Type: COMMENT; Schema: public; Owner: - +-- + +COMMENT ON TABLE public.sol_meteora_damm_v2_pool_base_fees IS 'Tracks base fee configuration for DAMM V2 pools. A slice of the DAMM V2 pool state.'; + + +-- +-- Name: sol_meteora_damm_v2_pool_dynamic_fees; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.sol_meteora_damm_v2_pool_dynamic_fees ( + pool text NOT NULL, + initialized smallint NOT NULL, + max_volatility_accumulator integer NOT NULL, + variable_fee_control integer NOT NULL, + bin_step smallint NOT NULL, + filter_period smallint NOT NULL, + decay_period smallint NOT NULL, + reduction_factor smallint NOT NULL, + last_update_timestamp bigint NOT NULL, + bin_step_u128 numeric NOT NULL, + sqrt_price_reference numeric NOT NULL, + volatility_accumulator numeric NOT NULL, + volatility_reference numeric NOT NULL, + created_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, + updated_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL +); + + +-- +-- Name: TABLE sol_meteora_damm_v2_pool_dynamic_fees; Type: COMMENT; Schema: public; Owner: - +-- + +COMMENT ON TABLE public.sol_meteora_damm_v2_pool_dynamic_fees IS 'Tracks dynamic fee configuration for DAMM V2 pools. 
A slice of the DAMM V2 pool state.'; + + +-- +-- Name: sol_meteora_damm_v2_pool_fees; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.sol_meteora_damm_v2_pool_fees ( + pool text NOT NULL, + protocol_fee_percent smallint NOT NULL, + partner_fee_percent smallint NOT NULL, + referral_fee_percent smallint NOT NULL, + created_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, + updated_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL +); + + +-- +-- Name: TABLE sol_meteora_damm_v2_pool_fees; Type: COMMENT; Schema: public; Owner: - +-- + +COMMENT ON TABLE public.sol_meteora_damm_v2_pool_fees IS 'Tracks fee configuration for DAMM V2 pools. A slice of the DAMM V2 pool state.'; + + +-- +-- Name: sol_meteora_damm_v2_pool_metrics; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.sol_meteora_damm_v2_pool_metrics ( + pool text NOT NULL, + total_lp_a_fee numeric NOT NULL, + total_lp_b_fee numeric NOT NULL, + total_protocol_a_fee numeric NOT NULL, + total_protocol_b_fee numeric NOT NULL, + total_partner_a_fee numeric NOT NULL, + total_partner_b_fee numeric NOT NULL, + total_position bigint NOT NULL, + created_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, + updated_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL +); + + +-- +-- Name: TABLE sol_meteora_damm_v2_pool_metrics; Type: COMMENT; Schema: public; Owner: - +-- + +COMMENT ON TABLE public.sol_meteora_damm_v2_pool_metrics IS 'Tracks aggregated metrics for DAMM V2 pools. A slice of the DAMM V2 pool state.'; + + +-- +-- Name: sol_meteora_damm_v2_pools; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.sol_meteora_damm_v2_pools ( + address text NOT NULL, + token_a_mint text NOT NULL, + token_b_mint text NOT NULL, + token_a_vault text NOT NULL, + token_b_vault text NOT NULL, + whitelisted_vault text NOT NULL, + partner text NOT NULL, + liquidity numeric NOT NULL, + protocol_a_fee bigint NOT NULL, + protocol_b_fee bigint NOT NULL, + partner_a_fee bigint NOT NULL, + partner_b_fee bigint NOT NULL, + sqrt_min_price numeric NOT NULL, + sqrt_max_price numeric NOT NULL, + sqrt_price numeric NOT NULL, + activation_point bigint NOT NULL, + activation_type smallint NOT NULL, + pool_status smallint NOT NULL, + token_a_flag smallint NOT NULL, + token_b_flag smallint NOT NULL, + collect_fee_mode smallint NOT NULL, + pool_type smallint NOT NULL, + fee_a_per_liquidity bigint NOT NULL, + fee_b_per_liquidity bigint NOT NULL, + permanent_lock_liquidity numeric NOT NULL, + creator text NOT NULL, + created_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, + updated_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL +); + + +-- +-- Name: TABLE sol_meteora_damm_v2_pools; Type: COMMENT; Schema: public; Owner: - +-- + +COMMENT ON TABLE public.sol_meteora_damm_v2_pools IS 'Tracks DAMM V2 pool state. 
Join with sol_meteora_damm_v2_pool_metrics, sol_meteora_damm_v2_pool_fees, sol_meteora_damm_v2_pool_base_fees, and sol_meteora_damm_v2_pool_dynamic_fees for full pool state.'; + + +-- +-- Name: sol_meteora_damm_v2_position_metrics; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.sol_meteora_damm_v2_position_metrics ( + "position" text NOT NULL, + total_claimed_a_fee bigint NOT NULL, + total_claimed_b_fee bigint NOT NULL, + created_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, + updated_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL +); + + +-- +-- Name: TABLE sol_meteora_damm_v2_position_metrics; Type: COMMENT; Schema: public; Owner: - +-- + +COMMENT ON TABLE public.sol_meteora_damm_v2_position_metrics IS 'Tracks aggregated metrics for DAMM V2 positions. A slice of the DAMM V2 position state.'; + + +-- +-- Name: sol_meteora_damm_v2_positions; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.sol_meteora_damm_v2_positions ( + address text NOT NULL, + pool text NOT NULL, + nft_mint text NOT NULL, + fee_a_per_token_checkpoint bigint NOT NULL, + fee_b_per_token_checkpoint bigint NOT NULL, + fee_a_pending bigint NOT NULL, + fee_b_pending bigint NOT NULL, + unlocked_liquidity numeric NOT NULL, + vested_liquidity numeric NOT NULL, + permanent_locked_liquidity numeric NOT NULL, + created_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, + updated_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL +); + + +-- +-- Name: TABLE sol_meteora_damm_v2_positions; Type: COMMENT; Schema: public; Owner: - +-- + +COMMENT ON TABLE public.sol_meteora_damm_v2_positions IS 'Tracks DAMM V2 positions representing a claim to the liquidity and associated fees in a DAMM V2 pool. Join with sol_meteora_damm_v2_position_metrics for full position state.'; + + +-- +-- Name: sol_meteora_dbc_migrations; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.sol_meteora_dbc_migrations ( + signature text NOT NULL, + instruction_index integer NOT NULL, + slot bigint NOT NULL, + dbc_pool text NOT NULL, + migration_metadata text NOT NULL, + config text NOT NULL, + dbc_pool_authority text NOT NULL, + damm_v2_pool text NOT NULL, + first_position_nft_mint text NOT NULL, + first_position_nft_account text NOT NULL, + first_position text NOT NULL, + second_position_nft_mint text NOT NULL, + second_position_nft_account text NOT NULL, + second_position text NOT NULL, + damm_pool_authority text NOT NULL, + base_mint text NOT NULL, + quote_mint text NOT NULL, + created_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, + updated_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL +); + + +-- +-- Name: TABLE sol_meteora_dbc_migrations; Type: COMMENT; Schema: public; Owner: - +-- + +COMMENT ON TABLE public.sol_meteora_dbc_migrations IS 'Tracks migrations from DBC pools to DAMM V2 pools.'; + + -- -- Name: sol_payments; Type: TABLE; Schema: public; Owner: - -- @@ -7156,6 +7462,62 @@ COMMENT ON COLUMN public.sol_purchases.valid_after_blocknumber IS 'Purchase tran COMMENT ON COLUMN public.sol_purchases.is_valid IS 'A purchase is valid if it meets the pricing information set by the artist. 
If the pricing information is not available yet (as indicated by the valid_after_blocknumber), then is_valid will be NULL which indicates a "pending" state.'; +-- +-- Name: sol_retry_queue; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.sol_retry_queue ( + id uuid DEFAULT gen_random_uuid() NOT NULL, + indexer text NOT NULL, + update jsonb NOT NULL, + error text NOT NULL, + created_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, + updated_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL +); + + +-- +-- Name: TABLE sol_retry_queue; Type: COMMENT; Schema: public; Owner: - +-- + +COMMENT ON TABLE public.sol_retry_queue IS 'Queue for retrying failed indexer updates.'; + + +-- +-- Name: COLUMN sol_retry_queue.indexer; Type: COMMENT; Schema: public; Owner: - +-- + +COMMENT ON COLUMN public.sol_retry_queue.indexer IS 'The name of the indexer that failed (e.g., token_indexer, damm_v2_indexer).'; + + +-- +-- Name: COLUMN sol_retry_queue.update; Type: COMMENT; Schema: public; Owner: - +-- + +COMMENT ON COLUMN public.sol_retry_queue.update IS 'The JSONB update data that failed to process.'; + + +-- +-- Name: COLUMN sol_retry_queue.error; Type: COMMENT; Schema: public; Owner: - +-- + +COMMENT ON COLUMN public.sol_retry_queue.error IS 'The error message from the failure.'; + + +-- +-- Name: COLUMN sol_retry_queue.created_at; Type: COMMENT; Schema: public; Owner: - +-- + +COMMENT ON COLUMN public.sol_retry_queue.created_at IS 'The timestamp when the retry entry was created.'; + + +-- +-- Name: COLUMN sol_retry_queue.updated_at; Type: COMMENT; Schema: public; Owner: - +-- + +COMMENT ON COLUMN public.sol_retry_queue.updated_at IS 'The timestamp when the retry entry was last updated.'; + + -- -- Name: sol_reward_disbursements; Type: TABLE; Schema: public; Owner: - -- @@ -7189,7 +7551,8 @@ CREATE TABLE public.sol_slot_checkpoints ( subscription_hash text NOT NULL, subscription jsonb NOT NULL, updated_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, - created_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL + created_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, + name text ); @@ -7200,6 +7563,13 @@ CREATE TABLE public.sol_slot_checkpoints ( COMMENT ON TABLE public.sol_slot_checkpoints IS 'Stores checkpoints for Solana slots to track indexing progress.'; +-- +-- Name: COLUMN sol_slot_checkpoints.name; Type: COMMENT; Schema: public; Owner: - +-- + +COMMENT ON COLUMN public.sol_slot_checkpoints.name IS 'The name of the indexer this checkpoint is for (e.g., token_indexer, damm_v2_indexer).'; + + -- -- Name: sol_swaps; Type: TABLE; Schema: public; Owner: - -- @@ -8774,6 +9144,78 @@ ALTER TABLE ONLY public.sol_claimable_accounts ADD CONSTRAINT sol_claimable_accounts_pkey PRIMARY KEY (signature, instruction_index); +-- +-- Name: sol_keypairs sol_keypairs_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.sol_keypairs + ADD CONSTRAINT sol_keypairs_pkey PRIMARY KEY (public_key); + + +-- +-- Name: sol_meteora_damm_v2_pool_base_fees sol_meteora_damm_v2_pool_base_fees_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.sol_meteora_damm_v2_pool_base_fees + ADD CONSTRAINT sol_meteora_damm_v2_pool_base_fees_pkey PRIMARY KEY (pool); + + +-- +-- Name: sol_meteora_damm_v2_pool_dynamic_fees sol_meteora_damm_v2_pool_dynamic_fees_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.sol_meteora_damm_v2_pool_dynamic_fees + ADD CONSTRAINT 
sol_meteora_damm_v2_pool_dynamic_fees_pkey PRIMARY KEY (pool); + + +-- +-- Name: sol_meteora_damm_v2_pool_fees sol_meteora_damm_v2_pool_fees_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.sol_meteora_damm_v2_pool_fees + ADD CONSTRAINT sol_meteora_damm_v2_pool_fees_pkey PRIMARY KEY (pool); + + +-- +-- Name: sol_meteora_damm_v2_pool_metrics sol_meteora_damm_v2_pool_metrics_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.sol_meteora_damm_v2_pool_metrics + ADD CONSTRAINT sol_meteora_damm_v2_pool_metrics_pkey PRIMARY KEY (pool); + + +-- +-- Name: sol_meteora_damm_v2_pools sol_meteora_damm_v2_pools_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.sol_meteora_damm_v2_pools + ADD CONSTRAINT sol_meteora_damm_v2_pools_pkey PRIMARY KEY (address); + + +-- +-- Name: sol_meteora_damm_v2_position_metrics sol_meteora_damm_v2_position_metrics_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.sol_meteora_damm_v2_position_metrics + ADD CONSTRAINT sol_meteora_damm_v2_position_metrics_pkey PRIMARY KEY ("position"); + + +-- +-- Name: sol_meteora_damm_v2_positions sol_meteora_damm_v2_positions_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.sol_meteora_damm_v2_positions + ADD CONSTRAINT sol_meteora_damm_v2_positions_pkey PRIMARY KEY (address); + + +-- +-- Name: sol_meteora_dbc_migrations sol_meteora_dbc_migrations_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.sol_meteora_dbc_migrations + ADD CONSTRAINT sol_meteora_dbc_migrations_pkey PRIMARY KEY (signature, instruction_index); + + -- -- Name: sol_payments sol_payments_pkey; Type: CONSTRAINT; Schema: public; Owner: - -- @@ -8790,6 +9232,14 @@ ALTER TABLE ONLY public.sol_purchases ADD CONSTRAINT sol_purchases_pkey PRIMARY KEY (signature, instruction_index); +-- +-- Name: sol_retry_queue sol_retry_queue_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.sol_retry_queue + ADD CONSTRAINT sol_retry_queue_pkey PRIMARY KEY (id); + + -- -- Name: sol_reward_disbursements sol_reward_disbursements_pkey; Type: CONSTRAINT; Schema: public; Owner: - -- @@ -9926,6 +10376,20 @@ CREATE INDEX sol_claimable_accounts_ethereum_address_idx ON public.sol_claimable COMMENT ON INDEX public.sol_claimable_accounts_ethereum_address_idx IS 'Used for getting account by user wallet and mint.'; +-- +-- Name: sol_meteora_dbc_migrations_base_mint_idx; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX sol_meteora_dbc_migrations_base_mint_idx ON public.sol_meteora_dbc_migrations USING btree (base_mint); + + +-- +-- Name: INDEX sol_meteora_dbc_migrations_base_mint_idx; Type: COMMENT; Schema: public; Owner: - +-- + +COMMENT ON INDEX public.sol_meteora_dbc_migrations_base_mint_idx IS 'Used for finding artist positions by base_mint.'; + + -- -- Name: sol_payments_to_account; Type: INDEX; Schema: public; Owner: - -- @@ -10367,6 +10831,20 @@ CREATE TRIGGER on_event AFTER INSERT ON public.events FOR EACH ROW EXECUTE FUNCT CREATE TRIGGER on_follow AFTER INSERT ON public.follows FOR EACH ROW EXECUTE FUNCTION public.handle_follow(); +-- +-- Name: sol_meteora_dbc_migrations on_meteora_dbc_migrations; Type: TRIGGER; Schema: public; Owner: - +-- + +CREATE TRIGGER on_meteora_dbc_migrations AFTER INSERT OR DELETE ON public.sol_meteora_dbc_migrations FOR EACH ROW EXECUTE FUNCTION public.handle_meteora_dbc_migrations(); + + +-- +-- Name: TRIGGER on_meteora_dbc_migrations ON 
sol_meteora_dbc_migrations; Type: COMMENT; Schema: public; Owner: - +-- + +COMMENT ON TRIGGER on_meteora_dbc_migrations ON public.sol_meteora_dbc_migrations IS 'Notifies when a DBC pool migrates to a DAMM V2 pool.'; + + -- -- Name: plays on_play; Type: TRIGGER; Schema: public; Owner: - -- @@ -10521,6 +10999,13 @@ CREATE TRIGGER trg_aggregate_plays AFTER INSERT OR UPDATE ON public.aggregate_pl CREATE TRIGGER trg_aggregate_user AFTER INSERT OR UPDATE ON public.aggregate_user FOR EACH ROW EXECUTE FUNCTION public.on_new_row(); +-- +-- Name: artist_coins trg_artist_coins; Type: TRIGGER; Schema: public; Owner: - +-- + +CREATE TRIGGER trg_artist_coins AFTER INSERT OR UPDATE ON public.artist_coins FOR EACH ROW EXECUTE FUNCTION public.on_new_row(); + + -- -- Name: follows trg_follows; Type: TRIGGER; Schema: public; Owner: - -- @@ -10784,6 +11269,54 @@ ALTER TABLE ONLY public.saves ADD CONSTRAINT saves_blocknumber_fkey FOREIGN KEY (blocknumber) REFERENCES public.blocks(number) ON DELETE CASCADE; +-- +-- Name: sol_meteora_damm_v2_pool_base_fees sol_meteora_damm_v2_pool_base_fees_pool_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.sol_meteora_damm_v2_pool_base_fees + ADD CONSTRAINT sol_meteora_damm_v2_pool_base_fees_pool_fkey FOREIGN KEY (pool) REFERENCES public.sol_meteora_damm_v2_pools(address) ON DELETE CASCADE; + + +-- +-- Name: sol_meteora_damm_v2_pool_dynamic_fees sol_meteora_damm_v2_pool_dynamic_fees_pool_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.sol_meteora_damm_v2_pool_dynamic_fees + ADD CONSTRAINT sol_meteora_damm_v2_pool_dynamic_fees_pool_fkey FOREIGN KEY (pool) REFERENCES public.sol_meteora_damm_v2_pools(address) ON DELETE CASCADE; + + +-- +-- Name: sol_meteora_damm_v2_pool_fees sol_meteora_damm_v2_pool_fees_pool_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.sol_meteora_damm_v2_pool_fees + ADD CONSTRAINT sol_meteora_damm_v2_pool_fees_pool_fkey FOREIGN KEY (pool) REFERENCES public.sol_meteora_damm_v2_pools(address) ON DELETE CASCADE; + + +-- +-- Name: sol_meteora_damm_v2_pool_metrics sol_meteora_damm_v2_pool_metrics_pool_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.sol_meteora_damm_v2_pool_metrics + ADD CONSTRAINT sol_meteora_damm_v2_pool_metrics_pool_fkey FOREIGN KEY (pool) REFERENCES public.sol_meteora_damm_v2_pools(address) ON DELETE CASCADE; + + +-- +-- Name: sol_meteora_damm_v2_position_metrics sol_meteora_damm_v2_position_metrics_position_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.sol_meteora_damm_v2_position_metrics + ADD CONSTRAINT sol_meteora_damm_v2_position_metrics_position_fkey FOREIGN KEY ("position") REFERENCES public.sol_meteora_damm_v2_positions(address) ON DELETE CASCADE; + + +-- +-- Name: sol_meteora_damm_v2_positions sol_meteora_damm_v2_positions_pool_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.sol_meteora_damm_v2_positions + ADD CONSTRAINT sol_meteora_damm_v2_positions_pool_fkey FOREIGN KEY (pool) REFERENCES public.sol_meteora_damm_v2_pools(address) ON DELETE CASCADE; + + -- -- Name: subscriptions subscriptions_blocknumber_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - -- diff --git a/sql/02_test_template.sql b/sql/02_test_template.sql index 8f919981..7c3eab8d 100644 --- a/sql/02_test_template.sql +++ b/sql/02_test_template.sql @@ -3,4 +3,7 @@ CREATE DATABASE test_comms TEMPLATE postgres; CREATE DATABASE test_database TEMPLATE postgres; 
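-- Each indexer package now gets its own test template database (added
-- below), presumably so the per-package Go test suites can run without
-- clobbering one another's state. A sketch of a matching call site, using
-- the database.CreateTestDatabase(t, name) helper already used by the
-- tests in this patch:
--   pool := database.CreateTestDatabase(t, "test_solana_indexer_token")
--   defer pool.Close()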
CREATE DATABASE test_hll TEMPLATE postgres; CREATE DATABASE test_indexer TEMPLATE postgres; -CREATE DATABASE test_solana_indexer TEMPLATE postgres; +CREATE DATABASE test_solana_indexer_common TEMPLATE postgres; +CREATE DATABASE test_solana_indexer_token TEMPLATE postgres; +CREATE DATABASE test_solana_indexer_damm_v2 TEMPLATE postgres; +CREATE DATABASE test_solana_indexer_program TEMPLATE postgres; \ No newline at end of file From b56a4331c37a3fd9da046946af529b0b61bc5060 Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Tue, 14 Oct 2025 16:00:50 -0700 Subject: [PATCH 08/56] program indexer, artist_coins columns, etc --- ddl/functions/handle_artist_coins.sql | 22 +- ddl/functions/handle_damm_v2_pool.sql | 21 - ddl/migrations/0171_artist_coins_pools.sql | 5 + .../{token => common}/balance_changes.go | 4 +- .../{token => common}/balance_changes_test.go | 2 +- solana/indexer/common/utils.go | 10 +- solana/indexer/damm_v2/indexer.go | 60 +-- solana/indexer/{ => dbc}/dbc.go | 2 +- solana/indexer/dbc/indexer.go | 426 ++++++++++++++++++ solana/indexer/program/indexer.go | 204 +++++---- solana/indexer/solana_indexer.go | 30 +- solana/indexer/token/indexer.go | 59 +-- 12 files changed, 672 insertions(+), 173 deletions(-) delete mode 100644 ddl/functions/handle_damm_v2_pool.sql create mode 100644 ddl/migrations/0171_artist_coins_pools.sql rename solana/indexer/{token => common}/balance_changes.go (98%) rename solana/indexer/{token => common}/balance_changes_test.go (99%) rename solana/indexer/{ => dbc}/dbc.go (99%) create mode 100644 solana/indexer/dbc/indexer.go diff --git a/ddl/functions/handle_artist_coins.sql b/ddl/functions/handle_artist_coins.sql index fb3c3049..5a972a2c 100644 --- a/ddl/functions/handle_artist_coins.sql +++ b/ddl/functions/handle_artist_coins.sql @@ -1,7 +1,27 @@ CREATE OR REPLACE FUNCTION handle_artist_coins_change() RETURNS trigger AS $$ BEGIN - PERFORM pg_notify('artist_coins_changed', json_build_object('operation', TG_OP, 'new_mint', NEW.mint, 'old_mint', OLD.mint)::text); + IF (OLD.mint IS NULL AND NEW.mint IS NOT NULL) + OR (OLD.mint IS NOT NULL AND NEW.mint IS NULL) + OR OLD.mint != NEW.mint + THEN + PERFORM pg_notify('artist_coins_mint_changed', NEW.mint); + END IF; + + IF (OLD.dbc_pool IS NULL AND NEW.dbc_pool IS NOT NULL) + OR (OLD.dbc_pool IS NOT NULL AND NEW.dbc_pool IS NULL) + OR OLD.dbc_pool != NEW.dbc_pool + THEN + PERFORM pg_notify('artist_coins_dbc_pool_changed', NEW.dbc_pool); + END IF; + + IF (OLD.damm_v2_pool IS NULL AND NEW.damm_v2_pool IS NOT NULL) + OR (OLD.damm_v2_pool IS NOT NULL AND NEW.damm_v2_pool IS NULL) + OR OLD.damm_v2_pool != NEW.damm_v2_pool + THEN + PERFORM pg_notify('artist_coins_damm_v2_pool_changed', NEW.damm_v2_pool); + END IF; + RETURN NEW; EXCEPTION WHEN OTHERS THEN diff --git a/ddl/functions/handle_damm_v2_pool.sql b/ddl/functions/handle_damm_v2_pool.sql deleted file mode 100644 index b5515b48..00000000 --- a/ddl/functions/handle_damm_v2_pool.sql +++ /dev/null @@ -1,21 +0,0 @@ -CREATE OR REPLACE FUNCTION handle_meteora_dbc_migrations() -RETURNS trigger AS $$ -BEGIN - PERFORM pg_notify('meteora_dbc_migration', json_build_object('operation', TG_OP)::text); - RETURN NEW; - EXCEPTION - WHEN OTHERS THEN - RAISE WARNING 'An error occurred in %: %', TG_NAME, SQLERRM; - RETURN NULL; -END; -$$ LANGUAGE plpgsql; - -DO $$ -BEGIN - CREATE TRIGGER on_meteora_dbc_migrations - AFTER INSERT OR DELETE ON sol_meteora_dbc_migrations - FOR EACH ROW EXECUTE FUNCTION handle_meteora_dbc_migrations(); -EXCEPTION - 
WHEN others THEN NULL; -- Ignore if trigger already exists -END $$; -COMMENT ON TRIGGER on_meteora_dbc_migrations ON sol_meteora_dbc_migrations IS 'Notifies when a DBC pool migrates to a DAMM V2 pool.' \ No newline at end of file diff --git a/ddl/migrations/0171_artist_coins_pools.sql b/ddl/migrations/0171_artist_coins_pools.sql new file mode 100644 index 00000000..cb60eee0 --- /dev/null +++ b/ddl/migrations/0171_artist_coins_pools.sql @@ -0,0 +1,5 @@ +ALTER TABLE IF EXISTS artist_coins +ADD COLUMN IF NOT EXISTS dbc_pool TEXT, +ADD COLUMN IF NOT EXISTS damm_v2_pool TEXT; +COMMENT ON COLUMN artist_coins.dbc_pool IS 'The associated DBC pool address for this artist coin, if any. Used in solana indexer.'; +COMMENT ON COLUMN artist_coins.damm_v2_pool IS 'The canonical DAMM V2 pool address for this artist coin, if any. Used in solana indexer.'; \ No newline at end of file diff --git a/solana/indexer/token/balance_changes.go b/solana/indexer/common/balance_changes.go similarity index 98% rename from solana/indexer/token/balance_changes.go rename to solana/indexer/common/balance_changes.go index 08e3b30a..d9fbdcbd 100644 --- a/solana/indexer/token/balance_changes.go +++ b/solana/indexer/common/balance_changes.go @@ -1,4 +1,4 @@ -package token +package common import ( "context" @@ -15,7 +15,7 @@ import ( "go.uber.org/zap" ) -func processBalanceChanges( +func ProcessBalanceChanges( ctx context.Context, db database.DBTX, slot uint64, diff --git a/solana/indexer/token/balance_changes_test.go b/solana/indexer/common/balance_changes_test.go similarity index 99% rename from solana/indexer/token/balance_changes_test.go rename to solana/indexer/common/balance_changes_test.go index af0aafd8..3742974b 100644 --- a/solana/indexer/token/balance_changes_test.go +++ b/solana/indexer/common/balance_changes_test.go @@ -1,4 +1,4 @@ -package token +package common import ( "testing" diff --git a/solana/indexer/common/utils.go b/solana/indexer/common/utils.go index 8dd15521..9e3eba6e 100644 --- a/solana/indexer/common/utils.go +++ b/solana/indexer/common/utils.go @@ -2,6 +2,7 @@ package common import ( "context" + "errors" "fmt" "time" @@ -67,7 +68,7 @@ func WatchPgNotification(ctx context.Context, pool database.DbPool, notification if rawConn != nil && !rawConn.PgConn().IsClosed() && ctx.Err() != nil { _, _ = rawConn.Exec(ctx, fmt.Sprintf(`UNLISTEN %s`, notification)) } - childLogger.Info("received shutdown signal, stopping notification watcher") + childLogger.Debug("received shutdown signal, stopping notification watcher") conn.Release() }() for { @@ -79,12 +80,17 @@ func WatchPgNotification(ctx context.Context, pool database.DbPool, notification notif, err := rawConn.WaitForNotification(ctx) if err != nil { - childLogger.Error("failed waiting for notification", zap.Error(err)) + if !errors.Is(err, context.Canceled) { + childLogger.Error("failed waiting for notification", zap.Error(err)) + } + continue } if notif == nil { childLogger.Warn("received nil notification, continuing to wait for notifications") continue } + + childLogger.Debug("received notification", zap.String("payload", notif.Payload)) callback(ctx, notif) } }() diff --git a/solana/indexer/damm_v2/indexer.go b/solana/indexer/damm_v2/indexer.go index c585e6ad..e1c8e9b2 100644 --- a/solana/indexer/damm_v2/indexer.go +++ b/solana/indexer/damm_v2/indexer.go @@ -22,10 +22,11 @@ type Indexer struct { logger *zap.Logger } -const DAMM_V2_INDEXER_NAME = "damm_v2" -const MAX_DAMM_V2_POOLS_PER_SUBSCRIPTION = 10000 -const DAMM_V2_POOL_SUBSCRIPTION_KEY = 
"dammV2Pools" -const DBC_MIGRATION_NOTIFICATION_NAME = "meteora_dbc_migration" +const ( + NAME = "damm_v2" + MAX_POOLS_PER_SUBSCRIPTION = 10000 // Arbitrary + NOTIFICATION_NAME = "artist_coins_damm_v2_pool_changed" +) func New( config common.GrpcConfig, @@ -69,7 +70,8 @@ func (d *Indexer) Start(ctx context.Context) { } // Resubscribe to all DAMM V2 pools - clients, err := d.subscribeToDammV2Pools(subCtx) + // TODO: Optimize this to only add/remove DAMM V2 pools instead of resubscribing to all + clients, err := d.subscribe(subCtx) grpcClients = clients if err != nil { d.logger.Error("failed to resubscribe to DAMM V2 pools", zap.Error(err)) @@ -81,7 +83,7 @@ func (d *Indexer) Start(ctx context.Context) { } // Setup initial subscription - clients, err := d.subscribeToDammV2Pools(ctx) + clients, err := d.subscribe(ctx) if err != nil { d.logger.Error("failed to subscribe to DAMM V2 pools", zap.Error(err)) return @@ -89,7 +91,7 @@ func (d *Indexer) Start(ctx context.Context) { grpcClients = clients // Watch for new pools to be added - err = common.WatchPgNotification(ctx, d.pool, DBC_MIGRATION_NOTIFICATION_NAME, handleNotif, d.logger) + err = common.WatchPgNotification(ctx, d.pool, NOTIFICATION_NAME, handleNotif, d.logger) if err != nil { d.logger.Error("failed to watch for DAMM V2 pool changes", zap.Error(err)) return @@ -126,7 +128,7 @@ func (d *Indexer) HandleUpdate(ctx context.Context, msg *pb.SubscribeUpdate) err // Handle DAMM V2 account updates accUpdate := msg.GetAccount() if accUpdate != nil { - if msg.Filters[0] == DAMM_V2_POOL_SUBSCRIPTION_KEY { + if msg.Filters[0] == NAME { err := processDammV2PoolUpdate(ctx, d.pool, accUpdate) if err != nil { return fmt.Errorf("failed to process DAMM V2 pool update: %w", err) @@ -143,34 +145,34 @@ func (d *Indexer) HandleUpdate(ctx context.Context, msg *pb.SubscribeUpdate) err return nil } -func (d *Indexer) subscribeToDammV2Pools(ctx context.Context) ([]common.GrpcClient, error) { +func (d *Indexer) subscribe(ctx context.Context) ([]common.GrpcClient, error) { done := false page := 0 - pageSize := MAX_DAMM_V2_POOLS_PER_SUBSCRIPTION + pageSize := MAX_POOLS_PER_SUBSCRIPTION total := 0 grpcClients := make([]common.GrpcClient, 0) for !done { - dammV2Pools, err := getWatchedDammV2Pools(ctx, d.pool, pageSize, page*pageSize) + pools, err := getSubscribedDammV2Pools(ctx, d.pool, pageSize, page*pageSize) if err != nil { - return nil, fmt.Errorf("failed to get watched DAMM V2 pools: %w", err) + return nil, fmt.Errorf("failed to get pools: %w", err) } - if len(dammV2Pools) == 0 { - d.logger.Info("no DAMM V2 pools to subscribe to") + if len(pools) == 0 { + d.logger.Info("no pools to subscribe to") return grpcClients, nil } - total += len(dammV2Pools) + total += len(pools) - d.logger.Debug("subscribing to DAMM V2 pools....", zap.Int("numPools", len(dammV2Pools))) - subscription := d.makeDammV2SubscriptionRequest(ctx, dammV2Pools) + d.logger.Debug("subscribing to pools....", zap.Int("numPools", len(pools))) + subscription := d.makeSubscriptionRequest(ctx, pools) // Handle each message from the subscription handleMessage := func(ctx context.Context, msg *pb.SubscribeUpdate) { err := d.HandleUpdate(ctx, msg) if err != nil { - d.logger.Error("failed to handle DAMM V2 update", zap.Error(err)) + d.logger.Error("failed to handle update", zap.Error(err)) // Add messages that failed to process to the retry queue - if err := common.AddToRetryQueue(ctx, d.pool, DAMM_V2_INDEXER_NAME, msg, err.Error()); err != nil { + if err := common.AddToRetryQueue(ctx, d.pool, NAME, msg, 
err.Error()); err != nil { d.logger.Error("failed to add to retry queue", zap.Error(err)) } } @@ -178,23 +180,23 @@ func (d *Indexer) subscribeToDammV2Pools(ctx context.Context) ([]common.GrpcClie grpcClient := common.NewGrpcClient(d.grpcConfig) err = grpcClient.Subscribe(ctx, subscription, handleMessage, func(err error) { - d.logger.Error("error in DAMM V2 subscription", zap.Error(err)) + d.logger.Error("error in subscription", zap.Error(err)) }) if err != nil { - return nil, fmt.Errorf("failed to subscribe to DAMM V2 pools: %w", err) + return nil, fmt.Errorf("failed to subscribe to pools: %w", err) } grpcClients = append(grpcClients, grpcClient) - if len(dammV2Pools) < pageSize { + if len(pools) < pageSize { done = true } page++ } - d.logger.Info("subscribed to DAMM V2 pools", zap.Int("numPools", total)) + d.logger.Info("subscribed to pools", zap.Int("count", total)) return grpcClients, nil } -func (d *Indexer) makeDammV2SubscriptionRequest(ctx context.Context, dammV2Pools []string) *pb.SubscribeRequest { +func (d *Indexer) makeSubscriptionRequest(ctx context.Context, dammV2Pools []string) *pb.SubscribeRequest { commitment := pb.CommitmentLevel_CONFIRMED subscription := &pb.SubscribeRequest{ Commitment: &commitment, @@ -206,7 +208,7 @@ func (d *Indexer) makeDammV2SubscriptionRequest(ctx context.Context, dammV2Pools Owner: []string{meteora_damm_v2.ProgramID.String()}, Account: dammV2Pools, } - subscription.Accounts[DAMM_V2_POOL_SUBSCRIPTION_KEY] = &accountFilter + subscription.Accounts[NAME] = &accountFilter // Listen to all positions for each pool for _, pool := range dammV2Pools { @@ -234,7 +236,7 @@ func (d *Indexer) makeDammV2SubscriptionRequest(ctx context.Context, dammV2Pools } // Ensure this subscription has a checkpoint - checkpointId, fromSlot, err := common.EnsureCheckpoint(ctx, DAMM_V2_INDEXER_NAME, d.pool, d.rpcClient, subscription, d.logger) + checkpointId, fromSlot, err := common.EnsureCheckpoint(ctx, NAME, d.pool, d.rpcClient, subscription, d.logger) if err != nil { d.logger.Error("failed to ensure checkpoint", zap.Error(err)) } @@ -305,10 +307,12 @@ func processDammV2PositionUpdate( return nil } -func getWatchedDammV2Pools(ctx context.Context, db database.DBTX, limit int, offset int) ([]string, error) { +// Gets the canonical DAMM V2 pools from the artist coins table. 
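+// Note: the query below pages with LIMIT/OFFSET but has no ORDER BY, so
+// Postgres gives no ordering guarantee and concurrent writes can skip or
+// duplicate pools across pages. A sketch of a deterministic variant, assuming
+// stable paging matters here:
+//
+//	SELECT damm_v2_pool
+//	FROM artist_coins
+//	WHERE damm_v2_pool IS NOT NULL
+//	ORDER BY mint
+//	LIMIT @limit OFFSET @offset;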
+func getSubscribedDammV2Pools(ctx context.Context, db database.DBTX, limit int, offset int) ([]string, error) { sql := ` SELECT damm_v2_pool - FROM sol_meteora_dbc_migrations + FROM artist_coins + WHERE damm_v2_pool IS NOT NULL LIMIT @limit OFFSET @offset ;` rows, err := db.Query(ctx, sql, pgx.NamedArgs{ diff --git a/solana/indexer/dbc.go b/solana/indexer/dbc/dbc.go similarity index 99% rename from solana/indexer/dbc.go rename to solana/indexer/dbc/dbc.go index 1bf732eb..b07db725 100644 --- a/solana/indexer/dbc.go +++ b/solana/indexer/dbc/dbc.go @@ -1,4 +1,4 @@ -package indexer +package dbc import ( "context" diff --git a/solana/indexer/dbc/indexer.go b/solana/indexer/dbc/indexer.go new file mode 100644 index 00000000..94b612cf --- /dev/null +++ b/solana/indexer/dbc/indexer.go @@ -0,0 +1,426 @@ +package dbc + +import ( + "context" + "fmt" + + "api.audius.co/database" + "api.audius.co/solana/indexer/common" + "api.audius.co/solana/spl/programs/meteora_damm_v2" + "api.audius.co/solana/spl/programs/meteora_dbc" + bin "github.com/gagliardetto/binary" + "github.com/gagliardetto/solana-go" + "github.com/gagliardetto/solana-go/rpc" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgconn" + "github.com/maypok86/otter" + pb "github.com/rpcpool/yellowstone-grpc/examples/golang/proto" + "go.uber.org/zap" +) + +type Indexer struct { + pool database.DbPool + grpcConfig common.GrpcConfig + rpcClient common.RpcClient + transactionCache *otter.Cache[solana.Signature, *rpc.GetTransactionResult] + logger *zap.Logger +} + +const ( + NAME = "dbc" + MAX_POOLS_PER_SUBSCRIPTION = 10000 // Arbitrary + NOTIFICATION_NAME = "artist_coins_dbc_pool_changed" +) + +func New( + grpcConfig common.GrpcConfig, + rpcClient common.RpcClient, + pool database.DbPool, + transactionCache *otter.Cache[solana.Signature, *rpc.GetTransactionResult], + logger *zap.Logger, +) *Indexer { + return &Indexer{ + pool: pool, + grpcConfig: grpcConfig, + rpcClient: rpcClient, + transactionCache: transactionCache, + logger: logger.Named("DBCIndexer"), + } +} + +func (d *Indexer) Start(ctx context.Context) { + // To ensure only one subscription task is running at a time, keep track of + // the last cancel function and call it on the next notification. 
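+	// Note: the shutdown wait at the bottom of this function selects on
+	// ctx.Done() with an empty default case, which busy-spins a CPU core.
+	// A minimal blocking sketch, assuming nothing else needs to run in this
+	// goroutine once subscriptions are set up:
+	//
+	//	<-ctx.Done()
+	//	d.logger.Info("received shutdown signal, stopping indexer")
+	//	return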
+ var lastCancel context.CancelFunc + + // Ensure all gRPC clients are closed on shutdown + var grpcClients []common.GrpcClient + defer (func() { + for _, client := range grpcClients { + client.Close() + } + })() + + // On notification, cancel the previous subscription task (if any) and start a new one + handleNotif := func(ctx context.Context, notification *pgconn.Notification) { + subCtx, cancel := context.WithCancel(ctx) + + // Cancel previous subscription task + if lastCancel != nil { + lastCancel() + } + + // Close previous gRPC clients + for _, client := range grpcClients { + client.Close() + } + + // Resubscribe to all DBC pools + // TODO: Optimize this to only add/remove DBC pools instead of resubscribing to all + clients, err := d.subscribe(subCtx) + grpcClients = clients + if err != nil { + d.logger.Error("failed to resubscribe to DBC pools", zap.Error(err)) + cancel() + return + } + + lastCancel = cancel + } + + // Setup initial subscription + clients, err := d.subscribe(ctx) + if err != nil { + d.logger.Error("failed to subscribe to DAMM V2 pools", zap.Error(err)) + return + } + grpcClients = clients + + // Watch for new pools to be added + err = common.WatchPgNotification(ctx, d.pool, NOTIFICATION_NAME, handleNotif, d.logger) + if err != nil { + d.logger.Error("failed to watch for DBC pool changes", zap.Error(err)) + return + } + + // Wait for shutdown + for { + select { + case <-ctx.Done(): + d.logger.Info("received shutdown signal, stopping indexer") + return + default: + } + } +} + +func (d *Indexer) HandleUpdate(ctx context.Context, msg *pb.SubscribeUpdate) error { + // Handle slot updates + slotUpdate := msg.GetSlot() + if slotUpdate != nil { + // only update every 10 slots to reduce db load and write latency + if slotUpdate.Slot%10 == 0 { + // Use the filter as the checkpoint ID + checkpointId := msg.Filters[0] + + err := common.UpdateCheckpoint(ctx, d.pool, checkpointId, slotUpdate.Slot) + if err != nil { + d.logger.Error("failed to update slot checkpoint", zap.Error(err)) + } + } + } + + // Handle account updates + accountUpdate := msg.GetAccount() + if accountUpdate != nil { + var pool meteora_dbc.Pool + err := bin.NewBorshDecoder(accountUpdate.Account.Data).Decode(&pool) + if err != nil { + return fmt.Errorf("failed to decode DBC pool account: %w", err) + } + + account := solana.PublicKeyFromBytes(accountUpdate.Account.Pubkey) + err = upsertDbcPool(ctx, d.pool, accountUpdate.Slot, account, &pool) + if err != nil { + return fmt.Errorf("failed to upsert DBC pool: %w", err) + } + + // If the pool is migrated, check for the migration transaction and process it + if pool.IsMigrated == uint8(1) { + txSig := solana.SignatureFromBytes(accountUpdate.Account.TxnSignature) + + // Fetch the transaction details + txRes, err := common.FetchTransactionWithCache(ctx, d.transactionCache, d.rpcClient, txSig) + if err != nil { + return fmt.Errorf("failed to fetch transaction: %w", err) + } + + // Decode the transaction + tx, err := txRes.Transaction.GetTransaction() + if err != nil { + return fmt.Errorf("failed to decode transaction: %w", err) + } + + // Process the transaction + err = d.processTransaction(ctx, accountUpdate.Slot, tx) + } + } + return nil +} + +func (d *Indexer) subscribe(ctx context.Context) ([]common.GrpcClient, error) { + done := false + page := 0 + pageSize := MAX_POOLS_PER_SUBSCRIPTION + total := 0 + grpcClients := make([]common.GrpcClient, 0) + for !done { + pools, err := getSubscribedDbcPools(ctx, d.pool, pageSize, page*pageSize) + if err != nil { + return nil, 
fmt.Errorf("failed to get pools: %w", err) + } + if len(pools) == 0 { + d.logger.Info("no pools to subscribe to") + return grpcClients, nil + } + total += len(pools) + + d.logger.Debug("subscribing to pools....", zap.Int("numPools", len(pools))) + subscription := d.makeSubscriptionRequest(ctx, pools) + + // Handle each message from the subscription + handleMessage := func(ctx context.Context, msg *pb.SubscribeUpdate) { + err := d.HandleUpdate(ctx, msg) + if err != nil { + d.logger.Error("failed to handle update", zap.Error(err)) + + // Add messages that failed to process to the retry queue + if err := common.AddToRetryQueue(ctx, d.pool, NAME, msg, err.Error()); err != nil { + d.logger.Error("failed to add to retry queue", zap.Error(err)) + } + } + } + + grpcClient := common.NewGrpcClient(d.grpcConfig) + err = grpcClient.Subscribe(ctx, subscription, handleMessage, func(err error) { + d.logger.Error("error in subscription", zap.Error(err)) + }) + if err != nil { + return nil, fmt.Errorf("failed to subscribe to pools: %w", err) + } + grpcClients = append(grpcClients, grpcClient) + + if len(pools) < pageSize { + done = true + } + page++ + } + d.logger.Info("subscribed to pools", zap.Int("count", total)) + return grpcClients, nil +} + +func (d *Indexer) makeSubscriptionRequest(ctx context.Context, pools []string) *pb.SubscribeRequest { + commitment := pb.CommitmentLevel_CONFIRMED + subscription := &pb.SubscribeRequest{ + Commitment: &commitment, + } + + // Listen to all watched pools + subscription.Accounts = make(map[string]*pb.SubscribeRequestFilterAccounts) + accountFilter := pb.SubscribeRequestFilterAccounts{ + Owner: []string{meteora_damm_v2.ProgramID.String()}, + Account: pools, + } + subscription.Accounts[NAME] = &accountFilter + + // Ensure this subscription has a checkpoint + checkpointId, fromSlot, err := common.EnsureCheckpoint(ctx, NAME, d.pool, d.rpcClient, subscription, d.logger) + if err != nil { + d.logger.Error("failed to ensure checkpoint", zap.Error(err)) + } + + // Set the from slot for the subscription + subscription.FromSlot = &fromSlot + + // Listen for slots for making checkpoints + subscription.Slots = make(map[string]*pb.SubscribeRequestFilterSlots) + subscription.Slots[checkpointId] = &pb.SubscribeRequestFilterSlots{} + + return subscription +} + +func (i *Indexer) processTransaction(ctx context.Context, slot uint64, tx *solana.Transaction) error { + signature := tx.Signatures[0].String() + logger := i.logger.With( + zap.String("signature", signature), + zap.Uint64("slot", slot), + ) + + // Process individual instructions + for instructionIndex, instruction := range tx.Message.Instructions { + programId := tx.Message.AccountKeys[instruction.ProgramIDIndex] + instLogger := logger.With( + zap.String("programId", programId.String()), + zap.Int("instructionIndex", instructionIndex), + ) + switch programId { + case meteora_dbc.ProgramID: + { + err := processDbcInstruction(ctx, i.pool, i.rpcClient, slot, tx, instructionIndex, instruction, signature, instLogger) + if err != nil { + return fmt.Errorf("error processing meteora_dbc instruction %d: %w", instructionIndex, err) + } + } + } + } + return nil +} + +// Gets the canonical DBC pools from the artist coins table. 
+func getSubscribedDbcPools(ctx context.Context, db database.DBTX, limit int, offset int) ([]string, error) { + sql := ` + SELECT dbc_pool + FROM artist_coins + WHERE dbc_pool IS NOT NULL + LIMIT @limit OFFSET @offset + ;` + rows, err := db.Query(ctx, sql, pgx.NamedArgs{ + "limit": limit, + "offset": offset, + }) + if err != nil { + return nil, err + } + defer rows.Close() + + var pools []string + for rows.Next() { + var address string + if err := rows.Scan(&address); err != nil { + return nil, err + } + pools = append(pools, address) + } + return pools, nil +} + +func upsertDbcPool( + ctx context.Context, + db database.DBTX, + slot uint64, + account solana.PublicKey, + pool *meteora_dbc.Pool, +) error { + sql := ` + INSERT INTO sol_meteora_dbc_pools ( + address, + slot, + config, + creator, + base_mint, + base_vault, + quote_vault, + base_reserve, + quote_reserve, + protocol_base_fee, + partner_base_fee, + partner_quote_fee, + sqrt_price, + activation_point, + pool_type, + is_migrated, + is_partner_withdraw_surplus, + is_protocol_withdraw_surplus, + migration_progress, + is_withdraw_leftover, + is_creator_withdraw_surplus, + migration_fee_withdraw_status, + finish_curve_timestamp, + creator_base_fee, + creator_quote_fee, + created_at, + updated_at + ) VALUES ( + @address, + @slot, + @config, + @creator, + @base_mint, + @base_vault, + @quote_vault, + @base_reserve, + @quote_reserve, + @protocol_base_fee, + @partner_base_fee, + @partner_quote_fee, + @sqrt_price, + @activation_point, + @pool_type, + @is_migrated, + @is_partner_withdraw_surplus, + @is_protocol_withdraw_surplus, + @migration_progress, + @is_withdraw_leftover, + @is_creator_withdraw_surplus, + @migration_fee_withdraw_status, + @finish_curve_timestamp, + @creator_base_fee, + @creator_quote_fee, + NOW(), + NOW() + ) ON CONFLICT (address) DO UPDATE SET + slot = EXCLUDED.slot, + config = EXCLUDED.config, + creator = EXCLUDED.creator, + base_mint = EXCLUDED.base_mint, + base_vault = EXCLUDED.base_vault, + quote_vault = EXCLUDED.quote_vault, + base_reserve = EXCLUDED.base_reserve, + quote_reserve = EXCLUDED.quote_reserve, + protocol_base_fee = EXCLUDED.protocol_base_fee, + partner_base_fee = EXCLUDED.partner_base_fee, + partner_quote_fee = EXCLUDED.partner_quote_fee, + sqrt_price = EXCLUDED.sqrt_price, + activation_point = EXCLUDED.activation_point, + pool_type = EXCLUDED.pool_type, + is_migrated = EXCLUDED.is_migrated, + is_partner_withdraw_surplus = EXCLUDED.is_partner_withdraw_surplus, + is_protocol_withdraw_surplus = EXCLUDED.is_protocol_withdraw_surplus, + migration_progress = EXCLUDED.migration_progress, + is_withdraw_leftover = EXCLUDED.is_withdraw_leftover, + is_creator_withdraw_surplus = EXCLUDED.is_creator_withdraw_surplus, + migration_fee_withdraw_status = EXCLUDED.migration_fee_withdraw_status, + finish_curve_timestamp = EXCLUDED.finish_curve_timestamp, + creator_base_fee = EXCLUDED.creator_base_fee, + creator_quote_fee = EXCLUDED.creator_quote_fee, + updated_at = NOW() + ;` + _, err := db.Exec(ctx, sql, pgx.NamedArgs{ + "address": account.String(), + "slot": slot, + "config": pool.Config.String(), + "creator": pool.Creator.String(), + "base_mint": pool.BaseMint.String(), + "base_vault": pool.BaseVault.String(), + "quote_vault": pool.QuoteVault.String(), + "base_reserve": pool.BaseReserve, + "quote_reserve": pool.QuoteReserve, + "protocol_base_fee": pool.ProtocolBaseFee, + "partner_base_fee": pool.PartnerBaseFee, + "partner_quote_fee": pool.PartnerQuoteFee, + "sqrt_price": pool.SqrtPrice.BigInt(), + "activation_point": 
pool.ActivationPoint, + "pool_type": pool.PoolType, + "is_migrated": pool.IsMigrated, + "is_partner_withdraw_surplus": pool.IsPartnerWithdrawSurplus, + "is_protocol_withdraw_surplus": pool.IsProtocolWithdrawSurplus, + "migration_progress": pool.MigrationProgress, + "is_withdraw_leftover": pool.IsWithdrawLeftover, + "is_creator_withdraw_surplus": pool.IsCreatorWithdrawSurplus, + "migration_fee_withdraw_status": pool.MigrationFeeWithdrawStatus, + "finish_curve_timestamp": pool.FinishCurveTimestamp, + "creator_base_fee": pool.CreatorBaseFee, + "creator_quote_fee": pool.CreatorQuoteFee, + }) + return err +} diff --git a/solana/indexer/program/indexer.go b/solana/indexer/program/indexer.go index 72579533..b2a42528 100644 --- a/solana/indexer/program/indexer.go +++ b/solana/indexer/program/indexer.go @@ -2,14 +2,18 @@ package program import ( "context" - "encoding/json" "fmt" + "time" "api.audius.co/config" "api.audius.co/database" "api.audius.co/solana/indexer/common" + "api.audius.co/solana/spl/programs/claimable_tokens" + "api.audius.co/solana/spl/programs/payment_router" + "api.audius.co/solana/spl/programs/reward_manager" "github.com/gagliardetto/solana-go" "github.com/gagliardetto/solana-go/rpc" + "github.com/maypok86/otter" pb "github.com/rpcpool/yellowstone-grpc/examples/golang/proto" "go.uber.org/zap" ) @@ -17,81 +21,132 @@ import ( const NAME = "program" type Indexer struct { - pool database.DbPool - grpcConfig common.GrpcConfig - rpcClient common.RpcClient - config config.SolanaConfig - logger *zap.Logger + pool database.DbPool + grpcConfig common.GrpcConfig + rpcClient common.RpcClient + config config.Config + transactionCache *otter.Cache[solana.Signature, *rpc.GetTransactionResult] + logger *zap.Logger } func New( grpcConfig common.GrpcConfig, rpcClient common.RpcClient, pool database.DbPool, - config config.SolanaConfig, + config config.Config, + transactionCache *otter.Cache[solana.Signature, *rpc.GetTransactionResult], logger *zap.Logger, ) *Indexer { return &Indexer{ - pool: pool, - grpcConfig: grpcConfig, - rpcClient: rpcClient, - config: config, - logger: logger.Named("ProgramIndexer"), + pool: pool, + grpcConfig: grpcConfig, + rpcClient: rpcClient, + config: config, + transactionCache: transactionCache, + logger: logger.Named("ProgramIndexer"), } } -func (i *Indexer) Start(ctx context.Context) { - client, err := i.subscribe(ctx) +func (d *Indexer) Start(ctx context.Context) { + client, err := d.subscribe(ctx) if err != nil { - i.logger.Fatal("failed to start subscription", zap.Error(err)) + d.logger.Fatal("failed to start subscription", zap.Error(err)) } defer client.Close() - i.logger.Info("subscribed") - // Wait for shutdown for { select { case <-ctx.Done(): - i.logger.Info("received shutdown signal, stopping indexer") + d.logger.Info("received shutdown signal, stopping indexer") return default: } } } -func (i *Indexer) subscribe(ctx context.Context) (common.GrpcClient, error) { - programIds := []string{ - i.config.RewardManagerProgramID.String(), - i.config.PaymentRouterProgramID.String(), - i.config.ClaimableTokensProgramID.String(), +func (d *Indexer) HandleUpdate(ctx context.Context, msg *pb.SubscribeUpdate) error { + // Handle slot updates + slotUpdate := msg.GetSlot() + if slotUpdate != nil { + // only update every 10 slots to reduce db load and write latency + if slotUpdate.Slot%10 == 0 { + // Use the filter as the checkpoint ID + checkpointId := msg.Filters[0] + + err := common.UpdateCheckpoint(ctx, d.pool, checkpointId, slotUpdate.Slot) + if err != nil { + 
d.logger.Error("failed to update slot checkpoint", zap.Error(err)) + } + } } - subscription := i.makeSubscriptionRequest(ctx, programIds) + // Handle transaction updates + txUpdate := msg.GetTransaction() + if txUpdate != nil { + txSig := solana.SignatureFromBytes(txUpdate.Transaction.Signature) + + // Fetch the transaction details + // Note: Could also convert the subscription transaction to a solana.Transaction, + // but that could be error prone and the transaction is probably already in the cache anyway. + // Also, we need the blocktime which the subscription doesn't seem to provide. + txRes, err := common.FetchTransactionWithCache(ctx, d.transactionCache, d.rpcClient, txSig) + if err != nil { + return fmt.Errorf("failed to fetch transaction: %w", err) + } - handleMessage := func(ctx context.Context, update *pb.SubscribeUpdate) { - err := i.HandleUpdate(ctx, update) + // Decode the transaction + tx, err := txRes.Transaction.GetTransaction() if err != nil { - i.logger.Error("failed to handle update", zap.Error(err)) + return fmt.Errorf("failed to decode transaction: %w", err) + } + + // Add the lookup table accounts to the message accounts + tx = common.ResolveLookupTables(ctx, d.rpcClient, tx, txRes.Meta) + + // Process the transaction + d.processTransaction(ctx, txUpdate.Slot, txRes.Meta, tx, txRes.BlockTime.Time()) + + return nil + } + + return nil +} +func (d *Indexer) subscribe(ctx context.Context) (common.GrpcClient, error) { + programIds := []string{ + d.config.SolanaConfig.RewardManagerProgramID.String(), + d.config.SolanaConfig.PaymentRouterProgramID.String(), + d.config.SolanaConfig.ClaimableTokensProgramID.String(), + } + + d.logger.Info("subscribing to programs...", zap.Int("count", len(programIds))) + subscription := d.makeSubscriptionRequest(ctx, programIds) + handleMessage := func(ctx context.Context, update *pb.SubscribeUpdate) { + err := d.HandleUpdate(ctx, update) + if err != nil { + d.logger.Error("failed to handle update", zap.Error(err)) // Add messages that failed to process to the retry queue - if err := common.AddToRetryQueue(ctx, i.pool, NAME, update, err.Error()); err != nil { - i.logger.Error("failed to add to retry queue", zap.Error(err)) + if err := common.AddToRetryQueue(ctx, d.pool, NAME, update, err.Error()); err != nil { + d.logger.Error("failed to add to retry queue", zap.Error(err)) } } } - client := common.NewGrpcClient(i.grpcConfig) + client := common.NewGrpcClient(d.grpcConfig) err := client.Subscribe(ctx, subscription, handleMessage, func(err error) { - i.logger.Error("subscription error", zap.Error(err)) + d.logger.Error("subscription error", zap.Error(err)) }) if err != nil { return nil, fmt.Errorf("failed to start subscription: %w", err) } + + d.logger.Info("subscribed to programs", zap.Int("count", len(programIds))) return client, nil } -func (i *Indexer) makeSubscriptionRequest(ctx context.Context, programIds []string) *pb.SubscribeRequest { +// Makes a subscription to the relevant program IDs and adds slot checkpointing +func (d *Indexer) makeSubscriptionRequest(ctx context.Context, programIds []string) *pb.SubscribeRequest { commitment := pb.CommitmentLevel_CONFIRMED subscription := &pb.SubscribeRequest{ Commitment: &commitment, @@ -104,9 +159,9 @@ func (i *Indexer) makeSubscriptionRequest(ctx context.Context, programIds []stri } // Ensure this subscription has a checkpoint - checkpointId, fromSlot, err := common.EnsureCheckpoint(ctx, NAME, i.pool, i.rpcClient, subscription, i.logger) + checkpointId, fromSlot, err := 
common.EnsureCheckpoint(ctx, NAME, d.pool, d.rpcClient, subscription, d.logger) if err != nil { - i.logger.Error("failed to ensure checkpoint", zap.Error(err)) + d.logger.Error("failed to ensure checkpoint", zap.Error(err)) } // Set the from slot for the subscription @@ -119,52 +174,47 @@ func (i *Indexer) makeSubscriptionRequest(ctx context.Context, programIds []stri return subscription } -func (i *Indexer) HandleUpdate(ctx context.Context, msg *pb.SubscribeUpdate) error { - // Handle slot updates - slotUpdate := msg.GetSlot() - if slotUpdate != nil { - // only update every 10 slots to reduce db load and write latency - if slotUpdate.Slot%10 == 0 { - // Use the filter as the checkpoint ID - checkpointId := msg.Filters[0] - - err := common.UpdateCheckpoint(ctx, i.pool, checkpointId, slotUpdate.Slot) - if err != nil { - i.logger.Error("failed to update slot checkpoint", zap.Error(err)) +func (d *Indexer) processTransaction(ctx context.Context, slot uint64, meta *rpc.TransactionMeta, tx *solana.Transaction, blockTime time.Time) error { + signature := tx.Signatures[0].String() + logger := d.logger.With( + zap.String("signature", signature), + zap.Uint64("slot", slot), + ) + + // Process balance changes for USDC (other tokens will be tracked by TokenIndexer) + common.ProcessBalanceChanges(ctx, d.pool, slot, meta, tx, blockTime, []string{d.config.SolanaConfig.MintUSDC.String()}, logger) + + // Process individual instructions + for instructionIndex, instruction := range tx.Message.Instructions { + programId := tx.Message.AccountKeys[instruction.ProgramIDIndex] + instLogger := logger.With( + zap.String("programId", programId.String()), + zap.Int("instructionIndex", instructionIndex), + ) + switch programId { + case claimable_tokens.ProgramID: + { + err := processClaimableTokensInstruction(ctx, d.pool, slot, tx, instructionIndex, instruction, signature, instLogger) + if err != nil { + return fmt.Errorf("error processing claimable_tokens instruction %d: %w", instructionIndex, err) + } + } + case reward_manager.ProgramID: + { + err := processRewardManagerInstruction(ctx, d.pool, slot, tx, instructionIndex, instruction, signature, instLogger) + if err != nil { + return fmt.Errorf("error processing reward_manager instruction %d: %w", instructionIndex, err) + } + } + case payment_router.ProgramID: + { + err := processPaymentRouterInstruction(ctx, d.pool, slot, tx, instructionIndex, instruction, signature, blockTime, d.config, instLogger) + if err != nil { + return fmt.Errorf("error processing payment_router instruction %d: %w", instructionIndex, err) + } } } } - // Handle transaction updates - txUpdate := msg.GetTransaction() - if txUpdate != nil { - i.logger.Debug("processing transaction...", zap.String("signature", string(txUpdate.Transaction.Transaction.Signatures[0])), zap.Uint64("slot", txUpdate.Slot)) - - bytes, err := json.Marshal(txUpdate.Transaction.Transaction) - if err != nil { - return fmt.Errorf("failed to marshal transaction: %w", err) - } - - var tx solana.Transaction - err = json.Unmarshal(bytes, &tx) - if err != nil { - return fmt.Errorf("failed to unmarshal transaction: %w", err) - } - - metaJson, err := json.Marshal(txUpdate.Transaction.Meta) - if err != nil { - return fmt.Errorf("failed to marshal transaction meta: %w", err) - } - - var meta rpc.TransactionMeta - err = json.Unmarshal(metaJson, &meta) - if err != nil { - return fmt.Errorf("failed to unmarshal transaction meta: %w", err) - } - - tx = *common.ResolveLookupTables(ctx, i.rpcClient, &tx, &meta) - - } - return nil } diff 
--git a/solana/indexer/solana_indexer.go b/solana/indexer/solana_indexer.go index 4ec7e816..ed0c3d66 100644 --- a/solana/indexer/solana_indexer.go +++ b/solana/indexer/solana_indexer.go @@ -7,9 +7,11 @@ import ( "api.audius.co/config" "api.audius.co/database" + "api.audius.co/jobs" "api.audius.co/logging" "api.audius.co/solana/indexer/common" "api.audius.co/solana/indexer/damm_v2" + "api.audius.co/solana/indexer/dbc" "api.audius.co/solana/indexer/program" "api.audius.co/solana/indexer/token" "github.com/gagliardetto/solana-go" @@ -30,6 +32,7 @@ type SolanaIndexer struct { dammV2Indexer *damm_v2.Indexer tokenIndexer *token.Indexer programIndexer *program.Indexer + dbcIndexer *dbc.Indexer checkpointId string @@ -79,7 +82,10 @@ func New(config config.Config) *SolanaIndexer { grpcConfig, rpcClient, pool, &transactionCache, logger, ) programIndexer := program.New( - grpcConfig, rpcClient, pool, config.SolanaConfig, logger, + grpcConfig, rpcClient, pool, config, &transactionCache, logger, + ) + dbcIndexer := dbc.New( + grpcConfig, rpcClient, pool, &transactionCache, logger, ) s := &SolanaIndexer{ @@ -92,6 +98,7 @@ func New(config config.Config) *SolanaIndexer { dammV2Indexer: dammV2Indexer, tokenIndexer: tokenIndexer, programIndexer: programIndexer, + dbcIndexer: dbcIndexer, } return s @@ -100,19 +107,20 @@ func New(config config.Config) *SolanaIndexer { func (s *SolanaIndexer) Start(ctx context.Context) error { go s.ScheduleProcessRetryQueue(ctx, s.config.SolanaIndexerRetryInterval) - // statsJob := jobs.NewCoinStatsJob(s.config, s.pool) - // statsCtx := context.WithoutCancel(ctx) - // statsJob.ScheduleEvery(statsCtx, 5*time.Minute) - // go statsJob.Run(statsCtx) + statsJob := jobs.NewCoinStatsJob(s.config, s.pool) + statsCtx := context.WithoutCancel(ctx) + statsJob.ScheduleEvery(statsCtx, 5*time.Minute) + go statsJob.Run(statsCtx) - // dbcJob := jobs.NewCoinDBCJob(s.config, s.pool) - // dbcCtx := context.WithoutCancel(ctx) - // dbcJob.ScheduleEvery(dbcCtx, 5*time.Minute) - // go dbcJob.Run(dbcCtx) + dbcJob := jobs.NewCoinDBCJob(s.config, s.pool) + dbcCtx := context.WithoutCancel(ctx) + dbcJob.ScheduleEvery(dbcCtx, 5*time.Minute) + go dbcJob.Run(dbcCtx) - // go s.tokenIndexer.Start(ctx) - // go s.dammV2Indexer.Start(ctx) + go s.tokenIndexer.Start(ctx) + go s.dammV2Indexer.Start(ctx) go s.programIndexer.Start(ctx) + go s.dbcIndexer.Start(ctx) for { select { diff --git a/solana/indexer/token/indexer.go b/solana/indexer/token/indexer.go index 39a09ff8..c3fc897b 100644 --- a/solana/indexer/token/indexer.go +++ b/solana/indexer/token/indexer.go @@ -27,7 +27,7 @@ type Indexer struct { } const TOKEN_INDEXER_NAME = "token" -const ARTIST_COIN_NOTIFICATION_NAME = "artist_coins_changed" +const ARTIST_COIN_NOTIFICATION_NAME = "artist_coins_mint_changed" const MAX_ARTIST_COIN_MINTS_PER_SUBSCRIPTION = 10000 const WORKER_CHANNEL_SIZE = 3000 const WORKER_COUNT = 50 @@ -48,7 +48,7 @@ func New( } } -func (t *Indexer) Start(ctx context.Context) { +func (d *Indexer) Start(ctx context.Context) { // To ensure only one subscription task is running at a time, keep track of // the last cancel function and call it on the next notification. 
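 	// Note: updates are fanned out below to WORKER_COUNT goroutines over a
 	// channel buffered to WORKER_CHANNEL_SIZE, so messages may complete out of
 	// order; any update that fails to process is pushed to the retry queue.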
var lastCancel context.CancelFunc @@ -58,13 +58,13 @@ func (t *Indexer) Start(ctx context.Context) { for i := range WORKER_COUNT { go func(workerID int) { for updateMessage := range workerChan { - err := t.HandleUpdate(ctx, updateMessage) + err := d.HandleUpdate(ctx, updateMessage) if err != nil { - t.logger.Error("failed to handle token update", zap.Int("workerID", workerID), zap.Error(err)) + d.logger.Error("failed to handle token update", zap.Int("workerID", workerID), zap.Error(err)) // Add messages that failed to process to the retry queue - if err := common.AddToRetryQueue(ctx, t.pool, TOKEN_INDEXER_NAME, updateMessage, err.Error()); err != nil { - t.logger.Error("failed to add to retry queue", zap.Error(err)) + if err := common.AddToRetryQueue(ctx, d.pool, TOKEN_INDEXER_NAME, updateMessage, err.Error()); err != nil { + d.logger.Error("failed to add to retry queue", zap.Error(err)) } } } @@ -84,7 +84,7 @@ func (t *Indexer) Start(ctx context.Context) { handleUpdate := func(ctx context.Context, message *pb.SubscribeUpdate) { select { case <-ctx.Done(): - t.logger.Warn("context cancelled, not handling update") + d.logger.Warn("context cancelled, not handling update") return case workerChan <- message: } @@ -105,11 +105,11 @@ func (t *Indexer) Start(ctx context.Context) { } // Resubscribe to all artist coins - // TODO: Optimize this to only add/remove new coins instead of resubscribing to all - clients, err := t.subscribeToArtistCoins(subCtx, handleUpdate) + // TODO: Optimize this to only add/remove coins instead of resubscribing to all + clients, err := d.subscribeToArtistCoins(subCtx, handleUpdate) grpcClients = clients if err != nil { - t.logger.Error("failed to resubscribe to artist coins", zap.Error(err)) + d.logger.Error("failed to resubscribe to artist coins", zap.Error(err)) cancel() return } @@ -118,17 +118,17 @@ func (t *Indexer) Start(ctx context.Context) { } // Initial subscription to all artist coins - clients, err := t.subscribeToArtistCoins(ctx, handleUpdate) + clients, err := d.subscribeToArtistCoins(ctx, handleUpdate) if err != nil { - t.logger.Error("failed to subscribe to artist coins", zap.Error(err)) + d.logger.Error("failed to subscribe to artist coins", zap.Error(err)) return } grpcClients = clients // Watch for new coins to be added - err = common.WatchPgNotification(ctx, t.pool, ARTIST_COIN_NOTIFICATION_NAME, handleNotif, t.logger) + err = common.WatchPgNotification(ctx, d.pool, ARTIST_COIN_NOTIFICATION_NAME, handleNotif, d.logger) if err != nil { - t.logger.Error("failed to watch for artist coin changes", zap.Error(err)) + d.logger.Error("failed to watch for artist coin changes", zap.Error(err)) return } @@ -136,7 +136,7 @@ func (t *Indexer) Start(ctx context.Context) { for { select { case <-ctx.Done(): - t.logger.Info("received shutdown signal, stopping artist coin indexer") + d.logger.Info("received shutdown signal, stopping indexer") return default: } @@ -144,7 +144,7 @@ func (t *Indexer) Start(ctx context.Context) { } // Handles a single update message from the gRPC subscription -func (t *Indexer) HandleUpdate(ctx context.Context, msg *pb.SubscribeUpdate) error { +func (d *Indexer) HandleUpdate(ctx context.Context, msg *pb.SubscribeUpdate) error { // Handle slot updates slotUpdate := msg.GetSlot() if slotUpdate != nil { @@ -153,9 +153,9 @@ func (t *Indexer) HandleUpdate(ctx context.Context, msg *pb.SubscribeUpdate) err // Use the filter as the checkpoint ID checkpointId := msg.Filters[0] - err := common.UpdateCheckpoint(ctx, t.pool, checkpointId, 
slotUpdate.Slot) + err := common.UpdateCheckpoint(ctx, d.pool, checkpointId, slotUpdate.Slot) if err != nil { - t.logger.Error("failed to update slot checkpoint", zap.Error(err)) + d.logger.Error("failed to update slot checkpoint", zap.Error(err)) } } } @@ -166,7 +166,7 @@ func (t *Indexer) HandleUpdate(ctx context.Context, msg *pb.SubscribeUpdate) err txSig := solana.SignatureFromBytes(accUpdate.Account.TxnSignature) // Fetch the transaction details - txRes, err := common.FetchTransactionWithCache(ctx, t.transactionCache, t.rpcClient, txSig) + txRes, err := common.FetchTransactionWithCache(ctx, d.transactionCache, d.rpcClient, txSig) if err != nil { return fmt.Errorf("failed to fetch transaction: %w", err) } @@ -178,12 +178,13 @@ func (t *Indexer) HandleUpdate(ctx context.Context, msg *pb.SubscribeUpdate) err } // Add the lookup table accounts to the message accounts - tx = common.ResolveLookupTables(ctx, t.rpcClient, tx, txRes.Meta) + tx = common.ResolveLookupTables(ctx, d.rpcClient, tx, txRes.Meta) // Extract the mints we're tracking using the subscription's filters trackedMints := msg.Filters - err = processBalanceChanges(ctx, t.pool, accUpdate.Slot, txRes.Meta, tx, txRes.BlockTime.Time(), trackedMints, t.logger) + // Process balance changes for this subscription's mints + err = common.ProcessBalanceChanges(ctx, d.pool, accUpdate.Slot, txRes.Meta, tx, txRes.BlockTime.Time(), trackedMints, d.logger) if err != nil { return fmt.Errorf("failed to process balance changes: %w", err) } @@ -191,31 +192,31 @@ func (t *Indexer) HandleUpdate(ctx context.Context, msg *pb.SubscribeUpdate) err return nil } -func (t *Indexer) subscribeToArtistCoins(ctx context.Context, handleUpdate func(ctx context.Context, message *pb.SubscribeUpdate)) ([]common.GrpcClient, error) { +func (d *Indexer) subscribeToArtistCoins(ctx context.Context, handleUpdate func(ctx context.Context, message *pb.SubscribeUpdate)) ([]common.GrpcClient, error) { done := false page := 0 pageSize := MAX_ARTIST_COIN_MINTS_PER_SUBSCRIPTION grpcClients := make([]common.GrpcClient, 0) total := 0 for !done { - mints, err := getArtistCoins(ctx, t.pool, pageSize, page*pageSize) + mints, err := getArtistCoins(ctx, d.pool, pageSize, page*pageSize) if err != nil { return nil, fmt.Errorf("failed to get artist coins: %w", err) } if len(mints) == 0 { - t.logger.Info("no more artist coins to subscribe to, exiting") + d.logger.Info("no more artist coins to subscribe to, exiting") return grpcClients, nil } total += len(mints) - t.logger.Debug("subscribing to artist coins...", zap.Int("numCoins", len(mints))) - subscription, err := t.makeMintSubscriptionRequest(ctx, mints) + d.logger.Debug("subscribing to artist coins...", zap.Int("numCoins", len(mints))) + subscription, err := d.makeMintSubscriptionRequest(ctx, mints) if err != nil { return nil, fmt.Errorf("failed to make mint subscription request: %w", err) } - grpcClient := common.NewGrpcClient(t.grpcConfig) + grpcClient := common.NewGrpcClient(d.grpcConfig) err = grpcClient.Subscribe(ctx, subscription, handleUpdate, func(err error) { - t.logger.Error("error in token subscription", zap.Error(err)) + d.logger.Error("error in token subscription", zap.Error(err)) }) if err != nil { return nil, fmt.Errorf("failed to subscribe to artist coins: %w", err) @@ -227,7 +228,7 @@ func (t *Indexer) subscribeToArtistCoins(ctx context.Context, handleUpdate func( } page++ } - t.logger.Info("subscribed to artist coins", zap.Int("numCoins", total)) + d.logger.Info("subscribed to artist coins", zap.Int("count", total)) 
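+	// Note: one gRPC connection is held per page of up to
+	// MAX_ARTIST_COIN_MINTS_PER_SUBSCRIPTION mints; callers are expected to
+	// close the returned clients on resubscribe or shutdown.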
return grpcClients, nil } From 21cda92fc01ae43cef7b50fe3844961c461486c7 Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Tue, 14 Oct 2025 16:58:06 -0700 Subject: [PATCH 09/56] tests --- solana/indexer/common/checkpoints_test.go | 53 +++++++ solana/indexer/common/pg_notify.go | 69 +++++++++ solana/indexer/common/pg_notify_test.go | 53 +++++++ solana/indexer/common/retry_queue.go | 20 +-- solana/indexer/common/retry_queue_test.go | 53 +++++++ solana/indexer/common/retry_util.go | 35 +++++ solana/indexer/common/retry_util_test.go | 57 ++++++++ solana/indexer/common/transaction.go | 75 ++++++++++ solana/indexer/common/transaction_test.go | 65 +++++++++ solana/indexer/common/utils.go | 162 ---------------------- solana/indexer/damm_v2/indexer.go | 12 +- solana/indexer/dbc/dbc.go | 70 ++++------ solana/indexer/dbc/indexer.go | 14 +- solana/indexer/token/indexer.go | 22 +-- 14 files changed, 520 insertions(+), 240 deletions(-) create mode 100644 solana/indexer/common/pg_notify.go create mode 100644 solana/indexer/common/pg_notify_test.go create mode 100644 solana/indexer/common/retry_queue_test.go create mode 100644 solana/indexer/common/retry_util.go create mode 100644 solana/indexer/common/retry_util_test.go create mode 100644 solana/indexer/common/transaction.go create mode 100644 solana/indexer/common/transaction_test.go delete mode 100644 solana/indexer/common/utils.go diff --git a/solana/indexer/common/checkpoints_test.go b/solana/indexer/common/checkpoints_test.go index fab3be23..a6895277 100644 --- a/solana/indexer/common/checkpoints_test.go +++ b/solana/indexer/common/checkpoints_test.go @@ -1,11 +1,17 @@ package common import ( + "context" "testing" "api.audius.co/database" + "api.audius.co/solana/indexer/fake_rpc_client" + "github.com/gagliardetto/solana-go/rpc" "github.com/rpcpool/yellowstone-grpc/examples/golang/proto" + pb "github.com/rpcpool/yellowstone-grpc/examples/golang/proto" + "github.com/stretchr/testify/require" "github.com/test-go/testify/assert" + "go.uber.org/zap" ) func TestCheckpoints(t *testing.T) { @@ -28,3 +34,50 @@ func TestCheckpoints(t *testing.T) { assert.NoError(t, err, "failed to insert backfill checkpoint") assert.NotEmpty(t, id2, "backfill checkpoint ID should not be empty") } + +func TestEnsureCheckpoints(t *testing.T) { + pool := database.CreateTestDatabase(t, "test_solana_indexer_common") + defer pool.Close() + + // Setup mock RPC clients + rpcClientInit := &fake_rpc_client.FakeRpcClient{ + GetSlotFunc: func(ctx context.Context, commitment rpc.CommitmentType) (uint64, error) { + return 200, nil + }, + } + + rpcClientWithinMax := &fake_rpc_client.FakeRpcClient{ + GetSlotFunc: func(ctx context.Context, commitment rpc.CommitmentType) (uint64, error) { + return 2500, nil + }, + } + + rpcClientBeyondMax := &fake_rpc_client.FakeRpcClient{ + GetSlotFunc: func(ctx context.Context, commitment rpc.CommitmentType) (uint64, error) { + return 5000, nil + }, + } + + logger := zap.NewNop() + commitment := pb.CommitmentLevel_CONFIRMED + req := proto.SubscribeRequest{ + Commitment: &commitment, + } + checkpointId, slot, err := EnsureCheckpoint(t.Context(), "backfill", pool, rpcClientInit, &req, logger) + require.NoError(t, err, "failed to ensure checkpoint") + assert.NotEmpty(t, checkpointId, "checkpoint ID should not be empty") + assert.Equal(t, uint64(100), slot, "slot should be current slot - 100 for new subscription") + initialId := checkpointId + + checkpointId, slot, err = EnsureCheckpoint(t.Context(), "backfill", 
pool, rpcClientWithinMax, &req, logger) + require.NoError(t, err, "failed to ensure checkpoint") + assert.NotEmpty(t, checkpointId, "checkpoint ID should not be empty") + assert.Equal(t, uint64(100), slot, "slot should be last indexed slot") + + checkpointId, slot, err = EnsureCheckpoint(t.Context(), "backfill", pool, rpcClientBeyondMax, &req, logger) + require.NoError(t, err, "failed to ensure checkpoint") + assert.NotEmpty(t, checkpointId, "checkpoint ID should not be empty") + assert.NotEqual(t, initialId, checkpointId, "checkpoint ID should be new") + assert.Equal(t, uint64(2500), slot, "slot should be current slot - MAX_SLOT_GAP") + +} diff --git a/solana/indexer/common/pg_notify.go b/solana/indexer/common/pg_notify.go new file mode 100644 index 00000000..62cdbe81 --- /dev/null +++ b/solana/indexer/common/pg_notify.go @@ -0,0 +1,69 @@ +package common + +import ( + "context" + "errors" + "fmt" + + "api.audius.co/database" + "github.com/jackc/pgx/v5/pgconn" + "go.uber.org/zap" +) + +type notificationCallback func(ctx context.Context, notification *pgconn.Notification) + +// Listens for a notification and fires a callback when one is received. +// The function spawns a goroutine to listen for notifications, so it returns +// immediately. The caller should ensure the context is cancelled when they want +// to stop listening and wait indefinitely to listen. +func WatchPgNotification(ctx context.Context, pool database.DbPool, notification string, callback notificationCallback, logger *zap.Logger) error { + if logger == nil { + logger = zap.NewNop() + } + + childLogger := logger.With(zap.String("notification", notification)) + + conn, err := pool.Acquire(ctx) + if err != nil { + return fmt.Errorf("failed to acquire database connection: %w", err) + } + + rawConn := conn.Conn() + _, err = rawConn.Exec(ctx, fmt.Sprintf(`LISTEN %s`, notification)) + if err != nil { + return fmt.Errorf("failed to listen for %s changes: %w", notification, err) + } + + go func() { + defer func() { + if rawConn != nil && !rawConn.PgConn().IsClosed() && ctx.Err() != nil { + _, _ = rawConn.Exec(ctx, fmt.Sprintf(`UNLISTEN %s`, notification)) + } + childLogger.Debug("received shutdown signal, stopping notification watcher") + conn.Release() + }() + for { + select { + case <-ctx.Done(): + return + default: + } + + notif, err := rawConn.WaitForNotification(ctx) + if err != nil { + if !errors.Is(err, context.Canceled) { + childLogger.Error("failed waiting for notification", zap.Error(err)) + } + continue + } + if notif == nil { + childLogger.Warn("received nil notification, continuing to wait for notifications") + continue + } + + childLogger.Debug("received notification", zap.String("payload", notif.Payload)) + callback(ctx, notif) + } + }() + return nil +} diff --git a/solana/indexer/common/pg_notify_test.go b/solana/indexer/common/pg_notify_test.go new file mode 100644 index 00000000..0a3253e6 --- /dev/null +++ b/solana/indexer/common/pg_notify_test.go @@ -0,0 +1,53 @@ +package common + +import ( + "context" + "testing" + "time" + + "api.audius.co/database" + "github.com/jackc/pgx/v5/pgconn" + "github.com/stretchr/testify/require" + "github.com/test-go/testify/assert" + "go.uber.org/zap" +) + +func TestWatchNotification(t *testing.T) { + pool := database.CreateTestDatabase(t, "test_solana_indexer_common") + defer pool.Close() + + notif := "test_notification" + ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second) + defer cancel() + + notifChan := make(chan *pgconn.Notification, 1) + + // Callback to capture the 
notification + callback := func(ctx context.Context, notification *pgconn.Notification) { + notifChan <- notification + } + + logger := zap.NewNop() + err := WatchPgNotification(ctx, pool, notif, callback, logger) + require.NoError(t, err, "failed to listen for notifications") + + conn, err := pool.Acquire(ctx) + require.NoError(t, err, "failed to acquire database connection") + defer conn.Release() + + // Send a test notification + _, err = conn.Exec(ctx, "NOTIFY "+notif+", 'payload'") + require.NoError(t, err, "failed to send notification") + + // Wait for the notification to be received + select { + case <-ctx.Done(): + t.Fatal("timed out waiting for notification") + case n := <-notifChan: + require.NotNil(t, n, "notification should not be nil") + assert.Equal(t, notif, n.Channel, "notification channel should match") + assert.Equal(t, "payload", n.Payload, "notification payload should match") + default: + } + +} diff --git a/solana/indexer/common/retry_queue.go b/solana/indexer/common/retry_queue.go index 86854f42..87e4f33a 100644 --- a/solana/indexer/common/retry_queue.go +++ b/solana/indexer/common/retry_queue.go @@ -13,25 +13,25 @@ import ( pb "github.com/rpcpool/yellowstone-grpc/examples/golang/proto" ) -type retryQueueItem struct { +type RetryQueueItem struct { ID string Indexer string - Update retryQueueUpdate + Update RetryQueueUpdate Error string CreatedAt time.Time UpdatedAt time.Time } -type retryQueueUpdate struct { +type RetryQueueUpdate struct { *pb.SubscribeUpdate } var ( - _ json.Marshaler = (*retryQueueUpdate)(nil) - _ json.Unmarshaler = (*retryQueueUpdate)(nil) + _ json.Marshaler = (*RetryQueueUpdate)(nil) + _ json.Unmarshaler = (*RetryQueueUpdate)(nil) ) -func (r retryQueueUpdate) MarshalJSON() ([]byte, error) { +func (r RetryQueueUpdate) MarshalJSON() ([]byte, error) { if r.SubscribeUpdate == nil { return []byte("{}"), nil } @@ -40,7 +40,7 @@ func (r retryQueueUpdate) MarshalJSON() ([]byte, error) { return res, err } -func (r *retryQueueUpdate) UnmarshalJSON(data []byte) error { +func (r *RetryQueueUpdate) UnmarshalJSON(data []byte) error { fmt.Printf("Unmarshaling JSON: %s\n", data) if r.SubscribeUpdate == nil { r.SubscribeUpdate = &pb.SubscribeUpdate{} @@ -48,7 +48,7 @@ func (r *retryQueueUpdate) UnmarshalJSON(data []byte) error { return protojson.Unmarshal(data, r.SubscribeUpdate) } -func GetRetryQueue(ctx context.Context, db database.DBTX, limit, offset int) ([]retryQueueItem, error) { +func GetRetryQueue(ctx context.Context, db database.DBTX, limit, offset int) ([]RetryQueueItem, error) { sql := `SELECT id, indexer, update, error, created_at, updated_at FROM sol_retry_queue ORDER BY created_at ASC @@ -66,7 +66,7 @@ func GetRetryQueue(ctx context.Context, db database.DBTX, limit, offset int) ([] return nil, fmt.Errorf("failed to query retry queue: %w", err) } - items, err := pgx.CollectRows(rows, pgx.RowToStructByName[retryQueueItem]) + items, err := pgx.CollectRows(rows, pgx.RowToStructByName[RetryQueueItem]) if err != nil { return nil, fmt.Errorf("failed to collect retry queue items: %w", err) } @@ -81,7 +81,7 @@ func AddToRetryQueue(ctx context.Context, db database.DBTX, indexer string, upda ;` _, err := db.Exec(ctx, sql, pgx.NamedArgs{ "indexer": indexer, - "update": retryQueueUpdate{update}, + "update": RetryQueueUpdate{update}, "error": errorMessage, }) if err != nil { diff --git a/solana/indexer/common/retry_queue_test.go b/solana/indexer/common/retry_queue_test.go new file mode 100644 index 00000000..d641c577 --- /dev/null +++ 
b/solana/indexer/common/retry_queue_test.go @@ -0,0 +1,53 @@ +package common + +import ( + "testing" + + "api.audius.co/database" + pb "github.com/rpcpool/yellowstone-grpc/examples/golang/proto" + "github.com/test-go/testify/assert" +) + +func TestRetryQueue(t *testing.T) { + pool := database.CreateTestDatabase(t, "test_solana_indexer_common") + defer pool.Close() + + slot := uint64(123456) + // Create a sample SubscribeUpdate + update := &pb.SubscribeUpdate{ + UpdateOneof: &pb.SubscribeUpdate_Slot{ + Slot: &pb.SubscribeUpdateSlot{ + Slot: slot, + }, + }, + } + + indexer := "test_indexer" + errorMsg := "test error" + + // Add to retry queue + err := AddToRetryQueue(t.Context(), pool, indexer, update, errorMsg) + assert.NoError(t, err, "AddToRetryQueue should not error") + + // Get from retry queue + items, err := GetRetryQueue(t.Context(), pool, 10, 0) + assert.NoError(t, err, "GetRetryQueue should not error") + assert.NotEmpty(t, items, "Retry queue should have at least one item") + + item := items[0] + assert.Equal(t, indexer, item.Indexer) + assert.Equal(t, errorMsg, item.Error) + assert.NotNil(t, item.Update.SubscribeUpdate) + assert.Equal(t, slot, item.Update.SubscribeUpdate.GetSlot().Slot) + assert.NotNil(t, item.CreatedAt) + assert.NotNil(t, item.UpdatedAt) + + // Delete from retry queue + err = DeleteFromRetryQueue(t.Context(), pool, item.ID) + assert.NoError(t, err, "DeleteFromRetryQueue should not error") + + // Ensure item is deleted + items, err = GetRetryQueue(t.Context(), pool, 10, 0) + assert.NoError(t, err, "GetRetryQueue after delete should not error") + assert.Empty(t, items, "Retry queue should be empty after delete") +} diff --git a/solana/indexer/common/retry_util.go b/solana/indexer/common/retry_util.go new file mode 100644 index 00000000..4b0c391b --- /dev/null +++ b/solana/indexer/common/retry_util.go @@ -0,0 +1,35 @@ +package common + +import ( + "fmt" + "time" +) + +func WithRetries(f func() error, maxRetries int, interval time.Duration) error { + err := f() + retries := 0 + for err != nil && retries < maxRetries { + time.Sleep(interval) + err = f() + retries++ + } + if err != nil { + return fmt.Errorf("retry failed: %w", err) + } + return nil +} + +func WithRetriesResult[T any](f func() (T, error), maxRetries int, interval time.Duration) (T, error) { + result, err := f() + retries := 0 + for err != nil && retries < maxRetries { + time.Sleep(interval) + result, err = f() + retries++ + } + if err != nil { + var zero T + return zero, fmt.Errorf("retry failed: %w", err) + } + return result, nil +} diff --git a/solana/indexer/common/retry_util_test.go b/solana/indexer/common/retry_util_test.go new file mode 100644 index 00000000..f08cbc06 --- /dev/null +++ b/solana/indexer/common/retry_util_test.go @@ -0,0 +1,57 @@ +package common + +import ( + "fmt" + "testing" + "time" + + "github.com/test-go/testify/assert" +) + +func TestRetryUtil(t *testing.T) { + attempts := 0 + err := WithRetries(func() error { + attempts++ + if attempts < 3 { + return fmt.Errorf("fail") + } + return nil + }, 3, time.Millisecond*10) + assert.NoError(t, err) + assert.Equal(t, 3, attempts) +} + +func TestRetryUtil_Error(t *testing.T) { + attempts := 0 + err := WithRetries(func() error { + attempts++ + return fmt.Errorf("fail") + }, 3, time.Millisecond*10) + assert.Error(t, err) + assert.Equal(t, 4, attempts) // initial attempt + 3 retries +} + +func TestRetryUtilResult(t *testing.T) { + attempts := 0 + result, err := WithRetriesResult(func() (string, error) { + attempts++ + if attempts < 3 { + 
return "", fmt.Errorf("fail") + } + return "success", nil + }, 3, time.Millisecond*10) + assert.NoError(t, err) + assert.Equal(t, "success", result) + assert.Equal(t, 3, attempts) +} + +func TestRetryUtilResult_Error(t *testing.T) { + attempts := 0 + result, err := WithRetriesResult(func() (string, error) { + attempts++ + return "", fmt.Errorf("fail") + }, 3, time.Millisecond*10) + assert.Error(t, err) + assert.Equal(t, "", result) + assert.Equal(t, 4, attempts) // initial attempt + 3 retries +} diff --git a/solana/indexer/common/transaction.go b/solana/indexer/common/transaction.go new file mode 100644 index 00000000..fc1208cd --- /dev/null +++ b/solana/indexer/common/transaction.go @@ -0,0 +1,75 @@ +package common + +import ( + "context" + "fmt" + "time" + + "github.com/gagliardetto/solana-go" + "github.com/gagliardetto/solana-go/rpc" + "github.com/maypok86/otter" +) + +// Gets a transaction from a cache or fetches it from the RPC. Handles retries. +func FetchTransactionWithCache( + ctx context.Context, + transactionCache *otter.Cache[solana.Signature, + *rpc.GetTransactionResult], + rpcClient RpcClient, + signature solana.Signature, +) (*rpc.GetTransactionResult, error) { + // Check if the transaction is in the cache + if transactionCache != nil { + if res, ok := transactionCache.Get(signature); ok { + return res, nil + } + } + + // If the transaction is not in the cache, fetch it from the RPC + res, err := WithRetriesResult(func() (*rpc.GetTransactionResult, error) { + return rpcClient.GetTransaction( + ctx, + signature, + &rpc.GetTransactionOpts{ + Commitment: rpc.CommitmentConfirmed, + MaxSupportedTransactionVersion: &rpc.MaxSupportedTransactionVersion0, + }, + ) + }, 5, 1*time.Second) + if err != nil { + return nil, fmt.Errorf("failed to get transaction: %w", err) + } + + // Store the fetched transaction in the cache + if transactionCache != nil { + transactionCache.Set(signature, res) + } + + return res, nil +} + +// Resolves address lookup tables in the given transaction using the provided metadata. +func ResolveLookupTables( + ctx context.Context, + rpcClient RpcClient, + tx *solana.Transaction, + meta *rpc.TransactionMeta, +) *solana.Transaction { + addressTables := make(map[solana.PublicKey]solana.PublicKeySlice) + writablePos := 0 + readonlyPos := 0 + for _, lu := range tx.Message.AddressTableLookups { + addresses := make(solana.PublicKeySlice, 256) + for _, idx := range lu.WritableIndexes { + addresses[idx] = meta.LoadedAddresses.Writable[writablePos] + writablePos += 1 + } + for _, idx := range lu.ReadonlyIndexes { + addresses[idx] = meta.LoadedAddresses.ReadOnly[readonlyPos] + readonlyPos += 1 + } + addressTables[lu.AccountKey] = addresses + } + tx.Message.SetAddressTables(addressTables) + return tx +} diff --git a/solana/indexer/common/transaction_test.go b/solana/indexer/common/transaction_test.go new file mode 100644 index 00000000..a22806b2 --- /dev/null +++ b/solana/indexer/common/transaction_test.go @@ -0,0 +1,65 @@ +package common + +import ( + "context" + "testing" + + "api.audius.co/solana/indexer/fake_rpc_client" + "github.com/gagliardetto/solana-go" + "github.com/gagliardetto/solana-go/rpc" + "github.com/maypok86/otter" + "github.com/test-go/testify/assert" +) + +func TestFetchTransactionWithCache_CacheHit(t *testing.T) { + cache, err := otter.MustBuilder[solana.Signature, *rpc.GetTransactionResult](10). 
+ Build() + assert.NoError(t, err, "failed to create cache") + sig := solana.Signature{} + expected := &rpc.GetTransactionResult{} + cache.Set(sig, expected) + + result, err := FetchTransactionWithCache(context.Background(), &cache, nil, sig) + assert.NoError(t, err) + assert.Equal(t, expected, result) +} + +func TestFetchTransactionWithCache_CacheMiss(t *testing.T) { + cache, err := otter.MustBuilder[solana.Signature, *rpc.GetTransactionResult](10). + Build() + assert.NoError(t, err, "failed to create cache") + sig := solana.Signature{} + expected := &rpc.GetTransactionResult{} + fakeRpc := &fake_rpc_client.FakeRpcClient{ + GetTransactionFunc: func(ctx context.Context, signature solana.Signature, opts *rpc.GetTransactionOpts) (*rpc.GetTransactionResult, error) { + return expected, nil + }, + } + result, err := FetchTransactionWithCache(context.Background(), &cache, fakeRpc, sig) + assert.NoError(t, err) + assert.Equal(t, expected, result) +} + +func TestResolveLookupTables(t *testing.T) { + tx := &solana.Transaction{ + Message: solana.Message{ + AddressTableLookups: []solana.MessageAddressTableLookup{ + { + AccountKey: solana.PublicKey{}, + WritableIndexes: []uint8{0}, + ReadonlyIndexes: []uint8{1}, + }, + }, + }, + } + meta := &rpc.TransactionMeta{ + LoadedAddresses: rpc.LoadedAddresses{ + Writable: []solana.PublicKey{{1}}, + ReadOnly: []solana.PublicKey{{2}}, + }, + } + ResolveLookupTables(context.Background(), nil, tx, meta) + addressTables := tx.Message.GetAddressTables() + assert.Equal(t, solana.PublicKey{1}, addressTables[solana.PublicKey{}][0]) + assert.Equal(t, solana.PublicKey{2}, addressTables[solana.PublicKey{}][1]) +} diff --git a/solana/indexer/common/utils.go b/solana/indexer/common/utils.go deleted file mode 100644 index 9e3eba6e..00000000 --- a/solana/indexer/common/utils.go +++ /dev/null @@ -1,162 +0,0 @@ -package common - -import ( - "context" - "errors" - "fmt" - "time" - - "api.audius.co/database" - "github.com/gagliardetto/solana-go" - "github.com/gagliardetto/solana-go/rpc" - "github.com/jackc/pgx/v5/pgconn" - "github.com/maypok86/otter" - "go.uber.org/zap" -) - -func WithRetries(f func() error, maxRetries int, interval time.Duration) error { - err := f() - retries := 0 - for err != nil && retries < maxRetries { - time.Sleep(interval) - err = f() - retries++ - } - if err != nil { - return fmt.Errorf("retry failed: %w", err) - } - return nil -} - -func WithRetriesResult[T any](f func() (T, error), maxRetries int, interval time.Duration) (T, error) { - result, err := f() - retries := 0 - for err != nil && retries < maxRetries { - time.Sleep(interval) - result, err = f() - retries++ - } - if err != nil { - var zero T - return zero, fmt.Errorf("retry failed: %w", err) - } - return result, nil -} - -type notificationCallback func(ctx context.Context, notification *pgconn.Notification) - -func WatchPgNotification(ctx context.Context, pool database.DbPool, notification string, callback notificationCallback, logger *zap.Logger) error { - if logger == nil { - logger = zap.NewNop() - } - - childLogger := logger.With(zap.String("notification", notification)) - - conn, err := pool.Acquire(ctx) - if err != nil { - return fmt.Errorf("failed to acquire database connection: %w", err) - } - - rawConn := conn.Conn() - _, err = rawConn.Exec(ctx, fmt.Sprintf(`LISTEN %s`, notification)) - if err != nil { - return fmt.Errorf("failed to listen for %s changes: %w", notification, err) - } - - go func() { - defer func() { - if rawConn != nil && !rawConn.PgConn().IsClosed() && ctx.Err() != nil 
{ - _, _ = rawConn.Exec(ctx, fmt.Sprintf(`UNLISTEN %s`, notification)) - } - childLogger.Debug("received shutdown signal, stopping notification watcher") - conn.Release() - }() - for { - select { - case <-ctx.Done(): - return - default: - } - - notif, err := rawConn.WaitForNotification(ctx) - if err != nil { - if !errors.Is(err, context.Canceled) { - childLogger.Error("failed waiting for notification", zap.Error(err)) - } - continue - } - if notif == nil { - childLogger.Warn("received nil notification, continuing to wait for notifications") - continue - } - - childLogger.Debug("received notification", zap.String("payload", notif.Payload)) - callback(ctx, notif) - } - }() - return nil -} - -// Gets a transaction from a cache or fetches it from the RPC. Handles retries. -func FetchTransactionWithCache( - ctx context.Context, - transactionCache *otter.Cache[solana.Signature, - *rpc.GetTransactionResult], - rpcClient RpcClient, - signature solana.Signature, -) (*rpc.GetTransactionResult, error) { - // Check if the transaction is in the cache - if transactionCache != nil { - if res, ok := transactionCache.Get(signature); ok { - return res, nil - } - } - - // If the transaction is not in the cache, fetch it from the RPC - res, err := WithRetriesResult(func() (*rpc.GetTransactionResult, error) { - return rpcClient.GetTransaction( - ctx, - signature, - &rpc.GetTransactionOpts{ - Commitment: rpc.CommitmentConfirmed, - MaxSupportedTransactionVersion: &rpc.MaxSupportedTransactionVersion0, - }, - ) - }, 5, 1*time.Second) - if err != nil { - return nil, fmt.Errorf("failed to get transaction: %w", err) - } - - // Store the fetched transaction in the cache - if transactionCache != nil { - transactionCache.Set(signature, res) - } - - return res, nil -} - -// Resolves address lookup tables in the given transaction using the provided metadata. 
-func ResolveLookupTables( - ctx context.Context, - rpcClient RpcClient, - tx *solana.Transaction, - meta *rpc.TransactionMeta, -) *solana.Transaction { - addressTables := make(map[solana.PublicKey]solana.PublicKeySlice) - writablePos := 0 - readonlyPos := 0 - for _, lu := range tx.Message.AddressTableLookups { - addresses := make(solana.PublicKeySlice, 256) - for _, idx := range lu.WritableIndexes { - addresses[idx] = meta.LoadedAddresses.Writable[writablePos] - writablePos += 1 - } - for _, idx := range lu.ReadonlyIndexes { - addresses[idx] = meta.LoadedAddresses.ReadOnly[readonlyPos] - readonlyPos += 1 - } - addressTables[lu.AccountKey] = addresses - } - tx.Message.SetAddressTables(addressTables) - return tx -} diff --git a/solana/indexer/damm_v2/indexer.go b/solana/indexer/damm_v2/indexer.go index e1c8e9b2..d8bfd8d5 100644 --- a/solana/indexer/damm_v2/indexer.go +++ b/solana/indexer/damm_v2/indexer.go @@ -15,6 +15,12 @@ import ( "go.uber.org/zap" ) +const ( + NAME = "damm_v2" + MAX_POOLS_PER_SUBSCRIPTION = 10000 // Arbitrary + NOTIFICATION_NAME = "artist_coins_damm_v2_pool_changed" +) + type Indexer struct { pool database.DbPool grpcConfig common.GrpcConfig @@ -22,12 +28,6 @@ type Indexer struct { logger *zap.Logger } -const ( - NAME = "damm_v2" - MAX_POOLS_PER_SUBSCRIPTION = 10000 // Arbitrary - NOTIFICATION_NAME = "artist_coins_damm_v2_pool_changed" -) - func New( config common.GrpcConfig, rpcClient common.RpcClient, diff --git a/solana/indexer/dbc/dbc.go b/solana/indexer/dbc/dbc.go index b07db725..fa1678cc 100644 --- a/solana/indexer/dbc/dbc.go +++ b/solana/indexer/dbc/dbc.go @@ -6,7 +6,6 @@ import ( "strings" "api.audius.co/database" - "api.audius.co/solana/indexer/common" "api.audius.co/solana/spl/programs/meteora_dbc" "github.com/gagliardetto/solana-go" "github.com/jackc/pgx/v5" @@ -16,7 +15,6 @@ import ( func processDbcInstruction( ctx context.Context, db database.DBTX, - rpcClient common.RpcClient, slot uint64, tx *solana.Transaction, instructionIndex int, @@ -45,7 +43,7 @@ func processDbcInstruction( case meteora_dbc.InstructionImplDef.TypeID(meteora_dbc.Instruction_MigrationDammV2): { if migrationInst, ok := inst.Impl.(*meteora_dbc.MigrationDammV2); ok { - err := insertDbcMigration(ctx, db, dbcMigrationRow{ + err := upsertDbcMigration(ctx, db, dbcMigrationRow{ signature: signature, instructionIndex: instructionIndex, slot: slot, @@ -65,7 +63,7 @@ func processDbcInstruction( quoteMint: migrationInst.GetQuoteMint().PublicKey.String(), }) if err != nil { - return fmt.Errorf("failed to insert dbc migration at instruction %d: %w", instructionIndex, err) + return fmt.Errorf("failed to upsert dbc migration at instruction %d: %w", instructionIndex, err) } instLogger.Info("dbc migrationDammV2", zap.String("mint", migrationInst.GetBaseMint().PublicKey.String()), @@ -73,46 +71,14 @@ func processDbcInstruction( zap.String("dammV2Pool", migrationInst.GetPool().PublicKey.String()), ) - // Also index the pool and positions - - // var dammPool meteora_damm_v2.Pool - // err = common.WithRetries(func() error { - // return rpcClient.GetAccountDataBorshInto(ctx, migrationInst.GetPool().PublicKey, &dammPool) - // }, 5, time.Second*1) - // if err != nil { - // return fmt.Errorf("failed to get damm v2 pool account data after retries: %w", err) - // } else { - // err = upsertDammV2Pool(ctx, db, slot, migrationInst.GetPool().PublicKey, &dammPool) - // if err != nil { - // return fmt.Errorf("failed to upsert damm v2 pool: %w", err) - // } - // } - - // var firstPosition meteora_damm_v2.PositionState - 
// err = common.WithRetries(func() error { - // return rpcClient.GetAccountDataBorshInto(ctx, migrationInst.GetFirstPosition().PublicKey, &firstPosition) - // }, 5, time.Second*1) - // if err != nil { - // return fmt.Errorf("failed to get first damm v2 position account data: %w", err) - // } else { - // err = upsertDammV2Position(ctx, db, slot, migrationInst.GetFirstPosition().PublicKey, &firstPosition) - // if err != nil { - // return fmt.Errorf("failed to upsert first damm v2 position: %w", err) - // } - // } - - // var secondPosition meteora_damm_v2.PositionState - // err = common.WithRetries(func() error { - // return rpcClient.GetAccountDataBorshInto(ctx, migrationInst.GetSecondPosition().PublicKey, &secondPosition) - // }, 5, time.Second*1) - // if err != nil { - // return fmt.Errorf("failed to get second damm v2 position account data: %w", err) - // } else { - // err = upsertDammV2Position(ctx, db, slot, migrationInst.GetSecondPosition().PublicKey, &secondPosition) - // if err != nil { - // return fmt.Errorf("failed to upsert second damm v2 position: %w", err) - // } - // } + err = updateArtistCoinDammV2Pool(ctx, db, migrationInst.GetBaseMint().PublicKey.String(), migrationInst.GetPool().PublicKey.String()) + if err != nil { + return fmt.Errorf("failed to update artist coin with damm v2 pool at instruction %d: %w", instructionIndex, err) + } + instLogger.Info("updated artist coin with damm v2 pool", + zap.String("mint", migrationInst.GetBaseMint().PublicKey.String()), + zap.String("dammV2Pool", migrationInst.GetPool().PublicKey.String()), + ) } } } @@ -139,7 +105,7 @@ type dbcMigrationRow struct { quoteMint string } -func insertDbcMigration(ctx context.Context, db database.DBTX, row dbcMigrationRow) error { +func upsertDbcMigration(ctx context.Context, db database.DBTX, row dbcMigrationRow) error { sql := ` INSERT INTO sol_meteora_dbc_migrations ( signature, @@ -205,3 +171,17 @@ func insertDbcMigration(ctx context.Context, db database.DBTX, row dbcMigrationR }) return err } + +func updateArtistCoinDammV2Pool(ctx context.Context, db database.DBTX, mint string, dammV2Pool string) error { + sql := ` + UPDATE artist_coins + SET damm_v2_pool = @dammV2Pool, + updated_at = NOW() + WHERE mint = @mint; + ` + _, err := db.Exec(ctx, sql, pgx.NamedArgs{ + "mint": mint, + "dammV2Pool": dammV2Pool, + }) + return err +} diff --git a/solana/indexer/dbc/indexer.go b/solana/indexer/dbc/indexer.go index 94b612cf..0b526eb4 100644 --- a/solana/indexer/dbc/indexer.go +++ b/solana/indexer/dbc/indexer.go @@ -18,6 +18,12 @@ import ( "go.uber.org/zap" ) +const ( + NAME = "dbc" + MAX_POOLS_PER_SUBSCRIPTION = 10000 // Arbitrary + NOTIFICATION_NAME = "artist_coins_dbc_pool_changed" +) + type Indexer struct { pool database.DbPool grpcConfig common.GrpcConfig @@ -26,12 +32,6 @@ type Indexer struct { logger *zap.Logger } -const ( - NAME = "dbc" - MAX_POOLS_PER_SUBSCRIPTION = 10000 // Arbitrary - NOTIFICATION_NAME = "artist_coins_dbc_pool_changed" -) - func New( grpcConfig common.GrpcConfig, rpcClient common.RpcClient, @@ -266,7 +266,7 @@ func (i *Indexer) processTransaction(ctx context.Context, slot uint64, tx *solan switch programId { case meteora_dbc.ProgramID: { - err := processDbcInstruction(ctx, i.pool, i.rpcClient, slot, tx, instructionIndex, instruction, signature, instLogger) + err := processDbcInstruction(ctx, i.pool, slot, tx, instructionIndex, instruction, signature, instLogger) if err != nil { return fmt.Errorf("error processing meteora_dbc instruction %d: %w", instructionIndex, err) } diff --git 
a/solana/indexer/token/indexer.go b/solana/indexer/token/indexer.go index c3fc897b..93cbd30f 100644 --- a/solana/indexer/token/indexer.go +++ b/solana/indexer/token/indexer.go @@ -15,6 +15,14 @@ import ( "go.uber.org/zap" ) +const ( + NAME = "token" + NOTIFICATION_NAME = "artist_coins_mint_changed" + MAX_MINTS_PER_SUBSCRIPTION = 10000 // Arbitrary + WORKER_CHANNEL_SIZE = 3000 + WORKER_COUNT = 50 +) + type Indexer struct { pool database.DbPool grpcConfig common.GrpcConfig @@ -26,12 +34,6 @@ type Indexer struct { transactionCache *otter.Cache[solana.Signature, *rpc.GetTransactionResult] } -const TOKEN_INDEXER_NAME = "token" -const ARTIST_COIN_NOTIFICATION_NAME = "artist_coins_mint_changed" -const MAX_ARTIST_COIN_MINTS_PER_SUBSCRIPTION = 10000 -const WORKER_CHANNEL_SIZE = 3000 -const WORKER_COUNT = 50 - func New( config common.GrpcConfig, rpcClient common.RpcClient, @@ -63,7 +65,7 @@ func (d *Indexer) Start(ctx context.Context) { d.logger.Error("failed to handle token update", zap.Int("workerID", workerID), zap.Error(err)) // Add messages that failed to process to the retry queue - if err := common.AddToRetryQueue(ctx, d.pool, TOKEN_INDEXER_NAME, updateMessage, err.Error()); err != nil { + if err := common.AddToRetryQueue(ctx, d.pool, NAME, updateMessage, err.Error()); err != nil { d.logger.Error("failed to add to retry queue", zap.Error(err)) } } @@ -126,7 +128,7 @@ func (d *Indexer) Start(ctx context.Context) { grpcClients = clients // Watch for new coins to be added - err = common.WatchPgNotification(ctx, d.pool, ARTIST_COIN_NOTIFICATION_NAME, handleNotif, d.logger) + err = common.WatchPgNotification(ctx, d.pool, NOTIFICATION_NAME, handleNotif, d.logger) if err != nil { d.logger.Error("failed to watch for artist coin changes", zap.Error(err)) return @@ -195,7 +197,7 @@ func (d *Indexer) HandleUpdate(ctx context.Context, msg *pb.SubscribeUpdate) err func (d *Indexer) subscribeToArtistCoins(ctx context.Context, handleUpdate func(ctx context.Context, message *pb.SubscribeUpdate)) ([]common.GrpcClient, error) { done := false page := 0 - pageSize := MAX_ARTIST_COIN_MINTS_PER_SUBSCRIPTION + pageSize := MAX_MINTS_PER_SUBSCRIPTION grpcClients := make([]common.GrpcClient, 0) total := 0 for !done { @@ -265,7 +267,7 @@ func (t *Indexer) makeMintSubscriptionRequest(ctx context.Context, mintAddresses } // Ensure this subscription has a checkpoint - checkpointId, fromSlot, err := common.EnsureCheckpoint(ctx, TOKEN_INDEXER_NAME, t.pool, t.rpcClient, subscription, t.logger) + checkpointId, fromSlot, err := common.EnsureCheckpoint(ctx, NAME, t.pool, t.rpcClient, subscription, t.logger) if err != nil { return nil, fmt.Errorf("failed to set from slot: %w", err) } From 7ccf02d23ed84d4a596c137ce193f0316c10418b Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Tue, 14 Oct 2025 18:05:44 -0700 Subject: [PATCH 10/56] fix address=>account and test schema --- ddl/functions/calculate_artist_coin_fees.sql | 2 +- ddl/migrations/0169_damm_and_positions.sql | 17 +-- sql/01_schema.sql | 116 +++++++++---------- 3 files changed, 64 insertions(+), 71 deletions(-) diff --git a/ddl/functions/calculate_artist_coin_fees.sql b/ddl/functions/calculate_artist_coin_fees.sql index 7cdd1114..4478779e 100644 --- a/ddl/functions/calculate_artist_coin_fees.sql +++ b/ddl/functions/calculate_artist_coin_fees.sql @@ -34,7 +34,7 @@ RETURNS TABLE ( ) AS unclaimed_damm_v2_fees FROM sol_meteora_damm_v2_pools pool JOIN sol_meteora_dbc_migrations migration ON migration.base_mint = pool.token_a_mint - 
JOIN sol_meteora_damm_v2_positions position ON position.address = migration.first_position + JOIN sol_meteora_damm_v2_positions position ON position.account = migration.first_position WHERE pool.token_a_mint = artist_coin_mint ), dbc_fees AS ( diff --git a/ddl/migrations/0169_damm_and_positions.sql b/ddl/migrations/0169_damm_and_positions.sql index 73f3399e..1c85f759 100644 --- a/ddl/migrations/0169_damm_and_positions.sql +++ b/ddl/migrations/0169_damm_and_positions.sql @@ -25,7 +25,8 @@ COMMENT ON TABLE sol_meteora_dbc_migrations IS 'Tracks migrations from DBC pools COMMENT ON INDEX sol_meteora_dbc_migrations_base_mint_idx IS 'Used for finding artist positions by base_mint.'; CREATE TABLE IF NOT EXISTS sol_meteora_damm_v2_pools ( - address TEXT PRIMARY KEY, + account TEXT PRIMARY KEY, + slot BIGINT NOT NULL, token_a_mint TEXT NOT NULL, token_b_mint TEXT NOT NULL, token_a_vault TEXT NOT NULL, @@ -57,7 +58,7 @@ CREATE TABLE IF NOT EXISTS sol_meteora_damm_v2_pools ( COMMENT ON TABLE sol_meteora_damm_v2_pools IS 'Tracks DAMM V2 pool state. Join with sol_meteora_damm_v2_pool_metrics, sol_meteora_damm_v2_pool_fees, sol_meteora_damm_v2_pool_base_fees, and sol_meteora_damm_v2_pool_dynamic_fees for full pool state.'; CREATE TABLE IF NOT EXISTS sol_meteora_damm_v2_pool_metrics ( - pool TEXT PRIMARY KEY REFERENCES sol_meteora_damm_v2_pools(address) ON DELETE CASCADE, + pool TEXT PRIMARY KEY REFERENCES sol_meteora_damm_v2_pools(account) ON DELETE CASCADE, slot BIGINT NOT NULL, total_lp_a_fee NUMERIC NOT NULL, total_lp_b_fee NUMERIC NOT NULL, @@ -72,7 +73,7 @@ CREATE TABLE IF NOT EXISTS sol_meteora_damm_v2_pool_metrics ( COMMENT ON TABLE sol_meteora_damm_v2_pool_metrics IS 'Tracks aggregated metrics for DAMM V2 pools. A slice of the DAMM V2 pool state.'; CREATE TABLE IF NOT EXISTS sol_meteora_damm_v2_pool_fees ( - pool TEXT PRIMARY KEY REFERENCES sol_meteora_damm_v2_pools(address) ON DELETE CASCADE, + pool TEXT PRIMARY KEY REFERENCES sol_meteora_damm_v2_pools(account) ON DELETE CASCADE, slot BIGINT NOT NULL, protocol_fee_percent SMALLINT NOT NULL, partner_fee_percent SMALLINT NOT NULL, @@ -83,7 +84,7 @@ CREATE TABLE IF NOT EXISTS sol_meteora_damm_v2_pool_fees ( COMMENT ON TABLE sol_meteora_damm_v2_pool_fees IS 'Tracks fee configuration for DAMM V2 pools. A slice of the DAMM V2 pool state.'; CREATE TABLE IF NOT EXISTS sol_meteora_damm_v2_pool_base_fees ( - pool TEXT PRIMARY KEY REFERENCES sol_meteora_damm_v2_pools(address) ON DELETE CASCADE, + pool TEXT PRIMARY KEY REFERENCES sol_meteora_damm_v2_pools(account) ON DELETE CASCADE, slot BIGINT NOT NULL, cliff_fee_numerator BIGINT NOT NULL, fee_scheduler_mode SMALLINT NOT NULL, @@ -96,7 +97,7 @@ CREATE TABLE IF NOT EXISTS sol_meteora_damm_v2_pool_base_fees ( COMMENT ON TABLE sol_meteora_damm_v2_pool_base_fees IS 'Tracks base fee configuration for DAMM V2 pools. A slice of the DAMM V2 pool state.'; CREATE TABLE IF NOT EXISTS sol_meteora_damm_v2_pool_dynamic_fees ( - pool TEXT PRIMARY KEY REFERENCES sol_meteora_damm_v2_pools(address) ON DELETE CASCADE, + pool TEXT PRIMARY KEY REFERENCES sol_meteora_damm_v2_pools(account) ON DELETE CASCADE, slot BIGINT NOT NULL, initialized SMALLINT NOT NULL, max_volatility_accumulator INTEGER NOT NULL, @@ -116,9 +117,9 @@ CREATE TABLE IF NOT EXISTS sol_meteora_damm_v2_pool_dynamic_fees ( COMMENT ON TABLE sol_meteora_damm_v2_pool_dynamic_fees IS 'Tracks dynamic fee configuration for DAMM V2 pools. 
A slice of the DAMM V2 pool state.'; CREATE TABLE IF NOT EXISTS sol_meteora_damm_v2_positions ( - address TEXT PRIMARY KEY, + account TEXT PRIMARY KEY, slot BIGINT NOT NULL, - pool TEXT NOT NULL REFERENCES sol_meteora_damm_v2_pools(address) ON DELETE CASCADE, + pool TEXT NOT NULL, nft_mint TEXT NOT NULL, fee_a_per_token_checkpoint BIGINT NOT NULL, fee_b_per_token_checkpoint BIGINT NOT NULL, @@ -133,7 +134,7 @@ CREATE TABLE IF NOT EXISTS sol_meteora_damm_v2_positions ( COMMENT ON TABLE sol_meteora_damm_v2_positions IS 'Tracks DAMM V2 positions representing a claim to the liquidity and associated fees in a DAMM V2 pool. Join with sol_meteora_damm_v2_position_metrics for full position state.'; CREATE TABLE IF NOT EXISTS sol_meteora_damm_v2_position_metrics ( - position TEXT PRIMARY KEY REFERENCES sol_meteora_damm_v2_positions(address) ON DELETE CASCADE, + position TEXT PRIMARY KEY REFERENCES sol_meteora_damm_v2_positions(account) ON DELETE CASCADE, slot BIGINT NOT NULL, total_claimed_a_fee BIGINT NOT NULL, total_claimed_b_fee BIGINT NOT NULL, diff --git a/sql/01_schema.sql b/sql/01_schema.sql index 8b1e526e..7ce7cacc 100644 --- a/sql/01_schema.sql +++ b/sql/01_schema.sql @@ -1010,7 +1010,7 @@ CREATE FUNCTION public.calculate_artist_coin_fees(artist_coin_mint text) RETURNS ) AS unclaimed_damm_v2_fees FROM sol_meteora_damm_v2_pools pool JOIN sol_meteora_dbc_migrations migration ON migration.base_mint = pool.token_a_mint - JOIN sol_meteora_damm_v2_positions position ON position.address = migration.first_position + JOIN sol_meteora_damm_v2_positions position ON position.account = migration.first_position WHERE pool.token_a_mint = artist_coin_mint ), dbc_fees AS ( @@ -2001,7 +2001,27 @@ CREATE FUNCTION public.handle_artist_coins_change() RETURNS trigger LANGUAGE plpgsql AS $$ BEGIN - PERFORM pg_notify('artist_coins_changed', json_build_object('operation', TG_OP, 'new_mint', NEW.mint, 'old_mint', OLD.mint)::text); + IF (OLD.mint IS NULL AND NEW.mint IS NOT NULL) + OR (OLD.mint IS NOT NULL AND NEW.mint IS NULL) + OR OLD.mint != NEW.mint + THEN + PERFORM pg_notify('artist_coins_mint_changed', NEW.mint); + END IF; + + IF (OLD.dbc_pool IS NULL AND NEW.dbc_pool IS NOT NULL) + OR (OLD.dbc_pool IS NOT NULL AND NEW.dbc_pool IS NULL) + OR OLD.dbc_pool != NEW.dbc_pool + THEN + PERFORM pg_notify('artist_coins_dbc_pool_changed', NEW.dbc_pool); + END IF; + + IF (OLD.damm_v2_pool IS NULL AND NEW.damm_v2_pool IS NOT NULL) + OR (OLD.damm_v2_pool IS NOT NULL AND NEW.damm_v2_pool IS NULL) + OR OLD.damm_v2_pool != NEW.damm_v2_pool + THEN + PERFORM pg_notify('artist_coins_damm_v2_pool_changed', NEW.damm_v2_pool); + END IF; + RETURN NEW; EXCEPTION WHEN OTHERS THEN @@ -5803,7 +5823,9 @@ CREATE TABLE public.artist_coins ( link_1 text, link_2 text, link_3 text, - link_4 text + link_4 text, + damm_v2_pool text, + dbc_pool text ); @@ -5814,6 +5836,20 @@ CREATE TABLE public.artist_coins ( COMMENT ON TABLE public.artist_coins IS 'Stores the token mints for artist coins that the indexer is tracking and their tickers.'; +-- +-- Name: COLUMN artist_coins.damm_v2_pool; Type: COMMENT; Schema: public; Owner: - +-- + +COMMENT ON COLUMN public.artist_coins.damm_v2_pool IS 'The canonical DAMM V2 pool address for this artist coin, if any. Used in solana indexer.'; + + +-- +-- Name: COLUMN artist_coins.dbc_pool; Type: COMMENT; Schema: public; Owner: - +-- + +COMMENT ON COLUMN public.artist_coins.dbc_pool IS 'The associated DBC pool address for this artist coin, if any. 
Used in solana indexer.'; + + -- -- Name: associated_wallets; Type: TABLE; Schema: public; Owner: - -- @@ -7181,6 +7217,7 @@ CREATE TABLE public.sol_keypairs ( CREATE TABLE public.sol_meteora_damm_v2_pool_base_fees ( pool text NOT NULL, + slot bigint NOT NULL, cliff_fee_numerator bigint NOT NULL, fee_scheduler_mode smallint NOT NULL, number_of_period smallint NOT NULL, @@ -7204,6 +7241,7 @@ COMMENT ON TABLE public.sol_meteora_damm_v2_pool_base_fees IS 'Tracks base fee c CREATE TABLE public.sol_meteora_damm_v2_pool_dynamic_fees ( pool text NOT NULL, + slot bigint NOT NULL, initialized smallint NOT NULL, max_volatility_accumulator integer NOT NULL, variable_fee_control integer NOT NULL, @@ -7234,6 +7272,7 @@ COMMENT ON TABLE public.sol_meteora_damm_v2_pool_dynamic_fees IS 'Tracks dynamic CREATE TABLE public.sol_meteora_damm_v2_pool_fees ( pool text NOT NULL, + slot bigint NOT NULL, protocol_fee_percent smallint NOT NULL, partner_fee_percent smallint NOT NULL, referral_fee_percent smallint NOT NULL, @@ -7255,6 +7294,7 @@ COMMENT ON TABLE public.sol_meteora_damm_v2_pool_fees IS 'Tracks fee configurati CREATE TABLE public.sol_meteora_damm_v2_pool_metrics ( pool text NOT NULL, + slot bigint NOT NULL, total_lp_a_fee numeric NOT NULL, total_lp_b_fee numeric NOT NULL, total_protocol_a_fee numeric NOT NULL, @@ -7279,7 +7319,8 @@ COMMENT ON TABLE public.sol_meteora_damm_v2_pool_metrics IS 'Tracks aggregated m -- CREATE TABLE public.sol_meteora_damm_v2_pools ( - address text NOT NULL, + account text NOT NULL, + slot bigint NOT NULL, token_a_mint text NOT NULL, token_b_mint text NOT NULL, token_a_vault text NOT NULL, @@ -7323,6 +7364,7 @@ COMMENT ON TABLE public.sol_meteora_damm_v2_pools IS 'Tracks DAMM V2 pool state. CREATE TABLE public.sol_meteora_damm_v2_position_metrics ( "position" text NOT NULL, + slot bigint NOT NULL, total_claimed_a_fee bigint NOT NULL, total_claimed_b_fee bigint NOT NULL, created_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, @@ -7342,7 +7384,8 @@ COMMENT ON TABLE public.sol_meteora_damm_v2_position_metrics IS 'Tracks aggregat -- CREATE TABLE public.sol_meteora_damm_v2_positions ( - address text NOT NULL, + account text NOT NULL, + slot bigint NOT NULL, pool text NOT NULL, nft_mint text NOT NULL, fee_a_per_token_checkpoint bigint NOT NULL, @@ -7662,19 +7705,6 @@ CREATE TABLE public.sol_token_transfers ( COMMENT ON TABLE public.sol_token_transfers IS 'Stores SPL token transfers for tracked mints.'; --- --- Name: sol_unprocessed_txs; Type: TABLE; Schema: public; Owner: - --- - -CREATE TABLE public.sol_unprocessed_txs ( - signature text NOT NULL, - error_message text, - created_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, - updated_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, - slot bigint DEFAULT 0 NOT NULL -); - - -- -- Name: sol_user_balances; Type: TABLE; Schema: public; Owner: - -- @@ -9189,7 +9219,7 @@ ALTER TABLE ONLY public.sol_meteora_damm_v2_pool_metrics -- ALTER TABLE ONLY public.sol_meteora_damm_v2_pools - ADD CONSTRAINT sol_meteora_damm_v2_pools_pkey PRIMARY KEY (address); + ADD CONSTRAINT sol_meteora_damm_v2_pools_pkey PRIMARY KEY (account); -- @@ -9205,7 +9235,7 @@ ALTER TABLE ONLY public.sol_meteora_damm_v2_position_metrics -- ALTER TABLE ONLY public.sol_meteora_damm_v2_positions - ADD CONSTRAINT sol_meteora_damm_v2_positions_pkey PRIMARY KEY (address); + ADD CONSTRAINT sol_meteora_damm_v2_positions_pkey PRIMARY KEY (account); -- @@ -9288,14 +9318,6 @@ ALTER TABLE ONLY public.sol_token_transfers ADD 
CONSTRAINT sol_token_transfers_pkey PRIMARY KEY (signature, instruction_index); --- --- Name: sol_unprocessed_txs sol_unprocessed_txs_pkey; Type: CONSTRAINT; Schema: public; Owner: - --- - -ALTER TABLE ONLY public.sol_unprocessed_txs - ADD CONSTRAINT sol_unprocessed_txs_pkey PRIMARY KEY (signature); - - -- -- Name: sol_user_balances sol_user_balances_pkey; Type: CONSTRAINT; Schema: public; Owner: - -- @@ -10831,20 +10853,6 @@ CREATE TRIGGER on_event AFTER INSERT ON public.events FOR EACH ROW EXECUTE FUNCT CREATE TRIGGER on_follow AFTER INSERT ON public.follows FOR EACH ROW EXECUTE FUNCTION public.handle_follow(); --- --- Name: sol_meteora_dbc_migrations on_meteora_dbc_migrations; Type: TRIGGER; Schema: public; Owner: - --- - -CREATE TRIGGER on_meteora_dbc_migrations AFTER INSERT OR DELETE ON public.sol_meteora_dbc_migrations FOR EACH ROW EXECUTE FUNCTION public.handle_meteora_dbc_migrations(); - - --- --- Name: TRIGGER on_meteora_dbc_migrations ON sol_meteora_dbc_migrations; Type: COMMENT; Schema: public; Owner: - --- - -COMMENT ON TRIGGER on_meteora_dbc_migrations ON public.sol_meteora_dbc_migrations IS 'Notifies when a DBC pool migrates to a DAMM V2 pool.'; - - -- -- Name: plays on_play; Type: TRIGGER; Schema: public; Owner: - -- @@ -11274,7 +11282,7 @@ ALTER TABLE ONLY public.saves -- ALTER TABLE ONLY public.sol_meteora_damm_v2_pool_base_fees - ADD CONSTRAINT sol_meteora_damm_v2_pool_base_fees_pool_fkey FOREIGN KEY (pool) REFERENCES public.sol_meteora_damm_v2_pools(address) ON DELETE CASCADE; + ADD CONSTRAINT sol_meteora_damm_v2_pool_base_fees_pool_fkey FOREIGN KEY (pool) REFERENCES public.sol_meteora_damm_v2_pools(account) ON DELETE CASCADE; -- @@ -11282,7 +11290,7 @@ ALTER TABLE ONLY public.sol_meteora_damm_v2_pool_base_fees -- ALTER TABLE ONLY public.sol_meteora_damm_v2_pool_dynamic_fees - ADD CONSTRAINT sol_meteora_damm_v2_pool_dynamic_fees_pool_fkey FOREIGN KEY (pool) REFERENCES public.sol_meteora_damm_v2_pools(address) ON DELETE CASCADE; + ADD CONSTRAINT sol_meteora_damm_v2_pool_dynamic_fees_pool_fkey FOREIGN KEY (pool) REFERENCES public.sol_meteora_damm_v2_pools(account) ON DELETE CASCADE; -- @@ -11290,7 +11298,7 @@ ALTER TABLE ONLY public.sol_meteora_damm_v2_pool_dynamic_fees -- ALTER TABLE ONLY public.sol_meteora_damm_v2_pool_fees - ADD CONSTRAINT sol_meteora_damm_v2_pool_fees_pool_fkey FOREIGN KEY (pool) REFERENCES public.sol_meteora_damm_v2_pools(address) ON DELETE CASCADE; + ADD CONSTRAINT sol_meteora_damm_v2_pool_fees_pool_fkey FOREIGN KEY (pool) REFERENCES public.sol_meteora_damm_v2_pools(account) ON DELETE CASCADE; -- @@ -11298,23 +11306,7 @@ ALTER TABLE ONLY public.sol_meteora_damm_v2_pool_fees -- ALTER TABLE ONLY public.sol_meteora_damm_v2_pool_metrics - ADD CONSTRAINT sol_meteora_damm_v2_pool_metrics_pool_fkey FOREIGN KEY (pool) REFERENCES public.sol_meteora_damm_v2_pools(address) ON DELETE CASCADE; - - --- --- Name: sol_meteora_damm_v2_position_metrics sol_meteora_damm_v2_position_metrics_position_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - --- - -ALTER TABLE ONLY public.sol_meteora_damm_v2_position_metrics - ADD CONSTRAINT sol_meteora_damm_v2_position_metrics_position_fkey FOREIGN KEY ("position") REFERENCES public.sol_meteora_damm_v2_positions(address) ON DELETE CASCADE; - - --- --- Name: sol_meteora_damm_v2_positions sol_meteora_damm_v2_positions_pool_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - --- - -ALTER TABLE ONLY public.sol_meteora_damm_v2_positions - ADD CONSTRAINT sol_meteora_damm_v2_positions_pool_fkey FOREIGN KEY (pool) REFERENCES 
public.sol_meteora_damm_v2_pools(address) ON DELETE CASCADE; + ADD CONSTRAINT sol_meteora_damm_v2_pool_metrics_pool_fkey FOREIGN KEY (pool) REFERENCES public.sol_meteora_damm_v2_pools(account) ON DELETE CASCADE; -- From 7b4a6e1c6678e9c84ab1e7e231d3588a073e20e9 Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Tue, 14 Oct 2025 18:06:04 -0700 Subject: [PATCH 11/56] add some tests, fix sql txs --- solana/indexer/common/checkpoints.go | 4 +- solana/indexer/common/checkpoints_test.go | 2 +- solana/indexer/damm_v2/damm_v2.go | 487 ++++++++++++++++++++ solana/indexer/damm_v2/indexer.go | 517 ++-------------------- solana/indexer/damm_v2/indexer_test.go | 126 ++++++ solana/indexer/dbc/indexer.go | 2 +- solana/indexer/token/indexer.go | 2 +- 7 files changed, 648 insertions(+), 492 deletions(-) create mode 100644 solana/indexer/damm_v2/damm_v2.go create mode 100644 solana/indexer/damm_v2/indexer_test.go diff --git a/solana/indexer/common/checkpoints.go b/solana/indexer/common/checkpoints.go index fe7698ba..ea4ab68d 100644 --- a/solana/indexer/common/checkpoints.go +++ b/solana/indexer/common/checkpoints.go @@ -55,7 +55,7 @@ func EnsureCheckpoint( logger.Warn("last indexed slot is too old, starting from minimum slot", zap.Uint64("fromSlot", fromSlot), zap.Uint64("toSlot", lastIndexedSlot)) } - checkpointId, err := insertCheckpointStart(ctx, db, name, fromSlot, subscription) + checkpointId, err := InsertCheckpointStart(ctx, db, name, fromSlot, subscription) if err != nil { return "", 0, fmt.Errorf("failed to start checkpoint: %w", err) } @@ -95,7 +95,7 @@ func InsertBackfillCheckpoint(ctx context.Context, db database.DBTX, fromSlot ui return checkpointId, nil } -func insertCheckpointStart( +func InsertCheckpointStart( ctx context.Context, db database.DBTX, name string, diff --git a/solana/indexer/common/checkpoints_test.go b/solana/indexer/common/checkpoints_test.go index a6895277..5ea47284 100644 --- a/solana/indexer/common/checkpoints_test.go +++ b/solana/indexer/common/checkpoints_test.go @@ -19,7 +19,7 @@ func TestCheckpoints(t *testing.T) { defer pool.Close() req := proto.SubscribeRequest{} - id, err := insertCheckpointStart(t.Context(), pool, "backfill", 100, &req) + id, err := InsertCheckpointStart(t.Context(), pool, "backfill", 100, &req) assert.NoError(t, err, "failed to insert checkpoint start") assert.NotEmpty(t, id, "checkpoint ID should not be empty") diff --git a/solana/indexer/damm_v2/damm_v2.go b/solana/indexer/damm_v2/damm_v2.go new file mode 100644 index 00000000..4584f608 --- /dev/null +++ b/solana/indexer/damm_v2/damm_v2.go @@ -0,0 +1,487 @@ +package damm_v2 + +import ( + "context" + + "api.audius.co/database" + "api.audius.co/solana/spl/programs/meteora_damm_v2" + "github.com/gagliardetto/solana-go" + "github.com/jackc/pgx/v5" +) + +func upsertDammV2Pool( + ctx context.Context, + db database.DBTX, + slot uint64, + account solana.PublicKey, + pool *meteora_damm_v2.Pool, +) error { + sqlPool := ` + INSERT INTO sol_meteora_damm_v2_pools ( + account, + slot, + token_a_mint, + token_b_mint, + token_a_vault, + token_b_vault, + whitelisted_vault, + partner, + liquidity, + protocol_a_fee, + protocol_b_fee, + partner_a_fee, + partner_b_fee, + sqrt_min_price, + sqrt_max_price, + sqrt_price, + activation_point, + activation_type, + pool_status, + token_a_flag, + token_b_flag, + collect_fee_mode, + pool_type, + fee_a_per_liquidity, + fee_b_per_liquidity, + permanent_lock_liquidity, + creator, + created_at, + updated_at + ) VALUES ( + @account, + 
@slot, + @token_a_mint, + @token_b_mint, + @token_a_vault, + @token_b_vault, + @whitelisted_vault, + @partner, + @liquidity, + @protocol_a_fee, + @protocol_b_fee, + @partner_a_fee, + @partner_b_fee, + @sqrt_min_price, + @sqrt_max_price, + @sqrt_price, + @activation_point, + @activation_type, + @pool_status, + @token_a_flag, + @token_b_flag, + @collect_fee_mode, + @pool_type, + @fee_a_per_liquidity, + @fee_b_per_liquidity, + @permanent_lock_liquidity, + @creator, + NOW(), + NOW() + ) + ON CONFLICT (account) DO UPDATE SET + slot = EXCLUDED.slot, + token_a_mint = EXCLUDED.token_a_mint, + token_b_mint = EXCLUDED.token_b_mint, + token_a_vault = EXCLUDED.token_a_vault, + token_b_vault = EXCLUDED.token_b_vault, + whitelisted_vault = EXCLUDED.whitelisted_vault, + partner = EXCLUDED.partner, + liquidity = EXCLUDED.liquidity, + protocol_a_fee = EXCLUDED.protocol_a_fee, + protocol_b_fee = EXCLUDED.protocol_b_fee, + partner_a_fee = EXCLUDED.partner_a_fee, + partner_b_fee = EXCLUDED.partner_b_fee, + sqrt_min_price = EXCLUDED.sqrt_min_price, + sqrt_max_price = EXCLUDED.sqrt_max_price, + sqrt_price = EXCLUDED.sqrt_price, + activation_point = EXCLUDED.activation_point, + activation_type = EXCLUDED.activation_type, + pool_status = EXCLUDED.pool_status, + token_a_flag = EXCLUDED.token_a_flag, + token_b_flag = EXCLUDED.token_b_flag, + collect_fee_mode = EXCLUDED.collect_fee_mode, + pool_type = EXCLUDED.pool_type, + fee_a_per_liquidity = EXCLUDED.fee_a_per_liquidity, + fee_b_per_liquidity = EXCLUDED.fee_b_per_liquidity, + permanent_lock_liquidity = EXCLUDED.permanent_lock_liquidity, + creator = EXCLUDED.creator, + updated_at = NOW() + WHERE EXCLUDED.slot > sol_meteora_damm_v2_pools.slot + ` + args := pgx.NamedArgs{ + "account": account.String(), + "slot": slot, + "token_a_mint": pool.TokenAMint.String(), + "token_b_mint": pool.TokenBMint.String(), + "token_a_vault": pool.TokenAVault.String(), + "token_b_vault": pool.TokenBVault.String(), + "whitelisted_vault": pool.WhitelistedVault.String(), + "partner": pool.Partner.String(), + "liquidity": pool.Liquidity.String(), + "protocol_a_fee": pool.Metrics.TotalProtocolAFee, + "protocol_b_fee": pool.Metrics.TotalProtocolBFee, + "partner_a_fee": pool.Metrics.TotalPartnerAFee, + "partner_b_fee": pool.Metrics.TotalPartnerBFee, + "sqrt_min_price": pool.SqrtMinPrice.BigInt(), + "sqrt_max_price": pool.SqrtMaxPrice.BigInt(), + "sqrt_price": pool.SqrtPrice.BigInt(), + "activation_point": pool.ActivationPoint, + "activation_type": pool.ActivationType, + "pool_status": pool.PoolStatus, + "token_a_flag": pool.TokenAFlag, + "token_b_flag": pool.TokenBFlag, + "collect_fee_mode": pool.CollectFeeMode, + "pool_type": pool.PoolType, + "fee_a_per_liquidity": pool.FeeAPerLiquidity, + "fee_b_per_liquidity": pool.FeeBPerLiquidity, + "permanent_lock_liquidity": pool.PermanentLockLiquidity.BigInt(), + "creator": pool.Creator.String(), + } + _, err := db.Exec(ctx, sqlPool, args) + + return err +} + +func upsertDammV2PoolMetrics( + ctx context.Context, + db database.DBTX, + slot uint64, + account solana.PublicKey, + metrics *meteora_damm_v2.PoolMetrics, +) error { + sqlMetrics := ` + INSERT INTO sol_meteora_damm_v2_pool_metrics ( + pool, + slot, + total_lp_a_fee, + total_lp_b_fee, + total_protocol_a_fee, + total_protocol_b_fee, + total_partner_a_fee, + total_partner_b_fee, + total_position, + created_at, + updated_at + ) VALUES ( + @pool, + @slot, + @total_lp_a_fee, + @total_lp_b_fee, + @total_protocol_a_fee, + @total_protocol_b_fee, + @total_partner_a_fee, + @total_partner_b_fee, + 
@total_position, + NOW(), + NOW() + ) + ON CONFLICT (pool) DO UPDATE SET + slot = EXCLUDED.slot, + total_lp_a_fee = EXCLUDED.total_lp_a_fee, + total_lp_b_fee = EXCLUDED.total_lp_b_fee, + total_protocol_a_fee = EXCLUDED.total_protocol_a_fee, + total_protocol_b_fee = EXCLUDED.total_protocol_b_fee, + total_partner_a_fee = EXCLUDED.total_partner_a_fee, + total_partner_b_fee = EXCLUDED.total_partner_b_fee, + total_position = EXCLUDED.total_position, + updated_at = NOW() + WHERE EXCLUDED.slot > sol_meteora_damm_v2_pool_metrics.slot + ` + + _, err := db.Exec(ctx, sqlMetrics, pgx.NamedArgs{ + "pool": account.String(), + "slot": slot, + "total_lp_a_fee": metrics.TotalLpAFee, + "total_lp_b_fee": metrics.TotalLpBFee, + "total_protocol_a_fee": metrics.TotalProtocolAFee, + "total_protocol_b_fee": metrics.TotalProtocolBFee, + "total_partner_a_fee": metrics.TotalPartnerAFee, + "total_partner_b_fee": metrics.TotalPartnerBFee, + "total_position": metrics.TotalPosition, + }) + return err +} + +func upsertDammV2PoolFees( + ctx context.Context, + db database.DBTX, + slot uint64, + account solana.PublicKey, + fees *meteora_damm_v2.PoolFeesStruct, +) error { + sqlFees := ` + INSERT INTO sol_meteora_damm_v2_pool_fees ( + pool, + slot, + partner_fee_percent, + protocol_fee_percent, + referral_fee_percent, + created_at, + updated_at + ) VALUES ( + @pool, + @slot, + @partner_fee_percent, + @protocol_fee_percent, + @referral_fee_percent, + NOW(), + NOW() + ) + ON CONFLICT (pool) DO UPDATE SET + slot = EXCLUDED.slot, + partner_fee_percent = EXCLUDED.partner_fee_percent, + protocol_fee_percent = EXCLUDED.protocol_fee_percent, + referral_fee_percent = EXCLUDED.referral_fee_percent, + updated_at = NOW() + WHERE EXCLUDED.slot > sol_meteora_damm_v2_pool_fees.slot + ` + + _, err := db.Exec(ctx, sqlFees, pgx.NamedArgs{ + "pool": account.String(), + "slot": slot, + "partner_fee_percent": fees.PartnerFeePercent, + "protocol_fee_percent": fees.ProtocolFeePercent, + "referral_fee_percent": fees.ReferralFeePercent, + }) + return err +} + +func upsertDammV2PoolBaseFee( + ctx context.Context, + db database.DBTX, + slot uint64, + account solana.PublicKey, + baseFee *meteora_damm_v2.BaseFeeStruct, +) error { + sqlBaseFee := ` + INSERT INTO sol_meteora_damm_v2_pool_base_fees ( + pool, + slot, + cliff_fee_numerator, + fee_scheduler_mode, + number_of_period, + period_frequency, + reduction_factor, + created_at, + updated_at + ) VALUES ( + @pool, + @slot, + @cliff_fee_numerator, + @fee_scheduler_mode, + @number_of_period, + @period_frequency, + @reduction_factor, + NOW(), + NOW() + ) + ON CONFLICT (pool) DO UPDATE SET + slot = EXCLUDED.slot, + cliff_fee_numerator = EXCLUDED.cliff_fee_numerator, + fee_scheduler_mode = EXCLUDED.fee_scheduler_mode, + number_of_period = EXCLUDED.number_of_period, + period_frequency = EXCLUDED.period_frequency, + reduction_factor = EXCLUDED.reduction_factor, + updated_at = NOW() + WHERE EXCLUDED.slot > sol_meteora_damm_v2_pool_base_fees.slot + ` + + _, err := db.Exec(ctx, sqlBaseFee, pgx.NamedArgs{ + "pool": account.String(), + "slot": slot, + "cliff_fee_numerator": baseFee.CliffFeeNumerator, + "fee_scheduler_mode": baseFee.FeeSchedulerMode, + "number_of_period": baseFee.NumberOfPeriod, + "period_frequency": baseFee.PeriodFrequency, + "reduction_factor": baseFee.ReductionFactor, + }) + return err +} + +func upsertDammV2PoolDynamicFee( + ctx context.Context, + db database.DBTX, + slot uint64, + account solana.PublicKey, + dynamicFee *meteora_damm_v2.DynamicFeeStruct, +) error { + sqlDynamicFee := ` + INSERT 
INTO sol_meteora_damm_v2_pool_dynamic_fees ( + pool, + slot, + initialized, + max_volatility_accumulator, + variable_fee_control, + bin_step, + filter_period, + decay_period, + reduction_factor, + last_update_timestamp, + bin_step_u128, + sqrt_price_reference, + volatility_accumulator, + volatility_reference, + created_at, + updated_at + ) VALUES ( + @pool, + @slot, + @initialized, + @max_volatility_accumulator, + @variable_fee_control, + @bin_step, + @filter_period, + @decay_period, + @reduction_factor, + @last_update_timestamp, + @bin_step_u128, + @sqrt_price_reference, + @volatility_accumulator, + @volatility_reference, + NOW(), + NOW() + ) + ON CONFLICT (pool) DO UPDATE SET + slot = EXCLUDED.slot, + initialized = EXCLUDED.initialized, + max_volatility_accumulator = EXCLUDED.max_volatility_accumulator, + variable_fee_control = EXCLUDED.variable_fee_control, + bin_step = EXCLUDED.bin_step, + filter_period = EXCLUDED.filter_period, + decay_period = EXCLUDED.decay_period, + reduction_factor = EXCLUDED.reduction_factor, + last_update_timestamp = EXCLUDED.last_update_timestamp, + bin_step_u128 = EXCLUDED.bin_step_u128, + sqrt_price_reference = EXCLUDED.sqrt_price_reference, + volatility_accumulator = EXCLUDED.volatility_accumulator, + volatility_reference = EXCLUDED.volatility_reference, + updated_at = NOW() + WHERE EXCLUDED.slot > sol_meteora_damm_v2_pool_dynamic_fees.slot + ` + + _, err := db.Exec(ctx, sqlDynamicFee, pgx.NamedArgs{ + "pool": account.String(), + "slot": slot, + "initialized": dynamicFee.Initialized, + "max_volatility_accumulator": dynamicFee.MaxVolatilityAccumulator, + "variable_fee_control": dynamicFee.VariableFeeControl, + "bin_step": dynamicFee.BinStep, + "filter_period": dynamicFee.FilterPeriod, + "decay_period": dynamicFee.DecayPeriod, + "reduction_factor": dynamicFee.ReductionFactor, + "last_update_timestamp": dynamicFee.LastUpdateTimestamp, + "bin_step_u128": dynamicFee.BinStepU128, + "sqrt_price_reference": dynamicFee.SqrtPriceReference, + "volatility_accumulator": dynamicFee.VolatilityAccumulator, + "volatility_reference": dynamicFee.VolatilityReference, + }) + return err +} + +func upsertDammV2Position( + ctx context.Context, + db database.DBTX, + slot uint64, + account solana.PublicKey, + position *meteora_damm_v2.PositionState, +) error { + sql := ` + INSERT INTO sol_meteora_damm_v2_positions ( + account, + slot, + pool, + nft_mint, + fee_a_per_token_checkpoint, + fee_b_per_token_checkpoint, + fee_a_pending, + fee_b_pending, + unlocked_liquidity, + vested_liquidity, + permanent_locked_liquidity, + updated_at, + created_at + ) VALUES ( + @account, + @slot, + @pool, + @nft_mint, + @fee_a_per_token_checkpoint, + @fee_b_per_token_checkpoint, + @fee_a_pending, + @fee_b_pending, + @unlocked_liquidity, + @vested_liquidity, + @permanent_locked_liquidity, + NOW(), + NOW() + ) + ON CONFLICT (account) DO UPDATE SET + slot = EXCLUDED.slot, + pool = EXCLUDED.pool, + nft_mint = EXCLUDED.nft_mint, + fee_a_per_token_checkpoint = EXCLUDED.fee_a_per_token_checkpoint, + fee_b_per_token_checkpoint = EXCLUDED.fee_b_per_token_checkpoint, + fee_a_pending = EXCLUDED.fee_a_pending, + fee_b_pending = EXCLUDED.fee_b_pending, + unlocked_liquidity = EXCLUDED.unlocked_liquidity, + vested_liquidity = EXCLUDED.vested_liquidity, + permanent_locked_liquidity = EXCLUDED.permanent_locked_liquidity, + updated_at = NOW() + WHERE EXCLUDED.slot > sol_meteora_damm_v2_positions.slot + ` + + _, err := db.Exec(ctx, sql, pgx.NamedArgs{ + "account": account.String(), + "slot": slot, + "pool": 
position.Pool.String(), + "nft_mint": position.NftMint.String(), + "fee_a_per_token_checkpoint": position.FeeAPerTokenCheckpoint, + "fee_b_per_token_checkpoint": position.FeeBPerTokenCheckpoint, + "fee_a_pending": position.FeeAPending, + "fee_b_pending": position.FeeBPending, + "unlocked_liquidity": position.UnlockedLiquidity.BigInt(), + "vested_liquidity": position.VestedLiquidity.BigInt(), + "permanent_locked_liquidity": position.PermanentLockedLiquidity.BigInt(), + }) + return err +} + +func upsertDammV2PositionMetrics( + ctx context.Context, + db database.DBTX, + slot uint64, + account solana.PublicKey, + metrics *meteora_damm_v2.PositionMetrics, +) error { + sql := ` + INSERT INTO sol_meteora_damm_v2_position_metrics ( + position, + slot, + total_claimed_a_fee, + total_claimed_b_fee, + created_at, + updated_at + ) VALUES ( + @position, + @slot, + @total_claimed_a_fee, + @total_claimed_b_fee, + NOW(), + NOW() + ) + ON CONFLICT (position) DO UPDATE SET + slot = EXCLUDED.slot, + total_claimed_a_fee = EXCLUDED.total_claimed_a_fee, + total_claimed_b_fee = EXCLUDED.total_claimed_b_fee, + updated_at = NOW() + WHERE EXCLUDED.slot > sol_meteora_damm_v2_position_metrics.slot + ` + + _, err := db.Exec(ctx, sql, pgx.NamedArgs{ + "position": account.String(), + "slot": slot, + "total_claimed_a_fee": metrics.TotalClaimedAFee, + "total_claimed_b_fee": metrics.TotalClaimedBFee, + }) + return err +} diff --git a/solana/indexer/damm_v2/indexer.go b/solana/indexer/damm_v2/indexer.go index d8bfd8d5..9f097da6 100644 --- a/solana/indexer/damm_v2/indexer.go +++ b/solana/indexer/damm_v2/indexer.go @@ -162,7 +162,7 @@ func (d *Indexer) subscribe(ctx context.Context) ([]common.GrpcClient, error) { } total += len(pools) - d.logger.Debug("subscribing to pools....", zap.Int("numPools", len(pools))) + d.logger.Debug("subscribing to pools....", zap.Int("count", len(pools))) subscription := d.makeSubscriptionRequest(ctx, pools) // Handle each message from the subscription @@ -253,32 +253,42 @@ func (d *Indexer) makeSubscriptionRequest(ctx context.Context, dammV2Pools []str func processDammV2PoolUpdate( ctx context.Context, - db database.DBTX, + db database.DbPool, update *pb.SubscribeUpdateAccount, ) error { + tx, err := db.Begin(ctx) + if err != nil { + return err + } + defer tx.Rollback(ctx) + account := solana.PublicKeyFromBytes(update.Account.Pubkey) var pool meteora_damm_v2.Pool - err := bin.NewBorshDecoder(update.Account.Data).Decode(&pool) + err = bin.NewBorshDecoder(update.Account.Data).Decode(&pool) + if err != nil { + return err + } + err = upsertDammV2Pool(ctx, tx, update.Slot, account, &pool) if err != nil { return err } - err = upsertDammV2Pool(ctx, db, update.Slot, account, &pool) + err = upsertDammV2PoolMetrics(ctx, tx, update.Slot, account, &pool.Metrics) if err != nil { return err } - err = upsertDammV2PoolMetrics(ctx, db, update.Slot, account, &pool.Metrics) + err = upsertDammV2PoolFees(ctx, tx, update.Slot, account, &pool.PoolFees) if err != nil { return err } - err = upsertDammV2PoolFees(ctx, db, update.Slot, account, &pool.PoolFees) + err = upsertDammV2PoolBaseFee(ctx, tx, update.Slot, account, &pool.PoolFees.BaseFee) if err != nil { return err } - err = upsertDammV2PoolBaseFee(ctx, db, update.Slot, account, &pool.PoolFees.BaseFee) + err = upsertDammV2PoolDynamicFee(ctx, tx, update.Slot, account, &pool.PoolFees.DynamicFee) if err != nil { return err } - err = upsertDammV2PoolDynamicFee(ctx, db, update.Slot, account, &pool.PoolFees.DynamicFee) + err = tx.Commit(ctx) if err != nil { return err } 
@@ -287,12 +297,18 @@ func processDammV2PoolUpdate( func processDammV2PositionUpdate( ctx context.Context, - db database.DBTX, + db database.DbPool, update *pb.SubscribeUpdateAccount, ) error { + tx, err := db.Begin(ctx) + if err != nil { + return err + } + defer tx.Rollback(ctx) + account := solana.PublicKeyFromBytes(update.Account.Pubkey) var position meteora_damm_v2.PositionState - err := bin.NewBorshDecoder(update.Account.Data).Decode(&position) + err = bin.NewBorshDecoder(update.Account.Data).Decode(&position) if err != nil { return err } @@ -304,6 +320,10 @@ func processDammV2PositionUpdate( if err != nil { return err } + err = tx.Commit(ctx) + if err != nil { + return err + } return nil } @@ -334,480 +354,3 @@ func getSubscribedDammV2Pools(ctx context.Context, db database.DBTX, limit int, } return pools, nil } - -func upsertDammV2Pool( - ctx context.Context, - db database.DBTX, - slot uint64, - account solana.PublicKey, - pool *meteora_damm_v2.Pool, -) error { - sqlPool := ` - INSERT INTO sol_meteora_damm_v2_pools ( - address, - slot, - token_a_mint, - token_b_mint, - token_a_vault, - token_b_vault, - whitelisted_vault, - partner, - liquidity, - protocol_a_fee, - protocol_b_fee, - partner_a_fee, - partner_b_fee, - sqrt_min_price, - sqrt_max_price, - sqrt_price, - activation_point, - activation_type, - pool_status, - token_a_flag, - token_b_flag, - collect_fee_mode, - pool_type, - fee_a_per_liquidity, - fee_b_per_liquidity, - permanent_lock_liquidity, - creator, - created_at, - updated_at - ) VALUES ( - @address, - @slot, - @token_a_mint, - @token_b_mint, - @token_a_vault, - @token_b_vault, - @whitelisted_vault, - @partner, - @liquidity, - @protocol_a_fee, - @protocol_b_fee, - @partner_a_fee, - @partner_b_fee, - @sqrt_min_price, - @sqrt_max_price, - @sqrt_price, - @activation_point, - @activation_type, - @pool_status, - @token_a_flag, - @token_b_flag, - @collect_fee_mode, - @pool_type, - @fee_a_per_liquidity, - @fee_b_per_liquidity, - @permanent_lock_liquidity, - @creator, - NOW(), - NOW() - ) - ON CONFLICT (address) DO UPDATE SET - slot = EXCLUDED.slot, - token_a_mint = EXCLUDED.token_a_mint, - token_b_mint = EXCLUDED.token_b_mint, - token_a_vault = EXCLUDED.token_a_vault, - token_b_vault = EXCLUDED.token_b_vault, - whitelisted_vault = EXCLUDED.whitelisted_vault, - partner = EXCLUDED.partner, - liquidity = EXCLUDED.liquidity, - protocol_a_fee = EXCLUDED.protocol_a_fee, - protocol_b_fee = EXCLUDED.protocol_b_fee, - partner_a_fee = EXCLUDED.partner_a_fee, - partner_b_fee = EXCLUDED.partner_b_fee, - sqrt_min_price = EXCLUDED.sqrt_min_price, - sqrt_max_price = EXCLUDED.sqrt_max_price, - sqrt_price = EXCLUDED.sqrt_price, - activation_point = EXCLUDED.activation_point, - activation_type = EXCLUDED.activation_type, - pool_status = EXCLUDED.pool_status, - token_a_flag = EXCLUDED.token_a_flag, - token_b_flag = EXCLUDED.token_b_flag, - collect_fee_mode = EXCLUDED.collect_fee_mode, - pool_type = EXCLUDED.pool_type, - fee_a_per_liquidity = EXCLUDED.fee_a_per_liquidity, - fee_b_per_liquidity = EXCLUDED.fee_b_per_liquidity, - permanent_lock_liquidity = EXCLUDED.permanent_lock_liquidity, - creator = EXCLUDED.creator, - updated_at = NOW() - WHERE EXCLUDED.slot > sol_meteora_damm_v2_pools.slot - ` - args := pgx.NamedArgs{ - "address": account.String(), - "slot": slot, - "token_a_mint": pool.TokenAMint.String(), - "token_b_mint": pool.TokenBMint.String(), - "token_a_vault": pool.TokenAVault.String(), - "token_b_vault": pool.TokenBVault.String(), - "whitelisted_vault": pool.WhitelistedVault.String(), 
- "partner": pool.Partner.String(), - "liquidity": pool.Liquidity.String(), - "protocol_a_fee": pool.Metrics.TotalProtocolAFee, - "protocol_b_fee": pool.Metrics.TotalProtocolBFee, - "partner_a_fee": pool.Metrics.TotalPartnerAFee, - "partner_b_fee": pool.Metrics.TotalPartnerBFee, - "sqrt_min_price": pool.SqrtMinPrice.BigInt(), - "sqrt_max_price": pool.SqrtMaxPrice.BigInt(), - "sqrt_price": pool.SqrtPrice.BigInt(), - "activation_point": pool.ActivationPoint, - "activation_type": pool.ActivationType, - "pool_status": pool.PoolStatus, - "token_a_flag": pool.TokenAFlag, - "token_b_flag": pool.TokenBFlag, - "collect_fee_mode": pool.CollectFeeMode, - "pool_type": pool.PoolType, - "fee_a_per_liquidity": pool.FeeAPerLiquidity, - "fee_b_per_liquidity": pool.FeeBPerLiquidity, - "permanent_lock_liquidity": pool.PermanentLockLiquidity.BigInt(), - "creator": pool.Creator.String(), - } - _, err := db.Exec(ctx, sqlPool, args) - - return err -} - -func upsertDammV2PoolMetrics( - ctx context.Context, - db database.DBTX, - slot uint64, - account solana.PublicKey, - metrics *meteora_damm_v2.PoolMetrics, -) error { - sqlMetrics := ` - INSERT INTO sol_meteora_damm_v2_pool_metrics ( - pool, - slot, - total_lp_a_fee, - total_lp_b_fee, - total_protocol_a_fee, - total_protocol_b_fee, - total_partner_a_fee, - total_partner_b_fee, - total_position, - created_at, - updated_at - ) VALUES ( - @pool, - @slot, - @total_lp_a_fee, - @total_lp_b_fee, - @total_protocol_a_fee, - @total_protocol_b_fee, - @total_partner_a_fee, - @total_partner_b_fee, - @total_position, - NOW(), - NOW() - ) - ON CONFLICT (pool) DO UPDATE SET - slot = EXCLUDED.slot, - total_lp_a_fee = EXCLUDED.total_lp_a_fee, - total_lp_b_fee = EXCLUDED.total_lp_b_fee, - total_protocol_a_fee = EXCLUDED.total_protocol_a_fee, - total_protocol_b_fee = EXCLUDED.total_protocol_b_fee, - total_partner_a_fee = EXCLUDED.total_partner_a_fee, - total_partner_b_fee = EXCLUDED.total_partner_b_fee, - total_position = EXCLUDED.total_position, - updated_at = NOW() - WHERE EXCLUDED.slot > sol_meteora_damm_v2_pool_metrics.slot - ` - - _, err := db.Exec(ctx, sqlMetrics, pgx.NamedArgs{ - "pool": account.String(), - "slot": slot, - "total_lp_a_fee": metrics.TotalLpAFee, - "total_lp_b_fee": metrics.TotalLpBFee, - "total_protocol_a_fee": metrics.TotalProtocolAFee, - "total_protocol_b_fee": metrics.TotalProtocolBFee, - "total_partner_a_fee": metrics.TotalPartnerAFee, - "total_partner_b_fee": metrics.TotalPartnerBFee, - "total_position": metrics.TotalPosition, - }) - return err -} - -func upsertDammV2PoolFees( - ctx context.Context, - db database.DBTX, - slot uint64, - account solana.PublicKey, - fees *meteora_damm_v2.PoolFeesStruct, -) error { - sqlFees := ` - INSERT INTO sol_meteora_damm_v2_pool_fees ( - pool, - slot, - partner_fee_percent, - protocol_fee_percent, - referral_fee_percent, - created_at, - updated_at - ) VALUES ( - @pool, - @slot, - @partner_fee_percent, - @protocol_fee_percent, - @referral_fee_percent, - NOW(), - NOW() - ) - ON CONFLICT (pool) DO UPDATE SET - slot = EXCLUDED.slot, - partner_fee_percent = EXCLUDED.partner_fee_percent, - protocol_fee_percent = EXCLUDED.protocol_fee_percent, - referral_fee_percent = EXCLUDED.referral_fee_percent, - updated_at = NOW() - WHERE EXCLUDED.slot > sol_meteora_damm_v2_pool_fees.slot - ` - - _, err := db.Exec(ctx, sqlFees, pgx.NamedArgs{ - "pool": account.String(), - "slot": slot, - "partner_fee_percent": fees.PartnerFeePercent, - "protocol_fee_percent": fees.ProtocolFeePercent, - "referral_fee_percent": fees.ReferralFeePercent, - }) 
- return err -} - -func upsertDammV2PoolBaseFee( - ctx context.Context, - db database.DBTX, - slot uint64, - account solana.PublicKey, - baseFee *meteora_damm_v2.BaseFeeStruct, -) error { - sqlBaseFee := ` - INSERT INTO sol_meteora_damm_v2_pool_base_fees ( - pool, - slot, - cliff_fee_numerator, - fee_scheduler_mode, - number_of_period, - period_frequency, - reduction_factor, - created_at, - updated_at - ) VALUES ( - @pool, - @slot, - @cliff_fee_numerator, - @fee_scheduler_mode, - @number_of_period, - @period_frequency, - @reduction_factor, - NOW(), - NOW() - ) - ON CONFLICT (pool) DO UPDATE SET - slot = EXCLUDED.slot, - cliff_fee_numerator = EXCLUDED.cliff_fee_numerator, - fee_scheduler_mode = EXCLUDED.fee_scheduler_mode, - number_of_period = EXCLUDED.number_of_period, - period_frequency = EXCLUDED.period_frequency, - reduction_factor = EXCLUDED.reduction_factor, - updated_at = NOW() - WHERE EXCLUDED.slot > sol_meteora_damm_v2_pool_base_fees.slot - ` - - _, err := db.Exec(ctx, sqlBaseFee, pgx.NamedArgs{ - "pool": account.String(), - "slot": slot, - "cliff_fee_numerator": baseFee.CliffFeeNumerator, - "fee_scheduler_mode": baseFee.FeeSchedulerMode, - "number_of_period": baseFee.NumberOfPeriod, - "period_frequency": baseFee.PeriodFrequency, - "reduction_factor": baseFee.ReductionFactor, - }) - return err -} - -func upsertDammV2PoolDynamicFee( - ctx context.Context, - db database.DBTX, - slot uint64, - account solana.PublicKey, - dynamicFee *meteora_damm_v2.DynamicFeeStruct, -) error { - sqlDynamicFee := ` - INSERT INTO sol_meteora_damm_v2_pool_dynamic_fees ( - pool, - slot, - initialized, - max_volatility_accumulator, - variable_fee_control, - bin_step, - filter_period, - decay_period, - reduction_factor, - last_update_timestamp, - bin_step_u128, - sqrt_price_reference, - volatility_accumulator, - volatility_reference, - created_at, - updated_at - ) VALUES ( - @pool, - @slot, - @initialized, - @max_volatility_accumulator, - @variable_fee_control, - @bin_step, - @filter_period, - @decay_period, - @reduction_factor, - @last_update_timestamp, - @bin_step_u128, - @sqrt_price_reference, - @volatility_accumulator, - @volatility_reference, - NOW(), - NOW() - ) - ON CONFLICT (pool) DO UPDATE SET - slot = EXCLUDED.slot, - initialized = EXCLUDED.initialized, - max_volatility_accumulator = EXCLUDED.max_volatility_accumulator, - variable_fee_control = EXCLUDED.variable_fee_control, - bin_step = EXCLUDED.bin_step, - filter_period = EXCLUDED.filter_period, - decay_period = EXCLUDED.decay_period, - reduction_factor = EXCLUDED.reduction_factor, - last_update_timestamp = EXCLUDED.last_update_timestamp, - bin_step_u128 = EXCLUDED.bin_step_u128, - sqrt_price_reference = EXCLUDED.sqrt_price_reference, - volatility_accumulator = EXCLUDED.volatility_accumulator, - volatility_reference = EXCLUDED.volatility_reference, - updated_at = NOW() - WHERE EXCLUDED.slot > sol_meteora_damm_v2_pool_dynamic_fees.slot - ` - - _, err := db.Exec(ctx, sqlDynamicFee, pgx.NamedArgs{ - "pool": account.String(), - "slot": slot, - "initialized": dynamicFee.Initialized, - "max_volatility_accumulator": dynamicFee.MaxVolatilityAccumulator, - "variable_fee_control": dynamicFee.VariableFeeControl, - "bin_step": dynamicFee.BinStep, - "filter_period": dynamicFee.FilterPeriod, - "decay_period": dynamicFee.DecayPeriod, - "reduction_factor": dynamicFee.ReductionFactor, - "last_update_timestamp": dynamicFee.LastUpdateTimestamp, - "bin_step_u128": dynamicFee.BinStepU128, - "sqrt_price_reference": dynamicFee.SqrtPriceReference, - 
"volatility_accumulator": dynamicFee.VolatilityAccumulator, - "volatility_reference": dynamicFee.VolatilityReference, - }) - return err -} - -func upsertDammV2Position( - ctx context.Context, - db database.DBTX, - slot uint64, - account solana.PublicKey, - position *meteora_damm_v2.PositionState, -) error { - sql := ` - INSERT INTO sol_meteora_damm_v2_positions ( - address, - slot, - pool, - nft_mint, - fee_a_per_token_checkpoint, - fee_b_per_token_checkpoint, - fee_a_pending, - fee_b_pending, - unlocked_liquidity, - vested_liquidity, - permanent_locked_liquidity, - updated_at, - created_at - ) VALUES ( - @address, - @slot, - @pool, - @nft_mint, - @fee_a_per_token_checkpoint, - @fee_b_per_token_checkpoint, - @fee_a_pending, - @fee_b_pending, - @unlocked_liquidity, - @vested_liquidity, - @permanent_locked_liquidity, - NOW(), - NOW() - ) - ON CONFLICT (address) DO UPDATE SET - slot = EXCLUDED.slot, - pool = EXCLUDED.pool, - nft_mint = EXCLUDED.nft_mint, - fee_a_per_token_checkpoint = EXCLUDED.fee_a_per_token_checkpoint, - fee_b_per_token_checkpoint = EXCLUDED.fee_b_per_token_checkpoint, - fee_a_pending = EXCLUDED.fee_a_pending, - fee_b_pending = EXCLUDED.fee_b_pending, - unlocked_liquidity = EXCLUDED.unlocked_liquidity, - vested_liquidity = EXCLUDED.vested_liquidity, - permanent_locked_liquidity = EXCLUDED.permanent_locked_liquidity, - updated_at = NOW() - WHERE EXCLUDED.slot > sol_meteora_damm_v2_positions.slot - ` - - _, err := db.Exec(ctx, sql, pgx.NamedArgs{ - "address": account.String(), - "slot": slot, - "pool": position.Pool.String(), - "nft_mint": position.NftMint.String(), - "fee_a_per_token_checkpoint": position.FeeAPerTokenCheckpoint, - "fee_b_per_token_checkpoint": position.FeeBPerTokenCheckpoint, - "fee_a_pending": position.FeeAPending, - "fee_b_pending": position.FeeBPending, - "unlocked_liquidity": position.UnlockedLiquidity.BigInt(), - "vested_liquidity": position.VestedLiquidity.BigInt(), - "permanent_locked_liquidity": position.PermanentLockedLiquidity.BigInt(), - }) - return err -} - -func upsertDammV2PositionMetrics( - ctx context.Context, - db database.DBTX, - slot uint64, - account solana.PublicKey, - metrics *meteora_damm_v2.PositionMetrics, -) error { - sql := ` - INSERT INTO sol_meteora_damm_v2_position_metrics ( - position, - slot, - total_claimed_a_fee, - total_claimed_b_fee, - created_at, - updated_at - ) VALUES ( - @position, - @slot, - @total_claimed_a_fee, - @total_claimed_b_fee, - NOW(), - NOW() - ) - ON CONFLICT (position) DO UPDATE SET - slot = EXCLUDED.slot, - total_claimed_a_fee = EXCLUDED.total_claimed_a_fee, - total_claimed_b_fee = EXCLUDED.total_claimed_b_fee, - updated_at = NOW() - WHERE EXCLUDED.slot > sol_meteora_damm_v2_position_metrics.slot - ` - - _, err := db.Exec(ctx, sql, pgx.NamedArgs{ - "position": account.String(), - "slot": slot, - "total_claimed_a_fee": metrics.TotalClaimedAFee, - "total_claimed_b_fee": metrics.TotalClaimedBFee, - }) - return err -} diff --git a/solana/indexer/damm_v2/indexer_test.go b/solana/indexer/damm_v2/indexer_test.go new file mode 100644 index 00000000..36a0cead --- /dev/null +++ b/solana/indexer/damm_v2/indexer_test.go @@ -0,0 +1,126 @@ +package damm_v2 + +import ( + "encoding/base64" + "testing" + + "api.audius.co/database" + "api.audius.co/solana/indexer/common" + "api.audius.co/solana/indexer/fake_rpc_client" + "github.com/gagliardetto/solana-go" + pb "github.com/rpcpool/yellowstone-grpc/examples/golang/proto" + "github.com/test-go/testify/assert" + "github.com/test-go/testify/require" + "go.uber.org/zap" +) + 
+func TestHandleUpdate_SlotCheckpoint(t *testing.T) {
+	pool := database.CreateTestDatabase(t, "test_solana_indexer_damm_v2")
+	rpcClient := fake_rpc_client.FakeRpcClient{}
+	logger := zap.NewNop()
+
+	indexer := New(common.GrpcConfig{}, &rpcClient, pool, logger)
+
+	expectedSlot := uint64(1500)
+
+	request := pb.SubscribeRequest{}
+	checkpointId, err := common.InsertCheckpointStart(t.Context(), pool, "test", 1000, &request)
+	require.NoError(t, err, "failed to insert checkpoint")
+	update := pb.SubscribeUpdate{
+		Filters: []string{checkpointId},
+		UpdateOneof: &pb.SubscribeUpdate_Slot{
+			Slot: &pb.SubscribeUpdateSlot{
+				Slot: expectedSlot,
+			},
+		},
+	}
+
+	err = indexer.HandleUpdate(t.Context(), &update)
+	require.NoError(t, err)
+
+	slot, err := common.GetCheckpointSlot(t.Context(), pool, "test", &request)
+	require.NoError(t, err)
+	assert.Equal(t, expectedSlot, slot, "checkpoint slot should be updated")
+}
+
+func TestHandleUpdate_DammV2PoolUpdate(t *testing.T) {
+	pool := database.CreateTestDatabase(t, "test_solana_indexer_damm_v2")
+	rpcClient := fake_rpc_client.FakeRpcClient{}
+	logger := zap.NewNop()
+
+	indexer := New(common.GrpcConfig{}, &rpcClient, pool, logger)
+
+	// From real on-chain account data
+	address := solana.MustPublicKeyFromBase58("D9iJqMbgQJLFt5PAAiTJTMNsMAMueukzoe1EK2r1g3WH")
+	poolBase64 := "8ZptBBGxbbyAlpgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUAAAAAAABAAAAAAAAAGCk3AC8AwAAAQAKAHgAiBPGROhoAAAAAMsQx7q4jQYAAAAAAAAAAAChIqYBNRzVAQAAAAAAAAAA4CICAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACOkzYTTyijBQphnsA7NYukXXDff56Bp/GJdn5GamlMZ7/DPMLnXBSHbMN5KDkE9JB3ZpESJXuzrf82mLYCJJQHm/3HSrkp1wPzAbe6y0uFypnr4Yeci2kPU8TWr9TEmTY/2aZknQDPJED5N2M3ytBL5gl4lD8TdKznaJkMDHT44AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAANpjaB9yhrzIBnGeLCtQogFXJDv8lmixFSC8U4Q+3NsISFBg5M0NQHAAaMJHGBEGAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFiinY8UAAAAAAAAAAAAAAAAAAAAAAAAAFA7AQABAAAAAAAAAAAAAACbV2lOqRpchLHE/v8AAAAAIiTN1Ql11QEAAAAAAAAAAIhx5mgAAAAAAQAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAJE91kBWow0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAASFBg5M0NQHAAaMJHGBEGAAAAAAAAAAAAAAAAAAAAAAASYim9UgAAAAAAAAAAAAAAAAAAAAAAAABYop2PFAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAAAAAAAAAAAA2mNoH3KGvMgGcZ4sK1CiAVckO/yWaLEVILxThD7c2wgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA="
+	poolData, err := base64.StdEncoding.DecodeString(poolBase64)
+	require.NoError(t, err)
+
+	update := pb.SubscribeUpdate{
+		Filters: []string{NAME},
+		UpdateOneof: &pb.SubscribeUpdate_Account{
+			Account: &pb.SubscribeUpdateAccount{
+				Account: &pb.SubscribeUpdateAccountInfo{
+					Pubkey: address.Bytes(),
+					Data:   poolData,
+				},
+			},
+		},
+	}
+
+	err = indexer.HandleUpdate(t.Context(), &update)
+	require.NoError(t, err)
+
+	var exists bool
+	err = pool.QueryRow(t.Context(), `
+		SELECT EXISTS (
+			SELECT 1
+			FROM sol_meteora_damm_v2_pools
+			JOIN sol_meteora_damm_v2_pool_metrics ON sol_meteora_damm_v2_pool_metrics.pool = sol_meteora_damm_v2_pools.account
+			JOIN sol_meteora_damm_v2_pool_fees ON sol_meteora_damm_v2_pool_fees.pool = sol_meteora_damm_v2_pools.account
+			JOIN sol_meteora_damm_v2_pool_base_fees ON sol_meteora_damm_v2_pool_base_fees.pool = sol_meteora_damm_v2_pools.account
+			JOIN sol_meteora_damm_v2_pool_dynamic_fees ON sol_meteora_damm_v2_pool_dynamic_fees.pool = sol_meteora_damm_v2_pools.account
+			LIMIT 1
+		)
+	`).Scan(&exists)
+	require.NoError(t, err, "failed to query for pool")
+	assert.True(t, exists, "pool and related fee/metric rows should exist")
+}
+
+func TestHandleUpdate_DammV2PositionUpdate(t *testing.T) {
+	pool := database.CreateTestDatabase(t, "test_solana_indexer_damm_v2")
+	rpcClient := fake_rpc_client.FakeRpcClient{}
+	logger := zap.NewNop()
+
+	indexer := New(common.GrpcConfig{}, &rpcClient, pool, logger)
+
+	// From real on-chain account data
+	address := solana.MustPublicKeyFromBase58("5bYLydDXt1K5zroychcbrVbhGRUpheXdq5w41uccazPB")
+	positionBase64 := "qryP5HpA99C0h5iaMb9or5qzYmaPKH7cBpP1GTyw5pa9SMlEQMuk4oeLsnqCTyioPLOFt664lEHr2woSYFq4Z3N6xFLWwGDSAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADUszHGm5oNAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACQoMPLmBiA4ADThI4wIAwAAAAAAAAAAABGmGkQpAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
+	positionData, err := base64.StdEncoding.DecodeString(positionBase64)
+	require.NoError(t, err)
+
+	update := pb.SubscribeUpdate{
+		Filters: []string{address.String()},
+		UpdateOneof: &pb.SubscribeUpdate_Account{
+			Account: &pb.SubscribeUpdateAccount{
+				Account: &pb.SubscribeUpdateAccountInfo{
+					Pubkey: address.Bytes(),
+					Data:   positionData,
+				},
+			},
+		},
+	}
+
+	err = indexer.HandleUpdate(t.Context(), &update)
+	require.NoError(t, err)
+
+	var exists bool
+	err = pool.QueryRow(t.Context(), `
+		SELECT EXISTS (
+			SELECT 1
+			FROM sol_meteora_damm_v2_positions
+			JOIN sol_meteora_damm_v2_position_metrics ON sol_meteora_damm_v2_position_metrics.position = sol_meteora_damm_v2_positions.account
+			LIMIT 1
+		)
+	`).Scan(&exists)
+	require.NoError(t, err, "failed to query for position")
+	assert.True(t, exists, "position and metrics rows should exist")
+}
diff --git a/solana/indexer/dbc/indexer.go b/solana/indexer/dbc/indexer.go
index 0b526eb4..027a5580 100644
--- a/solana/indexer/dbc/indexer.go
+++ b/solana/indexer/dbc/indexer.go
@@ -185,7 +185,7 @@ func (d *Indexer) subscribe(ctx context.Context) ([]common.GrpcClient, error) {
 		}
 		total += len(pools)
 
-		d.logger.Debug("subscribing to pools....", zap.Int("numPools", len(pools)))
+		d.logger.Debug("subscribing to pools...", zap.Int("count", len(pools)))
 		subscription := d.makeSubscriptionRequest(ctx, pools)
 
 		// Handle each message from the subscription
diff --git a/solana/indexer/token/indexer.go b/solana/indexer/token/indexer.go
index 93cbd30f..0b2b9f26 100644
--- a/solana/indexer/token/indexer.go
+++ b/solana/indexer/token/indexer.go
@@ -210,7 +210,7 @@ func (d *Indexer) subscribeToArtistCoins(ctx context.Context, handleUpdate func(
 			return grpcClients, nil
 		}
 		total += len(mints)
-		d.logger.Debug("subscribing to artist coins...", zap.Int("numCoins", len(mints)))
+		d.logger.Debug("subscribing to artist coins...", zap.Int("count", len(mints)))
 		subscription, err := d.makeMintSubscriptionRequest(ctx, mints)
 		if err != nil {
 			return nil, fmt.Errorf("failed to make mint subscription request: %w", err)

From fa7dba4a1da32805faed26acea1b9ff9cea339d8 Mon Sep 17 00:00:00 2001
From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com>
Date: Tue, 14 Oct 2025 18:53:19 -0700
Subject: [PATCH 12/56] fix schema again, more tests

---
 ddl/migrations/0171_artist_coins_pools.sql |  32 +-
 solana/indexer/dbc/dbc.go                  | 125 +++
solana/indexer/dbc/indexer.go | 121 ---- solana/indexer/dbc/indexer_test.go | 111 ++++ .../migration_transaction_test_fixture.json | 611 ++++++++++++++++++ sql/01_schema.sql | 43 ++ 6 files changed, 919 insertions(+), 124 deletions(-) create mode 100644 solana/indexer/dbc/indexer_test.go create mode 100644 solana/indexer/dbc/migration_transaction_test_fixture.json diff --git a/ddl/migrations/0171_artist_coins_pools.sql b/ddl/migrations/0171_artist_coins_pools.sql index cb60eee0..a89b182e 100644 --- a/ddl/migrations/0171_artist_coins_pools.sql +++ b/ddl/migrations/0171_artist_coins_pools.sql @@ -2,4 +2,34 @@ ALTER TABLE IF EXISTS artist_coins ADD COLUMN IF NOT EXISTS dbc_pool TEXT, ADD COLUMN IF NOT EXISTS damm_v2_pool TEXT; COMMENT ON COLUMN artist_coins.dbc_pool IS 'The associated DBC pool address for this artist coin, if any. Used in solana indexer.'; -COMMENT ON COLUMN artist_coins.damm_v2_pool IS 'The canonical DAMM V2 pool address for this artist coin, if any. Used in solana indexer.'; \ No newline at end of file +COMMENT ON COLUMN artist_coins.damm_v2_pool IS 'The canonical DAMM V2 pool address for this artist coin, if any. Used in solana indexer.'; + +CREATE TABLE IF NOT EXISTS sol_meteora_dbc_pools ( + account TEXT PRIMARY KEY, + slot BIGINT NOT NULL, + config TEXT NOT NULL, + creator TEXT NOT NULL, + base_mint TEXT NOT NULL, + base_vault TEXT NOT NULL, + quote_vault TEXT NOT NULL, + base_reserve BIGINT NOT NULL, + quote_reserve BIGINT NOT NULL, + protocol_base_fee BIGINT NOT NULL, + partner_base_fee BIGINT NOT NULL, + partner_quote_fee BIGINT NOT NULL, + sqrt_price NUMERIC NOT NULL, + activation_point BIGINT NOT NULL, + pool_type SMALLINT NOT NULL, + is_migrated SMALLINT NOT NULL, + is_partner_withdraw_surplus SMALLINT NOT NULL, + is_protocol_withdraw_surplus SMALLINT NOT NULL, + migration_progress SMALLINT NOT NULL, + is_withdraw_leftover SMALLINT NOT NULL, + is_creator_withdraw_surplus SMALLINT NOT NULL, + migration_fee_withdraw_status SMALLINT NOT NULL, + finish_curve_timestamp BIGINT NOT NULL, + creator_base_fee BIGINT NOT NULL, + creator_quote_fee BIGINT NOT NULL, + created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP +) \ No newline at end of file diff --git a/solana/indexer/dbc/dbc.go b/solana/indexer/dbc/dbc.go index fa1678cc..03e19ed1 100644 --- a/solana/indexer/dbc/dbc.go +++ b/solana/indexer/dbc/dbc.go @@ -43,7 +43,7 @@ func processDbcInstruction( case meteora_dbc.InstructionImplDef.TypeID(meteora_dbc.Instruction_MigrationDammV2): { if migrationInst, ok := inst.Impl.(*meteora_dbc.MigrationDammV2); ok { - err := upsertDbcMigration(ctx, db, dbcMigrationRow{ + err := insertDbcMigration(ctx, db, dbcMigrationRow{ signature: signature, instructionIndex: instructionIndex, slot: slot, @@ -105,7 +105,7 @@ type dbcMigrationRow struct { quoteMint string } -func upsertDbcMigration(ctx context.Context, db database.DBTX, row dbcMigrationRow) error { +func insertDbcMigration(ctx context.Context, db database.DBTX, row dbcMigrationRow) error { sql := ` INSERT INTO sol_meteora_dbc_migrations ( signature, @@ -185,3 +185,124 @@ func updateArtistCoinDammV2Pool(ctx context.Context, db database.DBTX, mint stri }) return err } + +func upsertDbcPool( + ctx context.Context, + db database.DBTX, + slot uint64, + account solana.PublicKey, + pool *meteora_dbc.Pool, +) error { + sql := ` + INSERT INTO sol_meteora_dbc_pools ( + account, + slot, + config, + creator, + base_mint, + base_vault, + quote_vault, + base_reserve, + quote_reserve, + 
protocol_base_fee, + partner_base_fee, + partner_quote_fee, + sqrt_price, + activation_point, + pool_type, + is_migrated, + is_partner_withdraw_surplus, + is_protocol_withdraw_surplus, + migration_progress, + is_withdraw_leftover, + is_creator_withdraw_surplus, + migration_fee_withdraw_status, + finish_curve_timestamp, + creator_base_fee, + creator_quote_fee, + created_at, + updated_at + ) VALUES ( + @account, + @slot, + @config, + @creator, + @base_mint, + @base_vault, + @quote_vault, + @base_reserve, + @quote_reserve, + @protocol_base_fee, + @partner_base_fee, + @partner_quote_fee, + @sqrt_price, + @activation_point, + @pool_type, + @is_migrated, + @is_partner_withdraw_surplus, + @is_protocol_withdraw_surplus, + @migration_progress, + @is_withdraw_leftover, + @is_creator_withdraw_surplus, + @migration_fee_withdraw_status, + @finish_curve_timestamp, + @creator_base_fee, + @creator_quote_fee, + NOW(), + NOW() + ) ON CONFLICT (account) DO UPDATE SET + slot = EXCLUDED.slot, + config = EXCLUDED.config, + creator = EXCLUDED.creator, + base_mint = EXCLUDED.base_mint, + base_vault = EXCLUDED.base_vault, + quote_vault = EXCLUDED.quote_vault, + base_reserve = EXCLUDED.base_reserve, + quote_reserve = EXCLUDED.quote_reserve, + protocol_base_fee = EXCLUDED.protocol_base_fee, + partner_base_fee = EXCLUDED.partner_base_fee, + partner_quote_fee = EXCLUDED.partner_quote_fee, + sqrt_price = EXCLUDED.sqrt_price, + activation_point = EXCLUDED.activation_point, + pool_type = EXCLUDED.pool_type, + is_migrated = EXCLUDED.is_migrated, + is_partner_withdraw_surplus = EXCLUDED.is_partner_withdraw_surplus, + is_protocol_withdraw_surplus = EXCLUDED.is_protocol_withdraw_surplus, + migration_progress = EXCLUDED.migration_progress, + is_withdraw_leftover = EXCLUDED.is_withdraw_leftover, + is_creator_withdraw_surplus = EXCLUDED.is_creator_withdraw_surplus, + migration_fee_withdraw_status = EXCLUDED.migration_fee_withdraw_status, + finish_curve_timestamp = EXCLUDED.finish_curve_timestamp, + creator_base_fee = EXCLUDED.creator_base_fee, + creator_quote_fee = EXCLUDED.creator_quote_fee, + updated_at = NOW() + ;` + _, err := db.Exec(ctx, sql, pgx.NamedArgs{ + "account": account.String(), + "slot": slot, + "config": pool.Config.String(), + "creator": pool.Creator.String(), + "base_mint": pool.BaseMint.String(), + "base_vault": pool.BaseVault.String(), + "quote_vault": pool.QuoteVault.String(), + "base_reserve": pool.BaseReserve, + "quote_reserve": pool.QuoteReserve, + "protocol_base_fee": pool.ProtocolBaseFee, + "partner_base_fee": pool.PartnerBaseFee, + "partner_quote_fee": pool.PartnerQuoteFee, + "sqrt_price": pool.SqrtPrice.BigInt(), + "activation_point": pool.ActivationPoint, + "pool_type": pool.PoolType, + "is_migrated": pool.IsMigrated, + "is_partner_withdraw_surplus": pool.IsPartnerWithdrawSurplus, + "is_protocol_withdraw_surplus": pool.IsProtocolWithdrawSurplus, + "migration_progress": pool.MigrationProgress, + "is_withdraw_leftover": pool.IsWithdrawLeftover, + "is_creator_withdraw_surplus": pool.IsCreatorWithdrawSurplus, + "migration_fee_withdraw_status": pool.MigrationFeeWithdrawStatus, + "finish_curve_timestamp": pool.FinishCurveTimestamp, + "creator_base_fee": pool.CreatorBaseFee, + "creator_quote_fee": pool.CreatorQuoteFee, + }) + return err +} diff --git a/solana/indexer/dbc/indexer.go b/solana/indexer/dbc/indexer.go index 027a5580..4bfab987 100644 --- a/solana/indexer/dbc/indexer.go +++ b/solana/indexer/dbc/indexer.go @@ -303,124 +303,3 @@ func getSubscribedDbcPools(ctx context.Context, db database.DBTX, 
limit int, off } return pools, nil } - -func upsertDbcPool( - ctx context.Context, - db database.DBTX, - slot uint64, - account solana.PublicKey, - pool *meteora_dbc.Pool, -) error { - sql := ` - INSERT INTO sol_meteora_dbc_pools ( - address, - slot, - config, - creator, - base_mint, - base_vault, - quote_vault, - base_reserve, - quote_reserve, - protocol_base_fee, - partner_base_fee, - partner_quote_fee, - sqrt_price, - activation_point, - pool_type, - is_migrated, - is_partner_withdraw_surplus, - is_protocol_withdraw_surplus, - migration_progress, - is_withdraw_leftover, - is_creator_withdraw_surplus, - migration_fee_withdraw_status, - finish_curve_timestamp, - creator_base_fee, - creator_quote_fee, - created_at, - updated_at - ) VALUES ( - @address, - @slot, - @config, - @creator, - @base_mint, - @base_vault, - @quote_vault, - @base_reserve, - @quote_reserve, - @protocol_base_fee, - @partner_base_fee, - @partner_quote_fee, - @sqrt_price, - @activation_point, - @pool_type, - @is_migrated, - @is_partner_withdraw_surplus, - @is_protocol_withdraw_surplus, - @migration_progress, - @is_withdraw_leftover, - @is_creator_withdraw_surplus, - @migration_fee_withdraw_status, - @finish_curve_timestamp, - @creator_base_fee, - @creator_quote_fee, - NOW(), - NOW() - ) ON CONFLICT (address) DO UPDATE SET - slot = EXCLUDED.slot, - config = EXCLUDED.config, - creator = EXCLUDED.creator, - base_mint = EXCLUDED.base_mint, - base_vault = EXCLUDED.base_vault, - quote_vault = EXCLUDED.quote_vault, - base_reserve = EXCLUDED.base_reserve, - quote_reserve = EXCLUDED.quote_reserve, - protocol_base_fee = EXCLUDED.protocol_base_fee, - partner_base_fee = EXCLUDED.partner_base_fee, - partner_quote_fee = EXCLUDED.partner_quote_fee, - sqrt_price = EXCLUDED.sqrt_price, - activation_point = EXCLUDED.activation_point, - pool_type = EXCLUDED.pool_type, - is_migrated = EXCLUDED.is_migrated, - is_partner_withdraw_surplus = EXCLUDED.is_partner_withdraw_surplus, - is_protocol_withdraw_surplus = EXCLUDED.is_protocol_withdraw_surplus, - migration_progress = EXCLUDED.migration_progress, - is_withdraw_leftover = EXCLUDED.is_withdraw_leftover, - is_creator_withdraw_surplus = EXCLUDED.is_creator_withdraw_surplus, - migration_fee_withdraw_status = EXCLUDED.migration_fee_withdraw_status, - finish_curve_timestamp = EXCLUDED.finish_curve_timestamp, - creator_base_fee = EXCLUDED.creator_base_fee, - creator_quote_fee = EXCLUDED.creator_quote_fee, - updated_at = NOW() - ;` - _, err := db.Exec(ctx, sql, pgx.NamedArgs{ - "address": account.String(), - "slot": slot, - "config": pool.Config.String(), - "creator": pool.Creator.String(), - "base_mint": pool.BaseMint.String(), - "base_vault": pool.BaseVault.String(), - "quote_vault": pool.QuoteVault.String(), - "base_reserve": pool.BaseReserve, - "quote_reserve": pool.QuoteReserve, - "protocol_base_fee": pool.ProtocolBaseFee, - "partner_base_fee": pool.PartnerBaseFee, - "partner_quote_fee": pool.PartnerQuoteFee, - "sqrt_price": pool.SqrtPrice.BigInt(), - "activation_point": pool.ActivationPoint, - "pool_type": pool.PoolType, - "is_migrated": pool.IsMigrated, - "is_partner_withdraw_surplus": pool.IsPartnerWithdrawSurplus, - "is_protocol_withdraw_surplus": pool.IsProtocolWithdrawSurplus, - "migration_progress": pool.MigrationProgress, - "is_withdraw_leftover": pool.IsWithdrawLeftover, - "is_creator_withdraw_surplus": pool.IsCreatorWithdrawSurplus, - "migration_fee_withdraw_status": pool.MigrationFeeWithdrawStatus, - "finish_curve_timestamp": pool.FinishCurveTimestamp, - "creator_base_fee": 
pool.CreatorBaseFee,
-		"creator_quote_fee":             pool.CreatorQuoteFee,
-	})
-	return err
-}
diff --git a/solana/indexer/dbc/indexer_test.go b/solana/indexer/dbc/indexer_test.go
new file mode 100644
index 00000000..f994d300
--- /dev/null
+++ b/solana/indexer/dbc/indexer_test.go
@@ -0,0 +1,111 @@
+package dbc
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"os"
+	"testing"
+
+	"api.audius.co/database"
+	"api.audius.co/solana/indexer/common"
+	"api.audius.co/solana/indexer/fake_rpc_client"
+	"github.com/gagliardetto/solana-go"
+	"github.com/gagliardetto/solana-go/rpc"
+	"github.com/maypok86/otter"
+	pb "github.com/rpcpool/yellowstone-grpc/examples/golang/proto"
+	"github.com/test-go/testify/assert"
+	"github.com/test-go/testify/require"
+	"go.uber.org/zap"
+)
+
+func TestHandleUpdate_SlotCheckpoint(t *testing.T) {
+	pool := database.CreateTestDatabase(t, "test_solana_indexer_dbc")
+	rpcClient := fake_rpc_client.FakeRpcClient{}
+	logger := zap.NewNop()
+
+	indexer := New(common.GrpcConfig{}, &rpcClient, pool, nil, logger)
+
+	expectedSlot := uint64(1500)
+
+	request := pb.SubscribeRequest{}
+	checkpointId, err := common.InsertCheckpointStart(t.Context(), pool, "test", 1000, &request)
+	require.NoError(t, err, "failed to insert checkpoint")
+	update := pb.SubscribeUpdate{
+		Filters: []string{checkpointId},
+		UpdateOneof: &pb.SubscribeUpdate_Slot{
+			Slot: &pb.SubscribeUpdateSlot{
+				Slot: expectedSlot,
+			},
+		},
+	}
+
+	err = indexer.HandleUpdate(t.Context(), &update)
+	require.NoError(t, err)
+
+	slot, err := common.GetCheckpointSlot(t.Context(), pool, "test", &request)
+	require.NoError(t, err)
+	assert.Equal(t, expectedSlot, slot, "checkpoint slot should be updated")
+}
+
+func TestHandleUpdate_Migration(t *testing.T) {
+	pool := database.CreateTestDatabase(t, "test_solana_indexer_dbc")
+	rpcClient := fake_rpc_client.FakeRpcClient{}
+	transactionCache, err := otter.MustBuilder[solana.Signature, *rpc.GetTransactionResult](10).Build()
+	require.NoError(t, err, "failed to create cache")
+	logger := zap.NewNop()
+
+	// Add artist coin
+	_, err = pool.Exec(t.Context(), `
+		INSERT INTO artist_coins (mint, ticker, name, decimals, user_id, dbc_pool)
+		VALUES ('bearR26zyyB3fNQm5wWv1ZfN8MPQDUMwaAuoG79b1Yj', 'BEAR', 'Bear', 9, 0, 'J5LCsaaCWcYmzes8qwKmg89zzEtnbYkxFxD9YRU5auPY')
+	`)
+	require.NoError(t, err, "failed to insert artist coin")
+
+	// Fetched using RPC call and copy/pasted the result
+	respJsonBytes, err := os.ReadFile("./migration_transaction_test_fixture.json")
+	require.NoError(t, err)
+
+	var resp rpc.GetTransactionResult
+	err = json.Unmarshal(respJsonBytes, &resp)
+	require.NoError(t, err)
+
+	txSig := solana.MustSignatureFromBase58("93takW7UMBsJgGNH9oARpTT5EiEtJ7c2u6PCzHAsFMQ6P2Sejy5zJpn4sAaxMLHcfLPvMtFE87piofkH22oxuFz")
+
+	transactionCache.Set(txSig, &resp)
+
+	poolAddress := solana.MustPublicKeyFromBase58("J5LCsaaCWcYmzes8qwKmg89zzEtnbYkxFxD9YRU5auPY")
+	poolBase64 := "1eAF0WJFd1wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAG9Tc1jgMJCyOX1vksZXFU3IYphR0oodY0slRsdoUdgKrMKOzc0mg2NPexupQDHoGcVoHifYtOmHGEwAVsO0Z3QjgPlTbLz1Ps0B4mY1IShFnTtqdLRHMu8x62whqE/2AvYhAP2AbR4EbLQJaWf2CK3epbbZGBxR+R3zD54jzqne4Uh2rsFMELtnbPjkj1rWBozcMGqiGiqiysf2F5cey7qsvh70GiWgKlc9OkIgPAAAAAAAAAAAAALvd+QoIAAAAAAAAAAAAAAAAAAAAAAAAAGCPwvUoXI8CAAAAAAAAAAC7W7cVAAAAAAABAAADAAAAAAAAAAAAAAC73fkKCAAAAAAAAAAAAAAABXfnKyAAAADP/cloAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=="
+	poolData, err := base64.StdEncoding.DecodeString(poolBase64)
+
require.NoError(t, err) + + update := pb.SubscribeUpdate{ + UpdateOneof: &pb.SubscribeUpdate_Account{ + Account: &pb.SubscribeUpdateAccount{ + Account: &pb.SubscribeUpdateAccountInfo{ + Pubkey: poolAddress.Bytes(), + Data: poolData, + TxnSignature: txSig[:], + }, + }, + }, + } + + indexer := New(common.GrpcConfig{}, &rpcClient, pool, &transactionCache, logger) + err = indexer.HandleUpdate(t.Context(), &update) + require.NoError(t, err) + + sql := ` + SELECT EXISTS ( + SELECT 1 + FROM artist_coins + JOIN sol_meteora_dbc_pools ON sol_meteora_dbc_pools.account = artist_coins.dbc_pool + JOIN sol_meteora_dbc_migrations ON sol_meteora_dbc_migrations.dbc_pool = sol_meteora_dbc_pools.account + WHERE artist_coins.damm_v2_pool IS NOT NULL + LIMIT 1 + ) + ` + var exists bool + err = pool.QueryRow(t.Context(), sql).Scan(&exists) + require.NoError(t, err, "failed to query for dbc pool") + assert.True(t, exists, "damm v2 pool should exist after migration") +} diff --git a/solana/indexer/dbc/migration_transaction_test_fixture.json b/solana/indexer/dbc/migration_transaction_test_fixture.json new file mode 100644 index 00000000..66e87dbc --- /dev/null +++ b/solana/indexer/dbc/migration_transaction_test_fixture.json @@ -0,0 +1,611 @@ +{ + "blockTime": 1758068458, + "meta": { + "computeUnitsConsumed": 304563, + "err": null, + "fee": 15000, + "innerInstructions": [ + { + "index": 0, + "instructions": [ + { + "accounts": [0, 12], + "data": "3Bxs4NRZ15a54oAf", + "programIdIndex": 16, + "stackHeight": 2 + }, + { + "accounts": [ + 12, 2, 9, 12, 24, 23, 4, 13, 8, 6, 7, 3, 10, 11, 25, 25, 26, 16, + 18, 21 + ], + "data": "MzM2vE8YY5Ud5NMeQQUet2RKbv35C7fY8tM1dGYcS8ZpHxNYYfskXU7y", + "programIdIndex": 21, + "stackHeight": 2 + }, + { + "accounts": [12, 2], + "data": "11116syqhkHvfhehK9JhX2gRezgRQWPmBGxir9ufp4aJY8ADq4ygfLSW3KSGKNHXG6yasM", + "programIdIndex": 16, + "stackHeight": 3 + }, + { + "accounts": [2], + "data": "GC8ZamphTwhd8Un6WvtVXoQ7tKghze2AHkEKsDqPVPzQegssQsEQyqVjPpwpV74GCoAcFTihDT7pCfTehFjGQFpf1E", + "programIdIndex": 26, + "stackHeight": 3 + }, + { + "accounts": [2], + "data": "Znn1DCeEvctY5amCnp857wZ1kCBdXRhF4KTQ9qAXye5R7z", + "programIdIndex": 26, + "stackHeight": 3 + }, + { + "accounts": [2], + "data": "bPc3QVmUEXxDBFECWtBHKobwtKjzEknzKtgZZ7MerSzM7krnYbyVG8UaARJFXStSShjT6CwHyZPaCZzVp74eAHuCijA", + "programIdIndex": 26, + "stackHeight": 3 + }, + { + "accounts": [12, 9], + "data": "11119os1e9qSs2u7TsThXqkBSRVFxhmYp1G2FxiRqj5Byu8nwhABYCwsXzbzvy7GU1yGsm", + "programIdIndex": 16, + "stackHeight": 3 + }, + { + "accounts": [9, 2], + "data": "6bqCTyyN8REmv5mAz7MZQ1CURihLYPK4BepSfmcPaaVLK", + "programIdIndex": 26, + "stackHeight": 3 + }, + { + "accounts": [12, 4], + "data": "11115ioGfdg3qqie62vEKFrQyv3SheEhVGPEHL3qHfmAGn823BgWxsG8APXSbFn9ncHAs6", + "programIdIndex": 16, + "stackHeight": 3 + }, + { + "accounts": [12, 13], + "data": "11115jJ3wfqpy1rBHuw3dUPuJHNA43JUUXTvnHkxCRNT4WpsBdLCHffnqquP2N9fznwMpQ", + "programIdIndex": 16, + "stackHeight": 3 + }, + { + "accounts": [12, 7], + "data": "11119os1e9qSs2u7TsThXqkBSRVFxhmYaFKFZ1waB2X7armDmvK3p5GmLdUxYdg3h7QSrL", + "programIdIndex": 16, + "stackHeight": 3 + }, + { + "accounts": [7, 8], + "data": "6dUVVPBgVKHLXypVa91ymh7y97N3WZrSqQKXBFoGXkV3A", + "programIdIndex": 25, + "stackHeight": 3 + }, + { + "accounts": [12, 3], + "data": "11119os1e9qSs2u7TsThXqkBSRVFxhmYaFKFZ1waB2X7armDmvK3p5GmLdUxYdg3h7QSrL", + "programIdIndex": 16, + "stackHeight": 3 + }, + { + "accounts": [3, 6], + "data": "6dUVVPBgVKHLXypVa91ymh7y97N3WZrSqQKXBFoGXkV3A", + "programIdIndex": 25, + 
"stackHeight": 3 + }, + { + "accounts": [2, 23, 2, 23], + "data": "YzZQyW1KJYUbSgfn3pqcVK8ghZbQYKEosYeoCZHEsxwHdrNzyiu7NBG3ssEUXzWPFtB4DcL8LBnVaSCNoYZWMjKPaqH4M5W62T816ckkaTXvR2G8GJEuxCch1AXpHXcHKPXDCaALVfCbSvty2DwTeczTjxnSDjpKfBtiSumFiPChqwSoYCE", + "programIdIndex": 26, + "stackHeight": 3 + }, + { + "accounts": [12, 2], + "data": "3Bxs4R4W7QypLfgj", + "programIdIndex": 16, + "stackHeight": 3 + }, + { + "accounts": [2, 9, 23], + "data": "6AuM4xMCPFhR", + "programIdIndex": 26, + "stackHeight": 3 + }, + { + "accounts": [18], + "data": "DjRuqDRQjw1RAFHJRKFoHpjuyJdqDUZtEABpj1sAdo8tV6vEYoT2NbPp7D4XqCGtQTjEoTKJ77gXFQvGjZsE2RapodUnxNkrns4LtdNdganSDtSFUrCu5voQHHm1NRhT5wLcobLRxCRePE1xeAf91TkgtcngQ321DsFwj92AxHPdXxuVb3dxwCsT87vGVX7NTpJMz", + "programIdIndex": 21, + "stackHeight": 3 + }, + { + "accounts": [10, 8, 7, 12], + "data": "imZkmd2aPjezU", + "programIdIndex": 25, + "stackHeight": 3 + }, + { + "accounts": [11, 6, 3, 12], + "data": "j5ykk8AgtZp5u", + "programIdIndex": 25, + "stackHeight": 3 + }, + { + "accounts": [18], + "data": "jEnBrpov8hSPNtjDYcTTtmBdMz4QTR3rodjyJsWwdJRNsuLpqQtYyArJfeNe52xUjjyuCbbrBxWre8Eb1sLANExenmQdJJDGNssibQHbq8ajNmCxSAwY98dXzSc2NKTGKTHf9V7e2djw1RUvzQ3feitJnim1y852hk4CCjzGimsLD5TEjxN6TeD5dhE78DQgeHddMXuoUanAjKBkKUqnonTim9GUXxcs5Njii1JhV384Y4ZvifEiv6vWrNjDnbNoVCEPkFuASGSY9QMhG1dMhJCpm9DTmGiyFMCgpWWSsC5KN9C7EpAzZjRz5GfzFLz8j6Yqaq5TnMenAYsAbzQaPtZ7Cq4nYTKd3cVLbxCpD8yG2g3L9JaULPko9P9aE1XPNkGCgKc9H24DrhggEUNAQubAnWG5fZWHk1kq5DnARqtqxxcJuz2EuNuJP6LuESPbb1Sjo2kxAiBRA2DvgJg93TbhvsQHdiSBcJb2uyWyrZzmyewQ3JTN53DAPZSPupP2aoUA4P1", + "programIdIndex": 21, + "stackHeight": 3 + }, + { + "accounts": [4, 13, 9, 12, 18, 21], + "data": "G75vHFFjjB7LN6NHV3nXzotMNatYC2TDh", + "programIdIndex": 21, + "stackHeight": 2 + }, + { + "accounts": [18], + "data": "jrmy2PY3XLuAJWP2bPxaDEpbj4wz9b721TgL7dgFpLppsi1sPu9uBjSSKVWkSeyZocGiaggaaJrEjAvfTHa6PXnvQb7VdT8FGYfMDQqd4xXW49rqdi4WVDzRZZ7roGH8zEB9Rnc1B9ie6a8vtzMTYWZKu", + "programIdIndex": 21, + "stackHeight": 3 + }, + { + "accounts": [9, 12], + "data": "bmb9FzHUEQbdQd4nU52Y1Zy3NYAkKD9joiNycx52sjHnMHe", + "programIdIndex": 26, + "stackHeight": 2 + }, + { + "accounts": [12, 1, 14, 4, 5, 23, 0, 26, 16, 18, 21], + "data": "9AqYNzLPMCx", + "programIdIndex": 21, + "stackHeight": 2 + }, + { + "accounts": [0, 1], + "data": "11116syqhkHvfhehK9JhX2gRezgRQWPmBGxir9ufp4aJY8ADq4ygfLSW3KSGKNHXG6yasM", + "programIdIndex": 16, + "stackHeight": 3 + }, + { + "accounts": [1], + "data": "GC8ZamphTwhd8Un6WvtVXoQ7tKghze2AHkEKsDqPVPzQegmHp2PdpysgF6Hr5n3TfCEBzwpwxRak9wwtsPzt5oAGz9", + "programIdIndex": 26, + "stackHeight": 3 + }, + { + "accounts": [1], + "data": "Znn1DCeEvctY5amCnp857wZ1kCBdXRhF4KTQ9qAXye5R7z", + "programIdIndex": 26, + "stackHeight": 3 + }, + { + "accounts": [1], + "data": "bPc3QVmUEXxDBFECWtBHKobwtKjzEknzKtgZZ7MerSzM7krnYbyVG8UaARJFXStSShjT6CwHyZPaCZzVp74eAHuCijA", + "programIdIndex": 26, + "stackHeight": 3 + }, + { + "accounts": [0, 14], + "data": "11119os1e9qSs2u7TsThXqkBSRVFxhmYp1G2FxiRqj5Byu8nwhABYCwsXzbzvy7GU1yGsm", + "programIdIndex": 16, + "stackHeight": 3 + }, + { + "accounts": [14, 1], + "data": "6bqCTyyN8REmv5mAz7MZQ1CURihLYPK4BepSfmcPaaVLK", + "programIdIndex": 26, + "stackHeight": 3 + }, + { + "accounts": [0, 5], + "data": "11115jJ3wfqpy1rBHuw3dUPuJHNA43JUUXTvnHkxCRNT4WpsBdLCHffnqquP2N9fznwMpQ", + "programIdIndex": 16, + "stackHeight": 3 + }, + { + "accounts": [1, 23, 1, 23], + "data": 
"YzZQyW1KJYUbSgfn3pqcVK8ghZbQYKEosYeoCZHEsxwHdrNzyiu7NBG3ssEUXzWPFtB4DcL8LBnVaSCNoYZWMjKPaqH4M5W62T816ckkaTXvR2G8GJEuxCch1AXpHXcHKPXDCaALVfCbSvty2DwTeczTjxnSDjpKfBtiSumFiPChqwSoYCE", + "programIdIndex": 26, + "stackHeight": 3 + }, + { + "accounts": [0, 1], + "data": "3Bxs4R4W7QypLfgj", + "programIdIndex": 16, + "stackHeight": 3 + }, + { + "accounts": [1, 14, 23], + "data": "6AuM4xMCPFhR", + "programIdIndex": 26, + "stackHeight": 3 + }, + { + "accounts": [18], + "data": "DjRuqDRQjw1RAFHJRKFoHpjuyJdqDUZtEABpj1sAdo8tV6vEYoT2NbPp7D4XqCGtQTjEoTKJ77gXFQvGjZsE2RapodUnxNkrns4LtdNdganSDrSy7NKHfbXiSd5iACVsSu3WPXTfwEPQz3asyYLNbfYWYxQTg17pghnqXM6DEnx8fDjudz8w6MHUovbAQSGous9JD", + "programIdIndex": 21, + "stackHeight": 3 + }, + { + "accounts": [4, 5, 10, 11, 7, 3, 8, 6, 14, 12, 25, 25, 18, 21], + "data": "A2PehjWuU2ZVXmfgyLdg5AVcFNi1AXDCvsp6nEDhT8Rhgi2ojyCdKp2", + "programIdIndex": 21, + "stackHeight": 2 + }, + { + "accounts": [10, 8, 7, 12], + "data": "imZkmd2aPjezU", + "programIdIndex": 25, + "stackHeight": 3 + }, + { + "accounts": [11, 6, 3, 12], + "data": "j5ykk8AgtZp5u", + "programIdIndex": 25, + "stackHeight": 3 + }, + { + "accounts": [18], + "data": "4nMqxPPYfh5u4mHLXK7hZgtg6W8Enebpbp61zgLNG45rpf8mg2EmiuCdrE4z1dVunu59hL5MJiPorv1DibP6Rcf86Y6pes3xtVq1Ffuru3NhiEJGZ5XSCQ2GCG1nQdoZvpwWHfba4u47UWLCT2C7qmRVtREDEuPB5Mwiz2Vx7vBj7V6zX7epaQ27Wp8TviAiTnJXU7JK8szRZdQMRMQjxrCo92ujy7qWbXVzKWvf2uuBeHJCF", + "programIdIndex": 21, + "stackHeight": 3 + }, + { + "accounts": [4, 5, 14, 12, 18, 21], + "data": "G75vHFFjjB7LN6NHV3nXzotMNatYC2TDh", + "programIdIndex": 21, + "stackHeight": 2 + }, + { + "accounts": [18], + "data": "jrmy2PY3XLuAJWP2bPxaDEpbj4wz9b721TgL7dgFpLppsi1sPu9uBjSSKVWkSeyZoaHSDCny9yaYtWFNF4NWkVVozXEjcV61sNEGYnWrfAKKic4DkWwUogaU6RAJQneBiRRtTaVr1kvYamHHsvLSTYohZ", + "programIdIndex": 21, + "stackHeight": 3 + }, + { + "accounts": [14, 12], + "data": "bmb5VFGAy8m896CdHajZMzgX4qYJPxvAkKibkw6hsRbTvUR", + "programIdIndex": 26, + "stackHeight": 2 + } + ] + } + ], + "loadedAddresses": { "readonly": [], "writable": [] }, + "logMessages": [ + "Program dbcij3LWUppWqq96dh6gJWwBifmcGfLSB5D4DuSMaqN invoke [1]", + "Program log: Instruction: MigrationDammV2", + "Program log: create pool", + "Program log: transfer lamport to pool_authority", + "Program 11111111111111111111111111111111 invoke [2]", + "Program 11111111111111111111111111111111 success", + "Program cpamdpZCGKUy5JxQXB4dcpGPiikHawvSWAd6mEn1sGG invoke [2]", + "Program log: Instruction: InitializePool", + "Program 11111111111111111111111111111111 invoke [3]", + "Program 11111111111111111111111111111111 success", + "Program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb invoke [3]", + "Program log: MetadataPointerInstruction::Initialize", + "Program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb consumed 691 of 455940 compute units", + "Program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb success", + "Program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb invoke [3]", + "Program log: Instruction: InitializeMintCloseAuthority", + "Program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb consumed 768 of 453455 compute units", + "Program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb success", + "Program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb invoke [3]", + "Program log: Instruction: InitializeMint2", + "Program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb consumed 1657 of 450941 compute units", + "Program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb success", + "Program 11111111111111111111111111111111 invoke [3]", + "Program 11111111111111111111111111111111 success", + 
"Program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb invoke [3]", + "Program log: Instruction: InitializeAccount3", + "Program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb consumed 1784 of 439528 compute units", + "Program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb success", + "Program 11111111111111111111111111111111 invoke [3]", + "Program 11111111111111111111111111111111 success", + "Program 11111111111111111111111111111111 invoke [3]", + "Program 11111111111111111111111111111111 success", + "Program 11111111111111111111111111111111 invoke [3]", + "Program 11111111111111111111111111111111 success", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA invoke [3]", + "Program log: Instruction: InitializeAccount3", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA consumed 4188 of 416182 compute units", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA success", + "Program 11111111111111111111111111111111 invoke [3]", + "Program 11111111111111111111111111111111 success", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA invoke [3]", + "Program log: Instruction: InitializeAccount3", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA consumed 4214 of 404243 compute units", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA success", + "Program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb invoke [3]", + "Program log: TokenMetadataInstruction: Initialize", + "Program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb consumed 3508 of 388251 compute units", + "Program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb success", + "Program 11111111111111111111111111111111 invoke [3]", + "Program 11111111111111111111111111111111 success", + "Program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb invoke [3]", + "Program log: Instruction: MintTo", + "Program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb consumed 1631 of 380115 compute units", + "Program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb success", + "Program cpamdpZCGKUy5JxQXB4dcpGPiikHawvSWAd6mEn1sGG invoke [3]", + "Program cpamdpZCGKUy5JxQXB4dcpGPiikHawvSWAd6mEn1sGG consumed 2092 of 375488 compute units", + "Program cpamdpZCGKUy5JxQXB4dcpGPiikHawvSWAd6mEn1sGG success", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA invoke [3]", + "Program log: Instruction: TransferChecked", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA consumed 6147 of 370850 compute units", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA success", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA invoke [3]", + "Program log: Instruction: TransferChecked", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA consumed 6173 of 362303 compute units", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA success", + "Program cpamdpZCGKUy5JxQXB4dcpGPiikHawvSWAd6mEn1sGG invoke [3]", + "Program cpamdpZCGKUy5JxQXB4dcpGPiikHawvSWAd6mEn1sGG consumed 2092 of 350798 compute units", + "Program cpamdpZCGKUy5JxQXB4dcpGPiikHawvSWAd6mEn1sGG success", + "Program cpamdpZCGKUy5JxQXB4dcpGPiikHawvSWAd6mEn1sGG consumed 120414 of 468148 compute units", + "Program cpamdpZCGKUy5JxQXB4dcpGPiikHawvSWAd6mEn1sGG success", + "Program log: lock permanent liquidity for first position", + "Program cpamdpZCGKUy5JxQXB4dcpGPiikHawvSWAd6mEn1sGG invoke [2]", + "Program log: Instruction: PermanentLockPosition", + "Program cpamdpZCGKUy5JxQXB4dcpGPiikHawvSWAd6mEn1sGG invoke [3]", + "Program cpamdpZCGKUy5JxQXB4dcpGPiikHawvSWAd6mEn1sGG consumed 2092 of 335671 compute units", + "Program cpamdpZCGKUy5JxQXB4dcpGPiikHawvSWAd6mEn1sGG success", + "Program 
cpamdpZCGKUy5JxQXB4dcpGPiikHawvSWAd6mEn1sGG consumed 10111 of 343182 compute units", + "Program cpamdpZCGKUy5JxQXB4dcpGPiikHawvSWAd6mEn1sGG success", + "Program log: transfer ownership of the first position", + "Program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb invoke [2]", + "Program log: Instruction: SetAuthority", + "Program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb consumed 833 of 330811 compute units", + "Program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb success", + "Program log: create second position", + "Program log: create position", + "Program cpamdpZCGKUy5JxQXB4dcpGPiikHawvSWAd6mEn1sGG invoke [2]", + "Program log: Instruction: CreatePosition", + "Program 11111111111111111111111111111111 invoke [3]", + "Program 11111111111111111111111111111111 success", + "Program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb invoke [3]", + "Program log: MetadataPointerInstruction::Initialize", + "Program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb consumed 691 of 310064 compute units", + "Program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb success", + "Program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb invoke [3]", + "Program log: Instruction: InitializeMintCloseAuthority", + "Program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb consumed 768 of 307579 compute units", + "Program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb success", + "Program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb invoke [3]", + "Program log: Instruction: InitializeMint2", + "Program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb consumed 1657 of 305064 compute units", + "Program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb success", + "Program 11111111111111111111111111111111 invoke [3]", + "Program 11111111111111111111111111111111 success", + "Program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb invoke [3]", + "Program log: Instruction: InitializeAccount3", + "Program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb consumed 1784 of 292151 compute units", + "Program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb success", + "Program 11111111111111111111111111111111 invoke [3]", + "Program 11111111111111111111111111111111 success", + "Program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb invoke [3]", + "Program log: TokenMetadataInstruction: Initialize", + "Program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb consumed 3508 of 277440 compute units", + "Program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb success", + "Program 11111111111111111111111111111111 invoke [3]", + "Program 11111111111111111111111111111111 success", + "Program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb invoke [3]", + "Program log: Instruction: MintTo", + "Program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb consumed 1631 of 269304 compute units", + "Program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb success", + "Program cpamdpZCGKUy5JxQXB4dcpGPiikHawvSWAd6mEn1sGG invoke [3]", + "Program cpamdpZCGKUy5JxQXB4dcpGPiikHawvSWAd6mEn1sGG consumed 2092 of 264678 compute units", + "Program cpamdpZCGKUy5JxQXB4dcpGPiikHawvSWAd6mEn1sGG success", + "Program cpamdpZCGKUy5JxQXB4dcpGPiikHawvSWAd6mEn1sGG consumed 56790 of 318704 compute units", + "Program cpamdpZCGKUy5JxQXB4dcpGPiikHawvSWAd6mEn1sGG success", + "Program log: add liquidity", + "Program cpamdpZCGKUy5JxQXB4dcpGPiikHawvSWAd6mEn1sGG invoke [2]", + "Program log: Instruction: AddLiquidity", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA invoke [3]", + "Program log: Instruction: TransferChecked", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA consumed 6147 of 235183 compute units", + "Program 
TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA success", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA invoke [3]", + "Program log: Instruction: TransferChecked", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA consumed 6173 of 226636 compute units", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA success", + "Program cpamdpZCGKUy5JxQXB4dcpGPiikHawvSWAd6mEn1sGG invoke [3]", + "Program cpamdpZCGKUy5JxQXB4dcpGPiikHawvSWAd6mEn1sGG consumed 2092 of 217281 compute units", + "Program cpamdpZCGKUy5JxQXB4dcpGPiikHawvSWAd6mEn1sGG success", + "Program cpamdpZCGKUy5JxQXB4dcpGPiikHawvSWAd6mEn1sGG consumed 37989 of 252399 compute units", + "Program cpamdpZCGKUy5JxQXB4dcpGPiikHawvSWAd6mEn1sGG success", + "Program log: lock liquidity", + "Program cpamdpZCGKUy5JxQXB4dcpGPiikHawvSWAd6mEn1sGG invoke [2]", + "Program log: Instruction: PermanentLockPosition", + "Program cpamdpZCGKUy5JxQXB4dcpGPiikHawvSWAd6mEn1sGG invoke [3]", + "Program cpamdpZCGKUy5JxQXB4dcpGPiikHawvSWAd6mEn1sGG consumed 2092 of 202535 compute units", + "Program cpamdpZCGKUy5JxQXB4dcpGPiikHawvSWAd6mEn1sGG success", + "Program cpamdpZCGKUy5JxQXB4dcpGPiikHawvSWAd6mEn1sGG consumed 10111 of 210046 compute units", + "Program cpamdpZCGKUy5JxQXB4dcpGPiikHawvSWAd6mEn1sGG success", + "Program log: set authority", + "Program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb invoke [2]", + "Program log: Instruction: SetAuthority", + "Program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb consumed 833 of 197709 compute units", + "Program TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb success", + "Program dbcij3LWUppWqq96dh6gJWwBifmcGfLSB5D4DuSMaqN consumed 304413 of 500000 compute units", + "Program dbcij3LWUppWqq96dh6gJWwBifmcGfLSB5D4DuSMaqN success", + "Program ComputeBudget111111111111111111111111111111 invoke [1]", + "Program ComputeBudget111111111111111111111111111111 success" + ], + "postBalances": [ + 3030867321, 4127280, 4127280, 2039280, 8630400, 3730560, 260042654, + 2039280, 1461600, 2039280, 2039280, 2039280, 542066173330, 3730560, + 2039280, 3841920, 1, 8184960, 0, 2491680, 1, 1141506, 1151511, + 31324699330, 3173760, 4676183832, 1141469 + ], + "postTokenBalances": [ + { + "accountIndex": 3, + "mint": "9LzCMqDgTKYz9Drzqnpgee3SGa89up3a247ypMj2xrqM", + "owner": "HLnpSz9h2S4hiLQ43rnSD9XkcUThA7B8hQMKmDaiTLcC", + "programId": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "uiTokenAmount": { + "amount": "17076458013140", + "decimals": 8, + "uiAmount": 170764.5801314, + "uiAmountString": "170764.5801314" + } + }, + { + "accountIndex": 7, + "mint": "bearR26zyyB3fNQm5wWv1ZfN8MPQDUMwaAuoG79b1Yj", + "owner": "HLnpSz9h2S4hiLQ43rnSD9XkcUThA7B8hQMKmDaiTLcC", + "programId": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "uiTokenAmount": { + "amount": "170764584107040162", + "decimals": 9, + "uiAmount": 170764584.10704017, + "uiAmountString": "170764584.107040162" + } + }, + { + "accountIndex": 9, + "mint": "DgWF4Huj8a4yyGBPaAw4kqQFBuD5BujbHN7VKWaEBp4t", + "owner": "CXFjXpXVQqv4jy5bjauLvECrDsKESQB6BPkSiC6dAmak", + "programId": "TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb", + "uiTokenAmount": { + "amount": "1", + "decimals": 0, + "uiAmount": 1.0, + "uiAmountString": "1" + } + }, + { + "accountIndex": 10, + "mint": "bearR26zyyB3fNQm5wWv1ZfN8MPQDUMwaAuoG79b1Yj", + "owner": "FhVo3mqL8PW5pH5U2CN4XE33DokiyZnUwuGpH2hmHLuM", + "programId": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "uiTokenAmount": { + "amount": "79235415892709947", + "decimals": 9, + "uiAmount": 79235415.89270994, + "uiAmountString": "79235415.892709947" + } + }, + { + 
"accountIndex": 11, + "mint": "9LzCMqDgTKYz9Drzqnpgee3SGa89up3a247ypMj2xrqM", + "owner": "FhVo3mqL8PW5pH5U2CN4XE33DokiyZnUwuGpH2hmHLuM", + "programId": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "uiTokenAmount": { + "amount": "175472468609", + "decimals": 8, + "uiAmount": 1754.72468609, + "uiAmountString": "1754.72468609" + } + }, + { + "accountIndex": 14, + "mint": "76uQDWksWWvFKHn4ZNPTpR9jJ9x3eqgsmcHdb8FmXS3o", + "owner": "8kWiEZFeuaPCanbJkwL4PvWDmx4zsLnRoXjUPBnvrLmX", + "programId": "TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb", + "uiTokenAmount": { + "amount": "1", + "decimals": 0, + "uiAmount": 1.0, + "uiAmountString": "1" + } + } + ], + "preBalances": [ + 3090779441, 0, 0, 0, 0, 0, 260042654, 0, 1461600, 0, 2039280, 2039280, + 542038779410, 0, 0, 3841920, 1, 8184960, 0, 2491680, 1, 1141506, 1151511, + 31324699330, 3173760, 4676183832, 1141469 + ], + "preTokenBalances": [ + { + "accountIndex": 10, + "mint": "bearR26zyyB3fNQm5wWv1ZfN8MPQDUMwaAuoG79b1Yj", + "owner": "FhVo3mqL8PW5pH5U2CN4XE33DokiyZnUwuGpH2hmHLuM", + "programId": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "uiTokenAmount": { + "amount": "249999999999750109", + "decimals": 9, + "uiAmount": 249999999.9997501, + "uiAmountString": "249999999.999750109" + } + }, + { + "accountIndex": 11, + "mint": "9LzCMqDgTKYz9Drzqnpgee3SGa89up3a247ypMj2xrqM", + "owner": "FhVo3mqL8PW5pH5U2CN4XE33DokiyZnUwuGpH2hmHLuM", + "programId": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "uiTokenAmount": { + "amount": "17251930481749", + "decimals": 8, + "uiAmount": 172519.30481749, + "uiAmountString": "172519.30481749" + } + } + ], + "rewards": [], + "status": { "Ok": null } + }, + "slot": 367312008, + "transaction": { + "message": { + "accountKeys": [ + "CXFjXpXVQqv4jy5bjauLvECrDsKESQB6BPkSiC6dAmak", + "76uQDWksWWvFKHn4ZNPTpR9jJ9x3eqgsmcHdb8FmXS3o", + "DgWF4Huj8a4yyGBPaAw4kqQFBuD5BujbHN7VKWaEBp4t", + "22xSxHAEbtvrD8ks1HGFrDNheqrxLF2HMyg15y57x6ve", + "9avVRGRvPsSYiXKBMHnC6RNPbwN5yE3v7fD8FibgScwA", + "9jUTYHEypL6XwYRSk5CZaKMEmNuQoRaPM17JDaNpk6G7", + "9LzCMqDgTKYz9Drzqnpgee3SGa89up3a247ypMj2xrqM", + "9RpE5TgoJG6Jh4vuZ3PMuVVpWhhCzWsU1EEUDh7zzMei", + "bearR26zyyB3fNQm5wWv1ZfN8MPQDUMwaAuoG79b1Yj", + "CL5nkYmVBNHRDZLZovyK6RGbv2SFjJRfPgf4pFknitBn", + "DkrZF8DT18gu4n9Q26c46r49ciJwNt8S8jbQhouEEJnA", + "DQWe8KbbGSUyESbbfzG9keBmhtjMRbRVg4e3wH2h1KFT", + "FhVo3mqL8PW5pH5U2CN4XE33DokiyZnUwuGpH2hmHLuM", + "GRYndBmh1aoQjMXQm4NPs7oQ288o6xghrWVXytyM2c1Q", + "GXKmP3hBvRnor2ieNVrcAVGgDev1HUYNrwjZ4yJDwtU5", + "J5LCsaaCWcYmzes8qwKmg89zzEtnbYkxFxD9YRU5auPY", + "11111111111111111111111111111111", + "2seGMFauXC22DX8hbop1gh54W1uW8YREWhsU7JuCptTj", + "3rmHSu74h1ZcmAisVcWerTCiRDQbUrBKmcwptYGjHfet", + "BFjrGaLwPznyjCQ47dbP8xVy1QLjdJ7w8CpwZ4sZwqpf", + "ComputeBudget111111111111111111111111111111", + "cpamdpZCGKUy5JxQXB4dcpGPiikHawvSWAd6mEn1sGG", + "dbcij3LWUppWqq96dh6gJWwBifmcGfLSB5D4DuSMaqN", + "HLnpSz9h2S4hiLQ43rnSD9XkcUThA7B8hQMKmDaiTLcC", + "Hv8Lmzmnju6m7kcokVKvwqz7QPmdX9XfKjJsXz8RXcjp", + "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "TokenzQdBNbLqP5VEhdkAS6EPFLC1PHnBqCXEpPxuEb" + ], + "header": { + "numReadonlySignedAccounts": 0, + "numReadonlyUnsignedAccounts": 11, + "numRequiredSignatures": 3 + }, + "instructions": [ + { + "accounts": [ + 15, 19, 17, 12, 4, 2, 9, 13, 1, 14, 5, 23, 21, 8, 6, 7, 3, 10, 11, + 0, 25, 25, 26, 18, 16, 24 + ], + "data": "TCqN7bA2Pd9", + "programIdIndex": 22, + "stackHeight": null + }, + { + "accounts": [], + "data": "EvSMNP", + "programIdIndex": 20, + "stackHeight": null + } + ], + "recentBlockhash": 
"3ZkGvak2RCBFjG6aTLe9m5MW67YtpZiNfCKHKFg7XZvG" + }, + "signatures": [ + "93takW7UMBsJgGNH9oARpTT5EiEtJ7c2u6PCzHAsFMQ6P2Sejy5zJpn4sAaxMLHcfLPvMtFE87piofkH22oxuFz", + "5XyrpFBpg6WY8jioU9j1dkwiZAb32oiFVCqK1nz8jVirfdMQpjiWQxWmh7SwfAAkiWfxCoeLM9fNGtLsNt7XQnnD", + "3KcFxHHuXFDUN2Z7YMBAvvGKhx5hGudQTi5JnX566aZ4MEzMaekph2tBxKTpGtNCFKVtLdeig7bHmZY2TGtkRY87" + ] + } +} diff --git a/sql/01_schema.sql b/sql/01_schema.sql index 7ce7cacc..50db7f49 100644 --- a/sql/01_schema.sql +++ b/sql/01_schema.sql @@ -7441,6 +7441,41 @@ CREATE TABLE public.sol_meteora_dbc_migrations ( COMMENT ON TABLE public.sol_meteora_dbc_migrations IS 'Tracks migrations from DBC pools to DAMM V2 pools.'; +-- +-- Name: sol_meteora_dbc_pools; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.sol_meteora_dbc_pools ( + account text NOT NULL, + slot bigint NOT NULL, + config text NOT NULL, + creator text NOT NULL, + base_mint text NOT NULL, + base_vault text NOT NULL, + quote_vault text NOT NULL, + base_reserve bigint NOT NULL, + quote_reserve bigint NOT NULL, + protocol_base_fee bigint NOT NULL, + partner_base_fee bigint NOT NULL, + partner_quote_fee bigint NOT NULL, + sqrt_price numeric NOT NULL, + activation_point bigint NOT NULL, + pool_type smallint NOT NULL, + is_migrated smallint NOT NULL, + is_partner_withdraw_surplus smallint NOT NULL, + is_protocol_withdraw_surplus smallint NOT NULL, + migration_progress smallint NOT NULL, + is_withdraw_leftover smallint NOT NULL, + is_creator_withdraw_surplus smallint NOT NULL, + migration_fee_withdraw_status smallint NOT NULL, + finish_curve_timestamp bigint NOT NULL, + creator_base_fee bigint NOT NULL, + creator_quote_fee bigint NOT NULL, + created_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, + updated_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL +); + + -- -- Name: sol_payments; Type: TABLE; Schema: public; Owner: - -- @@ -9246,6 +9281,14 @@ ALTER TABLE ONLY public.sol_meteora_dbc_migrations ADD CONSTRAINT sol_meteora_dbc_migrations_pkey PRIMARY KEY (signature, instruction_index); +-- +-- Name: sol_meteora_dbc_pools sol_meteora_dbc_pools_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.sol_meteora_dbc_pools + ADD CONSTRAINT sol_meteora_dbc_pools_pkey PRIMARY KEY (account); + + -- -- Name: sol_payments sol_payments_pkey; Type: CONSTRAINT; Schema: public; Owner: - -- From 38b886c4707fe4cd6653261624c86dceec1335b2 Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Tue, 14 Oct 2025 23:54:19 -0700 Subject: [PATCH 13/56] add tests for token indexer --- ...mable_tokens_transaction_test_fixture.json | 178 ++++++++++++++ solana/indexer/token/indexer.go | 6 +- solana/indexer/token/indexer_test.go | 218 ++++++++++++++++++ 3 files changed, 401 insertions(+), 1 deletion(-) create mode 100644 solana/indexer/token/claimable_tokens_transaction_test_fixture.json create mode 100644 solana/indexer/token/indexer_test.go diff --git a/solana/indexer/token/claimable_tokens_transaction_test_fixture.json b/solana/indexer/token/claimable_tokens_transaction_test_fixture.json new file mode 100644 index 00000000..3ace1d3b --- /dev/null +++ b/solana/indexer/token/claimable_tokens_transaction_test_fixture.json @@ -0,0 +1,178 @@ +{ + "blockTime": 1760507814, + "meta": { + "computeUnitsConsumed": 29640, + "err": null, + "fee": 16636, + "innerInstructions": [ + { + "index": 1, + "instructions": [ + { + "accounts": [0, 3], + "data": "3Bxs4WPPUAqJnbno", + "programIdIndex": 9, + "stackHeight": 
2 + }, + { + "accounts": [3], + "data": "9krTD1vx42WjN3ks", + "programIdIndex": 9, + "stackHeight": 2 + }, + { + "accounts": [3], + "data": "SYXsQvN8RnUmDGz5cuWrVVPxa38sqPVC1xo9DussbTkruNKv", + "programIdIndex": 9, + "stackHeight": 2 + }, + { + "accounts": [1, 2, 6, 6], + "data": "3dc9GqkYQ76X", + "programIdIndex": 10, + "stackHeight": 2 + } + ] + } + ], + "loadedAddresses": { "readonly": [], "writable": [] }, + "logMessages": [ + "Program Ewkv3JahEFRKkcJmpoKB7pXbnUHwjAyXiwEo4ZY2rezQ invoke [1]", + "Program log: Instruction: Transfer", + "Program 11111111111111111111111111111111 invoke [2]", + "Program 11111111111111111111111111111111 success", + "Program 11111111111111111111111111111111 invoke [2]", + "Program 11111111111111111111111111111111 success", + "Program 11111111111111111111111111111111 invoke [2]", + "Program 11111111111111111111111111111111 success", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA invoke [2]", + "Program log: Instruction: Transfer", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA consumed 4728 of 20128 compute units", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA success", + "Program Ewkv3JahEFRKkcJmpoKB7pXbnUHwjAyXiwEo4ZY2rezQ consumed 29340 of 44235 compute units", + "Program Ewkv3JahEFRKkcJmpoKB7pXbnUHwjAyXiwEo4ZY2rezQ success", + "Program ComputeBudget111111111111111111111111111111 invoke [1]", + "Program ComputeBudget111111111111111111111111111111 success", + "Program ComputeBudget111111111111111111111111111111 invoke [1]", + "Program ComputeBudget111111111111111111111111111111 success" + ], + "postBalances": [ + 1486794178, 2039280, 2039280, 953520, 1, 1141440, 0, 1009200, 0, 1, + 5299606121, 1 + ], + "postTokenBalances": [ + { + "accountIndex": 1, + "mint": "EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v", + "owner": "F1vVY6VtF5oLT2QYEqy6276JGGhgaLEDZMamoFsJWSYk", + "programId": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "uiTokenAmount": { + "amount": "3300", + "decimals": 6, + "uiAmount": 0.0033, + "uiAmountString": "0.0033" + } + }, + { + "accountIndex": 2, + "mint": "EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v", + "owner": "FFwKgUzzmvFv1mhqexs2muRAphgMMyR1kMtiigPeoksw", + "programId": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "uiTokenAmount": { + "amount": "1430000", + "decimals": 6, + "uiAmount": 1.43, + "uiAmountString": "1.43" + } + } + ], + "preBalances": [ + 1487764334, 2039280, 2039280, 0, 1, 1141440, 0, 1009200, 0, 1, 5299606121, + 1 + ], + "preTokenBalances": [ + { + "accountIndex": 1, + "mint": "EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v", + "owner": "F1vVY6VtF5oLT2QYEqy6276JGGhgaLEDZMamoFsJWSYk", + "programId": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "uiTokenAmount": { + "amount": "93300", + "decimals": 6, + "uiAmount": 0.0933, + "uiAmountString": "0.0933" + } + }, + { + "accountIndex": 2, + "mint": "EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v", + "owner": "FFwKgUzzmvFv1mhqexs2muRAphgMMyR1kMtiigPeoksw", + "programId": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "uiTokenAmount": { + "amount": "1340000", + "decimals": 6, + "uiAmount": 1.34, + "uiAmountString": "1.34" + } + } + ], + "rewards": [], + "status": { "Ok": null } + }, + "slot": 373471965, + "transaction": { + "message": { + "accountKeys": [ + "HXqdXhJiRe2reQVWmWq13V8gjGtVP7rSh27va5gC3M3P", + "DUiUiDme6XoqaD86AdmqY2BDSg3PrCidszKpNbZhfkpo", + "AaF7Y7PCk54xrBvbwJEbGY8p5FnZ2zjzzPRnY4VsF17n", + "7dYBN19SXTGfJdopv2pWPuvUbHokKFHHY1oe9NN85ef9", + "KeccakSecp256k11111111111111111111111111111", + 
"Ewkv3JahEFRKkcJmpoKB7pXbnUHwjAyXiwEo4ZY2rezQ", + "F1vVY6VtF5oLT2QYEqy6276JGGhgaLEDZMamoFsJWSYk", + "SysvarRent111111111111111111111111111111111", + "Sysvar1nstructions1111111111111111111111111", + "11111111111111111111111111111111", + "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "ComputeBudget111111111111111111111111111111" + ], + "addressTableLookups": [], + "header": { + "numReadonlySignedAccounts": 0, + "numReadonlyUnsignedAccounts": 8, + "numRequiredSignatures": 1 + }, + "instructions": [ + { + "accounts": [], + "data": "H4eCheRWTZDTCFYQbQi57mJyCuuD2F4LqtYatMcPC6263MUTFc88ynMjHHxwhdQbHv3VskWkRfxcKCw4rB3xtRDruTyVTwJ8bd28pCqPXnpwBqubQ28TraiiGGXE2YgbptFp8Umbq1jzAWT5G3cCa93Huq7kJum6McdgmsdeK2SB1CvNA1d24f1MBUE5YES7jGkLo", + "programIdIndex": 4, + "stackHeight": null + }, + { + "accounts": [0, 1, 2, 3, 6, 7, 8, 9, 10], + "data": "5XWEdf4QuNwprrC9oHM5FubyAyXR", + "programIdIndex": 5, + "stackHeight": null + }, + { + "accounts": [], + "data": "3uedW6ymeow5", + "programIdIndex": 11, + "stackHeight": null + }, + { + "accounts": [], + "data": "KJ1vKh", + "programIdIndex": 11, + "stackHeight": null + } + ], + "recentBlockhash": "BzZuaV5cYjfPVytC8Vh66oZkwP8mWzryYtKpwJQZEU6S" + }, + "signatures": [ + "3HKyrGEH5nDJfMfuJk5cNEBSxNWZb3yBNksqFgEy1XLhwMV31oxyiG5Ju84i6EEYp2gk8EhDohroPrDGVWsK6hJY" + ] + }, + "version": 0 +} diff --git a/solana/indexer/token/indexer.go b/solana/indexer/token/indexer.go index 0b2b9f26..0c19e604 100644 --- a/solana/indexer/token/indexer.go +++ b/solana/indexer/token/indexer.go @@ -186,7 +186,11 @@ func (d *Indexer) HandleUpdate(ctx context.Context, msg *pb.SubscribeUpdate) err trackedMints := msg.Filters // Process balance changes for this subscription's mints - err = common.ProcessBalanceChanges(ctx, d.pool, accUpdate.Slot, txRes.Meta, tx, txRes.BlockTime.Time(), trackedMints, d.logger) + logger := d.logger.With( + zap.String("signature", txSig.String()), + zap.Uint64("slot", txRes.Slot), + ) + err = common.ProcessBalanceChanges(ctx, d.pool, txRes.Slot, txRes.Meta, tx, txRes.BlockTime.Time(), trackedMints, logger) if err != nil { return fmt.Errorf("failed to process balance changes: %w", err) } diff --git a/solana/indexer/token/indexer_test.go b/solana/indexer/token/indexer_test.go new file mode 100644 index 00000000..ee61484b --- /dev/null +++ b/solana/indexer/token/indexer_test.go @@ -0,0 +1,218 @@ +package token + +import ( + "encoding/json" + "os" + "testing" + + "api.audius.co/database" + "api.audius.co/solana/indexer/common" + "api.audius.co/solana/indexer/fake_rpc_client" + "github.com/gagliardetto/solana-go" + "github.com/gagliardetto/solana-go/rpc" + "github.com/jackc/pgx/v5" + "github.com/maypok86/otter" + pb "github.com/rpcpool/yellowstone-grpc/examples/golang/proto" + "github.com/test-go/testify/assert" + "github.com/test-go/testify/require" + "go.uber.org/zap" +) + +func TestHandleUpdate_SlotCheckpoint(t *testing.T) { + pool := database.CreateTestDatabase(t, "test_solana_indexer_damm_v2") + rpcClient := fake_rpc_client.FakeRpcClient{} + logger := zap.NewNop() + + indexer := New(common.GrpcConfig{}, &rpcClient, pool, nil, logger) + + expectedSlot := uint64(1500) + + request := pb.SubscribeRequest{} + checkpointId, err := common.InsertCheckpointStart(t.Context(), pool, "test", 1000, &request) + update := pb.SubscribeUpdate{ + Filters: []string{checkpointId}, + UpdateOneof: &pb.SubscribeUpdate_Slot{ + Slot: &pb.SubscribeUpdateSlot{ + Slot: expectedSlot, + }, + }, + } + + indexer.HandleUpdate(t.Context(), &update) + + slot, err := 
common.GetCheckpointSlot(t.Context(), pool, "test", &request) + require.NoError(t, err) + assert.Equal(t, expectedSlot, slot, "checkpoint slot should be updated") +} + +func TestHandleUpdate_BalanceChange(t *testing.T) { + pool := database.CreateTestDatabase(t, "test_solana_indexer_token") + + mint := "EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v" + sender := "F1vVY6VtF5oLT2QYEqy6276JGGhgaLEDZMamoFsJWSYk" + senderTokenAccount := "DUiUiDme6XoqaD86AdmqY2BDSg3PrCidszKpNbZhfkpo" + expectedSenderBalance := uint64(3300) + + sender2 := "2bX4g7yV3aHjv6v1d8Z8n5K5e5f5L5e5f5L5e5f5L5e5" + sender2TokenAccount := "8bX4g7yV3aHjv6v1d8Z8n5K5e5f5L5e5f5L5e5f5L5e5" + expectedSender2Balance := uint64(1000) + + receiver := "FFwKgUzzmvFv1mhqexs2muRAphgMMyR1kMtiigPeoksw" + receiverTokenAccount := "AaF7Y7PCk54xrBvbwJEbGY8p5FnZ2zjzzPRnY4VsF17n" + expectedReceiverBalance := uint64(1430000) + + database.Seed(pool, database.FixtureMap{ + "users": []map[string]any{ + {"user_id": 1, "wallet": "0x123"}, + {"user_id": 2}, + }, + "associated_wallets": []map[string]any{ + {"id": 1, "user_id": 1, "wallet": sender2, "chain": "sol"}, + {"id": 2, "user_id": 2, "wallet": receiver, "chain": "sol"}, + }, + "sol_claimable_accounts": []map[string]any{ + {"signature": "abc", "ethereum_address": "0x123", "account": senderTokenAccount, "mint": mint}, + }, + "sol_token_account_balances": []map[string]any{ + {"account": sender2TokenAccount, "mint": mint, "owner": sender2, "balance": expectedSender2Balance}, + }, + }) + + rpcClient := fake_rpc_client.FakeRpcClient{} + transactionCache, err := otter.MustBuilder[solana.Signature, *rpc.GetTransactionResult](10).Build() + require.NoError(t, err, "failed to create cache") + logger := zap.NewNop() + + txSig := solana.MustSignatureFromBase58("3HKyrGEH5nDJfMfuJk5cNEBSxNWZb3yBNksqFgEy1XLhwMV31oxyiG5Ju84i6EEYp2gk8EhDohroPrDGVWsK6hJY") + slot := uint64(373471965) + + txJson, err := os.ReadFile("./claimable_tokens_transaction_test_fixture.json") + require.NoError(t, err, "failed to read transaction test fixture") + + var txResult rpc.GetTransactionResult + err = json.Unmarshal(txJson, &txResult) + require.NoError(t, err, "failed to unmarshal transaction test fixture") + + transactionCache.Set(txSig, &txResult) + + update := pb.SubscribeUpdate{ + Filters: []string{mint}, + UpdateOneof: &pb.SubscribeUpdate_Account{ + Account: &pb.SubscribeUpdateAccount{ + Slot: slot, + Account: &pb.SubscribeUpdateAccountInfo{ + TxnSignature: txSig[:], + }, + }, + }, + } + + indexer := New(common.GrpcConfig{}, &rpcClient, pool, &transactionCache, logger) + err = indexer.HandleUpdate(t.Context(), &update) + require.NoError(t, err, "failed to handle update") + + // Check that balance changes exist + sql := ` + SELECT EXISTS ( + SELECT 1 + FROM sol_token_account_balance_changes + WHERE signature = @signature + AND mint = @mint + AND owner = @owner + AND account = @account + AND change = @change + AND balance = @balance + AND slot = @slot + LIMIT 1 + ) + ` + + // Sender balance change + var exists bool + err = pool.QueryRow(t.Context(), sql, pgx.NamedArgs{ + "signature": txSig.String(), + "mint": mint, + "owner": sender, + "account": senderTokenAccount, + "change": int64(-90000), + "balance": expectedSenderBalance, + "slot": slot, + }).Scan(&exists) + require.NoError(t, err, "failed to query for balance change") + assert.True(t, exists, "balance change should exist") + + // Receiver balance change + err = pool.QueryRow(t.Context(), sql, pgx.NamedArgs{ + "signature": txSig.String(), + "mint": mint, + "owner": receiver, + 
"account": receiverTokenAccount, + "change": int64(90000), + "balance": expectedReceiverBalance, + "slot": slot, + }).Scan(&exists) + require.NoError(t, err, "failed to query for balance change") + assert.True(t, exists, "balance change should exist") + + // Check that token account balances are updated + sql = ` + SELECT EXISTS ( + SELECT 1 + FROM sol_token_account_balances + WHERE account = @account + AND mint = @mint + AND owner = @owner + AND balance = @balance + ) + ` + + // Sender balance + err = pool.QueryRow(t.Context(), sql, pgx.NamedArgs{ + "account": senderTokenAccount, + "mint": mint, + "owner": sender, + "balance": expectedSenderBalance, + }).Scan(&exists) + require.NoError(t, err, "failed to query for sender balance") + assert.True(t, exists, "sender balance should be updated") + + // Receiver balance + err = pool.QueryRow(t.Context(), sql, pgx.NamedArgs{ + "account": receiverTokenAccount, + "mint": mint, + "owner": receiver, + "balance": expectedReceiverBalance, + }).Scan(&exists) + require.NoError(t, err, "failed to query for receiver balance") + assert.True(t, exists, "receiver balance should be updated") + + // Check user balances are updated + sql = ` + SELECT EXISTS ( + SELECT 1 + FROM sol_user_balances + WHERE user_id = @userId + AND mint = @mint + AND balance = @balance + LIMIT 1 + ) + ` + + // Sender user balance + err = pool.QueryRow(t.Context(), sql, pgx.NamedArgs{ + "userId": 1, + "mint": mint, + "balance": expectedSenderBalance + expectedSender2Balance, + }).Scan(&exists) + require.NoError(t, err, "failed to query for sender user balance") + assert.True(t, exists, "sender user balance should be updated") + + // Receiver user balance + err = pool.QueryRow(t.Context(), sql, pgx.NamedArgs{ + "userId": 2, + "mint": mint, + "balance": expectedReceiverBalance, + }).Scan(&exists) + require.NoError(t, err, "failed to query for receiver user balance") + assert.True(t, exists, "receiver user balance should be updated") +} From a8849a2265bfe88e25942820a53f6ea9e34e937c Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Wed, 15 Oct 2025 01:42:59 -0700 Subject: [PATCH 14/56] add program indexer tests, add recipient eth address to reward disbursement --- .../0172_recipient_eth_address_rewards.sql | 16 + solana/indexer/program/db_insert_test.go | 78 ---- ..._tokens_init_transaction_test_fixture.json | 130 ++++++ ...ens_transfer_transaction_test_fixture.json | 154 +++++++ ...ter_purchase_transaction_test_fixture.json | 368 ++++++++++++++++ ...ger_evaluate_transaction_test_fixture.json | 182 ++++++++ solana/indexer/program/indexer.go | 8 +- solana/indexer/program/indexer_test.go | 409 ++++++++++++++++++ solana/indexer/program/reward_manager.go | 49 ++- solana/indexer/token/indexer_test.go | 38 +- sql/01_schema.sql | 10 +- 11 files changed, 1317 insertions(+), 125 deletions(-) create mode 100644 ddl/migrations/0172_recipient_eth_address_rewards.sql delete mode 100644 solana/indexer/program/db_insert_test.go create mode 100644 solana/indexer/program/fixtures/claimable_tokens_init_transaction_test_fixture.json create mode 100644 solana/indexer/program/fixtures/claimable_tokens_transfer_transaction_test_fixture.json create mode 100644 solana/indexer/program/fixtures/payment_router_purchase_transaction_test_fixture.json create mode 100644 solana/indexer/program/fixtures/reward_manager_evaluate_transaction_test_fixture.json create mode 100644 solana/indexer/program/indexer_test.go diff --git 
a/ddl/migrations/0172_recipient_eth_address_rewards.sql b/ddl/migrations/0172_recipient_eth_address_rewards.sql new file mode 100644 index 00000000..c4cb09d5 --- /dev/null +++ b/ddl/migrations/0172_recipient_eth_address_rewards.sql @@ -0,0 +1,16 @@ +BEGIN; +ALTER TABLE sol_reward_disbursements +ADD COLUMN IF NOT EXISTS recipient_eth_address TEXT DEFAULT ''; +COMMENT ON COLUMN sol_reward_disbursements.recipient_eth_address IS 'The Ethereum address of the recipient of the reward.'; + +UPDATE sol_reward_disbursements +SET recipient_eth_address = users.wallet +FROM users +JOIN sol_claimable_accounts + ON sol_claimable_accounts.ethereum_address = users.wallet +WHERE sol_claimable_accounts.account = sol_reward_disbursements.user_bank + AND sol_reward_disbursements.recipient_eth_address = ''; + +ALTER TABLE sol_reward_disbursements +ALTER COLUMN recipient_eth_address DROP DEFAULT; +COMMIT; \ No newline at end of file diff --git a/solana/indexer/program/db_insert_test.go b/solana/indexer/program/db_insert_test.go deleted file mode 100644 index 2335673c..00000000 --- a/solana/indexer/program/db_insert_test.go +++ /dev/null @@ -1,78 +0,0 @@ -package program - -import ( - "testing" - - "api.audius.co/database" - "github.com/test-go/testify/assert" -) - -// Ensures the database matches the expected schema for the inserts -func TestInserts(t *testing.T) { - pool := database.CreateTestDatabase(t, "test_solana_indexer_program") - defer pool.Close() - - err := insertClaimableAccount(t.Context(), pool, claimableAccountsRow{ - signature: "signature2", - instructionIndex: 0, - slot: 12345, - mint: "mint2", - ethereumAddress: "0x1234567890abcdef1234567890abcdef", - account: "account2", - }) - assert.NoError(t, err, "failed to insert claimable account") - - err = insertClaimableAccountTransfer(t.Context(), pool, claimableAccountTransfersRow{ - signature: "signature3", - instructionIndex: 0, - amount: 1000, - slot: 12345, - fromAccount: "fromAccount2", - toAccount: "toAccount2", - senderEthAddress: "0xabcdef1234567890abcdef1234567890", - }) - assert.NoError(t, err, "failed to insert claimable account transfer") - - err = insertPayment(t.Context(), pool, paymentRow{ - signature: "signature4", - instructionIndex: 0, - amount: 5000, - slot: 12345, - routeIndex: 0, - toAccount: "toAccount3", - }) - assert.NoError(t, err, "failed to insert payment router transaction") - - err = insertPurchase(t.Context(), pool, purchaseRow{ - signature: "signature5", - instructionIndex: 0, - amount: 10000, - slot: 12345, - fromAccount: "fromAccount3", - parsedPurchaseMemo: parsedPurchaseMemo{ - ContentId: 123, - ContentType: "track", - ValidAfterBlocknumber: 12345678, - BuyerUserId: 1, - AccessType: "stream", - }, - parsedLocationMemo: parsedLocationMemo{ - City: "San Francisco", - Country: "USA", - Region: "California", - }, - isValid: nil, - }) - assert.NoError(t, err, "failed to insert purchase") - - err = insertRewardDisbursement(t.Context(), pool, rewardDisbursementsRow{ - signature: "signature6", - instructionIndex: 0, - amount: 2000, - slot: 12345, - userBank: "userBank1", - challengeId: "challenge1", - specifier: "specifier1", - }) - assert.NoError(t, err, "failed to insert reward disbursement") -} diff --git a/solana/indexer/program/fixtures/claimable_tokens_init_transaction_test_fixture.json b/solana/indexer/program/fixtures/claimable_tokens_init_transaction_test_fixture.json new file mode 100644 index 00000000..5307cf8d --- /dev/null +++ b/solana/indexer/program/fixtures/claimable_tokens_init_transaction_test_fixture.json 
@@ -0,0 +1,130 @@ +{ + "blockTime": 1760512292, + "meta": { + "computeUnitsConsumed": 25901, + "err": null, + "fee": 10794, + "innerInstructions": [ + { + "index": 0, + "instructions": [ + { + "accounts": [0, 1], + "data": "3Bxs4h24hBtQy9rw", + "programIdIndex": 7, + "stackHeight": 2 + }, + { + "accounts": [1, 4], + "data": "2h1LQHBzPPUdytA5unYoiBM9hyMMstpxjeEugC473kPsrMnebSwevdmSiZWq4Ti9VunYC6N6VmGFv5a4UvXuku2MHMdecKFBQ7Qp2c3ULmxwFcHRcUKnBk1TAx19FP428jPM6dou5gBre4qGes8qMRh2G", + "programIdIndex": 7, + "stackHeight": 2 + }, + { + "accounts": [1, 4], + "data": "3YW74tADuZCgn2JRiKLJoBypjYj1BEc7gSJBX8tWTgQiGQpT4dAcuRMPeA5KuGpUTG5U8y6qkTPhenRXWrKwNvmYHJ9RhxwMoxZpWamAMKYx35QDmzFDVDjFh1G5h9zDETbgLfuevPwseG", + "programIdIndex": 7, + "stackHeight": 2 + }, + { + "accounts": [1, 3, 4, 5], + "data": "2", + "programIdIndex": 6, + "stackHeight": 2 + } + ] + } + ], + "loadedAddresses": { "readonly": [], "writable": [] }, + "logMessages": [ + "Program Ewkv3JahEFRKkcJmpoKB7pXbnUHwjAyXiwEo4ZY2rezQ invoke [1]", + "Program log: Instruction: CreateTokenAccount", + "Program 11111111111111111111111111111111 invoke [2]", + "Program 11111111111111111111111111111111 success", + "Program 11111111111111111111111111111111 invoke [2]", + "Program 11111111111111111111111111111111 success", + "Program 11111111111111111111111111111111 invoke [2]", + "Program 11111111111111111111111111111111 success", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA invoke [2]", + "Program log: Instruction: InitializeAccount", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA consumed 4528 of 17988 compute units", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA success", + "Program Ewkv3JahEFRKkcJmpoKB7pXbnUHwjAyXiwEo4ZY2rezQ consumed 25601 of 38626 compute units", + "Program Ewkv3JahEFRKkcJmpoKB7pXbnUHwjAyXiwEo4ZY2rezQ success", + "Program ComputeBudget111111111111111111111111111111 invoke [1]", + "Program ComputeBudget111111111111111111111111111111 success", + "Program ComputeBudget111111111111111111111111111111 invoke [1]", + "Program ComputeBudget111111111111111111111111111111 success" + ], + "postBalances": [ + 1402858260, 2039280, 1141440, 419042035970, 0, 1009200, 5299606121, 1, 1 + ], + "postTokenBalances": [ + { + "accountIndex": 1, + "mint": "EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v", + "owner": "F1vVY6VtF5oLT2QYEqy6276JGGhgaLEDZMamoFsJWSYk", + "programId": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "uiTokenAmount": { + "amount": "0", + "decimals": 6, + "uiAmount": null, + "uiAmountString": "0" + } + } + ], + "preBalances": [ + 1404908334, 0, 1141440, 419042035970, 0, 1009200, 5299606121, 1, 1 + ], + "preTokenBalances": [], + "rewards": [], + "status": { "Ok": null } + }, + "slot": 373483258, + "transaction": { + "message": { + "accountKeys": [ + "7KTfSwSFCqbuJDRbCCjM2vkS3ocNbXpc76F5KV5BSwLp", + "C9v6wyJRwtwKiGsozxAykkk2jkVnxGf7buzkXhfGvApY", + "Ewkv3JahEFRKkcJmpoKB7pXbnUHwjAyXiwEo4ZY2rezQ", + "EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v", + "F1vVY6VtF5oLT2QYEqy6276JGGhgaLEDZMamoFsJWSYk", + "SysvarRent111111111111111111111111111111111", + "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "11111111111111111111111111111111", + "ComputeBudget111111111111111111111111111111" + ], + "addressTableLookups": [], + "header": { + "numReadonlySignedAccounts": 0, + "numReadonlyUnsignedAccounts": 7, + "numRequiredSignatures": 1 + }, + "instructions": [ + { + "accounts": [0, 3, 4, 1, 5, 6, 7], + "data": "13yEGisXA11xfqw9YmywSHmW7HpuC", + "programIdIndex": 2, + "stackHeight": null + }, + { + "accounts": [], + 
"data": "3uedW6ymeow5", + "programIdIndex": 8, + "stackHeight": null + }, + { + "accounts": [], + "data": "KszFD9", + "programIdIndex": 8, + "stackHeight": null + } + ], + "recentBlockhash": "FxuDmYr4DWXhaFYGwvWHH5GfAw9FSa2m1RAdfj2gJ42a" + }, + "signatures": [ + "4WnUo5yDzE8yT82VzRwg9N5UC5sSXRPaaKSeQhfLSRHSCCfufpLWeg4x14dCSdAy66A5aV4ewv4KMc6fXEKwWF3m" + ] + }, + "version": 0 +} diff --git a/solana/indexer/program/fixtures/claimable_tokens_transfer_transaction_test_fixture.json b/solana/indexer/program/fixtures/claimable_tokens_transfer_transaction_test_fixture.json new file mode 100644 index 00000000..5375dd88 --- /dev/null +++ b/solana/indexer/program/fixtures/claimable_tokens_transfer_transaction_test_fixture.json @@ -0,0 +1,154 @@ +{ + "blockTime": 1760509053, + "meta": { + "computeUnitsConsumed": 24366, + "err": null, + "fee": 15449, + "innerInstructions": [ + { + "index": 1, + "instructions": [ + { + "accounts": [1, 2, 6, 6], + "data": "3Dc8EpW7Kr3R", + "programIdIndex": 10, + "stackHeight": 2 + } + ] + } + ], + "loadedAddresses": { "readonly": [], "writable": [] }, + "logMessages": [ + "Program Ewkv3JahEFRKkcJmpoKB7pXbnUHwjAyXiwEo4ZY2rezQ invoke [1]", + "Program log: Instruction: Transfer", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA invoke [2]", + "Program log: Instruction: Transfer", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA consumed 4728 of 17491 compute units", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA success", + "Program Ewkv3JahEFRKkcJmpoKB7pXbnUHwjAyXiwEo4ZY2rezQ consumed 24066 of 36324 compute units", + "Program Ewkv3JahEFRKkcJmpoKB7pXbnUHwjAyXiwEo4ZY2rezQ success", + "Program ComputeBudget111111111111111111111111111111 invoke [1]", + "Program ComputeBudget111111111111111111111111111111 success", + "Program ComputeBudget111111111111111111111111111111 invoke [1]", + "Program ComputeBudget111111111111111111111111111111 success" + ], + "postBalances": [ + 1397121645, 2039280, 2039280, 953520, 1, 1141440, 12447638, 1009200, 0, 1, + 5299606121, 1 + ], + "postTokenBalances": [ + { + "accountIndex": 1, + "mint": "9LzCMqDgTKYz9Drzqnpgee3SGa89up3a247ypMj2xrqM", + "owner": "5ZiE3vAkrdXBgyFL7KqG3RoEGBws4CjRcXVbABDLZTgx", + "programId": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "uiTokenAmount": { + "amount": "0", + "decimals": 8, + "uiAmount": null, + "uiAmountString": "0" + } + }, + { + "accountIndex": 2, + "mint": "9LzCMqDgTKYz9Drzqnpgee3SGa89up3a247ypMj2xrqM", + "owner": "5ZiE3vAkrdXBgyFL7KqG3RoEGBws4CjRcXVbABDLZTgx", + "programId": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "uiTokenAmount": { + "amount": "2161220375", + "decimals": 8, + "uiAmount": 21.61220375, + "uiAmountString": "21.61220375" + } + } + ], + "preBalances": [ + 1397137094, 2039280, 2039280, 953520, 1, 1141440, 12447638, 1009200, 0, 1, + 5299606121, 1 + ], + "preTokenBalances": [ + { + "accountIndex": 1, + "mint": "9LzCMqDgTKYz9Drzqnpgee3SGa89up3a247ypMj2xrqM", + "owner": "5ZiE3vAkrdXBgyFL7KqG3RoEGBws4CjRcXVbABDLZTgx", + "programId": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "uiTokenAmount": { + "amount": "100000000", + "decimals": 8, + "uiAmount": 1, + "uiAmountString": "1" + } + }, + { + "accountIndex": 2, + "mint": "9LzCMqDgTKYz9Drzqnpgee3SGa89up3a247ypMj2xrqM", + "owner": "5ZiE3vAkrdXBgyFL7KqG3RoEGBws4CjRcXVbABDLZTgx", + "programId": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "uiTokenAmount": { + "amount": "2061220375", + "decimals": 8, + "uiAmount": 20.61220375, + "uiAmountString": "20.61220375" + } + } + ], + "rewards": [], + "status": { "Ok": null 
} + }, + "slot": 373475076, + "transaction": { + "message": { + "accountKeys": [ + "CVz5MyiEZ8Viu2iGJzKrb5xX9CK6j9kQh6yLUKV5SocU", + "9y3ZhQkFCSy323oahK9LW9h4yKoGjWSSomqwN9QsTb6C", + "5V9wSDJKbhpjAj9FE1GWZyNqgoDSroUXJMDtNZzWHeWo", + "TVPzW1tSNyDCK7hNVkobtkjP81yztfMiNr1fxrruujP", + "KeccakSecp256k11111111111111111111111111111", + "Ewkv3JahEFRKkcJmpoKB7pXbnUHwjAyXiwEo4ZY2rezQ", + "5ZiE3vAkrdXBgyFL7KqG3RoEGBws4CjRcXVbABDLZTgx", + "SysvarRent111111111111111111111111111111111", + "Sysvar1nstructions1111111111111111111111111", + "11111111111111111111111111111111", + "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "ComputeBudget111111111111111111111111111111" + ], + "addressTableLookups": [], + "header": { + "numReadonlySignedAccounts": 0, + "numReadonlyUnsignedAccounts": 8, + "numRequiredSignatures": 1 + }, + "instructions": [ + { + "accounts": [], + "data": "H4eCheRWTZDTCFYQBwyP8gNk46pPXSdtV3jU4PmNyowNefZDRFiZWFfNvuapAo5UDRUBJoQecfxcEcsRuxJaXQZ6bobsxqw14Rv5b47KgE9zjFkYLiSt6xeNrGBhVNzW4PAxZs5uT9TuEwX8wZ1WG9gqrq1fxWv8RGZG37gULk8DMhkKCU45v7iDQ1yjD6zhNdw4B", + "programIdIndex": 4, + "stackHeight": null + }, + { + "accounts": [0, 1, 2, 3, 6, 7, 8, 9, 10], + "data": "5R6DM7EpWUHZDHy8vn1MEzwcuBgz", + "programIdIndex": 5, + "stackHeight": null + }, + { + "accounts": [], + "data": "3uedW6ymeow5", + "programIdIndex": 11, + "stackHeight": null + }, + { + "accounts": [], + "data": "KvuCST", + "programIdIndex": 11, + "stackHeight": null + } + ], + "recentBlockhash": "9Y1qdrU1yFRNtKSUWBhEtLwLofzga3hVvGn64CxSk7y4" + }, + "signatures": [ + "267Sv7Ub29fVDZ7a2ah326WwZDi5FfitiAwJ8Dvx6TKnBwRzZNELT9zf1LCrFNHj8sT88PJGx7yFQaLUw2AcKnDV" + ] + }, + "version": 0 +} diff --git a/solana/indexer/program/fixtures/payment_router_purchase_transaction_test_fixture.json b/solana/indexer/program/fixtures/payment_router_purchase_transaction_test_fixture.json new file mode 100644 index 00000000..51771e27 --- /dev/null +++ b/solana/indexer/program/fixtures/payment_router_purchase_transaction_test_fixture.json @@ -0,0 +1,368 @@ +{ + "blockTime": 1760412670, + "meta": { + "computeUnitsConsumed": 167446, + "err": null, + "fee": 15730, + "innerInstructions": [ + { + "index": 2, + "instructions": [ + { + "accounts": [0, 2, 1, 14, 17, 9], + "data": "1", + "programIdIndex": 18, + "stackHeight": 2 + }, + { + "accounts": [14], + "data": "84eT", + "programIdIndex": 9, + "stackHeight": 3 + }, + { + "accounts": [0, 2], + "data": "11119os1e9qSs2u7TsThXqkBSRVFxhmYaFKFZ1waB2X7armDmvK3p5GmLdUxYdg3h7QSrL", + "programIdIndex": 17, + "stackHeight": 3 + }, + { + "accounts": [2], + "data": "P", + "programIdIndex": 9, + "stackHeight": 3 + }, + { + "accounts": [2, 14], + "data": "6aWqHN3VVRphAnAZNUJoXFNKoVfnUfTxAoYNhk4UW71Jj", + "programIdIndex": 9, + "stackHeight": 3 + }, + { + "accounts": [2, 1], + "data": "bnuDMkWuL7Hagdo3XSspgyGwU8ryhyKLTvEdrBcGeAbNye5", + "programIdIndex": 9, + "stackHeight": 2 + }, + { + "accounts": [13, 2, 13], + "data": "3mimF1vf45io", + "programIdIndex": 9, + "stackHeight": 2 + } + ] + }, + { + "index": 4, + "instructions": [ + { + "accounts": [4, 5, 11, 11], + "data": "3VgYDANxsyLs", + "programIdIndex": 9, + "stackHeight": 2 + }, + { + "accounts": [4, 6, 11, 11], + "data": "3VfVJ4RDQDb5", + "programIdIndex": 9, + "stackHeight": 2 + } + ] + }, + { + "index": 7, + "instructions": [ + { + "accounts": [2, 0, 0], + "data": "A", + "programIdIndex": 9, + "stackHeight": 2 + } + ] + } + ], + "loadedAddresses": { + "readonly": [ + "EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v", + "Sysvar1nstructions1111111111111111111111111", + 
"SysvarRent111111111111111111111111111111111", + "11111111111111111111111111111111", + "ATokenGPvbdGVxr1b2hvZbsiqW5xWH25efTNsLJA8knL", + "5FC5n5aQhegvcDjofnVGXPcUg5YenJVatzfNt25HVpGf" + ], + "writable": ["HKeSPzkRKok3G7Et6yzF6myDjz7ximk4iyPdqtFE15Pm"] + }, + "logMessages": [ + "Program ComputeBudget111111111111111111111111111111 invoke [1]", + "Program ComputeBudget111111111111111111111111111111 success", + "Program ComputeBudget111111111111111111111111111111 invoke [1]", + "Program ComputeBudget111111111111111111111111111111 success", + "Program FD1amxhTsDpwzoVX41dxp2ygAESURV2zdUACzxM1Dfw9 invoke [1]", + "Program log: Instruction: RedeemCredits", + "Program ATokenGPvbdGVxr1b2hvZbsiqW5xWH25efTNsLJA8knL invoke [2]", + "Program log: Create", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA invoke [3]", + "Program log: Instruction: GetAccountDataSize", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA consumed 1622 of 154663 compute units", + "Program return: TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA pQAAAAAAAAA=", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA success", + "Program 11111111111111111111111111111111 invoke [3]", + "Program 11111111111111111111111111111111 success", + "Program log: Initialize the associated token account", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA invoke [3]", + "Program log: Instruction: InitializeImmutableOwner", + "Program log: Please upgrade to SPL Token 2022 for immutable owner support", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA consumed 1405 of 148023 compute units", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA success", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA invoke [3]", + "Program log: Instruction: InitializeAccount3", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA consumed 4241 of 144139 compute units", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA success", + "Program ATokenGPvbdGVxr1b2hvZbsiqW5xWH25efTNsLJA8knL consumed 22044 of 161638 compute units", + "Program ATokenGPvbdGVxr1b2hvZbsiqW5xWH25efTNsLJA8knL success", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA invoke [2]", + "Program log: Instruction: SetAuthority", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA consumed 2896 of 137043 compute units", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA success", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA invoke [2]", + "Program log: Instruction: Transfer", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA consumed 4603 of 121563 compute units", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA success", + "Program FD1amxhTsDpwzoVX41dxp2ygAESURV2zdUACzxM1Dfw9 consumed 75131 of 190699 compute units", + "Program FD1amxhTsDpwzoVX41dxp2ygAESURV2zdUACzxM1Dfw9 success", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA invoke [1]", + "Program log: Instruction: Transfer", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA consumed 4693 of 115568 compute units", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA success", + "Program paytYpX3LPN98TAeen6bFFeraGSuWnomZmCXjAsoqPa invoke [1]", + "Program log: Instruction: Route", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA invoke [2]", + "Program log: Instruction: Transfer", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA consumed 4728 of 100138 compute units", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA success", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA invoke [2]", + "Program log: Instruction: Transfer", + "Program 
TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA consumed 4728 of 92409 compute units", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA success", + "Program log: All transfers complete!", + "Program paytYpX3LPN98TAeen6bFFeraGSuWnomZmCXjAsoqPa consumed 23837 of 110875 compute units", + "Program paytYpX3LPN98TAeen6bFFeraGSuWnomZmCXjAsoqPa success", + "Program MemoSq4gqABAXKb96qnH8TysNcWxMyWCqXgDLGmfcHr invoke [1]", + "Program log: Memo (len 42): \"track:194382587:106452905:474820902:stream\"", + "Program MemoSq4gqABAXKb96qnH8TysNcWxMyWCqXgDLGmfcHr consumed 16611 of 87038 compute units", + "Program MemoSq4gqABAXKb96qnH8TysNcWxMyWCqXgDLGmfcHr success", + "Program MemoSq4gqABAXKb96qnH8TysNcWxMyWCqXgDLGmfcHr invoke [1]", + "Program log: Memo (len 64): \"geo:{\\\"city\\\":\\\"Newark\\\",\\\"region\\\":\\\"Texas\\\",\\\"country\\\":\\\"United States\\\"}\"", + "Program MemoSq4gqABAXKb96qnH8TysNcWxMyWCqXgDLGmfcHr consumed 24112 of 70427 compute units", + "Program MemoSq4gqABAXKb96qnH8TysNcWxMyWCqXgDLGmfcHr success", + "Program FD1amxhTsDpwzoVX41dxp2ygAESURV2zdUACzxM1Dfw9 invoke [1]", + "Program log: Instruction: CleanupIx", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA invoke [2]", + "Program log: Instruction: CloseAccount", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA consumed 2945 of 27589 compute units", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA success", + "Program FD1amxhTsDpwzoVX41dxp2ygAESURV2zdUACzxM1Dfw9 consumed 22762 of 46315 compute units", + "Program FD1amxhTsDpwzoVX41dxp2ygAESURV2zdUACzxM1Dfw9 success" + ], + "postBalances": [ + 1007070294, 0, 0, 0, 2039280, 2039280, 2039280, 1, 1141441, 5299606121, + 1141440, 946560, 521498895, 2039280, 419013773554, 0, 1009200, 1, + 789146954, 1426800 + ], + "postTokenBalances": [ + { + "accountIndex": 4, + "mint": "EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v", + "owner": "8L2FL5g9y9CzAFY1471tLAXBUsupdp1kNeFuP648mqxR", + "programId": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "uiTokenAmount": { + "amount": "0", + "decimals": 6, + "uiAmount": null, + "uiAmountString": "0" + } + }, + { + "accountIndex": 5, + "mint": "EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v", + "owner": "F1vVY6VtF5oLT2QYEqy6276JGGhgaLEDZMamoFsJWSYk", + "programId": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "uiTokenAmount": { + "amount": "40504905", + "decimals": 6, + "uiAmount": 40.504905, + "uiAmountString": "40.504905" + } + }, + { + "accountIndex": 6, + "mint": "EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v", + "owner": "Bn6vYeXx1SUHjtTPZrWBAvmx4RNpQtB7EJmeug4HeT7a", + "programId": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "uiTokenAmount": { + "amount": "10300000", + "decimals": 6, + "uiAmount": 10.3, + "uiAmountString": "10.3" + } + }, + { + "accountIndex": 13, + "mint": "EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v", + "owner": "HKeSPzkRKok3G7Et6yzF6myDjz7ximk4iyPdqtFE15Pm", + "programId": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "uiTokenAmount": { + "amount": "4000000", + "decimals": 6, + "uiAmount": 4, + "uiAmountString": "4" + } + } + ], + "preBalances": [ + 1005805384, 0, 0, 1280640, 2039280, 2039280, 2039280, 1, 1141441, + 5299606121, 1141440, 946560, 521498895, 2039280, 419013773554, 0, 1009200, + 1, 789146954, 1426800 + ], + "preTokenBalances": [ + { + "accountIndex": 4, + "mint": "EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v", + "owner": "8L2FL5g9y9CzAFY1471tLAXBUsupdp1kNeFuP648mqxR", + "programId": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "uiTokenAmount": { + "amount": "0", + "decimals": 6, + 
"uiAmount": null, + "uiAmountString": "0" + } + }, + { + "accountIndex": 5, + "mint": "EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v", + "owner": "F1vVY6VtF5oLT2QYEqy6276JGGhgaLEDZMamoFsJWSYk", + "programId": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "uiTokenAmount": { + "amount": "27004905", + "decimals": 6, + "uiAmount": 27.004905, + "uiAmountString": "27.004905" + } + }, + { + "accountIndex": 6, + "mint": "EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v", + "owner": "Bn6vYeXx1SUHjtTPZrWBAvmx4RNpQtB7EJmeug4HeT7a", + "programId": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "uiTokenAmount": { + "amount": "8800000", + "decimals": 6, + "uiAmount": 8.8, + "uiAmountString": "8.8" + } + }, + { + "accountIndex": 13, + "mint": "EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v", + "owner": "HKeSPzkRKok3G7Et6yzF6myDjz7ximk4iyPdqtFE15Pm", + "programId": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "uiTokenAmount": { + "amount": "19000000", + "decimals": 6, + "uiAmount": 19, + "uiAmountString": "19" + } + } + ], + "rewards": [], + "status": { "Ok": null } + }, + "slot": 373232547, + "transaction": { + "message": { + "accountKeys": [ + "Dqw5tync83A4yuzWsLrmHpU2UqLfKnJk2anjc9DSZYvZ", + "EP8cRqxh8yRLWgTrP9cBmPtQznCfFigU6dCrFUngorsm", + "8Z8rCYLuUcLfAJYPZxMgn6i9ifg9znQxrckXgZh6kYvN", + "FdsfNpUcXAHK7HMdRq64cZak6gLa4eEgSivPExRAKFD8", + "7YRsw96JjbLKfXY51c64kSvTK8opgxw292GT8J1HGKf3", + "8rdZD9XgrxxTZmK2GQaKnSUnZYfvEPFuMpzHjo3dL4Wp", + "7vGA3fcjvxa3A11MAxmyhFtYowPLLCNyvoxxgN3NN2Vf", + "ComputeBudget111111111111111111111111111111", + "FD1amxhTsDpwzoVX41dxp2ygAESURV2zdUACzxM1Dfw9", + "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "paytYpX3LPN98TAeen6bFFeraGSuWnomZmCXjAsoqPa", + "8L2FL5g9y9CzAFY1471tLAXBUsupdp1kNeFuP648mqxR", + "MemoSq4gqABAXKb96qnH8TysNcWxMyWCqXgDLGmfcHr" + ], + "addressTableLookups": [ + { + "accountKey": "J7tZMWG2B4G6ab5GJq29WUqeU5TYih56pTsBkWrxED6f", + "readonlyIndexes": [50, 78, 79, 75, 76, 36], + "writableIndexes": [60] + } + ], + "header": { + "numReadonlySignedAccounts": 0, + "numReadonlyUnsignedAccounts": 6, + "numRequiredSignatures": 2 + }, + "instructions": [ + { + "accounts": [], + "data": "EhWycB", + "programIdIndex": 7, + "stackHeight": null + }, + { + "accounts": [], + "data": "3MZf3aXK9QFy", + "programIdIndex": 7, + "stackHeight": null + }, + { + "accounts": [0, 1, 2, 14, 13, 3, 9, 15, 16, 17, 18, 19], + "data": "HgtdpwwGJw7247Cn5hUj1Z", + "programIdIndex": 8, + "stackHeight": null + }, + { + "accounts": [2, 4, 1], + "data": "3mimF1vf45io", + "programIdIndex": 9, + "stackHeight": null + }, + { + "accounts": [4, 11, 9, 5, 6], + "data": "8gKxDaCUhQW62R3YUbGsEDgRoF73dXigRoRcqzGYjsNcaR3Fd4s", + "programIdIndex": 10, + "stackHeight": null + }, + { + "accounts": [], + "data": "2wiiubwHCqaMgwuirWx5Fk8qDj6aKXzgmT3HrAkPT3UeH58o6yg8pGxVZN", + "programIdIndex": 12, + "stackHeight": null + }, + { + "accounts": [], + "data": "34u8n1gh4ChUxj7Po5bEiQ1tceStWmLZr4TLyQpxXEPb38ka7dwrWL1Vjkfru8hZ1E27iXMFZrw8fxjdgg1BBBRv", + "programIdIndex": 12, + "stackHeight": null + }, + { + "accounts": [1, 2, 13, 3, 0, 9, 15], + "data": "W3224vLAN6CfAq5SviDnNT", + "programIdIndex": 8, + "stackHeight": null + } + ], + "recentBlockhash": "9qLXH7ZG3SoLH5VGZ4UTK5MdnW98p3XGc3Co5UJM14j" + }, + "signatures": [ + "4cDX7FuWB9tZgfqaNiYjPjY2pxcWUinv5PCHhize2F73xRNqiomCBmwuxZMm1Ja9ueyaRjVBUVfgrJ3s2yFBJpq5", + "2i9rwXonEwaNfKkCRLYSmPdDmashHMQiC1yvVc8BSyhJWzJkzGGBRrUNxfAsbqX3dKXNh16hqsFQy8KotmtKPG5E" + ] + }, + "version": 0 +} diff --git 
a/solana/indexer/program/fixtures/reward_manager_evaluate_transaction_test_fixture.json b/solana/indexer/program/fixtures/reward_manager_evaluate_transaction_test_fixture.json new file mode 100644 index 00000000..cd54f071 --- /dev/null +++ b/solana/indexer/program/fixtures/reward_manager_evaluate_transaction_test_fixture.json @@ -0,0 +1,182 @@ +{ + "blockTime": 1760507800, + "meta": { + "computeUnitsConsumed": 35230, + "err": null, + "fee": 5000, + "innerInstructions": [ + { + "index": 0, + "instructions": [ + { + "accounts": [6, 2, 8], + "data": "3Dc8EpW7Kr3R", + "programIdIndex": 11, + "stackHeight": 2 + }, + { + "accounts": [0, 3], + "data": "3Bxs49175da2o1zw", + "programIdIndex": 12, + "stackHeight": 2 + }, + { + "accounts": [3], + "data": "9krTCzbLfv4BRBcj", + "programIdIndex": 12, + "stackHeight": 2 + }, + { + "accounts": [3], + "data": "SYXsPCAS12XUEFvhVCEScVBsRUs1Lvxihmo8qVdn6ETKJKzE", + "programIdIndex": 12, + "stackHeight": 2 + } + ] + } + ], + "loadedAddresses": { + "readonly": [ + "71hWFVYokLaN1PNYzTAWi13EfJ7Xt9VbSWUKsXUT8mxE", + "8n2y76BtYed3EPwAkhDgdWQNtkazw6c9gY1RXDLy37KF", + "8CrkKMAsR8pMNtmR65t5WwrLTXT1FUJRfWwUGLfMU8R1", + "SysvarRent111111111111111111111111111111111", + "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "11111111111111111111111111111111" + ], + "writable": ["3V9opXNpHmPPymKeq7CYD8wWMH8wzFXmqEkNdzfsZhYq"] + }, + "logMessages": [ + "Program DDZDcYdQFEMwcu2Mwo75yGFjJ1mUQyyXLWzhZLEVFcei invoke [1]", + "Program log: Instruction: Transfer", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA invoke [2]", + "Program log: Instruction: Transfer", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA consumed 4645 of 23081 compute units", + "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA success", + "Program 11111111111111111111111111111111 invoke [2]", + "Program 11111111111111111111111111111111 success", + "Program 11111111111111111111111111111111 invoke [2]", + "Program 11111111111111111111111111111111 success", + "Program 11111111111111111111111111111111 invoke [2]", + "Program 11111111111111111111111111111111 success", + "Program DDZDcYdQFEMwcu2Mwo75yGFjJ1mUQyyXLWzhZLEVFcei consumed 34930 of 42916 compute units", + "Program DDZDcYdQFEMwcu2Mwo75yGFjJ1mUQyyXLWzhZLEVFcei success", + "Program ComputeBudget111111111111111111111111111111 invoke [1]", + "Program ComputeBudget111111111111111111111111111111 success", + "Program ComputeBudget111111111111111111111111111111 invoke [1]", + "Program ComputeBudget111111111111111111111111111111 success" + ], + "postBalances": [ + 1414472546, 0, 2039280, 897840, 1141440, 1, 2039280, 1350240, 4396393, + 1398960, 1009200, 5299606121, 1 + ], + "postTokenBalances": [ + { + "accountIndex": 2, + "mint": "9LzCMqDgTKYz9Drzqnpgee3SGa89up3a247ypMj2xrqM", + "owner": "5ZiE3vAkrdXBgyFL7KqG3RoEGBws4CjRcXVbABDLZTgx", + "programId": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "uiTokenAmount": { + "amount": "9000000000", + "decimals": 8, + "uiAmount": 90, + "uiAmountString": "90" + } + }, + { + "accountIndex": 6, + "mint": "9LzCMqDgTKYz9Drzqnpgee3SGa89up3a247ypMj2xrqM", + "owner": "8n2y76BtYed3EPwAkhDgdWQNtkazw6c9gY1RXDLy37KF", + "programId": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "uiTokenAmount": { + "amount": "2616574775551047", + "decimals": 8, + "uiAmount": 26165747.75551047, + "uiAmountString": "26165747.75551047" + } + } + ], + "preBalances": [ + 1408401466, 6973920, 2039280, 0, 1141440, 1, 2039280, 1350240, 4396393, + 1398960, 1009200, 5299606121, 1 + ], + "preTokenBalances": [ + { + "accountIndex": 2, + "mint": 
"9LzCMqDgTKYz9Drzqnpgee3SGa89up3a247ypMj2xrqM", + "owner": "5ZiE3vAkrdXBgyFL7KqG3RoEGBws4CjRcXVbABDLZTgx", + "programId": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "uiTokenAmount": { + "amount": "8900000000", + "decimals": 8, + "uiAmount": 89, + "uiAmountString": "89" + } + }, + { + "accountIndex": 6, + "mint": "9LzCMqDgTKYz9Drzqnpgee3SGa89up3a247ypMj2xrqM", + "owner": "8n2y76BtYed3EPwAkhDgdWQNtkazw6c9gY1RXDLy37KF", + "programId": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "uiTokenAmount": { + "amount": "2616574875551047", + "decimals": 8, + "uiAmount": 26165748.75551047, + "uiAmountString": "26165748.75551047" + } + } + ], + "rewards": [], + "status": { "Ok": null } + }, + "slot": 373471928, + "transaction": { + "message": { + "accountKeys": [ + "C4MZpYiddDuWVofhs4BkUPyUiH78bFnaxhQVBB5fvko5", + "B1H9zmCzV2qB3UD25KVRFzfwuWkX2cZ3Uawe7epgboE7", + "45UesTt1A8zG6ZjKPJnigNU53DzVuoKpRCV5bKoQNEtZ", + "EHHqA7S3g6VYXMLmzmMEdoGqVZy2a7HDWNNNBSXpUAA2", + "DDZDcYdQFEMwcu2Mwo75yGFjJ1mUQyyXLWzhZLEVFcei", + "ComputeBudget111111111111111111111111111111" + ], + "addressTableLookups": [ + { + "accountKey": "4UQwpGupH66RgQrWRqmPM9Two6VJEE68VZ7GeqZ3mvVv", + "readonlyIndexes": [5, 6, 8, 1, 3, 0], + "writableIndexes": [7] + } + ], + "header": { + "numReadonlySignedAccounts": 0, + "numReadonlyUnsignedAccounts": 2, + "numRequiredSignatures": 1 + }, + "instructions": [ + { + "accounts": [1, 7, 8, 6, 2, 3, 9, 0, 10, 11, 12], + "data": "8RMoXXC1taGWJZMAAjapFJbJUckEL1jpdUW46B8BTB2DfKub1aq94cMDNLmjq5ndsDArvpNQ", + "programIdIndex": 4, + "stackHeight": null + }, + { + "accounts": [], + "data": "JJAivb", + "programIdIndex": 5, + "stackHeight": null + }, + { + "accounts": [], + "data": "3DTZbgwsozUF", + "programIdIndex": 5, + "stackHeight": null + } + ], + "recentBlockhash": "4P46LNCe9hF8BVgxeFS7MAMgsxBN9ZQqHqV5rGq1sUgu" + }, + "signatures": [ + "474H2JPrzyHgBxypz9K9V2DyZjo8ha7FhSASyc2WYseMFTJtgaBjuEaVx4p59hwVh53V1CYmEJNVUxcG1PbnTvJT" + ] + }, + "version": 0 +} diff --git a/solana/indexer/program/indexer.go b/solana/indexer/program/indexer.go index b2a42528..7651b2a1 100644 --- a/solana/indexer/program/indexer.go +++ b/solana/indexer/program/indexer.go @@ -105,7 +105,7 @@ func (d *Indexer) HandleUpdate(ctx context.Context, msg *pb.SubscribeUpdate) err tx = common.ResolveLookupTables(ctx, d.rpcClient, tx, txRes.Meta) // Process the transaction - d.processTransaction(ctx, txUpdate.Slot, txRes.Meta, tx, txRes.BlockTime.Time()) + d.processTransaction(ctx, txRes.Slot, txRes.Meta, tx, txRes.BlockTime.Time()) return nil } @@ -115,9 +115,9 @@ func (d *Indexer) HandleUpdate(ctx context.Context, msg *pb.SubscribeUpdate) err func (d *Indexer) subscribe(ctx context.Context) (common.GrpcClient, error) { programIds := []string{ - d.config.SolanaConfig.RewardManagerProgramID.String(), - d.config.SolanaConfig.PaymentRouterProgramID.String(), - d.config.SolanaConfig.ClaimableTokensProgramID.String(), + reward_manager.ProgramID.String(), + payment_router.ProgramID.String(), + claimable_tokens.ProgramID.String(), } d.logger.Info("subscribing to programs...", zap.Int("count", len(programIds))) diff --git a/solana/indexer/program/indexer_test.go b/solana/indexer/program/indexer_test.go new file mode 100644 index 00000000..20cb8552 --- /dev/null +++ b/solana/indexer/program/indexer_test.go @@ -0,0 +1,409 @@ +package program + +import ( + "encoding/json" + "os" + "testing" + + "api.audius.co/config" + "api.audius.co/database" + "api.audius.co/solana/indexer/common" + "api.audius.co/solana/indexer/fake_rpc_client" + 
"api.audius.co/solana/spl/programs/claimable_tokens" + "api.audius.co/solana/spl/programs/payment_router" + "api.audius.co/solana/spl/programs/reward_manager" + "github.com/gagliardetto/solana-go" + "github.com/gagliardetto/solana-go/rpc" + "github.com/jackc/pgx/v5" + "github.com/maypok86/otter" + pb "github.com/rpcpool/yellowstone-grpc/examples/golang/proto" + "github.com/test-go/testify/assert" + "github.com/test-go/testify/require" + "go.uber.org/zap" +) + +func TestHandleUpdate_SlotCheckpoint(t *testing.T) { + pool := database.CreateTestDatabase(t, "test_solana_indexer_program") + rpcClient := fake_rpc_client.FakeRpcClient{} + logger := zap.NewNop() + + indexer := New(common.GrpcConfig{}, &rpcClient, pool, config.Cfg, nil, logger) + + expectedSlot := uint64(1500) + + request := pb.SubscribeRequest{} + checkpointId, err := common.InsertCheckpointStart(t.Context(), pool, "test", 1000, &request) + update := pb.SubscribeUpdate{ + Filters: []string{checkpointId}, + UpdateOneof: &pb.SubscribeUpdate_Slot{ + Slot: &pb.SubscribeUpdateSlot{ + Slot: expectedSlot, + }, + }, + } + + indexer.HandleUpdate(t.Context(), &update) + + slot, err := common.GetCheckpointSlot(t.Context(), pool, "test", &request) + require.NoError(t, err) + assert.Equal(t, expectedSlot, slot, "checkpoint slot should be updated") +} + +func TestHandleUpdate_ClaimableInit(t *testing.T) { + // Deps + pool := database.CreateTestDatabase(t, "test_solana_indexer_program") + rpcClient := fake_rpc_client.FakeRpcClient{} + transactionCache, err := otter.MustBuilder[solana.Signature, *rpc.GetTransactionResult](10).Build() + require.NoError(t, err, "failed to create cache") + logger := zap.NewNop() + + // Expected results + txSig := solana.MustSignatureFromBase58("4WnUo5yDzE8yT82VzRwg9N5UC5sSXRPaaKSeQhfLSRHSCCfufpLWeg4x14dCSdAy66A5aV4ewv4KMc6fXEKwWF3m") + account := "C9v6wyJRwtwKiGsozxAykkk2jkVnxGf7buzkXhfGvApY" + mint := "EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v" + ethereumAddress := "0xd52503493a1fe2d9b3dfcf412337828a97bc196f" + slot := uint64(373483258) + + // Load fixture (taken from real transaction on production) + txJson, err := os.ReadFile("./fixtures/claimable_tokens_init_transaction_test_fixture.json") + require.NoError(t, err, "failed to read transaction test fixture") + + var txResult rpc.GetTransactionResult + err = json.Unmarshal(txJson, &txResult) + require.NoError(t, err, "failed to unmarshal transaction test fixture") + + // Fixture uses production claimable tokens program ID + claimable_tokens.SetProgramID(solana.MustPublicKeyFromBase58("Ewkv3JahEFRKkcJmpoKB7pXbnUHwjAyXiwEo4ZY2rezQ")) + + // Put the transaction in the cache so that the indexer loads it from there + transactionCache.Set(txSig, &txResult) + + // Create the update + update := pb.SubscribeUpdate{ + UpdateOneof: &pb.SubscribeUpdate_Transaction{ + Transaction: &pb.SubscribeUpdateTransaction{ + Transaction: &pb.SubscribeUpdateTransactionInfo{ + Signature: txSig[:], + }, + }, + }, + } + + // Run the test + indexer := New(common.GrpcConfig{}, &rpcClient, pool, config.Cfg, &transactionCache, logger) + err = indexer.HandleUpdate(t.Context(), &update) + require.NoError(t, err, "failed to handle update") + + // Check the claimable account was inserted + var exists bool + sql := ` + SELECT EXISTS ( + SELECT 1 + FROM sol_claimable_accounts + WHERE signature = @signature + AND instruction_index = @instruction_index + AND slot = @slot + AND mint = @mint + AND ethereum_address = @ethereum_address + AND account = @account + LIMIT 1 + ) + ` + err = 
pool.QueryRow(t.Context(), sql, pgx.NamedArgs{ + "signature": txSig.String(), + "instruction_index": 0, + "slot": slot, + "mint": mint, + "ethereum_address": ethereumAddress, + "account": account, + }).Scan(&exists) + require.NoError(t, err) + assert.True(t, exists, "claimable account should exist") +} + +func TestHandleUpdate_ClaimableTransfer(t *testing.T) { + // Deps + pool := database.CreateTestDatabase(t, "test_solana_indexer_program") + rpcClient := fake_rpc_client.FakeRpcClient{} + transactionCache, err := otter.MustBuilder[solana.Signature, *rpc.GetTransactionResult](10).Build() + require.NoError(t, err, "failed to create cache") + logger := zap.NewNop() + + // Expected results + txSig := solana.MustSignatureFromBase58("267Sv7Ub29fVDZ7a2ah326WwZDi5FfitiAwJ8Dvx6TKnBwRzZNELT9zf1LCrFNHj8sT88PJGx7yFQaLUw2AcKnDV") + amount := int64(100000000) + fromAccount := "9y3ZhQkFCSy323oahK9LW9h4yKoGjWSSomqwN9QsTb6C" + toAccount := "5V9wSDJKbhpjAj9FE1GWZyNqgoDSroUXJMDtNZzWHeWo" + senderEthAddress := "0x3ced9f71aa6e8a20279a2f932d673db609f5a247" + slot := uint64(373475076) + + // Load fixture (taken from real transaction on production) + txJson, err := os.ReadFile("./fixtures/claimable_tokens_transfer_transaction_test_fixture.json") + require.NoError(t, err, "failed to read transaction test fixture") + + var txResult rpc.GetTransactionResult + err = json.Unmarshal(txJson, &txResult) + require.NoError(t, err, "failed to unmarshal transaction test fixture") + + // Fixture uses production claimable tokens program ID + claimable_tokens.SetProgramID(solana.MustPublicKeyFromBase58("Ewkv3JahEFRKkcJmpoKB7pXbnUHwjAyXiwEo4ZY2rezQ")) + + // Put the transaction in the cache so that the indexer loads it from there + transactionCache.Set(txSig, &txResult) + + // Create the update + update := pb.SubscribeUpdate{ + UpdateOneof: &pb.SubscribeUpdate_Transaction{ + Transaction: &pb.SubscribeUpdateTransaction{ + Transaction: &pb.SubscribeUpdateTransactionInfo{ + Signature: txSig[:], + }, + }, + }, + } + + // Run the test + indexer := New(common.GrpcConfig{}, &rpcClient, pool, config.Cfg, &transactionCache, logger) + err = indexer.HandleUpdate(t.Context(), &update) + require.NoError(t, err, "failed to handle update") + + // Check the claimable account transfer was inserted + var exists bool + sql := ` + SELECT EXISTS ( + SELECT 1 + FROM sol_claimable_account_transfers + WHERE signature = @signature + AND instruction_index = @instruction_index + AND amount = @amount + AND slot = @slot + AND from_account = @from_account + AND to_account = @to_account + AND sender_eth_address = @sender_eth_address + LIMIT 1 + ) + ` + err = pool.QueryRow(t.Context(), sql, pgx.NamedArgs{ + "signature": txSig.String(), + "instruction_index": 1, + "slot": slot, + "amount": amount, + "from_account": fromAccount, + "to_account": toAccount, + "sender_eth_address": senderEthAddress, + }).Scan(&exists) + require.NoError(t, err) + assert.True(t, exists, "claimable account transfer should exist") +} + +func TestHandleUpdate_RewardDisbursement(t *testing.T) { + // Deps + pool := database.CreateTestDatabase(t, "test_solana_indexer_program") + rpcClient := fake_rpc_client.FakeRpcClient{} + transactionCache, err := otter.MustBuilder[solana.Signature, *rpc.GetTransactionResult](10).Build() + require.NoError(t, err, "failed to create cache") + logger := zap.NewNop() + + // Expected results + txSig := solana.MustSignatureFromBase58("474H2JPrzyHgBxypz9K9V2DyZjo8ha7FhSASyc2WYseMFTJtgaBjuEaVx4p59hwVh53V1CYmEJNVUxcG1PbnTvJT") + amount := int64(100000000) + 
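// NOTE: challengeID and specifier correspond to the fixture's single on-chain
+ // disbursement ID ("e:29d545402025101505"); processRewardManagerInstruction
+ // splits it on ":" into the challenge_id and specifier columns.
+ 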
challengeID := "e" + specifier := "29d545402025101505" + userBank := "45UesTt1A8zG6ZjKPJnigNU53DzVuoKpRCV5bKoQNEtZ" + recipientEthAddress := "0xdffde5a630da4d07c988c668b2c929637096d96d" + slot := uint64(373471928) + + // Load fixture (taken from real transaction on production) + txJson, err := os.ReadFile("./fixtures/reward_manager_evaluate_transaction_test_fixture.json") + require.NoError(t, err, "failed to read transaction test fixture") + + var txResult rpc.GetTransactionResult + err = json.Unmarshal(txJson, &txResult) + require.NoError(t, err, "failed to unmarshal transaction test fixture") + + // Fixture uses production reward manager program ID + reward_manager.SetProgramID(solana.MustPublicKeyFromBase58("DDZDcYdQFEMwcu2Mwo75yGFjJ1mUQyyXLWzhZLEVFcei")) + + // Put the transaction in the cache so that the indexer loads it from there + transactionCache.Set(txSig, &txResult) + + // Create the update + update := pb.SubscribeUpdate{ + UpdateOneof: &pb.SubscribeUpdate_Transaction{ + Transaction: &pb.SubscribeUpdateTransaction{ + Transaction: &pb.SubscribeUpdateTransactionInfo{ + Signature: txSig[:], + }, + }, + }, + } + + // Run the test + indexer := New(common.GrpcConfig{}, &rpcClient, pool, config.Cfg, &transactionCache, logger) + err = indexer.HandleUpdate(t.Context(), &update) + require.NoError(t, err, "failed to handle update") + + // Check the claimable account transfer was inserted + var exists bool + sql := ` + SELECT EXISTS ( + SELECT 1 + FROM sol_reward_disbursements + WHERE signature = @signature + AND instruction_index = @instruction_index + AND amount = @amount + AND slot = @slot + AND user_bank = @user_bank + AND challenge_id = @challenge_id + AND specifier = @specifier + AND recipient_eth_address = @recipient_eth_address + LIMIT 1 + ) + ` + err = pool.QueryRow(t.Context(), sql, pgx.NamedArgs{ + "signature": txSig.String(), + "instruction_index": 0, + "slot": slot, + "amount": amount, + "user_bank": userBank, + "challenge_id": challengeID, + "specifier": specifier, + "recipient_eth_address": recipientEthAddress, + }).Scan(&exists) + require.NoError(t, err) + assert.True(t, exists, "reward disbursement should exist") +} + +func TestHandleUpdate_PaymentRouterPurchase(t *testing.T) { + // Deps + pool := database.CreateTestDatabase(t, "test_solana_indexer_program") + rpcClient := fake_rpc_client.FakeRpcClient{} + transactionCache, err := otter.MustBuilder[solana.Signature, *rpc.GetTransactionResult](10).Build() + require.NoError(t, err, "failed to create cache") + logger := zap.NewNop() + + // Expected results + txSig := solana.MustSignatureFromBase58("4cDX7FuWB9tZgfqaNiYjPjY2pxcWUinv5PCHhize2F73xRNqiomCBmwuxZMm1Ja9ueyaRjVBUVfgrJ3s2yFBJpq5") + fromAccount := "7YRsw96JjbLKfXY51c64kSvTK8opgxw292GT8J1HGKf3" + amount := int64(15000000) + contentType := "track" + contentID := 194382587 + validAfterBlocknumber := 106452905 + buyerUserID := 474820902 + accessType := "stream" + slot := uint64(373232547) + city := "Newark" + region := "Texas" + country := "United States" + receiver := "8rdZD9XgrxxTZmK2GQaKnSUnZYfvEPFuMpzHjo3dL4Wp" + receiverAmount := uint64(13500000) + network := "7vGA3fcjvxa3A11MAxmyhFtYowPLLCNyvoxxgN3NN2Vf" + networkAmount := uint64(1500000) + + // Load fixture (taken from real transaction on production) + txJson, err := os.ReadFile("./fixtures/payment_router_purchase_transaction_test_fixture.json") + require.NoError(t, err, "failed to read transaction test fixture") + + var txResult rpc.GetTransactionResult + err = json.Unmarshal(txJson, &txResult) + require.NoError(t, 
err, "failed to unmarshal transaction test fixture") + + // Fixture uses production reward manager program ID + payment_router.SetProgramID(solana.MustPublicKeyFromBase58("paytYpX3LPN98TAeen6bFFeraGSuWnomZmCXjAsoqPa")) + + // Put the transaction in the cache so that the indexer loads it from there + transactionCache.Set(txSig, &txResult) + + // Create the update + update := pb.SubscribeUpdate{ + UpdateOneof: &pb.SubscribeUpdate_Transaction{ + Transaction: &pb.SubscribeUpdateTransaction{ + Transaction: &pb.SubscribeUpdateTransactionInfo{ + Signature: txSig[:], + }, + }, + }, + } + + // Run the test + indexer := New(common.GrpcConfig{}, &rpcClient, pool, config.Cfg, &transactionCache, logger) + err = indexer.HandleUpdate(t.Context(), &update) + require.NoError(t, err, "failed to handle update") + + // Check the purchase was inserted + var exists bool + sql := ` + SELECT EXISTS ( + SELECT 1 + FROM sol_purchases + WHERE signature = @signature + AND instruction_index = @instruction_index + AND amount = @amount + AND slot = @slot + AND from_account = @from_account + AND content_type = @content_type + AND content_id = @content_id + AND buyer_user_id = @buyer_user_id + AND access_type = @access_type + AND valid_after_blocknumber = @valid_after_blocknumber + AND city = @city + AND region = @region + AND country = @country + LIMIT 1 + ) + ` + err = pool.QueryRow(t.Context(), sql, pgx.NamedArgs{ + "signature": txSig.String(), + "instruction_index": 4, + "slot": slot, + "amount": amount, + "from_account": fromAccount, + "content_type": contentType, + "content_id": contentID, + "buyer_user_id": buyerUserID, + "access_type": accessType, + "valid_after_blocknumber": validAfterBlocknumber, + "city": city, + "region": region, + "country": country, + }).Scan(&exists) + require.NoError(t, err) + assert.True(t, exists, "purchase should exist") + + // Check the payments were inserted + sql = ` + SELECT EXISTS ( + SELECT 1 + FROM sol_payments + WHERE signature = @signature + AND instruction_index = @instruction_index + AND amount = @amount + AND slot = @slot + AND route_index = @route_index + AND to_account = @to_account + LIMIT 1 + ) + ` + + err = pool.QueryRow(t.Context(), sql, pgx.NamedArgs{ + "signature": txSig.String(), + "instruction_index": 4, + "slot": slot, + "amount": receiverAmount, + "route_index": 0, + "to_account": receiver, + }).Scan(&exists) + require.NoError(t, err) + assert.True(t, exists, "receiver payment should exist") + + err = pool.QueryRow(t.Context(), sql, pgx.NamedArgs{ + "signature": txSig.String(), + "instruction_index": 4, + "slot": slot, + "amount": networkAmount, + "route_index": 1, + "to_account": network, + }).Scan(&exists) + require.NoError(t, err) + assert.True(t, exists, "network payment should exist") +} diff --git a/solana/indexer/program/reward_manager.go b/solana/indexer/program/reward_manager.go index ae083880..2d65d33a 100644 --- a/solana/indexer/program/reward_manager.go +++ b/solana/indexer/program/reward_manager.go @@ -35,13 +35,14 @@ func processRewardManagerInstruction( if claimInst, ok := inst.Impl.(*reward_manager.EvaluateAttestation); ok { disbursementIdParts := strings.Split(claimInst.DisbursementId, ":") err := insertRewardDisbursement(ctx, db, rewardDisbursementsRow{ - signature: signature, - instructionIndex: instructionIndex, - amount: claimInst.Amount, - slot: slot, - userBank: claimInst.DestinationUserBankAccount().PublicKey.String(), - challengeId: disbursementIdParts[0], - specifier: strings.Join(disbursementIdParts[1:], ":"), + signature: signature, + 
instructionIndex: instructionIndex, + amount: claimInst.Amount, + slot: slot, + userBank: claimInst.DestinationUserBankAccount().PublicKey.String(), + challengeId: disbursementIdParts[0], + specifier: strings.Join(disbursementIdParts[1:], ":"), + recipientEthAddress: strings.ToLower(claimInst.RecipientEthAddress.String()), }) if err != nil { return fmt.Errorf("failed to insert reward disbursement at instruction: %w", err) @@ -58,31 +59,33 @@ func processRewardManagerInstruction( } type rewardDisbursementsRow struct { - signature string - instructionIndex int - amount uint64 - slot uint64 - userBank string - challengeId string - specifier string + signature string + instructionIndex int + amount uint64 + slot uint64 + userBank string + challengeId string + specifier string + recipientEthAddress string } func insertRewardDisbursement(ctx context.Context, db database.DBTX, row rewardDisbursementsRow) error { sql := ` INSERT INTO sol_reward_disbursements - (signature, instruction_index, amount, slot, user_bank, challenge_id, specifier) + (signature, instruction_index, amount, slot, user_bank, challenge_id, specifier, recipient_eth_address) VALUES - (@signature, @instructionIndex, @amount, @slot, @userBank, @challengeId, @specifier) + (@signature, @instructionIndex, @amount, @slot, @userBank, @challengeId, @specifier, @recipientEthAddress) ON CONFLICT DO NOTHING ;` _, err := db.Exec(ctx, sql, pgx.NamedArgs{ - "signature": row.signature, - "instructionIndex": row.instructionIndex, - "amount": row.amount, - "slot": row.slot, - "userBank": row.userBank, - "challengeId": row.challengeId, - "specifier": row.specifier, + "signature": row.signature, + "instructionIndex": row.instructionIndex, + "amount": row.amount, + "slot": row.slot, + "userBank": row.userBank, + "challengeId": row.challengeId, + "specifier": row.specifier, + "recipientEthAddress": row.recipientEthAddress, }) return err } diff --git a/solana/indexer/token/indexer_test.go b/solana/indexer/token/indexer_test.go index ee61484b..333aa9fd 100644 --- a/solana/indexer/token/indexer_test.go +++ b/solana/indexer/token/indexer_test.go @@ -19,7 +19,7 @@ import ( ) func TestHandleUpdate_SlotCheckpoint(t *testing.T) { - pool := database.CreateTestDatabase(t, "test_solana_indexer_damm_v2") + pool := database.CreateTestDatabase(t, "test_solana_indexer_token") rpcClient := fake_rpc_client.FakeRpcClient{} logger := zap.NewNop() @@ -49,16 +49,16 @@ func TestHandleUpdate_BalanceChange(t *testing.T) { pool := database.CreateTestDatabase(t, "test_solana_indexer_token") mint := "EPjFWdd5AufqSSqeM2qN1xzybapC8G4wEGGkZwyTDt1v" - sender := "F1vVY6VtF5oLT2QYEqy6276JGGhgaLEDZMamoFsJWSYk" - senderTokenAccount := "DUiUiDme6XoqaD86AdmqY2BDSg3PrCidszKpNbZhfkpo" + senderOwner := "F1vVY6VtF5oLT2QYEqy6276JGGhgaLEDZMamoFsJWSYk" + sender := "DUiUiDme6XoqaD86AdmqY2BDSg3PrCidszKpNbZhfkpo" expectedSenderBalance := uint64(3300) - sender2 := "2bX4g7yV3aHjv6v1d8Z8n5K5e5f5L5e5f5L5e5f5L5e5" - sender2TokenAccount := "8bX4g7yV3aHjv6v1d8Z8n5K5e5f5L5e5f5L5e5f5L5e5" + senderOwner2 := "2bX4g7yV3aHjv6v1d8Z8n5K5e5f5L5e5f5L5e5f5L5e5" + sender2 := "8bX4g7yV3aHjv6v1d8Z8n5K5e5f5L5e5f5L5e5f5L5e5" expectedSender2Balance := uint64(1000) - receiver := "FFwKgUzzmvFv1mhqexs2muRAphgMMyR1kMtiigPeoksw" - receiverTokenAccount := "AaF7Y7PCk54xrBvbwJEbGY8p5FnZ2zjzzPRnY4VsF17n" + receiverOwner := "FFwKgUzzmvFv1mhqexs2muRAphgMMyR1kMtiigPeoksw" + receiver := "AaF7Y7PCk54xrBvbwJEbGY8p5FnZ2zjzzPRnY4VsF17n" expectedReceiverBalance := uint64(1430000) database.Seed(pool, database.FixtureMap{ @@ -67,14 +67,14 
@@ func TestHandleUpdate_BalanceChange(t *testing.T) { {"user_id": 2}, }, "associated_wallets": []map[string]any{ - {"id": 1, "user_id": 1, "wallet": sender2, "chain": "sol"}, - {"id": 2, "user_id": 2, "wallet": receiver, "chain": "sol"}, + {"id": 1, "user_id": 1, "wallet": senderOwner2, "chain": "sol"}, + {"id": 2, "user_id": 2, "wallet": receiverOwner, "chain": "sol"}, }, "sol_claimable_accounts": []map[string]any{ - {"signature": "abc", "ethereum_address": "0x123", "account": senderTokenAccount, "mint": mint}, + {"signature": "abc", "ethereum_address": "0x123", "account": sender, "mint": mint}, }, "sol_token_account_balances": []map[string]any{ - {"account": sender2TokenAccount, "mint": mint, "owner": sender2, "balance": expectedSender2Balance}, + {"account": sender2, "mint": mint, "owner": senderOwner2, "balance": expectedSender2Balance}, }, }) @@ -132,8 +132,8 @@ func TestHandleUpdate_BalanceChange(t *testing.T) { err = pool.QueryRow(t.Context(), sql, pgx.NamedArgs{ "signature": txSig.String(), "mint": mint, - "owner": sender, - "account": senderTokenAccount, + "owner": senderOwner, + "account": sender, "change": int64(-90000), "balance": expectedSenderBalance, "slot": slot, @@ -145,8 +145,8 @@ func TestHandleUpdate_BalanceChange(t *testing.T) { err = pool.QueryRow(t.Context(), sql, pgx.NamedArgs{ "signature": txSig.String(), "mint": mint, - "owner": receiver, - "account": receiverTokenAccount, + "owner": receiverOwner, + "account": receiver, "change": int64(90000), "balance": expectedReceiverBalance, "slot": slot, @@ -168,9 +168,9 @@ func TestHandleUpdate_BalanceChange(t *testing.T) { // Sender balance err = pool.QueryRow(t.Context(), sql, pgx.NamedArgs{ - "account": senderTokenAccount, + "account": sender, "mint": mint, - "owner": sender, + "owner": senderOwner, "balance": expectedSenderBalance, }).Scan(&exists) require.NoError(t, err, "failed to query for sender balance") @@ -178,9 +178,9 @@ func TestHandleUpdate_BalanceChange(t *testing.T) { // Receiver balance err = pool.QueryRow(t.Context(), sql, pgx.NamedArgs{ - "account": receiverTokenAccount, + "account": receiver, "mint": mint, - "owner": receiver, + "owner": receiverOwner, "balance": expectedReceiverBalance, }).Scan(&exists) require.NoError(t, err, "failed to query for receiver balance") diff --git a/sql/01_schema.sql b/sql/01_schema.sql index 50db7f49..19e91066 100644 --- a/sql/01_schema.sql +++ b/sql/01_schema.sql @@ -7607,7 +7607,8 @@ CREATE TABLE public.sol_reward_disbursements ( slot bigint NOT NULL, user_bank character varying NOT NULL, challenge_id character varying NOT NULL, - specifier character varying NOT NULL + specifier character varying NOT NULL, + recipient_eth_address text ); @@ -7618,6 +7619,13 @@ CREATE TABLE public.sol_reward_disbursements ( COMMENT ON TABLE public.sol_reward_disbursements IS 'Stores reward manager program Evaluate instructions for tracked mints.'; +-- +-- Name: COLUMN sol_reward_disbursements.recipient_eth_address; Type: COMMENT; Schema: public; Owner: - +-- + +COMMENT ON COLUMN public.sol_reward_disbursements.recipient_eth_address IS 'The Ethereum address of the recipient of the reward.'; + + -- -- Name: sol_slot_checkpoints; Type: TABLE; Schema: public; Owner: - -- From 05a2169c6dd1aabc906310c0690cf277c27137dc Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Wed, 15 Oct 2025 03:02:05 -0700 Subject: [PATCH 15/56] delete unused tests --- solana/indexer/processor_test.go | 758 ------------------ solana/indexer/subscription_test.go | 227 
------ .../indexer/unprocessed_transactions_test.go | 85 -- 3 files changed, 1070 deletions(-) delete mode 100644 solana/indexer/processor_test.go delete mode 100644 solana/indexer/subscription_test.go delete mode 100644 solana/indexer/unprocessed_transactions_test.go diff --git a/solana/indexer/processor_test.go b/solana/indexer/processor_test.go deleted file mode 100644 index 02dcb2a0..00000000 --- a/solana/indexer/processor_test.go +++ /dev/null @@ -1,758 +0,0 @@ -package indexer - -import ( - "bytes" - "encoding/json" - "strings" - "testing" - "time" - - "api.audius.co/config" - "api.audius.co/database" - "api.audius.co/solana/indexer/fake_rpc_client" - "api.audius.co/solana/spl/programs/claimable_tokens" - "api.audius.co/solana/spl/programs/payment_router" - "api.audius.co/solana/spl/programs/reward_manager" - "api.audius.co/solana/spl/programs/secp256k1" - "github.com/ethereum/go-ethereum/common" - bin "github.com/gagliardetto/binary" - "github.com/gagliardetto/solana-go" - "github.com/gagliardetto/solana-go/programs/memo" - "github.com/gagliardetto/solana-go/rpc" - "github.com/jackc/pgx/v5" - "github.com/pashagolub/pgxmock/v4" - "github.com/stretchr/testify/require" - "github.com/test-go/testify/assert" - "go.uber.org/zap" -) - -func TestProcessTransaction_CallsInsertClaimableAccount(t *testing.T) { - // Create a valid CreateTokenAccount instruction - ethAddress := common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678") - mint := solana.MustPublicKeyFromBase58("9LzCMqDgTKYz9Drzqnpgee3SGa89up3a247ypMj2xrqM") - payer, err := solana.NewRandomPrivateKey() - require.NoError(t, err) - createInst, err := claimable_tokens.NewCreateTokenAccountInstruction(ethAddress, mint, payer.PublicKey()) - require.NoError(t, err) - inst, err := createInst.ValidateAndBuild() - require.NoError(t, err) - - // Compose the transaction message - tx, err := solana.NewTransactionBuilder().AddInstruction(inst).Build() - require.NoError(t, err) - - _, err = tx.Sign(func(publicKey solana.PublicKey) *solana.PrivateKey { - return &payer - }) - require.NoError(t, err) - - meta := &rpc.TransactionMeta{ - LoadedAddresses: rpc.LoadedAddresses{ - Writable: []solana.PublicKey{}, - ReadOnly: []solana.PublicKey{}, - }, - } - - // Args - logger := zap.NewNop() - ctx := t.Context() - slot := uint64(1) - blockTime := time.Now() - - // Mock DB - poolMock, err := pgxmock.NewPool() - require.NoError(t, err, "failed to create mock database pool") - defer poolMock.Close() - poolMock.ExpectQuery("SELECT mint FROM artist_coins"). - WillReturnError(pgx.ErrNoRows) - poolMock.ExpectExec("INSERT INTO sol_claimable_accounts"). - WithArgs(pgx.NamedArgs{ - "signature": tx.Signatures[0].String(), - "instructionIndex": 0, - "slot": slot, - "mint": mint.String(), - "ethereumAddress": strings.ToLower(ethAddress.String()), - "account": createInst.UserBank().PublicKey.String(), - }). 
- WillReturnResult(pgxmock.NewResult("INSERT", 1)) - - p := &DefaultProcessor{ - pool: poolMock, - } - - err = p.ProcessTransaction(ctx, slot, meta, tx, blockTime, logger) - require.NoError(t, err) - assert.NoError(t, poolMock.ExpectationsWereMet()) -} - -func TestProcessTransaction_CallsInsertClaimableAccountTransfer(t *testing.T) { - // Create a valid CreateTokenAccount instruction - ethAddress := common.HexToAddress("0x1234567890abcdef1234567890abcdef12345678") - mint := solana.MustPublicKeyFromBase58("9LzCMqDgTKYz9Drzqnpgee3SGa89up3a247ypMj2xrqM") - payer, err := solana.NewRandomPrivateKey() - require.NoError(t, err) - destination, err := solana.NewRandomPrivateKey() - require.NoError(t, err) - transferInst, err := claimable_tokens.NewTransferInstruction(ethAddress, mint, payer.PublicKey(), destination.PublicKey()) - require.NoError(t, err) - inst, err := transferInst.ValidateAndBuild() - require.NoError(t, err) - - amount := uint64(1) - nonce := uint64(2) - - // Create Secp256k1 inst - msg := claimable_tokens.SignedTransferData{ - Nonce: nonce, - Destination: destination.PublicKey(), - Amount: amount, - } - message := &bytes.Buffer{} - err = bin.NewBinEncoder(message).Encode(msg) - require.NoError(t, err) - secp := secp256k1.NewSecp256k1Instruction( - ethAddress, - message.Bytes(), - []byte{}, // Doesn't matter - 0, - ).Build() - - // Compose the transaction message - tx, err := solana.NewTransactionBuilder(). - AddInstruction(secp). - AddInstruction(inst). - SetFeePayer(payer.PublicKey()). - Build() - require.NoError(t, err) - - _, err = tx.Sign(func(publicKey solana.PublicKey) *solana.PrivateKey { - return &payer - }) - require.NoError(t, err) - - meta := &rpc.TransactionMeta{ - LoadedAddresses: rpc.LoadedAddresses{ - Writable: []solana.PublicKey{}, - ReadOnly: []solana.PublicKey{}, - }, - } - - // Args - logger := zap.NewNop() - ctx := t.Context() - slot := uint64(1) - blockTime := time.Now() - - // Mock DB - poolMock, err := pgxmock.NewPool() - require.NoError(t, err, "failed to create mock database pool") - defer poolMock.Close() - poolMock.ExpectQuery("SELECT mint FROM artist_coins"). - WillReturnError(pgx.ErrNoRows) - poolMock.ExpectExec("INSERT INTO sol_claimable_account_transfers"). - WithArgs(pgx.NamedArgs{ - "signature": tx.Signatures[0].String(), - "instructionIndex": 1, - "amount": amount, - "slot": slot, - "fromAccount": transferInst.SenderUserBank().PublicKey.String(), - "toAccount": transferInst.Destination().PublicKey.String(), - "senderEthAddress": strings.ToLower(ethAddress.String()), - }). 
- WillReturnResult(pgxmock.NewResult("INSERT", 1)) - - p := &DefaultProcessor{ - pool: poolMock, - } - - err = p.ProcessTransaction(ctx, slot, meta, tx, blockTime, logger) - assert.NoError(t, err) - assert.NoError(t, poolMock.ExpectationsWereMet()) -} - -func TestProcessTransaction_CallsInsertRewardDisbursement(t *testing.T) { - // Setup EvaluateAttestation instruction - ethAddress := common.HexToAddress("0x3f6d9fcf0d4466dd5886e3b1def017adfb7916b4") - rewardState := solana.MustPublicKeyFromBase58("GaiG9LDYHfZGqeNaoGRzFEnLiwUT7WiC6sA6FDJX9ZPq") - destinationUserBank := solana.MustPublicKeyFromBase58("Cjv8dvVfWU8wUYAR82T5oZ4nHLB6EyGNvpPBzw3r76Qy") - authority := solana.MustPublicKeyFromBase58("6mpecd6bJCpH8oDwwjqPzTPU6QacnwW3cR9pAwEwkYJa") - tokenSource := solana.MustPublicKeyFromBase58("HJQj8P47BdA7ugjQEn45LaESYrxhiZDygmukt8iumFZJ") - payer, err := solana.NewRandomPrivateKey() - require.NoError(t, err) - disbursement := solana.MustPublicKeyFromBase58("3qQfuDEBWEmxRo5G4J2a4eYUVf9u1LWzLgRPndiwew2w") - oracle := solana.MustPublicKeyFromBase58("FNz5mur7EFh1LyH5HDaKyWVx7vcfGK6gRizEpDqMfgGk") - amount := uint64(200000000) - disbursementId := "ft:37364e80" - - inst := reward_manager.NewEvaluateAttestationInstructionBuilder(). - SetDisbursementId(disbursementId). - SetRecipientEthAddress(ethAddress). - SetAmount(amount). - SetAttestationsAccount(rewardState). - SetRewardManagerStateAccount(rewardState). - SetAuthorityAccount(authority). - SetTokenSourceAccount(tokenSource). - SetDestinationUserBankAccount(destinationUserBank). - SetDisbursementAccount(disbursement). - SetAntiAbuseOracleAccount(oracle). - SetPayerAccount(payer.PublicKey()) - require.NoError(t, inst.Validate()) - - tx, err := solana.NewTransactionBuilder(). - AddInstruction(inst.Build()). - Build() - require.NoError(t, err) - - signatures, err := tx.Sign(func(publicKey solana.PublicKey) *solana.PrivateKey { - return &payer - }) - require.NoError(t, err) - - meta := &rpc.TransactionMeta{ - LoadedAddresses: rpc.LoadedAddresses{ - Writable: []solana.PublicKey{}, - ReadOnly: []solana.PublicKey{}, - }, - } - - // Args - logger := zap.NewNop() - ctx := t.Context() - slot := uint64(1) - blockTime := time.Now() - - // Mock DB - poolMock, err := pgxmock.NewPool() - require.NoError(t, err, "failed to create mock database pool") - defer poolMock.Close() - poolMock.ExpectQuery("SELECT mint FROM artist_coins"). - WillReturnError(pgx.ErrNoRows) - poolMock.ExpectExec("INSERT INTO sol_reward_disbursements"). - WithArgs(pgx.NamedArgs{ - "signature": signatures[0].String(), - "instructionIndex": 0, - "amount": amount, - "slot": slot, - "userBank": destinationUserBank.String(), - "challengeId": "ft", - "specifier": "37364e80", - }). - WillReturnResult(pgxmock.NewResult("INSERT", 1)) - - p := &DefaultProcessor{ - pool: poolMock, - } - - err = p.ProcessTransaction(ctx, slot, meta, tx, blockTime, logger) - assert.NoError(t, err) - assert.NoError(t, poolMock.ExpectationsWereMet()) -} - -func TestProcessTransaction_CallsInsertPayment(t *testing.T) { - // Setup Route instruction - sender, err := solana.NewRandomPrivateKey() - require.NoError(t, err) - - dest, err := solana.NewRandomPrivateKey() - require.NoError(t, err) - - amount := uint64(1000) - routeInst := payment_router.NewRouteInstruction( - sender.PublicKey(), - sender.PublicKey(), - uint8(0), - map[solana.PublicKey]uint64{ - dest.PublicKey(): amount, - }, - ).Build() - - payer, err := solana.NewRandomPrivateKey() - require.NoError(t, err) - - tx, err := solana.NewTransactionBuilder(). 
- AddInstruction(routeInst). - SetFeePayer(payer.PublicKey()). - Build() - require.NoError(t, err) - - signatures, err := tx.Sign(func(publicKey solana.PublicKey) *solana.PrivateKey { - return &payer - }) - require.NoError(t, err) - - meta := &rpc.TransactionMeta{ - LoadedAddresses: rpc.LoadedAddresses{ - Writable: []solana.PublicKey{}, - ReadOnly: []solana.PublicKey{}, - }, - } - - logger := zap.NewNop() - ctx := t.Context() - slot := uint64(1) - blockTime := time.Now() - - poolMock, err := pgxmock.NewPool() - require.NoError(t, err, "failed to create mock database pool") - defer poolMock.Close() - poolMock.ExpectQuery("SELECT mint FROM artist_coins"). - WillReturnError(pgx.ErrNoRows) - poolMock.ExpectExec("INSERT INTO sol_payments"). - WithArgs(pgx.NamedArgs{ - "signature": signatures[0].String(), - "instructionIndex": 0, - "amount": amount, - "slot": slot, - "routeIndex": 0, - "toAccount": dest.PublicKey().String(), - }). - WillReturnResult(pgxmock.NewResult("INSERT", 1)) - - p := &DefaultProcessor{ - pool: poolMock, - } - - err = p.ProcessTransaction(ctx, slot, meta, tx, blockTime, logger) - require.NoError(t, err) - assert.NoError(t, poolMock.ExpectationsWereMet()) -} - -func TestProcessTransaction_CallsInsertPurchase(t *testing.T) { - // Setup Route instruction - sender, err := solana.NewRandomPrivateKey() - require.NoError(t, err) - - dest, err := solana.NewRandomPrivateKey() - require.NoError(t, err) - - amount := uint64(1000) - routeInst := payment_router.NewRouteInstruction( - sender.PublicKey(), - sender.PublicKey(), - uint8(0), - map[solana.PublicKey]uint64{ - dest.PublicKey(): amount, - }, - ).Build() - - purchaseMemoInst := memo.NewMemoInstruction( - []byte("track:1:100:2:stream"), - sender.PublicKey(), - ).Build() - - geoMemoInst := memo.NewMemoInstruction( - []byte(`geo:{"city":"Minneapolis","region":"MN","country":"USA"}`), - sender.PublicKey(), - ).Build() - - payer, err := solana.NewRandomPrivateKey() - require.NoError(t, err) - - tx, err := solana.NewTransactionBuilder(). - AddInstruction(routeInst). - AddInstruction(purchaseMemoInst). - AddInstruction(geoMemoInst). - SetFeePayer(payer.PublicKey()). - Build() - require.NoError(t, err) - - signatures, err := tx.Sign(func(publicKey solana.PublicKey) *solana.PrivateKey { - return &payer - }) - require.NoError(t, err) - - meta := &rpc.TransactionMeta{ - LoadedAddresses: rpc.LoadedAddresses{ - Writable: []solana.PublicKey{}, - ReadOnly: []solana.PublicKey{}, - }, - } - - // Args - logger := zap.NewNop() - ctx := t.Context() - slot := uint64(1) - blockTime := time.Now() - - // Mock DB - poolMock, err := pgxmock.NewPool() - require.NoError(t, err, "failed to create mock database pool") - defer poolMock.Close() - poolMock.ExpectQuery("SELECT mint FROM artist_coins"). - WillReturnError(pgx.ErrNoRows) - poolMock.ExpectExec("INSERT INTO sol_payments"). - WithArgs(pgx.NamedArgs{ - "signature": signatures[0].String(), - "instructionIndex": 0, - "amount": amount, - "slot": slot, - "routeIndex": 0, - "toAccount": dest.PublicKey().String(), - }). - WillReturnResult(pgxmock.NewResult("INSERT", 1)) - poolMock.ExpectExec("INSERT INTO sol_purchases"). - WithArgs(pgx.NamedArgs{ - "signature": signatures[0].String(), - "instructionIndex": 0, - "amount": amount, - "slot": slot, - "fromAccount": sender.PublicKey().String(), - "contentType": "track", - "contentId": 1, - "buyerUserId": 2, - "accessType": "stream", - "validAfterBlocknumber": 100, - "isValid": (*bool)(nil), - "city": "Minneapolis", - "region": "MN", - "country": "USA", - }). 
- WillReturnResult(pgxmock.NewResult("INSERT", 1)) - - p := &DefaultProcessor{ - pool: poolMock, - } - - err = p.ProcessTransaction(ctx, slot, meta, tx, blockTime, logger) - assert.NoError(t, err) - assert.NoError(t, poolMock.ExpectationsWereMet()) -} - -func TestProcessTransaction_CallsInsertBalanceChange(t *testing.T) { - // Setup a transaction with token balance changes - account := solana.MustPublicKeyFromBase58("HJQj8P47BdA7ugjQEn45LaESYrxhiZDygmukt8iumFZJ") - owner := solana.MustPublicKeyFromBase58("TT1eRKxi2Rj3oEvsFMe9W5hrcPmpXqKkNj7wC83AhXk") - account2 := solana.MustPublicKeyFromBase58("Cjv8dvVfWU8wUYAR82T5oZ4nHLB6EyGNvpPBzw3r76Qy") - owner2 := solana.MustPublicKeyFromBase58("dRiftyHA39MWEi3m9aunc5MzRF1JYuBsbn6VPcn33UH") - mint := solana.MustPublicKeyFromBase58("9LzCMqDgTKYz9Drzqnpgee3SGa89up3a247ypMj2xrqM") - account3 := solana.MustPublicKeyFromBase58("7sYw5JpQw8rTn2vQh3dX4bG6k9L2mN1pA5eF8cV3uZxT") - mint2 := solana.MustPublicKeyFromBase58("2k8s5d3zqDgTKYz9Drzqnpgee3SGa89up3a247ypMj2x") - tx := &solana.Transaction{ - Signatures: []solana.Signature{ - solana.MustSignatureFromBase58("5ZVE83uvxQ36BmUM4kPn2foPyQCbsEepEkDTinC8bfSwHJdVCia6q3Wvnfa2Ls71SZoBmqoWPyJuPuUm8XcG92Hr"), - }, - Message: solana.Message{ - AccountKeys: []solana.PublicKey{ - account, - account2, - account3, - }, - }, - } - meta := &rpc.TransactionMeta{ - PreTokenBalances: []rpc.TokenBalance{ - { - AccountIndex: 0, - Owner: &owner, - Mint: mint, - UiTokenAmount: &rpc.UiTokenAmount{ - Amount: "1000", - }, - }, - // Should be excluded, wrong mint - { - AccountIndex: 2, - Owner: &owner2, - Mint: mint2, - UiTokenAmount: &rpc.UiTokenAmount{ - Amount: "0", - }, - }, - }, - PostTokenBalances: []rpc.TokenBalance{ - { - AccountIndex: 0, - Owner: &owner, - Mint: mint, - UiTokenAmount: &rpc.UiTokenAmount{ - Amount: "2000", - }, - }, - { - AccountIndex: 1, - Owner: &owner2, - Mint: mint, - UiTokenAmount: &rpc.UiTokenAmount{ - Amount: "0", - }, - }, - }, - LoadedAddresses: rpc.LoadedAddresses{ - Writable: []solana.PublicKey{}, - ReadOnly: []solana.PublicKey{}, - }, - } - - // Args - logger := zap.NewNop() - ctx := t.Context() - slot := uint64(1) - blockTime := time.Now() - - expectedArgs := pgx.NamedArgs{ - "account": account.String(), - "mint": mint.String(), - "owner": owner.String(), - "change": int64(1000), - "balance": uint64(2000), - "signature": tx.Signatures[0].String(), - "slot": slot, - "blockTimestamp": blockTime.UTC(), - } - - expectedArgs2 := pgx.NamedArgs{ - "account": account2.String(), - "mint": mint.String(), - "owner": owner2.String(), - "change": int64(0), - "balance": uint64(0), - "signature": tx.Signatures[0].String(), - "slot": slot, - "blockTimestamp": blockTime.UTC(), - } - - poolMock, err := pgxmock.NewPool() - require.NoError(t, err, "failed to create mock database pool") - defer poolMock.Close() - // balance change insertion order can vary - poolMock.MatchExpectationsInOrder(false) - poolMock.ExpectQuery("SELECT mint FROM artist_coins"). - WillReturnRows( - pgxmock.NewRows([]string{"mints"}). - AddRow(mint.String())) // Only the first mint - poolMock.ExpectExec("INSERT INTO sol_token_account_balance_changes"). - WithArgs(expectedArgs). - WillReturnResult(pgxmock.NewResult("INSERT", 1)) - poolMock.ExpectExec("INSERT INTO sol_token_account_balance_changes"). - WithArgs(expectedArgs2). 
- WillReturnResult(pgxmock.NewResult("INSERT", 1)) - - p := &DefaultProcessor{ - pool: poolMock, - } - - err = p.ProcessTransaction(ctx, slot, meta, tx, blockTime, logger) - assert.NoError(t, err) - assert.NoError(t, poolMock.ExpectationsWereMet()) -} - -func TestProcessSignature_HandlesLoadedAddresses(t *testing.T) { - // prod reward manager disbursement w/ lookup tables - /* - curl -X POST https://api.mainnet-beta.solana.com \ - -H "Content-Type: application/json" \ - -d '{ - "jsonrpc": "2.0", - "id": 1, - "method": "getTransaction", - "params": [ - "58sUxCqs2sbErrZhH1A1YcFrYpK35Ph2AHpySxkCcRkeer1bJmfyCRKxQ7qeR26AA1qEnDb58KJwviDJXGqkAStQ", - { - "maxSupportedTransactionVersion": 0 - } - ] - }' - */ - txResJson := ` - { - "blockTime": 1753149679, - "meta": { - "computeUnitsConsumed": 38054, - "err": null, - "fee": 35450, - "innerInstructions": [ - { - "index": 0, - "instructions": [ - { - "accounts": [6, 2, 8], - "data": "3Dc8EpW7Kr3R", - "programIdIndex": 11, - "stackHeight": 2 - }, - { - "accounts": [0, 3], - "data": "3Bxs49175da2o1zw", - "programIdIndex": 12, - "stackHeight": 2 - }, - { - "accounts": [3], - "data": "9krTCzbLfv4BRBcj", - "programIdIndex": 12, - "stackHeight": 2 - }, - { - "accounts": [3], - "data": "SYXsPCAS12XUEFvhVCEScVBsRUs1Lvxihmo8qVdn6ETKJKzE", - "programIdIndex": 12, - "stackHeight": 2 - } - ] - } - ], - "loadedAddresses": { - "readonly": [ - "71hWFVYokLaN1PNYzTAWi13EfJ7Xt9VbSWUKsXUT8mxE", - "8n2y76BtYed3EPwAkhDgdWQNtkazw6c9gY1RXDLy37KF", - "8CrkKMAsR8pMNtmR65t5WwrLTXT1FUJRfWwUGLfMU8R1", - "SysvarRent111111111111111111111111111111111", - "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", - "11111111111111111111111111111111" - ], - "writable": ["3V9opXNpHmPPymKeq7CYD8wWMH8wzFXmqEkNdzfsZhYq"] - }, - "logMessages": [ - "Program DDZDcYdQFEMwcu2Mwo75yGFjJ1mUQyyXLWzhZLEVFcei invoke [1]", - "Program log: Instruction: Transfer", - "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA invoke [2]", - "Program log: Instruction: Transfer", - "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA consumed 4645 of 183191 compute units", - "Program TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA success", - "Program 11111111111111111111111111111111 invoke [2]", - "Program 11111111111111111111111111111111 success", - "Program 11111111111111111111111111111111 invoke [2]", - "Program 11111111111111111111111111111111 success", - "Program 11111111111111111111111111111111 invoke [2]", - "Program 11111111111111111111111111111111 success", - "Program DDZDcYdQFEMwcu2Mwo75yGFjJ1mUQyyXLWzhZLEVFcei consumed 37904 of 203000 compute units", - "Program DDZDcYdQFEMwcu2Mwo75yGFjJ1mUQyyXLWzhZLEVFcei success", - "Program ComputeBudget111111111111111111111111111111 invoke [1]", - "Program ComputeBudget111111111111111111111111111111 success" - ], - "postBalances": [ - 1499028959, 0, 2039280, 897840, 1141440, 1, 2039280, 1350240, 4392391, - 1398960, 1009200, 4513213226, 1 - ], - "postTokenBalances": [ - { - "accountIndex": 2, - "mint": "9LzCMqDgTKYz9Drzqnpgee3SGa89up3a247ypMj2xrqM", - "owner": "5ZiE3vAkrdXBgyFL7KqG3RoEGBws4CjRcXVbABDLZTgx", - "programId": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", - "uiTokenAmount": { - "amount": "13900000000", - "decimals": 8, - "uiAmount": 139.0, - "uiAmountString": "139" - } - }, - { - "accountIndex": 6, - "mint": "9LzCMqDgTKYz9Drzqnpgee3SGa89up3a247ypMj2xrqM", - "owner": "8n2y76BtYed3EPwAkhDgdWQNtkazw6c9gY1RXDLy37KF", - "programId": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", - "uiTokenAmount": { - "amount": "2754676375551047", - "decimals": 8, - "uiAmount": 
27546763.75551047, - "uiAmountString": "27546763.75551047" - } - } - ], - "preBalances": [ - 1492988329, 6973920, 2039280, 0, 1141440, 1, 2039280, 1350240, 4392391, - 1398960, 1009200, 4513213226, 1 - ], - "preTokenBalances": [ - { - "accountIndex": 2, - "mint": "9LzCMqDgTKYz9Drzqnpgee3SGa89up3a247ypMj2xrqM", - "owner": "5ZiE3vAkrdXBgyFL7KqG3RoEGBws4CjRcXVbABDLZTgx", - "programId": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", - "uiTokenAmount": { - "amount": "13800000000", - "decimals": 8, - "uiAmount": 138.0, - "uiAmountString": "138" - } - }, - { - "accountIndex": 6, - "mint": "9LzCMqDgTKYz9Drzqnpgee3SGa89up3a247ypMj2xrqM", - "owner": "8n2y76BtYed3EPwAkhDgdWQNtkazw6c9gY1RXDLy37KF", - "programId": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", - "uiTokenAmount": { - "amount": "2754676475551047", - "decimals": 8, - "uiAmount": 27546764.75551047, - "uiAmountString": "27546764.75551047" - } - } - ], - "rewards": [], - "status": { "Ok": null } - }, - "slot": 354896657, - "transaction": { - "message": { - "accountKeys": [ - "C4MZpYiddDuWVofhs4BkUPyUiH78bFnaxhQVBB5fvko5", - "8WCWQBxc3V7bDEF5poQYkNGLjsr9mzUuVSxfqs9Ksuv1", - "EXYhWM17WbWw49tHFpi9pHUxKDwAPBK5rzWxTQPZFN2b", - "CzbB1oPD1YSUthSr5TkN4m8EGsjN8z3rVgwRRyE4oaBc", - "DDZDcYdQFEMwcu2Mwo75yGFjJ1mUQyyXLWzhZLEVFcei", - "ComputeBudget111111111111111111111111111111" - ], - "addressTableLookups": [ - { - "accountKey": "4UQwpGupH66RgQrWRqmPM9Two6VJEE68VZ7GeqZ3mvVv", - "readonlyIndexes": [5, 6, 8, 1, 3, 0], - "writableIndexes": [7] - } - ], - "header": { - "numReadonlySignedAccounts": 0, - "numReadonlyUnsignedAccounts": 2, - "numRequiredSignatures": 1 - }, - "instructions": [ - { - "accounts": [1, 7, 8, 6, 2, 3, 9, 0, 10, 11, 12], - "data": "8RMoXXC1taGWJZMAAjapFT6hJjcNVRVRbFPcbNHphz9uuwbKXdkcGK3aB5ChyFExjDUXjAbv", - "programIdIndex": 4, - "stackHeight": null - }, - { - "accounts": [], - "data": "3uedW6ymeow5", - "programIdIndex": 5, - "stackHeight": null - } - ], - "recentBlockhash": "9bxHRc5pMC3JZMSgVPeps7XfkT4c8X3Qp5n5tQTrZKdx" - }, - "signatures": [ - "58sUxCqs2sbErrZhH1A1YcFrYpK35Ph2AHpySxkCcRkeer1bJmfyCRKxQ7qeR26AA1qEnDb58KJwviDJXGqkAStQ" - ] - }, - "version": 0 - } - ` - txRes := rpc.GetTransactionResult{} - err := json.Unmarshal([]byte(txResJson), &txRes) - require.NoError(t, err, "failed to unmarshal transaction result") - - fakeRpcClient := fake_rpc_client.NewWithTransactions([]*rpc.GetTransactionResult{ - &txRes, - }) - - pool := database.CreateTestDatabase(t, "test_solana_indexer") - p := NewDefaultProcessor( - fakeRpcClient, - pool, - config.Cfg, - ) - - // Use prod reward program ID - reward_manager.SetProgramID(solana.MustPublicKeyFromBase58(config.ProdRewardManagerProgramID)) - - err = p.ProcessSignature(t.Context(), 354896657, solana.MustSignatureFromBase58("58sUxCqs2sbErrZhH1A1YcFrYpK35Ph2AHpySxkCcRkeer1bJmfyCRKxQ7qeR26AA1qEnDb58KJwviDJXGqkAStQ"), zap.NewNop()) - require.NoError(t, err, "failed to process signature") - - row := pool.QueryRow(t.Context(), "SELECT EXISTS (SELECT 1 FROM sol_reward_disbursements WHERE signature = $1)", "58sUxCqs2sbErrZhH1A1YcFrYpK35Ph2AHpySxkCcRkeer1bJmfyCRKxQ7qeR26AA1qEnDb58KJwviDJXGqkAStQ") - var exists bool - row.Scan(&exists) - - require.True(t, exists, "expected reward disbursement to exist") -} diff --git a/solana/indexer/subscription_test.go b/solana/indexer/subscription_test.go deleted file mode 100644 index 8c94edda..00000000 --- a/solana/indexer/subscription_test.go +++ /dev/null @@ -1,227 +0,0 @@ -package indexer - -// import ( -// "context" -// "errors" -// "testing" -// "time" - -// 
"api.audius.co/database" -// "github.com/gagliardetto/solana-go" -// "github.com/gagliardetto/solana-go/rpc" -// pb "github.com/rpcpool/yellowstone-grpc/examples/golang/proto" -// "github.com/stretchr/testify/mock" -// "github.com/test-go/testify/assert" -// "github.com/test-go/testify/require" -// "go.uber.org/zap" -// ) - -// type mockGrpcClient struct { -// mock.Mock -// } - -// func (m *mockGrpcClient) Subscribe( -// ctx context.Context, -// subRequest *pb.SubscribeRequest, -// dataCallback DataCallback, -// errorCallback ErrorCallback, -// ) error { -// args := m.Called(ctx, subRequest, dataCallback, errorCallback) -// return args.Error(0) -// } - -// func (m *mockGrpcClient) Close() { -// m.Called() -// } - -// type mockRpcClient struct { -// mock.Mock -// } - -// func (m *mockRpcClient) GetBlockWithOpts(ctx context.Context, slot uint64, opts *rpc.GetBlockOpts) (*rpc.GetBlockResult, error) { -// args := m.Called(ctx, slot, opts) -// return args.Get(0).(*rpc.GetBlockResult), args.Error(1) -// } - -// func (m *mockRpcClient) GetSlot(ctx context.Context, commitment rpc.CommitmentType) (uint64, error) { -// args := m.Called(ctx, commitment) -// return args.Get(0).(uint64), args.Error(1) -// } - -// func (m *mockRpcClient) GetSignaturesForAddressWithOpts(ctx context.Context, address solana.PublicKey, opts *rpc.GetSignaturesForAddressOpts) ([]*rpc.TransactionSignature, error) { -// args := m.Called(ctx, address, opts) -// return args.Get(0).([]*rpc.TransactionSignature), args.Error(1) -// } - -// func (m *mockRpcClient) GetTransaction(ctx context.Context, signature solana.Signature, opts *rpc.GetTransactionOpts) (*rpc.GetTransactionResult, error) { -// args := m.Called(ctx, signature, opts) -// return args.Get(0).(*rpc.GetTransactionResult), args.Error(1) -// } - -// func (m *mockRpcClient) GetAccountDataBorshInto(ctx context.Context, account solana.PublicKey, out interface{}) error { -// args := m.Called(ctx, account, out) -// return args.Error(0) -// } - -// // Tests that the subscription is made for the artist coins in the database -// // and is updated as new artist coins are added and removed. -// func TestSubscription(t *testing.T) { -// pool := database.CreateTestDatabase(t, "test_solana_indexer") - -// mint1 := "4k3Dyjzvzp8eXQ2f1b6d5c7g8f9h1j2k3l4m5n6o7p8q9r0s1t2u3v4w5x6y7z8" -// mint2 := "9zL1k3Dyjzvzp8eXQ2f1b6d5c7g8f9h1j2k3l4m5n6o7p8q9r0s1t2u3v4w5x6y7z8" - -// database.Seed(pool, database.FixtureMap{ -// "artist_coins": { -// { -// "user_id": 1, -// "mint": mint1, -// "ticker": "TEST", -// "decimals": 8, -// }, -// }, -// }) - -// grpcMock := &mockGrpcClient{} - -// // Initial subscription should include the artist coin in the database. -// grpcMock.On("Subscribe", -// mock.Anything, -// mock.MatchedBy(func(req *pb.SubscribeRequest) bool { -// for _, account := range req.Accounts { -// for _, filter := range account.Filters { -// if f, ok := filter.Filter.(*pb.SubscribeRequestFilterAccountsFilter_Memcmp); ok { -// if f.Memcmp.GetBase58() == mint1 { -// return true -// } -// } -// } -// } -// return false -// }), -// mock.Anything, -// mock.Anything, -// ).Return(nil) - -// // After inserting a new artist coin, the subscription should be updated to include it. 
-// grpcMock.On("Subscribe", -// mock.Anything, -// mock.MatchedBy(func(req *pb.SubscribeRequest) bool { -// foundFirst := false -// foundSecond := false -// for _, account := range req.Accounts { -// for _, filter := range account.Filters { -// if f, ok := filter.Filter.(*pb.SubscribeRequestFilterAccountsFilter_Memcmp); ok { -// if f.Memcmp.GetBase58() == mint1 { -// foundFirst = true -// } -// if f.Memcmp.GetBase58() == mint2 { -// foundSecond = true -// } -// } -// } -// } -// return foundFirst && foundSecond -// }), -// mock.Anything, -// mock.Anything, -// ).Return(nil) - -// // After removing artist coins, the subscription should not include the removed mints -// grpcMock.On("Subscribe", -// mock.Anything, -// mock.MatchedBy(func(req *pb.SubscribeRequest) bool { -// for _, account := range req.Accounts { -// for _, filter := range account.Filters { -// if f, ok := filter.Filter.(*pb.SubscribeRequestFilterAccountsFilter_Memcmp); ok { -// if f.Memcmp.GetBase58() == mint1 { -// return false -// } -// if f.Memcmp.GetBase58() == mint2 { -// return false -// } -// } -// } -// } -// return true -// }), -// mock.Anything, -// mock.Anything, -// ).Return(nil) - -// grpcMock.On("Close").Return() - -// rpcMock := &mockRpcClient{} -// rpcMock.On("GetSlot", mock.Anything, mock.Anything). -// Return(uint64(100), nil) - -// s := &SolanaIndexer{ -// grpcClient: grpcMock, -// rpcClient: rpcMock, -// pool: pool, -// logger: zap.NewNop(), -// } - -// ctx, cancel := context.WithCancel(context.Background()) - -// done := make(chan error, 1) -// go func() { -// done <- s.Subscribe(ctx) -// }() - -// time.Sleep(200 * time.Millisecond) - -// _, err := pool.Exec(ctx, ` -// INSERT INTO artist_coins (user_id, mint, ticker, decimals) -// VALUES ($1, $2, $3, $4) -// `, 1, mint2, "TEST2", 9) -// if err != nil { -// t.Fatalf("failed to insert new artist coin: %v", err) -// } - -// time.Sleep(200 * time.Millisecond) - -// _, err = pool.Exec(ctx, "DELETE FROM artist_coins") -// if err != nil { -// t.Fatalf("failed to delete artist coins: %v", err) -// } - -// time.Sleep(200 * time.Millisecond) - -// cancel() - -// err = <-done -// assert.True(t, errors.Is(err, context.Canceled), err.Error()) -// grpcMock.AssertExpectations(t) -// } - -// func TestSubscription_Unprocessed(t *testing.T) { -// pool := database.CreateTestDatabase(t, "test_solana_indexer") -// processor := &mockProcessor{} - -// processor.On("ProcessSignature", mock.Anything, mock.Anything, mock.Anything, mock.Anything). 
-// Return(errors.New("test error")) - -// s := &SolanaIndexer{ -// processor: processor, -// pool: pool, -// logger: zap.NewNop(), -// } - -// signature := solana.MustSignatureFromBase58("58sUxCqs2sbErrZhH1A1YcFrYpK35Ph2AHpySxkCcRkeer1bJmfyCRKxQ7qeR26AA1qEnDb58KJwviDJXGqkAStQ") - -// s.handleMessage(t.Context(), &pb.SubscribeUpdate{ -// UpdateOneof: &pb.SubscribeUpdate_Account{ -// Account: &pb.SubscribeUpdateAccount{ -// Account: &pb.SubscribeUpdateAccountInfo{ -// TxnSignature: signature[:], -// }, -// }, -// }, -// }) - -// unprocessedTxs, err := getUnprocessedTransactions(t.Context(), pool, 100, 0) -// require.NoError(t, err, "failed to get unprocessed transactions") -// assert.Len(t, unprocessedTxs, 1, "expected one unprocessed transaction") -// assert.Equal(t, "58sUxCqs2sbErrZhH1A1YcFrYpK35Ph2AHpySxkCcRkeer1bJmfyCRKxQ7qeR26AA1qEnDb58KJwviDJXGqkAStQ", unprocessedTxs[0].Signature, "unexpected unprocessed transaction") -// } diff --git a/solana/indexer/unprocessed_transactions_test.go b/solana/indexer/unprocessed_transactions_test.go deleted file mode 100644 index b6c41df4..00000000 --- a/solana/indexer/unprocessed_transactions_test.go +++ /dev/null @@ -1,85 +0,0 @@ -package indexer - -// import ( -// "errors" -// "strconv" -// "testing" - -// "api.audius.co/database" -// "github.com/gagliardetto/solana-go" -// "github.com/stretchr/testify/mock" -// "github.com/stretchr/testify/require" -// "github.com/test-go/testify/assert" -// "go.uber.org/zap" -// ) - -// func TestUnprocessedTransactions(t *testing.T) { -// ctx := t.Context() -// pool := database.CreateTestDatabase(t, "test_solana_indexer") -// defer pool.Close() - -// // Insert a test unprocessed transaction -// signature := "test_signature" -// errorMessage := "test error message" -// err := insertUnprocessedTransaction(ctx, pool, signature, 0, errorMessage) -// require.NoError(t, err) - -// // Verify the transaction was inserted -// res, err := getUnprocessedTransactions(ctx, pool, 10, 0) -// require.NoError(t, err) -// assert.Len(t, res, 1) -// assert.Equal(t, signature, res[0].Signature) - -// // Delete the unprocessed transaction -// err = deleteUnprocessedTransaction(ctx, pool, signature) -// require.NoError(t, err) - -// // Verify the transaction was deleted -// res, err = getUnprocessedTransactions(ctx, pool, 10, 0) -// require.NoError(t, err) -// assert.Len(t, res, 0) -// } - -// func TestRetryUnprocessedTransactions(t *testing.T) { -// ctx := t.Context() -// pool := database.CreateTestDatabase(t, "test_solana_indexer") -// defer pool.Close() - -// unprocessedTransactionsCount := 543 -// processor := &mockProcessor{} - -// var failingSigBytes [64]byte -// copy(failingSigBytes[:], []byte("test_signature_73")) -// failingSig := solana.SignatureFromBytes(failingSigBytes[:]) - -// // Mock the processor to fail on a specific signature -// processor.On("ProcessSignature", ctx, mock.Anything, failingSig, mock.Anything). -// Return(errors.New("fake failure")).Times(1) - -// // Everything else should succeed -// processor.On("ProcessSignature", ctx, mock.Anything, mock.Anything, mock.Anything, mock.Anything). 
-// Return(nil).Times(unprocessedTransactionsCount - 1) - -// s := &SolanaIndexer{ -// processor: processor, -// pool: pool, -// logger: zap.NewNop(), -// } - -// for i := range unprocessedTransactionsCount { -// var sigBytes [64]byte -// copy(sigBytes[:], []byte("test_signature_"+strconv.FormatInt(int64(i), 10))) -// signature := solana.SignatureFromBytes(sigBytes[:]) -// insertUnprocessedTransaction(ctx, pool, signature.String(), 0, "test error message") -// } - -// err := s.RetryUnprocessedTransactions(ctx) -// require.NoError(t, err) -// processor.AssertNumberOfCalls(t, "ProcessSignature", unprocessedTransactionsCount) - -// // Verify all transactions but #73 were processed -// unprocessedTxs, err := getUnprocessedTransactions(ctx, pool, 100, 0) -// require.NoError(t, err) -// assert.Len(t, unprocessedTxs, 1, "expected a single unprocessed transaction after retry") -// assert.Equal(t, failingSig.String(), unprocessedTxs[0].Signature, "expected the failing transaction to remain unprocessed") -// } From f7665537402c59d9b83b2556ededb77e3590bd42 Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Wed, 15 Oct 2025 03:02:42 -0700 Subject: [PATCH 16/56] Make grpc factory and test start --- solana/indexer/common/grpc_client.go | 2 +- solana/indexer/damm_v2/indexer.go | 20 ++-- solana/indexer/damm_v2/indexer_test.go | 158 ++++++++++++++++++++++++- 3 files changed, 167 insertions(+), 13 deletions(-) diff --git a/solana/indexer/common/grpc_client.go b/solana/indexer/common/grpc_client.go index e025e1b0..d7f5986d 100644 --- a/solana/indexer/common/grpc_client.go +++ b/solana/indexer/common/grpc_client.go @@ -47,7 +47,7 @@ type DefaultGrpcClient struct { } // Creates a new gRPC client. -func NewGrpcClient(config GrpcConfig) *DefaultGrpcClient { +func NewGrpcClient(config GrpcConfig) GrpcClient { return &DefaultGrpcClient{ config: config, } diff --git a/solana/indexer/damm_v2/indexer.go b/solana/indexer/damm_v2/indexer.go index 9f097da6..4343fd04 100644 --- a/solana/indexer/damm_v2/indexer.go +++ b/solana/indexer/damm_v2/indexer.go @@ -22,10 +22,11 @@ const ( ) type Indexer struct { - pool database.DbPool - grpcConfig common.GrpcConfig - rpcClient common.RpcClient - logger *zap.Logger + pool database.DbPool + grpcConfig common.GrpcConfig + grpcFactory func(common.GrpcConfig) common.GrpcClient + rpcClient common.RpcClient + logger *zap.Logger } func New( @@ -35,10 +36,11 @@ func New( logger *zap.Logger, ) *Indexer { return &Indexer{ - pool: pool, - grpcConfig: config, - rpcClient: rpcClient, - logger: logger.Named("DammV2Indexer"), + pool: pool, + grpcConfig: config, + grpcFactory: common.NewGrpcClient, + rpcClient: rpcClient, + logger: logger.Named("DammV2Indexer"), } } @@ -178,7 +180,7 @@ func (d *Indexer) subscribe(ctx context.Context) ([]common.GrpcClient, error) { } } - grpcClient := common.NewGrpcClient(d.grpcConfig) + grpcClient := d.grpcFactory(d.grpcConfig) err = grpcClient.Subscribe(ctx, subscription, handleMessage, func(err error) { d.logger.Error("error in subscription", zap.Error(err)) }) diff --git a/solana/indexer/damm_v2/indexer_test.go b/solana/indexer/damm_v2/indexer_test.go index 36a0cead..9466db8b 100644 --- a/solana/indexer/damm_v2/indexer_test.go +++ b/solana/indexer/damm_v2/indexer_test.go @@ -1,17 +1,22 @@ package damm_v2 import ( + "context" "encoding/base64" "testing" + "time" "api.audius.co/database" "api.audius.co/solana/indexer/common" "api.audius.co/solana/indexer/fake_rpc_client" "github.com/gagliardetto/solana-go" + 
"github.com/jackc/pgx/v5" pb "github.com/rpcpool/yellowstone-grpc/examples/golang/proto" + "github.com/stretchr/testify/mock" "github.com/test-go/testify/assert" "github.com/test-go/testify/require" "go.uber.org/zap" + "google.golang.org/protobuf/encoding/protojson" ) func TestHandleUpdate_SlotCheckpoint(t *testing.T) { @@ -94,8 +99,8 @@ func TestHandleUpdate_DammV2PositionUpdate(t *testing.T) { // From real on-chain account data address := solana.MustPublicKeyFromBase58("5bYLydDXt1K5zroychcbrVbhGRUpheXdq5w41uccazPB") - poolBase64 := "qryP5HpA99C0h5iaMb9or5qzYmaPKH7cBpP1GTyw5pa9SMlEQMuk4oeLsnqCTyioPLOFt664lEHr2woSYFq4Z3N6xFLWwGDSAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADUszHGm5oNAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACQoMPLmBiA4ADThI4wIAwAAAAAAAAAAABGmGkQpAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" - poolData, err := base64.StdEncoding.DecodeString(poolBase64) + positionBase64 := "qryP5HpA99C0h5iaMb9or5qzYmaPKH7cBpP1GTyw5pa9SMlEQMuk4oeLsnqCTyioPLOFt664lEHr2woSYFq4Z3N6xFLWwGDSAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADUszHGm5oNAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACQoMPLmBiA4ADThI4wIAwAAAAAAAAAAABGmGkQpAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" + positionData, err := base64.StdEncoding.DecodeString(positionBase64) require.NoError(t, err) update := pb.SubscribeUpdate{ @@ -104,7 +109,7 @@ func TestHandleUpdate_DammV2PositionUpdate(t *testing.T) { Account: &pb.SubscribeUpdateAccount{ Account: &pb.SubscribeUpdateAccountInfo{ Pubkey: address.Bytes(), - Data: poolData, + Data: positionData, }, }, }, @@ -124,3 +129,150 @@ func TestHandleUpdate_DammV2PositionUpdate(t *testing.T) { require.NoError(t, err) defer rows.Close() } + +type grpcClientMock struct { + mock.Mock + + onUpdate common.DataCallback +} + +func (m *grpcClientMock) Subscribe(ctx context.Context, req *pb.SubscribeRequest, onUpdate common.DataCallback, onError common.ErrorCallback) error { + args := m.Called(ctx, req, onUpdate, onError) + m.onUpdate = onUpdate + return args.Error(0) +} + +func (m *grpcClientMock) Close() { + m.Called() +} + +func TestStart(t *testing.T) { + pool := database.CreateTestDatabase(t, "test_solana_indexer_damm_v2") + + // Fake an update for a Position and a Pool with missing data (should fail) + positionAddress := solana.MustPublicKeyFromBase58("5bYLydDXt1K5zroychcbrVbhGRUpheXdq5w41uccazPB") + positionUpdate := pb.SubscribeUpdate{ + Filters: []string{positionAddress.String()}, + UpdateOneof: &pb.SubscribeUpdate_Account{ + Account: &pb.SubscribeUpdateAccount{ + Account: &pb.SubscribeUpdateAccountInfo{ + Pubkey: positionAddress.Bytes(), + }, + }, + }, + } + dammPoolAddress := solana.MustPublicKeyFromBase58("D9iJqMbgQJLFt5PAAiTJTMNsMAMueukzoe1EK2r1g3WH") + poolUpdate := pb.SubscribeUpdate{ + Filters: []string{NAME}, + UpdateOneof: &pb.SubscribeUpdate_Account{ + Account: &pb.SubscribeUpdateAccount{ + Account: &pb.SubscribeUpdateAccountInfo{ + Pubkey: dammPoolAddress.Bytes(), + }, + }, + }, + } + dammPoolAddress2 := 
solana.MustPublicKeyFromBase58("8Z8rCYLuUcLfAJYPZxMgn6i9ifg9znQxrckXgZh6kYvN") + + database.Seed(pool, database.FixtureMap{ + "artist_coins": []map[string]any{ + { + "mint": "abc", + "ticker": "", + "user_id": 0, + "decimals": 9, + "damm_v2_pool": dammPoolAddress.String(), + }, + }, + }) + + rpcClient := fake_rpc_client.FakeRpcClient{} + logger := zap.NewNop() + + grpcMock := grpcClientMock{} + grpcMock.On("Subscribe", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) + grpcMock.On("Close").Return() + + indexer := New(common.GrpcConfig{}, &rpcClient, pool, logger) + indexer.grpcFactory = func(config common.GrpcConfig) common.GrpcClient { + return &grpcMock + } + + ctx, cancel := context.WithTimeout(t.Context(), time.Second*5) + defer cancel() + go indexer.Start(ctx) + + for { + if grpcMock.onUpdate != nil { + break + } + time.Sleep(time.Millisecond * 10) + } + + // Assert the original subscription included the actual account + grpcMock.AssertCalled(t, "Subscribe", mock.Anything, mock.MatchedBy(func(req *pb.SubscribeRequest) bool { + hasDammFilter := len(req.Accounts[NAME].Account) == 1 && + req.Accounts[NAME].Account[0] == dammPoolAddress.String() + hasPositionFilter := req.Accounts[dammPoolAddress.String()]. + Filters[0]. + Filter.(*pb.SubscribeRequestFilterAccountsFilter_Memcmp). + Memcmp. + Data.(*pb.SubscribeRequestFilterAccountsFilterMemcmp_Base58). + Base58 == dammPoolAddress.String() + return hasDammFilter && hasPositionFilter + }), mock.Anything, mock.Anything) + + // Send the updates (with missing data) + grpcMock.onUpdate(ctx, &positionUpdate) + grpcMock.onUpdate(ctx, &poolUpdate) + grpcMock.onUpdate = nil + + // Update the DB to trigger a refresh of the subscription + sql := `UPDATE artist_coins SET damm_v2_pool = @damm_v2_pool WHERE mint = 'abc'` + _, err := pool.Exec(ctx, sql, pgx.NamedArgs{ + "damm_v2_pool": dammPoolAddress2.String(), + }) + require.NoError(t, err) + + for { + if grpcMock.onUpdate != nil { + break + } + time.Sleep(time.Millisecond * 10) + } + + cancel() + + // Assert that on refresh, the subscription included the updated account + grpcMock.AssertCalled(t, "Subscribe", mock.Anything, mock.MatchedBy(func(req *pb.SubscribeRequest) bool { + hasDammFilter := len(req.Accounts[NAME].Account) == 1 && + req.Accounts[NAME].Account[0] == dammPoolAddress2.String() + hasPositionFilter := req.Accounts[dammPoolAddress2.String()]. + Filters[0]. + Filter.(*pb.SubscribeRequestFilterAccountsFilter_Memcmp). + Memcmp. + Data.(*pb.SubscribeRequestFilterAccountsFilterMemcmp_Base58). 
+ Base58 == dammPoolAddress2.String() + return hasDammFilter && hasPositionFilter + }), mock.Anything, mock.Anything) + + // Assert that the updates with missing data were added to the retry queue + positionUpdateJson, err := protojson.Marshal(common.RetryQueueUpdate{SubscribeUpdate: &positionUpdate}) + require.NoError(t, err) + poolUpdateJson, err := protojson.Marshal(common.RetryQueueUpdate{SubscribeUpdate: &poolUpdate}) + require.NoError(t, err) + + var exists bool + sql = `SELECT EXISTS (SELECT 1 FROM sol_retry_queue WHERE update = @update)` + err = pool.QueryRow(t.Context(), sql, pgx.NamedArgs{ + "update": positionUpdateJson, + }).Scan(&exists) + require.NoError(t, err) + assert.True(t, exists, "failed position update should be added to retry queue") + + err = pool.QueryRow(t.Context(), sql, pgx.NamedArgs{ + "update": poolUpdateJson, + }).Scan(&exists) + require.NoError(t, err) + assert.True(t, exists, "failed pool update should be added to retry queue") +} From 692513ac55bd9d57e6a55cb2bebbffd6567959ed Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Wed, 15 Oct 2025 03:02:48 -0700 Subject: [PATCH 17/56] remove println --- solana/indexer/common/retry_queue.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/solana/indexer/common/retry_queue.go b/solana/indexer/common/retry_queue.go index 87e4f33a..7dac653d 100644 --- a/solana/indexer/common/retry_queue.go +++ b/solana/indexer/common/retry_queue.go @@ -36,12 +36,10 @@ func (r RetryQueueUpdate) MarshalJSON() ([]byte, error) { return []byte("{}"), nil } res, err := protojson.Marshal(r.SubscribeUpdate) - fmt.Printf("Marshaled JSON: %s, error: %v\n", res, err) return res, err } func (r *RetryQueueUpdate) UnmarshalJSON(data []byte) error { - fmt.Printf("Unmarshaling JSON: %s\n", data) if r.SubscribeUpdate == nil { r.SubscribeUpdate = &pb.SubscribeUpdate{} } From a0f15567ab2a39a4801e8d4a7100c5c0ee70a715 Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Wed, 15 Oct 2025 03:03:54 -0700 Subject: [PATCH 18/56] rename --- solana/indexer/damm_v2/indexer_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/solana/indexer/damm_v2/indexer_test.go b/solana/indexer/damm_v2/indexer_test.go index 9466db8b..98af640d 100644 --- a/solana/indexer/damm_v2/indexer_test.go +++ b/solana/indexer/damm_v2/indexer_test.go @@ -146,7 +146,7 @@ func (m *grpcClientMock) Close() { m.Called() } -func TestStart(t *testing.T) { +func TestSubscription(t *testing.T) { pool := database.CreateTestDatabase(t, "test_solana_indexer_damm_v2") // Fake an update for a Position and a Pool with missing data (should fail) From 1ec4c7da4a80f2c0375bd2a7567e7dfe65fe465f Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Wed, 15 Oct 2025 10:39:10 -0700 Subject: [PATCH 19/56] backfiller --- solana/indexer/{backfill.go => backfiller.go} | 49 ++++++++++++++++--- .../{backfill_test.go => backfiller_test.go} | 41 +++++++++------- solana/indexer/program/indexer.go | 4 +- 3 files changed, 67 insertions(+), 27 deletions(-) rename solana/indexer/{backfill.go => backfiller.go} (80%) rename solana/indexer/{backfill_test.go => backfiller_test.go} (89%) diff --git a/solana/indexer/backfill.go b/solana/indexer/backfiller.go similarity index 80% rename from solana/indexer/backfill.go rename to solana/indexer/backfiller.go index 385f0e55..1f8605f0 100644 --- a/solana/indexer/backfill.go +++ b/solana/indexer/backfiller.go @@ -14,12 +14,31 @@ 
import ( "github.com/gagliardetto/solana-go" "github.com/gagliardetto/solana-go/rpc" "github.com/jackc/pgx/v5" + "github.com/maypok86/otter" "go.uber.org/zap" ) var TRANSACTION_DELAY_MS = uint(5) -func (s *SolanaIndexer) Backfill(ctx context.Context, fromSlot uint64, toSlot uint64) error { +type backfillProcessor interface { + ProcessTransaction( + ctx context.Context, + slot uint64, + meta *rpc.TransactionMeta, + tx *solana.Transaction, + blockTime time.Time, + ) error +} + +type Backfiller struct { + rpcClient common.RpcClient + pool database.DbPool + processor backfillProcessor + transactionCache *otter.Cache[solana.Signature, *rpc.GetTransactionResult] + logger *zap.Logger +} + +func (s *Backfiller) Start(ctx context.Context, fromSlot uint64, toSlot uint64) error { txRange, err := getTransactionRange(ctx, s.pool, fromSlot, toSlot) if err != nil { return fmt.Errorf("failed to get transaction range: %w", err) @@ -73,7 +92,7 @@ func (s *SolanaIndexer) Backfill(ctx context.Context, fromSlot uint64, toSlot ui } // Fetches and processes transactions for a given address within a given signature/slot range. -func (s *SolanaIndexer) backfillAddressTransactions(ctx context.Context, address solana.PublicKey, txRange transactionRange, fromSlot uint64, toSlot uint64) { +func (s *Backfiller) backfillAddressTransactions(ctx context.Context, address solana.PublicKey, txRange transactionRange, fromSlot uint64, toSlot uint64) { var lastIndexedSig solana.Signature foundIntersection := false before := txRange.before @@ -167,10 +186,28 @@ func (s *SolanaIndexer) backfillAddressTransactions(ctx context.Context, address continue } - // err = s.processor.ProcessSignature(ctx, sig.Slot, sig.Signature, logger) - // if err != nil { - // logger.Error("failed to process signature", zap.Error(err)) - // } + // Fetch the transaction details + // Note: Could also convert the subscription transaction to a solana.Transaction, + // but that could be error prone and the transaction is probably already in the cache anyway. + // Also, we need the blocktime which the subscription doesn't seem to provide. 
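+			// FetchTransactionWithCache should serve repeat signatures from the shared
+			// transaction cache and only fall back to the RPC client on a miss, which
+			// keeps re-running a backfill over an already-indexed range cheap. Fetch,
+			// decode, and processing failures below are logged (or, in the case of
+			// ProcessTransaction, dropped) without a retry, so a failed signature is
+			// simply skipped.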
+ txRes, err := common.FetchTransactionWithCache(ctx, s.transactionCache, s.rpcClient, sig.Signature) + if err != nil { + logger.Error("failed to fetch transaction", zap.Error(err)) + continue + } + + // Decode the transaction + tx, err := txRes.Transaction.GetTransaction() + if err != nil { + logger.Error("failed to decode transaction", zap.Error(err)) + continue + } + + // Add the lookup table accounts to the message accounts + tx = common.ResolveLookupTables(ctx, s.rpcClient, tx, txRes.Meta) + + // Process the transaction + s.processor.ProcessTransaction(ctx, txRes.Slot, txRes.Meta, tx, txRes.BlockTime.Time()) lastIndexedSig = sig.Signature diff --git a/solana/indexer/backfill_test.go b/solana/indexer/backfiller_test.go similarity index 89% rename from solana/indexer/backfill_test.go rename to solana/indexer/backfiller_test.go index 2a7f71df..e5f64622 100644 --- a/solana/indexer/backfill_test.go +++ b/solana/indexer/backfiller_test.go @@ -22,19 +22,14 @@ type mockProcessor struct { mock.Mock } -func (m *mockProcessor) ProcessSignature(ctx context.Context, slot uint64, txSig solana.Signature, logger *zap.Logger) error { - args := m.Called(ctx, slot, txSig, logger) - return args.Error(0) -} func (m *mockProcessor) ProcessTransaction( ctx context.Context, slot uint64, meta *rpc.TransactionMeta, tx *solana.Transaction, blockTime time.Time, - logger *zap.Logger, ) error { - args := m.Called(ctx, slot, meta, tx, blockTime, logger) + args := m.Called(ctx, slot, meta, tx, blockTime) return args.Error(0) } @@ -213,22 +208,30 @@ func TestBackfillContinue(t *testing.T) { ) processorMock := &mockProcessor{} - processorMock.On("ProcessSignature", mock.Anything, mock.Anything, mockTransactions[0].Signatures[0], mock.Anything). + processorMock.On("ProcessTransaction", mock.Anything, mock.Anything, mock.Anything, + mock.MatchedBy(func(tx *solana.Transaction) bool { + return tx.Signatures[0] == mockTransactions[0].Signatures[0] + }), mock.Anything). Return(nil).Once() - processorMock.On("ProcessSignature", mock.Anything, mock.Anything, mockTransactions[1].Signatures[0], mock.Anything). + processorMock.On("ProcessTransaction", mock.Anything, mock.Anything, mock.Anything, + mock.MatchedBy(func(tx *solana.Transaction) bool { + return tx.Signatures[0] == mockTransactions[1].Signatures[0] + }), mock.Anything). Return(nil).Once() - processorMock.On("ProcessSignature", mock.Anything, mock.Anything, mockTransactions[5].Signatures[0], mock.Anything). + processorMock.On("ProcessTransaction", mock.Anything, mock.Anything, mock.Anything, + mock.MatchedBy(func(tx *solana.Transaction) bool { + return tx.Signatures[0] == mockTransactions[5].Signatures[0] + }), mock.Anything). Return(nil).Once() - s := &SolanaIndexer{ + s := &Backfiller{ rpcClient: rpcFake, pool: poolMock, - // processor: processorMock, - logger: zap.NewNop(), + processor: processorMock, + logger: zap.NewNop(), } - err = s.Backfill(context.Background(), 100, 200) - + err = s.Start(context.Background(), 100, 200) assert.NoError(t, err) assert.NoError(t, poolMock.ExpectationsWereMet()) processorMock.AssertExpectations(t) @@ -379,17 +382,17 @@ func TestBackfillFresh(t *testing.T) { processorMock := &mockProcessor{} // Should get called once for each program - processorMock.On("ProcessSignature", mock.Anything, mock.Anything, mockTransactions[1].Signatures[0], mock.Anything). + processorMock.On("ProcessTransaction", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). 
Return(nil).Times(3) - s := &SolanaIndexer{ + s := &Backfiller{ rpcClient: rpcFake, pool: poolMock, - // processor: processorMock, - logger: zap.NewNop(), + processor: processorMock, + logger: zap.NewNop(), } - err = s.Backfill(context.Background(), 100, 200) + err = s.Start(context.Background(), 100, 200) assert.NoError(t, err) assert.NoError(t, poolMock.ExpectationsWereMet()) diff --git a/solana/indexer/program/indexer.go b/solana/indexer/program/indexer.go index 7651b2a1..3b130aa6 100644 --- a/solana/indexer/program/indexer.go +++ b/solana/indexer/program/indexer.go @@ -105,7 +105,7 @@ func (d *Indexer) HandleUpdate(ctx context.Context, msg *pb.SubscribeUpdate) err tx = common.ResolveLookupTables(ctx, d.rpcClient, tx, txRes.Meta) // Process the transaction - d.processTransaction(ctx, txRes.Slot, txRes.Meta, tx, txRes.BlockTime.Time()) + d.ProcessTransaction(ctx, txRes.Slot, txRes.Meta, tx, txRes.BlockTime.Time()) return nil } @@ -174,7 +174,7 @@ func (d *Indexer) makeSubscriptionRequest(ctx context.Context, programIds []stri return subscription } -func (d *Indexer) processTransaction(ctx context.Context, slot uint64, meta *rpc.TransactionMeta, tx *solana.Transaction, blockTime time.Time) error { +func (d *Indexer) ProcessTransaction(ctx context.Context, slot uint64, meta *rpc.TransactionMeta, tx *solana.Transaction, blockTime time.Time) error { signature := tx.Signatures[0].String() logger := d.logger.With( zap.String("signature", signature), From 56fefa99a10ee9308f58484e31216f3d05d6fb8a Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Wed, 15 Oct 2025 12:12:50 -0700 Subject: [PATCH 20/56] fix fees to always return a row --- ddl/functions/calculate_artist_coin_fees.sql | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/ddl/functions/calculate_artist_coin_fees.sql b/ddl/functions/calculate_artist_coin_fees.sql index 4478779e..77c53a5f 100644 --- a/ddl/functions/calculate_artist_coin_fees.sql +++ b/ddl/functions/calculate_artist_coin_fees.sql @@ -52,7 +52,9 @@ RETURNS TABLE ( FLOOR(COALESCE(damm_fees.total_damm_v2_fees, 0)) AS total_damm_v2_fees, FLOOR(COALESCE(dbc_fees.unclaimed_dbc_fees, 0) + COALESCE(damm_fees.unclaimed_damm_v2_fees, 0)) AS unclaimed_fees, FLOOR(COALESCE(dbc_fees.total_dbc_fees, 0) + COALESCE(damm_fees.total_damm_v2_fees, 0)) AS total_fees - FROM dbc_fees - FULL OUTER JOIN damm_fees USING (mint); + FROM artist_coins + LEFT JOIN dbc_fees USING (mint) + FULL OUTER JOIN damm_fees USING (mint) + WHERE artist_coins.mint = artist_coin_mint; $function$; COMMIT; \ No newline at end of file From aeee389eef93beeae506a7dd9cc9ac82e8421cb6 Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Wed, 15 Oct 2025 12:13:14 -0700 Subject: [PATCH 21/56] always use test db for creating test schema --- Makefile | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/Makefile b/Makefile index 62895f03..d2adf3d8 100644 --- a/Makefile +++ b/Makefile @@ -39,12 +39,8 @@ apidiff:: test-schema:: @set -a; \ - . 
.env; \ - if [ -z "$$writeDbUrl" ]; then \ - echo "writeDbUrl is not set in .env - using test db and running migrations"; \ - writeDbUrl=postgresql://postgres:example@localhost:21300/postgres; \ - make migrate; \ - fi; \ + writeDbUrl=postgresql://postgres:example@localhost:21300/postgres; \ + make migrate; \ adjustedUrl=$$(echo "$$writeDbUrl" | sed 's/localhost/host.docker.internal/g'); \ docker compose exec db bash -c "pg_dump '$$adjustedUrl' --schema-only --no-owner --no-acl > ./sql/01_schema.sql"; \ sed '/^\\restrict /d;/^\\unrestrict /d' ./sql/01_schema.sql > ./sql/01_schema.sql.tmp && mv ./sql/01_schema.sql.tmp ./sql/01_schema.sql; \ From 4cddb1dce4c7db30b26e807524f1bf2b3fc00fa1 Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Wed, 15 Oct 2025 12:18:07 -0700 Subject: [PATCH 22/56] fix test schema --- sql/01_schema.sql | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/sql/01_schema.sql b/sql/01_schema.sql index ea280ed1..6af28d94 100644 --- a/sql/01_schema.sql +++ b/sql/01_schema.sql @@ -1028,8 +1028,10 @@ CREATE FUNCTION public.calculate_artist_coin_fees(artist_coin_mint text) RETURNS FLOOR(COALESCE(damm_fees.total_damm_v2_fees, 0)) AS total_damm_v2_fees, FLOOR(COALESCE(dbc_fees.unclaimed_dbc_fees, 0) + COALESCE(damm_fees.unclaimed_damm_v2_fees, 0)) AS unclaimed_fees, FLOOR(COALESCE(dbc_fees.total_dbc_fees, 0) + COALESCE(damm_fees.total_damm_v2_fees, 0)) AS total_fees - FROM dbc_fees - FULL OUTER JOIN damm_fees USING (mint); + FROM artist_coins + LEFT JOIN dbc_fees USING (mint) + FULL OUTER JOIN damm_fees USING (mint) + WHERE artist_coins.mint = artist_coin_mint; $$; From 6609c184be716abcfdf5e9c39a45c3a18d1682ff Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Wed, 15 Oct 2025 12:31:59 -0700 Subject: [PATCH 23/56] make deterministic test schema --- Makefile | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index d2adf3d8..96cb7847 100644 --- a/Makefile +++ b/Makefile @@ -40,8 +40,17 @@ apidiff:: test-schema:: @set -a; \ writeDbUrl=postgresql://postgres:example@localhost:21300/postgres; \ + echo "\033[0;32mBringing down any existing containers to start fresh...\033[0m"; \ + docker compose down --volumes; \ + docker compose up -d --wait; \ + echo "\n\033[0;32mRunning migrations on fresh instance...\033[0m"; \ make migrate; \ + echo "\033[0;32mDumping schema...\033[0m"; \ adjustedUrl=$$(echo "$$writeDbUrl" | sed 's/localhost/host.docker.internal/g'); \ docker compose exec db bash -c "pg_dump '$$adjustedUrl' --schema-only --no-owner --no-acl > ./sql/01_schema.sql"; \ sed '/^\\restrict /d;/^\\unrestrict /d' ./sql/01_schema.sql > ./sql/01_schema.sql.tmp && mv ./sql/01_schema.sql.tmp ./sql/01_schema.sql; \ - echo "schema dumped to ./sql/01_schema.sql" + echo "Schema dumped to ./sql/01_schema.sql"; \ + echo "\n\033[0;32mRestarting containers...\033[0m"; \ + docker compose down --volumes; \ + docker compose up -d --wait; \ + echo "\n\033[0;32mDone\033[0m"; \ No newline at end of file From e86577e0f1e504abb6a903166bd7988fc8d4a689 Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Wed, 15 Oct 2025 12:41:04 -0700 Subject: [PATCH 24/56] add healthcheck for db --- compose.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/compose.yml b/compose.yml index 905ae50e..54b8b28f 100644 --- a/compose.yml +++ b/compose.yml @@ -28,6 +28,11 @@ services: # - "log_statement=all" - "-c" - 
"max_connections=1000" + healthcheck: + test: ["CMD-SHELL", "pg_isready -U $$POSTGRES_USER -d $$POSTGRES_DB"] + interval: 5s + timeout: 5s + retries: 5 elasticsearch: image: docker.elastic.co/elasticsearch/elasticsearch:8.10.2 From 8400aaced4edde25c90b7bea066fdee61d2fd7e8 Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Wed, 15 Oct 2025 12:41:48 -0700 Subject: [PATCH 25/56] Make dbc job run more often temporarily --- solana/indexer/solana_indexer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/solana/indexer/solana_indexer.go b/solana/indexer/solana_indexer.go index ed0c3d66..1ccb4f6a 100644 --- a/solana/indexer/solana_indexer.go +++ b/solana/indexer/solana_indexer.go @@ -114,7 +114,7 @@ func (s *SolanaIndexer) Start(ctx context.Context) error { dbcJob := jobs.NewCoinDBCJob(s.config, s.pool) dbcCtx := context.WithoutCancel(ctx) - dbcJob.ScheduleEvery(dbcCtx, 5*time.Minute) + dbcJob.ScheduleEvery(dbcCtx, 1*time.Minute) go dbcJob.Run(dbcCtx) go s.tokenIndexer.Start(ctx) From e77bfedfdea1264ca90171b06c45fd9e1fa0651e Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Wed, 15 Oct 2025 13:50:48 -0700 Subject: [PATCH 26/56] update readme --- README.md | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index cd084968..2dd96945 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,5 @@ # Audius API Server + The API backend for the Audius mobile apps and [audius.co](https://audius.co) [![license](https://img.shields.io/github/license/AudiusProject/api)](https://github.com/AudiusProject/api/blob/main/LICENSE) [![releases](https://img.shields.io/github/v/release/AudiusProject/api)](https://github.com/AudiusProject/api/releases/latest) @@ -45,20 +46,15 @@ The API backend for the Audius mobile apps and [audius.co](https://audius.co) ### Tests #### To run tests against the existing schemas + ``` docker compose up -d make test ``` #### To update schema after migration changes and run tests -``` -docker compose up -d - -# update .env to contain -writeDbUrl=postgresql://postgres:example@localhost:21300/postgres -runMigrations=true -make migrate +``` make test-schema make test ``` From 47779d041d44792d2106546d4c2ab63ec47882fe Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Thu, 16 Oct 2025 15:44:34 -0700 Subject: [PATCH 27/56] add new Version field to DAMM V2 pools --- ddl/migrations/0169_damm_and_positions.sql | 5 +++-- solana/indexer/damm_v2/damm_v2.go | 4 ++++ solana/spl/programs/meteora_damm_v2/types.go | 3 ++- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/ddl/migrations/0169_damm_and_positions.sql b/ddl/migrations/0169_damm_and_positions.sql index 1c85f759..69989f8f 100644 --- a/ddl/migrations/0169_damm_and_positions.sql +++ b/ddl/migrations/0169_damm_and_positions.sql @@ -48,8 +48,9 @@ CREATE TABLE IF NOT EXISTS sol_meteora_damm_v2_pools ( token_b_flag SMALLINT NOT NULL, collect_fee_mode SMALLINT NOT NULL, pool_type SMALLINT NOT NULL, - fee_a_per_liquidity BIGINT NOT NULL, - fee_b_per_liquidity BIGINT NOT NULL, + version SMALLINT NOT NULL, + fee_a_per_liquidity NUMERIC NOT NULL, + fee_b_per_liquidity NUMERIC NOT NULL, permanent_lock_liquidity NUMERIC NOT NULL, creator TEXT NOT NULL, created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, diff --git a/solana/indexer/damm_v2/damm_v2.go b/solana/indexer/damm_v2/damm_v2.go index 4584f608..a95e55d5 100644 --- 
a/solana/indexer/damm_v2/damm_v2.go +++ b/solana/indexer/damm_v2/damm_v2.go @@ -41,6 +41,7 @@ func upsertDammV2Pool( token_b_flag, collect_fee_mode, pool_type, + version, fee_a_per_liquidity, fee_b_per_liquidity, permanent_lock_liquidity, @@ -71,6 +72,7 @@ func upsertDammV2Pool( @token_b_flag, @collect_fee_mode, @pool_type, + @version, @fee_a_per_liquidity, @fee_b_per_liquidity, @permanent_lock_liquidity, @@ -101,6 +103,7 @@ func upsertDammV2Pool( token_b_flag = EXCLUDED.token_b_flag, collect_fee_mode = EXCLUDED.collect_fee_mode, pool_type = EXCLUDED.pool_type, + version = EXCLUDED.version, fee_a_per_liquidity = EXCLUDED.fee_a_per_liquidity, fee_b_per_liquidity = EXCLUDED.fee_b_per_liquidity, permanent_lock_liquidity = EXCLUDED.permanent_lock_liquidity, @@ -132,6 +135,7 @@ func upsertDammV2Pool( "token_b_flag": pool.TokenBFlag, "collect_fee_mode": pool.CollectFeeMode, "pool_type": pool.PoolType, + "version": pool.Version, "fee_a_per_liquidity": pool.FeeAPerLiquidity, "fee_b_per_liquidity": pool.FeeBPerLiquidity, "permanent_lock_liquidity": pool.PermanentLockLiquidity.BigInt(), diff --git a/solana/spl/programs/meteora_damm_v2/types.go b/solana/spl/programs/meteora_damm_v2/types.go index 65cd22ee..7d2097b4 100644 --- a/solana/spl/programs/meteora_damm_v2/types.go +++ b/solana/spl/programs/meteora_damm_v2/types.go @@ -93,7 +93,8 @@ type Pool struct { TokenBFlag uint8 CollectFeeMode uint8 PoolType uint8 - Padding0 [2]uint8 + Version uint8 + Padding0 uint8 FeeAPerLiquidity Uint256LE FeeBPerLiquidity Uint256LE PermanentLockLiquidity bin.Uint128 From 334b6a9316cd7c2c79c3527a1fadbf0d5dbcc7b9 Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Thu, 16 Oct 2025 15:45:26 -0700 Subject: [PATCH 28/56] add dbc pool protocol_quote_fee and clarify pools in migration --- ddl/migrations/0171_artist_coins_pools.sql | 1 + solana/indexer/dbc/dbc.go | 16 ++++++++++------ .../spl/programs/meteora_dbc/MigrationDammV2.go | 6 +++--- 3 files changed, 14 insertions(+), 9 deletions(-) diff --git a/ddl/migrations/0171_artist_coins_pools.sql b/ddl/migrations/0171_artist_coins_pools.sql index a89b182e..8b6be161 100644 --- a/ddl/migrations/0171_artist_coins_pools.sql +++ b/ddl/migrations/0171_artist_coins_pools.sql @@ -15,6 +15,7 @@ CREATE TABLE IF NOT EXISTS sol_meteora_dbc_pools ( base_reserve BIGINT NOT NULL, quote_reserve BIGINT NOT NULL, protocol_base_fee BIGINT NOT NULL, + protocol_quote_fee BIGINT NOT NULL, partner_base_fee BIGINT NOT NULL, partner_quote_fee BIGINT NOT NULL, sqrt_price NUMERIC NOT NULL, diff --git a/solana/indexer/dbc/dbc.go b/solana/indexer/dbc/dbc.go index 03e19ed1..1c3606e1 100644 --- a/solana/indexer/dbc/dbc.go +++ b/solana/indexer/dbc/dbc.go @@ -50,15 +50,15 @@ func processDbcInstruction( dbcPool: migrationInst.GetVirtualPool().PublicKey.String(), migrationMetadata: migrationInst.GetMigrationMetadata().PublicKey.String(), config: migrationInst.GetConfig().PublicKey.String(), - dbcPoolAuthority: migrationInst.GetPoolAuthority().PublicKey.String(), - dammV2Pool: migrationInst.GetPool().PublicKey.String(), + dbcPoolAuthority: migrationInst.GetDbcPoolAuthority().PublicKey.String(), + dammV2Pool: migrationInst.GetDammV2Pool().PublicKey.String(), firstPositionNftMint: migrationInst.GetFirstPositionNftMint().PublicKey.String(), firstPositionNftAccount: migrationInst.GetFirstPositionNftAccount().PublicKey.String(), firstPosition: migrationInst.GetFirstPosition().PublicKey.String(), secondPositionNftMint: 
migrationInst.GetSecondPositionNftMint().PublicKey.String(), secondPositionNftAccount: migrationInst.GetSecondPositionNftAccount().PublicKey.String(), secondPosition: migrationInst.GetSecondPosition().PublicKey.String(), - dammPoolAuthority: migrationInst.GetPoolAuthority().PublicKey.String(), + dammPoolAuthority: migrationInst.GetDammV2PoolAuthority().PublicKey.String(), baseMint: migrationInst.GetBaseMint().PublicKey.String(), quoteMint: migrationInst.GetQuoteMint().PublicKey.String(), }) @@ -68,16 +68,16 @@ func processDbcInstruction( instLogger.Info("dbc migrationDammV2", zap.String("mint", migrationInst.GetBaseMint().PublicKey.String()), zap.String("dbcPool", migrationInst.GetVirtualPool().PublicKey.String()), - zap.String("dammV2Pool", migrationInst.GetPool().PublicKey.String()), + zap.String("dammV2Pool", migrationInst.GetDammV2Pool().PublicKey.String()), ) - err = updateArtistCoinDammV2Pool(ctx, db, migrationInst.GetBaseMint().PublicKey.String(), migrationInst.GetPool().PublicKey.String()) + err = updateArtistCoinDammV2Pool(ctx, db, migrationInst.GetBaseMint().PublicKey.String(), migrationInst.GetDammV2Pool().PublicKey.String()) if err != nil { return fmt.Errorf("failed to update artist coin with damm v2 pool at instruction %d: %w", instructionIndex, err) } instLogger.Info("updated artist coin with damm v2 pool", zap.String("mint", migrationInst.GetBaseMint().PublicKey.String()), - zap.String("dammV2Pool", migrationInst.GetPool().PublicKey.String()), + zap.String("dammV2Pool", migrationInst.GetDammV2Pool().PublicKey.String()), ) } } @@ -205,6 +205,7 @@ func upsertDbcPool( base_reserve, quote_reserve, protocol_base_fee, + protocol_quote_fee, partner_base_fee, partner_quote_fee, sqrt_price, @@ -233,6 +234,7 @@ func upsertDbcPool( @base_reserve, @quote_reserve, @protocol_base_fee, + @protocol_quote_fee, @partner_base_fee, @partner_quote_fee, @sqrt_price, @@ -260,6 +262,7 @@ func upsertDbcPool( base_reserve = EXCLUDED.base_reserve, quote_reserve = EXCLUDED.quote_reserve, protocol_base_fee = EXCLUDED.protocol_base_fee, + protocol_quote_fee = EXCLUDED.protocol_quote_fee, partner_base_fee = EXCLUDED.partner_base_fee, partner_quote_fee = EXCLUDED.partner_quote_fee, sqrt_price = EXCLUDED.sqrt_price, @@ -288,6 +291,7 @@ func upsertDbcPool( "base_reserve": pool.BaseReserve, "quote_reserve": pool.QuoteReserve, "protocol_base_fee": pool.ProtocolBaseFee, + "protocol_quote_fee": pool.ProtocolQuoteFee, "partner_base_fee": pool.PartnerBaseFee, "partner_quote_fee": pool.PartnerQuoteFee, "sqrt_price": pool.SqrtPrice.BigInt(), diff --git a/solana/spl/programs/meteora_dbc/MigrationDammV2.go b/solana/spl/programs/meteora_dbc/MigrationDammV2.go index 5e6c24aa..823bb21b 100644 --- a/solana/spl/programs/meteora_dbc/MigrationDammV2.go +++ b/solana/spl/programs/meteora_dbc/MigrationDammV2.go @@ -18,11 +18,11 @@ func (inst *MigrationDammV2) GetConfig() *solana.AccountMeta { return inst.AccountMetaSlice.Get(2) } -func (inst *MigrationDammV2) GetPoolAuthority() *solana.AccountMeta { +func (inst *MigrationDammV2) GetDbcPoolAuthority() *solana.AccountMeta { return inst.AccountMetaSlice.Get(3) } -func (inst *MigrationDammV2) GetPool() *solana.AccountMeta { +func (inst *MigrationDammV2) GetDammV2Pool() *solana.AccountMeta { return inst.AccountMetaSlice.Get(4) } @@ -50,7 +50,7 @@ func (inst *MigrationDammV2) GetSecondPosition() *solana.AccountMeta { return inst.AccountMetaSlice.Get(10) } -func (inst *MigrationDammV2) GetDammPoolAuthority() *solana.AccountMeta { +func (inst *MigrationDammV2) GetDammV2PoolAuthority() 
*solana.AccountMeta { return inst.AccountMetaSlice.Get(11) } From d16ba21c3b809f37dc2b1bfd255046ca770d86a2 Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Thu, 16 Oct 2025 15:46:17 -0700 Subject: [PATCH 29/56] stronger test assertions --- solana/indexer/damm_v2/indexer_test.go | 72 ++++++++++++++ solana/indexer/dbc/indexer.go | 2 +- solana/indexer/dbc/indexer_test.go | 126 +++++++++++++++++++++++-- 3 files changed, 193 insertions(+), 7 deletions(-) diff --git a/solana/indexer/damm_v2/indexer_test.go b/solana/indexer/damm_v2/indexer_test.go index 98af640d..94870364 100644 --- a/solana/indexer/damm_v2/indexer_test.go +++ b/solana/indexer/damm_v2/indexer_test.go @@ -64,6 +64,7 @@ func TestHandleUpdate_DammV2PoolUpdate(t *testing.T) { Filters: []string{NAME}, UpdateOneof: &pb.SubscribeUpdate_Account{ Account: &pb.SubscribeUpdateAccount{ + Slot: 123456789, Account: &pb.SubscribeUpdateAccountInfo{ Pubkey: address.Bytes(), Data: poolData, @@ -75,6 +76,77 @@ func TestHandleUpdate_DammV2PoolUpdate(t *testing.T) { err = indexer.HandleUpdate(t.Context(), &update) require.NoError(t, err) + // Verify the DAMM v2 pool was inserted + var exists bool + sql := ` + SELECT EXISTS ( + SELECT 1 + FROM sol_meteora_damm_v2_pools + WHERE account = @account + AND slot = @slot + AND token_a_mint = @token_a_mint + AND token_b_mint = @token_b_mint + AND token_a_vault = @token_a_vault + AND token_b_vault = @token_b_vault + AND whitelisted_vault = @whitelisted_vault + AND partner = @partner + AND liquidity = @liquidity + AND protocol_a_fee = @protocol_a_fee + AND protocol_b_fee = @protocol_b_fee + AND partner_a_fee = @partner_a_fee + AND partner_b_fee = @partner_b_fee + AND sqrt_min_price = @sqrt_min_price + AND sqrt_max_price = @sqrt_max_price + AND sqrt_price = @sqrt_price + AND activation_point = @activation_point + AND activation_type = @activation_type + AND pool_status = @pool_status + AND token_a_flag = @token_a_flag + AND token_b_flag = @token_b_flag + AND collect_fee_mode = @collect_fee_mode + AND pool_type = @pool_type + AND version = @version + AND fee_a_per_liquidity = @fee_a_per_liquidity + AND fee_b_per_liquidity = @fee_b_per_liquidity + AND permanent_lock_liquidity = @permanent_lock_liquidity + AND creator = @creator + LIMIT 1 + ) + ` + + err = pool.QueryRow(t.Context(), sql, pgx.NamedArgs{ + "account": address.String(), + "slot": int64(123456789), + "token_a_mint": "bnWKPK7YTUJTe3A3HTGEJrUEoAddRgRjWSwf7MwxMP3", + "token_b_mint": "9LzCMqDgTKYz9Drzqnpgee3SGa89up3a247ypMj2xrqM", + "token_a_vault": "9CG1qU4bhiGX9J5k5Ap1hkWpWftV5fjWJi3h8FMUvaGJ", + "token_b_vault": "7jKehVD6cxYtNmcqLqyFb3W4xtaWjfTf7CWgbWLkQ7ru", + "whitelisted_vault": "11111111111111111111111111111111", + "partner": "FhVo3mqL8PW5pH5U2CN4XE33DokiyZnUwuGpH2hmHLuM", + "liquidity": "31500505798829827035928817465053256", + "protocol_a_fee": uint64(0), + "protocol_b_fee": uint64(88308818520), + "partner_a_fee": uint64(0), + "partner_b_fee": uint64(0), + "sqrt_min_price": uint64(4295048016), + "sqrt_max_price": "79226673521066979257578248091", + "sqrt_price": "132140449179444258", + "activation_point": int64(1759932808), + "activation_type": uint8(1), + "pool_status": uint8(0), + "token_a_flag": uint8(0), + "token_b_flag": uint8(0), + "collect_fee_mode": uint8(1), + "pool_type": uint8(0), + "version": uint8(0), + "fee_a_per_liquidity": uint64(0), + "fee_b_per_liquidity": "3838765547535761", + "permanent_lock_liquidity": "31500505798829827035928817465053256", + "creator": 
"FhVo3mqL8PW5pH5U2CN4XE33DokiyZnUwuGpH2hmHLuM", + }).Scan(&exists) + require.NoError(t, err, "failed to query for damm v2 pool") + assert.True(t, exists, "damm v2 pool should exist after indexing") + rows, err := pool.Query(t.Context(), ` SELECT EXISTS ( SELECT 1 diff --git a/solana/indexer/dbc/indexer.go b/solana/indexer/dbc/indexer.go index 4bfab987..75a5d2ee 100644 --- a/solana/indexer/dbc/indexer.go +++ b/solana/indexer/dbc/indexer.go @@ -162,7 +162,7 @@ func (d *Indexer) HandleUpdate(ctx context.Context, msg *pb.SubscribeUpdate) err } // Process the transaction - err = d.processTransaction(ctx, accountUpdate.Slot, tx) + err = d.processTransaction(ctx, txRes.Slot, tx) } } return nil diff --git a/solana/indexer/dbc/indexer_test.go b/solana/indexer/dbc/indexer_test.go index f994d300..636928b4 100644 --- a/solana/indexer/dbc/indexer_test.go +++ b/solana/indexer/dbc/indexer_test.go @@ -11,6 +11,7 @@ import ( "api.audius.co/solana/indexer/fake_rpc_client" "github.com/gagliardetto/solana-go" "github.com/gagliardetto/solana-go/rpc" + "github.com/jackc/pgx/v5" "github.com/maypok86/otter" pb "github.com/rpcpool/yellowstone-grpc/examples/golang/proto" "github.com/test-go/testify/assert" @@ -81,6 +82,7 @@ func TestHandleUpdate_Migration(t *testing.T) { update := pb.SubscribeUpdate{ UpdateOneof: &pb.SubscribeUpdate_Account{ Account: &pb.SubscribeUpdateAccount{ + Slot: 367312008, Account: &pb.SubscribeUpdateAccountInfo{ Pubkey: poolAddress.Bytes(), Data: poolData, @@ -94,18 +96,130 @@ func TestHandleUpdate_Migration(t *testing.T) { err = indexer.HandleUpdate(t.Context(), &update) require.NoError(t, err) + // Verify that the dbc pool was inserted sql := ` SELECT EXISTS ( SELECT 1 - FROM artist_coins - JOIN sol_meteora_dbc_pools ON sol_meteora_dbc_pools.account = artist_coins.dbc_pool - JOIN sol_meteora_dbc_migrations ON sol_meteora_dbc_migrations.dbc_pool = sol_meteora_dbc_pools.account - WHERE artist_coins.damm_v2_pool IS NOT NULL + FROM sol_meteora_dbc_pools + WHERE account = @account + AND slot = @slot + AND config = @config + AND creator = @creator + AND base_mint = @base_mint + AND base_vault = @base_vault + AND quote_vault = @quote_vault + AND base_reserve = @base_reserve + AND quote_reserve = @quote_reserve + AND protocol_base_fee = @protocol_base_fee + AND partner_base_fee = @partner_base_fee + AND partner_quote_fee = @partner_quote_fee + AND sqrt_price = @sqrt_price + AND activation_point = @activation_point + AND pool_type = @pool_type + AND is_migrated = @is_migrated + AND is_partner_withdraw_surplus = @is_partner_withdraw_surplus + AND is_protocol_withdraw_surplus = @is_protocol_withdraw_surplus + AND migration_progress = @migration_progress + AND is_withdraw_leftover = @is_withdraw_leftover + AND is_creator_withdraw_surplus = @is_creator_withdraw_surplus + AND migration_fee_withdraw_status = @migration_fee_withdraw_status + AND finish_curve_timestamp = @finish_curve_timestamp + AND creator_base_fee = @creator_base_fee + AND creator_quote_fee = @creator_quote_fee LIMIT 1 ) ` var exists bool - err = pool.QueryRow(t.Context(), sql).Scan(&exists) + err = pool.QueryRow(t.Context(), sql, pgx.NamedArgs{ + "account": poolAddress.String(), + "slot": int64(367312008), + "config": "2seGMFauXC22DX8hbop1gh54W1uW8YREWhsU7JuCptTj", + "creator": "CXFjXpXVQqv4jy5bjauLvECrDsKESQB6BPkSiC6dAmak", + "base_mint": "bearR26zyyB3fNQm5wWv1ZfN8MPQDUMwaAuoG79b1Yj", + "base_vault": "DkrZF8DT18gu4n9Q26c46r49ciJwNt8S8jbQhouEEJnA", + "quote_vault": "DQWe8KbbGSUyESbbfzG9keBmhtjMRbRVg4e3wH2h1KFT", + "base_reserve": 
uint64(749999999999750059), + "quote_reserve": uint64(17079211052949), + "protocol_base_fee": uint64(0), + "protocol_quote_fee": uint64(34543885755), + "partner_base_fee": uint64(0), + "partner_quote_fee": uint64(0), + "sqrt_price": uint64(184467440737095520), + "activation_point": int64(364338107), + "pool_type": uint8(0), + "is_migrated": uint8(1), + "is_partner_withdraw_surplus": uint8(0), + "is_protocol_withdraw_surplus": uint8(0), + "migration_progress": 3, + "is_withdraw_leftover": uint8(0), + "is_creator_withdraw_surplus": uint8(0), + "migration_fee_withdraw_status": uint8(0), + "finish_curve_timestamp": int64(1758068175), + "creator_base_fee": uint64(0), + "creator_quote_fee": uint64(0), + }).Scan(&exists) require.NoError(t, err, "failed to query for dbc pool") - assert.True(t, exists, "damm v2 pool should exist after migration") + assert.True(t, exists, "dbc pool should exist after migration") + + // Verify the migration instruction was inserted + sql = ` + SELECT EXISTS ( + SELECT 1 + FROM sol_meteora_dbc_migrations + WHERE signature = @signature + AND instruction_index = @instruction_index + AND slot = @slot + AND dbc_pool = @dbc_pool + AND migration_metadata = @migration_metadata + AND config = @config + AND dbc_pool_authority = @dbc_pool_authority + AND damm_v2_pool = @damm_v2_pool + AND first_position_nft_mint = @first_position_nft_mint + AND first_position_nft_account = @first_position_nft_account + AND first_position = @first_position + AND second_position_nft_mint = @second_position_nft_mint + AND second_position_nft_account = @second_position_nft_account + AND second_position = @second_position + AND damm_pool_authority = @damm_pool_authority + AND base_mint = @base_mint + AND quote_mint = @quote_mint + LIMIT 1 + ) + ` + err = pool.QueryRow(t.Context(), sql, pgx.NamedArgs{ + "signature": txSig.String(), + "instruction_index": int64(0), + "slot": int64(367312008), + "dbc_pool": poolAddress.String(), + "migration_metadata": "BFjrGaLwPznyjCQ47dbP8xVy1QLjdJ7w8CpwZ4sZwqpf", + "config": "2seGMFauXC22DX8hbop1gh54W1uW8YREWhsU7JuCptTj", + "dbc_pool_authority": "FhVo3mqL8PW5pH5U2CN4XE33DokiyZnUwuGpH2hmHLuM", + "damm_v2_pool": "9avVRGRvPsSYiXKBMHnC6RNPbwN5yE3v7fD8FibgScwA", + "first_position_nft_mint": "DgWF4Huj8a4yyGBPaAw4kqQFBuD5BujbHN7VKWaEBp4t", + "first_position_nft_account": "CL5nkYmVBNHRDZLZovyK6RGbv2SFjJRfPgf4pFknitBn", + "first_position": "GRYndBmh1aoQjMXQm4NPs7oQ288o6xghrWVXytyM2c1Q", + "second_position_nft_mint": "76uQDWksWWvFKHn4ZNPTpR9jJ9x3eqgsmcHdb8FmXS3o", + "second_position_nft_account": "GXKmP3hBvRnor2ieNVrcAVGgDev1HUYNrwjZ4yJDwtU5", + "second_position": "9jUTYHEypL6XwYRSk5CZaKMEmNuQoRaPM17JDaNpk6G7", + "damm_pool_authority": "HLnpSz9h2S4hiLQ43rnSD9XkcUThA7B8hQMKmDaiTLcC", + "base_mint": "bearR26zyyB3fNQm5wWv1ZfN8MPQDUMwaAuoG79b1Yj", + "quote_mint": "9LzCMqDgTKYz9Drzqnpgee3SGa89up3a247ypMj2xrqM", + }).Scan(&exists) + require.NoError(t, err, "failed to query for dbc migration") + assert.True(t, exists, "dbc migration should exist after indexing") + + sql = ` + SELECT EXISTS ( + SELECT 1 + FROM artist_coins + WHERE mint = @mint AND damm_v2_pool = @damm_v2_pool + LIMIT 1 + ) + ` + err = pool.QueryRow(t.Context(), sql, pgx.NamedArgs{ + "mint": "bearR26zyyB3fNQm5wWv1ZfN8MPQDUMwaAuoG79b1Yj", + "damm_v2_pool": "9avVRGRvPsSYiXKBMHnC6RNPbwN5yE3v7fD8FibgScwA", + }).Scan(&exists) + require.NoError(t, err, "failed to query for artist coin update") + assert.True(t, exists, "artist coin should be updated with damm v2 pool") } From 433133c1d2a7e0ac547f123b509fb85868cf086f Mon Sep 
17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Thu, 16 Oct 2025 15:46:28 -0700 Subject: [PATCH 30/56] add slot padding constant --- solana/indexer/common/checkpoints.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/solana/indexer/common/checkpoints.go b/solana/indexer/common/checkpoints.go index ea4ab68d..4eb85840 100644 --- a/solana/indexer/common/checkpoints.go +++ b/solana/indexer/common/checkpoints.go @@ -14,7 +14,10 @@ import ( "go.uber.org/zap" ) -const MAX_SLOT_GAP = 2500 +const ( + MAX_SLOT_GAP = 2500 + SLOT_PADDING = 100 +) func EnsureCheckpoint( ctx context.Context, @@ -47,7 +50,7 @@ func EnsureCheckpoint( fromSlot = lastIndexedSlot } else if lastIndexedSlot == 0 { // New subscription, continue from latest slot - 100 - fromSlot = latestSlot - 100 // start 100 slots back to be safe + fromSlot = latestSlot - SLOT_PADDING // start 100 slots back to be safe logger.Warn("no last indexed slot found, starting from most recent slot (less 100 for safety) and skipping backfill", zap.Uint64("fromSlot", fromSlot)) } else { // Existing subscription that's too old, continue from as far back as possible From b509ca4f828663366c74f7a1ef488bb12349d327 Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Thu, 16 Oct 2025 15:56:53 -0700 Subject: [PATCH 31/56] rename update => update_message --- ddl/migrations/0170_sol_retry_queue.sql | 4 ++-- solana/indexer/common/retry_queue.go | 24 +++++++++++------------ solana/indexer/common/retry_queue_test.go | 4 ++-- solana/indexer/damm_v2/indexer_test.go | 6 +++--- solana/indexer/solana_indexer.go | 4 ++-- 5 files changed, 21 insertions(+), 21 deletions(-) diff --git a/ddl/migrations/0170_sol_retry_queue.sql b/ddl/migrations/0170_sol_retry_queue.sql index 2184e0ac..a4f6e42c 100644 --- a/ddl/migrations/0170_sol_retry_queue.sql +++ b/ddl/migrations/0170_sol_retry_queue.sql @@ -1,14 +1,14 @@ CREATE TABLE IF NOT EXISTS sol_retry_queue ( id UUID PRIMARY KEY DEFAULT gen_random_uuid(), indexer TEXT NOT NULL, - update JSONB NOT NULL, + update_message JSONB NOT NULL, error TEXT NOT NULL, created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ); COMMENT ON TABLE sol_retry_queue IS 'Queue for retrying failed indexer updates.'; COMMENT ON COLUMN sol_retry_queue.indexer IS 'The name of the indexer that failed (e.g., token_indexer, damm_v2_indexer).'; -COMMENT ON COLUMN sol_retry_queue.update IS 'The JSONB update data that failed to process.'; +COMMENT ON COLUMN sol_retry_queue.update_message IS 'The JSONB update data that failed to process.'; COMMENT ON COLUMN sol_retry_queue.error IS 'The error message from the failure.'; COMMENT ON COLUMN sol_retry_queue.created_at IS 'The timestamp when the retry entry was created.'; COMMENT ON COLUMN sol_retry_queue.updated_at IS 'The timestamp when the retry entry was last updated.'; diff --git a/solana/indexer/common/retry_queue.go b/solana/indexer/common/retry_queue.go index 7dac653d..55a58ff7 100644 --- a/solana/indexer/common/retry_queue.go +++ b/solana/indexer/common/retry_queue.go @@ -14,12 +14,12 @@ import ( ) type RetryQueueItem struct { - ID string - Indexer string - Update RetryQueueUpdate - Error string - CreatedAt time.Time - UpdatedAt time.Time + ID string + Indexer string + UpdateMessage RetryQueueUpdate + Error string + CreatedAt time.Time + UpdatedAt time.Time } type RetryQueueUpdate struct { @@ -47,7 +47,7 @@ func (r *RetryQueueUpdate) 
UnmarshalJSON(data []byte) error { } func GetRetryQueue(ctx context.Context, db database.DBTX, limit, offset int) ([]RetryQueueItem, error) { - sql := `SELECT id, indexer, update, error, created_at, updated_at + sql := `SELECT id, indexer, update_message, error, created_at, updated_at FROM sol_retry_queue ORDER BY created_at ASC LIMIT @limit OFFSET @offset` @@ -73,14 +73,14 @@ func GetRetryQueue(ctx context.Context, db database.DBTX, limit, offset int) ([] func AddToRetryQueue(ctx context.Context, db database.DBTX, indexer string, update *pb.SubscribeUpdate, errorMessage string) error { sql := ` - INSERT INTO sol_retry_queue (indexer, update, error) - VALUES (@indexer, @update, @error) + INSERT INTO sol_retry_queue (indexer, update_message, error) + VALUES (@indexer, @update_message, @error) ON CONFLICT (id) DO UPDATE SET error = @error, updated_at = NOW() ;` _, err := db.Exec(ctx, sql, pgx.NamedArgs{ - "indexer": indexer, - "update": RetryQueueUpdate{update}, - "error": errorMessage, + "indexer": indexer, + "update_message": RetryQueueUpdate{update}, + "error": errorMessage, }) if err != nil { return fmt.Errorf("failed to insert into retry queue: %w", err) diff --git a/solana/indexer/common/retry_queue_test.go b/solana/indexer/common/retry_queue_test.go index d641c577..8bcf61ff 100644 --- a/solana/indexer/common/retry_queue_test.go +++ b/solana/indexer/common/retry_queue_test.go @@ -37,8 +37,8 @@ func TestRetryQueue(t *testing.T) { item := items[0] assert.Equal(t, indexer, item.Indexer) assert.Equal(t, errorMsg, item.Error) - assert.NotNil(t, item.Update.SubscribeUpdate) - assert.Equal(t, slot, item.Update.SubscribeUpdate.GetSlot().Slot) + assert.NotNil(t, item.UpdateMessage.SubscribeUpdate) + assert.Equal(t, slot, item.UpdateMessage.SubscribeUpdate.GetSlot().Slot) assert.NotNil(t, item.CreatedAt) assert.NotNil(t, item.UpdatedAt) diff --git a/solana/indexer/damm_v2/indexer_test.go b/solana/indexer/damm_v2/indexer_test.go index 94870364..97802a05 100644 --- a/solana/indexer/damm_v2/indexer_test.go +++ b/solana/indexer/damm_v2/indexer_test.go @@ -335,15 +335,15 @@ func TestSubscription(t *testing.T) { require.NoError(t, err) var exists bool - sql = `SELECT EXISTS (SELECT 1 FROM sol_retry_queue WHERE update = @update)` + sql = `SELECT EXISTS (SELECT 1 FROM sol_retry_queue WHERE update_message = @update_message)` err = pool.QueryRow(t.Context(), sql, pgx.NamedArgs{ - "update": positionUpdateJson, + "update_message": positionUpdateJson, }).Scan(&exists) require.NoError(t, err) assert.True(t, exists, "failed position update should be added to retry queue") err = pool.QueryRow(t.Context(), sql, pgx.NamedArgs{ - "update": poolUpdateJson, + "update_message": poolUpdateJson, }).Scan(&exists) require.NoError(t, err) assert.True(t, exists, "failed pool update should be added to retry queue") diff --git a/solana/indexer/solana_indexer.go b/solana/indexer/solana_indexer.go index 1ccb4f6a..7defba0e 100644 --- a/solana/indexer/solana_indexer.go +++ b/solana/indexer/solana_indexer.go @@ -168,7 +168,7 @@ func (s *SolanaIndexer) ProcessRetryQueue(ctx context.Context) error { for _, item := range queue { switch item.Indexer { case "token": - err := s.tokenIndexer.HandleUpdate(ctx, item.Update.SubscribeUpdate) + err := s.tokenIndexer.HandleUpdate(ctx, item.UpdateMessage.SubscribeUpdate) if err != nil { logger.Error("failed to retry token_indexer", zap.Error(err)) offset++ @@ -179,7 +179,7 @@ func (s *SolanaIndexer) ProcessRetryQueue(ctx context.Context) error { } } case "dammv2": - err := 
s.dammV2Indexer.HandleUpdate(ctx, item.Update.SubscribeUpdate) + err := s.dammV2Indexer.HandleUpdate(ctx, item.UpdateMessage.SubscribeUpdate) if err != nil { logger.Error("failed to retry damm_v2_indexer", zap.Error(err)) offset++ From 0b22e55a2eb3961e8df3b7609b633f6b65ea4b31 Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Thu, 16 Oct 2025 15:57:02 -0700 Subject: [PATCH 32/56] update test schema --- sql/01_schema.sql | 44 +++++++------------------------------------- 1 file changed, 7 insertions(+), 37 deletions(-) diff --git a/sql/01_schema.sql b/sql/01_schema.sql index 6af28d94..7b3413be 100644 --- a/sql/01_schema.sql +++ b/sql/01_schema.sql @@ -7412,8 +7412,9 @@ CREATE TABLE public.sol_meteora_damm_v2_pools ( token_b_flag smallint NOT NULL, collect_fee_mode smallint NOT NULL, pool_type smallint NOT NULL, - fee_a_per_liquidity bigint NOT NULL, - fee_b_per_liquidity bigint NOT NULL, + version smallint NOT NULL, + fee_a_per_liquidity numeric NOT NULL, + fee_b_per_liquidity numeric NOT NULL, permanent_lock_liquidity numeric NOT NULL, creator text NOT NULL, created_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, @@ -7526,6 +7527,7 @@ CREATE TABLE public.sol_meteora_dbc_pools ( base_reserve bigint NOT NULL, quote_reserve bigint NOT NULL, protocol_base_fee bigint NOT NULL, + protocol_quote_fee bigint NOT NULL, partner_base_fee bigint NOT NULL, partner_quote_fee bigint NOT NULL, sqrt_price numeric NOT NULL, @@ -7617,7 +7619,7 @@ COMMENT ON COLUMN public.sol_purchases.is_valid IS 'A purchase is valid if it me CREATE TABLE public.sol_retry_queue ( id uuid DEFAULT gen_random_uuid() NOT NULL, indexer text NOT NULL, - update jsonb NOT NULL, + update_message jsonb NOT NULL, error text NOT NULL, created_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL, updated_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP NOT NULL @@ -7639,10 +7641,10 @@ COMMENT ON COLUMN public.sol_retry_queue.indexer IS 'The name of the indexer tha -- --- Name: COLUMN sol_retry_queue.update; Type: COMMENT; Schema: public; Owner: - +-- Name: COLUMN sol_retry_queue.update_message; Type: COMMENT; Schema: public; Owner: - -- -COMMENT ON COLUMN public.sol_retry_queue.update IS 'The JSONB update data that failed to process.'; +COMMENT ON COLUMN public.sol_retry_queue.update_message IS 'The JSONB update data that failed to process.'; -- @@ -11398,38 +11400,6 @@ ALTER TABLE ONLY public.saves ADD CONSTRAINT saves_blocknumber_fkey FOREIGN KEY (blocknumber) REFERENCES public.blocks(number) ON DELETE CASCADE; --- --- Name: sol_meteora_damm_v2_pool_base_fees sol_meteora_damm_v2_pool_base_fees_pool_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - --- - -ALTER TABLE ONLY public.sol_meteora_damm_v2_pool_base_fees - ADD CONSTRAINT sol_meteora_damm_v2_pool_base_fees_pool_fkey FOREIGN KEY (pool) REFERENCES public.sol_meteora_damm_v2_pools(account) ON DELETE CASCADE; - - --- --- Name: sol_meteora_damm_v2_pool_dynamic_fees sol_meteora_damm_v2_pool_dynamic_fees_pool_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - --- - -ALTER TABLE ONLY public.sol_meteora_damm_v2_pool_dynamic_fees - ADD CONSTRAINT sol_meteora_damm_v2_pool_dynamic_fees_pool_fkey FOREIGN KEY (pool) REFERENCES public.sol_meteora_damm_v2_pools(account) ON DELETE CASCADE; - - --- --- Name: sol_meteora_damm_v2_pool_fees sol_meteora_damm_v2_pool_fees_pool_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - --- - -ALTER TABLE ONLY public.sol_meteora_damm_v2_pool_fees - ADD CONSTRAINT 
sol_meteora_damm_v2_pool_fees_pool_fkey FOREIGN KEY (pool) REFERENCES public.sol_meteora_damm_v2_pools(account) ON DELETE CASCADE; - - --- --- Name: sol_meteora_damm_v2_pool_metrics sol_meteora_damm_v2_pool_metrics_pool_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - --- - -ALTER TABLE ONLY public.sol_meteora_damm_v2_pool_metrics - ADD CONSTRAINT sol_meteora_damm_v2_pool_metrics_pool_fkey FOREIGN KEY (pool) REFERENCES public.sol_meteora_damm_v2_pools(account) ON DELETE CASCADE; - - -- -- Name: subscriptions subscriptions_blocknumber_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - -- From 8c849a200522fea71a56854e4c92f15072fbaf5b Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Thu, 16 Oct 2025 15:58:11 -0700 Subject: [PATCH 33/56] make transactions for migrations --- ddl/migrations/0169_damm_and_positions.sql | 6 +++++- ddl/migrations/0170_sol_retry_queue.sql | 4 +++- ddl/migrations/0171_artist_coins_pools.sql | 7 ++++++- ddl/migrations/0172_recipient_eth_address_rewards.sql | 2 ++ 4 files changed, 16 insertions(+), 3 deletions(-) diff --git a/ddl/migrations/0169_damm_and_positions.sql b/ddl/migrations/0169_damm_and_positions.sql index 69989f8f..7345cfdf 100644 --- a/ddl/migrations/0169_damm_and_positions.sql +++ b/ddl/migrations/0169_damm_and_positions.sql @@ -1,3 +1,5 @@ +BEGIN; + CREATE TABLE IF NOT EXISTS sol_meteora_dbc_migrations ( signature TEXT NOT NULL, instruction_index INT NOT NULL, @@ -142,4 +144,6 @@ CREATE TABLE IF NOT EXISTS sol_meteora_damm_v2_position_metrics ( created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ); -COMMENT ON TABLE sol_meteora_damm_v2_position_metrics IS 'Tracks aggregated metrics for DAMM V2 positions. A slice of the DAMM V2 position state.'; \ No newline at end of file +COMMENT ON TABLE sol_meteora_damm_v2_position_metrics IS 'Tracks aggregated metrics for DAMM V2 positions. 
A slice of the DAMM V2 position state.'; + +COMMIT; \ No newline at end of file diff --git a/ddl/migrations/0170_sol_retry_queue.sql b/ddl/migrations/0170_sol_retry_queue.sql index a4f6e42c..27979fb9 100644 --- a/ddl/migrations/0170_sol_retry_queue.sql +++ b/ddl/migrations/0170_sol_retry_queue.sql @@ -1,3 +1,4 @@ +BEGIN; CREATE TABLE IF NOT EXISTS sol_retry_queue ( id UUID PRIMARY KEY DEFAULT gen_random_uuid(), indexer TEXT NOT NULL, @@ -16,4 +17,5 @@ COMMENT ON COLUMN sol_retry_queue.updated_at IS 'The timestamp when the retry en ALTER TABLE sol_slot_checkpoints ADD COLUMN IF NOT EXISTS name TEXT; COMMENT ON COLUMN sol_slot_checkpoints.name IS 'The name of the indexer this checkpoint is for (e.g., token_indexer, damm_v2_indexer).'; -DROP TABLE IF EXISTS sol_unprocessed_txs; \ No newline at end of file +DROP TABLE IF EXISTS sol_unprocessed_txs; +COMMIT; \ No newline at end of file diff --git a/ddl/migrations/0171_artist_coins_pools.sql b/ddl/migrations/0171_artist_coins_pools.sql index 8b6be161..c0920713 100644 --- a/ddl/migrations/0171_artist_coins_pools.sql +++ b/ddl/migrations/0171_artist_coins_pools.sql @@ -1,3 +1,6 @@ + +BEGIN; + ALTER TABLE IF EXISTS artist_coins ADD COLUMN IF NOT EXISTS dbc_pool TEXT, ADD COLUMN IF NOT EXISTS damm_v2_pool TEXT; @@ -33,4 +36,6 @@ CREATE TABLE IF NOT EXISTS sol_meteora_dbc_pools ( creator_quote_fee BIGINT NOT NULL, created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP -) \ No newline at end of file +) + +COMMIT: \ No newline at end of file diff --git a/ddl/migrations/0172_recipient_eth_address_rewards.sql b/ddl/migrations/0172_recipient_eth_address_rewards.sql index c4cb09d5..5c64590d 100644 --- a/ddl/migrations/0172_recipient_eth_address_rewards.sql +++ b/ddl/migrations/0172_recipient_eth_address_rewards.sql @@ -1,4 +1,5 @@ BEGIN; + ALTER TABLE sol_reward_disbursements ADD COLUMN IF NOT EXISTS recipient_eth_address TEXT DEFAULT ''; COMMENT ON COLUMN sol_reward_disbursements.recipient_eth_address IS 'The Ethereum address of the recipient of the reward.'; @@ -13,4 +14,5 @@ WHERE sol_claimable_accounts.account = sol_reward_disbursements.user_bank ALTER TABLE sol_reward_disbursements ALTER COLUMN recipient_eth_address DROP DEFAULT; + COMMIT; \ No newline at end of file From 239eba2c8bde4caf9d6046ff41ef3fbc1e5b21a0 Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Thu, 16 Oct 2025 16:01:17 -0700 Subject: [PATCH 34/56] comments --- solana/indexer/common/checkpoints.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/solana/indexer/common/checkpoints.go b/solana/indexer/common/checkpoints.go index 4eb85840..ae94be94 100644 --- a/solana/indexer/common/checkpoints.go +++ b/solana/indexer/common/checkpoints.go @@ -15,7 +15,9 @@ import ( ) const ( + // Laserstream has a 3000 slot max for historical data. Using 2500 to be extra safe. MAX_SLOT_GAP = 2500 + // Start 100 slots back when indexing from a fresh subscription, in case the indexer takes a bit to start up. 
SLOT_PADDING = 100 ) From eec077da9cde68ed2308e7217803977ad68e417f Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Thu, 16 Oct 2025 16:01:51 -0700 Subject: [PATCH 35/56] clearer args --- solana/indexer/common/retry_queue.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/solana/indexer/common/retry_queue.go b/solana/indexer/common/retry_queue.go index 55a58ff7..dd7785e3 100644 --- a/solana/indexer/common/retry_queue.go +++ b/solana/indexer/common/retry_queue.go @@ -46,7 +46,7 @@ func (r *RetryQueueUpdate) UnmarshalJSON(data []byte) error { return protojson.Unmarshal(data, r.SubscribeUpdate) } -func GetRetryQueue(ctx context.Context, db database.DBTX, limit, offset int) ([]RetryQueueItem, error) { +func GetRetryQueue(ctx context.Context, db database.DBTX, limit int, offset int) ([]RetryQueueItem, error) { sql := `SELECT id, indexer, update_message, error, created_at, updated_at FROM sol_retry_queue ORDER BY created_at ASC From f3f357a32c8bb17f867cd72ede4916fc094546cf Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Thu, 16 Oct 2025 16:03:37 -0700 Subject: [PATCH 36/56] remove sleeps --- solana/indexer/damm_v2/indexer_test.go | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/solana/indexer/damm_v2/indexer_test.go b/solana/indexer/damm_v2/indexer_test.go index 97802a05..d99988a5 100644 --- a/solana/indexer/damm_v2/indexer_test.go +++ b/solana/indexer/damm_v2/indexer_test.go @@ -274,11 +274,7 @@ func TestSubscription(t *testing.T) { defer cancel() go indexer.Start(ctx) - for { - if grpcMock.onUpdate != nil { - break - } - time.Sleep(time.Millisecond * 10) + for grpcMock.onUpdate == nil { } // Assert the original subscription included the actual account @@ -306,11 +302,7 @@ func TestSubscription(t *testing.T) { }) require.NoError(t, err) - for { - if grpcMock.onUpdate != nil { - break - } - time.Sleep(time.Millisecond * 10) + for grpcMock.onUpdate == nil { } cancel() From 7cd1ff921c15e124b762f8b0d35fd89c5a43c6f9 Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Thu, 16 Oct 2025 16:06:50 -0700 Subject: [PATCH 37/56] collect instead of scan loops --- solana/indexer/damm_v2/indexer.go | 11 +---------- solana/indexer/dbc/indexer.go | 11 +---------- 2 files changed, 2 insertions(+), 20 deletions(-) diff --git a/solana/indexer/damm_v2/indexer.go b/solana/indexer/damm_v2/indexer.go index 4343fd04..ed8f6663 100644 --- a/solana/indexer/damm_v2/indexer.go +++ b/solana/indexer/damm_v2/indexer.go @@ -344,15 +344,6 @@ func getSubscribedDammV2Pools(ctx context.Context, db database.DBTX, limit int, if err != nil { return nil, err } - defer rows.Close() - var pools []string - for rows.Next() { - var address string - if err := rows.Scan(&address); err != nil { - return nil, err - } - pools = append(pools, address) - } - return pools, nil + return pgx.CollectRows(rows, pgx.RowTo[string]) } diff --git a/solana/indexer/dbc/indexer.go b/solana/indexer/dbc/indexer.go index 75a5d2ee..67ae4d9e 100644 --- a/solana/indexer/dbc/indexer.go +++ b/solana/indexer/dbc/indexer.go @@ -291,15 +291,6 @@ func getSubscribedDbcPools(ctx context.Context, db database.DBTX, limit int, off if err != nil { return nil, err } - defer rows.Close() - var pools []string - for rows.Next() { - var address string - if err := rows.Scan(&address); err != nil { - return nil, err - } - pools = append(pools, address) - } - return pools, nil + return 
pgx.CollectRows(rows, pgx.RowTo[string]) } From eb3e0c3b8299a87f5c37a49bfd32abe5ab58052c Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Thu, 16 Oct 2025 16:12:00 -0700 Subject: [PATCH 38/56] clarity for retry queue --- solana/indexer/common/retry_queue.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/solana/indexer/common/retry_queue.go b/solana/indexer/common/retry_queue.go index dd7785e3..225a67a3 100644 --- a/solana/indexer/common/retry_queue.go +++ b/solana/indexer/common/retry_queue.go @@ -13,7 +13,7 @@ import ( pb "github.com/rpcpool/yellowstone-grpc/examples/golang/proto" ) -type RetryQueueItem struct { +type RetryQueueRow struct { ID string Indexer string UpdateMessage RetryQueueUpdate @@ -22,10 +22,12 @@ type RetryQueueItem struct { UpdatedAt time.Time } +// Wrapper to handle protobuf JSON marshalling/unmarshalling type RetryQueueUpdate struct { *pb.SubscribeUpdate } +// Ensures the RetryQueueUpdate struct implements the json.Marshaler and json.Unmarshaler interfaces var ( _ json.Marshaler = (*RetryQueueUpdate)(nil) _ json.Unmarshaler = (*RetryQueueUpdate)(nil) @@ -46,7 +48,7 @@ func (r *RetryQueueUpdate) UnmarshalJSON(data []byte) error { return protojson.Unmarshal(data, r.SubscribeUpdate) } -func GetRetryQueue(ctx context.Context, db database.DBTX, limit int, offset int) ([]RetryQueueItem, error) { +func GetRetryQueue(ctx context.Context, db database.DBTX, limit int, offset int) ([]RetryQueueRow, error) { sql := `SELECT id, indexer, update_message, error, created_at, updated_at FROM sol_retry_queue ORDER BY created_at ASC @@ -64,7 +66,7 @@ func GetRetryQueue(ctx context.Context, db database.DBTX, limit int, offset int) return nil, fmt.Errorf("failed to query retry queue: %w", err) } - items, err := pgx.CollectRows(rows, pgx.RowToStructByName[RetryQueueItem]) + items, err := pgx.CollectRows(rows, pgx.RowToStructByName[RetryQueueRow]) if err != nil { return nil, fmt.Errorf("failed to collect retry queue items: %w", err) } From 65107135b474b6186d7ec98c1cd541918cfb3a6a Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Thu, 16 Oct 2025 16:24:34 -0700 Subject: [PATCH 39/56] use discriminator instead of size to check position account --- solana/indexer/damm_v2/indexer.go | 16 +++++++++++----- solana/spl/programs/meteora_damm_v2/constants.go | 10 ++++++++++ 2 files changed, 21 insertions(+), 5 deletions(-) create mode 100644 solana/spl/programs/meteora_damm_v2/constants.go diff --git a/solana/indexer/damm_v2/indexer.go b/solana/indexer/damm_v2/indexer.go index ed8f6663..27a816d4 100644 --- a/solana/indexer/damm_v2/indexer.go +++ b/solana/indexer/damm_v2/indexer.go @@ -220,16 +220,22 @@ func (d *Indexer) makeSubscriptionRequest(ctx context.Context, dammV2Pools []str { Filter: &pb.SubscribeRequestFilterAccountsFilter_Memcmp{ Memcmp: &pb.SubscribeRequestFilterAccountsFilterMemcmp{ - Offset: 8, // Offset of the pool field in the position account (after discriminator) - Data: &pb.SubscribeRequestFilterAccountsFilterMemcmp_Base58{ - Base58: pool, + Offset: 0, + Data: &pb.SubscribeRequestFilterAccountsFilterMemcmp_Bytes{ + Bytes: meteora_damm_v2.POSITION_DISCRIMINATOR, }, }, }, }, { - Filter: &pb.SubscribeRequestFilterAccountsFilter_Datasize{ - Datasize: 408, // byte size of a Position account + Filter: &pb.SubscribeRequestFilterAccountsFilter_Memcmp{ + Memcmp: &pb.SubscribeRequestFilterAccountsFilterMemcmp{ + // Pool address is the next field after the discriminator (8 
bytes) + Offset: uint64(len(meteora_damm_v2.POSITION_DISCRIMINATOR)), + Data: &pb.SubscribeRequestFilterAccountsFilterMemcmp_Base58{ + Base58: pool, + }, + }, }, }, }, diff --git a/solana/spl/programs/meteora_damm_v2/constants.go b/solana/spl/programs/meteora_damm_v2/constants.go new file mode 100644 index 00000000..f6ca095c --- /dev/null +++ b/solana/spl/programs/meteora_damm_v2/constants.go @@ -0,0 +1,10 @@ +package meteora_damm_v2 + +var ( + // Discriminator for Meteora DAMM V2 pool accounts + // See: https://github.com/MeteoraAg/damm-v2-go/blob/3cc12838bce93a9cd546b22a1caaabaa81ce81f7/instructions/state.go#L31 + POOL_DISCRIMINATOR = []byte{241, 154, 109, 4, 17, 177, 109, 188} + // Discriminator for Meteora DAMM V2 position accounts + // See: https://github.com/MeteoraAg/damm-v2-go/blob/3cc12838bce93a9cd546b22a1caaabaa81ce81f7/instructions/state.go#L128 + POSITION_DISCRIMINATOR = []byte{170, 188, 143, 228, 122, 64, 247, 208} +) From 8a9814c128723f2484f9c70693fce2ae62fe6300 Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Thu, 16 Oct 2025 16:58:10 -0700 Subject: [PATCH 40/56] fix typos --- ddl/migrations/0171_artist_coins_pools.sql | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ddl/migrations/0171_artist_coins_pools.sql b/ddl/migrations/0171_artist_coins_pools.sql index c0920713..ba5536c8 100644 --- a/ddl/migrations/0171_artist_coins_pools.sql +++ b/ddl/migrations/0171_artist_coins_pools.sql @@ -36,6 +36,6 @@ CREATE TABLE IF NOT EXISTS sol_meteora_dbc_pools ( creator_quote_fee BIGINT NOT NULL, created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP -) +); -COMMIT: \ No newline at end of file +COMMIT; \ No newline at end of file From 592f96da88ad3acbebb932a11f9485b3c217d491 Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Thu, 16 Oct 2025 17:00:30 -0700 Subject: [PATCH 41/56] fix test --- solana/indexer/damm_v2/indexer_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/solana/indexer/damm_v2/indexer_test.go b/solana/indexer/damm_v2/indexer_test.go index d99988a5..cfbd7310 100644 --- a/solana/indexer/damm_v2/indexer_test.go +++ b/solana/indexer/damm_v2/indexer_test.go @@ -282,7 +282,7 @@ func TestSubscription(t *testing.T) { hasDammFilter := len(req.Accounts[NAME].Account) == 1 && req.Accounts[NAME].Account[0] == dammPoolAddress.String() hasPositionFilter := req.Accounts[dammPoolAddress.String()]. - Filters[0]. + Filters[1]. Filter.(*pb.SubscribeRequestFilterAccountsFilter_Memcmp). Memcmp. Data.(*pb.SubscribeRequestFilterAccountsFilterMemcmp_Base58). @@ -312,7 +312,7 @@ func TestSubscription(t *testing.T) { hasDammFilter := len(req.Accounts[NAME].Account) == 1 && req.Accounts[NAME].Account[0] == dammPoolAddress2.String() hasPositionFilter := req.Accounts[dammPoolAddress2.String()]. - Filters[0]. + Filters[1]. Filter.(*pb.SubscribeRequestFilterAccountsFilter_Memcmp). Memcmp. Data.(*pb.SubscribeRequestFilterAccountsFilterMemcmp_Base58). 
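A note on the discriminator filter adopted two patches above: Anchor-style accounts begin with an 8-byte type discriminator, so matching bytes [0, 8) against POSITION_DISCRIMINATOR and bytes [8, 40) against the pool address selects exactly the Position accounts of one pool, regardless of account size. The sketch below shows the equivalent client-side predicate. It is illustrative only: isPositionForPool is a hypothetical helper, while the discriminator value mirrors solana/spl/programs/meteora_damm_v2/constants.go and the pool address is the fixture from the DBC migration test.

package main

import (
	"bytes"
	"fmt"

	"github.com/gagliardetto/solana-go"
)

// Mirrors POSITION_DISCRIMINATOR in solana/spl/programs/meteora_damm_v2/constants.go.
var positionDiscriminator = []byte{170, 188, 143, 228, 122, 64, 247, 208}

// isPositionForPool reports whether raw account data looks like a DAMM V2
// Position account belonging to the given pool: bytes [0, 8) carry the account
// discriminator and bytes [8, 40) carry the pool address, the same two
// predicates the memcmp filters in makeSubscriptionRequest express server-side.
func isPositionForPool(data []byte, pool solana.PublicKey) bool {
	if len(data) < 40 {
		return false
	}
	return bytes.Equal(data[:8], positionDiscriminator) &&
		bytes.Equal(data[8:40], pool.Bytes())
}

func main() {
	pool := solana.MustPublicKeyFromBase58("9avVRGRvPsSYiXKBMHnC6RNPbwN5yE3v7fD8FibgScwA")
	data := append(append([]byte{}, positionDiscriminator...), pool.Bytes()...)
	fmt.Println(isPositionForPool(data, pool)) // true
}

This is also why the earlier Datasize: 408 filter could be dropped: the discriminator identifies the account type directly, whereas a size check would silently break if the Position layout ever grew or shrank.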
From 7431b04f3e24c9cd94dfccd1577ed958f2963ae9 Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Thu, 16 Oct 2025 19:05:36 -0700 Subject: [PATCH 42/56] Use pgxlisten --- ddl/functions/handle_artist_coins.sql | 6 +-- solana/indexer/damm_v2/indexer.go | 63 ++++++++++++++++++------ solana/indexer/dbc/indexer.go | 70 +++++++++++++++++++++------ solana/indexer/program/indexer.go | 2 +- solana/indexer/token/indexer.go | 63 ++++++++++++++++++------ 5 files changed, 156 insertions(+), 48 deletions(-) diff --git a/ddl/functions/handle_artist_coins.sql b/ddl/functions/handle_artist_coins.sql index 5a972a2c..f33bc488 100644 --- a/ddl/functions/handle_artist_coins.sql +++ b/ddl/functions/handle_artist_coins.sql @@ -5,21 +5,21 @@ BEGIN OR (OLD.mint IS NOT NULL AND NEW.mint IS NULL) OR OLD.mint != NEW.mint THEN - PERFORM pg_notify('artist_coins_mint_changed', NEW.mint); + PERFORM pg_notify('artist_coins_mint_changed', JSON_BUILD_OBJECT('new', NEW.mint, 'old', OLD.mint)::TEXT); END IF; IF (OLD.dbc_pool IS NULL AND NEW.dbc_pool IS NOT NULL) OR (OLD.dbc_pool IS NOT NULL AND NEW.dbc_pool IS NULL) OR OLD.dbc_pool != NEW.dbc_pool THEN - PERFORM pg_notify('artist_coins_dbc_pool_changed', NEW.dbc_pool); + PERFORM pg_notify('artist_coins_dbc_pool_changed', JSON_BUILD_OBJECT('new', NEW.dbc_pool, 'old', OLD.dbc_pool)::TEXT); END IF; IF (OLD.damm_v2_pool IS NULL AND NEW.damm_v2_pool IS NOT NULL) OR (OLD.damm_v2_pool IS NOT NULL AND NEW.damm_v2_pool IS NULL) OR OLD.damm_v2_pool != NEW.damm_v2_pool THEN - PERFORM pg_notify('artist_coins_damm_v2_pool_changed', NEW.damm_v2_pool); + PERFORM pg_notify('artist_coins_damm_v2_pool_changed', JSON_BUILD_OBJECT('new', NEW.damm_v2_pool, 'old', OLD.damm_v2_pool)::TEXT); END IF; RETURN NEW; diff --git a/solana/indexer/damm_v2/indexer.go b/solana/indexer/damm_v2/indexer.go index 27a816d4..92ba7d16 100644 --- a/solana/indexer/damm_v2/indexer.go +++ b/solana/indexer/damm_v2/indexer.go @@ -2,7 +2,10 @@ package damm_v2 import ( "context" + "encoding/json" + "errors" "fmt" + "time" "api.audius.co/database" "api.audius.co/solana/indexer/common" @@ -11,6 +14,7 @@ import ( "github.com/gagliardetto/solana-go" "github.com/jackc/pgx/v5" "github.com/jackc/pgx/v5/pgconn" + "github.com/jackc/pgxlisten" pb "github.com/rpcpool/yellowstone-grpc/examples/golang/proto" "go.uber.org/zap" ) @@ -58,9 +62,26 @@ func (d *Indexer) Start(ctx context.Context) { })() // On notification, cancel the previous subscription task (if any) and start a new one - handleNotif := func(ctx context.Context, notification *pgconn.Notification) { + handleNotif := func(ctx context.Context, notification *pgconn.Notification, conn *pgx.Conn) error { subCtx, cancel := context.WithCancel(ctx) + type notificationPayload struct { + New string + Old string + } + var n notificationPayload + err := json.Unmarshal([]byte(notification.Payload), &n) + if err != nil { + d.logger.Error("failed to unmarshal notification payload", zap.String("payload", notification.Payload), zap.Error(err)) + // Proceed with resubscription even if unmarshalling fails + } else { + d.logger.Info("resubscribing due to mint change", + zap.String("notification", notification.Channel), + zap.String("new", n.New), + zap.String("old", n.Old), + ) + } + // Cancel previous subscription task if lastCancel != nil { lastCancel() @@ -76,12 +97,12 @@ func (d *Indexer) Start(ctx context.Context) { clients, err := d.subscribe(subCtx) grpcClients = clients if err != nil { - d.logger.Error("failed to resubscribe to DAMM V2 pools", 
zap.Error(err)) cancel() - return + return fmt.Errorf("failed to resubscribe to DAMM V2 pools: %w", err) } lastCancel = cancel + return nil } // Setup initial subscription @@ -92,22 +113,36 @@ func (d *Indexer) Start(ctx context.Context) { } grpcClients = clients - // Watch for new pools to be added - err = common.WatchPgNotification(ctx, d.pool, NOTIFICATION_NAME, handleNotif, d.logger) + // Acquire the connection to be used by pgxlisten + conn, err := d.pool.Acquire(ctx) if err != nil { - d.logger.Error("failed to watch for DAMM V2 pool changes", zap.Error(err)) + d.logger.Error("failed to acquire database connection", zap.Error(err)) return } + defer conn.Release() + + // Setup a listener for pg_notify notifications + listener := pgxlisten.Listener{ + Connect: func(ctx context.Context) (*pgx.Conn, error) { + return conn.Conn(), nil + }, + LogError: func(ctx context.Context, err error) { + if !errors.Is(err, context.Canceled) { + d.logger.Error("error occured in pg_notify subscription", zap.Error(err)) + } + }, + ReconnectDelay: 1 * time.Second, + } + listener.Handle(NOTIFICATION_NAME, pgxlisten.HandlerFunc(handleNotif)) - // Wait for shutdown - for { - select { - case <-ctx.Done(): - d.logger.Info("received shutdown signal, stopping DAMM V2 indexer") - return - default: - } + // Start listening for notifications + // this will block until the context is cancelled + err = listener.Listen(ctx) + if err != nil && !errors.Is(err, context.Canceled) { + d.logger.Error("failed to start pgxlisten listener", zap.Error(err)) } + + d.logger.Info("shutting down") } // Handles a single update message from the gRPC subscription diff --git a/solana/indexer/dbc/indexer.go b/solana/indexer/dbc/indexer.go index 67ae4d9e..97d3b19e 100644 --- a/solana/indexer/dbc/indexer.go +++ b/solana/indexer/dbc/indexer.go @@ -2,17 +2,20 @@ package dbc import ( "context" + "encoding/json" + "errors" "fmt" + "time" "api.audius.co/database" "api.audius.co/solana/indexer/common" - "api.audius.co/solana/spl/programs/meteora_damm_v2" "api.audius.co/solana/spl/programs/meteora_dbc" bin "github.com/gagliardetto/binary" "github.com/gagliardetto/solana-go" "github.com/gagliardetto/solana-go/rpc" "github.com/jackc/pgx/v5" "github.com/jackc/pgx/v5/pgconn" + "github.com/jackc/pgxlisten" "github.com/maypok86/otter" pb "github.com/rpcpool/yellowstone-grpc/examples/golang/proto" "go.uber.org/zap" @@ -62,9 +65,26 @@ func (d *Indexer) Start(ctx context.Context) { })() // On notification, cancel the previous subscription task (if any) and start a new one - handleNotif := func(ctx context.Context, notification *pgconn.Notification) { + handleNotif := func(ctx context.Context, notification *pgconn.Notification, conn *pgx.Conn) error { subCtx, cancel := context.WithCancel(ctx) + type notificationPayload struct { + New string + Old string + } + var n notificationPayload + err := json.Unmarshal([]byte(notification.Payload), &n) + if err != nil { + d.logger.Error("failed to unmarshal notification payload", zap.String("payload", notification.Payload), zap.Error(err)) + // Proceed with resubscription even if unmarshalling fails + } else { + d.logger.Info("resubscribing due to dbc_pool change", + zap.String("notification", notification.Channel), + zap.String("new", n.New), + zap.String("old", n.Old), + ) + } + // Cancel previous subscription task if lastCancel != nil { lastCancel() @@ -80,38 +100,52 @@ func (d *Indexer) Start(ctx context.Context) { clients, err := d.subscribe(subCtx) grpcClients = clients if err != nil { - d.logger.Error("failed 
-			d.logger.Error("failed to resubscribe to DBC pools", zap.Error(err))
 			cancel()
-			return
+			return fmt.Errorf("failed to resubscribe to DBC pools: %w", err)
 		}
 		lastCancel = cancel
+		return nil
 	}

 	// Setup initial subscription
 	clients, err := d.subscribe(ctx)
 	if err != nil {
-		d.logger.Error("failed to subscribe to DAMM V2 pools", zap.Error(err))
+		d.logger.Error("failed to subscribe to DBC pools", zap.Error(err))
 		return
 	}
 	grpcClients = clients

-	// Watch for new pools to be added
-	err = common.WatchPgNotification(ctx, d.pool, NOTIFICATION_NAME, handleNotif, d.logger)
+	// Acquire the connection to be used by pgxlisten
+	conn, err := d.pool.Acquire(ctx)
 	if err != nil {
-		d.logger.Error("failed to watch for DBC pool changes", zap.Error(err))
+		d.logger.Error("failed to acquire database connection", zap.Error(err))
 		return
 	}
+	defer conn.Release()
+
+	// Set up a listener for pg_notify notifications
+	listener := pgxlisten.Listener{
+		Connect: func(ctx context.Context) (*pgx.Conn, error) {
+			return conn.Conn(), nil
+		},
+		LogError: func(ctx context.Context, err error) {
+			if !errors.Is(err, context.Canceled) {
+				d.logger.Error("error occurred in pg_notify subscription", zap.Error(err))
+			}
+		},
+		ReconnectDelay: 1 * time.Second,
+	}
+	listener.Handle(NOTIFICATION_NAME, pgxlisten.HandlerFunc(handleNotif))

-	// Wait for shutdown
-	for {
-		select {
-		case <-ctx.Done():
-			d.logger.Info("received shutdown signal, stopping indexer")
-			return
-		default:
-		}
+	// Start listening for notifications.
+	// This will block until the context is cancelled.
+	err = listener.Listen(ctx)
+	if err != nil && !errors.Is(err, context.Canceled) {
+		d.logger.Error("failed to start pgxlisten listener", zap.Error(err))
 	}
+
+	d.logger.Info("shutting down")
 }

 func (d *Indexer) HandleUpdate(ctx context.Context, msg *pb.SubscribeUpdate) error {
@@ -144,6 +178,10 @@ func (d *Indexer) HandleUpdate(ctx context.Context, msg *pb.SubscribeUpdate) err
 	if err != nil {
 		return fmt.Errorf("failed to upsert DBC pool: %w", err)
 	}
+	d.logger.Debug("upserted DBC pool",
+		zap.String("account", account.String()),
+		zap.String("mint", pool.BaseMint.String()),
+	)

 	// If the pool is migrated, check for the migration transaction and process it
 	if pool.IsMigrated == uint8(1) {
diff --git a/solana/indexer/program/indexer.go b/solana/indexer/program/indexer.go
index 3b130aa6..e85c9bcf 100644
--- a/solana/indexer/program/indexer.go
+++ b/solana/indexer/program/indexer.go
@@ -58,7 +58,7 @@ func (d *Indexer) Start(ctx context.Context) {
 	for {
 		select {
 		case <-ctx.Done():
-			d.logger.Info("received shutdown signal, stopping indexer")
+			d.logger.Info("shutting down")
 			return
 		default:
 		}
diff --git a/solana/indexer/token/indexer.go b/solana/indexer/token/indexer.go
index 0c19e604..b897e547 100644
--- a/solana/indexer/token/indexer.go
+++ b/solana/indexer/token/indexer.go
@@ -2,7 +2,10 @@ package token

 import (
 	"context"
+	"encoding/json"
+	"errors"
 	"fmt"
+	"time"

 	"api.audius.co/database"
 	"api.audius.co/solana/indexer/common"
 	"github.com/gagliardetto/solana-go/rpc"
 	"github.com/jackc/pgx/v5"
 	"github.com/jackc/pgx/v5/pgconn"
+	"github.com/jackc/pgxlisten"
 	"github.com/maypok86/otter"
 	pb "github.com/rpcpool/yellowstone-grpc/examples/golang/proto"
 	"go.uber.org/zap"
@@ -93,9 +97,26 @@ func (d *Indexer) Start(ctx context.Context) {
 	}

 	// On notification, cancel the previous subscription task (if any) and start a new one
-	handleNotif := func(ctx context.Context, notification *pgconn.Notification) {
+	handleNotif := func(ctx context.Context, notification *pgconn.Notification, conn *pgx.Conn) error {
 		subCtx, cancel := context.WithCancel(ctx)

+		type notificationPayload struct {
+			New string
+			Old string
+		}
+		var n notificationPayload
+		err := json.Unmarshal([]byte(notification.Payload), &n)
+		if err != nil {
+			d.logger.Error("failed to unmarshal notification payload", zap.String("payload", notification.Payload), zap.Error(err))
+			// Proceed with resubscription even if unmarshalling fails
+		} else {
+			d.logger.Info("resubscribing due to mint change",
+				zap.String("notification", notification.Channel),
+				zap.String("new", n.New),
+				zap.String("old", n.Old),
+			)
+		}
+
 		// Cancel previous subscription task
 		if lastCancel != nil {
 			lastCancel()
@@ -111,12 +132,12 @@ func (d *Indexer) Start(ctx context.Context) {
 		clients, err := d.subscribeToArtistCoins(subCtx, handleUpdate)
 		grpcClients = clients
 		if err != nil {
-			d.logger.Error("failed to resubscribe to artist coins", zap.Error(err))
 			cancel()
-			return
+			return fmt.Errorf("failed to resubscribe to artist coins: %w", err)
 		}
 		lastCancel = cancel
+		return nil
 	}

 	// Initial subscription to all artist coins
@@ -127,22 +148,36 @@ func (d *Indexer) Start(ctx context.Context) {
 	}
 	grpcClients = clients

-	// Watch for new coins to be added
-	err = common.WatchPgNotification(ctx, d.pool, NOTIFICATION_NAME, handleNotif, d.logger)
+	// Acquire the connection to be used by pgxlisten
+	conn, err := d.pool.Acquire(ctx)
 	if err != nil {
-		d.logger.Error("failed to watch for artist coin changes", zap.Error(err))
+		d.logger.Error("failed to acquire database connection", zap.Error(err))
 		return
 	}
+	defer conn.Release()
+
+	// Set up a listener for pg_notify notifications
+	listener := pgxlisten.Listener{
+		Connect: func(ctx context.Context) (*pgx.Conn, error) {
+			return conn.Conn(), nil
+		},
+		LogError: func(ctx context.Context, err error) {
+			if !errors.Is(err, context.Canceled) {
+				d.logger.Error("error occurred in pg_notify subscription", zap.Error(err))
+			}
+		},
+		ReconnectDelay: 1 * time.Second,
+	}
+	listener.Handle(NOTIFICATION_NAME, pgxlisten.HandlerFunc(handleNotif))

-	// Wait for shutdown
-	for {
-		select {
-		case <-ctx.Done():
-			d.logger.Info("received shutdown signal, stopping indexer")
-			return
-		default:
-		}
+	// Start listening for notifications.
+	// This will block until the context is cancelled.
+	err = listener.Listen(ctx)
+	if err != nil && !errors.Is(err, context.Canceled) {
+		d.logger.Error("failed to start pgxlisten listener", zap.Error(err))
 	}
+
+	d.logger.Info("shutting down")
 }

 // Handles a single update message from the gRPC subscription

From 25ceaa9e7da60d3c3cf4a2aecabbffe06e61bc17 Mon Sep 17 00:00:00 2001
From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com>
Date: Thu, 16 Oct 2025 19:06:09 -0700
Subject: [PATCH 43/56] Use NAME constant for logger

---
 solana/indexer/damm_v2/indexer.go | 4 ++--
 solana/indexer/dbc/indexer.go     | 4 ++--
 solana/indexer/program/indexer.go | 4 ++--
 solana/indexer/token/indexer.go   | 4 ++--
 4 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/solana/indexer/damm_v2/indexer.go b/solana/indexer/damm_v2/indexer.go
index 92ba7d16..64223781 100644
--- a/solana/indexer/damm_v2/indexer.go
+++ b/solana/indexer/damm_v2/indexer.go
@@ -20,7 +20,7 @@ import (
 )

 const (
-	NAME                       = "damm_v2"
+	NAME                       = "DammV2Indexer"
 	MAX_POOLS_PER_SUBSCRIPTION = 10000 // Arbitrary
 	NOTIFICATION_NAME          = "artist_coins_damm_v2_pool_changed"
 )
@@ -44,7 +44,7 @@ func New(
 		grpcConfig:  config,
 		grpcFactory: common.NewGrpcClient,
 		rpcClient:   rpcClient,
-		logger:      logger.Named("DammV2Indexer"),
+
logger: logger.Named(NAME), } } diff --git a/solana/indexer/dbc/indexer.go b/solana/indexer/dbc/indexer.go index 97d3b19e..557507f1 100644 --- a/solana/indexer/dbc/indexer.go +++ b/solana/indexer/dbc/indexer.go @@ -22,7 +22,7 @@ import ( ) const ( - NAME = "dbc" + NAME = "DbcIndexer" MAX_POOLS_PER_SUBSCRIPTION = 10000 // Arbitrary NOTIFICATION_NAME = "artist_coins_dbc_pool_changed" ) @@ -47,7 +47,7 @@ func New( grpcConfig: grpcConfig, rpcClient: rpcClient, transactionCache: transactionCache, - logger: logger.Named("DBCIndexer"), + logger: logger.Named(NAME), } } diff --git a/solana/indexer/program/indexer.go b/solana/indexer/program/indexer.go index e85c9bcf..058ac363 100644 --- a/solana/indexer/program/indexer.go +++ b/solana/indexer/program/indexer.go @@ -18,7 +18,7 @@ import ( "go.uber.org/zap" ) -const NAME = "program" +const NAME = "ProgramIndexer" type Indexer struct { pool database.DbPool @@ -43,7 +43,7 @@ func New( rpcClient: rpcClient, config: config, transactionCache: transactionCache, - logger: logger.Named("ProgramIndexer"), + logger: logger.Named(NAME), } } diff --git a/solana/indexer/token/indexer.go b/solana/indexer/token/indexer.go index b897e547..2fa31661 100644 --- a/solana/indexer/token/indexer.go +++ b/solana/indexer/token/indexer.go @@ -20,7 +20,7 @@ import ( ) const ( - NAME = "token" + NAME = "TokenIndexer" NOTIFICATION_NAME = "artist_coins_mint_changed" MAX_MINTS_PER_SUBSCRIPTION = 10000 // Arbitrary WORKER_CHANNEL_SIZE = 3000 @@ -50,7 +50,7 @@ func New( grpcConfig: config, rpcClient: rpcClient, transactionCache: transactionCache, - logger: logger.Named("TokenIndexer"), + logger: logger.Named(NAME), } } From 1fae5557de1a463be6ce02238ac00e2b0419f0d6 Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Thu, 16 Oct 2025 19:06:30 -0700 Subject: [PATCH 44/56] Update DBC pool using job, for backfilling prod --- jobs/coin_dbc.go | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/jobs/coin_dbc.go b/jobs/coin_dbc.go index c67fa205..7c0224e9 100644 --- a/jobs/coin_dbc.go +++ b/jobs/coin_dbc.go @@ -179,6 +179,11 @@ func (j *CoinDBCJob) UpdatePool(ctx context.Context, poolPubkey solana.PublicKey j.logger.Error("error inserting pool", zap.Error(err)) return fmt.Errorf("error inserting pool: %w", err) } + err = j.UpdateCoinDbcPool(ctx, pool.BaseMint.String(), poolPubkey.String()) + if err != nil { + j.logger.Error("error updating coin dbc_pool", zap.Error(err)) + return fmt.Errorf("error updating coin dbc_pool: %w", err) + } return nil } @@ -273,3 +278,15 @@ func (j *CoinDBCJob) UpsertPool( }) return err } + +func (j *CoinDBCJob) UpdateCoinDbcPool(ctx context.Context, mint string, dbcPool string) error { + _, err := j.pool.Exec(ctx, ` + UPDATE artist_coins + SET dbc_pool = @dbc_pool + WHERE mint = @mint + `, pgx.NamedArgs{ + "mint": mint, + "dbc_pool": dbcPool, + }) + return err +} From f7f54eb8780a7a5dcc0cb490e00194536890e108 Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Thu, 16 Oct 2025 19:06:50 -0700 Subject: [PATCH 45/56] Make test respect context timeouts --- solana/indexer/damm_v2/indexer_test.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/solana/indexer/damm_v2/indexer_test.go b/solana/indexer/damm_v2/indexer_test.go index cfbd7310..c4457310 100644 --- a/solana/indexer/damm_v2/indexer_test.go +++ b/solana/indexer/damm_v2/indexer_test.go @@ -275,6 +275,11 @@ func TestSubscription(t *testing.T) { go indexer.Start(ctx) for grpcMock.onUpdate == nil 
{ + select { + case <-ctx.Done(): + t.Fatal("timeout waiting for subscription to be created") + default: + } } // Assert the original subscription included the actual account @@ -303,6 +308,11 @@ func TestSubscription(t *testing.T) { require.NoError(t, err) for grpcMock.onUpdate == nil { + select { + case <-ctx.Done(): + t.Fatal("timeout waiting for subscription to be created") + default: + } } cancel() From 481701f905ba581a48721b0a13af065e8930caca Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Thu, 16 Oct 2025 19:07:03 -0700 Subject: [PATCH 46/56] fix bug with dbc program indexer listening to wrong program --- solana/indexer/dbc/indexer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/solana/indexer/dbc/indexer.go b/solana/indexer/dbc/indexer.go index 557507f1..019342c5 100644 --- a/solana/indexer/dbc/indexer.go +++ b/solana/indexer/dbc/indexer.go @@ -266,7 +266,7 @@ func (d *Indexer) makeSubscriptionRequest(ctx context.Context, pools []string) * // Listen to all watched pools subscription.Accounts = make(map[string]*pb.SubscribeRequestFilterAccounts) accountFilter := pb.SubscribeRequestFilterAccounts{ - Owner: []string{meteora_damm_v2.ProgramID.String()}, + Owner: []string{meteora_dbc.ProgramID.String()}, Account: pools, } subscription.Accounts[NAME] = &accountFilter From 985f77b363bfac541cae54b56fba21173764f13e Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Thu, 16 Oct 2025 19:08:01 -0700 Subject: [PATCH 47/56] remove pg notify helpers --- solana/indexer/common/pg_notify.go | 69 ------------------------- solana/indexer/common/pg_notify_test.go | 53 ------------------- 2 files changed, 122 deletions(-) delete mode 100644 solana/indexer/common/pg_notify.go delete mode 100644 solana/indexer/common/pg_notify_test.go diff --git a/solana/indexer/common/pg_notify.go b/solana/indexer/common/pg_notify.go deleted file mode 100644 index 62cdbe81..00000000 --- a/solana/indexer/common/pg_notify.go +++ /dev/null @@ -1,69 +0,0 @@ -package common - -import ( - "context" - "errors" - "fmt" - - "api.audius.co/database" - "github.com/jackc/pgx/v5/pgconn" - "go.uber.org/zap" -) - -type notificationCallback func(ctx context.Context, notification *pgconn.Notification) - -// Listens for a notification and fires a callback when one is received. -// The function spawns a goroutine to listen for notifications, so it returns -// immediately. The caller should ensure the context is cancelled when they want -// to stop listening and wait indefinitely to listen. 
-func WatchPgNotification(ctx context.Context, pool database.DbPool, notification string, callback notificationCallback, logger *zap.Logger) error { - if logger == nil { - logger = zap.NewNop() - } - - childLogger := logger.With(zap.String("notification", notification)) - - conn, err := pool.Acquire(ctx) - if err != nil { - return fmt.Errorf("failed to acquire database connection: %w", err) - } - - rawConn := conn.Conn() - _, err = rawConn.Exec(ctx, fmt.Sprintf(`LISTEN %s`, notification)) - if err != nil { - return fmt.Errorf("failed to listen for %s changes: %w", notification, err) - } - - go func() { - defer func() { - if rawConn != nil && !rawConn.PgConn().IsClosed() && ctx.Err() != nil { - _, _ = rawConn.Exec(ctx, fmt.Sprintf(`UNLISTEN %s`, notification)) - } - childLogger.Debug("received shutdown signal, stopping notification watcher") - conn.Release() - }() - for { - select { - case <-ctx.Done(): - return - default: - } - - notif, err := rawConn.WaitForNotification(ctx) - if err != nil { - if !errors.Is(err, context.Canceled) { - childLogger.Error("failed waiting for notification", zap.Error(err)) - } - continue - } - if notif == nil { - childLogger.Warn("received nil notification, continuing to wait for notifications") - continue - } - - childLogger.Debug("received notification", zap.String("payload", notif.Payload)) - callback(ctx, notif) - } - }() - return nil -} diff --git a/solana/indexer/common/pg_notify_test.go b/solana/indexer/common/pg_notify_test.go deleted file mode 100644 index 0a3253e6..00000000 --- a/solana/indexer/common/pg_notify_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package common - -import ( - "context" - "testing" - "time" - - "api.audius.co/database" - "github.com/jackc/pgx/v5/pgconn" - "github.com/stretchr/testify/require" - "github.com/test-go/testify/assert" - "go.uber.org/zap" -) - -func TestWatchNotification(t *testing.T) { - pool := database.CreateTestDatabase(t, "test_solana_indexer_common") - defer pool.Close() - - notif := "test_notification" - ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second) - defer cancel() - - notifChan := make(chan *pgconn.Notification, 1) - - // Callback to capture the notification - callback := func(ctx context.Context, notification *pgconn.Notification) { - notifChan <- notification - } - - logger := zap.NewNop() - err := WatchPgNotification(ctx, pool, notif, callback, logger) - require.NoError(t, err, "failed to listen for notifications") - - conn, err := pool.Acquire(ctx) - require.NoError(t, err, "failed to acquire database connection") - defer conn.Release() - - // Send a test notification - _, err = conn.Exec(ctx, "NOTIFY "+notif+", 'payload'") - require.NoError(t, err, "failed to send notification") - - // Wait for the notification to be received - select { - case <-ctx.Done(): - t.Fatal("timed out waiting for notification") - case n := <-notifChan: - require.NotNil(t, n, "notification should not be nil") - assert.Equal(t, notif, n.Channel, "notification channel should match") - assert.Equal(t, "payload", n.Payload, "notification payload should match") - default: - } - -} From 6c5fe82e17424819ea9341ec108b4f88eb87a201 Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Thu, 16 Oct 2025 19:58:15 -0700 Subject: [PATCH 48/56] add retries for other indexers --- solana/indexer/solana_indexer.go | 47 +++++++++++++++++++++++--------- 1 file changed, 34 insertions(+), 13 deletions(-) diff --git a/solana/indexer/solana_indexer.go b/solana/indexer/solana_indexer.go index 
7defba0e..680b7bed 100644 --- a/solana/indexer/solana_indexer.go +++ b/solana/indexer/solana_indexer.go @@ -133,33 +133,33 @@ func (s *SolanaIndexer) Start(ctx context.Context) error { } func (s *SolanaIndexer) ScheduleProcessRetryQueue(ctx context.Context, interval time.Duration) { + s.logger.Debug("starting retry ticker", zap.Duration("interval", interval)) ticker := time.NewTicker(interval) defer ticker.Stop() for { select { case <-ctx.Done(): - s.logger.Info("context cancelled, stopping retry ticker") + s.logger.Debug("stopping retry ticker") return case <-ticker.C: - err := s.ProcessRetryQueue(ctx) - if err != nil { - s.logger.Error("failed to retry unprocessed transactions", zap.Error(err)) - } + s.ProcessRetryQueue(ctx) } } } -func (s *SolanaIndexer) ProcessRetryQueue(ctx context.Context) error { +func (s *SolanaIndexer) ProcessRetryQueue(ctx context.Context) { limit := 100 offset := 0 logger := s.logger.Named("RetryQueue") count := 0 start := time.Now() + logger.Debug("starting to process retry queue...") for { queue, err := common.GetRetryQueue(ctx, s.pool, limit, offset) if err != nil { - return fmt.Errorf("failed to fetch retry queue: %w", err) + logger.Error("failed to fetch retry queue", zap.Error(err)) + return } if len(queue) == 0 { break @@ -167,10 +167,10 @@ func (s *SolanaIndexer) ProcessRetryQueue(ctx context.Context) error { for _, item := range queue { switch item.Indexer { - case "token": + case token.NAME: err := s.tokenIndexer.HandleUpdate(ctx, item.UpdateMessage.SubscribeUpdate) if err != nil { - logger.Error("failed to retry token_indexer", zap.Error(err)) + logger.Error("failed to retry", zap.String("indexer", token.NAME), zap.Error(err)) offset++ } else { err = common.DeleteFromRetryQueue(ctx, s.pool, item.ID) @@ -178,10 +178,32 @@ func (s *SolanaIndexer) ProcessRetryQueue(ctx context.Context) error { logger.Error("failed to delete from retry queue", zap.Error(err)) } } - case "dammv2": + case damm_v2.NAME: err := s.dammV2Indexer.HandleUpdate(ctx, item.UpdateMessage.SubscribeUpdate) if err != nil { - logger.Error("failed to retry damm_v2_indexer", zap.Error(err)) + logger.Error("failed to retry", zap.String("indexer", damm_v2.NAME), zap.Error(err)) + offset++ + } else { + err = common.DeleteFromRetryQueue(ctx, s.pool, item.ID) + if err != nil { + logger.Error("failed to delete from retry queue", zap.Error(err)) + } + } + case dbc.NAME: + err := s.dbcIndexer.HandleUpdate(ctx, item.UpdateMessage.SubscribeUpdate) + if err != nil { + logger.Error("failed to retry", zap.String("indexer", dbc.NAME), zap.Error(err)) + offset++ + } else { + err = common.DeleteFromRetryQueue(ctx, s.pool, item.ID) + if err != nil { + logger.Error("failed to delete from retry queue", zap.Error(err)) + } + } + case program.NAME: + err := s.programIndexer.HandleUpdate(ctx, item.UpdateMessage.SubscribeUpdate) + if err != nil { + logger.Error("failed to retry", zap.String("indexer", program.NAME), zap.Error(err)) offset++ } else { err = common.DeleteFromRetryQueue(ctx, s.pool, item.ID) @@ -198,7 +220,7 @@ func (s *SolanaIndexer) ProcessRetryQueue(ctx context.Context) error { if count == 0 { logger.Debug("no unprocessed transactions to retry") - return nil + return } logger.Info("finished processing retry queue", @@ -206,7 +228,6 @@ func (s *SolanaIndexer) ProcessRetryQueue(ctx context.Context) error { zap.Int("failed", offset), zap.Duration("duration", time.Since(start)), ) - return nil } func (s *SolanaIndexer) Close() { From 937c673c41cfc1a036af457b3add782d2304a65e Mon Sep 17 00:00:00 2001 
From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Thu, 16 Oct 2025 19:58:24 -0700 Subject: [PATCH 49/56] remove pointer --- api/v1_coins.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/v1_coins.go b/api/v1_coins.go index 055a564c..bf702591 100644 --- a/api/v1_coins.go +++ b/api/v1_coins.go @@ -83,7 +83,7 @@ type ArtistCoin struct { Buy int `json:"buy" db:"buy"` Sell int `json:"sell" db:"sell"` DynamicBondingCurve *DynamicBondingCurveInsights `json:"dynamicBondingCurve" db:"dynamic_bonding_curve"` - ArtistFees *ArtistCoinFees `json:"artistFees" db:"artist_fees"` + ArtistFees ArtistCoinFees `json:"artistFees" db:"artist_fees"` UpdatedAt time.Time `json:"updatedAt" db:"updated_at"` } From 9bf4f5034debd2e29176739210ddd7f8089b1f04 Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Thu, 16 Oct 2025 19:58:32 -0700 Subject: [PATCH 50/56] update models --- api/dbv1/models.go | 205 +++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 197 insertions(+), 8 deletions(-) diff --git a/api/dbv1/models.go b/api/dbv1/models.go index 2f53de0c..b45b9285 100644 --- a/api/dbv1/models.go +++ b/api/dbv1/models.go @@ -854,6 +854,10 @@ type ArtistCoin struct { Link2 pgtype.Text `json:"link_2"` Link3 pgtype.Text `json:"link_3"` Link4 pgtype.Text `json:"link_4"` + // The canonical DAMM V2 pool address for this artist coin, if any. Used in solana indexer. + DammV2Pool pgtype.Text `json:"damm_v2_pool"` + // The associated DBC pool address for this artist coin, if any. Used in solana indexer. + DbcPool pgtype.Text `json:"dbc_pool"` } type ArtistCoinPool struct { @@ -1634,6 +1638,180 @@ type SolKeypair struct { PrivateKey []byte `json:"private_key"` } +// Tracks DAMM V2 pool state. Join with sol_meteora_damm_v2_pool_metrics, sol_meteora_damm_v2_pool_fees, sol_meteora_damm_v2_pool_base_fees, and sol_meteora_damm_v2_pool_dynamic_fees for full pool state. +type SolMeteoraDammV2Pool struct { + Account string `json:"account"` + Slot int64 `json:"slot"` + TokenAMint string `json:"token_a_mint"` + TokenBMint string `json:"token_b_mint"` + TokenAVault string `json:"token_a_vault"` + TokenBVault string `json:"token_b_vault"` + WhitelistedVault string `json:"whitelisted_vault"` + Partner string `json:"partner"` + Liquidity pgtype.Numeric `json:"liquidity"` + ProtocolAFee int64 `json:"protocol_a_fee"` + ProtocolBFee int64 `json:"protocol_b_fee"` + PartnerAFee int64 `json:"partner_a_fee"` + PartnerBFee int64 `json:"partner_b_fee"` + SqrtMinPrice pgtype.Numeric `json:"sqrt_min_price"` + SqrtMaxPrice pgtype.Numeric `json:"sqrt_max_price"` + SqrtPrice pgtype.Numeric `json:"sqrt_price"` + ActivationPoint int64 `json:"activation_point"` + ActivationType int16 `json:"activation_type"` + PoolStatus int16 `json:"pool_status"` + TokenAFlag int16 `json:"token_a_flag"` + TokenBFlag int16 `json:"token_b_flag"` + CollectFeeMode int16 `json:"collect_fee_mode"` + PoolType int16 `json:"pool_type"` + Version int16 `json:"version"` + FeeAPerLiquidity pgtype.Numeric `json:"fee_a_per_liquidity"` + FeeBPerLiquidity pgtype.Numeric `json:"fee_b_per_liquidity"` + PermanentLockLiquidity pgtype.Numeric `json:"permanent_lock_liquidity"` + Creator string `json:"creator"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +// Tracks base fee configuration for DAMM V2 pools. A slice of the DAMM V2 pool state. 
+type SolMeteoraDammV2PoolBaseFee struct { + Pool string `json:"pool"` + Slot int64 `json:"slot"` + CliffFeeNumerator int64 `json:"cliff_fee_numerator"` + FeeSchedulerMode int16 `json:"fee_scheduler_mode"` + NumberOfPeriod int16 `json:"number_of_period"` + PeriodFrequency int64 `json:"period_frequency"` + ReductionFactor int64 `json:"reduction_factor"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +// Tracks dynamic fee configuration for DAMM V2 pools. A slice of the DAMM V2 pool state. +type SolMeteoraDammV2PoolDynamicFee struct { + Pool string `json:"pool"` + Slot int64 `json:"slot"` + Initialized int16 `json:"initialized"` + MaxVolatilityAccumulator int32 `json:"max_volatility_accumulator"` + VariableFeeControl int32 `json:"variable_fee_control"` + BinStep int16 `json:"bin_step"` + FilterPeriod int16 `json:"filter_period"` + DecayPeriod int16 `json:"decay_period"` + ReductionFactor int16 `json:"reduction_factor"` + LastUpdateTimestamp int64 `json:"last_update_timestamp"` + BinStepU128 pgtype.Numeric `json:"bin_step_u128"` + SqrtPriceReference pgtype.Numeric `json:"sqrt_price_reference"` + VolatilityAccumulator pgtype.Numeric `json:"volatility_accumulator"` + VolatilityReference pgtype.Numeric `json:"volatility_reference"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +// Tracks fee configuration for DAMM V2 pools. A slice of the DAMM V2 pool state. +type SolMeteoraDammV2PoolFee struct { + Pool string `json:"pool"` + Slot int64 `json:"slot"` + ProtocolFeePercent int16 `json:"protocol_fee_percent"` + PartnerFeePercent int16 `json:"partner_fee_percent"` + ReferralFeePercent int16 `json:"referral_fee_percent"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +// Tracks aggregated metrics for DAMM V2 pools. A slice of the DAMM V2 pool state. +type SolMeteoraDammV2PoolMetric struct { + Pool string `json:"pool"` + Slot int64 `json:"slot"` + TotalLpAFee pgtype.Numeric `json:"total_lp_a_fee"` + TotalLpBFee pgtype.Numeric `json:"total_lp_b_fee"` + TotalProtocolAFee pgtype.Numeric `json:"total_protocol_a_fee"` + TotalProtocolBFee pgtype.Numeric `json:"total_protocol_b_fee"` + TotalPartnerAFee pgtype.Numeric `json:"total_partner_a_fee"` + TotalPartnerBFee pgtype.Numeric `json:"total_partner_b_fee"` + TotalPosition int64 `json:"total_position"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +// Tracks DAMM V2 positions representing a claim to the liquidity and associated fees in a DAMM V2 pool. Join with sol_meteora_damm_v2_position_metrics for full position state. +type SolMeteoraDammV2Position struct { + Account string `json:"account"` + Slot int64 `json:"slot"` + Pool string `json:"pool"` + NftMint string `json:"nft_mint"` + FeeAPerTokenCheckpoint int64 `json:"fee_a_per_token_checkpoint"` + FeeBPerTokenCheckpoint int64 `json:"fee_b_per_token_checkpoint"` + FeeAPending int64 `json:"fee_a_pending"` + FeeBPending int64 `json:"fee_b_pending"` + UnlockedLiquidity pgtype.Numeric `json:"unlocked_liquidity"` + VestedLiquidity pgtype.Numeric `json:"vested_liquidity"` + PermanentLockedLiquidity pgtype.Numeric `json:"permanent_locked_liquidity"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +// Tracks aggregated metrics for DAMM V2 positions. A slice of the DAMM V2 position state. 
+type SolMeteoraDammV2PositionMetric struct { + Position string `json:"position"` + Slot int64 `json:"slot"` + TotalClaimedAFee int64 `json:"total_claimed_a_fee"` + TotalClaimedBFee int64 `json:"total_claimed_b_fee"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +// Tracks migrations from DBC pools to DAMM V2 pools. +type SolMeteoraDbcMigration struct { + Signature string `json:"signature"` + InstructionIndex int32 `json:"instruction_index"` + Slot int64 `json:"slot"` + DbcPool string `json:"dbc_pool"` + MigrationMetadata string `json:"migration_metadata"` + Config string `json:"config"` + DbcPoolAuthority string `json:"dbc_pool_authority"` + DammV2Pool string `json:"damm_v2_pool"` + FirstPositionNftMint string `json:"first_position_nft_mint"` + FirstPositionNftAccount string `json:"first_position_nft_account"` + FirstPosition string `json:"first_position"` + SecondPositionNftMint string `json:"second_position_nft_mint"` + SecondPositionNftAccount string `json:"second_position_nft_account"` + SecondPosition string `json:"second_position"` + DammPoolAuthority string `json:"damm_pool_authority"` + BaseMint string `json:"base_mint"` + QuoteMint string `json:"quote_mint"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +type SolMeteoraDbcPool struct { + Account string `json:"account"` + Slot int64 `json:"slot"` + Config string `json:"config"` + Creator string `json:"creator"` + BaseMint string `json:"base_mint"` + BaseVault string `json:"base_vault"` + QuoteVault string `json:"quote_vault"` + BaseReserve int64 `json:"base_reserve"` + QuoteReserve int64 `json:"quote_reserve"` + ProtocolBaseFee int64 `json:"protocol_base_fee"` + ProtocolQuoteFee int64 `json:"protocol_quote_fee"` + PartnerBaseFee int64 `json:"partner_base_fee"` + PartnerQuoteFee int64 `json:"partner_quote_fee"` + SqrtPrice pgtype.Numeric `json:"sqrt_price"` + ActivationPoint int64 `json:"activation_point"` + PoolType int16 `json:"pool_type"` + IsMigrated int16 `json:"is_migrated"` + IsPartnerWithdrawSurplus int16 `json:"is_partner_withdraw_surplus"` + IsProtocolWithdrawSurplus int16 `json:"is_protocol_withdraw_surplus"` + MigrationProgress int16 `json:"migration_progress"` + IsWithdrawLeftover int16 `json:"is_withdraw_leftover"` + IsCreatorWithdrawSurplus int16 `json:"is_creator_withdraw_surplus"` + MigrationFeeWithdrawStatus int16 `json:"migration_fee_withdraw_status"` + FinishCurveTimestamp int64 `json:"finish_curve_timestamp"` + CreatorBaseFee int64 `json:"creator_base_fee"` + CreatorQuoteFee int64 `json:"creator_quote_fee"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + // Stores payment router program Route instruction recipients and amounts for tracked mints. type SolPayment struct { Signature string `json:"signature"` @@ -1664,6 +1842,21 @@ type SolPurchase struct { Country pgtype.Text `json:"country"` } +// Queue for retrying failed indexer updates. +type SolRetryQueue struct { + ID pgtype.UUID `json:"id"` + // The name of the indexer that failed (e.g., token_indexer, damm_v2_indexer). + Indexer string `json:"indexer"` + // The JSONB update data that failed to process. + UpdateMessage json.RawMessage `json:"update_message"` + // The error message from the failure. + Error string `json:"error"` + // The timestamp when the retry entry was created. + CreatedAt time.Time `json:"created_at"` + // The timestamp when the retry entry was last updated. 
+ UpdatedAt time.Time `json:"updated_at"` +} + // Stores reward manager program Evaluate instructions for tracked mints. type SolRewardDisbursement struct { Signature string `json:"signature"` @@ -1673,6 +1866,8 @@ type SolRewardDisbursement struct { UserBank string `json:"user_bank"` ChallengeID string `json:"challenge_id"` Specifier string `json:"specifier"` + // The Ethereum address of the recipient of the reward. + RecipientEthAddress pgtype.Text `json:"recipient_eth_address"` } // Stores checkpoints for Solana slots to track indexing progress. @@ -1684,6 +1879,8 @@ type SolSlotCheckpoint struct { Subscription json.RawMessage `json:"subscription"` UpdatedAt time.Time `json:"updated_at"` CreatedAt time.Time `json:"created_at"` + // The name of the indexer this checkpoint is for (e.g., token_indexer, damm_v2_indexer). + Name pgtype.Text `json:"name"` } // Stores eg. Jupiter swaps for tracked mints. @@ -1734,14 +1931,6 @@ type SolTokenTransfer struct { ToAccount string `json:"to_account"` } -type SolUnprocessedTx struct { - Signature string `json:"signature"` - ErrorMessage pgtype.Text `json:"error_message"` - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` - Slot int64 `json:"slot"` -} - // Stores the balances of Solana tokens for users. type SolUserBalance struct { UserID int32 `json:"user_id"` From 362a34d2de3f7e48a7a9b25a161351e4bc5a32ea Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Thu, 16 Oct 2025 21:37:42 -0700 Subject: [PATCH 51/56] further use the DRY query to prevent mistakes --- api/v1_coin.go | 6 +- api/v1_coins.go | 143 +++++++++++++++++++++++++----------------------- 2 files changed, 77 insertions(+), 72 deletions(-) diff --git a/api/v1_coin.go b/api/v1_coin.go index 061c77af..93768cfa 100644 --- a/api/v1_coin.go +++ b/api/v1_coin.go @@ -5,7 +5,7 @@ import ( "github.com/jackc/pgx/v5" ) -const sharedSql = ` +const sharedSelectCoinSql = ` SELECT artist_coins.name, artist_coins.mint, @@ -97,7 +97,7 @@ func (app *ApiServer) v1Coin(c *fiber.Ctx) error { } sql := ` - ` + sharedSql + ` + ` + sharedSelectCoinSql + ` WHERE artist_coins.mint = @mint LIMIT 1 ` @@ -128,7 +128,7 @@ func (app *ApiServer) v1CoinByTicker(c *fiber.Ctx) error { } sql := ` - ` + sharedSql + ` + ` + sharedSelectCoinSql + ` WHERE artist_coins.ticker = @ticker LIMIT 1 ` diff --git a/api/v1_coins.go b/api/v1_coins.go index bf702591..52673efd 100644 --- a/api/v1_coins.go +++ b/api/v1_coins.go @@ -9,15 +9,6 @@ import ( "github.com/jackc/pgx/v5" ) -type ArtistCoinFees struct { - UnclaimedDbcFees float64 `json:"unclaimed_dbc_fees" db:"unclaimed_dbc_fees"` - TotalDbcFees float64 `json:"total_dbc_fees" db:"total_dbc_fees"` - UnclaimedDammV2Fees float64 `json:"unclaimed_damm_v2_fees" db:"unclaimed_damm_v2_fees"` - TotalDammV2Fees float64 `json:"total_damm_v2_fees" db:"total_damm_v2_fees"` - UnclaimedFees float64 `json:"unclaimed_fees" db:"unclaimed_fees"` - TotalFees float64 `json:"total_fees" db:"total_fees"` -} - type ArtistCoin struct { Name string `json:"name"` Ticker string `json:"ticker"` @@ -87,68 +78,20 @@ type ArtistCoin struct { UpdatedAt time.Time `json:"updatedAt" db:"updated_at"` } -type GetArtistCoinsQueryParams struct { - Tickers []string `query:"ticker"` - Mints []string `query:"mint"` - OwnerIds []trashid.HashId `query:"owner_id"` - Limit int `query:"limit" default:"50" validate:"min=1,max=100"` - Offset int `query:"offset" default:"0" validate:"min=0"` - Query string `query:"query"` - SortMethod string 
`query:"sort_method" default:"market_cap" validate:"oneof=market_cap price volume created_at holder"` - SortDirection string `query:"sort_direction" default:"desc" validate:"oneof=asc desc"` +type ArtistCoinFees struct { + UnclaimedDbcFees float64 `json:"unclaimed_dbc_fees" db:"unclaimed_dbc_fees"` + TotalDbcFees float64 `json:"total_dbc_fees" db:"total_dbc_fees"` + UnclaimedDammV2Fees float64 `json:"unclaimed_damm_v2_fees" db:"unclaimed_damm_v2_fees"` + TotalDammV2Fees float64 `json:"total_damm_v2_fees" db:"total_damm_v2_fees"` + UnclaimedFees float64 `json:"unclaimed_fees" db:"unclaimed_fees"` + TotalFees float64 `json:"total_fees" db:"total_fees"` } -func (app *ApiServer) v1Coins(c *fiber.Ctx) error { - queryParams := GetArtistCoinsQueryParams{} - if err := app.ParseAndValidateQueryParams(c, &queryParams); err != nil { - return err - } - - mintFilter := "" - if len(queryParams.Mints) > 0 { - mintFilter = `AND artist_coins.mint = ANY(@mints)` - } - ownerIdFilter := "" - if len(queryParams.OwnerIds) > 0 { - ownerIdFilter = `AND artist_coins.user_id = ANY(@owner_ids)` - } - tickerFilter := "" - if len(queryParams.Tickers) > 0 { - tickerFilter = `AND artist_coins.ticker = ANY(@tickers)` - } - queryFilter := "" - if queryParams.Query != "" { - queryFilter = `AND ( - artist_coins.ticker ILIKE '%' || @query || '%' OR - artist_coins.name ILIKE '%' || @query || '%' OR - users.handle_lc ILIKE '%' || @query || '%' - )` - } - - sortMethod := "market_cap" - switch queryParams.SortMethod { - case "price": - sortMethod = "price" - case "volume": - sortMethod = "total_volume_usd" - case "created_at": - sortMethod = "created_at" - case "holder": - sortMethod = "holder" - } - - sortDirection := "desc" - if queryParams.SortDirection == "asc" { - sortDirection = "asc" - } - - sortString := fmt.Sprintf("%s %s", sortMethod, sortDirection) - - sql := ` +const sharedSelectCoinSql = ` SELECT artist_coins.name, - artist_coins.ticker, artist_coins.mint, + artist_coins.ticker, artist_coins.decimals, artist_coins.user_id, artist_coins.logo_uri, @@ -159,6 +102,7 @@ func (app *ApiServer) v1Coins(c *fiber.Ctx) error { artist_coins.link_4, artist_coins.has_discord, artist_coins.created_at, + artist_coins.updated_at as coin_updated_at, COALESCE(artist_coin_stats.market_cap, 0) as market_cap, COALESCE(artist_coin_stats.fdv, 0) as fdv, COALESCE(artist_coin_stats.liquidity, 0) as liquidity, @@ -200,10 +144,10 @@ func (app *ApiServer) v1Coins(c *fiber.Ctx) error { COALESCE(artist_coin_stats.number_markets, 0) as number_markets, COALESCE(artist_coin_stats.total_volume, 0) as total_volume, COALESCE(artist_coin_stats.total_volume_usd, 0) as total_volume_usd, - COALESCE(artist_coin_stats.volume_buy_usd, 0) as volume_buy_usd, - COALESCE(artist_coin_stats.volume_sell_usd, 0) as volume_sell_usd, COALESCE(artist_coin_stats.volume_buy, 0) as volume_buy, + COALESCE(artist_coin_stats.volume_buy_usd, 0) as volume_buy_usd, COALESCE(artist_coin_stats.volume_sell, 0) as volume_sell, + COALESCE(artist_coin_stats.volume_sell_usd, 0) as volume_sell_usd, COALESCE(artist_coin_stats.total_trade, 0) as total_trade, COALESCE(artist_coin_stats.buy, 0) as buy, COALESCE(artist_coin_stats.sell, 0) as sell, @@ -218,12 +162,73 @@ func (app *ApiServer) v1Coins(c *fiber.Ctx) error { 'creatorWalletAddress', COALESCE(artist_coin_pools.creator_wallet_address, '') ) AS dynamic_bonding_curve, ROW_TO_JSON(calculate_artist_coin_fees(artist_coins.mint)) AS artist_fees, - COALESCE(artist_coin_stats.updated_at, artist_coins.created_at) as updated_at + 
COALESCE(artist_coin_stats.updated_at, artist_coins.created_at) AS updated_at FROM artist_coins LEFT JOIN artist_coin_stats ON artist_coin_stats.mint = artist_coins.mint LEFT JOIN artist_coin_pools ON artist_coin_pools.base_mint = artist_coins.mint +` + +type GetArtistCoinsQueryParams struct { + Tickers []string `query:"ticker"` + Mints []string `query:"mint"` + OwnerIds []trashid.HashId `query:"owner_id"` + Limit int `query:"limit" default:"50" validate:"min=1,max=100"` + Offset int `query:"offset" default:"0" validate:"min=0"` + Query string `query:"query"` + SortMethod string `query:"sort_method" default:"market_cap" validate:"oneof=market_cap price volume created_at holder"` + SortDirection string `query:"sort_direction" default:"desc" validate:"oneof=asc desc"` +} + +func (app *ApiServer) v1Coins(c *fiber.Ctx) error { + queryParams := GetArtistCoinsQueryParams{} + if err := app.ParseAndValidateQueryParams(c, &queryParams); err != nil { + return err + } + + mintFilter := "" + if len(queryParams.Mints) > 0 { + mintFilter = `AND artist_coins.mint = ANY(@mints)` + } + ownerIdFilter := "" + if len(queryParams.OwnerIds) > 0 { + ownerIdFilter = `AND artist_coins.user_id = ANY(@owner_ids)` + } + tickerFilter := "" + if len(queryParams.Tickers) > 0 { + tickerFilter = `AND artist_coins.ticker = ANY(@tickers)` + } + queryFilter := "" + if queryParams.Query != "" { + queryFilter = `AND ( + artist_coins.ticker ILIKE '%' || @query || '%' OR + artist_coins.name ILIKE '%' || @query || '%' OR + users.handle_lc ILIKE '%' || @query || '%' + )` + } + + sortMethod := "market_cap" + switch queryParams.SortMethod { + case "price": + sortMethod = "price" + case "volume": + sortMethod = "total_volume_usd" + case "created_at": + sortMethod = "created_at" + case "holder": + sortMethod = "holder" + } + + sortDirection := "desc" + if queryParams.SortDirection == "asc" { + sortDirection = "asc" + } + + sortString := fmt.Sprintf("%s %s", sortMethod, sortDirection) + + sql := ` + ` + sharedSelectCoinSql + ` LEFT JOIN users ON users.user_id = artist_coins.user_id WHERE 1=1 From 3e7131963b95ab277bc3f78ac4f4baf1afbfc5e0 Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Thu, 16 Oct 2025 21:39:48 -0700 Subject: [PATCH 52/56] missed commit --- api/v1_coin.go | 83 -------------------------------------------------- 1 file changed, 83 deletions(-) diff --git a/api/v1_coin.go b/api/v1_coin.go index 93768cfa..75cfb7f5 100644 --- a/api/v1_coin.go +++ b/api/v1_coin.go @@ -5,89 +5,6 @@ import ( "github.com/jackc/pgx/v5" ) -const sharedSelectCoinSql = ` - SELECT - artist_coins.name, - artist_coins.mint, - artist_coins.ticker, - artist_coins.decimals, - artist_coins.user_id, - artist_coins.logo_uri, - artist_coins.description, - artist_coins.link_1, - artist_coins.link_2, - artist_coins.link_3, - artist_coins.link_4, - artist_coins.has_discord, - artist_coins.created_at, - artist_coins.updated_at as coin_updated_at, - COALESCE(artist_coin_stats.market_cap, 0) as market_cap, - COALESCE(artist_coin_stats.fdv, 0) as fdv, - COALESCE(artist_coin_stats.liquidity, 0) as liquidity, - COALESCE(artist_coin_stats.last_trade_unix_time, 0) as last_trade_unix_time, - COALESCE(artist_coin_stats.last_trade_human_time, '') as last_trade_human_time, - COALESCE(artist_coin_stats.price, 0) as price, - COALESCE(artist_coin_stats.history_24h_price, 0) as history_24h_price, - COALESCE(artist_coin_stats.price_change_24h_percent, 0) as price_change_24h_percent, - COALESCE(artist_coin_stats.unique_wallet_24h, 
0) as unique_wallet_24h, - COALESCE(artist_coin_stats.unique_wallet_history_24h, 0) as unique_wallet_history_24h, - COALESCE(artist_coin_stats.unique_wallet_24h_change_percent, 0) as unique_wallet_24h_change_percent, - COALESCE(artist_coin_stats.total_supply, 0) as total_supply, - COALESCE(artist_coin_stats.circulating_supply, 0) as circulating_supply, - COALESCE(artist_coin_stats.holder, 0) as holder, - COALESCE(artist_coin_stats.trade_24h, 0) as trade_24h, - COALESCE(artist_coin_stats.trade_history_24h, 0) as trade_history_24h, - COALESCE(artist_coin_stats.trade_24h_change_percent, 0) as trade_24h_change_percent, - COALESCE(artist_coin_stats.sell_24h, 0) as sell_24h, - COALESCE(artist_coin_stats.sell_history_24h, 0) as sell_history_24h, - COALESCE(artist_coin_stats.sell_24h_change_percent, 0) as sell_24h_change_percent, - COALESCE(artist_coin_stats.buy_24h, 0) as buy_24h, - COALESCE(artist_coin_stats.buy_history_24h, 0) as buy_history_24h, - COALESCE(artist_coin_stats.buy_24h_change_percent, 0) as buy_24h_change_percent, - COALESCE(artist_coin_stats.v_24h, 0) as v_24h, - COALESCE(artist_coin_stats.v_24h_usd, 0) as v_24h_usd, - COALESCE(artist_coin_stats.v_history_24h, 0) as v_history_24h, - COALESCE(artist_coin_stats.v_history_24h_usd, 0) as v_history_24h_usd, - COALESCE(artist_coin_stats.v_24h_change_percent, 0) as v_24h_change_percent, - COALESCE(artist_coin_stats.v_buy_24h, 0) as v_buy_24h, - COALESCE(artist_coin_stats.v_buy_24h_usd, 0) as v_buy_24h_usd, - COALESCE(artist_coin_stats.v_buy_history_24h, 0) as v_buy_history_24h, - COALESCE(artist_coin_stats.v_buy_history_24h_usd, 0) as v_buy_history_24h_usd, - COALESCE(artist_coin_stats.v_buy_24h_change_percent, 0) as v_buy_24h_change_percent, - COALESCE(artist_coin_stats.v_sell_24h, 0) as v_sell_24h, - COALESCE(artist_coin_stats.v_sell_24h_usd, 0) as v_sell_24h_usd, - COALESCE(artist_coin_stats.v_sell_history_24h, 0) as v_sell_history_24h, - COALESCE(artist_coin_stats.v_sell_history_24h_usd, 0) as v_sell_history_24h_usd, - COALESCE(artist_coin_stats.v_sell_24h_change_percent, 0) as v_sell_24h_change_percent, - COALESCE(artist_coin_stats.number_markets, 0) as number_markets, - COALESCE(artist_coin_stats.total_volume, 0) as total_volume, - COALESCE(artist_coin_stats.total_volume_usd, 0) as total_volume_usd, - COALESCE(artist_coin_stats.volume_buy, 0) as volume_buy, - COALESCE(artist_coin_stats.volume_buy_usd, 0) as volume_buy_usd, - COALESCE(artist_coin_stats.volume_sell, 0) as volume_sell, - COALESCE(artist_coin_stats.volume_sell_usd, 0) as volume_sell_usd, - COALESCE(artist_coin_stats.total_trade, 0) as total_trade, - COALESCE(artist_coin_stats.buy, 0) as buy, - COALESCE(artist_coin_stats.sell, 0) as sell, - JSON_BUILD_OBJECT( - 'address', COALESCE(artist_coin_pools.address, ''), - 'price', COALESCE(artist_coin_pools.price, 0), - 'priceUSD', COALESCE(artist_coin_pools.price_usd, 0), - 'curveProgress', COALESCE(artist_coin_pools.curve_progress, 0), - 'isMigrated', COALESCE(artist_coin_pools.is_migrated, false), - 'creatorQuoteFee', COALESCE(artist_coin_pools.creator_quote_fee, 0), - 'totalTradingQuoteFee', COALESCE(artist_coin_pools.total_trading_quote_fee, 0), - 'creatorWalletAddress', COALESCE(artist_coin_pools.creator_wallet_address, '') - ) AS dynamic_bonding_curve, - ROW_TO_JSON(calculate_artist_coin_fees(artist_coins.mint)) AS artist_fees, - COALESCE(artist_coin_stats.updated_at, artist_coins.created_at) AS updated_at - FROM artist_coins - LEFT JOIN artist_coin_stats - ON artist_coin_stats.mint = artist_coins.mint - LEFT JOIN 
artist_coin_pools - ON artist_coin_pools.base_mint = artist_coins.mint -` - func (app *ApiServer) v1Coin(c *fiber.Ctx) error { mint := c.Params("mint") if mint == "" { From 2219e83b9d15c3e38f2510c04996894e7ab63ca5 Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Thu, 16 Oct 2025 21:42:30 -0700 Subject: [PATCH 53/56] remove destructive action --- ddl/migrations/0170_sol_retry_queue.sql | 1 - 1 file changed, 1 deletion(-) diff --git a/ddl/migrations/0170_sol_retry_queue.sql b/ddl/migrations/0170_sol_retry_queue.sql index 27979fb9..006ce443 100644 --- a/ddl/migrations/0170_sol_retry_queue.sql +++ b/ddl/migrations/0170_sol_retry_queue.sql @@ -17,5 +17,4 @@ COMMENT ON COLUMN sol_retry_queue.updated_at IS 'The timestamp when the retry en ALTER TABLE sol_slot_checkpoints ADD COLUMN IF NOT EXISTS name TEXT; COMMENT ON COLUMN sol_slot_checkpoints.name IS 'The name of the indexer this checkpoint is for (e.g., token_indexer, damm_v2_indexer).'; -DROP TABLE IF EXISTS sol_unprocessed_txs; COMMIT; \ No newline at end of file From da20fcb4c5b9cca7d4a9ad4043770822eaf116d6 Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Thu, 16 Oct 2025 22:34:02 -0700 Subject: [PATCH 54/56] swagger.yml --- api/swagger/swagger-v1.yaml | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/api/swagger/swagger-v1.yaml b/api/swagger/swagger-v1.yaml index 84f92905..631c9e85 100644 --- a/api/swagger/swagger-v1.yaml +++ b/api/swagger/swagger-v1.yaml @@ -5594,6 +5594,34 @@ components: type: string description: Address of the pool creator's wallet example: "2AAsAwNPTNBk5N466xyPiwqdgbc5WLbDTdnn9gVuDKaN" + artistFees: + type: object + description: Information about the fees earned by the artist on the coin's trading. + properties: + unclaimed_dbc_fees: + type: number + description: Unclaimed fees from the dynamic bonding curve + example: 12.3456789 + total_dbc_fees: + type: number + description: Total fees earned from the dynamic bonding curve + example: 1234.56789 + unclaimed_damm_v2_fees: + type: number + description: Unclaimed fees from the DAMM v2 pools + example: 9.87654321 + total_damm_v2_fees: + type: number + description: Total fees earned from the DAMM v2 pools + example: 987.654321 + unclaimed_fees: + type: number + description: Total unclaimed fees from all sources + example: 22.2222221 + total_fees: + type: number + description: Total fees earned from all sources + example: 2222.222211 coin_response: type: object properties: From b39d434e344ec160eefd6596dd0b385c419048ab Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Thu, 16 Oct 2025 22:35:35 -0700 Subject: [PATCH 55/56] more details --- api/swagger/swagger-v1.yaml | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/api/swagger/swagger-v1.yaml b/api/swagger/swagger-v1.yaml index 631c9e85..2e42d280 100644 --- a/api/swagger/swagger-v1.yaml +++ b/api/swagger/swagger-v1.yaml @@ -5600,28 +5600,28 @@ components: properties: unclaimed_dbc_fees: type: number - description: Unclaimed fees from the dynamic bonding curve - example: 12.3456789 + description: Unclaimed fees from the dynamic bonding curve, in $AUDIO. + example: 1000000000 total_dbc_fees: type: number - description: Total fees earned from the dynamic bonding curve - example: 1234.56789 + description: Total fees earned from the dynamic bonding curve, in $AUDIO. 
+ example: 2000000000 unclaimed_damm_v2_fees: type: number - description: Unclaimed fees from the DAMM v2 pools - example: 9.87654321 + description: Unclaimed fees from the DAMM v2 pools, in $AUDIO. + example: 1000000000 total_damm_v2_fees: type: number - description: Total fees earned from the DAMM v2 pools - example: 987.654321 + description: Total fees earned from the DAMM v2 pools, in $AUDIO. + example: 2000000000 unclaimed_fees: type: number - description: Total unclaimed fees from all sources - example: 22.2222221 + description: Total unclaimed fees from all sources, in $AUDIO. + example: 2000000000 total_fees: type: number - description: Total fees earned from all sources - example: 2222.222211 + description: Total fees earned from all sources, in $AUDIO. + example: 4000000000 coin_response: type: object properties: From 232ecae59c7d88aa28b439d85107b547f31ce547 Mon Sep 17 00:00:00 2001 From: Marcus Pasell <3690498+rickyrombo@users.noreply.github.com> Date: Thu, 16 Oct 2025 22:36:48 -0700 Subject: [PATCH 56/56] keep underscores --- api/swagger/swagger-v1.yaml | 2 +- api/v1_coins.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/api/swagger/swagger-v1.yaml b/api/swagger/swagger-v1.yaml index 2e42d280..c0da7de5 100644 --- a/api/swagger/swagger-v1.yaml +++ b/api/swagger/swagger-v1.yaml @@ -5594,7 +5594,7 @@ components: type: string description: Address of the pool creator's wallet example: "2AAsAwNPTNBk5N466xyPiwqdgbc5WLbDTdnn9gVuDKaN" - artistFees: + artist_fees: type: object description: Information about the fees earned by the artist on the coin's trading. properties: diff --git a/api/v1_coins.go b/api/v1_coins.go index 52673efd..93ebb372 100644 --- a/api/v1_coins.go +++ b/api/v1_coins.go @@ -74,7 +74,7 @@ type ArtistCoin struct { Buy int `json:"buy" db:"buy"` Sell int `json:"sell" db:"sell"` DynamicBondingCurve *DynamicBondingCurveInsights `json:"dynamicBondingCurve" db:"dynamic_bonding_curve"` - ArtistFees ArtistCoinFees `json:"artistFees" db:"artist_fees"` + ArtistFees ArtistCoinFees `json:"artist_fees" db:"artist_fees"` UpdatedAt time.Time `json:"updatedAt" db:"updated_at"` }
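
The patches above replace the hand-rolled WatchPgNotification helper with jackc/pgxlisten. What follows is a minimal, self-contained sketch of that pattern, not part of the patch series itself: the channel name "my_channel" and the connection string are hypothetical, and only the pgxlisten API surface already visible in the diffs (Listener with Connect, LogError, and ReconnectDelay; Handle; HandlerFunc; Listen) is relied on.

package main

import (
	"context"
	"log"
	"time"

	"github.com/jackc/pgx/v5"
	"github.com/jackc/pgx/v5/pgconn"
	"github.com/jackc/pgxlisten"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	listener := pgxlisten.Listener{
		// Called by Listen to obtain the dedicated LISTEN connection,
		// and called again after ReconnectDelay if that connection fails.
		Connect: func(ctx context.Context) (*pgx.Conn, error) {
			return pgx.Connect(ctx, "postgres://localhost:5432/postgres") // hypothetical DSN
		},
		// Receives errors from the listen loop and from handlers that return one.
		LogError: func(ctx context.Context, err error) {
			log.Printf("pg_notify listener error: %v", err)
		},
		ReconnectDelay: 1 * time.Second,
	}

	// Register a handler for one channel; a Listener can handle several.
	listener.Handle("my_channel", pgxlisten.HandlerFunc(
		func(ctx context.Context, n *pgconn.Notification, _ *pgx.Conn) error {
			log.Printf("notification on %s: %s", n.Channel, n.Payload)
			return nil // a returned error is routed to LogError
		}))

	// Blocks until ctx is cancelled, reconnecting on connection failure.
	if err := listener.Listen(ctx); err != nil {
		log.Printf("listen exited: %v", err)
	}
}

One difference from the diffs: the indexers' Connect functions hand back a connection already acquired from the pool, while this sketch dials its own connection inside Connect, which lets the ReconnectDelay-driven retry loop obtain a fresh connection after a failure.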
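A note on the retry dispatch added in PATCH 48: ProcessRetryQueue grows one near-identical switch arm per indexer. Since every indexer in the diffs exposes HandleUpdate(ctx context.Context, msg *pb.SubscribeUpdate) error, the arms could be collapsed into a map keyed by the NAME constants introduced in PATCH 43. The fragment below is an illustrative sketch only, assuming the identifiers shown in the diffs (s.tokenIndexer, item.UpdateMessage.SubscribeUpdate, common.DeleteFromRetryQueue, and so on); updateHandler and handlers are hypothetical names that do not appear in the patches.

// Hypothetical refactor sketch; updateHandler and handlers are not in the patches.
type updateHandler interface {
	HandleUpdate(ctx context.Context, msg *pb.SubscribeUpdate) error
}

handlers := map[string]updateHandler{
	token.NAME:   s.tokenIndexer,
	damm_v2.NAME: s.dammV2Indexer,
	dbc.NAME:     s.dbcIndexer,
	program.NAME: s.programIndexer,
}

for _, item := range queue {
	h, ok := handlers[item.Indexer]
	if !ok {
		logger.Warn("unknown indexer in retry queue", zap.String("indexer", item.Indexer))
		continue
	}
	// Retry the update; on failure, keep the row and skip past it next page.
	if err := h.HandleUpdate(ctx, item.UpdateMessage.SubscribeUpdate); err != nil {
		logger.Error("failed to retry", zap.String("indexer", item.Indexer), zap.Error(err))
		offset++
		continue
	}
	// Success: drop the row from the queue.
	if err := common.DeleteFromRetryQueue(ctx, s.pool, item.ID); err != nil {
		logger.Error("failed to delete from retry queue", zap.Error(err))
	}
}

This keeps the retry-then-delete bookkeeping in one place, so adding a fifth indexer would mean one map entry rather than another ten-line case.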