1 change: 1 addition & 0 deletions src/datastore/common.ts
@@ -1548,6 +1548,7 @@ export interface DbChainTip {
microblock_count: number;
tx_count: number;
tx_count_unanchored: number;
mempool_tx_count: number;
}

export enum IndexesState {
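For reference, after this change the chain tip row carries one additional counter. An abridged sketch of the resulting `DbChainTip` shape is below; the fields other than `mempool_tx_count` are inferred from how the tip is asserted in the tests further down, so treat it as illustrative rather than the exact declaration.

```ts
// Abridged, illustrative sketch of DbChainTip after this PR; only
// mempool_tx_count is new, the other fields are inferred from the tests below.
export interface DbChainTip {
  block_height: number;
  block_hash: string;
  index_block_hash: string;
  burn_block_height: number;
  block_count: number;
  microblock_hash?: string;
  microblock_sequence?: number;
  microblock_count: number;
  tx_count: number;
  tx_count_unanchored: number;
  mempool_tx_count: number; // running count of non-pruned mempool txs
}
```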
1 change: 1 addition & 0 deletions src/datastore/pg-store.ts
@@ -218,6 +218,7 @@ export class PgStore extends BasePgStore {
microblock_count: tip?.microblock_count ?? 0,
tx_count: tip?.tx_count ?? 0,
tx_count_unanchored: tip?.tx_count_unanchored ?? 0,
mempool_tx_count: tip?.mempool_tx_count ?? 0,
};
}

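The `?? 0` fallback means an empty database reports a mempool count of zero rather than `undefined`. A minimal read-side sketch, assuming an initialized `PgStore` instance named `db` as in the test suites below:

```ts
// Minimal sketch: reading the new counter off the chain tip.
// Assumes `db` is an initialized PgStore instance, as in the tests below.
const tip = await db.getChainTip();
console.log(`block height: ${tip.block_height}, mempool txs: ${tip.mempool_tx_count}`);
```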
27 changes: 25 additions & 2 deletions src/datastore/pg-write-store.ts
@@ -1644,6 +1644,27 @@ export class PgWriteStore extends PgStore {
tenure_change_signature: tx.tenure_change_signature ?? null,
tenure_change_signers: tx.tenure_change_signers ?? null,
}));

// Revive mempool txs that were previously dropped
const revivedTxs = await sql<{ tx_id: string }[]>`
UPDATE mempool_txs
SET pruned = false,
status = ${DbTxStatus.Pending},
receipt_block_height = ${values[0].receipt_block_height},
receipt_time = ${values[0].receipt_time}
WHERE tx_id IN ${sql(values.map(v => v.tx_id))}
AND pruned = true
AND NOT EXISTS (
SELECT 1
FROM txs
WHERE txs.tx_id = mempool_txs.tx_id
AND txs.canonical = true
AND txs.microblock_canonical = true
)
RETURNING tx_id
`;
txIds.push(...revivedTxs.map(r => r.tx_id));

const result = await sql<{ tx_id: string }[]>`
WITH inserted AS (
INSERT INTO mempool_txs ${sql(values)}
@@ -1652,7 +1673,9 @@
),
count_update AS (
UPDATE chain_tip SET
- mempool_tx_count = mempool_tx_count + (SELECT COUNT(*) FROM inserted),
+ mempool_tx_count = mempool_tx_count
+   + (SELECT COUNT(*) FROM inserted)
+   + ${revivedTxs.count},
mempool_updated_at = NOW()
)
SELECT tx_id FROM inserted
@@ -2329,7 +2352,7 @@
const updatedRows = await sql<{ tx_id: string }[]>`
WITH restored AS (
UPDATE mempool_txs
- SET pruned = FALSE
+ SET pruned = FALSE, status = ${DbTxStatus.Pending}
WHERE tx_id IN ${sql(txIds)} AND pruned = TRUE
RETURNING tx_id
),
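Taken together, these two hunks keep `chain_tip.mempool_tx_count` aligned with the set of non-pruned rows in `mempool_txs`: a re-broadcast tx that was previously dropped is flipped back to pending (unless a canonical mined copy already exists) and counted as revived, and txs restored after a re-org also have their status reset to pending. A quick way to sanity-check that invariant is to compare the maintained counter against a direct count; the sketch below is illustrative only and assumes the same postgres.js `sql` client used above:

```ts
// Illustrative sanity check (assumes the same postgres.js `sql` client used above):
// the maintained counter should match the number of non-pruned mempool rows.
const [row] = await sql<{ counted: number; actual: number }[]>`
  SELECT
    (SELECT mempool_tx_count FROM chain_tip) AS counted,
    (SELECT COUNT(*)::int FROM mempool_txs WHERE pruned = false) AS actual
`;
if (row.counted !== row.actual) {
  console.warn(`mempool_tx_count drift: counter=${row.counted}, actual=${row.actual}`);
}
```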
3 changes: 3 additions & 0 deletions src/tests/datastore-tests.ts
@@ -4108,6 +4108,7 @@ describe('postgres datastore', () => {
index_block_hash: '0xcc',
burn_block_height: 123,
block_count: 3,
mempool_tx_count: 0,
microblock_count: 0,
microblock_hash: undefined,
microblock_sequence: undefined,
@@ -4180,6 +4181,7 @@
microblock_sequence: undefined,
tx_count: 2,
tx_count_unanchored: 2,
mempool_tx_count: 0,
});

const block4b: DbBlock = {
@@ -4230,6 +4232,7 @@
microblock_sequence: undefined,
tx_count: 2, // Tx from block 2b now counts, but compensates with tx from block 2
tx_count_unanchored: 2,
mempool_tx_count: 1,
});

const b1 = await db.getBlock({ hash: block1.block_hash });
220 changes: 220 additions & 0 deletions src/tests/mempool-tests.ts
@@ -1726,6 +1726,226 @@ describe('mempool tests', () => {
expect(txResult2.body.tx_status).toBe('success');
});

test('Revive dropped and rebroadcasted mempool tx', async () => {
const senderAddress = 'SP25YGP221F01S9SSCGN114MKDAK9VRK8P3KXGEMB';
const txId = '0x521234';
const dbBlock1: DbBlock = {
block_hash: '0x0123',
index_block_hash: '0x1234',
parent_index_block_hash: '0x5678',
parent_block_hash: '0x5678',
parent_microblock_hash: '0x00',
parent_microblock_sequence: 0,
block_height: 1,
burn_block_time: 39486,
burn_block_hash: '0x1234',
burn_block_height: 123,
miner_txid: '0x4321',
canonical: true,
execution_cost_read_count: 0,
execution_cost_read_length: 0,
execution_cost_runtime: 0,
execution_cost_write_count: 0,
execution_cost_write_length: 0,
tx_count: 1,
};
const dbBlock1b: DbBlock = {
block_hash: '0x0123bb',
index_block_hash: '0x1234bb',
parent_index_block_hash: '0x5678bb',
parent_block_hash: '0x5678bb',
parent_microblock_hash: '0x00',
parent_microblock_sequence: 0,
block_height: 1,
burn_block_time: 39486,
burn_block_hash: '0x1234bb',
burn_block_height: 123,
miner_txid: '0x4321bb',
canonical: true,
execution_cost_read_count: 0,
execution_cost_read_length: 0,
execution_cost_runtime: 0,
execution_cost_write_count: 0,
execution_cost_write_length: 0,
tx_count: 1,
};
const dbBlock2b: DbBlock = {
block_hash: '0x2123',
index_block_hash: '0x2234',
parent_index_block_hash: dbBlock1b.index_block_hash,
parent_block_hash: dbBlock1b.block_hash,
parent_microblock_hash: '0x00',
parent_microblock_sequence: 0,
block_height: 2,
burn_block_time: 39486,
burn_block_hash: '0x1234',
burn_block_height: 123,
miner_txid: '0x4321',
canonical: true,
execution_cost_read_count: 0,
execution_cost_read_length: 0,
execution_cost_runtime: 0,
execution_cost_write_count: 0,
execution_cost_write_length: 0,
tx_count: 1,
};
const mempoolTx: DbMempoolTxRaw = {
tx_id: txId,
anchor_mode: 3,
nonce: 0,
raw_tx: bufferToHex(Buffer.from('test-raw-mempool-tx')),
type_id: DbTxTypeId.Coinbase,
status: 1,
post_conditions: '0x01f5',
fee_rate: 1234n,
sponsored: false,
sponsor_address: undefined,
sender_address: senderAddress,
origin_hash_mode: 1,
coinbase_payload: bufferToHex(Buffer.from('hi')),
pruned: false,
receipt_time: 1616063078,
};
const dbTx1: DbTxRaw = {
...mempoolTx,
...dbBlock1,
parent_burn_block_time: 1626122935,
tx_index: 4,
status: DbTxStatus.Success,
raw_result: '0x0100000000000000000000000000000001', // u1
canonical: true,
microblock_canonical: true,
microblock_sequence: I32_MAX,
microblock_hash: '',
parent_index_block_hash: '',
event_count: 0,
execution_cost_read_count: 0,
execution_cost_read_length: 0,
execution_cost_runtime: 0,
execution_cost_write_count: 0,
execution_cost_write_length: 0,
};

await db.updateMempoolTxs({ mempoolTxs: [mempoolTx] });

let chainTip = await db.getChainTip();
expect(chainTip.mempool_tx_count).toBe(1);

// Verify tx shows up in mempool (non-pruned)
const mempoolResult1 = await supertest(api.server).get(
`/extended/v1/address/${mempoolTx.sender_address}/mempool`
);
expect(mempoolResult1.body.results[0].tx_id).toBe(txId);
const mempoolCount1 = await supertest(api.server).get(`/extended/v1/tx/mempool`);
expect(mempoolCount1.body.total).toBe(1);

// Drop mempool tx
await db.dropMempoolTxs({
status: DbTxStatus.DroppedStaleGarbageCollect,
txIds: [mempoolTx.tx_id],
});

// Verify tx is pruned from mempool
const mempoolResult2 = await supertest(api.server).get(
`/extended/v1/address/${mempoolTx.sender_address}/mempool`
);
expect(mempoolResult2.body.results).toHaveLength(0);
const mempoolCount2 = await supertest(api.server).get(`/extended/v1/tx/mempool`);
expect(mempoolCount2.body.total).toBe(0);
chainTip = await db.getChainTip();
expect(chainTip.mempool_tx_count).toBe(0);

// Re-broadcast mempool tx
await db.updateMempoolTxs({ mempoolTxs: [mempoolTx] });

// Verify tx shows up in mempool again (revived)
const mempoolResult3 = await supertest(api.server).get(
`/extended/v1/address/${mempoolTx.sender_address}/mempool`
);
expect(mempoolResult3.body.results[0].tx_id).toBe(txId);
const mempoolCount3 = await supertest(api.server).get(`/extended/v1/tx/mempool`);
expect(mempoolCount3.body.total).toBe(1);
chainTip = await db.getChainTip();
expect(chainTip.mempool_tx_count).toBe(1);

// Mine tx in block to prune from mempool
await db.update({
block: dbBlock1,
microblocks: [],
minerRewards: [],
txs: [
{
tx: dbTx1,
stxEvents: [],
stxLockEvents: [],
ftEvents: [],
nftEvents: [],
contractLogEvents: [],
smartContracts: [],
names: [],
namespaces: [],
pox2Events: [],
pox3Events: [],
pox4Events: [],
},
],
});

// Verify tx is pruned from mempool
const mempoolResult4 = await supertest(api.server).get(
`/extended/v1/address/${mempoolTx.sender_address}/mempool`
);
expect(mempoolResult4.body.results).toHaveLength(0);
const mempoolCount4 = await supertest(api.server).get(`/extended/v1/tx/mempool`);
expect(mempoolCount4.body.total).toBe(0);
chainTip = await db.getChainTip();
expect(chainTip.mempool_tx_count).toBe(0);

// Verify tx is mined
const txResult1 = await supertest(api.server).get(`/extended/v1/tx/${txId}`);
expect(txResult1.body.tx_status).toBe('success');
expect(txResult1.body.canonical).toBe(true);

// Orphan the block to get the tx orphaned and placed back in the pool
await db.update({
block: dbBlock1b,
microblocks: [],
minerRewards: [],
txs: [],
});
await db.update({
block: dbBlock2b,
microblocks: [],
minerRewards: [],
txs: [],
});

// Verify tx is orphaned and back in mempool
const txResult2 = await supertest(api.server).get(`/extended/v1/tx/${txId}`);
expect(txResult2.body.canonical).toBeFalsy();

// Verify tx has been revived and is back in the mempool
const mempoolResult5 = await supertest(api.server).get(
`/extended/v1/address/${mempoolTx.sender_address}/mempool`
);
expect(mempoolResult5.body.results[0].tx_id).toBe(txId);
const mempoolCount5 = await supertest(api.server).get(`/extended/v1/tx/mempool`);
expect(mempoolCount5.body.total).toBe(1);
chainTip = await db.getChainTip();
expect(chainTip.mempool_tx_count).toBe(1);

// Re-broadcast mempool tx
await db.updateMempoolTxs({ mempoolTxs: [mempoolTx] });

// Verify tx has been revived and is back in the mempool
const mempoolResult6 = await supertest(api.server).get(
`/extended/v1/address/${mempoolTx.sender_address}/mempool`
);
expect(mempoolResult6.body.results[0].tx_id).toBe(txId);
const mempoolCount6 = await supertest(api.server).get(`/extended/v1/tx/mempool`);
expect(mempoolCount6.body.total).toBe(1);
});

test('returns fee priorities for mempool transactions', async () => {
const mempoolTxs: DbMempoolTxRaw[] = [];
for (let i = 0; i < 10; i++) {