feat: track btc fees on success #4334

Merged · 3 commits · Dec 12, 2023
Changes from 1 commit
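
In short, this PR makes the Bitcoin witness report the actual fee paid by a successfully broadcast transaction instead of `Default::default()`. The fee is computed the standard way: fetch the transactions whose outputs this one spends, sum the value of the spent outputs, and subtract the value of the transaction's own outputs. A minimal standalone sketch of that calculation (hypothetical helper, matching the diff below in treating output values as u64 satoshis):

```rust
use std::collections::HashMap;

use bitcoin::{Transaction, Txid};

/// Sketch only: fee = value of spent previous outputs - value of this tx's outputs.
/// `prev_txs` must contain every transaction referenced by `tx`'s inputs.
fn sketch_tx_fee(tx: &Transaction, prev_txs: &[Transaction]) -> Option<u64> {
    // Index the previous transactions by txid so inputs can be resolved in any order.
    let by_txid: HashMap<Txid, &Transaction> =
        prev_txs.iter().map(|prev| (prev.txid(), prev)).collect();

    // Sum the value of every output this transaction spends.
    let mut total_in: u64 = 0;
    for input in &tx.input {
        let prev = by_txid.get(&input.previous_output.txid)?;
        total_in += prev.output.get(input.previous_output.vout as usize)?.value;
    }

    let total_out: u64 = tx.output.iter().map(|out| out.value).sum();

    // A valid transaction cannot pay out more than it spends.
    total_in.checked_sub(total_out)
}
```

The implementation in the diff pairs inputs with the fetched previous transactions by sorting both sides by txid rather than building a map; the pairing is the same, and the sort approach also tolerates the batched RPC returning results in a different order than requested.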
@@ -60,7 +60,9 @@ where
.btc_deposits(witness_call.clone())
.egress_items(scope, state_chain_stream, state_chain_client)
.await
.then(move |epoch, header| process_egress(epoch, header, witness_call.clone()))
.then(move |epoch, header| {
process_egress(epoch, header, witness_call.clone(), btc_client.clone())
})
.logging("witnessing")
.spawn(scope);

19 changes: 18 additions & 1 deletion engine/src/btc/retry_rpc.rs
@@ -1,4 +1,4 @@
use bitcoin::{Block, BlockHash, Txid};
use bitcoin::{Block, BlockHash, Transaction, Txid};
use utilities::task_scope::Scope;

use crate::{
@@ -62,6 +62,8 @@ pub trait BtcRetryRpcApi: Clone {
async fn average_block_fee_rate(&self, block_hash: BlockHash) -> cf_chains::btc::BtcAmount;

async fn best_block_header(&self) -> BlockHeader;

async fn get_raw_transactions(&self, tx_hashes: Vec<Txid>) -> Vec<Transaction>;
}

#[async_trait::async_trait]
@@ -151,6 +153,19 @@ impl BtcRetryRpcApi for BtcRetryRpcClient {
)
.await
}

async fn get_raw_transactions(&self, tx_hashes: Vec<Txid>) -> Vec<Transaction> {
self.retry_client
.request(
Box::pin(move |client| {
let tx_hashes = tx_hashes.clone();
#[allow(clippy::redundant_async_block)]
Box::pin(async move { client.get_raw_transactions(tx_hashes).await })
}),
RequestLog::new("get_raw_transactions".to_string(), None),
)
.await
}
}

#[async_trait::async_trait]
@@ -212,6 +227,8 @@ pub mod mocks {
async fn average_block_fee_rate(&self, block_hash: BlockHash) -> cf_chains::btc::BtcAmount;

async fn best_block_header(&self) -> BlockHeader;

async fn get_raw_transactions(&self, tx_hashes: Vec<Txid>) -> Vec<Transaction>;
}
}
}
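
For context on the pattern used for the new `get_raw_transactions` method above: the retry client takes a boxed closure that it may call once per attempt, so anything the closure captures by value (here the batch of `tx_hashes`) is cloned inside the closure before being moved into that attempt's future. A stripped-down sketch of the same shape (the toy retry loop and all names here are assumptions for illustration, not the engine's actual retry implementation):

```rust
use std::{future::Future, pin::Pin};

// The retry closure yields a fresh future per attempt.
type AttemptFut<T> = Pin<Box<dyn Future<Output = Result<T, String>> + Send>>;

// Toy retry loop: keep calling the attempt closure until it succeeds.
// (A real retry client would also apply timeouts, backoff and logging.)
async fn retry_until_ok<T>(mut attempt: impl FnMut() -> AttemptFut<T>) -> T {
    loop {
        match attempt().await {
            Ok(value) => return value,
            Err(_e) => continue, // a real client would back off here
        }
    }
}

async fn fetch_batch(tx_hashes: Vec<String>) -> Vec<String> {
    retry_until_ok(move || -> AttemptFut<Vec<String>> {
        // Clone the captured argument so the closure can be invoked again on failure.
        let tx_hashes = tx_hashes.clone();
        Box::pin(async move {
            // Stand-in for the actual RPC call; just echo the hashes back.
            Ok::<_, String>(tx_hashes)
        })
    })
    .await
}
```

Without the inner `clone`, the first call would consume the captured vector and the closure could not be invoked again for a retry.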
52 changes: 43 additions & 9 deletions engine/src/witness/btc.rs
@@ -27,10 +27,39 @@ use super::common::{

use anyhow::Result;

async fn calc_bitcoin_transaction_fee(tx: &Transaction, client: &BtcRetryRpcClient) -> u64 {
let mut prev_outs: Vec<_> = tx
.input
.iter()
.map(|input| (input.previous_output.txid, input.previous_output.vout))
.collect();

prev_outs.sort_by_key(|(txid, _)| *txid);
let (input_txids, input_vouts): (Vec<_>, Vec<_>) = prev_outs.into_iter().unzip();

let input_txids_len = input_txids.len();
let mut input_txs: Vec<Transaction> = client.get_raw_transactions(input_txids).await;
assert_eq!(input_txs.len(), input_txids_len);

// protect against RPC re-ordering of the batched request
input_txs.sort_by_key(|tx| tx.txid());

let total_input_value: u64 = input_txs
.into_iter()
.zip(input_vouts)
.map(|(tx, vout)| tx.output[vout as usize].value)
.sum();

total_input_value
.checked_sub(tx.output.iter().map(|output| output.value).sum::<u64>())
.expect("It's not possible to pay more than you have.")
}

pub async fn process_egress<ProcessCall, ProcessingFut, ExtraInfo, ExtraHistoricInfo>(
epoch: Vault<cf_chains::Bitcoin, ExtraInfo, ExtraHistoricInfo>,
header: Header<u64, BlockHash, (Vec<Transaction>, Vec<(btc::Hash, BlockNumber)>)>,
process_call: ProcessCall,
rpc: BtcRetryRpcClient,
) where
ProcessCall: Fn(state_chain_runtime::RuntimeCall, EpochIndex) -> ProcessingFut
+ Send
@@ -43,7 +72,8 @@ pub async fn process_egress<ProcessCall, ProcessingFut, ExtraInfo, ExtraHistoric

let monitored_tx_hashes = monitored_tx_hashes.iter().map(|(tx_hash, _)| tx_hash);

for tx_hash in success_witnesses(monitored_tx_hashes, &txs) {
for (tx_hash, tx) in success_witnesses(monitored_tx_hashes, txs) {
let tx_fee = calc_bitcoin_transaction_fee(&tx, &rpc).await;
process_call(
state_chain_runtime::RuntimeCall::BitcoinBroadcaster(
pallet_cf_broadcast::Call::transaction_succeeded {
@@ -53,7 +83,7 @@ pub async fn process_egress<ProcessCall, ProcessingFut, ExtraInfo, ExtraHistoric
CHANGE_ADDRESS_SALT,
)
.script_pubkey(),
tx_fee: Default::default(),
tx_fee,
tx_metadata: (),
},
),
@@ -168,7 +198,9 @@ where
.await
.then({
let process_call = process_call.clone();
move |epoch, header| process_egress(epoch, header, process_call.clone())
move |epoch, header| {
process_egress(epoch, header, process_call.clone(), btc_client.clone())
}
})
.continuous("Bitcoin".to_string(), db)
.logging("witnessing")
@@ -179,15 +211,16 @@ where

fn success_witnesses<'a>(
monitored_tx_hashes: impl Iterator<Item = &'a btc::Hash> + Clone,
txs: &Vec<Transaction>,
) -> Vec<btc::Hash> {
txs: Vec<Transaction>,
) -> Vec<(btc::Hash, Transaction)> {
let mut successful_witnesses = Vec::new();

for tx in txs {
let mut monitored = monitored_tx_hashes.clone();
let tx_hash = tx.txid().as_raw_hash().to_byte_array();

if monitored.any(|&monitored_hash| monitored_hash == tx_hash) {
successful_witnesses.push(tx_hash);
successful_witnesses.push((tx_hash, tx));
}
}
successful_witnesses
@@ -235,10 +268,11 @@ mod tests {
// we're not monitoring for index 2, and they're out of order.
let mut monitored_hashes = vec![tx_hashes[3], tx_hashes[0], tx_hashes[1]];

let mut success_witnesses = success_witnesses(monitored_hashes.iter(), &txs);
success_witnesses.sort();
let (mut success_witness_hashes, _): (Vec<_>, Vec<_>) =
success_witnesses(monitored_hashes.iter(), txs).into_iter().unzip();
success_witness_hashes.sort();
monitored_hashes.sort();

assert_eq!(success_witnesses, monitored_hashes);
assert_eq!(success_witness_hashes, monitored_hashes);
}
}
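
One detail worth noting in `calc_bitcoin_transaction_fee` above: `get_raw_transactions` is a batched request, and the node may return the transactions in a different order than they were requested. Sorting the requested txids before the call and the fetched transactions after it puts both sides in the same order, so zipping them with the correspondingly sorted vouts pairs each input with the right previous transaction. A tiny self-contained illustration of that sort-and-zip pairing, using toy data in place of real transactions:

```rust
fn main() {
    // (txid, vout) pairs in the order they appear in the spending transaction's inputs.
    let mut prev_outs = vec![("c3", 1u32), ("a1", 0), ("b2", 2)];
    prev_outs.sort_by_key(|(txid, _)| *txid);
    let (requested, vouts): (Vec<_>, Vec<_>) = prev_outs.into_iter().unzip();

    // Pretend the batched RPC answered out of order.
    let mut fetched = vec![("b2", "tx B"), ("c3", "tx C"), ("a1", "tx A")];
    // Re-sorting by txid restores the same order as `requested` ...
    fetched.sort_by_key(|(txid, _)| *txid);
    assert_eq!(requested, fetched.iter().map(|(txid, _)| *txid).collect::<Vec<_>>());

    // ... so zipping lines each fetched transaction up with the vout its input spends.
    for ((txid, tx), vout) in fetched.iter().zip(vouts) {
        println!("{txid}: use output {vout} of {tx}");
    }
}
```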