Skip to content

Commit

Permalink
Add a benchmark for transactions retrieval
Browse files Browse the repository at this point in the history
It consists of two rounds with different db sizes (respectively 1 000 000
and 10 000 000 transactions) to check that the db size doesn't matter.
Each round will retrieve 100, 10 000, 100 000, and 1 000 000
transactions.
  • Loading branch information
Alenar committed May 7, 2024
1 parent 2d28139 commit 5d2b4cb
Show file tree
Hide file tree
Showing 2 changed files with 99 additions and 0 deletions.
4 changes: 4 additions & 0 deletions mithril-aggregator/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,10 @@ repository = { workspace = true }
name = "cardano_transactions_import"
harness = false

[[bench]]
name = "cardano_transactions_get"
harness = false

[dependencies]
anyhow = "1.0.79"
async-trait = "0.1.77"
Expand Down
95 changes: 95 additions & 0 deletions mithril-aggregator/benches/cardano_transactions_get.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,95 @@
use std::sync::Arc;

use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
use sqlite::ConnectionThreadSafe;

use mithril_aggregator::{
database::repository::CardanoTransactionRepository, services::TransactionStore,
};
use mithril_common::{entities::CardanoTransaction, test_utils::TempDir};
use mithril_persistence::sqlite::ConnectionBuilder;

/// Open a file-backed SQLite connection in a dedicated benchmark temp
/// directory and apply the cardano transaction migrations to it.
///
/// Any database file left over from a previous run is deleted first so every
/// benchmark starts from an empty store.
fn cardano_tx_db_connection(db_file_name: &str) -> ConnectionThreadSafe {
    let db_dir = TempDir::create("aggregator_benches", "bench_get_transactions");
    let db_path = db_dir.join(db_file_name);

    // Guarantee a clean slate: a stale file would skew the seeded row count.
    if db_path.exists() {
        std::fs::remove_file(&db_path).unwrap();
    }

    ConnectionBuilder::open_file(&db_path)
        .with_migrations(
            mithril_aggregator::database::cardano_transaction_migration::get_migrations(),
        )
        .build()
        .unwrap()
}

/// Build `nb_transactions` synthetic transactions, one per block.
///
/// Note: placing every transaction on its own block is unrealistic on
/// purpose — it tricks the repository `get_transactions_in_range` method
/// into reading exactly the expected number of transactions.
fn generate_transactions(nb_transactions: usize) -> Vec<CardanoTransaction> {
    let mut transactions = Vec::with_capacity(nb_transactions);
    for i in 0..nb_transactions {
        let index = i as u64;
        transactions.push(CardanoTransaction::new(
            format!("tx_hash-{}", i),
            index,
            index * 100,
            format!("block_hash-{}", i),
            index + 1,
        ));
    }
    transactions
}

/// Create a fresh repository whose database is seeded with
/// `nb_transaction_in_db` transactions (one per block).
async fn init_db(nb_transaction_in_db: usize) -> CardanoTransactionRepository {
    println!("Generating a db with {nb_transaction_in_db} transactions, one per block ...");
    // One database file per size so the two benchmark rounds don't collide.
    let db_file_name = format!("cardano_tx-{nb_transaction_in_db}.db");
    let connection = Arc::new(cardano_tx_db_connection(&db_file_name));
    let repository = CardanoTransactionRepository::new(connection);

    repository
        .store_transactions(generate_transactions(nb_transaction_in_db))
        .await
        .unwrap();

    repository
}

/// Run one benchmark round against a database seeded with
/// `nb_transaction_in_db` transactions, reading ranges of increasing size.
fn run_bench(c: &mut Criterion, nb_transaction_in_db: usize) {
    let runtime = tokio::runtime::Runtime::new().unwrap();
    // Seeding is done once, outside the measured section.
    let repository = runtime.block_on(init_db(nb_transaction_in_db));

    let group_name = format!("Get transactions - {nb_transaction_in_db} tx in db");
    let mut group = c.benchmark_group(group_name);
    for max_block_number in [100, 10_000, 100_000, 1_000_000] {
        let bench_id = BenchmarkId::from_parameter(format!(
            "get_transactions_in_range(0..{max_block_number})"
        ));
        group.bench_with_input(bench_id, &max_block_number, |b, &max_block_number| {
            // Since one transaction was stored per block, this reads
            // `max_block_number` rows regardless of the total db size.
            b.to_async(&runtime).iter(|| async {
                let _transactions = repository
                    .get_transactions_in_range(0..max_block_number)
                    .await
                    .unwrap();
            });
        });
    }
    group.finish();
}

/// Benchmark entry point: two rounds, one with 1M transactions in the db and
/// one with 10M, to check that the total db size doesn't affect retrieval.
/// Each round reads 100, 10_000, 100_000, and 1_000_000 transactions.
fn bench_get_transactions(c: &mut Criterion) {
    for nb_transaction_in_db in [1_000_000, 10_000_000] {
        run_bench(c, nb_transaction_in_db);
    }
}

// Register the benchmark with a reduced sample size (20 instead of criterion's
// default 100), since each iteration can read up to 1M rows from SQLite.
criterion_group! {
    name = benches;
    config = Criterion::default().sample_size(20);
    targets = bench_get_transactions
}
criterion_main!(benches);

0 comments on commit 5d2b4cb

Please sign in to comment.