feat: complete migration to lazy blocks
lgalabru committed May 11, 2023
1 parent e8ee3ab commit fa50584
Showing 3 changed files with 151 additions and 303 deletions.
109 changes: 5 additions & 104 deletions components/chainhook-cli/src/cli/mod.rs
@@ -13,10 +13,9 @@ use chainhook_event_observer::chainhooks::types::{
     StacksPrintEventBasedPredicate,
 };
 use chainhook_event_observer::hord::db::{
-    delete_blocks_in_block_range_sqlite, delete_data_in_hord_db, fetch_and_cache_blocks_in_hord_db,
-    find_block_at_block_height, find_block_at_block_height_sqlite, find_last_block_inserted,
-    find_watched_satpoint_for_inscription, initialize_hord_db, insert_entry_in_blocks,
-    insert_entry_in_blocks_lazy_block, open_readonly_hord_db_conn,
+    delete_data_in_hord_db, fetch_and_cache_blocks_in_hord_db,
+    find_block_at_block_height, find_last_block_inserted, find_watched_satpoint_for_inscription,
+    initialize_hord_db, insert_entry_in_blocks, open_readonly_hord_db_conn,
     open_readonly_hord_db_conn_rocks_db, open_readwrite_hord_db_conn,
     open_readwrite_hord_db_conn_rocks_db, retrieve_satoshi_point_using_lazy_storage, LazyBlock,
 };
@@ -214,9 +213,6 @@ enum DbCommand {
     /// Check integrity
     #[clap(name = "check", bin_name = "check")]
     Check(CheckHordDbCommand),
-    /// Legacy command
-    #[clap(name = "init", bin_name = "init")]
-    Init(InitHordDbCommand),
     /// Patch DB
     #[clap(name = "patch", bin_name = "patch")]
     Patch(PatchHordDbCommand),
@@ -741,84 +737,6 @@ async fn handle_command(opts: Opts, ctx: Context) -> Result<(), String> {
             }
         },
         Command::Hord(HordCommand::Db(subcmd)) => match subcmd {
-            DbCommand::Init(cmd) => {
-                let config = Config::default(false, false, false, &cmd.config_path)?;
-
-                let sqlite_db_conn_rw =
-                    open_readwrite_hord_db_conn(&config.expected_cache_path(), &ctx)?;
-
-                // Migrate if required
-                if find_block_at_block_height_sqlite(1, &sqlite_db_conn_rw).is_some() {
-                    let blocks_db =
-                        open_readwrite_hord_db_conn_rocks_db(&config.expected_cache_path(), &ctx)?;
-
-                    for i in 0..=300000 {
-                        match find_block_at_block_height_sqlite(i, &sqlite_db_conn_rw) {
-                            Some(block) => {
-                                insert_entry_in_blocks(i, &block, &blocks_db, &ctx);
-                                info!(ctx.expect_logger(), "Block #{} inserted", i);
-                            }
-                            None => {
-                                error!(ctx.expect_logger(), "Block #{} missing", i);
-                            }
-                        }
-                    }
-                    let _ = blocks_db.flush();
-                    delete_blocks_in_block_range_sqlite(0, 300000, &sqlite_db_conn_rw, &ctx);
-
-                    for i in 300001..=500000 {
-                        match find_block_at_block_height_sqlite(i, &sqlite_db_conn_rw) {
-                            Some(block) => {
-                                insert_entry_in_blocks(i, &block, &blocks_db, &ctx);
-                                info!(ctx.expect_logger(), "Block #{} inserted", i);
-                            }
-                            None => {
-                                info!(ctx.expect_logger(), "Block #{} missing", i);
-                            }
-                        }
-                    }
-                    let _ = blocks_db.flush();
-                    delete_blocks_in_block_range_sqlite(300001, 500000, &sqlite_db_conn_rw, &ctx);
-
-                    for i in 500001..=783986 {
-                        match find_block_at_block_height_sqlite(i, &sqlite_db_conn_rw) {
-                            Some(block) => {
-                                insert_entry_in_blocks(i, &block, &blocks_db, &ctx);
-                                info!(ctx.expect_logger(), "Block #{} inserted", i);
-                            }
-                            None => {
-                                info!(ctx.expect_logger(), "Block #{} missing", i);
-                            }
-                        }
-                    }
-                    let _ = blocks_db.flush();
-                    delete_blocks_in_block_range_sqlite(500001, 783986, &sqlite_db_conn_rw, &ctx);
-                }
-
-                // Sync
-                for _ in 0..5 {
-                    if let Some((start_block, end_block)) = should_sync_hord_db(&config, &ctx)? {
-                        if start_block == 0 {
-                            info!(
-                                ctx.expect_logger(),
-                                "Initializing hord indexing from block #{}", start_block
-                            );
-                        } else {
-                            info!(
-                                ctx.expect_logger(),
-                                "Resuming hord indexing from block #{}", start_block
-                            );
-                        }
-                        perform_hord_db_update(start_block, end_block, 10, &config, &ctx).await?;
-                    } else {
-                        info!(ctx.expect_logger(), "Database hord up to date");
-                    }
-                }
-
-                // Start node
-                let mut service = Service::new(config, ctx);
-                return service.run(vec![]).await;
-            }
             DbCommand::Sync(cmd) => {
                 let config = Config::default(false, false, false, &cmd.config_path)?;
                 if let Some((start_block, end_block)) = should_sync_hord_db(&config, &ctx)? {
@@ -910,24 +828,7 @@ async fn handle_command(opts: Opts, ctx: Context) -> Result<(), String> {
                 );
             }
             DbCommand::Patch(cmd) => {
-                let config = Config::default(false, false, false, &cmd.config_path)?;
-                let sqlite_db_conn =
-                    open_readonly_hord_db_conn(&config.expected_cache_path(), &ctx)?;
-
-                let blocks_db =
-                    open_readwrite_hord_db_conn_rocks_db(&config.expected_cache_path(), &ctx)?;
-
-                for i in 0..774940 {
-                    match find_block_at_block_height_sqlite(i, &sqlite_db_conn) {
-                        Some(block) => {
-                            insert_entry_in_blocks(i, &block, &blocks_db, &ctx);
-                            println!("Block #{} inserted", i);
-                        }
-                        None => {
-                            println!("Block #{} missing", i)
-                        }
-                    }
-                }
+                unimplemented!()
             }
             DbCommand::Migrate(cmd) => {
                 let config = Config::default(false, false, false, &cmd.config_path)?;
@@ -945,7 +846,7 @@ async fn handle_command(opts: Opts, ctx: Context) -> Result<(), String> {
                                 .serialize_to_lazy_format(&mut bytes)
                                 .expect("unable to convert to lazy block");
                             let lazy_block = LazyBlock::new(bytes);
-                            insert_entry_in_blocks_lazy_block(i, &lazy_block, &blocks_db_rw, &ctx);
+                            insert_entry_in_blocks(i, &lazy_block, &blocks_db_rw, &ctx);
                             println!("Block #{} migrated to lazy block", i);
                         }
                         None => {
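Taken together, the changes leave a single insertion path: a block is re-encoded into the lazy byte format, wrapped in a LazyBlock, and handed to insert_entry_in_blocks (the dedicated insert_entry_in_blocks_lazy_block helper disappears from the imports). Below is a minimal sketch of that per-block step; the CompactedBlock source type, the u32 height, the rocksdb::DB handle, and the Context module path are assumptions inferred from this diff, not taken from the full file.

// Hypothetical helper isolating the per-block step from the DbCommand::Migrate
// hunk above. Only serialize_to_lazy_format, LazyBlock::new and
// insert_entry_in_blocks appear in the diff; the other names are assumed.
use chainhook_event_observer::hord::db::{insert_entry_in_blocks, CompactedBlock, LazyBlock};
use chainhook_event_observer::utils::Context; // assumed module path for Context

fn migrate_block_to_lazy(
    height: u32,                // assumed height type
    block: &CompactedBlock,     // assumed type of a block stored in the old format
    blocks_db_rw: &rocksdb::DB, // assumed handle type from open_readwrite_hord_db_conn_rocks_db
    ctx: &Context,
) {
    // Re-encode the block into the lazy, byte-oriented representation.
    let mut bytes = vec![];
    block
        .serialize_to_lazy_format(&mut bytes)
        .expect("unable to convert to lazy block");
    let lazy_block = LazyBlock::new(bytes);
    // Since this commit, the generic insert helper accepts the lazy
    // representation directly, replacing insert_entry_in_blocks_lazy_block.
    insert_entry_in_blocks(height, &lazy_block, blocks_db_rw, ctx);
}

In the actual DbCommand::Migrate arm this logic stays inline inside the migration loop; the only change the commit makes there is the function name on the insert call.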
