Skip to content

Commit

Permalink
Merge pull request #624 from EspressoSystems/abdul/remove-vacuum
Browse files Browse the repository at this point in the history
Remove manual Vacuum from pruning
  • Loading branch information
imabdulbasit committed Jun 5, 2024
2 parents 6cb52cf + bafaf30 commit 49dfd6d
Show file tree
Hide file tree
Showing 2 changed files with 25 additions and 10 deletions.
6 changes: 3 additions & 3 deletions src/data_source/fetching.rs
Original file line number Diff line number Diff line change
Expand Up @@ -361,9 +361,7 @@ where
let task = {
BackgroundTask::spawn("pruner", async move {
for i in 1.. {
sleep(cfg.interval()).await;
tracing::warn!("pruner woke up for the {i}th time");

tracing::warn!("starting pruner run {i} ");
{
let mut storage = fetcher.storage.write().await;

Expand All @@ -381,6 +379,8 @@ where
}
}
}

sleep(cfg.interval()).await;
}
})
};
Expand Down
29 changes: 22 additions & 7 deletions src/data_source/storage/sql.rs
Original file line number Diff line number Diff line change
Expand Up @@ -574,6 +574,11 @@ impl PruneStorage for SqlStorage {
Ok(size as u64)
}

/// Note: pruning may not immediately free up disk space even after rows are
/// deleted, because a vacuum operation is usually required to reclaim it.
/// PostgreSQL performs automatic vacuuming, so we do not run an explicit
/// `VACUUM` here — it can be resource-intensive.

async fn prune(&mut self) -> Result<Option<u64>, QueryError> {
let cfg = self.get_pruning_config().ok_or(QueryError::Error {
message: "Pruning config not found".to_string(),
Expand Down Expand Up @@ -642,13 +647,6 @@ impl PruneStorage for SqlStorage {
}
}
}
// Vacuum the database to reclaim space.
// Note: VACUUM FULL is not used as it requires an exclusive lock on the tables, which can
// cause downtime for the query service.
self.client
.batch_execute("VACUUM")
.await
.map_err(postgres_err)?;

Ok(pruned_height)
}
Expand Down Expand Up @@ -3376,6 +3374,11 @@ mod test {
storage.set_pruning_config(PrunerCfg::new());
// No data will be pruned
let pruned_height = storage.prune().await.unwrap();

// Vacuum the database to reclaim space.
// This is necessary to ensure the test passes.
// Note: we don't vacuum after each pruner run in production because PostgreSQL's
// autovacuum handles space reclamation automatically.
storage.client.batch_execute("VACUUM").await.unwrap();
// Pruned height should be none
assert!(pruned_height.is_none());

Expand All @@ -3393,6 +3396,10 @@ mod test {
// All of the data is now older than 1s.
// This would prune all the data as the target retention is set to 1s
let pruned_height = storage.prune().await.unwrap();
// Vacuum the database to reclaim space.
// This is necessary to ensure the test passes.
// Note: we don't vacuum after each pruner run in production because PostgreSQL's
// autovacuum handles space reclamation automatically.
storage.client.batch_execute("VACUUM").await.unwrap();

// Pruned height should be some
assert!(pruned_height.is_some());
Expand Down Expand Up @@ -3455,6 +3462,10 @@ mod test {
// Pruning would not delete any data
// All the data is younger than minimum retention period even though the usage > threshold
let pruned_height = storage.prune().await.unwrap();
// Vacuum the database to reclaim space.
// This is necessary to ensure the test passes.
// Note: we don't vacuum after each pruner run in production because PostgreSQL's
// autovacuum handles space reclamation automatically.
storage.client.batch_execute("VACUUM").await.unwrap();

// Pruned height should be none
assert!(pruned_height.is_none());
Expand All @@ -3475,6 +3486,10 @@ mod test {
sleep(Duration::from_secs(2)).await;
// This would prune all the data
let pruned_height = storage.prune().await.unwrap();
// Vacuum the database to reclaim space.
// This is necessary to ensure the test passes.
// Note: we don't vacuum after each pruner run in production because PostgreSQL's
// autovacuum handles space reclamation automatically.
storage.client.batch_execute("VACUUM").await.unwrap();

// Pruned height should be some
assert!(pruned_height.is_some());
Expand Down

0 comments on commit 49dfd6d

Please sign in to comment.