From ef164495e7e49bf3eac2c1ae7c4fdbd89e25a21f Mon Sep 17 00:00:00 2001
From: Valentin Bersier
Date: Thu, 9 Feb 2023 13:09:30 +0100
Subject: [PATCH] refactor: new struct for Archive

This new struct allows keeping a reference to the tempdir so that it
doesn't get dropped prematurely (and thus deleted).

Changed the visibility of all pub members to pub(crate).
---
 src/aws.rs     |  9 ++++-----
 src/backup.rs  | 25 ++++++++++++++++++-------
 src/config.rs  | 18 +++++++++---------
 src/prelude.rs |  2 +-
 4 files changed, 32 insertions(+), 22 deletions(-)

diff --git a/src/aws.rs b/src/aws.rs
index a187947..c73b340 100644
--- a/src/aws.rs
+++ b/src/aws.rs
@@ -13,9 +13,8 @@ use aws_sdk_s3::{
 };
 use aws_smithy_http::byte_stream::Length;
 use log::*;
-use temp_dir::TempDir;
 
-use crate::config::Params;
+use crate::{backup::Archive, config::Params};
 
 /// In bytes, minimum chunk size of 5MB. Increase CHUNK_SIZE to send larger chunks.
 const CHUNK_SIZE: u64 = 1024 * 1024 * 5;
@@ -25,7 +24,7 @@ const MAX_CHUNKS: u64 = 10000;
 ///
 /// The `_temp_dir` is not used but needs to be kept around until the upload is complete. Letting it go out of
 /// scope will delete the temp folder.
-pub async fn upload_file(archive_path: PathBuf, _temp_dir: TempDir, params: &Params) -> Result<()> {
+pub(crate) async fn upload_file(archive: Archive, params: &Params) -> Result<()> {
     // we want to use `from_env` below, so make sure that environment variables are set properly, even if data comes
     // from the command line args
     env::set_var("AWS_ACCESS_KEY_ID", &params.aws_key_id);
@@ -63,7 +62,7 @@ pub async fn upload_file(archive_path: PathBuf, _temp_dir: TempDir, params: &Par
     let upload_id = multipart_upload_res
         .upload_id()
         .ok_or_else(|| anyhow!("upload_id not found"))?; // convert option to error if None
-    let file_size = get_file_size(&archive_path)?;
+    let file_size = get_file_size(&archive.path)?;
     let mut chunk_count = (file_size / CHUNK_SIZE) + 1;
     let mut size_of_last_chunk = file_size % CHUNK_SIZE;
     // if the file size is exactly a multiple of CHUNK_SIZE, we don't need the extra chunk
@@ -90,7 +89,7 @@ pub async fn upload_file(archive_path: PathBuf, _temp_dir: TempDir, params: &Par
         };
         // take the relevant part of the file corresponding to this chunk
         let stream = ByteStream::read_from()
-            .path(&archive_path)
+            .path(&archive.path)
             .offset(chunk_index * CHUNK_SIZE)
             .length(Length::Exact(this_chunk))
             .build()
diff --git a/src/backup.rs b/src/backup.rs
index a25a64f..16eac22 100644
--- a/src/backup.rs
+++ b/src/backup.rs
@@ -9,18 +9,26 @@ use uuid::Uuid;
 
 use crate::{aws::upload_file, config::Params};
 
+/// A compressed archive of a folder.
+///
+/// The `_temp_dir` is not used but needs to be kept around until the upload is complete. Letting it go out of
+/// scope will delete the temp folder.
+pub(crate) struct Archive {
+    pub(crate) path: PathBuf,
+    _temp_dir: TempDir,
+}
+
 /// Perform a backup of the folder, uploading it to S3 once complete.
-pub async fn backup(params: &Params) -> Result<()> {
-    let (archive_path, temp_dir) =
-        compress_folder(&params.folder).with_context(|| anyhow!("compression failed"))?;
-    upload_file(archive_path, temp_dir, params)
+pub(crate) async fn backup(params: &Params) -> Result<()> {
+    let archive = compress_folder(&params.folder).with_context(|| anyhow!("compression failed"))?;
+    upload_file(archive, params)
         .await
         .with_context(|| anyhow!("upload failed"))?;
     Ok(())
 }
 
 /// Compress the folder into a randomly named tar.gz archive in a temp directory
-fn compress_folder(folder: &Path) -> Result<(PathBuf, TempDir)> {
+fn compress_folder(folder: &Path) -> Result<Archive> {
     // create a temp directory, it will be deleted when the ref goes out of scope
     let dir = TempDir::new()?;
     // generate a random filename
@@ -37,6 +45,9 @@ fn compress_folder(folder: &Path) -> Result<(PathBuf, TempDir)> {
     let res = tar.into_inner()?;
     // make sure the gz layer data is written
     res.finish()?;
-    // we return the temp dir reference to keep it around until the file is uploaded
-    Ok((file_path, dir))
+    // we keep the temp dir reference to avoid premature deletion
+    Ok(Archive {
+        path: file_path,
+        _temp_dir: dir,
+    })
 }
diff --git a/src/config.rs b/src/config.rs
index 09689c3..0227566 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -64,25 +64,25 @@ struct Cli {
 }
 
 /// Runtime parameters, parsed, validated and ready to be used
-pub struct Params {
+pub(crate) struct Params {
     /// Which folder to backup
-    pub folder: PathBuf,
+    pub(crate) folder: PathBuf,
     /// An optional interval duration in seconds
-    pub interval: Option<u64>,
+    pub(crate) interval: Option<u64>,
     /// The optional name of the archive that will be uploaded to S3 (without extension)
-    pub filename: Option<String>,
+    pub(crate) filename: Option<String>,
     /// The AWS S3 region, defaults to us-east-1
-    pub aws_region: RegionProviderChain,
+    pub(crate) aws_region: RegionProviderChain,
     /// The AWS S3 bucket name
-    pub aws_bucket: String,
+    pub(crate) aws_bucket: String,
     /// The AWS S3 access key ID
-    pub aws_key_id: String,
+    pub(crate) aws_key_id: String,
     /// The AWS S3 access key
-    pub aws_key: String,
+    pub(crate) aws_key: String,
 }
 
 /// Parse the command-line arguments and environment variables into runtime params
-pub async fn parse_config() -> Result<Params> {
+pub(crate) async fn parse_config() -> Result<Params> {
     // Read from the command-line args, and if not present, check environment variables
     let params = Cli::parse();
 
diff --git a/src/prelude.rs b/src/prelude.rs
index cf73bb5..30476e0 100644
--- a/src/prelude.rs
+++ b/src/prelude.rs
@@ -3,7 +3,7 @@ use std::fmt;
 
 /// An alias for unwrap when the code has been audited to ensure that the value is not None/Err or when panic
 /// is required.
-pub trait OrPanic<T> {
+pub(crate) trait OrPanic<T> {
     /// An alias for unwrap when the code has been audited to ensure that the value is not None/Err or when panic
     /// is required.
     fn or_panic(self) -> T;
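
Note for reviewers: the refactor hinges on the drop semantics of the temp_dir
crate the code already uses. TempDir removes its directory (and everything in
it) in its Drop impl, so the old API, which returned only the PathBuf from
compress_folder and threaded the TempDir alongside it, worked only as long as
every caller remembered to keep the TempDir alive. Bundling both into Archive
ties the directory's lifetime to the path that points into it. The sketch
below is not code from this repo, just a minimal illustration of the pattern;
create_archive, the fixed file name, and the placeholder contents are invented
for the example.

use std::{fs, path::PathBuf};

use temp_dir::TempDir;

/// Couples the archive path to the TempDir that contains it, so the
/// directory is only removed when this value is dropped.
struct Archive {
    path: PathBuf,
    _temp_dir: TempDir, // never read; held only so its Drop runs later
}

fn create_archive() -> Archive {
    let dir = TempDir::new().expect("failed to create temp dir");
    let path = dir.child("backup.tar.gz"); // the real code picks a random name
    fs::write(&path, b"placeholder contents").expect("failed to write archive");
    // Returning only `path` would drop `dir` right here and delete the file;
    // moving `dir` into the struct keeps the directory alive.
    Archive { path, _temp_dir: dir }
}

fn main() {
    let archive = create_archive();
    assert!(archive.path.exists()); // still on disk: `archive` owns the TempDir
    let path = archive.path.clone();
    drop(archive); // TempDir's Drop removes the directory and its contents
    assert!(!path.exists());
}

The leading underscore in `_temp_dir` tells the compiler the field is
intentionally unused, which is also why the patch can keep that field private
while exposing `path` as pub(crate).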