refactor errors #651

Merged · 2 commits · Mar 25, 2025
2 changes: 1 addition & 1 deletion Cargo.lock

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "avml"
-version = "0.14.0"
+version = "0.15.0"
 license = "MIT"
 description = "A portable volatile memory acquisition tool"
 authors = ["avml@microsoft.com"]
46 changes: 34 additions & 12 deletions src/bin/avml-convert.rs
@@ -17,11 +17,15 @@ use std::{
 };

 fn convert(src: &Path, dst: &Path, compress: bool) -> Result<()> {
-    let src_len = metadata(src).map_err(image::Error::Read)?.len();
+    let src_len = metadata(src)
+        .map_err(|e| image::Error::Io(e, "unable to read source size"))?
+        .len();
     let mut image = image::Image::new(1, src, dst)?;

     loop {
-        let current = image.src.stream_position().map_err(image::Error::Read)?;
+        let current = image.src.stream_position().map_err(|e| {
+            image::Error::Io(e, "unable to get current offset into the memory source")
+        })?;
         if current >= src_len {
             break;
         }
@@ -40,7 +44,7 @@ fn convert(src: &Path, dst: &Path, compress: bool) -> Result<()> {
                 image
                     .src
                     .seek(SeekFrom::Current(8))
-                    .map_err(image::Error::Read)?;
+                    .map_err(|e| image::Error::Io(e, "unable to seek past compressed len"))?;
             }
             _ => unimplemented!(),
         }
@@ -50,32 +54,47 @@ fn convert(src: &Path, dst: &Path, compress: bool) -> Result<()> {
 }

 fn convert_to_raw(src: &Path, dst: &Path) -> Result<()> {
-    let src_len = metadata(src).map_err(image::Error::Read)?.len();
+    let src_len = metadata(src)
+        .map_err(|e| image::Error::Io(e, "unable to get source file size"))?
+        .len();
     let mut image = image::Image::new(1, src, dst)?;

     loop {
-        let current = image.src.stream_position().map_err(image::Error::Read)?;
+        let current = image.src.stream_position().map_err(|e| {
+            image::Error::Io(e, "unable to get the current offset into the memory source")
+        })?;
         if current >= src_len {
             break;
         }
-        let current_dst = image.dst.stream_position().map_err(image::Error::Read)?;
+        let current_dst = image.dst.stream_position().map_err(|e| {
+            image::Error::Io(
+                e,
+                "unable to get the current offset into the destination stream",
+            )
+        })?;

         let header = image::Header::read(&image.src)?;
         let mut zeros = vec![0; ONE_MB];

         let mut unmapped = usize::try_from(header.range.start - current_dst)
-            .map_err(|_| image::Error::SizeConversion)?;
+            .map_err(image::Error::IntConversion)?;
         while unmapped > ONE_MB {
-            image.dst.write_all(&zeros).map_err(image::Error::Write)?;
+            image
+                .dst
+                .write_all(&zeros)
+                .map_err(|e| image::Error::Io(e, "unable to write padding bytes"))?;
             unmapped -= ONE_MB;
         }
         if unmapped > 0 {
             zeros.resize(unmapped, 0);
-            image.dst.write_all(&zeros).map_err(image::Error::Write)?;
+            image
+                .dst
+                .write_all(&zeros)
+                .map_err(|e| image::Error::Io(e, "unable to write padding bytes"))?;
         }

         let size = usize::try_from(header.range.end - header.range.start)
-            .map_err(|_| image::Error::SizeConversion)?;
+            .map_err(image::Error::IntConversion)?;

         match header.version {
             1 => {
@@ -87,7 +106,7 @@ fn convert_to_raw(src: &Path, dst: &Path) -> Result<()> {
                 image
                     .src
                     .seek(SeekFrom::Current(8))
-                    .map_err(image::Error::Read)?;
+                    .map_err(|e| image::Error::Io(e, "unable to seek past the compressed size"))?;
             }
             _ => unimplemented!(),
         }
@@ -97,7 +116,10 @@ fn convert_to_raw(src: &Path, dst: &Path) -> Result<()> {
 }

 fn convert_from_raw(src: &Path, dst: &Path, compress: bool) -> Result<()> {
-    let src_len = metadata(src).map_err(image::Error::Read)?.len();
+    let src_len = metadata(src)
+        .map_err(|e| image::Error::Io(e, "unable to read source size"))?
+        .len();

     let ranges = split_ranges(vec![0..src_len; 1], image::MAX_BLOCK_SIZE);

     let version = if compress { 2 } else { 1 };
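
For context on the unchanged `split_ranges` call above: it takes the single `0..src_len` range and chunks it so that no block exceeds `image::MAX_BLOCK_SIZE`. A minimal sketch of that chunking, assuming the real helper behaves equivalently (the implementation below is hypothetical, not the crate's):

use core::ops::Range;

// Hypothetical re-implementation of the chunking behavior of split_ranges;
// assumes max_block > 0.
fn split_ranges(ranges: Vec<Range<u64>>, max_block: u64) -> Vec<Range<u64>> {
    let mut blocks = Vec::new();
    for range in ranges {
        let mut start = range.start;
        while start < range.end {
            // each block covers at most max_block bytes
            let end = range.end.min(start.saturating_add(max_block));
            blocks.push(start..end);
            start = end;
        }
    }
    blocks
}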
4 changes: 3 additions & 1 deletion src/bin/avml-upload.rs
@@ -67,6 +67,8 @@ async fn run(cmd: Cmd) -> Result<()> {

 fn main() -> Result<()> {
     let cmd = Cmd::parse();
-    Runtime::new().map_err(Error::Tokio)?.block_on(run(cmd))?;
+    Runtime::new()
+        .map_err(|e| Error::Io(e, "tokio runtime error"))?
+        .block_on(run(cmd))?;
     Ok(())
 }
7 changes: 4 additions & 3 deletions src/bin/avml.rs
@@ -108,7 +108,7 @@ async fn upload(config: &Config) -> Result<()> {
     if delete && config.delete {
         remove_file(&config.filename)
             .await
-            .map_err(Error::RemoveSnapshot)?;
+            .map_err(|e| Error::Io(e, "unable to remove snapshot"))?;
     }

     Ok(())
@@ -129,8 +129,9 @@ fn main() -> Result<()> {

     #[cfg(any(feature = "blobstore", feature = "put"))]
     {
-        let rt = Runtime::new().map_err(Error::Tokio)?;
-        rt.block_on(upload(&config))?;
+        Runtime::new()
+            .map_err(|e| Error::Io(e, "tokio runtime error"))?
+            .block_on(upload(&config))?;
     }

     Ok(())
10 changes: 2 additions & 8 deletions src/errors.rs
@@ -2,7 +2,6 @@ use core::{
     error::Error as StdError,
     fmt::{Debug as FmtDebug, Formatter, Result as FmtResult},
 };
-#[cfg(any(feature = "blobstore", feature = "put"))]
 use std::io::Error as IoError;

 #[derive(thiserror::Error)]
@@ -24,13 +23,8 @@ pub enum Error {
     #[error("unable to upload file to Azure Storage")]
     Blob(#[from] crate::upload::blobstore::Error),

-    #[cfg(any(feature = "blobstore", feature = "put"))]
-    #[error("tokio runtime error: {0}")]
-    Tokio(#[source] IoError),
-
-    #[cfg(any(feature = "blobstore", feature = "put"))]
-    #[error("unable to remove snapshot")]
-    RemoveSnapshot(#[source] IoError),
+    #[error("io error: {1}")]
+    Io(#[source] IoError, &'static str),

     #[error("no conversion required")]
     NoConversionRequired,
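
The shape of the refactor is visible here: several one-off `IoError` variants collapse into a single `Io` variant pairing the source error with a `&'static str` context supplied at each call site. A minimal standalone sketch of the pattern, assuming the thiserror crate (the `read_len` helper is hypothetical, not part of the PR):

use std::io::Error as IoError;

#[derive(thiserror::Error, Debug)]
pub enum Error {
    // one variant covers all I/O failures; {1} prints the call-site context
    #[error("io error: {1}")]
    Io(#[source] IoError, &'static str),
}

// hypothetical caller showing how context is attached with map_err
fn read_len(path: &std::path::Path) -> Result<u64, Error> {
    let len = std::fs::metadata(path)
        .map_err(|e| Error::Io(e, "unable to read source size"))?
        .len();
    Ok(len)
}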
86 changes: 42 additions & 44 deletions src/image.rs
@@ -14,14 +14,8 @@ use std::{

 #[derive(thiserror::Error, Debug)]
 pub enum Error {
-    #[error("unable to write image")]
-    Write(#[source] std::io::Error),
-
-    #[error("unable to read memory")]
-    Read(#[source] std::io::Error),
-
-    #[error("unable to read header: {1}")]
-    ReadHeader(#[source] std::io::Error, &'static str),
+    #[error("io error: {1}")]
+    Io(#[source] std::io::Error, &'static str),

     #[error("invalid padding")]
     InvalidPadding,
@@ -38,8 +32,8 @@ pub enum Error {
     #[error("write block failed: {0:?}")]
     WriteBlock(Range<u64>),

-    #[error("size conversion error")]
-    SizeConversion,
+    #[error(transparent)]
+    IntConversion(#[from] core::num::TryFromIntError),
 }

 type Result<T> = core::result::Result<T, Error>;
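
The new `IntConversion` variant is what allows later hunks to replace `.map_err(|_| Error::SizeConversion)?` with a bare `?`: `#[from]` derives the `From<TryFromIntError>` impl the `?` operator relies on, while `#[error(transparent)]` forwards `Display` and `source()` to the wrapped error. A minimal sketch of the mechanism, assuming the thiserror crate (`to_usize` is a hypothetical helper):

use core::num::TryFromIntError;

#[derive(thiserror::Error, Debug)]
pub enum Error {
    // transparent: Display and source() delegate to the inner error;
    // from: the ? operator converts TryFromIntError automatically
    #[error(transparent)]
    IntConversion(#[from] TryFromIntError),
}

// hypothetical helper: a failed try_from converts to Error with no map_err
fn to_usize(len: u64) -> Result<usize, Error> {
    Ok(usize::try_from(len)?)
}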
@@ -72,21 +66,21 @@ impl Header {
     pub fn read(mut src: &File) -> Result<Self> {
         let magic = src
             .read_u32::<LittleEndian>()
-            .map_err(|e| Error::ReadHeader(e, "magic"))?;
+            .map_err(|e| Error::Io(e, "unable to read header magic"))?;
         let version = src
             .read_u32::<LittleEndian>()
-            .map_err(|e| Error::ReadHeader(e, "version"))?;
+            .map_err(|e| Error::Io(e, "unable to read header version"))?;
         let start = src
             .read_u64::<LittleEndian>()
-            .map_err(|e| Error::ReadHeader(e, "start offset"))?;
+            .map_err(|e| Error::Io(e, "unable to read header start offset"))?;
         let end = src
             .read_u64::<LittleEndian>()
-            .map_err(|e| Error::ReadHeader(e, "end offset"))?
+            .map_err(|e| Error::Io(e, "unable to read header end offset"))?
             .checked_add(1)
             .ok_or(Error::TooLarge)?;
         let padding = src
             .read_u64::<LittleEndian>()
-            .map_err(|e| Error::ReadHeader(e, "padding"))?;
+            .map_err(|e| Error::Io(e, "unable to read header padding"))?;
         if padding != 0 {
             return Err(Error::InvalidPadding);
         }
@@ -126,7 +120,8 @@ impl Header {
         W: Write,
     {
         let bytes = self.encode()?;
-        dst.write_all(&bytes).map_err(Error::Write)?;
+        dst.write_all(&bytes)
+            .map_err(|e| Error::Io(e, "unable to write header"))?;
         Ok(())
     }
 }
@@ -144,14 +139,18 @@ where
 {
     let mut buf = vec![0; PAGE_SIZE];
     while size >= PAGE_SIZE {
-        src.read_exact(&mut buf).map_err(Error::Read)?;
-        dst.write_all(&buf).map_err(Error::Write)?;
+        src.read_exact(&mut buf)
+            .map_err(|e| Error::Io(e, "unable to read memory page"))?;
+        dst.write_all(&buf)
+            .map_err(|e| Error::Io(e, "unable to write memory page"))?;
         size = size.saturating_sub(PAGE_SIZE);
     }
     if size > 0 {
         buf.resize(size, 0);
-        src.read_exact(&mut buf).map_err(Error::Read)?;
-        dst.write_all(&buf).map_err(Error::Write)?;
+        src.read_exact(&mut buf)
+            .map_err(|e| Error::Io(e, "unable to read memory page"))?;
+        dst.write_all(&buf)
+            .map_err(|e| Error::Io(e, "unable to write memory page"))?;
     }
     Ok(())
 }
@@ -162,14 +161,7 @@ where
     R: Read,
     W: Write,
 {
-    let size = usize::try_from(
-        header
-            .range
-            .end
-            .checked_sub(header.range.start)
-            .ok_or(Error::SizeConversion)?,
-    )
-    .map_err(|_| Error::SizeConversion)?;
+    let size = usize::try_from(header.range.end.saturating_sub(header.range.start))?;

     // read the entire block into memory, but still read page by page
     let mut buf = Cursor::new(vec![0; size]);
@@ -183,21 +175,26 @@ where

     header.write(dst)?;
     if header.version == 1 {
-        dst.write_all(&buf).map_err(Error::Write)?;
+        dst.write_all(&buf)
+            .map_err(|e| Error::Io(e, "unable to write non-zero block"))?;
     } else {
         let count = {
             let mut encoder = SnapWriter::new(dst);
-            encoder.write_all(&buf).map_err(Error::Write)?;
-            let (count, dst_after) = encoder.into_inner().map_err(Error::Write)?;
+            encoder
+                .write_all(&buf)
+                .map_err(|e| Error::Io(e, "unable to write compressed block"))?;
+            let (count, dst_after) = encoder
+                .into_inner()
+                .map_err(|e| Error::Io(e, "unable to flush compressed data"))?;
             dst = dst_after;
-            count
+            count.try_into()?
         };
-        let count = count.try_into().map_err(|_| Error::SizeConversion)?;

         let mut size_bytes = [0; 8];
         LittleEndian::write_u64_into(&[count], &mut size_bytes);

-        dst.write_all(&size_bytes).map_err(Error::Write)?;
+        dst.write_all(&size_bytes)
+            .map_err(|e| Error::Io(e, "unable to write compressed size"))?;
     }
     Ok(())
 }
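
The `(count, dst_after)` pair destructured from `encoder.into_inner()` suggests `SnapWriter` is a byte-counting wrapper that returns both the number of compressed bytes written and the underlying writer. A rough sketch of a writer with that shape, under that reading of the API (compression itself, which the real `SnapWriter` performs, is omitted):

use std::io::{self, Write};

// hypothetical counting writer mirroring the (count, writer) return shape
struct CountingWriter<W> {
    inner: W,
    count: usize,
}

impl<W: Write> CountingWriter<W> {
    fn new(inner: W) -> Self {
        Self { inner, count: 0 }
    }

    // hands back the byte count plus the wrapped writer, matching the
    // destructuring in the diff above
    fn into_inner(self) -> (usize, W) {
        (self.count, self.inner)
    }
}

impl<W: Write> Write for CountingWriter<W> {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        let n = self.inner.write(buf)?;
        self.count += n;
        Ok(n)
    }

    fn flush(&mut self) -> io::Result<()> {
        self.inner.flush()
    }
}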
@@ -208,24 +205,25 @@ where
     W: Write,
 {
     header.write(dst)?;
-    let size = usize::try_from(header.range.end.saturating_sub(header.range.start))
-        .map_err(|_| Error::SizeConversion)?;
+    let size = usize::try_from(header.range.end.saturating_sub(header.range.start))?;

     if header.version == 1 {
         copy(size, src, dst)?;
     } else {
         let count = {
             let mut encoder = SnapWriter::new(dst);
             copy(size, src, &mut encoder)?;
-            let (count, dst_after) = encoder.into_inner().map_err(Error::Write)?;
+            let (count, dst_after) = encoder
+                .into_inner()
+                .map_err(|e| Error::Io(e, "unable to copy decompressed data"))?;
             dst = dst_after;
-            count
+            count.try_into()?
         };
-        let count = count.try_into().map_err(|_| Error::SizeConversion)?;

         let mut size_bytes = [0; 8];
         LittleEndian::write_u64_into(&[count], &mut size_bytes);
-        dst.write_all(&size_bytes).map_err(Error::Write)?;
+        dst.write_all(&size_bytes)
+            .map_err(|e| Error::Io(e, "unable to write compressed size"))?;
     }
     Ok(())
 }
@@ -300,7 +298,7 @@ impl Image {
             .create(true)
             .truncate(true)
             .open(path)
-            .map_err(Error::Write)
+            .map_err(|e| Error::Io(e, "unable to create snapshot file"))
     }

     #[cfg(target_family = "unix")]
@@ -311,7 +309,7 @@ impl Image {
             .create(true)
             .truncate(true)
             .open(path)
-            .map_err(Error::Write)
+            .map_err(|e| Error::Io(e, "unable to create snapshot file"))
     }

     /// Creates a new Image with the specified version, source filename, and destination filename.
@@ -324,7 +322,7 @@ impl Image {
         let src = OpenOptions::new()
             .read(true)
             .open(src_filename)
-            .map_err(Error::Read)?;
+            .map_err(|e| Error::Io(e, "unable to open memory source"))?;

         let dst = Self::open_dst(dst_filename)?;

@@ -352,7 +350,7 @@ impl Image {
         if block.offset > 0 {
             self.src
                 .seek(SeekFrom::Start(block.offset))
-                .map_err(Error::Read)?;
+                .map_err(|e| Error::Io(e, "unable to seek to page"))?;
         }

         copy_block(header, &mut self.src, &mut self.dst)?;