diff --git a/CHANGELOG.md b/CHANGELOG.md index 11e25788..072e5f16 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,12 @@ # Changelog +## \[v0.17.2\] + +This release re-packages the code of vm-memory 0.18.0 while preserving +API compatibility with 0.17.1. All the actual implementation comes from +version 0.18.0, which this crate re-exports with `GuestMemoryBackend` +changed back to `GuestMemory`. + ## \[v0.17.1\] No visible changes. diff --git a/Cargo.toml b/Cargo.toml index 1ce968de..001d0713 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "vm-memory" -version = "0.17.1" +version = "0.17.2" description = "Safe abstractions for accessing the VM physical memory" keywords = ["memory"] categories = ["memory-management"] @@ -13,23 +13,16 @@ autobenches = false [features] default = ["rawfd"] -backend-bitmap = ["dep:libc", "dep:winapi"] -backend-mmap = ["dep:libc", "dep:winapi"] -backend-atomic = ["arc-swap"] -rawfd = ["dep:libc"] -xen = ["backend-mmap", "bitflags", "vmm-sys-util"] +backend-bitmap = ["vm-memory-new/backend-bitmap"] +backend-mmap = ["vm-memory-new/backend-mmap"] +backend-atomic = ["vm-memory-new/backend-atomic"] +rawfd = ["vm-memory-new/rawfd"] +xen = ["vm-memory-new/xen"] -[dependencies] -libc = { version = "0.2.39", optional = true } -arc-swap = { version = "1.0.0", optional = true } -bitflags = { version = "2.4.0", optional = true } -thiserror = "2.0.16" -vmm-sys-util = { version = ">=0.12.1, <=0.15.0", optional = true } - -[target.'cfg(target_family = "windows")'.dependencies.winapi] -version = "0.3" -features = ["errhandlingapi", "sysinfoapi"] -optional = true +[dependencies.vm-memory-new] +package = "vm-memory" +version = "0.18.0" +default-features = false [dev-dependencies] criterion = "0.7.0" @@ -40,10 +33,6 @@ vmm-sys-util = "0.15.0" name = "main" harness = false -[profile.bench] -lto = true -codegen-units = 1 - [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] diff --git a/src/address.rs b/src/address.rs index 639e226b..b0c1fafd 100644 --- a/src/address.rs +++ b/src/address.rs @@ -18,389 +18,8 @@ //! - [Address](trait.Address.html): encapsulates an [`AddressValue`](trait.AddressValue.html) //! object and defines methods to access and manipulate it. -use std::cmp::{Eq, Ord, PartialEq, PartialOrd}; -use std::fmt::Debug; -use std::ops::{Add, BitAnd, BitOr, Not, Sub}; - /// Simple helper trait used to store a raw address value. -pub trait AddressValue { - /// Type of the raw address value. - type V: Copy - + PartialEq - + Eq - + PartialOrd - + Ord - + Not - + Add - + Sub - + BitAnd - + BitOr - + Debug - + From; - - /// Return the value zero, coerced into the value type `Self::V` - fn zero() -> Self::V { - 0u8.into() - } - - /// Return the value zero, coerced into the value type `Self::V` - fn one() -> Self::V { - 1u8.into() - } -} - -/// Trait to represent an address within an address space. -/// -/// To simplify the design and implementation, assume the same raw data type `(AddressValue::V)` -/// could be used to store address, size and offset for the address space. Thus the `Address` trait -/// could be used to manage address, size and offset. On the other hand, type aliases may be -/// defined to improve code readability. -/// -/// One design rule is applied to the `Address` trait, namely that operators (+, -, &, | etc) are -/// not supported and it forces clients to explicitly invoke corresponding methods. But there are -/// always exceptions: -/// `Address` (BitAnd|BitOr) `AddressValue` are supported. 
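For reference, a minimal sketch of the explicit-method style the `Address` trait enforces (overflow-checked calls instead of bare operators, with bitwise and/or as the documented exception); the addresses below are illustrative only:

```rust
use vm_memory::{Address, GuestAddress};

let base = GuestAddress(0x1000);

// Explicit, overflow-checked arithmetic instead of a bare `+`.
let end = base.checked_add(0x200).expect("address overflow");
assert_eq!(end.raw_value(), 0x1200);

// Offsets are computed the same way, through an explicit method call.
assert_eq!(end.checked_offset_from(base), Some(0x200));

// The documented exception: bitwise AND/OR against a raw value is allowed.
assert_eq!(end.mask(0xfff), 0x200);
```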
-pub trait Address: - AddressValue - + Sized - + Default - + Copy - + Eq - + PartialEq - + Ord - + PartialOrd - + BitAnd<::V, Output = Self> - + BitOr<::V, Output = Self> -{ - /// Creates an address from a raw address value. - fn new(addr: Self::V) -> Self; - - /// Returns the raw value of the address. - fn raw_value(&self) -> Self::V; - - /// Returns the bitwise and of the address with the given mask. - fn mask(&self, mask: Self::V) -> Self::V { - self.raw_value() & mask - } - - /// Computes the offset from this address to the given base address. - /// - /// Returns `None` if there is underflow. - fn checked_offset_from(&self, base: Self) -> Option; - - /// Computes the offset from this address to the given base address. - /// - /// In the event of overflow, follows standard Rust behavior, i.e. panic in debug builds, - /// silently wrap in release builds. - /// - /// Note that, unlike the `unchecked_*` methods in std, this method never invokes undefined - /// behavior. - /// # Examples - /// - /// ``` - /// # use vm_memory::{Address, GuestAddress}; - /// # - /// let base = GuestAddress(0x100); - /// let addr = GuestAddress(0x150); - /// assert_eq!(addr.unchecked_offset_from(base), 0x50); - /// ``` - fn unchecked_offset_from(&self, base: Self) -> Self::V { - self.raw_value() - base.raw_value() - } - - /// Returns self, aligned to the given power of two. - fn checked_align_up(&self, power_of_two: Self::V) -> Option { - let mask = power_of_two - Self::one(); - assert_ne!(power_of_two, Self::zero()); - assert_eq!(power_of_two & mask, Self::zero()); - self.checked_add(mask).map(|x| x & !mask) - } - - /// Returns self, aligned to the given power of two. - /// Only use this when the result is guaranteed not to overflow. - fn unchecked_align_up(&self, power_of_two: Self::V) -> Self { - let mask = power_of_two - Self::one(); - self.unchecked_add(mask) & !mask - } - - /// Computes `self + other`, returning `None` if overflow occurred. - fn checked_add(&self, other: Self::V) -> Option; - - /// Computes `self + other`. - /// - /// Returns a tuple of the addition result along with a boolean indicating whether an arithmetic - /// overflow would occur. If an overflow would have occurred then the wrapped address - /// is returned. - fn overflowing_add(&self, other: Self::V) -> (Self, bool); - - /// Computes `self + offset`. - /// - /// In the event of overflow, follows standard Rust behavior, i.e. panic in debug builds, - /// silently wrap in release builds. - /// - /// Note that, unlike the `unchecked_*` methods in std, this method never invokes undefined - /// behavior.. - fn unchecked_add(&self, offset: Self::V) -> Self; - - /// Subtracts two addresses, checking for underflow. If underflow happens, `None` is returned. - fn checked_sub(&self, other: Self::V) -> Option; - - /// Computes `self - other`. - /// - /// Returns a tuple of the subtraction result along with a boolean indicating whether an - /// arithmetic overflow would occur. If an overflow would have occurred then the wrapped - /// address is returned. - fn overflowing_sub(&self, other: Self::V) -> (Self, bool); - - /// Computes `self - other`. - /// - /// In the event of underflow, follows standard Rust behavior, i.e. panic in debug builds, - /// silently wrap in release builds. - /// - /// Note that, unlike the `unchecked_*` methods in std, this method never invokes undefined - /// behavior. - fn unchecked_sub(&self, other: Self::V) -> Self; -} - -macro_rules! 
impl_address_ops { - ($T:ident, $V:ty) => { - impl AddressValue for $T { - type V = $V; - } - - impl Address for $T { - fn new(value: $V) -> $T { - $T(value) - } - - fn raw_value(&self) -> $V { - self.0 - } - - fn checked_offset_from(&self, base: $T) -> Option<$V> { - self.0.checked_sub(base.0) - } - - fn checked_add(&self, other: $V) -> Option<$T> { - self.0.checked_add(other).map($T) - } - - fn overflowing_add(&self, other: $V) -> ($T, bool) { - let (t, ovf) = self.0.overflowing_add(other); - ($T(t), ovf) - } - - fn unchecked_add(&self, offset: $V) -> $T { - $T(self.0 + offset) - } - - fn checked_sub(&self, other: $V) -> Option<$T> { - self.0.checked_sub(other).map($T) - } - - fn overflowing_sub(&self, other: $V) -> ($T, bool) { - let (t, ovf) = self.0.overflowing_sub(other); - ($T(t), ovf) - } - - fn unchecked_sub(&self, other: $V) -> $T { - $T(self.0 - other) - } - } - - impl Default for $T { - fn default() -> $T { - Self::new(0 as $V) - } - } - - impl BitAnd<$V> for $T { - type Output = $T; - - fn bitand(self, other: $V) -> $T { - $T(self.0 & other) - } - } - - impl BitOr<$V> for $T { - type Output = $T; - - fn bitor(self, other: $V) -> $T { - $T(self.0 | other) - } - } - }; -} - -#[cfg(test)] -mod tests { - use super::*; - - #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd)] - struct MockAddress(pub u64); - impl_address_ops!(MockAddress, u64); - - #[test] - fn test_new() { - assert_eq!(MockAddress::new(0), MockAddress(0)); - assert_eq!(MockAddress::new(u64::MAX), MockAddress(u64::MAX)); - } - - #[test] - fn test_offset_from() { - let base = MockAddress(0x100); - let addr = MockAddress(0x150); - assert_eq!(addr.unchecked_offset_from(base), 0x50u64); - assert_eq!(addr.checked_offset_from(base), Some(0x50u64)); - assert_eq!(base.checked_offset_from(addr), None); - } - - #[test] - fn test_equals() { - let a = MockAddress(0x300); - let b = MockAddress(0x300); - let c = MockAddress(0x301); - assert_eq!(a, MockAddress(a.raw_value())); - assert_eq!(a, b); - assert_eq!(b, a); - assert_ne!(a, c); - assert_ne!(c, a); - } - - #[test] - fn test_cmp() { - let a = MockAddress(0x300); - let b = MockAddress(0x301); - assert!(a < b); - } - - #[test] - fn test_checked_align_up() { - assert_eq!( - MockAddress::new(0x128).checked_align_up(8), - Some(MockAddress(0x128)) - ); - assert_eq!( - MockAddress::new(0x128).checked_align_up(16), - Some(MockAddress(0x130)) - ); - assert_eq!( - MockAddress::new(u64::MAX - 0x3fff).checked_align_up(0x10000), - None - ); - } - - #[test] - #[should_panic] - fn test_checked_align_up_invalid() { - let _ = MockAddress::new(0x128).checked_align_up(12); - } - - #[test] - fn test_unchecked_align_up() { - assert_eq!( - MockAddress::new(0x128).unchecked_align_up(8), - MockAddress(0x128) - ); - assert_eq!( - MockAddress::new(0x128).unchecked_align_up(16), - MockAddress(0x130) - ); - } - - #[test] - fn test_mask() { - let a = MockAddress(0x5050); - assert_eq!(MockAddress(0x5000), a & 0xff00u64); - assert_eq!(0x5000, a.mask(0xff00u64)); - assert_eq!(MockAddress(0x5055), a | 0x0005u64); - } - - fn check_add(a: u64, b: u64, expected_overflow: bool, expected_result: u64) { - assert_eq!( - (MockAddress(expected_result), expected_overflow), - MockAddress(a).overflowing_add(b) - ); - if expected_overflow { - assert!(MockAddress(a).checked_add(b).is_none()); - #[cfg(debug_assertions)] - assert!(std::panic::catch_unwind(|| MockAddress(a).unchecked_add(b)).is_err()); - } else { - assert_eq!( - Some(MockAddress(expected_result)), - MockAddress(a).checked_add(b) - ); - assert_eq!( 
- MockAddress(expected_result), - MockAddress(a).unchecked_add(b) - ); - } - } - - #[test] - fn test_add() { - // without overflow - // normal case - check_add(10, 10, false, 20); - // edge case - check_add(u64::MAX - 1, 1, false, u64::MAX); - - // with overflow - check_add(u64::MAX, 1, true, 0); - } - - fn check_sub(a: u64, b: u64, expected_overflow: bool, expected_result: u64) { - assert_eq!( - (MockAddress(expected_result), expected_overflow), - MockAddress(a).overflowing_sub(b) - ); - if expected_overflow { - assert!(MockAddress(a).checked_sub(b).is_none()); - assert!(MockAddress(a).checked_offset_from(MockAddress(b)).is_none()); - #[cfg(debug_assertions)] - assert!(std::panic::catch_unwind(|| MockAddress(a).unchecked_sub(b)).is_err()); - } else { - assert_eq!( - Some(MockAddress(expected_result)), - MockAddress(a).checked_sub(b) - ); - assert_eq!( - Some(expected_result), - MockAddress(a).checked_offset_from(MockAddress(b)) - ); - assert_eq!( - MockAddress(expected_result), - MockAddress(a).unchecked_sub(b) - ); - } - } - - #[test] - fn test_sub() { - // without overflow - // normal case - check_sub(20, 10, false, 10); - // edge case - check_sub(1, 1, false, 0); - - // with underflow - check_sub(0, 1, true, u64::MAX); - } - - #[test] - fn test_default() { - assert_eq!(MockAddress::default(), MockAddress(0)); - } - - #[test] - fn test_bit_and() { - let a = MockAddress(0x0ff0); - assert_eq!(a & 0xf00f, MockAddress(0)); - } - - #[test] - fn test_bit_or() { - let a = MockAddress(0x0ff0); - assert_eq!(a | 0xf00f, MockAddress(0xffff)); - } -} +pub use vm_memory_new::address::{ + AddressValue, + Address, +}; diff --git a/src/atomic.rs b/src/atomic.rs index 87a2c1e3..fd22100f 100644 --- a/src/atomic.rs +++ b/src/atomic.rs @@ -9,274 +9,8 @@ //! To support mutable memory maps, devices will also need to use //! `GuestAddressSpace::memory()` to gain temporary access to guest memory. -extern crate arc_swap; - -use arc_swap::{ArcSwap, Guard}; -use std::ops::Deref; -use std::sync::{Arc, LockResult, Mutex, MutexGuard, PoisonError}; - -use crate::{GuestAddressSpace, GuestMemory}; - -/// A fast implementation of a mutable collection of memory regions. -/// -/// This implementation uses `ArcSwap` to provide RCU-like snapshotting of the memory map: -/// every update of the memory map creates a completely new `GuestMemory` object, and -/// readers will not be blocked because the copies they retrieved will be collected once -/// no one can access them anymore. Under the assumption that updates to the memory map -/// are rare, this allows a very efficient implementation of the `memory()` method. -#[derive(Debug)] -pub struct GuestMemoryAtomic { - // GuestAddressSpace, which we want to implement, is basically a drop-in - // replacement for &M. Therefore, we need to pass to devices the `GuestMemoryAtomic` - // rather than a reference to it. To obtain this effect we wrap the actual fields - // of GuestMemoryAtomic with an Arc, and derive the Clone trait. See the - // documentation for GuestAddressSpace for an example. - inner: Arc<(ArcSwap, Mutex<()>)>, -} - -impl From> for GuestMemoryAtomic { - /// create a new `GuestMemoryAtomic` object whose initial contents come from - /// the `map` reference counted `GuestMemory`. - fn from(map: Arc) -> Self { - let inner = (ArcSwap::new(map), Mutex::new(())); - GuestMemoryAtomic { - inner: Arc::new(inner), - } - } -} - -impl GuestMemoryAtomic { - /// create a new `GuestMemoryAtomic` object whose initial contents come from - /// the `map` `GuestMemory`. 
- pub fn new(map: M) -> Self { - Arc::new(map).into() - } - - fn load(&self) -> Guard> { - self.inner.0.load() - } - - /// Acquires the update mutex for the `GuestMemoryAtomic`, blocking the current - /// thread until it is able to do so. The returned RAII guard allows for - /// scoped unlock of the mutex (that is, the mutex will be unlocked when - /// the guard goes out of scope), and optionally also for replacing the - /// contents of the `GuestMemoryAtomic` when the lock is dropped. - pub fn lock(&self) -> LockResult> { - match self.inner.1.lock() { - Ok(guard) => Ok(GuestMemoryExclusiveGuard { - parent: self, - _guard: guard, - }), - Err(err) => Err(PoisonError::new(GuestMemoryExclusiveGuard { - parent: self, - _guard: err.into_inner(), - })), - } - } -} - -impl Clone for GuestMemoryAtomic { - fn clone(&self) -> Self { - Self { - inner: self.inner.clone(), - } - } -} - -impl GuestAddressSpace for GuestMemoryAtomic { - type T = GuestMemoryLoadGuard; - type M = M; - - fn memory(&self) -> Self::T { - GuestMemoryLoadGuard { guard: self.load() } - } -} - -/// A guard that provides temporary access to a `GuestMemoryAtomic`. This -/// object is returned from the `memory()` method. It dereference to -/// a snapshot of the `GuestMemory`, so it can be used transparently to -/// access memory. -#[derive(Debug)] -pub struct GuestMemoryLoadGuard { - guard: Guard>, -} - -impl GuestMemoryLoadGuard { - /// Make a clone of the held pointer and returns it. This is more - /// expensive than just using the snapshot, but it allows to hold on - /// to the snapshot outside the scope of the guard. It also allows - /// writers to proceed, so it is recommended if the reference must - /// be held for a long time (including for caching purposes). - pub fn into_inner(self) -> Arc { - Guard::into_inner(self.guard) - } -} - -impl Clone for GuestMemoryLoadGuard { - fn clone(&self) -> Self { - GuestMemoryLoadGuard { - guard: Guard::from_inner(Arc::clone(&*self.guard)), - } - } -} - -impl Deref for GuestMemoryLoadGuard { - type Target = M; - - fn deref(&self) -> &Self::Target { - &self.guard - } -} - -/// An RAII implementation of a "scoped lock" for `GuestMemoryAtomic`. When -/// this structure is dropped (falls out of scope) the lock will be unlocked, -/// possibly after updating the memory map represented by the -/// `GuestMemoryAtomic` that created the guard. -#[derive(Debug)] -pub struct GuestMemoryExclusiveGuard<'a, M: GuestMemory> { - parent: &'a GuestMemoryAtomic, - _guard: MutexGuard<'a, ()>, -} - -impl GuestMemoryExclusiveGuard<'_, M> { - /// Replace the memory map in the `GuestMemoryAtomic` that created the guard - /// with the new memory map, `map`. The lock is then dropped since this - /// method consumes the guard. 
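To illustrate the RCU-style flow described above (readers take cheap snapshots, a writer swaps in a whole new map under the update lock), here is a minimal sketch; it assumes the `backend-mmap` and `backend-atomic` features and uses arbitrary addresses:

```rust
use vm_memory::{
    GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryAtomic, GuestMemoryMmap,
};

let initial = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x1000)]).unwrap();
let atomic = GuestMemoryAtomic::new(initial);

// Readers grab an inexpensive snapshot that stays valid across updates.
let snapshot = atomic.memory();
assert_eq!(snapshot.num_regions(), 1);

// A writer builds a new map and swaps it in while holding the update lock.
let updated = GuestMemoryMmap::<()>::from_ranges(&[
    (GuestAddress(0), 0x1000),
    (GuestAddress(0x10_0000), 0x1000),
])
.unwrap();
atomic.lock().unwrap().replace(updated);

// The old snapshot is unchanged; new calls to `memory()` see the new map.
assert_eq!(snapshot.num_regions(), 1);
assert_eq!(atomic.memory().num_regions(), 2);
```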
- pub fn replace(self, map: M) { - self.parent.inner.0.store(Arc::new(map)) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::region::tests::{new_guest_memory_collection_from_regions, Collection, MockRegion}; - use crate::{GuestAddress, GuestMemory, GuestMemoryRegion, GuestUsize}; - - type GuestMemoryMmapAtomic = GuestMemoryAtomic; - - #[test] - fn test_atomic_memory() { - let region_size = 0x400; - let regions = vec![ - (GuestAddress(0x0), region_size), - (GuestAddress(0x1000), region_size), - ]; - let mut iterated_regions = Vec::new(); - let gmm = new_guest_memory_collection_from_regions(®ions).unwrap(); - let gm = GuestMemoryMmapAtomic::new(gmm); - let mem = gm.memory(); - - for region in mem.iter() { - assert_eq!(region.len(), region_size as GuestUsize); - } - - for region in mem.iter() { - iterated_regions.push((region.start_addr(), region.len())); - } - assert_eq!(regions, iterated_regions); - assert_eq!(mem.num_regions(), 2); - assert!(mem.find_region(GuestAddress(0x1000)).is_some()); - assert!(mem.find_region(GuestAddress(0x10000)).is_none()); - - assert!(regions - .iter() - .map(|x| (x.0, x.1)) - .eq(iterated_regions.iter().copied())); - - let mem2 = mem.into_inner(); - for region in mem2.iter() { - assert_eq!(region.len(), region_size as GuestUsize); - } - assert_eq!(mem2.num_regions(), 2); - assert!(mem2.find_region(GuestAddress(0x1000)).is_some()); - assert!(mem2.find_region(GuestAddress(0x10000)).is_none()); - - assert!(regions - .iter() - .map(|x| (x.0, x.1)) - .eq(iterated_regions.iter().copied())); - - let mem3 = mem2.memory(); - for region in mem3.iter() { - assert_eq!(region.len(), region_size as GuestUsize); - } - assert_eq!(mem3.num_regions(), 2); - assert!(mem3.find_region(GuestAddress(0x1000)).is_some()); - assert!(mem3.find_region(GuestAddress(0x10000)).is_none()); - - let gm2 = gm.clone(); - let mem4 = gm2.memory(); - for region in mem4.iter() { - assert_eq!(region.len(), region_size as GuestUsize); - } - assert_eq!(mem4.num_regions(), 2); - assert!(mem4.find_region(GuestAddress(0x1000)).is_some()); - assert!(mem4.find_region(GuestAddress(0x10000)).is_none()); - } - - #[test] - fn test_clone_guard() { - let region_size = 0x400; - let regions = vec![ - (GuestAddress(0x0), region_size), - (GuestAddress(0x1000), region_size), - ]; - let gmm = new_guest_memory_collection_from_regions(®ions).unwrap(); - let gm = GuestMemoryMmapAtomic::new(gmm); - let mem = { - let guard1 = gm.memory(); - Clone::clone(&guard1) - }; - assert_eq!(mem.num_regions(), 2); - } - - #[test] - fn test_atomic_hotplug() { - let region_size = 0x1000; - let regions = [ - (GuestAddress(0x0), region_size), - (GuestAddress(0x10_0000), region_size), - ]; - let mut gmm = Arc::new(new_guest_memory_collection_from_regions(®ions).unwrap()); - let gm: GuestMemoryAtomic<_> = gmm.clone().into(); - let mem_orig = gm.memory(); - assert_eq!(mem_orig.num_regions(), 2); - - { - let guard = gm.lock().unwrap(); - let new_gmm = Arc::make_mut(&mut gmm); - let new_gmm = new_gmm - .insert_region(Arc::new(MockRegion { - start: GuestAddress(0x8000), - len: 0x1000, - })) - .unwrap(); - let new_gmm = new_gmm - .insert_region(Arc::new(MockRegion { - start: GuestAddress(0x4000), - len: 0x1000, - })) - .unwrap(); - let new_gmm = new_gmm - .insert_region(Arc::new(MockRegion { - start: GuestAddress(0xc000), - len: 0x1000, - })) - .unwrap(); - - new_gmm - .insert_region(Arc::new(MockRegion { - start: GuestAddress(0x8000), - len: 0x1000, - })) - .unwrap_err(); - - guard.replace(new_gmm); - } - - 
assert_eq!(mem_orig.num_regions(), 2); - let mem = gm.memory(); - assert_eq!(mem.num_regions(), 5); - } -} +pub use vm_memory_new::atomic::{ + GuestMemoryAtomic, + GuestMemoryLoadGuard, + GuestMemoryExclusiveGuard, +}; diff --git a/src/atomic_integer.rs b/src/atomic_integer.rs index 72ebc48d..011499ca 100644 --- a/src/atomic_integer.rs +++ b/src/atomic_integer.rs @@ -1,107 +1,4 @@ // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause -use std::sync::atomic::Ordering; - -/// # Safety -/// -/// Objects that implement this trait must consist exclusively of atomic types -/// from [`std::sync::atomic`](https://doc.rust-lang.org/std/sync/atomic/), except for -/// [`AtomicPtr`](https://doc.rust-lang.org/std/sync/atomic/struct.AtomicPtr.html) and -/// [`AtomicBool`](https://doc.rust-lang.org/std/sync/atomic/struct.AtomicBool.html). -pub unsafe trait AtomicInteger: Sync + Send { - /// The raw value type associated with the atomic integer (i.e. `u16` for `AtomicU16`). - type V; - - /// Create a new instance of `Self`. - fn new(v: Self::V) -> Self; - - /// Loads a value from the atomic integer. - fn load(&self, order: Ordering) -> Self::V; - - /// Stores a value into the atomic integer. - fn store(&self, val: Self::V, order: Ordering); -} - -macro_rules! impl_atomic_integer_ops { - ($T:path, $V:ty) => { - // SAFETY: This is safe as long as T is an Atomic type. - // This is a helper macro for generating the implementation for common - // Atomic types. - unsafe impl AtomicInteger for $T { - type V = $V; - - fn new(v: Self::V) -> Self { - Self::new(v) - } - - fn load(&self, order: Ordering) -> Self::V { - self.load(order) - } - - fn store(&self, val: Self::V, order: Ordering) { - self.store(val, order) - } - } - }; -} - -// TODO: Detect availability using #[cfg(target_has_atomic) when it is stabilized. -// Right now we essentially assume we're running on either x86 or Arm (32 or 64 bit). AFAIK, -// Rust starts using additional synchronization primitives to implement atomics when they're -// not natively available, and that doesn't interact safely with how we cast pointers to -// atomic value references. We should be wary of this when looking at a broader range of -// platforms. 
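A quick sketch of the trait surface that remains available through the re-export; `store_then_load` is a made-up helper used only for illustration:

```rust
use std::sync::atomic::{AtomicU32, Ordering};
use vm_memory::AtomicInteger;

// A generic helper that works with any `AtomicInteger` backend.
fn store_then_load<A: AtomicInteger>(atomic: &A, val: A::V) -> A::V {
    atomic.store(val, Ordering::Relaxed);
    atomic.load(Ordering::Relaxed)
}

let counter = AtomicU32::new(0);
assert_eq!(store_then_load(&counter, 7), 7);
```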
- -impl_atomic_integer_ops!(std::sync::atomic::AtomicI8, i8); -impl_atomic_integer_ops!(std::sync::atomic::AtomicI16, i16); -impl_atomic_integer_ops!(std::sync::atomic::AtomicI32, i32); -#[cfg(any( - target_arch = "x86_64", - target_arch = "aarch64", - target_arch = "powerpc64", - target_arch = "s390x", - target_arch = "riscv64" -))] -impl_atomic_integer_ops!(std::sync::atomic::AtomicI64, i64); - -impl_atomic_integer_ops!(std::sync::atomic::AtomicU8, u8); -impl_atomic_integer_ops!(std::sync::atomic::AtomicU16, u16); -impl_atomic_integer_ops!(std::sync::atomic::AtomicU32, u32); -#[cfg(any( - target_arch = "x86_64", - target_arch = "aarch64", - target_arch = "powerpc64", - target_arch = "s390x", - target_arch = "riscv64" -))] -impl_atomic_integer_ops!(std::sync::atomic::AtomicU64, u64); - -impl_atomic_integer_ops!(std::sync::atomic::AtomicIsize, isize); -impl_atomic_integer_ops!(std::sync::atomic::AtomicUsize, usize); - -#[cfg(test)] -mod tests { - use super::*; - - use std::fmt::Debug; - use std::sync::atomic::AtomicU32; - - fn check_atomic_integer_ops() - where - A::V: Copy + Debug + From + PartialEq, - { - let v = A::V::from(0); - let a = A::new(v); - assert_eq!(a.load(Ordering::Relaxed), v); - - let v2 = A::V::from(100); - a.store(v2, Ordering::Relaxed); - assert_eq!(a.load(Ordering::Relaxed), v2); - } - - #[test] - fn test_atomic_integer_ops() { - check_atomic_integer_ops::() - } -} +pub use vm_memory_new::AtomicInteger; diff --git a/src/bitmap.rs b/src/bitmap.rs new file mode 100644 index 00000000..fa53c520 --- /dev/null +++ b/src/bitmap.rs @@ -0,0 +1,23 @@ +// Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. +// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause + +//! This module holds abstractions that enable tracking the areas dirtied by writes of a specified +//! length to a given offset. In particular, this is used to track write accesses within a +//! `GuestMemoryRegion` object, and the resulting bitmaps can then be aggregated to build the +//! global view for an entire `GuestMemory` object. + +#[cfg(feature = "backend-bitmap")] +pub use vm_memory_new::bitmap::{ + AtomicBitmap, + ArcSlice, + RefSlice, +}; + +pub use vm_memory_new::bitmap::{ + WithBitmapSlice, + BitmapSlice, + Bitmap, + NewBitmap, + BS, + MS, +}; diff --git a/src/bitmap/backend/atomic_bitmap.rs b/src/bitmap/backend/atomic_bitmap.rs deleted file mode 100644 index ae8b0f9e..00000000 --- a/src/bitmap/backend/atomic_bitmap.rs +++ /dev/null @@ -1,334 +0,0 @@ -// Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. -// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause - -//! Bitmap backend implementation based on atomic integers. - -use std::num::NonZeroUsize; -use std::sync::atomic::{AtomicU64, Ordering}; - -use crate::bitmap::{Bitmap, NewBitmap, RefSlice, WithBitmapSlice}; - -/// `AtomicBitmap` implements a simple bit map on the page level with test and set operations. -/// It is page-size aware, so it converts addresses to page numbers before setting or clearing -/// the bits. -#[derive(Debug)] -pub struct AtomicBitmap { - map: Vec, - size: usize, - byte_size: usize, - page_size: NonZeroUsize, -} - -#[allow(clippy::len_without_is_empty)] -impl AtomicBitmap { - /// Create a new bitmap of `byte_size`, with one bit per page. This is effectively - /// rounded up, and we get a new vector of the next multiple of 64 bigger than `bit_size`. 
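A small sketch of the page-granular behaviour described here (requires the `backend-bitmap` feature; the sizes are arbitrary):

```rust
use std::num::NonZeroUsize;
use vm_memory::bitmap::AtomicBitmap;

// One bit per 4 KiB page: 16 KiB of memory needs 4 bits, held in one u64 word.
let page_size = NonZeroUsize::new(0x1000).unwrap();
let bitmap = AtomicBitmap::new(0x4000, page_size);
assert_eq!(bitmap.len(), 4);

// Dirtying a byte range marks every page it touches.
bitmap.set_addr_range(0x1000, 0x800);
assert!(bitmap.is_addr_set(0x1000));
assert!(!bitmap.is_addr_set(0x2000));
```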
- pub fn new(byte_size: usize, page_size: NonZeroUsize) -> Self { - let num_pages = byte_size.div_ceil(page_size.get()); - let map_size = num_pages.div_ceil(u64::BITS as usize); - let map: Vec = (0..map_size).map(|_| AtomicU64::new(0)).collect(); - - AtomicBitmap { - map, - size: num_pages, - byte_size, - page_size, - } - } - - /// Enlarge this bitmap with enough bits to track `additional_size` additional bytes at page granularity. - /// New bits are initialized to zero. - pub fn enlarge(&mut self, additional_size: usize) { - self.byte_size += additional_size; - self.size = self.byte_size.div_ceil(self.page_size.get()); - let map_size = self.size.div_ceil(u64::BITS as usize); - self.map.resize_with(map_size, Default::default); - } - - /// Is bit `n` set? Bits outside the range of the bitmap are always unset. - pub fn is_bit_set(&self, index: usize) -> bool { - if index < self.size { - (self.map[index >> 6].load(Ordering::Acquire) & (1 << (index & 63))) != 0 - } else { - // Out-of-range bits are always unset. - false - } - } - - /// Is the bit corresponding to address `addr` set? - pub fn is_addr_set(&self, addr: usize) -> bool { - self.is_bit_set(addr / self.page_size) - } - - /// Set a range of `len` bytes starting at `start_addr`. The first bit set in the bitmap - /// is for the page corresponding to `start_addr`, and the last bit that we set corresponds - /// to address `start_addr + len - 1`. - pub fn set_addr_range(&self, start_addr: usize, len: usize) { - self.set_reset_addr_range(start_addr, len, true); - } - - // Set/Reset a range of `len` bytes starting at `start_addr` - // reset parameter determines whether bit will be set/reset - // if set is true then the range of bits will be set to one, - // otherwise zero - fn set_reset_addr_range(&self, start_addr: usize, len: usize, set: bool) { - // Return early in the unlikely event that `len == 0` so the `len - 1` computation - // below does not underflow. - if len == 0 { - return; - } - - let first_bit = start_addr / self.page_size; - // Handle input ranges where `start_addr + len - 1` would otherwise overflow an `usize` - // by ignoring pages at invalid addresses. - let last_bit = start_addr.saturating_add(len - 1) / self.page_size; - for n in first_bit..=last_bit { - if n >= self.size { - // Attempts to set bits beyond the end of the bitmap are simply ignored. - break; - } - if set { - self.map[n >> 6].fetch_or(1 << (n & 63), Ordering::SeqCst); - } else { - self.map[n >> 6].fetch_and(!(1 << (n & 63)), Ordering::SeqCst); - } - } - } - - /// Reset a range of `len` bytes starting at `start_addr`. The first bit set in the bitmap - /// is for the page corresponding to `start_addr`, and the last bit that we set corresponds - /// to address `start_addr + len - 1`. - pub fn reset_addr_range(&self, start_addr: usize, len: usize) { - self.set_reset_addr_range(start_addr, len, false); - } - - /// Set bit to corresponding index - pub fn set_bit(&self, index: usize) { - if index >= self.size { - // Attempts to set bits beyond the end of the bitmap are simply ignored. - return; - } - self.map[index >> 6].fetch_or(1 << (index & 63), Ordering::SeqCst); - } - - /// Reset bit to corresponding index - pub fn reset_bit(&self, index: usize) { - if index >= self.size { - // Attempts to reset bits beyond the end of the bitmap are simply ignored. - return; - } - self.map[index >> 6].fetch_and(!(1 << (index & 63)), Ordering::SeqCst); - } - - /// Get the length of the bitmap in bits (i.e. in how many pages it can represent). 
- pub fn len(&self) -> usize { - self.size - } - - /// Get the size in bytes i.e how many bytes the bitmap can represent, one bit per page. - pub fn byte_size(&self) -> usize { - self.byte_size - } - - /// Atomically get and reset the dirty page bitmap. - pub fn get_and_reset(&self) -> Vec { - self.map - .iter() - .map(|u| u.fetch_and(0, Ordering::SeqCst)) - .collect() - } - - /// Reset all bitmap bits to 0. - pub fn reset(&self) { - for it in self.map.iter() { - it.store(0, Ordering::Release); - } - } -} - -impl Clone for AtomicBitmap { - fn clone(&self) -> Self { - let map = self - .map - .iter() - .map(|i| i.load(Ordering::Acquire)) - .map(AtomicU64::new) - .collect(); - AtomicBitmap { - map, - size: self.size, - byte_size: self.byte_size, - page_size: self.page_size, - } - } -} - -impl<'a> WithBitmapSlice<'a> for AtomicBitmap { - type S = RefSlice<'a, Self>; -} - -impl Bitmap for AtomicBitmap { - fn mark_dirty(&self, offset: usize, len: usize) { - self.set_addr_range(offset, len) - } - - fn dirty_at(&self, offset: usize) -> bool { - self.is_addr_set(offset) - } - - fn slice_at(&self, offset: usize) -> ::S { - RefSlice::new(self, offset) - } -} - -impl Default for AtomicBitmap { - fn default() -> Self { - // SAFETY: Safe as `0x1000` is non-zero. - AtomicBitmap::new(0, const { NonZeroUsize::new(0x1000).unwrap() }) - } -} - -impl NewBitmap for AtomicBitmap { - fn with_len(len: usize) -> Self { - #[cfg(target_family = "unix")] - // SAFETY: There's no unsafe potential in calling this function. - let page_size = unsafe { libc::sysconf(libc::_SC_PAGE_SIZE) }; - - #[cfg(target_family = "windows")] - let page_size = { - use winapi::um::sysinfoapi::GetSystemInfo; - let mut sysinfo = std::mem::MaybeUninit::zeroed(); - // SAFETY: It's safe to call `GetSystemInfo` as `sysinfo` is rightly sized - // allocated memory. - unsafe { GetSystemInfo(sysinfo.as_mut_ptr()) }; - // SAFETY: It's safe to call `assume_init` as `GetSystemInfo` initializes `sysinfo`. - unsafe { sysinfo.assume_init().dwPageSize } - }; - - // The `unwrap` is safe to use because the above call should always succeed on the - // supported platforms, and the size of a page will always fit within a `usize`. - AtomicBitmap::new( - len, - NonZeroUsize::try_from(usize::try_from(page_size).unwrap()).unwrap(), - ) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - use crate::bitmap::tests::test_bitmap; - - #[allow(clippy::undocumented_unsafe_blocks)] - const DEFAULT_PAGE_SIZE: NonZeroUsize = NonZeroUsize::new(128).unwrap(); - - #[test] - fn test_bitmap_basic() { - // Test that bitmap size is properly rounded up. 
- let a = AtomicBitmap::new(1025, DEFAULT_PAGE_SIZE); - assert_eq!(a.len(), 9); - - let b = AtomicBitmap::new(1024, DEFAULT_PAGE_SIZE); - assert_eq!(b.len(), 8); - b.set_addr_range(128, 129); - assert!(!b.is_addr_set(0)); - assert!(b.is_addr_set(128)); - assert!(b.is_addr_set(256)); - assert!(!b.is_addr_set(384)); - - #[allow(clippy::redundant_clone)] - let copy_b = b.clone(); - assert!(copy_b.is_addr_set(256)); - assert!(!copy_b.is_addr_set(384)); - - b.reset(); - assert!(!b.is_addr_set(128)); - assert!(!b.is_addr_set(256)); - assert!(!b.is_addr_set(384)); - - b.set_addr_range(128, 129); - let v = b.get_and_reset(); - - assert!(!b.is_addr_set(128)); - assert!(!b.is_addr_set(256)); - assert!(!b.is_addr_set(384)); - - assert_eq!(v.len(), 1); - assert_eq!(v[0], 0b110); - } - - #[test] - fn test_bitmap_reset() { - let b = AtomicBitmap::new(1024, DEFAULT_PAGE_SIZE); - assert_eq!(b.len(), 8); - b.set_addr_range(128, 129); - assert!(!b.is_addr_set(0)); - assert!(b.is_addr_set(128)); - assert!(b.is_addr_set(256)); - assert!(!b.is_addr_set(384)); - - b.reset_addr_range(128, 129); - assert!(!b.is_addr_set(0)); - assert!(!b.is_addr_set(128)); - assert!(!b.is_addr_set(256)); - assert!(!b.is_addr_set(384)); - } - - #[test] - fn test_bitmap_out_of_range() { - let b = AtomicBitmap::new(1024, NonZeroUsize::MIN); - // Set a partial range that goes beyond the end of the bitmap - b.set_addr_range(768, 512); - assert!(b.is_addr_set(768)); - // The bitmap is never set beyond its end. - assert!(!b.is_addr_set(1024)); - assert!(!b.is_addr_set(1152)); - } - - #[test] - fn test_bitmap_impl() { - let b = AtomicBitmap::new(0x800, DEFAULT_PAGE_SIZE); - test_bitmap(&b); - } - - #[test] - fn test_bitmap_enlarge() { - let mut b = AtomicBitmap::new(8 * 1024, DEFAULT_PAGE_SIZE); - assert_eq!(b.len(), 64); - b.set_addr_range(128, 129); - assert!(!b.is_addr_set(0)); - assert!(b.is_addr_set(128)); - assert!(b.is_addr_set(256)); - assert!(!b.is_addr_set(384)); - - b.reset_addr_range(128, 129); - assert!(!b.is_addr_set(0)); - assert!(!b.is_addr_set(128)); - assert!(!b.is_addr_set(256)); - assert!(!b.is_addr_set(384)); - b.set_addr_range(128, 129); - b.enlarge(8 * 1024); - for i in 65..128 { - assert!(!b.is_bit_set(i)); - } - assert_eq!(b.len(), 128); - assert!(!b.is_addr_set(0)); - assert!(b.is_addr_set(128)); - assert!(b.is_addr_set(256)); - assert!(!b.is_addr_set(384)); - - b.set_bit(55); - assert!(b.is_bit_set(55)); - for i in 65..128 { - b.set_bit(i); - } - for i in 65..128 { - assert!(b.is_bit_set(i)); - } - b.reset_addr_range(0, 16 * 1024); - for i in 0..128 { - assert!(!b.is_bit_set(i)); - } - } -} diff --git a/src/bitmap/backend/mod.rs b/src/bitmap/backend/mod.rs deleted file mode 100644 index 9aa4c3b5..00000000 --- a/src/bitmap/backend/mod.rs +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. -// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause - -mod atomic_bitmap; -mod slice; - -pub use atomic_bitmap::AtomicBitmap; -pub use slice::{ArcSlice, RefSlice}; diff --git a/src/bitmap/backend/slice.rs b/src/bitmap/backend/slice.rs deleted file mode 100644 index 668492f9..00000000 --- a/src/bitmap/backend/slice.rs +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. -// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause - -//! Contains a generic implementation of `BitmapSlice`. 
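The slice types are thin views that add a base offset on top of the parent bitmap; a brief sketch of how that composes (again assuming `backend-bitmap`):

```rust
use std::num::NonZeroUsize;
use vm_memory::bitmap::{AtomicBitmap, Bitmap};

let bitmap = AtomicBitmap::new(0x2000, NonZeroUsize::new(0x1000).unwrap());

// `slice_at` returns a slice whose offsets are relative to 0x1000.
let slice = bitmap.slice_at(0x1000);
slice.mark_dirty(0, 1);

// The write shows up at the corresponding absolute offset in the parent.
assert!(bitmap.is_addr_set(0x1000));
assert!(!bitmap.is_addr_set(0));
```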
- -use std::fmt::{self, Debug}; -use std::ops::Deref; -use std::sync::Arc; - -use crate::bitmap::{Bitmap, BitmapSlice, WithBitmapSlice}; - -/// Represents a slice into a `Bitmap` object, starting at `base_offset`. -#[derive(Clone, Copy)] -pub struct BaseSlice { - inner: B, - base_offset: usize, -} - -impl BaseSlice { - /// Create a new `BitmapSlice`, starting at the specified `offset`. - pub fn new(inner: B, offset: usize) -> Self { - BaseSlice { - inner, - base_offset: offset, - } - } -} - -impl WithBitmapSlice<'_> for BaseSlice -where - B: Clone + Deref, - B::Target: Bitmap, -{ - type S = Self; -} - -impl BitmapSlice for BaseSlice -where - B: Clone + Deref, - B::Target: Bitmap, -{ -} - -impl Bitmap for BaseSlice -where - B: Clone + Deref, - B::Target: Bitmap, -{ - /// Mark the memory range specified by the given `offset` (relative to the base offset of - /// the slice) and `len` as dirtied. - fn mark_dirty(&self, offset: usize, len: usize) { - // The `Bitmap` operations are supposed to accompany guest memory accesses defined by the - // same parameters (i.e. offset & length), so we use simple wrapping arithmetic instead of - // performing additional checks. If an overflow would occur, we simply end up marking some - // other region as dirty (which is just a false positive) instead of a region that could - // not have been accessed to begin with. - self.inner - .mark_dirty(self.base_offset.wrapping_add(offset), len) - } - - fn dirty_at(&self, offset: usize) -> bool { - self.inner.dirty_at(self.base_offset.wrapping_add(offset)) - } - - /// Create a new `BitmapSlice` starting from the specified `offset` into the current slice. - fn slice_at(&self, offset: usize) -> Self { - BaseSlice { - inner: self.inner.clone(), - base_offset: self.base_offset.wrapping_add(offset), - } - } -} - -impl Debug for BaseSlice { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - // Dummy impl for now. - write!(f, "(bitmap slice)") - } -} - -impl Default for BaseSlice { - fn default() -> Self { - BaseSlice { - inner: B::default(), - base_offset: 0, - } - } -} - -/// A `BitmapSlice` implementation that wraps a reference to a `Bitmap` object. -pub type RefSlice<'a, B> = BaseSlice<&'a B>; - -/// A `BitmapSlice` implementation that uses an `Arc` handle to a `Bitmap` object. -pub type ArcSlice = BaseSlice>; - -#[cfg(test)] -mod tests { - use super::*; - - use crate::bitmap::tests::{range_is_clean, range_is_dirty, test_bitmap}; - use crate::bitmap::AtomicBitmap; - use std::num::NonZeroUsize; - - #[test] - fn test_slice() { - let bitmap_size = 0x800; - let dirty_offset = 0x400; - let dirty_len = 0x100; - - { - let bitmap = AtomicBitmap::new(bitmap_size, NonZeroUsize::MIN); - let slice1 = bitmap.slice_at(0); - let slice2 = bitmap.slice_at(dirty_offset); - - assert!(range_is_clean(&slice1, 0, bitmap_size)); - assert!(range_is_clean(&slice2, 0, dirty_len)); - - bitmap.mark_dirty(dirty_offset, dirty_len); - - assert!(range_is_dirty(&slice1, dirty_offset, dirty_len)); - assert!(range_is_dirty(&slice2, 0, dirty_len)); - } - - { - let bitmap = AtomicBitmap::new(bitmap_size, NonZeroUsize::MIN); - let slice = bitmap.slice_at(0); - test_bitmap(&slice); - } - } -} diff --git a/src/bitmap/mod.rs b/src/bitmap/mod.rs deleted file mode 100644 index 352b970c..00000000 --- a/src/bitmap/mod.rs +++ /dev/null @@ -1,336 +0,0 @@ -// Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. -// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause - -//! 
This module holds abstractions that enable tracking the areas dirtied by writes of a specified -//! length to a given offset. In particular, this is used to track write accesses within a -//! `GuestMemoryRegion` object, and the resulting bitmaps can then be aggregated to build the -//! global view for an entire `GuestMemory` object. - -#[cfg(feature = "backend-bitmap")] -mod backend; - -use std::fmt::Debug; - -use crate::{GuestMemory, GuestMemoryRegion}; - -#[cfg(feature = "backend-bitmap")] -pub use backend::{ArcSlice, AtomicBitmap, RefSlice}; - -/// Trait implemented by types that support creating `BitmapSlice` objects. -pub trait WithBitmapSlice<'a> { - /// Type of the bitmap slice. - type S: BitmapSlice; -} - -/// Trait used to represent that a `BitmapSlice` is a `Bitmap` itself, but also satisfies the -/// restriction that slices created from it have the same type as `Self`. -pub trait BitmapSlice: Bitmap + Clone + Debug + for<'a> WithBitmapSlice<'a, S = Self> {} - -/// Common bitmap operations. Using Higher-Rank Trait Bounds (HRTBs) to effectively define -/// an associated type that has a lifetime parameter, without tagging the `Bitmap` trait with -/// a lifetime as well. -/// -/// Using an associated type allows implementing the `Bitmap` and `BitmapSlice` functionality -/// as a zero-cost abstraction when providing trivial implementations such as the one -/// defined for `()`. -// These methods represent the core functionality that's required by `vm-memory` abstractions -// to implement generic tracking logic, as well as tests that can be reused by different backends. -pub trait Bitmap: for<'a> WithBitmapSlice<'a> { - /// Mark the memory range specified by the given `offset` and `len` as dirtied. - fn mark_dirty(&self, offset: usize, len: usize); - - /// Check whether the specified `offset` is marked as dirty. - fn dirty_at(&self, offset: usize) -> bool; - - /// Return a `::S` slice of the current bitmap, starting at - /// the specified `offset`. - fn slice_at(&self, offset: usize) -> ::S; -} - -/// A `Bitmap` that can be created starting from an initial size. -// Cannot be a part of the Bitmap trait itself because it cannot be implemented for BaseSlice -pub trait NewBitmap: Bitmap + Default { - /// Create a new object based on the specified length in bytes. - fn with_len(len: usize) -> Self; -} - -/// A no-op `Bitmap` implementation that can be provided for backends that do not actually -/// require the tracking functionality. -impl WithBitmapSlice<'_> for () { - type S = Self; -} - -impl BitmapSlice for () {} - -impl Bitmap for () { - fn mark_dirty(&self, _offset: usize, _len: usize) {} - - fn dirty_at(&self, _offset: usize) -> bool { - false - } - - fn slice_at(&self, _offset: usize) -> Self {} -} - -impl NewBitmap for () { - fn with_len(_len: usize) -> Self {} -} - -/// A `Bitmap` and `BitmapSlice` implementation for `Option`. 
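The no-op `()` implementation and the `Option` wrapper are what make dirty tracking a zero-cost opt-in; a minimal sketch:

```rust
use vm_memory::bitmap::Bitmap;

// `Option<B>` forwards to the inner bitmap when present and behaves like the
// no-op `()` implementation when it is `None`: nothing is ever reported dirty.
let disabled: Option<()> = None;
disabled.mark_dirty(0x1000, 0x200);
assert!(!disabled.dirty_at(0x1000));
```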
-impl<'a, B> WithBitmapSlice<'a> for Option -where - B: WithBitmapSlice<'a>, -{ - type S = Option; -} - -impl BitmapSlice for Option {} - -impl Bitmap for Option { - fn mark_dirty(&self, offset: usize, len: usize) { - if let Some(inner) = self { - inner.mark_dirty(offset, len) - } - } - - fn dirty_at(&self, offset: usize) -> bool { - if let Some(inner) = self { - return inner.dirty_at(offset); - } - false - } - - fn slice_at(&self, offset: usize) -> Option<::S> { - if let Some(inner) = self { - return Some(inner.slice_at(offset)); - } - None - } -} - -/// Helper type alias for referring to the `BitmapSlice` concrete type associated with -/// an object `B: WithBitmapSlice<'a>`. -pub type BS<'a, B> = >::S; - -/// Helper type alias for referring to the `BitmapSlice` concrete type associated with -/// the memory regions of an object `M: GuestMemory`. -pub type MS<'a, M> = BS<'a, <::R as GuestMemoryRegion>::B>; - -#[cfg(test)] -#[cfg(feature = "backend-bitmap")] -pub(crate) mod tests { - use super::*; - - use std::mem::size_of_val; - use std::sync::atomic::Ordering; - - use crate::{Bytes, VolatileMemory}; - #[cfg(feature = "backend-mmap")] - use crate::{GuestAddress, MemoryRegionAddress}; - - // Helper method to check whether a specified range is clean. - pub fn range_is_clean(b: &B, start: usize, len: usize) -> bool { - (start..start + len).all(|offset| !b.dirty_at(offset)) - } - - // Helper method to check whether a specified range is dirty. - pub fn range_is_dirty(b: &B, start: usize, len: usize) -> bool { - (start..start + len).all(|offset| b.dirty_at(offset)) - } - - pub fn check_range(b: &B, start: usize, len: usize, clean: bool) -> bool { - if clean { - range_is_clean(b, start, len) - } else { - range_is_dirty(b, start, len) - } - } - - // Helper method that tests a generic `B: Bitmap` implementation. It assumes `b` covers - // an area of length at least 0x800. - pub fn test_bitmap(b: &B) { - let len = 0x800; - let dirty_offset = 0x400; - let dirty_len = 0x100; - - // Some basic checks. - let s = b.slice_at(dirty_offset); - - assert!(range_is_clean(b, 0, len)); - assert!(range_is_clean(&s, 0, dirty_len)); - - b.mark_dirty(dirty_offset, dirty_len); - assert!(range_is_dirty(b, dirty_offset, dirty_len)); - assert!(range_is_dirty(&s, 0, dirty_len)); - } - - // `F` and `G` stand for the same closure types as described in the `BytesHelper` comment. - // The `step` parameter represents the offset that's added the the current address after - // performing each access. It provides finer grained control when testing tracking - // implementations that aggregate entire ranges for accounting purposes (for example, doing - // tracking at the page level). - pub fn test_bytes(bytes: &M, check_range_fn: F, address_fn: G, step: usize) - where - F: Fn(&M, usize, usize, bool) -> bool, - G: Fn(usize) -> A, - M: Bytes, - >::E: Debug, - { - const BUF_SIZE: usize = 1024; - let buf = vec![1u8; 1024]; - let mut dirty_offset = 0x1000; - - let val = 1u64; - - // Test `write`. - assert!(check_range_fn(bytes, dirty_offset, BUF_SIZE, true)); - assert_eq!( - bytes - .write(buf.as_slice(), address_fn(dirty_offset)) - .unwrap(), - BUF_SIZE - ); - assert!(check_range_fn(bytes, dirty_offset, BUF_SIZE, false)); - - // Test `write_slice`. - dirty_offset += step; - assert!(check_range_fn(bytes, dirty_offset, BUF_SIZE, true)); - bytes - .write_slice(buf.as_slice(), address_fn(dirty_offset)) - .unwrap(); - assert!(check_range_fn(bytes, dirty_offset, BUF_SIZE, false)); - - // Test `write_obj`. 
- dirty_offset += step; - assert!(check_range_fn(bytes, dirty_offset, BUF_SIZE, true)); - bytes.write_obj(val, address_fn(dirty_offset)).unwrap(); - assert!(check_range_fn(bytes, dirty_offset, BUF_SIZE, false)); - - // Test `store`. - dirty_offset += step; - assert!(check_range_fn(bytes, dirty_offset, BUF_SIZE, true)); - bytes - .store(val, address_fn(dirty_offset), Ordering::Relaxed) - .unwrap(); - assert!(check_range_fn(bytes, dirty_offset, BUF_SIZE, false)); - } - - // This function and the next are currently conditionally compiled because we only use - // them to test the mmap-based backend implementations for now. Going forward, the generic - // test functions defined here can be placed in a separate module (i.e. `test_utilities`) - // which is gated by a feature and can be used for testing purposes by other crates as well. - #[cfg(feature = "backend-mmap")] - fn test_guest_memory_region(region: &R) { - let dirty_addr = MemoryRegionAddress(0x0); - let val = 123u64; - let dirty_len = size_of_val(&val); - - let slice = region.get_slice(dirty_addr, dirty_len).unwrap(); - - assert!(range_is_clean(®ion.bitmap(), 0, region.len() as usize)); - assert!(range_is_clean(slice.bitmap(), 0, dirty_len)); - - region.write_obj(val, dirty_addr).unwrap(); - - assert!(range_is_dirty( - ®ion.bitmap(), - dirty_addr.0 as usize, - dirty_len - )); - - assert!(range_is_dirty(slice.bitmap(), 0, dirty_len)); - - // Finally, let's invoke the generic tests for `R: Bytes`. It's ok to pass the same - // `region` handle because `test_bytes` starts performing writes after the range that's - // been already dirtied in the first part of this test. - test_bytes( - region, - |r: &R, start: usize, len: usize, clean: bool| { - check_range(&r.bitmap(), start, len, clean) - }, - |offset| MemoryRegionAddress(offset as u64), - 0x1000, - ); - } - - #[cfg(feature = "backend-mmap")] - // Assumptions about M generated by f ... - pub fn test_guest_memory_and_region(f: F) - where - M: GuestMemory, - F: Fn() -> M, - { - let m = f(); - let dirty_addr = GuestAddress(0x1000); - let val = 123u64; - let dirty_len = size_of_val(&val); - - let (region, region_addr) = m.to_region_addr(dirty_addr).unwrap(); - let mut slices = m.get_slices(dirty_addr, dirty_len); - let slice = slices.next().unwrap().unwrap(); - assert!(slices.next().is_none()); - - assert!(range_is_clean(®ion.bitmap(), 0, region.len() as usize)); - assert!(range_is_clean(slice.bitmap(), 0, dirty_len)); - - m.write_obj(val, dirty_addr).unwrap(); - - assert!(range_is_dirty( - ®ion.bitmap(), - region_addr.0 as usize, - dirty_len - )); - - assert!(range_is_dirty(slice.bitmap(), 0, dirty_len)); - - // Now let's invoke the tests for the inner `GuestMemoryRegion` type. - test_guest_memory_region(f().find_region(GuestAddress(0)).unwrap()); - - // Finally, let's invoke the generic tests for `Bytes`. 
- let check_range_closure = |m: &M, start: usize, len: usize, clean: bool| -> bool { - m.get_slices(GuestAddress(start as u64), len).all(|r| { - let slice = r.unwrap(); - check_range(slice.bitmap(), 0, slice.len(), clean) - }) - }; - - test_bytes( - &f(), - check_range_closure, - |offset| GuestAddress(offset as u64), - 0x1000, - ); - } - - pub fn test_volatile_memory(m: &M) { - assert!(m.len() >= 0x8000); - - let dirty_offset = 0x1000; - let val = 123u64; - let dirty_len = size_of_val(&val); - - let get_ref_offset = 0x2000; - let array_ref_offset = 0x3000; - - let s1 = m.as_volatile_slice(); - let s2 = m.get_slice(dirty_offset, dirty_len).unwrap(); - - assert!(range_is_clean(s1.bitmap(), 0, s1.len())); - assert!(range_is_clean(s2.bitmap(), 0, s2.len())); - - s1.write_obj(val, dirty_offset).unwrap(); - - assert!(range_is_dirty(s1.bitmap(), dirty_offset, dirty_len)); - assert!(range_is_dirty(s2.bitmap(), 0, dirty_len)); - - let v_ref = m.get_ref::(get_ref_offset).unwrap(); - assert!(range_is_clean(s1.bitmap(), get_ref_offset, dirty_len)); - v_ref.store(val); - assert!(range_is_dirty(s1.bitmap(), get_ref_offset, dirty_len)); - - let arr_ref = m.get_array_ref::(array_ref_offset, 1).unwrap(); - assert!(range_is_clean(s1.bitmap(), array_ref_offset, dirty_len)); - arr_ref.store(0, val); - assert!(range_is_dirty(s1.bitmap(), array_ref_offset, dirty_len)); - } -} diff --git a/src/bytes.rs b/src/bytes.rs index 2b48dbbf..babc9eb2 100644 --- a/src/bytes.rs +++ b/src/bytes.rs @@ -11,662 +11,8 @@ //! Define the `ByteValued` trait to mark that it is safe to instantiate the struct with random //! data. -use std::io::{Read, Write}; -use std::mem::{size_of, MaybeUninit}; -use std::result::Result; -use std::slice::{from_raw_parts, from_raw_parts_mut}; -use std::sync::atomic::Ordering; - -use crate::atomic_integer::AtomicInteger; -use crate::volatile_memory::VolatileSlice; -use crate::{ReadVolatile, WriteVolatile}; - -/// Types for which it is safe to initialize from raw data. -/// -/// # Safety -/// -/// A type `T` is `ByteValued` if and only if it can be initialized by reading its contents from a -/// byte array. This is generally true for all plain-old-data structs. It is notably not true for -/// any type that includes a reference. It is generally also not safe for non-packed structs, as -/// compiler-inserted padding is considered uninitialized memory, and thus reads/writing it will -/// cause undefined behavior. -/// -/// Implementing this trait guarantees that it is safe to instantiate the struct with random data. -pub unsafe trait ByteValued: Copy + Send + Sync { - /// Converts a slice of raw data into a reference of `Self`. - /// - /// The value of `data` is not copied. Instead a reference is made from the given slice. The - /// value of `Self` will depend on the representation of the type in memory, and may change in - /// an unstable fashion. - /// - /// This will return `None` if the length of data does not match the size of `Self`, or if the - /// data is not aligned for the type of `Self`. - fn from_slice(data: &[u8]) -> Option<&Self> { - // Early out to avoid an unneeded `align_to` call. - if data.len() != size_of::() { - return None; - } - - // SAFETY: Safe because the ByteValued trait asserts any data is valid for this type, and - // we ensured the size of the pointer's buffer is the correct size. The `align_to` method - // ensures that we don't have any unaligned references. This aliases a pointer, but because - // the pointer is from a const slice reference, there are no mutable aliases. 
Finally, the - // reference returned can not outlive data because they have equal implicit lifetime - // constraints. - match unsafe { data.align_to::() } { - ([], [mid], []) => Some(mid), - _ => None, - } - } - - /// Converts a mutable slice of raw data into a mutable reference of `Self`. - /// - /// Because `Self` is made from a reference to the mutable slice, mutations to the returned - /// reference are immediately reflected in `data`. The value of the returned `Self` will depend - /// on the representation of the type in memory, and may change in an unstable fashion. - /// - /// This will return `None` if the length of data does not match the size of `Self`, or if the - /// data is not aligned for the type of `Self`. - fn from_mut_slice(data: &mut [u8]) -> Option<&mut Self> { - // Early out to avoid an unneeded `align_to_mut` call. - if data.len() != size_of::() { - return None; - } - - // SAFETY: Safe because the ByteValued trait asserts any data is valid for this type, and - // we ensured the size of the pointer's buffer is the correct size. The `align_to` method - // ensures that we don't have any unaligned references. This aliases a pointer, but because - // the pointer is from a mut slice reference, we borrow the passed in mutable reference. - // Finally, the reference returned can not outlive data because they have equal implicit - // lifetime constraints. - match unsafe { data.align_to_mut::() } { - ([], [mid], []) => Some(mid), - _ => None, - } - } - - /// Converts a reference to `self` into a slice of bytes. - /// - /// The value of `self` is not copied. Instead, the slice is made from a reference to `self`. - /// The value of bytes in the returned slice will depend on the representation of the type in - /// memory, and may change in an unstable fashion. - fn as_slice(&self) -> &[u8] { - // SAFETY: Safe because the entire size of self is accessible as bytes because the trait - // guarantees it. The lifetime of the returned slice is the same as the passed reference, - // so that no dangling pointers will result from this pointer alias. - unsafe { from_raw_parts(self as *const Self as *const u8, size_of::()) } - } - - /// Converts a mutable reference to `self` into a mutable slice of bytes. - /// - /// Because the slice is made from a reference to `self`, mutations to the returned slice are - /// immediately reflected in `self`. The value of bytes in the returned slice will depend on - /// the representation of the type in memory, and may change in an unstable fashion. - fn as_mut_slice(&mut self) -> &mut [u8] { - // SAFETY: Safe because the entire size of self is accessible as bytes because the trait - // guarantees it. The trait also guarantees that any combination of bytes is valid for this - // type, so modifying them in the form of a byte slice is valid. The lifetime of the - // returned slice is the same as the passed reference, so that no dangling pointers will - // result from this pointer alias. Although this does alias a mutable pointer, we do so by - // exclusively borrowing the given mutable reference. - unsafe { from_raw_parts_mut(self as *mut Self as *mut u8, size_of::()) } - } - - /// Converts a mutable reference to `self` into a `VolatileSlice`. This is - /// useful because `VolatileSlice` provides a `Bytes` implementation. - fn as_bytes(&mut self) -> VolatileSlice { - VolatileSlice::from(self.as_mut_slice()) - } - - /// Constructs a `Self` ewhose binary representation is set to all zeroes. 
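A short sketch of what implementing the trait looks like in practice; `Header` is a made-up example type:

```rust
use vm_memory::ByteValued;

// A packed plain-old-data struct: no padding, no references, and any byte
// pattern is a valid value.
#[derive(Clone, Copy)]
#[repr(C, packed)]
struct Header {
    magic: u32,
    len: u32,
}

// SAFETY: `Header` is `repr(C, packed)` and made only of integers, so it can
// be safely initialized from (and viewed as) raw bytes.
unsafe impl ByteValued for Header {}

let header = Header { magic: 0xfeed, len: 8 };
let bytes = header.as_slice();
assert_eq!(bytes.len(), 8);

let parsed = Header::from_slice(bytes).expect("size and alignment match");
assert_eq!({ parsed.magic }, 0xfeed);
```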
- fn zeroed() -> Self { - // SAFETY: ByteValued objects must be assignable from arbitrary byte - // sequences and are mandated to be packed. - // Hence, zeroed memory is a fine initialization. - unsafe { MaybeUninit::::zeroed().assume_init() } - } - - /// Writes this [`ByteValued`]'s byte representation to the given [`Write`] impl. - fn write_all_to(&self, mut writer: W) -> Result<(), std::io::Error> { - writer.write_all(self.as_slice()) - } - - /// Constructs an instance of this [`ByteValued`] by reading from the given [`Read`] impl. - fn read_exact_from(mut reader: R) -> Result { - let mut result = Self::zeroed(); - reader.read_exact(result.as_mut_slice()).map(|_| result) - } -} - -macro_rules! byte_valued_array { - ($T:ty, $($N:expr)+) => { - $( - // SAFETY: All intrinsic types and arrays of intrinsic types are ByteValued. - // They are just numbers. - unsafe impl ByteValued for [$T; $N] {} - )+ - } -} - -macro_rules! byte_valued_type { - ($T:ty) => { - // SAFETY: Safe as long T is POD. - // We are using this macro to generated the implementation for integer types below. - unsafe impl ByteValued for $T {} - byte_valued_array! { - $T, - 0 1 2 3 4 5 6 7 8 9 - 10 11 12 13 14 15 16 17 18 19 - 20 21 22 23 24 25 26 27 28 29 - 30 31 32 - } - }; -} - -byte_valued_type!(u8); -byte_valued_type!(u16); -byte_valued_type!(u32); -byte_valued_type!(u64); -byte_valued_type!(u128); -byte_valued_type!(usize); -byte_valued_type!(i8); -byte_valued_type!(i16); -byte_valued_type!(i32); -byte_valued_type!(i64); -byte_valued_type!(i128); -byte_valued_type!(isize); - -/// A trait used to identify types which can be accessed atomically by proxy. -pub trait AtomicAccess: - ByteValued - // Could not find a more succinct way of stating that `Self` can be converted - // into `Self::A::V`, and the other way around. - + From<<::A as AtomicInteger>::V> - + Into<<::A as AtomicInteger>::V> -{ - /// The `AtomicInteger` that atomic operations on `Self` are based on. - type A: AtomicInteger; -} - -macro_rules! impl_atomic_access { - ($T:ty, $A:path) => { - impl AtomicAccess for $T { - type A = $A; - } - }; -} - -impl_atomic_access!(i8, std::sync::atomic::AtomicI8); -impl_atomic_access!(i16, std::sync::atomic::AtomicI16); -impl_atomic_access!(i32, std::sync::atomic::AtomicI32); -#[cfg(any( - target_arch = "x86_64", - target_arch = "aarch64", - target_arch = "powerpc64", - target_arch = "s390x", - target_arch = "riscv64" -))] -impl_atomic_access!(i64, std::sync::atomic::AtomicI64); - -impl_atomic_access!(u8, std::sync::atomic::AtomicU8); -impl_atomic_access!(u16, std::sync::atomic::AtomicU16); -impl_atomic_access!(u32, std::sync::atomic::AtomicU32); -#[cfg(any( - target_arch = "x86_64", - target_arch = "aarch64", - target_arch = "powerpc64", - target_arch = "s390x", - target_arch = "riscv64" -))] -impl_atomic_access!(u64, std::sync::atomic::AtomicU64); - -impl_atomic_access!(isize, std::sync::atomic::AtomicIsize); -impl_atomic_access!(usize, std::sync::atomic::AtomicUsize); - -/// A container to host a range of bytes and access its content. -/// -/// Candidates which may implement this trait include: -/// - anonymous memory areas -/// - mmapped memory areas -/// - data files -/// - a proxy to access memory on remote -pub trait Bytes { - /// Associated error codes - type E; - - /// Writes a slice into the container at `addr`. - /// - /// Returns the number of bytes written. The number of bytes written can - /// be less than the length of the slice if there isn't enough room in the - /// container. 
- /// - /// If the given slice is empty (e.g. has length 0), always returns `Ok(0)`, even if `addr` - /// is otherwise out of bounds. However, if the container is empty, it will - /// return an error (unless the slice is also empty, in which case the above takes precedence). - /// - /// ```rust - /// # use vm_memory::{Bytes, VolatileMemoryError, VolatileSlice}; - /// # use matches::assert_matches; - /// let mut arr = [1, 2, 3, 4, 5]; - /// let slice = VolatileSlice::from(arr.as_mut_slice()); - /// - /// assert_eq!(slice.write(&[1, 2, 3], 0).unwrap(), 3); - /// assert_eq!(slice.write(&[1, 2, 3], 3).unwrap(), 2); - /// assert_matches!( - /// slice.write(&[1, 2, 3], 5).unwrap_err(), - /// VolatileMemoryError::OutOfBounds { addr: 5 } - /// ); - /// assert_eq!(slice.write(&[], 5).unwrap(), 0); - /// ``` - fn write(&self, buf: &[u8], addr: A) -> Result; - - /// Reads data from the container at `addr` into a slice. - /// - /// Returns the number of bytes read. The number of bytes read can be less than the length - /// of the slice if there isn't enough data within the container. - /// - /// If the given slice is empty (e.g. has length 0), always returns `Ok(0)`, even if `addr` - /// is otherwise out of bounds. However, if the container is empty, it will - /// return an error (unless the slice is also empty, in which case the above takes precedence). - fn read(&self, buf: &mut [u8], addr: A) -> Result; - - /// Writes the entire content of a slice into the container at `addr`. - /// - /// If the given slice is empty (e.g. has length 0), always returns `Ok(0)`, even if `addr` - /// is otherwise out of bounds. - /// - /// # Errors - /// - /// Returns an error if there isn't enough space within the container to write the entire slice. - /// Part of the data may have been copied nevertheless. - fn write_slice(&self, buf: &[u8], addr: A) -> Result<(), Self::E>; - - /// Reads data from the container at `addr` to fill an entire slice. - /// - /// If the given slice is empty (e.g. has length 0), always returns `Ok(0)`, even if `addr` - /// is otherwise out of bounds. - /// - /// # Errors - /// - /// Returns an error if there isn't enough data within the container to fill the entire slice. - /// Part of the data may have been copied nevertheless. - fn read_slice(&self, buf: &mut [u8], addr: A) -> Result<(), Self::E>; - - /// Writes an object into the container at `addr`. - /// - /// # Errors - /// - /// Returns an error if the object doesn't fit inside the container. - fn write_obj(&self, val: T, addr: A) -> Result<(), Self::E> { - self.write_slice(val.as_slice(), addr) - } - - /// Reads an object from the container at `addr`. - /// - /// Reading from a volatile area isn't strictly safe as it could change mid-read. - /// However, as long as the type T is plain old data and can handle random initialization, - /// everything will be OK. - /// - /// # Errors - /// - /// Returns an error if there's not enough data inside the container. - fn read_obj(&self, addr: A) -> Result { - let mut result = T::zeroed(); - self.read_slice(result.as_mut_slice(), addr).map(|_| result) - } - - /// Reads up to `count` bytes from `src` and writes them into the container at `addr`. - /// Unlike `VolatileRead::read_volatile`, this function retries on `EINTR` being returned from - /// the underlying I/O `read` operation. - /// - /// Returns the number of bytes written into the container. - /// - /// # Arguments - /// * `addr` - Begin writing at this address. - /// * `src` - Copy from `src` into the container. 
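A minimal sketch of the object-level accessors on a `VolatileSlice`, which implements `Bytes<usize>` (the addresses and values are arbitrary):

```rust
use vm_memory::{Bytes, VolatileSlice};

fn main() {
    let mut backing = [0u8; 16];
    let slice = VolatileSlice::from(backing.as_mut_slice());

    // `write_obj`/`read_obj` move a whole `ByteValued` object in one call and
    // fail outright if it does not fit at the given address.
    slice.write_obj(0x1122_3344u32, 4).unwrap();
    assert_eq!(slice.read_obj::<u32>(4).unwrap(), 0x1122_3344);

    // `write`/`read` are the partial variants: they report how many bytes
    // actually fit instead of returning an error.
    assert_eq!(slice.write(&[0xAA; 8], 12).unwrap(), 4);
}
```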
- /// * `count` - Copy `count` bytes from `src` into the container. - /// - /// # Examples - /// - /// * Read bytes from /dev/urandom (uses the `backend-mmap` feature) - /// - /// ``` - /// # #[cfg(all(feature = "backend-mmap", feature = "rawfd"))] - /// # { - /// # use vm_memory::{Address, GuestMemory, Bytes, GuestAddress, GuestMemoryMmap}; - /// # use std::fs::File; - /// # use std::path::Path; - /// # - /// # let start_addr = GuestAddress(0x1000); - /// # let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)]) - /// # .expect("Could not create guest memory"); - /// # let addr = GuestAddress(0x1010); - /// # let mut file = if cfg!(target_family = "unix") { - /// let mut file = File::open(Path::new("/dev/urandom")).expect("Could not open /dev/urandom"); - /// # file - /// # } else { - /// # File::open(Path::new("c:\\Windows\\system32\\ntoskrnl.exe")) - /// # .expect("Could not open c:\\Windows\\system32\\ntoskrnl.exe") - /// # }; - /// - /// gm.read_volatile_from(addr, &mut file, 128) - /// .expect("Could not read from /dev/urandom into guest memory"); - /// - /// let read_addr = addr.checked_add(8).expect("Could not compute read address"); - /// let rand_val: u32 = gm - /// .read_obj(read_addr) - /// .expect("Could not read u32 val from /dev/urandom"); - /// # } - /// ``` - fn read_volatile_from(&self, addr: A, src: &mut F, count: usize) -> Result - where - F: ReadVolatile; - - /// Reads exactly `count` bytes from an object and writes them into the container at `addr`. - /// - /// # Errors - /// - /// Returns an error if `count` bytes couldn't have been copied from `src` to the container. - /// Part of the data may have been copied nevertheless. - /// - /// # Arguments - /// * `addr` - Begin writing at this address. - /// * `src` - Copy from `src` into the container. - /// * `count` - Copy exactly `count` bytes from `src` into the container. - fn read_exact_volatile_from( - &self, - addr: A, - src: &mut F, - count: usize, - ) -> Result<(), Self::E> - where - F: ReadVolatile; - - /// Reads up to `count` bytes from the container at `addr` and writes them into `dst`. - /// Unlike `VolatileWrite::write_volatile`, this function retries on `EINTR` being returned by - /// the underlying I/O `write` operation. - /// - /// Returns the number of bytes written into the object. - /// - /// # Arguments - /// * `addr` - Begin reading from this address. - /// * `dst` - Copy from the container to `dst`. - /// * `count` - Copy `count` bytes from the container to `dst`. - fn write_volatile_to(&self, addr: A, dst: &mut F, count: usize) -> Result - where - F: WriteVolatile; - - /// Reads exactly `count` bytes from the container at `addr` and writes them into an object. - /// - /// # Errors - /// - /// Returns an error if `count` bytes couldn't have been copied from the container to `dst`. - /// Part of the data may have been copied nevertheless. - /// - /// # Arguments - /// * `addr` - Begin reading from this address. - /// * `dst` - Copy from the container to `dst`. - /// * `count` - Copy exactly `count` bytes from the container to `dst`. - fn write_all_volatile_to(&self, addr: A, dst: &mut F, count: usize) -> Result<(), Self::E> - where - F: WriteVolatile; - - /// Atomically store a value at the specified address. - fn store(&self, val: T, addr: A, order: Ordering) -> Result<(), Self::E>; - - /// Atomically load a value from the specified address. 
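A short sketch of the atomic accessors, assuming the `backend-mmap` feature for a concrete `Bytes<GuestAddress>` implementation:

```rust
// Requires the `backend-mmap` feature.
use std::sync::atomic::Ordering;
use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};

fn main() {
    let mem = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x1000), 0x1000)])
        .expect("Could not create guest memory");

    // `store`/`load` perform a single access of the value's width at a
    // naturally aligned address, so another thread (e.g. a vCPU) can never
    // observe a torn 32-bit value.
    mem.store(0xCAFE_F00Du32, GuestAddress(0x1008), Ordering::Release)
        .unwrap();
    let val: u32 = mem.load(GuestAddress(0x1008), Ordering::Acquire).unwrap();
    assert_eq!(val, 0xCAFE_F00D);
}
```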
- fn load(&self, addr: A, order: Ordering) -> Result; -} - -#[cfg(test)] -pub(crate) mod tests { - #![allow(clippy::undocumented_unsafe_blocks)] - use super::*; - - use std::cell::RefCell; - use std::fmt::Debug; - use std::io::ErrorKind; - use std::mem::align_of; - - // Helper method to test atomic accesses for a given `b: Bytes` that's supposed to be - // zero-initialized. - pub fn check_atomic_accesses(b: B, addr: A, bad_addr: A) - where - A: Copy, - B: Bytes, - B::E: Debug, - { - let val = 100u32; - - assert_eq!(b.load::(addr, Ordering::Relaxed).unwrap(), 0); - b.store(val, addr, Ordering::Relaxed).unwrap(); - assert_eq!(b.load::(addr, Ordering::Relaxed).unwrap(), val); - - assert!(b.load::(bad_addr, Ordering::Relaxed).is_err()); - assert!(b.store(val, bad_addr, Ordering::Relaxed).is_err()); - } - - fn check_byte_valued_type() - where - T: ByteValued + PartialEq + Debug + Default, - { - let mut data = [0u8; 48]; - let pre_len = { - let (pre, _, _) = unsafe { data.align_to::() }; - pre.len() - }; - { - let aligned_data = &mut data[pre_len..pre_len + size_of::()]; - { - let mut val: T = Default::default(); - assert_eq!(T::from_slice(aligned_data), Some(&val)); - assert_eq!(T::from_mut_slice(aligned_data), Some(&mut val)); - assert_eq!(val.as_slice(), aligned_data); - assert_eq!(val.as_mut_slice(), aligned_data); - } - } - for i in 1..size_of::().min(align_of::()) { - let begin = pre_len + i; - let end = begin + size_of::(); - let unaligned_data = &mut data[begin..end]; - { - if align_of::() != 1 { - assert_eq!(T::from_slice(unaligned_data), None); - assert_eq!(T::from_mut_slice(unaligned_data), None); - } - } - } - // Check the early out condition - { - assert!(T::from_slice(&data).is_none()); - assert!(T::from_mut_slice(&mut data).is_none()); - } - } - - #[test] - fn test_byte_valued() { - check_byte_valued_type::(); - check_byte_valued_type::(); - check_byte_valued_type::(); - check_byte_valued_type::(); - check_byte_valued_type::(); - check_byte_valued_type::(); - check_byte_valued_type::(); - check_byte_valued_type::(); - check_byte_valued_type::(); - check_byte_valued_type::(); - check_byte_valued_type::(); - check_byte_valued_type::(); - } - - pub const MOCK_BYTES_CONTAINER_SIZE: usize = 10; - - pub struct MockBytesContainer { - container: RefCell<[u8; MOCK_BYTES_CONTAINER_SIZE]>, - } - - impl MockBytesContainer { - pub fn new() -> Self { - MockBytesContainer { - container: RefCell::new([0; MOCK_BYTES_CONTAINER_SIZE]), - } - } - - pub fn validate_slice_op(&self, buf: &[u8], addr: usize) -> Result<(), ()> { - if MOCK_BYTES_CONTAINER_SIZE - buf.len() <= addr { - return Err(()); - } - - Ok(()) - } - } - - impl Bytes for MockBytesContainer { - type E = (); - - fn write(&self, _: &[u8], _: usize) -> Result { - unimplemented!() - } - - fn read(&self, _: &mut [u8], _: usize) -> Result { - unimplemented!() - } - - fn write_slice(&self, buf: &[u8], addr: usize) -> Result<(), Self::E> { - self.validate_slice_op(buf, addr)?; - - let mut container = self.container.borrow_mut(); - container[addr..addr + buf.len()].copy_from_slice(buf); - - Ok(()) - } - - fn read_slice(&self, buf: &mut [u8], addr: usize) -> Result<(), Self::E> { - self.validate_slice_op(buf, addr)?; - - let container = self.container.borrow(); - buf.copy_from_slice(&container[addr..addr + buf.len()]); - - Ok(()) - } - - fn read_volatile_from( - &self, - _addr: usize, - _src: &mut F, - _count: usize, - ) -> Result - where - F: ReadVolatile, - { - unimplemented!() - } - - fn read_exact_volatile_from( - &self, - _addr: usize, - 
_src: &mut F, - _count: usize, - ) -> Result<(), Self::E> - where - F: ReadVolatile, - { - unimplemented!() - } - - fn write_volatile_to( - &self, - _addr: usize, - _dst: &mut F, - _count: usize, - ) -> Result - where - F: WriteVolatile, - { - unimplemented!() - } - - fn write_all_volatile_to( - &self, - _addr: usize, - _dst: &mut F, - _count: usize, - ) -> Result<(), Self::E> - where - F: WriteVolatile, - { - unimplemented!() - } - - fn store( - &self, - _val: T, - _addr: usize, - _order: Ordering, - ) -> Result<(), Self::E> { - unimplemented!() - } - - fn load(&self, _addr: usize, _order: Ordering) -> Result { - unimplemented!() - } - } - - #[test] - fn test_bytes() { - let bytes = MockBytesContainer::new(); - - assert!(bytes.write_obj(u64::MAX, 0).is_ok()); - assert_eq!(bytes.read_obj::(0).unwrap(), u64::MAX); - - assert!(bytes - .write_obj(u64::MAX, MOCK_BYTES_CONTAINER_SIZE) - .is_err()); - assert!(bytes.read_obj::(MOCK_BYTES_CONTAINER_SIZE).is_err()); - } - - #[repr(C)] - #[derive(Copy, Clone, Default, Debug)] - struct S { - a: u32, - b: u32, - } - - unsafe impl ByteValued for S {} - - #[test] - fn byte_valued_slice() { - let a: [u8; 8] = [0, 0, 0, 0, 1, 1, 1, 1]; - let mut s: S = Default::default(); - s.as_bytes().copy_from(&a); - assert_eq!(s.a, 0); - assert_eq!(s.b, 0x0101_0101); - } - - #[test] - fn test_byte_valued_io() { - let a: [u8; 8] = [0, 0, 0, 0, 1, 1, 1, 1]; - - let result = S::read_exact_from(&a[1..]); - assert_eq!(result.unwrap_err().kind(), ErrorKind::UnexpectedEof); - - let s = S::read_exact_from(&a[..]).unwrap(); - assert_eq!(s.a, 0); - assert_eq!(s.b, 0x0101_0101); - - let mut b = Vec::new(); - s.write_all_to(&mut b).unwrap(); - assert_eq!(a.as_ref(), b.as_slice()); - - let mut b = [0; 7]; - let result = s.write_all_to(b.as_mut_slice()); - assert_eq!(result.unwrap_err().kind(), ErrorKind::WriteZero); - } - - #[test] - fn test_byte_valued_zeroed() { - let s = S::zeroed(); - - assert!(s.as_slice().iter().all(|&b| b == 0x0)); - } -} +pub use vm_memory_new::bytes::{ + AtomicAccess, + ByteValued, + Bytes, +}; diff --git a/src/endian.rs b/src/endian.rs index 40e49b14..195f4bad 100644 --- a/src/endian.rs +++ b/src/endian.rs @@ -33,127 +33,13 @@ //! assert_ne!(b_trans, l_trans); //! ``` -use std::mem::{align_of, size_of}; - -use crate::bytes::ByteValued; - -macro_rules! const_assert { - ($condition:expr) => { - let _ = [(); 0 - !$condition as usize]; - }; -} - -macro_rules! endian_type { - ($old_type:ident, $new_type:ident, $to_new:ident, $from_new:ident) => { - /// An unsigned integer type of with an explicit endianness. - /// - /// See module level documentation for examples. - #[derive(Copy, Clone, Eq, PartialEq, Debug, Default)] - #[repr(transparent)] - pub struct $new_type($old_type); - - impl $new_type { - fn _assert() { - const_assert!(align_of::<$new_type>() == align_of::<$old_type>()); - const_assert!(size_of::<$new_type>() == size_of::<$old_type>()); - } - - /// Converts `self` to the native endianness. - pub fn to_native(self) -> $old_type { - $old_type::$from_new(self.0) - } - } - - // SAFETY: Safe because we are using this for implementing ByteValued for endian types - // which are POD. 
- unsafe impl ByteValued for $new_type {} - - impl PartialEq<$old_type> for $new_type { - fn eq(&self, other: &$old_type) -> bool { - self.0 == $old_type::$to_new(*other) - } - } - - impl PartialEq<$new_type> for $old_type { - fn eq(&self, other: &$new_type) -> bool { - $old_type::$to_new(other.0) == *self - } - } - - impl From<$new_type> for $old_type { - fn from(v: $new_type) -> $old_type { - v.to_native() - } - } - - impl From<$old_type> for $new_type { - fn from(v: $old_type) -> $new_type { - $new_type($old_type::$to_new(v)) - } - } - }; -} - -endian_type!(u16, Le16, to_le, from_le); -endian_type!(u32, Le32, to_le, from_le); -endian_type!(u64, Le64, to_le, from_le); -endian_type!(usize, LeSize, to_le, from_le); -endian_type!(u16, Be16, to_be, from_be); -endian_type!(u32, Be32, to_be, from_be); -endian_type!(u64, Be64, to_be, from_be); -endian_type!(usize, BeSize, to_be, from_be); - -#[cfg(test)] -mod tests { - #![allow(clippy::undocumented_unsafe_blocks)] - use super::*; - - use std::convert::From; - use std::mem::transmute; - - #[cfg(target_endian = "little")] - const NATIVE_LITTLE: bool = true; - #[cfg(target_endian = "big")] - const NATIVE_LITTLE: bool = false; - const NATIVE_BIG: bool = !NATIVE_LITTLE; - - macro_rules! endian_test { - ($old_type:ty, $new_type:ty, $test_name:ident, $native:expr) => { - mod $test_name { - use super::*; - - #[allow(overflowing_literals)] - #[test] - fn test_endian_type() { - <$new_type>::_assert(); - - let v = 0x0123_4567_89AB_CDEF as $old_type; - let endian_v: $new_type = From::from(v); - let endian_into: $old_type = endian_v.into(); - let endian_transmute: $old_type = unsafe { transmute(endian_v) }; - - if $native { - assert_eq!(endian_v, endian_transmute); - } else { - assert_eq!(endian_v, endian_transmute.swap_bytes()); - } - - assert_eq!(endian_into, v); - assert_eq!(endian_v.to_native(), v); - - assert!(v == endian_v); - assert!(endian_v == v); - } - } - }; - } - - endian_test!(u16, Le16, test_le16, NATIVE_LITTLE); - endian_test!(u32, Le32, test_le32, NATIVE_LITTLE); - endian_test!(u64, Le64, test_le64, NATIVE_LITTLE); - endian_test!(usize, LeSize, test_le_size, NATIVE_LITTLE); - endian_test!(u16, Be16, test_be16, NATIVE_BIG); - endian_test!(u32, Be32, test_be32, NATIVE_BIG); - endian_test!(u64, Be64, test_be64, NATIVE_BIG); - endian_test!(usize, BeSize, test_be_size, NATIVE_BIG); -} +pub use vm_memory_new::endian::{ + Le16, + Le32, + Le64, + LeSize, + Be16, + Be32, + Be64, + BeSize, +}; diff --git a/src/guest_memory.rs b/src/guest_memory.rs index 7136db35..83cc5e24 100644 --- a/src/guest_memory.rs +++ b/src/guest_memory.rs @@ -41,929 +41,14 @@ //! via pointers, references, or slices returned by methods of `GuestMemory`,`GuestMemoryRegion`, //! `VolatileSlice`, `VolatileRef`, or `VolatileArrayRef`. -use std::convert::From; -use std::fs::File; -use std::io; -use std::iter::FusedIterator; -use std::mem::size_of; -use std::ops::{BitAnd, BitOr, Deref}; -use std::rc::Rc; -use std::sync::atomic::Ordering; -use std::sync::Arc; - -use crate::address::{Address, AddressValue}; -use crate::bitmap::MS; -use crate::bytes::{AtomicAccess, Bytes}; -use crate::io::{ReadVolatile, WriteVolatile}; -use crate::volatile_memory::{self, VolatileSlice}; -use crate::GuestMemoryRegion; - -/// Errors associated with handling guest memory accesses. -#[allow(missing_docs)] -#[derive(Debug, thiserror::Error)] -pub enum Error { - /// Failure in finding a guest address in any memory regions mapped by this guest. 
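A small usage sketch of the endian wrappers defined above (values are arbitrary):

```rust
use vm_memory::{Be32, Le32};

fn main() {
    // Construction goes through `From`, which performs the byte swap (if any)
    // for the requested endianness; `to_native` converts back.
    let le: Le32 = 0x1234_5678u32.into();
    let be: Be32 = 0x1234_5678u32.into();

    assert_eq!(le.to_native(), 0x1234_5678);
    assert_eq!(be.to_native(), 0x1234_5678);

    // Comparisons against the native integer type are endianness-aware, too.
    assert!(le == 0x1234_5678u32);
    assert!(be == 0x1234_5678u32);
}
```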
- #[error("Guest memory error: invalid guest address {}",.0.raw_value())] - InvalidGuestAddress(GuestAddress), - /// Couldn't read/write from the given source. - #[error("Guest memory error: {0}")] - IOError(io::Error), - /// Incomplete read or write. - #[error("Guest memory error: only used {completed} bytes in {expected} long buffer")] - PartialBuffer { expected: usize, completed: usize }, - /// Requested backend address is out of range. - #[error("Guest memory error: invalid backend address")] - InvalidBackendAddress, - /// Host virtual address not available. - #[error("Guest memory error: host virtual address not available")] - HostAddressNotAvailable, - /// The length returned by the callback passed to `try_access` is outside the address range. - #[error( - "The length returned by the callback passed to `try_access` is outside the address range." - )] - CallbackOutOfRange, - /// The address to be read by `try_access` is outside the address range. - #[error("The address to be read by `try_access` is outside the address range")] - GuestAddressOverflow, -} - -impl From for Error { - fn from(e: volatile_memory::Error) -> Self { - match e { - volatile_memory::Error::OutOfBounds { .. } => Error::InvalidBackendAddress, - volatile_memory::Error::Overflow { .. } => Error::InvalidBackendAddress, - volatile_memory::Error::TooBig { .. } => Error::InvalidBackendAddress, - volatile_memory::Error::Misaligned { .. } => Error::InvalidBackendAddress, - volatile_memory::Error::IOError(e) => Error::IOError(e), - volatile_memory::Error::PartialBuffer { - expected, - completed, - } => Error::PartialBuffer { - expected, - completed, - }, - } - } -} - -/// Result of guest memory operations. -pub type Result = std::result::Result; - -/// Represents a guest physical address (GPA). -/// -/// # Notes: -/// On ARM64, a 32-bit hypervisor may be used to support a 64-bit guest. For simplicity, -/// `u64` is used to store the the raw value no matter if the guest a 32-bit or 64-bit virtual -/// machine. -#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd)] -pub struct GuestAddress(pub u64); -impl_address_ops!(GuestAddress, u64); - -/// Represents an offset inside a region. -#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd)] -pub struct MemoryRegionAddress(pub u64); -impl_address_ops!(MemoryRegionAddress, u64); - -/// Type of the raw value stored in a `GuestAddress` object. -pub type GuestUsize = ::V; - -/// Represents the start point within a `File` that backs a `GuestMemoryRegion`. -#[derive(Clone, Debug)] -pub struct FileOffset { - file: Arc, - start: u64, -} - -impl FileOffset { - /// Creates a new `FileOffset` object. - pub fn new(file: File, start: u64) -> Self { - FileOffset::from_arc(Arc::new(file), start) - } - - /// Creates a new `FileOffset` object based on an exiting `Arc`. - pub fn from_arc(file: Arc, start: u64) -> Self { - FileOffset { file, start } - } - - /// Returns a reference to the inner `File` object. - pub fn file(&self) -> &File { - self.file.as_ref() - } - - /// Return a reference to the inner `Arc` object. - pub fn arc(&self) -> &Arc { - &self.file - } - - /// Returns the start offset within the file. - pub fn start(&self) -> u64 { - self.start - } -} - -/// `GuestAddressSpace` provides a way to retrieve a `GuestMemory` object. 
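A brief sketch of explicit `GuestAddress` arithmetic via the `Address` trait, matching the checked/overflowing style used throughout this module:

```rust
use vm_memory::{Address, GuestAddress};

fn main() {
    let base = GuestAddress(0x1000);

    // Arithmetic is explicit: the checked variants surface overflow as `None`
    // instead of wrapping silently.
    let end = base.checked_add(0x0fff).expect("no overflow");
    assert_eq!(end.raw_value(), 0x1fff);
    assert_eq!(GuestAddress(u64::MAX).checked_add(1), None);

    // `overflowing_add` reports wrap-around explicitly; this is what the
    // slice iterator below uses to detect guest address overflow.
    let (wrapped, overflowed) = GuestAddress(u64::MAX).overflowing_add(1);
    assert_eq!(wrapped, GuestAddress(0));
    assert!(overflowed);
}
```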
-/// The vm-memory crate already provides trivial implementation for -/// references to `GuestMemory` or reference-counted `GuestMemory` objects, -/// but the trait can also be implemented by any other struct in order -/// to provide temporary access to a snapshot of the memory map. -/// -/// In order to support generic mutable memory maps, devices (or other things -/// that access memory) should store the memory as a `GuestAddressSpace`. -/// This example shows that references can also be used as the `GuestAddressSpace` -/// implementation, providing a zero-cost abstraction whenever immutable memory -/// maps are sufficient. -/// -/// # Examples (uses the `backend-mmap` and `backend-atomic` features) -/// -/// ``` -/// # #[cfg(feature = "backend-mmap")] -/// # { -/// # use std::sync::Arc; -/// # use vm_memory::{GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryMmap}; -/// # -/// pub struct VirtioDevice { -/// mem: Option, -/// } -/// -/// impl VirtioDevice { -/// fn new() -> Self { -/// VirtioDevice { mem: None } -/// } -/// fn activate(&mut self, mem: AS) { -/// self.mem = Some(mem) -/// } -/// } -/// -/// fn get_mmap() -> GuestMemoryMmap<()> { -/// let start_addr = GuestAddress(0x1000); -/// GuestMemoryMmap::from_ranges(&vec![(start_addr, 0x400)]) -/// .expect("Could not create guest memory") -/// } -/// -/// // Using `VirtioDevice` with an immutable GuestMemoryMmap: -/// let mut for_immutable_mmap = VirtioDevice::<&GuestMemoryMmap<()>>::new(); -/// let mmap = get_mmap(); -/// for_immutable_mmap.activate(&mmap); -/// let mut another = VirtioDevice::<&GuestMemoryMmap<()>>::new(); -/// another.activate(&mmap); -/// -/// # #[cfg(feature = "backend-atomic")] -/// # { -/// # use vm_memory::GuestMemoryAtomic; -/// // Using `VirtioDevice` with a mutable GuestMemoryMmap: -/// let mut for_mutable_mmap = VirtioDevice::>>::new(); -/// let atomic = GuestMemoryAtomic::new(get_mmap()); -/// for_mutable_mmap.activate(atomic.clone()); -/// let mut another = VirtioDevice::>>::new(); -/// another.activate(atomic.clone()); -/// -/// // atomic can be modified here... -/// # } -/// # } -/// ``` -pub trait GuestAddressSpace: Clone { - /// The type that will be used to access guest memory. - type M: GuestMemory; - - /// A type that provides access to the memory. - type T: Clone + Deref; - - /// Return an object (e.g. a reference or guard) that can be used - /// to access memory through this address space. The object provides - /// a consistent snapshot of the memory map. - fn memory(&self) -> Self::T; -} - -impl GuestAddressSpace for &M { - type M = M; - type T = Self; - - fn memory(&self) -> Self { - self - } -} - -impl GuestAddressSpace for Rc { - type M = M; - type T = Self; - - fn memory(&self) -> Self { - self.clone() - } -} - -impl GuestAddressSpace for Arc { - type M = M; - type T = Self; - - fn memory(&self) -> Self { - self.clone() - } -} - -/// `GuestMemory` represents a container for an *immutable* collection of -/// `GuestMemoryRegion` objects. `GuestMemory` provides the `Bytes` -/// trait to hide the details of accessing guest memory by physical address. -/// Interior mutability is not allowed for implementations of `GuestMemory` so -/// that they always provide a consistent view of the memory map. -/// -/// The task of the `GuestMemory` trait are: -/// - map a request address to a `GuestMemoryRegion` object and relay the request to it. -/// - handle cases where an access request spanning two or more `GuestMemoryRegion` objects. 
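A minimal sketch of code that stays generic over `GuestAddressSpace` (the `poke` helper is illustrative; the concrete backend requires the `backend-mmap` feature):

```rust
// Requires the `backend-mmap` feature.
use vm_memory::{Bytes, GuestAddress, GuestAddressSpace, GuestMemoryMmap};

// Code that only asks for a `GuestAddressSpace` works with a plain reference,
// an `Rc`/`Arc`, or an atomically swappable map alike.
fn poke<AS: GuestAddressSpace>(space: AS) {
    // `memory()` yields a guard/snapshot that derefs to a `GuestMemory`.
    let mem = space.memory();
    mem.write_obj(42u8, GuestAddress(0x1000)).unwrap();
}

fn main() {
    let mmap = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x1000), 0x400)])
        .expect("Could not create guest memory");
    // `&GuestMemoryMmap` implements `GuestAddressSpace` out of the box.
    poke(&mmap);
}
```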
-pub trait GuestMemory { - /// Type of objects hosted by the address space. - type R: GuestMemoryRegion; - - /// Returns the number of regions in the collection. - fn num_regions(&self) -> usize { - self.iter().count() - } - - /// Returns the region containing the specified address or `None`. - fn find_region(&self, addr: GuestAddress) -> Option<&Self::R> { - self.iter() - .find(|region| addr >= region.start_addr() && addr <= region.last_addr()) - } - - /// Gets an iterator over the entries in the collection. - /// - /// # Examples - /// - /// * Compute the total size of all memory mappings in KB by iterating over the memory regions - /// and dividing their sizes to 1024, then summing up the values in an accumulator. (uses the - /// `backend-mmap` feature) - /// - /// ``` - /// # #[cfg(feature = "backend-mmap")] - /// # { - /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryRegion, GuestMemoryMmap}; - /// # - /// let start_addr1 = GuestAddress(0x0); - /// let start_addr2 = GuestAddress(0x400); - /// let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr1, 1024), (start_addr2, 2048)]) - /// .expect("Could not create guest memory"); - /// - /// let total_size = gm - /// .iter() - /// .map(|region| region.len() / 1024) - /// .fold(0, |acc, size| acc + size); - /// assert_eq!(3, total_size) - /// # } - /// ``` - fn iter(&self) -> impl Iterator; - - /// Returns the maximum (inclusive) address managed by the - /// [`GuestMemory`](trait.GuestMemory.html). - /// - /// # Examples (uses the `backend-mmap` feature) - /// - /// ``` - /// # #[cfg(feature = "backend-mmap")] - /// # { - /// # use vm_memory::{Address, GuestAddress, GuestMemory, GuestMemoryMmap}; - /// # - /// let start_addr = GuestAddress(0x1000); - /// let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)]) - /// .expect("Could not create guest memory"); - /// - /// assert_eq!(start_addr.checked_add(0x3ff), Some(gm.last_addr())); - /// # } - /// ``` - fn last_addr(&self) -> GuestAddress { - self.iter() - .map(GuestMemoryRegion::last_addr) - .fold(GuestAddress(0), std::cmp::max) - } - - /// Tries to convert an absolute address to a relative address within the corresponding region. - /// - /// Returns `None` if `addr` isn't present within the memory of the guest. - fn to_region_addr(&self, addr: GuestAddress) -> Option<(&Self::R, MemoryRegionAddress)> { - self.find_region(addr) - .map(|r| (r, r.to_region_addr(addr).unwrap())) - } - - /// Returns `true` if the given address is present within the memory of the guest. - fn address_in_range(&self, addr: GuestAddress) -> bool { - self.find_region(addr).is_some() - } - - /// Returns the given address if it is present within the memory of the guest. - fn check_address(&self, addr: GuestAddress) -> Option { - self.find_region(addr).map(|_| addr) - } - - /// Check whether the range [base, base + len) is valid. - fn check_range(&self, base: GuestAddress, len: usize) -> bool { - // get_slices() ensures that if no error happens, the cumulative length of all slices - // equal `len`. - self.get_slices(base, len).all(|r| r.is_ok()) - } - - /// Returns the address plus the offset if it is present within the memory of the guest. - fn checked_offset(&self, base: GuestAddress, offset: usize) -> Option { - base.checked_add(offset as u64) - .and_then(|addr| self.check_address(addr)) - } - - /// Invokes callback `f` to handle data in the address range `[addr, addr + count)`. 
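A short sketch of the lookup and range-checking helpers, assuming the `backend-mmap` feature (the hole between the two regions is deliberate):

```rust
// Requires the `backend-mmap` feature.
use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap};

fn main() {
    // Two disjoint regions with a hole in between.
    let mem = GuestMemoryMmap::<()>::from_ranges(&[
        (GuestAddress(0x0000), 0x1000),
        (GuestAddress(0x2000), 0x1000),
    ])
    .expect("Could not create guest memory");

    assert_eq!(mem.num_regions(), 2);
    assert!(mem.address_in_range(GuestAddress(0x0800)));
    assert!(!mem.address_in_range(GuestAddress(0x1800))); // falls in the hole

    // `check_range` also catches ranges that start in a valid region but run
    // into the hole.
    assert!(mem.check_range(GuestAddress(0x0f00), 0x100));
    assert!(!mem.check_range(GuestAddress(0x0f00), 0x200));

    // `checked_offset` combines address arithmetic with a validity check.
    assert_eq!(
        mem.checked_offset(GuestAddress(0x2000), 0xfff),
        Some(GuestAddress(0x2fff))
    );
}
```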
- /// - /// The address range `[addr, addr + count)` may span more than one - /// [`GuestMemoryRegion`](trait.GuestMemoryRegion.html) object, or even have holes in it. - /// So [`try_access()`](trait.GuestMemory.html#method.try_access) invokes the callback 'f' - /// for each [`GuestMemoryRegion`](trait.GuestMemoryRegion.html) object involved and returns: - /// - the error code returned by the callback 'f' - /// - the size of the already handled data when encountering the first hole - /// - the size of the already handled data when the whole range has been handled - #[deprecated( - since = "0.17.0", - note = "supplemented by external iterator `get_slices()`" - )] - fn try_access(&self, count: usize, addr: GuestAddress, mut f: F) -> Result - where - F: FnMut(usize, usize, MemoryRegionAddress, &Self::R) -> Result, - { - let mut cur = addr; - let mut total = 0; - while let Some(region) = self.find_region(cur) { - let start = region.to_region_addr(cur).unwrap(); - let cap = region.len() - start.raw_value(); - let len = std::cmp::min(cap, (count - total) as GuestUsize); - match f(total, len as usize, start, region) { - // no more data - Ok(0) => return Ok(total), - // made some progress - Ok(len) => { - total = match total.checked_add(len) { - Some(x) if x < count => x, - Some(x) if x == count => return Ok(x), - _ => return Err(Error::CallbackOutOfRange), - }; - cur = match cur.overflowing_add(len as GuestUsize) { - (x @ GuestAddress(0), _) | (x, false) => x, - (_, true) => return Err(Error::GuestAddressOverflow), - }; - } - // error happened - e => return e, - } - } - if total == 0 { - Err(Error::InvalidGuestAddress(addr)) - } else { - Ok(total) - } - } - - /// Get the host virtual address corresponding to the guest address. - /// - /// Some [`GuestMemory`](trait.GuestMemory.html) implementations, like `GuestMemoryMmap`, - /// have the capability to mmap the guest address range into virtual address space of the host - /// for direct access, so the corresponding host virtual address may be passed to other - /// subsystems. - /// - /// # Note - /// The underlying guest memory is not protected from memory aliasing, which breaks the - /// Rust memory safety model. It's the caller's responsibility to ensure that there's no - /// concurrent accesses to the underlying guest memory. - /// - /// # Arguments - /// * `addr` - Guest address to convert. - /// - /// # Examples (uses the `backend-mmap` feature) - /// - /// ``` - /// # #[cfg(feature = "backend-mmap")] - /// # { - /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap}; - /// # - /// # let start_addr = GuestAddress(0x1000); - /// # let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x500)]) - /// # .expect("Could not create guest memory"); - /// # - /// let addr = gm - /// .get_host_address(GuestAddress(0x1200)) - /// .expect("Could not get host address"); - /// println!("Host address is {:p}", addr); - /// # } - /// ``` - fn get_host_address(&self, addr: GuestAddress) -> Result<*mut u8> { - self.to_region_addr(addr) - .ok_or(Error::InvalidGuestAddress(addr)) - .and_then(|(r, addr)| r.get_host_address(addr)) - } - - /// Returns a [`VolatileSlice`](struct.VolatileSlice.html) of `count` bytes starting at - /// `addr`. 
- fn get_slice(&self, addr: GuestAddress, count: usize) -> Result>> { - self.to_region_addr(addr) - .ok_or(Error::InvalidGuestAddress(addr)) - .and_then(|(r, addr)| r.get_slice(addr, count)) - } - - /// Returns an iterator over [`VolatileSlice`](struct.VolatileSlice.html)s, together covering - /// `count` bytes starting at `addr`. - /// - /// Iterating in this way is necessary because the given address range may be fragmented across - /// multiple [`GuestMemoryRegion`]s. - /// - /// The iterator’s items are wrapped in [`Result`], i.e. errors are reported on individual - /// items. If there is no such error, the cumulative length of all items will be equal to - /// `count`. If `count` is 0, an empty iterator will be returned. - fn get_slices<'a>( - &'a self, - addr: GuestAddress, - count: usize, - ) -> GuestMemorySliceIterator<'a, Self> { - GuestMemorySliceIterator { - mem: self, - addr, - count, - } - } -} - -/// Iterates over [`VolatileSlice`]s that together form a guest memory area. -/// -/// Returned by [`GuestMemory::get_slices()`]. -#[derive(Debug)] -pub struct GuestMemorySliceIterator<'a, M: GuestMemory + ?Sized> { - /// Underlying memory - mem: &'a M, - /// Next address in the guest memory area - addr: GuestAddress, - /// Remaining bytes in the guest memory area - count: usize, -} - -impl<'a, M: GuestMemory + ?Sized> GuestMemorySliceIterator<'a, M> { - /// Helper function for [`::next()`](GuestMemorySliceIterator::next). - /// - /// Get the next slice (i.e. the one starting from `self.addr` with a length up to - /// `self.count`) and update the internal state. - /// - /// # Safety - /// - /// This function does not reset to `self.count` to 0 in case of error, i.e. will not stop - /// iterating. Actual behavior after an error is ill-defined, so the caller must check the - /// return value, and in case of an error, reset `self.count` to 0. - /// - /// (This is why this function exists, so this resetting can be done in a single central - /// location.) - unsafe fn do_next(&mut self) -> Option>>> { - if self.count == 0 { - return None; - } - - let Some((region, start)) = self.mem.to_region_addr(self.addr) else { - return Some(Err(Error::InvalidGuestAddress(self.addr))); - }; - - let cap = region.len() - start.raw_value(); - let len = std::cmp::min(cap as usize, self.count); - - self.count -= len; - self.addr = match self.addr.overflowing_add(len as GuestUsize) { - (x @ GuestAddress(0), _) | (x, false) => x, - (_, true) => return Some(Err(Error::GuestAddressOverflow)), - }; - - Some(region.get_slice(start, len).inspect(|s| { - assert_eq!( - s.len(), - len, - "get_slice() returned a slice with wrong length" - ) - })) - } - - /// Adapts this [`GuestMemorySliceIterator`] to return `None` (e.g. gracefully terminate) - /// when it encounters an error after successfully producing at least one slice. - /// Return an error if requesting the first slice returns an error. 
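A small sketch of iterating `get_slices()` over an access that crosses a region boundary, assuming the `backend-mmap` feature; this is the pattern that replaces the deprecated `try_access`:

```rust
// Requires the `backend-mmap` feature.
use vm_memory::{Bytes, GuestAddress, GuestMemory, GuestMemoryMmap};

fn main() {
    // Two adjacent regions, so a single access may span both of them.
    let mem = GuestMemoryMmap::<()>::from_ranges(&[
        (GuestAddress(0x0000), 0x1000),
        (GuestAddress(0x1000), 0x1000),
    ])
    .expect("Could not create guest memory");

    // Zero a 512-byte range that crosses the region boundary, one
    // `VolatileSlice` at a time.
    let zeros = [0u8; 0x200];
    let mut done = 0;
    for slice in mem.get_slices(GuestAddress(0x0f00), zeros.len()) {
        let slice = slice.expect("range is entirely backed by guest memory");
        // `write` truncates to the slice length and reports how much it wrote.
        done += slice.write(&zeros[done..], 0).unwrap();
    }
    assert_eq!(done, zeros.len());
}
```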
- pub fn stop_on_error(self) -> Result>>> { - let mut peek = self.peekable(); - if let Some(err) = peek.next_if(Result::is_err) { - return Err(err.unwrap_err()); - } - Ok(peek.filter_map(Result::ok)) - } -} - -impl<'a, M: GuestMemory + ?Sized> Iterator for GuestMemorySliceIterator<'a, M> { - type Item = Result>>; - - fn next(&mut self) -> Option { - // SAFETY: - // We reset `self.count` to 0 on error - match unsafe { self.do_next() } { - Some(Ok(slice)) => Some(Ok(slice)), - other => { - // On error (or end), reset to 0 so iteration remains stopped - self.count = 0; - other - } - } - } -} - -/// This iterator continues to return `None` when exhausted. -/// -/// [`::next()`](GuestMemorySliceIterator::next) sets `self.count` to 0 when -/// returning `None`, ensuring that it will only return `None` from that point on. -impl FusedIterator for GuestMemorySliceIterator<'_, M> {} - -impl Bytes for T { - type E = Error; - - fn write(&self, buf: &[u8], addr: GuestAddress) -> Result { - self.get_slices(addr, buf.len()) - .stop_on_error()? - .try_fold(0, |acc, slice| Ok(acc + slice.write(&buf[acc..], 0)?)) - } - - fn read(&self, buf: &mut [u8], addr: GuestAddress) -> Result { - self.get_slices(addr, buf.len()) - .stop_on_error()? - .try_fold(0, |acc, slice| Ok(acc + slice.read(&mut buf[acc..], 0)?)) - } - - /// # Examples - /// - /// * Write a slice at guestaddress 0x1000. (uses the `backend-mmap` feature) - /// - /// ``` - /// # #[cfg(feature = "backend-mmap")] - /// # { - /// # use vm_memory::{Bytes, GuestAddress, mmap::GuestMemoryMmap}; - /// # - /// # let start_addr = GuestAddress(0x1000); - /// # let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)]) - /// # .expect("Could not create guest memory"); - /// # - /// gm.write_slice(&[1, 2, 3, 4, 5], start_addr) - /// .expect("Could not write slice to guest memory"); - /// # } - /// ``` - fn write_slice(&self, buf: &[u8], addr: GuestAddress) -> Result<()> { - let res = self.write(buf, addr)?; - if res != buf.len() { - return Err(Error::PartialBuffer { - expected: buf.len(), - completed: res, - }); - } - Ok(()) - } - - /// # Examples - /// - /// * Read a slice of length 16 at guestaddress 0x1000. (uses the `backend-mmap` feature) - /// - /// ``` - /// # #[cfg(feature = "backend-mmap")] - /// # { - /// # use vm_memory::{Bytes, GuestAddress, mmap::GuestMemoryMmap}; - /// # - /// let start_addr = GuestAddress(0x1000); - /// let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)]) - /// .expect("Could not create guest memory"); - /// let buf = &mut [0u8; 16]; - /// - /// gm.read_slice(buf, start_addr) - /// .expect("Could not read slice from guest memory"); - /// # } - /// ``` - fn read_slice(&self, buf: &mut [u8], addr: GuestAddress) -> Result<()> { - let res = self.read(buf, addr)?; - if res != buf.len() { - return Err(Error::PartialBuffer { - expected: buf.len(), - completed: res, - }); - } - Ok(()) - } - - fn read_volatile_from(&self, addr: GuestAddress, src: &mut F, count: usize) -> Result - where - F: ReadVolatile, - { - self.get_slices(addr, count) - .stop_on_error()? - .try_fold(0, |acc, slice| { - Ok(acc + slice.read_volatile_from(0, src, slice.len())?) 
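A brief sketch of the blanket `Bytes<GuestAddress>` implementation in action, assuming the `backend-mmap` feature; the access deliberately straddles two regions:

```rust
// Requires the `backend-mmap` feature.
use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};

fn main() {
    // Two adjacent regions; the blanket `Bytes<GuestAddress>` implementation
    // hides the region boundary from the caller.
    let mem = GuestMemoryMmap::<()>::from_ranges(&[
        (GuestAddress(0x0000), 0x1000),
        (GuestAddress(0x1000), 0x1000),
    ])
    .expect("Could not create guest memory");

    let data = [1u8, 2, 3, 4, 5, 6, 7, 8];
    // The write starts in the first region and finishes in the second one.
    mem.write_slice(&data, GuestAddress(0x0ffc)).unwrap();

    let mut readback = [0u8; 8];
    mem.read_slice(&mut readback, GuestAddress(0x0ffc)).unwrap();
    assert_eq!(readback, data);

    // Writing past the end of guest memory fails (with `PartialBuffer` or
    // `InvalidGuestAddress`) rather than silently truncating.
    assert!(mem.write_slice(&data, GuestAddress(0x1ffc)).is_err());
}
```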
- }) - } - - fn read_exact_volatile_from( - &self, - addr: GuestAddress, - src: &mut F, - count: usize, - ) -> Result<()> - where - F: ReadVolatile, - { - let res = self.read_volatile_from(addr, src, count)?; - if res != count { - return Err(Error::PartialBuffer { - expected: count, - completed: res, - }); - } - Ok(()) - } - - fn write_volatile_to(&self, addr: GuestAddress, dst: &mut F, count: usize) -> Result - where - F: WriteVolatile, - { - self.get_slices(addr, count) - .stop_on_error()? - .try_fold(0, |acc, slice| { - // For a non-RAM region, reading could have side effects, so we - // must use write_all(). - slice.write_all_volatile_to(0, dst, slice.len())?; - Ok(acc + slice.len()) - }) - } - - fn write_all_volatile_to(&self, addr: GuestAddress, dst: &mut F, count: usize) -> Result<()> - where - F: WriteVolatile, - { - let res = self.write_volatile_to(addr, dst, count)?; - if res != count { - return Err(Error::PartialBuffer { - expected: count, - completed: res, - }); - } - Ok(()) - } - - fn store(&self, val: O, addr: GuestAddress, order: Ordering) -> Result<()> { - // No need to check past the first iterator item: It either has the size of `O`, then there - // can be no further items; or it does not, and then `VolatileSlice::store()` will fail. - self.get_slices(addr, size_of::()) - .next() - .unwrap()? // count > 0 never produces an empty iterator - .store(val, 0, order) - .map_err(Into::into) - } - - fn load(&self, addr: GuestAddress, order: Ordering) -> Result { - // No need to check past the first iterator item: It either has the size of `O`, then there - // can be no further items; or it does not, and then `VolatileSlice::store()` will fail. - self.get_slices(addr, size_of::()) - .next() - .unwrap()? // count > 0 never produces an empty iterator - .load(0, order) - .map_err(Into::into) - } -} - -#[cfg(test)] -mod tests { - #![allow(clippy::undocumented_unsafe_blocks)] - use super::*; - #[cfg(feature = "backend-mmap")] - use crate::bytes::ByteValued; - #[cfg(feature = "backend-mmap")] - use crate::GuestAddress; - #[cfg(feature = "backend-mmap")] - use std::time::{Duration, Instant}; - - use vmm_sys_util::tempfile::TempFile; - - #[cfg(feature = "backend-mmap")] - type GuestMemoryMmap = crate::GuestMemoryMmap<()>; - - #[cfg(feature = "backend-mmap")] - fn make_image(size: u8) -> Vec { - let mut image: Vec = Vec::with_capacity(size as usize); - for i in 0..size { - image.push(i); - } - image - } - - #[test] - fn test_file_offset() { - let file = TempFile::new().unwrap().into_file(); - let start = 1234; - let file_offset = FileOffset::new(file, start); - assert_eq!(file_offset.start(), start); - assert_eq!( - file_offset.file() as *const File, - file_offset.arc().as_ref() as *const File - ); - } - - #[cfg(feature = "backend-mmap")] - #[test] - fn checked_read_from() { - let start_addr1 = GuestAddress(0x0); - let start_addr2 = GuestAddress(0x40); - let mem = GuestMemoryMmap::from_ranges(&[(start_addr1, 64), (start_addr2, 64)]).unwrap(); - let image = make_image(0x80); - let offset = GuestAddress(0x30); - let count: usize = 0x20; - assert_eq!( - 0x20_usize, - mem.read_volatile_from(offset, &mut image.as_slice(), count) - .unwrap() - ); - } - - // Runs the provided closure in a loop, until at least `duration` time units have elapsed. - #[cfg(feature = "backend-mmap")] - fn loop_timed(duration: Duration, mut f: F) - where - F: FnMut(), - { - // We check the time every `CHECK_PERIOD` iterations. 
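A short sketch of the volatile bulk-transfer methods, assuming the `backend-mmap` feature; `&[u8]` and `Vec<u8>` stand in for an arbitrary `ReadVolatile` source and `WriteVolatile` sink:

```rust
// Requires the `backend-mmap` feature.
use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};

fn main() {
    let mem = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x1000)])
        .expect("Could not create guest memory");

    // Fill part of guest memory from a byte slice (`&[u8]: ReadVolatile`).
    let payload = [0xABu8; 64];
    mem.read_exact_volatile_from(GuestAddress(0x100), &mut payload.as_slice(), 64)
        .unwrap();

    // Stream it back out into a `Vec<u8>` (`Vec<u8>: WriteVolatile`).
    let mut out: Vec<u8> = Vec::new();
    mem.write_all_volatile_to(GuestAddress(0x100), &mut out, 64)
        .unwrap();
    assert_eq!(out, payload);
}
```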
- const CHECK_PERIOD: u64 = 1_000_000; - let start_time = Instant::now(); - - loop { - for _ in 0..CHECK_PERIOD { - f(); - } - if start_time.elapsed() >= duration { - break; - } - } - } - - // Helper method for the following test. It spawns a writer and a reader thread, which - // simultaneously try to access an object that is placed at the junction of two memory regions. - // The part of the object that's continuously accessed is a member of type T. The writer - // flips all the bits of the member with every write, while the reader checks that every byte - // has the same value (and thus it did not do a non-atomic access). The test succeeds if - // no mismatch is detected after performing accesses for a pre-determined amount of time. - #[cfg(feature = "backend-mmap")] - #[cfg(not(miri))] // This test simulates a race condition between guest and vmm - fn non_atomic_access_helper() - where - T: ByteValued - + std::fmt::Debug - + From - + Into - + std::ops::Not - + PartialEq, - { - use std::mem; - use std::thread; - - // A dummy type that's always going to have the same alignment as the first member, - // and then adds some bytes at the end. - #[derive(Clone, Copy, Debug, Default, PartialEq)] - struct Data { - val: T, - some_bytes: [u8; 8], - } - - // Some sanity checks. - assert_eq!(mem::align_of::(), mem::align_of::>()); - assert_eq!(mem::size_of::(), mem::align_of::()); - - // There must be no padding bytes, as otherwise implementing ByteValued is UB - assert_eq!(mem::size_of::>(), mem::size_of::() + 8); - - unsafe impl ByteValued for Data {} - - // Start of first guest memory region. - let start = GuestAddress(0); - let region_len = 1 << 12; - - // The address where we start writing/reading a Data value. - let data_start = GuestAddress((region_len - mem::size_of::()) as u64); - - let mem = GuestMemoryMmap::from_ranges(&[ - (start, region_len), - (start.unchecked_add(region_len as u64), region_len), - ]) - .unwrap(); - - // Need to clone this and move it into the new thread we create. - let mem2 = mem.clone(); - // Just some bytes. - let some_bytes = [1u8, 2, 4, 16, 32, 64, 128, 255]; - - let mut data = Data { - val: T::from(0u8), - some_bytes, - }; - - // Simple check that cross-region write/read is ok. - mem.write_obj(data, data_start).unwrap(); - let read_data = mem.read_obj::>(data_start).unwrap(); - assert_eq!(read_data, data); - - let t = thread::spawn(move || { - let mut count: u64 = 0; - - loop_timed(Duration::from_secs(3), || { - let data = mem2.read_obj::>(data_start).unwrap(); - - // Every time data is written to memory by the other thread, the value of - // data.val alternates between 0 and T::MAX, so the inner bytes should always - // have the same value. If they don't match, it means we read a partial value, - // so the access was not atomic. - let bytes = data.val.into().to_le_bytes(); - for i in 1..mem::size_of::() { - if bytes[0] != bytes[i] { - panic!( - "val bytes don't match {:?} after {} iterations", - &bytes[..mem::size_of::()], - count - ); - } - } - count += 1; - }); - }); - - // Write the object while flipping the bits of data.val over and over again. 
- loop_timed(Duration::from_secs(3), || { - mem.write_obj(data, data_start).unwrap(); - data.val = !data.val; - }); - - t.join().unwrap() - } - - #[cfg(feature = "backend-mmap")] - #[test] - #[cfg(not(miri))] - fn test_non_atomic_access() { - non_atomic_access_helper::() - } - - #[cfg(feature = "backend-mmap")] - #[test] - fn test_zero_length_accesses() { - #[derive(Default, Clone, Copy)] - #[repr(C)] - struct ZeroSizedStruct { - dummy: [u32; 0], - } - - unsafe impl ByteValued for ZeroSizedStruct {} - - let addr = GuestAddress(0x1000); - let mem = GuestMemoryMmap::from_ranges(&[(addr, 0x1000)]).unwrap(); - let obj = ZeroSizedStruct::default(); - let mut image = make_image(0x80); - - assert_eq!(mem.write(&[], addr).unwrap(), 0); - assert_eq!(mem.read(&mut [], addr).unwrap(), 0); - - assert!(mem.write_slice(&[], addr).is_ok()); - assert!(mem.read_slice(&mut [], addr).is_ok()); - - assert!(mem.write_obj(obj, addr).is_ok()); - assert!(mem.read_obj::(addr).is_ok()); - - assert_eq!( - mem.read_volatile_from(addr, &mut image.as_slice(), 0) - .unwrap(), - 0 - ); - - assert!(mem - .read_exact_volatile_from(addr, &mut image.as_slice(), 0) - .is_ok()); - - assert_eq!( - mem.write_volatile_to(addr, &mut image.as_mut_slice(), 0) - .unwrap(), - 0 - ); - - assert!(mem - .write_all_volatile_to(addr, &mut image.as_mut_slice(), 0) - .is_ok()); - } - - #[cfg(feature = "backend-mmap")] - #[test] - fn test_atomic_accesses() { - let addr = GuestAddress(0x1000); - let mem = GuestMemoryMmap::from_ranges(&[(addr, 0x1000)]).unwrap(); - let bad_addr = addr.unchecked_add(0x1000); - - crate::bytes::tests::check_atomic_accesses(mem, addr, bad_addr); - } - - #[cfg(feature = "backend-mmap")] - #[cfg(target_os = "linux")] - #[test] - fn test_guest_memory_mmap_is_hugetlbfs() { - let addr = GuestAddress(0x1000); - let mem = GuestMemoryMmap::from_ranges(&[(addr, 0x1000)]).unwrap(); - let r = mem.find_region(addr).unwrap(); - assert_eq!(r.is_hugetlbfs(), None); - } -} +pub use vm_memory_new::guest_memory::{ + Error, + Result, + GuestAddress, + MemoryRegionAddress, + GuestUsize, + FileOffset, + GuestAddressSpace, + GuestMemoryBackendSliceIterator as GuestMemorySliceIterator, + GuestMemoryBackend as GuestMemory, +}; diff --git a/src/io.rs b/src/io.rs index 3c338491..abfa6171 100644 --- a/src/io.rs +++ b/src/io.rs @@ -3,689 +3,7 @@ //! Module containing versions of the standard library's [`Read`](std::io::Read) and //! [`Write`](std::io::Write) traits compatible with volatile memory accesses. -use crate::bitmap::BitmapSlice; -use crate::volatile_memory::copy_slice_impl::{copy_from_volatile_slice, copy_to_volatile_slice}; -use crate::{VolatileMemoryError, VolatileSlice}; -use std::io::{Cursor, ErrorKind}; - -#[cfg(feature = "rawfd")] -use std::io::Stdout; - -#[cfg(feature = "rawfd")] -use std::os::fd::{AsFd, AsRawFd, BorrowedFd}; - -macro_rules! retry_eintr { - ($io_call: expr) => { - loop { - let r = $io_call; - - if let Err(crate::VolatileMemoryError::IOError(ref err)) = r { - if err.kind() == std::io::ErrorKind::Interrupted { - continue; - } - } - - break r; - } - }; -} - -pub(crate) use retry_eintr; - -/// A version of the standard library's [`Read`](std::io::Read) trait that operates on volatile -/// memory instead of slices -/// -/// This trait is needed as rust slices (`&[u8]` and `&mut [u8]`) cannot be used when operating on -/// guest memory [1]. 
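Given the re-export above (`GuestMemoryBackend as GuestMemory`), downstream code that names the old trait should keep compiling unchanged; a hedged sketch (the `total_guest_ram` helper is illustrative, and assumes the `backend-mmap` feature):

```rust
// Requires the `backend-mmap` feature.
use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap, GuestMemoryRegion};

// Generic code written against the 0.17 trait name keeps compiling: the
// `GuestMemory` it sees is the re-exported `GuestMemoryBackend` trait.
fn total_guest_ram<M: GuestMemory>(mem: &M) -> u64 {
    mem.iter().map(|r| r.len()).sum()
}

fn main() {
    let mem = GuestMemoryMmap::<()>::from_ranges(&[
        (GuestAddress(0x0000), 0x1000),
        (GuestAddress(0x2000), 0x2000),
    ])
    .expect("Could not create guest memory");

    assert_eq!(total_guest_ram(&mem), 0x3000);
}
```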
-/// -/// [1]: https://github.com/rust-vmm/vm-memory/pull/217 -pub trait ReadVolatile { - /// Tries to read some bytes into the given [`VolatileSlice`] buffer, returning how many bytes - /// were read. - /// - /// The behavior of implementations should be identical to [`Read::read`](std::io::Read::read) - fn read_volatile( - &mut self, - buf: &mut VolatileSlice, - ) -> Result; - - /// Tries to fill the given [`VolatileSlice`] buffer by reading from `self` returning an error - /// if insufficient bytes could be read. - /// - /// The default implementation is identical to that of [`Read::read_exact`](std::io::Read::read_exact) - fn read_exact_volatile( - &mut self, - buf: &mut VolatileSlice, - ) -> Result<(), VolatileMemoryError> { - // Implementation based on https://github.com/rust-lang/rust/blob/7e7483d26e3cec7a44ef00cf7ae6c9c8c918bec6/library/std/src/io/mod.rs#L465 - - let mut partial_buf = buf.offset(0)?; - - while !partial_buf.is_empty() { - match retry_eintr!(self.read_volatile(&mut partial_buf)) { - Ok(0) => { - return Err(VolatileMemoryError::IOError(std::io::Error::new( - ErrorKind::UnexpectedEof, - "failed to fill whole buffer", - ))) - } - Ok(bytes_read) => partial_buf = partial_buf.offset(bytes_read)?, - Err(err) => return Err(err), - } - } - - Ok(()) - } -} - -/// A version of the standard library's [`Write`](std::io::Write) trait that operates on volatile -/// memory instead of slices. -/// -/// This trait is needed as rust slices (`&[u8]` and `&mut [u8]`) cannot be used when operating on -/// guest memory [1]. -/// -/// [1]: https://github.com/rust-vmm/vm-memory/pull/217 -pub trait WriteVolatile { - /// Tries to write some bytes from the given [`VolatileSlice`] buffer, returning how many bytes - /// were written. - /// - /// The behavior of implementations should be identical to [`Write::write`](std::io::Write::write) - fn write_volatile( - &mut self, - buf: &VolatileSlice, - ) -> Result; - - /// Tries write the entire content of the given [`VolatileSlice`] buffer to `self` returning an - /// error if not all bytes could be written. - /// - /// The default implementation is identical to that of [`Write::write_all`](std::io::Write::write_all) - fn write_all_volatile( - &mut self, - buf: &VolatileSlice, - ) -> Result<(), VolatileMemoryError> { - // Based on https://github.com/rust-lang/rust/blob/7e7483d26e3cec7a44ef00cf7ae6c9c8c918bec6/library/std/src/io/mod.rs#L1570 - - let mut partial_buf = buf.offset(0)?; - - while !partial_buf.is_empty() { - match retry_eintr!(self.write_volatile(&partial_buf)) { - Ok(0) => { - return Err(VolatileMemoryError::IOError(std::io::Error::new( - ErrorKind::WriteZero, - "failed to write whole buffer", - ))) - } - Ok(bytes_written) => partial_buf = partial_buf.offset(bytes_written)?, - Err(err) => return Err(err), - } - } - - Ok(()) - } -} - -// We explicitly implement our traits for [`std::fs::File`] and [`std::os::unix::net::UnixStream`] -// instead of providing blanket implementation for [`AsRawFd`] due to trait coherence limitations: A -// blanket implementation would prevent us from providing implementations for `&mut [u8]` below, as -// "an upstream crate could implement AsRawFd for &mut [u8]". - -macro_rules! 
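A minimal usage sketch of `ReadVolatile`/`WriteVolatile` with the in-memory implementors provided further below (`&[u8]` as source, `Vec<u8>` as sink):

```rust
use vm_memory::{ReadVolatile, VolatileSlice, WriteVolatile};

fn main() {
    let mut guest_buf = [0u8; 8];
    let mut dst = VolatileSlice::from(guest_buf.as_mut_slice());

    // `&[u8]` implements `ReadVolatile`, so a plain byte slice can act as the
    // source; `read_exact_volatile` fails on a short source instead of
    // silently truncating.
    let mut src: &[u8] = &[1, 2, 3, 4, 5, 6, 7, 8];
    src.read_exact_volatile(&mut dst).unwrap();

    // The mirror image: drain the volatile buffer into a `WriteVolatile` sink.
    let mut out: Vec<u8> = Vec::new();
    out.write_all_volatile(&dst).unwrap();
    assert_eq!(out, guest_buf);
}
```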
impl_read_write_volatile_for_raw_fd { - ($raw_fd_ty:ty) => { - #[cfg(feature = "rawfd")] - impl ReadVolatile for $raw_fd_ty { - fn read_volatile( - &mut self, - buf: &mut VolatileSlice, - ) -> Result { - read_volatile_raw_fd(self.as_fd(), buf) - } - } - - #[cfg(feature = "rawfd")] - impl ReadVolatile for &$raw_fd_ty { - fn read_volatile( - &mut self, - buf: &mut VolatileSlice, - ) -> Result { - read_volatile_raw_fd(self.as_fd(), buf) - } - } - - #[cfg(feature = "rawfd")] - impl ReadVolatile for &mut $raw_fd_ty { - fn read_volatile( - &mut self, - buf: &mut VolatileSlice, - ) -> Result { - read_volatile_raw_fd(self.as_fd(), buf) - } - } - - #[cfg(feature = "rawfd")] - impl WriteVolatile for $raw_fd_ty { - fn write_volatile( - &mut self, - buf: &VolatileSlice, - ) -> Result { - write_volatile_raw_fd(self.as_fd(), buf) - } - } - - #[cfg(feature = "rawfd")] - impl WriteVolatile for &$raw_fd_ty { - fn write_volatile( - &mut self, - buf: &VolatileSlice, - ) -> Result { - write_volatile_raw_fd(self.as_fd(), buf) - } - } - - #[cfg(feature = "rawfd")] - impl WriteVolatile for &mut $raw_fd_ty { - fn write_volatile( - &mut self, - buf: &VolatileSlice, - ) -> Result { - write_volatile_raw_fd(self.as_fd(), buf) - } - } - }; -} - -#[cfg(feature = "rawfd")] -impl WriteVolatile for Stdout { - fn write_volatile( - &mut self, - buf: &VolatileSlice, - ) -> Result { - write_volatile_raw_fd(self.as_fd(), buf) - } -} - -#[cfg(feature = "rawfd")] -impl WriteVolatile for &Stdout { - fn write_volatile( - &mut self, - buf: &VolatileSlice, - ) -> Result { - write_volatile_raw_fd(self.as_fd(), buf) - } -} - -impl_read_write_volatile_for_raw_fd!(std::fs::File); -impl_read_write_volatile_for_raw_fd!(std::net::TcpStream); -impl_read_write_volatile_for_raw_fd!(std::os::unix::net::UnixStream); -impl_read_write_volatile_for_raw_fd!(std::os::fd::OwnedFd); -impl_read_write_volatile_for_raw_fd!(std::os::fd::BorrowedFd<'_>); - -/// Tries to do a single `read` syscall on the provided file descriptor, storing the data raed in -/// the given [`VolatileSlice`]. -/// -/// Returns the numbers of bytes read. -#[cfg(feature = "rawfd")] -fn read_volatile_raw_fd( - raw_fd: BorrowedFd<'_>, - buf: &mut VolatileSlice, -) -> Result { - let fd = raw_fd.as_raw_fd(); - let guard = buf.ptr_guard_mut(); - - let dst = guard.as_ptr().cast::(); - - // SAFETY: Rust's I/O safety invariants ensure that BorrowedFd contains a valid file descriptor`. - // The memory pointed to by `dst` is valid for writes of length `buf.len() by the invariants - // upheld by the constructor of `VolatileSlice`. - let bytes_read = unsafe { libc::read(fd, dst, buf.len()) }; - - if bytes_read < 0 { - // We don't know if a partial read might have happened, so mark everything as dirty - buf.bitmap().mark_dirty(0, buf.len()); - - Err(VolatileMemoryError::IOError(std::io::Error::last_os_error())) - } else { - let bytes_read = bytes_read.try_into().unwrap(); - buf.bitmap().mark_dirty(0, bytes_read); - Ok(bytes_read) - } -} - -/// Tries to do a single `write` syscall on the provided file descriptor, attempting to write the -/// data stored in the given [`VolatileSlice`]. -/// -/// Returns the numbers of bytes written. -#[cfg(feature = "rawfd")] -fn write_volatile_raw_fd( - raw_fd: BorrowedFd<'_>, - buf: &VolatileSlice, -) -> Result { - let fd = raw_fd.as_raw_fd(); - let guard = buf.ptr_guard(); - - let src = guard.as_ptr().cast::(); - - // SAFETY: Rust's I/O safety invariants ensure that BorrowedFd contains a valid file descriptor`. 
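A short sketch of the file-descriptor based implementations, assuming the `rawfd` feature and a Unix-like host (reading `/dev/zero` only keeps the example self-contained):

```rust
// Requires the `rawfd` feature and a Unix-like host (for `/dev/zero`).
use std::fs::File;
use vm_memory::{ReadVolatile, VolatileSlice};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut file = File::open("/dev/zero")?;

    let mut buf = [0xFFu8; 16];
    let mut slice = VolatileSlice::from(buf.as_mut_slice());

    // `File` implements `ReadVolatile`, so data flows from the `read(2)`
    // syscall directly into the volatile buffer, without a bounce buffer.
    file.read_exact_volatile(&mut slice)?;

    assert!(buf.iter().all(|&b| b == 0));
    Ok(())
}
```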
- // The memory pointed to by `src` is valid for reads of length `buf.len() by the invariants - // upheld by the constructor of `VolatileSlice`. - let bytes_written = unsafe { libc::write(fd, src, buf.len()) }; - - if bytes_written < 0 { - Err(VolatileMemoryError::IOError(std::io::Error::last_os_error())) - } else { - Ok(bytes_written.try_into().unwrap()) - } -} - -impl WriteVolatile for &mut [u8] { - fn write_volatile( - &mut self, - buf: &VolatileSlice, - ) -> Result { - let total = buf.len().min(self.len()); - - // SAFETY: - // `buf` is contiguously allocated memory of length `total <= buf.len())` by the invariants - // of `VolatileSlice`. - // Furthermore, both source and destination of the call to copy_from_volatile_slice are valid - // for reads and writes respectively of length `total` since total is the minimum of lengths - // of the memory areas pointed to. The areas do not overlap, since the source is inside guest - // memory, and the destination is a pointer derived from a slice (no slices to guest memory - // are possible without violating rust's aliasing rules). - let written = unsafe { copy_from_volatile_slice(self.as_mut_ptr(), buf, total) }; - - // Advance the slice, just like the stdlib: https://doc.rust-lang.org/src/std/io/impls.rs.html#335 - *self = std::mem::take(self).split_at_mut(written).1; - - Ok(written) - } - - fn write_all_volatile( - &mut self, - buf: &VolatileSlice, - ) -> Result<(), VolatileMemoryError> { - // Based on https://github.com/rust-lang/rust/blob/f7b831ac8a897273f78b9f47165cf8e54066ce4b/library/std/src/io/impls.rs#L376-L382 - if self.write_volatile(buf)? == buf.len() { - Ok(()) - } else { - Err(VolatileMemoryError::IOError(std::io::Error::new( - ErrorKind::WriteZero, - "failed to write whole buffer", - ))) - } - } -} - -impl ReadVolatile for &[u8] { - fn read_volatile( - &mut self, - buf: &mut VolatileSlice, - ) -> Result { - let total = buf.len().min(self.len()); - - // SAFETY: - // `buf` is contiguously allocated memory of length `total <= buf.len())` by the invariants - // of `VolatileSlice`. - // Furthermore, both source and destination of the call to copy_to_volatile_slice are valid - // for reads and writes respectively of length `total` since total is the minimum of lengths - // of the memory areas pointed to. The areas do not overlap, since the destination is inside - // guest memory, and the source is a pointer derived from a slice (no slices to guest memory - // are possible without violating rust's aliasing rules). - let read = unsafe { copy_to_volatile_slice(buf, self.as_ptr(), total) }; - - // Advance the slice, just like the stdlib: https://doc.rust-lang.org/src/std/io/impls.rs.html#232-310 - *self = self.split_at(read).1; - - Ok(read) - } - - fn read_exact_volatile( - &mut self, - buf: &mut VolatileSlice, - ) -> Result<(), VolatileMemoryError> { - // Based on https://github.com/rust-lang/rust/blob/f7b831ac8a897273f78b9f47165cf8e54066ce4b/library/std/src/io/impls.rs#L282-L302 - if buf.len() > self.len() { - return Err(VolatileMemoryError::IOError(std::io::Error::new( - ErrorKind::UnexpectedEof, - "failed to fill whole buffer", - ))); - } - - self.read_volatile(buf).map(|_| ()) - } -} - -// WriteVolatile implementation for Vec is based upon the Write impl for Vec, which -// defers to Vec::append_elements, after which the below functionality is modelled. 
-impl WriteVolatile for Vec { - fn write_volatile( - &mut self, - buf: &VolatileSlice, - ) -> Result { - let count = buf.len(); - self.reserve(count); - let len = self.len(); - - // SAFETY: Calling Vec::reserve() above guarantees the the backing storage of the Vec has - // length at least `len + count`. This means that self.as_mut_ptr().add(len) remains within - // the same allocated object, the offset does not exceed isize (as otherwise reserve would - // have panicked), and does not rely on address space wrapping around. - // In particular, the entire `count` bytes after `self.as_mut_ptr().add(count)` is - // contiguously allocated and valid for writes. - // Lastly, `copy_to_volatile_slice` correctly initialized `copied_len` additional bytes - // in the Vec's backing storage, and we assert this to be equal to `count`. Additionally, - // `len + count` is at most the reserved capacity of the vector. Thus the call to `set_len` - // is safe. - unsafe { - let copied_len = copy_from_volatile_slice(self.as_mut_ptr().add(len), buf, count); - - assert_eq!(copied_len, count); - self.set_len(len + count); - } - Ok(count) - } -} - -// ReadVolatile and WriteVolatile implementations for Cursor is modelled after the standard -// library's implementation (modulo having to inline `Cursor::remaining_slice`, as that's nightly only) -impl ReadVolatile for Cursor -where - T: AsRef<[u8]>, -{ - fn read_volatile( - &mut self, - buf: &mut VolatileSlice, - ) -> Result { - let inner = self.get_ref().as_ref(); - let len = self.position().min(inner.len() as u64); - let n = ReadVolatile::read_volatile(&mut &inner[(len as usize)..], buf)?; - self.set_position(self.position() + n as u64); - Ok(n) - } - - fn read_exact_volatile( - &mut self, - buf: &mut VolatileSlice, - ) -> Result<(), VolatileMemoryError> { - let inner = self.get_ref().as_ref(); - let n = buf.len(); - let len = self.position().min(inner.len() as u64); - ReadVolatile::read_exact_volatile(&mut &inner[(len as usize)..], buf)?; - self.set_position(self.position() + n as u64); - Ok(()) - } -} - -impl WriteVolatile for Cursor<&mut [u8]> { - fn write_volatile( - &mut self, - buf: &VolatileSlice, - ) -> Result { - let pos = self.position().min(self.get_ref().len() as u64); - let n = WriteVolatile::write_volatile(&mut &mut self.get_mut()[(pos as usize)..], buf)?; - self.set_position(self.position() + n as u64); - Ok(n) - } - - // no write_all provided in standard library, since our default for write_all is based on the - // standard library's write_all, omitting it here as well will correctly mimic stdlib behavior. 
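A brief sketch of the `Cursor<&mut [u8]>` implementation above; the position advances exactly as with the standard library's `Write`:

```rust
use std::io::Cursor;
use vm_memory::{VolatileSlice, WriteVolatile};

fn main() {
    let mut storage = [0u8; 8];
    let mut cursor = Cursor::new(storage.as_mut_slice());

    let mut payload = [0xAAu8; 4];
    let slice = VolatileSlice::from(payload.as_mut_slice());

    // Each write advances the cursor position by the number of bytes written.
    assert_eq!(cursor.write_volatile(&slice).unwrap(), 4);
    assert_eq!(cursor.position(), 4);

    assert_eq!(cursor.write_volatile(&slice).unwrap(), 4);
    assert_eq!(cursor.position(), 8);

    // The cursor is now full; further writes report 0 bytes written.
    assert_eq!(cursor.write_volatile(&slice).unwrap(), 0);
}
```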
-} - -#[cfg(test)] -mod tests { - use crate::io::{ReadVolatile, WriteVolatile}; - use crate::{VolatileMemoryError, VolatileSlice}; - use std::io::{Cursor, ErrorKind}; - #[cfg(feature = "rawfd")] - use std::io::{Read, Seek, Write}; - #[cfg(feature = "rawfd")] - use vmm_sys_util::tempfile::TempFile; - - // ---- Test ReadVolatile for &[u8] ---- - fn read_4_bytes_to_5_byte_memory(source: Vec, expected_output: [u8; 5]) { - // Test read_volatile for &[u8] works - let mut memory = vec![0u8; 5]; - - assert_eq!( - (&source[..]) - .read_volatile(&mut VolatileSlice::from(&mut memory[..4])) - .unwrap(), - source.len().min(4) - ); - assert_eq!(&memory, &expected_output); - - // Test read_exact_volatile for &[u8] works - let mut memory = vec![0u8; 5]; - let result = (&source[..]).read_exact_volatile(&mut VolatileSlice::from(&mut memory[..4])); - - // read_exact fails if there are not enough bytes in input to completely fill - // memory[..4] - if source.len() < 4 { - match result.unwrap_err() { - VolatileMemoryError::IOError(ioe) => { - assert_eq!(ioe.kind(), ErrorKind::UnexpectedEof) - } - err => panic!("{:?}", err), - } - assert_eq!(memory, vec![0u8; 5]); - } else { - result.unwrap(); - assert_eq!(&memory, &expected_output); - } - } - - // ---- Test ReadVolatile for File ---- - #[cfg(all(feature = "rawfd", not(miri)))] - fn read_4_bytes_from_file(source: Vec, expected_output: [u8; 5]) { - let mut temp_file = TempFile::new().unwrap().into_file(); - temp_file.write_all(source.as_ref()).unwrap(); - temp_file.rewind().unwrap(); - - // Test read_volatile for File works - let mut memory = vec![0u8; 5]; - - assert_eq!( - temp_file - .read_volatile(&mut VolatileSlice::from(&mut memory[..4])) - .unwrap(), - source.len().min(4) - ); - assert_eq!(&memory, &expected_output); - - temp_file.rewind().unwrap(); - - // Test read_exact_volatile for File works - let mut memory = vec![0u8; 5]; - - let read_exact_result = - temp_file.read_exact_volatile(&mut VolatileSlice::from(&mut memory[..4])); - - if source.len() < 4 { - read_exact_result.unwrap_err(); - } else { - read_exact_result.unwrap(); - } - assert_eq!(&memory, &expected_output); - } - - #[test] - fn test_read_volatile() { - let test_cases = [ - (vec![1u8, 2], [1u8, 2, 0, 0, 0]), - (vec![1, 2, 3, 4], [1, 2, 3, 4, 0]), - // ensure we don't have a buffer overrun - (vec![5, 6, 7, 8, 9], [5, 6, 7, 8, 0]), - ]; - - for (input, output) in test_cases { - read_4_bytes_to_5_byte_memory(input.clone(), output); - #[cfg(all(feature = "rawfd", not(miri)))] - read_4_bytes_from_file(input, output); - } - } - - // ---- Test WriteVolatile for &mut [u8] ---- - fn write_4_bytes_to_5_byte_vec(mut source: Vec, expected_result: [u8; 5]) { - let mut memory = vec![0u8; 5]; - - // Test write_volatile for &mut [u8] works - assert_eq!( - (&mut memory[..4]) - .write_volatile(&VolatileSlice::from(source.as_mut_slice())) - .unwrap(), - source.len().min(4) - ); - assert_eq!(&memory, &expected_result); - - // Test write_all_volatile for &mut [u8] works - let mut memory = vec![0u8; 5]; - - let result = - (&mut memory[..4]).write_all_volatile(&VolatileSlice::from(source.as_mut_slice())); - - if source.len() > 4 { - match result.unwrap_err() { - VolatileMemoryError::IOError(ioe) => { - assert_eq!(ioe.kind(), ErrorKind::WriteZero) - } - err => panic!("{:?}", err), - } - // This quirky behavior of writing to the slice even in the case of failure is also - // exhibited by the stdlib - assert_eq!(&memory, &expected_result); - } else { - result.unwrap(); - assert_eq!(&memory, &expected_result); - } - 
} - - // ---- Test ẂriteVolatile for File works ---- - #[cfg(all(feature = "rawfd", not(miri)))] - fn write_5_bytes_to_file(mut source: Vec) { - // Test write_volatile for File works - let mut temp_file = TempFile::new().unwrap().into_file(); - - temp_file - .write_volatile(&VolatileSlice::from(source.as_mut_slice())) - .unwrap(); - temp_file.rewind().unwrap(); - - let mut written = vec![0u8; source.len()]; - temp_file.read_exact(written.as_mut_slice()).unwrap(); - - assert_eq!(source, written); - // check no excess bytes were written to the file - assert_eq!(temp_file.read(&mut [0u8]).unwrap(), 0); - - // Test write_all_volatile for File works - let mut temp_file = TempFile::new().unwrap().into_file(); - - temp_file - .write_all_volatile(&VolatileSlice::from(source.as_mut_slice())) - .unwrap(); - temp_file.rewind().unwrap(); - - let mut written = vec![0u8; source.len()]; - temp_file.read_exact(written.as_mut_slice()).unwrap(); - - assert_eq!(source, written); - // check no excess bytes were written to the file - assert_eq!(temp_file.read(&mut [0u8]).unwrap(), 0); - } - - #[test] - fn test_write_volatile() { - let test_cases = [ - (vec![1u8, 2], [1u8, 2, 0, 0, 0]), - (vec![1, 2, 3, 4], [1, 2, 3, 4, 0]), - // ensure we don't have a buffer overrun - (vec![5, 6, 7, 8, 9], [5, 6, 7, 8, 0]), - ]; - - for (input, output) in test_cases { - write_4_bytes_to_5_byte_vec(input.clone(), output); - #[cfg(all(feature = "rawfd", not(miri)))] - write_5_bytes_to_file(input); - } - } - - #[test] - fn test_read_volatile_for_cursor() { - let read_buffer = [1, 2, 3, 4, 5, 6, 7]; - let mut output = vec![0u8; 5]; - - let mut cursor = Cursor::new(read_buffer); - - // Read 4 bytes from cursor to volatile slice (amount read limited by volatile slice length) - assert_eq!( - cursor - .read_volatile(&mut VolatileSlice::from(&mut output[..4])) - .unwrap(), - 4 - ); - assert_eq!(output, vec![1, 2, 3, 4, 0]); - - // Read next 3 bytes from cursor to volatile slice (amount read limited by length of remaining data in cursor) - assert_eq!( - cursor - .read_volatile(&mut VolatileSlice::from(&mut output[..4])) - .unwrap(), - 3 - ); - assert_eq!(output, vec![5, 6, 7, 4, 0]); - - cursor.set_position(0); - // Same as first test above, but with read_exact - cursor - .read_exact_volatile(&mut VolatileSlice::from(&mut output[..4])) - .unwrap(); - assert_eq!(output, vec![1, 2, 3, 4, 0]); - - // Same as above, but with read_exact. Should fail now, because we cannot fill a 4 byte buffer - // with whats remaining in the cursor (3 bytes). Output should remain unchanged. 
- assert!(cursor - .read_exact_volatile(&mut VolatileSlice::from(&mut output[..4])) - .is_err()); - assert_eq!(output, vec![1, 2, 3, 4, 0]); - } - - #[test] - fn test_write_volatile_for_cursor() { - let mut write_buffer = vec![0u8; 7]; - let mut input = [1, 2, 3, 4]; - - let mut cursor = Cursor::new(write_buffer.as_mut_slice()); - - // Write 4 bytes from volatile slice to cursor (amount written limited by volatile slice length) - assert_eq!( - cursor - .write_volatile(&VolatileSlice::from(input.as_mut_slice())) - .unwrap(), - 4 - ); - assert_eq!(cursor.get_ref(), &[1, 2, 3, 4, 0, 0, 0]); - - // Write 3 bytes from volatile slice to cursor (amount written limited by remaining space in cursor) - assert_eq!( - cursor - .write_volatile(&VolatileSlice::from(input.as_mut_slice())) - .unwrap(), - 3 - ); - assert_eq!(cursor.get_ref(), &[1, 2, 3, 4, 1, 2, 3]); - } - - #[test] - fn test_write_volatile_for_vec() { - let mut write_buffer = Vec::new(); - let mut input = [1, 2, 3, 4]; - - assert_eq!( - write_buffer - .write_volatile(&VolatileSlice::from(input.as_mut_slice())) - .unwrap(), - 4 - ); - - assert_eq!(&write_buffer, &input); - } -} +pub use vm_memory_new::io::{ + ReadVolatile, + WriteVolatile, +}; diff --git a/src/lib.rs b/src/lib.rs index d4bb30ac..949c304e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -24,13 +24,6 @@ #[cfg(not(target_pointer_width = "64"))] compile_error!("vm-memory only supports 64-bit targets!"); -#[cfg(all(target_family = "windows", feature = "rawfd"))] -compile_error!("rawfd feature is not supported on Windows targets!"); - -#[cfg(all(target_family = "windows", feature = "xen"))] -compile_error!("xen feature is not supported on Windows targets!"); - -#[macro_use] pub mod address; pub use address::{Address, AddressValue}; diff --git a/src/mmap.rs b/src/mmap.rs new file mode 100644 index 00000000..c00aa61c --- /dev/null +++ b/src/mmap.rs @@ -0,0 +1,25 @@ +// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved. +// +// Portions Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +// +// Portions Copyright 2017 The Chromium OS Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE-BSD-3-Clause file. +// +// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause + +//! The default implementation for the [`GuestMemory`](trait.GuestMemory.html) trait. +//! +//! This implementation is mmap-ing the memory of the guest into the current process. + +// re-export for backward compat, as the trait used to be defined in mmap.rs +pub use vm_memory_new::bitmap::NewBitmap; + +#[cfg(all(not(feature = "xen"), target_family = "unix"))] +pub use vm_memory_new::mmap::MmapRegionBuilder; + +#[cfg(all(feature = "xen", target_family = "unix"))] +pub use vm_memory_new::mmap::{MmapRange, MmapXenFlags}; + +pub use vm_memory_new::mmap::{MmapRegion, MmapRegionError, GuestMemoryMmap, GuestRegionMmap, + FromRangesError}; diff --git a/src/mmap/mod.rs b/src/mmap/mod.rs deleted file mode 100644 index 7d133bdf..00000000 --- a/src/mmap/mod.rs +++ /dev/null @@ -1,720 +0,0 @@ -// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved. -// -// Portions Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. -// -// Portions Copyright 2017 The Chromium OS Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE-BSD-3-Clause file. -// -// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause - -//! 
The default implementation for the [`GuestMemory`](trait.GuestMemory.html) trait. -//! -//! This implementation is mmap-ing the memory of the guest into the current process. - -use std::borrow::Borrow; -use std::ops::Deref; -use std::result; - -use crate::address::Address; -use crate::bitmap::{Bitmap, BS}; -use crate::guest_memory::{self, FileOffset, GuestAddress, GuestUsize, MemoryRegionAddress}; -use crate::region::{ - GuestMemoryRegion, GuestMemoryRegionBytes, GuestRegionCollection, GuestRegionCollectionError, -}; -use crate::volatile_memory::{VolatileMemory, VolatileSlice}; - -// re-export for backward compat, as the trait used to be defined in mmap.rs -pub use crate::bitmap::NewBitmap; - -#[cfg(all(not(feature = "xen"), target_family = "unix"))] -mod unix; - -#[cfg(all(feature = "xen", target_family = "unix"))] -pub(crate) mod xen; - -#[cfg(target_family = "windows")] -mod windows; - -#[cfg(all(not(feature = "xen"), target_family = "unix"))] -pub use unix::{Error as MmapRegionError, MmapRegion, MmapRegionBuilder}; - -#[cfg(all(feature = "xen", target_family = "unix"))] -pub use xen::{Error as MmapRegionError, MmapRange, MmapRegion, MmapXenFlags}; - -#[cfg(target_family = "windows")] -pub use std::io::Error as MmapRegionError; -#[cfg(target_family = "windows")] -pub use windows::MmapRegion; - -/// [`GuestMemoryRegion`](trait.GuestMemoryRegion.html) implementation that mmaps the guest's -/// memory region in the current process. -/// -/// Represents a continuous region of the guest's physical memory that is backed by a mapping -/// in the virtual address space of the calling process. -#[derive(Debug)] -pub struct GuestRegionMmap { - mapping: MmapRegion, - guest_base: GuestAddress, -} - -impl Deref for GuestRegionMmap { - type Target = MmapRegion; - - fn deref(&self) -> &MmapRegion { - &self.mapping - } -} - -impl GuestRegionMmap { - /// Create a new memory-mapped memory region for the guest's physical memory. - /// - /// Returns `None` if `guest_base` + `mapping.len()` would overflow. - pub fn new(mapping: MmapRegion, guest_base: GuestAddress) -> Option { - guest_base - .0 - .checked_add(mapping.size() as u64) - .map(|_| Self { - mapping, - guest_base, - }) - } -} - -#[cfg(not(feature = "xen"))] -impl GuestRegionMmap { - /// Create a new memory-mapped memory region from guest's physical memory, size and file. - pub fn from_range( - addr: GuestAddress, - size: usize, - file: Option, - ) -> result::Result { - let region = if let Some(ref f_off) = file { - MmapRegion::from_file(f_off.clone(), size)? - } else { - MmapRegion::new(size)? - }; - - Self::new(region, addr).ok_or(FromRangesError::InvalidGuestRegion) - } -} - -#[cfg(feature = "xen")] -impl GuestRegionMmap { - /// Create a new Unix memory-mapped memory region from guest's physical memory, size and file. - /// This must only be used for tests, doctests, benches and is not designed for end consumers. 
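Since `GuestRegionMmap::from_range` together with the `GuestMemoryRegion` accessors is the usual entry point here, a small sketch of building a single anonymous region, assuming the `backend-mmap` feature on a unix, non-xen build:

```rust
use vm_memory::{GuestAddress, GuestMemoryRegion, GuestRegionMmap};

// Map 4 KiB of anonymous memory and expose it to the guest at 0x4000.
let region: GuestRegionMmap<()> =
    GuestRegionMmap::from_range(GuestAddress(0x4000), 0x1000, None).unwrap();

assert_eq!(region.start_addr(), GuestAddress(0x4000));
assert_eq!(region.len(), 0x1000);
assert_eq!(region.last_addr(), GuestAddress(0x4fff));
```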
- pub fn from_range( - addr: GuestAddress, - size: usize, - file: Option, - ) -> result::Result { - let range = MmapRange::new_unix(size, file, addr); - - let region = MmapRegion::from_range(range)?; - Self::new(region, addr).ok_or(FromRangesError::InvalidGuestRegion) - } -} - -impl GuestMemoryRegion for GuestRegionMmap { - type B = B; - - fn len(&self) -> GuestUsize { - self.mapping.size() as GuestUsize - } - - fn start_addr(&self) -> GuestAddress { - self.guest_base - } - - fn bitmap(&self) -> BS<'_, Self::B> { - self.mapping.bitmap().slice_at(0) - } - - fn get_host_address(&self, addr: MemoryRegionAddress) -> guest_memory::Result<*mut u8> { - // Not sure why wrapping_offset is not unsafe. Anyway this - // is safe because we've just range-checked addr using check_address. - self.check_address(addr) - .ok_or(guest_memory::Error::InvalidBackendAddress) - .map(|addr| { - self.mapping - .as_ptr() - .wrapping_offset(addr.raw_value() as isize) - }) - } - - fn file_offset(&self) -> Option<&FileOffset> { - self.mapping.file_offset() - } - - fn get_slice( - &self, - offset: MemoryRegionAddress, - count: usize, - ) -> guest_memory::Result>> { - let slice = self.mapping.get_slice(offset.raw_value() as usize, count)?; - Ok(slice) - } - - #[cfg(target_os = "linux")] - fn is_hugetlbfs(&self) -> Option { - self.mapping.is_hugetlbfs() - } -} - -impl GuestMemoryRegionBytes for GuestRegionMmap {} - -/// [`GuestMemory`](trait.GuestMemory.html) implementation that mmaps the guest's memory -/// in the current process. -/// -/// Represents the entire physical memory of the guest by tracking all its memory regions. -/// Each region is an instance of `GuestRegionMmap`, being backed by a mapping in the -/// virtual address space of the calling process. -pub type GuestMemoryMmap = GuestRegionCollection>; - -/// Errors that can happen during [`GuestMemoryMmap::from_ranges`] and related functions. -#[derive(Debug, thiserror::Error)] -pub enum FromRangesError { - /// Error during construction of [`GuestMemoryMmap`] - #[error("Error constructing guest region collection: {0}")] - Collection(#[from] GuestRegionCollectionError), - /// Error while allocating raw mmap region - #[error("Error setting up raw memory for guest region: {0}")] - MmapRegion(#[from] MmapRegionError), - /// A combination of region length and guest address would overflow. - #[error("Combination of guest address and region length invalid (would overflow)")] - InvalidGuestRegion, -} - -impl GuestMemoryMmap { - /// Creates a container and allocates anonymous memory for guest memory regions. - /// - /// Valid memory regions are specified as a slice of (Address, Size) tuples sorted by Address. - pub fn from_ranges(ranges: &[(GuestAddress, usize)]) -> result::Result { - Self::from_ranges_with_files(ranges.iter().map(|r| (r.0, r.1, None))) - } - - /// Creates a container and allocates anonymous memory for guest memory regions. - /// - /// Valid memory regions are specified as a sequence of (Address, Size, [`Option`]) - /// tuples sorted by Address. 
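As a usage sketch for the collection type, assuming the `backend-mmap` feature and that `Bytes` (implemented for every `GuestMemory`) is in scope for the object-level accessors:

```rust
use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};

// Two disjoint 4 KiB regions, specified sorted by guest address.
let gm: GuestMemoryMmap<()> = GuestMemoryMmap::from_ranges(&[
    (GuestAddress(0), 0x1000),
    (GuestAddress(0x2000), 0x1000),
])
.unwrap();

// Object accesses are routed to the region containing the address.
gm.write_obj(0xdead_beef_u32, GuestAddress(0x2010)).unwrap();
assert_eq!(gm.read_obj::<u32>(GuestAddress(0x2010)).unwrap(), 0xdead_beef);
```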
- pub fn from_ranges_with_files(ranges: T) -> result::Result - where - A: Borrow<(GuestAddress, usize, Option)>, - T: IntoIterator, - { - Self::from_regions( - ranges - .into_iter() - .map(|x| { - GuestRegionMmap::from_range(x.borrow().0, x.borrow().1, x.borrow().2.clone()) - }) - .collect::, _>>()?, - ) - .map_err(Into::into) - } -} - -#[cfg(test)] -mod tests { - #![allow(clippy::undocumented_unsafe_blocks)] - extern crate vmm_sys_util; - - use super::*; - - #[cfg(feature = "backend-bitmap")] - use crate::bitmap::AtomicBitmap; - use crate::{Bytes, GuestMemory, GuestMemoryError}; - - use std::io::Write; - #[cfg(feature = "rawfd")] - use std::{fs::File, path::Path}; - use vmm_sys_util::tempfile::TempFile; - - use matches::assert_matches; - - type GuestRegionMmap = super::GuestRegionMmap<()>; - type GuestMemoryMmap = super::GuestRegionCollection; - type MmapRegion = super::MmapRegion<()>; - - #[test] - fn basic_map() { - let m = MmapRegion::new(1024).unwrap(); - assert_eq!(1024, m.size()); - } - - #[test] - fn slice_addr() { - let m = GuestRegionMmap::from_range(GuestAddress(0), 5, None).unwrap(); - let s = m.get_slice(MemoryRegionAddress(2), 3).unwrap(); - let guard = s.ptr_guard(); - assert_eq!(guard.as_ptr(), unsafe { m.as_ptr().offset(2) }); - } - - #[test] - #[cfg(not(miri))] // Miri cannot mmap files - fn mapped_file_read() { - let mut f = TempFile::new().unwrap().into_file(); - let sample_buf = &[1, 2, 3, 4, 5]; - assert!(f.write_all(sample_buf).is_ok()); - - let file = Some(FileOffset::new(f, 0)); - let mem_map = GuestRegionMmap::from_range(GuestAddress(0), sample_buf.len(), file).unwrap(); - let buf = &mut [0u8; 16]; - assert_eq!( - mem_map.as_volatile_slice().unwrap().read(buf, 0).unwrap(), - sample_buf.len() - ); - assert_eq!(buf[0..sample_buf.len()], sample_buf[..]); - } - - #[test] - fn test_to_region_addr() { - let f1 = TempFile::new().unwrap().into_file(); - f1.set_len(0x400).unwrap(); - let f2 = TempFile::new().unwrap().into_file(); - f2.set_len(0x400).unwrap(); - - let start_addr1 = GuestAddress(0x0); - let start_addr2 = GuestAddress(0x800); - let guest_mem = - GuestMemoryMmap::from_ranges(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap(); - let guest_mem_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[ - (start_addr1, 0x400, Some(FileOffset::new(f1, 0))), - (start_addr2, 0x400, Some(FileOffset::new(f2, 0))), - ]) - .unwrap(); - - let guest_mem_list = [guest_mem, guest_mem_backed_by_file]; - for guest_mem in guest_mem_list.iter() { - assert!(guest_mem.to_region_addr(GuestAddress(0x600)).is_none()); - let (r0, addr0) = guest_mem.to_region_addr(GuestAddress(0x800)).unwrap(); - let (r1, addr1) = guest_mem.to_region_addr(GuestAddress(0xa00)).unwrap(); - assert!(r0.as_ptr() == r1.as_ptr()); - assert_eq!(addr0, MemoryRegionAddress(0)); - assert_eq!(addr1, MemoryRegionAddress(0x200)); - } - } - - #[test] - fn test_get_host_address() { - let f1 = TempFile::new().unwrap().into_file(); - f1.set_len(0x400).unwrap(); - let f2 = TempFile::new().unwrap().into_file(); - f2.set_len(0x400).unwrap(); - - let start_addr1 = GuestAddress(0x0); - let start_addr2 = GuestAddress(0x800); - let guest_mem = - GuestMemoryMmap::from_ranges(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap(); - let guest_mem_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[ - (start_addr1, 0x400, Some(FileOffset::new(f1, 0))), - (start_addr2, 0x400, Some(FileOffset::new(f2, 0))), - ]) - .unwrap(); - - let guest_mem_list = [guest_mem, guest_mem_backed_by_file]; - for guest_mem in 
guest_mem_list.iter() { - assert!(guest_mem.get_host_address(GuestAddress(0x600)).is_err()); - let ptr0 = guest_mem.get_host_address(GuestAddress(0x800)).unwrap(); - let ptr1 = guest_mem.get_host_address(GuestAddress(0xa00)).unwrap(); - assert_eq!( - ptr0, - guest_mem.find_region(GuestAddress(0x800)).unwrap().as_ptr() - ); - assert_eq!(unsafe { ptr0.offset(0x200) }, ptr1); - } - } - - #[test] - fn test_check_range() { - let start_addr1 = GuestAddress(0); - let start_addr2 = GuestAddress(0x800); - let start_addr3 = GuestAddress(0xc00); - let guest_mem = GuestMemoryMmap::from_ranges(&[ - (start_addr1, 0x400), - (start_addr2, 0x400), - (start_addr3, 0x400), - ]) - .unwrap(); - - assert!(guest_mem.check_range(start_addr1, 0x0)); - assert!(guest_mem.check_range(start_addr1, 0x200)); - assert!(guest_mem.check_range(start_addr1, 0x400)); - assert!(!guest_mem.check_range(start_addr1, 0xa00)); - assert!(guest_mem.check_range(start_addr2, 0x7ff)); - assert!(guest_mem.check_range(start_addr2, 0x800)); - assert!(!guest_mem.check_range(start_addr2, 0x801)); - assert!(!guest_mem.check_range(start_addr2, 0xc00)); - assert!(!guest_mem.check_range(start_addr1, usize::MAX)); - } - - #[test] - fn test_deref() { - let f = TempFile::new().unwrap().into_file(); - f.set_len(0x400).unwrap(); - - let start_addr = GuestAddress(0x0); - let guest_mem = GuestMemoryMmap::from_ranges(&[(start_addr, 0x400)]).unwrap(); - let guest_mem_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[( - start_addr, - 0x400, - Some(FileOffset::new(f, 0)), - )]) - .unwrap(); - - let guest_mem_list = [guest_mem, guest_mem_backed_by_file]; - for guest_mem in guest_mem_list.iter() { - let sample_buf = &[1, 2, 3, 4, 5]; - - assert_eq!(guest_mem.write(sample_buf, start_addr).unwrap(), 5); - let slice = guest_mem - .find_region(GuestAddress(0)) - .unwrap() - .as_volatile_slice() - .unwrap(); - - let buf = &mut [0, 0, 0, 0, 0]; - assert_eq!(slice.read(buf, 0).unwrap(), 5); - assert_eq!(buf, sample_buf); - } - } - - #[test] - fn test_read_u64() { - let f1 = TempFile::new().unwrap().into_file(); - f1.set_len(0x1000).unwrap(); - let f2 = TempFile::new().unwrap().into_file(); - f2.set_len(0x1000).unwrap(); - - let start_addr1 = GuestAddress(0x0); - let start_addr2 = GuestAddress(0x1000); - let bad_addr = GuestAddress(0x2001); - let bad_addr2 = GuestAddress(0x1ffc); - let max_addr = GuestAddress(0x2000); - - let gm = - GuestMemoryMmap::from_ranges(&[(start_addr1, 0x1000), (start_addr2, 0x1000)]).unwrap(); - let gm_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[ - (start_addr1, 0x1000, Some(FileOffset::new(f1, 0))), - (start_addr2, 0x1000, Some(FileOffset::new(f2, 0))), - ]) - .unwrap(); - - let gm_list = [gm, gm_backed_by_file]; - for gm in gm_list.iter() { - let val1: u64 = 0xaa55_aa55_aa55_aa55; - let val2: u64 = 0x55aa_55aa_55aa_55aa; - assert_matches!( - gm.write_obj(val1, bad_addr).unwrap_err(), - GuestMemoryError::InvalidGuestAddress(addr) if addr == bad_addr - ); - assert_matches!( - gm.write_obj(val1, bad_addr2).unwrap_err(), - GuestMemoryError::PartialBuffer { expected, completed } if expected == size_of::() && completed == max_addr.checked_offset_from(bad_addr2).unwrap() as usize); - - gm.write_obj(val1, GuestAddress(0x500)).unwrap(); - gm.write_obj(val2, GuestAddress(0x1000 + 32)).unwrap(); - let num1: u64 = gm.read_obj(GuestAddress(0x500)).unwrap(); - let num2: u64 = gm.read_obj(GuestAddress(0x1000 + 32)).unwrap(); - assert_eq!(val1, num1); - assert_eq!(val2, num2); - } - } - - #[test] - fn write_and_read() { - let f = 
TempFile::new().unwrap().into_file(); - f.set_len(0x400).unwrap(); - - let mut start_addr = GuestAddress(0x1000); - let gm = GuestMemoryMmap::from_ranges(&[(start_addr, 0x400)]).unwrap(); - let gm_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[( - start_addr, - 0x400, - Some(FileOffset::new(f, 0)), - )]) - .unwrap(); - - let gm_list = [gm, gm_backed_by_file]; - for gm in gm_list.iter() { - let sample_buf = &[1, 2, 3, 4, 5]; - - assert_eq!(gm.write(sample_buf, start_addr).unwrap(), 5); - - let buf = &mut [0u8; 5]; - assert_eq!(gm.read(buf, start_addr).unwrap(), 5); - assert_eq!(buf, sample_buf); - - start_addr = GuestAddress(0x13ff); - assert_eq!(gm.write(sample_buf, start_addr).unwrap(), 1); - assert_eq!(gm.read(buf, start_addr).unwrap(), 1); - assert_eq!(buf[0], sample_buf[0]); - start_addr = GuestAddress(0x1000); - } - } - - #[test] - #[cfg(feature = "rawfd")] - #[cfg(not(miri))] - fn read_to_and_write_from_mem() { - use std::mem; - - let f = TempFile::new().unwrap().into_file(); - f.set_len(0x400).unwrap(); - - let gm = GuestMemoryMmap::from_ranges(&[(GuestAddress(0x1000), 0x400)]).unwrap(); - let gm_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[( - GuestAddress(0x1000), - 0x400, - Some(FileOffset::new(f, 0)), - )]) - .unwrap(); - - let gm_list = [gm, gm_backed_by_file]; - for gm in gm_list.iter() { - let addr = GuestAddress(0x1010); - let mut file = if cfg!(target_family = "unix") { - File::open(Path::new("/dev/zero")).unwrap() - } else { - File::open(Path::new("c:\\Windows\\system32\\ntoskrnl.exe")).unwrap() - }; - gm.write_obj(!0u32, addr).unwrap(); - gm.read_exact_volatile_from(addr, &mut file, mem::size_of::()) - .unwrap(); - let value: u32 = gm.read_obj(addr).unwrap(); - if cfg!(target_family = "unix") { - assert_eq!(value, 0); - } else { - assert_eq!(value, 0x0090_5a4d); - } - - let mut sink = vec![0; mem::size_of::()]; - gm.write_all_volatile_to(addr, &mut sink.as_mut_slice(), mem::size_of::()) - .unwrap(); - if cfg!(target_family = "unix") { - assert_eq!(sink, vec![0; mem::size_of::()]); - } else { - assert_eq!(sink, vec![0x4d, 0x5a, 0x90, 0x00]); - }; - } - } - - #[test] - fn test_access_cross_boundary() { - let f1 = TempFile::new().unwrap().into_file(); - f1.set_len(0x1000).unwrap(); - let f2 = TempFile::new().unwrap().into_file(); - f2.set_len(0x1000).unwrap(); - - let start_addr1 = GuestAddress(0x0); - let start_addr2 = GuestAddress(0x1000); - let gm = - GuestMemoryMmap::from_ranges(&[(start_addr1, 0x1000), (start_addr2, 0x1000)]).unwrap(); - let gm_backed_by_file = GuestMemoryMmap::from_ranges_with_files(&[ - (start_addr1, 0x1000, Some(FileOffset::new(f1, 0))), - (start_addr2, 0x1000, Some(FileOffset::new(f2, 0))), - ]) - .unwrap(); - - let gm_list = [gm, gm_backed_by_file]; - for gm in gm_list.iter() { - let sample_buf = &[1, 2, 3, 4, 5]; - assert_eq!(gm.write(sample_buf, GuestAddress(0xffc)).unwrap(), 5); - let buf = &mut [0u8; 5]; - assert_eq!(gm.read(buf, GuestAddress(0xffc)).unwrap(), 5); - assert_eq!(buf, sample_buf); - } - } - - #[test] - fn test_retrieve_fd_backing_memory_region() { - let f = TempFile::new().unwrap().into_file(); - f.set_len(0x400).unwrap(); - - let start_addr = GuestAddress(0x0); - let gm = GuestMemoryMmap::from_ranges(&[(start_addr, 0x400)]).unwrap(); - assert!(gm.find_region(start_addr).is_some()); - let region = gm.find_region(start_addr).unwrap(); - assert!(region.file_offset().is_none()); - - let gm = GuestMemoryMmap::from_ranges_with_files(&[( - start_addr, - 0x400, - Some(FileOffset::new(f, 0)), - )]) - .unwrap(); - 
assert!(gm.find_region(start_addr).is_some()); - let region = gm.find_region(start_addr).unwrap(); - assert!(region.file_offset().is_some()); - } - - // Windows needs a dedicated test where it will retrieve the allocation - // granularity to determine a proper offset (other than 0) that can be - // used for the backing file. Refer to Microsoft docs here: - // https://docs.microsoft.com/en-us/windows/desktop/api/memoryapi/nf-memoryapi-mapviewoffile - #[test] - #[cfg(target_family = "unix")] - fn test_retrieve_offset_from_fd_backing_memory_region() { - let f = TempFile::new().unwrap().into_file(); - f.set_len(0x1400).unwrap(); - // Needs to be aligned on 4k, otherwise mmap will fail. - let offset = 0x1000; - - let start_addr = GuestAddress(0x0); - let gm = GuestMemoryMmap::from_ranges(&[(start_addr, 0x400)]).unwrap(); - assert!(gm.find_region(start_addr).is_some()); - let region = gm.find_region(start_addr).unwrap(); - assert!(region.file_offset().is_none()); - - let gm = GuestMemoryMmap::from_ranges_with_files(&[( - start_addr, - 0x400, - Some(FileOffset::new(f, offset)), - )]) - .unwrap(); - assert!(gm.find_region(start_addr).is_some()); - let region = gm.find_region(start_addr).unwrap(); - assert!(region.file_offset().is_some()); - assert_eq!(region.file_offset().unwrap().start(), offset); - } - - #[test] - fn test_guest_memory_mmap_get_slice() { - let region = GuestRegionMmap::from_range(GuestAddress(0), 0x400, None).unwrap(); - - // Normal case. - let slice_addr = MemoryRegionAddress(0x100); - let slice_size = 0x200; - let slice = region.get_slice(slice_addr, slice_size).unwrap(); - assert_eq!(slice.len(), slice_size); - - // Empty slice. - let slice_addr = MemoryRegionAddress(0x200); - let slice_size = 0x0; - let slice = region.get_slice(slice_addr, slice_size).unwrap(); - assert!(slice.is_empty()); - - // Error case when slice_size is beyond the boundary. - let slice_addr = MemoryRegionAddress(0x300); - let slice_size = 0x200; - assert!(region.get_slice(slice_addr, slice_size).is_err()); - } - - #[test] - fn test_guest_memory_mmap_as_volatile_slice() { - let region_size = 0x400; - let region = GuestRegionMmap::from_range(GuestAddress(0), region_size, None).unwrap(); - - // Test slice length. - let slice = region.as_volatile_slice().unwrap(); - assert_eq!(slice.len(), region_size); - - // Test slice data. - let v = 0x1234_5678u32; - let r = slice.get_ref::(0x200).unwrap(); - r.store(v); - assert_eq!(r.load(), v); - } - - #[test] - fn test_guest_memory_get_slice() { - let start_addr1 = GuestAddress(0); - let start_addr2 = GuestAddress(0x800); - let guest_mem = - GuestMemoryMmap::from_ranges(&[(start_addr1, 0x400), (start_addr2, 0x400)]).unwrap(); - - // Normal cases. - let slice_size = 0x200; - let slice = guest_mem - .get_slice(GuestAddress(0x100), slice_size) - .unwrap(); - assert_eq!(slice.len(), slice_size); - - let slice_size = 0x400; - let slice = guest_mem - .get_slice(GuestAddress(0x800), slice_size) - .unwrap(); - assert_eq!(slice.len(), slice_size); - - // Empty slice. - assert!(guest_mem - .get_slice(GuestAddress(0x900), 0) - .unwrap() - .is_empty()); - - // Error cases, wrong size or base address. 
- assert!(guest_mem.get_slice(GuestAddress(0), 0x500).is_err()); - assert!(guest_mem.get_slice(GuestAddress(0x600), 0x100).is_err()); - assert!(guest_mem.get_slice(GuestAddress(0xc00), 0x100).is_err()); - } - - #[test] - fn test_guest_memory_get_slices() { - let start_addr1 = GuestAddress(0); - let start_addr2 = GuestAddress(0x800); - let start_addr3 = GuestAddress(0xc00); - let guest_mem = GuestMemoryMmap::from_ranges(&[ - (start_addr1, 0x400), - (start_addr2, 0x400), - (start_addr3, 0x400), - ]) - .unwrap(); - - // Same cases as `test_guest_memory_get_slice()`, just with `get_slices()`. - let slice_size = 0x200; - let mut slices = guest_mem.get_slices(GuestAddress(0x100), slice_size); - let slice = slices.next().unwrap().unwrap(); - assert!(slices.next().is_none()); - assert_eq!(slice.len(), slice_size); - - let slice_size = 0x400; - let mut slices = guest_mem.get_slices(GuestAddress(0x800), slice_size); - let slice = slices.next().unwrap().unwrap(); - assert!(slices.next().is_none()); - assert_eq!(slice.len(), slice_size); - - // Empty iterator. - assert!(guest_mem - .get_slices(GuestAddress(0x900), 0) - .next() - .is_none()); - - // Error cases, wrong size or base address. - let mut slices = guest_mem.get_slices(GuestAddress(0), 0x500); - assert_eq!(slices.next().unwrap().unwrap().len(), 0x400); - assert!(slices.next().unwrap().is_err()); - assert!(slices.next().is_none()); - let mut slices = guest_mem.get_slices(GuestAddress(0x600), 0x100); - assert!(slices.next().unwrap().is_err()); - assert!(slices.next().is_none()); - let mut slices = guest_mem.get_slices(GuestAddress(0x1000), 0x100); - assert!(slices.next().unwrap().is_err()); - assert!(slices.next().is_none()); - - // Test fragmented case - let mut slices = guest_mem.get_slices(GuestAddress(0xa00), 0x400); - assert_eq!(slices.next().unwrap().unwrap().len(), 0x200); - assert_eq!(slices.next().unwrap().unwrap().len(), 0x200); - assert!(slices.next().is_none()); - } - - #[test] - fn test_atomic_accesses() { - let region = GuestRegionMmap::from_range(GuestAddress(0), 0x1000, None).unwrap(); - - crate::bytes::tests::check_atomic_accesses( - region, - MemoryRegionAddress(0), - MemoryRegionAddress(0x1000), - ); - } - - #[test] - #[cfg(feature = "backend-bitmap")] - fn test_dirty_tracking() { - crate::bitmap::tests::test_guest_memory_and_region(|| { - crate::GuestMemoryMmap::::from_ranges(&[(GuestAddress(0), 0x1_0000)]) - .unwrap() - }); - } -} diff --git a/src/mmap/unix.rs b/src/mmap/unix.rs deleted file mode 100644 index eea4788e..00000000 --- a/src/mmap/unix.rs +++ /dev/null @@ -1,659 +0,0 @@ -// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved. -// -// Portions Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. -// -// Portions Copyright 2017 The Chromium OS Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE-BSD-3-Clause file. -// -// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause - -//! Helper structure for working with mmaped memory regions in Unix. - -use std::io; -use std::os::unix::io::AsRawFd; -use std::ptr::null_mut; -use std::result; - -use crate::bitmap::{Bitmap, NewBitmap, BS}; -use crate::guest_memory::FileOffset; -use crate::volatile_memory::{self, VolatileMemory, VolatileSlice}; - -/// Error conditions that may arise when creating a new `MmapRegion` object. -#[derive(Debug, thiserror::Error)] -pub enum Error { - /// The specified file offset and length cause overflow when added. 
- #[error("The specified file offset and length cause overflow when added")] - InvalidOffsetLength, - /// The specified pointer to the mapping is not page-aligned. - #[error("The specified pointer to the mapping is not page-aligned")] - InvalidPointer, - /// The forbidden `MAP_FIXED` flag was specified. - #[error("The forbidden `MAP_FIXED` flag was specified")] - MapFixed, - /// Mappings using the same fd overlap in terms of file offset and length. - #[error("Mappings using the same fd overlap in terms of file offset and length")] - MappingOverlap, - /// A mapping with offset + length > EOF was attempted. - #[error("The specified file offset and length is greater then file length")] - MappingPastEof, - /// The `mmap` call returned an error. - #[error("{0}")] - Mmap(io::Error), -} - -pub type Result = result::Result; - -/// A factory struct to build `MmapRegion` objects. -#[derive(Debug)] -pub struct MmapRegionBuilder { - size: usize, - prot: i32, - flags: i32, - file_offset: Option, - raw_ptr: Option<*mut u8>, - hugetlbfs: Option, - bitmap: B, -} - -impl MmapRegionBuilder { - /// Create a new `MmapRegionBuilder` using the default value for - /// the inner `Bitmap` object. - pub fn new(size: usize) -> Self { - Self::new_with_bitmap(size, B::default()) - } -} - -impl MmapRegionBuilder { - /// Create a new `MmapRegionBuilder` using the provided `Bitmap` object. - /// - /// When instantiating the builder for a region that does not require dirty bitmap - /// bitmap tracking functionality, we can specify a trivial `Bitmap` implementation - /// such as `()`. - pub fn new_with_bitmap(size: usize, bitmap: B) -> Self { - MmapRegionBuilder { - size, - prot: 0, - flags: libc::MAP_ANONYMOUS | libc::MAP_PRIVATE, - file_offset: None, - raw_ptr: None, - hugetlbfs: None, - bitmap, - } - } - - /// Create the `MmapRegion` object with the specified mmap memory protection flag `prot`. - pub fn with_mmap_prot(mut self, prot: i32) -> Self { - self.prot = prot; - self - } - - /// Create the `MmapRegion` object with the specified mmap `flags`. - pub fn with_mmap_flags(mut self, flags: i32) -> Self { - self.flags = flags; - self - } - - /// Create the `MmapRegion` object with the specified `file_offset`. - pub fn with_file_offset(mut self, file_offset: FileOffset) -> Self { - self.file_offset = Some(file_offset); - self - } - - /// Create the `MmapRegion` object with the specified `hugetlbfs` flag. - pub fn with_hugetlbfs(mut self, hugetlbfs: bool) -> Self { - self.hugetlbfs = Some(hugetlbfs); - self - } - - /// Create the `MmapRegion` object with pre-mmapped raw pointer. - /// - /// # Safety - /// - /// To use this safely, the caller must guarantee that `raw_addr` and `self.size` define a - /// region within a valid mapping that is already present in the process. - pub unsafe fn with_raw_mmap_pointer(mut self, raw_ptr: *mut u8) -> Self { - self.raw_ptr = Some(raw_ptr); - self - } - - /// Build the `MmapRegion` object. - pub fn build(self) -> Result> { - if self.raw_ptr.is_some() { - return self.build_raw(); - } - - // Forbid MAP_FIXED, as it doesn't make sense in this context, and is pretty dangerous - // in general. 
- if self.flags & libc::MAP_FIXED != 0 { - return Err(Error::MapFixed); - } - - let (fd, offset) = if let Some(ref f_off) = self.file_offset { - (f_off.file().as_raw_fd(), f_off.start()) - } else { - (-1, 0) - }; - - #[cfg(not(miri))] - // SAFETY: This is safe because we're not allowing MAP_FIXED, and invalid parameters - // cannot break Rust safety guarantees (things may change if we're mapping /dev/mem or - // some wacky file). - let addr = unsafe { - libc::mmap( - null_mut(), - self.size, - self.prot, - self.flags, - fd, - offset as libc::off_t, - ) - }; - - #[cfg(not(miri))] - if addr == libc::MAP_FAILED { - return Err(Error::Mmap(io::Error::last_os_error())); - } - - #[cfg(miri)] - if self.size == 0 { - return Err(Error::Mmap(io::Error::from_raw_os_error(libc::EINVAL))); - } - - // Miri does not support the mmap syscall, so we use rust's allocator for miri tests - #[cfg(miri)] - let addr = unsafe { - std::alloc::alloc_zeroed(std::alloc::Layout::from_size_align(self.size, 8).unwrap()) - }; - - Ok(MmapRegion { - addr: addr as *mut u8, - size: self.size, - bitmap: self.bitmap, - file_offset: self.file_offset, - prot: self.prot, - flags: self.flags, - owned: true, - hugetlbfs: self.hugetlbfs, - }) - } - - fn build_raw(self) -> Result> { - // SAFETY: Safe because this call just returns the page size and doesn't have any side - // effects. - let page_size = unsafe { libc::sysconf(libc::_SC_PAGESIZE) } as usize; - let addr = self.raw_ptr.unwrap(); - - // Check that the pointer to the mapping is page-aligned. - if (addr as usize) & (page_size - 1) != 0 { - return Err(Error::InvalidPointer); - } - - Ok(MmapRegion { - addr, - size: self.size, - bitmap: self.bitmap, - file_offset: self.file_offset, - prot: self.prot, - flags: self.flags, - owned: false, - hugetlbfs: self.hugetlbfs, - }) - } -} - -/// Helper structure for working with mmaped memory regions in Unix. -/// -/// The structure is used for accessing the guest's physical memory by mmapping it into -/// the current process. -/// -/// # Limitations -/// When running a 64-bit virtual machine on a 32-bit hypervisor, only part of the guest's -/// physical memory may be mapped into the current process due to the limited virtual address -/// space size of the process. -#[derive(Debug)] -pub struct MmapRegion { - addr: *mut u8, - size: usize, - bitmap: B, - file_offset: Option, - prot: i32, - flags: i32, - owned: bool, - hugetlbfs: Option, -} - -// SAFETY: Send and Sync aren't automatically inherited for the raw address pointer. -// Accessing that pointer is only done through the stateless interface which -// allows the object to be shared by multiple threads without a decrease in -// safety. -unsafe impl Send for MmapRegion {} -// SAFETY: See comment above. -unsafe impl Sync for MmapRegion {} - -impl MmapRegion { - /// Creates a shared anonymous mapping of `size` bytes. - /// - /// # Arguments - /// * `size` - The size of the memory region in bytes. - pub fn new(size: usize) -> Result { - MmapRegionBuilder::new_with_bitmap(size, B::with_len(size)) - .with_mmap_prot(libc::PROT_READ | libc::PROT_WRITE) - .with_mmap_flags(libc::MAP_ANONYMOUS | libc::MAP_NORESERVE | libc::MAP_PRIVATE) - .build() - } - - /// Creates a shared file mapping of `size` bytes. - /// - /// # Arguments - /// * `file_offset` - The mapping will be created at offset `file_offset.start` in the file - /// referred to by `file_offset.file`. - /// * `size` - The size of the memory region in bytes. 
- pub fn from_file(file_offset: FileOffset, size: usize) -> Result { - MmapRegionBuilder::new_with_bitmap(size, B::with_len(size)) - .with_file_offset(file_offset) - .with_mmap_prot(libc::PROT_READ | libc::PROT_WRITE) - .with_mmap_flags(libc::MAP_NORESERVE | libc::MAP_SHARED) - .build() - } - - /// Creates a mapping based on the provided arguments. - /// - /// # Arguments - /// * `file_offset` - if provided, the method will create a file mapping at offset - /// `file_offset.start` in the file referred to by `file_offset.file`. - /// * `size` - The size of the memory region in bytes. - /// * `prot` - The desired memory protection of the mapping. - /// * `flags` - This argument determines whether updates to the mapping are visible to other - /// processes mapping the same region, and whether updates are carried through to - /// the underlying file. - pub fn build( - file_offset: Option, - size: usize, - prot: i32, - flags: i32, - ) -> Result { - let mut builder = MmapRegionBuilder::new_with_bitmap(size, B::with_len(size)) - .with_mmap_prot(prot) - .with_mmap_flags(flags); - if let Some(v) = file_offset { - builder = builder.with_file_offset(v); - } - builder.build() - } - - /// Creates a `MmapRegion` instance for an externally managed mapping. - /// - /// This method is intended to be used exclusively in situations in which the mapping backing - /// the region is provided by an entity outside the control of the caller (e.g. the dynamic - /// linker). - /// - /// # Arguments - /// * `addr` - Pointer to the start of the mapping. Must be page-aligned. - /// * `size` - The size of the memory region in bytes. - /// * `prot` - Must correspond to the memory protection attributes of the existing mapping. - /// * `flags` - Must correspond to the flags that were passed to `mmap` for the creation of - /// the existing mapping. - /// - /// # Safety - /// - /// To use this safely, the caller must guarantee that `addr` and `size` define a region within - /// a valid mapping that is already present in the process. - pub unsafe fn build_raw(addr: *mut u8, size: usize, prot: i32, flags: i32) -> Result { - MmapRegionBuilder::new_with_bitmap(size, B::with_len(size)) - .with_raw_mmap_pointer(addr) - .with_mmap_prot(prot) - .with_mmap_flags(flags) - .build() - } -} - -impl MmapRegion { - /// Returns a pointer to the beginning of the memory region. Mutable accesses performed - /// using the resulting pointer are not automatically accounted for by the dirty bitmap - /// tracking functionality. - /// - /// Should only be used for passing this region to ioctls for setting guest memory. - pub fn as_ptr(&self) -> *mut u8 { - self.addr - } - - /// Returns the size of this region. - pub fn size(&self) -> usize { - self.size - } - - /// Returns information regarding the offset into the file backing this region (if any). - pub fn file_offset(&self) -> Option<&FileOffset> { - self.file_offset.as_ref() - } - - /// Returns the value of the `prot` parameter passed to `mmap` when mapping this region. - pub fn prot(&self) -> i32 { - self.prot - } - - /// Returns the value of the `flags` parameter passed to `mmap` when mapping this region. - pub fn flags(&self) -> i32 { - self.flags - } - - /// Returns `true` if the mapping is owned by this `MmapRegion` instance. - pub fn owned(&self) -> bool { - self.owned - } - - /// Checks whether this region and `other` are backed by overlapping - /// [`FileOffset`](struct.FileOffset.html) objects. 
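A sketch of the builder path for a plain anonymous mapping, assuming a unix, non-xen build with the `libc` crate available for the protection and mapping flag constants:

```rust
use vm_memory::mmap::MmapRegionBuilder;
use vm_memory::VolatileMemory;

// 4 KiB anonymous private mapping with explicit prot/flags and no dirty-bitmap tracking.
let region = MmapRegionBuilder::<()>::new(0x1000)
    .with_mmap_prot(libc::PROT_READ | libc::PROT_WRITE)
    .with_mmap_flags(libc::MAP_ANONYMOUS | libc::MAP_NORESERVE | libc::MAP_PRIVATE)
    .build()
    .unwrap();

assert_eq!(region.size(), 0x1000);
assert!(region.owned());

// The mapping is only ever accessed through volatile accessors.
let slice = region.get_slice(0, 16).unwrap();
assert_eq!(slice.len(), 16);
```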
- /// - /// This is mostly a sanity check available for convenience, as different file descriptors - /// can alias the same file. - pub fn fds_overlap(&self, other: &MmapRegion) -> bool { - if let Some(f_off1) = self.file_offset() { - if let Some(f_off2) = other.file_offset() { - if f_off1.file().as_raw_fd() == f_off2.file().as_raw_fd() { - let s1 = f_off1.start(); - let s2 = f_off2.start(); - let l1 = self.len() as u64; - let l2 = other.len() as u64; - - if s1 < s2 { - return s1 + l1 > s2; - } else { - return s2 + l2 > s1; - } - } - } - } - false - } - - /// Set the hugetlbfs of the region - pub fn set_hugetlbfs(&mut self, hugetlbfs: bool) { - self.hugetlbfs = Some(hugetlbfs) - } - - /// Returns `true` if the region is hugetlbfs - pub fn is_hugetlbfs(&self) -> Option { - self.hugetlbfs - } - - /// Returns a reference to the inner bitmap object. - pub fn bitmap(&self) -> &B { - &self.bitmap - } -} - -impl VolatileMemory for MmapRegion { - type B = B; - - fn len(&self) -> usize { - self.size - } - - fn get_slice( - &self, - offset: usize, - count: usize, - ) -> volatile_memory::Result>> { - let _ = self.compute_end_offset(offset, count)?; - - Ok( - // SAFETY: Safe because we checked that offset + count was within our range and we only - // ever hand out volatile accessors. - unsafe { - VolatileSlice::with_bitmap( - self.addr.add(offset), - count, - self.bitmap.slice_at(offset), - None, - ) - }, - ) - } -} - -impl Drop for MmapRegion { - fn drop(&mut self) { - if self.owned { - // SAFETY: This is safe because we mmap the area at addr ourselves, and nobody - // else is holding a reference to it. - unsafe { - #[cfg(not(miri))] - libc::munmap(self.addr as *mut libc::c_void, self.size); - - #[cfg(miri)] - std::alloc::dealloc( - self.addr, - std::alloc::Layout::from_size_align(self.size, 8).unwrap(), - ); - } - } - } -} - -#[cfg(test)] -mod tests { - #![allow(clippy::undocumented_unsafe_blocks)] - use super::*; - - use std::io::Write; - use std::slice; - use std::sync::Arc; - use vmm_sys_util::tempfile::TempFile; - - #[cfg(feature = "backend-bitmap")] - use crate::bitmap::AtomicBitmap; - - use matches::assert_matches; - - type MmapRegion = super::MmapRegion<()>; - - impl Error { - /// Helper method to extract the errno within an - /// `Error::Mmap(e)`. Returns `i32::MIN` if `self` is any - /// other variant. 
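The overlap check above is the classic half-open interval test. A hypothetical standalone mirror of that rule, with an illustrative name and signature that are not part of the crate:

```rust
// Two ranges [s1, s1 + l1) and [s2, s2 + l2) on the same fd overlap exactly when
// the range that starts first reaches past the start of the other.
fn ranges_overlap(s1: u64, l1: u64, s2: u64, l2: u64) -> bool {
    if s1 < s2 {
        s1 + l1 > s2
    } else {
        s2 + l2 > s1
    }
}

assert!(ranges_overlap(0, 5000, 4096, 4096)); // 0..5000 spills into 4096..8192
assert!(!ranges_overlap(0, 4096, 4096, 4096)); // back-to-back ranges do not overlap
assert!(ranges_overlap(4096, 4096, 0, 8192)); // containment also counts as overlap
```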
- pub fn raw_os_error(&self) -> i32 { - match self { - Error::Mmap(e) => e.raw_os_error().unwrap(), - _ => i32::MIN, - } - } - } - - #[test] - fn test_mmap_region_new() { - assert!(MmapRegion::new(0).is_err()); - - let size = 4096; - - let r = MmapRegion::new(4096).unwrap(); - assert_eq!(r.size(), size); - assert!(r.file_offset().is_none()); - assert_eq!(r.prot(), libc::PROT_READ | libc::PROT_WRITE); - assert_eq!( - r.flags(), - libc::MAP_ANONYMOUS | libc::MAP_NORESERVE | libc::MAP_PRIVATE - ); - } - - #[test] - fn test_mmap_region_set_hugetlbfs() { - assert!(MmapRegion::new(0).is_err()); - - let size = 4096; - - let r = MmapRegion::new(size).unwrap(); - assert_eq!(r.size(), size); - assert!(r.file_offset().is_none()); - assert_eq!(r.prot(), libc::PROT_READ | libc::PROT_WRITE); - assert_eq!( - r.flags(), - libc::MAP_ANONYMOUS | libc::MAP_NORESERVE | libc::MAP_PRIVATE - ); - assert_eq!(r.is_hugetlbfs(), None); - - let mut r = MmapRegion::new(size).unwrap(); - r.set_hugetlbfs(false); - assert_eq!(r.size(), size); - assert!(r.file_offset().is_none()); - assert_eq!(r.prot(), libc::PROT_READ | libc::PROT_WRITE); - assert_eq!( - r.flags(), - libc::MAP_ANONYMOUS | libc::MAP_NORESERVE | libc::MAP_PRIVATE - ); - assert_eq!(r.is_hugetlbfs(), Some(false)); - - let mut r = MmapRegion::new(size).unwrap(); - r.set_hugetlbfs(true); - assert_eq!(r.size(), size); - assert!(r.file_offset().is_none()); - assert_eq!(r.prot(), libc::PROT_READ | libc::PROT_WRITE); - assert_eq!( - r.flags(), - libc::MAP_ANONYMOUS | libc::MAP_NORESERVE | libc::MAP_PRIVATE - ); - assert_eq!(r.is_hugetlbfs(), Some(true)); - } - - #[test] - #[cfg(not(miri))] // Miri cannot mmap files - fn test_mmap_region_from_file() { - let mut f = TempFile::new().unwrap().into_file(); - let offset: usize = 0; - let buf1 = [1u8, 2, 3, 4, 5]; - - f.write_all(buf1.as_ref()).unwrap(); - let r = MmapRegion::from_file(FileOffset::new(f, offset as u64), buf1.len()).unwrap(); - - assert_eq!(r.size(), buf1.len() - offset); - assert_eq!(r.file_offset().unwrap().start(), offset as u64); - assert_eq!(r.prot(), libc::PROT_READ | libc::PROT_WRITE); - assert_eq!(r.flags(), libc::MAP_NORESERVE | libc::MAP_SHARED); - - let buf2 = unsafe { slice::from_raw_parts(r.as_ptr(), buf1.len() - offset) }; - assert_eq!(&buf1[offset..], buf2); - } - - #[test] - #[cfg(not(miri))] // Miri cannot mmap files - #[cfg(feature = "backend-bitmap")] - fn test_mmap_region_build() { - let a = Arc::new(TempFile::new().unwrap().into_file()); - - let prot = libc::PROT_READ | libc::PROT_WRITE; - let flags = libc::MAP_NORESERVE | libc::MAP_PRIVATE; - let offset = 4096; - let size = 1000; - - // Offset + size will overflow. - let r = MmapRegion::build( - Some(FileOffset::from_arc(a.clone(), u64::MAX)), - size, - prot, - flags, - ); - assert_matches!(r.unwrap_err(), Error::Mmap(err) if err.raw_os_error() == Some(libc::EINVAL)); - - // MAP_FIXED was specified among the flags. - let r = MmapRegion::build( - Some(FileOffset::from_arc(a.clone(), offset)), - size, - prot, - flags | libc::MAP_FIXED, - ); - assert_matches!(r.unwrap_err(), Error::MapFixed); - - // Let's resize the file. - assert_eq!(unsafe { libc::ftruncate(a.as_raw_fd(), 1024 * 10) }, 0); - - // The offset is not properly aligned. - let r = MmapRegion::build( - Some(FileOffset::from_arc(a.clone(), offset - 1)), - size, - prot, - flags, - ); - assert_eq!(r.unwrap_err().raw_os_error(), libc::EINVAL); - - // The build should be successful now. 
- let r = - MmapRegion::build(Some(FileOffset::from_arc(a, offset)), size, prot, flags).unwrap(); - - assert_eq!(r.size(), size); - assert_eq!(r.file_offset().unwrap().start(), offset); - assert_eq!(r.prot(), libc::PROT_READ | libc::PROT_WRITE); - assert_eq!(r.flags(), libc::MAP_NORESERVE | libc::MAP_PRIVATE); - assert!(r.owned()); - - let region_size = 0x10_0000; - let bitmap = AtomicBitmap::new(region_size, std::num::NonZeroUsize::new(0x1000).unwrap()); - let builder = MmapRegionBuilder::new_with_bitmap(region_size, bitmap) - .with_hugetlbfs(true) - .with_mmap_prot(libc::PROT_READ | libc::PROT_WRITE); - assert_eq!(builder.size, region_size); - assert_eq!(builder.hugetlbfs, Some(true)); - assert_eq!(builder.prot, libc::PROT_READ | libc::PROT_WRITE); - - crate::bitmap::tests::test_volatile_memory(&(builder.build().unwrap())); - } - - #[test] - #[cfg(not(miri))] // Causes warnings due to the pointer casts - fn test_mmap_region_build_raw() { - let addr = 0; - let size = unsafe { libc::sysconf(libc::_SC_PAGESIZE) as usize }; - let prot = libc::PROT_READ | libc::PROT_WRITE; - let flags = libc::MAP_NORESERVE | libc::MAP_PRIVATE; - - let r = unsafe { MmapRegion::build_raw((addr + 1) as *mut u8, size, prot, flags) }; - assert_matches!(r.unwrap_err(), Error::InvalidPointer); - - let r = unsafe { MmapRegion::build_raw(addr as *mut u8, size, prot, flags).unwrap() }; - - assert_eq!(r.size(), size); - assert_eq!(r.prot(), libc::PROT_READ | libc::PROT_WRITE); - assert_eq!(r.flags(), libc::MAP_NORESERVE | libc::MAP_PRIVATE); - assert!(!r.owned()); - } - - #[test] - #[cfg(not(miri))] // Miri cannot mmap files - fn test_mmap_region_fds_overlap() { - let a = Arc::new(TempFile::new().unwrap().into_file()); - assert_eq!(unsafe { libc::ftruncate(a.as_raw_fd(), 1024 * 10) }, 0); - - let r1 = MmapRegion::from_file(FileOffset::from_arc(a.clone(), 0), 4096).unwrap(); - let r2 = MmapRegion::from_file(FileOffset::from_arc(a.clone(), 4096), 4096).unwrap(); - assert!(!r1.fds_overlap(&r2)); - - let r1 = MmapRegion::from_file(FileOffset::from_arc(a.clone(), 0), 5000).unwrap(); - assert!(r1.fds_overlap(&r2)); - - let r2 = MmapRegion::from_file(FileOffset::from_arc(a, 0), 1000).unwrap(); - assert!(r1.fds_overlap(&r2)); - - // Different files, so there's not overlap. - let new_file = TempFile::new().unwrap().into_file(); - // Resize before mapping. - assert_eq!( - unsafe { libc::ftruncate(new_file.as_raw_fd(), 1024 * 10) }, - 0 - ); - let r2 = MmapRegion::from_file(FileOffset::new(new_file, 0), 5000).unwrap(); - assert!(!r1.fds_overlap(&r2)); - - // R2 is not file backed, so no overlap. - let r2 = MmapRegion::new(5000).unwrap(); - assert!(!r1.fds_overlap(&r2)); - } - - #[test] - #[cfg(feature = "backend-bitmap")] - fn test_dirty_tracking() { - // Using the `crate` prefix because we aliased `MmapRegion` to `MmapRegion<()>` for - // the rest of the unit tests above. - let m = crate::MmapRegion::::new(0x1_0000).unwrap(); - crate::bitmap::tests::test_volatile_memory(&m); - } -} diff --git a/src/mmap/windows.rs b/src/mmap/windows.rs deleted file mode 100644 index 6d723917..00000000 --- a/src/mmap/windows.rs +++ /dev/null @@ -1,271 +0,0 @@ -// Copyright (C) 2019 CrowdStrike, Inc. All rights reserved. -// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause - -//! Helper structure for working with mmaped memory regions in Windows. 
- -use std; -use std::io; -use std::os::windows::io::{AsRawHandle, RawHandle}; -use std::ptr::{null, null_mut}; - -use libc::{c_void, size_t}; - -use winapi::um::errhandlingapi::GetLastError; - -use crate::bitmap::{Bitmap, NewBitmap, BS}; -use crate::guest_memory::FileOffset; -use crate::volatile_memory::{self, compute_offset, VolatileMemory, VolatileSlice}; - -#[allow(non_snake_case)] -#[link(name = "kernel32")] -extern "stdcall" { - pub fn VirtualAlloc( - lpAddress: *mut c_void, - dwSize: size_t, - flAllocationType: u32, - flProtect: u32, - ) -> *mut c_void; - - pub fn VirtualFree(lpAddress: *mut c_void, dwSize: size_t, dwFreeType: u32) -> u32; - - pub fn CreateFileMappingA( - hFile: RawHandle, // HANDLE - lpFileMappingAttributes: *const c_void, // LPSECURITY_ATTRIBUTES - flProtect: u32, // DWORD - dwMaximumSizeHigh: u32, // DWORD - dwMaximumSizeLow: u32, // DWORD - lpName: *const u8, // LPCSTR - ) -> RawHandle; // HANDLE - - pub fn MapViewOfFile( - hFileMappingObject: RawHandle, - dwDesiredAccess: u32, - dwFileOffsetHigh: u32, - dwFileOffsetLow: u32, - dwNumberOfBytesToMap: size_t, - ) -> *mut c_void; - - pub fn CloseHandle(hObject: RawHandle) -> u32; // BOOL -} - -const MM_HIGHEST_VAD_ADDRESS: u64 = 0x000007FFFFFDFFFF; - -const MEM_COMMIT: u32 = 0x00001000; -const MEM_RELEASE: u32 = 0x00008000; -const FILE_MAP_ALL_ACCESS: u32 = 0xf001f; -const PAGE_READWRITE: u32 = 0x04; - -pub const MAP_FAILED: *mut c_void = 0 as *mut c_void; -pub const INVALID_HANDLE_VALUE: RawHandle = (-1isize) as RawHandle; -#[allow(dead_code)] -pub const ERROR_INVALID_PARAMETER: i32 = 87; - -/// Helper structure for working with mmaped memory regions in Unix. -/// -/// The structure is used for accessing the guest's physical memory by mmapping it into -/// the current process. -/// -/// # Limitations -/// When running a 64-bit virtual machine on a 32-bit hypervisor, only part of the guest's -/// physical memory may be mapped into the current process due to the limited virtual address -/// space size of the process. -#[derive(Debug)] -pub struct MmapRegion { - addr: *mut u8, - size: usize, - bitmap: B, - file_offset: Option, -} - -// Send and Sync aren't automatically inherited for the raw address pointer. -// Accessing that pointer is only done through the stateless interface which -// allows the object to be shared by multiple threads without a decrease in -// safety. -unsafe impl Send for MmapRegion {} -unsafe impl Sync for MmapRegion {} - -impl MmapRegion { - /// Creates a shared anonymous mapping of `size` bytes. - /// - /// # Arguments - /// * `size` - The size of the memory region in bytes. - pub fn new(size: usize) -> io::Result { - if (size == 0) || (size > MM_HIGHEST_VAD_ADDRESS as usize) { - return Err(io::Error::from_raw_os_error(libc::EINVAL)); - } - // This is safe because we are creating an anonymous mapping in a place not already used by - // any other area in this process. - let addr = unsafe { VirtualAlloc(0 as *mut c_void, size, MEM_COMMIT, PAGE_READWRITE) }; - if addr == MAP_FAILED { - return Err(io::Error::last_os_error()); - } - Ok(Self { - addr: addr as *mut u8, - size, - bitmap: B::with_len(size), - file_offset: None, - }) - } - - /// Creates a shared file mapping of `size` bytes. - /// - /// # Arguments - /// * `file_offset` - The mapping will be created at offset `file_offset.start` in the file - /// referred to by `file_offset.file`. - /// * `size` - The size of the memory region in bytes. 
- pub fn from_file(file_offset: FileOffset, size: usize) -> io::Result { - let handle = file_offset.file().as_raw_handle(); - if handle == INVALID_HANDLE_VALUE { - return Err(io::Error::from_raw_os_error(libc::EBADF)); - } - - let mapping = unsafe { - CreateFileMappingA( - handle, - null(), - PAGE_READWRITE, - (size >> 32) as u32, - size as u32, - null(), - ) - }; - if mapping == 0 as RawHandle { - return Err(io::Error::last_os_error()); - } - - let offset = file_offset.start(); - - // This is safe because we are creating a mapping in a place not already used by any other - // area in this process. - let addr = unsafe { - MapViewOfFile( - mapping, - FILE_MAP_ALL_ACCESS, - (offset >> 32) as u32, - offset as u32, - size, - ) - }; - - unsafe { - CloseHandle(mapping); - } - - if addr == null_mut() { - return Err(io::Error::last_os_error()); - } - Ok(Self { - addr: addr as *mut u8, - size, - bitmap: B::with_len(size), - file_offset: Some(file_offset), - }) - } -} - -impl MmapRegion { - /// Returns a pointer to the beginning of the memory region. Mutable accesses performed - /// using the resulting pointer are not automatically accounted for by the dirty bitmap - /// tracking functionality. - /// - /// Should only be used for passing this region to ioctls for setting guest memory. - pub fn as_ptr(&self) -> *mut u8 { - self.addr - } - - /// Returns the size of this region. - pub fn size(&self) -> usize { - self.size - } - - /// Returns information regarding the offset into the file backing this region (if any). - pub fn file_offset(&self) -> Option<&FileOffset> { - self.file_offset.as_ref() - } - - /// Returns a reference to the inner bitmap object. - pub fn bitmap(&self) -> &B { - &self.bitmap - } -} - -impl VolatileMemory for MmapRegion { - type B = B; - - fn len(&self) -> usize { - self.size - } - - fn get_slice( - &self, - offset: usize, - count: usize, - ) -> volatile_memory::Result>> { - let end = compute_offset(offset, count)?; - if end > self.size { - return Err(volatile_memory::Error::OutOfBounds { addr: end }); - } - - // Safe because we checked that offset + count was within our range and we only ever hand - // out volatile accessors. - Ok(unsafe { - VolatileSlice::with_bitmap( - self.addr.add(offset), - count, - self.bitmap.slice_at(offset), - None, - ) - }) - } -} - -impl Drop for MmapRegion { - fn drop(&mut self) { - // This is safe because we mmap the area at addr ourselves, and nobody - // else is holding a reference to it. - // Note that the size must be set to 0 when using MEM_RELEASE, - // otherwise the function fails. - unsafe { - let ret_val = VirtualFree(self.addr as *mut libc::c_void, 0, MEM_RELEASE); - if ret_val == 0 { - let err = GetLastError(); - // We can't use any fancy logger here, yet we want to - // pin point memory leaks. - println!( - "WARNING: Could not deallocate mmap region. \ - Address: {:?}. Size: {}. 
Error: {}", - self.addr, self.size, err - ) - } - } - } -} - -#[cfg(test)] -mod tests { - use std::os::windows::io::FromRawHandle; - - #[cfg(feature = "backend-bitmap")] - use crate::bitmap::AtomicBitmap; - use crate::guest_memory::FileOffset; - use crate::mmap::windows::INVALID_HANDLE_VALUE; - - type MmapRegion = super::MmapRegion<()>; - - #[test] - fn map_invalid_handle() { - let file = unsafe { std::fs::File::from_raw_handle(INVALID_HANDLE_VALUE) }; - let file_offset = FileOffset::new(file, 0); - let e = MmapRegion::from_file(file_offset, 1024).unwrap_err(); - assert_eq!(e.raw_os_error(), Some(libc::EBADF)); - } - - #[test] - #[cfg(feature = "backend-bitmap")] - fn test_dirty_tracking() { - // Using the `crate` prefix because we aliased `MmapRegion` to `MmapRegion<()>` for - // the rest of the unit tests above. - let m = crate::MmapRegion::::new(0x1_0000).unwrap(); - crate::bitmap::tests::test_volatile_memory(&m); - } -} diff --git a/src/mmap/xen.rs b/src/mmap/xen.rs deleted file mode 100644 index 3ffe3acb..00000000 --- a/src/mmap/xen.rs +++ /dev/null @@ -1,1199 +0,0 @@ -// Copyright 2023 Linaro Ltd. All Rights Reserved. -// Viresh Kumar -// -// Xen specific memory mapping implementations -// -// SPDX-License-Identifier: Apache-2.0 or BSD-3-Clause - -//! Helper structure for working with mmap'ed memory regions on Xen. - -use bitflags::bitflags; -use libc::{c_int, c_void, MAP_SHARED, _SC_PAGESIZE}; -use std::{io, mem::size_of, os::raw::c_ulong, os::unix::io::AsRawFd, ptr::null_mut, result}; - -use vmm_sys_util::{ - fam::{Error as FamError, FamStruct, FamStructWrapper}, - generate_fam_struct_impl, - ioctl::{ioctl_expr, _IOC_NONE}, -}; - -// Use a dummy ioctl implementation for tests instead. -#[cfg(not(test))] -use vmm_sys_util::ioctl::ioctl_with_ref; - -#[cfg(test)] -use tests::ioctl_with_ref; - -use crate::bitmap::{Bitmap, NewBitmap, BS}; -use crate::guest_memory::{FileOffset, GuestAddress}; -use crate::volatile_memory::{self, VolatileMemory, VolatileSlice}; - -/// Error conditions that may arise when creating a new `MmapRegion` object. -#[derive(Debug, thiserror::Error)] -pub enum Error { - /// The specified file offset and length cause overflow when added. - #[error("The specified file offset and length cause overflow when added")] - InvalidOffsetLength, - /// The forbidden `MAP_FIXED` flag was specified. - #[error("The forbidden `MAP_FIXED` flag was specified")] - MapFixed, - /// A mapping with offset + length > EOF was attempted. - #[error("The specified file offset and length is greater then file length")] - MappingPastEof, - /// The `mmap` call returned an error. - #[error("{0}")] - Mmap(io::Error), - /// Invalid file offset. - #[error("Invalid file offset")] - InvalidFileOffset, - /// Memory mapped in advance. - #[error("Memory mapped in advance")] - MappedInAdvance, - /// Invalid Xen mmap flags. - #[error("Invalid Xen Mmap flags: {0:x}")] - MmapFlags(u32), - /// Fam error. - #[error("Fam error: {0}")] - Fam(FamError), - /// Unexpected error. - #[error("Unexpected error")] - UnexpectedError, -} - -type Result = result::Result; - -/// `MmapRange` represents a range of arguments required to create Mmap regions. -#[derive(Clone, Debug)] -pub struct MmapRange { - size: usize, - file_offset: Option, - prot: Option, - flags: Option, - hugetlbfs: Option, - addr: GuestAddress, - mmap_flags: u32, - mmap_data: u32, -} - -impl MmapRange { - /// Creates instance of the range with multiple arguments. 
- pub fn new( - size: usize, - file_offset: Option, - addr: GuestAddress, - mmap_flags: u32, - mmap_data: u32, - ) -> Self { - Self { - size, - file_offset, - prot: None, - flags: None, - hugetlbfs: None, - addr, - mmap_flags, - mmap_data, - } - } - - /// Creates instance of the range for `MmapXenFlags::UNIX` type mapping. - pub fn new_unix(size: usize, file_offset: Option, addr: GuestAddress) -> Self { - let flags = Some(match file_offset { - Some(_) => libc::MAP_NORESERVE | libc::MAP_SHARED, - None => libc::MAP_ANONYMOUS | libc::MAP_PRIVATE, - }); - - Self { - size, - file_offset, - prot: None, - flags, - hugetlbfs: None, - addr, - mmap_flags: MmapXenFlags::UNIX.bits(), - mmap_data: 0, - } - } - - /// Set the prot of the range. - pub fn set_prot(&mut self, prot: i32) { - self.prot = Some(prot) - } - - /// Set the flags of the range. - pub fn set_flags(&mut self, flags: i32) { - self.flags = Some(flags) - } - - /// Set the hugetlbfs of the range. - pub fn set_hugetlbfs(&mut self, hugetlbfs: bool) { - self.hugetlbfs = Some(hugetlbfs) - } -} - -/// Helper structure for working with mmaped memory regions with Xen. -/// -/// The structure is used for accessing the guest's physical memory by mmapping it into -/// the current process. -/// -/// # Limitations -/// When running a 64-bit virtual machine on a 32-bit hypervisor, only part of the guest's -/// physical memory may be mapped into the current process due to the limited virtual address -/// space size of the process. -#[derive(Debug)] -pub struct MmapRegion { - bitmap: B, - size: usize, - prot: i32, - flags: i32, - file_offset: Option, - hugetlbfs: Option, - mmap: MmapXen, -} - -// SAFETY: Send and Sync aren't automatically inherited for the raw address pointer. -// Accessing that pointer is only done through the stateless interface which -// allows the object to be shared by multiple threads without a decrease in -// safety. -unsafe impl Send for MmapRegion {} -// SAFETY: See comment above. -unsafe impl Sync for MmapRegion {} - -impl MmapRegion { - /// Creates a shared anonymous mapping of `size` bytes. - /// - /// # Arguments - /// * `range` - An instance of type `MmapRange`. - /// - /// # Examples - /// * Write a slice at guest address 0x1200 with Xen's Grant mapping. - /// - /// ``` - /// use std::fs::File; - /// use std::path::Path; - /// use vm_memory::{ - /// Bytes, FileOffset, GuestAddress, GuestMemoryMmap, GuestRegionMmap, MmapRange, MmapRegion, - /// MmapXenFlags, - /// }; - /// # use vmm_sys_util::tempfile::TempFile; - /// - /// let addr = GuestAddress(0x1000); - /// # if false { - /// let file = Some(FileOffset::new( - /// File::open(Path::new("/dev/xen/gntdev")).expect("Could not open file"), - /// 0, - /// )); - /// - /// let range = MmapRange::new(0x400, file, addr, MmapXenFlags::GRANT.bits(), 0); - /// # } - /// # // We need a UNIX mapping for tests to succeed. - /// # let range = MmapRange::new_unix(0x400, None, addr); - /// - /// let r = GuestRegionMmap::new( - /// MmapRegion::<()>::from_range(range).expect("Could not create mmap region"), - /// addr, - /// ) - /// .expect("Could not create guest region"); - /// - /// let mut gm = GuestMemoryMmap::from_regions(vec![r]).expect("Could not create guest memory"); - /// let res = gm - /// .write(&[1, 2, 3, 4, 5], GuestAddress(0x1200)) - /// .expect("Could not write to guest memory"); - /// assert_eq!(5, res); - /// ``` - /// - /// * Write a slice at guest address 0x1200 with Xen's Foreign mapping. 
- /// - /// ``` - /// use std::fs::File; - /// use std::path::Path; - /// use vm_memory::{ - /// Bytes, FileOffset, GuestAddress, GuestMemoryMmap, GuestRegionMmap, MmapRange, MmapRegion, - /// MmapXenFlags, - /// }; - /// # use vmm_sys_util::tempfile::TempFile; - /// - /// let addr = GuestAddress(0x1000); - /// # if false { - /// let file = Some(FileOffset::new( - /// File::open(Path::new("/dev/xen/privcmd")).expect("Could not open file"), - /// 0, - /// )); - /// - /// let range = MmapRange::new(0x400, file, addr, MmapXenFlags::FOREIGN.bits(), 0); - /// # } - /// # // We need a UNIX mapping for tests to succeed. - /// # let range = MmapRange::new_unix(0x400, None, addr); - /// - /// let r = GuestRegionMmap::new( - /// MmapRegion::<()>::from_range(range).expect("Could not create mmap region"), - /// addr, - /// ) - /// .expect("Could not create guest region"); - /// - /// let mut gm = GuestMemoryMmap::from_regions(vec![r]).expect("Could not create guest memory"); - /// let res = gm - /// .write(&[1, 2, 3, 4, 5], GuestAddress(0x1200)) - /// .expect("Could not write to guest memory"); - /// assert_eq!(5, res); - /// ``` - pub fn from_range(mut range: MmapRange) -> Result { - if range.prot.is_none() { - range.prot = Some(libc::PROT_READ | libc::PROT_WRITE); - } - - match range.flags { - Some(flags) => { - if flags & libc::MAP_FIXED != 0 { - // Forbid MAP_FIXED, as it doesn't make sense in this context, and is pretty dangerous - // in general. - return Err(Error::MapFixed); - } - } - None => range.flags = Some(libc::MAP_NORESERVE | libc::MAP_SHARED), - } - - let mmap = MmapXen::new(&range)?; - - Ok(MmapRegion { - bitmap: B::with_len(range.size), - size: range.size, - prot: range.prot.ok_or(Error::UnexpectedError)?, - flags: range.flags.ok_or(Error::UnexpectedError)?, - file_offset: range.file_offset, - hugetlbfs: range.hugetlbfs, - mmap, - }) - } -} - -impl MmapRegion { - /// Returns a pointer to the beginning of the memory region. Mutable accesses performed - /// using the resulting pointer are not automatically accounted for by the dirty bitmap - /// tracking functionality. - /// - /// Should only be used for passing this region to ioctls for setting guest memory. - pub fn as_ptr(&self) -> *mut u8 { - self.mmap.addr() - } - - /// Returns the size of this region. - pub fn size(&self) -> usize { - self.size - } - - /// Returns information regarding the offset into the file backing this region (if any). - pub fn file_offset(&self) -> Option<&FileOffset> { - self.file_offset.as_ref() - } - - /// Returns the value of the `prot` parameter passed to `mmap` when mapping this region. - pub fn prot(&self) -> i32 { - self.prot - } - - /// Returns the value of the `flags` parameter passed to `mmap` when mapping this region. - pub fn flags(&self) -> i32 { - self.flags - } - - /// Checks whether this region and `other` are backed by overlapping - /// [`FileOffset`](struct.FileOffset.html) objects. - /// - /// This is mostly a sanity check available for convenience, as different file descriptors - /// can alias the same file. 
- pub fn fds_overlap(&self, other: &MmapRegion) -> bool { - if let Some(f_off1) = self.file_offset() { - if let Some(f_off2) = other.file_offset() { - if f_off1.file().as_raw_fd() == f_off2.file().as_raw_fd() { - let s1 = f_off1.start(); - let s2 = f_off2.start(); - let l1 = self.len() as u64; - let l2 = other.len() as u64; - - if s1 < s2 { - return s1 + l1 > s2; - } else { - return s2 + l2 > s1; - } - } - } - } - false - } - - /// Set the hugetlbfs of the region - pub fn set_hugetlbfs(&mut self, hugetlbfs: bool) { - self.hugetlbfs = Some(hugetlbfs) - } - - /// Returns `true` if the region is hugetlbfs - pub fn is_hugetlbfs(&self) -> Option { - self.hugetlbfs - } - - /// Returns a reference to the inner bitmap object. - pub fn bitmap(&self) -> &B { - &self.bitmap - } - - /// Returns xen mmap flags. - pub fn xen_mmap_flags(&self) -> u32 { - self.mmap.flags() - } - - /// Returns xen mmap data. - pub fn xen_mmap_data(&self) -> u32 { - self.mmap.data() - } -} - -impl VolatileMemory for MmapRegion { - type B = B; - - fn len(&self) -> usize { - self.size - } - - fn get_slice( - &self, - offset: usize, - count: usize, - ) -> volatile_memory::Result>> { - let _ = self.compute_end_offset(offset, count)?; - - let mmap_info = if self.mmap.mmap_in_advance() { - None - } else { - Some(&self.mmap) - }; - - Ok( - // SAFETY: Safe because we checked that offset + count was within our range and we only - // ever hand out volatile accessors. - unsafe { - VolatileSlice::with_bitmap( - self.as_ptr().add(offset), - count, - self.bitmap.slice_at(offset), - mmap_info, - ) - }, - ) - } -} - -#[derive(Clone, Debug, PartialEq)] -struct MmapUnix { - addr: *mut u8, - size: usize, -} - -impl MmapUnix { - fn new(size: usize, prot: i32, flags: i32, fd: i32, f_offset: u64) -> Result { - let addr = - // SAFETY: This is safe because we're not allowing MAP_FIXED, and invalid parameters - // cannot break Rust safety guarantees (things may change if we're mapping /dev/mem or - // some wacky file). - unsafe { libc::mmap(null_mut(), size, prot, flags, fd, f_offset as libc::off_t) }; - - if addr == libc::MAP_FAILED { - return Err(Error::Mmap(io::Error::last_os_error())); - } - - Ok(Self { - addr: addr as *mut u8, - size, - }) - } - - fn addr(&self) -> *mut u8 { - self.addr - } -} - -impl Drop for MmapUnix { - fn drop(&mut self) { - // SAFETY: This is safe because we mmap the area at addr ourselves, and nobody - // else is holding a reference to it. - unsafe { - libc::munmap(self.addr as *mut libc::c_void, self.size); - } - } -} - -// Bit mask for the vhost-user xen mmap message. -bitflags! { - /// Flags for the Xen mmap message. - #[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] - pub struct MmapXenFlags: u32 { - /// Standard Unix memory mapping. - const UNIX = 0x0; - /// Xen foreign memory (accessed via /dev/privcmd). - const FOREIGN = 0x1; - /// Xen grant memory (accessed via /dev/gntdev). - const GRANT = 0x2; - /// Xen no advance mapping. - const NO_ADVANCE_MAP = 0x8; - /// All valid mappings. - const ALL = Self::FOREIGN.bits() | Self::GRANT.bits(); - } -} - -impl MmapXenFlags { - /// Mmap flags are valid. - pub fn is_valid(&self) -> bool { - // only one of unix, foreign or grant should be set and mmap_in_advance() should be true - // with foreign and unix. - if self.is_grant() { - !self.is_foreign() - } else if self.is_foreign() || self.is_unix() { - self.mmap_in_advance() - } else { - false - } - } - - /// Is standard Unix memory. 
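The rules encoded by `is_valid` above reduce to a small truth table. A sketch (not part of the patch) that spells them out, assuming the crate is built with the `xen` feature so `MmapXenFlags` is exported:

```rust
// Sketch: flag combinations accepted or rejected by `is_valid` above.
use vm_memory::MmapXenFlags;

fn main() {
    // Plain Unix and foreign mappings are valid on their own.
    assert!(MmapXenFlags::UNIX.is_valid());
    assert!(MmapXenFlags::FOREIGN.is_valid());
    // Grant mappings may defer the actual mmap.
    assert!((MmapXenFlags::GRANT | MmapXenFlags::NO_ADVANCE_MAP).is_valid());
    // Foreign and grant at the same time is rejected.
    assert!(!(MmapXenFlags::FOREIGN | MmapXenFlags::GRANT).is_valid());
    // Foreign mappings must be mapped in advance.
    assert!(!(MmapXenFlags::FOREIGN | MmapXenFlags::NO_ADVANCE_MAP).is_valid());
}
```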
- pub fn is_unix(&self) -> bool { - self.bits() == Self::UNIX.bits() - } - - /// Is xen foreign memory. - pub fn is_foreign(&self) -> bool { - self.contains(Self::FOREIGN) - } - - /// Is xen grant memory. - pub fn is_grant(&self) -> bool { - self.contains(Self::GRANT) - } - - /// Can mmap entire region in advance. - pub fn mmap_in_advance(&self) -> bool { - !self.contains(Self::NO_ADVANCE_MAP) - } -} - -fn page_size() -> u64 { - // SAFETY: Safe because this call just returns the page size and doesn't have any side effects. - unsafe { libc::sysconf(_SC_PAGESIZE) as u64 } -} - -fn pages(size: usize) -> (usize, usize) { - let page_size = page_size() as usize; - let num = size.div_ceil(page_size); - - (num, page_size * num) -} - -fn validate_file(file_offset: &Option) -> Result<(i32, u64)> { - let file_offset = match file_offset { - Some(f) => f, - None => return Err(Error::InvalidFileOffset), - }; - - let fd = file_offset.file().as_raw_fd(); - let f_offset = file_offset.start(); - - // We don't allow file offsets with Xen foreign mappings. - if f_offset != 0 { - return Err(Error::InvalidOffsetLength); - } - - Ok((fd, f_offset)) -} - -// Xen Foreign memory mapping interface. -trait MmapXenTrait: std::fmt::Debug { - fn mmap_slice(&self, addr: *const u8, prot: i32, len: usize) -> Result; - fn addr(&self) -> *mut u8; -} - -// Standard Unix memory mapping for testing other crates. -#[derive(Clone, Debug, PartialEq)] -struct MmapXenUnix(MmapUnix); - -impl MmapXenUnix { - fn new(range: &MmapRange) -> Result { - let (fd, offset) = if let Some(ref f_off) = range.file_offset { - (f_off.file().as_raw_fd(), f_off.start()) - } else { - (-1, 0) - }; - - Ok(Self(MmapUnix::new( - range.size, - range.prot.ok_or(Error::UnexpectedError)?, - range.flags.ok_or(Error::UnexpectedError)?, - fd, - offset, - )?)) - } -} - -impl MmapXenTrait for MmapXenUnix { - #[allow(unused_variables)] - fn mmap_slice(&self, addr: *const u8, prot: i32, len: usize) -> Result { - Err(Error::MappedInAdvance) - } - - fn addr(&self) -> *mut u8 { - self.0.addr() - } -} - -// Privcmd mmap batch v2 command -// -// include/uapi/xen/privcmd.h: `privcmd_mmapbatch_v2` -#[repr(C)] -#[derive(Debug, Copy, Clone)] -struct PrivCmdMmapBatchV2 { - // number of pages to populate - num: u32, - // target domain - domid: u16, - // virtual address - addr: *mut c_void, - // array of mfns - arr: *const u64, - // array of error codes - err: *mut c_int, -} - -const XEN_PRIVCMD_TYPE: u32 = 'P' as u32; - -// #define IOCTL_PRIVCMD_MMAPBATCH_V2 _IOC(_IOC_NONE, 'P', 4, sizeof(privcmd_mmapbatch_v2_t)) -fn ioctl_privcmd_mmapbatch_v2() -> c_ulong { - ioctl_expr( - _IOC_NONE, - XEN_PRIVCMD_TYPE, - 4, - size_of::() as u32, - ) -} - -// Xen foreign memory specific implementation. -#[derive(Clone, Debug, PartialEq)] -struct MmapXenForeign { - domid: u32, - guest_base: GuestAddress, - unix_mmap: MmapUnix, - fd: i32, -} - -impl AsRawFd for MmapXenForeign { - fn as_raw_fd(&self) -> i32 { - self.fd - } -} - -impl MmapXenForeign { - fn new(range: &MmapRange) -> Result { - let (fd, f_offset) = validate_file(&range.file_offset)?; - let (count, size) = pages(range.size); - - let unix_mmap = MmapUnix::new( - size, - range.prot.ok_or(Error::UnexpectedError)?, - range.flags.ok_or(Error::UnexpectedError)? | MAP_SHARED, - fd, - f_offset, - )?; - - let foreign = Self { - domid: range.mmap_data, - guest_base: range.addr, - unix_mmap, - fd, - }; - - foreign.mmap_ioctl(count)?; - Ok(foreign) - } - - // Ioctl to pass additional information to mmap infrastructure of privcmd driver. 
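`pages` above rounds a byte count up to whole pages before any foreign or grant mapping is sized. A standalone sketch of the same arithmetic (not part of the patch); the fixed 4 KiB page size and the helper name are assumptions for illustration only:

```rust
// Sketch of the page rounding done by `pages` above, with the page size fixed
// at 4 KiB instead of querying sysconf(_SC_PAGESIZE).
fn pages(size: usize) -> (usize, usize) {
    const PAGE_SIZE: usize = 4096; // assumed for the example
    let num = size.div_ceil(PAGE_SIZE);
    (num, num * PAGE_SIZE)
}

fn main() {
    assert_eq!(pages(1), (1, 4096)); // any non-zero size occupies a full page
    assert_eq!(pages(4096), (1, 4096)); // exact multiples stay as-is
    assert_eq!(pages(4097), (2, 8192)); // one extra byte spills into a second page
}
```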
- fn mmap_ioctl(&self, count: usize) -> Result<()> { - let base = self.guest_base.0 / page_size(); - - let mut pfn = Vec::with_capacity(count); - for i in 0..count { - pfn.push(base + i as u64); - } - - let mut err: Vec = vec![0; count]; - - let map = PrivCmdMmapBatchV2 { - num: count as u32, - domid: self.domid as u16, - addr: self.addr() as *mut c_void, - arr: pfn.as_ptr(), - err: err.as_mut_ptr(), - }; - - // SAFETY: This is safe because the ioctl guarantees to not access memory beyond `map`. - let ret = unsafe { ioctl_with_ref(self, ioctl_privcmd_mmapbatch_v2(), &map) }; - - if ret == 0 { - Ok(()) - } else { - Err(Error::Mmap(io::Error::last_os_error())) - } - } -} - -impl MmapXenTrait for MmapXenForeign { - #[allow(unused_variables)] - fn mmap_slice(&self, addr: *const u8, prot: i32, len: usize) -> Result { - Err(Error::MappedInAdvance) - } - - fn addr(&self) -> *mut u8 { - self.unix_mmap.addr() - } -} - -// Xen Grant memory mapping interface. - -const XEN_GRANT_ADDR_OFF: u64 = 1 << 63; - -// Grant reference -// -// include/uapi/xen/gntdev.h: `ioctl_gntdev_grant_ref` -#[repr(C)] -#[derive(Copy, Clone, Debug, Default, PartialEq)] -struct GntDevGrantRef { - // The domain ID of the grant to be mapped. - domid: u32, - // The grant reference of the grant to be mapped. - reference: u32, -} - -#[repr(C)] -#[derive(Debug, Default, PartialEq, Eq)] -struct __IncompleteArrayField(::std::marker::PhantomData, [T; 0]); -impl __IncompleteArrayField { - #[inline] - unsafe fn as_ptr(&self) -> *const T { - self as *const __IncompleteArrayField as *const T - } - #[inline] - unsafe fn as_mut_ptr(&mut self) -> *mut T { - self as *mut __IncompleteArrayField as *mut T - } - #[inline] - unsafe fn as_slice(&self, len: usize) -> &[T] { - ::std::slice::from_raw_parts(self.as_ptr(), len) - } - #[inline] - unsafe fn as_mut_slice(&mut self, len: usize) -> &mut [T] { - ::std::slice::from_raw_parts_mut(self.as_mut_ptr(), len) - } -} - -// Grant dev mapping reference -// -// include/uapi/xen/gntdev.h: `ioctl_gntdev_map_grant_ref` -#[repr(C)] -#[derive(Debug, Default)] -struct GntDevMapGrantRef { - // The number of grants to be mapped. - count: u32, - // Unused padding - pad: u32, - // The offset to be used on a subsequent call to mmap(). - index: u64, - // Array of grant references, of size @count. - refs: __IncompleteArrayField, -} - -generate_fam_struct_impl!( - GntDevMapGrantRef, - GntDevGrantRef, - refs, - u32, - count, - usize::MAX -); - -type GntDevMapGrantRefWrapper = FamStructWrapper; - -impl GntDevMapGrantRef { - fn new(domid: u32, base: u32, count: usize) -> Result { - let mut wrapper = GntDevMapGrantRefWrapper::new(count).map_err(Error::Fam)?; - let refs = wrapper.as_mut_slice(); - - // GntDevMapGrantRef's pad and index are initialized to 0 by Fam layer. - for (i, r) in refs.iter_mut().enumerate().take(count) { - r.domid = domid; - r.reference = base + i as u32; - } - - Ok(wrapper) - } -} - -// Grant dev un-mapping reference -// -// include/uapi/xen/gntdev.h: `ioctl_gntdev_unmap_grant_ref` -#[repr(C)] -#[derive(Debug, Copy, Clone)] -struct GntDevUnmapGrantRef { - // The offset returned by the map operation. - index: u64, - // The number of grants to be unmapped. 
- count: u32, - // Unused padding - pad: u32, -} - -impl GntDevUnmapGrantRef { - fn new(index: u64, count: u32) -> Self { - Self { - index, - count, - pad: 0, - } - } -} - -const XEN_GNTDEV_TYPE: u32 = 'G' as u32; - -// #define IOCTL_GNTDEV_MAP_GRANT_REF _IOC(_IOC_NONE, 'G', 0, sizeof(ioctl_gntdev_map_grant_ref)) -fn ioctl_gntdev_map_grant_ref() -> c_ulong { - ioctl_expr( - _IOC_NONE, - XEN_GNTDEV_TYPE, - 0, - (size_of::() + size_of::()) as u32, - ) -} - -// #define IOCTL_GNTDEV_UNMAP_GRANT_REF _IOC(_IOC_NONE, 'G', 1, sizeof(struct ioctl_gntdev_unmap_grant_ref)) -fn ioctl_gntdev_unmap_grant_ref() -> c_ulong { - ioctl_expr( - _IOC_NONE, - XEN_GNTDEV_TYPE, - 1, - size_of::() as u32, - ) -} - -// Xen grant memory specific implementation. -#[derive(Clone, Debug)] -struct MmapXenGrant { - guest_base: GuestAddress, - unix_mmap: Option, - file_offset: FileOffset, - flags: i32, - size: usize, - index: u64, - domid: u32, -} - -impl AsRawFd for MmapXenGrant { - fn as_raw_fd(&self) -> i32 { - self.file_offset.file().as_raw_fd() - } -} - -impl MmapXenGrant { - fn new(range: &MmapRange, mmap_flags: MmapXenFlags) -> Result { - validate_file(&range.file_offset)?; - - let mut grant = Self { - guest_base: range.addr, - unix_mmap: None, - file_offset: range.file_offset.as_ref().unwrap().clone(), - flags: range.flags.ok_or(Error::UnexpectedError)?, - size: 0, - index: 0, - domid: range.mmap_data, - }; - - // Region can't be mapped in advance, partial mapping will be done later via - // `MmapXenSlice`. - if mmap_flags.mmap_in_advance() { - let (unix_mmap, index) = grant.mmap_range( - range.addr, - range.size, - range.prot.ok_or(Error::UnexpectedError)?, - )?; - - grant.unix_mmap = Some(unix_mmap); - grant.index = index; - grant.size = range.size; - } - - Ok(grant) - } - - fn mmap_range(&self, addr: GuestAddress, size: usize, prot: i32) -> Result<(MmapUnix, u64)> { - let (count, size) = pages(size); - let index = self.mmap_ioctl(addr, count)?; - let unix_mmap = MmapUnix::new(size, prot, self.flags, self.as_raw_fd(), index)?; - - Ok((unix_mmap, index)) - } - - fn unmap_range(&self, unix_mmap: MmapUnix, size: usize, index: u64) { - let (count, _) = pages(size); - - // Unmap the address first. - drop(unix_mmap); - self.unmap_ioctl(count as u32, index).unwrap(); - } - - fn mmap_ioctl(&self, addr: GuestAddress, count: usize) -> Result { - let base = ((addr.0 & !XEN_GRANT_ADDR_OFF) / page_size()) as u32; - let wrapper = GntDevMapGrantRef::new(self.domid, base, count)?; - let reference = wrapper.as_fam_struct_ref(); - - // SAFETY: This is safe because the ioctl guarantees to not access memory beyond reference. - let ret = unsafe { ioctl_with_ref(self, ioctl_gntdev_map_grant_ref(), reference) }; - - if ret == 0 { - Ok(reference.index) - } else { - Err(Error::Mmap(io::Error::last_os_error())) - } - } - - fn unmap_ioctl(&self, count: u32, index: u64) -> Result<()> { - let unmap = GntDevUnmapGrantRef::new(index, count); - - // SAFETY: This is safe because the ioctl guarantees to not access memory beyond unmap. - let ret = unsafe { ioctl_with_ref(self, ioctl_gntdev_unmap_grant_ref(), &unmap) }; - - if ret == 0 { - Ok(()) - } else { - Err(Error::Mmap(io::Error::last_os_error())) - } - } -} - -impl MmapXenTrait for MmapXenGrant { - // Maps a slice out of the entire region. 
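`mmap_ioctl` above derives the starting grant reference by clearing the `XEN_GRANT_ADDR_OFF` marker bit and dividing by the page size. A standalone sketch of that computation (not part of the patch), again with a 4 KiB page size and helper name assumed for illustration:

```rust
// Sketch: turning a grant-encoded guest address into the base grant reference.
const XEN_GRANT_ADDR_OFF: u64 = 1 << 63;

fn base_reference(addr: u64, page_size: u64) -> u32 {
    ((addr & !XEN_GRANT_ADDR_OFF) / page_size) as u32
}

fn main() {
    // The top bit only marks the address as grant-based; it does not
    // contribute to the reference number.
    assert_eq!(base_reference(XEN_GRANT_ADDR_OFF | 0x3000, 4096), 3);
    assert_eq!(base_reference(0x3000, 4096), 3);
}
```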
- fn mmap_slice(&self, addr: *const u8, prot: i32, len: usize) -> Result { - MmapXenSlice::new_with(self.clone(), addr as usize, prot, len) - } - - fn addr(&self) -> *mut u8 { - if let Some(ref unix_mmap) = self.unix_mmap { - unix_mmap.addr() - } else { - null_mut() - } - } -} - -impl Drop for MmapXenGrant { - fn drop(&mut self) { - if let Some(unix_mmap) = self.unix_mmap.take() { - self.unmap_range(unix_mmap, self.size, self.index); - } - } -} - -#[derive(Debug)] -pub(crate) struct MmapXenSlice { - grant: Option, - unix_mmap: Option, - addr: *mut u8, - size: usize, - index: u64, -} - -impl MmapXenSlice { - fn raw(addr: *mut u8) -> Self { - Self { - grant: None, - unix_mmap: None, - addr, - size: 0, - index: 0, - } - } - - fn new_with(grant: MmapXenGrant, offset: usize, prot: i32, size: usize) -> Result { - let page_size = page_size() as usize; - let page_base: usize = (offset / page_size) * page_size; - let offset = offset - page_base; - let size = offset + size; - - let addr = grant.guest_base.0 + page_base as u64; - let (unix_mmap, index) = grant.mmap_range(GuestAddress(addr), size, prot)?; - - // SAFETY: We have already mapped the range including offset. - let addr = unsafe { unix_mmap.addr().add(offset) }; - - Ok(Self { - grant: Some(grant), - unix_mmap: Some(unix_mmap), - addr, - size, - index, - }) - } - - // Mapped address for the region. - pub(crate) fn addr(&self) -> *mut u8 { - self.addr - } -} - -impl Drop for MmapXenSlice { - fn drop(&mut self) { - // Unmaps memory automatically once this instance goes out of scope. - if let Some(unix_mmap) = self.unix_mmap.take() { - self.grant - .as_ref() - .unwrap() - .unmap_range(unix_mmap, self.size, self.index); - } - } -} - -#[derive(Debug)] -pub struct MmapXen { - xen_flags: MmapXenFlags, - domid: u32, - mmap: Box, -} - -impl MmapXen { - fn new(range: &MmapRange) -> Result { - let xen_flags = match MmapXenFlags::from_bits(range.mmap_flags) { - Some(flags) => flags, - None => return Err(Error::MmapFlags(range.mmap_flags)), - }; - - if !xen_flags.is_valid() { - return Err(Error::MmapFlags(xen_flags.bits())); - } - - Ok(Self { - xen_flags, - domid: range.mmap_data, - mmap: if xen_flags.is_foreign() { - Box::new(MmapXenForeign::new(range)?) - } else if xen_flags.is_grant() { - Box::new(MmapXenGrant::new(range, xen_flags)?) - } else { - Box::new(MmapXenUnix::new(range)?) - }, - }) - } - - fn addr(&self) -> *mut u8 { - self.mmap.addr() - } - - fn flags(&self) -> u32 { - self.xen_flags.bits() - } - - fn data(&self) -> u32 { - self.domid - } - - fn mmap_in_advance(&self) -> bool { - self.xen_flags.mmap_in_advance() - } - - pub(crate) fn mmap( - mmap_xen: Option<&Self>, - addr: *mut u8, - prot: i32, - len: usize, - ) -> MmapXenSlice { - match mmap_xen { - Some(mmap_xen) => mmap_xen.mmap.mmap_slice(addr, prot, len).unwrap(), - None => MmapXenSlice::raw(addr), - } - } -} - -#[cfg(test)] -mod tests { - #![allow(clippy::undocumented_unsafe_blocks)] - - use super::*; - use matches::assert_matches; - use vmm_sys_util::tempfile::TempFile; - - // Adding a helper method to extract the errno within an Error::Mmap(e), or return a - // distinctive value when the error is represented by another variant. 
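`MmapXenSlice::new_with` above maps whole pages and then points into them: the requested offset is aligned down to a page boundary, the in-page remainder is kept, and the mapping is sized to cover the remainder plus the requested length. A sketch of that arithmetic (not part of the patch), with a 4 KiB page size and helper name assumed for illustration:

```rust
// Sketch: how a sub-page request is split into (page start, in-page offset,
// bytes that must be mapped), mirroring the computation in `new_with` above.
fn split_offset(offset: usize, size: usize) -> (usize, usize, usize) {
    const PAGE_SIZE: usize = 4096; // assumed for the example
    let page_base = (offset / PAGE_SIZE) * PAGE_SIZE; // start of containing page
    let in_page = offset - page_base; // remainder within that page
    (page_base, in_page, in_page + size) // mapping must cover in_page + size bytes
}

fn main() {
    // A 16-byte access at offset 0x1234 maps the page at 0x1000 and needs
    // 0x234 + 16 = 0x244 bytes of it.
    assert_eq!(split_offset(0x1234, 16), (0x1000, 0x234, 0x244));
}
```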
- impl Error { - fn raw_os_error(&self) -> i32 { - match self { - Error::Mmap(e) => e.raw_os_error().unwrap(), - _ => i32::MIN, - } - } - } - - #[allow(unused_variables)] - pub unsafe fn ioctl_with_ref(fd: &F, req: c_ulong, arg: &T) -> c_int { - 0 - } - - impl MmapRange { - fn initialized(is_file: bool) -> Self { - let file_offset = if is_file { - Some(FileOffset::new(TempFile::new().unwrap().into_file(), 0)) - } else { - None - }; - - let mut range = MmapRange::new_unix(0x1000, file_offset, GuestAddress(0x1000)); - range.prot = Some(libc::PROT_READ | libc::PROT_WRITE); - range.mmap_data = 1; - - range - } - } - - impl MmapRegion { - /// Create an `MmapRegion` with specified `size` at GuestAdress(0) - pub fn new(size: usize) -> Result { - let range = MmapRange::new_unix(size, None, GuestAddress(0)); - Self::from_range(range) - } - } - - #[test] - fn test_mmap_xen_failures() { - let mut range = MmapRange::initialized(true); - // Invalid flags - range.mmap_flags = 16; - - let r = MmapXen::new(&range); - assert_matches!(r.unwrap_err(), Error::MmapFlags(flags) if flags == range.mmap_flags); - - range.mmap_flags = MmapXenFlags::FOREIGN.bits() | MmapXenFlags::GRANT.bits(); - let r = MmapXen::new(&range); - assert_matches!(r.unwrap_err(), Error::MmapFlags(flags) if flags == MmapXenFlags::ALL.bits()); - - range.mmap_flags = MmapXenFlags::FOREIGN.bits() | MmapXenFlags::NO_ADVANCE_MAP.bits(); - let r = MmapXen::new(&range); - assert_matches!(r.unwrap_err(), Error::MmapFlags(flags) if flags == MmapXenFlags::NO_ADVANCE_MAP.bits() | MmapXenFlags::FOREIGN.bits()); - } - - #[test] - fn test_mmap_xen_success() { - let mut range = MmapRange::initialized(true); - range.mmap_flags = MmapXenFlags::FOREIGN.bits(); - - let r = MmapXen::new(&range).unwrap(); - assert_eq!(r.flags(), range.mmap_flags); - assert_eq!(r.data(), range.mmap_data); - assert_ne!(r.addr(), null_mut()); - assert!(r.mmap_in_advance()); - - range.mmap_flags = MmapXenFlags::GRANT.bits(); - let r = MmapXen::new(&range).unwrap(); - assert_eq!(r.flags(), range.mmap_flags); - assert_eq!(r.data(), range.mmap_data); - assert_ne!(r.addr(), null_mut()); - assert!(r.mmap_in_advance()); - - range.mmap_flags = MmapXenFlags::GRANT.bits() | MmapXenFlags::NO_ADVANCE_MAP.bits(); - let r = MmapXen::new(&range).unwrap(); - assert_eq!(r.flags(), range.mmap_flags); - assert_eq!(r.data(), range.mmap_data); - assert_eq!(r.addr(), null_mut()); - assert!(!r.mmap_in_advance()); - } - - #[test] - fn test_foreign_map_failure() { - let mut range = MmapRange::initialized(true); - range.file_offset = Some(FileOffset::new(TempFile::new().unwrap().into_file(), 0)); - range.prot = None; - let r = MmapXenForeign::new(&range); - assert_matches!(r.unwrap_err(), Error::UnexpectedError); - - let mut range = MmapRange::initialized(true); - range.flags = None; - let r = MmapXenForeign::new(&range); - assert_matches!(r.unwrap_err(), Error::UnexpectedError); - - let mut range = MmapRange::initialized(true); - range.file_offset = Some(FileOffset::new(TempFile::new().unwrap().into_file(), 1)); - let r = MmapXenForeign::new(&range); - assert_matches!(r.unwrap_err(), Error::InvalidOffsetLength); - - let mut range = MmapRange::initialized(true); - range.size = 0; - let r = MmapXenForeign::new(&range); - assert_eq!(r.unwrap_err().raw_os_error(), libc::EINVAL); - } - - #[test] - fn test_foreign_map_success() { - let range = MmapRange::initialized(true); - let r = MmapXenForeign::new(&range).unwrap(); - assert_ne!(r.addr(), null_mut()); - assert_eq!(r.domid, range.mmap_data); - 
assert_eq!(r.guest_base, range.addr); - } - - #[test] - fn test_grant_map_failure() { - let mut range = MmapRange::initialized(true); - range.prot = None; - let r = MmapXenGrant::new(&range, MmapXenFlags::empty()); - assert_matches!(r.unwrap_err(), Error::UnexpectedError); - - let mut range = MmapRange::initialized(true); - range.prot = None; - // Protection isn't used for no-advance mappings - MmapXenGrant::new(&range, MmapXenFlags::NO_ADVANCE_MAP).unwrap(); - - let mut range = MmapRange::initialized(true); - range.flags = None; - let r = MmapXenGrant::new(&range, MmapXenFlags::NO_ADVANCE_MAP); - assert_matches!(r.unwrap_err(), Error::UnexpectedError); - - let mut range = MmapRange::initialized(true); - range.file_offset = Some(FileOffset::new(TempFile::new().unwrap().into_file(), 1)); - let r = MmapXenGrant::new(&range, MmapXenFlags::NO_ADVANCE_MAP); - assert_matches!(r.unwrap_err(), Error::InvalidOffsetLength); - - let mut range = MmapRange::initialized(true); - range.size = 0; - let r = MmapXenGrant::new(&range, MmapXenFlags::empty()); - assert_eq!(r.unwrap_err().raw_os_error(), libc::EINVAL); - } - - #[test] - fn test_grant_map_success() { - let range = MmapRange::initialized(true); - let r = MmapXenGrant::new(&range, MmapXenFlags::NO_ADVANCE_MAP).unwrap(); - assert_eq!(r.addr(), null_mut()); - assert_eq!(r.domid, range.mmap_data); - assert_eq!(r.guest_base, range.addr); - - let mut range = MmapRange::initialized(true); - // Size isn't used with no-advance mapping. - range.size = 0; - MmapXenGrant::new(&range, MmapXenFlags::NO_ADVANCE_MAP).unwrap(); - - let range = MmapRange::initialized(true); - let r = MmapXenGrant::new(&range, MmapXenFlags::empty()).unwrap(); - assert_ne!(r.addr(), null_mut()); - assert_eq!(r.domid, range.mmap_data); - assert_eq!(r.guest_base, range.addr); - } - - #[test] - fn test_grant_ref_alloc() { - let wrapper = GntDevMapGrantRef::new(0, 0x1000, 0x100).unwrap(); - let r = wrapper.as_fam_struct_ref(); - assert_eq!(r.count, 0x100); - assert_eq!(r.pad, 0); - assert_eq!(r.index, 0); - } -} diff --git a/src/region.rs b/src/region.rs index d3ca4350..4fd32259 100644 --- a/src/region.rs +++ b/src/region.rs @@ -1,776 +1,8 @@ //! Module containing abstracts for dealing with contiguous regions of guest memory -use crate::bitmap::{Bitmap, BS}; -use crate::guest_memory::Result; -use crate::{ - Address, AtomicAccess, Bytes, FileOffset, GuestAddress, GuestMemory, GuestMemoryError, - GuestUsize, MemoryRegionAddress, ReadVolatile, VolatileSlice, WriteVolatile, +pub use vm_memory_new::region::{ + GuestMemoryRegion, + GuestRegionCollectionError, + GuestRegionCollection, + GuestMemoryRegionBytes, }; -use std::sync::atomic::Ordering; -use std::sync::Arc; - -/// Represents a continuous region of guest physical memory. 
-/// -/// Note that the [`Bytes`] super trait requirement can be satisfied by implementing -/// [`GuestMemoryRegionBytes`], which provides a default implementation of `Bytes` -/// for memory regions that are backed by physical RAM: -/// -/// ``` -/// /// -/// use vm_memory::bitmap::BS; -/// use vm_memory::{GuestAddress, GuestMemoryRegion, GuestMemoryRegionBytes, GuestUsize}; -/// -/// struct MyRegion; -/// -/// impl GuestMemoryRegion for MyRegion { -/// type B = (); -/// fn len(&self) -> GuestUsize { -/// todo!() -/// } -/// fn start_addr(&self) -> GuestAddress { -/// todo!() -/// } -/// fn bitmap(&self) { -/// todo!() -/// } -/// } -/// -/// impl GuestMemoryRegionBytes for MyRegion {} -/// ``` -#[allow(clippy::len_without_is_empty)] -pub trait GuestMemoryRegion: Bytes { - /// Type used for dirty memory tracking. - type B: Bitmap; - - /// Returns the size of the region. - fn len(&self) -> GuestUsize; - - /// Returns the minimum (inclusive) address managed by the region. - fn start_addr(&self) -> GuestAddress; - - /// Returns the maximum (inclusive) address managed by the region. - fn last_addr(&self) -> GuestAddress { - // unchecked_add is safe as the region bounds were checked when it was created. - self.start_addr().unchecked_add(self.len() - 1) - } - - /// Borrow the associated `Bitmap` object. - fn bitmap(&self) -> BS<'_, Self::B>; - - /// Returns the given address if it is within this region. - fn check_address(&self, addr: MemoryRegionAddress) -> Option { - if self.address_in_range(addr) { - Some(addr) - } else { - None - } - } - - /// Returns `true` if the given address is within this region. - fn address_in_range(&self, addr: MemoryRegionAddress) -> bool { - addr.raw_value() < self.len() - } - - /// Returns the address plus the offset if it is in this region. - fn checked_offset( - &self, - base: MemoryRegionAddress, - offset: usize, - ) -> Option { - base.checked_add(offset as u64) - .and_then(|addr| self.check_address(addr)) - } - - /// Tries to convert an absolute address to a relative address within this region. - /// - /// Returns `None` if `addr` is out of the bounds of this region. - fn to_region_addr(&self, addr: GuestAddress) -> Option { - addr.checked_offset_from(self.start_addr()) - .and_then(|offset| self.check_address(MemoryRegionAddress(offset))) - } - - /// Returns the host virtual address corresponding to the region address. - /// - /// Some [`GuestMemory`](trait.GuestMemory.html) implementations, like `GuestMemoryMmap`, - /// have the capability to mmap guest address range into host virtual address space for - /// direct access, so the corresponding host virtual address may be passed to other subsystems. - /// - /// # Note - /// The underlying guest memory is not protected from memory aliasing, which breaks the - /// Rust memory safety model. It's the caller's responsibility to ensure that there's no - /// concurrent accesses to the underlying guest memory. - fn get_host_address(&self, _addr: MemoryRegionAddress) -> Result<*mut u8> { - Err(GuestMemoryError::HostAddressNotAvailable) - } - - /// Returns information regarding the file and offset backing this memory region. - fn file_offset(&self) -> Option<&FileOffset> { - None - } - - /// Returns a [`VolatileSlice`](struct.VolatileSlice.html) of `count` bytes starting at - /// `offset`. 
- #[allow(unused_variables)] - fn get_slice( - &self, - offset: MemoryRegionAddress, - count: usize, - ) -> Result>> { - Err(GuestMemoryError::HostAddressNotAvailable) - } - - /// Gets a slice of memory for the entire region that supports volatile access. - /// - /// # Examples (uses the `backend-mmap` feature) - /// - /// ``` - /// # #[cfg(feature = "backend-mmap")] - /// # { - /// # use vm_memory::{GuestAddress, MmapRegion, GuestRegionMmap, GuestMemoryRegion}; - /// # use vm_memory::volatile_memory::{VolatileMemory, VolatileSlice, VolatileRef}; - /// # - /// let region = GuestRegionMmap::<()>::from_range(GuestAddress(0x0), 0x400, None) - /// .expect("Could not create guest memory"); - /// let slice = region - /// .as_volatile_slice() - /// .expect("Could not get volatile slice"); - /// - /// let v = 42u32; - /// let r = slice - /// .get_ref::(0x200) - /// .expect("Could not get reference"); - /// r.store(v); - /// assert_eq!(r.load(), v); - /// # } - /// ``` - fn as_volatile_slice(&self) -> Result>> { - self.get_slice(MemoryRegionAddress(0), self.len() as usize) - } - - /// Show if the region is based on the `HugeTLBFS`. - /// Returns Some(true) if the region is backed by hugetlbfs. - /// None represents that no information is available. - /// - /// # Examples (uses the `backend-mmap` feature) - /// - /// ``` - /// # #[cfg(feature = "backend-mmap")] - /// # { - /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap, GuestRegionMmap}; - /// let addr = GuestAddress(0x1000); - /// let mem = GuestMemoryMmap::<()>::from_ranges(&[(addr, 0x1000)]).unwrap(); - /// let r = mem.find_region(addr).unwrap(); - /// assert_eq!(r.is_hugetlbfs(), None); - /// # } - /// ``` - #[cfg(target_os = "linux")] - fn is_hugetlbfs(&self) -> Option { - None - } -} - -/// Errors that can occur when dealing with [`GuestRegionCollection`]s -#[derive(Debug, thiserror::Error)] -pub enum GuestRegionCollectionError { - /// No memory region found. - #[error("No memory region found")] - NoMemoryRegion, - /// Some of the memory regions intersect with each other. - #[error("Some of the memory regions intersect with each other")] - MemoryRegionOverlap, - /// The provided memory regions haven't been sorted. - #[error("The provided memory regions haven't been sorted")] - UnsortedMemoryRegions, -} - -/// [`GuestMemory`](trait.GuestMemory.html) implementation based on a homogeneous collection -/// of [`GuestMemoryRegion`] implementations. -/// -/// Represents a sorted set of non-overlapping physical guest memory regions. -#[derive(Debug)] -pub struct GuestRegionCollection { - regions: Vec>, -} - -impl Default for GuestRegionCollection { - fn default() -> Self { - Self { - regions: Vec::new(), - } - } -} - -impl Clone for GuestRegionCollection { - fn clone(&self) -> Self { - GuestRegionCollection { - regions: self.regions.iter().map(Arc::clone).collect(), - } - } -} - -impl GuestRegionCollection { - /// Creates an empty `GuestMemoryMmap` instance. - pub fn new() -> Self { - Self::default() - } - - /// Creates a new [`GuestRegionCollection`] from a vector of regions. - /// - /// # Arguments - /// - /// * `regions` - The vector of regions. - /// The regions shouldn't overlap, and they should be sorted - /// by the starting address. - pub fn from_regions( - mut regions: Vec, - ) -> std::result::Result { - Self::from_arc_regions(regions.drain(..).map(Arc::new).collect()) - } - - /// Creates a new [`GuestRegionCollection`] from a vector of Arc regions. 
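The constructor rules above (at least one region, sorted by start address, no overlaps) are easiest to exercise through `GuestMemoryMmap`, which in this crate version is built on `GuestRegionCollection` with mmap-backed regions. A sketch (not part of the patch), assuming the `backend-mmap` feature; it also shows the gap-aware `checked_offset` behaviour that the region tests below rely on:

```rust
use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap};

fn main() {
    // Two disjoint, sorted regions with a hole between 0x400 and 0x800.
    let gm = GuestMemoryMmap::<()>::from_ranges(&[
        (GuestAddress(0x000), 0x400),
        (GuestAddress(0x800), 0x400),
    ])
    .expect("regions are sorted and disjoint");
    assert_eq!(gm.num_regions(), 2);

    // Overlapping ranges are rejected when the collection is built.
    assert!(GuestMemoryMmap::<()>::from_ranges(&[
        (GuestAddress(0x000), 0x400),
        (GuestAddress(0x200), 0x400),
    ])
    .is_err());

    // checked_offset only validates the resulting address, so it may land in
    // another region (0xa00) or fail inside the hole (0x600).
    assert_eq!(
        gm.checked_offset(GuestAddress(0), 0xa00),
        Some(GuestAddress(0xa00))
    );
    assert_eq!(gm.checked_offset(GuestAddress(0), 0x600), None);
}
```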
- /// - /// Similar to the constructor `from_regions()` as it returns a - /// [`GuestRegionCollection`]. The need for this constructor is to provide a way for - /// consumer of this API to create a new [`GuestRegionCollection`] based on existing - /// regions coming from an existing [`GuestRegionCollection`] instance. - /// - /// # Arguments - /// - /// * `regions` - The vector of `Arc` regions. - /// The regions shouldn't overlap and they should be sorted - /// by the starting address. - pub fn from_arc_regions( - regions: Vec>, - ) -> std::result::Result { - if regions.is_empty() { - return Err(GuestRegionCollectionError::NoMemoryRegion); - } - - for window in regions.windows(2) { - let prev = &window[0]; - let next = &window[1]; - - if prev.start_addr() > next.start_addr() { - return Err(GuestRegionCollectionError::UnsortedMemoryRegions); - } - - if prev.last_addr() >= next.start_addr() { - return Err(GuestRegionCollectionError::MemoryRegionOverlap); - } - } - - Ok(Self { regions }) - } - - /// Insert a region into the `GuestMemoryMmap` object and return a new `GuestMemoryMmap`. - /// - /// # Arguments - /// * `region`: the memory region to insert into the guest memory object. - pub fn insert_region( - &self, - region: Arc, - ) -> std::result::Result, GuestRegionCollectionError> { - let mut regions = self.regions.clone(); - regions.push(region); - regions.sort_by_key(|x| x.start_addr()); - - Self::from_arc_regions(regions) - } - - /// Remove a region from the [`GuestRegionCollection`] object and return a new `GuestRegionCollection` - /// on success, together with the removed region. - /// - /// # Arguments - /// * `base`: base address of the region to be removed - /// * `size`: size of the region to be removed - pub fn remove_region( - &self, - base: GuestAddress, - size: GuestUsize, - ) -> std::result::Result<(GuestRegionCollection, Arc), GuestRegionCollectionError> { - if let Ok(region_index) = self.regions.binary_search_by_key(&base, |x| x.start_addr()) { - if self.regions.get(region_index).unwrap().len() == size { - let mut regions = self.regions.clone(); - let region = regions.remove(region_index); - return Ok((Self { regions }, region)); - } - } - - Err(GuestRegionCollectionError::NoMemoryRegion) - } -} - -impl GuestMemory for GuestRegionCollection { - type R = R; - - fn num_regions(&self) -> usize { - self.regions.len() - } - - fn find_region(&self, addr: GuestAddress) -> Option<&R> { - let index = match self.regions.binary_search_by_key(&addr, |x| x.start_addr()) { - Ok(x) => Some(x), - // Within the closest region with starting address < addr - Err(x) if (x > 0 && addr <= self.regions[x - 1].last_addr()) => Some(x - 1), - _ => None, - }; - index.map(|x| self.regions[x].as_ref()) - } - - fn iter(&self) -> impl Iterator { - self.regions.iter().map(AsRef::as_ref) - } -} - -/// A marker trait that if implemented on a type `R` makes available a default -/// implementation of `Bytes` for `R`, based on the assumption -/// that the entire `GuestMemoryRegion` is just traditional memory without any -/// special access requirements. -pub trait GuestMemoryRegionBytes: GuestMemoryRegion {} - -impl Bytes for R { - type E = GuestMemoryError; - - /// # Examples - /// * Write a slice at guest address 0x1200. 
- /// - /// ``` - /// # #[cfg(feature = "backend-mmap")] - /// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap}; - /// # - /// # #[cfg(feature = "backend-mmap")] - /// # { - /// # let start_addr = GuestAddress(0x1000); - /// # let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)]) - /// # .expect("Could not create guest memory"); - /// # - /// let res = gm - /// .write(&[1, 2, 3, 4, 5], GuestAddress(0x1200)) - /// .expect("Could not write to guest memory"); - /// assert_eq!(5, res); - /// # } - /// ``` - fn write(&self, buf: &[u8], addr: MemoryRegionAddress) -> Result { - let maddr = addr.raw_value() as usize; - self.as_volatile_slice()? - .write(buf, maddr) - .map_err(Into::into) - } - - /// # Examples - /// * Read a slice of length 16 at guestaddress 0x1200. - /// - /// ``` - /// # #[cfg(feature = "backend-mmap")] - /// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap}; - /// # - /// # #[cfg(feature = "backend-mmap")] - /// # { - /// # let start_addr = GuestAddress(0x1000); - /// # let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)]) - /// # .expect("Could not create guest memory"); - /// # - /// let buf = &mut [0u8; 16]; - /// let res = gm - /// .read(buf, GuestAddress(0x1200)) - /// .expect("Could not read from guest memory"); - /// assert_eq!(16, res); - /// # } - /// ``` - fn read(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> Result { - let maddr = addr.raw_value() as usize; - self.as_volatile_slice()? - .read(buf, maddr) - .map_err(Into::into) - } - - fn write_slice(&self, buf: &[u8], addr: MemoryRegionAddress) -> Result<()> { - let maddr = addr.raw_value() as usize; - self.as_volatile_slice()? - .write_slice(buf, maddr) - .map_err(Into::into) - } - - fn read_slice(&self, buf: &mut [u8], addr: MemoryRegionAddress) -> Result<()> { - let maddr = addr.raw_value() as usize; - self.as_volatile_slice()? - .read_slice(buf, maddr) - .map_err(Into::into) - } - - fn read_volatile_from( - &self, - addr: MemoryRegionAddress, - src: &mut F, - count: usize, - ) -> Result - where - F: ReadVolatile, - { - self.as_volatile_slice()? - .read_volatile_from(addr.0 as usize, src, count) - .map_err(Into::into) - } - - fn read_exact_volatile_from( - &self, - addr: MemoryRegionAddress, - src: &mut F, - count: usize, - ) -> Result<()> - where - F: ReadVolatile, - { - self.as_volatile_slice()? - .read_exact_volatile_from(addr.0 as usize, src, count) - .map_err(Into::into) - } - - fn write_volatile_to( - &self, - addr: MemoryRegionAddress, - dst: &mut F, - count: usize, - ) -> Result - where - F: WriteVolatile, - { - self.as_volatile_slice()? - .write_volatile_to(addr.0 as usize, dst, count) - .map_err(Into::into) - } - - fn write_all_volatile_to( - &self, - addr: MemoryRegionAddress, - dst: &mut F, - count: usize, - ) -> Result<()> - where - F: WriteVolatile, - { - self.as_volatile_slice()? 
- .write_all_volatile_to(addr.0 as usize, dst, count) - .map_err(Into::into) - } - - fn store( - &self, - val: T, - addr: MemoryRegionAddress, - order: Ordering, - ) -> Result<()> { - self.as_volatile_slice().and_then(|s| { - s.store(val, addr.raw_value() as usize, order) - .map_err(Into::into) - }) - } - - fn load(&self, addr: MemoryRegionAddress, order: Ordering) -> Result { - self.as_volatile_slice() - .and_then(|s| s.load(addr.raw_value() as usize, order).map_err(Into::into)) - } -} - -#[cfg(test)] -pub(crate) mod tests { - use crate::region::{GuestMemoryRegionBytes, GuestRegionCollectionError}; - use crate::{ - Address, GuestAddress, GuestMemory, GuestMemoryRegion, GuestRegionCollection, GuestUsize, - }; - use matches::assert_matches; - use std::sync::Arc; - - #[derive(Debug, PartialEq, Eq)] - pub(crate) struct MockRegion { - pub(crate) start: GuestAddress, - pub(crate) len: GuestUsize, - } - - impl GuestMemoryRegion for MockRegion { - type B = (); - - fn len(&self) -> GuestUsize { - self.len - } - - fn start_addr(&self) -> GuestAddress { - self.start - } - - fn bitmap(&self) {} - } - - impl GuestMemoryRegionBytes for MockRegion {} - - pub(crate) type Collection = GuestRegionCollection; - - fn check_guest_memory_mmap( - maybe_guest_mem: Result, - expected_regions_summary: &[(GuestAddress, u64)], - ) { - assert!(maybe_guest_mem.is_ok()); - - let guest_mem = maybe_guest_mem.unwrap(); - assert_eq!(guest_mem.num_regions(), expected_regions_summary.len()); - let maybe_last_mem_reg = expected_regions_summary.last(); - if let Some((region_addr, region_size)) = maybe_last_mem_reg { - let mut last_addr = region_addr.unchecked_add(*region_size); - if last_addr.raw_value() != 0 { - last_addr = last_addr.unchecked_sub(1); - } - assert_eq!(guest_mem.last_addr(), last_addr); - } - for ((region_addr, region_size), mmap) in - expected_regions_summary.iter().zip(guest_mem.iter()) - { - assert_eq!(region_addr, &mmap.start); - assert_eq!(region_size, &mmap.len); - - assert!(guest_mem.find_region(*region_addr).is_some()); - } - } - - pub(crate) fn new_guest_memory_collection_from_regions( - regions_summary: &[(GuestAddress, u64)], - ) -> Result { - Collection::from_regions( - regions_summary - .iter() - .map(|&(start, len)| MockRegion { start, len }) - .collect(), - ) - } - - fn new_guest_memory_collection_from_arc_regions( - regions_summary: &[(GuestAddress, u64)], - ) -> Result { - Collection::from_arc_regions( - regions_summary - .iter() - .map(|&(start, len)| Arc::new(MockRegion { start, len })) - .collect(), - ) - } - - #[test] - fn test_no_memory_region() { - let regions_summary = []; - - assert_matches!( - new_guest_memory_collection_from_regions(®ions_summary).unwrap_err(), - GuestRegionCollectionError::NoMemoryRegion - ); - assert_matches!( - new_guest_memory_collection_from_arc_regions(®ions_summary).unwrap_err(), - GuestRegionCollectionError::NoMemoryRegion - ); - } - - #[test] - fn test_overlapping_memory_regions() { - let regions_summary = [(GuestAddress(0), 100), (GuestAddress(99), 100)]; - - assert_matches!( - new_guest_memory_collection_from_regions(®ions_summary).unwrap_err(), - GuestRegionCollectionError::MemoryRegionOverlap - ); - assert_matches!( - new_guest_memory_collection_from_arc_regions(®ions_summary).unwrap_err(), - GuestRegionCollectionError::MemoryRegionOverlap - ); - } - - #[test] - fn test_unsorted_memory_regions() { - let regions_summary = [(GuestAddress(100), 100), (GuestAddress(0), 100)]; - - assert_matches!( - 
new_guest_memory_collection_from_regions(®ions_summary).unwrap_err(), - GuestRegionCollectionError::UnsortedMemoryRegions - ); - assert_matches!( - new_guest_memory_collection_from_arc_regions(®ions_summary).unwrap_err(), - GuestRegionCollectionError::UnsortedMemoryRegions - ); - } - - #[test] - fn test_valid_memory_regions() { - let regions_summary = [(GuestAddress(0), 100), (GuestAddress(100), 100)]; - - let guest_mem = Collection::new(); - assert_eq!(guest_mem.num_regions(), 0); - - check_guest_memory_mmap( - new_guest_memory_collection_from_regions(®ions_summary), - ®ions_summary, - ); - - check_guest_memory_mmap( - new_guest_memory_collection_from_arc_regions(®ions_summary), - ®ions_summary, - ); - } - - #[test] - fn test_mmap_insert_region() { - let region_size = 0x1000; - let regions = vec![ - (GuestAddress(0x0), region_size), - (GuestAddress(0x10_0000), region_size), - ]; - let mem_orig = new_guest_memory_collection_from_regions(®ions).unwrap(); - let mut gm = mem_orig.clone(); - assert_eq!(mem_orig.num_regions(), 2); - - let new_regions = [ - (GuestAddress(0x8000), 0x1000), - (GuestAddress(0x4000), 0x1000), - (GuestAddress(0xc000), 0x1000), - ]; - - for (start, len) in new_regions { - gm = gm - .insert_region(Arc::new(MockRegion { start, len })) - .unwrap(); - } - - gm.insert_region(Arc::new(MockRegion { - start: GuestAddress(0xc000), - len: 0x1000, - })) - .unwrap_err(); - - assert_eq!(mem_orig.num_regions(), 2); - assert_eq!(gm.num_regions(), 5); - - let regions = gm.iter().collect::>(); - - assert_eq!(regions[0].start_addr(), GuestAddress(0x0000)); - assert_eq!(regions[1].start_addr(), GuestAddress(0x4000)); - assert_eq!(regions[2].start_addr(), GuestAddress(0x8000)); - assert_eq!(regions[3].start_addr(), GuestAddress(0xc000)); - assert_eq!(regions[4].start_addr(), GuestAddress(0x10_0000)); - } - - #[test] - fn test_mmap_remove_region() { - let region_size = 0x1000; - let regions = vec![ - (GuestAddress(0x0), region_size), - (GuestAddress(0x10_0000), region_size), - ]; - let mem_orig = new_guest_memory_collection_from_regions(®ions).unwrap(); - let gm = mem_orig.clone(); - assert_eq!(mem_orig.num_regions(), 2); - - gm.remove_region(GuestAddress(0), 128).unwrap_err(); - gm.remove_region(GuestAddress(0x4000), 128).unwrap_err(); - let (gm, region) = gm.remove_region(GuestAddress(0x10_0000), 0x1000).unwrap(); - - assert_eq!(mem_orig.num_regions(), 2); - assert_eq!(gm.num_regions(), 1); - - assert_eq!(gm.iter().next().unwrap().start_addr(), GuestAddress(0x0000)); - assert_eq!(region.start_addr(), GuestAddress(0x10_0000)); - } - - #[test] - fn test_iter() { - let region_size = 0x400; - let regions = vec![ - (GuestAddress(0x0), region_size), - (GuestAddress(0x1000), region_size), - ]; - let mut iterated_regions = Vec::new(); - let gm = new_guest_memory_collection_from_regions(®ions).unwrap(); - - for region in gm.iter() { - assert_eq!(region.len(), region_size as GuestUsize); - } - - for region in gm.iter() { - iterated_regions.push((region.start_addr(), region.len())); - } - assert_eq!(regions, iterated_regions); - - assert!(regions - .iter() - .map(|x| (x.0, x.1)) - .eq(iterated_regions.iter().copied())); - - let mmap_regions = gm.iter().collect::>(); - - assert_eq!(mmap_regions[0].start, regions[0].0); - assert_eq!(mmap_regions[1].start, regions[1].0); - } - - #[test] - fn test_address_in_range() { - let start_addr1 = GuestAddress(0x0); - let start_addr2 = GuestAddress(0x800); - let guest_mem = - new_guest_memory_collection_from_regions(&[(start_addr1, 0x400), (start_addr2, 
0x400)]) - .unwrap(); - - assert!(guest_mem.address_in_range(GuestAddress(0x200))); - assert!(!guest_mem.address_in_range(GuestAddress(0x600))); - assert!(guest_mem.address_in_range(GuestAddress(0xa00))); - assert!(!guest_mem.address_in_range(GuestAddress(0xc00))); - } - - #[test] - fn test_check_address() { - let start_addr1 = GuestAddress(0x0); - let start_addr2 = GuestAddress(0x800); - let guest_mem = - new_guest_memory_collection_from_regions(&[(start_addr1, 0x400), (start_addr2, 0x400)]) - .unwrap(); - - assert_eq!( - guest_mem.check_address(GuestAddress(0x200)), - Some(GuestAddress(0x200)) - ); - assert_eq!(guest_mem.check_address(GuestAddress(0x600)), None); - assert_eq!( - guest_mem.check_address(GuestAddress(0xa00)), - Some(GuestAddress(0xa00)) - ); - assert_eq!(guest_mem.check_address(GuestAddress(0xc00)), None); - } - - #[test] - fn test_checked_offset() { - let start_addr1 = GuestAddress(0); - let start_addr2 = GuestAddress(0x800); - let start_addr3 = GuestAddress(0xc00); - let guest_mem = new_guest_memory_collection_from_regions(&[ - (start_addr1, 0x400), - (start_addr2, 0x400), - (start_addr3, 0x400), - ]) - .unwrap(); - - assert_eq!( - guest_mem.checked_offset(start_addr1, 0x200), - Some(GuestAddress(0x200)) - ); - assert_eq!( - guest_mem.checked_offset(start_addr1, 0xa00), - Some(GuestAddress(0xa00)) - ); - assert_eq!( - guest_mem.checked_offset(start_addr2, 0x7ff), - Some(GuestAddress(0xfff)) - ); - assert_eq!(guest_mem.checked_offset(start_addr2, 0xc00), None); - assert_eq!(guest_mem.checked_offset(start_addr1, usize::MAX), None); - - assert_eq!(guest_mem.checked_offset(start_addr1, 0x400), None); - assert_eq!( - guest_mem.checked_offset(start_addr1, 0x400 - 1), - Some(GuestAddress(0x400 - 1)) - ); - } -} diff --git a/src/volatile_memory.rs b/src/volatile_memory.rs index f242fce1..a5425798 100644 --- a/src/volatile_memory.rs +++ b/src/volatile_memory.rs @@ -26,2246 +26,14 @@ //! done concurrently without synchronization. With volatile access we know that the compiler has //! not reordered or elided the access. -use std::cmp::min; -use std::io; -use std::marker::PhantomData; -use std::mem::{align_of, size_of}; -use std::ptr::copy; -use std::ptr::{read_volatile, write_volatile}; -use std::result; -use std::sync::atomic::Ordering; - -use crate::atomic_integer::AtomicInteger; -use crate::bitmap::{Bitmap, BitmapSlice, BS}; -use crate::{AtomicAccess, ByteValued, Bytes}; - -#[cfg(all(feature = "backend-mmap", feature = "xen", target_family = "unix"))] -use crate::mmap::xen::{MmapXen as MmapInfo, MmapXenSlice}; - -#[cfg(not(feature = "xen"))] -type MmapInfo = std::marker::PhantomData<()>; - -use crate::io::{retry_eintr, ReadVolatile, WriteVolatile}; -use copy_slice_impl::{copy_from_volatile_slice, copy_to_volatile_slice}; - -/// `VolatileMemory` related errors. -#[allow(missing_docs)] -#[derive(Debug, thiserror::Error)] -pub enum Error { - /// `addr` is out of bounds of the volatile memory slice. - #[error("address 0x{addr:x} is out of bounds")] - OutOfBounds { addr: usize }, - /// Taking a slice at `base` with `offset` would overflow `usize`. - #[error("address 0x{base:x} offset by 0x{offset:x} would overflow")] - Overflow { base: usize, offset: usize }, - /// Taking a slice whose size overflows `usize`. - #[error("{nelements:?} elements of size {size:?} would overflow a usize")] - TooBig { nelements: usize, size: usize }, - /// Trying to obtain a misaligned reference. 
- #[error("address 0x{addr:x} is not aligned to {alignment:?}")] - Misaligned { addr: usize, alignment: usize }, - /// Writing to memory failed - #[error("{0}")] - IOError(io::Error), - /// Incomplete read or write - #[error("only used {completed} bytes in {expected} long buffer")] - PartialBuffer { expected: usize, completed: usize }, -} - -/// Result of volatile memory operations. -pub type Result = result::Result; - -/// Convenience function for computing `base + offset`. -/// -/// # Errors -/// -/// Returns [`Err(Error::Overflow)`](enum.Error.html#variant.Overflow) in case `base + offset` -/// exceeds `usize::MAX`. -/// -/// # Examples -/// -/// ``` -/// # use vm_memory::volatile_memory::compute_offset; -/// # -/// assert_eq!(108, compute_offset(100, 8).unwrap()); -/// assert!(compute_offset(usize::MAX, 6).is_err()); -/// ``` -pub fn compute_offset(base: usize, offset: usize) -> Result { - match base.checked_add(offset) { - None => Err(Error::Overflow { base, offset }), - Some(m) => Ok(m), - } -} - -/// Types that support raw volatile access to their data. -pub trait VolatileMemory { - /// Type used for dirty memory tracking. - type B: Bitmap; - - /// Gets the size of this slice. - fn len(&self) -> usize; - - /// Check whether the region is empty. - fn is_empty(&self) -> bool { - self.len() == 0 - } - - /// Returns a [`VolatileSlice`](struct.VolatileSlice.html) of `count` bytes starting at - /// `offset`. - /// - /// Note that the property `get_slice(offset, count).len() == count` MUST NOT be - /// relied on for the correctness of unsafe code. This is a safe function inside of a - /// safe trait, and implementors are under no obligation to follow its documentation. - fn get_slice(&self, offset: usize, count: usize) -> Result>>; - - /// Gets a slice of memory for the entire region that supports volatile access. - fn as_volatile_slice(&self) -> VolatileSlice> { - self.get_slice(0, self.len()).unwrap() - } - - /// Gets a `VolatileRef` at `offset`. - fn get_ref(&self, offset: usize) -> Result>> { - let slice = self.get_slice(offset, size_of::())?; - - assert_eq!( - slice.len(), - size_of::(), - "VolatileMemory::get_slice(offset, count) returned slice of length != count." - ); - - // SAFETY: This is safe because the invariants of the constructors of VolatileSlice ensure that - // slice.addr is valid memory of size slice.len(). The assert above ensures that - // the length of the slice is exactly enough to hold one `T`. Lastly, the lifetime of the - // returned VolatileRef match that of the VolatileSlice returned by get_slice and thus the - // lifetime one `self`. - unsafe { - Ok(VolatileRef::with_bitmap( - slice.addr, - slice.bitmap, - slice.mmap, - )) - } - } - - /// Returns a [`VolatileArrayRef`](struct.VolatileArrayRef.html) of `n` elements starting at - /// `offset`. - fn get_array_ref( - &self, - offset: usize, - n: usize, - ) -> Result>> { - // Use isize to avoid problems with ptr::offset and ptr::add down the line. - let nbytes = isize::try_from(n) - .ok() - .and_then(|n| n.checked_mul(size_of::() as isize)) - .ok_or(Error::TooBig { - nelements: n, - size: size_of::(), - })?; - let slice = self.get_slice(offset, nbytes as usize)?; - - assert_eq!( - slice.len(), - nbytes as usize, - "VolatileMemory::get_slice(offset, count) returned slice of length != count." - ); - - // SAFETY: This is safe because the invariants of the constructors of VolatileSlice ensure that - // slice.addr is valid memory of size slice.len(). 
The assert above ensures that - // the length of the slice is exactly enough to hold `n` instances of `T`. Lastly, the lifetime of the - // returned VolatileArrayRef match that of the VolatileSlice returned by get_slice and thus the - // lifetime one `self`. - unsafe { - Ok(VolatileArrayRef::with_bitmap( - slice.addr, - n, - slice.bitmap, - slice.mmap, - )) - } - } - - /// Returns a reference to an instance of `T` at `offset`. - /// - /// # Safety - /// To use this safely, the caller must guarantee that there are no other - /// users of the given chunk of memory for the lifetime of the result. - /// - /// # Errors - /// - /// If the resulting pointer is not aligned, this method will return an - /// [`Error`](enum.Error.html). - unsafe fn aligned_as_ref(&self, offset: usize) -> Result<&T> { - let slice = self.get_slice(offset, size_of::())?; - slice.check_alignment(align_of::())?; - - assert_eq!( - slice.len(), - size_of::(), - "VolatileMemory::get_slice(offset, count) returned slice of length != count." - ); - - // SAFETY: This is safe because the invariants of the constructors of VolatileSlice ensure that - // slice.addr is valid memory of size slice.len(). The assert above ensures that - // the length of the slice is exactly enough to hold one `T`. - // Dereferencing the pointer is safe because we check the alignment above, and the invariants - // of this function ensure that no aliasing pointers exist. Lastly, the lifetime of the - // returned VolatileArrayRef match that of the VolatileSlice returned by get_slice and thus the - // lifetime one `self`. - unsafe { Ok(&*(slice.addr as *const T)) } - } - - /// Returns a mutable reference to an instance of `T` at `offset`. Mutable accesses performed - /// using the resulting reference are not automatically accounted for by the dirty bitmap - /// tracking functionality. - /// - /// # Safety - /// - /// To use this safely, the caller must guarantee that there are no other - /// users of the given chunk of memory for the lifetime of the result. - /// - /// # Errors - /// - /// If the resulting pointer is not aligned, this method will return an - /// [`Error`](enum.Error.html). - unsafe fn aligned_as_mut(&self, offset: usize) -> Result<&mut T> { - let slice = self.get_slice(offset, size_of::())?; - slice.check_alignment(align_of::())?; - - assert_eq!( - slice.len(), - size_of::(), - "VolatileMemory::get_slice(offset, count) returned slice of length != count." - ); - - // SAFETY: This is safe because the invariants of the constructors of VolatileSlice ensure that - // slice.addr is valid memory of size slice.len(). The assert above ensures that - // the length of the slice is exactly enough to hold one `T`. - // Dereferencing the pointer is safe because we check the alignment above, and the invariants - // of this function ensure that no aliasing pointers exist. Lastly, the lifetime of the - // returned VolatileArrayRef match that of the VolatileSlice returned by get_slice and thus the - // lifetime one `self`. - - unsafe { Ok(&mut *(slice.addr as *mut T)) } - } - - /// Returns a reference to an instance of `T` at `offset`. Mutable accesses performed - /// using the resulting reference are not automatically accounted for by the dirty bitmap - /// tracking functionality. - /// - /// # Errors - /// - /// If the resulting pointer is not aligned, this method will return an - /// [`Error`](enum.Error.html). 
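As a quick illustration of the `get_array_ref` bounds check described above, a minimal standalone sketch (not part of the patch):

```rust
use vm_memory::{VolatileMemory, VolatileSlice};

fn main() {
    let mut backing = [0u8; 8];
    let slice = VolatileSlice::from(&mut backing[..]);

    // View the first four bytes as two u16 elements.
    let arr = slice.get_array_ref::<u16>(0, 2).unwrap();
    arr.store(0, 0x1122);
    arr.store(1, 0x3344);
    assert_eq!(arr.load(0), 0x1122);

    // Asking for more elements than the slice can hold is rejected.
    assert!(slice.get_array_ref::<u16>(0, 5).is_err());
}
```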
- fn get_atomic_ref(&self, offset: usize) -> Result<&T> { - let slice = self.get_slice(offset, size_of::())?; - slice.check_alignment(align_of::())?; - - assert_eq!( - slice.len(), - size_of::(), - "VolatileMemory::get_slice(offset, count) returned slice of length != count." - ); - - // SAFETY: This is safe because the invariants of the constructors of VolatileSlice ensure that - // slice.addr is valid memory of size slice.len(). The assert above ensures that - // the length of the slice is exactly enough to hold one `T`. - // Dereferencing the pointer is safe because we check the alignment above. Lastly, the lifetime of the - // returned VolatileArrayRef match that of the VolatileSlice returned by get_slice and thus the - // lifetime one `self`. - unsafe { Ok(&*(slice.addr as *const T)) } - } - - /// Returns the sum of `base` and `offset` if it is valid to access a range of `offset` - /// bytes starting at `base`. - /// - /// Specifically, allows accesses of length 0 at the end of a slice: - /// - /// ```rust - /// # use vm_memory::{VolatileMemory, VolatileSlice}; - /// let mut arr = [1, 2, 3]; - /// let slice = VolatileSlice::from(arr.as_mut_slice()); - /// - /// assert_eq!(slice.compute_end_offset(3, 0).unwrap(), 3); - /// ``` - fn compute_end_offset(&self, base: usize, offset: usize) -> Result { - let mem_end = compute_offset(base, offset)?; - if mem_end > self.len() { - return Err(Error::OutOfBounds { addr: mem_end }); - } - Ok(mem_end) - } -} - -impl<'a> From<&'a mut [u8]> for VolatileSlice<'a, ()> { - fn from(value: &'a mut [u8]) -> Self { - // SAFETY: Since we construct the VolatileSlice from a rust slice, we know that - // the memory at addr `value as *mut u8` is valid for reads and writes (because mutable - // reference) of len `value.len()`. Since the `VolatileSlice` inherits the lifetime `'a`, - // it is not possible to access/mutate `value` while the VolatileSlice is alive. - // - // Note that it is possible for multiple aliasing sub slices of this `VolatileSlice`s to - // be created through `VolatileSlice::subslice`. This is OK, as pointers are allowed to - // alias, and it is impossible to get rust-style references from a `VolatileSlice`. - unsafe { VolatileSlice::new(value.as_mut_ptr(), value.len()) } - } -} - -#[repr(C, packed)] -struct Packed(T); - -/// A guard to perform mapping and protect unmapping of the memory. -#[derive(Debug)] -pub struct PtrGuard { - addr: *mut u8, - len: usize, - - // This isn't used anymore, but it protects the slice from getting unmapped while in use. - // Once this goes out of scope, the memory is unmapped automatically. - #[cfg(all(feature = "xen", target_family = "unix"))] - _slice: MmapXenSlice, -} - -#[allow(clippy::len_without_is_empty)] -impl PtrGuard { - #[allow(unused_variables)] - fn new(mmap: Option<&MmapInfo>, addr: *mut u8, write: bool, len: usize) -> Self { - #[cfg(all(feature = "xen", target_family = "unix"))] - let (addr, _slice) = { - let prot = if write { - libc::PROT_WRITE - } else { - libc::PROT_READ - }; - let slice = MmapInfo::mmap(mmap, addr, prot, len); - (slice.addr(), slice) - }; - - Self { - addr, - len, - - #[cfg(all(feature = "xen", target_family = "unix"))] - _slice, - } - } - - fn read(mmap: Option<&MmapInfo>, addr: *mut u8, len: usize) -> Self { - Self::new(mmap, addr, false, len) - } - - /// Returns a non-mutable pointer to the beginning of the slice. - pub fn as_ptr(&self) -> *const u8 { - self.addr - } - - /// Gets the length of the mapped region. 
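A short, self-contained sketch of the `From<&mut [u8]>` conversion and the `compute_end_offset` bounds rule covered above (illustrative only):

```rust
use vm_memory::{VolatileMemory, VolatileSlice};

fn main() {
    let mut backing = [0u8; 32];
    let slice = VolatileSlice::from(&mut backing[..]);

    // A zero-length access at the very end of the slice is allowed...
    assert_eq!(slice.compute_end_offset(32, 0).unwrap(), 32);
    // ...but anything that would run past the end is rejected.
    assert!(slice.compute_end_offset(30, 4).is_err());

    // Typed access through `get_ref` tolerates unaligned offsets because it
    // reads and writes a packed wrapper with `read_volatile`/`write_volatile`.
    let r = slice.get_ref::<u32>(9).unwrap();
    r.store(0xdead_beef);
    assert_eq!(r.load(), 0xdead_beef);
}
```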
- pub fn len(&self) -> usize { - self.len - } -} - -/// A mutable guard to perform mapping and protect unmapping of the memory. -#[derive(Debug)] -pub struct PtrGuardMut(PtrGuard); - -#[allow(clippy::len_without_is_empty)] -impl PtrGuardMut { - fn write(mmap: Option<&MmapInfo>, addr: *mut u8, len: usize) -> Self { - Self(PtrGuard::new(mmap, addr, true, len)) - } - - /// Returns a mutable pointer to the beginning of the slice. Mutable accesses performed - /// using the resulting pointer are not automatically accounted for by the dirty bitmap - /// tracking functionality. - pub fn as_ptr(&self) -> *mut u8 { - self.0.addr - } - - /// Gets the length of the mapped region. - pub fn len(&self) -> usize { - self.0.len - } -} - -/// A slice of raw memory that supports volatile access. -#[derive(Clone, Copy, Debug)] -pub struct VolatileSlice<'a, B = ()> { - addr: *mut u8, - size: usize, - bitmap: B, - mmap: Option<&'a MmapInfo>, -} - -impl<'a> VolatileSlice<'a, ()> { - /// Creates a slice of raw memory that must support volatile access. - /// - /// # Safety - /// - /// To use this safely, the caller must guarantee that the memory at `addr` is `size` bytes long - /// and is available for the duration of the lifetime of the new `VolatileSlice`. The caller - /// must also guarantee that all other users of the given chunk of memory are using volatile - /// accesses. - pub unsafe fn new(addr: *mut u8, size: usize) -> VolatileSlice<'a> { - Self::with_bitmap(addr, size, (), None) - } -} - -impl<'a, B: BitmapSlice> VolatileSlice<'a, B> { - /// Creates a slice of raw memory that must support volatile access, and uses the provided - /// `bitmap` object for dirty page tracking. - /// - /// # Safety - /// - /// To use this safely, the caller must guarantee that the memory at `addr` is `size` bytes long - /// and is available for the duration of the lifetime of the new `VolatileSlice`. The caller - /// must also guarantee that all other users of the given chunk of memory are using volatile - /// accesses. - pub unsafe fn with_bitmap( - addr: *mut u8, - size: usize, - bitmap: B, - mmap: Option<&'a MmapInfo>, - ) -> VolatileSlice<'a, B> { - VolatileSlice { - addr, - size, - bitmap, - mmap, - } - } - - /// Returns a guard for the pointer to the underlying memory. - pub fn ptr_guard(&self) -> PtrGuard { - PtrGuard::read(self.mmap, self.addr, self.len()) - } - - /// Returns a mutable guard for the pointer to the underlying memory. - pub fn ptr_guard_mut(&self) -> PtrGuardMut { - PtrGuardMut::write(self.mmap, self.addr, self.len()) - } - - /// Gets the size of this slice. - pub fn len(&self) -> usize { - self.size - } - - /// Checks if the slice is empty. - pub fn is_empty(&self) -> bool { - self.size == 0 - } - - /// Borrows the inner `BitmapSlice`. - pub fn bitmap(&self) -> &B { - &self.bitmap - } - - /// Divides one slice into two at an index. 
- /// - /// # Example - /// - /// ``` - /// # use vm_memory::{VolatileMemory, VolatileSlice}; - /// # - /// # // Create a buffer - /// # let mut mem = [0u8; 32]; - /// # - /// # // Get a `VolatileSlice` from the buffer - /// let vslice = VolatileSlice::from(&mut mem[..]); - /// - /// let (start, end) = vslice.split_at(8).expect("Could not split VolatileSlice"); - /// assert_eq!(8, start.len()); - /// assert_eq!(24, end.len()); - /// ``` - pub fn split_at(&self, mid: usize) -> Result<(Self, Self)> { - let end = self.offset(mid)?; - let start = - // SAFETY: safe because self.offset() already checked the bounds - unsafe { VolatileSlice::with_bitmap(self.addr, mid, self.bitmap.clone(), self.mmap) }; - - Ok((start, end)) - } - - /// Returns a subslice of this [`VolatileSlice`](struct.VolatileSlice.html) starting at - /// `offset` with `count` length. - /// - /// The returned subslice is a copy of this slice with the address increased by `offset` bytes - /// and the size set to `count` bytes. - pub fn subslice(&self, offset: usize, count: usize) -> Result { - let _ = self.compute_end_offset(offset, count)?; - - // SAFETY: This is safe because the pointer is range-checked by compute_end_offset, and - // the lifetime is the same as the original slice. - unsafe { - Ok(VolatileSlice::with_bitmap( - self.addr.add(offset), - count, - self.bitmap.slice_at(offset), - self.mmap, - )) - } - } - - /// Returns a subslice of this [`VolatileSlice`](struct.VolatileSlice.html) starting at - /// `offset`. - /// - /// The returned subslice is a copy of this slice with the address increased by `count` bytes - /// and the size reduced by `count` bytes. - pub fn offset(&self, count: usize) -> Result> { - let new_addr = (self.addr as usize) - .checked_add(count) - .ok_or(Error::Overflow { - base: self.addr as usize, - offset: count, - })?; - let new_size = self - .size - .checked_sub(count) - .ok_or(Error::OutOfBounds { addr: new_addr })?; - // SAFETY: Safe because the memory has the same lifetime and points to a subset of the - // memory of the original slice. - unsafe { - Ok(VolatileSlice::with_bitmap( - self.addr.add(count), - new_size, - self.bitmap.slice_at(count), - self.mmap, - )) - } - } - - /// Copies as many elements of type `T` as possible from this slice to `buf`. - /// - /// Copies `self.len()` or `buf.len()` times the size of `T` bytes, whichever is smaller, - /// to `buf`. The copy happens from smallest to largest address in `T` sized chunks - /// using volatile reads. - /// - /// # Examples - /// - /// ``` - /// # use vm_memory::{VolatileMemory, VolatileSlice}; - /// # - /// let mut mem = [0u8; 32]; - /// let vslice = VolatileSlice::from(&mut mem[..]); - /// let mut buf = [5u8; 16]; - /// let res = vslice.copy_to(&mut buf[..]); - /// - /// assert_eq!(16, res); - /// for &v in &buf[..] 
{ - /// assert_eq!(v, 0); - /// } - /// ``` - pub fn copy_to(&self, buf: &mut [T]) -> usize - where - T: ByteValued, - { - // A fast path for u8/i8 - if size_of::() == 1 { - let total = buf.len().min(self.len()); - - // SAFETY: - // - dst is valid for writes of at least `total`, since total <= buf.len() - // - src is valid for reads of at least `total` as total <= self.len() - // - The regions are non-overlapping as `src` points to guest memory and `buf` is - // a slice and thus has to live outside of guest memory (there can be more slices to - // guest memory without violating rust's aliasing rules) - // - size is always a multiple of alignment, so treating *mut T as *mut u8 is fine - unsafe { copy_from_volatile_slice(buf.as_mut_ptr() as *mut u8, self, total) } - } else { - let count = self.size / size_of::(); - let source = self.get_array_ref::(0, count).unwrap(); - source.copy_to(buf) - } - } - - /// Copies as many bytes as possible from this slice to the provided `slice`. - /// - /// The copies happen in an undefined order. - /// - /// # Examples - /// - /// ``` - /// # use vm_memory::{VolatileMemory, VolatileSlice}; - /// # - /// # // Create a buffer - /// # let mut mem = [0u8; 32]; - /// # - /// # // Get a `VolatileSlice` from the buffer - /// # let vslice = VolatileSlice::from(&mut mem[..]); - /// # - /// vslice.copy_to_volatile_slice( - /// vslice - /// .get_slice(16, 16) - /// .expect("Could not get VolatileSlice"), - /// ); - /// ``` - pub fn copy_to_volatile_slice(&self, slice: VolatileSlice) { - // SAFETY: Safe because the pointers are range-checked when the slices - // are created, and they never escape the VolatileSlices. - // FIXME: ... however, is it really okay to mix non-volatile - // operations such as copy with read_volatile and write_volatile? - unsafe { - let count = min(self.size, slice.size); - copy(self.addr, slice.addr, count); - slice.bitmap.mark_dirty(0, count); - } - } - - /// Copies as many elements of type `T` as possible from `buf` to this slice. - /// - /// The copy happens from smallest to largest address in `T` sized chunks using volatile writes. - /// - /// # Examples - /// - /// ``` - /// # use vm_memory::{VolatileMemory, VolatileSlice}; - /// # - /// let mut mem = [0u8; 32]; - /// let vslice = VolatileSlice::from(&mut mem[..]); - /// - /// let buf = [5u8; 64]; - /// vslice.copy_from(&buf[..]); - /// - /// for i in 0..4 { - /// let val = vslice - /// .get_ref::(i * 4) - /// .expect("Could not get value") - /// .load(); - /// assert_eq!(val, 0x05050505); - /// } - /// ``` - pub fn copy_from(&self, buf: &[T]) - where - T: ByteValued, - { - // A fast path for u8/i8 - if size_of::() == 1 { - let total = buf.len().min(self.len()); - // SAFETY: - // - dst is valid for writes of at least `total`, since total <= self.len() - // - src is valid for reads of at least `total` as total <= buf.len() - // - The regions are non-overlapping as `dst` points to guest memory and `buf` is - // a slice and thus has to live outside of guest memory (there can be more slices to - // guest memory without violating rust's aliasing rules) - // - size is always a multiple of alignment, so treating *mut T as *mut u8 is fine - unsafe { copy_to_volatile_slice(self, buf.as_ptr() as *const u8, total) }; - } else { - let count = self.size / size_of::(); - // It's ok to use unwrap here because `count` was computed based on the current - // length of `self`. 
- let dest = self.get_array_ref::(0, count).unwrap(); - - // No need to explicitly call `mark_dirty` after this call because - // `VolatileArrayRef::copy_from` already takes care of that. - dest.copy_from(buf); - }; - } - - /// Checks if the current slice is aligned at `alignment` bytes. - fn check_alignment(&self, alignment: usize) -> Result<()> { - // Check that the desired alignment is a power of two. - debug_assert!((alignment & (alignment - 1)) == 0); - if ((self.addr as usize) & (alignment - 1)) != 0 { - return Err(Error::Misaligned { - addr: self.addr as usize, - alignment, - }); - } - Ok(()) - } -} - -impl Bytes for VolatileSlice<'_, B> { - type E = Error; - - /// # Examples - /// * Write a slice of size 5 at offset 1020 of a 1024-byte `VolatileSlice`. - /// - /// ``` - /// # use vm_memory::{Bytes, VolatileMemory, VolatileSlice}; - /// # - /// let mut mem = [0u8; 1024]; - /// let vslice = VolatileSlice::from(&mut mem[..]); - /// let res = vslice.write(&[1, 2, 3, 4, 5], 1020); - /// - /// assert!(res.is_ok()); - /// assert_eq!(res.unwrap(), 4); - /// ``` - fn write(&self, mut buf: &[u8], addr: usize) -> Result { - if buf.is_empty() { - return Ok(0); - } - - if addr >= self.size { - return Err(Error::OutOfBounds { addr }); - } - - // NOTE: the duality of read <-> write here is correct. This is because we translate a call - // "volatile_slice.write(buf)" (e.g. "write to volatile_slice from buf") into - // "buf.read_volatile(volatile_slice)" (e.g. read from buf into volatile_slice) - buf.read_volatile(&mut self.offset(addr)?) - } - - /// # Examples - /// * Read a slice of size 16 at offset 1010 of a 1024-byte `VolatileSlice`. - /// - /// ``` - /// # use vm_memory::{Bytes, VolatileMemory, VolatileSlice}; - /// # - /// let mut mem = [0u8; 1024]; - /// let vslice = VolatileSlice::from(&mut mem[..]); - /// let buf = &mut [0u8; 16]; - /// let res = vslice.read(buf, 1010); - /// - /// assert!(res.is_ok()); - /// assert_eq!(res.unwrap(), 14); - /// ``` - fn read(&self, mut buf: &mut [u8], addr: usize) -> Result { - if buf.is_empty() { - return Ok(0); - } - - if addr >= self.size { - return Err(Error::OutOfBounds { addr }); - } - - // NOTE: The duality of read <-> write here is correct. This is because we translate a call - // volatile_slice.read(buf) (e.g. read from volatile_slice into buf) into - // "buf.write_volatile(volatile_slice)" (e.g. write into buf from volatile_slice) - // Both express data transfer from volatile_slice to buf. - buf.write_volatile(&self.offset(addr)?) - } - - /// # Examples - /// * Write a slice at offset 256. - /// - /// ``` - /// # use vm_memory::{Bytes, VolatileMemory, VolatileSlice}; - /// # - /// # // Create a buffer - /// # let mut mem = [0u8; 1024]; - /// # - /// # // Get a `VolatileSlice` from the buffer - /// # let vslice = VolatileSlice::from(&mut mem[..]); - /// # - /// let res = vslice.write_slice(&[1, 2, 3, 4, 5], 256); - /// - /// assert!(res.is_ok()); - /// assert_eq!(res.unwrap(), ()); - /// ``` - fn write_slice(&self, buf: &[u8], addr: usize) -> Result<()> { - // `mark_dirty` called within `self.write`. - let len = self.write(buf, addr)?; - if len != buf.len() { - return Err(Error::PartialBuffer { - expected: buf.len(), - completed: len, - }); - } - Ok(()) - } - - /// # Examples - /// * Read a slice of size 16 at offset 256. 
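The partial-transfer semantics shown in the doc examples above can be summarised in one standalone snippet: `write`/`read` copy as much as fits and report the count, while `write_slice`/`read_slice` fail with `PartialBuffer` on a short transfer (a sketch, not part of the patch):

```rust
use vm_memory::{Bytes, VolatileSlice};

fn main() {
    let mut backing = [0u8; 16];
    let slice = VolatileSlice::from(&mut backing[..]);

    // Only two bytes fit between offset 14 and the end of the slice.
    assert_eq!(slice.write(&[1, 2, 3, 4], 14).unwrap(), 2);

    // `write_slice` requires the whole buffer to fit.
    assert!(slice.write_slice(&[1, 2, 3, 4], 14).is_err());
    assert!(slice.write_slice(&[1, 2, 3, 4], 10).is_ok());

    // `read` mirrors the short-count behaviour.
    let mut buf = [0u8; 8];
    assert_eq!(slice.read(&mut buf, 12).unwrap(), 4);
}
```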
- /// - /// ``` - /// # use vm_memory::{Bytes, VolatileMemory, VolatileSlice}; - /// # - /// # // Create a buffer - /// # let mut mem = [0u8; 1024]; - /// # - /// # // Get a `VolatileSlice` from the buffer - /// # let vslice = VolatileSlice::from(&mut mem[..]); - /// # - /// let buf = &mut [0u8; 16]; - /// let res = vslice.read_slice(buf, 256); - /// - /// assert!(res.is_ok()); - /// ``` - fn read_slice(&self, buf: &mut [u8], addr: usize) -> Result<()> { - let len = self.read(buf, addr)?; - if len != buf.len() { - return Err(Error::PartialBuffer { - expected: buf.len(), - completed: len, - }); - } - Ok(()) - } - - fn read_volatile_from(&self, addr: usize, src: &mut F, count: usize) -> Result - where - F: ReadVolatile, - { - let slice = self.offset(addr)?; - /* Unwrap safe here because (0, min(len, count)) is definitely a valid subslice */ - let mut slice = slice.subslice(0, slice.len().min(count)).unwrap(); - retry_eintr!(src.read_volatile(&mut slice)) - } - - fn read_exact_volatile_from(&self, addr: usize, src: &mut F, count: usize) -> Result<()> - where - F: ReadVolatile, - { - src.read_exact_volatile(&mut self.get_slice(addr, count)?) - } - - fn write_volatile_to(&self, addr: usize, dst: &mut F, count: usize) -> Result - where - F: WriteVolatile, - { - let slice = self.offset(addr)?; - /* Unwrap safe here because (0, min(len, count)) is definitely a valid subslice */ - let slice = slice.subslice(0, slice.len().min(count)).unwrap(); - retry_eintr!(dst.write_volatile(&slice)) - } - - fn write_all_volatile_to(&self, addr: usize, dst: &mut F, count: usize) -> Result<()> - where - F: WriteVolatile, - { - dst.write_all_volatile(&self.get_slice(addr, count)?) - } - - fn store(&self, val: T, addr: usize, order: Ordering) -> Result<()> { - self.get_atomic_ref::(addr).map(|r| { - r.store(val.into(), order); - self.bitmap.mark_dirty(addr, size_of::()) - }) - } - - fn load(&self, addr: usize, order: Ordering) -> Result { - self.get_atomic_ref::(addr) - .map(|r| r.load(order).into()) - } -} - -impl VolatileMemory for VolatileSlice<'_, B> { - type B = B; - - fn len(&self) -> usize { - self.size - } - - fn get_slice(&self, offset: usize, count: usize) -> Result> { - self.subslice(offset, count) - } -} - -/// A memory location that supports volatile access to an instance of `T`. -/// -/// # Examples -/// -/// ``` -/// # use vm_memory::VolatileRef; -/// # -/// let mut v = 5u32; -/// let v_ref = unsafe { VolatileRef::new(&mut v as *mut u32 as *mut u8) }; -/// -/// assert_eq!(v, 5); -/// assert_eq!(v_ref.load(), 5); -/// v_ref.store(500); -/// assert_eq!(v, 500); -/// ``` -#[derive(Clone, Copy, Debug)] -pub struct VolatileRef<'a, T, B = ()> { - addr: *mut Packed, - bitmap: B, - mmap: Option<&'a MmapInfo>, -} - -impl VolatileRef<'_, T, ()> -where - T: ByteValued, -{ - /// Creates a [`VolatileRef`](struct.VolatileRef.html) to an instance of `T`. - /// - /// # Safety - /// - /// To use this safely, the caller must guarantee that the memory at `addr` is big enough for a - /// `T` and is available for the duration of the lifetime of the new `VolatileRef`. The caller - /// must also guarantee that all other users of the given chunk of memory are using volatile - /// accesses. 
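For the `store`/`load` pair above (the `Bytes` methods taking an `Ordering`), a minimal sketch; the backing storage is a `u32` array so the accessed offset is suitably aligned, since these calls otherwise return `Error::Misaligned`:

```rust
use std::sync::atomic::Ordering;
use vm_memory::{Bytes, VolatileSlice};

fn main() {
    // Backing the slice with u32s keeps offsets 0 and 4 four-byte aligned.
    let mut backing = [0u32; 2];
    let len = std::mem::size_of_val(&backing);
    // SAFETY: `backing` outlives `slice` and is not accessed by anything else
    // while the slice is in use.
    let slice = unsafe { VolatileSlice::new(backing.as_mut_ptr() as *mut u8, len) };

    slice.store(0x1234_5678u32, 4, Ordering::Relaxed).unwrap();
    assert_eq!(slice.load::<u32>(4, Ordering::Relaxed).unwrap(), 0x1234_5678);
}
```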
- pub unsafe fn new(addr: *mut u8) -> Self { - Self::with_bitmap(addr, (), None) - } -} - -#[allow(clippy::len_without_is_empty)] -impl<'a, T, B> VolatileRef<'a, T, B> -where - T: ByteValued, - B: BitmapSlice, -{ - /// Creates a [`VolatileRef`](struct.VolatileRef.html) to an instance of `T`, using the - /// provided `bitmap` object for dirty page tracking. - /// - /// # Safety - /// - /// To use this safely, the caller must guarantee that the memory at `addr` is big enough for a - /// `T` and is available for the duration of the lifetime of the new `VolatileRef`. The caller - /// must also guarantee that all other users of the given chunk of memory are using volatile - /// accesses. - pub unsafe fn with_bitmap(addr: *mut u8, bitmap: B, mmap: Option<&'a MmapInfo>) -> Self { - VolatileRef { - addr: addr as *mut Packed, - bitmap, - mmap, - } - } - - /// Returns a guard for the pointer to the underlying memory. - pub fn ptr_guard(&self) -> PtrGuard { - PtrGuard::read(self.mmap, self.addr as *mut u8, self.len()) - } - - /// Returns a mutable guard for the pointer to the underlying memory. - pub fn ptr_guard_mut(&self) -> PtrGuardMut { - PtrGuardMut::write(self.mmap, self.addr as *mut u8, self.len()) - } - - /// Gets the size of the referenced type `T`. - /// - /// # Examples - /// - /// ``` - /// # use std::mem::size_of; - /// # use vm_memory::VolatileRef; - /// # - /// let v_ref = unsafe { VolatileRef::::new(0 as *mut _) }; - /// assert_eq!(v_ref.len(), size_of::() as usize); - /// ``` - pub fn len(&self) -> usize { - size_of::() - } - - /// Borrows the inner `BitmapSlice`. - pub fn bitmap(&self) -> &B { - &self.bitmap - } - - /// Does a volatile write of the value `v` to the address of this ref. - #[inline(always)] - pub fn store(&self, v: T) { - let guard = self.ptr_guard_mut(); - - // SAFETY: Safe because we checked the address and size when creating this VolatileRef. - unsafe { write_volatile(guard.as_ptr() as *mut Packed, Packed::(v)) }; - self.bitmap.mark_dirty(0, self.len()) - } - - /// Does a volatile read of the value at the address of this ref. - #[inline(always)] - pub fn load(&self) -> T { - let guard = self.ptr_guard(); - - // SAFETY: Safe because we checked the address and size when creating this VolatileRef. - // For the purposes of demonstrating why read_volatile is necessary, try replacing the code - // in this function with the commented code below and running `cargo test --release`. - // unsafe { *(self.addr as *const T) } - unsafe { read_volatile(guard.as_ptr() as *const Packed).0 } - } - - /// Converts this to a [`VolatileSlice`](struct.VolatileSlice.html) with the same size and - /// address. - pub fn to_slice(&self) -> VolatileSlice<'a, B> { - // SAFETY: Safe because we checked the address and size when creating this VolatileRef. - unsafe { - VolatileSlice::with_bitmap( - self.addr as *mut u8, - size_of::(), - self.bitmap.clone(), - self.mmap, - ) - } - } -} - -/// A memory location that supports volatile access to an array of elements of type `T`. 
-/// -/// # Examples -/// -/// ``` -/// # use vm_memory::VolatileArrayRef; -/// # -/// let mut v = [5u32; 1]; -/// let v_ref = unsafe { VolatileArrayRef::new(&mut v[0] as *mut u32 as *mut u8, v.len()) }; -/// -/// assert_eq!(v[0], 5); -/// assert_eq!(v_ref.load(0), 5); -/// v_ref.store(0, 500); -/// assert_eq!(v[0], 500); -/// ``` -#[derive(Clone, Copy, Debug)] -pub struct VolatileArrayRef<'a, T, B = ()> { - addr: *mut u8, - nelem: usize, - bitmap: B, - phantom: PhantomData<&'a T>, - mmap: Option<&'a MmapInfo>, -} - -impl VolatileArrayRef<'_, T> -where - T: ByteValued, -{ - /// Creates a [`VolatileArrayRef`](struct.VolatileArrayRef.html) to an array of elements of - /// type `T`. - /// - /// # Safety - /// - /// To use this safely, the caller must guarantee that the memory at `addr` is big enough for - /// `nelem` values of type `T` and is available for the duration of the lifetime of the new - /// `VolatileRef`. The caller must also guarantee that all other users of the given chunk of - /// memory are using volatile accesses. - pub unsafe fn new(addr: *mut u8, nelem: usize) -> Self { - Self::with_bitmap(addr, nelem, (), None) - } -} - -impl<'a, T, B> VolatileArrayRef<'a, T, B> -where - T: ByteValued, - B: BitmapSlice, -{ - /// Creates a [`VolatileArrayRef`](struct.VolatileArrayRef.html) to an array of elements of - /// type `T`, using the provided `bitmap` object for dirty page tracking. - /// - /// # Safety - /// - /// To use this safely, the caller must guarantee that the memory at `addr` is big enough for - /// `nelem` values of type `T` and is available for the duration of the lifetime of the new - /// `VolatileRef`. The caller must also guarantee that all other users of the given chunk of - /// memory are using volatile accesses. - pub unsafe fn with_bitmap( - addr: *mut u8, - nelem: usize, - bitmap: B, - mmap: Option<&'a MmapInfo>, - ) -> Self { - VolatileArrayRef { - addr, - nelem, - bitmap, - phantom: PhantomData, - mmap, - } - } - - /// Returns `true` if this array is empty. - /// - /// # Examples - /// - /// ``` - /// # use vm_memory::VolatileArrayRef; - /// # - /// let v_array = unsafe { VolatileArrayRef::::new(0 as *mut _, 0) }; - /// assert!(v_array.is_empty()); - /// ``` - pub fn is_empty(&self) -> bool { - self.nelem == 0 - } - - /// Returns the number of elements in the array. - /// - /// # Examples - /// - /// ``` - /// # use vm_memory::VolatileArrayRef; - /// # - /// # let v_array = unsafe { VolatileArrayRef::::new(0 as *mut _, 1) }; - /// assert_eq!(v_array.len(), 1); - /// ``` - pub fn len(&self) -> usize { - self.nelem - } - - /// Returns the size of `T`. - /// - /// # Examples - /// - /// ``` - /// # use std::mem::size_of; - /// # use vm_memory::VolatileArrayRef; - /// # - /// let v_ref = unsafe { VolatileArrayRef::::new(0 as *mut _, 0) }; - /// assert_eq!(v_ref.element_size(), size_of::() as usize); - /// ``` - pub fn element_size(&self) -> usize { - size_of::() - } - - /// Returns a guard for the pointer to the underlying memory. - pub fn ptr_guard(&self) -> PtrGuard { - PtrGuard::read(self.mmap, self.addr, self.len()) - } - - /// Returns a mutable guard for the pointer to the underlying memory. - pub fn ptr_guard_mut(&self) -> PtrGuardMut { - PtrGuardMut::write(self.mmap, self.addr, self.len()) - } - - /// Borrows the inner `BitmapSlice`. - pub fn bitmap(&self) -> &B { - &self.bitmap - } - - /// Converts this to a `VolatileSlice` with the same size and address. 
- pub fn to_slice(&self) -> VolatileSlice<'a, B> { - // SAFETY: Safe as long as the caller validated addr when creating this object. - unsafe { - VolatileSlice::with_bitmap( - self.addr, - self.nelem * self.element_size(), - self.bitmap.clone(), - self.mmap, - ) - } - } - - /// Does a volatile read of the element at `index`. - /// - /// # Panics - /// - /// Panics if `index` is less than the number of elements of the array to which `&self` points. - pub fn ref_at(&self, index: usize) -> VolatileRef<'a, T, B> { - assert!(index < self.nelem); - // SAFETY: Safe because the memory has the same lifetime and points to a subset of the - // memory of the VolatileArrayRef. - unsafe { - // byteofs must fit in an isize as it was checked in get_array_ref. - let byteofs = (self.element_size() * index) as isize; - let ptr = self.addr.offset(byteofs); - VolatileRef::with_bitmap(ptr, self.bitmap.slice_at(byteofs as usize), self.mmap) - } - } - - /// Does a volatile read of the element at `index`. - pub fn load(&self, index: usize) -> T { - self.ref_at(index).load() - } - - /// Does a volatile write of the element at `index`. - pub fn store(&self, index: usize, value: T) { - // The `VolatileRef::store` call below implements the required dirty bitmap tracking logic, - // so no need to do that in this method as well. - self.ref_at(index).store(value) - } - - /// Copies as many elements of type `T` as possible from this array to `buf`. - /// - /// Copies `self.len()` or `buf.len()` times the size of `T` bytes, whichever is smaller, - /// to `buf`. The copy happens from smallest to largest address in `T` sized chunks - /// using volatile reads. - /// - /// # Examples - /// - /// ``` - /// # use vm_memory::VolatileArrayRef; - /// # - /// let mut v = [0u8; 32]; - /// let v_ref = unsafe { VolatileArrayRef::new(v.as_mut_ptr(), v.len()) }; - /// - /// let mut buf = [5u8; 16]; - /// v_ref.copy_to(&mut buf[..]); - /// for &v in &buf[..] { - /// assert_eq!(v, 0); - /// } - /// ``` - pub fn copy_to(&self, buf: &mut [T]) -> usize { - // A fast path for u8/i8 - if size_of::() == 1 { - let source = self.to_slice(); - let total = buf.len().min(source.len()); - - // SAFETY: - // - dst is valid for writes of at least `total`, since total <= buf.len() - // - src is valid for reads of at least `total` as total <= source.len() - // - The regions are non-overlapping as `src` points to guest memory and `buf` is - // a slice and thus has to live outside of guest memory (there can be more slices to - // guest memory without violating rust's aliasing rules) - // - size is always a multiple of alignment, so treating *mut T as *mut u8 is fine - return unsafe { - copy_from_volatile_slice(buf.as_mut_ptr() as *mut u8, &source, total) - }; - } - - let guard = self.ptr_guard(); - let mut ptr = guard.as_ptr() as *const Packed; - let start = ptr; - - for v in buf.iter_mut().take(self.len()) { - // SAFETY: read_volatile is safe because the pointers are range-checked when - // the slices are created, and they never escape the VolatileSlices. - // ptr::add is safe because get_array_ref() validated that - // size_of::() * self.len() fits in an isize. - unsafe { - *v = read_volatile(ptr).0; - ptr = ptr.add(1); - } - } - - // SAFETY: It is guaranteed that start and ptr point to the regions of the same slice. - unsafe { ptr.offset_from(start) as usize } - } - - /// Copies as many bytes as possible from this slice to the provided `slice`. - /// - /// The copies happen in an undefined order. 
- /// - /// # Examples - /// - /// ``` - /// # use vm_memory::VolatileArrayRef; - /// # - /// let mut v = [0u8; 32]; - /// let v_ref = unsafe { VolatileArrayRef::::new(v.as_mut_ptr(), v.len()) }; - /// let mut buf = [5u8; 16]; - /// let v_ref2 = unsafe { VolatileArrayRef::::new(buf.as_mut_ptr(), buf.len()) }; - /// - /// v_ref.copy_to_volatile_slice(v_ref2.to_slice()); - /// for &v in &buf[..] { - /// assert_eq!(v, 0); - /// } - /// ``` - pub fn copy_to_volatile_slice(&self, slice: VolatileSlice) { - // SAFETY: Safe because the pointers are range-checked when the slices - // are created, and they never escape the VolatileSlices. - // FIXME: ... however, is it really okay to mix non-volatile - // operations such as copy with read_volatile and write_volatile? - unsafe { - let count = min(self.len() * self.element_size(), slice.size); - copy(self.addr, slice.addr, count); - slice.bitmap.mark_dirty(0, count); - } - } - - /// Copies as many elements of type `T` as possible from `buf` to this slice. - /// - /// Copies `self.len()` or `buf.len()` times the size of `T` bytes, whichever is smaller, - /// to this slice's memory. The copy happens from smallest to largest address in - /// `T` sized chunks using volatile writes. - /// - /// # Examples - /// - /// ``` - /// # use vm_memory::VolatileArrayRef; - /// # - /// let mut v = [0u8; 32]; - /// let v_ref = unsafe { VolatileArrayRef::::new(v.as_mut_ptr(), v.len()) }; - /// - /// let buf = [5u8; 64]; - /// v_ref.copy_from(&buf[..]); - /// for &val in &v[..] { - /// assert_eq!(5u8, val); - /// } - /// ``` - pub fn copy_from(&self, buf: &[T]) { - // A fast path for u8/i8 - if size_of::() == 1 { - let destination = self.to_slice(); - let total = buf.len().min(destination.len()); - - // absurd formatting brought to you by clippy - // SAFETY: - // - dst is valid for writes of at least `total`, since total <= destination.len() - // - src is valid for reads of at least `total` as total <= buf.len() - // - The regions are non-overlapping as `dst` points to guest memory and `buf` is - // a slice and thus has to live outside of guest memory (there can be more slices to - // guest memory without violating rust's aliasing rules) - // - size is always a multiple of alignment, so treating *const T as *const u8 is fine - unsafe { copy_to_volatile_slice(&destination, buf.as_ptr() as *const u8, total) }; - } else { - let guard = self.ptr_guard_mut(); - let start = guard.as_ptr(); - let mut ptr = start as *mut Packed; - - for &v in buf.iter().take(self.len()) { - // SAFETY: write_volatile is safe because the pointers are range-checked when - // the slices are created, and they never escape the VolatileSlices. - // ptr::add is safe because get_array_ref() validated that - // size_of::() * self.len() fits in an isize. - unsafe { - write_volatile(ptr, Packed::(v)); - ptr = ptr.add(1); - } - } - - self.bitmap.mark_dirty(0, ptr as usize - start as usize); - } - } -} - -impl<'a, B: BitmapSlice> From> for VolatileArrayRef<'a, u8, B> { - fn from(slice: VolatileSlice<'a, B>) -> Self { - // SAFETY: Safe because the result has the same lifetime and points to the same - // memory as the incoming VolatileSlice. - unsafe { VolatileArrayRef::with_bitmap(slice.addr, slice.len(), slice.bitmap, slice.mmap) } - } -} - -// Return the largest value that `addr` is aligned to. Forcing this function to return 1 will -// cause test_non_atomic_access to fail. -fn alignment(addr: usize) -> usize { - // Rust is silly and does not let me write addr & -addr. 
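The expression that follows, `addr & (!addr + 1)`, is the usual `addr & -addr` bit trick: `!addr + 1` is the two's-complement negation, so the result isolates the lowest set bit, i.e. the largest power of two dividing the address. A tiny standalone illustration:

```rust
fn alignment(addr: usize) -> usize {
    // `!addr + 1` is the two's complement of `addr`, so this isolates the
    // lowest set bit of `addr`.
    addr & (!addr + 1)
}

fn main() {
    assert_eq!(alignment(0b1000), 8);
    assert_eq!(alignment(0b1100), 4);
    assert_eq!(alignment(0x1001), 1);
    assert_eq!(alignment(0x4000_0000), 0x4000_0000);
}
```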
-    addr & (!addr + 1)
-}
-
-pub(crate) mod copy_slice_impl {
-    use super::*;
-
-    // SAFETY: Has the same safety requirements as `read_volatile` + `write_volatile`, namely:
-    // - `src_addr` and `dst_addr` must be valid for reads/writes.
-    // - `src_addr` and `dst_addr` must be properly aligned with respect to `align`.
-    // - `src_addr` must point to a properly initialized value, which is true here because
-    //   we're only using integer primitives.
-    unsafe fn copy_single(align: usize, src_addr: *const u8, dst_addr: *mut u8) {
-        match align {
-            8 => write_volatile(dst_addr as *mut u64, read_volatile(src_addr as *const u64)),
-            4 => write_volatile(dst_addr as *mut u32, read_volatile(src_addr as *const u32)),
-            2 => write_volatile(dst_addr as *mut u16, read_volatile(src_addr as *const u16)),
-            1 => write_volatile(dst_addr, read_volatile(src_addr)),
-            _ => unreachable!(),
-        }
-    }
-
-    /// Copies `total` bytes from `src` to `dst` using a loop of volatile reads and writes
-    ///
-    /// SAFETY: `src` and `dst` must point to a contiguously allocated memory region of at least
-    /// length `total`. The regions must not overlap
-    unsafe fn copy_slice_volatile(mut dst: *mut u8, mut src: *const u8, total: usize) -> usize {
-        let mut left = total;
-
-        let align = min(alignment(src as usize), alignment(dst as usize));
-
-        let mut copy_aligned_slice = |min_align| {
-            if align < min_align {
-                return;
-            }
-
-            while left >= min_align {
-                // SAFETY: Safe because we check alignment beforehand, the memory areas are valid
-                // for reads/writes, and the source always contains a valid value.
-                unsafe { copy_single(min_align, src, dst) };
-
-                left -= min_align;
-
-                if left == 0 {
-                    break;
-                }
-
-                // SAFETY: We only explain the invariants for `src`, the argument for `dst` is
-                // analogous.
-                // - `src` and `src + min_align` are within (or one byte past) the same allocated object
-                //   This is given by the invariant on this function ensuring that [src, src + total)
-                //   are part of the same allocated object, and the condition on the while loop
-                //   ensures that we do not go outside this object
-                // - The computed offset in bytes cannot overflow isize, because `min_align` is at
-                //   most 8 when the closure is called (see below)
-                // - The sum `src as usize + min_align` can only wrap around if src as usize + min_align - 1 == usize::MAX,
-                //   however in this case, left == 0, and we'll have exited the loop above.
-                unsafe {
-                    src = src.add(min_align);
-                    dst = dst.add(min_align);
-                }
-            }
-        };
-
-        if size_of::<usize>() > 4 {
-            copy_aligned_slice(8);
-        }
-        copy_aligned_slice(4);
-        copy_aligned_slice(2);
-        copy_aligned_slice(1);
-
-        total
-    }
-
-    /// Copies `total` bytes from `src` to `dst`
-    ///
-    /// SAFETY: `src` and `dst` must point to a contiguously allocated memory region of at least
-    /// length `total`.
The regions must not overlap - unsafe fn copy_slice(dst: *mut u8, src: *const u8, total: usize) -> usize { - if total <= size_of::() { - // SAFETY: Invariants of copy_slice_volatile are the same as invariants of copy_slice - unsafe { - copy_slice_volatile(dst, src, total); - }; - } else { - // SAFETY: - // - Both src and dst are allocated for reads/writes of length `total` by function - // invariant - // - src and dst are properly aligned, as any alignment is valid for u8 - // - The regions are not overlapping by function invariant - unsafe { - std::ptr::copy_nonoverlapping(src, dst, total); - } - } - - total - } - - /// Copies `total` bytes from `slice` to `dst` - /// - /// SAFETY: `slice` and `dst` must be point to a contiguously allocated memory region of at - /// least length `total`. The regions must not overlap. - pub(crate) unsafe fn copy_from_volatile_slice( - dst: *mut u8, - slice: &VolatileSlice<'_, B>, - total: usize, - ) -> usize { - let guard = slice.ptr_guard(); - - // SAFETY: guaranteed by function invariants. - copy_slice(dst, guard.as_ptr(), total) - } - - /// Copies `total` bytes from 'src' to `slice` - /// - /// SAFETY: `slice` and `src` must be point to a contiguously allocated memory region of at - /// least length `total`. The regions must not overlap. - pub(crate) unsafe fn copy_to_volatile_slice( - slice: &VolatileSlice<'_, B>, - src: *const u8, - total: usize, - ) -> usize { - let guard = slice.ptr_guard_mut(); - - // SAFETY: guaranteed by function invariants. - let count = copy_slice(guard.as_ptr(), src, total); - slice.bitmap.mark_dirty(0, count); - count - } -} - -#[cfg(test)] -mod tests { - #![allow(clippy::undocumented_unsafe_blocks)] - - use super::*; - use std::alloc::Layout; - - #[cfg(feature = "rawfd")] - use std::fs::File; - #[cfg(feature = "backend-bitmap")] - use std::mem::size_of_val; - #[cfg(feature = "rawfd")] - use std::path::Path; - use std::sync::atomic::{AtomicUsize, Ordering}; - use std::sync::{Arc, Barrier}; - use std::thread::spawn; - - use matches::assert_matches; - #[cfg(feature = "backend-bitmap")] - use std::num::NonZeroUsize; - #[cfg(feature = "rawfd")] - use vmm_sys_util::tempfile::TempFile; - - #[cfg(feature = "backend-bitmap")] - use crate::bitmap::tests::{ - check_range, range_is_clean, range_is_dirty, test_bytes, test_volatile_memory, - }; - #[cfg(feature = "backend-bitmap")] - use crate::bitmap::{AtomicBitmap, RefSlice}; - - #[cfg(feature = "backend-bitmap")] - const DEFAULT_PAGE_SIZE: NonZeroUsize = NonZeroUsize::new(0x1000).unwrap(); - - #[test] - fn test_compute_end_offset() { - let mut array = [1, 2, 3, 4, 5]; - let slice = VolatileSlice::from(array.as_mut_slice()); - - // Iterate over all valid ranges, assert that they pass validation. - // This includes edge cases such as len = 0 and base = 5! 
- for len in 0..slice.len() { - for base in 0..=slice.len() - len { - assert_eq!( - slice.compute_end_offset(base, len).unwrap(), - len + base, - "compute_end_offset rejected valid base/offset pair {base} + {len}" - ); - } - } - - // Check invalid configurations - slice.compute_end_offset(5, 1).unwrap_err(); - slice.compute_end_offset(6, 0).unwrap_err(); - } - - #[test] - fn misaligned_ref() { - let mut a = [0u8; 3]; - let a_ref = VolatileSlice::from(&mut a[..]); - unsafe { - assert!( - a_ref.aligned_as_ref::(0).is_err() ^ a_ref.aligned_as_ref::(1).is_err() - ); - assert!( - a_ref.aligned_as_mut::(0).is_err() ^ a_ref.aligned_as_mut::(1).is_err() - ); - } - } - - #[test] - fn atomic_store() { - let mut a = [0usize; 1]; - { - let a_ref = unsafe { - VolatileSlice::new(&mut a[0] as *mut usize as *mut u8, size_of::()) - }; - let atomic = a_ref.get_atomic_ref::(0).unwrap(); - atomic.store(2usize, Ordering::Relaxed) - } - assert_eq!(a[0], 2); - } - - #[test] - fn atomic_load() { - let mut a = [5usize; 1]; - { - let a_ref = unsafe { - VolatileSlice::new(&mut a[0] as *mut usize as *mut u8, - size_of::()) - }; - let atomic = { - let atomic = a_ref.get_atomic_ref::(0).unwrap(); - assert_eq!(atomic.load(Ordering::Relaxed), 5usize); - atomic - }; - // To make sure we can take the atomic out of the scope we made it in: - atomic.load(Ordering::Relaxed); - // but not too far: - // atomicu8 - } //.load(std::sync::atomic::Ordering::Relaxed) - ; - } - - #[test] - fn misaligned_atomic() { - let mut a = [5usize, 5usize]; - let a_ref = - unsafe { VolatileSlice::new(&mut a[0] as *mut usize as *mut u8, size_of::()) }; - assert!(a_ref.get_atomic_ref::(0).is_ok()); - assert!(a_ref.get_atomic_ref::(1).is_err()); - } - - #[test] - fn ref_store() { - let mut a = [0u8; 1]; - { - let a_ref = VolatileSlice::from(&mut a[..]); - let v_ref = a_ref.get_ref(0).unwrap(); - v_ref.store(2u8); - } - assert_eq!(a[0], 2); - } - - #[test] - fn ref_load() { - let mut a = [5u8; 1]; - { - let a_ref = VolatileSlice::from(&mut a[..]); - let c = { - let v_ref = a_ref.get_ref::(0).unwrap(); - assert_eq!(v_ref.load(), 5u8); - v_ref - }; - // To make sure we can take a v_ref out of the scope we made it in: - c.load(); - // but not too far: - // c - } //.load() - ; - } - - #[test] - fn ref_to_slice() { - let mut a = [1u8; 5]; - let a_ref = VolatileSlice::from(&mut a[..]); - let v_ref = a_ref.get_ref(1).unwrap(); - v_ref.store(0x1234_5678u32); - let ref_slice = v_ref.to_slice(); - assert_eq!(v_ref.addr as usize, ref_slice.addr as usize); - assert_eq!(v_ref.len(), ref_slice.len()); - assert!(!ref_slice.is_empty()); - } - - #[test] - fn observe_mutate() { - struct RawMemory(*mut u8); - - // SAFETY: we use property synchronization below - unsafe impl Send for RawMemory {} - unsafe impl Sync for RawMemory {} - - let mem = Arc::new(RawMemory(unsafe { - std::alloc::alloc(Layout::from_size_align(1, 1).unwrap()) - })); - - let outside_slice = unsafe { VolatileSlice::new(Arc::clone(&mem).0, 1) }; - let inside_arc = Arc::clone(&mem); - - let v_ref = outside_slice.get_ref::(0).unwrap(); - let barrier = Arc::new(Barrier::new(2)); - let barrier1 = barrier.clone(); - - v_ref.store(99); - spawn(move || { - barrier1.wait(); - let inside_slice = unsafe { VolatileSlice::new(inside_arc.0, 1) }; - let clone_v_ref = inside_slice.get_ref::(0).unwrap(); - clone_v_ref.store(0); - barrier1.wait(); - }); - - assert_eq!(v_ref.load(), 99); - barrier.wait(); - barrier.wait(); - assert_eq!(v_ref.load(), 0); - - unsafe { std::alloc::dealloc(mem.0, Layout::from_size_align(1, 
1).unwrap()) } - } - - #[test] - fn mem_is_empty() { - let mut backing = vec![0u8; 100]; - let a = VolatileSlice::from(backing.as_mut_slice()); - assert!(!a.is_empty()); - - let mut backing = vec![]; - let a = VolatileSlice::from(backing.as_mut_slice()); - assert!(a.is_empty()); - } - - #[test] - fn slice_len() { - let mut backing = vec![0u8; 100]; - let mem = VolatileSlice::from(backing.as_mut_slice()); - let slice = mem.get_slice(0, 27).unwrap(); - assert_eq!(slice.len(), 27); - assert!(!slice.is_empty()); - - let slice = mem.get_slice(34, 27).unwrap(); - assert_eq!(slice.len(), 27); - assert!(!slice.is_empty()); - - let slice = slice.get_slice(20, 5).unwrap(); - assert_eq!(slice.len(), 5); - assert!(!slice.is_empty()); - - let slice = mem.get_slice(34, 0).unwrap(); - assert!(slice.is_empty()); - } - - #[test] - fn slice_subslice() { - let mut backing = vec![0u8; 100]; - let mem = VolatileSlice::from(backing.as_mut_slice()); - let slice = mem.get_slice(0, 100).unwrap(); - assert!(slice.write(&[1; 80], 10).is_ok()); - - assert!(slice.subslice(0, 0).is_ok()); - assert!(slice.subslice(0, 101).is_err()); - - assert!(slice.subslice(99, 0).is_ok()); - assert!(slice.subslice(99, 1).is_ok()); - assert!(slice.subslice(99, 2).is_err()); - - assert!(slice.subslice(100, 0).is_ok()); - assert!(slice.subslice(100, 1).is_err()); - - assert!(slice.subslice(101, 0).is_err()); - assert!(slice.subslice(101, 1).is_err()); - - assert!(slice.subslice(usize::MAX, 2).is_err()); - assert!(slice.subslice(2, usize::MAX).is_err()); - - let maybe_offset_slice = slice.subslice(10, 80); - assert!(maybe_offset_slice.is_ok()); - let offset_slice = maybe_offset_slice.unwrap(); - assert_eq!(offset_slice.len(), 80); - - let mut buf = [0; 80]; - assert!(offset_slice.read(&mut buf, 0).is_ok()); - assert_eq!(&buf[0..80], &[1; 80][0..80]); - } - - #[test] - fn slice_offset() { - let mut backing = vec![0u8; 100]; - let mem = VolatileSlice::from(backing.as_mut_slice()); - let slice = mem.get_slice(0, 100).unwrap(); - assert!(slice.write(&[1; 80], 10).is_ok()); - - assert!(slice.offset(101).is_err()); - - let maybe_offset_slice = slice.offset(10); - assert!(maybe_offset_slice.is_ok()); - let offset_slice = maybe_offset_slice.unwrap(); - assert_eq!(offset_slice.len(), 90); - let mut buf = [0; 90]; - assert!(offset_slice.read(&mut buf, 0).is_ok()); - assert_eq!(&buf[0..80], &[1; 80][0..80]); - assert_eq!(&buf[80..90], &[0; 10][0..10]); - } - - #[test] - fn slice_copy_to_u8() { - let mut a = [2u8, 4, 6, 8, 10]; - let mut b = [0u8; 4]; - let mut c = [0u8; 6]; - let a_ref = VolatileSlice::from(&mut a[..]); - let v_ref = a_ref.get_slice(0, a_ref.len()).unwrap(); - v_ref.copy_to(&mut b[..]); - v_ref.copy_to(&mut c[..]); - assert_eq!(b[0..4], a[0..4]); - assert_eq!(c[0..5], a[0..5]); - } - - #[test] - fn slice_copy_to_u16() { - let mut a = [0x01u16, 0x2, 0x03, 0x4, 0x5]; - let mut b = [0u16; 4]; - let mut c = [0u16; 6]; - let a_ref = &mut a[..]; - let v_ref = unsafe { VolatileSlice::new(a_ref.as_mut_ptr() as *mut u8, 9) }; - - v_ref.copy_to(&mut b[..]); - v_ref.copy_to(&mut c[..]); - assert_eq!(b[0..4], a_ref[0..4]); - assert_eq!(c[0..4], a_ref[0..4]); - assert_eq!(c[4], 0); - } - - #[test] - fn slice_copy_from_u8() { - let a = [2u8, 4, 6, 8, 10]; - let mut b = [0u8; 4]; - let mut c = [0u8; 6]; - let b_ref = VolatileSlice::from(&mut b[..]); - let v_ref = b_ref.get_slice(0, b_ref.len()).unwrap(); - v_ref.copy_from(&a[..]); - assert_eq!(b[0..4], a[0..4]); - - let c_ref = VolatileSlice::from(&mut c[..]); - let v_ref = c_ref.get_slice(0, 
c_ref.len()).unwrap(); - v_ref.copy_from(&a[..]); - assert_eq!(c[0..5], a[0..5]); - } - - #[test] - fn slice_copy_from_u16() { - let a = [2u16, 4, 6, 8, 10]; - let mut b = [0u16; 4]; - let mut c = [0u16; 6]; - let b_ref = &mut b[..]; - let v_ref = unsafe { VolatileSlice::new(b_ref.as_mut_ptr() as *mut u8, 8) }; - v_ref.copy_from(&a[..]); - assert_eq!(b_ref[0..4], a[0..4]); - - let c_ref = &mut c[..]; - let v_ref = unsafe { VolatileSlice::new(c_ref.as_mut_ptr() as *mut u8, 9) }; - v_ref.copy_from(&a[..]); - assert_eq!(c_ref[0..4], a[0..4]); - assert_eq!(c_ref[4], 0); - } - - #[test] - fn slice_copy_to_volatile_slice() { - let mut a = [2u8, 4, 6, 8, 10]; - let a_ref = VolatileSlice::from(&mut a[..]); - let a_slice = a_ref.get_slice(0, a_ref.len()).unwrap(); - - let mut b = [0u8; 4]; - let b_ref = VolatileSlice::from(&mut b[..]); - let b_slice = b_ref.get_slice(0, b_ref.len()).unwrap(); - - a_slice.copy_to_volatile_slice(b_slice); - assert_eq!(b, [2, 4, 6, 8]); - } - - #[test] - fn slice_overflow_error() { - let mut backing = vec![0u8]; - let a = VolatileSlice::from(backing.as_mut_slice()); - let res = a.get_slice(usize::MAX, 1).unwrap_err(); - assert_matches!( - res, - Error::Overflow { - base: usize::MAX, - offset: 1, - } - ); - } - - #[test] - fn slice_oob_error() { - let mut backing = vec![0u8; 100]; - let a = VolatileSlice::from(backing.as_mut_slice()); - a.get_slice(50, 50).unwrap(); - let res = a.get_slice(55, 50).unwrap_err(); - assert_matches!(res, Error::OutOfBounds { addr: 105 }); - } - - #[test] - fn ref_overflow_error() { - let mut backing = vec![0u8]; - let a = VolatileSlice::from(backing.as_mut_slice()); - let res = a.get_ref::(usize::MAX).unwrap_err(); - assert_matches!( - res, - Error::Overflow { - base: usize::MAX, - offset: 1, - } - ); - } - - #[test] - fn ref_oob_error() { - let mut backing = vec![0u8; 100]; - let a = VolatileSlice::from(backing.as_mut_slice()); - a.get_ref::(99).unwrap(); - let res = a.get_ref::(99).unwrap_err(); - assert_matches!(res, Error::OutOfBounds { addr: 101 }); - } - - #[test] - fn ref_oob_too_large() { - let mut backing = vec![0u8; 3]; - let a = VolatileSlice::from(backing.as_mut_slice()); - let res = a.get_ref::(0).unwrap_err(); - assert_matches!(res, Error::OutOfBounds { addr: 4 }); - } - - #[test] - fn slice_store() { - let mut backing = vec![0u8; 5]; - let a = VolatileSlice::from(backing.as_mut_slice()); - let s = a.as_volatile_slice(); - let r = a.get_ref(2).unwrap(); - r.store(9u16); - assert_eq!(s.read_obj::(2).unwrap(), 9); - } - - #[test] - fn test_write_past_end() { - let mut backing = vec![0u8; 5]; - let a = VolatileSlice::from(backing.as_mut_slice()); - let s = a.as_volatile_slice(); - let res = s.write(&[1, 2, 3, 4, 5, 6], 0); - assert!(res.is_ok()); - assert_eq!(res.unwrap(), 5); - } - - #[test] - fn slice_read_and_write() { - let mut backing = vec![0u8; 5]; - let a = VolatileSlice::from(backing.as_mut_slice()); - let s = a.as_volatile_slice(); - let sample_buf = [1, 2, 3]; - assert!(s.write(&sample_buf, 5).is_err()); - assert!(s.write(&sample_buf, 2).is_ok()); - let mut buf = [0u8; 3]; - assert!(s.read(&mut buf, 5).is_err()); - assert!(s.read_slice(&mut buf, 2).is_ok()); - assert_eq!(buf, sample_buf); - - // Writing an empty buffer at the end of the volatile slice works. - assert_eq!(s.write(&[], 100).unwrap(), 0); - let buf: &mut [u8] = &mut []; - assert_eq!(s.read(buf, 4).unwrap(), 0); - - // Check that reading and writing an empty buffer does not yield an error. 
- let mut backing = Vec::new(); - let empty_mem = VolatileSlice::from(backing.as_mut_slice()); - let empty = empty_mem.as_volatile_slice(); - assert_eq!(empty.write(&[], 1).unwrap(), 0); - assert_eq!(empty.read(buf, 1).unwrap(), 0); - } - - #[test] - fn obj_read_and_write() { - let mut backing = vec![0u8; 5]; - let a = VolatileSlice::from(backing.as_mut_slice()); - let s = a.as_volatile_slice(); - assert!(s.write_obj(55u16, 4).is_err()); - assert!(s.write_obj(55u16, usize::MAX).is_err()); - assert!(s.write_obj(55u16, 2).is_ok()); - assert_eq!(s.read_obj::(2).unwrap(), 55u16); - assert!(s.read_obj::(4).is_err()); - assert!(s.read_obj::(usize::MAX).is_err()); - } - - #[test] - #[cfg(feature = "rawfd")] - fn mem_read_and_write() { - let mut backing = vec![0u8; 5]; - let a = VolatileSlice::from(backing.as_mut_slice()); - let s = a.as_volatile_slice(); - assert!(s.write_obj(!0u32, 1).is_ok()); - let mut file = if cfg!(target_family = "unix") { - File::open(Path::new("/dev/zero")).unwrap() - } else { - File::open(Path::new("c:\\Windows\\system32\\ntoskrnl.exe")).unwrap() - }; - - assert!(file - .read_exact_volatile(&mut s.get_slice(1, size_of::()).unwrap()) - .is_ok()); - - let mut f = TempFile::new().unwrap().into_file(); - assert!(f - .read_exact_volatile(&mut s.get_slice(1, size_of::()).unwrap()) - .is_err()); - - let value = s.read_obj::(1).unwrap(); - if cfg!(target_family = "unix") { - assert_eq!(value, 0); - } else { - assert_eq!(value, 0x0090_5a4d); - } - - let mut sink = vec![0; size_of::()]; - assert!(sink - .as_mut_slice() - .write_all_volatile(&s.get_slice(1, size_of::()).unwrap()) - .is_ok()); - - if cfg!(target_family = "unix") { - assert_eq!(sink, vec![0; size_of::()]); - } else { - assert_eq!(sink, vec![0x4d, 0x5a, 0x90, 0x00]); - }; - } - - #[test] - fn unaligned_read_and_write() { - let mut backing = vec![0u8; 7]; - let a = VolatileSlice::from(backing.as_mut_slice()); - let s = a.as_volatile_slice(); - let sample_buf: [u8; 7] = [1, 2, 0xAA, 0xAA, 0xAA, 0xAA, 4]; - assert!(s.write_slice(&sample_buf, 0).is_ok()); - let r = a.get_ref::(2).unwrap(); - assert_eq!(r.load(), 0xAAAA_AAAA); - - r.store(0x5555_5555); - let sample_buf: [u8; 7] = [1, 2, 0x55, 0x55, 0x55, 0x55, 4]; - let mut buf: [u8; 7] = Default::default(); - assert!(s.read_slice(&mut buf, 0).is_ok()); - assert_eq!(buf, sample_buf); - } - - #[test] - fn test_read_from_exceeds_size() { - #[derive(Debug, Default, Copy, Clone)] - struct BytesToRead { - _val1: u128, // 16 bytes - _val2: u128, // 16 bytes - } - unsafe impl ByteValued for BytesToRead {} - let cursor_size = 20; - let image = vec![1u8; cursor_size]; - - // Trying to read more bytes than we have space for in image - // make the read_from function return maximum vec size (i.e. 20). 
- let mut bytes_to_read = BytesToRead::default(); - assert_eq!( - image - .as_slice() - .read_volatile(&mut bytes_to_read.as_bytes()) - .unwrap(), - cursor_size - ); - } - - #[test] - fn ref_array_from_slice() { - let mut a = [2, 4, 6, 8, 10]; - let a_vec = a.to_vec(); - let a_ref = VolatileSlice::from(&mut a[..]); - let a_slice = a_ref.get_slice(0, a_ref.len()).unwrap(); - let a_array_ref: VolatileArrayRef = a_slice.into(); - for (i, entry) in a_vec.iter().enumerate() { - assert_eq!(&a_array_ref.load(i), entry); - } - } - - #[test] - fn ref_array_store() { - let mut a = [0u8; 5]; - { - let a_ref = VolatileSlice::from(&mut a[..]); - let v_ref = a_ref.get_array_ref(1, 4).unwrap(); - v_ref.store(1, 2u8); - v_ref.store(2, 4u8); - v_ref.store(3, 6u8); - } - let expected = [2u8, 4u8, 6u8]; - assert_eq!(a[2..=4], expected); - } - - #[test] - fn ref_array_load() { - let mut a = [0, 0, 2, 3, 10]; - { - let a_ref = VolatileSlice::from(&mut a[..]); - let c = { - let v_ref = a_ref.get_array_ref::(1, 4).unwrap(); - assert_eq!(v_ref.load(1), 2u8); - assert_eq!(v_ref.load(2), 3u8); - assert_eq!(v_ref.load(3), 10u8); - v_ref - }; - // To make sure we can take a v_ref out of the scope we made it in: - c.load(0); - // but not too far: - // c - } //.load() - ; - } - - #[test] - fn ref_array_overflow() { - let mut a = [0, 0, 2, 3, 10]; - let a_ref = VolatileSlice::from(&mut a[..]); - let res = a_ref.get_array_ref::(4, usize::MAX).unwrap_err(); - assert_matches!( - res, - Error::TooBig { - nelements: usize::MAX, - size: 4, - } - ); - } - - #[test] - fn alignment() { - let a = [0u8; 64]; - let a = &a[a.as_ptr().align_offset(32)] as *const u8 as usize; - assert!(super::alignment(a) >= 32); - assert_eq!(super::alignment(a + 9), 1); - assert_eq!(super::alignment(a + 30), 2); - assert_eq!(super::alignment(a + 12), 4); - assert_eq!(super::alignment(a + 8), 8); - } - - #[test] - fn test_atomic_accesses() { - let len = 0x1000; - let buf = unsafe { std::alloc::alloc_zeroed(Layout::from_size_align(len, 8).unwrap()) }; - let a = unsafe { VolatileSlice::new(buf, len) }; - - crate::bytes::tests::check_atomic_accesses(a, 0, 0x1000); - unsafe { - std::alloc::dealloc(buf, Layout::from_size_align(len, 8).unwrap()); - } - } - - #[test] - fn split_at() { - let mut mem = [0u8; 32]; - let mem_ref = VolatileSlice::from(&mut mem[..]); - let vslice = mem_ref.get_slice(0, 32).unwrap(); - let (start, end) = vslice.split_at(8).unwrap(); - assert_eq!(start.len(), 8); - assert_eq!(end.len(), 24); - let (start, end) = vslice.split_at(0).unwrap(); - assert_eq!(start.len(), 0); - assert_eq!(end.len(), 32); - let (start, end) = vslice.split_at(31).unwrap(); - assert_eq!(start.len(), 31); - assert_eq!(end.len(), 1); - let (start, end) = vslice.split_at(32).unwrap(); - assert_eq!(start.len(), 32); - assert_eq!(end.len(), 0); - let err = vslice.split_at(33).unwrap_err(); - assert_matches!(err, Error::OutOfBounds { addr: _ }) - } - - #[test] - #[cfg(feature = "backend-bitmap")] - fn test_volatile_slice_dirty_tracking() { - let val = 123u64; - let dirty_offset = 0x1000; - let dirty_len = size_of_val(&val); - - let len = 0x10000; - let buf = unsafe { std::alloc::alloc_zeroed(Layout::from_size_align(len, 8).unwrap()) }; - - // Invoke the `Bytes` test helper function. 
-        {
-            let bitmap = AtomicBitmap::new(len, DEFAULT_PAGE_SIZE);
-            let slice = unsafe { VolatileSlice::with_bitmap(buf, len, bitmap.slice_at(0), None) };
-
-            test_bytes(
-                &slice,
-                |s: &VolatileSlice<RefSlice<AtomicBitmap>>,
-                 start: usize,
-                 len: usize,
-                 clean: bool| { check_range(s.bitmap(), start, len, clean) },
-                |offset| offset,
-                0x1000,
-            );
-        }
-
-        // Invoke the `VolatileMemory` test helper function.
-        {
-            let bitmap = AtomicBitmap::new(len, DEFAULT_PAGE_SIZE);
-            let slice = unsafe { VolatileSlice::with_bitmap(buf, len, bitmap.slice_at(0), None) };
-            test_volatile_memory(&slice);
-        }
-
-        let bitmap = AtomicBitmap::new(len, DEFAULT_PAGE_SIZE);
-        let slice = unsafe { VolatileSlice::with_bitmap(buf, len, bitmap.slice_at(0), None) };
-
-        let bitmap2 = AtomicBitmap::new(len, DEFAULT_PAGE_SIZE);
-        let slice2 = unsafe { VolatileSlice::with_bitmap(buf, len, bitmap2.slice_at(0), None) };
-
-        let bitmap3 = AtomicBitmap::new(len, DEFAULT_PAGE_SIZE);
-        let slice3 = unsafe { VolatileSlice::with_bitmap(buf, len, bitmap3.slice_at(0), None) };
-
-        assert!(range_is_clean(slice.bitmap(), 0, slice.len()));
-        assert!(range_is_clean(slice2.bitmap(), 0, slice2.len()));
-
-        slice.write_obj(val, dirty_offset).unwrap();
-        assert!(range_is_dirty(slice.bitmap(), dirty_offset, dirty_len));
-
-        slice.copy_to_volatile_slice(slice2);
-        assert!(range_is_dirty(slice2.bitmap(), 0, slice2.len()));
-
-        {
-            let (s1, s2) = slice.split_at(dirty_offset).unwrap();
-            assert!(range_is_clean(s1.bitmap(), 0, s1.len()));
-            assert!(range_is_dirty(s2.bitmap(), 0, dirty_len));
-        }
-
-        {
-            let s = slice.subslice(dirty_offset, dirty_len).unwrap();
-            assert!(range_is_dirty(s.bitmap(), 0, s.len()));
-        }
-
-        {
-            let s = slice.offset(dirty_offset).unwrap();
-            assert!(range_is_dirty(s.bitmap(), 0, dirty_len));
-        }
-
-        // Test `copy_from` for size_of::<T> == 1.
-        {
-            let buf = vec![1u8; dirty_offset];
-
-            assert!(range_is_clean(slice.bitmap(), 0, dirty_offset));
-            slice.copy_from(&buf);
-            assert!(range_is_dirty(slice.bitmap(), 0, dirty_offset));
-        }
-
-        // Test `copy_from` for size_of::<T> > 1.
-        {
-            let val = 1u32;
-            let buf = vec![val; dirty_offset / size_of_val(&val)];
-
-            assert!(range_is_clean(slice3.bitmap(), 0, dirty_offset));
-            slice3.copy_from(&buf);
-            assert!(range_is_dirty(slice3.bitmap(), 0, dirty_offset));
-        }
-
-        unsafe {
-            std::alloc::dealloc(buf, Layout::from_size_align(len, 8).unwrap());
-        }
-    }
-
-    #[test]
-    #[cfg(feature = "backend-bitmap")]
-    fn test_volatile_ref_dirty_tracking() {
-        let val = 123u64;
-        let mut buf = vec![val];
-
-        let bitmap = AtomicBitmap::new(size_of_val(&val), DEFAULT_PAGE_SIZE);
-        let vref = unsafe {
-            VolatileRef::with_bitmap(buf.as_mut_ptr() as *mut u8, bitmap.slice_at(0), None)
-        };
-
-        assert!(range_is_clean(vref.bitmap(), 0, vref.len()));
-        vref.store(val);
-        assert!(range_is_dirty(vref.bitmap(), 0, vref.len()));
-    }
-
-    #[cfg(feature = "backend-bitmap")]
-    fn test_volatile_array_ref_copy_from_tracking<T>(
-        buf: &mut [T],
-        index: usize,
-        page_size: NonZeroUsize,
-    ) where
-        T: ByteValued + From<u8>,
-    {
-        let bitmap = AtomicBitmap::new(size_of_val(buf), page_size);
-        let arr = unsafe {
-            VolatileArrayRef::with_bitmap(
-                buf.as_mut_ptr() as *mut u8,
-                index + 1,
-                bitmap.slice_at(0),
-                None,
-            )
-        };
-
-        let val = T::from(123);
-        let copy_buf = vec![val; index + 1];
-
-        assert!(range_is_clean(arr.bitmap(), 0, arr.len() * size_of::<T>()));
-        arr.copy_from(copy_buf.as_slice());
-        assert!(range_is_dirty(arr.bitmap(), 0, size_of_val(buf)));
-    }
-
-    #[test]
-    #[cfg(feature = "backend-bitmap")]
-    fn test_volatile_array_ref_dirty_tracking() {
-        let val = 123u64;
-        let dirty_len = size_of_val(&val);
-        let index = 0x1000;
-        let dirty_offset = dirty_len * index;
-
-        let mut buf = vec![0u64; index + 1];
-        let mut byte_buf = vec![0u8; index + 1];
-
-        // Test `ref_at`.
-        {
-            let bitmap = AtomicBitmap::new(buf.len() * size_of_val(&val), DEFAULT_PAGE_SIZE);
-            let arr = unsafe {
-                VolatileArrayRef::with_bitmap(
-                    buf.as_mut_ptr() as *mut u8,
-                    index + 1,
-                    bitmap.slice_at(0),
-                    None,
-                )
-            };
-
-            assert!(range_is_clean(arr.bitmap(), 0, arr.len() * dirty_len));
-            arr.ref_at(index).store(val);
-            assert!(range_is_dirty(arr.bitmap(), dirty_offset, dirty_len));
-        }
-
-        // Test `store`.
-        {
-            let bitmap = AtomicBitmap::new(buf.len() * size_of_val(&val), DEFAULT_PAGE_SIZE);
-            let arr = unsafe {
-                VolatileArrayRef::with_bitmap(
-                    buf.as_mut_ptr() as *mut u8,
-                    index + 1,
-                    bitmap.slice_at(0),
-                    None,
-                )
-            };
-
-            let slice = arr.to_slice();
-            assert!(range_is_clean(slice.bitmap(), 0, slice.len()));
-            arr.store(index, val);
-            assert!(range_is_dirty(slice.bitmap(), dirty_offset, dirty_len));
-        }
-
-        // Test `copy_from` when size_of::<T>() == 1.
-        test_volatile_array_ref_copy_from_tracking(&mut byte_buf, index, DEFAULT_PAGE_SIZE);
-        // Test `copy_from` when size_of::<T>() > 1.
-        test_volatile_array_ref_copy_from_tracking(&mut buf, index, DEFAULT_PAGE_SIZE);
-    }
-}
+pub use vm_memory_new::volatile_memory::{
+    Error,
+    Result,
+    compute_offset,
+    VolatileMemory,
+    PtrGuard,
+    PtrGuardMut,
+    VolatileSlice,
+    VolatileRef,
+    VolatileArrayRef,
+};
diff --git a/tests/semver.rs b/tests/semver.rs
new file mode 100644
index 00000000..0da5d61e
--- /dev/null
+++ b/tests/semver.rs
@@ -0,0 +1,123 @@
+#![allow(unused_imports)]
+pub use vm_memory::Address;
+mod address {
+    pub use vm_memory::address::Address;
+    pub use vm_memory::address::AddressValue;
+}
+pub use vm_memory::AddressValue;
+pub use vm_memory::AtomicAccess;
+mod atomic {
+    #[cfg(feature = "backend-atomic")]
+    pub use vm_memory::atomic::GuestMemoryAtomic;
+    #[cfg(feature = "backend-atomic")]
+    pub use vm_memory::atomic::GuestMemoryExclusiveGuard;
+    #[cfg(feature = "backend-atomic")]
+    pub use vm_memory::atomic::GuestMemoryLoadGuard;
+}
+pub use vm_memory::AtomicInteger;
+pub use vm_memory::Be16;
+pub use vm_memory::Be32;
+pub use vm_memory::Be64;
+pub use vm_memory::BeSize;
+mod bitmap {
+    #[cfg(feature = "backend-bitmap")]
+    pub use vm_memory::bitmap::AtomicBitmap;
+    pub use vm_memory::bitmap::Bitmap;
+    pub use vm_memory::bitmap::BitmapSlice;
+    pub use vm_memory::bitmap::NewBitmap;
+    pub use vm_memory::bitmap::WithBitmapSlice;
+}
+pub use vm_memory::Bytes;
+mod bytes {
+    pub use vm_memory::bytes::AtomicAccess;
+    pub use vm_memory::bytes::ByteValued;
+    pub use vm_memory::bytes::Bytes;
+}
+pub use vm_memory::ByteValued;
+mod endian {
+    pub use vm_memory::endian::Be16;
+    pub use vm_memory::endian::Be32;
+    pub use vm_memory::endian::Be64;
+    pub use vm_memory::endian::BeSize;
+    pub use vm_memory::endian::Le16;
+    pub use vm_memory::endian::Le32;
+    pub use vm_memory::endian::Le64;
+    pub use vm_memory::endian::LeSize;
+}
+pub use vm_memory::FileOffset;
+pub use vm_memory::GuestAddress;
+pub use vm_memory::GuestAddressSpace;
+pub use vm_memory::GuestMemory;
+#[cfg(feature = "backend-atomic")]
+pub use vm_memory::GuestMemoryAtomic;
+pub use vm_memory::GuestMemoryError;
+mod guest_memory {
+    pub use vm_memory::guest_memory::Error;
+    pub use vm_memory::guest_memory::FileOffset;
+    pub use vm_memory::guest_memory::GuestAddress;
+    pub use vm_memory::guest_memory::GuestAddressSpace;
+    pub use vm_memory::guest_memory::GuestMemory;
+    pub use vm_memory::guest_memory::GuestMemorySliceIterator;
+    pub use vm_memory::guest_memory::MemoryRegionAddress;
+}
+#[cfg(feature = "backend-atomic")]
+pub use vm_memory::GuestMemoryLoadGuard;
+pub use vm_memory::GuestMemoryRegion;
+pub use vm_memory::GuestMemoryRegionBytes;
+pub use vm_memory::GuestRegionCollection;
+pub use vm_memory::GuestRegionCollectionError;
+#[cfg(feature = "backend-mmap")]
+pub use vm_memory::GuestRegionMmap;
+mod io {
+    pub use vm_memory::io::ReadVolatile;
+    pub use vm_memory::io::WriteVolatile;
+}
+pub use vm_memory::Le16;
+pub use vm_memory::Le32;
+pub use vm_memory::Le64;
+pub use vm_memory::LeSize;
+pub use vm_memory::MemoryRegionAddress;
+mod mmap {
+    #[cfg(feature = "backend-mmap")]
+    pub use vm_memory::mmap::FromRangesError;
+    #[cfg(feature = "backend-mmap")]
+    pub use vm_memory::mmap::GuestRegionMmap;
+    #[cfg(feature = "backend-mmap")]
+    pub use vm_memory::mmap::MmapRange;
+    #[cfg(feature = "backend-mmap")]
+    pub use vm_memory::mmap::MmapRegion;
+    #[cfg(feature = "backend-mmap")]
+    pub use vm_memory::mmap::MmapRegionError;
+    #[cfg(feature = "backend-mmap")]
+    pub use vm_memory::mmap::MmapXenFlags;
+    #[cfg(feature = "backend-mmap")]
+    pub use vm_memory::mmap::NewBitmap;
+}
+#[cfg(all(feature = "backend-mmap", feature = "xen", target_family = "unix"))]
+pub use vm_memory::MmapRange;
+#[cfg(feature = "backend-mmap")]
+pub use vm_memory::MmapRegion;
+#[cfg(all(feature = "backend-mmap", feature = "xen", target_family = "unix"))]
+pub use vm_memory::MmapXenFlags;
+pub use vm_memory::ReadVolatile;
+mod region {
+    pub use vm_memory::region::GuestMemoryRegion;
+    pub use vm_memory::region::GuestMemoryRegionBytes;
+    pub use vm_memory::region::GuestRegionCollection;
+    pub use vm_memory::region::GuestRegionCollectionError;
+}
+pub use vm_memory::VolatileArrayRef;
+pub use vm_memory::VolatileMemory;
+pub use vm_memory::VolatileMemoryError;
+mod volatile_memory {
+    pub use vm_memory::volatile_memory::Error;
+    pub use vm_memory::volatile_memory::PtrGuard;
+    pub use vm_memory::volatile_memory::PtrGuardMut;
+    pub use vm_memory::volatile_memory::VolatileArrayRef;
+    pub use vm_memory::volatile_memory::VolatileMemory;
+    pub use vm_memory::volatile_memory::VolatileRef;
+    pub use vm_memory::volatile_memory::VolatileSlice;
+}
+pub use vm_memory::VolatileRef;
+pub use vm_memory::VolatileSlice;
+pub use vm_memory::WriteVolatile;
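
Not part of the patch: a minimal sketch of the kind of downstream code the `volatile_memory` re-exports and the `tests/semver.rs` imports above are meant to keep compiling. It touches only the 0.17-era `VolatileSlice`/`Bytes`/`VolatileMemory` surface, which this release now forwards to vm-memory 0.18.0; the crate and item paths are the published ones, while the snippet itself and its values are illustrative.

```rust
// Illustrative consumer code, assuming a crate that depends on vm-memory 0.17.x.
// All three imports resolve through the 0.17.2 facade to vm-memory 0.18.0.
use vm_memory::{Bytes, VolatileMemory, VolatileSlice};

fn main() {
    let mut backing = vec![0u8; 8];
    let slice = VolatileSlice::from(backing.as_mut_slice());

    // The re-exported `Bytes` trait still provides object-level access.
    slice.write_obj(0xdead_beefu32, 0).unwrap();
    assert_eq!(slice.read_obj::<u32>(0).unwrap(), 0xdead_beef);

    // The re-exported `VolatileMemory` trait still provides subslicing.
    let sub = slice.get_slice(4, 4).unwrap();
    assert_eq!(sub.len(), 4);
}
```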