add atomic load and store methods to Bytes
Signed-off-by: Alexandru Agache <aagch@amazon.com>
alexandruag committed Sep 22, 2020
1 parent 11ae4a2 · commit 60fd29c
Showing 5 changed files with 162 additions and 9 deletions.
src/bytes.rs (85 changes: 80 additions & 5 deletions)
@@ -11,11 +11,14 @@
//! Define the `ByteValued` trait to mark that it is safe to instantiate the struct with random
//! data.

use std::io::{Read, Write};
use std::mem::size_of;
use std::result::Result;
use std::slice::{from_raw_parts, from_raw_parts_mut};
use std::sync::atomic::Ordering;

use crate::atomic_integer::AtomicInteger;
use crate::VolatileSlice;

/// Types for which it is safe to initialize from raw data.
///
@@ -153,6 +156,41 @@ byte_valued_type!(i32);
byte_valued_type!(i64);
byte_valued_type!(isize);

/// A trait used to identify types which can be accessed atomically by proxy.
pub trait AtomicAccess:
    ByteValued
    // Could not find a more succinct way of stating that `Self` can be converted
    // into `Self::A::V`, and the other way around.
    + From<<<Self as AtomicAccess>::A as AtomicInteger>::V>
    + Into<<<Self as AtomicAccess>::A as AtomicInteger>::V>
{
    /// The `AtomicInteger` that atomic operations on `Self` are based on.
    type A: AtomicInteger;
}

macro_rules! impl_atomic_access {
    ($T:ty, $A:path) => {
        impl AtomicAccess for $T {
            type A = $A;
        }
    };
}

impl_atomic_access!(i8, std::sync::atomic::AtomicI8);
impl_atomic_access!(i16, std::sync::atomic::AtomicI16);
impl_atomic_access!(i32, std::sync::atomic::AtomicI32);
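// Note: `AtomicI64`/`AtomicU64` are not available on every target, so the
// 64-bit impls below are gated to architectures with native 64-bit atomics.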
#[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
impl_atomic_access!(i64, std::sync::atomic::AtomicI64);

impl_atomic_access!(u8, std::sync::atomic::AtomicU8);
impl_atomic_access!(u16, std::sync::atomic::AtomicU16);
impl_atomic_access!(u32, std::sync::atomic::AtomicU32);
#[cfg(any(target_arch = "x86_64", target_arch = "aarch64"))]
impl_atomic_access!(u64, std::sync::atomic::AtomicU64);

impl_atomic_access!(isize, std::sync::atomic::AtomicIsize);
impl_atomic_access!(usize, std::sync::atomic::AtomicUsize);

/// A container to host a range of bytes and access its content.
///
/// Candidates which may implement this trait include:
@@ -269,16 +307,40 @@ pub trait Bytes<A> {
    fn write_all_to<F>(&self, addr: A, dst: &mut F, count: usize) -> Result<(), Self::E>
    where
        F: Write;

    /// Atomically store a value at the specified address.
    fn store<T: AtomicAccess>(&self, val: T, addr: A, order: Ordering) -> Result<(), Self::E>;

    /// Atomically load a value from the specified address.
    fn load<T: AtomicAccess>(&self, addr: A, order: Ordering) -> Result<T, Self::E>;
}

#[cfg(test)]
pub(crate) mod tests {
    use super::*;

    use std::fmt::Debug;
    use std::io::{Read, Write};
    use std::mem::align_of;
    use std::slice;

    // Helper function to test atomic accesses for a `Bytes` implementation `b`
    // that is expected to be zero-initialized.
    pub fn check_atomic_accesses<A, B>(b: B, addr: A, bad_addr: A)
    where
        A: Copy,
        B: Bytes<A>,
        B::E: Debug,
    {
        let val = 100u32;

        assert_eq!(b.load::<u32>(addr, Ordering::Relaxed).unwrap(), 0);
        b.store(val, addr, Ordering::Relaxed).unwrap();
        assert_eq!(b.load::<u32>(addr, Ordering::Relaxed).unwrap(), val);

        assert!(b.load::<u32>(bad_addr, Ordering::Relaxed).is_err());
        assert!(b.store(val, bad_addr, Ordering::Relaxed).is_err());
    }

    fn check_byte_valued_type<T>()
    where
        T: ByteValued + PartialEq + Debug + Default,
@@ -409,6 +471,19 @@ mod tests {
        {
            unimplemented!()
        }

        fn store<T: AtomicAccess>(
            &self,
            _val: T,
            _addr: usize,
            _order: Ordering,
        ) -> Result<(), Self::E> {
            unimplemented!()
        }

        fn load<T: AtomicAccess>(&self, _addr: usize, _order: Ordering) -> Result<T, Self::E> {
            unimplemented!()
        }
    }

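For orientation, a minimal usage sketch of the new accessors (not part of the commit itself; it assumes the crate's backend-mmap feature and the `GuestMemoryMmap`/`GuestAddress` types exercised by the tests below):

use std::sync::atomic::Ordering;
use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};

fn main() {
    let mem = GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x1000)]).unwrap();

    // Atomically store a u32 at guest address 0x100, then load it back.
    mem.store(0xdead_beefu32, GuestAddress(0x100), Ordering::Relaxed)
        .unwrap();
    let val: u32 = mem.load(GuestAddress(0x100), Ordering::Relaxed).unwrap();
    assert_eq!(val, 0xdead_beef);
}
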
src/guest_memory.rs (27 changes: 26 additions & 1 deletion)
@@ -39,10 +39,11 @@ use std::fs::File;
use std::io::{self, Read, Write};
use std::ops::{BitAnd, BitOr, Deref};
use std::rc::Rc;
use std::sync::atomic::Ordering;
use std::sync::Arc;

use crate::address::{Address, AddressValue};
use crate::bytes::{AtomicAccess, Bytes};
use crate::volatile_memory;

static MAX_ACCESS_CHUNK: usize = 4096;
@@ -868,6 +869,20 @@ impl<T: GuestMemory> Bytes<GuestAddress> for T {
        }
        Ok(())
    }

    fn store<O: AtomicAccess>(&self, val: O, addr: GuestAddress, order: Ordering) -> Result<()> {
        // `find_region` should really do what `to_region_addr` is doing right now, except
        // it should keep returning a `Result`.
        self.to_region_addr(addr)
            .ok_or(Error::InvalidGuestAddress(addr))
            .and_then(|(region, region_addr)| region.store(val, region_addr, order))
    }

    fn load<O: AtomicAccess>(&self, addr: GuestAddress, order: Ordering) -> Result<O> {
        self.to_region_addr(addr)
            .ok_or(Error::InvalidGuestAddress(addr))
            .and_then(|(region, region_addr)| region.load(region_addr, order))
    }
}

#[cfg(test)]
@@ -1081,4 +1096,14 @@ mod tests {
            .write_all_to(addr, &mut Cursor::new(&mut image), 0)
            .is_ok());
    }

    #[cfg(feature = "backend-mmap")]
    #[test]
    fn test_atomic_accesses() {
        let addr = GuestAddress(0x1000);
        let mem = GuestMemoryMmap::from_ranges(&[(addr, 0x1000)]).unwrap();
        let bad_addr = addr.unchecked_add(0x1000);

        crate::bytes::tests::check_atomic_accesses(mem, addr, bad_addr);
    }
}
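
The blanket impl above first translates the guest address with `to_region_addr` and then delegates to the containing region; an address outside every region surfaces as `InvalidGuestAddress`. A short sketch of that failure path, under the same assumptions as the earlier example:

use std::sync::atomic::Ordering;
use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};

fn main() {
    let mem = GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x1000)]).unwrap();

    // 0x1000 is one byte past the single mapped region, so `to_region_addr`
    // returns `None` and the atomic accessors report an error.
    assert!(mem.load::<u32>(GuestAddress(0x1000), Ordering::Relaxed).is_err());
    assert!(mem.store(1u32, GuestAddress(0x1000), Ordering::Relaxed).is_err());
}
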
src/lib.rs (2 changes: 1 addition & 1 deletion)
@@ -32,7 +32,7 @@ mod atomic_integer;
pub use atomic_integer::AtomicInteger;

pub mod bytes;
pub use bytes::{AtomicAccess, ByteValued, Bytes};

pub mod endian;
pub use endian::{Be16, Be32, Be64, BeSize, Le16, Le32, Le64, LeSize};
src/mmap.rs (36 changes: 35 additions & 1 deletion)
@@ -18,14 +18,15 @@ use std::fmt;
use std::io::{Read, Write};
use std::ops::Deref;
use std::result;
use std::sync::atomic::Ordering;
use std::sync::Arc;

use crate::address::Address;
use crate::guest_memory::{
    self, FileOffset, GuestAddress, GuestMemory, GuestMemoryRegion, GuestUsize, MemoryRegionAddress,
};
use crate::volatile_memory::{VolatileMemory, VolatileSlice};
use crate::{AtomicAccess, Bytes};

#[cfg(unix)]
pub use crate::mmap_unix::{Error as MmapRegionError, MmapRegion};
@@ -342,6 +343,27 @@ impl Bytes<MemoryRegionAddress> for GuestRegionMmap {
            .write_all_to::<F>(maddr, dst, count)
            .map_err(Into::into)
    }

    fn store<T: AtomicAccess>(
        &self,
        val: T,
        addr: MemoryRegionAddress,
        order: Ordering,
    ) -> guest_memory::Result<()> {
        self.as_volatile_slice().and_then(|s| {
            s.store(val, addr.raw_value() as usize, order)
                .map_err(Into::into)
        })
    }

    fn load<T: AtomicAccess>(
        &self,
        addr: MemoryRegionAddress,
        order: Ordering,
    ) -> guest_memory::Result<T> {
        self.as_volatile_slice()
            .and_then(|s| s.load(addr.raw_value() as usize, order).map_err(Into::into))
    }
}

impl GuestMemoryRegion for GuestRegionMmap {
@@ -1444,4 +1466,16 @@ mod tests {
        assert_eq!(guest_mem.check_range(start_addr2, 0xc00), false);
        assert_eq!(guest_mem.check_range(start_addr1, std::usize::MAX), false);
    }

    #[test]
    fn test_atomic_accesses() {
        let region =
            GuestRegionMmap::new(MmapRegion::new(0x1000).unwrap(), GuestAddress(0)).unwrap();

        crate::bytes::tests::check_atomic_accesses(
            region,
            MemoryRegionAddress(0),
            MemoryRegionAddress(0x1000),
        );
    }
}
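
Since `GuestRegionMmap` implements `Bytes<MemoryRegionAddress>`, the atomic accessors can also be used on a single region, bypassing guest-physical translation. A hedged sketch (assuming `GuestRegionMmap`, `MmapRegion`, and `MemoryRegionAddress` are re-exported at the crate root, as the tests suggest):

use std::sync::atomic::Ordering;
use vm_memory::{Bytes, GuestAddress, GuestRegionMmap, MemoryRegionAddress, MmapRegion};

fn main() {
    let region =
        GuestRegionMmap::new(MmapRegion::new(0x1000).unwrap(), GuestAddress(0)).unwrap();

    // Store through the region-local address space and read the value back.
    region
        .store(7u16, MemoryRegionAddress(0x10), Ordering::Release)
        .unwrap();
    let v: u16 = region
        .load(MemoryRegionAddress(0x10), Ordering::Acquire)
        .unwrap();
    assert_eq!(v, 7);
}
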
src/volatile_memory.rs (21 changes: 20 additions & 1 deletion)
@@ -34,10 +34,11 @@ use std::ptr::copy;
use std::ptr::{read_volatile, write_volatile};
use std::result;
use std::slice::{from_raw_parts, from_raw_parts_mut};
use std::sync::atomic::Ordering;
use std::usize;

use crate::atomic_integer::AtomicInteger;
use crate::{AtomicAccess, ByteValued, Bytes};

use copy_slice_impl::copy_slice;

@@ -695,6 +696,16 @@ impl Bytes<usize> for VolatileSlice<'_> {
        }
        Ok(())
    }

    fn store<T: AtomicAccess>(&self, val: T, addr: usize, order: Ordering) -> Result<()> {
        self.get_atomic_ref::<T::A>(addr)
            .map(|r| r.store(val.into(), order))
    }

    fn load<T: AtomicAccess>(&self, addr: usize, order: Ordering) -> Result<T> {
        self.get_atomic_ref::<T::A>(addr)
            .map(|r| r.load(order).into())
    }
}

impl VolatileMemory for VolatileSlice<'_> {
@@ -1672,4 +1683,12 @@ mod tests {
        assert_eq!(super::alignment(a + 12), 4);
        assert_eq!(super::alignment(a + 8), 8);
    }

    #[test]
    fn test_atomic_accesses() {
        let a = VecMem::new(0x1000);
        let s = a.as_volatile_slice();

        crate::bytes::tests::check_atomic_accesses(s, 0, 0x1000);
    }
}
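
One behavioral detail: `get_atomic_ref::<T::A>` hands back a reference to the atomic proxy type, so the offset must be in bounds and is expected to be suitably aligned for `T::A`. A sketch of that expectation, assuming `get_atomic_ref` rejects misaligned offsets (the alignment check itself is not shown in this diff) and that the mmap backing is page-aligned:

use std::sync::atomic::Ordering;
use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};

fn main() {
    let mem = GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 0x1000)]).unwrap();

    // In bounds, but 0x21 is not 4-byte aligned: the access bottoms out in
    // `get_atomic_ref`, which should reject the misaligned offset.
    assert!(mem.load::<u32>(GuestAddress(0x21), Ordering::Relaxed).is_err());
}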
