diff --git a/src/idmap.rs b/src/idmap.rs
index 2930164..49e3a07 100644
--- a/src/idmap.rs
+++ b/src/idmap.rs
@@ -196,7 +196,7 @@ impl IdMap {
 mod tests {
     use super::*;
     use crate::{
-        paging::{Attributes, MemoryRegion, PAGE_SIZE},
+        paging::{Attributes, MemoryRegion, BITS_PER_LEVEL, PAGE_SIZE},
         MapError, VirtualAddress,
     };
 
@@ -206,6 +206,7 @@ mod tests {
     fn map_valid() {
         // A single byte at the start of the address space.
         let mut idmap = IdMap::new(1, 1);
+        idmap.mapping.activate();
         assert_eq!(
             idmap.map_range(
                 &MemoryRegion::new(0, 1),
@@ -216,6 +217,7 @@ mod tests {
 
         // Two pages at the start of the address space.
         let mut idmap = IdMap::new(1, 1);
+        idmap.mapping.activate();
         assert_eq!(
             idmap.map_range(
                 &MemoryRegion::new(0, PAGE_SIZE * 2),
@@ -226,6 +228,7 @@ mod tests {
 
         // A single byte at the end of the address space.
         let mut idmap = IdMap::new(1, 1);
+        idmap.mapping.activate();
         assert_eq!(
             idmap.map_range(
                 &MemoryRegion::new(
@@ -239,6 +242,7 @@ mod tests {
 
         // Two pages, on the boundary between two subtables.
         let mut idmap = IdMap::new(1, 1);
+        idmap.mapping.activate();
         assert_eq!(
             idmap.map_range(
                 &MemoryRegion::new(PAGE_SIZE * 1023, PAGE_SIZE * 1025),
@@ -249,6 +253,7 @@ mod tests {
 
         // The entire valid address space.
         let mut idmap = IdMap::new(1, 1);
+        idmap.mapping.activate();
         assert_eq!(
             idmap.map_range(
                 &MemoryRegion::new(0, MAX_ADDRESS_FOR_ROOT_LEVEL_1),
@@ -258,6 +263,130 @@ mod tests {
         );
     }
 
+    #[test]
+    fn map_break_before_make() {
+        const BLOCK_SIZE: usize = PAGE_SIZE << BITS_PER_LEVEL;
+        let mut idmap = IdMap::new(1, 1);
+        idmap
+            .map_range(
+                &MemoryRegion::new(BLOCK_SIZE, 2 * BLOCK_SIZE),
+                Attributes::NORMAL | Attributes::VALID,
+            )
+            .unwrap();
+        idmap.mapping.activate();
+
+        // Extending a range is fine even if there are block mappings
+        // in the middle.
+        assert_eq!(
+            idmap.map_range(
+                &MemoryRegion::new(BLOCK_SIZE - PAGE_SIZE, 2 * BLOCK_SIZE + PAGE_SIZE),
+                Attributes::NORMAL | Attributes::VALID
+            ),
+            Ok(())
+        );
+
+        // Splitting a range is not permitted.
+        assert_eq!(
+            idmap.map_range(
+                &MemoryRegion::new(BLOCK_SIZE, BLOCK_SIZE + PAGE_SIZE),
+                Attributes::NORMAL | Attributes::VALID
+            ),
+            Err(MapError::BreakBeforeMakeViolation(MemoryRegion::new(
+                BLOCK_SIZE,
+                BLOCK_SIZE + PAGE_SIZE
+            )))
+        );
+
+        // Remapping a partially live range read-only is only permitted
+        // if it does not require splitting.
+        assert_eq!(
+            idmap.map_range(
+                &MemoryRegion::new(0, BLOCK_SIZE + PAGE_SIZE),
+                Attributes::NORMAL | Attributes::VALID | Attributes::READ_ONLY
+            ),
+            Err(MapError::BreakBeforeMakeViolation(MemoryRegion::new(
+                0,
+                BLOCK_SIZE + PAGE_SIZE
+            )))
+        );
+        assert_eq!(
+            idmap.map_range(
+                &MemoryRegion::new(0, BLOCK_SIZE),
+                Attributes::NORMAL | Attributes::VALID | Attributes::READ_ONLY
+            ),
+            Ok(())
+        );
+
+        // Changing the memory type is not permitted.
+        assert_eq!(
+            idmap.map_range(
+                &MemoryRegion::new(0, BLOCK_SIZE),
+                Attributes::DEVICE_NGNRE | Attributes::VALID | Attributes::NON_GLOBAL
+            ),
+            Err(MapError::BreakBeforeMakeViolation(MemoryRegion::new(
+                0, BLOCK_SIZE
+            )))
+        );
+
+        // Making a range invalid is only permitted if it does not require splitting.
+        assert_eq!(
+            idmap.map_range(
+                &MemoryRegion::new(PAGE_SIZE, BLOCK_SIZE + PAGE_SIZE),
+                Attributes::NORMAL
+            ),
+            Err(MapError::BreakBeforeMakeViolation(MemoryRegion::new(
+                PAGE_SIZE,
+                BLOCK_SIZE + PAGE_SIZE
+            )))
+        );
+        assert_eq!(
+            idmap.map_range(
+                &MemoryRegion::new(PAGE_SIZE, BLOCK_SIZE),
+                Attributes::NORMAL
+            ),
+            Ok(())
+        );
+
+        // Creating a new valid entry is always permitted.
+        assert_eq!(
+            idmap.map_range(
+                &MemoryRegion::new(0, 2 * PAGE_SIZE),
+                Attributes::NORMAL | Attributes::VALID
+            ),
+            Ok(())
+        );
+
+        // Setting the non-global attribute is permitted.
+        assert_eq!(
+            idmap.map_range(
+                &MemoryRegion::new(0, PAGE_SIZE),
+                Attributes::NORMAL | Attributes::VALID | Attributes::NON_GLOBAL
+            ),
+            Ok(())
+        );
+
+        // Removing the non-global attribute from a live mapping is not permitted.
+        assert_eq!(
+            idmap.map_range(
+                &MemoryRegion::new(0, PAGE_SIZE),
+                Attributes::NORMAL | Attributes::VALID
+            ),
+            Err(MapError::BreakBeforeMakeViolation(MemoryRegion::new(
+                0, PAGE_SIZE
+            )))
+        );
+
+        // Removing the non-global attribute from an inactive mapping is permitted.
+        idmap.mapping.deactivate();
+        assert_eq!(
+            idmap.map_range(
+                &MemoryRegion::new(0, PAGE_SIZE),
+                Attributes::NORMAL | Attributes::VALID
+            ),
+            Ok(())
+        );
+    }
+
     #[test]
     fn map_out_of_range() {
         let mut idmap = IdMap::new(1, 1);
@@ -299,6 +428,7 @@ mod tests {
                 | Attributes::VALID,
             )
             .unwrap();
+        idmap.mapping.activate();
         idmap
     }
 
@@ -344,6 +474,7 @@ mod tests {
     fn breakup_invalid_block() {
         const BLOCK_RANGE: usize = 0x200000;
         let mut idmap = IdMap::new(1, 1);
+        idmap.mapping.activate();
        idmap
             .map_range(
                 &MemoryRegion::new(0, BLOCK_RANGE),
diff --git a/src/lib.rs b/src/lib.rs
index 6592519..1f3cd5b 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -71,6 +71,8 @@ pub enum MapError {
     RegionBackwards(MemoryRegion),
     /// There was an error while updating a page table entry.
     PteUpdateFault(Descriptor),
+    /// Mapping or updating a live range violates break-before-make rules.
+    BreakBeforeMakeViolation(MemoryRegion),
 }
 
 impl Display for MapError {
@@ -86,6 +88,9 @@ impl Display for MapError {
             Self::PteUpdateFault(desc) => {
                 write!(f, "Error updating page table entry {:?}", desc)
             }
+            Self::BreakBeforeMakeViolation(region) => {
+                write!(f, "Cannot remap region {region} while translation is live")
+            }
         }
     }
 }
@@ -196,6 +201,59 @@ impl Mapping {
         self.previous_ttbr = None;
     }
 
+    /// Checks whether the given range can be mapped or updated while the translation is live,
+    /// without violating architectural break-before-make (BBM) requirements.
+    fn check_range_bbm(
+        &self,
+        range: &MemoryRegion,
+        flags: Option<(Attributes, PhysicalAddress)>,
+    ) -> Result<(), MapError> {
+        self.walk_range(
+            range,
+            &mut |mr: &MemoryRegion, d: &Descriptor, level: usize| {
+                if d.is_valid() {
+                    if !mr.is_block(level) {
+                        // Cannot split a live block mapping.
+                        return Err(());
+                    }
+
+                    if let Some((flags, pa)) = flags {
+                        if !flags.contains(Attributes::VALID) {
+                            // Removing the valid bit is always OK.
+                            return Ok(());
+                        }
+
+                        let oa = mr.start() - range.start() + pa.0;
+
+                        if oa != d.output_address().0 {
+                            // Cannot change the output address of a live mapping.
+                            return Err(());
+                        }
+
+                        let desc_flags = d.flags().unwrap();
+
+                        if desc_flags.contains(Attributes::NORMAL)
+                            ^ flags.contains(Attributes::NORMAL)
+                        {
+                            // Cannot change the memory type.
+                            return Err(());
+                        }
+
+                        if desc_flags.contains(Attributes::NON_GLOBAL)
+                            && !flags.contains(Attributes::NON_GLOBAL)
+                        {
+                            // Cannot convert from non-global to global.
+                            return Err(());
+                        }
+                    }
+                }
+                Ok(())
+            },
+        )
+        .map_err(|_| MapError::BreakBeforeMakeViolation(range.clone()))?;
+        Ok(())
+    }
+
     /// Maps the given range of virtual addresses to the corresponding range of physical addresses
     /// starting at `pa`, with the given flags.
     ///
@@ -211,12 +269,18 @@ impl Mapping {
     ///
     /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
     /// largest virtual address covered by the page table given its root level.
+    ///
+    /// Returns [`MapError::BreakBeforeMakeViolation`] if the range intersects live mappings and
+    /// modifying them would violate architectural break-before-make (BBM) requirements.
     pub fn map_range(
         &mut self,
         range: &MemoryRegion,
         pa: PhysicalAddress,
         flags: Attributes,
     ) -> Result<(), MapError> {
+        if self.active() {
+            self.check_range_bbm(range, Some((flags, pa)))?;
+        }
         self.root.map_range(range, pa, flags)?;
         #[cfg(target_arch = "aarch64")]
         // SAFETY: Safe because this is just a memory barrier.
@@ -246,10 +310,17 @@ impl Mapping {
     ///
     /// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
     /// largest virtual address covered by the page table given its root level.
+    ///
+    /// Returns [`MapError::BreakBeforeMakeViolation`] if the range intersects live mappings and
+    /// modifying them would violate architectural break-before-make (BBM) requirements.
     pub fn modify_range<F>(&mut self, range: &MemoryRegion, f: &F) -> Result<(), MapError>
     where
         F: Fn(&MemoryRegion, &mut Descriptor, usize) -> Result<(), ()> + ?Sized,
     {
+        if self.active() {
+            // Check whether a split is needed.
+            self.check_range_bbm(range, None)?;
+        }
         self.root.modify_range(range, f)?;
         #[cfg(target_arch = "aarch64")]
         // SAFETY: Safe because this is just a memory barrier.
diff --git a/src/paging.rs b/src/paging.rs
index 5f9a190..99d5ff2 100644
--- a/src/paging.rs
+++ b/src/paging.rs
@@ -188,7 +188,7 @@ impl MemoryRegion {
     }
 
     /// Returns whether this region can be mapped at 'level' using block mappings only.
-    fn is_block(&self, level: usize) -> bool {
+    pub(crate) fn is_block(&self, level: usize) -> bool {
         let gran = granularity_at_level(level);
         (self.0.start.0 | self.0.end.0) & (gran - 1) == 0
     }
@@ -707,7 +707,7 @@ pub struct Descriptor(usize);
 impl Descriptor {
     const PHYSICAL_ADDRESS_BITMASK: usize = !(PAGE_SIZE - 1) & !(0xffff << 48);
 
-    fn output_address(self) -> PhysicalAddress {
+    pub(crate) fn output_address(self) -> PhysicalAddress {
         PhysicalAddress(self.0 & Self::PHYSICAL_ADDRESS_BITMASK)
     }
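
For callers, the practical effect of this change is that `map_range` and `modify_range` can now fail on an active mapping instead of silently violating the architectural rules. Below is a minimal sketch of how a caller might recover from such a failure, assuming the surrounding crate is `aarch64_paging` with the public `IdMap::activate`/`IdMap::deactivate` API (the helper name `remap_or_bbm` is hypothetical, not part of this change, and deactivating is only safe when no code currently executing depends on the mapping):

```rust
use aarch64_paging::{
    idmap::IdMap,
    paging::{Attributes, MemoryRegion},
    MapError,
};

/// Hypothetical helper: tries a live update first, and falls back to an
/// explicit break-before-make sequence if the update would violate BBM rules.
fn remap_or_bbm(
    idmap: &mut IdMap,
    region: &MemoryRegion,
    flags: Attributes,
) -> Result<(), MapError> {
    match idmap.map_range(region, flags) {
        Err(MapError::BreakBeforeMakeViolation(_)) => {
            // Break: stop translating through these tables. Only safe if
            // nothing currently executing relies on this mapping.
            idmap.deactivate();
            // Make: while the mapping is inactive, any update is permitted.
            let result = idmap.map_range(region, flags);
            // Switch back to these tables before continuing.
            idmap.activate();
            result
        }
        other => other,
    }
}
```

This mirrors what the new `map_break_before_make` test exercises via `idmap.mapping.deactivate()`: once the translation is no longer live, updates that would otherwise require splitting a block mapping or changing attributes in place become legal again.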