Skip to content

Commit

Permalink
Add break-before-make checks to map_range and modify_range
Browse files Browse the repository at this point in the history
The AArch64 architecture is finicky when it comes to changing live page
tables, and there are elaborate rules to follow with regard to whether an
update needs to go via an invalid mapping (break) before creating the new
valid mapping (make).

Let's add some checks for this:
- refuse to split live block entries into table entries
- refuse to change the output address of a live entry
- refuse to change the memory type or to clear the non-global bit
  • Loading branch information
ardbiesheuvel committed Oct 19, 2023
1 parent 1fe025a commit 88fb6a1
Show file tree
Hide file tree
Showing 3 changed files with 205 additions and 3 deletions.
133 changes: 132 additions & 1 deletion src/idmap.rs
Expand Up @@ -196,7 +196,7 @@ impl IdMap {
mod tests {
use super::*;
use crate::{
paging::{Attributes, MemoryRegion, PAGE_SIZE},
paging::{Attributes, MemoryRegion, BITS_PER_LEVEL, PAGE_SIZE},
MapError, VirtualAddress,
};

Expand All @@ -206,6 +206,7 @@ mod tests {
fn map_valid() {
// A single byte at the start of the address space.
let mut idmap = IdMap::new(1, 1);
idmap.mapping.activate();
assert_eq!(
idmap.map_range(
&MemoryRegion::new(0, 1),
Expand All @@ -216,6 +217,7 @@ mod tests {

// Two pages at the start of the address space.
let mut idmap = IdMap::new(1, 1);
idmap.mapping.activate();
assert_eq!(
idmap.map_range(
&MemoryRegion::new(0, PAGE_SIZE * 2),
Expand All @@ -226,6 +228,7 @@ mod tests {

// A single byte at the end of the address space.
let mut idmap = IdMap::new(1, 1);
idmap.mapping.activate();
assert_eq!(
idmap.map_range(
&MemoryRegion::new(
Expand All @@ -239,6 +242,7 @@ mod tests {

// Two pages, on the boundary between two subtables.
let mut idmap = IdMap::new(1, 1);
idmap.mapping.activate();
assert_eq!(
idmap.map_range(
&MemoryRegion::new(PAGE_SIZE * 1023, PAGE_SIZE * 1025),
Expand All @@ -249,6 +253,7 @@ mod tests {

// The entire valid address space.
let mut idmap = IdMap::new(1, 1);
idmap.mapping.activate();
assert_eq!(
idmap.map_range(
&MemoryRegion::new(0, MAX_ADDRESS_FOR_ROOT_LEVEL_1),
Expand All @@ -258,6 +263,130 @@ mod tests {
);
}

/// Exercises the break-before-make (BBM) checks that `map_range` performs
/// while the page tables are live (i.e. after `activate()` has been called).
#[test]
fn map_break_before_make() {
    // Size of one block mapping at the next level up: a full subtable's
    // worth of pages.
    const BLOCK_SIZE: usize = PAGE_SIZE << BITS_PER_LEVEL;
    let mut idmap = IdMap::new(1, 1);
    // Create an initial block mapping while the tables are still inactive.
    // The result is deliberately discarded with `.ok()`; only the resulting
    // page-table state matters for the assertions below.
    idmap
        .map_range(
            &MemoryRegion::new(BLOCK_SIZE, 2 * BLOCK_SIZE),
            Attributes::NORMAL | Attributes::VALID,
        )
        .ok();
    // From this point on, updates are subject to the BBM rules.
    idmap.mapping.activate();

    // Extending a range is fine even if there are block mappings
    // in the middle: the live block entries are rewritten identically.
    assert_eq!(
        idmap.map_range(
            &MemoryRegion::new(BLOCK_SIZE - PAGE_SIZE, 2 * BLOCK_SIZE + PAGE_SIZE),
            Attributes::NORMAL | Attributes::VALID
        ),
        Ok(())
    );

    // Splitting a live block entry into a table of page entries is not
    // permitted.
    assert_eq!(
        idmap.map_range(
            &MemoryRegion::new(BLOCK_SIZE, BLOCK_SIZE + PAGE_SIZE),
            Attributes::NORMAL | Attributes::VALID
        ),
        Err(MapError::BreakBeforeMakeViolation(MemoryRegion::new(
            BLOCK_SIZE,
            BLOCK_SIZE + PAGE_SIZE
        )))
    );

    // Remapping a partially live range read-only is only permitted
    // if it does not require splitting a live block entry.
    assert_eq!(
        idmap.map_range(
            &MemoryRegion::new(0, BLOCK_SIZE + PAGE_SIZE),
            Attributes::NORMAL | Attributes::VALID | Attributes::READ_ONLY
        ),
        Err(MapError::BreakBeforeMakeViolation(MemoryRegion::new(
            0,
            BLOCK_SIZE + PAGE_SIZE
        )))
    );
    assert_eq!(
        idmap.map_range(
            &MemoryRegion::new(0, BLOCK_SIZE),
            Attributes::NORMAL | Attributes::VALID | Attributes::READ_ONLY
        ),
        Ok(())
    );

    // Changing the memory type (here: NORMAL -> DEVICE_NGNRE) of a live
    // entry is not permitted.
    assert_eq!(
        idmap.map_range(
            &MemoryRegion::new(0, BLOCK_SIZE),
            Attributes::DEVICE_NGNRE | Attributes::VALID | Attributes::NON_GLOBAL
        ),
        Err(MapError::BreakBeforeMakeViolation(MemoryRegion::new(
            0, BLOCK_SIZE
        )))
    );

    // Making a range invalid (the "break" step) is only permitted if it does
    // not require splitting a live block entry.
    assert_eq!(
        idmap.map_range(
            &MemoryRegion::new(PAGE_SIZE, BLOCK_SIZE + PAGE_SIZE),
            Attributes::NORMAL
        ),
        Err(MapError::BreakBeforeMakeViolation(MemoryRegion::new(
            PAGE_SIZE,
            BLOCK_SIZE + PAGE_SIZE
        )))
    );
    assert_eq!(
        idmap.map_range(
            &MemoryRegion::new(PAGE_SIZE, BLOCK_SIZE),
            Attributes::NORMAL
        ),
        Ok(())
    );

    // Creating a new valid entry where none was live is always permitted.
    assert_eq!(
        idmap.map_range(
            &MemoryRegion::new(0, 2 * PAGE_SIZE),
            Attributes::NORMAL | Attributes::VALID
        ),
        Ok(())
    );

    // Setting the non-global attribute on a live entry is permitted.
    assert_eq!(
        idmap.map_range(
            &MemoryRegion::new(0, PAGE_SIZE),
            Attributes::NORMAL | Attributes::VALID | Attributes::NON_GLOBAL
        ),
        Ok(())
    );

    // Removing the non-global attribute from a live mapping is not permitted.
    assert_eq!(
        idmap.map_range(
            &MemoryRegion::new(0, PAGE_SIZE),
            Attributes::NORMAL | Attributes::VALID
        ),
        Err(MapError::BreakBeforeMakeViolation(MemoryRegion::new(
            0, PAGE_SIZE
        )))
    );

    // Removing the non-global attribute from an inactive mapping is
    // permitted: the BBM checks only apply while the translation is live.
    idmap.mapping.deactivate();
    assert_eq!(
        idmap.map_range(
            &MemoryRegion::new(0, PAGE_SIZE),
            Attributes::NORMAL | Attributes::VALID
        ),
        Ok(())
    );
}

#[test]
fn map_out_of_range() {
let mut idmap = IdMap::new(1, 1);
Expand Down Expand Up @@ -299,6 +428,7 @@ mod tests {
| Attributes::VALID,
)
.unwrap();
idmap.mapping.activate();
idmap
}

Expand Down Expand Up @@ -344,6 +474,7 @@ mod tests {
fn breakup_invalid_block() {
const BLOCK_RANGE: usize = 0x200000;
let mut idmap = IdMap::new(1, 1);
idmap.mapping.activate();
idmap
.map_range(
&MemoryRegion::new(0, BLOCK_RANGE),
Expand Down
71 changes: 71 additions & 0 deletions src/lib.rs
Expand Up @@ -71,6 +71,8 @@ pub enum MapError {
RegionBackwards(MemoryRegion),
/// There was an error while updating a page table entry.
PteUpdateFault(Descriptor),
    /// Attempting to map or update a live range in a way that would violate
    /// architectural break-before-make (BBM) rules.
BreakBeforeMakeViolation(MemoryRegion),
}

impl Display for MapError {
Expand All @@ -86,6 +88,9 @@ impl Display for MapError {
Self::PteUpdateFault(desc) => {
write!(f, "Error updating page table entry {:?}", desc)
}
Self::BreakBeforeMakeViolation(region) => {
write!(f, "Cannot remap region {region} while translation is live.")
}
}
}
}
Expand Down Expand Up @@ -196,6 +201,59 @@ impl<T: Translation + Clone> Mapping<T> {
self.previous_ttbr = None;
}

/// Checks whether the given range can be mapped or updated while the translation is live,
/// without violating architectural break-before-make (BBM) requirements.
fn check_range_bbm(
&self,
range: &MemoryRegion,
flags: Option<(Attributes, PhysicalAddress)>,
) -> Result<(), MapError> {
self.walk_range(
range,
&mut |mr: &MemoryRegion, d: &Descriptor, level: usize| {
if d.is_valid() {
if !mr.is_block(level) {
// Cannot split a live block mapping
return Err(());
}

if let Some((flags, pa)) = flags {
if !flags.contains(Attributes::VALID) {
// Removing the valid bit is always ok
return Ok(());
}

let oa = mr.start() - range.start() + pa.0;

if oa != d.output_address().0 {
// Cannot change output address on a live mapping
return Err(());
}

let desc_flags = d.flags().unwrap();

if desc_flags.contains(Attributes::NORMAL)
^ flags.contains(Attributes::NORMAL)
{
// Cannot change memory type
return Err(());
}

if desc_flags.contains(Attributes::NON_GLOBAL)
&& !flags.contains(Attributes::NON_GLOBAL)
{
// Cannot convert from non-global to global
return Err(());
}
}
}
Ok(())
},
)
.map_err(|_| MapError::BreakBeforeMakeViolation(range.clone()))?;
Ok(())
}

/// Maps the given range of virtual addresses to the corresponding range of physical addresses
/// starting at `pa`, with the given flags.
///
Expand All @@ -211,12 +269,18 @@ impl<T: Translation + Clone> Mapping<T> {
///
/// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
/// largest virtual address covered by the page table given its root level.
///
    /// Returns [`MapError::BreakBeforeMakeViolation`] if the range intersects with live mappings,
/// and modifying those would violate architectural break-before-make (BBM) requirements.
pub fn map_range(
&mut self,
range: &MemoryRegion,
pa: PhysicalAddress,
flags: Attributes,
) -> Result<(), MapError> {
if self.active() {
self.check_range_bbm(range, Some((flags, pa)))?;
}
self.root.map_range(range, pa, flags)?;
#[cfg(target_arch = "aarch64")]
// SAFETY: Safe because this is just a memory barrier.
Expand Down Expand Up @@ -246,10 +310,17 @@ impl<T: Translation + Clone> Mapping<T> {
///
/// Returns [`MapError::AddressRange`] if the largest address in the `range` is greater than the
/// largest virtual address covered by the page table given its root level.
///
    /// Returns [`MapError::BreakBeforeMakeViolation`] if the range intersects with live mappings,
/// and modifying those would violate architectural break-before-make (BBM) requirements.
pub fn modify_range<F>(&mut self, range: &MemoryRegion, f: &F) -> Result<(), MapError>
where
F: Fn(&MemoryRegion, &mut Descriptor, usize) -> Result<(), ()> + ?Sized,
{
if self.active() {
// Check whether a split is needed
self.check_range_bbm(range, None)?;
}
self.root.modify_range(range, f)?;
#[cfg(target_arch = "aarch64")]
// SAFETY: Safe because this is just a memory barrier.
Expand Down
4 changes: 2 additions & 2 deletions src/paging.rs
Expand Up @@ -188,7 +188,7 @@ impl MemoryRegion {
}

/// Returns whether this region can be mapped at 'level' using block mappings only.
fn is_block(&self, level: usize) -> bool {
pub(crate) fn is_block(&self, level: usize) -> bool {
let gran = granularity_at_level(level);
(self.0.start.0 | self.0.end.0) & (gran - 1) == 0
}
Expand Down Expand Up @@ -707,7 +707,7 @@ pub struct Descriptor(usize);
impl Descriptor {
const PHYSICAL_ADDRESS_BITMASK: usize = !(PAGE_SIZE - 1) & !(0xffff << 48);

fn output_address(self) -> PhysicalAddress {
pub(crate) fn output_address(self) -> PhysicalAddress {
PhysicalAddress(self.0 & Self::PHYSICAL_ADDRESS_BITMASK)
}

Expand Down

0 comments on commit 88fb6a1

Please sign in to comment.