Skip to content
Merged
3 changes: 3 additions & 0 deletions openhcl/openhcl_boot/build.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,5 +4,8 @@
#![expect(missing_docs)]

fn main() {
    // Register `nightly` as a known cfg value so builds passing
    // `--cfg nightly` (used to opt into the allocator_api feature) do not
    // trip the unexpected-cfg lint; see main.rs for the rationale.
    let nightly_check_cfg = "cargo:rustc-check-cfg=cfg(nightly)";
    println!("{nightly_check_cfg}");

    minimal_rt_build::init();
}
257 changes: 257 additions & 0 deletions openhcl/openhcl_boot/src/host_params/dt/bump_alloc.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,257 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.

//! A simple bump allocator that can be used in the bootloader.
//!
//! Note that we only allow allocations in a small window for supporting
//! mesh_protobuf. Any other attempts to allocate will result in a panic.

use crate::boot_logger::log;
use crate::single_threaded::SingleThreaded;
use core::alloc::GlobalAlloc;
use core::alloc::Layout;
use core::cell::RefCell;
use memory_range::MemoryRange;

// Only enable the bump allocator when compiling with minimal_rt, as otherwise
// it will override the global allocator in unit tests which is not what we
// want.
//
// The static itself is always defined so tests can construct and exercise the
// allocator directly without registering it as the global allocator.
#[cfg_attr(minimal_rt, global_allocator)]
pub static ALLOCATOR: BumpAllocator = BumpAllocator::new();

/// Gate controlling whether the allocator will serve requests.
///
/// The lifecycle is one-way: `Allowed` -> `Enabled` -> `Disabled`. Once
/// disabled, allocations can never be turned back on; `alloc` panics in any
/// state other than `Enabled`.
#[derive(Debug, PartialEq, Eq)]
enum State {
    /// Allocations can be enabled via `enable_alloc`.
    Allowed,
    /// Allocations are currently enabled.
    Enabled,
    /// Allocations are disabled and cannot be enabled again.
    Disabled,
}

/// Mutable state for [`BumpAllocator`], guarded by a `RefCell` since the
/// bootloader is single threaded.
#[derive(Debug)]
pub struct Inner {
    /// Base of the memory range handed to `init`; null until `init` runs.
    start: *mut u8,
    /// Next free byte; advances monotonically on each allocation.
    next: *mut u8,
    /// One past the last usable byte of the range.
    end: *mut u8,
    /// Lifecycle gate for serving allocations.
    allow_alloc: State,
    /// Number of successful allocations, reported by `log_stats`.
    alloc_count: usize,
}

/// A simple bump allocator for the bootloader: allocations advance a cursor
/// through a fixed range set by `init`; `dealloc` never reclaims memory.
pub struct BumpAllocator {
    // SingleThreaded + RefCell provide interior mutability without atomics,
    // which is sound because the bootloader runs on a single thread.
    inner: SingleThreaded<RefCell<Inner>>,
}

impl BumpAllocator {
    /// Creates an empty allocator. No memory is usable until [`Self::init`]
    /// is called, and no allocation succeeds until [`Self::enable_alloc`].
    pub const fn new() -> Self {
        let inner = Inner {
            start: core::ptr::null_mut(),
            next: core::ptr::null_mut(),
            end: core::ptr::null_mut(),
            allow_alloc: State::Allowed,
            alloc_count: 0,
        };
        BumpAllocator {
            inner: SingleThreaded(RefCell::new(inner)),
        }
    }

    /// Initialize the bump allocator with the specified memory range.
    ///
    /// # Safety
    ///
    /// The caller must guarantee that the memory range is both valid to
    /// access via the current pagetable identity map, and that it is unused.
    pub unsafe fn init(&self, mem: MemoryRange) {
        let mut state = self.inner.borrow_mut();
        // Initialization is one-shot; a second call is a programming error.
        assert!(
            state.start.is_null(),
            "bump allocator memory range previously set {:#x?}",
            state.start
        );

        let base = mem.start() as *mut u8;
        state.start = base;
        state.next = base;
        state.end = mem.end() as *mut u8;
    }

    /// Enable allocations. This panics if allocations were ever previously
    /// enabled.
    pub fn enable_alloc(&self) {
        let mut state = self.inner.borrow_mut();

        // Only the Allowed -> Enabled transition is legal.
        state.allow_alloc = match state.allow_alloc {
            State::Allowed => State::Enabled,
            State::Enabled => panic!("allocations are already enabled"),
            State::Disabled => {
                panic!("allocations were previously disabled and cannot be re-enabled")
            }
        };
    }

    /// Disable allocations. Panics if the allocator was not previously enabled.
    pub fn disable_alloc(&self) {
        let mut state = self.inner.borrow_mut();

        // Only the Enabled -> Disabled transition is legal.
        state.allow_alloc = match state.allow_alloc {
            State::Allowed => panic!("allocations were never enabled"),
            State::Enabled => State::Disabled,
            State::Disabled => {
                panic!("allocations were previously disabled and cannot be disabled again")
            }
        };
    }

    /// Logs how much of the backing range was consumed and how many
    /// allocations were served.
    pub fn log_stats(&self) {
        let state = self.inner.borrow();

        // SAFETY: start and next lie within the same allocation provided to
        // init, and u8 pointers carry no alignment requirement.
        let allocated = unsafe { state.next.offset_from(state.start) };
        // SAFETY: next and end lie within the same allocation provided to
        // init, and u8 pointers carry no alignment requirement.
        let free = unsafe { state.end.offset_from(state.next) };

        log!(
            "Bump allocator: allocated {} bytes in {} allocations ({} bytes free)",
            allocated,
            state.alloc_count,
            free
        );
    }
}

// SAFETY: The allocator points to a valid identity VA range via the
// construction at init.
unsafe impl GlobalAlloc for BumpAllocator {
    // Bump-allocates `layout` from the range set at init. Returns null when
    // out of memory; panics if allocations are not currently enabled.
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        let mut inner = self.inner.borrow_mut();

        // Allocation outside the enable_alloc/disable_alloc window is a bug,
        // not an out-of-memory condition, so panic rather than return null.
        if inner.allow_alloc != State::Enabled {
            panic!("allocations are not allowed {:?}", inner.allow_alloc);
        }

        // Round the cursor up to the requested alignment, then reserve
        // `layout.size()` bytes. wrapping_* arithmetic is used so overflow
        // can be detected by comparison below instead of causing UB.
        let align_offset = inner.next.align_offset(layout.align());
        let alloc_start = inner.next.wrapping_add(align_offset);
        let alloc_end = alloc_start.wrapping_add(layout.size());

        // If end overflowed this allocation is too large. If start overflowed,
        // end will also overflow.
        //
        // Rust `Layout` guarantees that the size is not larger than `isize`,
        // so it's not possible to wrap around twice.
        if alloc_end < alloc_start {
            return core::ptr::null_mut();
        }

        // TODO: re-enable allocation tracing when we support tracing levels
        // via the log crate.

        if alloc_end > inner.end {
            core::ptr::null_mut() // out of memory
        } else {
            // Commit the allocation by bumping the cursor.
            inner.next = alloc_end;
            inner.alloc_count += 1;
            alloc_start
        }
    }

    // Intentionally a no-op: a bump allocator never reclaims memory. The
    // whole range is discarded when the bootloader hands off.
    unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {
        // TODO: re-enable allocation tracing when we support tracing levels
        // via the log crate.
    }

    // TODO: consider implementing realloc for the Vec grow case, which is the
    // main use case we see. This would mean supporting realloc if the
    // allocation being realloced was the last one aka the tail.
}

#[cfg(nightly)]
// SAFETY: The allocator points to a valid identity VA range via the
// construction at init, the same as for `GlobalAlloc`.
unsafe impl core::alloc::Allocator for &BumpAllocator {
    /// Allocates `layout` through the bump allocator, mapping the null
    /// (out-of-memory) result to `AllocError`.
    ///
    /// Panics if allocations are not currently enabled, as `GlobalAlloc::alloc`
    /// does.
    fn allocate(
        &self,
        layout: Layout,
    ) -> Result<core::ptr::NonNull<[u8]>, core::alloc::AllocError> {
        // SAFETY: the backing range is valid per this impl's safety comment;
        // the GlobalAlloc contract is upheld by the bump allocator itself.
        let ptr = unsafe { self.alloc(layout) };
        // NonNull::new folds the null check into the Option, avoiding an
        // unnecessary `new_unchecked` unsafe block.
        match core::ptr::NonNull::new(ptr) {
            Some(base) => Ok(core::ptr::NonNull::slice_from_raw_parts(
                base,
                layout.size(),
            )),
            None => Err(core::alloc::AllocError),
        }
    }

    /// No memory is reclaimed; log the call so unexpected frees are visible
    /// during testing.
    unsafe fn deallocate(&self, ptr: core::ptr::NonNull<u8>, layout: Layout) {
        log!("deallocate called on {:#x?} of size {}", ptr, layout.size());
    }
}

#[cfg(nightly)]
#[cfg(test)]
mod tests {
    use super::*;

    // NOTE: run these tests with miri via
    // `RUSTFLAGS="--cfg nightly" cargo +nightly miri test -p openhcl_boot`
    #[test]
    fn test_alloc() {
        // Back the allocator with a real heap allocation so miri can track
        // provenance. The Box is decomposed into a raw pointer here and
        // reconstituted at the end of the test so it is properly freed.
        let buffer: Box<[u8]> = Box::new([0; 0x1000 * 20]);
        let addr = Box::into_raw(buffer) as *mut u8;
        let allocator = BumpAllocator {
            inner: SingleThreaded(RefCell::new(Inner {
                start: addr,
                next: addr,
                // SAFETY: the offset stays within the 20-page buffer
                // allocated above.
                end: unsafe { addr.add(0x1000 * 20) },
                allow_alloc: State::Allowed,
                alloc_count: 0,
            })),
        };
        allocator.enable_alloc();

        // Raw GlobalAlloc path: each allocation must be writable and
        // readable at increasing alignments.
        unsafe {
            let ptr1 = allocator.alloc(Layout::from_size_align(100, 8).unwrap());
            *ptr1 = 42;
            assert_eq!(*ptr1, 42);

            let ptr2 = allocator.alloc(Layout::from_size_align(200, 16).unwrap());
            *ptr2 = 55;
            assert_eq!(*ptr2, 55);

            let ptr3 = allocator.alloc(Layout::from_size_align(300, 32).unwrap());
            *ptr3 = 77;
            assert_eq!(*ptr3, 77);
        }

        // Allocator-API path (nightly): exercise Vec growth and resize, which
        // is the main expected consumer of the allocator.
        {
            let mut vec: Vec<u8, &BumpAllocator> = Vec::new_in(&allocator);

            // Push 4096 bytes, which should force a vec realloc.
            for i in 0..4096 {
                vec.push(i as u8);
            }

            // force an explicit resize to 10000 bytes
            vec.resize(10000, 0);
        }

        // Attempt to allocate a large chunk that is not available.
        unsafe {
            let ptr4 = allocator.alloc(Layout::from_size_align(0x1000 * 20, 8).unwrap());
            assert!(ptr4.is_null());
        }

        // Recreate the box, then drop it so miri is satisfied.
        let _buf = unsafe { Box::from_raw(core::ptr::slice_from_raw_parts_mut(addr, 0x1000 * 20)) };

        allocator.log_stats();
    }
}
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,8 @@ use memory_range::subtract_ranges;
use memory_range::walk_ranges;
use thiserror::Error;

mod bump_alloc;

/// Errors when reading the host device tree.
#[derive(Debug, Error)]
pub enum DtError {
Expand Down Expand Up @@ -311,6 +313,32 @@ fn parse_host_vtl2_ram(
vtl2_ram
}

// NOTE(review): the allow(dead_code) is gated on minimal_rt even though this
// function appears to be called from read_from_dt in all configurations —
// confirm which build actually leaves it unreferenced.
#[cfg_attr(minimal_rt, allow(dead_code))]
/// Initializes the bootshim bump allocator from the heap range carried in
/// `params`, and in debug builds immediately smoke-tests it with a single
/// Box round trip.
fn init_heap(params: &ShimParams) {
    // Initialize the temporary heap.
    //
    // This is only to be enabled for mesh decode.
    //
    // SAFETY: The heap range is reserved at file build time, and is
    // guaranteed to be unused by anything else.
    unsafe {
        bump_alloc::ALLOCATOR.init(params.heap);
    }

    // TODO: test using heap, as no mesh decode yet.
    #[cfg(debug_assertions)]
    {
        use alloc::boxed::Box;
        bump_alloc::ALLOCATOR.enable_alloc();

        // Allocate, log, and free one value to prove the allocator works,
        // then permanently disable it and report usage.
        let box_int = Box::new(42);
        log!("box int {box_int}");
        drop(box_int);
        bump_alloc::ALLOCATOR.disable_alloc();
        bump_alloc::ALLOCATOR.log_stats();
    }
}

impl PartitionInfo {
// Read the IGVM provided DT for the vtl2 partition info.
pub fn read_from_dt<'a>(
Expand Down Expand Up @@ -383,6 +411,8 @@ impl PartitionInfo {
storage.vmbus_vtl2 = parsed.vmbus_vtl2.clone().ok_or(DtError::Vtl2Vmbus)?;
storage.vmbus_vtl0 = parsed.vmbus_vtl0.clone().ok_or(DtError::Vtl0Vmbus)?;

init_heap(params);

// The host is responsible for allocating MMIO ranges for non-isolated
// guests when it also provides the ram VTL2 should use.
//
Expand Down
10 changes: 10 additions & 0 deletions openhcl/openhcl_boot/src/host_params/shim_params.rs
Original file line number Diff line number Diff line change
Expand Up @@ -105,6 +105,8 @@ pub struct ShimParams {
pub page_tables: Option<MemoryRange>,
/// Log buffer region used by the shim.
pub log_buffer: MemoryRange,
/// Memory to be used for the heap.
pub heap: MemoryRange,
}

impl ShimParams {
Expand Down Expand Up @@ -135,6 +137,8 @@ impl ShimParams {
page_tables_size,
log_buffer_start,
log_buffer_size,
heap_start_offset,
heap_size,
} = raw;

let isolation_type = get_isolation_type(supported_isolation_type);
Expand All @@ -158,6 +162,11 @@ impl ShimParams {
MemoryRange::new(base..base + log_buffer_size)
};

let heap = {
let base = shim_base_address.wrapping_add_signed(heap_start_offset);
MemoryRange::new(base..base + heap_size)
};

Self {
kernel_entry_address: shim_base_address.wrapping_add_signed(kernel_entry_offset),
cmdline_base: shim_base_address.wrapping_add_signed(cmdline_offset),
Expand All @@ -182,6 +191,7 @@ impl ShimParams {
bounce_buffer,
page_tables,
log_buffer,
heap,
}
}

Expand Down
9 changes: 9 additions & 0 deletions openhcl/openhcl_boot/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,13 @@
#![cfg_attr(minimal_rt, no_std, no_main)]
// UNSAFETY: Interacting with low level hardware and bootloader primitives.
#![expect(unsafe_code)]
// Allow the allocator api when compiling with `RUSTFLAGS="--cfg nightly"`. This
// is used for some miri tests for testing the bump allocator.
//
// Do not use a normal feature, as that shows errors with rust-analyzer since
// most people are using stable and enable all features. We could remove this
// once the allocator_api feature is stable.
#![cfg_attr(nightly, feature(allocator_api))]

mod arch;
mod boot_logger;
Expand All @@ -20,6 +27,8 @@ mod rt;
mod sidecar;
mod single_threaded;

extern crate alloc;

use crate::arch::setup_vtl2_memory;
use crate::arch::setup_vtl2_vp;
#[cfg(target_arch = "x86_64")]
Expand Down
4 changes: 4 additions & 0 deletions vm/loader/loader_defs/src/shim.rs
Original file line number Diff line number Diff line change
Expand Up @@ -61,6 +61,10 @@ pub struct ShimParamsRaw {
pub log_buffer_start: i64,
/// The size of the persisted bootshim log buffer.
pub log_buffer_size: u64,
/// The offset to the start of the bootshim heap.
pub heap_start_offset: i64,
/// The size of the bootshim heap.
pub heap_size: u64,
}

open_enum! {
Expand Down
Loading
Loading