diff --git a/machine/cortex-m/src/native/sched.rs b/machine/cortex-m/src/native/sched.rs
index 1e6476a..bf46855 100644
--- a/machine/cortex-m/src/native/sched.rs
+++ b/machine/cortex-m/src/native/sched.rs
@@ -284,6 +284,12 @@ impl hal_api::stack::Stacklike for ArmStack {
         let top = NonNull::new(top.as_mut_ptr::())
             .ok_or(hal_api::PosixError::EINVAL)?;
 
+        // `size` is in bytes (per the Descriptor contract); `does_fit` and
+        // `in_bounds` work in u32 words. Convert here so the stack's internal
+        // unit stays consistent with `StackPtr::offset`.
+        let size = NonZero::new(size.get() / core::mem::size_of::())
+            .ok_or(hal_api::PosixError::EINVAL)?;
+
         let mut stack = Self {
             top,
             sp: StackPtr { offset: 0 },
diff --git a/src/mem/alloc/bestfit.rs b/src/mem/alloc/bestfit.rs
index aadbea6..99c9877 100644
--- a/src/mem/alloc/bestfit.rs
+++ b/src/mem/alloc/bestfit.rs
@@ -387,6 +387,15 @@ impl super::Allocator for BestFitAllocator {
     /// `size` - The size of the block. (This is used to check if the size of the block is correct.)
     unsafe fn free(&mut self, ptr: NonNull, size: usize) {
         let block = unsafe { Self::control_ptr(ptr.cast()) };
+
+        // Walking the free list catches a double-free before it can self-loop the list
+        // and turn the next `malloc` into an infinite traversal.
+        let mut walk = self.head;
+        while let Some(p) = walk {
+            bug_on!(p == block, "double free");
+            walk = unsafe { p.cast::().as_ref().next };
+        }
+
         let meta = unsafe { block.cast::().as_mut() };
 
         // The next block of a free block is always the current head. We essentially insert the block at the beginning of the list.
@@ -891,6 +900,24 @@ mod tests {
         }
     }
 
+    #[test]
+    #[should_panic(expected = "double free")]
+    fn double_free_panics() {
+        let mut allocator = BestFitAllocator::new();
+        let range = alloc_range(4096);
+        unsafe {
+            allocator.add_range(&range).unwrap();
+        }
+
+        let ptr = unsafe { allocator.malloc::(128, 1, None).unwrap() };
+        unsafe {
+            allocator.free(ptr, 128);
+            // Without the defensive walk in free(), this re-insert builds a
+            // self-loop in the free list and the next malloc spins forever.
+            allocator.free(ptr, 128);
+        }
+    }
+
     #[test]
     fn multi_range_oom() {
         // This function allocates multiple ranges and then frees one of them randomly. And only then there is no oom.
diff --git a/src/mem/pfa/bitset.rs b/src/mem/pfa/bitset.rs
index c0fd825..883da98 100644
--- a/src/mem/pfa/bitset.rs
+++ b/src/mem/pfa/bitset.rs
@@ -66,10 +66,78 @@ impl super::Allocator for Allocator {
     }
 
     fn free(&mut self, addr: PhysAddr, page_count: usize) {
-        if !addr.is_multiple_of(super::PAGE_SIZE) {
-            panic!("Address must be page aligned");
-        }
+        bug_on!(
+            !addr.is_multiple_of(super::PAGE_SIZE),
+            "free address {} is not page-aligned",
+            addr
+        );
+        // diff() is absolute, so a sub-begin address would silently map to a
+        // bit elsewhere in the bitmap.
+        bug_on!(
+            addr < self.begin,
+            "free address {} below allocator begin {}",
+            addr,
+            self.begin
+        );
         let idx = addr.diff(self.begin) / super::PAGE_SIZE;
         self.bitalloc.free(idx, page_count);
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::super::Allocator as _;
+    use super::*;
+
+    fn test_begin() -> PhysAddr {
+        let layout = std::alloc::Layout::from_size_align(
+            2 * 64 * super::super::PAGE_SIZE,
+            super::super::PAGE_SIZE,
+        )
+        .unwrap();
+        let ptr = unsafe { std::alloc::alloc(layout) };
+        PhysAddr::new(ptr as usize)
+    }
+
+    #[test]
+    fn alloc_free_roundtrip() {
+        let begin = test_begin();
+        let mut alloc = Allocator::<2>::new(begin).unwrap();
+
+        let a = alloc.alloc(1).unwrap();
+        let b = alloc.alloc(1).unwrap();
+        assert_ne!(a, b);
+
+        alloc.free(a, 1);
+        let c = alloc.alloc(1).unwrap();
+        assert_eq!(a, c, "freed page is returned by next alloc");
+    }
+
+    #[test]
+    fn alloc_returns_addresses_in_range() {
+        let begin = test_begin();
+        let mut alloc = Allocator::<1>::new(begin).unwrap();
+        let end = begin + 64 * super::super::PAGE_SIZE;
+
+        while let Some(addr) = alloc.alloc(1) {
+            assert!(
+                addr >= begin && addr < end,
+                "addr {addr} outside [{begin}, {end})"
+            );
+            assert!(
+                addr.is_multiple_of(super::super::PAGE_SIZE),
+                "addr {addr} not page-aligned"
+            );
+        }
+    }
+
+    #[test]
+    #[should_panic(expected = "below allocator begin")]
+    fn free_below_begin_panics() {
+        let begin = test_begin() + super::super::PAGE_SIZE;
+        let mut alloc = Allocator::<2>::new(begin).unwrap();
+        // diff() is absolute, so without the bound check a sub-begin address
+        // would silently clear a bit elsewhere in the bitmap.
+        alloc.free(begin - super::super::PAGE_SIZE, 1);
+    }
+}
diff --git a/src/mem/vmm.rs b/src/mem/vmm.rs
index 60b68f7..8a656bb 100644
--- a/src/mem/vmm.rs
+++ b/src/mem/vmm.rs
@@ -59,7 +59,45 @@ impl Region {
 
     #[allow(dead_code)]
     pub fn contains(&self, addr: VirtAddr) -> bool {
-        self.start().saturating_add(self.len()) > addr && addr >= self.start()
+        let Some(start) = self.start else {
+            return false;
+        };
+        start.saturating_add(self.len()) > addr && addr >= start
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn unplaced_region_contains_nothing() {
+        let r = Region::new(None, 100, Backing::Uninit, Perms::Read);
+        assert!(!r.contains(VirtAddr::new(0)));
+        assert!(!r.contains(VirtAddr::new(50)));
+        assert!(!r.contains(VirtAddr::new(100)));
+    }
+
+    #[test]
+    fn placed_region_contains_within_bounds() {
+        let r = Region::new(Some(VirtAddr::new(100)), 50, Backing::Uninit, Perms::Read);
+        assert!(!r.contains(VirtAddr::new(99)));
+        assert!(r.contains(VirtAddr::new(100)));
+        assert!(r.contains(VirtAddr::new(149)));
+        assert!(!r.contains(VirtAddr::new(150)));
+    }
+
+    #[test]
+    fn placed_region_saturates_at_usize_max() {
+        let r = Region::new(
+            Some(VirtAddr::new(usize::MAX - 10)),
+            100,
+            Backing::Uninit,
+            Perms::Read,
+        );
+        assert!(r.contains(VirtAddr::new(usize::MAX - 10)));
+        assert!(r.contains(VirtAddr::new(usize::MAX - 1)));
+        assert!(!r.contains(VirtAddr::new(usize::MAX)));
     }
 }
 
diff --git a/src/mem/vmm/nommu.rs b/src/mem/vmm/nommu.rs
index 027c591..58d59e7 100644
--- a/src/mem/vmm/nommu.rs
+++ b/src/mem/vmm/nommu.rs
@@ -1,4 +1,4 @@
-use core::ptr::copy_nonoverlapping;
+use core::ptr::{NonNull, copy_nonoverlapping};
 
 use crate::hal::mem::{PhysAddr, VirtAddr};
 
@@ -62,7 +62,11 @@ impl vmm::AddressSpacelike for AddressSpace {
         Ok(start.into())
     }
 
-    fn unmap(&mut self, _region: &vmm::Region) -> Result<()> {
+    fn unmap(&mut self, region: &vmm::Region) -> Result<()> {
+        let virt = region.start.ok_or(kerr!(EINVAL))?;
+        let phys = self.virt_to_phys(virt).ok_or(kerr!(EINVAL))?;
+        let ptr = NonNull::new(phys.as_mut_ptr::()).ok_or(kerr!(EINVAL))?;
+        unsafe { self.allocator.free(ptr, region.len()) };
         Ok(())
     }
 
@@ -71,20 +75,105 @@
     }
 
     fn phys_to_virt(&self, addr: PhysAddr) -> Option {
+        if addr < self.begin || addr >= self.end {
+            return None;
+        }
         addr.checked_sub(self.begin.as_usize())
             .map(|phys| VirtAddr::new(phys.as_usize()))
     }
 
     fn virt_to_phys(&self, addr: VirtAddr) -> Option {
-        self.begin.checked_add(addr.as_usize())
+        let phys = self.begin.checked_add(addr.as_usize())?;
+        if phys >= self.end {
+            return None;
+        }
+        Some(phys)
     }
 
     fn end(&self) -> VirtAddr {
-        // This should always succeed.
-        self.phys_to_virt(self.end).unwrap()
+        VirtAddr::new(self.end.diff(self.begin))
     }
 
     fn activate(&self) -> Result<()> {
         Ok(())
     }
 }
+
+impl Drop for AddressSpace {
+    fn drop(&mut self) {
+        // Without this the per-task page reservation returns to the PFA only on
+        // process death, which means PFA exhaustion under task churn.
+        let pgs = self.end.diff(self.begin) / pfa::PAGE_SIZE;
+        if pgs > 0 {
+            pfa::free_page(self.begin, pgs);
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::mem::vmm::{AddressSpacelike, Backing, Perms, Region};
+
+    fn make_addr_space(size: usize) -> AddressSpace {
+        let layout =
+            std::alloc::Layout::from_size_align(size, core::mem::align_of::()).unwrap();
+        let ptr = unsafe { std::alloc::alloc(layout) };
+        let begin = PhysAddr::new(ptr as usize);
+        let end = begin + size;
+        let mut allocator = bestfit::BestFitAllocator::new();
+        unsafe { allocator.add_range(&(begin..end)).unwrap() };
+        AddressSpace {
+            begin,
+            end,
+            allocator,
+        }
+    }
+
+    #[test]
+    fn unmap_returns_space_to_allocator() {
+        let mut as_ = make_addr_space(4096);
+
+        let region = Region::new(None, 2048, Backing::Uninit, Perms::Read);
+        let phys = as_.map(region).unwrap();
+
+        let virt = as_.phys_to_virt(phys).unwrap();
+        let placed = Region::new(Some(virt), 2048, Backing::Uninit, Perms::Read);
+        as_.unmap(&placed).unwrap();
+
+        let region2 = Region::new(None, 2048, Backing::Uninit, Perms::Read);
+        as_.map(region2).expect("re-map after unmap should not OOM");
+    }
+
+    #[test]
+    fn unmap_unplaced_region_rejected() {
+        let mut as_ = make_addr_space(4096);
+        let region = Region::new(None, 128, Backing::Uninit, Perms::Read);
+        assert!(as_.unmap(&region).is_err());
+    }
+
+    #[test]
+    fn virt_to_phys_rejects_out_of_range() {
+        let as_ = make_addr_space(4096);
+        let size = as_.end.diff(as_.begin);
+        assert!(as_.virt_to_phys(VirtAddr::new(size)).is_none());
+        assert!(as_.virt_to_phys(VirtAddr::new(size + 1)).is_none());
+        assert!(as_.virt_to_phys(VirtAddr::new(usize::MAX)).is_none());
+    }
+
+    #[test]
+    fn phys_to_virt_rejects_out_of_range() {
+        let as_ = make_addr_space(4096);
+        assert!(as_.phys_to_virt(as_.end).is_none());
+        assert!(as_.phys_to_virt(as_.begin - 1).is_none());
+        assert!(as_.phys_to_virt(as_.end + 1).is_none());
+    }
+
+    #[test]
+    fn virt_phys_roundtrip() {
+        let as_ = make_addr_space(4096);
+        let v = VirtAddr::new(128);
+        let p = as_.virt_to_phys(v).unwrap();
+        assert_eq!(as_.phys_to_virt(p), Some(v));
+    }
+}
diff --git a/src/types/array.rs b/src/types/array.rs
index 01a0aef..19d00bc 100644
--- a/src/types/array.rs
+++ b/src/types/array.rs
@@ -138,6 +138,20 @@ impl GetMut for IndexMap {
             return (None, None);
         }
 
+        if idx1 >= N {
+            return (
+                None,
+                if idx2 < N {
+                    self.data[idx2].as_mut()
+                } else {
+                    None
+                },
+            );
+        }
+        if idx2 >= N {
+            return (self.data[idx1].as_mut(), None);
+        }
+
         let (left, right) = self.data.split_at_mut(idx1.max(idx2));
 
         if idx1 < idx2 {
@@ -170,13 +184,31 @@ impl GetMut for IndexMap {
             return (None, None, None);
         }
 
-        let ptr1 = &mut self.data[idx1] as *mut Option;
-        let ptr2 = &mut self.data[idx2] as *mut Option;
-        let ptr3 = &mut self.data[idx3] as *mut Option;
+        let ptr1 = if idx1 < N {
+            Some(&mut self.data[idx1] as *mut Option)
+        } else {
+            None
+        };
+        let ptr2 = if idx2 < N {
+            Some(&mut self.data[idx2] as *mut Option)
+        } else {
+            None
+        };
+        let ptr3 = if idx3 < N {
+            Some(&mut self.data[idx3] as *mut Option)
+        } else {
+            None
+        };
 
-        // Safety: the elements at index1, index2 and index3 are nowhere else borrowed mutably by function contract.
-        // And they are disjoint because of the check above.
-        unsafe { ((*ptr1).as_mut(), (*ptr2).as_mut(), (*ptr3).as_mut()) }
+        // Safety: each pointer comes from an in-bounds slot in self.data, and the three indices
+        // are pairwise distinct (check above), so the resulting references are disjoint.
+        unsafe {
+            (
+                ptr1.and_then(|p| (*p).as_mut()),
+                ptr2.and_then(|p| (*p).as_mut()),
+                ptr3.and_then(|p| (*p).as_mut()),
+            )
+        }
     }
 }
 
@@ -337,25 +369,25 @@ impl Vec {
             // Initialize all elements in the inline storage.
             for i in 0..length {
                 vec.data[i].write(value.clone());
+                // Bump len after each write so a panicking T::clone leaves Drop able
+                // to clean up the slots that were already initialized.
+                vec.len = i + 1;
             }
         } else {
-            // Initialize all elements in the inline storage.
+            // Allocate the extra storage first and install it on `vec` so any later
+            // failure (clone panic) leaves Drop with a consistent (len, extra) view,
+            // and so an OOM here cannot leak inline clones (none have been written yet).
+            let extra_len = length - N;
+            vec.extra = Box::new_slice_uninit(extra_len)?;
+
             for elem in &mut vec.data {
                 elem.write(value.clone());
+                vec.len += 1;
             }
 
-            // Check if we need to allocate extra storage.
-            if length - N > 0 {
-                // Allocate extra storage for the remaining elements.
-                let mut extra = Box::new_slice_uninit(length - N)?;
-
-                // Initialize all the required elements in the extra storage.
-                for i in N..length {
-                    extra[i - N].write(value.clone());
-                }
-
-                // Set the extra storage in the Vec.
-                vec.extra = extra;
+            for i in 0..extra_len {
+                vec.extra[i].write(value.clone());
+                vec.len += 1;
             }
         }
 
@@ -546,12 +578,23 @@ impl Vec {
             return (None, None);
         }
 
-        let ptr1 = self.at_mut_unchecked(index1);
-        let ptr2 = self.at_mut_unchecked(index2);
+        let in1 = index1 < self.len;
+        let in2 = index2 < self.len;
+        let ptr1 = if in1 {
+            Some(self.at_mut_unchecked(index1))
+        } else {
+            None
+        };
+        let ptr2 = if in2 {
+            Some(self.at_mut_unchecked(index2))
+        } else {
+            None
+        };
 
-        // Safety: the elements at index1 and index2 are nowhere else borrowed mutably by function contract.
-        // And they are disjoint because of the check above.
-        unsafe { (Some(&mut *ptr1), Some(&mut *ptr2)) }
+        // Safety: each pointer is only constructed when the corresponding index is < self.len,
+        // so it points to an initialized slot. The two indices are pairwise distinct (check
+        // above), so the resulting references are disjoint.
+        unsafe { (ptr1.map(|p| &mut *p), ptr2.map(|p| &mut *p)) }
     }
 
     /// Get disjoint mutable references to the values at the given indices.
@@ -572,13 +615,35 @@ impl Vec {
             return (None, None, None);
         }
 
-        let ptr1 = self.at_mut_unchecked(index1);
-        let ptr2 = self.at_mut_unchecked(index2);
-        let ptr3 = self.at_mut_unchecked(index3);
+        let in1 = index1 < self.len;
+        let in2 = index2 < self.len;
+        let in3 = index3 < self.len;
+        let ptr1 = if in1 {
+            Some(self.at_mut_unchecked(index1))
+        } else {
+            None
+        };
+        let ptr2 = if in2 {
+            Some(self.at_mut_unchecked(index2))
+        } else {
+            None
+        };
+        let ptr3 = if in3 {
+            Some(self.at_mut_unchecked(index3))
+        } else {
+            None
+        };
 
-        // Safety: the elements at index1, index2 and index3 are nowhere else borrowed mutably by function contract.
-        // And they are disjoint because of the check above.
-        unsafe { (Some(&mut *ptr1), Some(&mut *ptr2), Some(&mut *ptr3)) }
+        // Safety: each pointer is only constructed when the corresponding index is < self.len,
+        // so it points to an initialized slot. The three indices are pairwise distinct (check
+        // above), so the resulting references are disjoint.
+        unsafe {
+            (
+                ptr1.map(|p| &mut *p),
+                ptr2.map(|p| &mut *p),
+                ptr3.map(|p| &mut *p),
+            )
+        }
     }
 
     /// Swap the values at the given indices.
@@ -749,7 +814,12 @@ impl BitReclaimMap {
     #[allow(dead_code)]
     pub fn insert(&mut self, value: V) -> Result {
         let idx = self.free.alloc(1).ok_or(kerr!(ENOMEM))?;
-        self.map.raw_insert(idx, value)?;
+        if let Err(e) = self.map.raw_insert(idx, value) {
+            // BitAlloc exposes more bits than IndexMap has slots; release any index
+            // that raw_insert rejects so a leaked bit can't accumulate across failures.
+            self.free.free(idx, 1);
+            return Err(e);
+        }
         Ok(idx)
     }
 
@@ -774,8 +844,19 @@ impl BitReclaimMap {
     pub fn insert_with(&mut self, f: impl FnOnce(usize) -> Result<(K, V)>) -> Result {
         let idx = self.free.alloc(1).ok_or(kerr!(ENOMEM))?;
-        let (key, value) = f(idx)?;
-        self.map.raw_insert(idx, value)?;
+        // The closure is user-supplied and may return an error; release the bit so a
+        // sustained run of closure failures cannot exhaust the allocator.
+        let (key, value) = match f(idx) {
+            Ok(kv) => kv,
+            Err(e) => {
+                self.free.free(idx, 1);
+                return Err(e);
+            }
+        };
+        if let Err(e) = self.map.raw_insert(idx, value) {
+            self.free.free(idx, 1);
+            return Err(e);
+        }
         Ok(key)
     }
 }
@@ -1158,4 +1239,132 @@ mod tests {
             assert_eq!(vec.at(i).unwrap().value, 42);
         }
     }
+
+    #[test]
+    fn new_init_oom_does_not_leak() {
+        struct Counted<'a> {
+            drops: &'a AtomicUsize,
+            clones: &'a AtomicUsize,
+        }
+        impl Clone for Counted<'_> {
+            fn clone(&self) -> Self {
+                self.clones.fetch_add(1, Ordering::SeqCst);
+                Counted {
+                    drops: self.drops,
+                    clones: self.clones,
+                }
+            }
+        }
+        impl Drop for Counted<'_> {
+            fn drop(&mut self) {
+                self.drops.fetch_add(1, Ordering::SeqCst);
+            }
+        }
+
+        setup_memory(4096);
+        let drops = AtomicUsize::new(0);
+        let clones = AtomicUsize::new(0);
+        let r = Vec::::new_init(
+            1_000_000_000,
+            Counted {
+                drops: &drops,
+                clones: &clones,
+            },
+        );
+        assert!(r.is_err());
+        let n_clones = clones.load(Ordering::SeqCst);
+        let n_drops = drops.load(Ordering::SeqCst);
+        assert_eq!(
+            n_drops,
+            n_clones + 1,
+            "leaked clones (clones={}, drops={})",
+            n_clones,
+            n_drops,
+        );
+    }
+
+    #[test]
+    fn at2_mut_out_of_bounds_returns_none() {
+        let mut vec = Vec::::new();
+        vec.push(7).unwrap();
+        let (a, b) = vec.at2_mut(0, 2);
+        assert!(a.is_some(), "index 0 is in-bounds");
+        assert!(
+            b.is_none(),
+            "index 2 should be out-of-bounds (len=1) and return None, but got {:?}",
+            b
+        );
+    }
+
+    #[test]
+    fn at3_mut_out_of_bounds_returns_none() {
+        let mut vec = Vec::::new();
+        vec.push(7).unwrap();
+        vec.push(8).unwrap();
+        let (a, b, c) = vec.at3_mut(0, 1, 3);
+        assert!(a.is_some());
+        assert!(b.is_some());
+        assert!(
+            c.is_none(),
+            "index 3 should be out-of-bounds (len=2) and return None, but got {:?}",
+            c
+        );
+    }
+
+    use super::IndexMap;
+    use crate::types::traits::GetMut;
+
+    #[test]
+    fn indexmap_get2_mut_out_of_bounds_returns_none() {
+        let mut m: IndexMap = IndexMap::new();
+        m.raw_insert(0, 10).unwrap();
+        let (a, b) = m.get2_mut(0usize, 10usize);
+        assert!(a.is_some());
+        assert!(b.is_none());
+    }
+
+    #[test]
+    fn indexmap_get3_mut_out_of_bounds_returns_none() {
+        let mut m: IndexMap = IndexMap::new();
+        m.raw_insert(0, 10).unwrap();
+        let (a, b, c) = m.get3_mut(0usize, 10usize, 11usize);
+        assert!(a.is_some());
+        assert!(b.is_none());
+        assert!(c.is_none());
+    }
+
+    use super::BitReclaimMap;
+
+    #[test]
+    fn bitreclaim_insert_failure_does_not_leak() {
+        let mut m: BitReclaimMap = BitReclaimMap::new();
+        m.insert(10).unwrap();
+        m.insert(20).unwrap();
+        // Third insert allocates bit 2 then fails (raw_insert rejects idx >= N).
+        assert!(m.insert(30).is_err());
+        // Probe BitAlloc directly: bit 2 must be free again.
+        let next_free = m.free.alloc(1);
+        assert_eq!(
+            next_free,
+            Some(2),
+            "expected bit 2 to be free after failed insert, but BitAlloc returned {:?}",
+            next_free
+        );
+    }
+
+    #[test]
+    fn bitreclaim_insert_with_failed_closure_does_not_leak() {
+        use crate::error::Result as KResult;
+        let mut m: BitReclaimMap = BitReclaimMap::new();
+        for _ in 0..10 {
+            let r: KResult =
+                m.insert_with(|_idx| -> KResult<(usize, u32)> { Err(kerr!(ENOMEM)) });
+            assert!(r.is_err());
+        }
+        let id0 = m.insert(10).unwrap();
+        let id1 = m.insert(20).unwrap();
+        assert!(id0 < 2);
+        assert!(id1 < 2);
+        assert_ne!(id0, id1);
+    }
 }
diff --git a/src/types/bitset.rs b/src/types/bitset.rs
index f2380d5..7ec9283 100644
--- a/src/types/bitset.rs
+++ b/src/types/bitset.rs
@@ -121,8 +121,13 @@ impl BitAlloc {
         }
 
         // Mark the remaining bits in the last word as used.
-        self.l1[idx] &= !((!0usize)
-            .unbounded_shl((Self::BITS_PER_WORD - (len % Self::BITS_PER_WORD)) as u32));
+        // Guard against `len % BITS_PER_WORD == 0`, which means the run ended
+        // exactly on a word boundary — there is no trailing partial word, and
+        // unguarded `self.l1[idx]` would index one past the last word.
+        if len % Self::BITS_PER_WORD != 0 {
+            self.l1[idx] &= !((!0usize)
+                .unbounded_shl((Self::BITS_PER_WORD - (len % Self::BITS_PER_WORD)) as u32));
+        }
         return Some(start);
     }
 }
@@ -172,6 +177,25 @@ mod tests {
         assert!(result.is_some());
     }
 
+    #[test]
+    fn alloc_full_two_words() {
+        let mut alloc = BitAlloc::<2>::new(2 * BitAlloc::<2>::BITS_PER_WORD).unwrap();
+        let r = alloc.alloc(2 * BitAlloc::<2>::BITS_PER_WORD);
+        assert_eq!(r, Some(0));
+        assert_eq!(alloc.l1[0], 0);
+        assert_eq!(alloc.l1[1], 0);
+    }
+
+    #[test]
+    fn alloc_full_three_words() {
+        let mut alloc = BitAlloc::<3>::new(3 * BitAlloc::<3>::BITS_PER_WORD).unwrap();
+        let r = alloc.alloc(3 * BitAlloc::<3>::BITS_PER_WORD);
+        assert_eq!(r, Some(0));
+        assert_eq!(alloc.l1[0], 0);
+        assert_eq!(alloc.l1[1], 0);
+        assert_eq!(alloc.l1[2], 0);
+    }
+
     #[test]
     fn test_random_pattern() {
         const ITARATIONS: usize = 10000;
diff --git a/src/types/pool.rs b/src/types/pool.rs
index 8a0fb41..facc7f2 100644
--- a/src/types/pool.rs
+++ b/src/types/pool.rs
@@ -61,16 +61,22 @@ impl FixedPool {
         // Safety: Alloc ensures that the index cannot be allocated until the next free.
         // A free can only happen when the Ref is dropped, as the function is not publicly accessible.
         // This guarantees that only one Ref can exist for a block at a time.
-        let idx = self.free.lock().alloc(1);
-        idx.map(|idx| {
-            let ptr = self.blocks[idx].get();
-            // Safety: A block can only be allocated once.
-            unsafe { ptr.write(MaybeUninit::new(new)) };
-            FixedPoolRef {
-                idx,
-                pool: self,
-                _marker: PhantomData,
-            }
+        let mut alloc = self.free.lock();
+        let idx = alloc.alloc(1)?;
+        // BitAlloc exposes WORDS * BITS_PER_WORD bits, which can exceed N.
+        // Release any out-of-range index so the pool reports exhaustion instead of panicking.
+        if idx >= N {
+            alloc.free(idx, 1);
+            return None;
+        }
+        drop(alloc);
+        let ptr = self.blocks[idx].get();
+        // Safety: A block can only be allocated once.
+        unsafe { ptr.write(MaybeUninit::new(new)) };
+        Some(FixedPoolRef {
+            idx,
+            pool: self,
+            _marker: PhantomData,
         })
     }
 
@@ -212,3 +218,18 @@ impl DerefMut for Owned {
         unsafe { &mut *self.ptr }
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn fixed_pool_alloc_beyond_n_returns_none() {
+        let pool: FixedPool = FixedPool::new();
+        let _r0 = pool.alloc(0).unwrap();
+        let _r1 = pool.alloc(1).unwrap();
+        let _r2 = pool.alloc(2).unwrap();
+        let _r3 = pool.alloc(3).unwrap();
+        assert!(pool.alloc(4).is_none());
+    }
+}