From 2a4dd9dbe130e67ac2cc02c3626dd48412f59bef Mon Sep 17 00:00:00 2001
From: xarantolus
Date: Thu, 7 May 2026 07:09:48 +0000
Subject: [PATCH 01/13] Add tests demonstrating 7 reachable bugs in data
 structures

Tests added (all currently fail on the unfixed code):

- BitAlloc::alloc(N*BITS_PER_WORD) panics on word-aligned full allocation
  (alloc_full_two_words, alloc_full_three_words). Reachable from PFA when
  allocating all available pages.
- Vec::at2_mut / at3_mut return Some(&mut _) for out-of-bounds indices (UB)
  despite the doc-comment promising None for OOB.
- IndexMap::get2_mut / get3_mut panic on OOB indices via split_at_mut /
  array indexing, inconsistent with the rest of Get/GetMut returning None.
- BitReclaimMap::insert_with leaks BitAlloc bits on closure error. The
  closure is user-supplied; sustained errors exhaust the allocator.

No production code is changed; this commit only adds failing tests that
serve as regression reproducers for the issues.

Co-Authored-By: Claude Opus 4.7 (1M context)
---
 src/types/array.rs  | 85 +++++++++++++++++++++++++++++++++++++++++++++
 src/types/bitset.rs | 24 +++++++++++++
 2 files changed, 109 insertions(+)

diff --git a/src/types/array.rs b/src/types/array.rs
index b7956c5..4a5d535 100644
--- a/src/types/array.rs
+++ b/src/types/array.rs
@@ -1147,4 +1147,89 @@ mod tests {
             assert_eq!(vec.at(i).unwrap().value, 42);
         }
     }
+
+    // -------- Vec::at2_mut / at3_mut bounds tests --------
+
+    #[test]
+    fn at2_mut_out_of_bounds_returns_none() {
+        // The doc-comment promises Some(...)/Some(...) only for in-bounds disjoint indices.
+        // Without bounds-checking, at2_mut returns Some(&mut _) for any index in
+        // [len, capacity), which is UB (constructing &mut T over uninitialized memory).
+        let mut vec = Vec::<u32, 4>::new();
+        vec.push(7).unwrap();
+        // len=1, so index 2 is out of bounds.
+        let (a, b) = vec.at2_mut(0, 2);
+        assert!(a.is_some(), "index 0 is in-bounds");
+        assert!(
+            b.is_none(),
+            "index 2 should be out-of-bounds (len=1) and return None"
+        );
+    }
+
+    #[test]
+    fn at3_mut_out_of_bounds_returns_none() {
+        let mut vec = Vec::<u32, 4>::new();
+        vec.push(7).unwrap();
+        vec.push(8).unwrap();
+        // len=2, so index 3 is out of bounds.
+        let (a, b, c) = vec.at3_mut(0, 1, 3);
+        assert!(a.is_some());
+        assert!(b.is_some());
+        assert!(
+            c.is_none(),
+            "index 3 should be out-of-bounds (len=2) and return None"
+        );
+    }
+
+    // -------- IndexMap get2_mut / get3_mut OOB tests --------
+
+    use super::IndexMap;
+    use crate::types::traits::GetMut;
+
+    #[test]
+    fn indexmap_get2_mut_oob_does_not_panic() {
+        // Inconsistent with the rest of Get/GetMut (which return None for OOB):
+        // get2_mut panics inside split_at_mut when an index is past N.
+        let mut m: IndexMap<u32, 10> = IndexMap::new();
+        m.raw_insert(0, 10).unwrap();
+        let (a, b) = m.get2_mut(0usize, 10usize);
+        assert!(a.is_some());
+        assert!(b.is_none());
+    }
+
+    #[test]
+    fn indexmap_get3_mut_oob_does_not_panic() {
+        // Same shape as get2_mut: get3_mut panics on direct array indexing for OOB indices.
+        let mut m: IndexMap<u32, 10> = IndexMap::new();
+        m.raw_insert(0, 10).unwrap();
+        let (a, b, c) = m.get3_mut(0usize, 10usize, 11usize);
+        assert!(a.is_some());
+        assert!(b.is_none());
+        assert!(c.is_none());
+    }
+
+    // -------- BitReclaimMap bit-leak via insert_with closure error --------
+
+    use super::BitReclaimMap;
+
+    #[test]
+    fn bitreclaim_insert_with_failed_closure_does_not_leak() {
+        // insert_with allocates a BitAlloc bit, calls the user closure, then raw_inserts
+        // into the IndexMap. If the closure returns Err, the allocated bit is never freed
+        // — sustained closure failures exhaust the allocator and break subsequent inserts
+        // even though no slot is in use.
+        use crate::error::Result as KResult;
+        let mut m: BitReclaimMap<usize, u32, 2> = BitReclaimMap::new();
+        for _ in 0..10 {
+            let r: KResult<usize> =
+                m.insert_with(|_idx| -> KResult<(usize, u32)> { Err(kerr!(OutOfMemory)) });
+            assert!(r.is_err());
+        }
+        // After 10 failed closures, both slots should still be available.
+        let id0 = m.insert(10).unwrap();
+        let id1 = m.insert(20).unwrap();
+        assert!(id0 < 2);
+        assert!(id1 < 2);
+        assert_ne!(id0, id1);
+    }
 }
diff --git a/src/types/bitset.rs b/src/types/bitset.rs
index f2380d5..840520d 100644
--- a/src/types/bitset.rs
+++ b/src/types/bitset.rs
@@ -172,6 +172,30 @@ mod tests {
         assert!(result.is_some());
     }
 
+    #[test]
+    fn alloc_full_two_words() {
+        // Allocate exactly 2*BITS_PER_WORD bits on a 2-word bitset where all bits are free.
+        // The marking-loop accesses l1[N] (out of bounds) when len % BITS_PER_WORD == 0
+        // after the middle-words pass, so this panics on the unfixed implementation.
+        let mut alloc = BitAlloc::<2>::new(2 * BitAlloc::<2>::BITS_PER_WORD).unwrap();
+        let r = alloc.alloc(2 * BitAlloc::<2>::BITS_PER_WORD);
+        assert_eq!(r, Some(0));
+        assert_eq!(alloc.l1[0], 0);
+        assert_eq!(alloc.l1[1], 0);
+    }
+
+    #[test]
+    fn alloc_full_three_words() {
+        // Same shape as `alloc_full_two_words` but with 3 words. Reachable from
+        // PFA when allocating all available pages on a multi-word configuration.
+        let mut alloc = BitAlloc::<3>::new(3 * BitAlloc::<3>::BITS_PER_WORD).unwrap();
+        let r = alloc.alloc(3 * BitAlloc::<3>::BITS_PER_WORD);
+        assert_eq!(r, Some(0));
+        assert_eq!(alloc.l1[0], 0);
+        assert_eq!(alloc.l1[1], 0);
+        assert_eq!(alloc.l1[2], 0);
+    }
+
     #[test]
     fn test_random_pattern() {
         const ITARATIONS: usize = 10000;

From 3a1be1d11de91c190f9f4ca4c6754b96d125e0f8 Mon Sep 17 00:00:00 2001
From: xarantolus
Date: Thu, 7 May 2026 07:10:49 +0000
Subject: [PATCH 02/13] Add FixedPool OOB regression test
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

FixedPool exposes N and WORDS as independent const generics, but BitAlloc
tracks WORDS*BITS_PER_WORD bits while blocks has only N slots. Any user
picking WORDS*BITS_PER_WORD > N hits an OOB panic on the (N+1)-th alloc
from `self.blocks[idx]` indexing past the end — violating the allocator's
"None on exhaustion" contract.

Currently no in-tree caller instantiates FixedPool, but the bug is real
for any future user.

Co-Authored-By: Claude Opus 4.7 (1M context)
---
 src/types/pool.rs | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)

diff --git a/src/types/pool.rs b/src/types/pool.rs
index 8a0fb41..351c306 100644
--- a/src/types/pool.rs
+++ b/src/types/pool.rs
@@ -212,3 +212,25 @@ impl DerefMut for Owned {
         unsafe { &mut *self.ptr }
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn fixed_pool_alloc_beyond_n_returns_none() {
+        // BitAlloc tracks WORDS*BITS_PER_WORD allocatable bits, but
+        // FixedPool::blocks only has N slots. The const generics
+        // `N` and `WORDS` are independent — a user picking WORDS*BITS_PER_WORD > N
+        // (e.g. WORDS=1 and N=4 here) will see the (N+1)-th alloc panic from
+        // `self.blocks[idx]` indexing past the end. Allocator API consumers
+        // expect None on exhaustion, not a panic.
+        let pool: FixedPool<u32, 4, 1> = FixedPool::new();
+        let _r0 = pool.alloc(0).unwrap();
+        let _r1 = pool.alloc(1).unwrap();
+        let _r2 = pool.alloc(2).unwrap();
+        let _r3 = pool.alloc(3).unwrap();
+        // The 5th alloc must return None, not panic on `blocks[4]` OOB.
+        assert!(pool.alloc(4).is_none());
+    }
+}

From cab2077a7489e67131074faf967371838a66e0bd Mon Sep 17 00:00:00 2001
From: xarantolus
Date: Thu, 7 May 2026 07:15:44 +0000
Subject: [PATCH 03/13] Show returned value in at3_mut OOB assertion failure

Mirror the at2_mut format-string pattern: when at3_mut returns Some(&mut _)
for an out-of-bounds index, the failure message now prints the garbage
value, making the UB obvious.

Co-Authored-By: Claude Opus 4.7 (1M context)
---
 src/types/array.rs | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/src/types/array.rs b/src/types/array.rs
index 4a5d535..07e6c5d 100644
--- a/src/types/array.rs
+++ b/src/types/array.rs
@@ -1162,7 +1162,8 @@ mod tests {
         assert!(a.is_some(), "index 0 is in-bounds");
         assert!(
             b.is_none(),
-            "index 2 should be out-of-bounds (len=1) and return None"
+            "index 2 should be out-of-bounds (len=1) and return None, but got {:?}",
+            b
         );
     }
 
@@ -1177,7 +1178,8 @@ mod tests {
         assert!(b.is_some());
         assert!(
             c.is_none(),
-            "index 3 should be out-of-bounds (len=2) and return None"
+            "index 3 should be out-of-bounds (len=2) and return None, but got {:?}",
+            c
         );
     }
 

From a9eb0b3fda41dd9f8accab6a7ac367501813eaef Mon Sep 17 00:00:00 2001
From: xarantolus
Date: Thu, 7 May 2026 07:30:37 +0000
Subject: [PATCH 04/13] Apply targeted fixes for the 8 failing tests

- BitAlloc::alloc: guard last-word write against `len % BITS_PER_WORD == 0`
  (run ends on a word boundary, no trailing partial word; trigger sketch
  below).
- Vec::at2_mut / at3_mut: bounds-check each index against self.len before
  constructing &mut T; previously returned references into uninitialized
  memory for out-of-bounds indices.
- IndexMap::get2_mut / get3_mut: return None for out-of-bounds indices
  instead of panicking inside split_at_mut / array indexing.
- BitReclaimMap::insert_with: release the BitAlloc bit when the
  user-supplied closure returns an error.
- FixedPool::alloc: bounds-check the BitAlloc-returned index against N
  before indexing blocks; the const generics WORDS and N are independent
  and BitAlloc tracks WORDS * BITS_PER_WORD bits.
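The BitAlloc trigger in miniature, lifted from the alloc_full_two_words
regression test added earlier in this series (a word-aligned full
allocation whose marking loop used to index l1[N] and panic):

    // All bits free; allocate the entire two-word range in one call.
    let mut alloc = BitAlloc::<2>::new(2 * BitAlloc::<2>::BITS_PER_WORD).unwrap();
    assert_eq!(alloc.alloc(2 * BitAlloc::<2>::BITS_PER_WORD), Some(0));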
Co-Authored-By: Claude Opus 4.7 (1M context)
---
 src/types/array.rs  | 98 ++++++++++++++++++++++++++++-----------------
 src/types/bitset.rs | 16 ++++----
 src/types/pool.rs   | 35 ++++++++--------
 3 files changed, 89 insertions(+), 60 deletions(-)

diff --git a/src/types/array.rs b/src/types/array.rs
index 07e6c5d..801e894 100644
--- a/src/types/array.rs
+++ b/src/types/array.rs
@@ -138,6 +138,13 @@ impl GetMut for IndexMap {
             return (None, None);
         }
 
+        if idx1 >= N {
+            return (None, if idx2 < N { self.data[idx2].as_mut() } else { None });
+        }
+        if idx2 >= N {
+            return (self.data[idx1].as_mut(), None);
+        }
+
         let (left, right) = self.data.split_at_mut(idx1.max(idx2));
 
         if idx1 < idx2 {
@@ -170,13 +177,21 @@ impl GetMut for IndexMap {
             return (None, None, None);
         }
 
-        let ptr1 = &mut self.data[idx1] as *mut Option<V>;
-        let ptr2 = &mut self.data[idx2] as *mut Option<V>;
-        let ptr3 = &mut self.data[idx3] as *mut Option<V>;
+        let base = self.data.as_mut_ptr();
+        let ptr1 = if idx1 < N { Some(unsafe { base.add(idx1) }) } else { None };
+        let ptr2 = if idx2 < N { Some(unsafe { base.add(idx2) }) } else { None };
+        let ptr3 = if idx3 < N { Some(unsafe { base.add(idx3) }) } else { None };
 
-        // Safety: the elements at index1, index2 and index3 are nowhere else borrowed mutably by function contract.
-        // And they are disjoint because of the check above.
-        unsafe { ((*ptr1).as_mut(), (*ptr2).as_mut(), (*ptr3).as_mut()) }
+        // Safety: each pointer is only constructed when its index is < N, so it stays inside
+        // self.data. The three indices are pairwise distinct (check above), so the resulting
+        // references are disjoint.
+        unsafe {
+            (
+                ptr1.and_then(|p| (*p).as_mut()),
+                ptr2.and_then(|p| (*p).as_mut()),
+                ptr3.and_then(|p| (*p).as_mut()),
+            )
+        }
     }
 }
 
@@ -546,12 +561,15 @@ impl Vec {
             return (None, None);
         }
 
-        let ptr1 = self.at_mut_unchecked(index1);
-        let ptr2 = self.at_mut_unchecked(index2);
+        let in1 = index1 < self.len;
+        let in2 = index2 < self.len;
+        let ptr1 = if in1 { Some(self.at_mut_unchecked(index1)) } else { None };
+        let ptr2 = if in2 { Some(self.at_mut_unchecked(index2)) } else { None };
 
-        // Safety: the elements at index1 and index2 are nowhere else borrowed mutably by function contract.
-        // And they are disjoint because of the check above.
-        unsafe { (Some(&mut *ptr1), Some(&mut *ptr2)) }
+        // Safety: each pointer is only constructed when the corresponding index is < self.len,
+        // so it points to an initialized slot. The two indices are pairwise distinct (check
+        // above), so the resulting references are disjoint.
+        unsafe { (ptr1.map(|p| &mut *p), ptr2.map(|p| &mut *p)) }
     }
 
     /// Get disjoint mutable references to the values at the given indices.
@@ -572,13 +590,23 @@ impl Vec {
             return (None, None, None);
         }
 
-        let ptr1 = self.at_mut_unchecked(index1);
-        let ptr2 = self.at_mut_unchecked(index2);
-        let ptr3 = self.at_mut_unchecked(index3);
+        let in1 = index1 < self.len;
+        let in2 = index2 < self.len;
+        let in3 = index3 < self.len;
+        let ptr1 = if in1 { Some(self.at_mut_unchecked(index1)) } else { None };
+        let ptr2 = if in2 { Some(self.at_mut_unchecked(index2)) } else { None };
+        let ptr3 = if in3 { Some(self.at_mut_unchecked(index3)) } else { None };
 
-        // Safety: the elements at index1, index2 and index3 are nowhere else borrowed mutably by function contract.
-        // And they are disjoint because of the check above.
-        unsafe { (Some(&mut *ptr1), Some(&mut *ptr2), Some(&mut *ptr3)) }
+        // Safety: each pointer is only constructed when the corresponding index is < self.len,
+        // so it points to an initialized slot. The three indices are pairwise distinct (check
+        // above), so the resulting references are disjoint.
+        unsafe {
+            (
+                ptr1.map(|p| &mut *p),
+                ptr2.map(|p| &mut *p),
+                ptr3.map(|p| &mut *p),
+            )
+        }
     }
 
     /// Swap the values at the given indices.
@@ -763,8 +791,19 @@ impl BitReclaimMap {
 impl<K, V, const N: usize> BitReclaimMap<K, V, N> {
     pub fn insert_with(&mut self, f: impl FnOnce(usize) -> Result<(K, V)>) -> Result<K> {
         let idx = self.free.alloc(1).ok_or(kerr!(ENOMEM))?;
-        let (key, value) = f(idx)?;
-        self.map.raw_insert(idx, value)?;
+        // The closure is user-supplied and may return an error; release the bit so a
+        // sustained run of closure failures cannot exhaust the allocator.
+        let (key, value) = match f(idx) {
+            Ok(kv) => kv,
+            Err(e) => {
+                self.free.free(idx, 1);
+                return Err(e);
+            }
+        };
+        if let Err(e) = self.map.raw_insert(idx, value) {
+            self.free.free(idx, 1);
+            return Err(e);
+        }
         Ok(key)
     }
 }
@@ -1152,12 +1191,8 @@ mod tests {
 
     #[test]
     fn at2_mut_out_of_bounds_returns_none() {
-        // The doc-comment promises Some(...)/Some(...) only for in-bounds disjoint indices.
-        // Without bounds-checking, at2_mut returns Some(&mut _) for any index in
-        // [len, capacity), which is UB (constructing &mut T over uninitialized memory).
        let mut vec = Vec::<u32, 4>::new();
         vec.push(7).unwrap();
-        // len=1, so index 2 is out of bounds.
         let (a, b) = vec.at2_mut(0, 2);
         assert!(a.is_some(), "index 0 is in-bounds");
         assert!(
@@ -1172,7 +1207,6 @@ mod tests {
         let mut vec = Vec::<u32, 4>::new();
         vec.push(7).unwrap();
         vec.push(8).unwrap();
-        // len=2, so index 3 is out of bounds.
         let (a, b, c) = vec.at3_mut(0, 1, 3);
         assert!(a.is_some());
         assert!(b.is_some());
@@ -1183,15 +1217,13 @@ mod tests {
         );
     }
 
-    // -------- IndexMap get2_mut / get3_mut OOB tests --------
+    // -------- IndexMap::get2_mut / get3_mut bounds tests --------
 
     use super::IndexMap;
     use crate::types::traits::GetMut;
 
     #[test]
-    fn indexmap_get2_mut_oob_does_not_panic() {
-        // Inconsistent with the rest of Get/GetMut (which return None for OOB):
-        // get2_mut panics inside split_at_mut when an index is past N.
+    fn indexmap_get2_mut_out_of_bounds_returns_none() {
         let mut m: IndexMap<u32, 10> = IndexMap::new();
         m.raw_insert(0, 10).unwrap();
         let (a, b) = m.get2_mut(0usize, 10usize);
@@ -1200,8 +1232,7 @@ mod tests {
     }
 
     #[test]
-    fn indexmap_get3_mut_oob_does_not_panic() {
-        // Same shape as get2_mut: get3_mut panics on direct array indexing for OOB indices.
+    fn indexmap_get3_mut_out_of_bounds_returns_none() {
         let mut m: IndexMap<u32, 10> = IndexMap::new();
         m.raw_insert(0, 10).unwrap();
         let (a, b, c) = m.get3_mut(0usize, 10usize, 11usize);
@@ -1210,16 +1241,12 @@ mod tests {
         assert!(c.is_none());
     }
 
-    // -------- BitReclaimMap bit-leak via insert_with closure error --------
+    // -------- BitReclaimMap insert_with bit-leak test --------
 
     use super::BitReclaimMap;
 
     #[test]
     fn bitreclaim_insert_with_failed_closure_does_not_leak() {
-        // insert_with allocates a BitAlloc bit, calls the user closure, then raw_inserts
-        // into the IndexMap. If the closure returns Err, the allocated bit is never freed
-        // — sustained closure failures exhaust the allocator and break subsequent inserts
-        // even though no slot is in use.
         use crate::error::Result as KResult;
         let mut m: BitReclaimMap<usize, u32, 2> = BitReclaimMap::new();
         for _ in 0..10 {
@@ -1227,7 +1254,6 @@ mod tests {
                 m.insert_with(|_idx| -> KResult<(usize, u32)> { Err(kerr!(OutOfMemory)) });
             assert!(r.is_err());
         }
-        // After 10 failed closures, both slots should still be available.
         let id0 = m.insert(10).unwrap();
         let id1 = m.insert(20).unwrap();
         assert!(id0 < 2);
diff --git a/src/types/bitset.rs b/src/types/bitset.rs
index 840520d..995636d 100644
--- a/src/types/bitset.rs
+++ b/src/types/bitset.rs
@@ -121,8 +121,13 @@ impl BitAlloc {
         }
 
         // Mark the remaining bits in the last word as used.
-        self.l1[idx] &= !((!0usize)
-            .unbounded_shl((Self::BITS_PER_WORD - (len % Self::BITS_PER_WORD)) as u32));
+        // Guard against `len % BITS_PER_WORD == 0`, which means the run ended
+        // exactly on a word boundary — there is no trailing partial word, and
+        // unguarded `self.l1[idx]` would index one past the last word.
+        if len % Self::BITS_PER_WORD != 0 {
+            self.l1[idx] &= !((!0usize)
+                .unbounded_shl((Self::BITS_PER_WORD - (len % Self::BITS_PER_WORD)) as u32));
+        }
         return Some(start);
     }
 }
@@ -172,11 +177,10 @@ mod tests {
         assert!(result.is_some());
     }
 
+    // -------- alloc on word-boundary-aligned full ranges --------
+
     #[test]
     fn alloc_full_two_words() {
-        // Allocate exactly 2*BITS_PER_WORD bits on a 2-word bitset where all bits are free.
-        // The marking-loop accesses l1[N] (out of bounds) when len % BITS_PER_WORD == 0
-        // after the middle-words pass, so this panics on the unfixed implementation.
         let mut alloc = BitAlloc::<2>::new(2 * BitAlloc::<2>::BITS_PER_WORD).unwrap();
         let r = alloc.alloc(2 * BitAlloc::<2>::BITS_PER_WORD);
         assert_eq!(r, Some(0));
@@ -186,8 +190,6 @@ mod tests {
 
     #[test]
     fn alloc_full_three_words() {
-        // Same shape as `alloc_full_two_words` but with 3 words. Reachable from
-        // PFA when allocating all available pages on a multi-word configuration.
         let mut alloc = BitAlloc::<3>::new(3 * BitAlloc::<3>::BITS_PER_WORD).unwrap();
         let r = alloc.alloc(3 * BitAlloc::<3>::BITS_PER_WORD);
         assert_eq!(r, Some(0));
diff --git a/src/types/pool.rs b/src/types/pool.rs
index 351c306..8484d5b 100644
--- a/src/types/pool.rs
+++ b/src/types/pool.rs
@@ -61,16 +61,22 @@ impl FixedPool {
         // Safety: Alloc ensures that the index cannot be allocated until the next free.
         // A free can only happen when the Ref is dropped, as the function is not publicly accessible.
         // This guarantees that only one Ref can exist for a block at a time.
-        let idx = self.free.lock().alloc(1);
-        idx.map(|idx| {
-            let ptr = self.blocks[idx].get();
-            // Safety: A block can only be allocated once.
-            unsafe { ptr.write(MaybeUninit::new(new)) };
-            FixedPoolRef {
-                idx,
-                pool: self,
-                _marker: PhantomData,
-            }
+        let mut alloc = self.free.lock();
+        let idx = alloc.alloc(1)?;
+        // BitAlloc exposes WORDS * BITS_PER_WORD bits, which can exceed N.
+        // Release any out-of-range index so the pool reports exhaustion instead of panicking.
+        if idx >= N {
+            alloc.free(idx, 1);
+            return None;
+        }
+        drop(alloc);
+        let ptr = self.blocks[idx].get();
+        // Safety: A block can only be allocated once.
+        unsafe { ptr.write(MaybeUninit::new(new)) };
+        Some(FixedPoolRef {
+            idx,
+            pool: self,
+            _marker: PhantomData,
         })
     }
 
@@ -217,20 +223,15 @@ impl DerefMut for Owned {
 mod tests {
     use super::*;
 
+    // -------- FixedPool exhaustion under WORDS * BITS_PER_WORD > N --------
+
     #[test]
     fn fixed_pool_alloc_beyond_n_returns_none() {
-        // BitAlloc tracks WORDS*BITS_PER_WORD allocatable bits, but
-        // FixedPool::blocks only has N slots. The const generics
-        // `N` and `WORDS` are independent — a user picking WORDS*BITS_PER_WORD > N
-        // (e.g. WORDS=1 and N=4 here) will see the (N+1)-th alloc panic from
-        // `self.blocks[idx]` indexing past the end. Allocator API consumers
-        // expect None on exhaustion, not a panic.
         let pool: FixedPool<u32, 4, 1> = FixedPool::new();
         let _r0 = pool.alloc(0).unwrap();
         let _r1 = pool.alloc(1).unwrap();
         let _r2 = pool.alloc(2).unwrap();
         let _r3 = pool.alloc(3).unwrap();
-        // The 5th alloc must return None, not panic on `blocks[4]` OOB.
         assert!(pool.alloc(4).is_none());
     }
 }

From f05e457c652053fa496d38d77274291fb1ef1d3b Mon Sep 17 00:00:00 2001
From: xarantolus
Date: Thu, 7 May 2026 07:34:39 +0000
Subject: [PATCH 05/13] Simplify IndexMap::get3_mut to match original
 pointer-from-index pattern

Use `&mut self.data[idx] as *mut Option<V>` (coerced into a raw pointer)
instead of as_mut_ptr() + add(); now that bounds-checks gate the indexing,
the original pattern is cleaner and avoids unsafe pointer arithmetic.

Co-Authored-By: Claude Opus 4.7 (1M context)
---
 src/types/array.rs | 14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)

diff --git a/src/types/array.rs b/src/types/array.rs
index 801e894..08ee888 100644
--- a/src/types/array.rs
+++ b/src/types/array.rs
@@ -177,14 +177,12 @@ impl GetMut for IndexMap {
             return (None, None, None);
         }
 
-        let base = self.data.as_mut_ptr();
-        let ptr1 = if idx1 < N { Some(unsafe { base.add(idx1) }) } else { None };
-        let ptr2 = if idx2 < N { Some(unsafe { base.add(idx2) }) } else { None };
-        let ptr3 = if idx3 < N { Some(unsafe { base.add(idx3) }) } else { None };
-
-        // Safety: each pointer is only constructed when its index is < N, so it stays inside
-        // self.data. The three indices are pairwise distinct (check above), so the resulting
-        // references are disjoint.
+        let ptr1 = if idx1 < N { Some(&mut self.data[idx1] as *mut Option<V>) } else { None };
+        let ptr2 = if idx2 < N { Some(&mut self.data[idx2] as *mut Option<V>) } else { None };
+        let ptr3 = if idx3 < N { Some(&mut self.data[idx3] as *mut Option<V>) } else { None };
+
+        // Safety: each pointer comes from an in-bounds slot in self.data, and the three indices
+        // are pairwise distinct (check above), so the resulting references are disjoint.
         unsafe {
             (
                 ptr1.and_then(|p| (*p).as_mut()),

From f296325c664da68c01f6acf6c624b5c2a25a6e3f Mon Sep 17 00:00:00 2001
From: xarantolus
Date: Thu, 7 May 2026 07:37:41 +0000
Subject: [PATCH 06/13] Formatting

---
 src/types/array.rs | 57 ++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 48 insertions(+), 9 deletions(-)

diff --git a/src/types/array.rs b/src/types/array.rs
index 08ee888..9ef9004 100644
--- a/src/types/array.rs
+++ b/src/types/array.rs
@@ -139,7 +139,14 @@ impl GetMut for IndexMap {
         }
 
         if idx1 >= N {
-            return (None, if idx2 < N { self.data[idx2].as_mut() } else { None });
+            return (
+                None,
+                if idx2 < N {
+                    self.data[idx2].as_mut()
+                } else {
+                    None
+                },
+            );
         }
         if idx2 >= N {
             return (self.data[idx1].as_mut(), None);
@@ -177,9 +184,21 @@ impl GetMut for IndexMap {
             return (None, None, None);
         }
 
-        let ptr1 = if idx1 < N { Some(&mut self.data[idx1] as *mut Option<V>) } else { None };
-        let ptr2 = if idx2 < N { Some(&mut self.data[idx2] as *mut Option<V>) } else { None };
-        let ptr3 = if idx3 < N { Some(&mut self.data[idx3] as *mut Option<V>) } else { None };
+        let ptr1 = if idx1 < N {
+            Some(&mut self.data[idx1] as *mut Option<V>)
+        } else {
+            None
+        };
+        let ptr2 = if idx2 < N {
+            Some(&mut self.data[idx2] as *mut Option<V>)
+        } else {
+            None
+        };
+        let ptr3 = if idx3 < N {
+            Some(&mut self.data[idx3] as *mut Option<V>)
+        } else {
+            None
+        };
 
         // Safety: each pointer comes from an in-bounds slot in self.data, and the three indices
         // are pairwise distinct (check above), so the resulting references are disjoint.
@@ -561,8 +580,16 @@ impl Vec {
 
         let in1 = index1 < self.len;
         let in2 = index2 < self.len;
-        let ptr1 = if in1 { Some(self.at_mut_unchecked(index1)) } else { None };
-        let ptr2 = if in2 { Some(self.at_mut_unchecked(index2)) } else { None };
+        let ptr1 = if in1 {
+            Some(self.at_mut_unchecked(index1))
+        } else {
+            None
+        };
+        let ptr2 = if in2 {
+            Some(self.at_mut_unchecked(index2))
+        } else {
+            None
+        };
 
         // Safety: each pointer is only constructed when the corresponding index is < self.len,
         // so it points to an initialized slot. The two indices are pairwise distinct (check
@@ -591,9 +618,21 @@ impl Vec {
         let in1 = index1 < self.len;
         let in2 = index2 < self.len;
         let in3 = index3 < self.len;
-        let ptr1 = if in1 { Some(self.at_mut_unchecked(index1)) } else { None };
-        let ptr2 = if in2 { Some(self.at_mut_unchecked(index2)) } else { None };
-        let ptr3 = if in3 { Some(self.at_mut_unchecked(index3)) } else { None };
+        let ptr1 = if in1 {
+            Some(self.at_mut_unchecked(index1))
+        } else {
+            None
+        };
+        let ptr2 = if in2 {
+            Some(self.at_mut_unchecked(index2))
+        } else {
+            None
+        };
+        let ptr3 = if in3 {
+            Some(self.at_mut_unchecked(index3))
+        } else {
+            None
+        };
 
         // Safety: each pointer is only constructed when the corresponding index is < self.len,
         // so it points to an initialized slot. The three indices are pairwise distinct (check

From 303510ac7601c896c2b604f17c7ff25fd8f5f5c2 Mon Sep 17 00:00:00 2001
From: xarantolus
Date: Thu, 7 May 2026 07:46:49 +0000
Subject: [PATCH 07/13] Fix BitReclaimMap::insert bit-leak symmetric to
 insert_with

When raw_insert rejects an idx that BitAlloc handed out (idx >= N because
BitAlloc tracks N*BITS_PER_WORD bits while IndexMap has only N slots),
release the bit so leaks can't accumulate. Mirrors the insert_with fix.

The new test probes BitAlloc directly: after a failed insert it expects
the next free bit to be 2 (the index that raw_insert rejected),
confirming the bit was returned to the allocator.
Verified to fail before the fix (BitAlloc returns Some(3), proving bit 2
is leaked).

Co-Authored-By: Claude Opus 4.7 (1M context)
---
 src/types/array.rs | 24 +++++++++++++++++++++-
 1 file changed, 23 insertions(+), 1 deletion(-)

diff --git a/src/types/array.rs b/src/types/array.rs
index 9ef9004..667bbcb 100644
--- a/src/types/array.rs
+++ b/src/types/array.rs
@@ -814,7 +814,12 @@ impl BitReclaimMap {
     #[allow(dead_code)]
     pub fn insert(&mut self, value: V) -> Result<usize> {
         let idx = self.free.alloc(1).ok_or(kerr!(ENOMEM))?;
-        self.map.raw_insert(idx, value)?;
+        if let Err(e) = self.map.raw_insert(idx, value) {
+            // BitAlloc exposes more bits than IndexMap has slots; release any index
+            // that raw_insert rejects so a leaked bit can't accumulate across failures.
+            self.free.free(idx, 1);
+            return Err(e);
+        }
         Ok(idx)
     }
 
@@ -1282,6 +1287,23 @@ mod tests {
 
     use super::BitReclaimMap;
 
+    #[test]
+    fn bitreclaim_insert_failure_does_not_leak() {
+        let mut m: BitReclaimMap<usize, u32, 2> = BitReclaimMap::new();
+        m.insert(10).unwrap();
+        m.insert(20).unwrap();
+        // Third insert allocates bit 2 then fails (raw_insert rejects idx >= N).
+        assert!(m.insert(30).is_err());
+        // Probe BitAlloc directly: bit 2 must be free again.
+        let next_free = m.free.alloc(1);
+        assert_eq!(
+            next_free,
+            Some(2),
+            "expected bit 2 to be free after failed insert, but BitAlloc returned {:?}",
+            next_free
+        );
+    }
+
     #[test]
     fn bitreclaim_insert_with_failed_closure_does_not_leak() {
         use crate::error::Result as KResult;

From 58c41513b15dd8343cbc3156d8d030829efb4ce7 Mon Sep 17 00:00:00 2001
From: xarantolus
Date: Thu, 7 May 2026 07:54:38 +0000
Subject: [PATCH 08/13] Fix Vec::new_init to not leak inline clones on OOM
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

When length > N, the original code wrote N inline clones and only then
attempted to allocate the extra Box. If the allocation failed, vec.len was
still 0 → Drop called clear() which dropped nothing → the N clones leaked.

Reorder so the Box allocation happens first (no inline clones written on
OOM), and bump vec.len after each individual write so a panicking T::clone
leaves Drop with a consistent state.

Verified via the new clones==drops invariant test (fails before fix with
clones=2, drops=1; passes after).

Co-Authored-By: Claude Opus 4.7 (1M context)
---
 src/types/array.rs | 61 ++++++++++++++++++++++++++++++++++------------
 1 file changed, 48 insertions(+), 13 deletions(-)

diff --git a/src/types/array.rs b/src/types/array.rs
index 667bbcb..855692f 100644
--- a/src/types/array.rs
+++ b/src/types/array.rs
@@ -369,25 +369,25 @@ impl Vec {
             // Initialize all elements in the inline storage.
             for i in 0..length {
                 vec.data[i].write(value.clone());
+                // Bump len after each write so a panicking T::clone leaves Drop able
+                // to clean up the slots that were already initialized.
+                vec.len = i + 1;
             }
         } else {
-            // Initialize all elements in the inline storage.
+            // Allocate the extra storage first and install it on `vec` so any later
+            // failure (clone panic) leaves Drop with a consistent (len, extra) view,
+            // and so an OOM here cannot leak inline clones (none have been written yet).
+            let extra_len = length - N;
+            vec.extra = Box::new_slice_uninit(extra_len)?;
+
             for elem in &mut vec.data {
                 elem.write(value.clone());
+                vec.len += 1;
             }
 
-            // Check if we need to allocate extra storage.
-            if length - N > 0 {
-                // Allocate extra storage for the remaining elements.
-                let mut extra = Box::new_slice_uninit(length - N)?;
-
-                // Initialize all the required elements in the extra storage.
-                for i in N..length {
-                    extra[i - N].write(value.clone());
-                }
-
-                // Set the extra storage in the Vec.
-                vec.extra = extra;
+            for i in 0..extra_len {
+                vec.extra[i].write(value.clone());
+                vec.len += 1;
             }
         }
 
@@ -1229,6 +1229,41 @@ mod tests {
         }
     }
 
+    #[test]
+    fn new_init_oom_does_not_leak() {
+        struct Counted<'a> {
+            drops: &'a AtomicUsize,
+            clones: &'a AtomicUsize,
+        }
+        impl Clone for Counted<'_> {
+            fn clone(&self) -> Self {
+                self.clones.fetch_add(1, Ordering::SeqCst);
+                Counted { drops: self.drops, clones: self.clones }
+            }
+        }
+        impl Drop for Counted<'_> {
+            fn drop(&mut self) { self.drops.fetch_add(1, Ordering::SeqCst); }
+        }
+
+        setup_memory(4096);
+        let drops = AtomicUsize::new(0);
+        let clones = AtomicUsize::new(0);
+        let r = Vec::<Counted, 2>::new_init(
+            1_000_000_000,
+            Counted { drops: &drops, clones: &clones },
+        );
+        assert!(r.is_err());
+        let n_clones = clones.load(Ordering::SeqCst);
+        let n_drops = drops.load(Ordering::SeqCst);
+        assert_eq!(
+            n_drops,
+            n_clones + 1,
+            "leaked clones (clones={}, drops={})",
+            n_clones,
+            n_drops,
+        );
+    }
+
     // -------- Vec::at2_mut / at3_mut bounds tests --------

From 64f5bfd8d86113cf19d7705d5bb3810f90a4f0a9 Mon Sep 17 00:00:00 2001
From: xarantolus
Date: Thu, 7 May 2026 08:26:05 +0000
Subject: [PATCH 09/13] Format

---
 src/types/array.rs | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)

diff --git a/src/types/array.rs b/src/types/array.rs
index 855692f..5677298 100644
--- a/src/types/array.rs
+++ b/src/types/array.rs
@@ -1238,11 +1238,16 @@ mod tests {
         impl Clone for Counted<'_> {
             fn clone(&self) -> Self {
                 self.clones.fetch_add(1, Ordering::SeqCst);
-                Counted { drops: self.drops, clones: self.clones }
+                Counted {
+                    drops: self.drops,
+                    clones: self.clones,
+                }
             }
         }
         impl Drop for Counted<'_> {
-            fn drop(&mut self) { self.drops.fetch_add(1, Ordering::SeqCst); }
+            fn drop(&mut self) {
+                self.drops.fetch_add(1, Ordering::SeqCst);
+            }
         }
 
         setup_memory(4096);
@@ -1250,7 +1255,10 @@ mod tests {
         let clones = AtomicUsize::new(0);
         let r = Vec::<Counted, 2>::new_init(
             1_000_000_000,
-            Counted { drops: &drops, clones: &clones },
+            Counted {
+                drops: &drops,
+                clones: &clones,
+            },
         );
         assert!(r.is_err());
         let n_clones = clones.load(Ordering::SeqCst);

From d4f471f81ecc073694200b216e3cc29541481f38 Mon Sep 17 00:00:00 2001
From: xarantolus
Date: Thu, 7 May 2026 08:28:19 +0000
Subject: [PATCH 10/13] Remove section banner comments from test modules

Drop the `// -------- ... --------` group banners; test names already
describe what's being tested.
Co-Authored-By: Claude Opus 4.7 (1M context)
---
 src/types/array.rs  | 6 ------
 src/types/bitset.rs | 2 --
 src/types/pool.rs   | 2 --
 3 files changed, 10 deletions(-)

diff --git a/src/types/array.rs b/src/types/array.rs
index 5677298..68eecde 100644
--- a/src/types/array.rs
+++ b/src/types/array.rs
@@ -1272,8 +1272,6 @@ mod tests {
         );
     }
 
-    // -------- Vec::at2_mut / at3_mut bounds tests --------
-
     #[test]
     fn at2_mut_out_of_bounds_returns_none() {
         let mut vec = Vec::<u32, 4>::new();
@@ -1302,8 +1300,6 @@ mod tests {
         );
     }
 
-    // -------- IndexMap::get2_mut / get3_mut bounds tests --------
-
     use super::IndexMap;
     use crate::types::traits::GetMut;
 
@@ -1326,8 +1322,6 @@ mod tests {
         assert!(c.is_none());
     }
 
-    // -------- BitReclaimMap insert_with bit-leak test --------
-
     use super::BitReclaimMap;
 
     #[test]
diff --git a/src/types/bitset.rs b/src/types/bitset.rs
index 995636d..7ec9283 100644
--- a/src/types/bitset.rs
+++ b/src/types/bitset.rs
@@ -177,8 +177,6 @@ mod tests {
         assert!(result.is_some());
     }
 
-    // -------- alloc on word-boundary-aligned full ranges --------
-
     #[test]
     fn alloc_full_two_words() {
         let mut alloc = BitAlloc::<2>::new(2 * BitAlloc::<2>::BITS_PER_WORD).unwrap();
diff --git a/src/types/pool.rs b/src/types/pool.rs
index 8484d5b..facc7f2 100644
--- a/src/types/pool.rs
+++ b/src/types/pool.rs
@@ -223,8 +223,6 @@ impl DerefMut for Owned {
 mod tests {
     use super::*;
 
-    // -------- FixedPool exhaustion under WORDS * BITS_PER_WORD > N --------
-
     #[test]
     fn fixed_pool_alloc_beyond_n_returns_none() {
         let pool: FixedPool<u32, 4, 1> = FixedPool::new();

From 5395fdcd8d7dbfa413aa7b4108b0bdfad94c249f Mon Sep 17 00:00:00 2001
From: xarantolus
Date: Mon, 11 May 2026 15:18:10 +0000
Subject: [PATCH 11/13] Memory subsystem audit: 7 bugs found and fixed
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Per `audit-memory-report.md`:

- BestFitAllocator: double-free creates free-list cycle (BLOCKER)
- PFA bitset: free() accepts sub-begin addresses (silent corruption)
- Region::contains: returns true for unplaced regions
- no-MMU unmap was a no-op (per-task BestFit leak, BLOCKER)
- no-MMU AddressSpace had no Drop (PFA pages leaked forever, BLOCKER)
- no-MMU virt/phys translation accepted out-of-range addresses
- HAL cortex-m ArmStack: byte/word unit confusion in bounds checks

12 new tests added (78 passing total, up from 66 on this branch). 5 Kani
proofs validated locally, reverted before commit per cycle convention.

The BitReclaimMap insert_with bit-leak fix from the prior cycle on this
branch is load-bearing for thread-memory correctness — flagged in the
report.

Co-Authored-By: Claude Opus 4.7 (1M context)
---
 machine/cortex-m/src/native/sched.rs |  6 ++
 src/mem/alloc/bestfit.rs             | 27 ++++++++
 src/mem/pfa/bitset.rs                | 63 +++++++++++++++++-
 src/mem/vmm.rs                       | 40 ++++++++++-
 src/mem/vmm/nommu.rs                 | 99 ++++++++++++++++++++++++++--
 5 files changed, 226 insertions(+), 9 deletions(-)

diff --git a/machine/cortex-m/src/native/sched.rs b/machine/cortex-m/src/native/sched.rs
index 713bdd5..58feb6e 100644
--- a/machine/cortex-m/src/native/sched.rs
+++ b/machine/cortex-m/src/native/sched.rs
@@ -198,6 +198,12 @@ impl hal_api::stack::Stacklike for ArmStack {
         let top = NonNull::new(top.as_mut_ptr::<u32>())
             .ok_or(hal_api::PosixError::EINVAL)?;
 
+        // `size` is in bytes (per the Descriptor contract); `does_fit` and
+        // `in_bounds` work in u32 words. Convert here so the stack's internal
+        // unit stays consistent with `StackPtr::offset`.
+        let size = NonZero::new(size.get() / core::mem::size_of::<u32>())
+            .ok_or(hal_api::Error::default())?;
+
         let mut stack = Self {
             top,
             sp: StackPtr { offset: 0 },
diff --git a/src/mem/alloc/bestfit.rs b/src/mem/alloc/bestfit.rs
index 32d3d64..4676baf 100644
--- a/src/mem/alloc/bestfit.rs
+++ b/src/mem/alloc/bestfit.rs
@@ -344,6 +344,15 @@ impl super::Allocator for BestFitAllocator {
     /// `size` - The size of the block. (This is used to check if the size of the block is correct.)
     unsafe fn free(&mut self, ptr: NonNull<u8>, size: usize) {
         let block = unsafe { Self::control_ptr(ptr.cast()) };
+
+        // Walking the free list catches a double-free before it can self-loop the list
+        // and turn the next `malloc` into an infinite traversal.
+        let mut walk = self.head;
+        while let Some(p) = walk {
+            bug_on!(p == block, "double free");
+            walk = unsafe { p.cast::<FreeBlock>().as_ref().next };
+        }
+
         let meta = unsafe { block.cast::<FreeBlock>().as_mut() };
 
         // The next block of a free block is always the current head. We essentially insert the block at the beginning of the list.
@@ -680,6 +689,24 @@ mod tests {
         }
     }
 
+    #[test]
+    #[should_panic(expected = "double free")]
+    fn double_free_panics() {
+        let mut allocator = BestFitAllocator::new();
+        let range = alloc_range(4096);
+        unsafe {
+            allocator.add_range(&range).unwrap();
+        }
+
+        let ptr = unsafe { allocator.malloc::<u8>(128, 1, None).unwrap() };
+        unsafe {
+            allocator.free(ptr, 128);
+            // Without the defensive walk in free(), this re-insert builds a
+            // self-loop in the free list and the next malloc spins forever.
+            allocator.free(ptr, 128);
+        }
+    }
+
     #[test]
     fn multi_range_oom() {
         // This function allocates multiple ranges and then frees one of them randomly. And only then there is no oom.
diff --git a/src/mem/pfa/bitset.rs b/src/mem/pfa/bitset.rs
index c0fd825..fe65b80 100644
--- a/src/mem/pfa/bitset.rs
+++ b/src/mem/pfa/bitset.rs
@@ -66,10 +66,67 @@ impl super::Allocator for Allocator {
     }
 
     fn free(&mut self, addr: PhysAddr, page_count: usize) {
-        if !addr.is_multiple_of(super::PAGE_SIZE) {
-            panic!("Address must be page aligned");
-        }
+        bug_on!(
+            !addr.is_multiple_of(super::PAGE_SIZE),
+            "free address {} is not page-aligned",
+            addr
+        );
+        // diff() is absolute, so a sub-begin address would silently map to a
+        // bit elsewhere in the bitmap.
+        bug_on!(addr < self.begin, "free address {} below allocator begin {}", addr, self.begin);
         let idx = addr.diff(self.begin) / super::PAGE_SIZE;
         self.bitalloc.free(idx, page_count);
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::super::Allocator as _;
+    use super::*;
+
+    fn test_begin() -> PhysAddr {
+        let layout = std::alloc::Layout::from_size_align(
+            2 * 64 * super::super::PAGE_SIZE,
+            super::super::PAGE_SIZE,
+        )
+        .unwrap();
+        let ptr = unsafe { std::alloc::alloc(layout) };
+        PhysAddr::new(ptr as usize)
+    }
+
+    #[test]
+    fn alloc_free_roundtrip() {
+        let begin = test_begin();
+        let mut alloc = Allocator::<2>::new(begin).unwrap();
+
+        let a = alloc.alloc(1).unwrap();
+        let b = alloc.alloc(1).unwrap();
+        assert_ne!(a, b);
+
+        alloc.free(a, 1);
+        let c = alloc.alloc(1).unwrap();
+        assert_eq!(a, c, "freed page is returned by next alloc");
+    }
+
+    #[test]
+    fn alloc_returns_addresses_in_range() {
+        let begin = test_begin();
+        let mut alloc = Allocator::<1>::new(begin).unwrap();
+        let end = begin + 64 * super::super::PAGE_SIZE;
+
+        while let Some(addr) = alloc.alloc(1) {
+            assert!(addr >= begin && addr < end, "addr {addr} outside [{begin}, {end})");
+            assert!(addr.is_multiple_of(super::super::PAGE_SIZE), "addr {addr} not page-aligned");
+        }
+    }
+
+    #[test]
+    #[should_panic(expected = "below allocator begin")]
+    fn free_below_begin_panics() {
+        let begin = test_begin() + super::super::PAGE_SIZE;
+        let mut alloc = Allocator::<2>::new(begin).unwrap();
+        // diff() is absolute, so without the bound check a sub-begin address
+        // would silently clear a bit elsewhere in the bitmap.
+        alloc.free(begin - super::super::PAGE_SIZE, 1);
+    }
+}
diff --git a/src/mem/vmm.rs b/src/mem/vmm.rs
index 60b68f7..8a656bb 100644
--- a/src/mem/vmm.rs
+++ b/src/mem/vmm.rs
@@ -59,7 +59,45 @@ impl Region {
 
     #[allow(dead_code)]
     pub fn contains(&self, addr: VirtAddr) -> bool {
-        self.start().saturating_add(self.len()) > addr && addr >= self.start()
+        let Some(start) = self.start else {
+            return false;
+        };
+        start.saturating_add(self.len()) > addr && addr >= start
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn unplaced_region_contains_nothing() {
+        let r = Region::new(None, 100, Backing::Uninit, Perms::Read);
+        assert!(!r.contains(VirtAddr::new(0)));
+        assert!(!r.contains(VirtAddr::new(50)));
+        assert!(!r.contains(VirtAddr::new(100)));
+    }
+
+    #[test]
+    fn placed_region_contains_within_bounds() {
+        let r = Region::new(Some(VirtAddr::new(100)), 50, Backing::Uninit, Perms::Read);
+        assert!(!r.contains(VirtAddr::new(99)));
+        assert!(r.contains(VirtAddr::new(100)));
+        assert!(r.contains(VirtAddr::new(149)));
+        assert!(!r.contains(VirtAddr::new(150)));
+    }
+
+    #[test]
+    fn placed_region_saturates_at_usize_max() {
+        let r = Region::new(
+            Some(VirtAddr::new(usize::MAX - 10)),
+            100,
+            Backing::Uninit,
+            Perms::Read,
+        );
+        assert!(r.contains(VirtAddr::new(usize::MAX - 10)));
+        assert!(r.contains(VirtAddr::new(usize::MAX - 1)));
+        assert!(!r.contains(VirtAddr::new(usize::MAX)));
+    }
 }
diff --git a/src/mem/vmm/nommu.rs b/src/mem/vmm/nommu.rs
index e47533a..3c9fe2f 100644
--- a/src/mem/vmm/nommu.rs
+++ b/src/mem/vmm/nommu.rs
@@ -1,4 +1,4 @@
-use core::ptr::copy_nonoverlapping;
+use core::ptr::{NonNull, copy_nonoverlapping};
 
 use crate::hal::mem::{PhysAddr, VirtAddr};
 
@@ -55,7 +55,11 @@ impl vmm::AddressSpacelike for AddressSpace {
         Ok(start.into())
     }
 
-    fn unmap(&mut self, _region: &vmm::Region) -> Result<()> {
+    fn unmap(&mut self, region: &vmm::Region) -> Result<()> {
+        let virt = region.start.ok_or(kerr!(InvalidArgument))?;
+        let phys = self.virt_to_phys(virt).ok_or(kerr!(InvalidArgument))?;
+        let ptr = NonNull::new(phys.as_mut_ptr::<u8>()).ok_or(kerr!(InvalidArgument))?;
+        unsafe { self.allocator.free(ptr, region.len()) };
         Ok(())
     }
 
@@ -64,20 +68,105 @@ impl vmm::AddressSpacelike for AddressSpace {
     }
 
     fn phys_to_virt(&self, addr: PhysAddr) -> Option<VirtAddr> {
+        if addr < self.begin || addr >= self.end {
+            return None;
+        }
         addr.checked_sub(self.begin.as_usize())
             .map(|phys| VirtAddr::new(phys.as_usize()))
     }
 
     fn virt_to_phys(&self, addr: VirtAddr) -> Option<PhysAddr> {
-        self.begin.checked_add(addr.as_usize())
+        let phys = self.begin.checked_add(addr.as_usize())?;
+        if phys >= self.end {
+            return None;
+        }
+        Some(phys)
     }
 
     fn end(&self) -> VirtAddr {
-        // This should always succeed.
-        self.phys_to_virt(self.end).unwrap()
+        VirtAddr::new(self.end.diff(self.begin))
     }
 
     fn activate(&self) -> Result<()> {
         Ok(())
     }
 }
+
+impl Drop for AddressSpace {
+    fn drop(&mut self) {
+        // Without this the per-task page reservation returns to the PFA only on
+        // process death, which means PFA exhaustion under task churn.
+        let pgs = self.end.diff(self.begin) / pfa::PAGE_SIZE;
+        if pgs > 0 {
+            pfa::free_page(self.begin, pgs);
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::mem::vmm::{AddressSpacelike, Backing, Perms, Region};
+
+    fn make_addr_space(size: usize) -> AddressSpace {
+        let layout = std::alloc::Layout::from_size_align(size, core::mem::align_of::<usize>()).unwrap();
+        let ptr = unsafe { std::alloc::alloc(layout) };
+        let begin = PhysAddr::new(ptr as usize);
+        let end = begin + size;
+        let mut allocator = bestfit::BestFitAllocator::new();
+        unsafe { allocator.add_range(&(begin..end)).unwrap() };
+        AddressSpace {
+            begin,
+            end,
+            allocator,
+        }
+    }
+
+    #[test]
+    fn unmap_returns_space_to_allocator() {
+        let mut as_ = make_addr_space(4096);
+
+        let region = Region::new(None, 2048, Backing::Uninit, Perms::Read);
+        let phys = as_.map(region).unwrap();
+
+        let virt = as_.phys_to_virt(phys).unwrap();
+        let placed = Region::new(Some(virt), 2048, Backing::Uninit, Perms::Read);
+        as_.unmap(&placed).unwrap();
+
+        let region2 = Region::new(None, 2048, Backing::Uninit, Perms::Read);
+        as_.map(region2)
+            .expect("re-map after unmap should not OOM");
+    }
+
+    #[test]
+    fn unmap_unplaced_region_rejected() {
+        let mut as_ = make_addr_space(4096);
+        let region = Region::new(None, 128, Backing::Uninit, Perms::Read);
+        assert!(as_.unmap(&region).is_err());
+    }
+
+    #[test]
+    fn virt_to_phys_rejects_out_of_range() {
+        let as_ = make_addr_space(4096);
+        let size = as_.end.diff(as_.begin);
+        assert!(as_.virt_to_phys(VirtAddr::new(size)).is_none());
+        assert!(as_.virt_to_phys(VirtAddr::new(size + 1)).is_none());
+        assert!(as_.virt_to_phys(VirtAddr::new(usize::MAX)).is_none());
+    }
+
+    #[test]
+    fn phys_to_virt_rejects_out_of_range() {
+        let as_ = make_addr_space(4096);
+        assert!(as_.phys_to_virt(as_.end).is_none());
+        assert!(as_.phys_to_virt(as_.begin - 1).is_none());
+        assert!(as_.phys_to_virt(as_.end + 1).is_none());
+    }
+
+    #[test]
+    fn virt_phys_roundtrip() {
+        let as_ = make_addr_space(4096);
+        let v = VirtAddr::new(128);
+        let p = as_.virt_to_phys(v).unwrap();
+        assert_eq!(as_.phys_to_virt(p), Some(v));
+    }
+}

From 14df7f91d3775954f87ba13de311ab635135410c Mon Sep 17 00:00:00 2001
From: xarantolus
Date: Mon, 11 May 2026 15:24:31 +0000
Subject: [PATCH 12/13] Adapt audit cycles to POSIX error renames

Three sites still referenced the pre-POSIX error names after the rebase
onto current main:

- src/mem/vmm/nommu.rs::unmap: InvalidArgument -> EINVAL
- src/types/array.rs test: OutOfMemory -> ENOMEM
- machine/cortex-m/src/native/sched.rs::Stack::new: Error::default() ->
  PosixError::EINVAL

Co-Authored-By: Claude Opus 4.7 (1M context)
---
 machine/cortex-m/src/native/sched.rs | 2 +-
 src/mem/vmm/nommu.rs                 | 6 +++---
 src/types/array.rs                   | 2 +-
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/machine/cortex-m/src/native/sched.rs b/machine/cortex-m/src/native/sched.rs
index 58feb6e..c42910b 100644
--- a/machine/cortex-m/src/native/sched.rs
+++ b/machine/cortex-m/src/native/sched.rs
@@ -202,7 +202,7 @@ impl hal_api::stack::Stacklike for ArmStack {
         // `in_bounds` work in u32 words. Convert here so the stack's internal
         // unit stays consistent with `StackPtr::offset`.
         let size = NonZero::new(size.get() / core::mem::size_of::<u32>())
-            .ok_or(hal_api::Error::default())?;
+            .ok_or(hal_api::PosixError::EINVAL)?;
 
         let mut stack = Self {
             top,
diff --git a/src/mem/vmm/nommu.rs b/src/mem/vmm/nommu.rs
index 3c9fe2f..dbf8c84 100644
--- a/src/mem/vmm/nommu.rs
+++ b/src/mem/vmm/nommu.rs
@@ -56,9 +56,9 @@ impl vmm::AddressSpacelike for AddressSpace {
     }
 
     fn unmap(&mut self, region: &vmm::Region) -> Result<()> {
-        let virt = region.start.ok_or(kerr!(InvalidArgument))?;
-        let phys = self.virt_to_phys(virt).ok_or(kerr!(InvalidArgument))?;
-        let ptr = NonNull::new(phys.as_mut_ptr::<u8>()).ok_or(kerr!(InvalidArgument))?;
+        let virt = region.start.ok_or(kerr!(EINVAL))?;
+        let phys = self.virt_to_phys(virt).ok_or(kerr!(EINVAL))?;
+        let ptr = NonNull::new(phys.as_mut_ptr::<u8>()).ok_or(kerr!(EINVAL))?;
         unsafe { self.allocator.free(ptr, region.len()) };
         Ok(())
     }
diff --git a/src/types/array.rs b/src/types/array.rs
index 68eecde..ad9f859 100644
--- a/src/types/array.rs
+++ b/src/types/array.rs
@@ -1347,7 +1347,7 @@ mod tests {
         let mut m: BitReclaimMap<usize, u32, 2> = BitReclaimMap::new();
         for _ in 0..10 {
             let r: KResult<usize> =
-                m.insert_with(|_idx| -> KResult<(usize, u32)> { Err(kerr!(OutOfMemory)) });
+                m.insert_with(|_idx| -> KResult<(usize, u32)> { Err(kerr!(ENOMEM)) });
             assert!(r.is_err());
         }
         let id0 = m.insert(10).unwrap();

From e0e5a9ba423a3d51e99ea7200d06c93a4ddf5fe0 Mon Sep 17 00:00:00 2001
From: xarantolus
Date: Mon, 11 May 2026 15:24:52 +0000
Subject: [PATCH 13/13] Format

Co-Authored-By: Claude Opus 4.7 (1M context)
---
 src/mem/pfa/bitset.rs | 17 ++++++++++++++---
 src/mem/vmm/nommu.rs  |  6 +++---
 2 files changed, 17 insertions(+), 6 deletions(-)

diff --git a/src/mem/pfa/bitset.rs b/src/mem/pfa/bitset.rs
index fe65b80..883da98 100644
--- a/src/mem/pfa/bitset.rs
+++ b/src/mem/pfa/bitset.rs
@@ -73,7 +73,12 @@ impl super::Allocator for Allocator {
         );
         // diff() is absolute, so a sub-begin address would silently map to a
         // bit elsewhere in the bitmap.
-        bug_on!(addr < self.begin, "free address {} below allocator begin {}", addr, self.begin);
+        bug_on!(
+            addr < self.begin,
+            "free address {} below allocator begin {}",
+            addr,
+            self.begin
+        );
         let idx = addr.diff(self.begin) / super::PAGE_SIZE;
         self.bitalloc.free(idx, page_count);
     }
@@ -115,8 +120,14 @@ mod tests {
         let end = begin + 64 * super::super::PAGE_SIZE;
 
         while let Some(addr) = alloc.alloc(1) {
-            assert!(addr >= begin && addr < end, "addr {addr} outside [{begin}, {end})");
-            assert!(addr.is_multiple_of(super::super::PAGE_SIZE), "addr {addr} not page-aligned");
+            assert!(
+                addr >= begin && addr < end,
+                "addr {addr} outside [{begin}, {end})"
+            );
+            assert!(
+                addr.is_multiple_of(super::super::PAGE_SIZE),
+                "addr {addr} not page-aligned"
+            );
         }
     }
 
diff --git a/src/mem/vmm/nommu.rs b/src/mem/vmm/nommu.rs
index dbf8c84..fda6855 100644
--- a/src/mem/vmm/nommu.rs
+++ b/src/mem/vmm/nommu.rs
@@ -109,7 +109,8 @@ mod tests {
     use crate::mem::vmm::{AddressSpacelike, Backing, Perms, Region};
 
     fn make_addr_space(size: usize) -> AddressSpace {
-        let layout = std::alloc::Layout::from_size_align(size, core::mem::align_of::<usize>()).unwrap();
+        let layout =
+            std::alloc::Layout::from_size_align(size, core::mem::align_of::<usize>()).unwrap();
         let ptr = unsafe { std::alloc::alloc(layout) };
         let begin = PhysAddr::new(ptr as usize);
         let end = begin + size;
@@ -134,8 +135,7 @@ mod tests {
         as_.unmap(&placed).unwrap();
 
         let region2 = Region::new(None, 2048, Backing::Uninit, Perms::Read);
-        as_.map(region2)
-            .expect("re-map after unmap should not OOM");
+        as_.map(region2).expect("re-map after unmap should not OOM");
     }
 
     #[test]