diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml
index f9d7cb4..205827b 100644
--- a/.github/workflows/main.yaml
+++ b/.github/workflows/main.yaml
@@ -29,8 +29,8 @@ jobs:
             mtools \
             lld \
             llvm \
-            cmake \
-            ninja-build \
+            cmake ninja-build
+            curl --proto '=https' --tlsv1.3 https://sh.rustup.rs -sSf | sh -s -- -y
       - name: Compile & Link (CMake)
         run: |
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 9507b40..ed6c520 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -107,10 +107,10 @@ set(EXECF_SOURCES
 set(MM_SOURCES
         mm/PMem.c
         mm/MemOps.c
-        mm/KernelHeap.c
         mm/VMem.c
         mm/StackGuard.c
         mm/MemPool.c
+        mm/KernelHeap.c
         mm/trace/StackTrace.c
         mm/security/Cerberus.c
         mm/PageFaultHandler.c
@@ -191,6 +191,11 @@ if(NOT EXCLUDE_EXTRA_OBJECTS)
     )
 endif()

+# ============================================================================
+# Rust Memory Management
+# ============================================================================
+add_subdirectory(mm/rust)
+
 # ============================================================================
 # Build Include Directories
 # ============================================================================
@@ -248,6 +253,9 @@ if(NOT EXCLUDE_EXTRA_OBJECTS)
     target_sources(voidframe.krnl PRIVATE ${OBJ_SOURCES})
 endif()

+# Link Rust heap library
+target_link_libraries(voidframe.krnl PRIVATE rust_heap)
+
 # Configure the linker to use ld.lld with proper arguments
 set_target_properties(voidframe.krnl PROPERTIES
         LINK_DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/voidframe.ld"
diff --git a/kernel/etc/Shell.c b/kernel/etc/Shell.c
index b181a4c..98cf542 100644
--- a/kernel/etc/Shell.c
+++ b/kernel/etc/Shell.c
@@ -34,6 +34,7 @@
 #include "sound/Generic.h"
 #include "stdlib.h"
 #include "xHCI/xHCI.h"
+#include "KernelHeapRust.h"

 #define DATE __DATE__
 #define TIME __TIME__
@@ -363,7 +364,24 @@ static void AllocHandler(const char * args) {
         KernelFree(size_str);
         return;
     }
-    KernelMemoryAlloc((uint32_t)size);
+    if (!KernelMemoryAlloc((uint32_t)size)) PrintKernelErrorF("Allocation for %d bytes failed\n", size);
+    KernelFree(size_str);
+}
+
+static void RsAllocHandler(const char * args) {
+    char* size_str = GetArg(args, 1);
+    if (!size_str) {
+        PrintKernel("Usage: rsalloc <size>\n");
+        KernelFree(size_str);
+        return;
+    }
+    int size = atoi(size_str);
+    if (size <= 0) {
+        PrintKernel("Usage: rsalloc <size>\n");
+        KernelFree(size_str);
+        return;
+    }
+    if (!rust_kmalloc((uint32_t)size)) PrintKernelErrorF("Allocation for %d bytes failed\n", size);
     KernelFree(size_str);
 }
@@ -1182,6 +1200,7 @@ static const ShellCommand commands[] = {\
     {"mkdir", MkdirHandler},
     {"touch", TouchHandler},
     {"alloc", AllocHandler},
+    {"rsalloc", RsAllocHandler},
     {"panic", PanicHandler},
     {"kill", KillHandler},
     {"rm", RmHandler},
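Note: the rsalloc handler above is the only in-tree caller of the Rust allocator so far. Below is a minimal sketch of how other kernel C code could drive the same API, assuming only the prototypes declared in mm/KernelHeapRust.h (next file in this diff) and the existing PrintKernelErrorF console helper; the CopyPathExample function and its buffer handling are illustrative and not part of this change.

// Sketch only: exercises the C-facing API from mm/KernelHeapRust.h.
#include "KernelHeapRust.h"

static int CopyPathExample(const char* path) {
    size_t len = 0;
    while (path[len]) len++;                      // no libc strlen in the kernel

    char* copy = rust_kmalloc(len + 1);           // returns NULL on failure
    if (!copy) {
        PrintKernelErrorF("Allocation for %d bytes failed\n", (int)(len + 1));
        return -1;
    }
    for (size_t i = 0; i <= len; i++) copy[i] = path[i];

    char* bigger = rust_krealloc(copy, 2 * (len + 1));  // on failure the old block stays valid
    if (bigger) copy = bigger;

    rust_kfree(copy);                             // block is poisoned and cached by the Rust side
    return 0;
}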
diff --git a/mm/KernelHeapRust.h b/mm/KernelHeapRust.h
new file mode 100644
index 0000000..bd94212
--- /dev/null
+++ b/mm/KernelHeapRust.h
@@ -0,0 +1,36 @@
+#ifndef KERNEL_HEAP_RUST_H
+#define KERNEL_HEAP_RUST_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Core allocation functions
+void* rust_kmalloc(size_t size);
+void rust_kfree(void* ptr);
+void* rust_krealloc(void* ptr, size_t new_size);
+void* rust_kcalloc(size_t count, size_t size);
+
+// Statistics and monitoring
+typedef struct {
+    size_t total_allocated;
+    size_t peak_allocated;
+    uint64_t alloc_count;
+    uint64_t free_count;
+    uint64_t cache_hits;
+    uint64_t cache_misses;
+    uint64_t coalesce_count;
+    uint64_t corruption_count;
+} HeapStats;
+
+void rust_heap_get_stats(HeapStats* stats);
+int rust_heap_validate(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // KERNEL_HEAP_RUST_H
\ No newline at end of file
diff --git a/mm/rust/.cargo/config.toml b/mm/rust/.cargo/config.toml
new file mode 100644
index 0000000..1ccbb2d
--- /dev/null
+++ b/mm/rust/.cargo/config.toml
@@ -0,0 +1,9 @@
+[build]
+target = "x86_64-unknown-none"
+
+[target.x86_64-unknown-none]
+rustflags = [
+    "-C", "code-model=kernel",
+    "-C", "relocation-model=static",
+    "-C", "target-feature=-mmx,-sse,-sse2,-sse3,-ssse3,-sse4.1,-sse4.2,-3dnow,-3dnowa,-avx,-avx2",
+]
\ No newline at end of file
diff --git a/mm/rust/CMakeLists.txt b/mm/rust/CMakeLists.txt
new file mode 100644
index 0000000..d60dca1
--- /dev/null
+++ b/mm/rust/CMakeLists.txt
@@ -0,0 +1,44 @@
+cmake_minimum_required(VERSION 3.20)
+
+# Find Rust and Cargo
+find_program(CARGO_EXECUTABLE cargo REQUIRED)
+find_program(RUSTC_EXECUTABLE rustc REQUIRED)
+
+# Set target triple based on architecture
+if(CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64|AMD64")
+    set(RUST_TARGET "x86_64-unknown-none")
+elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64|arm64")
+    set(RUST_TARGET "aarch64-unknown-none")
+else()
+    message(FATAL_ERROR "Unsupported architecture: ${CMAKE_SYSTEM_PROCESSOR}")
+endif()
+
+# Rust library target
+set(RUST_LIB_NAME "voidframe_mm")
+set(RUST_LIB_PATH "${CMAKE_CURRENT_SOURCE_DIR}/target/${RUST_TARGET}/release/lib${RUST_LIB_NAME}.a")
+
+# Custom command to build Rust library
+add_custom_command(
+    OUTPUT ${RUST_LIB_PATH}
+    COMMAND ${CARGO_EXECUTABLE} build --release --target ${RUST_TARGET}
+    WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
+    DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/Cargo.toml
+            ${CMAKE_CURRENT_SOURCE_DIR}/src/lib.rs
+            ${CMAKE_CURRENT_SOURCE_DIR}/src/heap.rs
+            ${CMAKE_CURRENT_SOURCE_DIR}/src/vmem_ffi.rs
+    COMMENT "Building Rust heap library"
+)
+
+# Create imported library target
+add_custom_target(rust_heap_build DEPENDS ${RUST_LIB_PATH})
+
+add_library(rust_heap STATIC IMPORTED GLOBAL)
+set_target_properties(rust_heap PROPERTIES
+    IMPORTED_LOCATION ${RUST_LIB_PATH}
+)
+
+add_dependencies(rust_heap rust_heap_build)
+
+# Install header
+install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/../KernelHeapRust.h
+        DESTINATION include/mm)
\ No newline at end of file
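Note: for the imported rust_heap library (libvoidframe_mm.a) to link into voidframe.krnl, the kernel must already export the C symbols the crate imports in its extern "C" blocks (src/heap.rs and src/vmem_ffi.rs further down in this diff). The sketch below is the C-side view of that contract, with types inferred from the Rust signatures (u64 -> uint64_t, *mut u8 -> pointer); the kernel's actual VMem and console headers remain the authoritative declarations.

// Inferred link-time contract; not a new header introduced by this diff.
#include <stdint.h>

void*    VMemAlloc(uint64_t size);
void     VMemFree(void* ptr, uint64_t size);
uint64_t VMemGetPhysAddr(uint64_t vaddr);
int32_t  VMemMap(uint64_t vaddr, uint64_t paddr, uint64_t flags);
int32_t  VMemUnmap(uint64_t vaddr, uint64_t size);

void PrintKernel(const char* msg);
void PrintKernelError(const char* msg);
void PrintKernelWarning(const char* msg);
void PrintKernelHex(uint64_t value);
void PrintKernelInt(uint64_t value);

void Panic(const char* msg);   // called by the crate's #[panic_handler] in src/lib.rs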
diff --git a/mm/rust/Cargo.lock b/mm/rust/Cargo.lock
new file mode 100644
index 0000000..5370749
--- /dev/null
+++ b/mm/rust/Cargo.lock
@@ -0,0 +1,16 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 4
+
+[[package]]
+name = "spin"
+version = "0.9.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67"
+
+[[package]]
+name = "voidframe-mm"
+version = "0.1.0"
+dependencies = [
+ "spin",
+]
diff --git a/mm/rust/Cargo.toml b/mm/rust/Cargo.toml
new file mode 100644
index 0000000..f827a59
--- /dev/null
+++ b/mm/rust/Cargo.toml
@@ -0,0 +1,15 @@
+[package]
+name = "voidframe-mm"
+version = "0.1.0"
+edition = "2021"
+
+[lib]
+crate-type = ["staticlib"]
+
+[dependencies]
+spin = { version = "0.9", default-features = false, features = ["mutex", "spin_mutex"] }
+
+[profile.release]
+panic = "abort"
+lto = true
+codegen-units = 1
\ No newline at end of file
diff --git a/mm/rust/src/heap.rs b/mm/rust/src/heap.rs
new file mode 100644
index 0000000..c155ce7
--- /dev/null
+++ b/mm/rust/src/heap.rs
@@ -0,0 +1,544 @@
+use core::ptr;
+use core::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
+use spin::Mutex;
+
+// Production constants
+const HEAP_MAGIC_ALLOC: u32 = 0xDEADBEEF;
+const HEAP_MAGIC_FREE: u32 = 0xFEEDFACE;
+const MIN_BLOCK_SIZE: usize = 32;
+const HEAP_ALIGN: usize = 16; // Better alignment for SIMD
+const MAX_ALLOC_SIZE: usize = 1 << 30;
+const NUM_SIZE_CLASSES: usize = 16;
+const FAST_CACHE_SIZE: usize = 64; // Larger cache
+const CANARY_VALUE: u64 = 0xDEADC0DEDEADBEEF;
+const POISON_VALUE: u8 = 0xDE;
+const COALESCE_THRESHOLD: usize = 256; // Coalesce every N frees
+
+// Optimized size classes with better coverage
+static SIZE_CLASSES: [usize; NUM_SIZE_CLASSES] = [
+    16, 32, 48, 64, 96, 128, 192, 256, 384, 512, 768, 1024, 1536, 2048, 3072, 4096
+];
+
+#[repr(C)]
+struct HeapBlock {
+    magic: u32,
+    size: usize,
+    is_free: u8,
+    in_cache: u8,
+    next: *mut HeapBlock,
+    prev: *mut HeapBlock,
+    checksum: u32,
+    cache_next: *mut HeapBlock,
+}
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+struct FastCache {
+    free_list: *mut HeapBlock,
+    count: i32,
+    hits: u64,
+    misses: u64,
+}
+
+#[repr(C)]
+pub struct HeapStats {
+    pub total_allocated: usize,
+    pub peak_allocated: usize,
+    pub alloc_count: u64,
+    pub free_count: u64,
+    pub cache_hits: u64,
+    pub cache_misses: u64,
+    pub coalesce_count: u64,
+    pub corruption_count: u64,
+}
+
+unsafe impl Send for HeapState {}
+unsafe impl Sync for HeapState {}
+
+struct HeapState {
+    head: *mut HeapBlock,
+    fast_caches: [FastCache; NUM_SIZE_CLASSES],
+    free_counter: usize,
+}
+
+// Lock-free counters
+static TOTAL_ALLOCATED: AtomicUsize = AtomicUsize::new(0);
+static PEAK_ALLOCATED: AtomicUsize = AtomicUsize::new(0);
+static ALLOC_COUNTER: AtomicU64 = AtomicU64::new(0);
+static FREE_COUNTER: AtomicU64 = AtomicU64::new(0);
+static COALESCE_COUNTER: AtomicU64 = AtomicU64::new(0);
+static CORRUPTION_COUNTER: AtomicU64 = AtomicU64::new(0);
+
+static HEAP: Mutex<HeapState> = Mutex::new(HeapState {
+    head: ptr::null_mut(),
+    fast_caches: [FastCache {
+        free_list: ptr::null_mut(),
+        count: 0,
+        hits: 0,
+        misses: 0,
+    }; NUM_SIZE_CLASSES],
+    free_counter: 0,
+});
+
+extern "C" {
+    fn VMemAlloc(size: u64) -> *mut u8;
+    fn VMemFree(ptr: *mut u8, size: u64);
+    fn PrintKernelError(msg: *const u8);
+    fn PrintKernelWarning(msg: *const u8);
+}
+
+impl HeapBlock {
+    unsafe fn compute_checksum(&self) -> u32 {
+        (self as *const _ as usize ^ self.magic as usize ^ self.size) as u32
+    }
+
+    unsafe fn validate(&self) -> bool {
+        if (self as *const HeapBlock).is_null() {
+            return false;
+        }
+
+        if self.magic != HEAP_MAGIC_ALLOC && self.magic != HEAP_MAGIC_FREE {
+            CORRUPTION_COUNTER.fetch_add(1,
Ordering::Relaxed); + return false; + } + + if self.size == 0 || self.size > MAX_ALLOC_SIZE { + CORRUPTION_COUNTER.fetch_add(1, Ordering::Relaxed); + return false; + } + + if self.checksum != self.compute_checksum() { + CORRUPTION_COUNTER.fetch_add(1, Ordering::Relaxed); + return false; + } + + true + } + + unsafe fn init(&mut self, size: usize, is_free: bool) { + self.magic = if is_free { HEAP_MAGIC_FREE } else { HEAP_MAGIC_ALLOC }; + self.size = size; + self.is_free = if is_free { 1 } else { 0 }; + self.in_cache = 0; + self.cache_next = ptr::null_mut(); + self.checksum = self.compute_checksum(); + + // Add canary for allocated blocks + if !is_free && size >= 16 { + let canary_ptr = self.to_user_ptr().add(size - 8) as *mut u64; + *canary_ptr = CANARY_VALUE; + } + } + + unsafe fn validate_canary(&self) -> bool { + if self.is_free != 0 || self.size < 16 { + return true; + } + let canary_ptr = self.to_user_ptr().add(self.size - 8) as *const u64; + let valid = *canary_ptr == CANARY_VALUE; + if !valid { + CORRUPTION_COUNTER.fetch_add(1, Ordering::Relaxed); + } + valid + } + + unsafe fn to_user_ptr(&self) -> *mut u8 { + (self as *const HeapBlock as *mut u8).add(core::mem::size_of::()) + } + + unsafe fn from_user_ptr(ptr: *mut u8) -> *mut HeapBlock { + ptr.sub(core::mem::size_of::()) as *mut HeapBlock + } + + unsafe fn are_adjacent(&self, other: *const HeapBlock) -> bool { + let self_end = (self as *const HeapBlock as *const u8) + .add(core::mem::size_of::()) + .add(self.size); + self_end == other as *const u8 + } + + unsafe fn coalesce_with_next(&mut self) -> bool { + if self.next.is_null() || (*self.next).is_free == 0 || (*self.next).in_cache != 0 { + return false; + } + + if !self.are_adjacent(self.next) { + return false; + } + + let next_block = self.next; + self.size += core::mem::size_of::() + (*next_block).size; + self.next = (*next_block).next; + + if !self.next.is_null() { + (*self.next).prev = self; + } + + self.checksum = self.compute_checksum(); + COALESCE_COUNTER.fetch_add(1, Ordering::Relaxed); + true + } +} + +#[inline] +fn align_size(size: usize) -> usize { + (size + HEAP_ALIGN - 1) & !(HEAP_ALIGN - 1) +} + +fn get_size_class(size: usize) -> Option { + // Binary search for better performance + let mut left = 0; + let mut right = SIZE_CLASSES.len(); + + while left < right { + let mid = (left + right) / 2; + if SIZE_CLASSES[mid] >= size { + right = mid; + } else { + left = mid + 1; + } + } + + if left < SIZE_CLASSES.len() { + Some(left) + } else { + None + } +} + +unsafe fn create_new_block(size: usize) -> *mut HeapBlock { + // Smart chunk sizing + let chunk_size = if size <= 4096 { + // For small allocations, allocate larger chunks + let multiplier = if size <= 256 { 16 } else if size <= 1024 { 8 } else { 4 }; + ((size * multiplier) + 4095) & !4095 + } else { + // For large allocations, round up to page boundary + (size + 4095) & !4095 + }; + + let total_size = core::mem::size_of::() + chunk_size; + let mem = VMemAlloc(total_size as u64); + if mem.is_null() { + return ptr::null_mut(); + } + + let block = mem as *mut HeapBlock; + (*block).init(chunk_size, false); + + // Link to head with proper linking + let mut heap = HEAP.lock(); + (*block).next = heap.head; + (*block).prev = ptr::null_mut(); + if !heap.head.is_null() { + (*heap.head).prev = block; + } + heap.head = block; + + // Split if chunk is much larger than needed + if chunk_size > size * 2 && chunk_size - size >= MIN_BLOCK_SIZE + core::mem::size_of::() { + split_block(block, size); + } + + block +} + +unsafe fn 
split_block(block: *mut HeapBlock, needed_size: usize) { + let remaining = (*block).size - needed_size; + if remaining < MIN_BLOCK_SIZE + core::mem::size_of::() { + return; + } + + let new_block_ptr = (block as *mut u8) + .add(core::mem::size_of::()) + .add(needed_size) as *mut HeapBlock; + + let new_block = &mut *new_block_ptr; + new_block.init(remaining - core::mem::size_of::(), true); + + // Link new block + new_block.next = (*block).next; + new_block.prev = block; + if !(*block).next.is_null() { + (*(*block).next).prev = new_block_ptr; + } + (*block).next = new_block_ptr; + + // Update original block + (*block).size = needed_size; + (*block).checksum = (*block).compute_checksum(); +} + +unsafe fn find_free_block(size: usize) -> *mut HeapBlock { + let heap = HEAP.lock(); + let mut best: *mut HeapBlock = ptr::null_mut(); + let mut best_size = MAX_ALLOC_SIZE; + let mut scanned = 0; + + let mut current = heap.head; + while !current.is_null() && scanned < 64 { // Limit search + let block = &*current; + if block.is_free != 0 && block.in_cache == 0 && block.size >= size { + if block.size < best_size { + best = current; + best_size = block.size; + if block.size <= size * 2 { // Good enough fit + break; + } + } + } + current = block.next; + scanned += 1; + } + + best +} + +unsafe fn coalesce_free_blocks() { + let mut heap = HEAP.lock(); + let mut current = heap.head; + let mut coalesced = 0; + + while !current.is_null() && coalesced < 32 { + let block = &mut *current; + if block.is_free != 0 && block.in_cache == 0 { + if block.coalesce_with_next() { + coalesced += 1; + continue; // Don't advance, check same block again + } + } + current = block.next; + } +} + +#[no_mangle] +pub unsafe extern "C" fn rust_kmalloc(size: usize) -> *mut u8 { + if size == 0 || size > MAX_ALLOC_SIZE { + return ptr::null_mut(); + } + + let aligned_size = align_size(size.max(16)); // Minimum 16 bytes + + // Fast cache path + if let Some(size_class) = get_size_class(aligned_size) { + let mut heap = HEAP.lock(); + let cache = &mut heap.fast_caches[size_class]; + if !cache.free_list.is_null() { + let block = cache.free_list; + if !(*block).validate() { + PrintKernelError(b"[HEAP] Corrupted block in cache\0".as_ptr()); + return ptr::null_mut(); + } + + cache.free_list = (*block).cache_next; + cache.count -= 1; + cache.hits += 1; + + (*block).cache_next = ptr::null_mut(); + (*block).in_cache = 0; + (*block).is_free = 0; + (*block).magic = HEAP_MAGIC_ALLOC; + (*block).checksum = (*block).compute_checksum(); + + // Update counters + let new_total = TOTAL_ALLOCATED.fetch_add((*block).size, Ordering::Relaxed); + update_peak(new_total); + ALLOC_COUNTER.fetch_add(1, Ordering::Relaxed); + + return (*block).to_user_ptr(); + } + cache.misses += 1; + } + + // Slow path + let block = find_free_block(aligned_size); + let block = if block.is_null() { + create_new_block(aligned_size) + } else { + if !(*block).validate() { + PrintKernelError(b"[HEAP] Corrupted free block\0".as_ptr()); + return ptr::null_mut(); + } + (*block).is_free = 0; + (*block).magic = HEAP_MAGIC_ALLOC; + (*block).checksum = (*block).compute_checksum(); + + // Split if too large + if (*block).size > aligned_size * 2 { + split_block(block, aligned_size); + } + + block + }; + + if block.is_null() { + return ptr::null_mut(); + } + + let new_total = TOTAL_ALLOCATED.fetch_add((*block).size, Ordering::Relaxed); + update_peak(new_total); + ALLOC_COUNTER.fetch_add(1, Ordering::Relaxed); + + (*block).to_user_ptr() +} + +#[inline] +fn update_peak(new_total: usize) { + let mut 
peak = PEAK_ALLOCATED.load(Ordering::Relaxed); + while new_total > peak { + match PEAK_ALLOCATED.compare_exchange_weak(peak, new_total, Ordering::Relaxed, Ordering::Relaxed) { + Ok(_) => break, + Err(x) => peak = x, + } + } +} + +#[no_mangle] +pub unsafe extern "C" fn rust_kfree(ptr: *mut u8) { + if ptr.is_null() { + return; + } + + let block = HeapBlock::from_user_ptr(ptr); + if !(*block).validate() { + PrintKernelError(b"[HEAP] Invalid block in kfree\0".as_ptr()); + return; + } + + if (*block).magic != HEAP_MAGIC_ALLOC { + PrintKernelError(b"[HEAP] Double free detected\0".as_ptr()); + return; + } + + if !(*block).validate_canary() { + PrintKernelError(b"[HEAP] Buffer overflow detected\0".as_ptr()); + return; + } + + // Poison memory + core::ptr::write_bytes(ptr, POISON_VALUE, (*block).size.saturating_sub(8)); + + (*block).is_free = 1; + (*block).magic = HEAP_MAGIC_FREE; + (*block).checksum = (*block).compute_checksum(); + + TOTAL_ALLOCATED.fetch_sub((*block).size, Ordering::Relaxed); + FREE_COUNTER.fetch_add(1, Ordering::Relaxed); + + let mut heap = HEAP.lock(); + + // Try fast cache first + if let Some(size_class) = get_size_class((*block).size) { + let cache = &mut heap.fast_caches[size_class]; + if cache.count < FAST_CACHE_SIZE as i32 { + (*block).cache_next = cache.free_list; + cache.free_list = block; + cache.count += 1; + (*block).in_cache = 1; + return; + } + } + + // Periodic coalescing + heap.free_counter += 1; + if heap.free_counter >= COALESCE_THRESHOLD { + heap.free_counter = 0; + drop(heap); // Release lock before coalescing + coalesce_free_blocks(); + } +} + +#[no_mangle] +pub unsafe extern "C" fn rust_krealloc(ptr: *mut u8, new_size: usize) -> *mut u8 { + if ptr.is_null() { + return rust_kmalloc(new_size); + } + + if new_size == 0 { + rust_kfree(ptr); + return ptr::null_mut(); + } + + let block = HeapBlock::from_user_ptr(ptr); + if !(*block).validate() || (*block).magic != HEAP_MAGIC_ALLOC { + return ptr::null_mut(); + } + + let old_size = (*block).size; + let aligned_new_size = align_size(new_size.max(16)); + + // If shrinking or size is close enough, reuse + if aligned_new_size <= old_size && old_size <= aligned_new_size * 2 { + return ptr; + } + + let new_ptr = rust_kmalloc(new_size); + if !new_ptr.is_null() { + let copy_size = old_size.min(new_size).saturating_sub(8); // Account for canary + core::ptr::copy_nonoverlapping(ptr, new_ptr, copy_size); + rust_kfree(ptr); + } + + new_ptr +} + +#[no_mangle] +pub unsafe extern "C" fn rust_kcalloc(count: usize, size: usize) -> *mut u8 { + let total_size = count.saturating_mul(size); + if total_size / count != size { // Overflow check + return ptr::null_mut(); + } + + let ptr = rust_kmalloc(total_size); + if !ptr.is_null() { + core::ptr::write_bytes(ptr, 0, total_size); + } + ptr +} + +#[no_mangle] +pub extern "C" fn rust_heap_get_stats(stats: *mut HeapStats) { + if stats.is_null() { + return; + } + + unsafe { + let heap = HEAP.lock(); + let mut total_hits = 0; + let mut total_misses = 0; + + for cache in &heap.fast_caches { + total_hits += cache.hits; + total_misses += cache.misses; + } + + (*stats) = HeapStats { + total_allocated: TOTAL_ALLOCATED.load(Ordering::Relaxed), + peak_allocated: PEAK_ALLOCATED.load(Ordering::Relaxed), + alloc_count: ALLOC_COUNTER.load(Ordering::Relaxed), + free_count: FREE_COUNTER.load(Ordering::Relaxed), + cache_hits: total_hits, + cache_misses: total_misses, + coalesce_count: COALESCE_COUNTER.load(Ordering::Relaxed), + corruption_count: CORRUPTION_COUNTER.load(Ordering::Relaxed), + }; + } +} + 
+#[no_mangle]
+pub extern "C" fn rust_heap_validate() -> i32 {
+    unsafe {
+        let heap = HEAP.lock();
+        let mut current = heap.head;
+        let mut errors = 0;
+
+        while !current.is_null() {
+            if !(*current).validate() {
+                errors += 1;
+            }
+            current = (*current).next;
+        }
+
+        errors
+    }
+}
\ No newline at end of file
diff --git a/mm/rust/src/lib.rs b/mm/rust/src/lib.rs
new file mode 100644
index 0000000..ae8df75
--- /dev/null
+++ b/mm/rust/src/lib.rs
@@ -0,0 +1,28 @@
+#![no_std]
+#![no_main]
+
+
+
+mod heap;
+mod vmem_ffi;
+
+pub use heap::*;
+
+// Re-export C API functions
+pub use heap::{
+    rust_kmalloc as kmalloc,
+    rust_kfree as kfree,
+    rust_krealloc as krealloc,
+    rust_kcalloc as kcalloc,
+};
+
+#[panic_handler]
+fn panic(_info: &core::panic::PanicInfo) -> ! {
+    // Call existing kernel panic function
+    extern "C" {
+        fn Panic(msg: *const u8) -> !;
+    }
+    unsafe {
+        Panic(b"Rust heap panic\0".as_ptr());
+    }
+}
\ No newline at end of file
diff --git a/mm/rust/src/vmem_ffi.rs b/mm/rust/src/vmem_ffi.rs
new file mode 100644
index 0000000..efc8b42
--- /dev/null
+++ b/mm/rust/src/vmem_ffi.rs
@@ -0,0 +1,16 @@
+// FFI declarations for VMem functions
+extern "C" {
+    pub fn VMemAlloc(size: u64) -> *mut u8;
+    pub fn VMemFree(ptr: *mut u8, size: u64);
+    pub fn VMemGetPhysAddr(vaddr: u64) -> u64;
+    pub fn VMemMap(vaddr: u64, paddr: u64, flags: u64) -> i32;
+    pub fn VMemUnmap(vaddr: u64, size: u64) -> i32;
+}
+
+// Console functions
+extern "C" {
+    pub fn PrintKernel(msg: *const u8);
+    pub fn PrintKernelError(msg: *const u8);
+    pub fn PrintKernelHex(value: u64);
+    pub fn PrintKernelInt(value: u64);
+}
\ No newline at end of file
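Note: the statistics and validation entry points exported by heap.rs are not wired into the shell by this diff. The sketch below is a hypothetical diagnostics handler, shown only to illustrate how HeapStats, rust_heap_get_stats, and rust_heap_validate are meant to be consumed from C; the handler name and output format are invented here, and it would still need an entry in the commands[] table alongside rsalloc.

// Hypothetical handler; uses only mm/KernelHeapRust.h plus existing console calls.
#include "KernelHeapRust.h"

static void RsHeapStatHandler(const char* args) {
    (void)args;
    HeapStats stats;
    rust_heap_get_stats(&stats);

    PrintKernel("rust heap: allocated=");
    PrintKernelInt(stats.total_allocated);
    PrintKernel(" peak=");
    PrintKernelInt(stats.peak_allocated);
    PrintKernel(" allocs=");
    PrintKernelInt(stats.alloc_count);
    PrintKernel(" frees=");
    PrintKernelInt(stats.free_count);
    PrintKernel("\n");

    int bad_blocks = rust_heap_validate();   // walks the block list, counts corrupt headers
    if (bad_blocks > 0) {
        PrintKernelErrorF("rust heap: %d corrupted block(s)\n", bad_blocks);
    }
}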