diff --git a/CMakeLists.txt b/CMakeLists.txt index 438a279..e4576c2 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -71,6 +71,7 @@ set(CMAKE_CXX_STANDARD_REQUIRED ON) # Rust Memory Management # ============================================================================ add_subdirectory(mm/rust) +add_subdirectory(kernel/atomic/rust) # ============================================================================ # Build Include Directories @@ -129,8 +130,9 @@ if(NOT EXCLUDE_EXTRA_OBJECTS) target_sources(voidframe.krnl PRIVATE ${OBJ_SOURCES}) endif() -# Link Rust heap library +# Rust libraries target_link_libraries(voidframe.krnl PRIVATE rust_heap) +target_link_libraries(voidframe.krnl PRIVATE rust_spinlock) # Configure the linker to use ld.lld with proper arguments set_target_properties(voidframe.krnl PROPERTIES diff --git a/cmake/dependencies.cmake b/cmake/dependencies.cmake index e713495..afa8d5b 100644 --- a/cmake/dependencies.cmake +++ b/cmake/dependencies.cmake @@ -7,17 +7,24 @@ find_program(QEMU_IMG qemu-img) find_program(MKFS_FAT mkfs.fat) find_program(MKFS_EXT2 mkfs.ext2) find_program(QEMU_SYSTEM_X86_64 qemu-system-x86_64) +find_program(CARGO_EXECUTABLE cargo REQUIRED) +find_program(RUSTC_EXECUTABLE rustc REQUIRED) -if (NOT LLVM_OBJDUMP) - message(WARNING "llvm-objdump not found. Please install LLVM.") -elseif (NOT GRUB_MKRESCUE) - message(WARNING "grub-mkrescue not found. Please install GRUB.") -elseif (NOT QEMU_IMG) - message(WARNING "qemu-img not found. Please install QEMU.") -elseif (NOT MKFS_FAT) - message(WARNING "mkfs.fat not found. Please install dosfstools.") -elseif (NOT MKFS_EXT2) - message(WARNING "mkfs.ext2 not found. Please install e2fsprogs.") -elseif (NOT QEMU_SYSTEM_X86_64) - message(WARNING "qemu-system-x86_64 not found. Please install QEMU.") +if(NOT LLVM_OBJDUMP) + message(WARNING "llvm-objdump not found. ") +endif() +if(NOT GRUB_MKRESCUE) + message(WARNING "grub-mkrescue not found. ") +endif() +if(NOT QEMU_IMG) + message(WARNING "qemu-img not found. ") +endif() +if(NOT MKFS_FAT) + message(WARNING "mkfs.fat not found. ") +endif() +if(NOT MKFS_EXT2) + message(WARNING "mkfs.ext2 not found. ") +endif() +if(NOT QEMU_SYSTEM_X86_64) + message(WARNING "qemu-system-x86_64 not found. 
") endif() \ No newline at end of file diff --git a/cmake/source.cmake b/cmake/source.cmake index f2156a6..8f6eead 100644 --- a/cmake/source.cmake +++ b/cmake/source.cmake @@ -103,6 +103,7 @@ set(ARCH_SOURCES set(INCLUDE_SOURCES include/ctype.c include/Font.c + include/Io.c ) set(CPP_SOURCES diff --git a/cmake/variable.cmake b/cmake/variable.cmake index 38751ef..e477be7 100644 --- a/cmake/variable.cmake +++ b/cmake/variable.cmake @@ -13,4 +13,5 @@ add_compile_definitions( KERNEL_SPACE_END=0xFFFFFFFFFFFFFFFFULL PREFETCH_DISTANCE=256 NT_STORE_THRESHOLD=4*1024*1024 + MAX_SUPPORTED_MEMORY=128*1024*1024*1024 ) \ No newline at end of file diff --git a/drivers/Ide.c b/drivers/Ide.c index e339128..bec486b 100644 --- a/drivers/Ide.c +++ b/drivers/Ide.c @@ -1,15 +1,14 @@ #include "Ide.h" #include "BlockDevice.h" #include "Format.h" - +#include "SpinlockRust.h" #include "APIC.h" #include "Console.h" #include "Io.h" #include "MemOps.h" -#include "Spinlock.h" static IdeChannel channels[2]; -static volatile int ide_lock = 0; +static RustSpinLock* ide_lock = NULL; // Wait for drive to be ready (not busy) static int IdeWaitReady(uint16_t base_port) { @@ -113,6 +112,13 @@ static int IdeIdentifyDrive(uint16_t base_port, uint8_t drive, uint16_t* buffer, } int IdeInit(void) { + if (!ide_lock) { + ide_lock = rust_spinlock_new(); + if (!ide_lock) { + PrintKernelError("IDE: Failed to create spinlock\n"); + return IDE_ERROR_IO; + } + } PrintKernel("IDE: Initializing IDE controller...\n"); // Initialize channel structures @@ -211,7 +217,7 @@ int IdeInit(void) { return IDE_OK; } -int IdeReadBlocks(struct BlockDevice* device, uint64_t start_lba, uint32_t count, void* buffer) { +int IdeReadBlocks(BlockDevice* device, uint64_t start_lba, uint32_t count, void* buffer) { if (!device || !device->driver_data) { PrintKernel("IDE: Invalid device or driver_data\n"); return -1; @@ -227,8 +233,8 @@ int IdeReadBlocks(struct BlockDevice* device, uint64_t start_lba, uint32_t count return IDE_ERROR_NO_DRIVE; } - SpinLock(&ide_lock); - uint16_t base_port = channels[channel].base_port; + rust_spinlock_lock(ide_lock); + const uint16_t base_port = channels[channel].base_port; for (uint32_t i = 0; i < count; i++) { uint64_t lba = start_lba + i; @@ -236,7 +242,7 @@ int IdeReadBlocks(struct BlockDevice* device, uint64_t start_lba, uint32_t count int result = IdeSelectDrive(base_port, drive_num, lba); if (result != IDE_OK) { - SpinUnlock(&ide_lock); + rust_spinlock_unlock(ide_lock); return result; } @@ -251,7 +257,7 @@ int IdeReadBlocks(struct BlockDevice* device, uint64_t start_lba, uint32_t count PrintKernel("IDE: Wait for data failed with error "); PrintKernelInt(result); PrintKernel("\n"); - SpinUnlock(&ide_lock); + rust_spinlock_unlock(ide_lock); return result; } @@ -261,7 +267,7 @@ int IdeReadBlocks(struct BlockDevice* device, uint64_t start_lba, uint32_t count } } - SpinUnlock(&ide_lock); + rust_spinlock_unlock(ide_lock); return 0; } @@ -276,7 +282,7 @@ int IdeWriteBlocks(struct BlockDevice* device, uint64_t start_lba, uint32_t coun return IDE_ERROR_NO_DRIVE; } - SpinLock(&ide_lock); + rust_spinlock_lock(ide_lock); uint16_t base_port = channels[channel].base_port; for (uint32_t i = 0; i < count; i++) { @@ -285,7 +291,7 @@ int IdeWriteBlocks(struct BlockDevice* device, uint64_t start_lba, uint32_t coun int result = IdeSelectDrive(base_port, drive_num, lba); if (result != IDE_OK) { - SpinUnlock(&ide_lock); + rust_spinlock_unlock(ide_lock); return result; } @@ -297,7 +303,7 @@ int IdeWriteBlocks(struct BlockDevice* device, uint64_t 
start_lba, uint32_t coun result = IdeWaitData(base_port); if (result != IDE_OK) { - SpinUnlock(&ide_lock); + rust_spinlock_unlock(ide_lock); return result; } @@ -308,12 +314,12 @@ int IdeWriteBlocks(struct BlockDevice* device, uint64_t start_lba, uint32_t coun result = IdeWaitReady(base_port); if (result != IDE_OK) { - SpinUnlock(&ide_lock); + rust_spinlock_unlock(ide_lock); return result; } } - SpinUnlock(&ide_lock); + rust_spinlock_unlock(ide_lock); return 0; } @@ -357,9 +363,9 @@ int IdeReadLBA2048(uint8_t drive, uint32_t lba, void* buffer) { return IDE_ERROR_NO_DRIVE; } - SpinLock(&ide_lock); + rust_spinlock_lock(ide_lock); uint16_t base_port = channels[channel].base_port; - SpinUnlock(&ide_lock); + rust_spinlock_unlock(ide_lock); int result; result = IdeSelectDrive(base_port, drive_num, 0); // LBA is in the packet diff --git a/drivers/virtio/VirtioBlk.c b/drivers/virtio/VirtioBlk.c index 771c46c..5943d73 100644 --- a/drivers/virtio/VirtioBlk.c +++ b/drivers/virtio/VirtioBlk.c @@ -1,13 +1,14 @@ #include "VirtioBlk.h" -#include "Spinlock.h" #include "Atomics.h" #include "Console.h" #include "PCI/PCI.h" +#include "Spinlock.h" +#include "SpinlockRust.h" #include "VMem.h" -#include "stdbool.h" #include "Virtio.h" +#include "stdbool.h" // Globals to hold the capability structures we find -static volatile int* virtio_lock; +static RustSpinLock* virtio_lock = NULL; static struct VirtioPciCap cap_common_cfg; static struct VirtioPciCap cap_notify_cfg; static struct VirtioPciCap cap_isr_cfg; @@ -49,6 +50,11 @@ void ReadVirtioCapability(PciDevice device, uint8_t cap_offset, struct VirtioPci // Implementation for the VirtIO Block device driver. void InitializeVirtioBlk(PciDevice device) { + virtio_lock = rust_spinlock_new(); + if (!virtio_lock) { + PrintKernelError("VirtIO-Blk: - Failed to initialize spinlock.\n"); + return; + } PrintKernel("VirtIO-Blk: Initializing device at B/D/F "); PrintKernelHex(device.bus); PrintKernel("/"); PrintKernelHex(device.device); PrintKernel("/"); PrintKernelHex(device.function); PrintKernel("\n"); @@ -210,11 +216,11 @@ void InitializeVirtioBlk(PciDevice device) { } int VirtioBlkRead(uint64_t sector, void* buffer) { - SpinLock(&virtio_lock); + rust_spinlock_lock(virtio_lock); if ((vq_next_desc_idx + 3) > vq_size) { PrintKernel("VirtIO-Blk: Error - Not enough descriptors available\n"); - SpinUnlock(&virtio_lock); + rust_spinlock_unlock(virtio_lock); return -1; } @@ -224,7 +230,7 @@ int VirtioBlkRead(uint64_t sector, void* buffer) { PrintKernel("VirtIO-Blk: Failed to allocate request header/status\n"); if (req_hdr) VMemFree(req_hdr, sizeof(struct VirtioBlkReq)); if (status) VMemFree(status, sizeof(uint8_t)); - SpinUnlock(&virtio_lock); + rust_spinlock_unlock(virtio_lock); return -1; } @@ -273,16 +279,16 @@ int VirtioBlkRead(uint64_t sector, void* buffer) { last_used_idx++; } - SpinUnlock(&virtio_lock); + rust_spinlock_unlock(virtio_lock); return 0; } int VirtioBlkWrite(uint64_t sector, void* buffer) { - SpinLock(&virtio_lock); + rust_spinlock_lock(virtio_lock); if ((vq_next_desc_idx + 3) > vq_size) { PrintKernel("VirtIO-Blk: Error - Not enough descriptors available\n"); - SpinUnlock(&virtio_lock); + rust_spinlock_unlock(virtio_lock); return -1; } @@ -292,7 +298,7 @@ int VirtioBlkWrite(uint64_t sector, void* buffer) { PrintKernel("VirtIO-Blk: Failed to allocate request header/status\n"); if (req_hdr) VMemFree(req_hdr, sizeof(struct VirtioBlkReq)); if (status) VMemFree(status, sizeof(uint8_t)); - SpinUnlock(&virtio_lock); + rust_spinlock_unlock(virtio_lock); return -1; 
} @@ -341,6 +347,6 @@ int VirtioBlkWrite(uint64_t sector, void* buffer) { last_used_idx++; } - SpinUnlock(&virtio_lock); + rust_spinlock_unlock(virtio_lock); return 0; } diff --git a/include/Io.c b/include/Io.c new file mode 100644 index 0000000..b315eb0 --- /dev/null +++ b/include/Io.c @@ -0,0 +1,52 @@ +#include "Io.h" +#include "x64.h" + +void cli() { + _full_mem_prot_start(); + __asm__ volatile("cli" ::: "memory"); +#ifdef VF_CONFIG_INTEL + _full_mem_prot_end_intel(); +#else + _full_mem_prot_end(); +#endif +} + +void sti() { + _full_mem_prot_start(); + __asm__ volatile("sti" ::: "memory"); +#ifdef VF_CONFIG_INTEL + _full_mem_prot_end_intel(); +#else + _full_mem_prot_end(); +#endif +} + +// CPUID detection +void cpuid(uint32_t leaf, uint32_t* eax, uint32_t* ebx, uint32_t* ecx, uint32_t* edx) { + __asm__ volatile("cpuid" + : "=a"(*eax), "=b"(*ebx), "=c"(*ecx), "=d"(*edx) + : "a"(leaf)); +} + +// MSR access +uint64_t rdmsr(uint32_t msr) { + uint32_t low, high; + __asm__ volatile("rdmsr" : "=a"(low), "=d"(high) : "c"(msr)); + return ((uint64_t)high << 32) | low; +} + +void wrmsr(uint32_t msr, uint64_t value) { + uint32_t low = value & 0xFFFFFFFF; + uint32_t high = value >> 32; + __asm__ volatile("wrmsr" :: "a"(low), "d"(high), "c"(msr)); +} + +irq_flags_t save_irq_flags(void) { + irq_flags_t flags; + __asm__ volatile("pushfq\n\tpopq %0" : "=r"(flags)); + return flags; +} + +void restore_irq_flags(irq_flags_t flags) { + __asm__ volatile("pushq %0\n\tpopfq" : : "r"(flags)); +} \ No newline at end of file diff --git a/include/Io.h b/include/Io.h index 9fe2d4d..2a569f3 100644 --- a/include/Io.h +++ b/include/Io.h @@ -2,7 +2,6 @@ #define IO_H #include "stdint.h" -#include "x64.h" static inline void outb(uint16_t port, uint8_t val) { __asm__ volatile ("outb %0, %1" : : "a"(val), "Nd"(port)); @@ -24,6 +23,16 @@ static inline uint16_t inw(uint16_t port) { return ret; } +static inline void outd(uint16_t port, uint32_t val) { + __asm__ volatile ("outl %0, %1" : : "a"(val), "Nd"(port)); +} + +static inline uint32_t ind(uint16_t port) { + uint32_t ret; + __asm__ volatile ("inl %1, %0" : "=a"(ret) : "Nd"(port)); + return ret; +} + static inline void outl(uint16_t port, uint32_t val) { __asm__ volatile ("outl %0, %1" : : "a"(val), "Nd"(port)); } @@ -34,57 +43,62 @@ static inline uint32_t inl(uint16_t port) { return ret; } -typedef uint64_t irq_flags_t; +static inline void outq(uint16_t port, uint64_t val) { + __asm__ volatile ("outq %0, %1" : : "a"(val), "Nd"(port)); +} -static inline irq_flags_t save_irq_flags(void) { - irq_flags_t flags; - __asm__ volatile("pushfq\n\tpopq %0" : "=r"(flags)); - return flags; +static inline uint64_t inq(uint16_t port) { + uint64_t ret; + __asm__ volatile ("inq %1, %0" : "=a"(ret) : "Nd"(port)); + return ret; } -static inline void restore_irq_flags(irq_flags_t flags) { - __asm__ volatile("pushq %0\n\tpopfq" : : "r"(flags)); +static inline void outsb(uint16_t port, void* buf, size_t len) { + __asm__ volatile ("cld; rep outsb" : "+D"(buf), "+c"(len) : "d"(port)); } -static inline void __attribute__((always_inline, hot, flatten)) cli() { - _full_mem_prot_start(); - __asm__ volatile("cli" ::: "memory"); -#ifdef VF_CONFIG_INTEL - _full_mem_prot_end_intel(); -#else - _full_mem_prot_end(); -#endif +static inline void insb(uint16_t port, void* buf, size_t len) { + __asm__ volatile ("cld; rep insb" : "+D"(buf), "+c"(len) : "d"(port)); } -static inline void __attribute__((always_inline, hot, flatten)) sti() { - _full_mem_prot_start(); - __asm__ volatile("sti" ::: "memory"); -#ifdef 
VF_CONFIG_INTEL - _full_mem_prot_end_intel(); -#else - _full_mem_prot_end(); -#endif +static inline void outsd(uint16_t port, void* buf, size_t len) { + __asm__ volatile ("cld; rep outsd" : "+D"(buf), "+c"(len) : "d"(port)); } -// CPUID detection -static inline void __attribute__((always_inline)) cpuid(uint32_t leaf, uint32_t* eax, uint32_t* ebx, uint32_t* ecx, uint32_t* edx) { - __asm__ volatile("cpuid" - : "=a"(*eax), "=b"(*ebx), "=c"(*ecx), "=d"(*edx) - : "a"(leaf)); +static inline void insd(uint16_t port, void* buf, size_t len) { + __asm__ volatile ("cld; rep insd" : "+D"(buf), "+c"(len) : "d"(port)); } -// MSR access -static inline __attribute__((always_inline)) uint64_t rdmsr(uint32_t msr) { - uint32_t low, high; - __asm__ volatile("rdmsr" : "=a"(low), "=d"(high) : "c"(msr)); - return ((uint64_t)high << 32) | low; +static inline void outsl(uint16_t port, void* buf, size_t len) { + __asm__ volatile ("cld; rep outsl" : "+D"(buf), "+c"(len) : "d"(port)); +} + +static inline void insl(uint16_t port, void* buf, size_t len) { + __asm__ volatile ("cld; rep insl" : "+D"(buf), "+c"(len) : "d"(port)); } -static inline __attribute__((always_inline)) void wrmsr(uint32_t msr, uint64_t value) { - uint32_t low = value & 0xFFFFFFFF; - uint32_t high = value >> 32; - __asm__ volatile("wrmsr" :: "a"(low), "d"(high), "c"(msr)); +static inline void outsw(uint16_t port, void* buf, size_t len) { + __asm__ volatile ("cld; rep outsw" : "+D"(buf), "+c"(len) : "d"(port)); } +static inline void insw(uint16_t port, void* buf, size_t len) { + __asm__ volatile ("cld; rep insw" : "+D"(buf), "+c"(len) : "d"(port)); +} + +void cli(void); +void sti(void); + +typedef uint64_t irq_flags_t; + +irq_flags_t save_irq_flags(void); +void restore_irq_flags(irq_flags_t flags); + +// CPUID detection +void cpuid(uint32_t leaf, uint32_t* eax, uint32_t* ebx, uint32_t* ecx, uint32_t* edx); + +// MSR access +uint64_t rdmsr(uint32_t msr); +void wrmsr(uint32_t msr, uint64_t value); + #endif diff --git a/include/Scheduler.h b/include/Scheduler.h index 2ef217d..5fa567f 100644 --- a/include/Scheduler.h +++ b/include/Scheduler.h @@ -3,7 +3,6 @@ #ifndef VOIDFRAME_SCHEDULER_H #define VOIDFRAME_SCHEDULER_H -#include "Shared.h" #if defined(VF_CONFIG_SCHED_MLFQ) #include "MLFQ.h" #elif defined(VF_CONFIG_SCHED_EEVDF) @@ -136,5 +135,15 @@ static inline __attribute__((always_inline)) void DumpSchedulerState() { #endif } +static inline __attribute__((always_inline)) uint64_t GetSystemTicks() { +#if defined(VF_CONFIG_SCHED_MLFQ) + return MLFQGetSystemTicks(); +#elif defined(VF_CONFIG_SCHED_EEVDF) + return EEVDFGetSystemTicks(); +#elif defined(VF_CONFIG_SCHED_CFS) + return 0; // not implemented +#endif +} + #endif // VOIDFRAME_SCHEDULER_H diff --git a/kernel/atomic/Spinlock.h b/kernel/atomic/Spinlock.h index c3ba39e..905a347 100644 --- a/kernel/atomic/Spinlock.h +++ b/kernel/atomic/Spinlock.h @@ -3,42 +3,35 @@ #include "Io.h" #include "stdint.h" -#include "Panic.h" +#include "x64.h" #define DEADLOCK_TIMEOUT_CYCLES 100000000ULL #define MAX_BACKOFF_CYCLES 1024 -// Get CPU timestamp counter -static inline uint64_t get_cycles(void) { - uint32_t low, high; - __asm__ volatile("rdtsc" : "=a"(low), "=d"(high)); - return ((uint64_t)high << 32) | low; -} - // Exponential backoff delay static inline void backoff_delay(uint64_t cycles) { - uint64_t start = get_cycles(); - while (get_cycles() - start < cycles) { + uint64_t start = rdtsc(); + while (rdtsc() - start < cycles) { __builtin_ia32_pause(); } } // Advanced spinlock with multiple anti-race mechanisms 
static inline void SpinLock(volatile int* lock) { - uint64_t start = get_cycles(); + uint64_t start = rdtsc(); uint64_t backoff = 1; uint32_t attempts = 0; while (1) { // Try to acquire without contention first - if (!*lock && !__sync_lock_test_and_set(lock, 1)) { + if (!*lock && !__atomic_test_and_set(lock, __ATOMIC_ACQUIRE)) { return; } // Deadlock detection - if (get_cycles() - start > DEADLOCK_TIMEOUT_CYCLES) { + if (rdtsc() - start > DEADLOCK_TIMEOUT_CYCLES) { backoff_delay(MAX_BACKOFF_CYCLES); - start = get_cycles(); + start = rdtsc(); attempts = 0; continue; } @@ -159,7 +152,7 @@ static inline irq_flags_t SpinLockIrqSave(volatile int* lock) { } static inline void SpinUnlockIrqRestore(volatile int* lock, irq_flags_t flags) { - __sync_lock_release(lock); + __atomic_clear(lock, __ATOMIC_RELEASE); restore_irq_flags(flags); } diff --git a/kernel/atomic/SpinlockRust.h b/kernel/atomic/SpinlockRust.h new file mode 100644 index 0000000..34edc42 --- /dev/null +++ b/kernel/atomic/SpinlockRust.h @@ -0,0 +1,48 @@ +#ifndef RUST_SPINLOCK_H +#define RUST_SPINLOCK_H + +#include <stdint.h> +#include <stdbool.h> + +#ifdef __cplusplus +extern "C" { +#endif + + // Opaque types for Rust structures + typedef struct RustSpinLock RustSpinLock; + typedef struct RustMcsLock RustMcsLock; + typedef struct RustMcsNode RustMcsNode; + typedef struct RustRwLock RustRwLock; + + // SpinLock functions + RustSpinLock* rust_spinlock_new(void); + void rust_spinlock_free(RustSpinLock* lock); + void rust_spinlock_lock(RustSpinLock* lock); + void rust_spinlock_unlock(RustSpinLock* lock); + bool rust_spinlock_try_lock(RustSpinLock* lock); + + // IRQ-safe SpinLock functions + uint64_t rust_spinlock_lock_irqsave(RustSpinLock* lock); + void rust_spinlock_unlock_irqrestore(RustSpinLock* lock, uint64_t flags); + + // MCS Lock functions + RustMcsLock* rust_mcs_lock_new(void); + void rust_mcs_lock_free(RustMcsLock* lock); + RustMcsNode* rust_mcs_node_new(void); + void rust_mcs_node_free(RustMcsNode* node); + void rust_mcs_lock(RustMcsLock* lock, RustMcsNode* node); + void rust_mcs_unlock(RustMcsLock* lock, RustMcsNode* node); + + // RwLock functions + RustRwLock* rust_rwlock_new(void); + void rust_rwlock_free(RustRwLock* lock); + void rust_rwlock_read_lock(RustRwLock* lock, uint32_t owner_id); + void rust_rwlock_read_unlock(RustRwLock* lock, uint32_t owner_id); + void rust_rwlock_write_lock(RustRwLock* lock, uint32_t owner_id); + void rust_rwlock_write_unlock(RustRwLock* lock); + +#ifdef __cplusplus +} +#endif + +#endif // RUST_SPINLOCK_H \ No newline at end of file diff --git a/kernel/atomic/rust/.cargo/config.toml b/kernel/atomic/rust/.cargo/config.toml new file mode 100644 index 0000000..1ccbb2d --- /dev/null +++ b/kernel/atomic/rust/.cargo/config.toml @@ -0,0 +1,9 @@ +[build] +target = "x86_64-unknown-none" + +[target.x86_64-unknown-none] +rustflags = [ + "-C", "code-model=kernel", + "-C", "relocation-model=static", + "-C", "target-feature=-mmx,-sse,-sse2,-sse3,-ssse3,-sse4.1,-sse4.2,-3dnow,-3dnowa,-avx,-avx2", +] \ No newline at end of file diff --git a/kernel/atomic/rust/CMakeLists.txt b/kernel/atomic/rust/CMakeLists.txt new file mode 100644 index 0000000..637e3e6 --- /dev/null +++ b/kernel/atomic/rust/CMakeLists.txt @@ -0,0 +1,37 @@ +# Set target triple based on architecture +if(CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64|AMD64") + set(RUST_TARGET "x86_64-unknown-none") +else() + message(FATAL_ERROR "Unsupported architecture: ${CMAKE_SYSTEM_PROCESSOR}") +endif() + +# Rust library target +set(RUST_LIB_NAME "voidframe_spinlock") +set(RUST_LIB_PATH
"${CMAKE_CURRENT_SOURCE_DIR}/target/${RUST_TARGET}/release/lib${RUST_LIB_NAME}.a") + +# Custom command to build Rust library +add_custom_command( + OUTPUT ${RUST_LIB_PATH} + COMMAND ${CARGO_EXECUTABLE} build --release --target ${RUST_TARGET} + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} + DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/Cargo.toml + ${CMAKE_CURRENT_SOURCE_DIR}/src/lib.rs + ${CMAKE_CURRENT_SOURCE_DIR}/src/spinlock.rs + ${CMAKE_CURRENT_SOURCE_DIR}/src/ffi.rs + ${CMAKE_CURRENT_SOURCE_DIR}/src/rwlock.rs + ${CMAKE_CURRENT_SOURCE_DIR}/src/mcs.rs + COMMENT "Building Rust spinlock library" + +) + +# Create imported library target +add_custom_target(rust_spinlock_build DEPENDS ${RUST_LIB_PATH}) + +# Create imported library target +add_library(rust_spinlock STATIC IMPORTED GLOBAL) +set_target_properties(rust_spinlock PROPERTIES + IMPORTED_LOCATION ${RUST_LIB_PATH} +) + +# Add dependency to ensure Rust library is built first +add_dependencies(rust_spinlock rust_spinlock_build) \ No newline at end of file diff --git a/kernel/atomic/rust/Cargo.lock b/kernel/atomic/rust/Cargo.lock new file mode 100644 index 0000000..cf72b4e --- /dev/null +++ b/kernel/atomic/rust/Cargo.lock @@ -0,0 +1,7 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 4 + +[[package]] +name = "voidframe-spinlock" +version = "0.1.0" diff --git a/kernel/atomic/rust/Cargo.toml b/kernel/atomic/rust/Cargo.toml new file mode 100644 index 0000000..30547ea --- /dev/null +++ b/kernel/atomic/rust/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "voidframe-spinlock" +version = "0.1.0" +edition = "2021" + +[lib] +crate-type = ["staticlib"] + +[dependencies] + +[profile.release] +panic = "abort" +lto = true +codegen-units = 1 \ No newline at end of file diff --git a/kernel/atomic/rust/README.md b/kernel/atomic/rust/README.md new file mode 100644 index 0000000..b881706 --- /dev/null +++ b/kernel/atomic/rust/README.md @@ -0,0 +1,63 @@ +# VoidFrame Rust Spinlock Suite + +A complete, high-performance spinlock implementation in Rust with deadlock detection and C FFI bindings. 
+ +## Features + +- **Advanced SpinLock**: Adaptive spinning with exponential backoff and deadlock detection +- **MCS Lock**: Fair queuing lock with reduced cache bouncing +- **Reader-Writer Lock**: Supports multiple readers or single writer with recursive write locking +- **C FFI**: Complete C bindings for seamless integration with kernel code +- **No-std**: Designed for kernel/bare-metal environments +- **Thread-safe**: All implementations use atomic operations + +## Performance Features + +- **Deadlock Detection**: Automatic timeout and recovery after 100M cycles +- **Adaptive Backoff**: Exponential backoff strategy to reduce CPU usage +- **Cache-friendly**: MCS locks minimize cache line bouncing +- **PAUSE instruction**: Uses x86 PAUSE for efficient spinning + +## Usage from C + +```c +#include "SpinlockRust.h" + +// Basic spinlock +RustSpinLock* lock = rust_spinlock_new(); +rust_spinlock_lock(lock); +// Critical section +rust_spinlock_unlock(lock); +rust_spinlock_free(lock); + +// MCS lock (fair) +RustMcsLock* mcs_lock = rust_mcs_lock_new(); +RustMcsNode* node = rust_mcs_node_new(); +rust_mcs_lock(mcs_lock, node); +// Critical section +rust_mcs_unlock(mcs_lock, node); +rust_mcs_node_free(node); +rust_mcs_lock_free(mcs_lock); + +// Reader-writer lock +RustRwLock* rw_lock = rust_rwlock_new(); +rust_rwlock_read_lock(rw_lock, process_id); +// Read critical section +rust_rwlock_read_unlock(rw_lock, process_id); +rust_rwlock_free(rw_lock); +``` + +## Building + +```bash +cd kernel/atomic/rust +cargo build --release +``` + +The compiled library will be at `target/x86_64-unknown-none/release/libvoidframe_spinlock.a` +## Architecture + +- **Static Allocation**: Uses pre-allocated static arrays (no heap allocation) +- **Lock-free Operations**: All synchronization uses atomic operations +- **Panic Handler**: Includes minimal panic handler for no-std environment +- **x86_64 Optimized**: Uses RDTSC and PAUSE instructions for optimal performance \ No newline at end of file diff --git a/kernel/atomic/rust/src/ffi.rs b/kernel/atomic/rust/src/ffi.rs new file mode 100644 index 0000000..b1e2628 --- /dev/null +++ b/kernel/atomic/rust/src/ffi.rs @@ -0,0 +1,231 @@ +use crate::{SpinLock, McsLock, McsNode, RwLock}; +use core::panic::{PanicInfo, PanicMessage}; + +// External C functions from Io.h +extern "C" { + fn save_irq_flags() -> u64; + fn restore_irq_flags(flags: u64); + fn cli(); +} + +#[panic_handler] +fn panic(_info: &PanicInfo) -> ! 
{ + extern "C" { + fn Panic(msg: *const u8) -> !; + } + unsafe { + Panic("=>$s\0".as_ptr() as *const u8); + } +} + +// Static storage for locks (kernel will manage allocation) +static mut SPINLOCK_STORAGE: [SpinLock; 64] = [const { SpinLock::new() }; 64]; +static mut SPINLOCK_USED: [bool; 64] = [false; 64]; + +#[no_mangle] +pub extern "C" fn rust_spinlock_new() -> *mut SpinLock { + unsafe { + for i in 0..64 { + if !SPINLOCK_USED[i] { + SPINLOCK_USED[i] = true; + return &mut SPINLOCK_STORAGE[i] as *mut SpinLock; + } + } + } + core::ptr::null_mut() +} + +#[no_mangle] +pub extern "C" fn rust_spinlock_free(lock: *mut SpinLock) { + if !lock.is_null() { + unsafe { + for i in 0..64 { + if &mut SPINLOCK_STORAGE[i] as *mut SpinLock == lock { + SPINLOCK_USED[i] = false; + break; + } + } + } + } +} + +#[no_mangle] +pub extern "C" fn rust_spinlock_lock(lock: *mut SpinLock) { + if !lock.is_null() { + unsafe { (*lock).lock() }; + } +} + +#[no_mangle] +pub extern "C" fn rust_spinlock_unlock(lock: *mut SpinLock) { + if !lock.is_null() { + unsafe { (*lock).unlock() }; + } +} + +#[no_mangle] +pub extern "C" fn rust_spinlock_try_lock(lock: *mut SpinLock) -> bool { + if !lock.is_null() { + unsafe { (*lock).try_lock() } + } else { + false + } +} + +// Static storage for MCS locks and nodes +static mut MCS_LOCK_STORAGE: [McsLock; 32] = [const { McsLock::new() }; 32]; +static mut MCS_LOCK_USED: [bool; 32] = [false; 32]; +static mut MCS_NODE_STORAGE: [McsNode; 128] = [const { McsNode::new() }; 128]; +static mut MCS_NODE_USED: [bool; 128] = [false; 128]; + +#[no_mangle] +pub extern "C" fn rust_mcs_lock_new() -> *mut McsLock { + unsafe { + for i in 0..32 { + if !MCS_LOCK_USED[i] { + MCS_LOCK_USED[i] = true; + return &mut MCS_LOCK_STORAGE[i] as *mut McsLock; + } + } + } + core::ptr::null_mut() +} + +#[no_mangle] +pub extern "C" fn rust_mcs_lock_free(lock: *mut McsLock) { + if !lock.is_null() { + unsafe { + for i in 0..32 { + if &mut MCS_LOCK_STORAGE[i] as *mut McsLock == lock { + MCS_LOCK_USED[i] = false; + break; + } + } + } + } +} + +#[no_mangle] +pub extern "C" fn rust_mcs_node_new() -> *mut McsNode { + unsafe { + for i in 0..128 { + if !MCS_NODE_USED[i] { + MCS_NODE_USED[i] = true; + return &mut MCS_NODE_STORAGE[i] as *mut McsNode; + } + } + } + core::ptr::null_mut() +} + +#[no_mangle] +pub extern "C" fn rust_mcs_node_free(node: *mut McsNode) { + if !node.is_null() { + unsafe { + for i in 0..128 { + if &mut MCS_NODE_STORAGE[i] as *mut McsNode == node { + MCS_NODE_USED[i] = false; + break; + } + } + } + } +} + +#[no_mangle] +pub extern "C" fn rust_mcs_lock(lock: *mut McsLock, node: *mut McsNode) { + if !lock.is_null() && !node.is_null() { + unsafe { (*lock).lock(&mut *node) }; + } +} + +#[no_mangle] +pub extern "C" fn rust_mcs_unlock(lock: *mut McsLock, node: *mut McsNode) { + if !lock.is_null() && !node.is_null() { + unsafe { (*lock).unlock(&mut *node) }; + } +} + +// Static storage for RwLocks +static mut RWLOCK_STORAGE: [RwLock; 32] = [const { RwLock::new() }; 32]; +static mut RWLOCK_USED: [bool; 32] = [false; 32]; + +#[no_mangle] +pub extern "C" fn rust_rwlock_new() -> *mut RwLock { + unsafe { + for i in 0..32 { + if !RWLOCK_USED[i] { + RWLOCK_USED[i] = true; + return &mut RWLOCK_STORAGE[i] as *mut RwLock; + } + } + } + core::ptr::null_mut() +} + +#[no_mangle] +pub extern "C" fn rust_rwlock_free(lock: *mut RwLock) { + if !lock.is_null() { + unsafe { + for i in 0..32 { + if &mut RWLOCK_STORAGE[i] as *mut RwLock == lock { + RWLOCK_USED[i] = false; + break; + } + } + } + } +} + +#[no_mangle] +pub extern "C" fn 
rust_rwlock_read_lock(lock: *mut RwLock, owner_id: u32) { + if !lock.is_null() { + unsafe { (*lock).read_lock(owner_id) }; + } +} + +#[no_mangle] +pub extern "C" fn rust_rwlock_read_unlock(lock: *mut RwLock, owner_id: u32) { + if !lock.is_null() { + unsafe { (*lock).read_unlock(owner_id) }; + } +} + +#[no_mangle] +pub extern "C" fn rust_rwlock_write_lock(lock: *mut RwLock, owner_id: u32) { + if !lock.is_null() { + unsafe { (*lock).write_lock(owner_id) }; + } +} + +#[no_mangle] +pub extern "C" fn rust_rwlock_write_unlock(lock: *mut RwLock) { + if !lock.is_null() { + unsafe { (*lock).write_unlock() }; + } +} + +// IRQ-safe spinlock functions +#[no_mangle] +pub extern "C" fn rust_spinlock_lock_irqsave(lock: *mut SpinLock) -> u64 { + if !lock.is_null() { + unsafe { + let flags = save_irq_flags(); + cli(); + (*lock).lock(); + flags + } + } else { + 0 + } +} + +#[no_mangle] +pub extern "C" fn rust_spinlock_unlock_irqrestore(lock: *mut SpinLock, flags: u64) { + if !lock.is_null() { + unsafe { + (*lock).unlock(); + restore_irq_flags(flags); + } + } +} \ No newline at end of file diff --git a/kernel/atomic/rust/src/lib.rs b/kernel/atomic/rust/src/lib.rs new file mode 100644 index 0000000..6152158 --- /dev/null +++ b/kernel/atomic/rust/src/lib.rs @@ -0,0 +1,10 @@ +#![no_std] + +pub mod spinlock; +pub mod mcs; +pub mod rwlock; +pub mod ffi; + +pub use spinlock::*; +pub use mcs::*; +pub use rwlock::*; \ No newline at end of file diff --git a/kernel/atomic/rust/src/mcs.rs b/kernel/atomic/rust/src/mcs.rs new file mode 100644 index 0000000..9ee365c --- /dev/null +++ b/kernel/atomic/rust/src/mcs.rs @@ -0,0 +1,74 @@ +use core::sync::atomic::{AtomicPtr, AtomicBool, Ordering}; +use core::ptr; + +#[repr(C)] +pub struct McsNode { + next: AtomicPtr<McsNode>, + locked: AtomicBool, +} + +impl McsNode { + pub const fn new() -> Self { + Self { + next: AtomicPtr::new(ptr::null_mut()), + locked: AtomicBool::new(false), + } + } +} + +#[repr(C)] +pub struct McsLock { + tail: AtomicPtr<McsNode>, +} + +impl McsLock { + pub const fn new() -> Self { + Self { + tail: AtomicPtr::new(ptr::null_mut()), + } + } + + #[inline] + pub fn lock(&self, node: &mut McsNode) { + node.next.store(ptr::null_mut(), Ordering::Relaxed); + node.locked.store(true, Ordering::Relaxed); + + let prev = self.tail.swap(node as *mut McsNode, Ordering::AcqRel); + + if !prev.is_null() { + unsafe { + (*prev).next.store(node as *mut McsNode, Ordering::Release); + } + while node.locked.load(Ordering::Acquire) { + unsafe { core::arch::x86_64::_mm_pause() }; + } + } + } + + #[inline] + pub fn unlock(&self, node: &mut McsNode) { + let next = node.next.load(Ordering::Acquire); + + if next.is_null() { + if self.tail.compare_exchange( + node as *mut McsNode, + ptr::null_mut(), + Ordering::Release, + Ordering::Relaxed + ).is_ok() { + return; + } + + while node.next.load(Ordering::Acquire).is_null() { + unsafe { core::arch::x86_64::_mm_pause() }; + } + } + + let next = node.next.load(Ordering::Acquire); + if !next.is_null() { + unsafe { + (*next).locked.store(false, Ordering::Release); + } + } + } +} \ No newline at end of file diff --git a/kernel/atomic/rust/src/rwlock.rs b/kernel/atomic/rust/src/rwlock.rs new file mode 100644 index 0000000..bdc9419 --- /dev/null +++ b/kernel/atomic/rust/src/rwlock.rs @@ -0,0 +1,84 @@ +use core::sync::atomic::{AtomicU32, AtomicBool, Ordering}; + +#[repr(C)] +pub struct RwLock { + readers: AtomicU32, + writer: AtomicBool, + owner: AtomicU32, + recursion: AtomicU32, +} + +impl RwLock { + pub const fn new() -> Self { + Self { + readers:
AtomicU32::new(0), + writer: AtomicBool::new(false), + owner: AtomicU32::new(0), + recursion: AtomicU32::new(0), + } + } + + #[inline] + pub fn read_lock(&self, owner_id: u32) { + if self.writer.load(Ordering::Acquire) && self.owner.load(Ordering::Acquire) == owner_id { + // The current process holds the write lock, so it can "read" + return; + } + + loop { + while self.writer.load(Ordering::Acquire) { + unsafe { core::arch::x86_64::_mm_pause() }; + } + + self.readers.fetch_add(1, Ordering::AcqRel); + + if !self.writer.load(Ordering::Acquire) { + break; + } + + self.readers.fetch_sub(1, Ordering::AcqRel); + } + } + + #[inline] + pub fn read_unlock(&self, owner_id: u32) { + if self.writer.load(Ordering::Acquire) && self.owner.load(Ordering::Acquire) == owner_id { + return; + } + self.readers.fetch_sub(1, Ordering::AcqRel); + } + + #[inline] + pub fn write_lock(&self, owner_id: u32) { + if self.writer.load(Ordering::Acquire) && self.owner.load(Ordering::Acquire) == owner_id { + self.recursion.fetch_add(1, Ordering::AcqRel); + return; + } + + while self.writer.swap(true, Ordering::AcqRel) { + while self.writer.load(Ordering::Acquire) { + unsafe { core::arch::x86_64::_mm_pause() }; + } + } + + while self.readers.load(Ordering::Acquire) > 0 { + unsafe { core::arch::x86_64::_mm_pause() }; + } + + self.owner.store(owner_id, Ordering::Release); + self.recursion.store(1, Ordering::Release); + } + + #[inline] + pub fn write_unlock(&self) { + let recursion = self.recursion.load(Ordering::Acquire); + + if recursion <= 1 { + self.recursion.store(0, Ordering::Release); + self.owner.store(0, Ordering::Release); + self.writer.store(false, Ordering::Release); + } else { + self.recursion.store(recursion - 1, Ordering::Release); + } + } +} \ No newline at end of file diff --git a/kernel/atomic/rust/src/spinlock.rs b/kernel/atomic/rust/src/spinlock.rs new file mode 100644 index 0000000..e8cf5f4 --- /dev/null +++ b/kernel/atomic/rust/src/spinlock.rs @@ -0,0 +1,255 @@ +use core::sync::atomic::{AtomicBool, AtomicU32, AtomicU64, Ordering}; +use core::arch::x86_64::{_rdtsc, _mm_pause}; +use core::cell::UnsafeCell; +use core::marker::PhantomData; +use core::ops::{Deref, DerefMut}; + +const DEADLOCK_TIMEOUT_CYCLES: u64 = 100_000_000; +const MAX_BACKOFF_CYCLES: u64 = 1024; +const CONTENTION_THRESHOLD: u32 = 16; +const YIELD_THRESHOLD: u32 = 64; + +#[inline(always)] +fn rdtsc() -> u64 { + unsafe { _rdtsc() } +} + +#[inline(always)] +fn pause() { + unsafe { _mm_pause() } +} + +#[inline(always)] +fn backoff_delay(cycles: u64) { + let start = rdtsc(); + while rdtsc() - start < cycles { + pause(); + } +} + +extern "C" { + fn lapic_get_id() -> u32; +} + + +#[repr(C)] +pub struct SpinLock { + locked: AtomicBool, + contention_count: AtomicU32, + owner_cpu: AtomicU32, + acquire_time: AtomicU64, +} + +// RAII guard for automatic unlock +pub struct SpinLockGuard<'a, T> { + lock: &'a SpinLock, + data: &'a mut T, + _phantom: PhantomData<*const ()>, // !Send + !Sync +} + +// Advanced spinlock with data protection +#[repr(C)] +pub struct SpinMutex<T> { + lock: SpinLock, + data: UnsafeCell<T>, +} + +unsafe impl<T: Send> Sync for SpinMutex<T> {} +unsafe impl<T: Send> Send for SpinMutex<T> {} + +impl SpinLock { + pub const fn new() -> Self { + Self { + locked: AtomicBool::new(false), + contention_count: AtomicU32::new(0), + owner_cpu: AtomicU32::new(u32::MAX), + acquire_time: AtomicU64::new(0), + } + } + + #[inline] + pub fn lock(&self) { + let start = rdtsc(); + let mut cpu_id = 0; + unsafe { + cpu_id = lapic_get_id(); + } + let mut backoff = 1u64; + let mut
attempts = 0u32; + let mut local_spins = 0u32; + + loop { + // Fast path: try to acquire without contention + if self.try_lock_fast(cpu_id, start) { + return; + } + + // Deadlock detection with owner tracking + if rdtsc() - start > DEADLOCK_TIMEOUT_CYCLES { + self.handle_potential_deadlock(cpu_id, start); + continue; + } + + attempts += 1; + local_spins += 1; + + // Adaptive strategy based on contention level + let contention = self.contention_count.load(Ordering::Relaxed); + + if contention < CONTENTION_THRESHOLD { + // Low contention: aggressive spinning + self.spin_wait_adaptive(local_spins); + } else if attempts < YIELD_THRESHOLD { + // Medium contention: exponential backoff + backoff_delay(backoff); + backoff = (backoff * 2).min(MAX_BACKOFF_CYCLES); + } else { + // High contention: yield to scheduler (kernel would implement this) + backoff_delay(MAX_BACKOFF_CYCLES); + local_spins = 0; + } + } + } + + #[inline] + fn try_lock_fast(&self, cpu_id: u32, start_time: u64) -> bool { + if !self.locked.load(Ordering::Relaxed) + && !self.locked.swap(true, Ordering::Acquire) { + self.owner_cpu.store(cpu_id, Ordering::Relaxed); + self.acquire_time.store(start_time, Ordering::Relaxed); + return true; + } + false + } + + #[inline] + fn spin_wait_adaptive(&self, local_spins: u32) { + let spin_count = if local_spins < 32 { 4 } else { 16 }; + for _ in 0..spin_count { + if !self.locked.load(Ordering::Relaxed) { + break; + } + pause(); + } + } + + #[inline] + fn handle_potential_deadlock(&self, cpu_id: u32, start_time: u64) { + let owner = self.owner_cpu.load(Ordering::Relaxed); + let owner_time = self.acquire_time.load(Ordering::Relaxed); + + // In a real kernel, this would check if owner CPU is still alive + // and potentially break the lock or log the deadlock + if owner == cpu_id { + // Self-deadlock detected - this should never happen + return; + } + + // Force yield and reset timing + backoff_delay(MAX_BACKOFF_CYCLES); + self.contention_count.fetch_add(1, Ordering::Relaxed); + } + + #[inline] + pub fn unlock(&self) { + self.owner_cpu.store(u32::MAX, Ordering::Relaxed); + self.acquire_time.store(0, Ordering::Relaxed); + self.locked.store(false, Ordering::Release); + + // Decay contention counter over time + let contention = self.contention_count.load(Ordering::Relaxed); + if contention > 0 { + self.contention_count.store(contention.saturating_sub(1), Ordering::Relaxed); + } + } + + #[inline] + pub fn try_lock(&self) -> bool { + if !self.locked.swap(true, Ordering::Acquire) { + let mut cpu_id = 0; + unsafe { + cpu_id = lapic_get_id(); + } + self.owner_cpu.store(cpu_id, Ordering::Relaxed); + self.acquire_time.store(rdtsc(), Ordering::Relaxed); + true + } else { + false + } + } + + #[inline] + pub fn is_locked(&self) -> bool { + self.locked.load(Ordering::Relaxed) + } + + #[inline] + pub fn contention_level(&self) -> u32 { + self.contention_count.load(Ordering::Relaxed) + } +} + +// RAII Guard implementation +impl<'a, T> SpinLockGuard<'a, T> { + fn new(lock: &'a SpinLock, data: &'a mut T) -> Self { + Self { + lock, + data, + _phantom: PhantomData, + } + } +} + +impl<'a, T> Drop for SpinLockGuard<'a, T> { + fn drop(&mut self) { + self.lock.unlock(); + } +} + +impl<'a, T> Deref for SpinLockGuard<'a, T> { + type Target = T; + fn deref(&self) -> &T { + self.data + } +} + +impl<'a, T> DerefMut for SpinLockGuard<'a, T> { + fn deref_mut(&mut self) -> &mut T { + self.data + } +} + +// SpinMutex implementation +impl<T> SpinMutex<T> { + pub const fn new(data: T) -> Self { + Self { + lock: SpinLock::new(), + data:
UnsafeCell::new(data), + } + } + + pub fn lock(&'_ self) -> SpinLockGuard<'_, T> { + self.lock.lock(); + SpinLockGuard::new(&self.lock, unsafe { &mut *self.data.get() }) + } + + pub fn try_lock(&'_ self) -> Option<SpinLockGuard<'_, T>> { + if self.lock.try_lock() { + Some(SpinLockGuard::new(&self.lock, unsafe { &mut *self.data.get() })) + } else { + None + } + } + + pub fn is_locked(&self) -> bool { + self.lock.is_locked() + } + + pub fn contention_level(&self) -> u32 { + self.lock.contention_level() + } + + pub fn into_inner(self) -> T { + self.data.into_inner() + } +} \ No newline at end of file diff --git a/kernel/core/Compositor.c b/kernel/core/Compositor.c index 21e058c..60203d5 100644 --- a/kernel/core/Compositor.c +++ b/kernel/core/Compositor.c @@ -4,11 +4,12 @@ #include "KernelHeap.h" #include "MLFQ.h" #include "MemOps.h" -#include "StringOps.h" -#include "Vesa.h" #include "Pallete.h" -#include "Spinlock.h" +#include "Panic.h" +#include "Scheduler.h" +#include "SpinlockRust.h" +#include "StringOps.h" +#include "Vesa.h" // --- Globals --- #define MAX_WINDOWS 16 static Window* g_window_list_head = NULL; @@ -26,7 +27,7 @@ typedef struct { } WindowStateMapping; static WindowStateMapping g_window_state_map[MAX_WINDOWS]; -static irq_flags_t g_text_lock = 0; +static RustSpinLock* g_text_lock = NULL; static Window* g_vfshell_window = NULL; void VFCompositorRequestInit(const char * str) { @@ -64,18 +65,18 @@ void VFCompositorRequestInit(const char * str) { Window* GetWindowByTitle(const char* title) { if (!title) return NULL; - irq_flags_t flags = SpinLockIrqSave(&g_text_lock); + uint64_t flags = rust_spinlock_lock_irqsave(g_text_lock); Window* current = g_window_list_head; while (current) { if (current->title && FastStrCmp(current->title, title) == 0) { - SpinUnlockIrqRestore(&g_text_lock, flags); + rust_spinlock_unlock_irqrestore(g_text_lock, flags); return current; } current = current->next; } - SpinUnlockIrqRestore(&g_text_lock, flags); + rust_spinlock_unlock_irqrestore(g_text_lock, flags); return NULL; } @@ -146,11 +147,11 @@ void WindowScrollUp(Window* window) { void WindowPrintChar(Window* window, char c) { if (!window) return; - irq_flags_t flags = SpinLockIrqSave(&g_text_lock); + uint64_t flags = rust_spinlock_lock_irqsave(g_text_lock); WindowTextState* state = GetWindowTextState(window); if (!state) { - SpinUnlockIrqRestore(&g_text_lock, flags); + rust_spinlock_unlock_irqrestore(g_text_lock, flags); return; } @@ -203,7 +204,7 @@ void WindowPrintChar(Window* window, char c) { state->needs_refresh = true; window->needs_redraw = true; - SpinUnlockIrqRestore(&g_text_lock, flags); + rust_spinlock_unlock_irqrestore(g_text_lock, flags); } // Print string to window @@ -221,7 +222,7 @@ void WindowClearText(Window* window) { WindowTextState* state = GetWindowTextState(window); if (!state) return; - irq_flags_t flags = SpinLockIrqSave(&g_text_lock); + uint64_t flags = rust_spinlock_lock_irqsave(g_text_lock); FastMemset(state->buffer, 0, sizeof(state->buffer)); state->cursor_row = 0; @@ -229,11 +230,16 @@ void WindowClearText(Window* window) { state->needs_refresh = true; window->needs_redraw = true; - SpinUnlockIrqRestore(&g_text_lock, flags); + rust_spinlock_unlock_irqrestore(g_text_lock, flags); } // Update VFCompositor to cache VFShell window reference void VFCompositor(void) { + g_text_lock = rust_spinlock_new(); + if (!g_text_lock) { + PrintKernelError("VFCompositor: Failed to initialize text lock\n"); + return; + } Snooze(); if (!VBEIsInitialized()) { @@ -275,9 +281,8 @@ void VFCompositor(void) { }
WindowManagerRun(); - // MLFQYield(); } else { - MLFQYield(); + Yield(); } } diff --git a/kernel/etc/Console.c b/kernel/etc/Console.c index ba1f3de..d6b86df 100644 --- a/kernel/etc/Console.c +++ b/kernel/etc/Console.c @@ -4,13 +4,14 @@ #include "Io.h" #include "Scheduler.h" #include "Serial.h" -#include "Spinlock.h" +#include "SpinlockRust.h" #include "VBEConsole.h" #include "Vesa.h" #include "stdarg.h" #include "stdbool.h" #include "stdint.h" +static RustSpinLock* console_lock = NULL; // For future use - a DE/VM? static uint8_t snooze = 0; // VBE mode flag @@ -33,8 +34,6 @@ ConsoleT console = { .color = VGA_COLOR_DEFAULT }; -static volatile int lock = 0; - static void PrintToVFShell(const char* message) { Window* vfshell = GetVFShellWindow(); if (vfshell) { @@ -59,6 +58,11 @@ void Unsnooze() { // Initialize console - auto-detect VBE or VGA void ConsoleInit(void) { + console_lock = rust_spinlock_new(); + if (!console_lock) { + PrintKernel("Console: Failed to allocate spinlock\n"); + return; + } if (VBEIsInitialized()) { use_vbe = 1; VBEConsoleInit(); @@ -82,7 +86,7 @@ static void ConsolePutcharAt(char c, uint32_t x, uint32_t y, uint8_t color) { } void ClearScreen(void) { - SpinLock(&lock); + rust_spinlock_lock(console_lock); if (use_vbe) { VBEConsoleClear(); } else { @@ -102,7 +106,7 @@ void ClearScreen(void) { UpdateCursor(); } - SpinUnlock(&lock); + rust_spinlock_unlock(console_lock); } static void ConsoleScroll(void) { @@ -174,7 +178,8 @@ void PrintKernel(const char* str) { SerialWrite(str); return; } - SpinLock(&lock); + + rust_spinlock_lock(console_lock); if (use_vbe) { VBEConsolePrint(str); } else { @@ -184,7 +189,7 @@ void PrintKernel(const char* str) { } console.color = original_color; } - SpinUnlock(&lock); + rust_spinlock_unlock(console_lock); SerialWrite(str); } diff --git a/kernel/ipc/Ipc.c b/kernel/ipc/Ipc.c index e16d133..12c11ce 100644 --- a/kernel/ipc/Ipc.c +++ b/kernel/ipc/Ipc.c @@ -1,8 +1,6 @@ #include "Ipc.h" #include "../../mm/MemOps.h" -#include "Panic.h" -#include "MLFQ.h" -#include "Spinlock.h" +#include "Scheduler.h" static uint32_t next_sequence_id = 1; @@ -41,10 +39,11 @@ static uint32_t find_priority_message(MessageQueue* queue, IpcPriority min_prior IpcResult IpcSendMessage(uint32_t target_pid, const IpcMessage* msg) { if (!msg) return IPC_ERROR_INVALID_MSG; - MLFQProcessControlBlock* target = MLFQGetCurrentProcessByPID(target_pid); + CurrentProcessControlBlock* target = GetCurrentProcessByPID(target_pid); if (!target) return IPC_ERROR_NO_PROCESS; MessageQueue* queue = &target->ipc_queue; - SpinLock(&queue->lock); + if (!queue->lock) queue->lock = rust_spinlock_new(); + rust_spinlock_lock(queue->lock); IpcMessage* dest = NULL; if (queue->count >= MAX_MESSAGES) { // Try to drop lowest priority message if this is higher priority @@ -63,11 +62,11 @@ IpcResult IpcSendMessage(uint32_t target_pid, const IpcMessage* msg) { dest = &queue->messages[drop_idx]; queue->dropped_count++; } else { - SpinUnlock(&queue->lock); + rust_spinlock_unlock(queue->lock); return IPC_ERROR_QUEUE_FULL; } } else { - SpinUnlock(&queue->lock); + rust_spinlock_unlock(queue->lock); return IPC_ERROR_QUEUE_FULL; } } else { @@ -77,7 +76,7 @@ IpcResult IpcSendMessage(uint32_t target_pid, const IpcMessage* msg) { queue->count++; } FastMemcpy(dest, msg, sizeof(IpcMessage)); - dest->timestamp = MLFQGetSystemTicks(); + dest->timestamp = GetSystemTicks(); dest->sequence_id = get_next_sequence_id(); // Recompute the priority bitmap so it's accurate after any replacement queue->priority_bitmap = 0; @@ 
-89,16 +88,17 @@ IpcResult IpcSendMessage(uint32_t target_pid, const IpcMessage* msg) { if (target->state == PROC_BLOCKED) { target->state = PROC_READY; } - SpinUnlock(&queue->lock); + rust_spinlock_unlock(queue->lock); return IPC_SUCCESS; } IpcResult IpcReceiveMessage(IpcMessage* msg_buffer) { if (!msg_buffer) return IPC_ERROR_INVALID_MSG; - MLFQProcessControlBlock* current = MLFQGetCurrentProcess(); + CurrentProcessControlBlock* current = GetCurrentProcess(); MessageQueue* queue = ¤t->ipc_queue; while (true) { - SpinLock(&queue->lock); + if (!queue->lock) queue->lock = rust_spinlock_new(); + rust_spinlock_lock(queue->lock); if (queue->count > 0) { uint32_t msg_idx = find_priority_message(queue, IPC_PRIORITY_LOW); @@ -127,22 +127,22 @@ IpcResult IpcReceiveMessage(IpcMessage* msg_buffer) { } } - SpinUnlock(&queue->lock); + rust_spinlock_unlock(queue->lock); return IPC_SUCCESS; } current->state = PROC_BLOCKED; - SpinUnlock(&queue->lock); - MLFQYield(); + rust_spinlock_unlock(queue->lock); + Yield(); } } IpcResult IpcReceiveMessageType(IpcMessage* msg_buffer, IpcMessageType type) { if (!msg_buffer) return IPC_ERROR_INVALID_MSG; - MLFQProcessControlBlock* current = MLFQGetCurrentProcess(); + CurrentProcessControlBlock* current = GetCurrentProcess(); MessageQueue* queue = ¤t->ipc_queue; while (true) { - SpinLock(&queue->lock); + rust_spinlock_lock(queue->lock); // Look for message of specific type for (uint32_t i = 0; i < queue->count; i++) { @@ -166,15 +166,15 @@ IpcResult IpcReceiveMessageType(IpcMessage* msg_buffer, IpcMessageType type) { uint32_t idx2 = (queue->head + k) % MAX_MESSAGES; update_priority_bitmap(queue, queue->messages[idx2].priority); } - SpinUnlock(&queue->lock); + rust_spinlock_unlock(queue->lock); return IPC_SUCCESS; } } // Mark blocked while still holding the lock to avoid a wakeup‐before‐block race current->state = PROC_BLOCKED; - SpinUnlock(&queue->lock); - MLFQYield(); + rust_spinlock_unlock(queue->lock); + Yield(); } } @@ -184,7 +184,7 @@ IpcResult IpcSendRequest(uint32_t target_pid, const void* request_data, uint64_t } IpcMessage msg = { - .sender_pid = MLFQGetCurrentProcess()->pid, + .sender_pid = GetCurrentProcess()->pid, .type = IPC_TYPE_REQUEST, .priority = IPC_PRIORITY_NORMAL, .size = size + 8 @@ -202,7 +202,7 @@ IpcResult IpcSendResponse(uint32_t target_pid, uint32_t request_id, const void* if (size > (IPC_MAX_PAYLOAD - 8)) return IPC_ERROR_INVALID_MSG; IpcMessage msg = { - .sender_pid = MLFQGetCurrentProcess()->pid, + .sender_pid = GetCurrentProcess()->pid, .type = IPC_TYPE_RESPONSE, .priority = IPC_PRIORITY_HIGH, .size = size + 8 @@ -218,7 +218,7 @@ IpcResult IpcSendResponse(uint32_t target_pid, uint32_t request_id, const void* } uint32_t IpcGetQueueCount(void) { - MLFQProcessControlBlock* current = MLFQGetCurrentProcess(); + CurrentProcessControlBlock* current = GetCurrentProcess(); return current->ipc_queue.count; } @@ -227,27 +227,27 @@ bool IpcHasMessages(void) { } bool IpcHasMessageType(IpcMessageType type) { - MLFQProcessControlBlock* current = MLFQGetCurrentProcess(); + CurrentProcessControlBlock* current = GetCurrentProcess(); MessageQueue* queue = ¤t->ipc_queue; - SpinLock(&queue->lock); + rust_spinlock_lock(queue->lock); for (uint32_t i = 0; i < queue->count; i++) { uint32_t idx = (queue->head + i) % MAX_MESSAGES; if (queue->messages[idx].type == type) { - SpinUnlock(&queue->lock); + rust_spinlock_unlock(queue->lock); return true; } } - SpinUnlock(&queue->lock); + rust_spinlock_unlock(queue->lock); return false; } void IpcFlushQueue(void) { - 
MLFQProcessControlBlock* current = MLFQGetCurrentProcess(); + CurrentProcessControlBlock* current = GetCurrentProcess(); MessageQueue* queue = ¤t->ipc_queue; - SpinLock(&queue->lock); + rust_spinlock_lock(queue->lock); queue->head = queue->tail = queue->count = 0; queue->priority_bitmap = 0; - SpinUnlock(&queue->lock); + rust_spinlock_unlock(queue->lock); } diff --git a/kernel/ipc/Ipc.h b/kernel/ipc/Ipc.h index 370d208..35d3227 100644 --- a/kernel/ipc/Ipc.h +++ b/kernel/ipc/Ipc.h @@ -1,6 +1,7 @@ #ifndef IPC_H #define IPC_H +#include "SpinlockRust.h" #include "StringOps.h" #include "stdbool.h" #include "stdint.h" @@ -56,7 +57,7 @@ typedef struct { uint32_t head; uint32_t tail; uint32_t count; - volatile int lock; // Spinlock for thread safety + RustSpinLock* lock; // Spinlock for thread safety uint32_t dropped_count; // Track dropped messages uint32_t priority_bitmap; // Track priority levels present } MessageQueue; diff --git a/kernel/sched/EEVDF.c b/kernel/sched/EEVDF.c index 57e9c01..aeee835 100644 --- a/kernel/sched/EEVDF.c +++ b/kernel/sched/EEVDF.c @@ -11,6 +11,7 @@ #include "Panic.h" #include "Shell.h" #include "Spinlock.h" +#include "SpinlockRust.h" #include "VFS.h" #include "VMem.h" #include "x64.h" @@ -55,11 +56,12 @@ const uint32_t eevdf_nice_to_wmult[40] = { static EEVDFProcessControlBlock processes[EEVDF_MAX_PROCESSES] ALIGNED_CACHE; static volatile uint32_t next_pid = 1; static uint64_t pid_bitmap[EEVDF_MAX_PROCESSES / 64 + 1] = {0}; -static volatile irq_flags_t pid_lock = 0; static volatile uint32_t current_process = 0; static volatile uint32_t process_count = 0; static volatile int need_schedule = 0; -static volatile int scheduler_lock = 0; +static RustSpinLock* pid_lock = NULL; +static RustSpinLock* eevdf_lock = NULL; + rwlock_t process_table_rwlock_eevdf = {0}; // Security subsystem @@ -683,7 +685,7 @@ void EEVDFUpdateClock(EEVDFRunqueue* rq) { } void EEVDFSchedule(Registers* regs) { - irq_flags_t flags = SpinLockIrqSave(&scheduler_lock); + uint64_t flags = rust_spinlock_lock_irqsave(eevdf_lock); AtomicInc(&scheduler_calls); AtomicInc(&eevdf_scheduler.tick_counter); @@ -782,10 +784,15 @@ pick_retry:; EEVDFCleanupTerminatedProcessInternal(); } - SpinUnlockIrqRestore(&scheduler_lock, flags); + rust_spinlock_unlock_irqrestore(eevdf_lock, flags); } int EEVDFSchedInit(void) { + if (!eevdf_lock && !pid_lock) { + eevdf_lock = rust_spinlock_new(); + pid_lock = rust_spinlock_new(); + if (!eevdf_lock || !pid_lock) PANIC("EEVDFSchedInit: Failed to allocate locks"); + } PrintKernel("System: Initializing EEVDF scheduler...\n"); // Initialize process array FastMemset(processes, 0, sizeof(EEVDFProcessControlBlock) * EEVDF_MAX_PROCESSES); @@ -861,22 +868,22 @@ uint32_t EEVDFCreateProcess(const char* name, void (*entry_point)(void)) { PANIC("EEVDFCreateProcess: NULL entry point"); } - irq_flags_t flags = SpinLockIrqSave(&scheduler_lock); + uint64_t flags = rust_spinlock_lock_irqsave(eevdf_lock); if (UNLIKELY(process_count >= EEVDF_MAX_PROCESSES)) { - SpinUnlockIrqRestore(&scheduler_lock, flags); + rust_spinlock_unlock_irqrestore(eevdf_lock, flags); PANIC("EEVDFCreateProcess: Too many processes"); } // Find free slot int slot = FindFreeSlotFast(); if (UNLIKELY(slot == -1)) { - SpinUnlockIrqRestore(&scheduler_lock, flags); + rust_spinlock_unlock_irqrestore(eevdf_lock, flags); PANIC("EEVDFCreateProcess: No free process slots"); } // Allocate PID uint32_t new_pid = 0; - SpinLock(&pid_lock); + rust_spinlock_lock(pid_lock); for (int i = 1; i < EEVDF_MAX_PROCESSES; i++) { int idx = i / 64; int 
bit = i % 64;
@@ -886,11 +893,11 @@ uint32_t EEVDFCreateProcess(const char* name, void (*entry_point)(void)) {
             break;
         }
     }
-    SpinUnlock(&pid_lock);
+    rust_spinlock_unlock(pid_lock);

     if (new_pid == 0) {
         FreeSlotFast(slot);
-        SpinUnlockIrqRestore(&scheduler_lock, flags);
+        rust_spinlock_unlock_irqrestore(eevdf_lock, flags);
         PANIC("EEVDFCreateProcess: PID exhaustion");
     }

@@ -901,7 +908,7 @@ uint32_t EEVDFCreateProcess(const char* name, void (*entry_point)(void)) {
     void* stack = VMemAllocStack(EEVDF_STACK_SIZE);
     if (UNLIKELY(!stack)) {
         FreeSlotFast(slot);
-        SpinUnlockIrqRestore(&scheduler_lock, flags);
+        rust_spinlock_unlock_irqrestore(eevdf_lock, flags);
         PANIC("EEVDFCreateProcess: Failed to allocate stack");
     }

@@ -959,7 +966,7 @@ uint32_t EEVDFCreateProcess(const char* name, void (*entry_point)(void)) {
     // Add to scheduler
     EEVDFEnqueueTask(&eevdf_scheduler.rq, proc);

-    SpinUnlockIrqRestore(&scheduler_lock, flags);
+    rust_spinlock_unlock_irqrestore(eevdf_lock, flags);
     return new_pid;
 }

@@ -1008,11 +1015,11 @@ static int EEVDFValidateToken(const EEVDFSecurityToken* token, uint32_t pid) {
 // =============================================================================

 static void EEVDFTerminateProcess(uint32_t pid, TerminationReason reason, uint32_t exit_code) {
-    irq_flags_t flags = SpinLockIrqSave(&scheduler_lock);
+    uint64_t flags = rust_spinlock_lock_irqsave(eevdf_lock);

     EEVDFProcessControlBlock* proc = EEVDFGetCurrentProcessByPID(pid);
     if (UNLIKELY(!proc || proc->state == PROC_DYING || proc->state == PROC_ZOMBIE || proc->state == PROC_TERMINATED)) {
-        SpinUnlockIrqRestore(&scheduler_lock, flags);
+        rust_spinlock_unlock_irqrestore(eevdf_lock, flags);
         return;
     }

@@ -1020,7 +1027,7 @@ static void EEVDFTerminateProcess(uint32_t pid, TerminationReason reason, uint32

     uint32_t slot = proc - processes;
     if (slot >= EEVDF_MAX_PROCESSES) {
-        SpinUnlockIrqRestore(&scheduler_lock, flags);
+        rust_spinlock_unlock_irqrestore(eevdf_lock, flags);
         return;
     }

@@ -1033,7 +1040,7 @@ static void EEVDFTerminateProcess(uint32_t pid, TerminationReason reason, uint32
     if (proc->privilege_level == EEVDF_PROC_PRIV_SYSTEM) {
         // Only system processes can kill system processes
         if (caller->privilege_level != EEVDF_PROC_PRIV_SYSTEM) {
-            SpinUnlockIrqRestore(&scheduler_lock, flags);
+            rust_spinlock_unlock_irqrestore(eevdf_lock, flags);
             PrintKernelError("[EEVDF-SECURITY] Process ");
             PrintKernelInt(caller->pid);
             PrintKernel(" tried to kill system process ");
@@ -1047,14 +1054,14 @@ static void EEVDFTerminateProcess(uint32_t pid, TerminationReason reason, uint32

     // Cannot terminate immune processes
     if (UNLIKELY(proc->token.flags & EEVDF_PROC_FLAG_IMMUNE)) {
-        SpinUnlockIrqRestore(&scheduler_lock, flags);
+        rust_spinlock_unlock_irqrestore(eevdf_lock, flags);
         EEVDFASTerminate(caller->pid, "Attempted termination of immune process");
         return;
     }

     // Cannot terminate critical system processes
     if (UNLIKELY(proc->token.flags & EEVDF_PROC_FLAG_CRITICAL)) {
-        SpinUnlockIrqRestore(&scheduler_lock, flags);
+        rust_spinlock_unlock_irqrestore(eevdf_lock, flags);
         EEVDFASTerminate(caller->pid, "Attempted termination of critical process");
         return;
     }

@@ -1062,7 +1069,7 @@ static void EEVDFTerminateProcess(uint32_t pid, TerminationReason reason, uint32

     // Validate caller's token before allowing termination
     if (UNLIKELY(!EEVDFValidateToken(&caller->token, caller->pid))) {
-        SpinUnlockIrqRestore(&scheduler_lock, flags);
+        rust_spinlock_unlock_irqrestore(eevdf_lock, flags);
         EEVDFASTerminate(caller->pid, "Token validation failed");
         return;
     }

@@ -1071,7 +1078,7 @@ static void EEVDFTerminateProcess(uint32_t pid, TerminationReason reason, uint32
     // Atomic state transition
     ProcessState old_state = proc->state;
     if (UNLIKELY(AtomicCmpxchg((volatile uint32_t*)&proc->state, old_state, PROC_DYING) != old_state)) {
-        SpinUnlockIrqRestore(&scheduler_lock, flags);
+        rust_spinlock_unlock_irqrestore(eevdf_lock, flags);
         return; // Race condition, another thread is handling termination
     }

@@ -1101,18 +1108,18 @@ static void EEVDFTerminateProcess(uint32_t pid, TerminationReason reason, uint32
     AddToTerminationQueueAtomic(slot); // Then add to queue

     // Free PID
-    SpinLock(&pid_lock);
+    rust_spinlock_lock(pid_lock);
     int idx = proc->pid / 64;
     int bit = proc->pid % 64;
     pid_bitmap[idx] &= ~(1ULL << bit);
-    SpinUnlock(&pid_lock);
+    rust_spinlock_unlock(pid_lock);

     // Update scheduler statistics
     if (eevdf_scheduler.total_processes > 0) {
         eevdf_scheduler.total_processes--;
     }

-    SpinUnlockIrqRestore(&scheduler_lock, flags);
+    rust_spinlock_unlock_irqrestore(eevdf_lock, flags);

 #ifdef VF_CONFIG_USE_CERBERUS
     CerberusUnregisterProcess(proc->pid);
@@ -1140,11 +1147,11 @@ static void EEVDFTerminateProcess(uint32_t pid, TerminationReason reason, uint32

 // EEVDF's deadly termination function - bypasses all protections
 static void EEVDFASTerminate(uint32_t pid, const char* reason) {
-    irq_flags_t flags = SpinLockIrqSave(&scheduler_lock);
+    uint64_t flags = rust_spinlock_lock_irqsave(eevdf_lock);

     EEVDFProcessControlBlock* proc = EEVDFGetCurrentProcessByPID(pid);
     if (!proc || proc->state == PROC_TERMINATED) {
-        SpinUnlockIrqRestore(&scheduler_lock, flags);
+        rust_spinlock_unlock_irqrestore(eevdf_lock, flags);
         return;
     }

@@ -1169,7 +1176,7 @@ static void EEVDFASTerminate(uint32_t pid, const char* reason) {
         eevdf_scheduler.total_processes--;
     }

-    SpinUnlockIrqRestore(&scheduler_lock, flags);
+    rust_spinlock_unlock_irqrestore(eevdf_lock, flags);

 #ifdef VF_CONFIG_PROCINFO_AUTO_CLEANUP
     char cleanup_path[256];
@@ -1291,9 +1298,9 @@ static void EEVDFCleanupTerminatedProcessInternal(void) {
 }

 void EEVDFCleanupTerminatedProcess(void) {
-    irq_flags_t flags = SpinLockIrqSave(&scheduler_lock);
+    uint64_t flags = rust_spinlock_lock_irqsave(eevdf_lock);
     EEVDFCleanupTerminatedProcessInternal();
-    SpinUnlockIrqRestore(&scheduler_lock, flags);
+    rust_spinlock_unlock_irqrestore(eevdf_lock, flags);
 }

 // =============================================================================
diff --git a/mm/PMem.c b/mm/PMem.c
index d71b7c4..870d88d 100644
--- a/mm/PMem.c
+++ b/mm/PMem.c
@@ -1,9 +1,10 @@
 #include "PMem.h"
-#include "VMem.h"
 #include "Console.h"
 #include "MemOps.h"
 #include "Multiboot2.h"
-#include "Spinlock.h"
+#include "Panic.h"
+#include "SpinlockRust.h"
+#include "VMem.h"

 // Support up to 128GB memory with dynamic bitmap allocation
 #define MAX_PAGE_BUFFER_OVERHEAD (1024 * 1024) // 1MB
@@ -24,12 +25,11 @@ static uint64_t huge_pages_allocated = 0;
 static uint64_t* page_bitmap = NULL;
 static uint64_t bitmap_words = 0;
 uint64_t total_pages = 0;
+static RustSpinLock* pmm_lock = NULL;
 static uint64_t used_pages = 0;
-static volatile int memory_lock = 0;
 static uint64_t next_free_hint = 0x100000 / PAGE_SIZE;
 static uint64_t low_memory_watermark = 0;
 static uint64_t allocation_failures = 0;
-volatile mcs_node_t* memory_mcs_lock = NULL;

 // Fast bitmap operations using 64-bit words
 static inline void MarkPageUsed(uint64_t page_idx) {
@@ -80,6 +80,10 @@ static inline int FindFirstFreeBit(uint64_t word) {
 }

 int MemoryInit(uint32_t multiboot_info_addr) {
+    if (!pmm_lock) {
+        pmm_lock = rust_spinlock_new();
+        if (!pmm_lock) PANIC("Failed to allocate PMM lock");
+    }
     used_pages = 0;
     allocation_failures = 0;

@@ -237,14 +241,13 @@ int MemoryInit(uint32_t multiboot_info_addr) {

 void* AllocPage(void) {
     if (!page_bitmap) return NULL; // Safety check
-
-    irq_flags_t flags = SpinLockIrqSave(&memory_lock);
+    uint64_t flags = rust_spinlock_lock_irqsave(pmm_lock);

     // Check low memory condition
     if (used_pages > (total_pages * 9) / 10) { // 90% used
         if (low_memory_watermark == 0) {
             low_memory_watermark = used_pages;
-            PrintKernelWarning("[MEMORY] Low memory warning: ");
+            PrintKernelWarning("System: Low memory warning: ");
             PrintKernelInt((total_pages - used_pages) * PAGE_SIZE / (1024 * 1024));
             PrintKernel("MB remaining\n");
         }
@@ -265,7 +268,7 @@ void* AllocPage(void) {
                 MarkPageUsed(page_idx);
                 next_free_hint = page_idx + 1;
                 void* page = (void*)(page_idx * PAGE_SIZE);
-                SpinUnlockIrqRestore(&memory_lock, flags);
+                rust_spinlock_unlock_irqrestore(pmm_lock, flags);
                 return page;
             }
         }
@@ -283,19 +286,20 @@ void* AllocPage(void) {
                 MarkPageUsed(page_idx);
                 next_free_hint = page_idx + 1;
                 void* page = (void*)(page_idx * PAGE_SIZE);
-                SpinUnlockIrqRestore(&memory_lock, flags);
+                rust_spinlock_unlock_irqrestore(pmm_lock, flags);
                 return page;
             }
         }
     }

     allocation_failures++;
-    SpinUnlockIrqRestore(&memory_lock, flags);
+    rust_spinlock_unlock_irqrestore(pmm_lock, flags);
     return NULL; // Out of memory
 }

 void* AllocHugePages(uint64_t num_pages) {
-    irq_flags_t flags = SpinLockIrqSave(&memory_lock);
+
+    uint64_t flags = rust_spinlock_lock_irqsave(pmm_lock);

     // Find contiguous 2MB-aligned region (512 pages)
     uint64_t pages_per_huge = HUGE_PAGE_SIZE / PAGE_SIZE; // 512
@@ -322,13 +326,13 @@ void* AllocHugePages(uint64_t num_pages) {
             }

             void* huge_page = (void*)(start * PAGE_SIZE);
-            SpinUnlockIrqRestore(&memory_lock, flags);
+            rust_spinlock_unlock_irqrestore(pmm_lock, flags);
             ++huge_pages_allocated;
             return huge_page;
         }
     }

-    SpinUnlockIrqRestore(&memory_lock, flags);
+    rust_spinlock_unlock_irqrestore(pmm_lock, flags);
     ++allocation_failures;
     return NULL; // No contiguous region found
 }
@@ -336,30 +340,30 @@ void* AllocHugePages(uint64_t num_pages) {

 void FreePage(void* page) {
     if (!page) {
-        PrintKernelError("[MEMORY] FreePage: NULL pointer\n");
+        PrintKernelError("System: FreePage: NULL pointer\n");
         return;
     }

     uint64_t addr = (uint64_t)page;
     if (addr % PAGE_SIZE != 0) {
-        PrintKernelError("[MEMORY] FreePage: Unaligned address ");
+        PrintKernelError("System: FreePage: Unaligned address ");
         PrintKernelHex(addr);
         PrintKernel("\n");
         return;
     }

     uint64_t page_idx = addr / PAGE_SIZE;
     if (page_idx >= total_pages) {
-        PrintKernelError("[MEMORY] FreePage: Page index out of bounds: ");
+        PrintKernelError("System: FreePage: Page index out of bounds: ");
         PrintKernelInt(page_idx);
         PrintKernel("\n");
         return;
     }

-    irq_flags_t flags = SpinLockIrqSave(&memory_lock);
+    uint64_t flags = rust_spinlock_lock_irqsave(pmm_lock);

     // Check for double free
     if (IsPageFree(page_idx)) {
-        SpinUnlockIrqRestore(&memory_lock, flags);
-        PrintKernelError("[MEMORY] Double free of page ");
+        rust_spinlock_unlock_irqrestore(pmm_lock, flags);
+        PrintKernelError("System: Double free of page ");
         PrintKernelHex(addr);
         PrintKernel("\n");
         return;
     }
@@ -371,7 +375,7 @@ void FreePage(void* page) {
         next_free_hint = page_idx;
     }

-    SpinUnlockIrqRestore(&memory_lock, flags);
+    rust_spinlock_unlock_irqrestore(pmm_lock, flags);
 }

 uint64_t GetFreeMemory(void) {
@@ -379,7 +383,9 @@ uint64_t GetFreeMemory(void) {
 }

 void GetDetailedMemoryStats(MemoryStats* stats) {
-    irq_flags_t flags = SpinLockIrqSave(&memory_lock);
+    if (!stats) return;
+
+    uint64_t flags = rust_spinlock_lock_irqsave(pmm_lock);

     stats->total_physical_bytes = total_pages * PAGE_SIZE;
     stats->used_physical_bytes = used_pages * PAGE_SIZE;
@@ -427,6 +433,6 @@ void GetDetailedMemoryStats(MemoryStats* stats) {
         stats->fragmentation_score = 0;
     }

-    SpinUnlockIrqRestore(&memory_lock, flags);
+    rust_spinlock_unlock_irqrestore(pmm_lock, flags);
 }

diff --git a/mm/VMem.c b/mm/VMem.c
index db38a84..be0e0c4 100644
--- a/mm/VMem.c
+++ b/mm/VMem.c
@@ -90,10 +90,10 @@ static inline uint64_t GetBuddyAddr(uint64_t addr, uint32_t order) {
 static void BuddyAddFreeBlock(uint64_t addr, uint32_t order) {
     if (order >= BUDDY_NUM_ORDERS) return;
-    
+
     VMemFreeBlock* node = AllocBuddyNode();
     if (!node) return;
-    
+
     node->base = addr;
     node->size = OrderToSize(order);
     node->next = buddy_free_lists[order];
@@ -102,10 +102,10 @@ static VMemFreeBlock* BuddyRemoveFreeBlock(uint64_t addr, uint32_t order) {
     if (order >= BUDDY_NUM_ORDERS) return NULL;
-    
+
     VMemFreeBlock* prev = NULL;
     VMemFreeBlock* curr = buddy_free_lists[order];
-    
+
     while (curr) {
         if (curr->base == addr) {
             if (prev) prev->next = curr->next;
@@ -121,45 +121,45 @@ static VMemFreeBlock* BuddyRemoveFreeBlock(uint64_t addr, uint32_t order) {
 static uint64_t BuddyAlloc(uint64_t size) {
     uint32_t order = GetOrder(size);
     if (order >= BUDDY_NUM_ORDERS) return 0;
-    
+
     // Find smallest available block
     for (uint32_t curr_order = order; curr_order < BUDDY_NUM_ORDERS; curr_order++) {
         if (!buddy_free_lists[curr_order]) continue;
-        
+
         VMemFreeBlock* block = buddy_free_lists[curr_order];
         buddy_free_lists[curr_order] = block->next;
         uint64_t addr = block->base;
         ReleaseBuddyNode(block);
-        
+
         // Split down to required order
         while (curr_order > order) {
             curr_order--;
             uint64_t buddy_addr = addr + OrderToSize(curr_order);
             BuddyAddFreeBlock(buddy_addr, curr_order);
         }
-        
+
         return addr;
     }
-    
+
     return 0; // No free blocks
 }

 static void BuddyFree(uint64_t addr, uint64_t size) {
     uint32_t order = GetOrder(size);
     if (order >= BUDDY_NUM_ORDERS) return;
-    
+
     // Try to coalesce with buddy
     while (order < BUDDY_NUM_ORDERS - 1) {
         uint64_t buddy_addr = GetBuddyAddr(addr, order);
         VMemFreeBlock* buddy = BuddyRemoveFreeBlock(buddy_addr, order);
-        
+
         if (!buddy) break; // Buddy not free
-        
+
         ReleaseBuddyNode(buddy);
         if (buddy_addr < addr) addr = buddy_addr;
         order++;
     }
-    
+
     BuddyAddFreeBlock(addr, order);
 }

@@ -175,13 +175,13 @@ static inline int IsValidVirtAddr(uint64_t vaddr) {
 }

 static inline uint64_t* GetTableVirt(uint64_t phys_addr) {
-    return (phys_addr < IDENTITY_MAP_SIZE) ? 
+    return (phys_addr < IDENTITY_MAP_SIZE) ?
         (uint64_t*)phys_addr : (uint64_t*)PHYS_TO_VIRT(phys_addr);
 }

 static void flush_tlb_batch(void) {
     if (tlb_batch_count == 0) return;
-    
+
     if (tlb_batch_count > 8) {
         VMemFlushTLB();
     } else {
@@ -204,7 +204,7 @@ static void* alloc_identity_page_table(void) {
     if (pt_cache_count > 0) {
         return pt_cache[--pt_cache_count];
     }
-    
+
     for (uint32_t attempt = 0; attempt < 32; attempt++) {
         void* candidate = AllocPage();
         if (!candidate) break;
@@ -227,45 +227,45 @@ static void cache_page_table(void* pt) {

 void VMemInit(void) {
     InitBuddyNodePool();
-    
+
     // Initialize buddy allocator
     for (int i = 0; i < BUDDY_NUM_ORDERS; i++) {
         buddy_free_lists[i] = NULL;
     }
-    
+
     // Set up buddy regions
     buddy_region_start[0] = VIRT_ADDR_SPACE_LOW_START;
     buddy_region_size[0] = VIRT_ADDR_SPACE_LOW_END - VIRT_ADDR_SPACE_LOW_START + 1;
     buddy_region_start[1] = VIRT_ADDR_SPACE_HIGH_START;
     buddy_region_size[1] = VIRT_ADDR_SPACE_HIGH_END - VIRT_ADDR_SPACE_HIGH_START + 1;
-    
+
     // Add initial free blocks (largest possible)
     for (int region = 0; region < 2; region++) {
         uint64_t addr = buddy_region_start[region];
         uint64_t remaining = buddy_region_size[region];
-        
+
         while (remaining >= PAGE_SIZE) {
             uint32_t order = BUDDY_NUM_ORDERS - 1;
             while (order > 0 && OrderToSize(order) > remaining) {
                 order--;
             }
-            
+
             BuddyAddFreeBlock(addr, order);
             uint64_t block_size = OrderToSize(order);
             addr += block_size;
             remaining -= block_size;
         }
     }
-    
+
     // Get current PML4 from CR3 (set by bootstrap)
     uint64_t pml4_phys_addr;
     __asm__ volatile("mov %%cr3, %0" : "=r"(pml4_phys_addr));
     pml4_phys_addr &= ~0xFFF;
-    
+
     kernel_space.pml4 = (uint64_t*)pml4_phys_addr;
     kernel_space.used_pages = 0;
     kernel_space.total_mapped = IDENTITY_MAP_SIZE;
-    
+
     // Test identity mapping
     if (VMemGetPhysAddr(0x100000) != 0x100000) {
         PANIC("Bootstrap identity mapping failed - VALIDATION FAILED");
@@ -285,7 +285,7 @@ static uint64_t VMemGetPageTablePhys(uint64_t pml4_phys, uint64_t vaddr, uint32_
     uint64_t* table_virt = GetTableVirt(pml4_phys);
     uint32_t shift = 39U - (level * 9U);
     uint32_t index = (vaddr >> shift) & PT_INDEX_MASK;
-    
+
     if (!(table_virt[index] & PAGE_PRESENT)) {
         if (!create) return 0;

@@ -385,18 +385,18 @@ int VMemMapHuge(uint64_t vaddr, uint64_t paddr, uint64_t flags) {
 void* VMemAlloc(uint64_t size) {
     if (size == 0) return NULL;
     size = PAGE_ALIGN_UP(size);
-    
+
     irq_flags_t flags = SpinLockIrqSave(&vmem_lock);
-    
+
     uint64_t vaddr = BuddyAlloc(size);
     if (!vaddr) {
         SpinUnlockIrqRestore(&vmem_lock, flags);
         return NULL;
     }
-    
+
     vmem_allocations++;
     SpinUnlockIrqRestore(&vmem_lock, flags);
-    
+
     // Map physical pages
     for (uint64_t offset = 0; offset < size; offset += PAGE_SIZE) {
         void* paddr = AllocPage();
@@ -410,14 +410,14 @@ void* VMemAlloc(uint64_t size) {
             return NULL;
         }
     }
-    
+
     flush_tlb_batch();
-    
+
     flags = SpinLockIrqSave(&vmem_lock);
     kernel_space.used_pages += size / PAGE_SIZE;
     kernel_space.total_mapped += size;
     SpinUnlockIrqRestore(&vmem_lock, flags);
-    
+
     FastMemset((void*)vaddr, 0, size);
     return (void*)vaddr;
 }
@@ -437,9 +437,9 @@ void VMemFree(void* vaddr, uint64_t size) {
             FreePage((void*)paddr);
         }
     }
-    
+
     flush_tlb_batch();
-    
+
     irq_flags_t flags = SpinLockIrqSave(&vmem_lock);
     BuddyFree(start_vaddr, size);
     kernel_space.used_pages -= size / PAGE_SIZE;
@@ -544,7 +544,7 @@ int VMemUnmap(uint64_t vaddr, uint64_t size) {
     uint64_t num_pages = (end - start) / PAGE_SIZE;

     irq_flags_t flags = SpinLockIrqSave(&vmem_lock);
-    
+
     for (uint64_t i = 0; i < num_pages; i++) {
         uint64_t current_vaddr = start + (i * PAGE_SIZE);

@@ -558,7 +558,7 @@ int VMemUnmap(uint64_t vaddr, uint64_t size) {
         uint64_t* pd_virt = GetTableVirt(pd_phys);
         uint32_t pd_index = (current_vaddr >> PD_SHIFT) & PT_INDEX_MASK;
         uint64_t pde = pd_virt[pd_index];
-        
+
         if ((pde & PAGE_PRESENT) && (pde & PAGE_LARGE)) {
             if (IS_HUGE_PAGE_ALIGNED(current_vaddr) && (end - current_vaddr) >= HUGE_PAGE_SIZE) {
                 pd_virt[pd_index] = 0;
@@ -583,7 +583,7 @@ int VMemUnmap(uint64_t vaddr, uint64_t size) {
             add_to_tlb_batch(current_vaddr);
         }
     }
-    
+
     flush_tlb_batch();
     SpinUnlockIrqRestore(&vmem_lock, flags);
     return VMEM_SUCCESS;
@@ -607,7 +607,7 @@ void PrintVMemStats(void) {
     uint64_t frees = vmem_frees;
     uint64_t flushes = tlb_flushes;
     SpinUnlockIrqRestore(&vmem_lock, flags);
-    
+
     PrintKernel("[VMEM] Stats:\n");
     PrintKernel(" Used pages: "); PrintKernelInt(used); PrintKernel("\n");
     PrintKernel(" Mapped: "); PrintKernelInt(mapped / (1024 * 1024)); PrintKernel("MB\n");
@@ -629,9 +629,9 @@ int VMemMapMMIO(uint64_t vaddr, uint64_t paddr, uint64_t size, uint64_t flags) {
     uint64_t mmio_flags = flags | PAGE_PRESENT | PAGE_NOCACHE | PAGE_WRITETHROUGH;
     uint64_t num_pages = size / PAGE_SIZE;
-    
+
     irq_flags_t irq_flags = SpinLockIrqSave(&vmem_lock);
-    
+
     for (uint64_t i = 0; i < num_pages; i++) {
         uint64_t current_vaddr = vaddr + (i * PAGE_SIZE);
         uint64_t current_paddr = paddr + (i * PAGE_SIZE);
@@ -665,7 +665,7 @@ int VMemMapMMIO(uint64_t vaddr, uint64_t paddr, uint64_t size, uint64_t flags) {
         pt_virt[pt_index] = current_paddr | mmio_flags;
         add_to_tlb_batch(current_vaddr);
     }
-    
+
     flush_tlb_batch();
     SpinUnlockIrqRestore(&vmem_lock, irq_flags);
     __asm__ volatile("mfence" ::: "memory");
@@ -701,7 +701,7 @@ void VMemUnmapMMIO(uint64_t vaddr, uint64_t size) {
             add_to_tlb_batch(current_vaddr);
         }
     }
-    
+
     flush_tlb_batch();
     SpinUnlockIrqRestore(&vmem_lock, irq_flags);
 }
@@ -758,7 +758,7 @@ void VMemFreeStack(void* stack_top, uint64_t size) {
 void VMemDumpFreeList(void) {
     irq_flags_t flags = SpinLockIrqSave(&vmem_lock);
     PrintKernel("[VMEM] Buddy Allocator Free Blocks:\n");
-    
+
     uint64_t total_free = 0;
     for (uint32_t order = 0; order < BUDDY_NUM_ORDERS; order++) {
         uint32_t count = 0;
@@ -767,19 +767,19 @@ void VMemDumpFreeList(void) {
             count++;
             current = current->next;
         }
-        
+
         if (count > 0) {
             uint64_t block_size = OrderToSize(order);
             uint64_t total_size = count * block_size;
             total_free += total_size;
-            
+
             PrintKernel(" Order "); PrintKernelInt(order); PrintKernel(" (");
             PrintKernelInt(block_size / 1024); PrintKernel("KB): "); PrintKernelInt(count);
             PrintKernel(" blocks, "); PrintKernelInt(total_size / (1024 * 1024)); PrintKernel("MB total\n");
         }
     }
-    
+
     PrintKernel("[VMEM] Total free: "); PrintKernelInt(total_free / (1024 * 1024)); PrintKernel("MB\n");
     SpinUnlockIrqRestore(&vmem_lock, flags);
-}
+}
\ No newline at end of file
diff --git a/mm/rust/CMakeLists.txt b/mm/rust/CMakeLists.txt
index d56994b..37df50f 100644
--- a/mm/rust/CMakeLists.txt
+++ b/mm/rust/CMakeLists.txt
@@ -1,7 +1,3 @@
-# Find Rust and Cargo
-find_program(CARGO_EXECUTABLE cargo REQUIRED)
-find_program(RUSTC_EXECUTABLE rustc REQUIRED)
-
 # Set target triple based on architecture
 if(CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64|AMD64")
     set(RUST_TARGET "x86_64-unknown-none")
@@ -34,8 +30,4 @@ set_target_properties(rust_heap PROPERTIES
     IMPORTED_LOCATION ${RUST_LIB_PATH}
 )

-add_dependencies(rust_heap rust_heap_build)
-
-# Install header
-install(FILES ${CMAKE_CURRENT_SOURCE_DIR}/../KernelHeapRust.h
-        DESTINATION include/mm)
\ No newline at end of file
+add_dependencies(rust_heap rust_heap_build)
\ No newline at end of file
diff --git a/mm/security/Cerberus.c b/mm/security/Cerberus.c
index 03c9055..0a80fe6 100644
--- a/mm/security/Cerberus.c
+++ b/mm/security/Cerberus.c
@@ -3,19 +3,19 @@
 #include "Console.h"
 #include "Format.h"
 #include "Ipc.h"
-#include "Spinlock.h"
+#include "SpinlockRust.h"
 #include "StackGuard.h"
 #include "StringOps.h"
 #include "VFS.h"
 #include "VMem.h"

 static CerberusState g_cerberus_state = {0};
-static volatile int cerberus_lock = 0;
+static RustSpinLock* cerberus_lock = NULL;
 static uint64_t system_ticks = 0;

 void CerberusLogViolation(CerberusViolationReport* report) {
     if (!g_cerberus_state.is_initialized || !report) return;
-    SpinLock(&cerberus_lock);
+    rust_spinlock_lock(cerberus_lock);

     // Update statistics
     g_cerberus_state.total_violations++;
@@ -24,7 +24,7 @@ void CerberusLogViolation(CerberusViolationReport* report) {
     proc_info->violation_count++;
     proc_info->last_violation = system_ticks;

-    SpinUnlock(&cerberus_lock);
+    rust_spinlock_unlock(cerberus_lock);

     // Console logging
     PrintKernelErrorF("[Cerberus] VIOLATION PID=%d: %s\n",
@@ -43,6 +43,11 @@ void CerberusLogViolation(CerberusViolationReport* report) {
 }

 void CerberusInit(void) {
+    cerberus_lock = rust_spinlock_new();
+    if (!cerberus_lock) {
+        PrintKernelError("Cerberus: Failed to create lock\n");
+        return;
+    }
     PrintKernel("Cerberus initializing...\n");

     // Initialize all structures
@@ -74,11 +79,11 @@ int CerberusRegisterProcess(uint32_t pid, uint64_t stack_base, uint64_t stack_si
         return -1;
     }

-    SpinLock(&cerberus_lock);
+    rust_spinlock_lock(cerberus_lock);

     CerberusProcessInfo* proc_info = &g_cerberus_state.process_info[pid];
     if (proc_info->is_monitored) {
-        SpinUnlock(&cerberus_lock);
+        rust_spinlock_unlock(cerberus_lock);
         return 0; // Already registered
     }

@@ -96,7 +101,7 @@ int CerberusRegisterProcess(uint32_t pid, uint64_t stack_base, uint64_t stack_si
     }
 #endif
     g_cerberus_state.monitored_processes++;
-    SpinUnlock(&cerberus_lock);
+    rust_spinlock_unlock(cerberus_lock);

     PrintKernelF("[Cerberus] Process %d registered\n", pid);
     return 0;
@@ -107,7 +112,7 @@ void CerberusUnregisterProcess(uint32_t pid) {
         return;
     }

-    SpinLock(&cerberus_lock);
+    rust_spinlock_lock(cerberus_lock);

     CerberusProcessInfo* proc_info = &g_cerberus_state.process_info[pid];
     if (proc_info->is_monitored) {
@@ -124,7 +129,7 @@ void CerberusUnregisterProcess(uint32_t pid) {
         }
     }

-    SpinUnlock(&cerberus_lock);
+    rust_spinlock_unlock(cerberus_lock);

     PrintKernelF("[Cerberus] Process %d unregistered\n", pid);
 }
@@ -326,7 +331,7 @@ void CerberusReportThreat(uint32_t pid, MemorySecurityViolation violation) {
 int CerberusTrackAlloc(uint64_t addr, uint64_t size, uint32_t pid) {
     if (!g_cerberus_state.is_initialized) return -1;

-    SpinLock(&cerberus_lock);
+    rust_spinlock_lock(cerberus_lock);

     // Find empty watch region slot
     for (int i = 0; i < CERBERUS_MAX_WATCH_REGIONS; i++) {
@@ -340,19 +345,19 @@ int CerberusTrackAlloc(uint64_t addr, uint64_t size, uint32_t pid) {
             region->is_stack_region = false;

             g_cerberus_state.active_regions++;
-            SpinUnlock(&cerberus_lock);
+            rust_spinlock_unlock(cerberus_lock);
             return 0;
         }
     }

-    SpinUnlock(&cerberus_lock);
+    rust_spinlock_unlock(cerberus_lock);
     return -1; // No space
 }

 int CerberusTrackFree(uint64_t addr, uint32_t pid) {
     if (!g_cerberus_state.is_initialized) return -1;

-    SpinLock(&cerberus_lock);
+    rust_spinlock_lock(cerberus_lock);

     // Find and remove watch region
     for (int i = 0; i < CERBERUS_MAX_WATCH_REGIONS; i++) {
@@ -360,12 +365,12 @@ int CerberusTrackFree(uint64_t addr, uint32_t pid) {
         if (region->is_active && region->base_addr == addr && region->process_id == pid) {
             region->is_active = false;
             g_cerberus_state.active_regions--;
-            SpinUnlock(&cerberus_lock);
+            rust_spinlock_unlock(cerberus_lock);
             return 0;
         }
     }

-    SpinUnlock(&cerberus_lock);
+    rust_spinlock_unlock(cerberus_lock);

     // Potential double-free
     CerberusViolationReport violation = {
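Editor's note (not part of the patch): the hunks above convert each subsystem from a static volatile int lock guarded by SpinLock/SpinUnlock to a lazily created RustSpinLock*. For reviewers, the C-side interface these call sites imply is sketched below; the declarations are an assumption inferred from usage in this diff (rust_spinlock_new checked against NULL, lock_irqsave returning the saved flags), not copied from the actual SpinlockRust.h.

    /* Assumed shape of the SpinlockRust.h shim, inferred from the call sites in this patch. */
    typedef struct RustSpinLock RustSpinLock;   /* opaque; allocated and owned on the Rust side */

    RustSpinLock* rust_spinlock_new(void);                     /* NULL on allocation failure */
    void rust_spinlock_lock(RustSpinLock* lock);
    void rust_spinlock_unlock(RustSpinLock* lock);
    uint64_t rust_spinlock_lock_irqsave(RustSpinLock* lock);   /* disables IRQs, returns saved flags */
    void rust_spinlock_unlock_irqrestore(RustSpinLock* lock, uint64_t flags);

Under this assumption the conversion pattern is mechanical: allocate the lock once during the subsystem's init (panicking or bailing out if rust_spinlock_new returns NULL), then pair every rust_spinlock_lock_irqsave(lock) with a rust_spinlock_unlock_irqrestore(lock, flags) on every exit path, exactly as done for pmm_lock, eevdf_lock, and cerberus_lock above.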