diff --git a/Kernel/Core/Kernel.c b/Kernel/Core/Kernel.c index 3d4a7ba..a41a4df 100644 --- a/Kernel/Core/Kernel.c +++ b/Kernel/Core/Kernel.c @@ -11,15 +11,11 @@ #include "../System/Syscall.h" #include "../System/Gdt.h" #include "Panic.h" +#include "stdbool.h" #include "UserMode.h" #include "../Drivers/Io.h" #include "../Memory/VMem.h" -#define NULL ((void*)0) -#define true 1 -#define false 0 -typedef int bool; - // VGA Constants #define VGA_BUFFER_ADDR 0xB8000 #define VGA_WIDTH 80 @@ -136,14 +132,14 @@ static void ConsolePutchar(char c) { // Modern string output with length checking void PrintKernel(const char* str) { if (!str) return; - + // Cache the original color const uint8_t original_color = console.color; - + for (const char* p = str; *p; p++) { ConsolePutchar(*p); } - + console.color = original_color; } @@ -170,52 +166,52 @@ void PrintKernelWarning(const char* str) { void PrintKernelHex(uint64_t num) { static const char hex_chars[] = "0123456789ABCDEF"; char buffer[19]; // "0x" + 16 hex digits + null terminator - + buffer[0] = '0'; buffer[1] = 'x'; - + if (num == 0) { buffer[2] = '0'; buffer[3] = '\0'; PrintKernel(buffer); return; } - + int pos = 18; buffer[pos--] = '\0'; - + while (num > 0 && pos >= 2) { buffer[pos--] = hex_chars[num & 0xF]; num >>= 4; } - + PrintKernel(&buffer[pos + 1]); } // Optimized integer printing with proper sign handling void PrintKernelInt(int64_t num) { char buffer[21]; // Max digits for 64-bit signed integer + sign + null - + if (num == 0) { PrintKernel("0"); return; } - + bool negative = num < 0; if (negative) num = -num; - + int pos = 20; buffer[pos--] = '\0'; - + while (num > 0 && pos >= 0) { buffer[pos--] = '0' + (num % 10); num /= 10; } - + if (negative && pos >= 0) { buffer[pos--] = '-'; } - + PrintKernel(&buffer[pos + 1]); } diff --git a/Kernel/Core/Panic.c b/Kernel/Core/Panic.c index 6cdbc32..816e152 100644 --- a/Kernel/Core/Panic.c +++ b/Kernel/Core/Panic.c @@ -10,6 +10,9 @@ int ForceReboot() { asm volatile("lidt %0" :: "m"(*(short*)0)); PrintKernelWarning("[SYSTEM] Rebooting now..."); asm volatile("int $0x3"); + while (1) { + __asm__ __volatile__("hlt"); + } } @@ -31,6 +34,9 @@ void __attribute__((noreturn)) Panic(const char* message) { PrintKernelError(message); PrintKernelError("\n[SYSTEM] Calling KernelPanicHandler()...\n"); KernelPanicHandler(); + while (1) { + __asm__ __volatile__("hlt"); + } } void __attribute__((noreturn)) PanicWithCode(const char* message, uint64_t error_code) { @@ -41,4 +47,7 @@ void __attribute__((noreturn)) PanicWithCode(const char* message, uint64_t error PrintKernelHex(error_code); PrintKernelError(" -- Not handled"); KernelPanicHandler(); + while (1) { + __asm__ __volatile__("hlt"); + } } \ No newline at end of file diff --git a/Kernel/Core/stdbool.h b/Kernel/Core/stdbool.h new file mode 100644 index 0000000..00f251d --- /dev/null +++ b/Kernel/Core/stdbool.h @@ -0,0 +1,6 @@ +#ifndef VOIDFRAME_STDBOOL_H +#define VOIDFRAME_STDBOOL_H +typedef int bool; +#define true 1 +#define false 0 +#endif diff --git a/Kernel/Drivers/Pic.h b/Kernel/Drivers/Pic.h index 504977b..a15dcdf 100644 --- a/Kernel/Drivers/Pic.h +++ b/Kernel/Drivers/Pic.h @@ -1,7 +1,7 @@ #ifndef PIC_H #define PIC_H -#define PIT_FREQUENCY_HZ 1000 +#define PIT_FREQUENCY_HZ 250 int PicInstall(); void PitInstall(); diff --git a/Kernel/Memory/VMem.c b/Kernel/Memory/VMem.c index 5fe4cbd..833a8c1 100644 --- a/Kernel/Memory/VMem.c +++ b/Kernel/Memory/VMem.c @@ -2,11 +2,12 @@ * @file VMem.c * @brief Fixed Implementation of the Virtual Memory Manager * - * This 
implementation fixes the critical issues in the original code: - * - Proper physical/virtual address conversion - * - Correct page table management - * - Proper memory cleanup - * - Better error handling + * CRITICAL FIXES: + * 1. Fixed PML4 address corruption in VMemInit + * 2. Fixed physical address masking (PAGE_MASK -> PT_ADDR_MASK) + * 3. Added proper error handling for PHYS_TO_VIRT conversion + * 4. Fixed spinlock usage in VMemMap/VMemUnmap + * 5. Added bounds checking for virtual addresses */ #include "VMem.h" @@ -15,6 +16,7 @@ #include "../Core/Kernel.h" #include "../Drivers/Interrupts.h" #include "../Core/Panic.h" + /** * @brief The kernel's virtual address space structure */ @@ -25,6 +27,12 @@ static VirtAddrSpace kernel_space; */ static volatile int vmem_lock = 0; +/** + * @brief Physical address mask for page table entries + * This is the critical fix - PAGE_MASK is for offset bits, not address bits + */ +#define PT_ADDR_MASK 0x000FFFFFFFFFF000ULL + /** * @brief Simple spinlock implementation */ @@ -38,11 +46,18 @@ static inline void vmem_spin_unlock(volatile int* lock) { __sync_lock_release(lock); } +/** + * @brief Validate that a physical address can be converted to virtual + */ +static inline int is_valid_phys_addr(uint64_t paddr) { + // Basic sanity check - adjust limits based on your system + return (paddr != 0 && paddr < 0x100000000ULL); // 4GB limit example +} + /** * @brief Initializes the kernel's virtual memory manager * - * Allocates and zeroes the PML4 table, sets up the kernel virtual address space, - * and initializes tracking structures. + * CRITICAL FIX: Don't overwrite the PML4 pointer after converting to virtual! */ void VMemInit(void) { // Allocate physical page for PML4 @@ -52,34 +67,37 @@ void VMemInit(void) { return; } + // CRITICAL FIX: Store physical address BEFORE converting + uint64_t pml4_phys_addr = (uint64_t)pml4_phys; + // Convert to virtual address and zero it - kernel_space.pml4 = (uint64_t*)PHYS_TO_VIRT(pml4_phys); - FastZeroPage(kernel_space.pml4); + uint64_t* pml4_virt = (uint64_t*)PHYS_TO_VIRT(pml4_phys); + FastZeroPage(pml4_virt); // Initialize kernel space tracking kernel_space.next_vaddr = VIRT_ADDR_SPACE_START; kernel_space.used_pages = 0; kernel_space.total_mapped = 0; - // Store physical address for CR3 - kernel_space.pml4 = (uint64_t*)pml4_phys; + // CRITICAL FIX: Store physical address, not virtual! + kernel_space.pml4 = (uint64_t*)pml4_phys_addr; - PrintKernel("VMemInit: Initialized kernel virtual memory manager\n"); + PrintKernel("VMemInit: Initialized kernel virtual memory manager at phys "); + PrintKernelHex(pml4_phys_addr); + PrintKernel("\n"); } /** * @brief Retrieves or creates a page table entry at a specific level * - * This function properly handles physical/virtual address conversion - * and ensures all new page tables are zeroed. 
- * - * @param pml4_phys Physical address of the current page table - * @param vaddr Virtual address to resolve - * @param level Current level (0=PDP, 1=PD, 2=PT) - * @param create If true, create new page table if missing - * @return Physical address of next level page table, or 0 on failure + * CRITICAL FIX: Proper physical address masking and validation */ static uint64_t VMemGetPageTablePhys(uint64_t pml4_phys, uint64_t vaddr, int level, int create) { + // Validate physical address + if (!is_valid_phys_addr(pml4_phys)) { + return 0; + } + // Convert physical address to virtual for access uint64_t* pml4_virt = (uint64_t*)PHYS_TO_VIRT(pml4_phys); @@ -87,6 +105,11 @@ static uint64_t VMemGetPageTablePhys(uint64_t pml4_phys, uint64_t vaddr, int lev int shift = 39 - (level * 9); int index = (vaddr >> shift) & PT_INDEX_MASK; + // Bounds check + if (index >= 512) { + return 0; + } + // Check if entry exists if (!(pml4_virt[index] & PAGE_PRESENT)) { if (!create) return 0; @@ -95,6 +118,12 @@ static uint64_t VMemGetPageTablePhys(uint64_t pml4_phys, uint64_t vaddr, int lev void* new_table_phys = AllocPage(); if (!new_table_phys) return 0; + // Validate new allocation + if (!is_valid_phys_addr((uint64_t)new_table_phys)) { + FreePage(new_table_phys); + return 0; + } + // Zero the new table uint64_t* new_table_virt = (uint64_t*)PHYS_TO_VIRT(new_table_phys); FastZeroPage(new_table_virt); @@ -105,20 +134,14 @@ static uint64_t VMemGetPageTablePhys(uint64_t pml4_phys, uint64_t vaddr, int lev return (uint64_t)new_table_phys; } - // Return existing table physical address - return pml4_virt[index] & ~PAGE_MASK; + // CRITICAL FIX: Use proper address mask, not PAGE_MASK + return pml4_virt[index] & PT_ADDR_MASK; } /** * @brief Maps a physical address to a virtual address * - * Creates all necessary page table levels and sets up the mapping - * with proper TLB invalidation. - * - * @param vaddr Virtual address to map - * @param paddr Physical address to map to - * @param flags Page table entry flags - * @return VMEM_SUCCESS on success, error code on failure + * CRITICAL FIX: Proper error handling and lock management */ int VMemMap(uint64_t vaddr, uint64_t paddr, uint64_t flags) { // Validate alignment @@ -126,6 +149,16 @@ int VMemMap(uint64_t vaddr, uint64_t paddr, uint64_t flags) { return VMEM_ERROR_ALIGN; } + // Validate addresses + if (!is_valid_phys_addr(paddr)) { + return VMEM_ERROR_INVALID_ADDR; + } + + // Validate virtual address range + if (vaddr < VIRT_ADDR_SPACE_START || vaddr >= VIRT_ADDR_SPACE_END) { + return VMEM_ERROR_INVALID_ADDR; + } + vmem_spin_lock(&vmem_lock); // Get PDP table @@ -172,9 +205,7 @@ int VMemMap(uint64_t vaddr, uint64_t paddr, uint64_t flags) { /** * @brief Unmaps a virtual address * - * @param vaddr Virtual address to unmap - * @param size Size to unmap (rounded up to page size) - * @return VMEM_SUCCESS on success, error code on failure + * CRITICAL FIX: Proper spinlock usage (don't call VMemMap from inside lock!) */ int VMemUnmap(uint64_t vaddr, uint64_t size) { size = PAGE_ALIGN_UP(size); @@ -212,11 +243,7 @@ int VMemUnmap(uint64_t vaddr, uint64_t size) { /** * @brief Allocates contiguous virtual memory * - * Allocates virtual memory and maps it to newly allocated physical pages. - * Implements proper cleanup on failure. 
- * - * @param size Size to allocate (rounded up to page size) - * @return Pointer to allocated memory, or NULL on failure + * CRITICAL FIX: Don't call VMemMap while holding the lock (causes deadlock) */ void* VMemAlloc(uint64_t size) { if (size == 0) return NULL; @@ -226,9 +253,14 @@ void* VMemAlloc(uint64_t size) { vmem_spin_lock(&vmem_lock); uint64_t vaddr = kernel_space.next_vaddr; - uint64_t allocated_size = 0; - // Try to allocate and map all pages + // Reserve the virtual address space + kernel_space.next_vaddr += size; + + vmem_spin_unlock(&vmem_lock); + + // Now map pages without holding the lock + uint64_t allocated_size = 0; for (uint64_t offset = 0; offset < size; offset += PAGE_SIZE) { void* paddr = AllocPage(); if (!paddr) { @@ -236,7 +268,6 @@ void* VMemAlloc(uint64_t size) { if (allocated_size > 0) { VMemFree((void*)vaddr, allocated_size); } - vmem_spin_unlock(&vmem_lock); return NULL; } @@ -247,18 +278,16 @@ void* VMemAlloc(uint64_t size) { if (allocated_size > 0) { VMemFree((void*)vaddr, allocated_size); } - vmem_spin_unlock(&vmem_lock); return NULL; } allocated_size += PAGE_SIZE; } - // Update kernel space tracking - kernel_space.next_vaddr += size; + // Update tracking + vmem_spin_lock(&vmem_lock); kernel_space.used_pages += size / PAGE_SIZE; kernel_space.total_mapped += size; - vmem_spin_unlock(&vmem_lock); // Zero the allocated memory @@ -269,11 +298,7 @@ void* VMemAlloc(uint64_t size) { /** * @brief Frees previously allocated virtual memory * - * Unmaps virtual addresses and frees the underlying physical pages. - * Includes proper cleanup of empty page tables. - * - * @param vaddr Starting virtual address to free - * @param size Size to free (rounded up to page size) + * CRITICAL FIX: Get physical addresses before unmapping */ void VMemFree(void* vaddr, uint64_t size) { if (!vaddr || size == 0) return; @@ -281,34 +306,41 @@ void VMemFree(void* vaddr, uint64_t size) { size = PAGE_ALIGN_UP(size); uint64_t current_vaddr = PAGE_ALIGN_DOWN((uint64_t)vaddr); - vmem_spin_lock(&vmem_lock); + // First pass: collect physical addresses + uint64_t* phys_addrs = (uint64_t*)VMemAlloc(size / PAGE_SIZE * sizeof(uint64_t)); + if (!phys_addrs) return; // Can't allocate tracking array - // Free all pages in the range + int page_count = 0; for (uint64_t offset = 0; offset < size; offset += PAGE_SIZE) { uint64_t page_vaddr = current_vaddr + offset; - - // Get physical address before unmapping uint64_t paddr = VMemGetPhysAddr(page_vaddr); if (paddr) { - // Unmap the page - VMemUnmap(page_vaddr, PAGE_SIZE); - // Free the physical page - FreePage((void*)paddr); + phys_addrs[page_count++] = paddr; } } + // Second pass: unmap + VMemUnmap(current_vaddr, size); + + // Third pass: free physical pages + for (int i = 0; i < page_count; i++) { + FreePage((void*)phys_addrs[i]); + } + // Update tracking + vmem_spin_lock(&vmem_lock); kernel_space.used_pages -= size / PAGE_SIZE; kernel_space.total_mapped -= size; - vmem_spin_unlock(&vmem_lock); + + // Free the tracking array (recursive call, but small) + VMemFree(phys_addrs, size / PAGE_SIZE * sizeof(uint64_t)); } /** * @brief Gets the physical address for a virtual address * - * @param vaddr Virtual address to translate - * @return Physical address, or 0 if not mapped + * CRITICAL FIX: Proper address masking and validation */ uint64_t VMemGetPhysAddr(uint64_t vaddr) { uint64_t pdp_phys = VMemGetPageTablePhys((uint64_t)kernel_space.pml4, vaddr, 0, 0); @@ -325,14 +357,12 @@ uint64_t VMemGetPhysAddr(uint64_t vaddr) { if (!(pt_virt[pt_index] & 
PAGE_PRESENT)) return 0; - return (pt_virt[pt_index] & ~PAGE_MASK) | (vaddr & PAGE_MASK); + // CRITICAL FIX: Use proper address mask + return (pt_virt[pt_index] & PT_ADDR_MASK) | (vaddr & PAGE_MASK); } /** * @brief Checks if a virtual address is mapped - * - * @param vaddr Virtual address to check - * @return 1 if mapped, 0 if not mapped */ int VMemIsPageMapped(uint64_t vaddr) { return VMemGetPhysAddr(vaddr) != 0; } @@ -351,8 +381,6 @@ void VMemFlushTLB(void) { /** * @brief Flushes a single TLB entry - * - * @param vaddr Virtual address to flush */ void VMemFlushTLBSingle(uint64_t vaddr) { asm volatile("invlpg (%0)" :: "r"(vaddr) : "memory"); @@ -360,11 +388,10 @@ } /** * @brief Gets virtual memory statistics - * - * @param used_pages Output: number of used pages - * @param total_mapped Output: total bytes mapped */ void VMemGetStats(uint64_t* used_pages, uint64_t* total_mapped) { + vmem_spin_lock(&vmem_lock); if (used_pages) *used_pages = kernel_space.used_pages; if (total_mapped) *total_mapped = kernel_space.total_mapped; + vmem_spin_unlock(&vmem_lock); } \ No newline at end of file diff --git a/Kernel/Process/Process.c b/Kernel/Process/Process.c index 002ca25..6ae0fba 100644 --- a/Kernel/Process/Process.c +++ b/Kernel/Process/Process.c @@ -362,82 +362,82 @@ void FastSchedule(struct Registers* regs) { MLFQscheduler.tick_counter++; MLFQscheduler.last_boost_tick = MLFQscheduler.tick_counter; } - Process* current = &processes[MLFQscheduler.current_running]; + uint32_t old_slot = MLFQscheduler.current_running; + Process* old_proc = &processes[old_slot]; - // Handle current process - if (MLFQscheduler.current_running != 0) { // Not idle - // Check if current process is dying or terminated - if (current->state == PROC_DYING || current->state == PROC_ZOMBIE || - current->state == PROC_TERMINATED) { - // Don't save context, just switch away - MLFQscheduler.current_running = 0; // Switch to idle - MLFQscheduler.quantum_remaining = 0; + // --- Step 1: Handle the currently running process --- + if (old_slot != 0) { // If we were not running the idle process + // Check if the process was terminated while running + if (old_proc->state == PROC_DYING || old_proc->state == PROC_ZOMBIE || old_proc->state == PROC_TERMINATED) { + // The process is dead. Do not save its context or re-queue it. + // It has already been removed from the scheduler by TerminateProcess. + // We just need to find a new process to run. } else { - // Save context for healthy processes - FastMemcpy(&current->context, regs, sizeof(struct Registers)); - - // Update process state - if (current->state == PROC_RUNNING) { - current->state = PROC_READY; - - // Check if quantum expired - if (MLFQscheduler.quantum_remaining > 0) { - MLFQscheduler.quantum_remaining--; - - // If quantum not expired and no higher priority processes, keep running - int highest_priority = FindHighestPriorityQueue(); - if (MLFQscheduler.quantum_remaining > 0 && - (highest_priority == -1 || highest_priority >= (int)current->priority)) { - current->state = PROC_RUNNING; - return; // Keep current process running - } - } + // Process is healthy, save its context. 
+ FastMemcpy(&old_proc->context, regs, sizeof(struct Registers)); - // Quantum expired or higher priority process available - // Demote to lower priority (unless already at lowest) - if (current->priority < MAX_PRIORITY_LEVELS - 1) { - current->priority++; - } + // Decrement quantum and check if it can continue running. + if (MLFQscheduler.quantum_remaining > 0) { + MLFQscheduler.quantum_remaining--; + } + + // Check for preemption or end of quantum + int highest_priority = FindHighestPriorityQueue(); + + // *** FIX FOR FLAW 1 *** + // The process can continue ONLY if its quantum is not expired AND + // there is NO HIGHER priority process waiting. + // A process of equal priority must wait its turn (for round-robin). + if (MLFQscheduler.quantum_remaining > 0 && (highest_priority == -1 || highest_priority > (int)old_proc->priority)) { + // No preemption needed, continue running the same process. + return; + } - // Add back to appropriate queue - AddToScheduler(MLFQscheduler.current_running); + // --- Time to switch --- + // The quantum has expired OR a higher priority process is ready. + // Set the old process state to READY, demote it, and re-queue it. + old_proc->state = PROC_READY; + + // Demote to lower priority (unless already at lowest) + if (old_proc->priority < MAX_PRIORITY_LEVELS - 1) { + old_proc->priority++; } + AddToScheduler(old_slot); } } - // Find next process to run + // --- Step 2: Find and dispatch the next process to run --- int next_priority = FindHighestPriorityQueue(); + uint32_t next_slot; + if (next_priority == -1) { - // No processes ready, run idle - MLFQscheduler.current_running = 0; - MLFQscheduler.quantum_remaining = 0; + // No processes are ready. Run the idle process. + next_slot = 0; } else { - // Get next process from highest priority queue - uint32_t next_slot = DeQueue(&MLFQscheduler.queues[next_priority]); - if (MLFQscheduler.queues[next_priority].count == 0) { - MLFQscheduler.active_bitmap &= ~(1U << next_priority); - } - - // Verify the process is still valid - if (processes[next_slot].state == PROC_READY) { - MLFQscheduler.current_running = next_slot; - MLFQscheduler.quantum_remaining = MLFQscheduler.queues[next_priority].quantum; - } else { - // Process became invalid, try again - MLFQscheduler.current_running = 0; - MLFQscheduler.quantum_remaining = 0; + // Get the next process from the highest priority non-empty queue. + next_slot = DeQueue(&MLFQscheduler.queues[next_priority]); + // Keep the bitmap in sync: mark the queue inactive once it drains, + // as the old code did, so FindHighestPriorityQueue never selects + // an empty queue on a later tick. + if (MLFQscheduler.queues[next_priority].count == 0) { + MLFQscheduler.active_bitmap &= ~(1U << next_priority); + } + if (processes[next_slot].state != PROC_READY) { + // This can happen if a process was terminated but not yet cleaned up. + // Just run idle for this tick and let the next tick sort it out. + next_slot = 0; } } - // Switch to new process - if (MLFQscheduler.current_running != 0) { - processes[MLFQscheduler.current_running].state = PROC_RUNNING; - current_process = MLFQscheduler.current_running; - FastMemcpy(regs, &processes[MLFQscheduler.current_running].context, sizeof(struct Registers)); + MLFQscheduler.current_running = next_slot; + current_process = next_slot; + + if (next_slot != 0) { + // We are switching to a real process. + Process* new_proc = &processes[next_slot]; + new_proc->state = PROC_RUNNING; + MLFQscheduler.quantum_remaining = MLFQscheduler.queues[new_proc->priority].quantum; + FastMemcpy(regs, &new_proc->context, sizeof(struct Registers)); } else { - // Running idle - set up idle context - current_process = 0; - // You might want to set up a proper idle context here + // We are switching to the idle process. 
+ MLFQscheduler.quantum_remaining = 0; // Idle process has no quantum + // The context for idle is effectively just letting the CPU continue from + // the interrupt return, where it will likely enter a `hlt` loop. + // We don't need to copy an idle context here. } } @@ -459,7 +460,7 @@ void ProcessBlocked(uint32_t slot) { // Simple interface for your existing code -static int FindFreeSlot(void) { +static inline int FindFreeSlot(void) { // Skip slot 0 (idle process) for (int i = 1; i < MAX_PROCESSES && i < 32; i++) { if (!(active_process_bitmap & (1U << i))) { @@ -475,7 +476,7 @@ static int FindFreeSlot(void) { } // Mark slot as free -static void FreeSlot(int slot) { +static inline void FreeSlot(int slot) { if (slot > 0 && slot < 32) { active_process_bitmap &= ~(1U << slot); } @@ -733,7 +734,6 @@ void SystemTracer(void) { while (1) { CleanupTerminatedProcesses(); Yield(); - __asm__ __volatile__("hlt"); } }
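Notes on the fixes above (small standalone sketches, not part of the patch):

1) The VMemInit fix. CR3 and every page-table entry hold physical addresses, while the kernel can only dereference virtual ones, so the two roles must never share one variable. A minimal sketch of the corrected bookkeeping, assuming a higher-half direct map (the DIRECT_MAP_BASE value and the Space struct here are illustrative, not VoidFrame's actual definitions):

    #include <stdint.h>

    #define DIRECT_MAP_BASE 0xFFFF800000000000ULL              /* assumed offset */
    #define PHYS_TO_VIRT(p) ((void*)((uint64_t)(p) + DIRECT_MAP_BASE))

    typedef struct { uint64_t* pml4; } Space;                  /* hypothetical */

    void InitSketch(Space* s, void* pml4_phys) {
        uint64_t pml4_phys_addr = (uint64_t)pml4_phys;         /* keep the phys copy */
        uint64_t* pml4_virt = (uint64_t*)PHYS_TO_VIRT(pml4_phys);
        for (int i = 0; i < 512; i++) pml4_virt[i] = 0;        /* zero via the alias */
        s->pml4 = (uint64_t*)pml4_phys_addr;                   /* store PHYS for CR3 */
    }

The original bug was exactly the collapse of these two values: pml4 was first set to the virtual alias, then "restored" from the same pointer, corrupting what went into CR3.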
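2) The PAGE_MASK -> PT_ADDR_MASK fix. ~PAGE_MASK only clears the low 12 offset bits, so the software-available bits 52-62 and the NX flag (bit 63) survive and corrupt the extracted frame address. A self-contained host-side demonstration, assuming PAGE_MASK is the conventional 0xFFF (VMem.h is not shown in this patch):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_MASK    0xFFFULL                /* low 12 offset bits (assumed) */
    #define PT_ADDR_MASK 0x000FFFFFFFFFF000ULL   /* bits 51:12 of a PTE */
    #define PAGE_NX      (1ULL << 63)            /* no-execute flag */

    int main(void) {
        uint64_t pte = 0x1234000ULL | PAGE_NX | 0x3;   /* present | writable | NX */
        printf("~PAGE_MASK:   %#llx\n", (unsigned long long)(pte & ~PAGE_MASK));
        printf("PT_ADDR_MASK: %#llx\n", (unsigned long long)(pte & PT_ADDR_MASK));
        /* prints 0x8000000001234000 vs 0x1234000 - the first is not a valid
           physical frame address once NX (bit 63) is set on the entry */
        return 0;
    }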
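3) The lock-management fix in VMemAlloc/VMemFree. vmem_lock is a plain test-and-set spinlock, so it is not re-entrant: holding it while calling VMemMap (which acquires it again) spins forever in the same context. A minimal sketch of that failure mode using the same GCC builtins the patch uses:

    static volatile int lock = 0;

    static void spin_lock(volatile int* l) {
        while (__sync_lock_test_and_set(l, 1)) { /* busy-wait for release */ }
    }
    static void spin_unlock(volatile int* l) { __sync_lock_release(l); }

    static void inner(void) { spin_lock(&lock); spin_unlock(&lock); }

    void outer(void) {
        spin_lock(&lock);
        inner();             /* never returns: inner() spins on a lock   */
        spin_unlock(&lock);  /* that this same context already holds     */
    }

Hence the reserve-then-map pattern in the new VMemAlloc: claim the virtual range under the lock, drop the lock, then let each VMemMap call take it on its own.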
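4) The FLAW 1 fix in FastSchedule. In this MLFQ, a numerically smaller priority value means a higher priority (demotion is priority++). The keep-running test can be read as an isolated predicate; this hypothetical helper is not in the patch, it just mirrors the condition FastSchedule now uses:

    #include <stdbool.h>

    /* Lower number = higher priority; highest_waiting == -1 means no
       process is queued at all. */
    static inline bool KeepRunning(int quantum_left, int highest_waiting,
                                   int current_prio) {
        /* Continue only while quantum remains AND every waiting process is
           strictly lower priority; an equal-priority waiter forces a switch
           so round-robin within a queue actually rotates. */
        return quantum_left > 0 &&
               (highest_waiting == -1 || highest_waiting > current_prio);
    }

The old code used >= here, which let the running process starve equal-priority peers in its own queue.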