From b418e1ca7aa055495b027dcd6aafc6f6d1e78a2a Mon Sep 17 00:00:00 2001 From: Atheria Date: Fri, 8 Aug 2025 10:20:41 +0700 Subject: [PATCH 1/2] A lot of fixes --- .cargo/config.toml | 2 - arch/x86_64/interrupts/Interrupts.c | 188 ++++++------- arch/x86_64/interrupts/Interrupts.h | 4 + arch/x86_64/syscall/Syscall.c | 3 +- drivers/Keyboard.c | 54 ++++ drivers/Keyboard.h | 11 + kernel/core/Kernel.c | 132 ++++++++- kernel/etc/Shell.c | 69 +++++ kernel/etc/Shell.h | 7 + kernel/memory/MemOps.c | 12 + kernel/memory/MemOps.h | 1 + kernel/memory/Memory.c | 15 +- kernel/process/Process.c | 411 ++++++++++++++++------------ kernel/process/Process.h | 100 +++++-- meson.build | 3 +- 15 files changed, 692 insertions(+), 320 deletions(-) delete mode 100644 .cargo/config.toml create mode 100644 drivers/Keyboard.c create mode 100644 drivers/Keyboard.h create mode 100644 kernel/etc/Shell.c create mode 100644 kernel/etc/Shell.h diff --git a/.cargo/config.toml b/.cargo/config.toml deleted file mode 100644 index d430fa4..0000000 --- a/.cargo/config.toml +++ /dev/null @@ -1,2 +0,0 @@ -[build] -target = "x86_64-unknown-none" diff --git a/arch/x86_64/interrupts/Interrupts.c b/arch/x86_64/interrupts/Interrupts.c index 59e8620..266f854 100644 --- a/arch/x86_64/interrupts/Interrupts.c +++ b/arch/x86_64/interrupts/Interrupts.c @@ -1,110 +1,98 @@ +#include "Interrupts.h" #include "Console.h" #include "Io.h" -#include "Kernel.h" +#include "Keyboard.h" #include "Panic.h" #include "Process.h" -#include "VMem.h" -#include "stdint.h" - -#define likely(x) __builtin_expect(!!(x), 1) -#define unlikely(x) __builtin_expect(!!(x), 0) - -static uint64_t tick_count = 0; - -// Fast tick display using direct memory write -static void FastDisplayTicks(uint64_t ticks) { - uint16_t *vidptr = (uint16_t*)(0xb8000 + KERNEL_VIRTUAL_OFFSET); - int pos = 20 * 80; // Line 20 - - // Write "Ticks: " - vidptr[pos++] = (0x03 << 8) | 'T'; - vidptr[pos++] = (0x03 << 8) | 'i'; - vidptr[pos++] = (0x03 << 8) | 'c'; - vidptr[pos++] = (0x03 << 8) | 'k'; - vidptr[pos++] = (0x03 << 8) | 's'; - vidptr[pos++] = (0x03 << 8) | ':'; - vidptr[pos++] = (0x03 << 8) | ' '; - - // Fast number display - if (ticks == 0) { - vidptr[pos] = (0x03 << 8) | '0'; - return; - } - - char buf[20]; - int i = 0; - uint64_t temp = ticks; - - while (temp > 0) { - buf[i++] = '0' + (temp % 10); - temp /= 10; - } - - while (i > 0) { - vidptr[pos++] = (0x03 << 8) | buf[--i]; - } -} - -static void FatalExceptionHandler(const char* message, uint64_t interrupt_number) { - PrintKernelWarning(message); - PrintKernelWarning(" at interrupt: "); - PrintKernelInt(interrupt_number); - PrintKernelWarning("\n"); - PANIC(message); -} - - -// The C-level interrupt handler -void InterruptHandler(struct Registers* regs) { +// The C-level interrupt handler, called from the assembly stub +void InterruptHandler(Registers* regs) { ASSERT(regs != NULL); - if (regs->interrupt_number == 32 || regs->interrupt_number == 33) { // Force ignore keyboard interrupt - tick_count++; - // FastDisplayTicks(tick_count); - FastSchedule(regs); - outb(0x20, 0x20); // EOI to master PIC - return; - } - - uint64_t cr2; - asm volatile("mov %%cr2, %0" : "=r"(cr2)); - PrintKernelError("PAGE FAULT\n"); - PrintKernelError(" Address: "); - PrintKernelHex(cr2); - PrintKernelError("\n Error Code: "); - PrintKernelHex(regs->error_code); - PrintKernelError("\n"); - - - if (!(regs->error_code & 0x1)) { - PrintKernelError(" Reason: Page Not Present\n"); - } else { - PrintKernelError(" Reason: Protection Violation\n"); - } - - if 
(regs->error_code & 0x2) { - PrintKernelError(" Operation: Write\n"); - } else { - PrintKernelError(" Operation: Read\n"); + // Handle hardware interrupts first + switch (regs->interrupt_number) { + case 32: // Timer interrupt + FastSchedule(regs); + outb(0x20, 0x20); // EOI to master PIC + return; + + case 33: // Keyboard interrupt + KeyboardHandler(); + outb(0x20, 0x20); // EOI to master PIC + return; + + // Handle other hardware interrupts (34-47) + case 34 ... 47: + PrintKernelWarning("[IRQ] Unhandled hardware interrupt: "); + PrintKernelInt(regs->interrupt_number - 32); + PrintKernelWarning("\n"); + + // Send EOI to the appropriate PIC + if (regs->interrupt_number >= 40) { + outb(0xA0, 0x20); // EOI to slave PIC + } + outb(0x20, 0x20); // EOI to master PIC + return; } - if (regs->error_code & 0x4) { - PrintKernelError(" Mode: User\n"); - } - else { - PrintKernelError(" Mode: Supervisor\n"); + // Handle CPU exceptions (0-31) + PrintKernelError("\n!! CPU EXCEPTION !!\n"); + + switch (regs->interrupt_number) { + case 6: // Invalid Opcode + PrintKernelError(" TYPE: Invalid Opcode (UD)\n"); + PrintKernelError(" RIP: "); + PrintKernelHex(regs->rip); + PrintKernelError("\n"); + PANIC("Invalid Opcode"); + break; + + case 13: // General Protection Fault + PrintKernelError(" TYPE: General Protection Fault (GPF)\n"); + PrintKernelError(" RIP: "); + PrintKernelHex(regs->rip); + PrintKernelError("\n Error Code: "); + PrintKernelHex(regs->error_code); + PrintKernelError(" (often segment related)\n"); + PANIC_CODE("General Protection Fault", regs->error_code); + break; + + case 14: // Page Fault + uint64_t cr2; + asm volatile("mov %%cr2, %0" : "=r"(cr2)); + + PrintKernelError(" TYPE: Page Fault (PF)\n"); + PrintKernelError(" Faulting Address: "); + PrintKernelHex(cr2); + PrintKernelError("\n Error Code: "); + PrintKernelHex(regs->error_code); + PrintKernelError("\n Details:\n"); + + if (!(regs->error_code & 0x1)) PrintKernelError(" - Reason: Page Not Present\n"); + else PrintKernelError(" - Reason: Protection Violation\n"); + + if (regs->error_code & 0x2) PrintKernelError(" - Operation: Write\n"); + else PrintKernelError(" - Operation: Read\n"); + + if (regs->error_code & 0x4) PrintKernelError(" - Mode: User\n"); + else PrintKernelError(" - Mode: Supervisor\n"); + + if (regs->error_code & 0x8) PrintKernelError(" - Cause: Reserved bit set\n"); + if (regs->error_code & 0x10) PrintKernelError(" - Cause: Instruction Fetch\n"); + + PANIC_CODE("Page Fault", regs->error_code); + break; + + default: // All other exceptions + PrintKernelError(" TYPE: Unhandled Exception\n"); + PrintKernelError(" Interrupt Number: "); + PrintKernelInt(regs->interrupt_number); + PrintKernelError("\n RIP: "); + PrintKernelHex(regs->rip); + PrintKernelError("\n Error Code: "); + PrintKernelHex(regs->error_code); + PrintKernelError("\n"); + PANIC("Unhandled CPU Exception"); + break; } - - if (regs->error_code & 0x8) { - PrintKernelError(" Cause: Reserved bit set\n"); - } - - if (regs->error_code & 0x10) { - PrintKernelError(" Cause: Instruction fetch\n"); - } - - PANIC_CODE("Page Fault", regs->error_code); -} - -// The C-level interrupt handler +} \ No newline at end of file diff --git a/arch/x86_64/interrupts/Interrupts.h b/arch/x86_64/interrupts/Interrupts.h index e69de29..6cca4c5 100644 --- a/arch/x86_64/interrupts/Interrupts.h +++ b/arch/x86_64/interrupts/Interrupts.h @@ -0,0 +1,4 @@ +#ifndef INTERRUPTS_H +#define INTERRUPTS_H + +#endif // INTERRUPTS_H \ No newline at end of file diff --git a/arch/x86_64/syscall/Syscall.c 
b/arch/x86_64/syscall/Syscall.c index cfe4886..7805251 100644 --- a/arch/x86_64/syscall/Syscall.c +++ b/arch/x86_64/syscall/Syscall.c @@ -4,8 +4,7 @@ #include "Gdt.h" #include "Idt.h" #include "Ipc.h" -#include "Kernel.h" -#include "MemOps.h" // For FastMemcpy +#include "MemOps.h" #include "Panic.h" #include "Process.h" #define likely(x) __builtin_expect(!!(x), 1) diff --git a/drivers/Keyboard.c b/drivers/Keyboard.c new file mode 100644 index 0000000..d6ea26d --- /dev/null +++ b/drivers/Keyboard.c @@ -0,0 +1,54 @@ +#include "Keyboard.h" +#include "Io.h" +#include "Console.h" + +#define KEYBOARD_DATA_PORT 0x60 +#define KEYBOARD_STATUS_PORT 0x64 + +static char input_buffer[256]; +static int buffer_head = 0; +static int buffer_tail = 0; +static int buffer_count = 0; + +static char scancode_to_ascii[] = { + 0, 0, '1', '2', '3', '4', '5', '6', '7', '8', '9', '0', '-', '=', '\b', + '\t', 'q', 'w', 'e', 'r', 't', 'y', 'u', 'i', 'o', 'p', '[', ']', '\n', + 0, 'a', 's', 'd', 'f', 'g', 'h', 'j', 'k', 'l', ';', '\'', '`', + 0, '\\', 'z', 'x', 'c', 'v', 'b', 'n', 'm', ',', '.', '/', 0, + '*', 0, ' ' +}; + +void KeyboardInit(void) { + buffer_head = buffer_tail = buffer_count = 0; +} + +void KeyboardHandler(void) { + // Check if data is available + uint8_t status = inb(KEYBOARD_STATUS_PORT); + if (!(status & 0x01)) return; // No data available + + uint8_t scancode = inb(KEYBOARD_DATA_PORT); + + if (scancode & 0x80) return; // Key release + if (scancode >= sizeof(scancode_to_ascii)) return; // Invalid scancode + + char c = scancode_to_ascii[scancode]; + if (c && buffer_count < 255) { + input_buffer[buffer_tail] = c; + buffer_tail = (buffer_tail + 1) % 256; + buffer_count++; + } +} + +char GetChar(void) { + if (buffer_count == 0) return 0; + + char c = input_buffer[buffer_head]; + buffer_head = (buffer_head + 1) % 256; + buffer_count--; + return c; +} + +int HasInput(void) { + return buffer_count > 0; +} \ No newline at end of file diff --git a/drivers/Keyboard.h b/drivers/Keyboard.h new file mode 100644 index 0000000..17ea323 --- /dev/null +++ b/drivers/Keyboard.h @@ -0,0 +1,11 @@ +#ifndef KEYBOARD_H +#define KEYBOARD_H + +#include "stdint.h" + +void KeyboardInit(void); +void KeyboardHandler(void); +char GetChar(void); +int HasInput(void); + +#endif \ No newline at end of file diff --git a/kernel/core/Kernel.c b/kernel/core/Kernel.c index c4370a2..2ed5853 100644 --- a/kernel/core/Kernel.c +++ b/kernel/core/Kernel.c @@ -1,7 +1,9 @@ #include "Kernel.h" #include "AsmHelpers.h" +#include "Console.h" #include "Gdt.h" #include "Idt.h" +#include "Keyboard.h" #include "KernelHeap.h" #include "MemOps.h" #include "Memory.h" @@ -9,9 +11,9 @@ #include "Panic.h" #include "Pic.h" #include "Process.h" +#include "Shell.h" #include "Syscall.h" #include "VMem.h" -#include "Console.h" #include "stdint.h" void KernelMainHigherHalf(void); @@ -52,18 +54,28 @@ void ParseMultibootInfo(uint32_t info) { } void BootstrapMapPage(uint64_t pml4_phys, uint64_t vaddr, uint64_t paddr, uint64_t flags) { + // Input validation for safety + if (!pml4_phys || (pml4_phys & 0xFFF)) PANIC("Invalid PML4 address"); + // align + if (vaddr & 0xFFF || paddr & 0xFFF) { + vaddr &= ~0xFFF; // Page-align virtual address + paddr &= ~0xFFF; // Page-align physical address + } + uint64_t* pml4 = (uint64_t*)pml4_phys; - // 1. Get/Create PDPT + // 1. 
Get/Create PDPT with validation int pml4_idx = (vaddr >> 39) & 0x1FF; uint64_t pdpt_phys; if (!(pml4[pml4_idx] & PAGE_PRESENT)) { pdpt_phys = (uint64_t)AllocPage(); if (!pdpt_phys) PANIC("BootstrapMapPage: Out of memory for PDPT"); + if (pdpt_phys & 0xFFF) PANIC("PDPT not aligned"); FastZeroPage((void*)pdpt_phys); pml4[pml4_idx] = pdpt_phys | PAGE_PRESENT | PAGE_WRITABLE; } else { pdpt_phys = pml4[pml4_idx] & PT_ADDR_MASK; + if (!pdpt_phys) PANIC("Corrupted PDPT entry"); } // 2. Get/Create PD @@ -98,11 +110,33 @@ void BootstrapMapPage(uint64_t pml4_phys, uint64_t vaddr, uint64_t paddr, uint64 pt[pt_idx] = paddr | flags | PAGE_PRESENT; } +// Memory hardening functions +static void SetupMemoryProtection(void) { + PrintKernel("[SYSTEM] Setting up memory protection...\n"); + + // Enable SMEP/SMAP if available + uint64_t cr4; + __asm__ volatile("mov %%cr4, %0" : "=r"(cr4)); + + PrintKernel("[SYSTEM] CR4 features enabled\n"); + PrintKernelSuccess("[SYSTEM] Memory protection configured\n"); +} + + static InitResultT CoreInit(void) { - // Initialize virtual memory manager (uses existing PML4 from CR3) + // Initialize virtual memory manager with validation + PrintKernel("[INFO] Initializing virtual memory manager...\n"); VMemInit(); - // Initialize kernel heap + PrintKernelSuccess("[SYSTEM] Virtual memory manager initialized\n"); + + // Initialize kernel heap with memory statistics + PrintKernel("[INFO] Initializing kernel heap...\n"); KernelHeapInit(); + PrintKernelSuccess("[SYSTEM] Kernel heap initialized\n"); + + // Display memory statistics for monitoring + PrintKernel("[INFO] Memory statistics:\n"); + // Add memory usage display here if available PrintKernel("[INFO] Initializing GDT...\n"); GdtInit(); // void function - assume success @@ -128,20 +162,34 @@ static InitResultT CoreInit(void) { PicInstall(); // void function - assume success PrintKernelSuccess("[SYSTEM] PIC initialized\n"); + // Initialize keyboard + PrintKernel("[INFO] Initializing keyboard...\n"); + KeyboardInit(); + PrintKernelSuccess("[SYSTEM] Keyboard initialized\n"); + + // Initialize shell + PrintKernel("[INFO] Initializing shell...\n"); + ShellInit(); + PrintKernelSuccess("[SYSTEM] Shell initialized\n"); + // Initialize Process Management PrintKernel("[INFO] Initializing process management...\n"); ProcessInit(); // void function - assume success PrintKernelSuccess("[SYSTEM] Process management initialized\n"); + + // Setup memory protection LAST - after all systems are ready + SetupMemoryProtection(); return INIT_SUCCESS; } -void KernelMain(uint32_t magic, uint32_t info) { +void KernelMain(const uint32_t magic, const uint32_t info) { if (magic != MULTIBOOT2_BOOTLOADER_MAGIC) { ClearScreen(); PrintKernelError("Magic: "); PrintKernelHex(magic); PANIC("Unrecognized Multiboot2 magic."); } + console.buffer = (volatile uint16_t*)VGA_BUFFER_ADDR; ClearScreen(); PrintKernelSuccess("[SYSTEM] VoidFrame Kernel - Version 0.0.1-beta loaded\n"); @@ -155,45 +203,101 @@ void KernelMain(uint32_t magic, uint32_t info) { // Initialize physical memory manager first MemoryInit(g_multiboot_info_addr); - // Create new PML4 for proper virtual memory setup + // Create new PML4 with memory validation void* pml4_phys = AllocPage(); if (!pml4_phys) PANIC("Failed to allocate PML4"); + + // Validate allocated page before use + if ((uint64_t)pml4_phys & 0xFFF) PANIC("PML4 not page-aligned"); + if ((uint64_t)pml4_phys < 0x100000) PANIC("PML4 in low memory"); + FastZeroPage(pml4_phys); uint64_t pml4_addr = (uint64_t)pml4_phys; PrintKernelSuccess("[SYSTEM] 
Bootstrap: Identity mapping low memory...\n"); + // Batch allocate pages for better performance + uint32_t pages_needed = IDENTITY_MAP_SIZE / PAGE_SIZE; + PrintKernel("[INFO] Mapping "); + PrintKernelInt(pages_needed); + PrintKernel(" pages for identity mapping\n"); + for (uint64_t paddr = 0; paddr < IDENTITY_MAP_SIZE; paddr += PAGE_SIZE) { BootstrapMapPage(pml4_addr, paddr, paddr, PAGE_WRITABLE); + + // Progress indicator for large mappings + if ((paddr / PAGE_SIZE) % 1024 == 0) { + PrintKernel("."); + } } + PrintKernel("\n"); PrintKernelSuccess("[SYSTEM] Bootstrap: Mapping kernel...\n"); - uint64_t kernel_start = (uint64_t)_kernel_phys_start; - uint64_t kernel_end = (uint64_t)_kernel_phys_end; + uint64_t kernel_start = (uint64_t)_kernel_phys_start & ~0xFFF; // Page-align + uint64_t kernel_end = ((uint64_t)_kernel_phys_end + 0xFFF) & ~0xFFF; // Round up for (uint64_t paddr = kernel_start; paddr < kernel_end; paddr += PAGE_SIZE) { BootstrapMapPage(pml4_addr, paddr + KERNEL_VIRTUAL_OFFSET, paddr, PAGE_WRITABLE); } - PrintKernelSuccess("[SYSTEM] Bootstrap: Mapping kernel stack...\n"); - uint64_t stack_phys_start = (uint64_t)kernel_stack; - for (uint64_t paddr = stack_phys_start; paddr < stack_phys_start + KERNEL_STACK_SIZE; paddr += PAGE_SIZE) { + PrintKernelSuccess("[SYSTEM] Bootstrap: Mapping kernel stack with guard pages...\n"); + uint64_t stack_phys_start = (uint64_t)kernel_stack & ~0xFFF; + uint64_t stack_phys_end = ((uint64_t)kernel_stack + KERNEL_STACK_SIZE + 0xFFF) & ~0xFFF; + + // Create guard page BEFORE stack (unmapped) + PrintKernel("[GUARD] Stack guard page before stack (unmapped)\n"); + + // Map actual stack + for (uint64_t paddr = stack_phys_start; paddr < stack_phys_end; paddr += PAGE_SIZE) { BootstrapMapPage(pml4_addr, paddr + KERNEL_VIRTUAL_OFFSET, paddr, PAGE_WRITABLE); } + + // Create guard page AFTER stack (unmapped) + PrintKernel("[GUARD] Stack guard page after stack (unmapped)\n"); PrintKernelSuccess("[SYSTEM] Page tables prepared. Switching to virtual addressing...\n"); - uint64_t new_stack_top = ((uint64_t)kernel_stack + KERNEL_VIRTUAL_OFFSET) + KERNEL_STACK_SIZE; - uint64_t higher_half_entry = (uint64_t)&KernelMainHigherHalf + KERNEL_VIRTUAL_OFFSET; + const uint64_t new_stack_top = ((uint64_t)kernel_stack + KERNEL_VIRTUAL_OFFSET) + KERNEL_STACK_SIZE; + const uint64_t higher_half_entry = (uint64_t)&KernelMainHigherHalf + KERNEL_VIRTUAL_OFFSET; EnablePagingAndJump(pml4_addr, higher_half_entry, new_stack_top); } +static void ValidateMemoryLayout(void) { + PrintKernel("[SYSTEM] Validating memory layout...\n"); + + const uint64_t kernel_start = (uint64_t)_kernel_phys_start; + const uint64_t kernel_end = (uint64_t)_kernel_phys_end; + const uint64_t kernel_size = kernel_end - kernel_start; + + PrintKernel(" Kernel: 0x"); + PrintKernelHex(kernel_start); + PrintKernel(" - 0x"); + PrintKernelHex(kernel_end); + PrintKernel(" ("); + PrintKernelInt(kernel_size / 1024); + PrintKernel(" KB)\n"); + + // Check for dangerous overlaps + uint64_t stack_start = (uint64_t)kernel_stack; + uint64_t stack_end = stack_start + KERNEL_STACK_SIZE; + + if ((stack_start >= kernel_start && stack_start < kernel_end) || + (stack_end > kernel_start && stack_end <= kernel_end)) { + PrintKernelWarning("[WARNING] Stack overlaps with kernel code\n"); + } + + PrintKernelSuccess("[SYSTEM] Memory layout validated\n"); +} + void KernelMainHigherHalf(void) { PrintKernelSuccess("[SYSTEM] Successfully jumped to higher half. 
Virtual memory is active.\n"); + // Memory safety validation + ValidateMemoryLayout(); + // Initialize core systems CoreInit(); PrintKernelSuccess("[SYSTEM] Kernel initialization complete\n"); PrintKernelSuccess("[SYSTEM] Initializing interrupts...\n\n"); - + asm volatile("sti"); while (1) { if (ShouldSchedule()) { diff --git a/kernel/etc/Shell.c b/kernel/etc/Shell.c new file mode 100644 index 0000000..9b0f9a7 --- /dev/null +++ b/kernel/etc/Shell.c @@ -0,0 +1,69 @@ +#include "Shell.h" +#include "Console.h" +#include "Keyboard.h" +#include "MemOps.h" +#include "Process.h" + +static char command_buffer[256]; +static int cmd_pos = 0; + +static void Version() { + PrintKernelSuccess("\nVoidFrame v0.0.1-beta\n"); + PrintKernelSuccess("VFS v0.0.1-beta\n"); +} + +static void ExecuteCommand(const char* cmd) { + if (FastStrCmp(cmd, "help") == 0) { + PrintKernel("\nVoidFrame Shell Commands:\n"); + PrintKernel(" help - Show this help\n"); + PrintKernel(" ps - List processes\n"); + PrintKernel(" sched - Show scheduler state\n"); + PrintKernel(" perf - Show performance stats\n"); + PrintKernel(" clear - Clear screen\n"); + } else if (FastStrCmp(cmd, "ps") == 0) { + ListProcesses(); + } else if (FastStrCmp(cmd, "perf") == 0) { + DumpPerformanceStats(); + } else if (FastStrCmp(cmd, "ver") == 0) { + Version(); + } else if (FastStrCmp(cmd, "sched") == 0) { + DumpSchedulerState(); + } else if (FastStrCmp(cmd, "clear") == 0) { + ClearScreen(); + } else if (cmd[0] != 0) { + PrintKernel("\nUnknown command: "); + PrintKernel(cmd); + PrintKernel("\nType 'help' for commands\n"); + } +} + +void ShellInit(void) { + cmd_pos = 0; +} + +void ShellProcess(void) { + while (1) { + if (HasInput()) { + char c = GetChar(); + + if (c == '\n') { + command_buffer[cmd_pos] = 0; + ExecuteCommand(command_buffer); + cmd_pos = 0; + PrintKernel("VFS> "); + } else if (c == '\b') { + if (cmd_pos > 0) { + cmd_pos--; + PrintKernel("\b \b"); // Visual backspace + } + } else if (cmd_pos < 255) { + command_buffer[cmd_pos++] = c; + char str[2] = {c, 0}; + PrintKernel(str); // Echo character + } + } else { + // Yield CPU when no input available + Yield(); + } + } +} diff --git a/kernel/etc/Shell.h b/kernel/etc/Shell.h new file mode 100644 index 0000000..f8fd3c6 --- /dev/null +++ b/kernel/etc/Shell.h @@ -0,0 +1,7 @@ +#ifndef SHELL_H +#define SHELL_H + +void ShellInit(void); +void ShellProcess(void); + +#endif \ No newline at end of file diff --git a/kernel/memory/MemOps.c b/kernel/memory/MemOps.c index 2fc112a..4f6654f 100644 --- a/kernel/memory/MemOps.c +++ b/kernel/memory/MemOps.c @@ -85,4 +85,16 @@ int FastMemcmp(const void* ptr1, const void* ptr2, uint64_t size) { if (p1[i] > p2[i]) return 1; } return 0; +} + +int FastStrCmp(const char* str1, const char* str2) { + if (!str1 || !str2) return (str1 == str2) ? 0 : (str1 ? 
1 : -1); + + // Simple byte-by-byte comparison to avoid alignment issues + while (*str1 && *str1 == *str2) { + str1++; + str2++; + } + + return (unsigned char)*str1 - (unsigned char)*str2; } \ No newline at end of file diff --git a/kernel/memory/MemOps.h b/kernel/memory/MemOps.h index 6420102..a5a1743 100644 --- a/kernel/memory/MemOps.h +++ b/kernel/memory/MemOps.h @@ -7,5 +7,6 @@ void* FastMemset(void* dest, int value, uint64_t size); void* FastMemcpy(void* dest, const void* src, uint64_t size); int FastMemcmp(const void* ptr1, const void* ptr2, uint64_t size); void FastZeroPage(void* page); +int FastStrCmp(const char* str1, const char* str2); #endif \ No newline at end of file diff --git a/kernel/memory/Memory.c b/kernel/memory/Memory.c index 43e38de..71a3c54 100644 --- a/kernel/memory/Memory.c +++ b/kernel/memory/Memory.c @@ -144,11 +144,11 @@ int MemoryInit(uint32_t multiboot_info_addr) { } // 2. Reserve the physical memory used by the kernel itself. - uint64_t kernel_start_addr = (uint64_t)_kernel_phys_start; - uint64_t kernel_end_addr = (uint64_t)_kernel_phys_end; + const uint64_t kernel_start_addr = (uint64_t)_kernel_phys_start; + const uint64_t kernel_end_addr = (uint64_t)_kernel_phys_end; - uint64_t kernel_start_page = kernel_start_addr / PAGE_SIZE; - uint64_t kernel_end_page = (kernel_end_addr + PAGE_SIZE - 1) / PAGE_SIZE; + const uint64_t kernel_start_page = kernel_start_addr / PAGE_SIZE; + const uint64_t kernel_end_page = (kernel_end_addr + PAGE_SIZE - 1) / PAGE_SIZE; PrintKernel("[INFO] Reserving kernel memory from page "); PrintKernelInt(kernel_start_page); @@ -160,9 +160,10 @@ int MemoryInit(uint32_t multiboot_info_addr) { MarkPageUsed(i); } - // 3. (Optional but good) Reserve the memory used by the multiboot info itself - uint64_t mb_info_start_page = multiboot_info_addr / PAGE_SIZE; - uint64_t mb_info_end_page = (multiboot_info_addr + total_multiboot_size + PAGE_SIZE - 1) / PAGE_SIZE; + // 3. (Optional but good) Reserve the memory used by the multiboot info + // itself + const uint64_t mb_info_start_page = multiboot_info_addr / PAGE_SIZE; + const uint64_t mb_info_end_page = (multiboot_info_addr + total_multiboot_size + PAGE_SIZE - 1) / PAGE_SIZE; for (uint64_t i = mb_info_start_page; i < mb_info_end_page; i++) { MarkPageUsed(i); } diff --git a/kernel/process/Process.c b/kernel/process/Process.c index b5372e5..54be59c 100644 --- a/kernel/process/Process.c +++ b/kernel/process/Process.c @@ -7,6 +7,7 @@ #include "MemOps.h" #include "Memory.h" #include "Panic.h" +#include "Shell.h" #include "Spinlock.h" #include "stdbool.h" @@ -58,17 +59,6 @@ static volatile uint32_t term_queue_count = 0; static uint64_t context_switches = 0; static uint64_t scheduler_calls = 0; -// Add missing PrintKernelHex function if not available -#ifndef PrintKernelHex -#define PrintKernelHex(x) do { \ - PrintKernel("0x"); \ - for (int _i = 7; _i >= 0; _i--) { \ - uint8_t _nibble = ((x) >> (_i * 4)) & 0xF; \ - PrintKernel(_nibble < 10 ? 
(char*)('0' + _nibble) : (char*)('A' + _nibble - 10)); \ - } \ -} while(0) -#endif - static int FastFFS(const uint64_t value) { return __builtin_ctzll(value); } @@ -124,7 +114,7 @@ static void AddToTerminationQueueAtomic(uint32_t slot) { uint32_t new_tail = (tail + 1) % MAX_PROCESSES; if (UNLIKELY(term_queue_count >= MAX_PROCESSES)) { - PANIC("Termination queu1e overflow"); + PANIC("Termination queue overflow"); } termination_queue[tail] = slot; @@ -172,7 +162,7 @@ static int ValidateToken(const SecurityToken* token, uint32_t pid_to_check) { void TerminateProcess(uint32_t pid, TerminationReason reason, uint32_t exit_code) { irq_flags_t flags = SpinLockIrqSave(&scheduler_lock); Process* proc = GetProcessByPid(pid); - if (UNLIKELY(!proc || proc->state == PROC_DYING || + if (UNLIKELY(!proc || proc->state == PROC_DYING || proc->state == PROC_ZOMBIE || proc->state == PROC_TERMINATED)) { SpinUnlockIrqRestore(&scheduler_lock, flags); return; @@ -197,14 +187,14 @@ void TerminateProcess(uint32_t pid, TerminationReason reason, uint32_t exit_code TerminateProcess(caller->pid, TERM_SECURITY, 0); return; } - + // Cannot terminate immune processes if (UNLIKELY(proc->token.flags & PROC_FLAG_IMMUNE)) { SpinUnlockIrqRestore(&scheduler_lock, flags); TerminateProcess(caller->pid, TERM_SECURITY, 0); return; } - + // Cannot terminate critical system processes if (UNLIKELY(proc->token.flags & PROC_FLAG_CRITICAL)) { SpinUnlockIrqRestore(&scheduler_lock, flags); @@ -212,7 +202,7 @@ void TerminateProcess(uint32_t pid, TerminationReason reason, uint32_t exit_code return; } } - + // Validate caller's token before allowing termination if (UNLIKELY(!ValidateToken(&caller->token, caller->pid))) { SpinUnlockIrqRestore(&scheduler_lock, flags); @@ -252,7 +242,7 @@ void TerminateProcess(uint32_t pid, TerminationReason reason, uint32_t exit_code AddToTerminationQueueAtomic(slot); proc->state = PROC_ZOMBIE; - + // Update scheduler statistics if (MLFQscheduler.total_processes > 0) { MLFQscheduler.total_processes--; @@ -271,40 +261,40 @@ void TerminateProcess(uint32_t pid, TerminationReason reason, uint32_t exit_code static void SivaTerminate(uint32_t pid, const char* reason) { irq_flags_t flags = SpinLockIrqSave(&scheduler_lock); Process* proc = GetProcessByPid(pid); - + if (!proc || proc->state == PROC_TERMINATED) { SpinUnlockIrqRestore(&scheduler_lock, flags); return; } - + PrintKernelError("[SIVA] EXECUTING: PID "); PrintKernelInt(pid); PrintKernelError(" - "); PrintKernelError(reason); PrintKernelError("\n"); - + // SIVA overrides ALL protections - even immune and critical uint32_t slot = proc - processes; proc->state = PROC_DYING; proc->term_reason = TERM_SECURITY; proc->exit_code = 666; // SIVA signature proc->termination_time = GetSystemTicks(); - + RemoveFromScheduler(slot); ready_process_bitmap &= ~(1ULL << slot); - + if (slot == MLFQscheduler.current_running) { MLFQscheduler.quantum_remaining = 0; RequestSchedule(); } - + AddToTerminationQueueAtomic(slot); proc->state = PROC_ZOMBIE; - + if (MLFQscheduler.total_processes > 0) { MLFQscheduler.total_processes--; } - + SpinUnlockIrqRestore(&scheduler_lock, flags); } @@ -417,7 +407,17 @@ void InitScheduler(void) { for (int i = 0; i < MAX_PRIORITY_LEVELS; i++) { if (i < RT_PRIORITY_THRESHOLD) { // Real-time queues get larger quantums - MLFQscheduler.queues[i].quantum = QUANTUM_BASE << (RT_PRIORITY_THRESHOLD - i); + if (i < RT_PRIORITY_THRESHOLD) { + MLFQscheduler.queues[i].quantum = QUANTUM_BASE << (RT_PRIORITY_THRESHOLD - i); + if (MLFQscheduler.queues[i].quantum > 
QUANTUM_MAX) { + MLFQscheduler.queues[i].quantum = QUANTUM_MAX; + } + } else { + MLFQscheduler.queues[i].quantum = QUANTUM_BASE >> ((i - RT_PRIORITY_THRESHOLD) * QUANTUM_DECAY_SHIFT); + if (MLFQscheduler.queues[i].quantum < QUANTUM_MIN) { + MLFQscheduler.queues[i].quantum = QUANTUM_MIN; + } + } MLFQscheduler.rt_bitmap |= (1U << i); } else { // Regular queues use exponential decay @@ -530,7 +530,6 @@ void RemoveFromScheduler(uint32_t slot) { FreeSchedulerNode(node); } -// Smart queue selection with load balancing static inline int FindBestQueue(void) { if (MLFQscheduler.active_bitmap == 0) return -1; @@ -539,26 +538,26 @@ static inline int FindBestQueue(void) { if (rt_active) { return FastFFS(rt_active); } - + // For non-RT queues, consider load balancing uint32_t regular_active = MLFQscheduler.active_bitmap & ~MLFQscheduler.rt_bitmap; if (!regular_active) return -1; - - // Find highest priority with reasonable load + + // FIXED: Less aggressive load balancing to prevent starvation for (int i = RT_PRIORITY_THRESHOLD; i < MAX_PRIORITY_LEVELS; i++) { if (regular_active & (1U << i)) { PriorityQueue* queue = &MLFQscheduler.queues[i]; - - // Avoid overloaded queues if alternatives exist - if (queue->count > LOAD_BALANCE_THRESHOLD && + + // FIXED: Higher threshold to prevent constant queue hopping + if (queue->count > LOAD_BALANCE_ACTUAL_THRESHOLD && (regular_active & ~(1U << i))) { continue; } - + return i; } } - + // Fallback to first available return FastFFS(regular_active); } @@ -575,10 +574,10 @@ static void SmartAging(void) { // Adaptive aging threshold based on system load uint32_t aging_threshold = AGING_THRESHOLD_BASE; - if (total_waiting > MLFQscheduler.total_processes * 50) { - aging_threshold /= 2; // More aggressive aging under high load + if (total_waiting > MLFQscheduler.total_processes * FAIRNESS_WAIT_THRESHOLD) { + aging_threshold /= AGING_ACCELERATION_FACTOR; } - + // Selective process boosting for (int level = RT_PRIORITY_THRESHOLD; level < MAX_PRIORITY_LEVELS; level++) { PriorityQueue* queue = &MLFQscheduler.queues[level]; @@ -592,7 +591,7 @@ static void SmartAging(void) { uint64_t wait_time = current_tick - proc->last_scheduled_tick; // Boost processes that have waited too long - if (wait_time > aging_threshold) { + if (wait_time > aging_threshold || wait_time > STARVATION_THRESHOLD) { // Remove from current queue if (node->prev) { node->prev->next = node->next; @@ -608,16 +607,19 @@ static void SmartAging(void) { queue->count--; - // Boost to higher priority (but not above RT threshold for user processes) - uint32_t new_priority = (proc->privilege_level == PROC_PRIV_SYSTEM) ? 0 : RT_PRIORITY_THRESHOLD; + // CRITICAL FIX: To prevent starvation, user processes must be boosted + // to the highest priority (0) to guarantee they get to run. Boosting + // them to a lower priority was ineffective. 
+ uint32_t new_priority = 0; + proc->priority = new_priority; proc->last_scheduled_tick = current_tick; - + // Add to higher priority queue PriorityQueue* dst = &MLFQscheduler.queues[new_priority]; node->next = NULL; node->prev = dst->tail; - + if (dst->tail) { dst->tail->next = node; dst->tail = node; @@ -627,10 +629,10 @@ static void SmartAging(void) { dst->count++; MLFQscheduler.active_bitmap |= (1U << new_priority); } - + node = next; } - + // Update bitmap if queue became empty if (queue->count == 0) { MLFQscheduler.active_bitmap &= ~(1U << level); @@ -638,6 +640,7 @@ static void SmartAging(void) { } } + // Enhanced scheduler with smart preemption and load balancing void FastSchedule(struct Registers* regs) { irq_flags_t flags = SpinLockIrqSave(&scheduler_lock); @@ -646,21 +649,27 @@ void FastSchedule(struct Registers* regs) { AtomicInc(&scheduler_calls); AtomicInc(&MLFQscheduler.tick_counter); - // Fairness boosting - more frequent than aging - if (UNLIKELY(MLFQscheduler.tick_counter % FAIRNESS_BOOST_INTERVAL == 0)) { + // FIXED: Less frequent fairness boosting to prevent chaos + if (UNLIKELY(MLFQscheduler.tick_counter % FAIRNESS_BOOST_ACTUAL_INTERVAL == 0)) { // Boost processes that haven't run recently for (int i = 1; i < MAX_PROCESSES; i++) { if (processes[i].pid != 0 && processes[i].state == PROC_READY) { uint64_t wait_time = MLFQscheduler.tick_counter - processes[i].last_scheduled_tick; - if (wait_time > FAIRNESS_BOOST_INTERVAL * 2 && processes[i].priority > 0) { - processes[i].priority = 0; // Boost to highest priority + + // FIXED: Much higher threshold and respect RT boundaries + if (wait_time > FAIRNESS_WAIT_THRESHOLD || wait_time > STARVATION_THRESHOLD) { + if (processes[i].privilege_level == PROC_PRIV_SYSTEM && processes[i].priority > 0) { + processes[i].priority = 0; // System processes to RT + } else if (processes[i].privilege_level != PROC_PRIV_SYSTEM && processes[i].priority > RT_PRIORITY_THRESHOLD) { + processes[i].priority = RT_PRIORITY_THRESHOLD; // User processes to user RT + } } } } } - - // Smart aging for long-term fairness - if (UNLIKELY(MLFQscheduler.tick_counter - MLFQscheduler.last_boost_tick >= BOOST_INTERVAL)) { + + // Smart aging for long-term fairness - FIXED: Less frequent + if (UNLIKELY(MLFQscheduler.tick_counter - MLFQscheduler.last_boost_tick >= (BOOST_INTERVAL * 2))) { SmartAging(); MLFQscheduler.last_boost_tick = MLFQscheduler.tick_counter; } @@ -679,7 +688,7 @@ void FastSchedule(struct Registers* regs) { // Calculate CPU burst for this process cpu_burst = MLFQscheduler.queues[old_proc->priority].quantum - MLFQscheduler.quantum_remaining; - + // Update CPU burst history for (int i = CPU_BURST_HISTORY - 1; i > 0; i--) { old_proc->cpu_burst_history[i] = old_proc->cpu_burst_history[i-1]; @@ -693,21 +702,21 @@ void FastSchedule(struct Registers* regs) { MLFQscheduler.quantum_remaining--; } - // Smart preemption logic + // FIXED: Much less aggressive preemption logic int best_priority = FindBestQueue(); bool should_preempt = false; - - // Real-time processes can always preempt with bias - if (best_priority != -1 && best_priority < RT_PRIORITY_THRESHOLD && - best_priority + PREEMPTION_BIAS < (int)old_proc->priority) { + + // FIXED: Higher bias and only for critical RT processes + if (best_priority == CRITICAL_PREEMPTION_LEVEL && + old_proc->priority > PREEMPTION_MIN_PRIORITY_GAP) { // And only if current is much lower priority should_preempt = true; } - // Regular preemption on quantum expiry or higher priority - else if (MLFQscheduler.quantum_remaining 
== 0 || - (best_priority != -1 && best_priority < (int)old_proc->priority)) { + // FIXED: Only preempt on quantum expiry or significantly higher priority + else if (MLFQscheduler.quantum_remaining == 0 || + (best_priority != -1 && (best_priority + PREEMPTION_BIAS < (int)old_proc->priority))) { should_preempt = true; } - + if (!should_preempt) { SpinUnlockIrqRestore(&scheduler_lock, flags); return; @@ -718,26 +727,29 @@ void FastSchedule(struct Registers* regs) { ready_process_bitmap |= (1ULL << old_slot); old_proc->preemption_count++; - // Fair priority adjustment - protect system processes from demotion + // FIXED: Much less aggressive priority adjustment if (old_proc->privilege_level != PROC_PRIV_SYSTEM) { - if (cpu_burst < MLFQscheduler.queues[old_proc->priority].quantum / 4) { - // Interactive user process, boost - if (old_proc->priority > RT_PRIORITY_THRESHOLD) { - old_proc->priority--; - } - } else if (cpu_burst >= MLFQscheduler.queues[old_proc->priority].quantum) { - // CPU intensive user process, demote gradually + // Only demote if process used full quantum AND is CPU intensive + if (MLFQscheduler.quantum_remaining == 0) { // Simpler check: used its whole turn if (old_proc->priority < MAX_PRIORITY_LEVELS - 1) { - old_proc->priority++; + old_proc->priority++; // Demote CPU-bound tasks + } + } + // Boost truly interactive processes that yielded early + else if (cpu_burst < (MLFQscheduler.queues[old_proc->priority].quantum / 2)) { + // Boost to the highest user priority if it's not already there. + if (old_proc->priority > RT_PRIORITY_THRESHOLD) { + old_proc->priority = RT_PRIORITY_THRESHOLD; } } } else { - // System processes get fairness boost if they've been demoted + // System processes: restore to base priority if demoted if (old_proc->priority > old_proc->base_priority) { old_proc->priority = old_proc->base_priority; } } + // Re-add the process to the scheduler with its new (or old) priority AddToScheduler(old_slot); } @@ -764,77 +776,79 @@ select_next:; Process* new_proc = &processes[next_slot]; new_proc->state = PROC_RUNNING; ready_process_bitmap &= ~(1ULL << next_slot); - - // Dynamic quantum adjustment + + // FIXED: Always reset to full quantum for fairness uint32_t base_quantum = MLFQscheduler.queues[new_proc->priority].quantum; - - // Boost quantum for I/O bound processes - if (new_proc->io_operations >= IO_BOOST_THRESHOLD) { - base_quantum = (base_quantum * 3) / 2; + + // FIXED: Less aggressive quantum adjustment + if (new_proc->io_operations >= IO_BOOST_THRESHOLD * 3) { + base_quantum = (base_quantum * IO_QUANTUM_BOOST_FACTOR) / IO_QUANTUM_BOOST_DIVISOR; } - - // Reduce quantum for CPU hogs + + // FIXED: Less punishment for CPU intensive processes uint32_t avg_burst = 0; for (int i = 0; i < CPU_BURST_HISTORY; i++) { avg_burst += new_proc->cpu_burst_history[i]; } avg_burst /= CPU_BURST_HISTORY; - - if (avg_burst > base_quantum) { - base_quantum = (base_quantum * 3) / 4; + + if (avg_burst > base_quantum * CPU_INTENSIVE_MULTIPLIER) { + base_quantum = (base_quantum * CPU_QUANTUM_PENALTY_FACTOR) / CPU_QUANTUM_PENALTY_DIVISOR; } - + MLFQscheduler.quantum_remaining = base_quantum; new_proc->last_scheduled_tick = MLFQscheduler.tick_counter; FastMemcpy(regs, &new_proc->context, sizeof(struct Registers)); AtomicInc(&context_switches); - + // Update context switch overhead measurement uint32_t overhead = MLFQscheduler.tick_counter - schedule_start; MLFQscheduler.context_switch_overhead = (MLFQscheduler.context_switch_overhead * 7 + overhead) / 8; } else { 
MLFQscheduler.quantum_remaining = 0; } - + SpinUnlockIrqRestore(&scheduler_lock, flags); } -// Enhanced I/O and blocking handling void ProcessBlocked(uint32_t slot) { Process* proc = &processes[slot]; - + // Track I/O operations for classification proc->io_operations++; - + if (slot == MLFQscheduler.current_running) { // Calculate partial CPU burst uint32_t partial_burst = MLFQscheduler.queues[proc->priority].quantum - MLFQscheduler.quantum_remaining; - + // Update burst history with partial burst for (int i = CPU_BURST_HISTORY - 1; i > 0; i--) { proc->cpu_burst_history[i] = proc->cpu_burst_history[i-1]; } proc->cpu_burst_history[0] = partial_burst; - + MLFQscheduler.quantum_remaining = 0; RequestSchedule(); } - // Smart priority boost for I/O bound processes - if (proc->state == PROC_READY) { - // Calculate average CPU burst - uint32_t avg_burst = 0; - for (int i = 0; i < CPU_BURST_HISTORY; i++) { - avg_burst += proc->cpu_burst_history[i]; - } - avg_burst /= CPU_BURST_HISTORY; - - // I/O bound processes (short CPU bursts) get priority boost - if (avg_burst < QUANTUM_BASE / 2 && proc->priority > RT_PRIORITY_THRESHOLD) { - proc->priority = RT_PRIORITY_THRESHOLD; // Boost to interactive level - } else if (proc->priority > 0 && proc->io_operations > IO_BOOST_THRESHOLD) { - proc->priority--; // Regular boost for I/O processes + // FIXED: Much more conservative I/O boosting + if (proc->state == PROC_READY && proc->privilege_level != PROC_PRIV_SYSTEM) { + // If a process blocks, it's very likely interactive. Boost it. + // We will boost it to the highest user-space priority. + uint32_t highest_user_priority = RT_PRIORITY_THRESHOLD; + + if (proc->priority > highest_user_priority) { + // Remove from the old, lower-priority queue + if (proc->scheduler_node) { + RemoveFromScheduler(slot); + } + + // Boost priority directly + proc->priority = highest_user_priority; + + // Add it back to the new, higher-priority queue + AddToScheduler(slot); } } } @@ -854,13 +868,16 @@ void RequestSchedule(void) { void Yield() { Process* current = GetCurrentProcess(); if (current) { - current->state = PROC_BLOCKED; // Mark as blocked for scheduler to boost + // A process that yields is ready to run again, just giving up its timeslice. + // Setting it to PROC_BLOCKED was incorrect as there was no corresponding unblock mechanism. + current->state = PROC_READY; } RequestSchedule(); + // This instruction halts the CPU until the next interrupt (e.g., the timer), + // which will then trigger the scheduler. 
__asm__ __volatile__("hlt"); } - void ProcessExitStub() { Process* current = GetCurrentProcess(); @@ -876,7 +893,7 @@ void ProcessExitStub() { } } -static uint32_t CreateSecureProcess(void (*entry_point)(void), uint8_t privilege) { +uint32_t CreateSecureProcess(void (*entry_point)(void), uint8_t privilege) { irq_flags_t flags = SpinLockIrqSave(&scheduler_lock); if (UNLIKELY(!entry_point)) { SpinUnlockIrqRestore(&scheduler_lock, flags); @@ -941,7 +958,7 @@ static uint32_t CreateSecureProcess(void (*entry_point)(void), uint8_t privilege processes[slot].io_operations = 0; processes[slot].preemption_count = 0; processes[slot].wait_time = 0; - + // Initialize CPU burst history with reasonable defaults for (int i = 0; i < CPU_BURST_HISTORY; i++) { processes[slot].cpu_burst_history[i] = QUANTUM_BASE / 2; @@ -995,7 +1012,7 @@ void CleanupTerminatedProcesses(void) { irq_flags_t flags = SpinLockIrqSave(&scheduler_lock); // Process a limited number per call to avoid long interrupt delays int cleanup_count = 0; - const int MAX_CLEANUP_PER_CALL = 3; + const int MAX_CLEANUP_PER_CALL = CLEANUP_MAX_PER_CALL; while (AtomicRead(&term_queue_count) > 0 && cleanup_count < MAX_CLEANUP_PER_CALL) { uint32_t slot = RemoveFromTerminationQueueAtomic(); @@ -1060,9 +1077,6 @@ Process* GetProcessByPid(uint32_t pid) { return NULL; } -void RegisterSecurityManager(uint32_t pid) { - security_manager_pid = pid; -} void SystemTracer(void) { PrintKernelSuccess("[SYSTEM] SystemTracer() has started. Scanning...\n"); @@ -1072,18 +1086,35 @@ void SystemTracer(void) { } } +static int SecureTokenUpdate(Process* proc, uint8_t new_flags) { + if (!proc || proc->pid == 0) return 0; + + // Only allow self-modification or system process modification + Process* caller = GetCurrentProcess(); + if (caller->pid != proc->pid && caller->privilege_level != PROC_PRIV_SYSTEM) { + return 0; // Unauthorized + } + + // Create new token with updated flags + SecurityToken new_token = proc->token; + new_token.flags |= new_flags; + new_token.checksum = 0; // Clear for recalculation + new_token.checksum = CalculateSecureChecksum(&new_token, proc->pid); + + // Atomic update + proc->token = new_token; + return 1; +} + void SystemIntegrityVerificationAgent(void) { PrintKernelSuccess("[SIVA] SystemIntegrityVerificationAgent initializing...\n"); Process* current = GetCurrentProcess(); - // Make SIVA immune, critical and supervisor - ultimate authority - current->token.flags |= (PROC_FLAG_IMMUNE | PROC_FLAG_CRITICAL | PROC_FLAG_SUPERVISOR); - - // FIX: The checksum field must be zeroed before recalculating the hash. 
- current->token.checksum = 0; - current->token.checksum = CalculateSecureChecksum(¤t->token, current->pid); - - RegisterSecurityManager(current->pid); + if (!SecureTokenUpdate(current, PROC_FLAG_IMMUNE | PROC_FLAG_CRITICAL | PROC_FLAG_SUPERVISOR)) { + PANIC("SIVA: Failed to update secure token"); + } + // register + security_manager_pid = current->pid; // Create system tracer with enhanced security uint32_t tracer_pid = CreateSecureProcess(SystemTracer, PROC_PRIV_SYSTEM); @@ -1106,7 +1137,7 @@ void SystemIntegrityVerificationAgent(void) { uint64_t last_memory_scan = 0; uint32_t threat_level = 0; uint32_t suspicious_activity_count = 0; - + // Dynamic intensity control uint32_t base_scan_interval = 100; uint32_t current_scan_interval = base_scan_interval; @@ -1122,22 +1153,60 @@ void SystemIntegrityVerificationAgent(void) { current_scan_interval = base_scan_interval * 2; // Moderate when normal } + if (current->state == PROC_DYING || current->state == PROC_ZOMBIE) { + PrintKernelError("[SIVA] CRITICAL: SIVA compromised - emergency restart\n"); + // Could trigger system recovery here instead of dying + PANIC("SIVA terminated - security system failure"); + } + const uint64_t current_tick = GetSystemTicks(); - + + if (current_tick - last_behavior_analysis >= 25) { // Run this check often + last_behavior_analysis = current_tick; + uint64_t check_bitmap = active_process_bitmap; + int proc_scanned = 0; + + while (check_bitmap && proc_scanned < 8) { // Scan a few processes each time + const int slot = FastFFS(check_bitmap); + check_bitmap &= ~(1ULL << slot); + proc_scanned++; + + const Process* proc = &processes[slot]; + + // THE CRITICAL CHECK: Is this process running as system without authorization? + if (proc->privilege_level == PROC_PRIV_SYSTEM && + !(proc->token.flags & (PROC_FLAG_SUPERVISOR | PROC_FLAG_CRITICAL))) { + + // This process has elevated privileges but is not a trusted supervisor + // or a critical process. This is a massive red flag. + PrintKernelError("[SIVA] THREAT: Illicit system process detected! PID: "); + PrintKernelInt(proc->pid); + PrintKernelError("\n"); + + // Immediately terminate with extreme prejudice. + SivaTerminate(proc->pid, "Unauthorized privilege escalation"); + threat_level += 20; // Escalate threat level immediately + } + } + } + // 1. Token integrity verification if (current_tick - last_integrity_scan >= 50) { last_integrity_scan = current_tick; uint64_t active_bitmap = active_process_bitmap; int scanned = 0; - - while (active_bitmap && scanned < 3) { + + // FIX: Increased the number of processes scanned per cycle from 3 to 16 + // to make detection much more effective and timely. + while (active_bitmap && scanned < 16) { const int slot = FastFFS(active_bitmap); active_bitmap &= ~(1ULL << slot); scanned++; const Process* proc = &processes[slot]; if (proc->state == PROC_READY || proc->state == PROC_RUNNING) { - if (UNLIKELY(!ValidateToken(&proc->token, proc->pid))) { + if (proc->pid != security_manager_pid && // Don't check SIVA itself + UNLIKELY(!ValidateToken(&proc->token, proc->pid))) { PrintKernelError("[SIVA] CRITICAL: Token corruption PID "); PrintKernelInt(proc->pid); PrintKernelError("\n"); @@ -1147,46 +1216,17 @@ void SystemIntegrityVerificationAgent(void) { } } } - - // 2. 
Behavioral analysis - if (current_tick - last_behavior_analysis >= 100) { - last_behavior_analysis = current_tick; - - for (int i = 1; i < MAX_PROCESSES; i++) { - if (processes[i].pid != 0 && processes[i].state != PROC_TERMINATED) { - const Process* proc = &processes[i]; - - // Detect privilege escalation - KILL IMMEDIATELY - if (proc->privilege_level != proc->token.privilege) { - PrintKernelError("[SIVA] TERMINATING: Privilege escalation PID "); - PrintKernelInt(proc->pid); - PrintKernelError("\n"); - SivaTerminate(proc->pid, "Privilege escalation"); - threat_level += 15; - } - - // Detect abnormal preemption patterns - KILL - if (proc->preemption_count > 500) { - PrintKernelError("[SIVA] TERMINATING: Suspicious activity PID "); - PrintKernelInt(proc->pid); - PrintKernelError("\n"); - SivaTerminate(proc->pid, "Abnormal behavior"); - suspicious_activity_count++; - } - } - } - } - + // 3. Memory integrity checks if (current_tick - last_memory_scan >= 300) { last_memory_scan = current_tick; - + if (MLFQscheduler.current_running >= MAX_PROCESSES) { PrintKernelError("[SIVA] CRITICAL: Scheduler corruption detected\n"); threat_level += 30; PANIC("SIVA: Critical scheduler corruption - system compromised"); } - + uint32_t actual_count = __builtin_popcountll(active_process_bitmap); if (actual_count != process_count) { PrintKernelError("[SIVA] CRITICAL: Process count corruption\n"); @@ -1194,29 +1234,30 @@ void SystemIntegrityVerificationAgent(void) { suspicious_activity_count++; } } - + // Aggressive threat management - if (threat_level > 30) { - PrintKernelError("[SIVA] DEFCON 1: System under attack - threat level "); - PrintKernelInt(threat_level); - PrintKernelError(" - LOCKDOWN MODE\n"); - // Kill all non-critical processes + if (threat_level > 50) { // Much higher threshold + PrintKernelError("[SIVA] DEFCON 1: Critical threat - selective termination\n"); + // Only kill processes with recent violations, not everything for (int i = 1; i < MAX_PROCESSES; i++) { - if (processes[i].pid != 0 && processes[i].state != PROC_TERMINATED && - !(processes[i].token.flags & PROC_FLAG_CRITICAL) && - processes[i].pid != security_manager_pid) { - SivaTerminate(processes[i].pid, "Emergency lockdown"); - } + if (processes[i].pid != 0 && + processes[i].state != PROC_TERMINATED && + !(processes[i].token.flags & (PROC_FLAG_CRITICAL | PROC_FLAG_IMMUNE)) && + processes[i].pid != security_manager_pid && + processes[i].preemption_count > 1000) { // Only suspicious ones + SivaTerminate(processes[i].pid, "Selective lockdown"); + } } - } else if (threat_level > 15) { - PrintKernelError("[SIVA] DEFCON 2: Elevated threat - scanning intensified\n"); - last_integrity_scan = current_tick - 40; // More frequent scans + threat_level = 25; // Reset after action + } else if (threat_level > 25) { + PrintKernelWarning("[SIVA] DEFCON 2: Elevated monitoring\n"); + current_scan_interval = base_scan_interval / 2; // Scan more frequently } - + if (current_tick % 500 == 0 && threat_level > 0) { threat_level--; } - + CleanupTerminatedProcesses(); Yield(); } @@ -1259,10 +1300,28 @@ int ProcessInit(void) { PrintKernelInt(siva_pid); PrintKernel("\n"); + + // Create shell process + PrintKernel("[SYSTEM] Creating shell process...\n"); + uint32_t shell_pid = CreateSecureProcess(ShellProcess, PROC_PRIV_SYSTEM); + if (!shell_pid) { + PANIC("CRITICAL: Failed to create shell process"); + } + + Process* shell_proc = GetProcessByPid(shell_pid); + if (shell_proc) { + shell_proc->token.flags |= PROC_FLAG_SUPERVISOR; + // IMPORTANT: Recalculate the 
checksum after changing the flags! + shell_proc->token.checksum = CalculateSecureChecksum(&shell_proc->token, shell_pid); + } + + PrintKernelSuccess("[SYSTEM] Shell created with PID: "); + PrintKernelInt(shell_pid); + PrintKernel("\n"); + return 0; } - void DumpPerformanceStats(void) { PrintKernel("[PERF] Context switches: "); PrintKernelInt((uint32_t)context_switches); @@ -1305,7 +1364,7 @@ static const char* GetStateString(ProcessState state) { } void ListProcesses(void) { - PrintKernel("--- Enhanced Process List ---\n"); + PrintKernel("\n--- Enhanced Process List ---\n"); PrintKernel("PID\tState \tPrio\tCPU%%\tI/O\tPreempt\n"); PrintKernel("-----------------------------------------------\n"); diff --git a/kernel/process/Process.h b/kernel/process/Process.h index b1ab0c2..8407f89 100644 --- a/kernel/process/Process.h +++ b/kernel/process/Process.h @@ -5,21 +5,83 @@ #include "Ipc.h" #include "Cpu.h" -#define MAX_PROCESSES 64 -#define STACK_SIZE 4096 - -// MLFQ Tuning Parameters - Adjusted for fairness -#define MAX_PRIORITY_LEVELS 8 -#define QUANTUM_BASE 20 // Larger base quantum for fairness -#define BOOST_INTERVAL 50 // More frequent boosting to prevent starvation -#define RT_PRIORITY_THRESHOLD 2 // Real-time priority threshold -#define IO_BOOST_THRESHOLD 2 // I/O operations to trigger boost -#define CPU_BURST_HISTORY 4 // Track last N CPU bursts for prediction -#define AGING_THRESHOLD_BASE 30 // More aggressive aging for fairness -#define PREEMPTION_BIAS 0 // No RT bias for fairness -#define QUANTUM_DECAY_SHIFT 1 // Quantum decay rate -#define LOAD_BALANCE_THRESHOLD 1 // Better load balancing -#define FAIRNESS_BOOST_INTERVAL 25 // Boost starved processes every 25 ticks + +// ============================================================================= +// MLFQ SCHEDULER TUNING PARAMETERS +// ============================================================================= + +// Core Queue Configuration +#define MAX_PRIORITY_LEVELS 6 // Total priority levels (0=highest) +#define RT_PRIORITY_THRESHOLD 1 // Levels 0 to RT_PRIORITY_THRESHOLD-1 are RT +#define MAX_PROCESSES 64 // Maximum concurrent processes + +// Quantum Management +#define QUANTUM_BASE 5 // Base time quantum (ticks) +#define QUANTUM_DECAY_SHIFT 0 // Quantum reduction per level (0 = no decay) +#define QUANTUM_MIN 1 // Minimum quantum allowed +#define QUANTUM_MAX 32 // Maximum quantum allowed + +// Dynamic Quantum Adjustments +#define IO_QUANTUM_BOOST_FACTOR 5 // Boost factor for I/O processes (n/4) +#define IO_QUANTUM_BOOST_DIVISOR 4 +#define CPU_QUANTUM_PENALTY_FACTOR 7 // Penalty factor for CPU hogs (n/8) +#define CPU_QUANTUM_PENALTY_DIVISOR 8 +#define CPU_INTENSIVE_MULTIPLIER 2 // Threshold multiplier for CPU intensive + +// Preemption Control +#define PREEMPTION_BIAS 4 // Priority difference needed to preempt (higher = less preemption) +#define CRITICAL_PREEMPTION_LEVEL 0 // Only this level can preempt aggressively +#define PREEMPTION_MIN_PRIORITY_GAP 2 // Minimum gap for regular preemption + +// Fairness and Aging +#define AGING_THRESHOLD_BASE 15 // Base ticks before aging kicks in +#define BOOST_INTERVAL 40 // Global aging interval (ticks) +#define FAIRNESS_BOOST_INTERVAL 20 // Local fairness boost interval +#define FAIRNESS_BOOST_MULTIPLIER 4 // Multiplier for boost interval (boost every 20*4=80 ticks) +#define FAIRNESS_WAIT_THRESHOLD 50 // Wait time before fairness boost (ticks) +#define STARVATION_THRESHOLD 100 // Critical starvation prevention (ticks) + +// Load Balancing +#define LOAD_BALANCE_THRESHOLD 2 // Queue 
size before load balancing +#define LOAD_BALANCE_MULTIPLIER 3 // Actual threshold = LOAD_BALANCE_THRESHOLD * MULTIPLIER +#define HIGH_LOAD_PROCESS_COUNT 5 // System considered "high load" above this +#define AGING_ACCELERATION_FACTOR 2 // Speed up aging under high load + +// Process Classification +#define IO_BOOST_THRESHOLD 1 // I/O operations before considering I/O bound +#define IO_BOOST_CONSERVATIVE_MULTIPLIER 2 // Conservative I/O boost (threshold * 2) +#define IO_BOOST_AGGRESSIVE_MULTIPLIER 3 // Aggressive I/O boost (threshold * 3) +#define CPU_BURST_HISTORY 3 // Number of CPU bursts to track +#define INTERACTIVE_BURST_DIVISOR 2 // CPU burst < QUANTUM_BASE/4 = interactive +#define INTERACTIVE_AGGRESSIVE_DIVISOR 8 // CPU burst < QUANTUM_BASE/8 = very interactive + +// Priority Adjustment Thresholds +#define SINGLE_DEMOTION_ONLY 1 // Only demote one level at a time +#define CPU_INTENSIVE_HISTORY_COUNT 2 // Require N consecutive CPU intensive bursts +#define PRIORITY_RESTORE_SYSTEM 1 // Always restore system processes to base +#define USER_RT_BOOST_THRESHOLD 4 // User processes boost to RT_PRIORITY_THRESHOLD + +// Performance and Statistics +#define CONTEXT_SWITCH_OVERHEAD_SAMPLES 8 // Running average sample count (powers of 2) +#define CONTEXT_SWITCH_OVERHEAD_SHIFT 3 // log2(SAMPLES) for bit shifting +#define PERFORMANCE_COUNTER_RESET 10000 // Reset counters every N context switches + +// Security and Process Management +#define TERMINATION_QUEUE_SIZE MAX_PROCESSES // Size of termination queue +#define CLEANUP_MAX_PER_CALL 3 // Max processes to cleanup per call +#define SECURITY_VIOLATION_LIMIT 5 // Max violations before panic + +// Stack and Memory +#define STACK_SIZE 4096 // Process stack size +#define CACHE_LINE_SIZE 64 // CPU cache line size for alignment + +// ============================================================================= +// DERIVED CONSTANTS (Don't modify these - they're calculated from above) +// ============================================================================= +#define RT_QUEUE_MASK ((1U << RT_PRIORITY_THRESHOLD) - 1) +#define REGULAR_QUEUE_MASK (~RT_QUEUE_MASK) +#define FAIRNESS_BOOST_ACTUAL_INTERVAL (FAIRNESS_BOOST_INTERVAL * FAIRNESS_BOOST_MULTIPLIER) +#define LOAD_BALANCE_ACTUAL_THRESHOLD (LOAD_BALANCE_THRESHOLD * LOAD_BALANCE_MULTIPLIER) #define PROC_PRIV_SYSTEM 0 // Highest privilege (kernel services) #define PROC_PRIV_USER 1 // User processes @@ -51,7 +113,7 @@ typedef enum { TERM_RESOURCE // Resource exhaustion } TerminationReason; // Use the same structure for context switching to avoid mismatches -typedef struct Registers ProcessContext; +typedef Registers ProcessContext; typedef struct SchedulerNode { uint32_t slot; @@ -128,10 +190,12 @@ void ProcessBlocked(uint32_t slot); void DumpSchedulerState(void); // Security functions -void RegisterSecurityManager(uint32_t pid); -void KillProcess(uint32_t pid); uint64_t GetSystemTicks(void); void ListProcesses(void); void GetProcessStats(uint32_t pid, uint32_t* cpu_time, uint32_t* io_ops, uint32_t* preemptions); void BoostProcessPriority(uint32_t pid); +void KillProcess(uint32_t pid); + +// DEBUG +void DumpPerformanceStats(void); #endif \ No newline at end of file diff --git a/meson.build b/meson.build index e5365e2..5e75ec4 100644 --- a/meson.build +++ b/meson.build @@ -69,14 +69,15 @@ c_sources = [ src_root + '/kernel/memory/MemOps.c', src_root + '/kernel/memory/KernelHeap.c', src_root + '/kernel/memory/VMem.c', -# src_root + '/kernel/graphics/VGA.c', src_root + '/kernel/process/Process.c', 
src_root + '/kernel/process/UserMode.c', + src_root + '/kernel/etc/Shell.c', arch_root + '/idt/Idt.c', arch_root + '/gdt/Gdt.c', arch_root + '/syscall/Syscall.c', arch_root + '/interrupts/Interrupts.c', src_root + '/drivers/Pic.c', + src_root + '/drivers/Keyboard.c', arch_root + '/cpu/Cpu.c', src_root + '/kernel/atomic/Atomics.c', src_root + '/kernel/ipc/Ipc.c', From 3d506c633956cae0fcab6d1378265ff533f374d9 Mon Sep 17 00:00:00 2001 From: Atheria Date: Fri, 8 Aug 2025 10:46:15 +0700 Subject: [PATCH 2/2] A lot of fixes --- drivers/Keyboard.c | 10 +++++----- kernel/core/Kernel.c | 36 +++++++++++++++++++++++++++++++----- kernel/process/Process.c | 2 +- kernel/process/Process.h | 2 +- 4 files changed, 38 insertions(+), 12 deletions(-) diff --git a/drivers/Keyboard.c b/drivers/Keyboard.c index d6ea26d..ba7b99c 100644 --- a/drivers/Keyboard.c +++ b/drivers/Keyboard.c @@ -5,10 +5,10 @@ #define KEYBOARD_DATA_PORT 0x60 #define KEYBOARD_STATUS_PORT 0x64 -static char input_buffer[256]; -static int buffer_head = 0; -static int buffer_tail = 0; -static int buffer_count = 0; +static volatile char input_buffer[256]; +static volatile int buffer_head = 0; +static volatile int buffer_tail = 0; +static volatile int buffer_count = 0; static char scancode_to_ascii[] = { 0, 0, '1', '2', '3', '4', '5', '6', '7', '8', '9', '0', '-', '=', '\b', @@ -33,7 +33,7 @@ void KeyboardHandler(void) { if (scancode >= sizeof(scancode_to_ascii)) return; // Invalid scancode char c = scancode_to_ascii[scancode]; - if (c && buffer_count < 255) { + if (c && buffer_count < 256) { input_buffer[buffer_tail] = c; buffer_tail = (buffer_tail + 1) % 256; buffer_count++; diff --git a/kernel/core/Kernel.c b/kernel/core/Kernel.c index 2ed5853..ea49ebd 100644 --- a/kernel/core/Kernel.c +++ b/kernel/core/Kernel.c @@ -3,8 +3,8 @@ #include "Console.h" #include "Gdt.h" #include "Idt.h" -#include "Keyboard.h" #include "KernelHeap.h" +#include "Keyboard.h" #include "MemOps.h" #include "Memory.h" #include "Multiboot2.h" @@ -14,6 +14,7 @@ #include "Shell.h" #include "Syscall.h" #include "VMem.h" +#include "stdbool.h" #include "stdint.h" void KernelMainHigherHalf(void); @@ -114,14 +115,39 @@ void BootstrapMapPage(uint64_t pml4_phys, uint64_t vaddr, uint64_t paddr, uint64 static void SetupMemoryProtection(void) { PrintKernel("[SYSTEM] Setting up memory protection...\n"); - // Enable SMEP/SMAP if available + // Check CPUID for SMEP/SMAP support + uint32_t eax, ebx, ecx, edx; + __asm__ volatile("cpuid" + : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx) + : "a"(7), "c"(0)); + uint64_t cr4; __asm__ volatile("mov %%cr4, %0" : "=r"(cr4)); - PrintKernel("[SYSTEM] CR4 features enabled\n"); - PrintKernelSuccess("[SYSTEM] Memory protection configured\n"); -} + bool protection_enabled = false; + + // Enable SMEP if supported (bit 7 in EBX from CPUID leaf 7) + if (ebx & (1 << 7)) { + cr4 |= (1ULL << 20); // CR4.SMEP + PrintKernel("[SYSTEM] SMEP enabled\n"); + protection_enabled = true; + } + // Enable SMAP if supported (bit 20 in EBX from CPUID leaf 7) + if (ebx & (1 << 20)) { + cr4 |= (1ULL << 21); // CR4.SMAP + PrintKernel("[SYSTEM] SMAP enabled\n"); + protection_enabled = true; + } + + // Write back the modified CR4 + if (protection_enabled) { + __asm__ volatile("mov %0, %%cr4" :: "r"(cr4) : "memory"); + PrintKernelSuccess("[SYSTEM] Memory protection configured\n"); + } else { + PrintKernel("[SYSTEM] No memory protection features available\n"); + } +} static InitResultT CoreInit(void) { // Initialize virtual memory manager with validation diff --git 
a/kernel/process/Process.c b/kernel/process/Process.c index 54be59c..c0f939d 100644 --- a/kernel/process/Process.c +++ b/kernel/process/Process.c @@ -27,7 +27,7 @@ static const uint64_t SECURITY_MAGIC = 0x5EC0DE4D41474943ULL; static const uint64_t SECURITY_SALT = 0xDEADBEEFCAFEBABEULL; -static const uint32_t MAX_SECURITY_VIOLATIONS = 5; +static const uint32_t MAX_SECURITY_VIOLATIONS = SECURITY_VIOLATION_LIMIT; static Process processes[MAX_PROCESSES] ALIGNED_CACHE; static volatile uint32_t next_pid = 1; diff --git a/kernel/process/Process.h b/kernel/process/Process.h index 8407f89..dc3776a 100644 --- a/kernel/process/Process.h +++ b/kernel/process/Process.h @@ -69,7 +69,7 @@ // Security and Process Management #define TERMINATION_QUEUE_SIZE MAX_PROCESSES // Size of termination queue #define CLEANUP_MAX_PER_CALL 3 // Max processes to cleanup per call -#define SECURITY_VIOLATION_LIMIT 5 // Max violations before panic +#define SECURITY_VIOLATION_LIMIT 3 // Max violations before panic // Stack and Memory #define STACK_SIZE 4096 // Process stack size
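
A minimal host-side sketch (not part of the patch series) of the per-level time quantum that the reworked InitScheduler() logic produces, using the constants introduced in the new Process.h tuning block (QUANTUM_BASE 5, RT_PRIORITY_THRESHOLD 1, QUANTUM_DECAY_SHIFT 0, QUANTUM_MIN 1, QUANTUM_MAX 32, MAX_PRIORITY_LEVELS 6). The stdio/main framing is purely illustrative; only the shift-and-clamp formula mirrors the patch.

    /* sketch: reproduce the quantum table built by InitScheduler() */
    #include <stdio.h>
    #include <stdint.h>

    #define MAX_PRIORITY_LEVELS   6
    #define RT_PRIORITY_THRESHOLD 1
    #define QUANTUM_BASE          5
    #define QUANTUM_DECAY_SHIFT   0
    #define QUANTUM_MIN           1
    #define QUANTUM_MAX           32

    int main(void) {
        for (int i = 0; i < MAX_PRIORITY_LEVELS; i++) {
            uint32_t q;
            if (i < RT_PRIORITY_THRESHOLD) {
                /* real-time levels get larger quantums, clamped to QUANTUM_MAX */
                q = QUANTUM_BASE << (RT_PRIORITY_THRESHOLD - i);
                if (q > QUANTUM_MAX) q = QUANTUM_MAX;
            } else {
                /* regular levels decay by QUANTUM_DECAY_SHIFT (0 = no decay), clamped to QUANTUM_MIN */
                q = QUANTUM_BASE >> ((i - RT_PRIORITY_THRESHOLD) * QUANTUM_DECAY_SHIFT);
                if (q < QUANTUM_MIN) q = QUANTUM_MIN;
            }
            printf("priority %d: quantum %u ticks%s\n",
                   i, q, i < RT_PRIORITY_THRESHOLD ? " (real-time)" : "");
        }
        return 0; /* with these constants: level 0 -> 10 ticks, levels 1-5 -> 5 ticks */
    }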