diff --git a/arch/x86_64/interrupts/Interrupts.c b/arch/x86_64/interrupts/Interrupts.c
index cdcea18..39ec375 100644
--- a/arch/x86_64/interrupts/Interrupts.c
+++ b/arch/x86_64/interrupts/Interrupts.c
@@ -57,11 +57,11 @@ static void FatalExceptionHandler(const char* message, uint64_t interrupt_number
 // The C-level interrupt handler
 void InterruptHandler(struct Registers* regs) {
     ASSERT(regs != NULL);
-    if (regs->interrupt_number == 32) {
+    if (regs->interrupt_number == 32 || regs->interrupt_number == 33) { // Force ignore keyboard interrupt
         tick_count++;
         // FastDisplayTicks(tick_count);
         FastSchedule(regs);
-        outb(0x20, 0x20);
+        outb(0x20, 0x20); // EOI to master PIC
         return;
     }
 
@@ -90,7 +90,8 @@ void InterruptHandler(struct Registers* regs) {
     if (regs->error_code & 0x4) {
         PrintKernelError(" Mode: User\n");
-    } else {
+    }
+    else {
         PrintKernelError(" Mode: Supervisor\n");
     }
 
@@ -105,4 +106,4 @@ void InterruptHandler(struct Registers* regs) {
     PANIC_CODE("Page Fault", regs->error_code);
 }
 
-// The C-level interrupt handler
\ No newline at end of file
+// The C-level interrupt handler
diff --git a/drivers/Pic.h b/drivers/Pic.h
index e29c4ef..2419c21 100644
--- a/drivers/Pic.h
+++ b/drivers/Pic.h
@@ -1,7 +1,7 @@
 #ifndef PIC_H
 #define PIC_H
 
-#define PIT_FREQUENCY_HZ 1000
+#define PIT_FREQUENCY_HZ 200
 
 void PicInstall();
 void PitInstall();
diff --git a/include/Io.h b/include/Io.h
index cbb9fbc..781f100 100644
--- a/include/Io.h
+++ b/include/Io.h
@@ -13,4 +13,25 @@ static inline uint8_t inb(uint16_t port) {
     return ret;
 }
 
+typedef uint64_t irq_flags_t;
+
+static inline irq_flags_t save_irq_flags(void) {
+    irq_flags_t flags;
+    asm volatile("pushfq\n\tpopq %0" : "=r"(flags));
+    return flags;
+}
+
+static inline void restore_irq_flags(irq_flags_t flags) {
+    asm volatile("pushq %0\n\tpopfq" : : "r"(flags));
+}
+
+static inline void cli(void) {
+    asm volatile("cli");
+}
+
+static inline void sti(void) {
+    asm volatile("sti");
+}
+
 #endif
+
diff --git a/include/stddef.h b/include/stddef.h
new file mode 100644
index 0000000..4e4fff2
--- /dev/null
+++ b/include/stddef.h
@@ -0,0 +1,13 @@
+#ifndef STDDEF_H
+#define STDDEF_H
+
+#include "stdint.h"
+
+#undef size_t
+#define size_t uint64_t
+
+#ifndef NULL
+#define NULL ((void*)0)
+#endif
+
+#endif //STDDEF_H
diff --git a/kernel/atomic/Spinlock.h b/kernel/atomic/Spinlock.h
index 84138fa..f024ad9 100644
--- a/kernel/atomic/Spinlock.h
+++ b/kernel/atomic/Spinlock.h
@@ -2,6 +2,7 @@
 #define SPINLOCK_H
 
 #include "stdint.h"
+#include "Io.h"
 
 // Locks
 static inline void SpinLock(volatile int* lock) {
@@ -14,4 +15,19 @@ static inline void SpinUnlock(volatile int* lock) {
     __sync_lock_release(lock);
 }
 
+// Spinlock with interrupt saving/restoring
+static inline irq_flags_t SpinLockIrqSave(volatile int* lock) {
+    irq_flags_t flags = save_irq_flags();
+    cli();
+    while (__sync_lock_test_and_set(lock, 1)) {
+        while (*lock) __builtin_ia32_pause();
+    }
+    return flags;
+}
+
+static inline void SpinUnlockIrqRestore(volatile int* lock, irq_flags_t flags) {
+    __sync_lock_release(lock);
+    restore_irq_flags(flags);
+}
+
 #endif // SPINLOCK_H
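Two behavioral notes on the hunks above. Dropping `PIT_FREQUENCY_HZ` from 1000 to 200 stretches the timer tick from 1 ms to 5 ms, so the scheduler runs a fifth as often. And routing interrupt 33 (IRQ 1, the keyboard) through the timer branch means every keypress also increments `tick_count` and triggers a scheduling pass before the EOI; if the intent is only to acknowledge and discard keystrokes, the EOI alone would do.

The `SpinLockIrqSave`/`SpinUnlockIrqRestore` pair added to Spinlock.h saves RFLAGS before `cli`, so a critical section entered with interrupts already disabled restores that state on exit instead of unconditionally re-enabling via `sti`. A minimal usage sketch against the API introduced above (`demo_lock` and `shared_counter` are hypothetical):

```c
#include "Spinlock.h"

static volatile int demo_lock = 0;   // hypothetical lock for illustration
static uint64_t shared_counter = 0;  // state also touched from an IRQ handler

void IncrementShared(void) {
    // Disable interrupts before spinning: if the timer handler tried to
    // take demo_lock while this CPU already held it, a plain SpinLock
    // would deadlock against itself.
    irq_flags_t flags = SpinLockIrqSave(&demo_lock);
    shared_counter++;
    // Release the lock first, then restore the caller's interrupt state.
    SpinUnlockIrqRestore(&demo_lock, flags);
}
```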
diff --git a/kernel/core/Kernel.c b/kernel/core/Kernel.c
index f0210d6..ef3c0a2 100644
--- a/kernel/core/Kernel.c
+++ b/kernel/core/Kernel.c
@@ -14,6 +14,7 @@
 #include "stdbool.h"
 #include "Multiboot2.h"
 #include "AsmHelpers.h"
+#include "KernelHeap.h"
 #include "MemOps.h"
 #include "VMem.h"
 #include "Spinlock.h"
@@ -390,7 +391,7 @@ void KernelMain(uint32_t magic, uint32_t info) {
     ParseMultibootInfo(info);
     MemoryInit(g_multiboot_info_addr);
     VMemInit();
-
+    KernelHeapInit();
     uint64_t pml4_phys = VMemGetPML4PhysAddr();
     PrintKernelSuccess("[SYSTEM] Bootstrap: Mapping kernel...\n");
     uint64_t kernel_start = (uint64_t)_kernel_phys_start;
diff --git a/kernel/memory/KernelHeap.c b/kernel/memory/KernelHeap.c
new file mode 100644
index 0000000..0fe604e
--- /dev/null
+++ b/kernel/memory/KernelHeap.c
@@ -0,0 +1,115 @@
+#include "KernelHeap.h"
+#include "VMem.h"
+#include "Kernel.h"
+#include "Spinlock.h"
+#include "MemOps.h"
+#include "Panic.h"
+
+// Simple page-based allocator for now.
+// A more sophisticated allocator (e.g., buddy system, slab allocator)
+// would be needed for finer-grained allocations.
+
+// Structure to store metadata for each allocated block
+typedef struct HeapBlockHeader {
+    size_t size; // Size of the user-requested data (not including header)
+} HeapBlockHeader;
+
+static volatile int kheap_lock = 0;
+
+void KernelHeapInit() {
+    // No specific initialization needed for this simple page-based allocator
+    // VMemInit handles the underlying virtual memory setup.
+    PrintKernelSuccess("[SYSTEM] Kernel Heap Initialized (page-based).\n");
+}
+
+void* KernelMemoryAlloc(size_t size) {
+    if (size == 0) {
+        return NULL;
+    }
+
+    irq_flags_t flags = SpinLockIrqSave(&kheap_lock);
+
+    // Calculate total size needed: user data + header
+    size_t total_alloc_size = size + sizeof(HeapBlockHeader);
+
+    // Allocate memory using VMemAlloc, which handles page alignment and mapping
+    void* allocated_ptr = VMemAlloc(total_alloc_size);
+
+    if (allocated_ptr == NULL) {
+        PrintKernelError("[ERROR] KernelMemoryAlloc: Failed to allocate ");
+        PrintKernelInt(size);
+        PrintKernel(" bytes.\n");
+        SpinUnlockIrqRestore(&kheap_lock, flags);
+        return NULL;
+    }
+
+    // Store the size in the header
+    HeapBlockHeader* header = (HeapBlockHeader*)allocated_ptr;
+    header->size = size;
+
+    // Return pointer to the user-accessible memory (after the header)
+    void* user_ptr = (void*)((uint8_t*)allocated_ptr + sizeof(HeapBlockHeader));
+
+    SpinUnlockIrqRestore(&kheap_lock, flags);
+    return user_ptr;
+}
+
+void* KernelCallLocation(size_t num, size_t size) {
+    size_t total_size = num * size;
+    void* ptr = KernelMemoryAlloc(total_size);
+    if (ptr) {
+        FastMemset(ptr, 0, total_size);
+    }
+    return ptr;
+}
+
+void* KernelRealLocation(void* ptr, size_t size) {
+    if (ptr == NULL) {
+        return KernelMemoryAlloc(size);
+    }
+    if (size == 0) {
+        KernelFree(ptr);
+        return NULL;
+    }
+
+    irq_flags_t flags = SpinLockIrqSave(&kheap_lock);
+
+    // Get the original header and size
+    HeapBlockHeader* old_header = (HeapBlockHeader*)((uint8_t*)ptr - sizeof(HeapBlockHeader));
+    size_t old_size = old_header->size;
+
+    // Allocate new memory
+    void* new_ptr = KernelMemoryAlloc(size);
+    if (new_ptr) {
+        // Copy data from old to new, up to the minimum of old_size and new_size
+        FastMemcpy(new_ptr, ptr, (old_size < size) ? old_size : size);
+        // Free the old memory block
+        KernelFree(ptr);
+    } else {
+        PrintKernelError("[ERROR] KernelRealLocation: Failed to reallocate ");
+        PrintKernelInt(size);
+        PrintKernel(" bytes.\n");
+    }
+
+    SpinUnlockIrqRestore(&kheap_lock, flags);
+    return new_ptr;
+}
+
+void KernelFree(void* ptr) {
+    if (ptr == NULL) {
+        return;
+    }
+
+    irq_flags_t flags = SpinLockIrqSave(&kheap_lock);
+
+    // Get the header by subtracting the header size from the user pointer
+    HeapBlockHeader* header = (HeapBlockHeader*)((uint8_t*)ptr - sizeof(HeapBlockHeader));
+
+    // Get the original allocated size (including header)
+    size_t original_alloc_size = header->size + sizeof(HeapBlockHeader);
+
+    // Free the entire allocated block (including header) using VMemFree
+    VMemFree((void*)header, original_alloc_size);
+
+    SpinUnlockIrqRestore(&kheap_lock, flags);
+}
\ No newline at end of file
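One hazard worth flagging in `KernelRealLocation`: it acquires `kheap_lock` and then calls `KernelMemoryAlloc` and `KernelFree`, both of which acquire the same non-recursive lock, and `SpinLockIrqSave` spins with interrupts disabled, so the second acquisition on the same CPU can never succeed. `KernelCallLocation` is also missing an overflow check on `num * size`. A hedged sketch of one way to restructure the realloc path, holding the lock only for the header read (same types and helpers as KernelHeap.c above):

```c
// Sketch only, not the PR's implementation.
void* KernelRealLocation(void* ptr, size_t size) {
    if (ptr == NULL) return KernelMemoryAlloc(size);
    if (size == 0) { KernelFree(ptr); return NULL; }

    // Hold kheap_lock only while reading the old block's header.
    irq_flags_t flags = SpinLockIrqSave(&kheap_lock);
    HeapBlockHeader* old_header =
        (HeapBlockHeader*)((uint8_t*)ptr - sizeof(HeapBlockHeader));
    size_t old_size = old_header->size;
    SpinUnlockIrqRestore(&kheap_lock, flags);

    // These take (and release) kheap_lock internally, so the lock is
    // never held recursively.
    void* new_ptr = KernelMemoryAlloc(size);
    if (new_ptr) {
        FastMemcpy(new_ptr, ptr, (old_size < size) ? old_size : size);
        KernelFree(ptr);
    }
    return new_ptr;
}
```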
diff --git a/kernel/memory/KernelHeap.h b/kernel/memory/KernelHeap.h
new file mode 100644
index 0000000..17b0777
--- /dev/null
+++ b/kernel/memory/KernelHeap.h
@@ -0,0 +1,13 @@
+#ifndef KHEAP_H
+#define KHEAP_H
+
+#include "stdint.h"
+#include "stddef.h"
+
+void KernelHeapInit();
+void* KernelMemoryAlloc(size_t size);
+void* KernelCallLocation(size_t num, size_t size);
+void* KernelRealLocation(void* ptr, size_t size);
+void KernelFree(void* ptr);
+
+#endif // KHEAP_H
\ No newline at end of file
diff --git a/kernel/memory/Memory.c b/kernel/memory/Memory.c
index 6dea4ea..fa90031 100644
--- a/kernel/memory/Memory.c
+++ b/kernel/memory/Memory.c
@@ -19,9 +19,7 @@ static uint64_t memory_start = 0x100000; // Start after 1MB
 
 // Helper to mark a page as used
 static void MarkPageUsed(uint64_t page_idx) {
-    // SpinLock(&memory_lock);
     if (page_idx >= total_pages) {
-        SpinUnlock(&memory_lock);
         return;
     }
     uint64_t byte_idx = page_idx / 8;
@@ -30,14 +28,11 @@ static void MarkPageUsed(uint64_t page_idx) {
         page_bitmap[byte_idx] |= (1 << bit_idx);
         used_pages++;
     }
-    // SpinUnlock(&memory_lock);
 }
 
 // Helper to mark a page as free
 static void MarkPageFree(uint64_t page_idx) {
-    // SpinLock(&memory_lock);
     if (page_idx >= total_pages) {
-        SpinUnlock(&memory_lock);
         return;
     }
     uint64_t byte_idx = page_idx / 8;
@@ -46,7 +41,6 @@ static void MarkPageFree(uint64_t page_idx) {
         page_bitmap[byte_idx] &= ~(1 << bit_idx);
         used_pages--;
     }
-    // SpinUnlock(&memory_lock);
 }
 
 int MemoryInit(uint32_t multiboot_info_addr) {
@@ -146,48 +140,48 @@ int MemoryInit(uint32_t multiboot_info_addr) {
     for (uint64_t i = mb_info_start_page; i < mb_info_end_page; i++) {
         MarkPageUsed(i);
     }
-    // --- END OF REPLACEMENT ---
+
     PrintKernelSuccess("[SYSTEM] Physical memory manager initialized");
     return 0;
 }
 
 void* AllocPage(void) {
-    // SpinLock(&memory_lock);
+    irq_flags_t flags = SpinLockIrqSave(&memory_lock);
     for (uint64_t i = 0; i < total_pages; i++) {
         uint64_t byte_idx = i / 8;
         uint8_t bit_idx = i % 8;
         if (!(page_bitmap[byte_idx] & (1 << bit_idx))) {
             MarkPageUsed(i);
             void* page = (void*)(i * PAGE_SIZE);
-            // SpinUnlock(&memory_lock);
+            SpinUnlockIrqRestore(&memory_lock, flags);
             return page;
         }
     }
-    // SpinUnlock(&memory_lock);
+    SpinUnlockIrqRestore(&memory_lock, flags);
     return NULL; // Out of memory
 }
 
 void FreePage(void* page) {
-    // SpinLock(&memory_lock);
+    irq_flags_t flags = SpinLockIrqSave(&memory_lock);
     if (!page) {
-        SpinUnlock(&memory_lock);
+        SpinUnlockIrqRestore(&memory_lock, flags);
        Panic("FreePage: NULL pointer");
     }
     uint64_t addr = (uint64_t)page;
     if (addr % PAGE_SIZE != 0) {
-        SpinUnlock(&memory_lock);
+        SpinUnlockIrqRestore(&memory_lock, flags);
         Panic("FreePage: Address not page aligned");
     }
     uint64_t page_idx = addr / PAGE_SIZE;
     if (page_idx >= total_pages) {
-        SpinUnlock(&memory_lock);
+        SpinUnlockIrqRestore(&memory_lock, flags);
         Panic("FreePage: Page index out of bounds");
     }
     MarkPageFree(page_idx);
-    // SpinUnlock(&memory_lock);
+    SpinUnlockIrqRestore(&memory_lock, flags);
 }
 
 uint64_t GetFreeMemory(void) {
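The Memory.c changes fix a real imbalance: with the `SpinLock` calls commented out, the bounds-check branches in `MarkPageUsed`/`MarkPageFree` were calling `SpinUnlock` on a lock that was never taken. After this diff the helpers consistently assume the caller holds `memory_lock`, which `AllocPage`/`FreePage` now do via `SpinLockIrqSave`. One remaining cost: `AllocPage` still scans the bitmap one bit at a time from page 0, now with interrupts disabled. A possible refinement, sketched under the same `page_bitmap`/`total_pages` globals (the helper name is hypothetical):

```c
// Hedged sketch: skip bitmap bytes whose eight pages are all in use.
// Caller must hold memory_lock, as AllocPage does above.
static int64_t FindFreePageIndex(void) {
    uint64_t num_bytes = (total_pages + 7) / 8;
    for (uint64_t byte = 0; byte < num_bytes; byte++) {
        if (page_bitmap[byte] == 0xFF) continue; // all eight pages used
        for (uint8_t bit = 0; bit < 8; bit++) {
            uint64_t idx = byte * 8 + bit;
            if (idx >= total_pages) break;
            if (!(page_bitmap[byte] & (1 << bit))) return (int64_t)idx;
        }
    }
    return -1; // out of physical memory
}
```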
diff --git a/kernel/memory/VMem.c b/kernel/memory/VMem.c
index 1b60b69..81e55bf 100644
--- a/kernel/memory/VMem.c
+++ b/kernel/memory/VMem.c
@@ -116,26 +116,26 @@ int VMemMap(uint64_t vaddr, uint64_t paddr, uint64_t flags) {
         return VMEM_ERROR_INVALID_ADDR;
     }
 
-    SpinLock(&vmem_lock);
+    irq_flags_t irq_flags = SpinLockIrqSave(&vmem_lock);
 
     // Get PDP table
     uint64_t pdp_phys = VMemGetPageTablePhys((uint64_t)kernel_space.pml4, vaddr, 0, 1);
     if (!pdp_phys) {
-        SpinUnlock(&vmem_lock);
+        SpinUnlockIrqRestore(&vmem_lock, irq_flags);
         return VMEM_ERROR_NOMEM;
     }
 
     // Get PD table
     uint64_t pd_phys = VMemGetPageTablePhys(pdp_phys, vaddr, 1, 1);
     if (!pd_phys) {
-        SpinUnlock(&vmem_lock);
+        SpinUnlockIrqRestore(&vmem_lock, irq_flags);
         return VMEM_ERROR_NOMEM;
     }
 
     // Get PT table
     uint64_t pt_phys = VMemGetPageTablePhys(pd_phys, vaddr, 2, 1);
     if (!pt_phys) {
-        SpinUnlock(&vmem_lock);
+        SpinUnlockIrqRestore(&vmem_lock, irq_flags);
         return VMEM_ERROR_NOMEM;
     }
 
@@ -145,7 +145,7 @@ int VMemMap(uint64_t vaddr, uint64_t paddr, uint64_t flags) {
 
     // Check if already mapped
     if (pt_virt[pt_index] & PAGE_PRESENT) {
-        SpinUnlock(&vmem_lock);
+        SpinUnlockIrqRestore(&vmem_lock, irq_flags);
         return VMEM_ERROR_ALREADY_MAPPED;
     }
 
@@ -155,7 +155,7 @@ int VMemMap(uint64_t vaddr, uint64_t paddr, uint64_t flags) {
     // Invalidate TLB
     VMemFlushTLBSingle(vaddr);
 
-    SpinUnlock(&vmem_lock);
+    SpinUnlockIrqRestore(&vmem_lock, irq_flags);
     return VMEM_SUCCESS;
 }
 
@@ -176,14 +176,14 @@ void* VMemAlloc(uint64_t size) {
 
     size = PAGE_ALIGN_UP(size);
 
-    SpinLock(&vmem_lock);
+    irq_flags_t flags = SpinLockIrqSave(&vmem_lock);
 
     uint64_t vaddr = kernel_space.next_vaddr;
 
     // Reserve the virtual address space
     kernel_space.next_vaddr += size;
 
-    SpinUnlock(&vmem_lock);
+    SpinUnlockIrqRestore(&vmem_lock, flags);
 
     // Now map pages without holding the lock
     uint64_t allocated_size = 0;
@@ -211,10 +211,10 @@ void* VMemAlloc(uint64_t size) {
     }
 
     // Update tracking
-    SpinLock(&vmem_lock);
+    flags = SpinLockIrqSave(&vmem_lock);
     kernel_space.used_pages += size / PAGE_SIZE;
     kernel_space.total_mapped += size;
-    SpinUnlock(&vmem_lock);
+    SpinUnlockIrqRestore(&vmem_lock, flags);
 
     // Zero the allocated memory
     FastMemset((void*)vaddr, 0, size);
@@ -237,16 +237,16 @@ void VMemFree(void* vaddr, uint64_t size) {
         if (paddr == 0) continue; // Not mapped
 
         // Now acquire lock for modification
-        SpinLock(&vmem_lock);
+        irq_flags_t flags = SpinLockIrqSave(&vmem_lock);
 
         // Navigate to the Page Table Entry (PTE)
         uint64_t pdp_phys = VMemGetPageTablePhys((uint64_t)kernel_space.pml4, current_vaddr, 0, 0);
-        if (!pdp_phys) { SpinUnlock(&vmem_lock); continue; }
+        if (!pdp_phys) { SpinUnlockIrqRestore(&vmem_lock, flags); continue; }
 
         uint64_t pd_phys = VMemGetPageTablePhys(pdp_phys, current_vaddr, 1, 0);
-        if (!pd_phys) { SpinUnlock(&vmem_lock); continue; }
+        if (!pd_phys) { SpinUnlockIrqRestore(&vmem_lock, flags); continue; }
 
         uint64_t pt_phys = VMemGetPageTablePhys(pd_phys, current_vaddr, 2, 0);
-        if (!pt_phys) { SpinUnlock(&vmem_lock); continue; }
+        if (!pt_phys) { SpinUnlockIrqRestore(&vmem_lock, flags); continue; }
 
         // Get virtual address of the page table to modify it
         uint64_t* pt_virt = (uint64_t*)PHYS_TO_VIRT(pt_phys);
@@ -265,7 +265,7 @@ void VMemFree(void* vaddr, uint64_t size) {
             kernel_space.total_mapped -= PAGE_SIZE;
         }
 
-        SpinUnlock(&vmem_lock);
+        SpinUnlockIrqRestore(&vmem_lock, flags);
 
         // Free physical page outside the lock
         FreePage((void*)paddr);
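Two observations on VMem.c. `VMemAlloc` reserves address space by bumping `kernel_space.next_vaddr` under the lock and then maps pages with the lock dropped; if a mapping in the middle fails, the reserved range appears to be abandoned rather than recycled, an acceptable trade-off for a bump allocator but worth documenting. The diff also calls `VMemFlushTLBSingle(vaddr)` after each PTE update; that helper is not shown in this patch, but the conventional single-page invalidation on x86_64 that it presumably wraps is a single `invlpg`:

```c
// Assumed shape of VMemFlushTLBSingle (not part of this diff): evict a
// single page's translation from the local CPU's TLB after a PTE change.
static inline void VMemFlushTLBSingle(uint64_t vaddr) {
    asm volatile("invlpg (%0)" : : "r"(vaddr) : "memory");
}
```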
diff --git a/kernel/process/Process.c b/kernel/process/Process.c
index 9c98105..c6943cd 100644
--- a/kernel/process/Process.c
+++ b/kernel/process/Process.c
@@ -6,6 +6,8 @@
 #include "MemOps.h"
 #include "Ipc.h"
 #include "Atomics.h"
+#include "Io.h"
+#include "Spinlock.h"
 
 #define offsetof(type, member) ((uint64_t)&(((type*)0)->member))
 
@@ -30,6 +32,7 @@ static volatile uint32_t next_pid = 1;
 static volatile uint32_t current_process = 0;
 static volatile uint32_t process_count = 0;
 static volatile int need_schedule = 0;
+static volatile int scheduler_lock = 0;
 
 // Security subsystem
 static uint32_t security_manager_pid = 0;
@@ -155,9 +158,11 @@ static int ValidateToken(const SecurityToken* token, uint32_t pid_to_check) {
 }
 
 void TerminateProcess(uint32_t pid, TerminationReason reason, uint32_t exit_code) {
+    irq_flags_t flags = SpinLockIrqSave(&scheduler_lock);
     Process* proc = GetProcessByPid(pid);
     if (UNLIKELY(!proc || proc->state == PROC_DYING || proc->state == PROC_ZOMBIE ||
                  proc->state == PROC_TERMINATED)) {
+        SpinUnlockIrqRestore(&scheduler_lock, flags);
         return;
     }
 
@@ -165,6 +170,7 @@ void TerminateProcess(uint32_t pid, TerminationReason reason, uint32_t exit_code
 
     uint32_t slot = proc - processes;
     if (UNLIKELY(slot >= MAX_PROCESSES)) {
+        SpinUnlockIrqRestore(&scheduler_lock, flags);
         return;
     }
 
@@ -174,18 +180,21 @@ void TerminateProcess(uint32_t pid, TerminationReason reason, uint32_t exit_code
     if (caller->pid != proc->pid) {
         // Only system processes can terminate other processes
         if (UNLIKELY(caller->privilege_level != PROC_PRIV_SYSTEM)) {
+            SpinUnlockIrqRestore(&scheduler_lock, flags);
             TerminateProcess(caller->pid, TERM_SECURITY, 0);
             return;
         }
 
         // Cannot terminate immune processes
         if (UNLIKELY(proc->token.flags & PROC_FLAG_IMMUNE)) {
+            SpinUnlockIrqRestore(&scheduler_lock, flags);
             TerminateProcess(caller->pid, TERM_SECURITY, 0);
             return;
         }
 
         // Cannot terminate critical system processes
         if (UNLIKELY(proc->token.flags & PROC_FLAG_CRITICAL)) {
+            SpinUnlockIrqRestore(&scheduler_lock, flags);
             TerminateProcess(caller->pid, TERM_SECURITY, 0);
             return;
         }
@@ -193,6 +202,7 @@ void TerminateProcess(uint32_t pid, TerminationReason reason, uint32_t exit_code
 
         // Validate caller's token before allowing termination
         if (UNLIKELY(!ValidateToken(&caller->token, caller->pid))) {
+            SpinUnlockIrqRestore(&scheduler_lock, flags);
             TerminateProcess(caller->pid, TERM_SECURITY, 0);
             return;
         }
@@ -201,6 +211,7 @@ void TerminateProcess(uint32_t pid, TerminationReason reason, uint32_t exit_code
 
     // Atomic state transition
     ProcessState old_state = proc->state;
     if (UNLIKELY(AtomicCmpxchg((volatile uint32_t*)&proc->state, old_state, PROC_DYING) != old_state)) {
+        SpinUnlockIrqRestore(&scheduler_lock, flags);
         return; // Race condition, another thread is handling termination
     }
 
@@ -231,8 +242,10 @@ void TerminateProcess(uint32_t pid, TerminationReason reason, uint32_t exit_code
 
     // Self-termination handling
     if (UNLIKELY(pid == caller->pid)) {
+        SpinUnlockIrqRestore(&scheduler_lock, flags);
        __asm__ __volatile__("cli; hlt" ::: "memory");
     }
+    SpinUnlockIrqRestore(&scheduler_lock, flags);
 }
 
@@ -482,6 +495,8 @@ static void BoostAllProcesses(void) {
 // Main scheduler - called from timer interrupt
 // Main scheduler - called from timer interrupt
 void FastSchedule(struct Registers* regs) {
+    irq_flags_t flags = SpinLockIrqSave(&scheduler_lock);
+
     AtomicInc(&scheduler_calls);
     AtomicInc(&MLFQscheduler.tick_counter);
 
@@ -518,6 +533,7 @@ void FastSchedule(struct Registers* regs) {
     // Continue if quantum remains and no higher priority process
     if (LIKELY(MLFQscheduler.quantum_remaining > 0 &&
                (highest_priority == -1 || highest_priority > (int)old_proc->priority))) {
+        SpinUnlockIrqRestore(&scheduler_lock, flags);
         return; // No context switch needed
     }
 
@@ -568,6 +584,7 @@ select_next:;
     } else {
         MLFQscheduler.quantum_remaining = 0;
     }
+    SpinUnlockIrqRestore(&scheduler_lock, flags);
 }
 
 // Called when process blocks (I/O, IPC, etc.)
@@ -658,7 +675,9 @@ void ProcessExitStub() {
 }
 
 uint32_t CreateSecureProcess(void (*entry_point)(void), uint8_t privilege) {
+    irq_flags_t flags = SpinLockIrqSave(&scheduler_lock);
     if (UNLIKELY(!entry_point)) {
+        SpinUnlockIrqRestore(&scheduler_lock, flags);
         PANIC("CreateSecureProcess: NULL entry point");
     }
 
@@ -666,6 +685,7 @@ uint32_t CreateSecureProcess(void (*entry_point)(void), uint8_t privilege) {
 
     // Enhanced security validation
     if (UNLIKELY(!ValidateToken(&creator->token, creator->pid))) {
+        SpinUnlockIrqRestore(&scheduler_lock, flags);
         SecurityViolationHandler(creator->pid, "Corrupt token during process creation");
         return 0;
     }
 
@@ -673,18 +693,21 @@ uint32_t CreateSecureProcess(void (*entry_point)(void), uint8_t privilege) {
     // Privilege escalation check
     if (privilege == PROC_PRIV_SYSTEM) {
         if (UNLIKELY(creator->pid != 0 && creator->privilege_level != PROC_PRIV_SYSTEM)) {
+            SpinUnlockIrqRestore(&scheduler_lock, flags);
             SecurityViolationHandler(creator->pid, "Unauthorized system process creation");
             return 0;
         }
     }
 
     if (UNLIKELY(process_count >= MAX_PROCESSES)) {
+        SpinUnlockIrqRestore(&scheduler_lock, flags);
         PANIC("CreateSecureProcess: Too many processes");
     }
 
     // Fast slot allocation
     int slot = FindFreeSlotFast();
     if (UNLIKELY(slot == -1)) {
+        SpinUnlockIrqRestore(&scheduler_lock, flags);
         PANIC("CreateSecureProcess: No free process slots");
     }
 
@@ -697,6 +720,7 @@ uint32_t CreateSecureProcess(void (*entry_point)(void), uint8_t privilege) {
     void* stack = AllocPage();
     if (UNLIKELY(!stack)) {
         FreeSlotFast(slot);
+        SpinUnlockIrqRestore(&scheduler_lock, flags);
         PANIC("CreateSecureProcess: Failed to allocate stack");
     }
 
@@ -741,6 +765,7 @@ uint32_t CreateSecureProcess(void (*entry_point)(void), uint8_t privilege) {
 
     // Add to scheduler
     AddToScheduler(slot);
+    SpinUnlockIrqRestore(&scheduler_lock, flags);
     return new_pid;
 }
 
@@ -748,6 +773,7 @@ void ScheduleFromInterrupt(struct Registers* regs) {
     FastSchedule(regs);
 }
 
 void CleanupTerminatedProcesses(void) {
+    irq_flags_t flags = SpinLockIrqSave(&scheduler_lock);
     // Process a limited number per call to avoid long interrupt delays
     int cleanup_count = 0;
     const int MAX_CLEANUP_PER_CALL = 3;
@@ -796,6 +822,7 @@ void CleanupTerminatedProcesses(void) {
         PrintKernelInt(pid_backup);
         PrintKernel(" cleaned up successfully (state now PROC_TERMINATED=0)\n");
     }
+    SpinUnlockIrqRestore(&scheduler_lock, flags);
 }
 
 Process* GetCurrentProcess(void) {
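`scheduler_lock` is not recursive, so every early `return` in `TerminateProcess` and `CreateSecureProcess`, including the recursive `TerminateProcess(caller->pid, TERM_SECURITY, 0)` self-punish calls, has to drop the lock first; the diff threads `SpinUnlockIrqRestore` through more than a dozen exit paths to keep that invariant. (`FastSchedule` is entered from the timer interrupt with IF already clear, which `SpinLockIrqSave` handles naturally: the saved RFLAGS simply restores IF=0 on exit.) A single-exit shape would concentrate the unlock in one place; a hedged sketch of that pattern, with a hypothetical function standing in for the real bodies:

```c
// Sketch only: goto-based single exit for code that takes scheduler_lock,
// so no early return can leak the lock or leave interrupts disabled.
uint32_t CreateProcessLockedSketch(void (*entry_point)(void)) {
    uint32_t pid = 0; // 0 signals failure, as in CreateSecureProcess
    irq_flags_t flags = SpinLockIrqSave(&scheduler_lock);

    if (!entry_point) goto out;       // validation failures jump to out
    // ... token checks, slot and stack allocation as in the real code ...
    pid = next_pid++;                 // hypothetical success path

out:
    SpinUnlockIrqRestore(&scheduler_lock, flags);
    return pid;
}
```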
diff --git a/meson.build b/meson.build
index 048ed0e..a22ef3d 100644
--- a/meson.build
+++ b/meson.build
@@ -66,6 +66,7 @@ c_sources = [
     src_root + '/kernel/core/Panic.c',
     src_root + '/kernel/memory/Memory.c',
     src_root + '/kernel/memory/MemOps.c',
+    src_root + '/kernel/memory/KernelHeap.c',
     src_root + '/kernel/memory/VMem.c',
     src_root + '/kernel/process/Process.c',
     src_root + '/kernel/process/UserMode.c',