2 changes: 1 addition & 1 deletion drivers/Pic.h
@@ -1,7 +1,7 @@
#ifndef PIC_H
#define PIC_H

#define PIT_FREQUENCY_HZ 1000
#define PIT_FREQUENCY_HZ 100

int PicInstall();
void PitInstall();
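Note: the PIT derives its interrupt rate from a ~1.193182 MHz input clock, so dropping PIT_FREQUENCY_HZ from 1000 to 100 means one timer tick every 10 ms instead of every 1 ms, i.e. a divisor roughly ten times larger. A minimal sketch of the usual divisor calculation, assuming the standard 8254 programming model; PIT_BASE_FREQUENCY and PitDivisor are illustrative names, not code from this repository:

#include <stdint.h>

#define PIT_BASE_FREQUENCY 1193182u  /* standard 8254 input clock, in Hz */
#define PIT_FREQUENCY_HZ   100       /* matches the new value in drivers/Pic.h */

/* Divisor the PIT would typically be loaded with for channel 0. */
static inline uint16_t PitDivisor(void) {
    return (uint16_t)(PIT_BASE_FREQUENCY / PIT_FREQUENCY_HZ); /* 11931 -> ~100 Hz */
}
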
97 changes: 84 additions & 13 deletions kernel/core/Kernel.c
@@ -13,10 +13,12 @@
#include "Panic.h"
#include "stdbool.h"
#include "Multiboot2.h"
#include "UserMode.h"
#include "Io.h"
#include "AsmHelpers.h"
#include "MemOps.h"
#include "VMem.h"

void KernelMainHigherHalf(void);
extern uint8_t _kernel_phys_start[];
extern uint8_t _kernel_phys_end[];
// VGA Constants
#define VGA_BUFFER_ADDR 0xB8000
#define VGA_WIDTH 80
@@ -307,7 +309,54 @@ void ParseMultibootInfo(uint32_t info) {
PrintKernelSuccess("[SYSTEM] Multiboot2 info parsed.\n");
}

static InitResultT SystemInitialize(void) {
void BootstrapMapPage(uint64_t pml4_phys, uint64_t vaddr, uint64_t paddr, uint64_t flags) {
uint64_t* pml4 = (uint64_t*)pml4_phys;

// 1. Get/Create PDPT
int pml4_idx = (vaddr >> 39) & 0x1FF;
uint64_t pdpt_phys;
if (!(pml4[pml4_idx] & PAGE_PRESENT)) {
pdpt_phys = (uint64_t)AllocPage();
if (!pdpt_phys) Panic("BootstrapMapPage: Out of memory for PDPT");
FastZeroPage((void*)pdpt_phys);
pml4[pml4_idx] = pdpt_phys | PAGE_PRESENT | PAGE_WRITABLE;
} else {
pdpt_phys = pml4[pml4_idx] & PT_ADDR_MASK;
}

// 2. Get/Create PD
uint64_t* pdpt = (uint64_t*)pdpt_phys;
int pdpt_idx = (vaddr >> 30) & 0x1FF;
uint64_t pd_phys;
if (!(pdpt[pdpt_idx] & PAGE_PRESENT)) {
pd_phys = (uint64_t)AllocPage();
if (!pd_phys) Panic("BootstrapMapPage: Out of memory for PD");
FastZeroPage((void*)pd_phys);
pdpt[pdpt_idx] = pd_phys | PAGE_PRESENT | PAGE_WRITABLE;
} else {
pd_phys = pdpt[pdpt_idx] & PT_ADDR_MASK;
}

// 3. Get/Create PT
uint64_t* pd = (uint64_t*)pd_phys;
int pd_idx = (vaddr >> 21) & 0x1FF;
uint64_t pt_phys;
if (!(pd[pd_idx] & PAGE_PRESENT)) {
pt_phys = (uint64_t)AllocPage();
if (!pt_phys) Panic("BootstrapMapPage: Out of memory for PT");
FastZeroPage((void*)pt_phys);
pd[pd_idx] = pt_phys | PAGE_PRESENT | PAGE_WRITABLE;
} else {
pt_phys = pd[pd_idx] & PT_ADDR_MASK;
}

// 4. Set the final PTE
uint64_t* pt = (uint64_t*)pt_phys;
int pt_idx = (vaddr >> 12) & 0x1FF;
pt[pt_idx] = paddr | flags | PAGE_PRESENT;
}

static InitResultT CoreInit(void) {
// Initialize GDT
PrintKernel("[INFO] Initializing GDT...\n");
GdtInit(); // void function - assume success
@@ -328,18 +377,13 @@ static InitResultT SystemInitialize(void) {
PicInstall(); // void function - assume success
PrintKernelSuccess("[SYSTEM] PIC initialized\n");

// Initialize Memory Management
PrintKernel("[INFO] Initializing memory management...\n");
MemoryInit(g_multiboot_info_addr); // Pass Multiboot2 info address
PrintKernelSuccess("[SYSTEM] Memory management initialized\n");

// Initialize Process Management
PrintKernel("[INFO] Initializing process management...\n");
ProcessInit(); // void function - assume success
PrintKernelSuccess("[SYSTEM] Process management initialized\n");
// VMemInit();
return INIT_SUCCESS;
}

void KernelMain(uint32_t magic, uint32_t info) {
if (magic != MULTIBOOT2_BOOTLOADER_MAGIC) {
ClearScreen();
@@ -355,8 +399,34 @@ void KernelMain(uint32_t magic, uint32_t info) {
PrintKernelHex(info);
PrintKernel("\n\n");
ParseMultibootInfo(info);
SystemInitialize();
// Create the security manager process (PID 1)
MemoryInit(g_multiboot_info_addr);
VMemInit();
uint64_t pml4_phys = VMemGetPML4PhysAddr();

uint64_t kernel_start = (uint64_t)_kernel_phys_start;
uint64_t kernel_end = (uint64_t)_kernel_phys_end;

PrintKernelSuccess("[SYSTEM] Bootstrap: Mapping kernel...\n");
// Map the kernel itself using the bootstrap function
for (uint64_t paddr = kernel_start; paddr < kernel_end; paddr += PAGE_SIZE) {
BootstrapMapPage(pml4_phys, paddr + KERNEL_VIRTUAL_OFFSET, paddr, PAGE_WRITABLE);
}

PrintKernelSuccess("[SYSTEM] Bootstrap: Identity mapping low memory...\n");
// Map the first 4MB identity-mapped for safety (VGA, etc.)
for (uint64_t paddr = 0; paddr < 4 * 1024 * 1024; paddr += PAGE_SIZE) {
BootstrapMapPage(pml4_phys, paddr, paddr, PAGE_WRITABLE);
}

PrintKernelSuccess("[SYSTEM] Page tables prepared. Switching to virtual addressing...\n");

uint64_t higher_half_entry = (uint64_t)&KernelMainHigherHalf;
EnablePagingAndJump(pml4_phys, higher_half_entry);
}

void KernelMainHigherHalf(void) {
CoreInit();
PrintKernelSuccess("[SYSTEM] Successfully jumped to higher half. Virtual memory is active.\n");
PrintKernel("[INFO] Creating security manager process...\n");
uint64_t security_pid = CreateSecureProcess(SecureKernelIntegritySubsystem, PROC_PRIV_SYSTEM);
if (!security_pid) {
@@ -368,7 +438,8 @@ void KernelMain(uint32_t magic, uint32_t info) {
PrintKernel("\n");
PrintKernelSuccess("[SYSTEM] Core system modules loaded\n");
PrintKernelSuccess("[SYSTEM] Kernel initialization complete\n");
PrintKernelSuccess("[SYSTEM] Transferring control to SecureKernelIntegritySubsystem...\n\n");
PrintKernelSuccess("[SYSTEM] Transferring control to SecureKernelIntegritySubsystem...\n");
PrintKernelSuccess("[SYSTEM] Initializing interrupts...\n\n");
asm volatile("sti");
while (1) {
if (ShouldSchedule()) {
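Note: BootstrapMapPage above walks the four-level x86-64 structure by slicing the virtual address into 9-bit indices (bits 39-47, 30-38, 21-29, 12-20) plus a 12-bit page offset. A standalone sketch of that decomposition, using a hypothetical higher-half address (physical 1 MiB plus KERNEL_VIRTUAL_OFFSET); it only illustrates the shifts and is not code from the PR:

#include <stdint.h>
#include <stdio.h>

int main(void) {
    /* Hypothetical example: physical 1 MiB mapped at the higher-half offset. */
    uint64_t vaddr = 0x100000ULL + 0xFFFFFFFF80000000ULL;

    unsigned pml4_idx = (vaddr >> 39) & 0x1FF;   /* 511 for this address */
    unsigned pdpt_idx = (vaddr >> 30) & 0x1FF;   /* 510 */
    unsigned pd_idx   = (vaddr >> 21) & 0x1FF;   /* 0   */
    unsigned pt_idx   = (vaddr >> 12) & 0x1FF;   /* 256 */
    unsigned offset   = (unsigned)(vaddr & 0xFFF);

    printf("PML4 %u, PDPT %u, PD %u, PT %u, offset 0x%x\n",
           pml4_idx, pdpt_idx, pd_idx, pt_idx, offset);
    return 0;
}
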
6 changes: 6 additions & 0 deletions kernel/memory/AsmHelpers.asm
@@ -0,0 +1,6 @@
section .text
global EnablePagingAndJump

EnablePagingAndJump:
mov rdi, cr3

⚠️ Potential issue

Critical bug: Incorrect CR3 register operation.

The assembly moves the current CR3 value into RDI instead of loading CR3 with the provided PML4 physical address, so the paging switch never takes effect.

Apply this fix:

-    mov rdi, cr3
+    mov cr3, rdi

The function should set CR3 to the first parameter (pml4_phys_addr in RDI), not read from CR3.

🤖 Prompt for AI Agents
In kernel/memory/AsmHelpers.asm at line 5, the code incorrectly moves the CR3
register value into RDI instead of setting CR3. To fix this, replace the
instruction so that CR3 is set to the value in RDI (the first parameter
pml4_phys_addr), ensuring the paging enablement works correctly.

jmp rsi
3 changes: 3 additions & 0 deletions kernel/memory/AsmHelpers.h
@@ -0,0 +1,3 @@
#pragma once
#include "stdint.h"
void EnablePagingAndJump(uint64_t pml4_phys_addr, uint64_t jump_to_addr);
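Note: under the System V AMD64 ABI the two parameters arrive in RDI (pml4_phys_addr) and RSI (jump_to_addr), which is what EnablePagingAndJump consumes. A C-level sketch of the intended behaviour once the operand order flagged in the review comment is corrected; this inline-asm version is illustrative only and not part of the PR:

#include <stdint.h>

/* Illustrative equivalent: load CR3 with the new PML4, then jump to the entry point. */
static inline void EnablePagingAndJumpSketch(uint64_t pml4_phys_addr,
                                             uint64_t jump_to_addr) {
    asm volatile(
        "mov %0, %%cr3\n\t"   /* CR3 = pml4_phys_addr (not the reverse) */
        "jmp *%1\n\t"         /* continue at the higher-half entry      */
        :
        : "r"(pml4_phys_addr), "r"(jump_to_addr)
        : "memory");
    __builtin_unreachable();  /* the jmp above never returns */
}
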
34 changes: 28 additions & 6 deletions kernel/memory/Memory.c
@@ -7,6 +7,8 @@
// Max 4GB memory for now (1M pages)
#define MAX_PAGES (4ULL * 1024 * 1024 * 1024 / PAGE_SIZE)
#define MAX_BITMAP_SIZE (MAX_PAGES / 8)
extern uint8_t _kernel_phys_start[];
extern uint8_t _kernel_phys_end[];

static uint8_t page_bitmap[MAX_BITMAP_SIZE];
uint64_t total_pages = 0;
@@ -73,14 +75,13 @@ int MemoryInit(uint32_t multiboot_info_addr) {
PrintKernelInt(total_pages);
PrintKernel(" pages)\n");

// Second pass: mark available memory and reserved regions
tag = (struct MultibootTag*)(multiboot_info_addr + 8); // Reset tag pointer
while (tag->type != MULTIBOOT2_TAG_TYPE_END) {
if (tag->type == MULTIBOOT2_TAG_TYPE_MMAP) {
struct MultibootTagMmap* mmap_tag = (struct MultibootTagMmap*)tag;
for (uint32_t i = 0; i < (mmap_tag->size - sizeof(struct MultibootTagMmap)) / mmap_tag->entry_size; i++) {
struct MultibootMmapEntry* entry = (struct MultibootMmapEntry*)((uint8_t*)mmap_tag + sizeof(struct MultibootTagMmap) + (i * mmap_tag->entry_size));

uint64_t start_page = entry->addr / PAGE_SIZE;
uint64_t end_page = (entry->addr + entry->len - 1) / PAGE_SIZE;

@@ -101,15 +102,36 @@ int MemoryInit(uint32_t multiboot_info_addr) {
}
tag = (struct MultibootTag*)((uint8_t*)tag + ((tag->size + 7) & ~7));
}
PrintKernel("[INFO] Reserving first 1MB of physical memory.\n");
for (uint64_t i = 0; i < 0x100000 / PAGE_SIZE; i++) {
MarkPageUsed(i);
}

// Mark pages used by the kernel and initial structures (first 1MB + Multiboot info)
// This is a rough estimate, a proper solution would parse ELF sections
uint64_t kernel_end_addr = (uint64_t)multiboot_info_addr + total_multiboot_size; // Approx end of kernel + multiboot info
// 2. Reserve the physical memory used by the kernel itself.
uint64_t kernel_start_addr = (uint64_t)_kernel_phys_start;
uint64_t kernel_end_addr = (uint64_t)_kernel_phys_end;

uint64_t kernel_start_page = kernel_start_addr / PAGE_SIZE;
uint64_t kernel_end_page = (kernel_end_addr + PAGE_SIZE - 1) / PAGE_SIZE;

for (uint64_t i = 0; i < kernel_end_page; i++) {
PrintKernel("[INFO] Reserving kernel memory from page ");
PrintKernelInt(kernel_start_page);
PrintKernel(" to ");
PrintKernelInt(kernel_end_page);
PrintKernel("\n");

for (uint64_t i = kernel_start_page; i < kernel_end_page; i++) {
MarkPageUsed(i);
}

// 3. (Optional but good) Reserve the memory used by the multiboot info itself
uint64_t mb_info_start_page = multiboot_info_addr / PAGE_SIZE;
uint64_t mb_info_end_page = (multiboot_info_addr + total_multiboot_size + PAGE_SIZE - 1) / PAGE_SIZE;
for (uint64_t i = mb_info_start_page; i < mb_info_end_page; i++) {
MarkPageUsed(i);
}
// --- END OF REPLACEMENT ---

return 0;
}

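Note: the first-1MB, kernel-image, and multiboot-info reservations above all follow the same pattern: round the start address down to a page index, round the end up, and mark everything in between. A hypothetical helper expressing that idiom (ReservePhysRange is illustrative; only MarkPageUsed and PAGE_SIZE come from the existing code):

#include <stdint.h>

#define PAGE_SIZE 4096ULL
void MarkPageUsed(uint64_t page_index);  /* provided by Memory.c */

/* Hypothetical helper: reserve every physical page overlapping [start_addr, end_addr). */
static void ReservePhysRange(uint64_t start_addr, uint64_t end_addr) {
    uint64_t first_page = start_addr / PAGE_SIZE;                  /* round down */
    uint64_t last_page  = (end_addr + PAGE_SIZE - 1) / PAGE_SIZE;  /* round up   */
    for (uint64_t i = first_page; i < last_page; i++) {
        MarkPageUsed(i);
    }
}
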
60 changes: 32 additions & 28 deletions kernel/memory/VMem.c
@@ -16,26 +16,12 @@
#include "Kernel.h"
#include "Interrupts.h"
#include "Panic.h"

/**
* @brief The kernel's virtual address space structure
*/
static VirtAddrSpace kernel_space;

/**
* @brief Spinlock for virtual memory operations
*/
static volatile int vmem_lock = 0;

/**
* @brief Physical address mask for page table entries
* This is the critical fix - PAGE_MASK is for offset bits, not address bits
*/
#define PT_ADDR_MASK 0x000FFFFFFFFFF000ULL

/**
* @brief Simple spinlock implementation
*/

static inline void vmem_spin_lock(volatile int* lock) {
while (__sync_lock_test_and_set(lock, 1)) {
while (*lock) __builtin_ia32_pause();
@@ -46,20 +32,12 @@ static inline void vmem_spin_unlock(volatile int* lock) {
__sync_lock_release(lock);
}

/**
* @brief Validate that a physical address can be converted to virtual
*/
extern uint64_t total_pages;
static inline int is_valid_phys_addr(uint64_t paddr) {
// Basic sanity check - adjust limits based on your system
return (paddr != 0 && paddr < (total_pages * PAGE_SIZE));
}

/**
* @brief Initializes the kernel's virtual memory manager
*
* CRITICAL FIX: Don't overwrite the PML4 pointer after converting to virtual!
*/
void VMemInit(void) {
// Allocate physical page for PML4
void* pml4_phys = AllocPage();
@@ -116,11 +94,6 @@ static uint64_t VMemGetPageTablePhys(uint64_t pml4_phys, uint64_t vaddr, int level

// Zero the new table
uint64_t* new_table_virt = (uint64_t*)PHYS_TO_VIRT(new_table_phys);
if (!VMemIsPageMapped((uint64_t)new_table_virt)) {
FreePage(new_table_phys);
Panic("VMemGetPageTablePhys: Newly allocated page is not accessible via PHYS_TO_VIRT!");
}

FastZeroPage(new_table_virt);

// Set the entry with physical address
@@ -363,6 +336,32 @@ uint64_t VMemGetPhysAddr(uint64_t vaddr) {
return (pt_virt[pt_index] & PT_ADDR_MASK) | (vaddr & PAGE_MASK);
}

void VMemMapKernel(uint64_t kernel_phys_start, uint64_t kernel_phys_end) {
// Align addresses to page boundaries
uint64_t start = PAGE_ALIGN_DOWN(kernel_phys_start);
uint64_t end = PAGE_ALIGN_UP(kernel_phys_end);

PrintKernel("VMem: Mapping kernel from phys 0x");
PrintKernelHex(start);
PrintKernel(" to 0x");
PrintKernelHex(end);
PrintKernel("\n");

for (uint64_t paddr = start; paddr < end; paddr += PAGE_SIZE) {
// Map the physical address to its higher-half virtual address
uint64_t vaddr = paddr + KERNEL_VIRTUAL_OFFSET;

// Map with Present and Writable flags. You might want to map
// .text sections as read-only later, but this is a good start.
int result = VMemMap(vaddr, paddr, PAGE_WRITABLE);
if (result != VMEM_SUCCESS) {
Panic("VMemMapKernel: Failed to map kernel page!");
return;
}
}
PrintKernel("VMem: Kernel mapping complete.\n");
}

/**
* @brief Checks if a virtual address is mapped
*/
@@ -396,4 +395,9 @@ void VMemGetStats(uint64_t* used_pages, uint64_t* total_mapped) {
if (used_pages) *used_pages = kernel_space.used_pages;
if (total_mapped) *total_mapped = kernel_space.total_mapped;
vmem_spin_unlock(&vmem_lock);
}

// Add this function definition
uint64_t VMemGetPML4PhysAddr(void) {
return (uint64_t)kernel_space.pml4;
}
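Note: VMemMapKernel is defined here but never called in this diff (KernelMain maps the kernel with BootstrapMapPage before paging is enabled). A hypothetical post-bootstrap call site using the linker-provided symbols might look like this; MapKernelImage is an illustrative name, not code from the PR:

#include <stdint.h>
#include "VMem.h"

/* Physical kernel bounds exported by linker.ld in this PR. */
extern uint8_t _kernel_phys_start[];
extern uint8_t _kernel_phys_end[];

/* Hypothetical call site: remap the kernel image into the higher half. */
static void MapKernelImage(void) {
    VMemMapKernel((uint64_t)_kernel_phys_start, (uint64_t)_kernel_phys_end);
}
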
7 changes: 4 additions & 3 deletions kernel/memory/VMem.h
@@ -88,14 +88,15 @@ typedef enum {
VMEM_ERROR_NOT_MAPPED = -4,
VMEM_ERROR_ALIGN = -5
} VMem_Result;

#define KERNEL_VIRTUAL_OFFSET 0xFFFFFFFF80000000ULL
#define PT_ADDR_MASK 0x000FFFFFFFFFF000ULL
// Core virtual memory functions
void VMemInit(void);
void* VMemAlloc(uint64_t size);
void VMemFree(void* vaddr, uint64_t size);
int VMemMap(uint64_t vaddr, uint64_t paddr, uint64_t flags);
int VMemUnmap(uint64_t vaddr, uint64_t size);

void VMemMapKernel(uint64_t kernel_phys_start, uint64_t kernel_phys_end);
// Page table management functions
uint64_t* VMemGetPageTable(uint64_t* pml4, uint64_t vaddr, int level, int create);
int VMemSetPageFlags(uint64_t vaddr, uint64_t flags);
@@ -111,7 +112,7 @@ int VMemSwitchAddressSpace(VirtAddrSpace* space);
void VMemFlushTLB(void);
void VMemFlushTLBSingle(uint64_t vaddr);
void VMemGetStats(uint64_t* used_pages, uint64_t* total_mapped);

uint64_t VMemGetPML4PhysAddr(void);
// Debug functions
void VMemDumpPageTable(uint64_t vaddr);
void VMemValidatePageTable(uint64_t* pml4);
3 changes: 2 additions & 1 deletion linker.ld
@@ -7,7 +7,7 @@ ENTRY(start) /* The entry point of our kernel is the 'start' label */
SECTIONS
{
. = 1M;

_kernel_phys_start = .;
.boot :
{
*(.multiboot)
@@ -36,4 +36,5 @@ SECTIONS
{
*(.bss)
}
_kernel_phys_end = .;
}
3 changes: 2 additions & 1 deletion meson.build
@@ -55,7 +55,8 @@ asm_sources = [
arch_root + '/gdt/GdtFlush.asm',
arch_root + '/syscall/SyscallEntry.asm',
arch_root + '/asm/Switch.asm',
arch_root + '/interrupts/Interrupts.asm'
arch_root + '/interrupts/Interrupts.asm',
src_root + '/kernel/memory/AsmHelpers.asm',
]

# C sources (organized)