6 changes: 6 additions & 0 deletions arch/x86_64/cpu/Cpu.h
@@ -23,6 +23,12 @@ typedef struct Registers {
uint64_t rsp, ss;
} __attribute__((packed)) Registers;

static inline uint64_t ReadCr3(void) {
uint64_t cr3_val;
asm volatile("mov %%cr3, %0" : "=r"(cr3_val));
return cr3_val;
}

void CpuInit(void);
CpuFeatures* GetCpuFeatures(void);

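Note on ReadCr3: on x86-64 the low 12 bits of CR3 hold PWT/PCD (or a PCID when CR4.PCIDE is enabled) rather than address bits. With those features disabled, the raw value is the PML4 physical base, which is how it is used below; if they were ever enabled, a masked variant (hypothetical, not part of this diff) would be needed:

// Hypothetical helper: strip the low 12 non-address bits of CR3 to get the
// page-aligned PML4 base. Only relevant if PCID or cache-control bits are set.
static inline uint64_t ReadCr3PhysBase(void) {
    return ReadCr3() & ~0xFFFULL;
}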
9 changes: 4 additions & 5 deletions include/Paging.asm
@@ -1,7 +1,6 @@
section .text
global EnablePagingAndJump
global JumpToKernelHigherHalf

EnablePagingAndJump:
mov cr3, rdi
mov rsp, rdx
jmp rsi
JumpToKernelHigherHalf:
mov rsp, rsi
jmp rdi
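For reference, the stub follows the System V AMD64 calling convention of the C declaration below: entry_point arrives in rdi and new_stack_top in rsi, so it installs the new stack and jumps without reloading CR3 (paging is already configured by the bootloader).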
2 changes: 1 addition & 1 deletion include/Paging.h
@@ -1,3 +1,3 @@
#pragma once
#include "stdint.h"
void EnablePagingAndJump(uint64_t pml4_phys_addr, uint64_t jump_to_addr, uint64_t stack);
extern void JumpToKernelHigherHalf(uint64_t entry_point, uint64_t new_stack_top);
110 changes: 46 additions & 64 deletions kernel/core/Kernel.c
@@ -89,13 +89,13 @@ uint64_t AllocPageTable(const char* table_name) {
return table_phys;
}

void BootstrapMapPage(uint64_t pml4_phys, uint64_t vaddr, uint64_t paddr, uint64_t flags) {
void BootstrapMapPage(uint64_t pml4_phys, uint64_t vaddr, uint64_t paddr, uint64_t flags, bool allow_huge_page) {
// Input validation
if (!pml4_phys || (pml4_phys & 0xFFF)) PANIC("Invalid PML4 address");
if (vaddr & 0xFFF || paddr & 0xFFF) {
vaddr &= ~0xFFF; // Page-align virtual address
paddr &= ~0xFFF; // Page-align physical address
}

// NEW: Align addresses for huge page checks
const uint64_t vaddr_huge_aligned = vaddr & ~(HUGE_PAGE_SIZE - 1);
const uint64_t paddr_huge_aligned = paddr & ~(HUGE_PAGE_SIZE - 1);

uint64_t* pml4 = (uint64_t*)pml4_phys;
Comment on lines +92 to 100
💡 Verification agent

🧩 Analysis chain

BootstrapMapPage: assert PML4 is accessible under identity map

Dereferencing pml4_phys directly assumes it’s identity-mapped. If a bootloader places PML4 above IDENTITY_MAP_SIZE, this will fault during PXS1.

Prefer failing fast here during bootstrap:

 void BootstrapMapPage(uint64_t pml4_phys, uint64_t vaddr, uint64_t paddr, uint64_t flags, bool allow_huge_page) {
     // Input validation
     if (!pml4_phys || (pml4_phys & 0xFFF)) PANIC("Invalid PML4 address");
+    if (pml4_phys >= IDENTITY_MAP_SIZE) {
+        PANIC("Bootstrap: PML4 must reside in identity-mapped memory during PXS1");
+    }

-    uint64_t* pml4 = (uint64_t*)pml4_phys;
+    uint64_t* pml4 = (uint64_t*)pml4_phys;

If you intend to support non-identity PML4 at bootstrap, we can rework accessors to temporarily map it; that’s more invasive.


🏁 Script executed:

#!/bin/bash
set -e

# Search for the identity-map size macro
rg -n 'IDENTITY_MAP_SIZE'

# Check how physical addresses are translated elsewhere
rg -n 'PHYS_TO_VIRT'

Length of output: 2013


🏁 Script executed:

#!/bin/bash
set -e

# Locate BootstrapMapPage in Kernel.c
rg -n 'BootstrapMapPage' -n kernel/core/Kernel.c | sed -n '1,20p'

# Show 20 lines around the function for context
rg -n 'BootstrapMapPage' -C10 kernel/core/Kernel.c

Length of output: 2814


#!/bin/bash
set -e

# Show lines around the identity map probe in VMem.c for context
sed -n '1,120p' kernel/memory/VMem.c | sed -n '60,100p'

Enforce identity‐mapped PML4 in BootstrapMapPage

Dereferencing pml4_phys assumes it lives in the identity‐mapped region (< IDENTITY_MAP_SIZE). Fail fast if that’s not true to avoid a crash when reading the table.

• Location: kernel/core/Kernel.c, inside BootstrapMapPage (around line 92)

Proposed diff:

 void BootstrapMapPage(uint64_t pml4_phys, uint64_t vaddr, uint64_t paddr, uint64_t flags, bool allow_huge_page) {
     // Input validation
     if (!pml4_phys || (pml4_phys & 0xFFF)) PANIC("Invalid PML4 address");
+    if (pml4_phys >= IDENTITY_MAP_SIZE) {
+        PANIC("Bootstrap: PML4 must reside in identity-mapped memory (<4 GB)");
+    }

     // NEW: Align addresses for huge page checks
     const uint64_t vaddr_huge_aligned = vaddr & ~(HUGE_PAGE_SIZE - 1);
     const uint64_t paddr_huge_aligned = paddr & ~(HUGE_PAGE_SIZE - 1);

     uint64_t* pml4 = (uint64_t*)pml4_phys;
🤖 Prompt for AI Agents
In kernel/core/Kernel.c around lines 92 to 100, the function dereferences
pml4_phys assuming it resides in the identity-mapped region; add a check that
pml4_phys is below IDENTITY_MAP_SIZE (and properly aligned) and PANIC if it is
not to avoid a crash when dereferencing an unmapped physical address. Use the
existing IDENTITY_MAP_SIZE constant (and keep the existing low-12-bit alignment
check), and only cast to a pointer after verifying pml4_phys <
IDENTITY_MAP_SIZE; include a clear PANIC message indicating the PML4 is not
identity-mapped.


@@ -120,10 +120,30 @@ void BootstrapMapPage(uint64_t pml4_phys, uint64_t vaddr, uint64_t paddr, uint64
} else {
pd_phys = pdpt[pdpt_idx] & PT_ADDR_MASK;
}

// 3. Get/Create PT
uint64_t* pd = (uint64_t*)pd_phys;
const int pd_idx = (vaddr >> 21) & 0x1FF;

// NEW: Huge Page Mapping Logic
if (allow_huge_page && vaddr == vaddr_huge_aligned && paddr == paddr_huge_aligned) {
// This is a 2MB-aligned address, so we can use a huge page.
// The PD entry will point directly to the 2MB physical frame.
pd[pd_idx] = paddr | flags | PAGE_PRESENT | PAGE_LARGE;

// Progress update for huge pages (maps 2MB at a time)
static uint64_t huge_pages_mapped = 0;
huge_pages_mapped++;
if (huge_pages_mapped % 128 == 0) { // Print every 256MB
PrintKernelInt((huge_pages_mapped * HUGE_PAGE_SIZE) / (1024 * 1024));
PrintKernel("MB ");
}
return; // We are done, no need to create a PT
}

// --- Original 4KB Page Mapping Logic ---
vaddr &= ~0xFFF;
paddr &= ~0xFFF;

// 3. Get/Create PT
uint64_t pt_phys;
if (!(pd[pd_idx] & PAGE_PRESENT)) {
pt_phys = AllocPageTable("PT");
@@ -136,7 +156,6 @@ void BootstrapMapPage(uint64_t pml4_phys, uint64_t vaddr, uint64_t paddr, uint64
uint64_t* pt = (uint64_t*)pt_phys;
const int pt_idx = (vaddr >> 12) & 0x1FF;

// NEW: Check for remapping
if (pt[pt_idx] & PAGE_PRESENT) {
uint64_t existing_paddr = pt[pt_idx] & PT_ADDR_MASK;
if (existing_paddr != paddr) {
Expand All @@ -154,14 +173,7 @@ void BootstrapMapPage(uint64_t pml4_phys, uint64_t vaddr, uint64_t paddr, uint64

static uint64_t pages_mapped = 0;
pages_mapped++;

// Show progress every 64K pages (256MB)
if (pages_mapped % 65536 == 0) {
PrintKernelInt((pages_mapped * PAGE_SIZE) / (1024 * 1024));
PrintKernel("MB ");
}
// Show dots every 16K pages (64MB) for finer progress
else if (pages_mapped % 16384 == 0) {
if (pages_mapped % 16384 == 0) {
PrintKernel(".");
}
}
@@ -421,82 +433,52 @@ static void PrintBootstrapSummary(void) {


// Pre-eXecutionSystem 1
void PXS1(const uint32_t info) {
int sret = SerialInit();
// In Kernel.c

if (sret != 0) {
PrintKernelWarning("[WARN] COM1 failed, probing other COM ports...\n");
if (SerialInitPort(COM2) != 0 && SerialInitPort(COM3) != 0 &&SerialInitPort(COM4) != 0) {
PrintKernelWarning("[WARN] No serial ports initialized. Continuing without serial.\n");
} else {
PrintKernelSuccess("System: Serial driver initialized on fallback port\n");
}
} else {
PrintKernelSuccess("System: Serial driver initialized on COM1\n");
}
void PXS1(const uint32_t info) {
SerialInit(); // Keep serial for logging

// VBE and Console init are fine here
if (VBEInit(info) != 0) {
PrintKernelError("System: Failed to initialize VBE and graphical environment");
}

PrintKernel("System: Starting Console...\n");
ConsoleInit();
PrintKernelSuccess("System: Console initialized\n");

VBEShowSplash();
ClearScreen();

PrintKernel("System: Parsing MULTIBOOT2 info...\n");
ParseMultibootInfo(info);
PrintKernelSuccess("System: MULTIBOOT2 info parsed\n");

PrintKernel("System: Initializing memory...\n");
PrintKernel("System: Initializing physical memory manager...\n");
MemoryInit(g_multiboot_info_addr);
PrintKernelSuccess("System: Memory initialized\n");

// Create new PML4 with memory validation (ensure identity-mapped physical page)
void* pml4_phys = NULL;
for (int attempt = 0; attempt < 64; attempt++) {
void* candidate = AllocPage();
if (!candidate) break;
if ((uint64_t)candidate < IDENTITY_MAP_SIZE) { pml4_phys = candidate; break; }
FreePage(candidate);
}
if (!pml4_phys) PANIC("Failed to allocate PML4 in identity-mapped memory");

FastZeroPage(pml4_phys);
uint64_t pml4_addr = (uint64_t)pml4_phys;

PrintKernelSuccess("System: Bootstrap: Identity mapping...\n");

for (uint64_t paddr = 0; paddr < IDENTITY_MAP_SIZE; paddr += PAGE_SIZE) {
BootstrapMapPage(pml4_addr, paddr, paddr, PAGE_WRITABLE);

if (paddr / PAGE_SIZE % 32768 == 0) {
PrintKernel(".");
}
}
PrintKernelSuccess("System: Bootstrap paging already configured by bootloader.\n");
uint64_t pml4_addr = ReadCr3(); // Get the PML4 from the bootloader
PrintKernel("Info: PML4 is at physical address 0x");
PrintKernelHex(pml4_addr);
PrintKernel("\n");
Comment on lines +456 to 459
💡 Verification agent

🧩 Analysis chain

Missing header for ReadCr3 causes build error

ReadCr3 is defined in arch/x86_64/cpu/Cpu.h (per snippets). Kernel.c does not include this, which will fail under modern C.

Add the include near the top (outside this hunk):

#include "arch/x86_64/cpu/Cpu.h"

Verification script to find mismatches:


🏁 Script executed:

#!/bin/bash
# Check where ReadCr3 is declared and used
rg -n -C2 '\bReadCr3\s*\('
rg -n -C2 'arch/x86_64/cpu/Cpu\.h'

Length of output: 717


🏁 Script executed:

#!/bin/bash
# Verify if Kernel.c currently includes the Cpu.h header
echo "Searching for direct Cpu.h includes in kernel/core/Kernel.c..."
rg -n '^#include.*cpu/Cpu\.h' -C2 kernel/core/Kernel.c || true

echo -e "\nListing all #include directives in kernel/core/Kernel.c:"
rg -n '^#include' kernel/core/Kernel.c

Length of output: 1062


Add missing Cpu.h include to resolve ReadCr3 build error

The call to ReadCr3() in kernel/core/Kernel.c requires its declaration from arch/x86_64/cpu/Cpu.h, which isn’t currently included and will cause a build failure under a conforming C compiler.

Please insert the following near the top of kernel/core/Kernel.c (with the other architecture‐specific headers):

 #include "xHCI/xHCI.h"
+#include "arch/x86_64/cpu/Cpu.h"   // for ReadCr3()
 
 uint64_t pml4_addr = ReadCr3(); // Get the PML4 from the bootloader
 PrintKernel("Info: PML4 is at physical address 0x");
 PrintKernelHex(pml4_addr);

This is a critical fix required to restore successful compilation.

📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
uint64_t pml4_addr = ReadCr3(); // Get the PML4 from the bootloader
PrintKernel("Info: PML4 is at physical address 0x");
PrintKernelHex(pml4_addr);
PrintKernel("\n");
#include "xHCI/xHCI.h"
#include "arch/x86_64/cpu/Cpu.h" // for ReadCr3()
uint64_t pml4_addr = ReadCr3(); // Get the PML4 from the bootloader
PrintKernel("Info: PML4 is at physical address 0x");
PrintKernelHex(pml4_addr);
PrintKernel("\n");
🤖 Prompt for AI Agents
In kernel/core/Kernel.c around lines 456-459 the call to ReadCr3() is missing
its declaration because arch/x86_64/cpu/Cpu.h is not included; add a #include
for that header near the top of Kernel.c alongside the other
architecture-specific headers (place it with the other x86_64 includes) so
ReadCr3 is declared before use and the file compiles cleanly.


PrintKernelSuccess("System: Bootstrap: Mapping kernel...\n");
PrintKernel("System: Mapping kernel to higher half...\n");
uint64_t kernel_start = (uint64_t)_kernel_phys_start & ~0xFFF;
uint64_t kernel_end = ((uint64_t)_kernel_phys_end + 0xFFF) & ~0xFFF;
for (uint64_t paddr = kernel_start; paddr < kernel_end; paddr += PAGE_SIZE) {
BootstrapMapPage(pml4_addr, paddr + KERNEL_VIRTUAL_OFFSET, paddr, PAGE_WRITABLE);
// Use the EXISTING pml4_addr from CR3
BootstrapMapPage(pml4_addr, paddr + KERNEL_VIRTUAL_OFFSET, paddr, PAGE_WRITABLE, false);
}

PrintKernelSuccess("System: Bootstrap: Mapping kernel stack...\n");
PrintKernel("System: Mapping kernel stack to higher half...\n");
uint64_t stack_phys_start = (uint64_t)kernel_stack & ~0xFFF;
uint64_t stack_phys_end = ((uint64_t)kernel_stack + KERNEL_STACK_SIZE + 0xFFF) & ~0xFFF;

for (uint64_t paddr = stack_phys_start; paddr < stack_phys_end; paddr += PAGE_SIZE) {
BootstrapMapPage(pml4_addr, paddr + KERNEL_VIRTUAL_OFFSET, paddr, PAGE_WRITABLE);
// Use the EXISTING pml4_addr from CR3
BootstrapMapPage(pml4_addr, paddr + KERNEL_VIRTUAL_OFFSET, paddr, PAGE_WRITABLE, false);
}

PrintKernelSuccess("System: Page tables prepared. Switching to virtual addressing...\n");
PrintKernel("System: Switching to higher half...\n");
const uint64_t new_stack_top = ((uint64_t)kernel_stack + KERNEL_VIRTUAL_OFFSET) + KERNEL_STACK_SIZE;
const uint64_t higher_half_entry = (uint64_t)&KernelMainHigherHalf + KERNEL_VIRTUAL_OFFSET;
EnablePagingAndJump(pml4_addr, higher_half_entry, new_stack_top);

JumpToKernelHigherHalf(higher_half_entry, new_stack_top);
}
Comment on lines +477 to 482
🛠️ Refactor suggestion

Fix stack alignment before jumping; align stack object too

System V AMD64 requires 16-byte alignment at function entry when entering via jump (no return address pushed). Current new_stack_top derivation doesn’t guarantee alignment; kernel_stack as uint8_t[] is aligned only to 1 byte by default.

Within this hunk:

-    const uint64_t new_stack_top = ((uint64_t)kernel_stack + KERNEL_VIRTUAL_OFFSET) + KERNEL_STACK_SIZE;
+    // Align to 16B for SysV ABI since we are jmp'ing (no return addr pushed)
+    const uint64_t new_stack_top =
+        ((((uint64_t)kernel_stack + KERNEL_VIRTUAL_OFFSET) + KERNEL_STACK_SIZE) & ~0xFULL);

Outside this hunk, align the stack object:

// Ensure 16-byte alignment (or page-align if you prefer)
static uint8_t kernel_stack[KERNEL_STACK_SIZE] __attribute__((aligned(16)));

Optionally, declare the callee as noreturn (see Paging.h comment) to help the compiler keep alignment assumptions tight.
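A possible shape for that declaration in Paging.h (a sketch assuming GCC/Clang attribute syntax):

// Sketch: mark the jump stub as non-returning so the compiler drops dead code
// after the call and keeps its stack-alignment assumptions tight.
extern void JumpToKernelHigherHalf(uint64_t entry_point, uint64_t new_stack_top)
    __attribute__((noreturn));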


static void IRQUnmaskCoreSystems() {
42 changes: 41 additions & 1 deletion kernel/memory/Memory.c
@@ -162,7 +162,7 @@ int MemoryInit(uint32_t multiboot_info_addr) {
MarkPageUsed(i);
}

// 3. (Optional but good) Reserve the memory used by the multiboot info
// Reserve the memory used by the multiboot info
// itself
const uint64_t mb_info_start_page = multiboot_info_addr / PAGE_SIZE;
const uint64_t mb_info_end_page = (multiboot_info_addr + total_multiboot_size + PAGE_SIZE - 1) / PAGE_SIZE;
@@ -368,3 +368,43 @@ void GetDetailedMemoryStats(MemoryStats* stats) {
SpinUnlockIrqRestore(&memory_lock, flags);
}

void FreeHugePages(void* pages, uint64_t num_pages) {
if (!pages || num_pages == 0) {
PrintKernelError("[MEMORY] FreeHugePages: NULL pointer or zero pages\n");
return;
}

uint64_t addr = (uint64_t)pages;
if (addr % HUGE_PAGE_SIZE != 0) {
PrintKernelError("[MEMORY] FreeHugePages: Unaligned address ");
PrintKernelHex(addr); PrintKernel("\n");
return;
}

uint64_t start_page_idx = addr / PAGE_SIZE;
uint64_t pages_per_huge = HUGE_PAGE_SIZE / PAGE_SIZE;
uint64_t total_to_free = num_pages * pages_per_huge;

irq_flags_t flags = SpinLockIrqSave(&memory_lock);

for (uint64_t i = 0; i < total_to_free; i++) {
uint64_t current_page_idx = start_page_idx + i;
if (current_page_idx >= total_pages) break; // Bounds check

if (IsPageFree(current_page_idx)) {
// Freeing a page that is already free can indicate a serious bug
PrintKernelError("[MEMORY] Double free detected in FreeHugePages at page ");
PrintKernelInt(current_page_idx); PrintKernel("\n");
} else {
MarkPageFree(current_page_idx);
}
}

// Update hint
if (start_page_idx < next_free_hint) {
next_free_hint = start_page_idx;
}

free_count++; // Count this as one logical free operation
SpinUnlockIrqRestore(&memory_lock, flags);
}
Comment on lines +371 to +410
⚠️ Potential issue

Fix accounting: huge_pages_allocated never decremented on free

Currently, huge_pages_allocated grows on AllocHugePages but is never decremented in FreeHugePages. This will skew stats long-term.

Apply this within the selected range:

     // Update hint
     if (start_page_idx < next_free_hint) {
         next_free_hint = start_page_idx;
     }

-    free_count++; // Count this as one logical free operation
+    free_count++; // Count this as one logical free operation
+    if (huge_pages_allocated >= num_pages) {
+        huge_pages_allocated -= num_pages;
+    } else {
+        huge_pages_allocated = 0; // avoid underflow in case of mismatch
+    }
     SpinUnlockIrqRestore(&memory_lock, flags);

Additionally, ensure allocation increments by the number of huge pages, not by 1 (see below).

📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
void FreeHugePages(void* pages, uint64_t num_pages) {
if (!pages || num_pages == 0) {
PrintKernelError("[MEMORY] FreeHugePages: NULL pointer or zero pages\n");
return;
}
uint64_t addr = (uint64_t)pages;
if (addr % HUGE_PAGE_SIZE != 0) {
PrintKernelError("[MEMORY] FreeHugePages: Unaligned address ");
PrintKernelHex(addr); PrintKernel("\n");
return;
}
uint64_t start_page_idx = addr / PAGE_SIZE;
uint64_t pages_per_huge = HUGE_PAGE_SIZE / PAGE_SIZE;
uint64_t total_to_free = num_pages * pages_per_huge;
irq_flags_t flags = SpinLockIrqSave(&memory_lock);
for (uint64_t i = 0; i < total_to_free; i++) {
uint64_t current_page_idx = start_page_idx + i;
if (current_page_idx >= total_pages) break; // Bounds check
if (IsPageFree(current_page_idx)) {
// Freeing a page that is already free can indicate a serious bug
PrintKernelError("[MEMORY] Double free detected in FreeHugePages at page ");
PrintKernelInt(current_page_idx); PrintKernel("\n");
} else {
MarkPageFree(current_page_idx);
}
}
// Update hint
if (start_page_idx < next_free_hint) {
next_free_hint = start_page_idx;
}
free_count++; // Count this as one logical free operation
SpinUnlockIrqRestore(&memory_lock, flags);
}
// Update hint
if (start_page_idx < next_free_hint) {
next_free_hint = start_page_idx;
}
free_count++; // Count this as one logical free operation
if (huge_pages_allocated >= num_pages) {
huge_pages_allocated -= num_pages;
} else {
huge_pages_allocated = 0; // avoid underflow in case of mismatch
}
SpinUnlockIrqRestore(&memory_lock, flags);
🤖 Prompt for AI Agents
In kernel/memory/Memory.c around lines 371 to 410, update accounting so
huge_pages_allocated is decremented when huge pages are freed and ensure
allocation increments use the number of huge pages rather than 1: inside
FreeHugePages subtract num_pages from huge_pages_allocated (with proper locking
and bounds-checking to avoid underflow), and in AllocHugePages increment
huge_pages_allocated by the number of huge pages allocated (not by 1); keep
these updates inside the memory_lock critical section and ensure
free_count/alloc counts reflect logical huge-page operations consistently.
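For the allocation-side change referenced above, AllocHugePages itself is not shown in this diff, so the following is only a sketch with an assumed placement:

// Sketch: inside AllocHugePages, while memory_lock is still held and after the
// contiguous 2MB-aligned run has been marked used, account for the whole request.
huge_pages_allocated += num_pages;  // increment by the number of huge pages, not by 1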

6 changes: 4 additions & 2 deletions kernel/memory/Memory.h
@@ -17,14 +17,16 @@ typedef struct MemoryStats {


#define PAGE_SIZE 4096
#define HUGE_PAGE_SIZE (2 * 1024 * 1024) // ADDED: For clarity in calculations

extern uint64_t total_pages;

int MemoryInit(uint32_t multiboot_info_addr);

void* AllocPage(void);
void FreePage(void* page);
void* AllocHugePages(uint64_t num_pages); // Allocate contiguous 2MB pages
void FreeHugePages(void* pages, uint64_t num_pages);
void* AllocHugePages(uint64_t num_pages);
void FreeHugePages(void* pages, uint64_t num_pages); // ADDED: Matching declaration for the new function

// Misc.
void GetDetailedMemoryStats(MemoryStats* stats);
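A brief usage sketch for the huge-page pair declared above (the count of 4 is illustrative, not taken from the PR):

// Allocate four contiguous 2MB pages (8 MB) and release them with the same count.
void* region = AllocHugePages(4);
if (region) {
    // ... map and use the physically contiguous region ...
    FreeHugePages(region, 4);
}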
22 changes: 8 additions & 14 deletions kernel/memory/VMem.c
@@ -1,6 +1,3 @@
//
// Created by Atheria on 7/15/25.
//
#include "VMem.h"
#include "Console.h"
#include "MemOps.h"
@@ -72,14 +69,14 @@ void VMemInit(void) {
kernel_space.pml4 = (uint64_t*)pml4_phys_addr;
uint64_t kernel_size = (uint64_t)_kernel_phys_end - (uint64_t)_kernel_phys_start;
kernel_space.total_mapped += PAGE_ALIGN_UP(kernel_size);
// Now test identity mapping
if (VMemGetPhysAddr(0x100000) != 0x100000) {
PANIC("Bootstrap identity mapping failed - VALIDATION FAILED");
}
const uint64_t probe = IDENTITY_MAP_SIZE - PAGE_SIZE;
if (VMemGetPhysAddr(probe) != probe) {
PANIC("Bootstrap identity mapping failed at IDENTITY_MAP_SIZE boundary");
}
// Now test identity mapping -- FAILING
// if (VMemGetPhysAddr(0x100000) != 0x100000) {
// PANIC("Bootstrap identity mapping failed - VALIDATION FAILED");
// }
// const uint64_t probe = IDENTITY_MAP_SIZE - PAGE_SIZE;
// if (VMemGetPhysAddr(probe) != probe) {
// PANIC("Bootstrap identity mapping failed at IDENTITY_MAP_SIZE boundary");
// }
Comment on lines +72 to +79
@coderabbitai coderabbitai bot Aug 17, 2025
🛠️ Refactor suggestion

⚠️ Potential issue

Identity-mapping validation removed, but identity-mapping assumptions remain in table walks.

VMemGetPageTablePhys/VMemMap/VMemGetPhysAddr still conditionally dereference tables via identity mapping when phys < IDENTITY_MAP_SIZE. With validation removed (and previously failing), this can trap at runtime if the loader doesn’t maintain a 1:1 map for the probed range. Either restore a robust runtime check or drop identity-map assumptions and always access tables via higher-half mapping.

Follow-up options:

  • Prefer a single access mode for page tables: always via higher-half mapping (PHYS_TO_VIRT), removing the identity-branching in:
    • VMemGetPageTablePhys
    • VMemMap
    • VMemGetPhysAddr
    • VMemUnmap
    • VMemMapMMIO
    • VMemUnmapMMIO
  • Or, reinstate a concrete identity-map probe that walks PML4->PDP->PD for a few sentinel addresses (e.g., 0, 1MB, IDENTITY_MAP_SIZE - PAGE_SIZE) and PANIC if missing.

I can generate a consolidated patch to remove identity-map assumptions across these helpers if you want.

🤖 Prompt for AI Agents
In kernel/memory/VMem.c around lines 72-79, identity-mapping validation was
removed but the code still conditionally dereferences physical addresses when
phys < IDENTITY_MAP_SIZE, which can crash if the loader doesn't provide that 1:1
mapping; fix by choosing one approach and applying it consistently: either
(preferred) remove all identity-map branches in VMemGetPageTablePhys, VMemMap,
VMemGetPhysAddr, VMemUnmap, VMemMapMMIO and VMemUnmapMMIO and always convert
physical addresses to kernel virtual addresses via PHYS_TO_VIRT before
dereferencing page table memory, or reinstate a small runtime probe that walks
PML4->PDP->PD for sentinel addresses (e.g., 0, 1MB, IDENTITY_MAP_SIZE -
PAGE_SIZE) and PANIC if identity mappings are missing; update the listed
functions to use the chosen strategy and delete the phys < IDENTITY_MAP_SIZE
conditional paths so page-table accesses never assume identity mapping.
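If the probe option were chosen instead of dropping the identity branches, a minimal sketch (reusing VMemGetPhysAddr and PANIC as they already exist in this tree; the sentinel choice is illustrative) could be:

// Sketch: fail fast in VMemInit if the loader did not identity-map the probed range.
// Address 0 is skipped because VMemGetPhysAddr() also returns 0 for unmapped addresses.
static void ProbeBootstrapIdentityMap(void) {
    const uint64_t sentinels[] = { 0x100000, IDENTITY_MAP_SIZE - PAGE_SIZE };
    for (int i = 0; i < 2; i++) {
        if (VMemGetPhysAddr(sentinels[i]) != sentinels[i]) {
            PANIC("Bootstrap identity mapping missing at probed address");
        }
    }
}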

assembler-0 (Owner, Author)

Well, there's an issue: VMemGetPhysAddr actually fails and panics.

🧩 Analysis chain

@assembler-0, that confirms my analysis! VMemGetPhysAddr is panicking because it's trying to access page tables via identity mapping that doesn't exist. Let me examine the exact failure pattern and provide a fix.


🏁 Script executed:

#!/bin/bash
# Examine VMemGetPhysAddr implementation to understand the panic
ast-grep --pattern 'uint64_t VMemGetPhysAddr(uint64_t vaddr) {
  $$$
}'

Length of output: 1411


🏁 Script executed:

#!/bin/bash
# Look for all functions that use the identity mapping pattern
rg -nC2 "IDENTITY_MAP_SIZE" kernel/memory/VMem.c

Length of output: 2552


🏁 Script executed:

#!/bin/bash
# Check what IDENTITY_MAP_SIZE is defined as
rg -n "IDENTITY_MAP_SIZE" include/ kernel/memory/

Length of output: 1083


Perfect! Now I can see the full scope of the problem. The panic occurs because VMemGetPhysAddr (and related functions) assume identity mapping exists for the first 4GB, but the bootloader doesn't provide it. Here's a comprehensive fix that removes all identity mapping assumptions:

// Fix for kernel/memory/VMem.c
// Remove all identity mapping assumptions - always use PHYS_TO_VIRT

static uint64_t VMemGetPageTablePhys(uint64_t pml4_phys, uint64_t vaddr, int level, int create) {
    if (!is_valid_phys_addr(pml4_phys)) return 0;

    // Always access tables via higher-half mapping
    uint64_t* table_virt = (uint64_t*)PHYS_TO_VIRT(pml4_phys);

    int shift = 39 - (level * 9);
    int index = (vaddr >> shift) & PT_INDEX_MASK;
    if (index >= 512) return 0;

    if (!(table_virt[index] & PAGE_PRESENT)) {
        if (!create) return 0;

        // Allocate any available page for page tables
        void* new_table_phys = AllocPage();
        if (!new_table_phys) return 0;
        if (!is_valid_phys_addr((uint64_t)new_table_phys)) {
            FreePage(new_table_phys);
            return 0;
        }

        // Always zero via higher-half mapping
        FastZeroPage(PHYS_TO_VIRT(new_table_phys));

        table_virt[index] = (uint64_t)new_table_phys | PAGE_PRESENT | PAGE_WRITABLE;
        return (uint64_t)new_table_phys;
    }

    return table_virt[index] & PT_ADDR_MASK;
}

int VMemMap(uint64_t vaddr, uint64_t paddr, uint64_t flags) {
    // ... validation code unchanged ...

    irq_flags_t irq_flags = SpinLockIrqSave(&vmem_lock);

    // Get page tables
    uint64_t pdp_phys = VMemGetPageTablePhys((uint64_t)kernel_space.pml4, vaddr, 0, 1);
    if (!pdp_phys) {
        SpinUnlockIrqRestore(&vmem_lock, irq_flags);
        return VMEM_ERROR_NOMEM;
    }

    uint64_t pd_phys = VMemGetPageTablePhys(pdp_phys, vaddr, 1, 1);
    if (!pd_phys) {
        SpinUnlockIrqRestore(&vmem_lock, irq_flags);
        return VMEM_ERROR_NOMEM;
    }

    uint64_t pt_phys = VMemGetPageTablePhys(pd_phys, vaddr, 2, 1);
    if (!pt_phys) {
        SpinUnlockIrqRestore(&vmem_lock, irq_flags);
        return VMEM_ERROR_NOMEM;
    }

    // Always access PT through higher-half mapping
    uint64_t* pt_virt = (uint64_t*)PHYS_TO_VIRT(pt_phys);
    int pt_index = (vaddr >> PT_SHIFT) & PT_INDEX_MASK;

    // Check if already mapped
    if (pt_virt[pt_index] & PAGE_PRESENT) {
        SpinUnlockIrqRestore(&vmem_lock, irq_flags);
        return VMEM_ERROR_ALREADY_MAPPED;
    }

    // Set the mapping
    pt_virt[pt_index] = paddr | flags | PAGE_PRESENT;

    // Invalidate TLB
    VMemFlushTLBSingle(vaddr);

    SpinUnlockIrqRestore(&vmem_lock, irq_flags);
    return VMEM_SUCCESS;
}

int VMemMapHuge(uint64_t vaddr, uint64_t paddr, uint64_t flags) {
    if (!IS_HUGE_PAGE_ALIGNED(vaddr) || !IS_HUGE_PAGE_ALIGNED(paddr)) {
        return VMEM_ERROR_ALIGN;
    }

    irq_flags_t irq_flags = SpinLockIrqSave(&vmem_lock);

    if (!is_valid_phys_addr(paddr)) {
        SpinUnlockIrqRestore(&vmem_lock, irq_flags);
        return VMEM_ERROR_INVALID_ADDR;
    }

    // Get page tables
    uint64_t pdp_phys = VMemGetPageTablePhys((uint64_t)kernel_space.pml4, vaddr, 0, 1);
    if (!pdp_phys) {
        SpinUnlockIrqRestore(&vmem_lock, irq_flags);
        return VMEM_ERROR_NOMEM;
    }

    uint64_t pd_phys = VMemGetPageTablePhys(pdp_phys, vaddr, 1, 1);
    if (!pd_phys) {
        SpinUnlockIrqRestore(&vmem_lock, irq_flags);
        return VMEM_ERROR_NOMEM;
    }

    // Always access PD through higher-half mapping
    uint64_t* pd_virt = (uint64_t*)PHYS_TO_VIRT(pd_phys);
    int pd_index = (vaddr >> PD_SHIFT) & PT_INDEX_MASK;

    // Check if already mapped
    if (pd_virt[pd_index] & PAGE_PRESENT) {
        SpinUnlockIrqRestore(&vmem_lock, irq_flags);
        return VMEM_ERROR_ALREADY_MAPPED;
    }

    // Set huge page mapping
    pd_virt[pd_index] = paddr | flags | PAGE_PRESENT | PAGE_LARGE;

    VMemFlushTLBSingle(vaddr);
    SpinUnlockIrqRestore(&vmem_lock, irq_flags);
    return VMEM_SUCCESS;
}

uint64_t VMemGetPhysAddr(uint64_t vaddr) {
    uint64_t pdp_phys = VMemGetPageTablePhys((uint64_t)kernel_space.pml4, vaddr, 0, 0);
    if (!pdp_phys) return 0;

    uint64_t pd_phys = VMemGetPageTablePhys(pdp_phys, vaddr, 1, 0);
    if (!pd_phys) return 0;

    uint64_t pt_phys = VMemGetPageTablePhys(pd_phys, vaddr, 2, 0);
    if (!pt_phys) return 0;

    // Always access PT through higher-half mapping
    uint64_t* pt_virt = (uint64_t*)PHYS_TO_VIRT(pt_phys);
    int pt_index = (vaddr >> PT_SHIFT) & PT_INDEX_MASK;

    if (!(pt_virt[pt_index] & PAGE_PRESENT)) return 0;

    return (pt_virt[pt_index] & PT_ADDR_MASK) | (vaddr & PAGE_MASK);
}

int VMemUnmap(uint64_t vaddr, uint64_t size) {
    if (size == 0) return VMEM_SUCCESS;
    
    size = PAGE_ALIGN_UP(size);
    uint64_t num_pages = size / PAGE_SIZE;
    
    for (uint64_t i = 0; i < num_pages; i++) {
        uint64_t current_vaddr = vaddr + (i * PAGE_SIZE);
        
        irq_flags_t flags = SpinLockIrqSave(&vmem_lock);
        
        uint64_t pdp_phys = VMemGetPageTablePhys((uint64_t)kernel_space.pml4, current_vaddr, 0, 0);
        if (!pdp_phys) { SpinUnlockIrqRestore(&vmem_lock, flags); continue; }

        uint64_t pd_phys = VMemGetPageTablePhys(pdp_phys, current_vaddr, 1, 0);
        if (!pd_phys) { SpinUnlockIrqRestore(&vmem_lock, flags); continue; }

        uint64_t pt_phys = VMemGetPageTablePhys(pd_phys, current_vaddr, 2, 0);
        if (!pt_phys) { SpinUnlockIrqRestore(&vmem_lock, flags); continue; }

        // Always access PT through higher-half mapping
        uint64_t* pt_virt = (uint64_t*)PHYS_TO_VIRT(pt_phys);
        int pt_index = (current_vaddr >> PT_SHIFT) & PT_INDEX_MASK;

        if (pt_virt[pt_index] & PAGE_PRESENT) {
            pt_virt[pt_index] = 0;
            kernel_space.used_pages--;
            kernel_space.total_mapped -= PAGE_SIZE;
        }
        
        SpinUnlockIrqRestore(&vmem_lock, flags);
        VMemFlushTLBSingle(current_vaddr);
    }
    
    return VMEM_SUCCESS;
}

// Similar fixes needed for VMemMapMMIO and VMemUnmapMMIO - always use PHYS_TO_VIRT

The key changes:

  1. Removed all identity mapping conditionals - no more if (phys < IDENTITY_MAP_SIZE)
  2. Always use PHYS_TO_VIRT for page table access
  3. Simplified page allocation in VMemGetPageTablePhys - no need to prefer low memory
  4. Consistent access pattern across all functions

This fix eliminates the panic because all page table access now goes through the higher-half mapping which the bootloader has established, rather than assuming identity mapping exists.


🧬 Code Graph Analysis Results
  • File: kernel/memory/VMem.c, lines 425-447
    Content:

    uint64_t VMemGetPhysAddr(uint64_t vaddr) {
        uint64_t pdp_phys = VMemGetPageTablePhys((uint64_t)kernel_space.pml4, vaddr, 0, 0);
        if (!pdp_phys) return 0;
    
        uint64_t pd_phys = VMemGetPageTablePhys(pdp_phys, vaddr, 1, 0);
        if (!pd_phys) return 0;
    
        uint64_t pt_phys = VMemGetPageTablePhys(pd_phys, vaddr, 2, 0);
        if (!pt_phys) return 0;
    
        // Access PT through identity mapping if possible
        uint64_t* pt_virt;
        if (pt_phys < IDENTITY_MAP_SIZE) {
            pt_virt = (uint64_t*)pt_phys;
        } else {
            pt_virt = (uint64_t*)PHYS_TO_VIRT(pt_phys);
        }
        int pt_index = (vaddr >> PT_SHIFT) & PT_INDEX_MASK;
    
        if (!(pt_virt[pt_index] & PAGE_PRESENT)) return 0;
    
        return (pt_virt[pt_index] & PT_ADDR_MASK) | (vaddr & PAGE_MASK);
    }
    
  • File: kernel/memory/VMem.c, lines 89-134
    Content:

    static uint64_t VMemGetPageTablePhys(uint64_t pml4_phys, uint64_t vaddr, int level, int create) {
        if (!is_valid_phys_addr(pml4_phys)) return 0;
    
        // Access the table via identity map when available, otherwise via higher-half mapping
        uint64_t* table_virt = (pml4_phys < IDENTITY_MAP_SIZE)
            ? (uint64_t*)pml4_phys
            : (uint64_t*)PHYS_TO_VIRT(pml4_phys);
    
        int shift = 39 - (level * 9);
        int index = (vaddr >> shift) & PT_INDEX_MASK;
        if (index >= 512) return 0;
    
        if (!(table_virt[index] & PAGE_PRESENT)) {
            if (!create) return 0;
    
            // Allocate page-table memory from identity-mapped low memory to ensure accessibility
            void* new_table_phys = NULL;
            for (int attempt = 0; attempt < 32; attempt++) {
                void* candidate = AllocPage();
                if (!candidate) break;
                if ((uint64_t)candidate < IDENTITY_MAP_SIZE) {
                    new_table_phys = candidate;
                    break;
                }
                // Not identity-mapped; return it to the pool and try again
                FreePage(candidate);
            }
            if (!new_table_phys) return 0;
            if (!is_valid_phys_addr((uint64_t)new_table_phys)) {
                FreePage(new_table_phys);
                return 0;
            }
    
            // Zero the new table using an address we can access
            if ((uint64_t)new_table_phys < IDENTITY_MAP_SIZE) {
                FastZeroPage(new_table_phys);
            } else {
                FastZeroPage(PHYS_TO_VIRT(new_table_phys));
            }
    
            table_virt[index] = (uint64_t)new_table_phys | PAGE_PRESENT | PAGE_WRITABLE;
            return (uint64_t)new_table_phys;
        }
    
        return table_virt[index] & PT_ADDR_MASK;
    }
    
  • File: kernel/memory/VMem.c, lines 72-79
    Content:

    // Now test identity mapping -- FAILING
    // if (VMemGetPhysAddr(0x100000) != 0x100000) {
    //     PANIC("Bootstrap identity mapping failed - VALIDATION FAILED");
    // }
    // const uint64_t probe = IDENTITY_MAP_SIZE - PAGE_SIZE;
    // if (VMemGetPhysAddr(probe) != probe) {
    //     PANIC("Bootstrap identity mapping failed at IDENTITY_MAP_SIZE boundary");
    // }
    
  • File: kernel/atomic/Spinlock.h, lines 26-60
    Content:

    static inline void SpinLock(volatile int* lock) {
        uint64_t start = get_cycles();
        uint64_t backoff = 1;
        uint32_t attempts = 0;
    
        while (1) {
            // Try to acquire without contention first
            if (!*lock && !__sync_lock_test_and_set(lock, 1)) {
                return;
            }
    
            // Deadlock detection
            if (get_cycles() - start > DEADLOCK_TIMEOUT_CYCLES) {
                backoff_delay(MAX_BACKOFF_CYCLES);
                start = get_cycles();
                attempts = 0;
                continue;
            }
    
            attempts++;
    
            // Adaptive spinning strategy
            if (attempts < 100) {
                // Initial fast spinning with pause
                for (int i = 0; i < 64; i++) {
                    if (!*lock) break;
                    __builtin_ia32_pause();
                }
            } else {
                // Switch to exponential backoff after many attempts
                backoff_delay(backoff);
                backoff = (backoff * 2) > MAX_BACKOFF_CYCLES ? MAX_BACKOFF_CYCLES : (backoff * 2);
            }
        }
    }
    
  • File: kernel/atomic/Spinlock.h, lines 124-129
    Content:

    static inline irq_flags_t SpinLockIrqSave(volatile int* lock) {
        irq_flags_t flags = save_irq_flags();
        cli();
        SpinLock(lock);  // Uses the advanced version above
        return flags;
    }
    
  • File: kernel/atomic/Spinlock.h, lines 131-134
    Content:

    static inline void SpinUnlockIrqRestore(volatile int* lock, irq_flags_t flags) {
        __sync_lock_release(lock);
        restore_irq_flags(flags);
    }
    
  • File: kernel/memory/Memory.c, lines 177-233
    Content:

    void* AllocPage(void) {
        irq_flags_t flags = SpinLockIrqSave(&memory_lock);
    
        // Check low memory condition
        if (used_pages > (total_pages * 9) / 10) { // 90% used
            if (low_memory_watermark == 0) {
                low_memory_watermark = used_pages;
                PrintKernelWarning("[MEMORY] Low memory warning: ");
                PrintKernelInt((total_pages - used_pages) * PAGE_SIZE / (1024 * 1024));
                PrintKernel("MB remaining\n");
            }
        }
    
        // Fast word-based search from hint
        uint64_t start_word = next_free_hint / 64;
        uint64_t total_words = (total_pages + 63) / 64;
    
        // Search from hint word onwards
        for (uint64_t word_idx = start_word; word_idx < total_words; word_idx++) {
            if (page_bitmap[word_idx] != ~0ULL) { // Not all bits set
                int bit_pos = FindFirstFreeBit(page_bitmap[word_idx]);
                if (bit_pos >= 0) {
                    uint64_t page_idx = word_idx * 64 + bit_pos;
                    if (page_idx >= total_pages) break;
                    if (page_idx < 0x100000 / PAGE_SIZE) continue; // Skip low memory
    
                    MarkPageUsed(page_idx);
                    next_free_hint = page_idx + 1;
                    void* page = (void*)(page_idx * PAGE_SIZE);
                    SpinUnlockIrqRestore(&memory_lock, flags);
                    return page;
                }
            }
        }
    
        // Search from beginning to hint
        uint64_t min_word = (0x100000 / PAGE_SIZE) / 64;
        for (uint64_t word_idx = min_word; word_idx < start_word; word_idx++) {
            if (page_bitmap[word_idx] != ~0ULL) {
                int bit_pos = FindFirstFreeBit(page_bitmap[word_idx]);
                if (bit_pos >= 0) {
                    uint64_t page_idx = word_idx * 64 + bit_pos;
                    if (page_idx < 0x100000 / PAGE_SIZE) continue;
    
                    MarkPageUsed(page_idx);
                    next_free_hint = page_idx + 1;
                    void* page = (void*)(page_idx * PAGE_SIZE);
                    SpinUnlockIrqRestore(&memory_lock, flags);
                    return page;
                }
            }
        }
    
        allocation_failures++;
        SpinUnlockIrqRestore(&memory_lock, flags);
        return NULL; // Out of memory
    }
    
  • File: kernel/memory/Memory.c, lines 275-313
    Content:

    void FreePage(void* page) {
        if (!page) {
            PrintKernelError("[MEMORY] FreePage: NULL pointer\n");
            return;
        }
    
        uint64_t addr = (uint64_t)page;
        if (addr % PAGE_SIZE != 0) {
            PrintKernelError("[MEMORY] FreePage: Unaligned address ");
            PrintKernelHex(addr); PrintKernel("\n");
            return;
        }
    
        uint64_t page_idx = addr / PAGE_SIZE;
        if (page_idx >= total_pages) {
            PrintKernelError("[MEMORY] FreePage: Page index out of bounds: ");
            PrintKernelInt(page_idx); PrintKernel("\n");
            return;
        }
    
        irq_flags_t flags = SpinLockIrqSave(&memory_lock);
    
        // Check for double free
        if (IsPageFree(page_idx)) {
            SpinUnlockIrqRestore(&memory_lock, flags);
            PrintKernelError("[MEMORY] Double free of page ");
            PrintKernelHex(addr); PrintKernel("\n");
            return;
        }
    
        MarkPageFree(page_idx);
    
        // Update hint if this page is before current hint
        if (page_idx < next_free_hint) {
            next_free_hint = page_idx;
        }
    
        SpinUnlockIrqRestore(&memory_lock, flags);
    }
    

✏️ Learnings added
Learnt from: assembler-0
PR: assembler-0/VoidFrame#93
File: kernel/memory/VMem.c:72-79
Timestamp: 2025-08-17T08:16:43.520Z
Learning: In VoidFrame kernel, VMemGetPhysAddr and related page table walking functions were causing panics because they assumed identity mapping existed for physical addresses < IDENTITY_MAP_SIZE (4GB), but the bootloader doesn't provide this 1:1 mapping. The fix is to remove all identity mapping assumptions and always use PHYS_TO_VIRT() for page table access in functions like VMemGetPageTablePhys, VMemMap, VMemMapHuge, VMemGetPhysAddr, VMemUnmap, VMemMapMMIO, and VMemUnmapMMIO.

Note: Learnings are effective only in the context of similar code segments. To apply general coding guidelines, please configure review instructions. You can manage existing learnings in the app.


🧠 Learnings used
Learnt from: assembler-0
PR: assembler-0/VoidFrame#47
File: kernel/core/Kernel.c:434-434
Timestamp: 2025-07-16T01:04:23.790Z
Learning: VMemAlloc() in VoidFrame kernel fails with error code -2 (VMEM_ERROR_INVALID_ADDR) due to virtual address space layout conflict where the heap start address (_bss_end + KERNEL_VIRTUAL_OFFSET + 1MB margin) exceeds VIRT_ADDR_SPACE_END (0xFFFFFFFF80000000ULL), causing VMemMap validation to fail.

Learnt from: assembler-0
PR: assembler-0/VoidFrame#79
File: drivers/PCI/PCI.c:154-154
Timestamp: 2025-08-13T07:19:55.274Z
Learning: VMemUnmapMMIO in VoidFrame kernel should use the same page table access pattern as the working VMemUnmap function: `uint64_t* pt_table = (uint64_t*)(pt_phys + KERNEL_VIRTUAL_OFFSET);` rather than PHYS_TO_VIRT macro or identity mapping checks.

Learnt from: assembler-0
PR: assembler-0/VoidFrame#90
File: kernel/memory/MemOps.c:103-124
Timestamp: 2025-08-16T05:28:34.904Z
Learning: In VoidFrame kernel, the virtual address space management uses a simple linear allocator (kernel_space.next_vaddr) without any reclamation mechanism. VMemFree and VMemUnmapMMIO clear page tables but don't free virtual address space for reuse, causing eventual virtual address space exhaustion. The solution requires implementing a free list with coalescing in the VirtAddrSpace structure and updating allocation/deallocation functions to reuse freed ranges.

Learnt from: assembler-0
PR: assembler-0/VoidFrame#92
File: meson.build:31-34
Timestamp: 2025-08-17T05:53:48.018Z
Learning: The VoidFrame kernel's memory detection was reporting incorrect memory amounts (like 9200MB) because it used the highest physical address (including memory holes and reserved regions) as total memory size, rather than summing only the MULTIBOOT2_MEMORY_AVAILABLE regions. The fix involves counting only available memory regions and using the highest available address only for bitmap sizing.

Learnt from: assembler-0
PR: assembler-0/VoidFrame#79
File: drivers/PCI/PCI.c:154-154
Timestamp: 2025-08-13T07:17:35.941Z
Learning: In VoidFrame kernel, VMemGetPageTablePhys uses a 3-level hierarchy: Level 0 = PML4->PDP, Level 1 = PDP->PD, Level 2 = PD->PT. To access Page Tables, always use level=2, not level=1.

Learnt from: assembler-0
PR: assembler-0/VoidFrame#53
File: kernel/core/Kernel.c:256-259
Timestamp: 2025-08-09T04:47:56.883Z
Learning: The VoidFrame kernel memory crash occurs when VIRT_ADDR_SPACE_START is set to 0xFFFF800000000000ULL instead of 0xFFFFFFFF80000000ULL, causing the heap to allocate 128TB away from where the kernel is mapped, resulting in page faults when accessing heap memory.

Learnt from: assembler-0
PR: assembler-0/VoidFrame#90
File: kernel/memory/MemOps.c:103-124
Timestamp: 2025-08-16T05:27:10.739Z
Learning: In VoidFrame kernel VMemAlloc implementation, different virtual addresses on consecutive allocations after freeing is expected behavior because VMemAlloc uses a simple bump allocator (kernel_space.next_vaddr) that only moves forward and doesn't track or reuse freed virtual address ranges.

Learnt from: assembler-0
PR: assembler-0/VoidFrame#58
File: drivers/VesaBIOSExtension.c:77-85
Timestamp: 2025-08-09T10:31:44.439Z
Learning: In VoidFrame kernel, process stacks are allocated using VMemAllocWithGuards() which returns virtual addresses, but TerminateProcess incorrectly uses FreePage() (which expects physical addresses) to free them, causing "Page index out of bounds" errors. The correct function to use is VMemFreeWithGuards(proc->stack, STACK_SIZE).

assembler-0 (Owner, Author)

Wait, has VMemMapMMIO changed? It looks good to me; are any changes required?

🧩 Analysis chain

@assembler-0, actually VMemMapMMIO and VMemUnmapMMIO still have the same identity mapping problem! Let me show you:

Looking at the current code, both functions still have these problematic lines:

VMemMapMMIO (around line 635):

// Access PT through identity mapping if possible
uint64_t* pt_virt;
if (pt_phys < IDENTITY_MAP_SIZE) {
    pt_virt = (uint64_t*)pt_phys;
} else {
    pt_virt = (uint64_t*)PHYS_TO_VIRT(pt_phys);
}

VMemUnmapMMIO (around line 701):

// Access PT through identity mapping if possible
uint64_t* pt_table;
if (pt_phys < IDENTITY_MAP_SIZE) {
    pt_table = (uint64_t*)pt_phys;
} else {
    pt_table = (uint64_t*)PHYS_TO_VIRT(pt_phys);
}

These need the same fix as the other functions. Here's the corrected code:

// Fix for VMemMapMMIO - always use PHYS_TO_VIRT
uint64_t* pt_virt = (uint64_t*)PHYS_TO_VIRT(pt_phys);

// Fix for VMemUnmapMMIO - always use PHYS_TO_VIRT  
uint64_t* pt_table = (uint64_t*)PHYS_TO_VIRT(pt_phys);

Without this fix, VMemMapMMIO and VMemUnmapMMIO will also panic when they try to access page tables that aren't identity-mapped!


🏁 Script executed:

#!/bin/bash
# Check VMemMapMMIO and VMemUnmapMMIO for identity mapping conditionals
rg -nA5 -B5 "IDENTITY_MAP_SIZE.*pt_" kernel/memory/VMem.c

Length of output: 59


🏁 Script executed:

#!/bin/bash
# Search for identity mapping conditionals in VMemMapMMIO and VMemUnmapMMIO
rg -n "pt_phys.*IDENTITY_MAP_SIZE" kernel/memory/VMem.c

Length of output: 284


🏁 Script executed:

#!/bin/bash
# Also search for the broader pattern of identity mapping checks
rg -nC3 "< IDENTITY_MAP_SIZE" kernel/memory/VMem.c

Length of output: 2557


🏁 Script executed:

#!/bin/bash
# Let's examine the VMemMapMMIO and VMemUnmapMMIO functions specifically
ast-grep --pattern 'int VMemMapMMIO($$$) {
  $$$
}'

Length of output: 5726


@assembler-0, actually VMemMapMMIO and VMemUnmapMMIO do need changes! The search results show the problematic identity mapping conditionals are still there:

VMemMapMMIO (Line 638):

// Access PT through identity mapping if possible
uint64_t* pt_virt;
if (pt_phys < IDENTITY_MAP_SIZE) {
    pt_virt = (uint64_t*)pt_phys;          // ← This will panic!
} else {
    pt_virt = (uint64_t*)PHYS_TO_VIRT(pt_phys);
}

VMemUnmapMMIO (Line 723):

// Access PT through identity mapping if possible
uint64_t* pt_table;
if (pt_phys < IDENTITY_MAP_SIZE) {
    pt_table = (uint64_t*)pt_phys;         // ← This will panic too!
} else {
    pt_table = (uint64_t*)PHYS_TO_VIRT(pt_phys);
}

These have the exact same problem as VMemGetPhysAddr - they assume identity mapping exists for physical addresses < 4GB, but the bootloader doesn't provide it.

The fix is simple - remove the conditionals and always use PHYS_TO_VIRT:

// Fix for VMemMapMMIO (replace lines 637-642):
uint64_t* pt_virt = (uint64_t*)PHYS_TO_VIRT(pt_phys);

// Fix for VMemUnmapMMIO (replace lines 722-727):
uint64_t* pt_table = (uint64_t*)PHYS_TO_VIRT(pt_phys);

Without this fix, any call to VMemMapMMIO or VMemUnmapMMIO will panic when accessing page tables, just like VMemGetPhysAddr was doing!

Actually, looking at the search results, this identity mapping assumption is systemic - it appears in 6 different locations across multiple functions. All of them need the same fix.


🧬 Code Graph Analysis Results
  • File: kernel/atomic/Spinlock.h, lines 124-134
static inline irq_flags_t SpinLockIrqSave(volatile int* lock) {
    irq_flags_t flags = save_irq_flags();
    cli();
    SpinLock(lock);  // Uses the advanced version above
    return flags;
}

/* ... */

static inline void SpinUnlockIrqRestore(volatile int* lock, irq_flags_t flags) {
    __sync_lock_release(lock);
    restore_irq_flags(flags);
}
  • File: kernel/memory/Memory.c, lines 177-233
void* AllocPage(void) {
    irq_flags_t flags = SpinLockIrqSave(&memory_lock);

    // Check low memory condition
    if (used_pages > (total_pages * 9) / 10) { // 90% used
        if (low_memory_watermark == 0) {
            low_memory_watermark = used_pages;
            PrintKernelWarning("[MEMORY] Low memory warning: ");
            PrintKernelInt((total_pages - used_pages) * PAGE_SIZE / (1024 * 1024));
            PrintKernel("MB remaining\n");
        }
    }

    // Fast word-based search from hint
    uint64_t start_word = next_free_hint / 64;
    uint64_t total_words = (total_pages + 63) / 64;

    // Search from hint word onwards
    for (uint64_t word_idx = start_word; word_idx < total_words; word_idx++) {
        if (page_bitmap[word_idx] != ~0ULL) { // Not all bits set
            int bit_pos = FindFirstFreeBit(page_bitmap[word_idx]);
            if (bit_pos >= 0) {
                uint64_t page_idx = word_idx * 64 + bit_pos;
                if (page_idx >= total_pages) break;
                if (page_idx < 0x100000 / PAGE_SIZE) continue; // Skip low memory

                MarkPageUsed(page_idx);
                next_free_hint = page_idx + 1;
                void* page = (void*)(page_idx * PAGE_SIZE);
                SpinUnlockIrqRestore(&memory_lock, flags);
                return page;
            }
        }
    }

    // Search from beginning to hint
    uint64_t min_word = (0x100000 / PAGE_SIZE) / 64;
    for (uint64_t word_idx = min_word; word_idx < start_word; word_idx++) {
        if (page_bitmap[word_idx] != ~0ULL) {
            int bit_pos = FindFirstFreeBit(page_bitmap[word_idx]);
            if (bit_pos >= 0) {
                uint64_t page_idx = word_idx * 64 + bit_pos;
                if (page_idx < 0x100000 / PAGE_SIZE) continue;

                MarkPageUsed(page_idx);
                next_free_hint = page_idx + 1;
                void* page = (void*)(page_idx * PAGE_SIZE);
                SpinUnlockIrqRestore(&memory_lock, flags);
                return page;
            }
        }
    }

    allocation_failures++;
    SpinUnlockIrqRestore(&memory_lock, flags);
    return NULL; // Out of memory
}
  • File: kernel/memory/Memory.c, lines 275-313
void FreePage(void* page) {
    if (!page) {
        PrintKernelError("[MEMORY] FreePage: NULL pointer\n");
        return;
    }

    uint64_t addr = (uint64_t)page;
    if (addr % PAGE_SIZE != 0) {
        PrintKernelError("[MEMORY] FreePage: Unaligned address ");
        PrintKernelHex(addr); PrintKernel("\n");
        return;
    }

    uint64_t page_idx = addr / PAGE_SIZE;
    if (page_idx >= total_pages) {
        PrintKernelError("[MEMORY] FreePage: Page index out of bounds: ");
        PrintKernelInt(page_idx); PrintKernel("\n");
        return;
    }

    irq_flags_t flags = SpinLockIrqSave(&memory_lock);

    // Check for double free
    if (IsPageFree(page_idx)) {
        SpinUnlockIrqRestore(&memory_lock, flags);
        PrintKernelError("[MEMORY] Double free of page ");
        PrintKernelHex(addr); PrintKernel("\n");
        return;
    }

    MarkPageFree(page_idx);

    // Update hint if this page is before current hint
    if (page_idx < next_free_hint) {
        next_free_hint = page_idx;
    }

    SpinUnlockIrqRestore(&memory_lock, flags);
}

🧠 Learnings used
Learnt from: assembler-0
PR: assembler-0/VoidFrame#93
File: kernel/memory/VMem.c:72-79
Timestamp: 2025-08-17T08:16:43.520Z
Learning: In VoidFrame kernel, VMemGetPhysAddr and related page table walking functions were causing panics because they assumed identity mapping existed for physical addresses < IDENTITY_MAP_SIZE (4GB), but the bootloader doesn't provide this 1:1 mapping. The fix is to remove all identity mapping assumptions and always use PHYS_TO_VIRT() for page table access in functions like VMemGetPageTablePhys, VMemMap, VMemMapHuge, VMemGetPhysAddr, VMemUnmap, VMemMapMMIO, and VMemUnmapMMIO.

Learnt from: assembler-0
PR: assembler-0/VoidFrame#47
File: kernel/core/Kernel.c:434-434
Timestamp: 2025-07-16T01:04:23.790Z
Learning: VMemAlloc() in VoidFrame kernel fails with error code -2 (VMEM_ERROR_INVALID_ADDR) due to virtual address space layout conflict where the heap start address (_bss_end + KERNEL_VIRTUAL_OFFSET + 1MB margin) exceeds VIRT_ADDR_SPACE_END (0xFFFFFFFF80000000ULL), causing VMemMap validation to fail.

Learnt from: assembler-0
PR: assembler-0/VoidFrame#79
File: drivers/PCI/PCI.c:154-154
Timestamp: 2025-08-13T07:19:55.274Z
Learning: VMemUnmapMMIO in VoidFrame kernel should use the same page table access pattern as the working VMemUnmap function: `uint64_t* pt_table = (uint64_t*)(pt_phys + KERNEL_VIRTUAL_OFFSET);` rather than PHYS_TO_VIRT macro or identity mapping checks.

Learnt from: assembler-0
PR: assembler-0/VoidFrame#90
File: kernel/memory/MemOps.c:103-124
Timestamp: 2025-08-16T05:28:34.904Z
Learning: In VoidFrame kernel, the virtual address space management uses a simple linear allocator (kernel_space.next_vaddr) without any reclamation mechanism. VMemFree and VMemUnmapMMIO clear page tables but don't free virtual address space for reuse, causing eventual virtual address space exhaustion. The solution requires implementing a free list with coalescing in the VirtAddrSpace structure and updating allocation/deallocation functions to reuse freed ranges.

Learnt from: assembler-0
PR: assembler-0/VoidFrame#92
File: meson.build:31-34
Timestamp: 2025-08-17T05:53:48.018Z
Learning: The VoidFrame kernel's memory detection was reporting incorrect memory amounts (like 9200MB) because it used the highest physical address (including memory holes and reserved regions) as total memory size, rather than summing only the MULTIBOOT2_MEMORY_AVAILABLE regions. The fix involves counting only available memory regions and using the highest available address only for bitmap sizing.

Learnt from: assembler-0
PR: assembler-0/VoidFrame#79
File: drivers/PCI/PCI.c:154-154
Timestamp: 2025-08-13T07:17:35.941Z
Learning: In VoidFrame kernel, VMemGetPageTablePhys uses a 3-level hierarchy: Level 0 = PML4->PDP, Level 1 = PDP->PD, Level 2 = PD->PT. To access Page Tables, always use level=2, not level=1.

Learnt from: assembler-0
PR: assembler-0/VoidFrame#53
File: kernel/core/Kernel.c:256-259
Timestamp: 2025-08-09T04:47:56.883Z
Learning: The VoidFrame kernel memory crash occurs when VIRT_ADDR_SPACE_START is set to 0xFFFF800000000000ULL instead of 0xFFFFFFFF80000000ULL, causing the heap to allocate 128TB away from where the kernel is mapped, resulting in page faults when accessing heap memory.

Learnt from: assembler-0
PR: assembler-0/VoidFrame#90
File: kernel/memory/MemOps.c:103-124
Timestamp: 2025-08-16T05:27:10.739Z
Learning: In VoidFrame kernel VMemAlloc implementation, different virtual addresses on consecutive allocations after freeing is expected behavior because VMemAlloc uses a simple bump allocator (kernel_space.next_vaddr) that only moves forward and doesn't track or reuse freed virtual address ranges.

Learnt from: assembler-0
PR: assembler-0/VoidFrame#58
File: drivers/VesaBIOSExtension.c:77-85
Timestamp: 2025-08-09T10:31:44.439Z
Learning: In VoidFrame kernel, process stacks are allocated using VMemAllocWithGuards() which returns virtual addresses, but TerminateProcess incorrectly uses FreePage() (which expects physical addresses) to free them, causing "Page index out of bounds" errors. The correct function to use is VMemFreeWithGuards(proc->stack, STACK_SIZE).

Learnt from: assembler-0
PR: assembler-0/VoidFrame#82
File: kernel/process/Process.h:0-0
Timestamp: 2025-08-14T02:11:58.963Z
Learning: In VoidFrame kernel's FastMemcpy implementation (kernel/memory/MemOps.c), memory corruption occurs under high load due to interrupts occurring between SSE2 movdqu load and store instructions, corrupting XMM register state. The fix requires disabling interrupts around SSE operations and adding memory barriers (sfence) to ensure stores complete atomically.

PrintKernelSuccess("VMem: VMem initialized using existing PML4: ");
PrintKernelHex(pml4_phys_addr);
PrintKernel("\n");
@@ -559,9 +556,6 @@ int VMemUnmap(uint64_t vaddr, uint64_t size) {
return VMEM_SUCCESS;
}

/**
* @brief Gets virtual memory statistics
*/
void VMemGetStats(uint64_t* used_pages, uint64_t* total_mapped) {
SpinLock(&vmem_lock);
if (used_pages) *used_pages = kernel_space.used_pages;
2 changes: 1 addition & 1 deletion kernel/memory/VMem.h
@@ -22,7 +22,7 @@
#define PAGE_NOCACHE 0x010
#define PAGE_ACCESSED 0x020
#define PAGE_DIRTY 0x040
#define PAGE_LARGE 0x080
#define PAGE_LARGE 0x080 // (1ULL << 7)
#define PAGE_GLOBAL 0x100
#define PAGE_NX 0x8000000000000000ULL
