Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion arch/x86_64/interrupts/Interrupts.c
Original file line number Diff line number Diff line change
Expand Up @@ -76,7 +76,7 @@ static void FatalExceptionHandler(const char* message, uint64_t interrupt_number

Process* current_process = GetCurrentProcess();
if (current_process != NULL) {
current_process->state = PROC_TERMINATED;
KillProcess(current_process->pid);
RequestSchedule();
} else {
Panic("Cannot terminate process, NULL process found!");
Expand Down
2 changes: 1 addition & 1 deletion drivers/Pic.h
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
#ifndef PIC_H
#define PIC_H

#define PIT_FREQUENCY_HZ 250
#define PIT_FREQUENCY_HZ 1000

int PicInstall();
void PitInstall();
Expand Down
2 changes: 1 addition & 1 deletion kernel/core/Kernel.c
Original file line number Diff line number Diff line change
Expand Up @@ -337,7 +337,7 @@ static InitResultT SystemInitialize(void) {
PrintKernel("[INFO] Initializing process management...\n");
ProcessInit(); // void function - assume success
PrintKernelSuccess("[SYSTEM] Process management initialized\n");

// VMemInit();
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

💡 Verification agent

🧩 Analysis chain

Critical: Virtual memory initialization is disabled.

Commenting out VMemInit() disables the kernel's virtual memory subsystem initialization. This is a fundamental component that should not be disabled without clear justification.

Please explain why this initialization is being skipped and confirm that the system can operate correctly without it.


🏁 Script executed:

#!/bin/bash
set -e
# Locate all uses of VMemInit across the codebase
rg -n 'VMemInit' .
# Show context around SystemInitialize in Kernel.c
sed -n '330,370p' kernel/core/Kernel.c
# Search for any replacement or new VM initialization functions
rg -n 'MemInit' -n kernel/core

Length of output: 2156


🏁 Script executed:

#!/bin/bash
set -e
# Locate the file with MemoryInit definition
file=$(rg -l 'void MemoryInit' -n kernel/memory)
echo "MemoryInit found in: $file"
# Display the first 200 lines to include the function definition
sed -n '1,200p' "$file"

Length of output: 60


🏁 Script executed:

#!/bin/bash
set -e
# Search for all occurrences of MemoryInit to find its definition and usage
rg -n 'MemoryInit' .
# Specifically look for its declaration/definition
rg -n 'void MemoryInit' .

Length of output: 304


Re-enable Virtual Memory Initialization or Provide Justification

Disabling VMemInit() in kernel/core/Kernel.c:340 leaves the virtual memory subsystem uninitialized, which will prevent page‐table setup and break fundamental kernel operations. No alternative VM init call was found in the codebase.

• File: kernel/core/Kernel.c
Line: 340
Change:

-   // VMemInit();
+   VMemInit();

Please either restore the VMemInit() call or document and implement a suitable replacement to ensure the kernel’s virtual memory layer is correctly initialized.

Committable suggestion skipped: line range outside the PR's diff.

🤖 Prompt for AI Agents
In kernel/core/Kernel.c at line 340, the call to VMemInit() has been commented
out, disabling virtual memory initialization and breaking kernel operations
dependent on page-table setup. To fix this, either uncomment and restore the
original VMemInit() call to re-enable the virtual memory subsystem
initialization or provide a documented and implemented alternative
initialization function that properly sets up the kernel's virtual memory layer.

return INIT_SUCCESS;
}
void KernelMain(uint32_t magic, uint32_t info) {
Expand Down
5 changes: 0 additions & 5 deletions kernel/memory/Memory.c
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,6 @@ static void MarkPageFree(uint64_t page_idx) {
}

int MemoryInit(uint32_t multiboot_info_addr) {
PrintKernel("[INFO] MemoryInit: Initializing memory manager...\n");
FastMemset(page_bitmap, 0, MAX_BITMAP_SIZE);

uint32_t total_multiboot_size = *(uint32_t*)multiboot_info_addr;
Expand Down Expand Up @@ -111,10 +110,6 @@ int MemoryInit(uint32_t multiboot_info_addr) {
for (uint64_t i = 0; i < kernel_end_page; i++) {
MarkPageUsed(i);
}

PrintKernel("[INFO] MemoryInit: Used pages after kernel and reserved regions: ");
PrintKernelInt(used_pages);
PrintKernel("\n");
return 0;
}

Expand Down
101 changes: 51 additions & 50 deletions kernel/memory/VMem.c
Original file line number Diff line number Diff line change
Expand Up @@ -65,34 +65,23 @@ void VMemInit(void) {
void* pml4_phys = AllocPage();
if (!pml4_phys) {
Panic("VMemInit: Failed to allocate PML4 table");
return;
}

// CRITICAL FIX: Store physical address BEFORE converting
uint64_t pml4_phys_addr = (uint64_t)pml4_phys;

// Convert to virtual address and zero it
uint64_t* pml4_virt = (uint64_t*)PHYS_TO_VIRT(pml4_phys);
FastZeroPage(pml4_virt);

// Zero the newly allocated physical page directly, assuming it's identity-mapped by the bootloader.
FastZeroPage(pml4_phys);
// Initialize kernel space tracking
kernel_space.next_vaddr = VIRT_ADDR_SPACE_START;
kernel_space.used_pages = 0;
kernel_space.total_mapped = 0;

// CRITICAL FIX: Store physical address, not virtual!
kernel_space.pml4 = (uint64_t*)pml4_phys_addr;

PrintKernel("VMemInit: Initialized kernel virtual memory manager at phys");
PrintKernel(pml4_phys);
PrintKernelSuccess("[SYSTEM] Initialized kernel virtual memory manager at address: ");
PrintKernelHex(pml4_phys_addr);
PrintKernel("\n");
}

/**
* @brief Retrieves or creates a page table entry at a specific level
*
* CRITICAL FIX: Proper physical address masking and validation
*/
static uint64_t VMemGetPageTablePhys(uint64_t pml4_phys, uint64_t vaddr, int level, int create) {
// Validate physical address
if (!is_valid_phys_addr(pml4_phys)) {
Expand Down Expand Up @@ -127,6 +116,11 @@ static uint64_t VMemGetPageTablePhys(uint64_t pml4_phys, uint64_t vaddr, int lev

// Zero the new table
uint64_t* new_table_virt = (uint64_t*)PHYS_TO_VIRT(new_table_phys);
if (!VMemIsPageMapped((uint64_t)new_table_virt)) {
FreePage(new_table_phys);
Panic("VMemGetPageTablePhys: Newly allocated page is not accessible via PHYS_TO_VIRT!");
}

FastZeroPage(new_table_virt);

// Set the entry with physical address
Expand Down Expand Up @@ -296,53 +290,60 @@ void* VMemAlloc(uint64_t size) {
return (void*)vaddr;
}

/**
* @brief Frees previously allocated virtual memory
*
* CRITICAL FIX: Get physical addresses before unmapping
*/
void VMemFree(void* vaddr, uint64_t size) {
if (!vaddr || size == 0) return;

size = PAGE_ALIGN_UP(size);
uint64_t current_vaddr = PAGE_ALIGN_DOWN((uint64_t)vaddr);
uint64_t start_vaddr = PAGE_ALIGN_DOWN((uint64_t)vaddr);
uint64_t num_pages = size / PAGE_SIZE;

// First pass: collect physical addresses
uint64_t* phys_addrs = (uint64_t*)VMemAlloc(size / PAGE_SIZE * sizeof(uint64_t));
if (!phys_addrs) return; // Can't allocate tracking array
for (uint64_t i = 0; i < num_pages; i++) {
uint64_t current_vaddr = start_vaddr + (i * PAGE_SIZE);

int page_count = 0;
for (uint64_t offset = 0; offset < size; offset += PAGE_SIZE) {
uint64_t page_vaddr = current_vaddr + offset;
uint64_t paddr = VMemGetPhysAddr(page_vaddr);
if (paddr) {
phys_addrs[page_count++] = paddr;
}
}
// This lock is to protect the page table walk in VMemGetPhysAddr
// and the modification below.
vmem_spin_lock(&vmem_lock);

// Second pass: unmap
VMemUnmap(current_vaddr, size);
// Find the physical page before we destroy the mapping
uint64_t paddr = VMemGetPhysAddr(current_vaddr);

// Third pass: free physical pages
for (int i = 0; i < page_count; i++) {
FreePage((void*)phys_addrs[i]);
}
// Navigate to the Page Table Entry (PTE)
uint64_t pdp_phys = VMemGetPageTablePhys((uint64_t)kernel_space.pml4, current_vaddr, 0, 0);
if (!pdp_phys) { vmem_spin_unlock(&vmem_lock); continue; }

// Update tracking
vmem_spin_lock(&vmem_lock);
kernel_space.used_pages -= size / PAGE_SIZE;
kernel_space.total_mapped -= size;
vmem_spin_unlock(&vmem_lock);
uint64_t pd_phys = VMemGetPageTablePhys(pdp_phys, current_vaddr, 1, 0);
if (!pd_phys) { vmem_spin_unlock(&vmem_lock); continue; }

uint64_t pt_phys = VMemGetPageTablePhys(pd_phys, current_vaddr, 2, 0);
if (!pt_phys) { vmem_spin_unlock(&vmem_lock); continue; }

// Free the tracking array (recursive call, but small)
VMemFree(phys_addrs, size / PAGE_SIZE * sizeof(uint64_t));
// Get virtual address of the page table to modify it
uint64_t* pt_virt = (uint64_t*)PHYS_TO_VIRT(pt_phys);
int pt_index = (current_vaddr >> PT_SHIFT) & PT_INDEX_MASK;

// Check if it's actually mapped before proceeding
if (pt_virt[pt_index] & PAGE_PRESENT) {
// Clear the entry (unmap)
pt_virt[pt_index] = 0;

// Flush the TLB for this specific address
VMemFlushTLBSingle(current_vaddr);

// Update stats
kernel_space.used_pages--;
kernel_space.total_mapped -= PAGE_SIZE;
}

vmem_spin_unlock(&vmem_lock);

// Now that the page is unmapped and the lock is released,
// free the physical page. Only do this if a valid paddr was found.
if (paddr != 0) {
FreePage((void*)paddr);
}
}
}

/**
* @brief Gets the physical address for a virtual address
*
* CRITICAL FIX: Proper address masking and validation
*/
uint64_t VMemGetPhysAddr(uint64_t vaddr) {
uint64_t pdp_phys = VMemGetPageTablePhys((uint64_t)kernel_space.pml4, vaddr, 0, 0);
if (!pdp_phys) return 0;
Expand Down