2 changes: 1 addition & 1 deletion drivers/Pic.h
@@ -1,7 +1,7 @@
#ifndef PIC_H
#define PIC_H

-#define PIT_FREQUENCY_HZ 200
+#define PIT_FREQUENCY_HZ 1000

int PicInstall();
void PitInstall();
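Raising PIT_FREQUENCY_HZ from 200 to 1000 tightens the timer tick from 5 ms to 1 ms. For context, the 8253/8254 PIT divides a fixed ~1.193182 MHz input clock, so at 1000 Hz PitInstall presumably loads channel 0 with a divisor of 1193182 / 1000 = 1193. A sketch of that setup, assuming a typical osdev-style `outb` helper and port constants that are not taken from this repository:

```c
#include <stdint.h>
#include "Pic.h"                      /* for PIT_FREQUENCY_HZ */

#define PIT_BASE_FREQUENCY 1193182u   /* fixed 8253/8254 input clock, Hz */
#define PIT_CHANNEL0_DATA  0x40
#define PIT_COMMAND        0x43

/* Hypothetical port-write helper; the kernel presumably has its own. */
static inline void outb(uint16_t port, uint8_t val) {
    __asm__ volatile ("outb %0, %1" : : "a"(val), "Nd"(port));
}

void PitInstall(void) {
    /* 1193182 / 1000 = 1193, i.e. one interrupt every ~1.0002 ms. */
    uint16_t divisor = PIT_BASE_FREQUENCY / PIT_FREQUENCY_HZ;
    outb(PIT_COMMAND, 0x36);                        /* ch 0, lo/hi byte, mode 3 */
    outb(PIT_CHANNEL0_DATA, divisor & 0xFF);        /* low byte first */
    outb(PIT_CHANNEL0_DATA, (divisor >> 8) & 0xFF); /* then high byte */
}
```

The trade-off is five times more timer interrupts per second in exchange for finer-grained scheduling and timekeeping.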
7 changes: 5 additions & 2 deletions kernel/core/Kernel.c
@@ -55,7 +55,7 @@ static ConsoleT console = {
.buffer = (volatile uint16_t*)VGA_BUFFER_ADDR,
.color = VGA_COLOR_DEFAULT
};
-
+static volatile int lock = 0;
// Inline functions for better performance
static inline void ConsoleSetColor(uint8_t color) {
console.color = color;
@@ -73,6 +73,7 @@ static inline void ConsolePutcharAt(char c, uint32_t x, uint32_t y, uint8_t colo

// Optimized screen clear using memset-like approach
void ClearScreen(void) {
+SpinLock(&lock);
const uint16_t blank = MakeVGAEntry(' ', VGA_COLOR_DEFAULT);

// Use 32-bit writes for better performance
@@ -86,6 +87,7 @@ void ClearScreen(void) {

console.line = 0;
console.column = 0;
+SpinUnlock(&lock);
}
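The middle of ClearScreen is collapsed above, but per the comment it presumably packs two 16-bit VGA cells into each 32-bit store. A sketch of that idea under the usual 80x25 text-mode assumption (not the repository's actual loop):

```c
/* Sketch: clears two 16-bit cells per store; assumes 80x25 = 2000 cells. */
static void ClearVGA32(volatile uint16_t* buffer, uint16_t blank) {
    const uint32_t packed = ((uint32_t)blank << 16) | blank;
    volatile uint32_t* buf32 = (volatile uint32_t*)buffer;
    for (uint32_t i = 0; i < (80 * 25) / 2; i++) {
        buf32[i] = packed;   /* writes cells 2i and 2i+1 at once */
    }
}
```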

// Optimized scrolling
@@ -136,7 +138,7 @@ static void ConsolePutchar(char c) {
// Modern string output with length checking
void PrintKernel(const char* str) {
if (!str) return;
-
+SpinLock(&lock);
// Cache the original color
const uint8_t original_color = console.color;

@@ -145,6 +147,7 @@ void PrintKernel(const char* str) {
}

console.color = original_color;
+SpinUnlock(&lock);
}

// Colored output variants
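The colored variants themselves are collapsed out of this diff. If they mirror PrintKernel, each should now take the same lock around the whole string; a sketch with an assumed name and body:

```c
/* Sketch only: PrintKernelColor is a hypothetical variant; the real
 * functions behind "Colored output variants" are not shown in this diff. */
void PrintKernelColor(const char* str, uint8_t color) {
    if (!str) return;
    SpinLock(&lock);
    const uint8_t original_color = console.color;
    console.color = color;
    while (*str) {
        ConsolePutchar(*str++);
    }
    console.color = original_color;   /* restore before releasing */
    SpinUnlock(&lock);
}
```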
9 changes: 3 additions & 6 deletions kernel/etc/Splash.c
@@ -93,16 +93,13 @@ void ShowSplashScreen() {
PrintString(12, 20, "Loading: ", COLOR_CYAN_ON_BLACK);

// Simulate loading progress
-for (int progress = 0; progress <= 100; progress += 10) {
+for (int progress = 0; progress <= 100; progress += 7) {
DrawProgressBar(21, 20, 48, progress, COLOR_CYAN_ON_BLACK);

// Simple delay
-for (volatile int i = 0; i < 100000000; i++);
+for (volatile int i = 0; i < 50000000; i++);
}

-// Copyright notice
-
-
// Final delay
-for (volatile int i = 0; i < 500000000; i++);
+for (volatile int i = 0; i < 70000000; i++);
}
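A side note on the delays: these busy-wait constants scale with CPU speed, so the loop counts above are guesses on any given machine. With the PIT now ticking at 1000 Hz, a deterministic millisecond delay becomes possible; a sketch that assumes a `pit_ticks` counter incremented by the timer interrupt handler (no such counter appears in this PR):

```c
/* Hypothetical: relies on the PIT ISR incrementing pit_ticks at 1000 Hz. */
extern volatile uint64_t pit_ticks;

static void DelayMs(uint64_t ms) {
    const uint64_t target = pit_ticks + ms;   /* 1 tick == 1 ms at 1000 Hz */
    while (pit_ticks < target) {
        __builtin_ia32_pause();               /* spin politely until due */
    }
}
```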
49 changes: 19 additions & 30 deletions kernel/memory/VMem.c
@@ -21,17 +21,6 @@ static VirtAddrSpace kernel_space;
static volatile int vmem_lock = 0;

-
-
-static inline void vmem_spin_lock(volatile int* lock) {
-    while (__sync_lock_test_and_set(lock, 1)) {
-        while (*lock) __builtin_ia32_pause();
-    }
-}
-
-static inline void vmem_spin_unlock(volatile int* lock) {
-    __sync_lock_release(lock);
-}

extern uint64_t total_pages;
static inline int is_valid_phys_addr(uint64_t paddr) {
// Basic sanity check - adjust limits based on your system
@@ -127,26 +116,26 @@ int VMemMap(uint64_t vaddr, uint64_t paddr, uint64_t flags) {
return VMEM_ERROR_INVALID_ADDR;
}

-vmem_spin_lock(&vmem_lock);
+SpinLock(&vmem_lock);

// Get PDP table
uint64_t pdp_phys = VMemGetPageTablePhys((uint64_t)kernel_space.pml4, vaddr, 0, 1);
if (!pdp_phys) {
-vmem_spin_unlock(&vmem_lock);
+SpinUnlock(&vmem_lock);
return VMEM_ERROR_NOMEM;
}

// Get PD table
uint64_t pd_phys = VMemGetPageTablePhys(pdp_phys, vaddr, 1, 1);
if (!pd_phys) {
-vmem_spin_unlock(&vmem_lock);
+SpinUnlock(&vmem_lock);
return VMEM_ERROR_NOMEM;
}

// Get PT table
uint64_t pt_phys = VMemGetPageTablePhys(pd_phys, vaddr, 2, 1);
if (!pt_phys) {
-vmem_spin_unlock(&vmem_lock);
+SpinUnlock(&vmem_lock);
return VMEM_ERROR_NOMEM;
}

@@ -156,7 +145,7 @@ int VMemMap(uint64_t vaddr, uint64_t paddr, uint64_t flags) {

// Check if already mapped
if (pt_virt[pt_index] & PAGE_PRESENT) {
-vmem_spin_unlock(&vmem_lock);
+SpinUnlock(&vmem_lock);
return VMEM_ERROR_ALREADY_MAPPED;
}

Expand All @@ -166,7 +155,7 @@ int VMemMap(uint64_t vaddr, uint64_t paddr, uint64_t flags) {
// Invalidate TLB
VMemFlushTLBSingle(vaddr);

-vmem_spin_unlock(&vmem_lock);
+SpinUnlock(&vmem_lock);
return VMEM_SUCCESS;
}

@@ -179,7 +168,7 @@ int VMemUnmap(uint64_t vaddr, uint64_t size) {
size = PAGE_ALIGN_UP(size);
vaddr = PAGE_ALIGN_DOWN(vaddr);

-vmem_spin_lock(&vmem_lock);
+SpinLock(&vmem_lock);

for (uint64_t offset = 0; offset < size; offset += PAGE_SIZE) {
uint64_t current_vaddr = vaddr + offset;
@@ -204,7 +193,7 @@ int VMemUnmap(uint64_t vaddr, uint64_t size) {
}
}

-vmem_spin_unlock(&vmem_lock);
+SpinUnlock(&vmem_lock);
return VMEM_SUCCESS;
}

@@ -218,14 +207,14 @@ void* VMemAlloc(uint64_t size) {

size = PAGE_ALIGN_UP(size);

-vmem_spin_lock(&vmem_lock);
+SpinLock(&vmem_lock);

uint64_t vaddr = kernel_space.next_vaddr;

// Reserve the virtual address space
kernel_space.next_vaddr += size;

-vmem_spin_unlock(&vmem_lock);
+SpinUnlock(&vmem_lock);

// Now map pages without holding the lock
uint64_t allocated_size = 0;
@@ -253,10 +242,10 @@ }
}

// Update tracking
-vmem_spin_lock(&vmem_lock);
+SpinLock(&vmem_lock);
kernel_space.used_pages += size / PAGE_SIZE;
kernel_space.total_mapped += size;
-vmem_spin_unlock(&vmem_lock);
+SpinUnlock(&vmem_lock);

// Zero the allocated memory
FastMemset((void*)vaddr, 0, size);
@@ -275,20 +264,20 @@ void VMemFree(void* vaddr, uint64_t size) {

// This lock is to protect the page table walk in VMemGetPhysAddr
// and the modification below.
-vmem_spin_lock(&vmem_lock);
+SpinLock(&vmem_lock);

// Find the physical page before we destroy the mapping
uint64_t paddr = VMemGetPhysAddr(current_vaddr);

// Navigate to the Page Table Entry (PTE)
uint64_t pdp_phys = VMemGetPageTablePhys((uint64_t)kernel_space.pml4, current_vaddr, 0, 0);
-if (!pdp_phys) { vmem_spin_unlock(&vmem_lock); continue; }
+if (!pdp_phys) { SpinUnlock(&vmem_lock); continue; }

uint64_t pd_phys = VMemGetPageTablePhys(pdp_phys, current_vaddr, 1, 0);
-if (!pd_phys) { vmem_spin_unlock(&vmem_lock); continue; }
+if (!pd_phys) { SpinUnlock(&vmem_lock); continue; }

uint64_t pt_phys = VMemGetPageTablePhys(pd_phys, current_vaddr, 2, 0);
-if (!pt_phys) { vmem_spin_unlock(&vmem_lock); continue; }
+if (!pt_phys) { SpinUnlock(&vmem_lock); continue; }

// Get virtual address of the page table to modify it
uint64_t* pt_virt = (uint64_t*)PHYS_TO_VIRT(pt_phys);
Expand All @@ -307,7 +296,7 @@ void VMemFree(void* vaddr, uint64_t size) {
kernel_space.total_mapped -= PAGE_SIZE;
}

-vmem_spin_unlock(&vmem_lock);
+SpinUnlock(&vmem_lock);

// Now that the page is unmapped and the lock is released,
// free the physical page. Only do this if a valid paddr was found.
@@ -391,10 +380,10 @@ void VMemFlushTLBSingle(uint64_t vaddr) {
* @brief Gets virtual memory statistics
*/
void VMemGetStats(uint64_t* used_pages, uint64_t* total_mapped) {
-vmem_spin_lock(&vmem_lock);
+SpinLock(&vmem_lock);
if (used_pages) *used_pages = kernel_space.used_pages;
if (total_mapped) *total_mapped = kernel_space.total_mapped;
-vmem_spin_unlock(&vmem_lock);
+SpinUnlock(&vmem_lock);
}

// Add this function definition
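Stepping back from the individual hunks: the allocation path now takes the shared lock three times (address-space reservation, each page mapping inside VMemMap, and the final stats update), but the public API is unchanged. A minimal usage sketch of the interface as it stands after this PR (the error handling shown is an assumption, not repository code):

```c
void VMemExample(void) {
    /* Four pages of kernel memory, zeroed by the FastMemset in VMemAlloc. */
    uint64_t* table = (uint64_t*)VMemAlloc(4 * PAGE_SIZE);
    if (!table) {
        PrintKernel("VMemAlloc failed\n");
        return;
    }
    table[0] = 42;
    VMemFree(table, 4 * PAGE_SIZE);   /* unmaps and frees the physical pages */
}
```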
9 changes: 9 additions & 0 deletions kernel/memory/VMem.h
@@ -116,5 +116,14 @@ uint64_t VMemGetPML4PhysAddr(void);
// Debug functions
void VMemDumpPageTable(uint64_t vaddr);
void VMemValidatePageTable(uint64_t* pml4);
+// Locks
+static inline void SpinLock(volatile int* lock) {
+    while (__sync_lock_test_and_set(lock, 1)) {
+        while (*lock) __builtin_ia32_pause();
+    }
+}

+static inline void SpinUnlock(volatile int* lock) {
+    __sync_lock_release(lock);
+}
#endif // VMEM_H
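On the primitives themselves: `__sync_lock_test_and_set` performs an atomic exchange with acquire semantics and returns the previous value, and `__sync_lock_release` is the matching release store, so the pair forms a correct test-and-test-and-set spinlock with `pause` backoff in the inner wait loop. The same builtins also support a non-blocking variant; a sketch, not part of this PR:

```c
// Sketch only: a try-lock built on the same GCC builtins used above.
static inline int SpinTryLock(volatile int* lock) {
    /* Previous value 0 means we acquired the lock; 1 means it was held. */
    return __sync_lock_test_and_set(lock, 1) == 0;
}
```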