diff --git a/include/private/error.h b/include/private/error.h
index 1eb9711..27dcfb8 100644
--- a/include/private/error.h
+++ b/include/private/error.h
@@ -27,6 +27,7 @@ enum {
     /* Memory Protection Errors */
     ERR_STACK_CHECK, /* Stack overflow or corruption detected */
+    ERR_HEAP_CORRUPT, /* Heap corruption or invalid free detected */
 
     /* IPC and Synchronization Errors */
     ERR_PIPE_ALLOC, /* Pipe allocation failed */
diff --git a/kernel/error.c b/kernel/error.c
index 1d6071e..4edb49d 100644
--- a/kernel/error.c
+++ b/kernel/error.c
@@ -22,6 +22,7 @@ static const struct error_code error_desc[] = {
     /* stack guard */
     {ERR_STACK_CHECK, "stack corruption"},
+    {ERR_HEAP_CORRUPT, "heap corruption or invalid free"},
 
     /* IPC / sync */
     {ERR_PIPE_ALLOC, "pipe allocation"},
diff --git a/lib/malloc.c b/lib/malloc.c
index ac439ab..f893d3b 100644
--- a/lib/malloc.c
+++ b/lib/malloc.c
@@ -4,6 +4,7 @@
 #include
 #include
 
+#include "private/error.h"
 #include "private/utils.h"
 
 /* Memory allocator using first-fit strategy with selective coalescing.
@@ -44,15 +45,21 @@ static uint32_t free_blocks_count; /* track fragmentation */
 /* Validate block integrity */
 static inline bool validate_block(memblock_t *block)
 {
-    if (!IS_VALID_BLOCK(block))
+    if (unlikely(!IS_VALID_BLOCK(block)))
         return false;
 
     size_t size = GET_SIZE(block);
-    if (!size || size > MALLOC_MAX_SIZE)
+    if (unlikely(!size || size > MALLOC_MAX_SIZE))
         return false;
 
     /* Check if block extends beyond heap */
-    if ((uint8_t *) block + sizeof(memblock_t) + size > (uint8_t *) heap_end)
+    if (unlikely((uint8_t *) block + sizeof(memblock_t) + size >
+                 (uint8_t *) heap_end))
+        return false;
+
+    if (unlikely(block->next &&
+                 (uint8_t *) block + sizeof(memblock_t) + size !=
+                     (uint8_t *) block->next))
         return false;
 
     return true;
@@ -69,8 +76,9 @@ void free(void *ptr)
     memblock_t *p = ((memblock_t *) ptr) - 1;
 
     /* Validate the block being freed */
-    if (!validate_block(p) || !IS_USED(p)) {
+    if (unlikely(!validate_block(p) || !IS_USED(p))) {
         CRITICAL_LEAVE();
+        panic(ERR_HEAP_CORRUPT);
         return; /* Invalid or double-free */
     }
@@ -78,9 +86,7 @@ void free(void *ptr)
     free_blocks_count++;
 
     /* Forward merge if the next block is free and physically adjacent */
-    if (p->next && !IS_USED(p->next) &&
-        (uint8_t *) p + sizeof(memblock_t) + GET_SIZE(p) ==
-            (uint8_t *) p->next) {
+    if (p->next && !IS_USED(p->next)) {
         p->size = GET_SIZE(p) + sizeof(memblock_t) + GET_SIZE(p->next);
         p->next = p->next->next;
         free_blocks_count--;
@@ -94,9 +100,12 @@ void free(void *ptr)
         current = current->next;
     }
 
-    if (prev && !IS_USED(prev) &&
-        (uint8_t *) prev + sizeof(memblock_t) + GET_SIZE(prev) ==
-            (uint8_t *) p) {
+    if (prev && !IS_USED(prev)) {
+        if (unlikely(!validate_block(prev))) {
+            CRITICAL_LEAVE();
+            panic(ERR_HEAP_CORRUPT);
+            return;
+        }
         prev->size = GET_SIZE(prev) + sizeof(memblock_t) + GET_SIZE(p);
         prev->next = p->next;
         free_blocks_count--;
@@ -109,15 +118,16 @@
 static void selective_coalesce(void)
 {
     memblock_t *p = first_free;
-    uint32_t coalesced = 0;
 
     while (p && p->next) {
         /* Merge only when blocks are FREE *and* adjacent in memory */
-        uint8_t *pend = (uint8_t *) p + sizeof(memblock_t) + GET_SIZE(p);
-        if (!IS_USED(p) && !IS_USED(p->next) && pend == (uint8_t *) p->next) {
+        if (unlikely(!validate_block(p))) {
+            panic(ERR_HEAP_CORRUPT);
+            return;
+        }
+        if (!IS_USED(p) && !IS_USED(p->next)) {
             p->size = GET_SIZE(p) + sizeof(memblock_t) + GET_SIZE(p->next);
             p->next = p->next->next;
-            coalesced++;
             free_blocks_count--;
         } else {
             p = p->next;
@@ -125,6 +135,28 @@ static void selective_coalesce(void)
     }
 }
 
+static inline void split_block(memblock_t *block, size_t size)
+{
+    size_t remaining;
+    memblock_t *new_block;
+
+    if (unlikely(size > GET_SIZE(block))) {
+        panic(ERR_HEAP_CORRUPT);
+        return;
+    }
+    remaining = GET_SIZE(block) - size;
+    /* Split only when the remainder can hold a header plus minimum payload */
+    if (remaining < sizeof(memblock_t) + MALLOC_MIN_SIZE)
+        return;
+    new_block = (memblock_t *) ((size_t) block + sizeof(memblock_t) + size);
+    new_block->next = block->next;
+    new_block->size = remaining - sizeof(memblock_t);
+    MARK_FREE(new_block);
+    block->next = new_block;
+    block->size = size | IS_USED(block);
+    free_blocks_count++; /* New free block created */
+}
+
 /* O(n) first-fit allocation with selective coalescing */
 void *malloc(uint32_t size)
 {
@@ -146,29 +178,23 @@
     memblock_t *p = first_free;
 
     while (p) {
-        if (!validate_block(p)) {
+        if (unlikely(!validate_block(p))) {
             CRITICAL_LEAVE();
+            panic(ERR_HEAP_CORRUPT);
             return NULL; /* Heap corruption detected */
         }
 
         if (!IS_USED(p) && GET_SIZE(p) >= size) {
-            size_t remaining = GET_SIZE(p) - size;
-
-            /* Split block only if remainder is large enough to be useful */
-            if (remaining >= sizeof(memblock_t) + MALLOC_MIN_SIZE) {
-                memblock_t *new_block =
-                    (memblock_t *) ((size_t) p + sizeof(memblock_t) + size);
-                new_block->next = p->next;
-                new_block->size = remaining - sizeof(memblock_t);
-                MARK_FREE(new_block);
-                p->next = new_block;
-                p->size = size;
-                free_blocks_count++; /* New free block created */
-            }
+            split_block(p, size);
             MARK_USED(p);
-            if (free_blocks_count > 0)
-                free_blocks_count--;
+            if (unlikely(free_blocks_count == 0)) {
+                CRITICAL_LEAVE();
+                panic(ERR_HEAP_CORRUPT);
+                return NULL;
+            }
+            free_blocks_count--;
 
             CRITICAL_LEAVE();
             return (void *) (p + 1);
@@ -213,7 +238,7 @@ void *calloc(uint32_t nmemb, uint32_t size)
     if (unlikely(nmemb && size > MALLOC_MAX_SIZE / nmemb))
         return NULL;
 
-    uint32_t total_size = nmemb * size;
+    uint32_t total_size = ALIGN4(nmemb * size);
 
     void *buf = malloc(total_size);
     if (buf)
@@ -236,11 +261,15 @@ void *realloc(void *ptr, uint32_t size)
         return NULL;
     }
 
+    size = ALIGN4(size);
+
     memblock_t *old_block = ((memblock_t *) ptr) - 1;
 
     /* Validate the existing block */
-    if (!validate_block(old_block) || !IS_USED(old_block))
+    if (unlikely(!validate_block(old_block) || !IS_USED(old_block))) {
+        panic(ERR_HEAP_CORRUPT);
         return NULL;
+    }
 
     size_t old_size = GET_SIZE(old_block);
@@ -249,6 +278,33 @@
         old_size - size < sizeof(memblock_t) + MALLOC_MIN_SIZE)
         return ptr;
 
+    /* Fast path for shrinking */
+    if (size <= old_size) {
+        split_block(old_block, size);
+        /* Trigger coalescing only when fragmentation is high */
+        if (free_blocks_count > COALESCE_THRESHOLD)
+            selective_coalesce();
+        CRITICAL_LEAVE();
+        return (void *) (old_block + 1);
+    }
+
+    /* Fast path for growing: absorb the physically adjacent free successor */
+    if (old_block->next && !IS_USED(old_block->next) &&
+        GET_SIZE(old_block) + sizeof(memblock_t) + GET_SIZE(old_block->next) >=
+            size) {
+        old_block->size = GET_SIZE(old_block) + sizeof(memblock_t) +
+                          GET_SIZE(old_block->next);
+        old_block->next = old_block->next->next;
+        MARK_USED(old_block); /* the raw size store above cleared the flag */
+        free_blocks_count--;
+        split_block(old_block, size);
+        /* Trigger coalescing only when fragmentation is high */
+        if (free_blocks_count > COALESCE_THRESHOLD)
+            selective_coalesce();
+        CRITICAL_LEAVE();
+        return (void *) (old_block + 1);
+    }
+
     void *new_buf = malloc(size);
     if (new_buf) {
         memcpy(new_buf, ptr, min(old_size, size));
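
Note for reviewers: dropping the per-merge adjacency arithmetic in free() and
selective_coalesce() is sound only because validate_block() now checks that
every block ends exactly where its successor begins, so a corrupted size field
is caught and promoted to panic(ERR_HEAP_CORRUPT) before any merge runs. The
sketch below is a minimal, standalone illustration of that invariant, not
kernel code: the toy heap, the USED_BIT encoding, and the local memblock_t /
GET_SIZE definitions are assumptions that merely mirror the names in the patch.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-ins mirroring the patch: bit 0 of size doubles as the used flag. */
typedef struct memblock {
    struct memblock *next; /* next header in address order */
    size_t size;           /* payload size | used flag */
} memblock_t;

#define USED_BIT ((size_t) 1)
#define GET_SIZE(b) ((b)->size & ~USED_BIT)

static _Alignas(max_align_t) uint8_t heap[256]; /* toy heap, not the kernel's */

/* The invariant validate_block() now enforces: a block's header plus its
 * payload must end exactly where the successor's header begins. */
static bool adjacent_ok(const memblock_t *b)
{
    if (!b->next)
        return true;
    return (const uint8_t *) b + sizeof(memblock_t) + GET_SIZE(b) ==
           (const uint8_t *) b->next;
}

int main(void)
{
    /* Carve the toy heap into two physically adjacent blocks by hand. */
    memblock_t *a = (memblock_t *) heap;
    memblock_t *b = (memblock_t *) (heap + sizeof(memblock_t) + 64);

    a->size = 64 | USED_BIT;
    a->next = b;
    b->size = sizeof(heap) - 2 * sizeof(memblock_t) - 64;
    b->next = NULL;

    printf("intact layout: %s\n", adjacent_ok(a) ? "ok" : "corrupt");

    /* Simulate an overflow smashing a's size field: the very next walk over
     * the list reports corruption instead of merging into garbage. */
    a->size = 80 | USED_BIT;
    printf("smashed size:  %s\n", adjacent_ok(a) ? "ok" : "corrupt");
    return 0;
}

Running the sketch prints "ok" for the intact layout and "corrupt" once the
size field is overwritten, which is exactly the condition the old code handled
by silently skipping the merge and the patched code now reports as a panic.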