Add the stack allocator
edubart committed Aug 14, 2020
1 parent 0be6880 commit 1a06a2b
Showing 7 changed files with 241 additions and 76 deletions.
64 changes: 35 additions & 29 deletions lib/allocators/arena.nelua
@@ -26,7 +26,8 @@
-- this can be changed to runtime errors by setting `error_on_failure` to true.
-- Remember to use the proper alignment for the allocated objects to have fast memory access.
--
- -- The implementation is based on https://www.gingerbill.org/article/2019/02/08/memory-allocation-strategies-002/
+ -- The implementation is based on
+ -- https://www.gingerbill.org/article/2019/02/08/memory-allocation-strategies-002/

require 'allocators.interface'

@@ -36,17 +37,17 @@ end

local function memcpy(dest: pointer, src: pointer, n: csize): pointer <cimport,cinclude'<string.h>',nodecl> end

- ## local make_arena_allocator = generalize(function(Size, Align, error_on_failure)
- ## Align = Align or 8
- ## staticassert(Size % Align == 0, 'ArenaAllocator: size must be multiple of align')
- ## staticassert(Align & (Align-1) == 0, 'ArenaAllocator: align must be a power of two')
+ ## local make_arena_allocator = generalize(function(SIZE, ALIGN, error_on_failure)
+ ## ALIGN = ALIGN or 8
+ ## staticassert(SIZE % ALIGN == 0, 'ArenaAllocator: size must be multiple of align')
+ ## staticassert(ALIGN & (ALIGN-1) == 0, 'ArenaAllocator: align must be a power of two')

- local Size <comptime> = #[Size]#
- local Align <comptime> = #[Align]#
+ local SIZE <comptime> = #[SIZE]#
+ local ALIGN <comptime> = #[ALIGN]#
local ArenaAllocatorT = @record{
prev_offset: usize,
curr_offset: usize,
- buffer: byte[Size]
+ buffer: byte[SIZE]
}

-- Free all allocations.
@@ -57,58 +58,63 @@ local function memcpy(dest: pointer, src: pointer, n: csize): pointer <cimport,c

function ArenaAllocatorT:alloc(size: usize): pointer
local base: usize = (@usize)(&self.buffer[0])
- local offset: usize = align_forward(base + self.curr_offset, Align) - base
+ local offset: usize = align_forward(base + self.curr_offset, ALIGN) - base
local next_offset: usize = offset + size
- if unlikely(next_offset > Size) then
+ if unlikely(next_offset > SIZE) then
## if error_on_failure then
error('ArenaAllocator.alloc: out of memory')
## end
return nilptr
end
local p: pointer = &self.buffer[offset]
- self.prev_offset = self.curr_offset
+ self.prev_offset = offset
self.curr_offset = next_offset
return p
end

function ArenaAllocatorT:dealloc(p: pointer)
if unlikely(p == nilptr) then return end
-- get offset for this pointer
local offset: usize = (@usize)(p) - (@usize)(&self.buffer[0])
- if offset == self.prev_offset then
+ check(offset < SIZE, 'ArenaAllocator.dealloc: pointer not in buffer of bounds')
+ -- we can only dealloc the most recent allocation once
+ -- any other allocation we can do nothing about
+ if likely(offset == self.prev_offset) then
self.curr_offset = offset
end
end

function ArenaAllocatorT:realloc(p: pointer, newsize: usize, oldsize: usize): pointer
if unlikely(p == nilptr) then
return self:alloc(newsize)
elseif unlikely(newsize == oldsize) then
return p
elseif unlikely(newsize == 0) then
self:dealloc(p)
return nilptr
end
local offset: usize = (@usize)(p) - (@usize)(&self.buffer[0])
- check(offset < Size, 'ArenaAllocator.realloc: pointer not in buffer of bounds')
- if offset == self.prev_offset then -- modify last allocation
- if likely(newsize > 0) then
- local next_offset: usize = offset + newsize
- if unlikely(next_offset > Size) then
- ## if error_on_failure then
- error('ArenaAllocator.realloc: out of memory')
- ## end
- return nilptr
- end
- self.curr_offset = next_offset
- else
- self.curr_offset = offset
+ check(offset < SIZE, 'ArenaAllocator.realloc: pointer not in buffer of bounds')
+ if likely(offset == self.prev_offset) then -- is the very last allocation?
+ -- we can just update the offset here to grow or shrink
+ local next_offset: usize = offset + newsize
+ if unlikely(next_offset > SIZE) then
+ ## if error_on_failure then
+ error('ArenaAllocator.realloc: out of memory')
+ ## end
+ return nilptr
+ end
+ self.curr_offset = next_offset
+ return p
- elseif newsize > oldsize then -- growing, move to a new allocation
+ elseif newsize > oldsize then -- growing
+ -- when growing we need to move to a new allocation
if unlikely(newsize == 0) then return nilptr end
local newp: pointer = self:alloc(newsize)
if likely(newp ~= nilptr and p ~= nilptr and oldsize ~= 0) then
-- copy the mem to the new location
memcpy(newp, p, oldsize)
end
-- no dealloc is done on old pointer because it's not possible in this allocator
return newp
- else -- shrinking, can return the same pointer
+ else -- same size or shrinking, can return the same pointer
return p
end
end
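A minimal usage sketch of the arena after this change may help when reading the diff above. It assumes the file exposes the generic as ArenaAllocator(SIZE, ALIGN), mirroring how heap.nelua exposes HeapAllocator below; the require path, the 1024/16 figures and the variable names are illustrative only, not part of this commit. Note that align_forward(addr, align) rounds an address up to the next multiple of a power-of-two alignment (the usual (addr + align-1) & ~(align-1) trick), so every returned pointer is ALIGN-aligned.

  require 'allocators.arena'

  -- hypothetical instantiation: a 1024-byte arena with 16-byte alignment
  local arena: ArenaAllocator(1024, 16)

  local a: pointer = arena:alloc(100) -- bumps curr_offset past an aligned 100-byte block
  local b: pointer = arena:alloc(200) -- bumps again; prev_offset now marks b
  b = arena:realloc(b, 400, 200)      -- b is the last allocation, so only curr_offset grows
  arena:dealloc(b)                    -- b is still the most recent allocation, so it is released
  arena:dealloc(a)                    -- does nothing: only the last allocation can be undone
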
16 changes: 8 additions & 8 deletions lib/allocators/heap.nelua
@@ -14,7 +14,7 @@
-- or if you want to avoid the system's default allocator for some reason,
-- or if the system does not have an allocator.
--
- -- It's memory cannot grow automatically, use the system's general purpose allocator for that.
+ -- Its memory cannot grow automatically, use the system's general purpose allocator for that.
-- The allocator is not thread safe, it was designed to be used in single thread applications.
-- Allocations are always 16 byte aligned.
--
@@ -409,21 +409,21 @@ end

local HEAP_SIZE: usize <comptime> = #[HEAP_SIZE]#

- local HeapAllocatorN = @record{
+ local HeapAllocatorT = @record{
initialized: boolean,
heap: Heap,
buffer: byte[HEAP_SIZE]
}

-- Initialize the heap allocator.
-- This is called automatically when needed on first alloc/realloc.
- function HeapAllocatorN:init()
+ function HeapAllocatorT:init()
self.heap = {}
self.heap:add_memory_region(&self.buffer[0], HEAP_SIZE)
self.initialized = true
end

- function HeapAllocatorN:alloc(size: usize)
+ function HeapAllocatorT:alloc(size: usize)
if unlikely(not self.initialized) then self:init() end
local p: pointer = self.heap:alloc(size)
## if error_on_failure then
@@ -432,11 +432,11 @@ end
return p
end

- function HeapAllocatorN:dealloc(p: pointer)
+ function HeapAllocatorT:dealloc(p: pointer)
self.heap:dealloc(p)
end

- function HeapAllocatorN:realloc(p: pointer, newsize: usize, oldsize: usize)
+ function HeapAllocatorT:realloc(p: pointer, newsize: usize, oldsize: usize)
if unlikely(not self.initialized) then self:init() end
if unlikely(newsize == oldsize) then
return p
@@ -450,9 +450,9 @@ end

require 'allocators.interface'

- ## implement_allocator_interface(HeapAllocatorN)
+ ## implement_allocator_interface(HeapAllocatorT)

- ## return HeapAllocatorN
+ ## return HeapAllocatorT
## end)

global HeapAllocator: type = #[make_heap_allocator]#
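
A minimal usage sketch of the fixed-buffer heap, assuming the generic is instantiated by buffer size as HEAP_SIZE suggests; the require path, the 65536 figure and the variable names are illustrative assumptions, not part of this commit.

  require 'allocators.heap'

  -- hypothetical instantiation: a heap allocator backed by a 65536-byte static buffer
  local heap: HeapAllocator(65536)

  local p: pointer = heap:alloc0(128) -- zero-initialized block; alloc0 comes from the interface
  p = heap:realloc(p, 256, 128)       -- may move the block within the fixed buffer
  heap:dealloc(p)                     -- unlike the arena, any block can be freed at any time
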
52 changes: 28 additions & 24 deletions lib/allocators/interface.nelua
@@ -4,32 +4,36 @@

require 'span'

- ## function implement_allocator_interface(allocator)
+ ## function implement_allocator_interface(Allocator)
local function memcpy(dest: pointer, src: pointer, n: csize): pointer <cimport,cinclude'<string.h>',nodecl> end
+ local function memmove(dest: pointer, src: pointer, n: csize): pointer <cimport,cinclude'<string.h>',nodecl> end
local function memset(s: pointer, c: cint, n: csize): pointer <cimport,cinclude'<string.h>',nodecl> end

local is_span = #[concept(function(x) return x.type.is_span end)]#
- local allocator = #[allocator]#
+ local Allocator = #[Allocator]#

- ## if not allocator.value:get_metafield('realloc') then
+ ## if not Allocator.value:get_metafield('realloc') then
-- naive implementation for realloc
- function allocator:realloc(p: pointer, newsize: usize, oldsize: usize): pointer
+ function Allocator:realloc(p: pointer, newsize: usize, oldsize: usize): pointer
if unlikely(p == nilptr) then
return self:alloc(newsize)
elseif unlikely(newsize == 0) then
self:dealloc(p)
return nilptr
elseif unlikely(newsize == oldsize) then
return p
- else
+ else -- shrinking or growing
local newp: pointer = self:alloc(newsize)
- if likely(p ~= nilptr) then
+ if likely(newp ~= nilptr and oldsize ~= 0) then
- local copysize = oldsize
+ -- copy the old size when growing or the new size when shrinking
+ local minsize: usize = oldsize
if newsize < oldsize then
- copysize = newsize
+ minsize = newsize
end
- memcpy(newp, p, copysize)
+ -- use memmove instead of memcpy because we don't really know
+ -- if the Allocator can overlap memory
+ memmove(newp, p, minsize)
end
self:dealloc(p)
end
Expand All @@ -38,9 +42,9 @@ require 'span'
end
## end

- ## if not allocator.value:get_metafield('alloc0') then
+ ## if not Allocator.value:get_metafield('alloc0') then
-- naive implementation for alloc0
- function allocator:alloc0(size: usize): pointer
+ function Allocator:alloc0(size: usize): pointer
local p: pointer = self:alloc(size)
if likely(p ~= nilptr and size ~= 0) then
memset(p, 0, size)
@@ -49,9 +53,9 @@ require 'span'
end
## end

- ## if not allocator.value:get_metafield('realloc0') then
+ ## if not Allocator.value:get_metafield('realloc0') then
-- naive implementation for realloc0
- function allocator:realloc0(p: pointer, newsize: usize, oldsize: usize): pointer
+ function Allocator:realloc0(p: pointer, newsize: usize, oldsize: usize): pointer
p = self:realloc(p, newsize, oldsize)
if likely(newsize > oldsize and p ~= nilptr) then
-- zero the grown part
@@ -61,7 +65,7 @@ require 'span'
end
## end

- function allocator:spanalloc(T: type, size: usize)
+ function Allocator:spanalloc(T: type, size: usize)
local s: span(T)
if likely(size > 0) then
s.data = (@T[0]*)(self:alloc(size * #T))
@@ -70,7 +74,7 @@ require 'span'
return s
end

- function allocator:spanalloc0(T: type, size: usize)
+ function Allocator:spanalloc0(T: type, size: usize)
local s: span(T)
if likely(size > 0) then
s.data = (@T[0]*)(self:alloc0(size * #T))
@@ -79,7 +83,7 @@ require 'span'
return s
end

- function allocator:spanrealloc(s: is_span, size: usize)
+ function Allocator:spanrealloc(s: is_span, size: usize)
local T: type = #[s.type.subtype]#
local p: T[0]* = (@T[0]*)(self:realloc(s.data, size * #T, s.size))
if unlikely(size > 0 and p == nilptr) then
@@ -91,7 +95,7 @@ require 'span'
return s
end

- function allocator:spanrealloc0(s: is_span, size: usize)
+ function Allocator:spanrealloc0(s: is_span, size: usize)
local T: type = #[s.type.subtype]#
local p: T[0]* = (@T[0]*)(self:realloc0(s.data, size * #T, s.size * #T))
if unlikely(size > 0 and p == nilptr) then
@@ -103,42 +107,42 @@ require 'span'
return s
end

- function allocator:spandealloc(s: is_span)
+ function Allocator:spandealloc(s: is_span)
if unlikely(s.size == 0) then return end
self:dealloc(s.data)
end

- function allocator:spannew(what: auto, size: usize) <inline>
+ function Allocator:spannew(what: auto, size: usize) <inline>
## if what.type.is_type then
local T = what
local spn: span(T) = self:spanalloc0(T, size)
- check(spn.size > 0, 'allocator.spannew: out of memory')
+ check(spn.size > 0, 'Allocator.spannew: out of memory')
## else
local T = #[what.type]#
local spn: span(T) = self:spanalloc(T, size)
- check(spn.size > 0, 'allocator.spannew: out of memory')
+ check(spn.size > 0, 'Allocator.spannew: out of memory')
for i:usize=0,<size do
memcpy(&spn[i], &what, #T)
end
## end
return spn
end

- function allocator:new(what: auto) <inline>
+ function Allocator:new(what: auto) <inline>
## if what.type.is_type then
local T = what
local ptr: T* = (@T*)(self:alloc0(#T))
- check(ptr ~= nilptr, 'allocator.new: out of memory')
+ check(ptr ~= nilptr, 'Allocator.new: out of memory')
## else
local T = #[what.type]#
local ptr: T* = (@T*)(self:alloc(#T))
- check(ptr ~= nilptr, 'allocator.new: out of memory')
+ check(ptr ~= nilptr, 'Allocator.new: out of memory')
memcpy(ptr, &what, #T)
## end
return ptr
end

- function allocator:delete(s: #[concept(function(x)
+ function Allocator:delete(s: #[concept(function(x)
return x.type.is_pointer or x.type.is_span end
)]#) <inline>
## if s.type.is_span then
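To illustrate what the interface adds on top of a bare alloc/dealloc/realloc triple, here is a small sketch using the span and object helpers defined above. It reuses the hypothetical arena instance from the earlier example; the names, sizes and require paths are illustrative assumptions, not part of this commit.

  require 'span'
  require 'allocators.arena'

  local Point = @record{x: number, y: number}
  local arena: ArenaAllocator(1024, 16)

  -- new(@T) allocates one zero-initialized T; new(value) allocates and copies the value
  local p: Point* = arena:new(@Point)
  local origin: Point = {x = 0, y = 0}
  local q: Point* = arena:new(origin)

  -- spanalloc0 returns a zero-initialized span of 8 int32, spandealloc releases it
  local buf: span(int32) = arena:spanalloc0(@int32, 8)
  arena:spandealloc(buf)

  -- delete accepts a pointer or a span; for the arena these are no-ops unless
  -- the argument happens to be the most recent allocation
  arena:delete(q)
  arena:delete(p)
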
27 changes: 14 additions & 13 deletions lib/allocators/pool.nelua
@@ -1,35 +1,36 @@
-- Pool Allocator
--
- -- The pool allocator allocate chunks from fixed contiguous buffer of many chunks, allocations
- -- pops a free chunk from the pool and deallocations pushes a chunk back. It works
- -- by using a single linked list of free chunks.
+ -- The pool allocator allocate chunks from fixed contiguous buffer of many chunks,
+ -- allocations pops a free chunk from the pool and deallocations pushes a chunk back.
+ -- It works by using a single linked list of free chunks.
--
-- The purpose of this allocator is to have very fast allocations with almost
-- no runtime cost when the maximum used space is known ahead.
--
-- Reallocations and deallocations free space (unlikely the Arena allocator).
- -- Reallocations greater than the chunk size will always fails.
+ -- Allocations greater than the chunk size will always fails.
--
- -- The implementation is based on https://www.gingerbill.org/article/2019/02/16/memory-allocation-strategies-004/
+ -- The implementation is based on
+ -- https://www.gingerbill.org/article/2019/02/16/memory-allocation-strategies-004/

require 'allocators.interface'

- ## local make_pool_allocator = generalize(function(T, Size, error_on_failure)
+ ## local make_pool_allocator = generalize(function(T, SIZE, error_on_failure)
##[[
staticassert(traits.is_type(T), 'PoolAllocator: T must be a type')
- local ChunkSize = T.size
- local ChunkAlign = math.max(T.align, primtypes.pointer.size)
- staticassert(traits.is_number(Size), 'PoolAllocator: size must be a number')
- staticassert(Size * ChunkSize > 0, 'PoolAllocator: size must be greater than 0')
- staticassert(ChunkSize >= primtypes.pointer.size, 'PoolAllocator: chunk size must be at least a pointer in size')
+ local CHUNK_SIZE = T.size
+ local CHUNK_ALIGN = math.max(T.align, primtypes.pointer.size)
+ staticassert(traits.is_number(SIZE), 'PoolAllocator: size must be a number')
+ staticassert(SIZE * CHUNK_SIZE > 0, 'PoolAllocator: size must be greater than 0')
+ staticassert(CHUNK_SIZE >= primtypes.pointer.size, 'PoolAllocator: chunk size must be at least a pointer in size')
]]

local PoolFreeNode = @record{next: PoolFreeNode*}
- local PoolChunk <aligned(#[ChunkAlign]#)> = @record {data: byte[#[ChunkSize]#]}
+ local PoolChunk <aligned(#[CHUNK_ALIGN]#)> = @record {data: byte[#[CHUNK_SIZE]#]}
local PoolAllocatorT = @record{
initialized: boolean,
head: PoolFreeNode*,
- buffer: PoolChunk[#[Size]#]
+ buffer: PoolChunk[#[SIZE]#]
}

-- Free all allocations.
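The free list behind the pool is intrusive: while a chunk is unused, its first pointer-sized bytes hold a PoolFreeNode pointing to the next free chunk, which is why the staticassert above requires chunks to be at least pointer-sized. A minimal usage sketch, assuming the generic is instantiated as PoolAllocator(T, SIZE) and gains the interface helpers like the other allocators; the require path, names and the 256 figure are illustrative assumptions, not part of this commit.

  require 'allocators.pool'

  local Particle = @record{x: number, y: number, vx: number, vy: number}

  -- hypothetical instantiation: a pool with room for 256 Particle chunks
  local pool: PoolAllocator(Particle, 256)

  local a: Particle* = pool:new(@Particle) -- pops a free chunk off the head of the list
  local b: Particle* = pool:new(@Particle)
  pool:delete(a)                           -- pushes a's chunk back onto the free list
  pool:delete(b)                           -- unlike the arena, chunks can be freed in any order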