Skip to content

Commit

Permalink
Add the experimental pool allocator
Browse files Browse the repository at this point in the history
  • Loading branch information
edubart committed Aug 11, 2020
1 parent 3daff19 commit 1ae5dd3
Show file tree
Hide file tree
Showing 3 changed files with 167 additions and 1 deletion.
133 changes: 133 additions & 0 deletions lib/allocators/pool.nelua
Original file line number Diff line number Diff line change
@@ -0,0 +1,133 @@
-- Pool Allocator
--
-- The pool allocator allocates chunks from a fixed contiguous buffer of many chunks; allocations
-- pop a free chunk from the pool and deallocations push a chunk back. It works
-- by using a singly linked list of free chunks.
--
-- The purpose of this allocator is to have very fast allocations with almost
-- no runtime cost when the maximum used space is known ahead of time.
--
-- Reallocations and deallocations free space (unlike the Arena allocator).
-- Reallocations greater than the chunk size will always fail.
--
-- The implementation is based on https://www.gingerbill.org/article/2019/02/16/memory-allocation-strategies-004/

require 'memory'
require 'allocators.interface'

## local make_pool_allocator = generalize(function(T, Size, error_on_failure)
##[[
staticassert(traits.is_number(Size), 'PoolAllocator: size must be a number')
staticassert(Size > 0, 'PoolAllocator: size must be greater than 0')
staticassert(traits.is_type(T), 'PoolAllocator: T must be a valid type')
staticassert(T.size >= primtypes.pointer.size, 'PoolAllocator: T size must be at least a pointer in size')
staticassert(T.align & (primtypes.pointer.size-1) == 0, 'PoolAllocator: T must be aligned to pointers')
]]

local T = #[T]#
local PoolFreeNode = @record{
next: PoolFreeNode*
}
local PoolAllocatorT = @record{
initialized: boolean,
head: PoolFreeNode*,
buffer: T[#[Size]#]
}

-- Free all allocations.
-- Rebuilds the free list so that it contains every chunk in the buffer.
-- Any pointer previously returned by alloc becomes dangling after this call.
function PoolAllocatorT:dealloc_all() <noinline>
self.head = nilptr
-- link all free nodes in reverse order, so that the head of the free list
-- ends up pointing at the lowest-addressed chunk and allocations proceed
-- from the start of the buffer
for i:isize=#self.buffer-1,0,-1 do
-- each free chunk's storage is reused to hold the intrusive list node
local node: PoolFreeNode* = (@PoolFreeNode*)(&self.buffer[i])
node.next = self.head
self.head = node
end
end

-- Deallocate a chunk, returning it to the pool.
-- Deallocating nilptr is a no-op. In checked builds, a pointer that is not
-- chunk-aligned or lies outside the buffer triggers a check failure.
function PoolAllocatorT:dealloc(p: pointer)
if unlikely(p == nilptr) then return end
-- is this pointer really valid? it must lie inside the buffer and be
-- aligned to a chunk boundary (offset is an exact multiple of the chunk size)
local offset: usize = (@usize)(p) - (@usize)(&self.buffer[0])
check(offset // #T < #self.buffer and offset % #T == 0, 'PoolAllocator.dealloc: pointer not in buffer bounds')
-- push free node onto the head of the free list (the chunk's own storage
-- is reused to hold the list node)
local node: PoolFreeNode* = (@PoolFreeNode*)(p)
node.next = self.head
self.head = node
end

-- Initialize the pool allocator.
-- There is no need to call this if zero initialized; it's called automatically on first alloc.
-- Marks the allocator as initialized and links every chunk into the free list.
function PoolAllocatorT:init()
self.initialized = true
self:dealloc_all()
end

-- Allocate one chunk of at least `size` bytes.
-- Requests larger than the chunk size always fail. On failure returns nilptr,
-- or raises an error when the allocator was instantiated with error_on_failure.
-- The allocator lazily initializes its free list on the first allocation.
function PoolAllocatorT:alloc(size: usize): pointer
-- a single chunk is the largest possible allocation
if unlikely(size > #T) then
## if error_on_failure then
error('PoolAllocator.alloc: attempt to allocate a size greater than chunk size')
## end
return nilptr
end
-- get the latest free node
local node: PoolFreeNode* = self.head
-- the node will be nilptr if not initialized or out of memory
if unlikely(node == nilptr) then
if not self.initialized then
-- first initialization: build the free list, then retry taking the head
self:init()
node = self.head
else
-- out of memory: every chunk is currently allocated
## if error_on_failure then
error('PoolAllocator.alloc: out of memory')
## end
return nilptr
end
end
-- pop free node
self.head = node.next
-- the node is now actually the allocated chunk
return node
end

-- Allocate one chunk of at least `size` bytes, zero filled.
-- Behaves like alloc, but the returned memory is cleared before use.
function PoolAllocatorT:alloc0(size: usize): pointer
local chunk: pointer = self:alloc(size)
-- nothing to clear when the allocation failed or zero bytes were requested
if unlikely(chunk == nilptr or size == 0) then
return chunk
end
memory.zero(chunk, size)
return chunk
end

-- Reallocate a chunk.
-- With p == nilptr this behaves like alloc; with size == 0 it behaves like
-- dealloc. Sizes up to the chunk size succeed in place (same pointer is
-- returned, since every chunk already has the full chunk capacity); larger
-- sizes fail with nilptr (or error when error_on_failure is set).
-- NOTE(review): on the too-large failure path the original pointer p is NOT
-- freed — caller still owns it; confirm this matches the allocator interface.
function PoolAllocatorT:realloc(p: pointer, size: usize): pointer
if unlikely(p == nilptr) then
return self:alloc(size)
elseif unlikely(size == 0) then
self:dealloc(p)
return nilptr
elseif unlikely(size > #T) then
## if error_on_failure then
error('PoolAllocator.realloc: attempt to allocate a size greater than chunk size')
## end
return nilptr
else
-- shrinking or growing within the chunk size: the chunk already holds
-- #T bytes, so the same pointer remains valid
return p
end
end

-- Reallocate a chunk, zero filling any grown region.
-- Behaves like realloc; when the new size is larger than the old size the
-- bytes from oldsize up to newsize are cleared.
function PoolAllocatorT:realloc0(p: pointer, newsize: usize, oldsize: usize): pointer
p = self:realloc(p, newsize)
if likely(newsize > oldsize and p ~= nilptr) then
-- zero the grown part (byte-indexed from the old end of the allocation)
memory.zero(&(@byte[0]*)(p)[oldsize], newsize - oldsize)
end
return p
end

## implement_allocator_interface(PoolAllocatorT)

## return PoolAllocatorT
## end)

global PoolAllocator: type = #[make_pool_allocator]#
1 change: 1 addition & 0 deletions nelua/types.lua
Original file line number Diff line number Diff line change
Expand Up @@ -454,6 +454,7 @@ ArithmeticType.is_primitive = true

-- Initialize an arithmetic type with its name and size in bytes.
-- Delegates base initialization to Type, then derives alignment and bit width.
function ArithmeticType:_init(name, size)
Type._init(self, name, size)
-- arithmetic types are naturally aligned: alignment equals their byte size
self.align = size
self.bitsize = size * 8
end

Expand Down
34 changes: 33 additions & 1 deletion tests/allocators_test.nelua
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
require 'allocators.arena'
require 'allocators.pool'

do -- basic tests on alloc/realloc/dealloc
do -- Arena
local allocator: ArenaAllocator(1024,8)
local i: integer* = (@integer*)(allocator:alloc0(#integer))
assert($i == 0)
Expand Down Expand Up @@ -32,3 +33,34 @@ do -- basic tests on alloc/realloc/dealloc
assert(allocator.buffer[i] == 0)
end
end

do -- Pool
local Object = @record {
x: integer,
y: integer
}
-- pool holding exactly 2 Object-sized chunks
local allocator: PoolAllocator(@Object, 2)
-- first alloc triggers lazy initialization and returns zeroed memory
local o: Object* = (@Object*)(allocator:alloc0(#Object))
assert(allocator.initialized == true)
assert(o.x == 0)
o.x = 0xff
assert(o.x == 0xff)
allocator:dealloc(o)
-- must reuse the same free address (dealloc pushes the chunk back on the
-- free-list head), and alloc0 must have cleared the previously written 0xff
local no: Object* = (@Object*)(allocator:alloc0(#Object))
assert(no == o and no.x == 0)
-- last avail memory
allocator:alloc0(#Object)
-- should be out of memory
assert(allocator:alloc0(#Object) == nilptr)
-- free all
allocator:dealloc_all()
-- realloc: nilptr behaves like alloc
local i: integer* = (@integer*)(allocator:realloc0(nilptr, #integer, 0))
local ri: integer* = (@integer*)(allocator:realloc0(i, 2*#integer, #integer))
-- should use the same address (growth fits within the chunk size)
assert(i == ri)
-- should deallocate (realloc to size 0 frees the chunk)
ri = (@integer*)(allocator:realloc(ri, 0))
assert(ri == nilptr)
end

0 comments on commit 1ae5dd3

Please sign in to comment.