// Copyright (c) 2022, sin-ack <sin-ack@protonmail.com>
//
// SPDX-License-Identifier: GPL-3.0-only

//! This is an arena allocator which allows chunks to be reclaimed. It is used
//! to allocate individual handles so that they can be used throughout the VM.
//!
//! Each chunk is 1MB. The chunks are self-aligned, and an area at the start
//! of each chunk is reserved for metadata. This allows easy access to the
//! chunk's metadata area so that it can be manipulated when a handle is
//! reclaimed by the allocator.
//!
//! Each chunk maintains a count of how many handles have been allocated from
//! it. When a chunk has no handles allocated from it and is not the last
//! chunk, it is moved to a pool of unused chunks (which is bounded by
//! MaximumUnusedChunks).
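//!
//! A minimal usage sketch for consumers (the file name in the import is a
//! hypothetical example; see also the test at the bottom of this file):
//!
//!     const HandleArea = @import("./handle_area.zig");
//!
//!     var handle_area = try HandleArea.create(allocator);
//!     defer handle_area.destroy();
//!     const handle = try handle_area.allocHandle();
//!     handle_area.freeHandle(handle);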

const std = @import("std");
const Allocator = std.mem.Allocator;

/// The allocator that is internally used to allocate the chunks.
backing_allocator: Allocator,
first_chunk: *Chunk,
latest_chunk: *Chunk,
/// The chunks which are currently unused. They are maintained as a singly
/// linked list. If there are already MaximumUnusedChunks unused chunks, then
/// the newly unused chunk is simply destroyed instead of being added to this
/// list.
unused_chunks: ?*Chunk = null,
unused_chunk_count: usize = 0,

const Self = @This();
const HandleType = *[*]u64;

/// Size (and alignment) of the chunk.
const ChunkSize: usize = 1024 * 1024;
/// Magic to verify that we're pointing at a chunk.
const ChunkMagic: u64 = 0xDEADBEEFBEEFDEAD;
/// The maximum number of unused chunks that will be kept around.
const MaximumUnusedChunks: usize = 8;
/// The maximum number of handles that can be allocated in a single chunk.
/// Handles live in u64-sized slots (see getMemoryArea), so we divide by
/// @sizeOf(u64) rather than @sizeOf(HandleType); the latter would over-count
/// on 32-bit targets, where pointers are only 4 bytes wide.
const MaximumHandlesInChunk = @divExact(ChunkSize - @sizeOf(Chunk), @sizeOf(u64));
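// As a worked example (for illustration; the exact numbers depend on the
// target): on a 64-bit system @sizeOf(Chunk) is 40 bytes, so each chunk
// holds (1048576 - 40) / 8 = 131067 handles.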

/// A chunk, starting with the metadata. Each chunk is exactly ChunkSize bytes.
const Chunk = packed struct {
    // These members take up 32 bytes on 32-bit systems and 40 bytes on
    // 64-bit systems. Either way, the metadata ends on an 8-byte boundary,
    // so the allocation area that follows is suitably aligned for u64 slots.
    magic: u64 = ChunkMagic,
    previous: ?*Chunk = null,
    next: ?*Chunk = null,
    count: u64 = 0,
    /// The topmost handle we allocated (in u64-sized slots from the start
    /// of the chunk's allocation area).
    high_water_mark: u64 = 0,

    pub fn create(allocator: Allocator) !*Chunk {
        // Allocate the chunk aligned to its own size so that a handle's
        // owning chunk can be found by masking the handle's address (see
        // fromHandle).
        const memory_area = try allocator.alignedAlloc(u8, ChunkSize, ChunkSize);
        const self = @ptrCast(*Chunk, memory_area.ptr);
        self.* = .{};

        return self;
    }

    pub fn fromHandle(handle: HandleType) *Chunk {
        const handle_address = @ptrToInt(handle);
        const chunk_address = handle_address & ~(ChunkSize - 1);

        const self = @intToPtr(*Chunk, chunk_address);
        if (self.magic != ChunkMagic) {
            std.debug.panic("!!! Got invalid magic {x} when attempting to get chunk from handle!", .{self.magic});
        }

        return self;
    }
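    // To illustrate the masking above with made-up addresses: ChunkSize is
    // 0x100000, so a handle at 0x7f3254321238 masks down to the chunk
    // metadata at 0x7f3254321238 & ~0xFFFFF = 0x7f3254300000. This only
    // works because chunks are self-aligned (see Chunk.create).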

    pub fn destroy(self: *Chunk, allocator: Allocator) void {
        // NOTE: Have to restore the original length and alignment of the
        // allocation so that the allocator doesn't complain.
        const memory_area = @alignCast(ChunkSize, @ptrCast([*]u8, self));
        allocator.free(memory_area[0..ChunkSize]);
    }

    pub fn insertAfterMe(self: *Chunk, new_chunk: *Chunk) void {
        std.debug.assert(new_chunk.previous == null);
        std.debug.assert(new_chunk.next == null);

        new_chunk.next = self.next;
        new_chunk.previous = self;

        if (self.next) |next| next.previous = new_chunk;
        self.next = new_chunk;
    }

    pub fn insertBeforeMe(self: *Chunk, new_chunk: *Chunk) void {
        std.debug.assert(new_chunk.previous == null);
        std.debug.assert(new_chunk.next == null);

        new_chunk.previous = self.previous;
        new_chunk.next = self;

        if (self.previous) |previous| previous.next = new_chunk;
        self.previous = new_chunk;
    }

    pub fn remove(self: *Chunk) void {
        std.debug.assert(self.count == 0);
        // Wipe my high water mark too, so that I start fresh when I'm used again.
        self.high_water_mark = 0;

        if (self.previous) |previous| previous.next = self.next;
        if (self.next) |next| next.previous = self.previous;

        self.previous = null;
        self.next = null;
    }

    pub fn allocate(self: *Chunk) !HandleType {
        if (self.high_water_mark == MaximumHandlesInChunk) {
            return error.OutOfMemory;
        }

        const memory_area = self.getMemoryArea();
        const new_handle = @ptrCast(HandleType, &memory_area[self.high_water_mark]);

        self.high_water_mark += 1;
        self.count += 1;
        return new_handle;
    }

    /// Unregister the given handle from this chunk. Returns true when the
    /// chunk no longer has any live handles. Note that the handle's slot is
    /// not reused; space is only reclaimed when the whole chunk empties out.
    pub fn free(self: *Chunk, handle: HandleType) bool {
        if (!self.handleInChunk(handle)) {
            @panic("Attempting to free handle from chunk which doesn't own it");
        }

        self.count -= 1;
        return self.count == 0;
    }

    fn getMemoryArea(self: *Chunk) [*]u64 {
        return @intToPtr([*]u64, @ptrToInt(self) + @sizeOf(Chunk));
    }

    fn handleInChunk(self: *Chunk, handle: HandleType) bool {
        const start_of_memory = @ptrToInt(self.getMemoryArea());
        const end_of_memory = start_of_memory + MaximumHandlesInChunk * @sizeOf(u64);
        const handle_address = @ptrToInt(handle);

        return handle_address >= start_of_memory and handle_address < end_of_memory;
    }
};

pub fn create(backing_allocator: Allocator) !Self {
    const initial_chunk = try Chunk.create(backing_allocator);

    return Self{
        .backing_allocator = backing_allocator,
        .first_chunk = initial_chunk,
        .latest_chunk = initial_chunk,
    };
}

pub fn destroy(self: *Self) void {
    var chunk_it: ?*Chunk = self.first_chunk;
    while (chunk_it) |chunk| {
        const next_chunk = chunk.next;
        chunk.destroy(self.backing_allocator);
        chunk_it = next_chunk;
    }

    chunk_it = self.unused_chunks;
    while (chunk_it) |chunk| {
        const next_chunk = chunk.next;
        chunk.destroy(self.backing_allocator);
        chunk_it = next_chunk;
    }
}

pub fn allocHandle(self: *Self) !HandleType {
    // Chunk.allocate can only fail with error.OutOfMemory, which signals
    // that the latest chunk is full; grab a fresh chunk and retry once.
    return self.latest_chunk.allocate() catch {
        try self.allocateNewChunk();
        return self.latest_chunk.allocate() catch
            @panic("!!! Could not allocate a handle even after allocating a new chunk!");
    };
}

pub fn freeHandle(self: *Self, handle: HandleType) void {
    const chunk = Chunk.fromHandle(handle);
    if (chunk.free(handle) and chunk != self.latest_chunk) {
        self.moveChunkIntoUnusedPool(chunk);
    }
}

fn allocateNewChunk(self: *Self) !void {
    const new_chunk = if (self.hasUnusedChunks())
        self.getFirstUnusedChunk()
    else
        try Chunk.create(self.backing_allocator);

    self.latest_chunk.insertAfterMe(new_chunk);
    self.latest_chunk = new_chunk;
}

fn moveChunkIntoUnusedPool(self: *Self, chunk: *Chunk) void {
    std.debug.assert(chunk != self.latest_chunk);
    std.debug.assert(chunk.count == 0);

    if (chunk == self.first_chunk) {
        const next_chunk = chunk.next.?;
        chunk.remove();
        self.first_chunk = next_chunk;
    } else {
        chunk.remove();
    }

    if (self.unused_chunk_count < MaximumUnusedChunks) {
        if (self.unused_chunks) |first_unused_chunk| {
            first_unused_chunk.insertBeforeMe(chunk);
        }

        self.unused_chunks = chunk;
        self.unused_chunk_count += 1;
    } else {
        chunk.destroy(self.backing_allocator);
    }
}

fn hasUnusedChunks(self: *Self) bool {
    return self.unused_chunk_count > 0;
}

fn getFirstUnusedChunk(self: *Self) *Chunk {
    std.debug.assert(self.hasUnusedChunks());

    const first_unused_chunk = self.unused_chunks.?;
    const next_unused_chunk = first_unused_chunk.next;

    first_unused_chunk.remove();
    self.unused_chunks = next_unused_chunk;

    self.unused_chunk_count -= 1;
    return first_unused_chunk;
}
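
// A minimal usage sketch exercising the public API. The test name, the use
// of std.testing.allocator, and the stored pointer value are illustrative
// assumptions; in the VM proper, handles would point at relocatable object
// memory.
test "allocate, use and free a single handle" {
    var handle_area = try create(std.testing.allocator);
    defer handle_area.destroy();

    const handle = try handle_area.allocHandle();

    // The handle is a stable *[*]u64 slot; store a pointer through it and
    // read it back.
    var value: u64 = 42;
    handle.* = @ptrCast([*]u64, &value);
    try std.testing.expectEqual(@as(u64, 42), handle.*[0]);

    handle_area.freeHandle(handle);
}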