Allocate a guard page for each newly created thread #2545

Merged 2 commits on May 28, 2019
4 changes: 2 additions & 2 deletions std/os.zig
@@ -1889,14 +1889,14 @@ pub const MProtectError = error{
};

/// `memory.len` must be page-aligned.
-pub fn mprotect(memory: [*]align(mem.page_size) u8, protection: u32) MProtectError!void {
+pub fn mprotect(memory: []align(mem.page_size) u8, protection: u32) MProtectError!void {
assert(mem.isAligned(memory.len, mem.page_size));
switch (errno(system.mprotect(memory.ptr, memory.len, protection))) {
0 => return,
EINVAL => unreachable,
EACCES => return error.AccessDenied,
ENOMEM => return error.OutOfMemory,
-else => return unexpectedErrno(err),
+else => |err| return unexpectedErrno(err),
}
}

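For reference, a minimal sketch of calling the updated slice-based os.mprotect (a hypothetical caller, not part of this diff; it only uses the os.mmap and os.mprotect signatures that appear in the changes on this page):

    const std = @import("std");
    const os = std.os;
    const mem = std.mem;

    // Hypothetical example: reserve one page with no permissions, then commit
    // it as read/write. os.mprotect now takes a slice and asserts that its
    // length is page-aligned, which a single whole page trivially satisfies.
    fn makeOnePageWritable() !void {
        const page = try os.mmap(
            null,
            mem.page_size,
            os.PROT_NONE,
            os.MAP_PRIVATE | os.MAP_ANONYMOUS,
            -1,
            0,
        );
        defer os.munmap(page);

        try os.mprotect(page, os.PROT_READ | os.PROT_WRITE);
        page[0] = 0xaa; // the page is now committed and writable
    }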
33 changes: 24 additions & 9 deletions std/thread.zig
@@ -223,15 +223,17 @@ pub const Thread = struct {
}
};

-const MAP_GROWSDOWN = if (os.linux.is_the_target) os.linux.MAP_GROWSDOWN else 0;
-
+var guard_end_offset: usize = undefined;
var stack_end_offset: usize = undefined;
var thread_start_offset: usize = undefined;
var context_start_offset: usize = undefined;
var tls_start_offset: usize = undefined;
const mmap_len = blk: {
-// First in memory will be the stack, which grows downwards.
-var l: usize = mem.alignForward(default_stack_size, mem.page_size);
+var l: usize = mem.page_size;
+// Allocate a guard page right after the end of the stack region
+guard_end_offset = l;
+// The stack itself, which grows downwards.
+l = mem.alignForward(l + default_stack_size, mem.page_size);
stack_end_offset = l;
// Above the stack, so that it can be in the same mmap call, put the Thread object.
l = mem.alignForward(l, @alignOf(Thread));
@@ -253,20 +255,33 @@
}
break :blk l;
};
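To make the layout concrete, here is a small sketch that mirrors the offset computation above with illustrative constants (the page size and stack size are assumptions for the example, not the values std actually uses, and the stack size is chosen page-aligned so the mem.alignForward step can be elided):

    const std = @import("std");

    test "guard page sits below the stack" {
        const page_size: usize = 4096; // illustrative, not mem.page_size
        const stack_size: usize = 8 * 1024 * 1024; // illustrative, already page-aligned

        var l: usize = page_size; // bytes [0, page_size) form the guard page
        const guard_end = l;
        l += stack_size; // the stack occupies [guard_end, stack_end) and grows down toward the guard
        const stack_end = l;
        // The Thread object, the spawn context and the TLS area are then laid
        // out above stack_end, so a single mmap covers everything.

        std.debug.assert(guard_end == 4096);
        std.debug.assert(stack_end == 4096 + 8 * 1024 * 1024);
    }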
+// Map the whole stack with no rw permissions to avoid committing the
+// whole region right away
const mmap_slice = os.mmap(
null,
mem.alignForward(mmap_len, mem.page_size),
-os.PROT_READ | os.PROT_WRITE,
-os.MAP_PRIVATE | os.MAP_ANONYMOUS | MAP_GROWSDOWN,
+os.PROT_NONE,
+os.MAP_PRIVATE | os.MAP_ANONYMOUS,
-1,
0,
) catch |err| switch (err) {
-error.MemoryMappingNotSupported => unreachable, // no file descriptor
-error.AccessDenied => unreachable, // no file descriptor
-error.PermissionDenied => unreachable, // no file descriptor
+error.MemoryMappingNotSupported => unreachable,
+error.AccessDenied => unreachable,
+error.PermissionDenied => unreachable,
else => |e| return e,
};
errdefer os.munmap(mmap_slice);

+// Map everything but the guard page as rw
+os.mprotect(
+mmap_slice[guard_end_offset..],
+os.PROT_READ | os.PROT_WRITE,
+) catch |err| switch (err) {
+error.OutOfMemory => unreachable,

Review comment from @andrewrk (Member), May 28, 2019:

Is this truly unreachable? My understanding is that this "splits" a mapping which can potentially result in ENOMEM. Fortunately OutOfMemory is one of the possible errors of spawning a thread, so I think we can just return it.

+error.AccessDenied => unreachable,
+else => |e| return e,
+};
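Applying that suggestion would mean dropping the unreachable on error.OutOfMemory so the error simply propagates to the caller of spawn. A sketch of the adjusted error handling (hypothetical helper; this is not the follow-up that was actually committed):

    const std = @import("std");
    const os = std.os;
    const mem = std.mem;

    // Hypothetical helper: commit everything above the lowest (guard) page of
    // an anonymous PROT_NONE mapping as read/write.
    fn commitAboveGuard(mapping: []align(mem.page_size) u8) !void {
        const rw_part = @alignCast(mem.page_size, mapping[mem.page_size..]);
        os.mprotect(rw_part, os.PROT_READ | os.PROT_WRITE) catch |err| switch (err) {
            // We own this anonymous mapping, so a permission failure cannot happen.
            error.AccessDenied => unreachable,
            // error.OutOfMemory is no longer unreachable: protecting a sub-range
            // splits the mapping, and the kernel may refuse that with ENOMEM.
            else => |e| return e,
        };
    }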

const mmap_addr = @ptrToInt(mmap_slice.ptr);

const thread_ptr = @alignCast(@alignOf(Thread), @intToPtr(*Thread, mmap_addr + thread_start_offset));
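Taken together, the approach in this PR is: compute one contiguous layout (guard page, then stack, then Thread object, context and TLS), reserve the whole range with a single PROT_NONE mmap so nothing is committed up front, and then mprotect everything above the lowest page to read/write. The lowest page stays inaccessible, so a thread that overflows its stack faults on the guard page instead of silently overwriting whatever lies below it. A standalone sketch of that pattern, using only the os.mmap and os.mprotect signatures shown in this diff (the helper name and page count are illustrative, not from the PR):

    const std = @import("std");
    const os = std.os;
    const mem = std.mem;

    // Hypothetical helper: reserve stack_pages + 1 pages, keep the lowest page
    // as an inaccessible guard, and return the usable read/write part above it.
    fn reserveStackWithGuard(stack_pages: usize) ![]align(mem.page_size) u8 {
        const total_len = (stack_pages + 1) * mem.page_size;

        // Reserve the whole range without permissions; no memory is committed yet.
        const whole = try os.mmap(
            null,
            total_len,
            os.PROT_NONE,
            os.MAP_PRIVATE | os.MAP_ANONYMOUS,
            -1,
            0,
        );
        errdefer os.munmap(whole);

        // Commit everything above the guard page as read/write.
        const usable = @alignCast(mem.page_size, whole[mem.page_size..]);
        try os.mprotect(usable, os.PROT_READ | os.PROT_WRITE);

        // whole[0..mem.page_size] stays PROT_NONE; touching it faults immediately.
        return usable;
    }

A caller would then carve the Thread object, the spawn context and the TLS block out of the top of the returned slice, much as the precomputed offsets in the diff above do, with the stack growing downward from there toward the guard.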