/
child_malloc.h
231 lines (206 loc) · 6.52 KB
/
child_malloc.h
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
// Copyright Microsoft and Project Verona Contributors.
// SPDX-License-Identifier: MIT
#pragma once
#ifdef __FreeBSD__
# define SNMALLOC_USE_THREAD_CLEANUP 1
#endif
#define SNMALLOC_PLATFORM_HAS_GETENTROPY 1
#define SNMALLOC_PROVIDE_OWN_CONFIG 1
#include <process_sandbox/helpers.h>
#include <process_sandbox/sandbox_fd_numbers.h>
#include <process_sandbox/sandbox_meta_entry.h>
#include <snmalloc/snmalloc_core.h>
namespace snmalloc
{
  // Forward declarations of snmalloc slab types.  They are only declared,
  // never used, in the rest of this header.
  // NOTE(review): nothing below references these names — they look like a
  // holdover from an earlier snmalloc API; confirm whether they can be
  // removed.
  class Superslab;
  class Mediumslab;
  class Largeslab;
}
namespace sandbox
{
/**
* Helper range that deallocates memory provided by the PAL.
*/
struct PalRange : public snmalloc::PalRange<snmalloc::DefaultPal>
{
/**
* This range is intended to be used directly by the small buddy allocator
* without a pagemap to store extra metadata and so must support aligned
* allocation, even if the underlying PAL does not.
*/
static constexpr bool Aligned = true;
/**
* All operations on the address space are locked by the kernel.
*/
static constexpr bool ConcurrencySafe = true;
/**
* Deallocate memory using munmap.
*/
void dealloc_range(snmalloc::capptr::Chunk<void> base, size_t size);
/**
* Allocate memory, trimming to guarantee alignment if necessary.
*/
snmalloc::capptr::Chunk<void> alloc_range(size_t size)
{
if constexpr (snmalloc::pal_supports<
snmalloc::AlignedAllocation,
snmalloc::DefaultPal>)
{
return snmalloc::PalRange<snmalloc::DefaultPal>::alloc_range(size);
}
else
{
size_t overallocation = size * 2;
auto alloc =
snmalloc::PalRange<snmalloc::DefaultPal>::alloc_range(overallocation);
auto end = snmalloc::pointer_offset(alloc, size * 2);
auto aligned_base = snmalloc::pointer_align_up(alloc, size);
auto aligned_end = snmalloc::pointer_offset(aligned_base, size);
if (end.unsafe_ptr() > aligned_end.unsafe_ptr())
{
dealloc_range(
aligned_end, end.unsafe_uintptr() - aligned_end.unsafe_uintptr());
}
if (aligned_base.unsafe_ptr() > alloc.unsafe_ptr())
{
dealloc_range(
alloc, aligned_base.unsafe_uintptr() - alloc.unsafe_uintptr());
}
return aligned_base;
}
}
};
  /**
   * The snmalloc configuration used for the child process.
   *
   * Metadata lives in child-private memory (via `metadata_range`), while the
   * pagemap is a read-only shared mapping whose updates go through RPCs to
   * the parent (see `Pagemap` and the out-of-line `alloc_chunk`).
   */
  class SnmallocGlobals : public snmalloc::CommonConfig
  {
    /**
     * Private allocator. Used to manage metadata allocations, which are
     * not shared with the parent.
     */
    inline static snmalloc::SmallBuddyRange<PalRange> metadata_range;

  public:
    /**
     * Expose a PAL that doesn't do allocation.
     */
    using Pal = snmalloc::PALNoAlloc<snmalloc::DefaultPal>;

    /**
     * Thread-local state. Currently not used.
     */
    struct LocalState
    {};

    /**
     * This back end does not need to hold any extra metadata and so exports the
     * default slab metadata type.
     */
    using SlabMetadata = snmalloc::FrontendSlabMetadata;

    /**
     * Adaptor for the pagemap that is managed by the parent. This is backed
     * by a shared-memory object that is passed into the child on process
     * start and is then mapped read-only in the child. All updates require
     * an RPC to the parent, which will validate the updates and install them.
     */
    struct Pagemap
    {
      /**
       * This back end does not need to hold any extra metadata and so exports
       * the default pagemap metadata type.
       */
      using Entry = SandboxMetaEntry;

      /**
       * The pagemap that spans the entire address space. This uses a read-only
       * mapping of a shared memory region as its backing store.
       */
      inline static snmalloc::FlatPagemap<
        snmalloc::MIN_CHUNK_BITS,
        Entry,
        snmalloc::DefaultPal,
        /*fixed range*/ false>
        pagemap;

      /**
       * Return the metadata associated with an address. This reads the
       * read-only mapping of the pagemap directly.
       */
      template<bool potentially_out_of_range = false>
      static const auto& get_metaentry(snmalloc::address_t p)
      {
        return pagemap.template get<potentially_out_of_range>(p);
      }

      /**
       * Ensure that the range is valid. This is a no-op: the parent is
       * responsible for ensuring that the pagemap covers the entire address
       * range.
       */
      static void register_range(snmalloc::address_t, size_t) {}
    };

    /**
     * Allocate a chunk of memory and install its metadata in the pagemap.
     * This performs a single RPC that validates the metadata and then
     * allocates and installs the entry.
     */
    static std::pair<snmalloc::capptr::Chunk<void>, SlabMetadata*>
    alloc_chunk(LocalState& local_state, size_t size, uintptr_t ras);

    /**
     * Return a chunk of memory to the parent.  Defined out of line;
     * presumably the counterpart RPC to `alloc_chunk` — confirm in the
     * implementation file.
     */
    static void dealloc_chunk(
      LocalState& local_state,
      SlabMetadata& meta_common,
      snmalloc::capptr::Alloc<void> start,
      size_t size);

    /**
     * Allocate metadata. This allocates non-shared memory for metaslabs and
     * shared memory for allocators.
     */
    template<typename T>
    static snmalloc::capptr::Chunk<void>
    alloc_meta_data(LocalState*, size_t size);

    /**
     * The allocator pool type used to allocate per-thread allocators.
     */
    using AllocPool =
      snmalloc::PoolState<snmalloc::CoreAllocator<SnmallocGlobals>>;

    /**
     * The concrete instance of the pool allocator.
     */
    inline static AllocPool alloc_pool;

  public:
    /**
     * Returns the allocation pool.
     */
    static AllocPool& pool()
    {
      return alloc_pool;
    }

    /**
     * Ensure that all of the early bootstrapping is done.
     */
    static void ensure_init() noexcept;

    /**
     * Returns true if the system has bootstrapped, false otherwise.
     */
    static bool is_initialised();

    /**
     * Message queues are currently always allocated inline for
     * in-sandbox allocators. When we move to dynamically creating
     * shared memory objects one per chunk then they will move to a
     * separate place. For now, all options are the defaults.
     */
    constexpr static snmalloc::Flags Options{};

    /**
     * Register per-thread cleanup.  When SNMALLOC_USE_THREAD_CLEANUP is
     * defined (FreeBSD, see top of file) the platform handles this and the
     * call is a no-op.
     */
    static void register_clean_up()
    {
#ifndef SNMALLOC_USE_THREAD_CLEANUP
      snmalloc::register_clean_up();
#endif
    }
  };
}
namespace snmalloc
{
  /**
   * The standard allocator type that we provide.  Binding `snmalloc::Alloc`
   * to the sandbox configuration lets snmalloc's malloc-override layer
   * (included at the end of this file) build the child's malloc/free on the
   * sandboxed back end.
   */
  using Alloc = LocalAllocator<sandbox::SnmallocGlobals>;
}
#include <snmalloc/override/malloc.cc>