-
Notifications
You must be signed in to change notification settings - Fork 8
/
mmm.c
301 lines (250 loc) · 7.87 KB
/
mmm.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
/*
* Copyright © 2021-2022 John Viega
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Name: mmm.c
* Description: Miniature memory manager: a malloc wrapper to support
* linearization and safe reclamation for my hash tables.
*
* Author: John Viega, john@zork.org
*
*/
#include <hatrack.h>

// clang-format off

// Thread-local stack of cells this thread has retired, but that may
// still be visible to other threads; freed lazily by mmm_empty().
__thread mmm_header_t *mmm_retire_list = NULL;

// Per-thread one-time initialization guard.
// NOTE(review): not referenced in this chunk -- presumably consumed by
// inline helpers/macros in hatrack.h; confirm before assuming unused.
__thread pthread_once_t mmm_inited = PTHREAD_ONCE_INIT;

// Global epoch counter; monotonically increasing from HATRACK_EPOCH_FIRST.
_Atomic uint64_t mmm_epoch = HATRACK_EPOCH_FIRST;

// Next sequential mmm thread id to hand out (see mmm_register_thread()).
_Atomic uint64_t mmm_nexttid = 0;

// This thread's mmm tid; -1 until mmm_register_thread() has run.
__thread int64_t mmm_mytid = -1;

// Number of mmm_retire() calls since the last mmm_empty() sweep.
__thread uint64_t mmm_retire_ctr = 0;

// One epoch-reservation slot per registered thread, indexed by mmm tid.
uint64_t mmm_reservations[HATRACK_THREADS_MAX] = { 0, };

// clang-format on

static void mmm_empty(void);

/*
 * We want to avoid overrunning the reservations array that our memory
 * management system uses.
 *
 * In all cases, the first HATRACK_THREADS_MAX threads to register
 * with us get a steadily increasing id. Also, threads will yield
 * their TID altogether if and when they call the mmm thread cleanup
 * routine-- mmm_clean_up_before_exit().
 *
 * The unused ID then goes on a linked list (mmm_free_tids), and
 * doesn't get touched until we run out of TIDs to give out.
 */
static _Atomic (mmm_free_tids_t *) mmm_free_tids;
/* This grabs an mmm-specific threadid and stashes it in the
 * thread-local variable mmm_mytid.
 *
 * We have a fixed number of TIDS to give out though (controlled by
 * the preprocessor variable, HATRACK_THREADS_MAX). We give them out
 * sequentially till they're done, and then we give out ones that have
 * been "given back", which are stored on a stack (mmm_free_tids).
 *
 * If we finally run out, we abort.
 */
void
mmm_register_thread(void)
{
    mmm_free_tids_t *head;

    if (mmm_mytid != -1) {
        return; // Already registered; nothing to do.
    }

    mmm_mytid = atomic_fetch_add(&mmm_nexttid, 1);

    if (mmm_mytid >= HATRACK_THREADS_MAX) {
        // Sequential ids are exhausted; pop a recycled id off the
        // mmm_free_tids stack (lock-free Treiber-style pop via CAS).
        head = atomic_load(&mmm_free_tids);

        do {
            if (!head) {
                abort(); // No free tids left at all; cannot continue.
            }
        } while (!CAS(&mmm_free_tids, &head, head->next));
        // NOTE(review): the head->next read above assumes the popped
        // node cannot be freed out from under us mid-loop; presumably
        // the epoch-based retirement below provides that guarantee --
        // confirm against mmm_empty()'s reservation logic.

        mmm_mytid = head->tid;

        // Retire (rather than free) the stack node, so it is only
        // reclaimed once no thread can still be dereferencing it.
        mmm_retire(head);
    }

    // Mark our reservation slot as "no epoch reserved".
    mmm_reservations[mmm_mytid] = HATRACK_EPOCH_UNRESERVED;

    return;
}
// Call when a thread exits to add to the free TID stack.
static void
mmm_tid_giveback(void)
{
    mmm_free_tids_t *new_head;
    mmm_free_tids_t *old_head;

    // Allocate the node via mmm_alloc() so that, when this tid is later
    // popped by mmm_register_thread(), the node can be handed to
    // mmm_retire() for safe reclamation.
    new_head      = mmm_alloc(sizeof(mmm_free_tids_t));
    new_head->tid = mmm_mytid;
    old_head      = atomic_load(&mmm_free_tids);

    // Lock-free Treiber-style push onto the free-tid stack; CAS reloads
    // old_head on failure, so we just refresh our next pointer and retry.
    do {
        new_head->next = old_head;
    } while (!CAS(&mmm_free_tids, &old_head, new_head));

    return;
}
/* Convenience hook for the test harness: rewinds the tid counter so a
 * fresh batch of threads can register from zero.
 *
 * This is NOT a general-purpose tid-recycling mechanism; real recycling
 * goes through mmm_tid_giveback() and the mmm_free_tids stack.
 */
void mmm_reset_tids(void)
{
    atomic_store_explicit(&mmm_nexttid, 0, memory_order_seq_cst);

    return;
}
/* For now, our cleanup function spins until it is able to retire
 * everything on its list. Soon, when we finally get around to
 * worrying about thread kills, we will change this to add its
 * contents to an "orphan" list.
 */
void
mmm_clean_up_before_exit(void)
{
    if (mmm_mytid == -1) {
        return; // This thread never registered with mmm; nothing to do.
    }

    // Release any epoch reservation we hold, so our own slot does not
    // keep mmm_empty() from freeing our retired records.
    mmm_end_op();

    // Spin until every record on our retire list has actually been
    // freed (other threads' reservations may delay this).
    while (mmm_retire_list) {
        mmm_empty();
    }

    // Return our tid to the shared free-tid stack for reuse.
    mmm_tid_giveback();

    return;
}
/* Sets the retirement epoch on the pointer, and adds it to the
 * thread-local retirement list.
 *
 * We also keep a thread-local counter of how many times we've called
 * this function, and every HATRACK_RETIRE_FREQ calls, we run
 * mmm_empty(), which walks our thread-local retirement list, looking
 * for items to free.
 */
void
mmm_retire(void *ptr)
{
    mmm_header_t *cell;

    // Recover the mmm header that precedes the user data pointer.
    cell = mmm_get_header(ptr);

#ifdef HATRACK_MMM_DEBUG
    // Don't need this check when not debugging algorithms.
    if (!cell->write_epoch) {
        DEBUG_MMM_INTERNAL(ptr, "No write epoch??");
        abort();
    }

    // Algorithms that steal bits from pointers might steal up to
    // three bits, thus the mask of 0x07.
    if (hatrack_pflag_test(ptr, 0x07)) {
        DEBUG_MMM_INTERNAL(ptr, "Bad alignment on retired pointer.");
        abort();
    }

    /* Detect multiple threads adding this to their retire list.
     * Generally, you should be able to avoid this, but with
     * HATRACK_MMM_DEBUG on we explicitly check for it.
     */
    if (cell->retire_epoch) {
        DEBUG_MMM_INTERNAL(ptr, "Double free");
        DEBUG_PTR((void *)atomic_load(&mmm_epoch), "epoch of double free");
        abort();

        return;
    }
#endif

    // Stamp the current epoch, then push onto our thread-local list.
    // Because the epoch only grows, the list stays ordered with the
    // most recently retired (highest epoch) item at the head, which
    // mmm_empty() relies on.
    cell->retire_epoch = atomic_load(&mmm_epoch);
    cell->next         = mmm_retire_list;
    mmm_retire_list    = cell;

    DEBUG_MMM_INTERNAL(cell->data, "mmm_retire");

    // NOTE(review): this bitwise AND only behaves like "every
    // HATRACK_RETIRE_FREQ-th call" when HATRACK_RETIRE_FREQ is a
    // single-bit power of two (counter resets to 0 on each pass). With
    // a multi-bit value it would fire on nearly every call -- confirm
    // the macro's definition matches the intended cadence.
    if (++mmm_retire_ctr & HATRACK_RETIRE_FREQ) {
        mmm_retire_ctr = 0;
        mmm_empty();
    }

    return;
}
/* The basic gist of this algorithm is that we're going to look at
 * every reservation we can find, identifying the oldest reservation
 * in the list.
 *
 * Then, we can then safely free anything in the list with an earlier
 * retirement epoch than the reservation time. Since the items in the
 * stack were pushed on in order of their retirement epoch, it
 * suffices to find the first item that is lower than the target,
 * and free everything else.
 */
static void
mmm_empty(void)
{
    mmm_header_t *tmp;
    mmm_header_t *cell;
    uint64_t      lowest;
    uint64_t      reservation;
    uint64_t      lasttid;
    uint64_t      i;

    /* We don't have to search the whole array, just the items assigned
     * to active threads. Even if a new thread comes along, it will
     * not be able to reserve something that's already been retired
     * by the time we call this.
     */
    lasttid = atomic_load(&mmm_nexttid);

    if (lasttid > HATRACK_THREADS_MAX) {
        lasttid = HATRACK_THREADS_MAX;
    }

    /* We start out w/ the "lowest" reservation we've seen as
     * HATRACK_EPOCH_MAX. If this value never changes, then it
     * means no epochs were reserved, and we can safely
     * free every record in our stack.
     */
    lowest = HATRACK_EPOCH_MAX;

    for (i = 0; i < lasttid; i++) {
        reservation = mmm_reservations[i];

        if (reservation < lowest) {
            lowest = reservation;
        }
    }

    /* The list here is ordered by retire epoch, with most recent on
     * top. Go down the list until the NEXT cell is the first item we
     * should delete.
     *
     * Then, set the current cell's next pointer to NULL (since
     * it's the new end of the list), and then place the pointer at
     * the top of the list of cells to delete.
     *
     * Note that this function is only called if there's something
     * on the retire list, so cell will never start out empty.
     */
    cell = mmm_retire_list;

    // Special-case this, in case we have to delete the head cell,
    // to make sure we reinitialize the linked list right.
    if (mmm_retire_list->retire_epoch < lowest) {
        // Every item on the list is older than the oldest reservation;
        // the whole list is freeable.
        mmm_retire_list = NULL;
    }
    else {
        while (true) {
            // We got to the end of the list, and didn't
            // find one we should bother deleting.
            if (!cell->next) {
                return;
            }

            if (cell->next->retire_epoch < lowest) {
                // Truncate the retained portion of the list here; cell
                // becomes the head of the freeable tail.
                tmp       = cell;
                cell      = cell->next;
                tmp->next = NULL;
                break;
            }

            cell = cell->next;
        }
    }

    // Now cell and everything below it can be freed.
    while (cell) {
        tmp  = cell;
        cell = cell->next;
        HATRACK_FREE_CTR();
        DEBUG_MMM_INTERNAL(tmp->data, "mmm_empty::free");

        // Call the cleanup handler, if one exists.
        if (tmp->cleanup) {
            (*tmp->cleanup)(&tmp->data, tmp->cleanup_aux);
        }

        free(tmp);
    }

    return;
}