Skip to content

Commit 53a6945

Browse files
committed
move mpk code to separate file, cleanup some of the interfaces
1 parent 3cc6733 commit 53a6945

File tree

5 files changed

+489
-263
lines changed

5 files changed

+489
-263
lines changed

src/Makefile

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@ library: clean
1818
## Build a debug version of the library
1919
library_debug: clean
2020
mkdir -p ../build/
21-
$(CC) $(CFLAGS) $(LIBRARY) $(DEBUG_FLAGS) mapguard.c vector.c -o ../build/libmapguard.so
21+
$(CC) $(CFLAGS) $(LIBRARY) $(DEBUG_FLAGS) mapguard.c mapguard_mpk.c vector.c -o ../build/libmapguard.so
2222

2323
## Build the unit test
2424
test: clean library
@@ -29,8 +29,8 @@ test: clean library
2929
## Build a debug version of the unit test
3030
debug_test: clean library_debug
3131
mkdir -p ../build/
32-
$(CC) $(CFLAGS) $(EXE_CFLAGS) $(DEBUG_FLAGS) mapguard_test.c vector.c -o ../build/mapguard_test -L../build/ -lmapguard
33-
$(CC) $(CFLAGS) $(EXE_CFLAGS) $(DEBUG_FLAGS) mapguard_thread_test.c vector.c -o ../build/mapguard_thread_test -L../build/ -lmapguard -lpthread
32+
$(CC) $(CFLAGS) $(EXE_CFLAGS) $(DEBUG_FLAGS) mapguard_test.c vector.c -o ../build/mapguard_test -L../build/ -lmapguard -ldl
33+
$(CC) $(CFLAGS) $(EXE_CFLAGS) $(DEBUG_FLAGS) mapguard_thread_test.c vector.c -o ../build/mapguard_thread_test -L../build/ -lmapguard -lpthread -ldl
3434

3535
## Build the vector tests
3636
vector_test:

src/mapguard.c

Lines changed: 27 additions & 249 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,8 @@ __attribute__((constructor)) void mapguard_ctor() {
2424
g_real_pkey_mprotect = dlsym(RTLD_NEXT, "pkey_mprotect");
2525
g_real_pkey_alloc = dlsym(RTLD_NEXT, "pkey_alloc");
2626
g_real_pkey_free = dlsym(RTLD_NEXT, "pkey_free");
27+
g_real_pkey_set = dlsym(RTLD_NEXT, "pkey_set");
28+
g_real_pkey_get = dlsym(RTLD_NEXT, "pkey_get");
2729
#endif
2830

2931
vector_init(&g_map_cache_vector);
@@ -60,10 +62,6 @@ int32_t env_to_int(char *string) {
6062
return strtoul(p, NULL, 0);
6163
}
6264

63-
inline __attribute__((always_inline)) void *get_base_page(void *addr) {
64-
return (void *) ((uintptr_t) addr & ~(g_page_size-1));
65-
}
66-
6765
/* Checks if we have a cache entry for this mapping */
6866
void *is_mapguard_entry_cached(void *p, void *data) {
6967
mapguard_cache_entry_t *mce = (mapguard_cache_entry_t *) p;
@@ -80,6 +78,11 @@ void *is_mapguard_entry_cached(void *p, void *data) {
8078
return NULL;
8179
}
8280

81+
mapguard_cache_entry_t *get_cache_entry(void *addr) {
82+
mapguard_cache_entry_t *mce = (mapguard_cache_entry_t *) vector_for_each(&g_map_cache_vector, (vector_for_each_callback_t *) is_mapguard_entry_cached, addr);
83+
return mce;
84+
}
85+
8386
void *map_guard_page(void *addr) {
8487
return g_real_mmap(get_base_page(addr), g_page_size, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
8588
}
@@ -106,7 +109,7 @@ void unmap_guard_pages(mapguard_cache_entry_t *mce) {
106109
}
107110

108111
void map_bottom_guard_page(mapguard_cache_entry_t *mce) {
109-
if(mce == NULL || mce->guard_bottom != 0) {
112+
if(mce == NULL || mce->guard_bottom) {
110113
return;
111114
}
112115

@@ -122,7 +125,7 @@ void map_bottom_guard_page(mapguard_cache_entry_t *mce) {
122125
}
123126

124127
void map_top_guard_page(mapguard_cache_entry_t *mce) {
125-
if(mce == NULL || mce->guard_top != 0) {
128+
if(mce == NULL || mce->guard_top) {
126129
return;
127130
}
128131

@@ -138,7 +141,7 @@ void map_top_guard_page(mapguard_cache_entry_t *mce) {
138141
}
139142

140143
void map_guard_pages(mapguard_cache_entry_t *mce) {
141-
if(mce->start == 0 && mce->size != 0) {
144+
if(mce->start == 0 && mce->size) {
142145
return;
143146
}
144147

@@ -217,7 +220,7 @@ int munmap(void *addr, size_t length) {
217220

218221
/* Remove tracked pages from the cache */
219222
if(g_mapguard_policy.use_mapping_cache) {
220-
mce = (mapguard_cache_entry_t *) vector_for_each(&g_map_cache_vector, (vector_for_each_callback_t *) is_mapguard_entry_cached, (void *) addr);
223+
mce = get_cache_entry(addr);
221224

222225
if(mce) {
223226
LOG("Found mapguard cache entry for mapping %p", mce->start);
@@ -254,17 +257,20 @@ int munmap(void *addr, size_t length) {
254257
return ret;
255258
}
256259
} else {
257-
ret = g_real_munmap(addr, length);
258-
259-
/* Continue tracking a failed unmapping */
260-
if(ret != 0) {
261-
return ret;
262-
}
263260
#ifdef MPK_SUPPORT
264261
if(mce->pkey) {
262+
/* This is a full unmapping so we call pkey_free */
263+
g_real_pkey_set(mce->pkey, 0);
265264
g_real_pkey_free(mce->pkey);
266265
}
267266
#endif
267+
ret = g_real_munmap(addr, length);
268+
269+
/* Continue tracking a failed unmapping */
270+
if(ret) {
271+
return ret;
272+
}
273+
268274
unmap_guard_pages(mce);
269275
LOG("Deleting cache entry for %p", mce->start);
270276
vector_delete_at(&g_map_cache_vector, mce->cache_index);
@@ -292,8 +298,7 @@ int mprotect(void *addr, size_t len, int prot) {
292298

293299
/* Disallow transition to/from X (requires the mapping cache) */
294300
if(g_mapguard_policy.use_mapping_cache) {
295-
mce = (mapguard_cache_entry_t *) vector_for_each(&g_map_cache_vector, (vector_for_each_callback_t *) is_mapguard_entry_cached, (void *) addr);
296-
301+
mce = get_cache_entry(addr);
297302
#ifdef MPK_SUPPORT
298303
if(mce != NULL && mce->xom_enabled == 0) {
299304
#else
@@ -332,28 +337,12 @@ int mprotect(void *addr, size_t len, int prot) {
332337
return ret;
333338
}
334339

335-
/* Hook pkey_mprotect in libc */
336-
int pkey_mprotect(void *addr, size_t len, int prot, int pkey) {
337-
/* No support for other pkey callers just yet */
338-
return ERROR;
339-
}
340-
341-
int pkey_alloc(unsigned int flags, unsigned int access_rights) {
342-
/* No support for other pkey callers just yet */
343-
return ERROR;
344-
}
345-
346-
int pkey_free(int pkey) {
347-
/* No support for other pkey callers just yet */
348-
return ERROR;
349-
}
350-
351340
/* Hook mremap in libc */
352341
void* mremap(void *__addr, size_t __old_len, size_t __new_len, int __flags, ...) {
353342
void *map_ptr = g_real_mremap(__addr, __old_len, __new_len, __flags);
354343

355344
if(g_mapguard_policy.use_mapping_cache) {
356-
mapguard_cache_entry_t *mce = (mapguard_cache_entry_t *) vector_for_each(&g_map_cache_vector, (vector_for_each_callback_t *) is_mapguard_entry_cached, (void *) __addr);
345+
mapguard_cache_entry_t *mce = get_cache_entry(__addr);
357346

358347
/* We are remapping a previously tracked allocation. This
359348
* means we may have to reallocate guard pages and update
@@ -376,21 +365,14 @@ void* mremap(void *__addr, size_t __old_len, size_t __new_len, int __flags, ...)
376365
map_guard_pages(mce);
377366

378367
#ifdef MPK_SUPPORT
379-
/* If this mapping had previously utilized MPK support we
380-
* need to setup that up again */
368+
/* If this mapping had previously utilized MPK
369+
* support we need to set that up again. We
370+
* cheat and reuse the existing pkey and assume
371+
* the desired access rights are the same */
381372
if(mce->pkey) {
382-
g_real_pkey_free(mce->pkey);
383-
mce->pkey = g_real_pkey_alloc(0, mce->pkey_access_rights);
384-
385-
/* This shouldn't happen... */
386-
if(mce->pkey == 0) {
387-
LOG("Failed to allocate protection key for address %p", mce->start);
388-
return map_ptr;
389-
}
390-
391373
int32_t ret = g_real_pkey_mprotect(mce->start, mce->size, mce->current_prot, mce->pkey);
392374

393-
if(ret != 0) {
375+
if(ret) {
394376
LOG("Failed to call pkey_mprotect for address %p", mce->start);
395377
}
396378
}
@@ -400,207 +382,3 @@ void* mremap(void *__addr, size_t __old_len, size_t __new_len, int __flags, ...)
400382

401383
return map_ptr;
402384
}
403-
404-
#ifdef MPK_SUPPORT
405-
406-
/* Memory Protection Keys is a feature on newer Intel x64 Skylake processors
407-
* that allows a program set permission bits on a per-page mapping. The advantage
408-
* of MPK over mprotect() is that its a lot faster. This feature of MapGuard
409-
* has only been tested on AWS EC2 C5 instances and it may not even work there
410-
* depending on your kernel version and program design.
411-
*
412-
* To know if your kernel supports MPK try the following:
413-
* cat /proc/cpuinfo | grep -E 'pku|ospke'
414-
*
415-
 * The Map Guard APIs that work with MPK always work in terms of page ranges.
416-
* This means for API functions like protect_mapping which take a start
417-
* and end pointer we may end up protecting an entire region of memory
418-
 * and not just the page represented by the start pointer. There's no easy
419-
 * way to implement this except with explicit documentation detailing
420-
* the implicit behavior.
421-
*/
422-
423-
/* memcpy_xom - Allocates writeable memory, copies src_size bytes from src
424-
* into those pages, and then marks the allocation execute only. Upon
425-
* failure it returns MAP_FAILED. Upon success it returns a pointer to the
426-
* Execute Only memory region */
427-
void *memcpy_xom(size_t allocation_size, void *src, size_t src_size) {
428-
429-
if(g_mapguard_policy.use_mapping_cache == 0) {
430-
LOG("Cannot allocate XOM memory without MG_USE_MAPPING_CACHE enabled");
431-
return MAP_FAILED;
432-
}
433-
434-
if(src == NULL || src_size == 0) {
435-
LOG("XOM allocation failed, src is %p and src_size = %ld", src, src_size);
436-
return MAP_FAILED;
437-
}
438-
439-
if(src_size > allocation_size) {
440-
LOG("XOM allocation failed, src size larger than allocation size")
441-
return MAP_FAILED;
442-
}
443-
444-
void *map_ptr = g_real_mmap(0x0, allocation_size, PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
445-
446-
if(map_ptr == MAP_FAILED) {
447-
LOG("XOM mmap failed");
448-
return MAP_FAILED;
449-
}
450-
451-
memcpy(map_ptr, src, src_size);
452-
453-
mapguard_cache_entry_t *mce = (mapguard_cache_entry_t *) malloc(sizeof(mapguard_cache_entry_t));
454-
memset(mce, 0x0, sizeof(mapguard_cache_entry_t));
455-
456-
mce->start = map_ptr;
457-
mce->size = allocation_size;
458-
mce->immutable_prot |= PROT_EXEC;
459-
mce->current_prot = PROT_EXEC;
460-
mce->xom_enabled = 1;
461-
/* We use -1 here as a stand in for the kernels execute_only_pkey */
462-
mce->pkey = -1;
463-
mce->pkey_access_rights = PKEY_DISABLE_ACCESS;
464-
465-
int32_t ret = g_real_mprotect(map_ptr, allocation_size, PROT_EXEC);
466-
467-
if(ret != 0) {
468-
LOG("XOM mprotect failed, unmapping memory");
469-
g_real_munmap(map_ptr, allocation_size);
470-
free(mce);
471-
return MAP_FAILED;
472-
}
473-
474-
mce->cache_index = vector_push(&g_map_cache_vector, mce);
475-
476-
return map_ptr;
477-
}
478-
479-
int munmap_xom(void *addr, size_t length) {
480-
mapguard_cache_entry_t *mce = (mapguard_cache_entry_t *) vector_for_each(&g_map_cache_vector, (vector_for_each_callback_t *) is_mapguard_entry_cached, (void *) addr);
481-
482-
if(mce != NULL) {
483-
LOG("Found mapguard cache entry for mapping %p", mce->start);
484-
g_real_pkey_free(mce->pkey);
485-
vector_delete_at(&g_map_cache_vector, mce->cache_index);
486-
free(mce);
487-
} else {
488-
return ERROR;
489-
}
490-
491-
return OK;
492-
}
493-
494-
/* addr - Address within a page range to protect
495-
*
496-
* This function derives the base page that the address is mapped in
497-
* and then determines if Map Guard is currently tracking
498-
* it. If so we check for an existing pkey or alloc a new
499-
* one. A -1 return value means we are out of pkeys or
500-
* the value of start was bad.
501-
*
502-
* If this function detects we are tracking an allocation
503-
* of pages this address falls within the entire range will
504-
* be protected with MPK, not just the page its on.
505-
*/
506-
int32_t protect_mapping(void *addr) {
507-
if(addr == NULL) {
508-
return ERROR;
509-
}
510-
511-
mapguard_cache_entry_t *mce = (mapguard_cache_entry_t *) vector_for_each(&g_map_cache_vector, (vector_for_each_callback_t *) is_mapguard_entry_cached, addr);
512-
513-
if(mce == NULL) {
514-
/* We aren't currently tracking these pages, so lets
515-
* start doing that. We don't allocate guard pages
516-
* because no r/w operations will take place here */
517-
mce = new_mapguard_cache_entry();
518-
mce->start = get_base_page(addr);
519-
/* We only know a single address, so we default to 1 page */
520-
mce->size = g_page_size;
521-
mce->immutable_prot |= PROT_NONE;
522-
mce->current_prot = PROT_NONE;
523-
mce->cache_index = vector_push(&g_map_cache_vector, mce);
524-
}
525-
526-
mce->pkey_access_rights = PKEY_DISABLE_ACCESS;
527-
528-
/* If there is an existing key then we free it and allocate a new one */
529-
if(mce->pkey != 0) {
530-
g_real_pkey_free(mce->pkey);
531-
pkeys_used--;
532-
}
533-
534-
mce->pkey = g_real_pkey_alloc(0, mce->pkey_access_rights);
535-
536-
if(mce->pkey == 0) {
537-
LOG("Failed to allocate protection key for address %p", mce->start);
538-
return ERROR;
539-
}
540-
541-
pkeys_used++;
542-
543-
int32_t ret = g_real_pkey_mprotect(mce->start, mce->size, PROT_NONE, mce->pkey);
544-
545-
if(ret != 0) {
546-
LOG("Failed to call pkey_mprotect for address %p", mce->start);
547-
return ret;
548-
}
549-
550-
return OK;
551-
}
552-
553-
int32_t unprotect_mapping(void *addr, int new_prot) {
554-
if(addr == NULL) {
555-
return ERROR;
556-
}
557-
558-
mapguard_cache_entry_t *mce = (mapguard_cache_entry_t *) vector_for_each(&g_map_cache_vector, (vector_for_each_callback_t *) is_mapguard_entry_cached, addr);
559-
560-
if(mce != NULL && mce->pkey != 0) {
561-
mprotect(get_base_page(addr), mce->size, new_prot);
562-
mce->immutable_prot |= new_prot;
563-
mce->current_prot = new_prot;
564-
mce->pkey_access_rights = 0;
565-
g_real_pkey_free(mce->pkey);
566-
mce->pkey = 0;
567-
}
568-
569-
return OK;
570-
}
571-
572-
static int32_t map_guard_protect_code_callback(struct dl_phdr_info *info, size_t size, void *data) {
573-
const char *lib_name = "unknown_object";
574-
575-
if(strlen(info->dlpi_name) != 0) {
576-
lib_name = info->dlpi_name;
577-
}
578-
579-
void *load_address = (void *) info->dlpi_addr;
580-
int32_t ret = OK;
581-
582-
for(uint32_t i = 0; i < info->dlpi_phnum; i++) {
583-
if(info->dlpi_phdr[i].p_type == PT_LOAD && (info->dlpi_phdr[i].p_flags & PF_X)) {
584-
ret |= g_real_mprotect(load_address, info->dlpi_phdr[i].p_memsz, (int32_t) data);
585-
}
586-
}
587-
588-
return ret;
589-
}
590-
591-
/* Uses the dynamic linker dl_iterate_phdr API to locate all
592-
* currently mapped PT_LOAD segments with PF_X flags and then
593-
* uses mprotect to mark them execute only */
594-
int32_t protect_code() {
595-
return dl_iterate_phdr(map_guard_protect_code_callback, (void *)PROT_EXEC);
596-
}
597-
598-
/* Locate all currently mapped PT_LOAD segments with PF_X flags
599-
* and mark them PROT_READ|PROT_EXEC. Its possible this will find
600-
* segments of code that were not found when you called protect_code
601-
* but that should be harmless */
602-
int32_t unprotect_code() {
603-
return dl_iterate_phdr(map_guard_protect_code_callback, (void *)(PROT_READ|PROT_EXEC));
604-
}
605-
606-
#endif

0 commit comments

Comments
 (0)