-
-
Notifications
You must be signed in to change notification settings - Fork 2.9k
/
sampler_mac.rs
118 lines (107 loc) · 3.85 KB
/
sampler_mac.rs
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use crate::sampler::{Address, NativeStack, Registers, Sampler};
use libc;
use mach;
use std::panic;
use std::process;
/// Identifier of the thread being sampled: a Mach thread port
/// (`thread_act_t`), usable with the `mach::thread_act` calls below.
type MonitoredThreadId = mach::mach_types::thread_act_t;
/// macOS implementation of [`Sampler`]: suspends the monitored thread
/// through the Mach thread APIs and captures its stack with a
/// frame-pointer walk.
pub struct MacOsSampler {
    // Mach thread port of the thread this sampler monitors.
    thread_id: MonitoredThreadId,
}
impl MacOsSampler {
#[allow(unsafe_code)]
pub fn new() -> Box<Sampler> {
let thread_id = unsafe { mach::mach_init::mach_thread_self() };
Box::new(MacOsSampler { thread_id })
}
}
impl Sampler for MacOsSampler {
    /// Suspend the monitored thread, read its registers, walk its stack
    /// via frame pointers, then resume it.
    ///
    /// Returns `Err(())` if the thread could not be suspended or its
    /// register state could not be read. Aborts the process if the
    /// thread cannot be resumed afterwards.
    #[allow(unsafe_code)]
    fn suspend_and_sample_thread(&self) -> Result<NativeStack, ()> {
        // Warning: The "critical section" begins here.
        // In the critical section:
        // we must not do any dynamic memory allocation,
        // nor try to acquire any lock
        // or any other unshareable resource.
        // (Presumably because the suspended thread may itself hold such
        // a resource — e.g. an allocator lock — so touching it here
        // could deadlock the whole process.)

        // Swap in a panic hook that aborts outright: standard panicking
        // would allocate/lock, which is forbidden while the target
        // thread is suspended. (The hook swap itself allocates, but the
        // target thread is not yet suspended at this point.)
        let current_hook = panic::take_hook();
        panic::set_hook(Box::new(|_| {
            // Avoiding any allocation or locking as part of standard panicking.
            process::abort();
        }));
        let native_stack = unsafe {
            if let Err(()) = suspend_thread(self.thread_id) {
                // Nothing was suspended; restore the hook and bail out.
                panic::set_hook(current_hook);
                return Err(());
            };
            // Sample while the thread is frozen; a register-read failure
            // still falls through so the thread gets resumed below.
            let native_stack = match get_registers(self.thread_id) {
                Ok(regs) => Ok(frame_pointer_stack_walk(regs)),
                Err(()) => Err(()),
            };
            // A thread we cannot resume leaves the process wedged;
            // aborting is the only safe option.
            if let Err(()) = resume_thread(self.thread_id) {
                process::abort();
            }
            native_stack
        };
        // Reinstate the caller's panic hook now that it is safe to
        // allocate/lock again.
        panic::set_hook(current_hook);
        // NOTE: End of "critical section".
        native_stack
    }
}
/// Translate a Mach kernel return code into a `Result`, mapping
/// everything other than `KERN_SUCCESS` to `Err(())`.
fn check_kern_return(kret: mach::kern_return::kern_return_t) -> Result<(), ()> {
    if kret == mach::kern_return::KERN_SUCCESS {
        Ok(())
    } else {
        Err(())
    }
}
/// Ask the kernel to suspend the monitored thread.
///
/// # Safety
/// `thread_id` must be a valid Mach thread port for a live thread.
#[allow(unsafe_code)]
unsafe fn suspend_thread(thread_id: MonitoredThreadId) -> Result<(), ()> {
    let kret = mach::thread_act::thread_suspend(thread_id);
    check_kern_return(kret)
}
/// Read the target thread's x86-64 register state and extract the three
/// registers needed for a frame-pointer stack walk (rip, rsp, rbp).
///
/// # Safety
/// `thread_id` must be a valid Mach thread port; the thread is expected
/// to be suspended so the register snapshot is coherent — confirm at
/// the call site.
#[allow(unsafe_code)]
unsafe fn get_registers(thread_id: MonitoredThreadId) -> Result<Registers, ()> {
    let mut state = mach::structs::x86_thread_state64_t::new();
    // In/out parameter: capacity of `state` in words; the kernel
    // updates it with the number of words actually written.
    let mut state_count = mach::structs::x86_thread_state64_t::count();
    let kret = mach::thread_act::thread_get_state(
        thread_id,
        mach::thread_status::x86_THREAD_STATE64,
        // The kernel writes the register dump through this raw,
        // type-erased out-pointer.
        (&mut state) as *mut _ as *mut _,
        &mut state_count,
    );
    check_kern_return(kret)?;
    Ok(Registers {
        instruction_ptr: state.__rip as Address,
        stack_ptr: state.__rsp as Address,
        frame_ptr: state.__rbp as Address,
    })
}
/// Ask the kernel to resume the previously suspended monitored thread.
///
/// # Safety
/// `thread_id` must be a valid Mach thread port for a live thread.
#[allow(unsafe_code)]
unsafe fn resume_thread(thread_id: MonitoredThreadId) -> Result<(), ()> {
    let kret = mach::thread_act::thread_resume(thread_id);
    check_kern_return(kret)
}
/// Walk the saved-frame-pointer chain starting at `regs.frame_ptr` and
/// record each frame's return address into a `NativeStack`.
///
/// # Safety
/// The sampled thread must be suspended and `regs` must be its coherent
/// register snapshot; otherwise the rbp chain may be torn.
#[allow(unsafe_code)]
unsafe fn frame_pointer_stack_walk(regs: Registers) -> NativeStack {
    // Note: this function will only work with code build with:
    // --dev,
    // or --with-frame-pointer.
    //
    // On macOS, `pthread_get_stackaddr_np` returns the *base* of the
    // stack region, which is its highest address (the stack grows
    // downwards), so every live frame sits strictly below `stackaddr`.
    let stackaddr = libc::pthread_get_stackaddr_np(libc::pthread_self());
    let mut native_stack = NativeStack::new();
    // Record the sampled thread's current pc/sp first.
    let pc = regs.instruction_ptr as *mut std::ffi::c_void;
    let stack = regs.stack_ptr as *mut std::ffi::c_void;
    let _ = native_stack.process_register(pc, stack);
    // Each frame record is laid out as [saved rbp][return address];
    // `current` points at the caller's saved frame pointer slot.
    let mut current = regs.frame_ptr as *mut *mut std::ffi::c_void;
    while !current.is_null() {
        // BUGFIX: the original comparison was inverted. Frames live
        // *below* `stackaddr`, so the walk must stop when the frame
        // pointer reaches or passes the top of the stack; with `<` the
        // loop broke on the very first (in-stack) frame and only the
        // register pc/sp was ever recorded.
        if (current as usize) >= stackaddr as usize {
            break;
        }
        let next = *current as *mut *mut std::ffi::c_void;
        let pc = current.add(1);
        let stack = current.add(2);
        if let Err(()) = native_stack.process_register(*pc, *stack) {
            break;
        }
        // Guard against corrupt or cyclic rbp chains: a valid caller
        // frame is always at a strictly higher address, so a
        // non-increasing link would loop forever.
        if !next.is_null() && (next as usize) <= (current as usize) {
            break;
        }
        current = next;
    }
    native_stack
}