frida-asan: move to mmap-rs #1570

Merged: 3 commits, merged on Sep 28, 2023

Changes from all commits
1 change: 1 addition & 0 deletions libafl_frida/Cargo.toml
@@ -72,6 +72,7 @@ num-traits = "0.2"
ahash = "0.8"
paste = "1.0"
log = "0.4.20"
mmap-rs = "0.6.0"

[dev-dependencies]
serial_test = { version = "2", default-features = false, features = ["logging"] }
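The new dependency replaces the platform-specific `nix`/`libc` mmap calls used throughout `alloc.rs` below. A minimal sketch of the core `mmap-rs` pattern the PR adopts (the helper name `map_rw_at` is illustrative, not part of the PR):

```rust
use mmap_rs::{MmapMut, MmapOptions};

/// Map `size` bytes of anonymous read/write memory, requesting the fixed
/// base address `addr`, mirroring the calls made in `alloc.rs` below.
fn map_rw_at(addr: usize, size: usize) -> Result<MmapMut, mmap_rs::Error> {
    MmapOptions::new(size)?  // builder; validates the requested size
        .with_address(addr)  // request a fixed base address
        .map_mut()           // create the writable mapping
}
```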
209 changes: 97 additions & 112 deletions libafl_frida/src/alloc.rs
@@ -3,8 +3,7 @@
target_vendor = "apple",
all(target_arch = "aarch64", target_os = "android")
))]
use std::io;
use std::{collections::BTreeMap, ffi::c_void, num::NonZeroUsize};
use std::{collections::BTreeMap, ffi::c_void};

use backtrace::Backtrace;
use frida_gum::{PageProtection, RangeDetails};
@@ -15,11 +14,8 @@ use libafl_bolts::cli::FuzzerOptions;
target_vendor = "apple",
all(target_arch = "aarch64", target_os = "android")
))]
use libc::{sysconf, _SC_PAGESIZE};
use nix::{
libc::memset,
sys::mman::{mmap, MapFlags, ProtFlags},
};
use mmap_rs::{MemoryAreas, MmapFlags, MmapMut, MmapOptions, ReservedMut};
use nix::libc::memset;
use rangemap::RangeSet;
use serde::{Deserialize, Serialize};

@@ -38,10 +34,12 @@ pub struct Allocator {
shadow_offset: usize,
/// The shadow bit
shadow_bit: usize,
/// If the shadow is pre-allocated
pre_allocated_shadow: bool,
/// The reserved (pre-allocated) shadow mapping
pre_allocated_shadow_mappings: HashMap<(usize, usize), ReservedMut>,
/// All tracked allocations
allocations: HashMap<usize, AllocationMetadata>,
/// All mappings
mappings: HashMap<usize, MmapMut>,
/// The shadow memory pages
shadow_pages: RangeSet<usize>,
/// A list of allocations
Expand All @@ -56,11 +54,6 @@ pub struct Allocator {
current_mapping_addr: usize,
}

#[cfg(target_vendor = "apple")]
const ANONYMOUS_FLAG: MapFlags = MapFlags::MAP_ANON;
#[cfg(not(target_vendor = "apple"))]
const ANONYMOUS_FLAG: MapFlags = MapFlags::MAP_ANONYMOUS;

macro_rules! map_to_shadow {
($self:expr, $address:expr) => {
$self.shadow_offset + (($address >> 3) & ((1 << ($self.shadow_bit + 1)) - 1))
@@ -89,6 +82,7 @@ pub struct AllocationMetadata {
impl Allocator {
/// Creates a new [`Allocator`] (not supported on this platform!)
#[cfg(not(any(
windows,
target_os = "linux",
target_vendor = "apple",
all(target_arch = "aarch64", target_os = "android")
@@ -100,6 +94,7 @@ impl Allocator {

/// Creates a new [`Allocator`]
#[cfg(any(
windows,
target_os = "linux",
target_vendor = "apple",
all(target_arch = "aarch64", target_os = "android")
@@ -181,29 +176,32 @@
metadata
} else {
// log::trace!("{:x}, {:x}", self.current_mapping_addr, rounded_up_size);
let mapping = match mmap(
NonZeroUsize::new(self.current_mapping_addr),
NonZeroUsize::new_unchecked(rounded_up_size),
ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
ANONYMOUS_FLAG
| MapFlags::MAP_PRIVATE
| MapFlags::MAP_FIXED
| MapFlags::MAP_NORESERVE,
-1,
0,
) {
Ok(mapping) => mapping as usize,
let mapping = match MmapOptions::new(rounded_up_size)
.unwrap()
.with_address(self.current_mapping_addr)
.map_mut()
{
Ok(mapping) => mapping,
Err(err) => {
log::error!("An error occurred while mapping memory: {err:?}");
return std::ptr::null_mut();
}
};
self.current_mapping_addr += rounded_up_size;

self.map_shadow_for_region(mapping, mapping + rounded_up_size, false);
self.current_mapping_addr += ((rounded_up_size
+ MmapOptions::allocation_granularity())
/ MmapOptions::allocation_granularity())
* MmapOptions::allocation_granularity();

self.map_shadow_for_region(
mapping.as_ptr() as usize,
mapping.as_ptr().add(rounded_up_size) as usize,
false,
);
let address = mapping.as_ptr() as usize;
self.mappings.insert(address, mapping);

let mut metadata = AllocationMetadata {
address: mapping,
address,
size,
actual_size: rounded_up_size,
..AllocationMetadata::default()
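A note on the cursor-advance arithmetic above: `current_mapping_addr` moves by the request size rounded up to the OS allocation granularity, and an exactly aligned size still gains one extra granule of slack. A quick check with an illustrative 4 KiB granularity (the `step` helper is hypothetical):

```rust
/// Reproduces the cursor-advance formula from `allocate()` above.
fn step(size: usize, granularity: usize) -> usize {
    ((size + granularity) / granularity) * granularity
}

fn main() {
    assert_eq!(step(10 * 1024, 4096), 12 * 1024); // 10 KiB request -> 12 KiB step
    assert_eq!(step(8 * 1024, 4096), 12 * 1024); // aligned sizes still gain one granule
}
```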
@@ -223,8 +221,7 @@
);
let address = (metadata.address + self.page_size) as *mut c_void;

self.allocations
.insert(metadata.address + self.page_size, metadata);
self.allocations.insert(address as usize, metadata);
// log::trace!("serving address: {:?}, size: {:x}", address, size);
address
}
@@ -373,31 +370,50 @@

let shadow_mapping_start = map_to_shadow!(self, start);

if !self.pre_allocated_shadow {
let shadow_start = self.round_down_to_page(shadow_mapping_start);
let shadow_end =
self.round_up_to_page((end - start) / 8) + self.page_size + shadow_start;
let shadow_start = self.round_down_to_page(shadow_mapping_start);
let shadow_end = self.round_up_to_page((end - start) / 8) + self.page_size + shadow_start;
if self.pre_allocated_shadow_mappings.is_empty() {
for range in self.shadow_pages.gaps(&(shadow_start..shadow_end)) {
/*
log::trace!(
"range: {:x}-{:x}, pagesize: {}",
range.start, range.end, self.page_size
);
*/
unsafe {
mmap(
NonZeroUsize::new(range.start),
NonZeroUsize::new(range.end - range.start).unwrap(),
ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
ANONYMOUS_FLAG | MapFlags::MAP_FIXED | MapFlags::MAP_PRIVATE,
-1,
0,
)
let mapping = MmapOptions::new(range.end - range.start - 1)
.unwrap()
.with_address(range.start)
.map_mut()
.expect("An error occurred while mapping shadow memory");
}

self.mappings.insert(range.start, mapping);
}

self.shadow_pages.insert(shadow_start..shadow_end);
} else {
let mut new_shadow_mappings = Vec::new();
for range in self.shadow_pages.gaps(&(shadow_start..shadow_end)) {
for ((start, end), shadow_mapping) in &mut self.pre_allocated_shadow_mappings {
if *start <= range.start && range.start < *start + shadow_mapping.len() {
let mut start_mapping =
shadow_mapping.split_off(range.start - *start).unwrap();
let end_mapping = start_mapping
.split_off(range.end - (range.start - *start))
.unwrap();
new_shadow_mappings.push(((range.end, *end), end_mapping));
self.mappings
.insert(range.start, start_mapping.try_into().unwrap());

break;
}
}
}
for new_shadow_mapping in new_shadow_mappings {
self.pre_allocated_shadow_mappings
.insert(new_shadow_mapping.0, new_shadow_mapping.1);
self.shadow_pages
.insert(new_shadow_mapping.0 .0..new_shadow_mapping.0 .1);
}
}

// log::trace!("shadow_mapping_start: {:x}, shadow_size: {:x}", shadow_mapping_start, (end - start) / 8);
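The carve-out logic above splits a pre-reserved shadow region into a committed middle and a still-reserved tail. A standalone sketch of the intended geometry, assuming `ReservedMut::split_off(at)` keeps `[0, at)` in place and returns `[at, len)`, and that converting a `ReservedMut` with `try_into()` commits it (as the diff's `start_mapping.try_into().unwrap()` suggests); the `carve` helper is illustrative:

```rust
use mmap_rs::{MmapMut, ReservedMut};

/// Carve the byte range [lo, hi) out of a reservation: the middle part is
/// committed to a real mapping, while the head and tail stay reserved.
fn carve(
    mut head: ReservedMut,
    lo: usize,
    hi: usize,
) -> Result<(ReservedMut, MmapMut, ReservedMut), mmap_rs::Error> {
    let mut middle = head.split_off(lo)?;   // head: [0, lo), middle: [lo, len)
    let tail = middle.split_off(hi - lo)?;  // middle: [lo, hi), tail: [hi, len)
    let mapped: MmapMut = middle.try_into().expect("commit failed"); // commits [lo, hi)
    Ok((head, mapped, tail))
}
```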
@@ -438,7 +454,7 @@
if range.protection() as u32 & PageProtection::ReadWrite as u32 != 0 {
let start = range.memory_range().base_address().0 as usize;
let end = start + range.memory_range().size();
if self.pre_allocated_shadow && start == 1 << self.shadow_bit {
if !self.pre_allocated_shadow_mappings.is_empty() && start == 1 << self.shadow_bit {
return true;
}
self.map_shadow_for_region(start, end, true);
@@ -461,31 +477,28 @@
let mut userspace_max: usize = 0;

// Enumerate memory ranges that are already occupied.
for prot in [
PageProtection::Read,
PageProtection::Write,
PageProtection::Execute,
] {
RangeDetails::enumerate_with_prot(prot, &mut |details| {
let start = details.memory_range().base_address().0 as usize;
let end = start + details.memory_range().size();
occupied_ranges.push((start, end));
log::trace!("{:x} {:x}", start, end);
let base: usize = 2;
// On x64, if end > 2**48, then that's in vsyscall or something.
#[cfg(target_arch = "x86_64")]
if end <= base.pow(48) && end > userspace_max {
userspace_max = end;
}
for area in MemoryAreas::open(None).unwrap() {
let start = area.as_ref().unwrap().start();
let end = area.unwrap().end();
occupied_ranges.push((start, end));
log::trace!("{:x} {:x}", start, end);
let base: usize = 2;
// On x64, if end > 2**48, then that's in vsyscall or something.
#[cfg(all(unix, target_arch = "x86_64"))]
if end <= base.pow(48) && end > userspace_max {
userspace_max = end;
}

// On x64, if end > 2**52, then range is not in userspace
#[cfg(target_arch = "aarch64")]
if end <= base.pow(52) && end > userspace_max {
userspace_max = end;
}
#[cfg(all(not(unix), target_arch = "x86_64"))]
if (end >> 3) <= base.pow(44) && (end >> 3) > userspace_max {
userspace_max = end >> 3;
}

true
});
// On aarch64, if end > 2**52, then range is not in userspace
#[cfg(target_arch = "aarch64")]
if end <= base.pow(52) && end > userspace_max {
userspace_max = end;
}
}

let mut maxbit = 0;
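Process-memory enumeration now goes through `mmap_rs::MemoryAreas` instead of Frida's `RangeDetails::enumerate_with_prot`, which also works on Windows. A minimal sketch of the iteration pattern (the `highest_userspace_end` helper is illustrative):

```rust
use mmap_rs::MemoryAreas;

/// Find the highest end address among the process's current mappings.
fn highest_userspace_end() -> usize {
    let mut max_end = 0;
    // `MemoryAreas::open(None)` enumerates the *current* process; each item
    // is itself a `Result`, hence the inner unwrap, as in the PR.
    for area in MemoryAreas::open(None).expect("cannot enumerate memory maps") {
        let area = area.expect("failed to read a memory area");
        max_end = max_end.max(area.end()); // area.start()..area.end() is occupied
    }
    max_end
}
```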
@@ -498,7 +511,7 @@
}

{
for try_shadow_bit in &[maxbit - 4, maxbit - 3, maxbit - 2] {
for try_shadow_bit in &[maxbit, maxbit - 4, maxbit - 3, maxbit - 2] {
let addr: usize = 1 << try_shadow_bit;
let shadow_start = addr;
let shadow_end = addr + addr + addr;
@@ -512,48 +525,27 @@
}
}

if unsafe {
mmap(
NonZeroUsize::new(addr),
NonZeroUsize::new_unchecked(self.page_size),
ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
MapFlags::MAP_PRIVATE
| ANONYMOUS_FLAG
| MapFlags::MAP_FIXED
| MapFlags::MAP_NORESERVE,
-1,
0,
)
}
.is_ok()
if let Ok(mapping) = MmapOptions::new(1 << (*try_shadow_bit + 1))
.unwrap()
.with_flags(MmapFlags::NO_RESERVE)
.with_address(addr)
.reserve_mut()
{
shadow_bit = (*try_shadow_bit).try_into().unwrap();

log::warn!("shadow_bit {shadow_bit:x} is suitable");
self.pre_allocated_shadow_mappings
.insert((addr, (addr + (1 << shadow_bit))), mapping);
break;
}
}
}

log::warn!("shadow_bit {shadow_bit:x} is suitable");
// assert!(shadow_bit != 0);
// attempt to pre-map the entire shadow-memory space

let addr: usize = 1 << shadow_bit;
let pre_allocated_shadow = unsafe {
mmap(
NonZeroUsize::new(addr),
NonZeroUsize::new_unchecked(addr + addr),
ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
ANONYMOUS_FLAG
| MapFlags::MAP_FIXED
| MapFlags::MAP_PRIVATE
| MapFlags::MAP_NORESERVE,
-1,
0,
)
}
.is_ok();

self.pre_allocated_shadow = pre_allocated_shadow;
self.shadow_offset = 1 << shadow_bit;
self.shadow_bit = shadow_bit;
self.base_mapping_addr = addr + addr + addr;
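The probe above replaces the old commit-and-check `mmap`: instead of mapping pages, it now only *reserves* the whole candidate shadow region with `reserve_mut()` and keeps the reservation around for later carve-outs. A condensed sketch (the `try_reserve_shadow` name is illustrative):

```rust
use mmap_rs::{MmapFlags, MmapOptions, ReservedMut};

/// Try to reserve the full shadow region for a candidate shadow bit.
/// Success means the range starting at `1 << bit`, sized `1 << (bit + 1)`
/// bytes, is free in the address space.
fn try_reserve_shadow(bit: usize) -> Option<ReservedMut> {
    let addr = 1usize << bit;
    MmapOptions::new(1 << (bit + 1))
        .ok()?
        .with_flags(MmapFlags::NO_RESERVE) // don't charge swap for the reservation
        .with_address(addr)
        .reserve_mut() // reserve address space only; nothing is committed yet
        .ok()
}
```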
@@ -564,6 +556,7 @@
impl Default for Allocator {
/// Creates a new [`Allocator`] (not supported on this platform!)
#[cfg(not(any(
windows,
target_os = "linux",
target_vendor = "apple",
all(target_arch = "aarch64", target_os = "android")
Expand All @@ -572,25 +565,17 @@ impl Default for Allocator {
todo!("Shadow region not yet supported for this platform!");
}

#[allow(clippy::too_many_lines)]
fn default() -> Self {
let ret = unsafe { sysconf(_SC_PAGESIZE) };
assert!(
ret >= 0,
"Failed to read pagesize {:?}",
io::Error::last_os_error()
);

#[allow(clippy::cast_sign_loss)]
let page_size = ret as usize;
let page_size = MmapOptions::page_size();

Self {
max_allocation: 1 << 30,
max_allocation_panics: false,
max_total_allocation: 1 << 32,
allocation_backtraces: false,
page_size,
pre_allocated_shadow: false,
pre_allocated_shadow_mappings: HashMap::new(),
mappings: HashMap::new(),
shadow_offset: 0,
shadow_bit: 0,
allocations: HashMap::new(),
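Finally, the page size now comes from `mmap-rs` instead of `sysconf(_SC_PAGESIZE)`, which is what lets the `windows` cfg be added throughout. For reference:

```rust
use mmap_rs::MmapOptions;

fn main() {
    // Portable replacements for the removed libc::sysconf() call.
    println!("page size: {}", MmapOptions::page_size());
    println!("allocation granularity: {}", MmapOptions::allocation_granularity());
}
```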