
Commit 7f0a4f1

libafl_frida: Add FridaInstrumentationHelperBuilder, don't rely on Clap options (#1523)
* impr(frida): Don't keep FuzzerOptions in Helper
  Instead, keep the actual values that are needed. This allows us to make a builder for FridaInstrumentationHelper in a subsequent commit.
* refactor(frida): Move workaround to separate method
  This is just code movement.
* refactor(frida): Move transformer initialization
  Mostly code movement here; sets up replacing `new` with a builder. The one exception is the introduction of a lifetime bound on RT, which needs to outlive the transformer. This could be generic, but there is probably no reason to introduce an additional lifetime. However, because of this lifetime introduction, this is _technically_ a breaking change.
* impr(frida): Pass module map to runtimes
  Instead of passing a slice of modules to instrument and re-building the module map, pass a ref-counted module map directly to the initialization.
* feat(frida): Builder for InstrumentationHelper
  Co-authored-by: Dominik Maier <[email protected]>
* impr(frida/alloc): Optional options in allocator
  Move all the initialization into Default::default with sensible defaults, and override the parameters set from options in new.
* impr(frida): Remove options from AsanError
  The only option AsanError uses is whether to continue on error. Instead of keeping a whole clone of the options around, just store that single boolean value.
* impr(frida/asan): Use less FuzzerOptions
* Implement Default::default to get a good default AsanRuntime

---------

Co-authored-by: Dominik Maier <[email protected]>
1 parent fdd2f53 commit 7f0a4f1
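To make the shape of the headline change concrete, here is a minimal, self-contained sketch of the pattern this commit describes: a helper that used to hold a full `FuzzerOptions` clone now keeps only the concrete values it needs, is assembled through a builder with sensible defaults, and hands a single ref-counted module map to its runtimes instead of rebuilding one from a slice of module names. All names below (`Helper`, `HelperBuilder`, `ModuleMap`, the `stalker_enabled` field) are hypothetical stand-ins for illustration, not the actual libafl_frida API.

use std::rc::Rc;

// Hypothetical stand-in for a frida-gum module map: built once in the
// builder, then shared by reference counting instead of being rebuilt.
#[derive(Debug, Default)]
struct ModuleMap {
    modules: Vec<String>,
}

// The helper stores only the values it actually needs, not FuzzerOptions.
#[derive(Debug)]
struct Helper {
    stalker_enabled: bool,
    module_map: Rc<ModuleMap>,
}

// Builder with defaults; each setter overrides exactly one field.
#[derive(Debug, Default)]
struct HelperBuilder {
    stalker_enabled: bool,
    modules: Vec<String>,
}

impl HelperBuilder {
    fn stalker_enabled(mut self, enabled: bool) -> Self {
        self.stalker_enabled = enabled;
        self
    }

    fn instrument_module(mut self, name: &str) -> Self {
        self.modules.push(name.to_string());
        self
    }

    fn build(self) -> Helper {
        // Build the module map once and share it via Rc.
        let module_map = Rc::new(ModuleMap {
            modules: self.modules,
        });
        Helper {
            stalker_enabled: self.stalker_enabled,
            module_map,
        }
    }
}

fn main() {
    let helper = HelperBuilder::default()
        .stalker_enabled(true)
        .instrument_module("libpng.so")
        .build();
    println!("{helper:?}");
}

The point of the pattern is that callers no longer need to construct a Clap-derived options struct just to configure the helper; each knob can be set (or left at its default) independently.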

File tree

11 files changed: +680 −392 lines


fuzzers/frida_executable_libpng/src/fuzzer.rs

Lines changed: 1 addition & 1 deletion
@@ -104,7 +104,7 @@ unsafe fn fuzz(
 
     let coverage = CoverageRuntime::new();
     #[cfg(unix)]
-    let asan = AsanRuntime::new(options.clone());
+    let asan = AsanRuntime::new(&options);
 
     #[cfg(unix)]
     let mut frida_helper =

fuzzers/frida_gdiplus/src/fuzzer.rs

Lines changed: 1 addition & 1 deletion
@@ -99,7 +99,7 @@ unsafe fn fuzz(options: &FuzzerOptions) -> Result<(), Error> {
 
     let coverage = CoverageRuntime::new();
     #[cfg(unix)]
-    let asan = AsanRuntime::new(options.clone());
+    let asan = AsanRuntime::new(&options);
 
     #[cfg(unix)]
     let mut frida_helper =

fuzzers/frida_libpng/src/fuzzer.rs

Lines changed: 1 addition & 1 deletion
@@ -94,7 +94,7 @@ unsafe fn fuzz(options: &FuzzerOptions) -> Result<(), Error> {
 
     let coverage = CoverageRuntime::new();
     #[cfg(unix)]
-    let asan = AsanRuntime::new(options.clone());
+    let asan = AsanRuntime::new(&options);
 
     #[cfg(unix)]
     let mut frida_helper =

libafl_frida/src/alloc.rs

Lines changed: 158 additions & 132 deletions
@@ -28,9 +28,10 @@ use crate::asan::errors::{AsanError, AsanErrors};
 /// An allocator wrapper with binary-only address sanitization
 #[derive(Debug)]
 pub struct Allocator {
-    /// The fuzzer options
-    #[allow(dead_code)]
-    options: FuzzerOptions,
+    max_allocation: usize,
+    max_total_allocation: usize,
+    max_allocation_panics: bool,
+    allocation_backtraces: bool,
     /// The page size
     page_size: usize,
     /// The shadow offsets
@@ -104,130 +105,13 @@ impl Allocator {
         all(target_arch = "aarch64", target_os = "android")
     ))]
     #[must_use]
-    #[allow(clippy::too_many_lines)]
-    pub fn new(options: FuzzerOptions) -> Self {
-        let ret = unsafe { sysconf(_SC_PAGESIZE) };
-        assert!(
-            ret >= 0,
-            "Failed to read pagesize {:?}",
-            io::Error::last_os_error()
-        );
-
-        #[allow(clippy::cast_sign_loss)]
-        let page_size = ret as usize;
-        // probe to find a usable shadow bit:
-        let mut shadow_bit = 0;
-
-        let mut occupied_ranges: Vec<(usize, usize)> = vec![];
-        // max(userspace address) this is usually 0x8_0000_0000_0000 - 1 on x64 linux.
-        let mut userspace_max: usize = 0;
-
-        // Enumerate memory ranges that are already occupied.
-        for prot in [
-            PageProtection::Read,
-            PageProtection::Write,
-            PageProtection::Execute,
-        ] {
-            RangeDetails::enumerate_with_prot(prot, &mut |details| {
-                let start = details.memory_range().base_address().0 as usize;
-                let end = start + details.memory_range().size();
-                occupied_ranges.push((start, end));
-                // log::trace!("{:x} {:x}", start, end);
-                let base: usize = 2;
-                // On x64, if end > 2**48, then that's in vsyscall or something.
-                #[cfg(target_arch = "x86_64")]
-                if end <= base.pow(48) && end > userspace_max {
-                    userspace_max = end;
-                }
-
-                // On x64, if end > 2**52, then range is not in userspace
-                #[cfg(target_arch = "aarch64")]
-                if end <= base.pow(52) && end > userspace_max {
-                    userspace_max = end;
-                }
-
-                true
-            });
-        }
-
-        let mut maxbit = 0;
-        for power in 1..64 {
-            let base: usize = 2;
-            if base.pow(power) > userspace_max {
-                maxbit = power;
-                break;
-            }
-        }
-
-        {
-            for try_shadow_bit in &[maxbit - 4, maxbit - 3, maxbit - 2] {
-                let addr: usize = 1 << try_shadow_bit;
-                let shadow_start = addr;
-                let shadow_end = addr + addr + addr;
-
-                // check if the proposed shadow bit overlaps with occupied ranges.
-                for (start, end) in &occupied_ranges {
-                    if (shadow_start <= *end) && (*start <= shadow_end) {
-                        // log::trace!("{:x} {:x}, {:x} {:x}",shadow_start,shadow_end,start,end);
-                        log::warn!("shadow_bit {try_shadow_bit:x} is not suitable");
-                        break;
-                    }
-                }
-
-                if unsafe {
-                    mmap(
-                        NonZeroUsize::new(addr),
-                        NonZeroUsize::new_unchecked(page_size),
-                        ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
-                        MapFlags::MAP_PRIVATE
-                            | ANONYMOUS_FLAG
-                            | MapFlags::MAP_FIXED
-                            | MapFlags::MAP_NORESERVE,
-                        -1,
-                        0,
-                    )
-                }
-                .is_ok()
-                {
-                    shadow_bit = (*try_shadow_bit).try_into().unwrap();
-                    break;
-                }
-            }
-        }
-
-        log::warn!("shadow_bit {shadow_bit:x} is suitable");
-        assert!(shadow_bit != 0);
-        // attempt to pre-map the entire shadow-memory space
-
-        let addr: usize = 1 << shadow_bit;
-        let pre_allocated_shadow = unsafe {
-            mmap(
-                NonZeroUsize::new(addr),
-                NonZeroUsize::new_unchecked(addr + addr),
-                ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
-                ANONYMOUS_FLAG
-                    | MapFlags::MAP_FIXED
-                    | MapFlags::MAP_PRIVATE
-                    | MapFlags::MAP_NORESERVE,
-                -1,
-                0,
-            )
-        }
-        .is_ok();
-
+    pub fn new(options: &FuzzerOptions) -> Self {
         Self {
-            options,
-            page_size,
-            pre_allocated_shadow,
-            shadow_offset: 1 << shadow_bit,
-            shadow_bit,
-            allocations: HashMap::new(),
-            shadow_pages: RangeSet::new(),
-            allocation_queue: BTreeMap::new(),
-            largest_allocation: 0,
-            total_allocation_size: 0,
-            base_mapping_addr: addr + addr + addr,
-            current_mapping_addr: addr + addr + addr,
+            max_allocation: options.max_allocation,
+            max_allocation_panics: options.max_allocation_panics,
+            max_total_allocation: options.max_total_allocation,
+            allocation_backtraces: options.allocation_backtraces,
+            ..Self::default()
         }
     }
 
@@ -272,17 +156,17 @@ impl Allocator {
         } else {
             size
         };
-        if size > self.options.max_allocation {
+        if size > self.max_allocation {
             #[allow(clippy::manual_assert)]
-            if self.options.max_allocation_panics {
+            if self.max_allocation_panics {
                 panic!("ASAN: Allocation is too large: 0x{size:x}");
             }
 
             return std::ptr::null_mut();
         }
         let rounded_up_size = self.round_up_to_page(size) + 2 * self.page_size;
 
-        if self.total_allocation_size + rounded_up_size > self.options.max_total_allocation {
+        if self.total_allocation_size + rounded_up_size > self.max_total_allocation {
             return std::ptr::null_mut();
         }
         self.total_allocation_size += rounded_up_size;
@@ -291,7 +175,7 @@ impl Allocator {
            //log::trace!("reusing allocation at {:x}, (actual mapping starts at {:x}) size {:x}", metadata.address, metadata.address - self.page_size, size);
            metadata.is_malloc_zero = is_malloc_zero;
            metadata.size = size;
-            if self.options.allocation_backtraces {
+            if self.allocation_backtraces {
                metadata.allocation_site_backtrace = Some(Backtrace::new_unresolved());
            }
            metadata
@@ -324,7 +208,7 @@ impl Allocator {
                actual_size: rounded_up_size,
                ..AllocationMetadata::default()
            };
-            if self.options.allocation_backtraces {
+            if self.allocation_backtraces {
                metadata.allocation_site_backtrace = Some(Backtrace::new_unresolved());
            }
 
@@ -367,7 +251,7 @@ impl Allocator {
         let shadow_mapping_start = map_to_shadow!(self, ptr as usize);
 
         metadata.freed = true;
-        if self.options.allocation_backtraces {
+        if self.allocation_backtraces {
             metadata.release_site_backtrace = Some(Backtrace::new_unresolved());
         }
 
@@ -563,3 +447,145 @@ impl Allocator {
         });
     }
 }
+
+impl Default for Allocator {
+    /// Creates a new [`Allocator`] (not supported on this platform!)
+    #[cfg(not(any(
+        target_os = "linux",
+        target_vendor = "apple",
+        all(target_arch = "aarch64", target_os = "android")
+    )))]
+    fn default() -> Self {
+        todo!("Shadow region not yet supported for this platform!");
+    }
+
+    #[allow(clippy::too_many_lines)]
+    fn default() -> Self {
+        let ret = unsafe { sysconf(_SC_PAGESIZE) };
+        assert!(
+            ret >= 0,
+            "Failed to read pagesize {:?}",
+            io::Error::last_os_error()
+        );
+
+        #[allow(clippy::cast_sign_loss)]
+        let page_size = ret as usize;
+        // probe to find a usable shadow bit:
+        let mut shadow_bit = 0;
+
+        let mut occupied_ranges: Vec<(usize, usize)> = vec![];
+        // max(userspace address) this is usually 0x8_0000_0000_0000 - 1 on x64 linux.
+        let mut userspace_max: usize = 0;
+
+        // Enumerate memory ranges that are already occupied.
+        for prot in [
+            PageProtection::Read,
+            PageProtection::Write,
+            PageProtection::Execute,
+        ] {
+            RangeDetails::enumerate_with_prot(prot, &mut |details| {
+                let start = details.memory_range().base_address().0 as usize;
+                let end = start + details.memory_range().size();
+                occupied_ranges.push((start, end));
+                // log::trace!("{:x} {:x}", start, end);
+                let base: usize = 2;
+                // On x64, if end > 2**48, then that's in vsyscall or something.
+                #[cfg(target_arch = "x86_64")]
+                if end <= base.pow(48) && end > userspace_max {
+                    userspace_max = end;
+                }
+
+                // On x64, if end > 2**52, then range is not in userspace
+                #[cfg(target_arch = "aarch64")]
+                if end <= base.pow(52) && end > userspace_max {
+                    userspace_max = end;
+                }
+
+                true
+            });
+        }
+
+        let mut maxbit = 0;
+        for power in 1..64 {
+            let base: usize = 2;
+            if base.pow(power) > userspace_max {
+                maxbit = power;
+                break;
+            }
+        }
+
+        {
+            for try_shadow_bit in &[maxbit - 4, maxbit - 3, maxbit - 2] {
+                let addr: usize = 1 << try_shadow_bit;
+                let shadow_start = addr;
+                let shadow_end = addr + addr + addr;
+
+                // check if the proposed shadow bit overlaps with occupied ranges.
+                for (start, end) in &occupied_ranges {
+                    if (shadow_start <= *end) && (*start <= shadow_end) {
+                        // log::trace!("{:x} {:x}, {:x} {:x}",shadow_start,shadow_end,start,end);
+                        log::warn!("shadow_bit {try_shadow_bit:x} is not suitable");
+                        break;
+                    }
+                }
+
+                if unsafe {
+                    mmap(
+                        NonZeroUsize::new(addr),
+                        NonZeroUsize::new_unchecked(page_size),
+                        ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
+                        MapFlags::MAP_PRIVATE
+                            | ANONYMOUS_FLAG
+                            | MapFlags::MAP_FIXED
+                            | MapFlags::MAP_NORESERVE,
+                        -1,
+                        0,
+                    )
+                }
+                .is_ok()
+                {
+                    shadow_bit = (*try_shadow_bit).try_into().unwrap();
+                    break;
+                }
+            }
+        }
+
+        log::warn!("shadow_bit {shadow_bit:x} is suitable");
+        assert!(shadow_bit != 0);
+        // attempt to pre-map the entire shadow-memory space
+
+        let addr: usize = 1 << shadow_bit;
+        let pre_allocated_shadow = unsafe {
+            mmap(
+                NonZeroUsize::new(addr),
+                NonZeroUsize::new_unchecked(addr + addr),
+                ProtFlags::PROT_READ | ProtFlags::PROT_WRITE,
+                ANONYMOUS_FLAG
+                    | MapFlags::MAP_FIXED
+                    | MapFlags::MAP_PRIVATE
+                    | MapFlags::MAP_NORESERVE,
+                -1,
+                0,
+            )
+        }
+        .is_ok();
+
+        Self {
+            max_allocation: 1 << 30,
+            max_allocation_panics: false,
+            max_total_allocation: 1 << 32,
+            allocation_backtraces: false,
+            page_size,
+            pre_allocated_shadow,
+            shadow_offset: 1 << shadow_bit,
+            shadow_bit,
+            allocations: HashMap::new(),
+            shadow_pages: RangeSet::new(),
+            allocation_queue: BTreeMap::new(),
+            largest_allocation: 0,
+            total_allocation_size: 0,
+            base_mapping_addr: addr + addr + addr,
+            current_mapping_addr: addr + addr + addr,
+        }
+    }
+}
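The alloc.rs change above leans on Rust's struct update syntax: `Default::default()` does the expensive one-time setup (page-size query, shadow-bit probing, shadow pre-mapping) and supplies fallback limits, while `new` copies only the option-derived fields and fills in everything else with `..Self::default()`. A minimal, self-contained sketch of that pattern follows; the field set and values are simplified stand-ins for illustration, and the real defaults and probing logic are the ones in the diff above.

// Stand-in for the option-derived knobs; hypothetical values for illustration.
struct Options {
    max_allocation: usize,
    max_total_allocation: usize,
}

#[derive(Debug)]
struct Allocator {
    max_allocation: usize,
    max_total_allocation: usize,
    page_size: usize,
}

impl Default for Allocator {
    fn default() -> Self {
        Self {
            // Fallback limits, analogous to the 1 << 30 / 1 << 32 defaults above.
            max_allocation: 1 << 30,
            max_total_allocation: 1 << 32,
            // The expensive, platform-dependent setup would live here
            // (sysconf(_SC_PAGESIZE), shadow-bit probing, shadow mmap).
            page_size: 4096,
        }
    }
}

impl Allocator {
    fn new(options: &Options) -> Self {
        Self {
            // Override only the fields that come from the options...
            max_allocation: options.max_allocation,
            max_total_allocation: options.max_total_allocation,
            // ...and let Default fill in the rest.
            ..Self::default()
        }
    }
}

fn main() {
    let opts = Options {
        max_allocation: 1 << 20,
        max_total_allocation: 1 << 24,
    };
    println!("{:?}", Allocator::new(&opts));
    println!("{:?}", Allocator::default());
}

This is why `new` can now take `&FuzzerOptions` instead of an owned clone: the allocator no longer stores the options struct, only the handful of values read from it.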

0 commit comments
