80 changes: 45 additions & 35 deletions accel/tcg/cputlb.c
@@ -48,6 +48,11 @@
#endif
#include "tcg/tcg-ldst.h"

//// --- Begin LibAFL code ---
//#define CONFIG_DEBUG_SYX
#include "libafl/syx-snapshot/syx-snapshot.h"
//// --- End LibAFL code ---

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */
@@ -89,13 +94,6 @@ QEMU_BUILD_BUG_ON(sizeof(vaddr) > sizeof(run_on_cpu_data));
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

//// --- Begin LibAFL code ---

// void syx_snapshot_dirty_list_add(hwaddr paddr);
void syx_snapshot_dirty_list_add_hostaddr(void* host_addr);

//// --- End LibAFL code ---

static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
{
return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
@@ -439,6 +437,15 @@ void tlb_flush_all_cpus_synced(CPUState *src_cpu)
tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
}

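/*
 * LibAFL addition: queue a full TLB flush on every vCPU. The SYX snapshot
 * code calls this when it starts tracking dirty pages, so that stale TLB
 * entries cannot let guest writes slip past the dirty-page tracking.
 */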
void tlb_flush_all_cpus(void)
{
const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

tlb_debug("mmu_idx: 0x%"PRIx16"\n", ALL_MMUIDX_BITS);

flush_all_helper(NULL, fn, RUN_ON_CPU_HOST_INT(ALL_MMUIDX_BITS));
}

static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
vaddr page, vaddr mask)
{
@@ -1410,15 +1417,18 @@ static int probe_access_internal(CPUState *cpu, vaddr addr,

/* Everything else is RAM. */
*phost = (void *)((uintptr_t)addr + entry->addend);
//// --- Begin LibAFL code ---

if (access_type == MMU_DATA_STORE) {
syx_snapshot_dirty_list_add_hostaddr(*phost);
return flags;
}

//// --- Begin LibAFL code ---
// Use this snippet multiple times below
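// NOTE: the macro expands in place and relies on a local 'flags' variable
// being in scope at each use site.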
#define SYX_SNAPSHOT_DIRTY_LIST_ADD_HOSTADDR_PROBE(dbg, access_type, addr, entry_full, phost) \
    do { \
        if ((access_type) == MMU_DATA_STORE && !(flags & (TLB_MMIO | TLB_DISCARD_WRITE))) { \
            SYX_DEBUG("%s %llx %llx\n", dbg, (unsigned long long)(addr), \
                      (unsigned long long)((addr) + (entry_full)->xlat_section)); \
            syx_snapshot_dirty_list_add_hostaddr((phost)); \
        } \
    } while (0)
//// --- End LibAFL code ---
return flags;
}

int probe_access_full(CPUArchState *env, vaddr addr, int size,
MMUAccessType access_type, int mmu_idx,
@@ -1434,6 +1444,9 @@ int probe_access_full(CPUArchState *env, vaddr addr, int size,
int dirtysize = size == 0 ? 1 : size;
notdirty_write(env_cpu(env), addr, dirtysize, *pfull, retaddr);
flags &= ~TLB_NOTDIRTY;
//// --- Begin LibAFL code ---
SYX_SNAPSHOT_DIRTY_LIST_ADD_HOSTADDR_PROBE("probe_access_full", access_type, addr, *pfull, *phost);
//// --- End LibAFL code ---
}

return flags;
@@ -1458,6 +1471,9 @@ int probe_access_full_mmu(CPUArchState *env, vaddr addr, int size,
int dirtysize = size == 0 ? 1 : size;
notdirty_write(env_cpu(env), addr, dirtysize, *pfull, 0);
flags &= ~TLB_NOTDIRTY;
//// --- Begin LibAFL code ---
SYX_SNAPSHOT_DIRTY_LIST_ADD_HOSTADDR_PROBE("probe_access_full_mmu", access_type, addr, *pfull, *phost);
//// --- End LibAFL code ---
}

return flags;
@@ -1481,6 +1497,9 @@ int probe_access_flags(CPUArchState *env, vaddr addr, int size,
int dirtysize = size == 0 ? 1 : size;
notdirty_write(env_cpu(env), addr, dirtysize, full, retaddr);
flags &= ~TLB_NOTDIRTY;
//// --- Begin LibAFL code ---
SYX_SNAPSHOT_DIRTY_LIST_ADD_HOSTADDR_PROBE("probe_access_flags", access_type, addr, full, *phost);
//// --- End LibAFL code ---
}

return flags;
@@ -1516,6 +1535,9 @@ void *probe_access(CPUArchState *env, vaddr addr, int size,
/* Handle clean RAM pages. */
if (flags & TLB_NOTDIRTY) {
notdirty_write(env_cpu(env), addr, size, full, retaddr);
//// --- Begin LibAFL code ---
SYX_SNAPSHOT_DIRTY_LIST_ADD_HOSTADDR_PROBE("probe_access", access_type, addr, full, host);
//// --- End LibAFL code ---
}
}

@@ -1732,6 +1754,12 @@ static void mmu_watch_or_dirty(CPUState *cpu, MMULookupPageData *data,
if (flags & TLB_NOTDIRTY) {
notdirty_write(cpu, addr, size, full, ra);
flags &= ~TLB_NOTDIRTY;
//// --- Begin LibAFL code ---
if (!(flags & (TLB_MMIO | TLB_DISCARD_WRITE))) {
SYX_DEBUG("mmu_watch_or_dirty %llx %llx\n", (unsigned long long)addr, (unsigned long long)(addr + full->xlat_section));
syx_snapshot_dirty_list_add_hostaddr(data->haddr);
}
//// --- End LibAFL code ---
}
data->flags = flags;
}
@@ -1776,14 +1804,6 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
l->memop ^= MO_BSWAP;
}

//// --- Begin LibAFL code ---

// TODO: Does not work?
// if (type == MMU_DATA_STORE) {
syx_snapshot_dirty_list_add_hostaddr(l->page[0].haddr);
// }

//// --- End LibAFL code ---

} else {
/* Finish compute of page crossing. */
@@ -1807,15 +1827,6 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
mmu_watch_or_dirty(cpu, &l->page[1], type, ra);
}

//// --- Begin LibAFL code ---

// if (type == MMU_DATA_STORE) {
syx_snapshot_dirty_list_add_hostaddr(l->page[0].haddr);
syx_snapshot_dirty_list_add_hostaddr(l->page[1].haddr);
// }

//// --- End LibAFL code ---

/*
* Since target/sparc is the only user of TLB_BSWAP, and all
* Sparc accesses are aligned, any treatment across two pages
@@ -1911,14 +1922,13 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
full = &cpu->neg.tlb.d[mmu_idx].fulltlb[index];

//// --- Begin LibAFL code ---

syx_snapshot_dirty_list_add_hostaddr(hostaddr);

//// --- End LibAFL code ---

if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
notdirty_write(cpu, addr, size, full, retaddr);
//// --- Begin LibAFL code ---

SYX_DEBUG("atomic_mmu_lookup %llx %llx\n", (unsigned long long)addr, (unsigned long long)(addr + full->xlat_section));
syx_snapshot_dirty_list_add_hostaddr(hostaddr);
//// --- End LibAFL code ---
}

if (unlikely(tlb_addr & TLB_FORCE_SLOW)) {
8 changes: 3 additions & 5 deletions hw/core/cpu-system.c
@@ -225,11 +225,9 @@ static int cpu_common_post_load(void *opaque, int version_id)
//tb_flush(cpu);
//// --- Begin LibAFL code ---

// flushing the TBs every restore makes it really slow
// TODO handle writes to X code with specific calls to tb_invalidate_phys_addr
if (!libafl_devices_is_restoring()) {
tb_flush(cpu);
}
// Only invalidate the per-CPU virtual jump (TB lookup) cache here.
// Note: the global TB cache will be invalidated by the SYX snapshot code.
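// (tcg_flush_jmp_cache() only clears this vCPU's virtual-address-to-TB
// lookup table; the translated blocks themselves remain valid.)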
tcg_flush_jmp_cache(cpu);

//// --- End LibAFL code ---
}
4 changes: 4 additions & 0 deletions include/exec/cputlb.h
@@ -152,6 +152,10 @@ void tlb_flush_all_cpus_synced(CPUState *src_cpu);
void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr,
uint16_t idxmap);

//// --- Begin LibAFL code ---
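/* Queue a TLB flush on every vCPU; used by the SYX snapshot code when it
 * starts tracking dirty pages. */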
void tlb_flush_all_cpus(void);
//// --- End LibAFL code ---

/**
* tlb_flush_page_by_mmuidx_all_cpus_synced:
* @cpu: Originating CPU of the flush
1 change: 1 addition & 0 deletions include/libafl/syx-snapshot/syx-snapshot.h
@@ -12,6 +12,7 @@

#include "device-save.h"
#include "syx-cow-cache.h"
#include "libafl/syx-misc.h"

#define SYX_SNAPSHOT_COW_CACHE_DEFAULT_CHUNK_SIZE 64
#define SYX_SNAPSHOT_COW_CACHE_DEFAULT_MAX_BLOCKS (1024 * 1024)
45 changes: 38 additions & 7 deletions libafl/syx-snapshot/syx-snapshot.c
@@ -5,6 +5,8 @@

#include "exec/ramlist.h"
#include "exec/ram_addr.h"
#include "exec/exec-all.h"
#include "exec/address-spaces.h"

#include "libafl/syx-snapshot/syx-snapshot.h"
#include "libafl/syx-snapshot/device-save.h"
@@ -96,6 +98,22 @@ static void root_restore_check_memory_rb(gpointer rb_idstr_hash,
static SyxSnapshotIncrement*
syx_snapshot_increment_free(SyxSnapshotIncrement* increment);

// Mark all RAM as clean (not dirty) so that only writes made from this
// point on show up in the dirty bitmap.
static void all_ram_notdirty(void)
{
    MemoryRegion *sysmem, *subregion, *next;

    sysmem = get_system_memory();
    QTAILQ_FOREACH_SAFE(subregion, &sysmem->subregions, subregions_link, next)
    {
        if (subregion->ram) {
#ifdef SYX_SNAPSHOT_DEBUG
            printf("memory_region_reset_dirty: %" HWADDR_PRIx " %" PRIx64 "\n",
                   subregion->addr, memory_region_size(subregion));
#endif
            memory_region_reset_dirty(subregion, 0, memory_region_size(subregion),
                                      DIRTY_MEMORY_MIGRATION);
        }
    }
}

static RAMBlock* ramblock_lookup(gpointer rb_idstr_hash)
{
RAMBlock* block;
@@ -171,20 +189,27 @@ SyxSnapshot* syx_snapshot_new(bool track, bool is_active_bdrv_cache,
snapshot->rbs_dirty_list =
g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL,
(GDestroyNotify)g_hash_table_remove_all);
snapshot->bdrvs_cow_cache = syx_cow_cache_new();

if (is_active_bdrv_cache) {
syx_cow_cache_move(snapshot->bdrvs_cow_cache,
&syx_snapshot_state.before_fuzz_cache);
syx_snapshot_state.active_bdrv_cache_snapshot = snapshot;
// reuse the cache of writes made BEFORE fuzzing started
snapshot->bdrvs_cow_cache = syx_snapshot_state.before_fuzz_cache;
syx_snapshot_state.before_fuzz_cache = NULL;
} else {
syx_cow_cache_push_layer(snapshot->bdrvs_cow_cache,
SYX_SNAPSHOT_COW_CACHE_DEFAULT_CHUNK_SIZE,
SYX_SNAPSHOT_COW_CACHE_DEFAULT_MAX_BLOCKS);
snapshot->bdrvs_cow_cache = syx_cow_cache_new();
}
syx_cow_cache_push_layer(snapshot->bdrvs_cow_cache,
SYX_SNAPSHOT_COW_CACHE_DEFAULT_CHUNK_SIZE,
SYX_SNAPSHOT_COW_CACHE_DEFAULT_MAX_BLOCKS);
syx_snapshot_state.active_bdrv_cache_snapshot = snapshot;

if (track) {
syx_snapshot_track(&syx_snapshot_state.tracked_snapshots, snapshot);

// make sure all new writes are caught:
// with an already-filled TLB, some writes would otherwise be missed
tlb_flush_all_cpus();

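// start from a clean dirty bitmap: only pages written after this point
// should be recorded as dirty for this snapshot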
all_ram_notdirty();
}

syx_snapshot_state.is_enabled = true;
@@ -623,6 +648,11 @@ static void root_restore_rb_page(gpointer offset_within_rb, gpointer _unused,
memcpy(host_rb_restore, host_snapshot_rb_restore,
syx_snapshot_state.page_size);
// TODO: manage special case of TSEG.

// Invalidate any TBs translated from the restored page
tb_invalidate_phys_range(rb->offset + (ram_addr_t)offset_within_rb,
rb->offset + (ram_addr_t)offset_within_rb +
syx_snapshot_state.page_size);
}

static void root_restore_rb(gpointer rb_idstr_hash,
Expand Down Expand Up @@ -735,6 +765,7 @@ void syx_snapshot_root_restore(SyxSnapshot* snapshot)
}

syx_snapshot_dirty_list_flush(snapshot);
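// clear the RAM dirty flags as well, so dirty tracking starts fresh for the next run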
all_ram_notdirty();

if (must_unlock_bql) {
bql_unlock();