|
71 | 71 |
|
72 | 72 | __thread int cur_block_is_good;
|
73 | 73 |
|
/* Lazily resolve the file descriptor used for logging block-instability
 * tracking information.
 *
 * The log path is read from the AFL_QEMU_TRACK_UNSTABLE environment
 * variable; when it is unset (the common case) tracking is disabled and
 * -1 is returned.  The result is computed once and cached for the
 * lifetime of the process, and a banner line is written on first
 * successful open.
 *
 * Returns: a writable fd (> 0) when tracking is enabled, -1 otherwise.
 * Callers test `> 0`, so an fd of 0 would be treated as disabled; in
 * practice QEMU always holds stdin, so open() cannot return 0 here.
 *
 * NOTE(review): the lazy init is not thread-safe; presumably this is
 * only reached from the serialized TCG translation/execution path --
 * confirm before calling it from other threads. */
static int afl_track_unstable_log_fd(void) {

  static bool initialized = false;
  static int  track_fd = -1;

  if (!initialized) {

    char *fname = getenv("AFL_QEMU_TRACK_UNSTABLE");
    if (fname != NULL) {

      /* S_IWUSR is required in addition to S_IRUSR: with mode 0400 the
         first run creates the file fine, but every subsequent run fails
         to reopen it O_WRONLY (EACCES). */
      track_fd = open(fname, O_WRONLY | O_APPEND | O_CREAT,
                      S_IRUSR | S_IWUSR);

    }

    initialized = true;
    if (track_fd > 0) dprintf(track_fd, "QEMU UNSTABLE TRACKING ENABLED\n");

  }

  return track_fd;

}
| 87 | + |
74 | 88 | void HELPER(afl_maybe_log)(target_ulong cur_loc) {
|
75 | 89 |
|
76 |
| - register uintptr_t afl_idx = cur_loc ^ afl_prev_loc; |
| 90 | + /* If we are tracking fuzzing instability in QEMU, then we simply use the |
| 91 | + block id when updating the coverage map (rather than combining it with the |
| 92 | + id of the previous block. Therefore when afl-fuzz writes the var_bytes |
| 93 | + entries in fuzzer_stats, they actually just contain block ids rather than |
| 94 | + edge ids. */ |
| 95 | + if (unlikely(afl_track_unstable_log_fd() > 0)) { |
77 | 96 |
|
78 |
| - INC_AFL_AREA(afl_idx); |
| 97 | + register uintptr_t afl_idx = cur_loc; |
| 98 | + INC_AFL_AREA(afl_idx); |
79 | 99 |
|
80 |
| - // afl_prev_loc = ((cur_loc & (MAP_SIZE - 1) >> 1)) | |
81 |
| - // ((cur_loc & 1) << ((int)ceil(log2(MAP_SIZE)) -1)); |
82 |
| - afl_prev_loc = cur_loc >> 1; |
| 100 | + } else { |
| 101 | + register uintptr_t afl_idx = cur_loc ^ afl_prev_loc; |
83 | 102 |
|
| 103 | + INC_AFL_AREA(afl_idx); |
| 104 | + |
| 105 | + // afl_prev_loc = ((cur_loc & (MAP_SIZE - 1) >> 1)) | |
| 106 | + // ((cur_loc & 1) << ((int)ceil(log2(MAP_SIZE)) -1)); |
| 107 | + afl_prev_loc = cur_loc >> 1; |
| 108 | + } |
84 | 109 | }
|
85 | 110 |
|
86 | 111 | static target_ulong pc_hash(target_ulong x) {
|
@@ -1930,7 +1955,7 @@ TranslationBlock *afl_gen_edge(CPUState *cpu, unsigned long afl_id)
|
1930 | 1955 | tcg_func_start(tcg_ctx);
|
1931 | 1956 |
|
1932 | 1957 | tcg_ctx->cpu = env_cpu(env);
|
1933 |
| - |
| 1958 | + |
1934 | 1959 | target_ulong afl_loc = afl_id & (MAP_SIZE -1);
|
1935 | 1960 | //*afl_dynamic_size = MAX(*afl_dynamic_size, afl_loc);
|
1936 | 1961 | TCGv tmp0 = tcg_const_tl(afl_loc);
|
@@ -2075,6 +2100,17 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
|
2075 | 2100 |
|
2076 | 2101 | trace_translate_block(tb, tb->pc, tb->tc.ptr);
|
2077 | 2102 |
|
| 2103 | + /* If we are tracking block instability, then since afl-fuzz will log the ids |
| 2104 | + of the unstable blocks, in fuzzer_stats, we must log these alongside the |
| 2105 | + instruction pointer so that the user can associate these back with the |
| 2106 | + actual binary */ |
| 2107 | + int track_fd = afl_track_unstable_log_fd(); |
| 2108 | + if (unlikely(track_fd > 0)) { |
| 2109 | + uintptr_t block_id = (uintptr_t)(afl_hash_ip((uint64_t)pc)); |
| 2110 | + block_id &= (MAP_SIZE - 1); |
| 2111 | + dprintf(track_fd, "BLOCK ID: 0x%016" PRIx64 ", PC: 0x%016zx-0x%016zx\n", block_id, pc, pc + tb->size); |
| 2112 | + } |
| 2113 | + |
2078 | 2114 | /* generate machine code */
|
2079 | 2115 | tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
|
2080 | 2116 | tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;
|
|
0 commit comments