@@ -132,8 +132,10 @@ static int64_t decode_sleb128(const uint8_t **pp)
    line.  The seed for the first line is { tb->pc, 0..., tb->tc.ptr }.
    That is, the first column is seeded with the guest pc, the last column
    with the host pc, and the middle columns with zeros.  */
-
-static int encode_search(TranslationBlock *tb, uint8_t *block)
+/*
+static
+*/
+int encode_search(TranslationBlock *tb, uint8_t *block)
 {
     uint8_t *highwater = tcg_ctx->code_gen_highwater;
     uint64_t *insn_data = tcg_ctx->gen_insn_data;
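Two things are going on around encode_search here. First, the hunk comments out "static", giving the function external linkage, presumably so LibAFL code outside this file can keep calling it. Second, the surrounding comment describes the search-table layout: each line of the table is stored as sleb128 deltas against the previous line, with the first line seeded by { tb->pc, 0..., tb->tc.ptr }. A minimal decoding sketch built on the decode_sleb128 named in the hunk header; the helper decode_search_line is illustrative and not part of the tree:

    /* Illustrative: rebuild one table line in place by applying the
     * sleb128 deltas to the previous line's values.  decode_sleb128()
     * is the real helper named in the hunk header above;
     * decode_search_line() is not. */
    static void decode_search_line(const uint8_t **pp, uint64_t *line,
                                   int ncols)
    {
        for (int i = 0; i < ncols; i++) {
            line[i] += decode_sleb128(pp);  /* delta vs. previous line */
        }
    }

Seed line[] with { tb->pc, 0, ..., tb->tc.ptr } and call this once per guest instruction to walk the table forward until the host-pc column covers the address of interest.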
@@ -299,208 +301,6 @@ static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,
 
     return tcg_gen_code(tcg_ctx, tb, pc);
 }
-
-/* Called with mmap_lock held for user mode emulation. */
-TranslationBlock *libafl_gen_edge(CPUState *cpu, target_ulong src_block,
-                                  target_ulong dst_block, int exit_n,
-                                  target_ulong cs_base, uint32_t flags,
-                                  int cflags)
-{
-    CPUArchState *env = cpu_env(cpu);
-    TranslationBlock *tb;
-    tb_page_addr_t phys_pc;
-    tcg_insn_unit *gen_code_buf;
-    int gen_code_size, search_size, max_insns;
-    int64_t ti;
-    void *host_pc;
-
-    // edge hooks generation callbacks
-    // early check if it should be skipped or not
-    bool no_exec_hook = libafl_qemu_hook_edge_gen(src_block, dst_block);
-    if (no_exec_hook) {
-        // no exec hooks to run for edges, no point in generating a TB
-        return NULL;
-    }
-
-    target_ulong pc = src_block ^ reverse_bits((target_ulong)exit_n);
-
-    assert_memory_lock();
-    qemu_thread_jit_write();
-
-    // TODO: this (get_page_addr_code_hostp) is a bottleneck in systemmode, investigate why
-    phys_pc = get_page_addr_code_hostp(env, src_block, &host_pc);
-    phys_pc ^= reverse_bits((tb_page_addr_t)exit_n);
-
-    // if (phys_pc == -1) {
-    //     /* Generate a one-shot TB with 1 insn in it */
-    //     cflags = (cflags & ~CF_COUNT_MASK) | 1;
-    // }
-
-    /* Generate a one-shot TB with max 16 insn in it */
-    cflags = (cflags & ~CF_COUNT_MASK) | LIBAFL_MAX_INSNS;
-    QEMU_BUILD_BUG_ON(LIBAFL_MAX_INSNS > TCG_MAX_INSNS);
-
-    max_insns = cflags & CF_COUNT_MASK;
-    if (max_insns == 0) {
-        max_insns = TCG_MAX_INSNS;
-    }
-    QEMU_BUILD_BUG_ON(CF_COUNT_MASK + 1 != TCG_MAX_INSNS);
-
- buffer_overflow:
-    assert_no_pages_locked();
-    tb = tcg_tb_alloc(tcg_ctx);
-    if (unlikely(!tb)) {
-        /* flush must be done */
-        tb_flush(cpu);
-        mmap_unlock();
-        /* Make the execution loop process the flush as soon as possible. */
-        cpu->exception_index = EXCP_INTERRUPT;
-        cpu_loop_exit(cpu);
-    }
-
-    gen_code_buf = tcg_ctx->code_gen_ptr;
-    tb->tc.ptr = tcg_splitwx_to_rx(gen_code_buf);
-
-    if (!(cflags & CF_PCREL)) {
-        tb->pc = pc;
-    }
-
-    tb->cs_base = cs_base;
-    tb->flags = flags;
-    tb->cflags = cflags | CF_IS_EDGE;
-    tb_set_page_addr0(tb, phys_pc);
-    tb_set_page_addr1(tb, -1);
-    // if (phys_pc != -1) {
-    //     tb_lock_page0(phys_pc);
-    // }
-
-    tcg_ctx->gen_tb = tb;
-    tcg_ctx->addr_type = TARGET_LONG_BITS == 32 ? TCG_TYPE_I32 : TCG_TYPE_I64;
-#ifdef CONFIG_SOFTMMU
-    tcg_ctx->page_bits = TARGET_PAGE_BITS;
-    tcg_ctx->page_mask = TARGET_PAGE_MASK;
-    tcg_ctx->tlb_dyn_max_bits = CPU_TLB_DYN_MAX_BITS;
-#endif
-    tcg_ctx->insn_start_words = TARGET_INSN_START_WORDS;
-#ifdef TCG_GUEST_DEFAULT_MO
-    tcg_ctx->guest_mo = TCG_GUEST_DEFAULT_MO;
-#else
-    tcg_ctx->guest_mo = TCG_MO_ALL;
-#endif
-
- restart_translate:
-    trace_translate_block(tb, pc, tb->tc.ptr);
-
-    gen_code_size = libafl_setjmp_gen_code(env, tb, pc, host_pc, &max_insns, &ti);
-    if (unlikely(gen_code_size < 0)) {
-        switch (gen_code_size) {
-        case -1:
-            /*
-             * Overflow of code_gen_buffer, or the current slice of it.
-             *
-             * TODO: We don't need to re-do gen_intermediate_code, nor
-             * should we re-do the tcg optimization currently hidden
-             * inside tcg_gen_code.  All that should be required is to
-             * flush the TBs, allocate a new TB, re-initialize it per
-             * above, and re-do the actual code generation.
-             */
-            qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
-                          "Restarting code generation for "
-                          "code_gen_buffer overflow\n");
-            tb_unlock_pages(tb);
-            tcg_ctx->gen_tb = NULL;
-            goto buffer_overflow;
-
-        case -2:
-            assert(false && "This should never happen for edge code. There must be a bug.");
-            /*
-             * The code generated for the TranslationBlock is too large.
-             * The maximum size allowed by the unwind info is 64k.
-             * There may be stricter constraints from relocations
-             * in the tcg backend.
-             *
-             * Try again with half as many insns as we attempted this time.
-             * If a single insn overflows, there's a bug somewhere...
-             */
-            assert(max_insns > 1);
-            max_insns /= 2;
-            qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
-                          "Restarting code generation with "
-                          "smaller translation block (max %d insns)\n",
-                          max_insns);
-
-            /*
-             * The half-sized TB may not cross pages.
-             * TODO: Fix all targets that cross pages except with
-             * the first insn, at which point this can't be reached.
-             */
-            // phys_p2 = tb_page_addr1(tb);
-            // if (unlikely(phys_p2 != -1)) {
-            //     tb_unlock_page1(phys_pc, phys_p2);
-            //     tb_set_page_addr1(tb, -1);
-            // }
-            goto restart_translate;
-
-        case -3:
-            /*
-             * We had a page lock ordering problem.  In order to avoid
-             * deadlock we had to drop the lock on page0, which means
-             * that everything we translated so far is compromised.
-             * Restart with locks held on both pages.
-             */
-            qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
-                          "Restarting code generation with re-locked pages");
-            goto restart_translate;
-
-        default:
-            g_assert_not_reached();
-        }
-    }
-    tcg_ctx->gen_tb = NULL;
-
-    search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
-    if (unlikely(search_size < 0)) {
-        tb_unlock_pages(tb);
-        goto buffer_overflow;
-    }
-    tb->tc.size = gen_code_size;
-
-    /*
-     * For CF_PCREL, attribute all executions of the generated code
-     * to its first mapping.
-     */
-    perf_report_code(pc, tb, tcg_splitwx_to_rx(gen_code_buf));
-
-    qatomic_set(&tcg_ctx->code_gen_ptr, (void *)
-                ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
-                         CODE_GEN_ALIGN));
-
-    /* init jump list */
-    qemu_spin_init(&tb->jmp_lock);
-    tb->jmp_list_head = (uintptr_t)NULL;
-    tb->jmp_list_next[0] = (uintptr_t)NULL;
-    tb->jmp_list_next[1] = (uintptr_t)NULL;
-    tb->jmp_dest[0] = (uintptr_t)NULL;
-    tb->jmp_dest[1] = (uintptr_t)NULL;
-
-    /* init original jump addresses which have been set during tcg_gen_code() */
-    if (tb->jmp_reset_offset[0] != TB_JMP_OFFSET_INVALID) {
-        tb_reset_jump(tb, 0);
-    }
-    if (tb->jmp_reset_offset[1] != TB_JMP_OFFSET_INVALID) {
-        tb_reset_jump(tb, 1);
-    }
-
-    assert_no_pages_locked();
-
-#ifndef CONFIG_USER_ONLY
-    tb->page_addr[0] = tb->page_addr[1] = -1;
-#endif
-    return tb;
-}
-
-//// --- End LibAFL code ---
-
 /* Called with mmap_lock held for user mode emulation. */
 TranslationBlock *tb_gen_code(CPUState *cpu,
                               vaddr pc, uint64_t cs_base,
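The heart of the removed libafl_gen_edge is its synthetic program counter: pc = src_block ^ reverse_bits(exit_n). Bit-reversing the small exit index turns exit 1, 2, 3, ... into flips of the topmost address bits, so the per-exit edge TBs get distinct lookup keys that real guest code is unlikely to occupy; the CF_IS_EDGE cflag set above separates them from ordinary TBs in any case. A sketch of the idea, with a generic 64-bit reversal standing in for the fork's actual reverse_bits helper (defined elsewhere in the tree, so the body below is an assumption):

    #include <stdint.h>

    /* Illustrative bit reversal: 0 -> 0, 1 -> MSB, 2 -> MSB >> 1, ...
     * Stand-in for the fork's reverse_bits(), not its real definition. */
    static uint64_t reverse_bits64(uint64_t x)
    {
        uint64_t r = 0;
        for (int i = 0; i < 64; i++, x >>= 1) {
            r = (r << 1) | (x & 1);
        }
        return r;
    }

    /* Synthetic lookup key for the edge TB reached via exit exit_n of
     * the block at src_block, matching the removed pc computation. */
    static uint64_t edge_pc(uint64_t src_block, int exit_n)
    {
        return src_block ^ reverse_bits64((uint64_t)exit_n);
    }

Note that the removed code applied the same XOR to phys_pc, skewing the physical-address key identically, and that the real casts are to target_ulong and tb_page_addr_t, so the reversal width tracks the target's word size rather than a fixed 64 bits.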