@@ -453,31 +453,31 @@ void tcg_gen_qemu_st_i64_chk(TCGv_i64 val, TCGTemp *addr, TCGArg idx,
453453 * does not require 16-byte atomicity, and it would be advantageous
454454 * to avoid a call to a helper function.
455455 */
456- static bool use_two_i64_for_i128(MemOp mop)
457- {
458- #ifdef CONFIG_SOFTMMU
459- /* Two softmmu tlb lookups is larger than one function call. */
460- return false;
461- #else
462- /*
463- * For user-only, two 64-bit operations may well be smaller than a call.
464- * Determine if that would be legal for the requested atomicity.
465- */
466-     switch (mop & MO_ATOM_MASK) {
467-     case MO_ATOM_NONE:
468-     case MO_ATOM_IFALIGN_PAIR:
469-         return true;
470-     case MO_ATOM_IFALIGN:
471-     case MO_ATOM_SUBALIGN:
472-     case MO_ATOM_WITHIN16:
473-     case MO_ATOM_WITHIN16_PAIR:
474-         /* In a serialized context, no atomicity is required. */
475-         return !(tcg_ctx->gen_tb->cflags & CF_PARALLEL);
476-     default:
477-         g_assert_not_reached();
478- }
479- #endif
480- }
456+ // static bool use_two_i64_for_i128(MemOp mop)
457+ // {
458+ // #ifdef CONFIG_SOFTMMU
459+ // /* Two softmmu tlb lookups is larger than one function call. */
460+ // return false;
461+ // #else
462+ // /*
463+ // * For user-only, two 64-bit operations may well be smaller than a call.
464+ // * Determine if that would be legal for the requested atomicity.
465+ // */
466+ // switch (mop & MO_ATOM_MASK) {
467+ // case MO_ATOM_NONE:
468+ // case MO_ATOM_IFALIGN_PAIR:
469+ // return true;
470+ // case MO_ATOM_IFALIGN:
471+ // case MO_ATOM_SUBALIGN:
472+ // case MO_ATOM_WITHIN16:
473+ // case MO_ATOM_WITHIN16_PAIR:
474+ // /* In a serialized context, no atomicity is required. */
475+ // return !(tcg_ctx->gen_tb->cflags & CF_PARALLEL);
476+ // default:
477+ // g_assert_not_reached();
478+ // }
479+ // #endif
480+ // }
481481
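
Note: with use_two_i64_for_i128() commented out above and the TCG_TARGET_HAS_qemu_ldst_i128 branch disabled further down, the i128 expanders take the two-i64 split unconditionally. A rough skeleton of the control flow that remains after this patch (illustrative sketch only, not code from the diff):

    /*
     * tcg_gen_qemu_ld_i128_int() / tcg_gen_qemu_st_i128_int() after this patch:
     *
     *   // if (TCG_TARGET_HAS_qemu_ldst_i128 && ...)   -- commented out
     *   // else if (use_two_i64_for_i128(memop))       -- now taken always
     *          concrete i64 access of the low half
     *          gen_helper_sym_{load,store}_guest_i64(...) for the low half
     *          addr_p8 = addr + 8
     *          concrete i64 access of the high half
     *          gen_helper_sym_{load,store}_guest_i64(...) for the high half
     *   // else gen_helper_{ld,st}_i128(...)           -- commented out
     *
     * This appears to forgo 16-byte single-copy atomicity even when the
     * MO_ATOM_* bits would otherwise require it.
     */
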
482482static void canonicalize_memop_i128_as_i64(MemOp ret[2], MemOp orig)
483483{
@@ -551,7 +551,7 @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
551551     tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
552552
553553 /* TODO: For now, force 32-bit hosts to use the helper. */
554-     if (TCG_TARGET_HAS_qemu_ldst_i128 && TCG_TARGET_REG_BITS == 64) {
554+ /* if (TCG_TARGET_HAS_qemu_ldst_i128 && TCG_TARGET_REG_BITS == 64) {
555555 TCGv_i64 lo, hi;
556556 bool need_bswap = false;
557557 MemOpIdx oi = orig_oi;
@@ -577,11 +577,13 @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
577577 tcg_gen_bswap64_i64(lo, lo);
578578 tcg_gen_bswap64_i64(hi, hi);
579579 }
580-     } else if (use_two_i64_for_i128(memop)) {
580+ } else if (use_two_i64_for_i128(memop)) { */
581+ // Always take this path to get symbolic propagation
581582         MemOp mop[2];
582583         TCGTemp *addr_p8;
583584         TCGv_i64 x, y;
584585         bool need_bswap;
586+         TCGv_i64 load_size, mmu_idx;
585587
586588         canonicalize_memop_i128_as_i64(mop, memop);
587589         need_bswap = (mop[0] ^ memop) & MO_BSWAP;
@@ -607,6 +609,14 @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
607609
608610         gen_ldst_i64(opc, x, addr, make_memop_idx(mop[0], idx));
609611
612+ /* Perform the symbolic memory access. Doing so _after_ the concrete
613+ * operation ensures that the target address is in the TLB. */
614+         mmu_idx = tcg_constant_i64(idx);
615+         load_size = tcg_constant_i64(1 << MO_64);
616+         gen_helper_sym_load_guest_i64(tcgv_i64_expr(x), cpu_env,
617+                 temp_tcgv_i64(addr), tcgv_i64_expr(temp_tcgv_i64(addr)),
618+                 load_size, mmu_idx);
619+
610620         if (need_bswap) {
611621             tcg_gen_bswap64_i64(x, x);
612622 }
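
The helper call above passes the destination expression temp for x first, then cpu_env, the concrete address, the address expression, the access size in bytes (1 << MO_64 == 8), and the MMU index. Its DEF_HELPER line is not part of this hunk; assuming expressions are carried as i64 values, as the tcgv_i64_expr() temps suggest, the declaration would look roughly like this (flags and types are a guess):

    /* Assumed declaration, not shown in this diff: returns the expression
     * for the 8 loaded bytes, given (env, addr, addr_expr, length, mmu_idx). */
    DEF_HELPER_FLAGS_5(sym_load_guest_i64, TCG_CALL_NO_RWG, i64,
                       env, i64, i64, i64, i64)
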
@@ -622,20 +632,27 @@ static void tcg_gen_qemu_ld_i128_int(TCGv_i128 val, TCGTemp *addr,
622632 }
623633
624634         gen_ldst_i64(opc, y, addr_p8, make_memop_idx(mop[1], idx));
635+
636+         /* Perform the symbolic memory access. Doing so _after_ the concrete
637+          * operation ensures that the target address is in the TLB. */
638+         gen_helper_sym_load_guest_i64(tcgv_i64_expr(y), cpu_env,
639+                 temp_tcgv_i64(addr_p8), tcgv_i64_expr(temp_tcgv_i64(addr_p8)),
640+                 load_size, mmu_idx);
641+
625642         tcg_temp_free_internal(addr_p8);
626643
627644         if (need_bswap) {
628645             tcg_gen_bswap64_i64(y, y);
629646 }
630- } else {
647+ /* } else {
631648 if (tcg_ctx->addr_type == TCG_TYPE_I32) {
632649 ext_addr = tcg_temp_ebb_new_i64();
633650 tcg_gen_extu_i32_i64(ext_addr, temp_tcgv_i32(addr));
634651 addr = tcgv_i64_temp(ext_addr);
635652 }
636653 gen_helper_ld_i128(val, cpu_env, temp_tcgv_i64(addr),
637654 tcg_constant_i32(orig_oi));
638- }
655+ } */
639656
640657     plugin_gen_mem_callbacks(ext_addr, addr, orig_oi, QEMU_PLUGIN_MEM_R);
641658}
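
Worth noting the ordering with respect to byte swapping in the load path above: the symbolic helper runs on the raw value loaded from memory, before any tcg_gen_bswap64_i64(). The expression attached to x/y therefore describes guest-memory byte order; keeping it in sync across the swap relies on the bswap expander being instrumented elsewhere in this fork (assumed, not shown here). Schematically:

    /*
     *   qemu_ld_i64            x, addr      // raw guest bytes
     *   sym_load_guest_i64  -> x_expr       // expression of the raw bytes
     *   bswap64_i64            x, x         // only if need_bswap
     */
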
@@ -661,7 +678,7 @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
661678
662679 /* TODO: For now, force 32-bit hosts to use the helper. */
663680
664-     if (TCG_TARGET_HAS_qemu_ldst_i128 && TCG_TARGET_REG_BITS == 64) {
681+ /* if (TCG_TARGET_HAS_qemu_ldst_i128 && TCG_TARGET_REG_BITS == 64) {
665682 TCGv_i64 lo, hi;
666683 MemOpIdx oi = orig_oi;
667684 bool need_bswap = false;
@@ -689,10 +706,12 @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
689706 tcg_temp_free_i64(lo);
690707 tcg_temp_free_i64(hi);
691708 }
692-     } else if (use_two_i64_for_i128(memop)) {
709+ } else if (use_two_i64_for_i128(memop)) { */
710+ // Always take this path to get symbolic propagation
693711         MemOp mop[2];
694712         TCGTemp *addr_p8;
695713         TCGv_i64 x, y, b = NULL;
714+         TCGv_i64 store_size, mmu_idx;
696715
697716         canonicalize_memop_i128_as_i64(mop, memop);
698717
@@ -718,6 +737,15 @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
718737
719738         gen_ldst_i64(opc, x, addr, make_memop_idx(mop[0], idx));
720739
740+ /* Perform the symbolic memory access. Doing so _after_ the concrete
741+ * operation ensures that the target address is in the TLB. */
742+         mmu_idx = tcg_constant_i64(idx);
743+         store_size = tcg_constant_i64(1 << MO_64);
744+         gen_helper_sym_store_guest_i64(cpu_env,
745+                 x, tcgv_i64_expr(x),
746+                 temp_tcgv_i64(addr), tcgv_i64_expr(temp_tcgv_i64(addr)),
747+                 store_size, mmu_idx);
748+
721749         if (tcg_ctx->addr_type == TCG_TYPE_I32) {
722750             TCGv_i32 t = tcg_temp_ebb_new_i32();
723751             tcg_gen_addi_i32(t, temp_tcgv_i32(addr), 8);
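
The store helper above has no return value: it receives cpu_env, the concrete value and its expression, the concrete address and its expression, the store size (again 8 bytes), and the MMU index. Mirroring the load-side assumption, a plausible declaration (not part of this hunk, exact flags and types may differ) would be:

    /* Assumed declaration, mirroring the call above:
     * (env, value, value_expr, addr, addr_expr, length, mmu_idx). */
    DEF_HELPER_FLAGS_7(sym_store_guest_i64, TCG_CALL_NO_RWG, void,
                       env, i64, i64, i64, i64, i64, i64)
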
@@ -731,20 +759,35 @@ static void tcg_gen_qemu_st_i128_int(TCGv_i128 val, TCGTemp *addr,
731759         if (b) {
732760             tcg_gen_bswap64_i64(b, y);
733761             gen_ldst_i64(opc, b, addr_p8, make_memop_idx(mop[1], idx));
762+
763+ /* Perform the symbolic memory access. Doing so _after_ the concrete
764+ * operation ensures that the target address is in the TLB. */
765+             gen_helper_sym_store_guest_i64(cpu_env,
766+                     b, tcgv_i64_expr(b),
767+                     temp_tcgv_i64(addr_p8), tcgv_i64_expr(temp_tcgv_i64(addr_p8)),
768+                     store_size, mmu_idx);
769+
734770             tcg_temp_free_i64(b);
735771 } else {
736772             gen_ldst_i64(opc, y, addr_p8, make_memop_idx(mop[1], idx));
773+
774+ /* Perform the symbolic memory access. Doing so _after_ the concrete
775+ * operation ensures that the target address is in the TLB. */
776+             gen_helper_sym_store_guest_i64(cpu_env,
777+                     y, tcgv_i64_expr(y),
778+                     temp_tcgv_i64(addr_p8), tcgv_i64_expr(temp_tcgv_i64(addr_p8)),
779+                     store_size, mmu_idx);
737780 }
738781         tcg_temp_free_internal(addr_p8);
739- } else {
782+ /* } else {
740783 if (tcg_ctx->addr_type == TCG_TYPE_I32) {
741784 ext_addr = tcg_temp_ebb_new_i64();
742785 tcg_gen_extu_i32_i64(ext_addr, temp_tcgv_i32(addr));
743786 addr = tcgv_i64_temp(ext_addr);
744787 }
745788 gen_helper_st_i128(cpu_env, temp_tcgv_i64(addr), val,
746789 tcg_constant_i32(orig_oi));
747- }
790+ } */
748791
749792     plugin_gen_mem_callbacks(ext_addr, addr, orig_oi, QEMU_PLUGIN_MEM_W);
750793}
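
In the byte-swapped store case above, the helper is handed b, the already swapped temp, together with its expression, rather than y. The symbolic shadow of guest memory therefore tracks raw byte order on both the load and store sides. Schematically:

    /*
     *   bswap64_i64             b, y         // host order -> memory byte order
     *   qemu_st_i64             b, addr_p8   // concrete store of raw bytes
     *   sym_store_guest_i64(b, b_expr, ...)  // expression of the raw bytes
     */
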