@@ -76,14 +76,14 @@ static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                       sc->base, sc->limit, sc->flags);
 }
 
-static inline bool ctl_has_irq(uint32_t int_ctl)
+static inline bool ctl_has_irq(CPUX86State *env)
 {
     uint32_t int_prio;
     uint32_t tpr;
 
-    int_prio = (int_ctl & V_INTR_PRIO_MASK) >> V_INTR_PRIO_SHIFT;
-    tpr = int_ctl & V_TPR_MASK;
-    return (int_ctl & V_IRQ_MASK) && (int_prio >= tpr);
+    int_prio = (env->int_ctl & V_INTR_PRIO_MASK) >> V_INTR_PRIO_SHIFT;
+    tpr = env->int_ctl & V_TPR_MASK;
+    return (env->int_ctl & V_IRQ_MASK) && (int_prio >= tpr);
 }
 
 static inline bool is_efer_invalid_state(CPUX86State *env)
@@ -121,13 +121,11 @@ static inline bool is_efer_invalid_state(CPUX86State *env)
     return false;
 }
 
-static inline bool virtual_gif_enabled(CPUX86State *env, uint32_t *int_ctl)
+static inline bool virtual_gif_enabled(CPUX86State *env)
 {
     if (likely(env->hflags & HF_GUEST_MASK)) {
-        *int_ctl = x86_ldl_phys(env_cpu(env),
-                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
         return (env->features[FEAT_SVM] & CPUID_SVM_VGIF)
-                    && (*int_ctl & V_GIF_ENABLED_MASK);
+                    && (env->int_ctl & V_GIF_ENABLED_MASK);
     }
     return false;
 }
@@ -139,7 +137,6 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
     target_ulong addr;
    uint64_t nested_ctl;
     uint32_t event_inj;
-    uint32_t int_ctl;
     uint32_t asid;
     uint64_t new_cr0;
     uint64_t new_cr3;
@@ -292,11 +289,10 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
     cpu_x86_update_cr3(env, new_cr3);
     env->cr[2] = x86_ldq_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, save.cr2));
-    int_ctl = x86_ldl_phys(cs,
+    env->int_ctl = x86_ldl_phys(cs,
                        env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
     env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
-    if (int_ctl & V_INTR_MASKING_MASK) {
-        env->v_tpr = int_ctl & V_TPR_MASK;
+    if (env->int_ctl & V_INTR_MASKING_MASK) {
         env->hflags2 |= HF2_VINTR_MASK;
         if (env->eflags & IF_MASK) {
             env->hflags2 |= HF2_HIF_MASK;
@@ -362,7 +358,7 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
 
     env->hflags2 |= HF2_GIF_MASK;
 
-    if (ctl_has_irq(int_ctl)) {
+    if (ctl_has_irq(env)) {
         CPUState *cs = env_cpu(env);
 
         cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
@@ -522,11 +518,8 @@ void helper_stgi(CPUX86State *env)
 {
     cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0, GETPC());
 
-    CPUState *cs = env_cpu(env);
-    uint32_t int_ctl;
-    if (virtual_gif_enabled(env, &int_ctl)) {
-        x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
-                     int_ctl | V_GIF_MASK);
+    if (virtual_gif_enabled(env)) {
+        env->int_ctl |= V_GIF_MASK;
     } else {
         env->hflags2 |= HF2_GIF_MASK;
     }
@@ -536,11 +529,8 @@ void helper_clgi(CPUX86State *env)
 {
     cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0, GETPC());
 
-    CPUState *cs = env_cpu(env);
-    uint32_t int_ctl;
-    if (virtual_gif_enabled(env, &int_ctl)) {
-        x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.int_ctl),
-                     int_ctl & ~V_GIF_MASK);
+    if (virtual_gif_enabled(env)) {
+        env->int_ctl &= ~V_GIF_MASK;
     } else {
         env->hflags2 &= ~HF2_GIF_MASK;
     }
@@ -688,7 +678,6 @@ void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
 void do_vmexit(CPUX86State *env)
 {
     CPUState *cs = env_cpu(env);
-    uint32_t int_ctl;
 
     if (env->hflags & HF_INHIBIT_IRQ_MASK) {
         x86_stl_phys(cs,
@@ -731,16 +720,8 @@ void do_vmexit(CPUX86State *env)
                  env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
     x86_stq_phys(cs,
                  env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
-
-    int_ctl = x86_ldl_phys(cs,
-                           env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
-    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
-    int_ctl |= env->v_tpr & V_TPR_MASK;
-    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
-        int_ctl |= V_IRQ_MASK;
-    }
     x86_stl_phys(cs,
-             env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
+             env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), env->int_ctl);
 
     x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
                  cpu_compute_eflags(env));
@@ -763,6 +744,7 @@ void do_vmexit(CPUX86State *env)
     env->intercept = 0;
     env->intercept_exceptions = 0;
     cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
+    env->int_ctl = 0;
     env->tsc_offset = 0;
 
     env->gdt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
0 commit comments