Skip to content

Commit eb44ea6

Browse files
committed
KVM: SVM: Move x2AVIC MSR interception helper to avic.c
Move svm_set_x2apic_msr_interception() to avic.c as it's only relevant when x2AVIC is enabled/supported and only called by AVIC code. In addition to scoping AVIC code to avic.c, this will allow burying the global x2avic_enabled variable in avic. Opportunistically rename the helper to explicitly scope it to "avic". No functional change intended. Reviewed-by: Naveen N Rao (AMD) <[email protected]> Tested-by: Naveen N Rao (AMD) <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Sean Christopherson <[email protected]>
1 parent 44bfe1f commit eb44ea6

File tree

3 files changed

+54
-53
lines changed

3 files changed

+54
-53
lines changed

arch/x86/kvm/svm/avic.c

Lines changed: 54 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -79,6 +79,57 @@ static bool next_vm_id_wrapped = 0;
7979
static DEFINE_SPINLOCK(svm_vm_data_hash_lock);
8080
bool x2avic_enabled;
8181

82+
83+
static void avic_set_x2apic_msr_interception(struct vcpu_svm *svm,
84+
bool intercept)
85+
{
86+
static const u32 x2avic_passthrough_msrs[] = {
87+
X2APIC_MSR(APIC_ID),
88+
X2APIC_MSR(APIC_LVR),
89+
X2APIC_MSR(APIC_TASKPRI),
90+
X2APIC_MSR(APIC_ARBPRI),
91+
X2APIC_MSR(APIC_PROCPRI),
92+
X2APIC_MSR(APIC_EOI),
93+
X2APIC_MSR(APIC_RRR),
94+
X2APIC_MSR(APIC_LDR),
95+
X2APIC_MSR(APIC_DFR),
96+
X2APIC_MSR(APIC_SPIV),
97+
X2APIC_MSR(APIC_ISR),
98+
X2APIC_MSR(APIC_TMR),
99+
X2APIC_MSR(APIC_IRR),
100+
X2APIC_MSR(APIC_ESR),
101+
X2APIC_MSR(APIC_ICR),
102+
X2APIC_MSR(APIC_ICR2),
103+
104+
/*
105+
* Note! Always intercept LVTT, as TSC-deadline timer mode
106+
* isn't virtualized by hardware, and the CPU will generate a
107+
* #GP instead of a #VMEXIT.
108+
*/
109+
X2APIC_MSR(APIC_LVTTHMR),
110+
X2APIC_MSR(APIC_LVTPC),
111+
X2APIC_MSR(APIC_LVT0),
112+
X2APIC_MSR(APIC_LVT1),
113+
X2APIC_MSR(APIC_LVTERR),
114+
X2APIC_MSR(APIC_TMICT),
115+
X2APIC_MSR(APIC_TMCCT),
116+
X2APIC_MSR(APIC_TDCR),
117+
};
118+
int i;
119+
120+
if (intercept == svm->x2avic_msrs_intercepted)
121+
return;
122+
123+
if (!x2avic_enabled)
124+
return;
125+
126+
for (i = 0; i < ARRAY_SIZE(x2avic_passthrough_msrs); i++)
127+
svm_set_intercept_for_msr(&svm->vcpu, x2avic_passthrough_msrs[i],
128+
MSR_TYPE_RW, intercept);
129+
130+
svm->x2avic_msrs_intercepted = intercept;
131+
}
132+
82133
static void avic_activate_vmcb(struct vcpu_svm *svm)
83134
{
84135
struct vmcb *vmcb = svm->vmcb01.ptr;
@@ -99,7 +150,7 @@ static void avic_activate_vmcb(struct vcpu_svm *svm)
99150
vmcb->control.int_ctl |= X2APIC_MODE_MASK;
100151
vmcb->control.avic_physical_id |= X2AVIC_MAX_PHYSICAL_ID;
101152
/* Disabling MSR intercept for x2APIC registers */
102-
svm_set_x2apic_msr_interception(svm, false);
153+
avic_set_x2apic_msr_interception(svm, false);
103154
} else {
104155
/*
105156
* Flush the TLB, the guest may have inserted a non-APIC
@@ -110,7 +161,7 @@ static void avic_activate_vmcb(struct vcpu_svm *svm)
110161
/* For xAVIC and hybrid-xAVIC modes */
111162
vmcb->control.avic_physical_id |= AVIC_MAX_PHYSICAL_ID;
112163
/* Enabling MSR intercept for x2APIC registers */
113-
svm_set_x2apic_msr_interception(svm, true);
164+
avic_set_x2apic_msr_interception(svm, true);
114165
}
115166
}
116167

@@ -130,7 +181,7 @@ static void avic_deactivate_vmcb(struct vcpu_svm *svm)
130181
return;
131182

132183
/* Enabling MSR intercept for x2APIC registers */
133-
svm_set_x2apic_msr_interception(svm, true);
184+
avic_set_x2apic_msr_interception(svm, true);
134185
}
135186

136187
/* Note:

arch/x86/kvm/svm/svm.c

Lines changed: 0 additions & 49 deletions
Original file line numberDiff line numberDiff line change
@@ -724,55 +724,6 @@ static void svm_recalc_lbr_msr_intercepts(struct kvm_vcpu *vcpu)
724724
svm_set_intercept_for_msr(vcpu, MSR_IA32_DEBUGCTLMSR, MSR_TYPE_RW, intercept);
725725
}
726726

727-
/*
 * Enable or disable interception of the x2APIC MSRs that x2AVIC handles in
 * hardware.  @intercept == false passes the MSRs through to the guest;
 * @intercept == true restores interception.  No-op when x2AVIC is disabled
 * or the requested state is already in effect.
 */
void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool intercept)
{
	/*
	 * x2APIC MSRs virtualized by x2AVIC.  APIC_LVTT is intentionally not
	 * listed: TSC-deadline timer mode isn't virtualized by hardware, and
	 * the CPU will generate a #GP instead of a #VMEXIT, so LVTT accesses
	 * must always be intercepted.
	 */
	static const u32 x2avic_passthrough_msrs[] = {
		X2APIC_MSR(APIC_ID),
		X2APIC_MSR(APIC_LVR),
		X2APIC_MSR(APIC_TASKPRI),
		X2APIC_MSR(APIC_ARBPRI),
		X2APIC_MSR(APIC_PROCPRI),
		X2APIC_MSR(APIC_EOI),
		X2APIC_MSR(APIC_RRR),
		X2APIC_MSR(APIC_LDR),
		X2APIC_MSR(APIC_DFR),
		X2APIC_MSR(APIC_SPIV),
		X2APIC_MSR(APIC_ISR),
		X2APIC_MSR(APIC_TMR),
		X2APIC_MSR(APIC_IRR),
		X2APIC_MSR(APIC_ESR),
		X2APIC_MSR(APIC_ICR),
		X2APIC_MSR(APIC_ICR2),
		X2APIC_MSR(APIC_LVTTHMR),
		X2APIC_MSR(APIC_LVTPC),
		X2APIC_MSR(APIC_LVT0),
		X2APIC_MSR(APIC_LVT1),
		X2APIC_MSR(APIC_LVTERR),
		X2APIC_MSR(APIC_TMICT),
		X2APIC_MSR(APIC_TMCCT),
		X2APIC_MSR(APIC_TDCR),
	};
	int i;

	/* Skip redundant updates. */
	if (intercept == svm->x2avic_msrs_intercepted)
		return;

	/* Intercepts are only ever relaxed when x2AVIC is supported. */
	if (!x2avic_enabled)
		return;

	for (i = 0; i < ARRAY_SIZE(x2avic_passthrough_msrs); i++)
		svm_set_intercept_for_msr(&svm->vcpu, x2avic_passthrough_msrs[i],
					  MSR_TYPE_RW, intercept);

	/* Remember the current state for the redundancy check above. */
	svm->x2avic_msrs_intercepted = intercept;
}
775-
776727
void svm_vcpu_free_msrpm(void *msrpm)
777728
{
778729
__free_pages(virt_to_page(msrpm), get_order(MSRPM_SIZE));

arch/x86/kvm/svm/svm.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -703,7 +703,6 @@ void svm_set_gif(struct vcpu_svm *svm, bool value);
703703
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
704704
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
705705
int read, int write);
706-
void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool disable);
707706
void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
708707
int trig_mode, int vec);
709708

0 commit comments

Comments (0)