Skip to content

Commit 7f41238

Browse files
yosrym93 authored and opsiff committed
KVM: nSVM: Fix and simplify LBR virtualization handling with nested
commit 8a48214 upstream. The current scheme for handling LBRV when nested is used is very complicated, especially when L1 does not enable LBRV (i.e. does not set LBR_CTL_ENABLE_MASK). To avoid copying LBRs between VMCB01 and VMCB02 on every nested transition, the current implementation switches between using VMCB01 or VMCB02 as the source of truth for the LBRs while L2 is running. If L2 enables LBR, VMCB02 is used as the source of truth. When L2 disables LBR, the LBRs are copied to VMCB01 and VMCB01 is used as the source of truth. This introduces significant complexity, and incorrect behavior in some cases. For example, on a nested #VMEXIT, the LBRs are only copied from VMCB02 to VMCB01 if LBRV is enabled in VMCB01. This is because L2's writes to MSR_IA32_DEBUGCTLMSR to enable LBR are intercepted and propagated to VMCB01 instead of VMCB02. However, LBRV is only enabled in VMCB02 when L2 is running. This means that if L2 enables LBR and exits to L1, the LBRs will not be propagated from VMCB02 to VMCB01, because LBRV is disabled in VMCB01. There is no meaningful difference in CPUID rate in L2 when copying LBRs on every nested transition vs. the current approach, so do the simple and correct thing and always copy LBRs between VMCB01 and VMCB02 on nested transitions (when LBRV is disabled by L1). Drop the conditional LBR copying in __svm_{enable/disable}_lbrv() as it is now unnecessary. Since VMCB02 becomes the only source of truth for LBRs when L2 is running, regardless of LBRV being enabled by L1, drop svm_get_lbr_vmcb() and use svm->vmcb directly in its place.
Fixes: 1d5a1b5 ("KVM: x86: nSVM: correctly virtualize LBR msrs when L2 is running") Cc: stable@vger.kernel.org Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev> Link: https://patch.msgid.link/20251108004524.1600006-4-yosry.ahmed@linux.dev Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> (cherry picked from commit 35c53e4eae0f737ebfa3e17dc47251c6b805359b) Signed-off-by: Wentao Guan <guanwentao@uniontech.com>
1 parent e0176de commit 7f41238

File tree

2 files changed

+17
-50
lines changed

2 files changed

+17
-50
lines changed

arch/x86/kvm/svm/nested.c

Lines changed: 7 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -601,11 +601,10 @@ static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12
601601
*/
602602
svm_copy_lbrs(vmcb02, vmcb12);
603603
vmcb02->save.dbgctl &= ~DEBUGCTL_RESERVED_BITS;
604-
svm_update_lbrv(&svm->vcpu);
605-
606-
} else if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
604+
} else {
607605
svm_copy_lbrs(vmcb02, vmcb01);
608606
}
607+
svm_update_lbrv(&svm->vcpu);
609608
}
610609

611610
static inline bool is_evtinj_soft(u32 evtinj)
@@ -731,11 +730,7 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
731730
svm->soft_int_next_rip = vmcb12_rip;
732731
}
733732

734-
vmcb02->control.virt_ext = vmcb01->control.virt_ext &
735-
LBR_CTL_ENABLE_MASK;
736-
if (guest_can_use(vcpu, X86_FEATURE_LBRV))
737-
vmcb02->control.virt_ext |=
738-
(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK);
733+
/* LBR_CTL_ENABLE_MASK is controlled by svm_update_lbrv() */
739734

740735
if (!nested_vmcb_needs_vls_intercept(svm))
741736
vmcb02->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
@@ -1066,13 +1061,12 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
10661061
kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
10671062

10681063
if (unlikely(guest_can_use(vcpu, X86_FEATURE_LBRV) &&
1069-
(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
1064+
(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK)))
10701065
svm_copy_lbrs(vmcb12, vmcb02);
1071-
svm_update_lbrv(vcpu);
1072-
} else if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
1066+
else
10731067
svm_copy_lbrs(vmcb01, vmcb02);
1074-
svm_update_lbrv(vcpu);
1075-
}
1068+
1069+
svm_update_lbrv(vcpu);
10761070

10771071
if (vnmi) {
10781072
if (vmcb02->control.int_ctl & V_NMI_BLOCKING_MASK)

arch/x86/kvm/svm/svm.c

Lines changed: 10 additions & 37 deletions
Original file line numberDiff line numberDiff line change
@@ -1049,13 +1049,7 @@ static void svm_recalc_lbr_msr_intercepts(struct kvm_vcpu *vcpu)
10491049

10501050
static void __svm_enable_lbrv(struct kvm_vcpu *vcpu)
10511051
{
1052-
struct vcpu_svm *svm = to_svm(vcpu);
1053-
1054-
svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
1055-
1056-
/* Move the LBR msrs to the vmcb02 so that the guest can see them. */
1057-
if (is_guest_mode(vcpu))
1058-
svm_copy_lbrs(svm->vmcb, svm->vmcb01.ptr);
1052+
to_svm(vcpu)->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
10591053
}
10601054

10611055
void svm_enable_lbrv(struct kvm_vcpu *vcpu)
@@ -1066,36 +1060,15 @@ void svm_enable_lbrv(struct kvm_vcpu *vcpu)
10661060

10671061
static void __svm_disable_lbrv(struct kvm_vcpu *vcpu)
10681062
{
1069-
struct vcpu_svm *svm = to_svm(vcpu);
1070-
10711063
KVM_BUG_ON(sev_es_guest(vcpu->kvm), vcpu->kvm);
1072-
1073-
svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
1074-
1075-
/*
1076-
* Move the LBR msrs back to the vmcb01 to avoid copying them
1077-
* on nested guest entries.
1078-
*/
1079-
if (is_guest_mode(vcpu))
1080-
svm_copy_lbrs(svm->vmcb01.ptr, svm->vmcb);
1081-
}
1082-
1083-
static struct vmcb *svm_get_lbr_vmcb(struct vcpu_svm *svm)
1084-
{
1085-
/*
1086-
* If LBR virtualization is disabled, the LBR MSRs are always kept in
1087-
* vmcb01. If LBR virtualization is enabled and L1 is running VMs of
1088-
* its own, the MSRs are moved between vmcb01 and vmcb02 as needed.
1089-
*/
1090-
return svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK ? svm->vmcb :
1091-
svm->vmcb01.ptr;
1064+
to_svm(vcpu)->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
10921065
}
10931066

10941067
void svm_update_lbrv(struct kvm_vcpu *vcpu)
10951068
{
10961069
struct vcpu_svm *svm = to_svm(vcpu);
10971070
bool current_enable_lbrv = svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK;
1098-
bool enable_lbrv = (svm_get_lbr_vmcb(svm)->save.dbgctl & DEBUGCTLMSR_LBR) ||
1071+
bool enable_lbrv = (svm->vmcb->save.dbgctl & DEBUGCTLMSR_LBR) ||
10991072
(is_guest_mode(vcpu) && guest_can_use(vcpu, X86_FEATURE_LBRV) &&
11001073
(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK));
11011074

@@ -2964,19 +2937,19 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
29642937
msr_info->data = svm->tsc_aux;
29652938
break;
29662939
case MSR_IA32_DEBUGCTLMSR:
2967-
msr_info->data = svm_get_lbr_vmcb(svm)->save.dbgctl;
2940+
msr_info->data = svm->vmcb->save.dbgctl;
29682941
break;
29692942
case MSR_IA32_LASTBRANCHFROMIP:
2970-
msr_info->data = svm_get_lbr_vmcb(svm)->save.br_from;
2943+
msr_info->data = svm->vmcb->save.br_from;
29712944
break;
29722945
case MSR_IA32_LASTBRANCHTOIP:
2973-
msr_info->data = svm_get_lbr_vmcb(svm)->save.br_to;
2946+
msr_info->data = svm->vmcb->save.br_to;
29742947
break;
29752948
case MSR_IA32_LASTINTFROMIP:
2976-
msr_info->data = svm_get_lbr_vmcb(svm)->save.last_excp_from;
2949+
msr_info->data = svm->vmcb->save.last_excp_from;
29772950
break;
29782951
case MSR_IA32_LASTINTTOIP:
2979-
msr_info->data = svm_get_lbr_vmcb(svm)->save.last_excp_to;
2952+
msr_info->data = svm->vmcb->save.last_excp_to;
29802953
break;
29812954
case MSR_VM_HSAVE_PA:
29822955
msr_info->data = svm->nested.hsave_msr;
@@ -3255,10 +3228,10 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
32553228
if (data & DEBUGCTL_RESERVED_BITS)
32563229
return 1;
32573230

3258-
if (svm_get_lbr_vmcb(svm)->save.dbgctl == data)
3231+
if (svm->vmcb->save.dbgctl == data)
32593232
break;
32603233

3261-
svm_get_lbr_vmcb(svm)->save.dbgctl = data;
3234+
svm->vmcb->save.dbgctl = data;
32623235
vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
32633236
svm_update_lbrv(vcpu);
32643237
break;

0 commit comments

Comments
 (0)