
Commit 2c5e168

KVM: x86: Rename "governed features" helpers to use "guest_cpu_cap"
As the first step toward replacing KVM's so-called "governed features" framework with a more comprehensive, less poorly named implementation, replace the "kvm_governed_feature" function prefix with "guest_cpu_cap" and rename guest_can_use() to guest_cpu_cap_has().

The "guest_cpu_cap" naming scheme mirrors that of "kvm_cpu_cap", and provides a clearer distinction between guest capabilities, which are KVM controlled (heh, or one might say "governed"), and guest CPUID, which with few exceptions is fully userspace controlled.

Opportunistically rewrite the comment about XSS passthrough for SEV-ES guests to avoid referencing so many functions, as such comments are prone to becoming stale (case in point...).

No functional change intended.

Reviewed-by: Maxim Levitsky <[email protected]>
Reviewed-by: Binbin Wu <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Sean Christopherson <[email protected]>
1 parent 9aa470f commit 2c5e168

File tree: 11 files changed, +59 -60 lines changed
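For orientation before the per-file diffs: the renamed helpers form a small capability-bitmap API. The following is a minimal, stand-alone C model of that pattern, an illustrative sketch rather than the kernel code; the vcpu_model struct, the feature IDs, and the host_cap_has()/guest_cpuid_has_() stubs are stand-ins for vcpu->arch.governed_features, kvm_cpu_cap_has(), and guest_cpuid_has().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in feature IDs; the kernel indexes a fixed set of "governed"
 * features via kvm_governed_feature_index(). */
enum { FEAT_NRIPS, FEAT_LBRV };

struct vcpu_model {
        uint64_t guest_caps;    /* KVM-controlled guest capabilities */
};

/* Stubs for kvm_cpu_cap_has() (what KVM/hardware supports) and
 * guest_cpuid_has() (what userspace put in guest CPUID). */
static bool host_cap_has(int feat)     { return feat == FEAT_NRIPS; }
static bool guest_cpuid_has_(int feat) { (void)feat; return true; }

static void guest_cpu_cap_set(struct vcpu_model *v, int feat)
{
        v->guest_caps |= 1ull << feat;
}

/* Enable a capability only if both KVM and guest CPUID support it. */
static void guest_cpu_cap_check_and_set(struct vcpu_model *v, int feat)
{
        if (host_cap_has(feat) && guest_cpuid_has_(feat))
                guest_cpu_cap_set(v, feat);
}

/* The renamed query, formerly guest_can_use(). */
static bool guest_cpu_cap_has(const struct vcpu_model *v, int feat)
{
        return v->guest_caps & (1ull << feat);
}

int main(void)
{
        struct vcpu_model v = { 0 };

        guest_cpu_cap_check_and_set(&v, FEAT_NRIPS);
        guest_cpu_cap_check_and_set(&v, FEAT_LBRV);

        printf("NRIPS: %d, LBRV: %d\n",
               guest_cpu_cap_has(&v, FEAT_NRIPS),
               guest_cpu_cap_has(&v, FEAT_LBRV));
        return 0;
}

This prints "NRIPS: 1, LBRV: 0": even though the stubbed guest CPUID advertises both features, the check-and-set helper refuses to enable LBRV because the (stubbed) host capability is missing, which is exactly the "governed" behavior the rename preserves.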

arch/x86/kvm/cpuid.c

Lines changed: 1 addition & 1 deletion
@@ -380,7 +380,7 @@ void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 	allow_gbpages = tdp_enabled ? boot_cpu_has(X86_FEATURE_GBPAGES) :
 			guest_cpuid_has(vcpu, X86_FEATURE_GBPAGES);
 	if (allow_gbpages)
-		kvm_governed_feature_set(vcpu, X86_FEATURE_GBPAGES);
+		guest_cpu_cap_set(vcpu, X86_FEATURE_GBPAGES);
 
 	best = kvm_find_cpuid_entry(vcpu, 1);
 	if (best && apic) {

arch/x86/kvm/cpuid.h

Lines changed: 8 additions & 8 deletions
@@ -238,24 +238,24 @@ static __always_inline bool kvm_is_governed_feature(unsigned int x86_feature)
 	return kvm_governed_feature_index(x86_feature) >= 0;
 }
 
-static __always_inline void kvm_governed_feature_set(struct kvm_vcpu *vcpu,
-						     unsigned int x86_feature)
+static __always_inline void guest_cpu_cap_set(struct kvm_vcpu *vcpu,
+					      unsigned int x86_feature)
 {
 	BUILD_BUG_ON(!kvm_is_governed_feature(x86_feature));
 
 	__set_bit(kvm_governed_feature_index(x86_feature),
 		  vcpu->arch.governed_features.enabled);
 }
 
-static __always_inline void kvm_governed_feature_check_and_set(struct kvm_vcpu *vcpu,
-							       unsigned int x86_feature)
+static __always_inline void guest_cpu_cap_check_and_set(struct kvm_vcpu *vcpu,
+							unsigned int x86_feature)
 {
 	if (kvm_cpu_cap_has(x86_feature) && guest_cpuid_has(vcpu, x86_feature))
-		kvm_governed_feature_set(vcpu, x86_feature);
+		guest_cpu_cap_set(vcpu, x86_feature);
 }
 
-static __always_inline bool guest_can_use(struct kvm_vcpu *vcpu,
-					  unsigned int x86_feature)
+static __always_inline bool guest_cpu_cap_has(struct kvm_vcpu *vcpu,
+					      unsigned int x86_feature)
 {
 	BUILD_BUG_ON(!kvm_is_governed_feature(x86_feature));
 
@@ -265,7 +265,7 @@ static __always_inline bool guest_can_use(struct kvm_vcpu *vcpu,
 
 static inline bool kvm_vcpu_is_legal_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
-	if (guest_can_use(vcpu, X86_FEATURE_LAM))
+	if (guest_cpu_cap_has(vcpu, X86_FEATURE_LAM))
 		cr3 &= ~(X86_CR3_LAM_U48 | X86_CR3_LAM_U57);
 
 	return kvm_vcpu_is_legal_gpa(vcpu, cr3);
arch/x86/kvm/mmu.h

Lines changed: 1 addition & 1 deletion
@@ -126,7 +126,7 @@ static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu)
 
 static inline unsigned long kvm_get_active_cr3_lam_bits(struct kvm_vcpu *vcpu)
 {
-	if (!guest_can_use(vcpu, X86_FEATURE_LAM))
+	if (!guest_cpu_cap_has(vcpu, X86_FEATURE_LAM))
 		return 0;
 
 	return kvm_read_cr3(vcpu) & (X86_CR3_LAM_U48 | X86_CR3_LAM_U57);

arch/x86/kvm/mmu/mmu.c

Lines changed: 2 additions & 2 deletions
@@ -5034,7 +5034,7 @@ static void reset_guest_rsvds_bits_mask(struct kvm_vcpu *vcpu,
 	__reset_rsvds_bits_mask(&context->guest_rsvd_check,
 				vcpu->arch.reserved_gpa_bits,
 				context->cpu_role.base.level, is_efer_nx(context),
-				guest_can_use(vcpu, X86_FEATURE_GBPAGES),
+				guest_cpu_cap_has(vcpu, X86_FEATURE_GBPAGES),
 				is_cr4_pse(context),
 				guest_cpuid_is_amd_compatible(vcpu));
 }
@@ -5111,7 +5111,7 @@ static void reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
 	__reset_rsvds_bits_mask(shadow_zero_check, reserved_hpa_bits(),
 				context->root_role.level,
 				context->root_role.efer_nx,
-				guest_can_use(vcpu, X86_FEATURE_GBPAGES),
+				guest_cpu_cap_has(vcpu, X86_FEATURE_GBPAGES),
 				is_pse, is_amd);
 
 	if (!shadow_me_mask)

arch/x86/kvm/svm/nested.c

Lines changed: 11 additions & 11 deletions
@@ -111,7 +111,7 @@ static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
 
 static bool nested_vmcb_needs_vls_intercept(struct vcpu_svm *svm)
 {
-	if (!guest_can_use(&svm->vcpu, X86_FEATURE_V_VMSAVE_VMLOAD))
+	if (!guest_cpu_cap_has(&svm->vcpu, X86_FEATURE_V_VMSAVE_VMLOAD))
 		return true;
 
 	if (!nested_npt_enabled(svm))
@@ -594,7 +594,7 @@ static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12
 		vmcb_mark_dirty(vmcb02, VMCB_DR);
 	}
 
-	if (unlikely(guest_can_use(vcpu, X86_FEATURE_LBRV) &&
+	if (unlikely(guest_cpu_cap_has(vcpu, X86_FEATURE_LBRV) &&
 		     (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
 		/*
 		 * Reserved bits of DEBUGCTL are ignored. Be consistent with
@@ -651,7 +651,7 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
 	 * exit_int_info, exit_int_info_err, next_rip, insn_len, insn_bytes.
 	 */
 
-	if (guest_can_use(vcpu, X86_FEATURE_VGIF) &&
+	if (guest_cpu_cap_has(vcpu, X86_FEATURE_VGIF) &&
 	    (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK))
 		int_ctl_vmcb12_bits |= (V_GIF_MASK | V_GIF_ENABLE_MASK);
 	else
@@ -689,7 +689,7 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
 
 	vmcb02->control.tsc_offset = vcpu->arch.tsc_offset;
 
-	if (guest_can_use(vcpu, X86_FEATURE_TSCRATEMSR) &&
+	if (guest_cpu_cap_has(vcpu, X86_FEATURE_TSCRATEMSR) &&
 	    svm->tsc_ratio_msr != kvm_caps.default_tsc_scaling_ratio)
 		nested_svm_update_tsc_ratio_msr(vcpu);
 
@@ -710,7 +710,7 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
 	 * what a nrips=0 CPU would do (L1 is responsible for advancing RIP
 	 * prior to injecting the event).
 	 */
-	if (guest_can_use(vcpu, X86_FEATURE_NRIPS))
+	if (guest_cpu_cap_has(vcpu, X86_FEATURE_NRIPS))
 		vmcb02->control.next_rip = svm->nested.ctl.next_rip;
 	else if (boot_cpu_has(X86_FEATURE_NRIPS))
 		vmcb02->control.next_rip = vmcb12_rip;
@@ -720,26 +720,26 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
 		svm->soft_int_injected = true;
 		svm->soft_int_csbase = vmcb12_csbase;
 		svm->soft_int_old_rip = vmcb12_rip;
-		if (guest_can_use(vcpu, X86_FEATURE_NRIPS))
+		if (guest_cpu_cap_has(vcpu, X86_FEATURE_NRIPS))
 			svm->soft_int_next_rip = svm->nested.ctl.next_rip;
 		else
 			svm->soft_int_next_rip = vmcb12_rip;
 	}
 
 	vmcb02->control.virt_ext = vmcb01->control.virt_ext &
 				   LBR_CTL_ENABLE_MASK;
-	if (guest_can_use(vcpu, X86_FEATURE_LBRV))
+	if (guest_cpu_cap_has(vcpu, X86_FEATURE_LBRV))
 		vmcb02->control.virt_ext |=
 			(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK);
 
 	if (!nested_vmcb_needs_vls_intercept(svm))
 		vmcb02->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
 
-	if (guest_can_use(vcpu, X86_FEATURE_PAUSEFILTER))
+	if (guest_cpu_cap_has(vcpu, X86_FEATURE_PAUSEFILTER))
 		pause_count12 = svm->nested.ctl.pause_filter_count;
 	else
 		pause_count12 = 0;
-	if (guest_can_use(vcpu, X86_FEATURE_PFTHRESHOLD))
+	if (guest_cpu_cap_has(vcpu, X86_FEATURE_PFTHRESHOLD))
 		pause_thresh12 = svm->nested.ctl.pause_filter_thresh;
 	else
 		pause_thresh12 = 0;
@@ -1026,7 +1026,7 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
 	if (vmcb12->control.exit_code != SVM_EXIT_ERR)
 		nested_save_pending_event_to_vmcb12(svm, vmcb12);
 
-	if (guest_can_use(vcpu, X86_FEATURE_NRIPS))
+	if (guest_cpu_cap_has(vcpu, X86_FEATURE_NRIPS))
 		vmcb12->control.next_rip = vmcb02->control.next_rip;
 
 	vmcb12->control.int_ctl = svm->nested.ctl.int_ctl;
@@ -1065,7 +1065,7 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
 	if (!nested_exit_on_intr(svm))
 		kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
 
-	if (unlikely(guest_can_use(vcpu, X86_FEATURE_LBRV) &&
+	if (unlikely(guest_cpu_cap_has(vcpu, X86_FEATURE_LBRV) &&
 		     (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
 		svm_copy_lbrs(vmcb12, vmcb02);
 		svm_update_lbrv(vcpu);

arch/x86/kvm/svm/sev.c

Lines changed: 8 additions & 9 deletions
@@ -4445,16 +4445,15 @@ static void sev_es_vcpu_after_set_cpuid(struct vcpu_svm *svm)
 	 * For SEV-ES, accesses to MSR_IA32_XSS should not be intercepted if
 	 * the host/guest supports its use.
 	 *
-	 * guest_can_use() checks a number of requirements on the host/guest to
-	 * ensure that MSR_IA32_XSS is available, but it might report true even
-	 * if X86_FEATURE_XSAVES isn't configured in the guest to ensure host
-	 * MSR_IA32_XSS is always properly restored. For SEV-ES, it is better
-	 * to further check that the guest CPUID actually supports
-	 * X86_FEATURE_XSAVES so that accesses to MSR_IA32_XSS by misbehaved
-	 * guests will still get intercepted and caught in the normal
-	 * kvm_emulate_rdmsr()/kvm_emulated_wrmsr() paths.
+	 * KVM treats the guest as being capable of using XSAVES even if XSAVES
+	 * isn't enabled in guest CPUID as there is no intercept for XSAVES,
+	 * i.e. the guest can use XSAVES/XRSTOR to read/write XSS if XSAVE is
+	 * exposed to the guest and XSAVES is supported in hardware.  Condition
+	 * full XSS passthrough on the guest being able to use XSAVES *and*
+	 * XSAVES being exposed to the guest so that KVM can at least honor
+	 * guest CPUID for RDMSR and WRMSR.
 	 */
-	if (guest_can_use(vcpu, X86_FEATURE_XSAVES) &&
+	if (guest_cpu_cap_has(vcpu, X86_FEATURE_XSAVES) &&
 	    guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))
 		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_XSS, 1, 1);
 	else
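The rewritten comment reduces to a two-condition predicate for disabling the MSR_IA32_XSS intercept. A sketch of just that decision; the function name is hypothetical, and the two booleans stand in for guest_cpu_cap_has() and guest_cpuid_has() on X86_FEATURE_XSAVES:

#include <stdbool.h>

bool xss_passthrough_ok(bool cap_xsaves, bool cpuid_xsaves)
{
        /* The capability alone is not enough: it can be set even when
         * XSAVES is absent from guest CPUID (XSAVES itself has no
         * intercept).  Also requiring guest CPUID means RDMSR/WRMSR of
         * XSS from a guest whose CPUID lacks XSAVES still hits the
         * intercept and goes through the normal emulation paths. */
        return cap_xsaves && cpuid_xsaves;
}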

arch/x86/kvm/svm/svm.c

Lines changed: 13 additions & 13 deletions
@@ -1049,7 +1049,7 @@ void svm_update_lbrv(struct kvm_vcpu *vcpu)
 	struct vcpu_svm *svm = to_svm(vcpu);
 	bool current_enable_lbrv = svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK;
 	bool enable_lbrv = (svm_get_lbr_vmcb(svm)->save.dbgctl & DEBUGCTLMSR_LBR) ||
-			    (is_guest_mode(vcpu) && guest_can_use(vcpu, X86_FEATURE_LBRV) &&
+			    (is_guest_mode(vcpu) && guest_cpu_cap_has(vcpu, X86_FEATURE_LBRV) &&
 			    (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK));
 
 	if (enable_lbrv == current_enable_lbrv)
@@ -2864,7 +2864,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	switch (msr_info->index) {
 	case MSR_AMD64_TSC_RATIO:
 		if (!msr_info->host_initiated &&
-		    !guest_can_use(vcpu, X86_FEATURE_TSCRATEMSR))
+		    !guest_cpu_cap_has(vcpu, X86_FEATURE_TSCRATEMSR))
 			return 1;
 		msr_info->data = svm->tsc_ratio_msr;
 		break;
@@ -3024,7 +3024,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 	switch (ecx) {
 	case MSR_AMD64_TSC_RATIO:
 
-		if (!guest_can_use(vcpu, X86_FEATURE_TSCRATEMSR)) {
+		if (!guest_cpu_cap_has(vcpu, X86_FEATURE_TSCRATEMSR)) {
 
 			if (!msr->host_initiated)
 				return 1;
@@ -3046,7 +3046,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 
 		svm->tsc_ratio_msr = data;
 
-		if (guest_can_use(vcpu, X86_FEATURE_TSCRATEMSR) &&
+		if (guest_cpu_cap_has(vcpu, X86_FEATURE_TSCRATEMSR) &&
 		    is_guest_mode(vcpu))
 			nested_svm_update_tsc_ratio_msr(vcpu);
 
@@ -4404,24 +4404,24 @@ static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 	if (boot_cpu_has(X86_FEATURE_XSAVE) &&
 	    boot_cpu_has(X86_FEATURE_XSAVES) &&
 	    guest_cpuid_has(vcpu, X86_FEATURE_XSAVE))
-		kvm_governed_feature_set(vcpu, X86_FEATURE_XSAVES);
+		guest_cpu_cap_set(vcpu, X86_FEATURE_XSAVES);
 
-	kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_NRIPS);
-	kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_TSCRATEMSR);
-	kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_LBRV);
+	guest_cpu_cap_check_and_set(vcpu, X86_FEATURE_NRIPS);
+	guest_cpu_cap_check_and_set(vcpu, X86_FEATURE_TSCRATEMSR);
+	guest_cpu_cap_check_and_set(vcpu, X86_FEATURE_LBRV);
 
 	/*
 	 * Intercept VMLOAD if the vCPU model is Intel in order to emulate that
 	 * VMLOAD drops bits 63:32 of SYSENTER (ignoring the fact that exposing
 	 * SVM on Intel is bonkers and extremely unlikely to work).
 	 */
 	if (!guest_cpuid_is_intel_compatible(vcpu))
-		kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_V_VMSAVE_VMLOAD);
+		guest_cpu_cap_check_and_set(vcpu, X86_FEATURE_V_VMSAVE_VMLOAD);
 
-	kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_PAUSEFILTER);
-	kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_PFTHRESHOLD);
-	kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_VGIF);
-	kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_VNMI);
+	guest_cpu_cap_check_and_set(vcpu, X86_FEATURE_PAUSEFILTER);
+	guest_cpu_cap_check_and_set(vcpu, X86_FEATURE_PFTHRESHOLD);
+	guest_cpu_cap_check_and_set(vcpu, X86_FEATURE_VGIF);
+	guest_cpu_cap_check_and_set(vcpu, X86_FEATURE_VNMI);
 
 	svm_recalc_instruction_intercepts(vcpu, svm);

arch/x86/kvm/svm/svm.h

Lines changed: 2 additions & 2 deletions
@@ -502,7 +502,7 @@ static inline bool svm_is_intercept(struct vcpu_svm *svm, int bit)
 
 static inline bool nested_vgif_enabled(struct vcpu_svm *svm)
 {
-	return guest_can_use(&svm->vcpu, X86_FEATURE_VGIF) &&
+	return guest_cpu_cap_has(&svm->vcpu, X86_FEATURE_VGIF) &&
 	       (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK);
 }
 
@@ -554,7 +554,7 @@ static inline bool nested_npt_enabled(struct vcpu_svm *svm)
 
 static inline bool nested_vnmi_enabled(struct vcpu_svm *svm)
 {
-	return guest_can_use(&svm->vcpu, X86_FEATURE_VNMI) &&
+	return guest_cpu_cap_has(&svm->vcpu, X86_FEATURE_VNMI) &&
 	       (svm->nested.ctl.int_ctl & V_NMI_ENABLE_MASK);
 }

arch/x86/kvm/vmx/nested.c

Lines changed: 3 additions & 3 deletions
@@ -6617,7 +6617,7 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
 	vmx = to_vmx(vcpu);
 	vmcs12 = get_vmcs12(vcpu);
 
-	if (guest_can_use(vcpu, X86_FEATURE_VMX) &&
+	if (guest_cpu_cap_has(vcpu, X86_FEATURE_VMX) &&
 	    (vmx->nested.vmxon || vmx->nested.smm.vmxon)) {
 		kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
 		kvm_state.hdr.vmx.vmcs12_pa = vmx->nested.current_vmptr;
@@ -6758,7 +6758,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
 		if (kvm_state->flags & ~KVM_STATE_NESTED_EVMCS)
 			return -EINVAL;
 	} else {
-		if (!guest_can_use(vcpu, X86_FEATURE_VMX))
+		if (!guest_cpu_cap_has(vcpu, X86_FEATURE_VMX))
 			return -EINVAL;
 
 		if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa))
@@ -6792,7 +6792,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
 		return -EINVAL;
 
 	if ((kvm_state->flags & KVM_STATE_NESTED_EVMCS) &&
-	    (!guest_can_use(vcpu, X86_FEATURE_VMX) ||
+	    (!guest_cpu_cap_has(vcpu, X86_FEATURE_VMX) ||
 	     !vmx->nested.enlightened_vmcs_enabled))
 		return -EINVAL;
 

arch/x86/kvm/vmx/vmx.c

Lines changed: 8 additions & 8 deletions
@@ -2084,7 +2084,7 @@ int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			[msr_info->index - MSR_IA32_SGXLEPUBKEYHASH0];
 		break;
 	case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR:
-		if (!guest_can_use(vcpu, X86_FEATURE_VMX))
+		if (!guest_cpu_cap_has(vcpu, X86_FEATURE_VMX))
 			return 1;
 		if (vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index,
 				    &msr_info->data))
@@ -2394,7 +2394,7 @@ int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR:
 		if (!msr_info->host_initiated)
 			return 1; /* they are read-only */
-		if (!guest_can_use(vcpu, X86_FEATURE_VMX))
+		if (!guest_cpu_cap_has(vcpu, X86_FEATURE_VMX))
 			return 1;
 		return vmx_set_vmx_msr(vcpu, msr_index, data);
 	case MSR_IA32_RTIT_CTL:
@@ -4591,7 +4591,7 @@ vmx_adjust_secondary_exec_control(struct vcpu_vmx *vmx, u32 *exec_control,
 									\
 	if (cpu_has_vmx_##name()) {					\
 		if (kvm_is_governed_feature(X86_FEATURE_##feat_name))	\
-			__enabled = guest_can_use(__vcpu, X86_FEATURE_##feat_name); \
+			__enabled = guest_cpu_cap_has(__vcpu, X86_FEATURE_##feat_name); \
 		else							\
 			__enabled = guest_cpuid_has(__vcpu, X86_FEATURE_##feat_name); \
 		vmx_adjust_secondary_exec_control(vmx, exec_control, SECONDARY_EXEC_##ctrl_name,\
@@ -7830,18 +7830,18 @@ void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 	 */
 	if (boot_cpu_has(X86_FEATURE_XSAVE) &&
 	    guest_cpuid_has(vcpu, X86_FEATURE_XSAVE))
-		kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_XSAVES);
+		guest_cpu_cap_check_and_set(vcpu, X86_FEATURE_XSAVES);
 
-	kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_VMX);
-	kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_LAM);
+	guest_cpu_cap_check_and_set(vcpu, X86_FEATURE_VMX);
+	guest_cpu_cap_check_and_set(vcpu, X86_FEATURE_LAM);
 
 	vmx_setup_uret_msrs(vmx);
 
 	if (cpu_has_secondary_exec_ctrls())
 		vmcs_set_secondary_exec_control(vmx,
 						vmx_secondary_exec_control(vmx));
 
-	if (guest_can_use(vcpu, X86_FEATURE_VMX))
+	if (guest_cpu_cap_has(vcpu, X86_FEATURE_VMX))
 		vmx->msr_ia32_feature_control_valid_bits |=
 			FEAT_CTL_VMX_ENABLED_INSIDE_SMX |
 			FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX;
@@ -7850,7 +7850,7 @@ void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 		      ~(FEAT_CTL_VMX_ENABLED_INSIDE_SMX |
 			FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX);
 
-	if (guest_can_use(vcpu, X86_FEATURE_VMX))
+	if (guest_cpu_cap_has(vcpu, X86_FEATURE_VMX))
 		nested_vmx_cr_fixed1_bits_update(vcpu);
 
 	if (boot_cpu_has(X86_FEATURE_INTEL_PT) &&
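The vmx_adjust_secondary_exec_control() hunk above is the one place both query styles meet: a feature registered as "governed" is looked up in the per-vCPU capability bitmap, while any other feature falls back to raw guest CPUID. A minimal sketch of that selection, with all three kernel queries replaced by stubs whose return values are arbitrary placeholders:

#include <stdbool.h>

/* Stubs for kvm_is_governed_feature(), guest_cpu_cap_has(), and
 * guest_cpuid_has(). */
static bool is_governed(unsigned int feat) { return feat == 0; }
static bool cap_has(unsigned int feat)     { (void)feat; return true; }
static bool cpuid_has(unsigned int feat)   { (void)feat; return false; }

static bool feature_enabled_for_guest(unsigned int feat)
{
        /* Governed features use the KVM-controlled capability;
         * everything else consults guest CPUID directly. */
        return is_governed(feat) ? cap_has(feat) : cpuid_has(feat);
}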
