Skip to content

Commit 208a352

Browse files
committed
Merge tag 'kvm-x86-vmx-6.11' of https://github.com/kvm-x86/linux into HEAD
KVM VMX changes for 6.11 - Remove an unnecessary EPT TLB flush when enabling hardware. - Fix a series of bugs that cause KVM to fail to detect nested pending posted interrupts as valid wake events for a vCPU executing HLT in L2 (with HLT-exiting disabled by L1). - Misc cleanups
2 parents 1229cbe + 4540515 commit 208a352

File tree

10 files changed

+78
-55
lines changed

10 files changed

+78
-55
lines changed

arch/x86/include/asm/kvm-x86-ops.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -85,7 +85,6 @@ KVM_X86_OP_OPTIONAL(update_cr8_intercept)
8585
KVM_X86_OP(refresh_apicv_exec_ctrl)
8686
KVM_X86_OP_OPTIONAL(hwapic_irr_update)
8787
KVM_X86_OP_OPTIONAL(hwapic_isr_update)
88-
KVM_X86_OP_OPTIONAL_RET0(guest_apic_has_interrupt)
8988
KVM_X86_OP_OPTIONAL(load_eoi_exitmap)
9089
KVM_X86_OP_OPTIONAL(set_virtual_apic_mode)
9190
KVM_X86_OP_OPTIONAL(set_apic_access_page_addr)

arch/x86/include/asm/kvm_host.h

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1731,7 +1731,6 @@ struct kvm_x86_ops {
17311731
void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
17321732
void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
17331733
void (*hwapic_isr_update)(int isr);
1734-
bool (*guest_apic_has_interrupt)(struct kvm_vcpu *vcpu);
17351734
void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
17361735
void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
17371736
void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu);
@@ -1837,7 +1836,7 @@ struct kvm_x86_nested_ops {
18371836
bool (*is_exception_vmexit)(struct kvm_vcpu *vcpu, u8 vector,
18381837
u32 error_code);
18391838
int (*check_events)(struct kvm_vcpu *vcpu);
1840-
bool (*has_events)(struct kvm_vcpu *vcpu);
1839+
bool (*has_events)(struct kvm_vcpu *vcpu, bool for_injection);
18411840
void (*triple_fault)(struct kvm_vcpu *vcpu);
18421841
int (*get_state)(struct kvm_vcpu *vcpu,
18431842
struct kvm_nested_state __user *user_kvm_nested_state,

arch/x86/kvm/vmx/main.c

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -97,7 +97,6 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
9797
.required_apicv_inhibits = VMX_REQUIRED_APICV_INHIBITS,
9898
.hwapic_irr_update = vmx_hwapic_irr_update,
9999
.hwapic_isr_update = vmx_hwapic_isr_update,
100-
.guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
101100
.sync_pir_to_irr = vmx_sync_pir_to_irr,
102101
.deliver_interrupt = vmx_deliver_interrupt,
103102
.dy_apicv_has_pending_interrupt = pi_has_pending_interrupt,

arch/x86/kvm/vmx/nested.c

Lines changed: 42 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,7 @@
1212
#include "mmu.h"
1313
#include "nested.h"
1414
#include "pmu.h"
15+
#include "posted_intr.h"
1516
#include "sgx.h"
1617
#include "trace.h"
1718
#include "vmx.h"
@@ -3899,8 +3900,8 @@ static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
38993900
if (!pi_test_and_clear_on(vmx->nested.pi_desc))
39003901
return 0;
39013902

3902-
max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256);
3903-
if (max_irr != 256) {
3903+
max_irr = pi_find_highest_vector(vmx->nested.pi_desc);
3904+
if (max_irr > 0) {
39043905
vapic_page = vmx->nested.virtual_apic_map.hva;
39053906
if (!vapic_page)
39063907
goto mmio_needed;
@@ -4031,10 +4032,46 @@ static bool nested_vmx_preemption_timer_pending(struct kvm_vcpu *vcpu)
40314032
to_vmx(vcpu)->nested.preemption_timer_expired;
40324033
}
40334034

4034-
static bool vmx_has_nested_events(struct kvm_vcpu *vcpu)
4035+
static bool vmx_has_nested_events(struct kvm_vcpu *vcpu, bool for_injection)
40354036
{
4036-
return nested_vmx_preemption_timer_pending(vcpu) ||
4037-
to_vmx(vcpu)->nested.mtf_pending;
4037+
struct vcpu_vmx *vmx = to_vmx(vcpu);
4038+
void *vapic = vmx->nested.virtual_apic_map.hva;
4039+
int max_irr, vppr;
4040+
4041+
if (nested_vmx_preemption_timer_pending(vcpu) ||
4042+
vmx->nested.mtf_pending)
4043+
return true;
4044+
4045+
/*
4046+
* Virtual Interrupt Delivery doesn't require manual injection. Either
4047+
* the interrupt is already in GUEST_RVI and will be recognized by CPU
4048+
* at VM-Entry, or there is a KVM_REQ_EVENT pending and KVM will move
4049+
* the interrupt from the PIR to RVI prior to entering the guest.
4050+
*/
4051+
if (for_injection)
4052+
return false;
4053+
4054+
if (!nested_cpu_has_vid(get_vmcs12(vcpu)) ||
4055+
__vmx_interrupt_blocked(vcpu))
4056+
return false;
4057+
4058+
if (!vapic)
4059+
return false;
4060+
4061+
vppr = *((u32 *)(vapic + APIC_PROCPRI));
4062+
4063+
max_irr = vmx_get_rvi();
4064+
if ((max_irr & 0xf0) > (vppr & 0xf0))
4065+
return true;
4066+
4067+
if (vmx->nested.pi_pending && vmx->nested.pi_desc &&
4068+
pi_test_on(vmx->nested.pi_desc)) {
4069+
max_irr = pi_find_highest_vector(vmx->nested.pi_desc);
4070+
if (max_irr > 0 && (max_irr & 0xf0) > (vppr & 0xf0))
4071+
return true;
4072+
}
4073+
4074+
return false;
40384075
}
40394076

40404077
/*

arch/x86/kvm/vmx/posted_intr.h

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,8 @@
11
/* SPDX-License-Identifier: GPL-2.0 */
22
#ifndef __KVM_X86_VMX_POSTED_INTR_H
33
#define __KVM_X86_VMX_POSTED_INTR_H
4+
5+
#include <linux/find.h>
46
#include <asm/posted_intr.h>
57

68
void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu);
@@ -12,4 +14,12 @@ int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
1214
uint32_t guest_irq, bool set);
1315
void vmx_pi_start_assignment(struct kvm *kvm);
1416

17+
static inline int pi_find_highest_vector(struct pi_desc *pi_desc)
18+
{
19+
int vec;
20+
21+
vec = find_last_bit((unsigned long *)pi_desc->pir, 256);
22+
return vec < 256 ? vec : -1;
23+
}
24+
1525
#endif /* __KVM_X86_VMX_POSTED_INTR_H */

arch/x86/kvm/vmx/vmcs12.h

Lines changed: 8 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -188,12 +188,13 @@ struct __packed vmcs12 {
188188
};
189189

190190
/*
191-
* VMCS12_REVISION is an arbitrary id that should be changed if the content or
192-
* layout of struct vmcs12 is changed. MSR_IA32_VMX_BASIC returns this id, and
193-
* VMPTRLD verifies that the VMCS region that L1 is loading contains this id.
191+
* VMCS12_REVISION is KVM's arbitrary ID for the layout of struct vmcs12. KVM
192+
* enumerates this value to L1 via MSR_IA32_VMX_BASIC, and checks the revision
193+
* ID during nested VMPTRLD to verify that L1 is loading a VMCS that adhere's
194+
* to KVM's virtual CPU definition.
194195
*
195-
* IMPORTANT: Changing this value will break save/restore compatibility with
196-
* older kvm releases.
196+
* DO NOT change this value, as it will break save/restore compatibility with
197+
* older KVM releases.
197198
*/
198199
#define VMCS12_REVISION 0x11e57ed0
199200

@@ -206,7 +207,8 @@ struct __packed vmcs12 {
206207
#define VMCS12_SIZE KVM_STATE_NESTED_VMX_VMCS_SIZE
207208

208209
/*
209-
* For save/restore compatibility, the vmcs12 field offsets must not change.
210+
* For save/restore compatibility, the vmcs12 field offsets must not change,
211+
* although appending fields and/or filling gaps is obviously allowed.
210212
*/
211213
#define CHECK_OFFSET(field, loc) \
212214
ASSERT_STRUCT_OFFSET(struct vmcs12, field, loc)

arch/x86/kvm/vmx/vmx.c

Lines changed: 9 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -2868,9 +2868,6 @@ int vmx_hardware_enable(void)
28682868
return r;
28692869
}
28702870

2871-
if (enable_ept)
2872-
ept_sync_global();
2873-
28742871
return 0;
28752872
}
28762873

@@ -4142,26 +4139,6 @@ void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu)
41424139
}
41434140
}
41444141

4145-
bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
4146-
{
4147-
struct vcpu_vmx *vmx = to_vmx(vcpu);
4148-
void *vapic_page;
4149-
u32 vppr;
4150-
int rvi;
4151-
4152-
if (WARN_ON_ONCE(!is_guest_mode(vcpu)) ||
4153-
!nested_cpu_has_vid(get_vmcs12(vcpu)) ||
4154-
WARN_ON_ONCE(!vmx->nested.virtual_apic_map.gfn))
4155-
return false;
4156-
4157-
rvi = vmx_get_rvi();
4158-
4159-
vapic_page = vmx->nested.virtual_apic_map.hva;
4160-
vppr = *((u32 *)(vapic_page + APIC_PROCPRI));
4161-
4162-
return ((rvi & 0xf0) > (vppr & 0xf0));
4163-
}
4164-
41654142
void vmx_msr_filter_changed(struct kvm_vcpu *vcpu)
41664143
{
41674144
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -5086,14 +5063,19 @@ int vmx_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
50865063
return !vmx_nmi_blocked(vcpu);
50875064
}
50885065

5066+
bool __vmx_interrupt_blocked(struct kvm_vcpu *vcpu)
5067+
{
5068+
return !(vmx_get_rflags(vcpu) & X86_EFLAGS_IF) ||
5069+
(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
5070+
(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
5071+
}
5072+
50895073
bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu)
50905074
{
50915075
if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
50925076
return false;
50935077

5094-
return !(vmx_get_rflags(vcpu) & X86_EFLAGS_IF) ||
5095-
(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
5096-
(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
5078+
return __vmx_interrupt_blocked(vcpu);
50975079
}
50985080

50995081
int vmx_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
@@ -8610,9 +8592,9 @@ static void __vmx_exit(void)
86108592
static void vmx_exit(void)
86118593
{
86128594
kvm_exit();
8595+
__vmx_exit();
86138596
kvm_x86_vendor_exit();
86148597

8615-
__vmx_exit();
86168598
}
86178599
module_exit(vmx_exit);
86188600

arch/x86/kvm/vmx/vmx.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -406,6 +406,7 @@ u64 construct_eptp(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);
406406
bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu);
407407
void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
408408
bool vmx_nmi_blocked(struct kvm_vcpu *vcpu);
409+
bool __vmx_interrupt_blocked(struct kvm_vcpu *vcpu);
409410
bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu);
410411
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
411412
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);

arch/x86/kvm/vmx/x86_ops.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -48,7 +48,6 @@ void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
4848
void vmx_apicv_pre_state_restore(struct kvm_vcpu *vcpu);
4949
void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
5050
void vmx_hwapic_isr_update(int max_isr);
51-
bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu);
5251
int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu);
5352
void vmx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
5453
int trig_mode, int vector);

arch/x86/kvm/x86.c

Lines changed: 7 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -10557,7 +10557,7 @@ static int kvm_check_and_inject_events(struct kvm_vcpu *vcpu,
1055710557

1055810558
if (is_guest_mode(vcpu) &&
1055910559
kvm_x86_ops.nested_ops->has_events &&
10560-
kvm_x86_ops.nested_ops->has_events(vcpu))
10560+
kvm_x86_ops.nested_ops->has_events(vcpu, true))
1056110561
*req_immediate_exit = true;
1056210562

1056310563
/*
@@ -11255,7 +11255,10 @@ static inline int vcpu_block(struct kvm_vcpu *vcpu)
1125511255
* causes a spurious wakeup from HLT).
1125611256
*/
1125711257
if (is_guest_mode(vcpu)) {
11258-
if (kvm_check_nested_events(vcpu) < 0)
11258+
int r = kvm_check_nested_events(vcpu);
11259+
11260+
WARN_ON_ONCE(r == -EBUSY);
11261+
if (r < 0)
1125911262
return 0;
1126011263
}
1126111264

@@ -13142,12 +13145,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
1314213145
kvm_arch_free_memslot(kvm, old);
1314313146
}
1314413147

13145-
static inline bool kvm_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
13146-
{
13147-
return (is_guest_mode(vcpu) &&
13148-
static_call(kvm_x86_guest_apic_has_interrupt)(vcpu));
13149-
}
13150-
1315113148
static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
1315213149
{
1315313150
if (!list_empty_careful(&vcpu->async_pf.done))
@@ -13181,17 +13178,15 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
1318113178
if (kvm_test_request(KVM_REQ_UPDATE_PROTECTED_GUEST_STATE, vcpu))
1318213179
return true;
1318313180

13184-
if (kvm_arch_interrupt_allowed(vcpu) &&
13185-
(kvm_cpu_has_interrupt(vcpu) ||
13186-
kvm_guest_apic_has_interrupt(vcpu)))
13181+
if (kvm_arch_interrupt_allowed(vcpu) && kvm_cpu_has_interrupt(vcpu))
1318713182
return true;
1318813183

1318913184
if (kvm_hv_has_stimer_pending(vcpu))
1319013185
return true;
1319113186

1319213187
if (is_guest_mode(vcpu) &&
1319313188
kvm_x86_ops.nested_ops->has_events &&
13194-
kvm_x86_ops.nested_ops->has_events(vcpu))
13189+
kvm_x86_ops.nested_ops->has_events(vcpu, false))
1319513190
return true;
1319613191

1319713192
if (kvm_xen_has_pending_events(vcpu))

0 commit comments

Comments
 (0)