Commit 3e763ec
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM fixes from Paolo Bonzini:
 "ARM:

   - Plug race between enabling MTE and creating vcpus

   - Fix off-by-one bug when checking whether an address range is RAM

  x86:

   - Fixes for the new MMU, especially a memory leak on hosts with <39
     physical address bits

   - Remove bogus EFER.NX checks on 32-bit non-PAE hosts

   - WAITPKG fix"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: x86/mmu: Protect marking SPs unsync when using TDP MMU with spinlock
  KVM: x86/mmu: Don't step down in the TDP iterator when zapping all SPTEs
  KVM: x86/mmu: Don't leak non-leaf SPTEs when zapping all SPTEs
  KVM: nVMX: Use vmx_need_pf_intercept() when deciding if L0 wants a #PF
  kvm: vmx: Sync all matching EPTPs when injecting nested EPT fault
  KVM: x86: remove dead initialization
  KVM: x86: Allow guest to set EFER.NX=1 on non-PAE 32-bit kernels
  KVM: VMX: Use current VMCS to query WAITPKG support for MSR emulation
  KVM: arm64: Fix race when enabling KVM_ARM_CAP_MTE
  KVM: arm64: Fix off-by-one in range_is_memory
2 parents 0aa78d1 + 6e949dd commit 3e763ec

10 files changed: +118 −62 lines

Documentation/virt/kvm/locking.rst

Lines changed: 4 additions & 4 deletions
@@ -25,10 +25,10 @@ On x86:
 
 - vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock
 
-- kvm->arch.mmu_lock is an rwlock. kvm->arch.tdp_mmu_pages_lock is
-  taken inside kvm->arch.mmu_lock, and cannot be taken without already
-  holding kvm->arch.mmu_lock (typically with ``read_lock``, otherwise
-  there's no need to take kvm->arch.tdp_mmu_pages_lock at all).
+- kvm->arch.mmu_lock is an rwlock. kvm->arch.tdp_mmu_pages_lock and
+  kvm->arch.mmu_unsync_pages_lock are taken inside kvm->arch.mmu_lock, and
+  cannot be taken without already holding kvm->arch.mmu_lock (typically with
+  ``read_lock`` for the TDP MMU, thus the need for additional spinlocks).
 
 Everything else is a leaf: no other lock is taken inside the critical
 sections.
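
The ordering rule documented above is the crux of the x86 MMU fixes in this merge: the two spinlocks nest strictly inside mmu_lock, which TDP MMU page faults hold only for read. A minimal userspace sketch of that nesting, with pthread locks standing in for the kernel's rwlock/spinlock primitives (the names merely mirror the documentation; this is not KVM code):

/*
 * Illustrative only: pthread locks stand in for the kernel's rwlock and
 * spinlocks, and the names simply mirror the documentation above.
 */
#include <pthread.h>

static pthread_rwlock_t mmu_lock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t tdp_mmu_pages_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t mmu_unsync_pages_lock = PTHREAD_MUTEX_INITIALIZER;

void tdp_mmu_fault_path(void)
{
        /* TDP MMU page faults hold mmu_lock only for read ... */
        pthread_rwlock_rdlock(&mmu_lock);

        /* ... so these spinlocks provide the exclusion that read_lock alone
         * does not; they are only ever taken inside mmu_lock. */
        pthread_mutex_lock(&tdp_mmu_pages_lock);
        /* ... touch the tdp_mmu_pages list here ... */
        pthread_mutex_unlock(&tdp_mmu_pages_lock);

        pthread_mutex_lock(&mmu_unsync_pages_lock);
        /* ... mark shadow pages unsync here ... */
        pthread_mutex_unlock(&mmu_unsync_pages_lock);

        pthread_rwlock_unlock(&mmu_lock);
}

int main(void)
{
        tdp_mmu_fault_path();
        return 0;
}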

arch/arm64/kvm/arm.c

Lines changed: 8 additions & 4 deletions
@@ -94,10 +94,14 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
                kvm->arch.return_nisv_io_abort_to_user = true;
                break;
        case KVM_CAP_ARM_MTE:
-               if (!system_supports_mte() || kvm->created_vcpus)
-                       return -EINVAL;
-               r = 0;
-               kvm->arch.mte_enabled = true;
+               mutex_lock(&kvm->lock);
+               if (!system_supports_mte() || kvm->created_vcpus) {
+                       r = -EINVAL;
+               } else {
+                       r = 0;
+                       kvm->arch.mte_enabled = true;
+               }
+               mutex_unlock(&kvm->lock);
                break;
        default:
                r = -EINVAL;
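
The fix works because the check of created_vcpus and the write of mte_enabled now happen as one step under kvm->lock, the same mutex vCPU creation runs under, so a vCPU can no longer slip in between. A standalone userspace sketch of that idea, with pthreads standing in for the kernel mutex (field and function names are illustrative, not kernel API):

#include <pthread.h>
#include <stdbool.h>

struct vm {
        pthread_mutex_t lock;     /* stands in for kvm->lock */
        int created_vcpus;
        bool mte_enabled;
};

int enable_mte(struct vm *vm)
{
        int r;

        pthread_mutex_lock(&vm->lock);
        if (vm->created_vcpus) {
                r = -1;                  /* -EINVAL in the real code */
        } else {
                r = 0;
                vm->mte_enabled = true;  /* no vCPU can appear in between */
        }
        pthread_mutex_unlock(&vm->lock);
        return r;
}

void create_vcpu(struct vm *vm)
{
        pthread_mutex_lock(&vm->lock);
        vm->created_vcpus++;             /* sees a stable mte_enabled value */
        pthread_mutex_unlock(&vm->lock);
}

int main(void)
{
        struct vm vm = { .created_vcpus = 0, .mte_enabled = false };

        pthread_mutex_init(&vm.lock, NULL);
        create_vcpu(&vm);                 /* a vCPU now exists ...          */
        return enable_mte(&vm) ? 0 : 1;   /* ... so enabling MTE must fail  */
}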

arch/arm64/kvm/hyp/nvhe/mem_protect.c

Lines changed: 1 addition & 1 deletion
@@ -193,7 +193,7 @@ static bool range_is_memory(u64 start, u64 end)
 {
        struct kvm_mem_range r1, r2;
 
-       if (!find_mem_range(start, &r1) || !find_mem_range(end, &r2))
+       if (!find_mem_range(start, &r1) || !find_mem_range(end - 1, &r2))
                return false;
        if (r1.start != r2.start)
                return false;
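
The off-by-one comes from the range being half-open: end is the first address past the range, so probing end itself can land in the following (possibly non-RAM) region, while end - 1 is the last address actually covered. A self-contained sketch of that convention (the addresses are made up for illustration):

#include <stdbool.h>
#include <stdio.h>

struct mem_range { unsigned long long start, end; };   /* half-open: [start, end) */

static bool range_contains(struct mem_range r, unsigned long long addr)
{
        return addr >= r.start && addr < r.end;
}

int main(void)
{
        struct mem_range ram = { 0x40000000ULL, 0x80000000ULL };
        unsigned long long end = 0x80000000ULL;   /* exclusive end of the range being checked */

        printf("probe end     -> %d\n", range_contains(ram, end));     /* 0: one past the range */
        printf("probe end - 1 -> %d\n", range_contains(ram, end - 1)); /* 1: last covered byte   */
        return 0;
}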

arch/x86/include/asm/kvm_host.h

Lines changed: 7 additions & 0 deletions
@@ -1038,6 +1038,13 @@ struct kvm_arch {
        struct list_head lpage_disallowed_mmu_pages;
        struct kvm_page_track_notifier_node mmu_sp_tracker;
        struct kvm_page_track_notifier_head track_notifier_head;
+       /*
+        * Protects marking pages unsync during page faults, as TDP MMU page
+        * faults only take mmu_lock for read. For simplicity, the unsync
+        * pages lock is always taken when marking pages unsync regardless of
+        * whether mmu_lock is held for read or write.
+        */
+       spinlock_t mmu_unsync_pages_lock;
 
        struct list_head assigned_dev_head;
        struct iommu_domain *iommu_domain;

arch/x86/kvm/cpuid.c

Lines changed: 1 addition & 27 deletions
@@ -208,30 +208,6 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
        kvm_mmu_after_set_cpuid(vcpu);
 }
 
-static int is_efer_nx(void)
-{
-       return host_efer & EFER_NX;
-}
-
-static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
-{
-       int i;
-       struct kvm_cpuid_entry2 *e, *entry;
-
-       entry = NULL;
-       for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
-               e = &vcpu->arch.cpuid_entries[i];
-               if (e->function == 0x80000001) {
-                       entry = e;
-                       break;
-               }
-       }
-       if (entry && cpuid_entry_has(entry, X86_FEATURE_NX) && !is_efer_nx()) {
-               cpuid_entry_clear(entry, X86_FEATURE_NX);
-               printk(KERN_INFO "kvm: guest NX capability removed\n");
-       }
-}
-
 int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
 {
        struct kvm_cpuid_entry2 *best;
@@ -302,7 +278,6 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
        vcpu->arch.cpuid_entries = e2;
        vcpu->arch.cpuid_nent = cpuid->nent;
 
-       cpuid_fix_nx_cap(vcpu);
        kvm_update_cpuid_runtime(vcpu);
        kvm_vcpu_after_set_cpuid(vcpu);
 
@@ -401,7 +376,6 @@ static __always_inline void kvm_cpu_cap_mask(enum cpuid_leafs leaf, u32 mask)
 
 void kvm_set_cpu_caps(void)
 {
-       unsigned int f_nx = is_efer_nx() ? F(NX) : 0;
 #ifdef CONFIG_X86_64
        unsigned int f_gbpages = F(GBPAGES);
        unsigned int f_lm = F(LM);
@@ -515,7 +489,7 @@ void kvm_set_cpu_caps(void)
                F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
                F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
                F(PAT) | F(PSE36) | 0 /* Reserved */ |
-               f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
+               F(NX) | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
                F(FXSR) | F(FXSR_OPT) | f_gbpages | F(RDTSCP) |
                0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW)
        );

arch/x86/kvm/hyperv.c

Lines changed: 1 addition & 1 deletion
@@ -1933,7 +1933,7 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc, bool
 void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu)
 {
        struct kvm_cpuid_entry2 *entry;
-       struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
+       struct kvm_vcpu_hv *hv_vcpu;
 
        entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_INTERFACE, 0);
        if (entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX) {

arch/x86/kvm/mmu/mmu.c

Lines changed: 28 additions & 0 deletions
@@ -2535,6 +2535,7 @@ static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync)
 {
        struct kvm_mmu_page *sp;
+       bool locked = false;
 
        /*
         * Force write-protection if the page is being tracked. Note, the page
@@ -2557,9 +2558,34 @@ int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync)
                if (sp->unsync)
                        continue;
 
+               /*
+                * TDP MMU page faults require an additional spinlock as they
+                * run with mmu_lock held for read, not write, and the unsync
+                * logic is not thread safe. Take the spinlock regardless of
+                * the MMU type to avoid extra conditionals/parameters, there's
+                * no meaningful penalty if mmu_lock is held for write.
+                */
+               if (!locked) {
+                       locked = true;
+                       spin_lock(&vcpu->kvm->arch.mmu_unsync_pages_lock);
+
+                       /*
+                        * Recheck after taking the spinlock, a different vCPU
+                        * may have since marked the page unsync. A false
+                        * positive on the unprotected check above is not
+                        * possible as clearing sp->unsync _must_ hold mmu_lock
+                        * for write, i.e. unsync cannot transition from 0->1
+                        * while this CPU holds mmu_lock for read (or write).
+                        */
+                       if (READ_ONCE(sp->unsync))
+                               continue;
+               }
+
                WARN_ON(sp->role.level != PG_LEVEL_4K);
                kvm_unsync_page(vcpu, sp);
        }
+       if (locked)
+               spin_unlock(&vcpu->kvm->arch.mmu_unsync_pages_lock);
 
        /*
         * We need to ensure that the marking of unsync pages is visible
@@ -5537,6 +5563,8 @@ void kvm_mmu_init_vm(struct kvm *kvm)
 {
        struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
 
+       spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
+
        if (!kvm_mmu_init_tdp_mmu(kvm))
                /*
                 * No smp_load/store wrappers needed here as we are in
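
The hunk above is a lazy-lock-then-recheck pattern: the unlocked sp->unsync test stays cheap on the common path, the spinlock is taken only once a candidate page is found, and the flag is re-read under the lock in case another vCPU won the race. A userspace sketch of the same shape, with pthreads and C11 atomics standing in for the kernel primitives (not KVM code):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct shadow_page { atomic_bool unsync; };

static pthread_mutex_t unsync_pages_lock = PTHREAD_MUTEX_INITIALIZER;

static void try_to_unsync(struct shadow_page **pages, int n)
{
        bool locked = false;

        for (int i = 0; i < n; i++) {
                struct shadow_page *sp = pages[i];

                /* Cheap unlocked check; a set flag can be trusted because the
                 * real code only clears unsync with the big lock held for write. */
                if (atomic_load(&sp->unsync))
                        continue;

                if (!locked) {
                        locked = true;
                        pthread_mutex_lock(&unsync_pages_lock);

                        /* Recheck under the lock: another thread may have just
                         * marked this page while we were acquiring it. */
                        if (atomic_load(&sp->unsync))
                                continue;
                }

                atomic_store(&sp->unsync, true);   /* stands in for kvm_unsync_page() */
        }

        if (locked)
                pthread_mutex_unlock(&unsync_pages_lock);
}

int main(void)
{
        struct shadow_page a = { false }, b = { true };
        struct shadow_page *pages[] = { &a, &b };

        try_to_unsync(pages, 2);
        printf("a.unsync=%d b.unsync=%d\n", atomic_load(&a.unsync), atomic_load(&b.unsync));
        return 0;
}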

arch/x86/kvm/mmu/tdp_mmu.c

Lines changed: 24 additions & 11 deletions
@@ -43,6 +43,7 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
        if (!kvm->arch.tdp_mmu_enabled)
                return;
 
+       WARN_ON(!list_empty(&kvm->arch.tdp_mmu_pages));
        WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
 
        /*
@@ -81,8 +82,6 @@ static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
 void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
                          bool shared)
 {
-       gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
-
        kvm_lockdep_assert_mmu_lock_held(kvm, shared);
 
        if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
@@ -94,7 +93,7 @@ void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
        list_del_rcu(&root->link);
        spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
 
-       zap_gfn_range(kvm, root, 0, max_gfn, false, false, shared);
+       zap_gfn_range(kvm, root, 0, -1ull, false, false, shared);
 
        call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
 }
@@ -724,13 +723,29 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
                          gfn_t start, gfn_t end, bool can_yield, bool flush,
                          bool shared)
 {
+       gfn_t max_gfn_host = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
+       bool zap_all = (start == 0 && end >= max_gfn_host);
        struct tdp_iter iter;
 
+       /*
+        * No need to try to step down in the iterator when zapping all SPTEs,
+        * zapping the top-level non-leaf SPTEs will recurse on their children.
+        */
+       int min_level = zap_all ? root->role.level : PG_LEVEL_4K;
+
+       /*
+        * Bound the walk at host.MAXPHYADDR, guest accesses beyond that will
+        * hit a #PF(RSVD) and never get to an EPT Violation/Misconfig / #NPF,
+        * and so KVM will never install a SPTE for such addresses.
+        */
+       end = min(end, max_gfn_host);
+
        kvm_lockdep_assert_mmu_lock_held(kvm, shared);
 
        rcu_read_lock();
 
-       tdp_root_for_each_pte(iter, root, start, end) {
+       for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
+                                  min_level, start, end) {
 retry:
                if (can_yield &&
                    tdp_mmu_iter_cond_resched(kvm, &iter, flush, shared)) {
@@ -744,9 +759,10 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
                /*
                 * If this is a non-last-level SPTE that covers a larger range
                 * than should be zapped, continue, and zap the mappings at a
-                * lower level.
+                * lower level, except when zapping all SPTEs.
                 */
-               if ((iter.gfn < start ||
+               if (!zap_all &&
+                   (iter.gfn < start ||
                     iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) &&
                    !is_last_spte(iter.old_spte, iter.level))
                        continue;
@@ -794,12 +810,11 @@ bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
 
 void kvm_tdp_mmu_zap_all(struct kvm *kvm)
 {
-       gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
        bool flush = false;
        int i;
 
        for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
-               flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, max_gfn,
+               flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, -1ull,
                                                  flush, false);
 
        if (flush)
@@ -838,7 +853,6 @@ static struct kvm_mmu_page *next_invalidated_root(struct kvm *kvm,
  */
 void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
 {
-       gfn_t max_gfn = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
        struct kvm_mmu_page *next_root;
        struct kvm_mmu_page *root;
        bool flush = false;
@@ -854,8 +868,7 @@ void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
 
                rcu_read_unlock();
 
-               flush = zap_gfn_range(kvm, root, 0, max_gfn, true, flush,
-                                     true);
+               flush = zap_gfn_range(kvm, root, 0, -1ull, true, flush, true);
 
                /*
                 * Put the reference acquired in
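
Two decisions drive the zap_gfn_range() changes: callers may now pass -1ull to mean "everything", which is clamped to the host's maximum GFN, and a full zap starts the walk at the root level so non-leaf SPTEs are zapped recursively rather than leaked. A standalone sketch of just that planning logic (the constants and the helper name are illustrative, not KVM's API):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT  12
#define PG_LEVEL_4K 1

/* Decide how zap_gfn_range() would walk: clamp "end" to the host limit and,
 * for a full zap, stay at the root level so children are zapped recursively. */
static void plan_zap(uint64_t start, uint64_t end, int shadow_phys_bits, int root_level)
{
        uint64_t max_gfn_host = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
        int zap_all = (start == 0 && end >= max_gfn_host);
        int min_level = zap_all ? root_level : PG_LEVEL_4K;

        if (end > max_gfn_host)
                end = max_gfn_host;   /* no SPTE can exist above host.MAXPHYADDR */

        printf("walk [%#llx, %#llx) at min_level %d (zap_all=%d)\n",
               (unsigned long long)start, (unsigned long long)end, min_level, zap_all);
}

int main(void)
{
        plan_zap(0, -1ULL, 39, 4);       /* "zap everything": only the root level is walked */
        plan_zap(0x1000, 0x2000, 39, 4); /* ranged zap: step all the way down to 4K SPTEs   */
        return 0;
}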

arch/x86/kvm/vmx/nested.c

Lines changed: 43 additions & 13 deletions
@@ -330,6 +330,31 @@ void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu)
        vcpu_put(vcpu);
 }
 
+#define EPTP_PA_MASK   GENMASK_ULL(51, 12)
+
+static bool nested_ept_root_matches(hpa_t root_hpa, u64 root_eptp, u64 eptp)
+{
+       return VALID_PAGE(root_hpa) &&
+              ((root_eptp & EPTP_PA_MASK) == (eptp & EPTP_PA_MASK));
+}
+
+static void nested_ept_invalidate_addr(struct kvm_vcpu *vcpu, gpa_t eptp,
+                                      gpa_t addr)
+{
+       uint i;
+       struct kvm_mmu_root_info *cached_root;
+
+       WARN_ON_ONCE(!mmu_is_nested(vcpu));
+
+       for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
+               cached_root = &vcpu->arch.mmu->prev_roots[i];
+
+               if (nested_ept_root_matches(cached_root->hpa, cached_root->pgd,
+                                           eptp))
+                       vcpu->arch.mmu->invlpg(vcpu, addr, cached_root->hpa);
+       }
+}
+
 static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
                                         struct x86_exception *fault)
 {
@@ -342,10 +367,22 @@ static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
                vm_exit_reason = EXIT_REASON_PML_FULL;
                vmx->nested.pml_full = false;
                exit_qualification &= INTR_INFO_UNBLOCK_NMI;
-       } else if (fault->error_code & PFERR_RSVD_MASK)
-               vm_exit_reason = EXIT_REASON_EPT_MISCONFIG;
-       else
-               vm_exit_reason = EXIT_REASON_EPT_VIOLATION;
+       } else {
+               if (fault->error_code & PFERR_RSVD_MASK)
+                       vm_exit_reason = EXIT_REASON_EPT_MISCONFIG;
+               else
+                       vm_exit_reason = EXIT_REASON_EPT_VIOLATION;
+
+               /*
+                * Although the caller (kvm_inject_emulated_page_fault) would
+                * have already synced the faulting address in the shadow EPT
+                * tables for the current EPTP12, we also need to sync it for
+                * any other cached EPTP02s based on the same EP4TA, since the
+                * TLB associates mappings to the EP4TA rather than the full EPTP.
+                */
+               nested_ept_invalidate_addr(vcpu, vmcs12->ept_pointer,
+                                          fault->address);
+       }
 
        nested_vmx_vmexit(vcpu, vm_exit_reason, 0, exit_qualification);
        vmcs12->guest_physical_address = fault->address;
@@ -5325,14 +5362,6 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
        return nested_vmx_succeed(vcpu);
 }
 
-#define EPTP_PA_MASK   GENMASK_ULL(51, 12)
-
-static bool nested_ept_root_matches(hpa_t root_hpa, u64 root_eptp, u64 eptp)
-{
-       return VALID_PAGE(root_hpa) &&
-              ((root_eptp & EPTP_PA_MASK) == (eptp & EPTP_PA_MASK));
-}
-
 /* Emulate the INVEPT instruction */
 static int handle_invept(struct kvm_vcpu *vcpu)
 {
@@ -5826,7 +5855,8 @@ static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
                if (is_nmi(intr_info))
                        return true;
                else if (is_page_fault(intr_info))
-                       return vcpu->arch.apf.host_apf_flags || !enable_ept;
+                       return vcpu->arch.apf.host_apf_flags ||
+                              vmx_need_pf_intercept(vcpu);
                else if (is_debug(intr_info) &&
                         vcpu->guest_debug &
                         (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
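
The relocated helpers compare EPTPs by their EP4TA, i.e. only physical-address bits 51:12, because the low attribute bits (memory type, walk length, A/D enable) do not distinguish cached roots for TLB purposes. A small self-contained sketch of that comparison (the EPTP values are made up for illustration):

#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l)  ((~0ULL << (l)) & (~0ULL >> (63 - (h))))
#define EPTP_PA_MASK       GENMASK_ULL(51, 12)

/* Two EPTPs name the same EP4TA if their PA bits match; the low attribute
 * bits are deliberately ignored. */
static int eptp_matches(uint64_t cached_eptp, uint64_t eptp)
{
        return (cached_eptp & EPTP_PA_MASK) == (eptp & EPTP_PA_MASK);
}

int main(void)
{
        uint64_t a = 0x12345000ULL | 0x5e;  /* illustrative: A/D enabled   */
        uint64_t b = 0x12345000ULL | 0x1e;  /* same page, A/D disabled     */

        printf("%d\n", eptp_matches(a, b)); /* 1: same EP4TA, sync it too  */
        return 0;
}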

arch/x86/kvm/vmx/vmx.h

Lines changed: 1 addition & 1 deletion
@@ -522,7 +522,7 @@ static inline struct vmcs *alloc_vmcs(bool shadow)
 
 static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
 {
-       return vmx->secondary_exec_control &
+       return secondary_exec_controls_get(vmx) &
                SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
 }
