Skip to content

Commit 7769a33

Browse files
kvaneeshmpe
authored and committed
powerpc/kvm/book3s: use find_kvm_host_pte in put_tce functions
Current code just holds the rmap lock to ensure parallel page table updates are prevented. That is not sufficient. The kernel should also check whether an mmu_notifier callback was running in parallel. Signed-off-by: Aneesh Kumar K.V <[email protected]> Signed-off-by: Michael Ellerman <[email protected]> Link: https://lore.kernel.org/r/[email protected]
1 parent e3d8ed5 commit 7769a33

File tree

1 file changed

+24
-6
lines changed

1 file changed

+24
-6
lines changed

arch/powerpc/kvm/book3s_64_vio_hv.c

Lines changed: 24 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -437,8 +437,8 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
437437
return H_SUCCESS;
438438
}
439439

440-
static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu,
441-
unsigned long ua, unsigned long *phpa)
440+
static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu, unsigned long mmu_seq,
441+
unsigned long ua, unsigned long *phpa)
442442
{
443443
pte_t *ptep, pte;
444444
unsigned shift = 0;
@@ -452,10 +452,17 @@ static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu,
452452
* to exit which will agains result in the below page table walk
453453
* to finish.
454454
*/
455-
ptep = __find_linux_pte(vcpu->arch.pgdir, ua, NULL, &shift);
456-
if (!ptep || !pte_present(*ptep))
455+
/* an rmap lock won't make it safe. because that just ensure hash
456+
* page table entries are removed with rmap lock held. After that
457+
* mmu notifier returns and we go ahead and removing ptes from Qemu page table.
458+
*/
459+
ptep = find_kvm_host_pte(vcpu->kvm, mmu_seq, ua, &shift);
460+
if (!ptep)
461+
return -ENXIO;
462+
463+
pte = READ_ONCE(*ptep);
464+
if (!pte_present(pte))
457465
return -ENXIO;
458-
pte = *ptep;
459466

460467
if (!shift)
461468
shift = PAGE_SHIFT;
@@ -477,17 +484,25 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
477484
unsigned long liobn, unsigned long ioba,
478485
unsigned long tce_list, unsigned long npages)
479486
{
487+
struct kvm *kvm = vcpu->kvm;
480488
struct kvmppc_spapr_tce_table *stt;
481489
long i, ret = H_SUCCESS;
482490
unsigned long tces, entry, ua = 0;
483491
unsigned long *rmap = NULL;
492+
unsigned long mmu_seq;
484493
bool prereg = false;
485494
struct kvmppc_spapr_tce_iommu_table *stit;
486495

487496
/* For radix, we might be in virtual mode, so punt */
488497
if (kvm_is_radix(vcpu->kvm))
489498
return H_TOO_HARD;
490499

500+
/*
501+
* used to check for invalidations in progress
502+
*/
503+
mmu_seq = kvm->mmu_notifier_seq;
504+
smp_rmb();
505+
491506
stt = kvmppc_find_table(vcpu->kvm, liobn);
492507
if (!stt)
493508
return H_TOO_HARD;
@@ -547,7 +562,9 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
547562
* real page.
548563
*/
549564
lock_rmap(rmap);
550-
if (kvmppc_rm_ua_to_hpa(vcpu, ua, &tces)) {
565+
566+
arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
567+
if (kvmppc_rm_ua_to_hpa(vcpu, mmu_seq, ua, &tces)) {
551568
ret = H_TOO_HARD;
552569
goto unlock_exit;
553570
}
@@ -593,6 +610,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
593610
if (rmap)
594611
unlock_rmap(rmap);
595612

613+
arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
596614
return ret;
597615
}
598616

0 commit comments

Comments
 (0)