Skip to content

Commit 3ff8df1

Browse files
kvaneeshmpe
authored and committed
powerpc/kvm/book3s: Avoid using rmap to protect parallel page table update.
We now depend on kvm->mmu_lock.

Signed-off-by: Aneesh Kumar K.V <[email protected]>
Signed-off-by: Michael Ellerman <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent 7769a33 commit 3ff8df1

File tree

1 file changed

+9
-29
lines changed

1 file changed

+9
-29
lines changed

arch/powerpc/kvm/book3s_64_vio_hv.c

Lines changed: 9 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -74,8 +74,8 @@ struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
7474
EXPORT_SYMBOL_GPL(kvmppc_find_table);
7575

7676
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
77-
static long kvmppc_rm_tce_to_ua(struct kvm *kvm, unsigned long tce,
78-
unsigned long *ua, unsigned long **prmap)
77+
static long kvmppc_rm_tce_to_ua(struct kvm *kvm,
78+
unsigned long tce, unsigned long *ua)
7979
{
8080
unsigned long gfn = tce >> PAGE_SHIFT;
8181
struct kvm_memory_slot *memslot;
@@ -87,9 +87,6 @@ static long kvmppc_rm_tce_to_ua(struct kvm *kvm, unsigned long tce,
8787
*ua = __gfn_to_hva_memslot(memslot, gfn) |
8888
(tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));
8989

90-
if (prmap)
91-
*prmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
92-
9390
return 0;
9491
}
9592

@@ -116,7 +113,7 @@ static long kvmppc_rm_tce_validate(struct kvmppc_spapr_tce_table *stt,
116113
if (iommu_tce_check_gpa(stt->page_shift, gpa))
117114
return H_PARAMETER;
118115

119-
if (kvmppc_rm_tce_to_ua(stt->kvm, tce, &ua, NULL))
116+
if (kvmppc_rm_tce_to_ua(stt->kvm, tce, &ua))
120117
return H_TOO_HARD;
121118

122119
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
@@ -411,7 +408,7 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
411408
return ret;
412409

413410
dir = iommu_tce_direction(tce);
414-
if ((dir != DMA_NONE) && kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
411+
if ((dir != DMA_NONE) && kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua))
415412
return H_PARAMETER;
416413

417414
entry = ioba >> stt->page_shift;
@@ -488,7 +485,6 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
488485
struct kvmppc_spapr_tce_table *stt;
489486
long i, ret = H_SUCCESS;
490487
unsigned long tces, entry, ua = 0;
491-
unsigned long *rmap = NULL;
492488
unsigned long mmu_seq;
493489
bool prereg = false;
494490
struct kvmppc_spapr_tce_iommu_table *stit;
@@ -530,7 +526,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
530526
*/
531527
struct mm_iommu_table_group_mem_t *mem;
532528

533-
if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL))
529+
if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua))
534530
return H_TOO_HARD;
535531

536532
mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
@@ -546,23 +542,9 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
546542
* We do not require memory to be preregistered in this case
547543
* so lock rmap and do __find_linux_pte_or_hugepte().
548544
*/
549-
if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
550-
return H_TOO_HARD;
551-
552-
rmap = (void *) vmalloc_to_phys(rmap);
553-
if (WARN_ON_ONCE_RM(!rmap))
545+
if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua))
554546
return H_TOO_HARD;
555547

556-
/*
557-
* Synchronize with the MMU notifier callbacks in
558-
* book3s_64_mmu_hv.c (kvm_unmap_hva_range_hv etc.).
559-
* While we have the rmap lock, code running on other CPUs
560-
* cannot finish unmapping the host real page that backs
561-
* this guest real page, so we are OK to access the host
562-
* real page.
563-
*/
564-
lock_rmap(rmap);
565-
566548
arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
567549
if (kvmppc_rm_ua_to_hpa(vcpu, mmu_seq, ua, &tces)) {
568550
ret = H_TOO_HARD;
@@ -582,7 +564,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
582564
unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);
583565

584566
ua = 0;
585-
if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) {
567+
if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua)) {
586568
ret = H_PARAMETER;
587569
goto invalidate_exit;
588570
}
@@ -607,10 +589,8 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
607589
iommu_tce_kill_rm(stit->tbl, entry, npages);
608590

609591
unlock_exit:
610-
if (rmap)
611-
unlock_rmap(rmap);
612-
613-
arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
592+
if (!prereg)
593+
arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
614594
return ret;
615595
}
616596

0 commit comments

Comments (0)