Skip to content

Commit 1577cb5

Browse files
oupton authored and Marc Zyngier committed
KVM: arm64: Handle stage-2 faults in parallel
The stage-2 map walker has been made parallel-aware, and as such can be called while only holding the read side of the MMU lock. Rip out the conditional locking in user_mem_abort() and instead grab the read lock. Continue to take the write lock from other callsites to kvm_pgtable_stage2_map(). Signed-off-by: Oliver Upton <[email protected]> Signed-off-by: Marc Zyngier <[email protected]> Link: https://lore.kernel.org/r/[email protected]
1 parent af87fc0 commit 1577cb5

File tree

4 files changed

+13
-28
lines changed

4 files changed

+13
-28
lines changed

arch/arm64/include/asm/kvm_pgtable.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -412,6 +412,7 @@ void kvm_pgtable_stage2_free_removed(struct kvm_pgtable_mm_ops *mm_ops, void *pg
412412
* @prot: Permissions and attributes for the mapping.
413413
* @mc: Cache of pre-allocated and zeroed memory from which to allocate
414414
* page-table pages.
415+
* @flags: Flags to control the page-table walk (ex. a shared walk)
415416
*
416417
* The offset of @addr within a page is ignored, @size is rounded-up to
417418
* the next page boundary and @phys is rounded-down to the previous page
@@ -433,7 +434,7 @@ void kvm_pgtable_stage2_free_removed(struct kvm_pgtable_mm_ops *mm_ops, void *pg
433434
*/
434435
int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
435436
u64 phys, enum kvm_pgtable_prot prot,
436-
void *mc);
437+
void *mc, enum kvm_pgtable_walk_flags flags);
437438

438439
/**
439440
* kvm_pgtable_stage2_set_owner() - Unmap and annotate pages in the IPA space to

arch/arm64/kvm/hyp/nvhe/mem_protect.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -257,7 +257,7 @@ static inline int __host_stage2_idmap(u64 start, u64 end,
257257
enum kvm_pgtable_prot prot)
258258
{
259259
return kvm_pgtable_stage2_map(&host_kvm.pgt, start, end - start, start,
260-
prot, &host_s2_pool);
260+
prot, &host_s2_pool, 0);
261261
}
262262

263263
/*

arch/arm64/kvm/hyp/pgtable.c

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -912,7 +912,7 @@ static int stage2_map_walker(const struct kvm_pgtable_visit_ctx *ctx,
912912

913913
int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
914914
u64 phys, enum kvm_pgtable_prot prot,
915-
void *mc)
915+
void *mc, enum kvm_pgtable_walk_flags flags)
916916
{
917917
int ret;
918918
struct stage2_map_data map_data = {
@@ -923,7 +923,8 @@ int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
923923
};
924924
struct kvm_pgtable_walker walker = {
925925
.cb = stage2_map_walker,
926-
.flags = KVM_PGTABLE_WALK_TABLE_PRE |
926+
.flags = flags |
927+
KVM_PGTABLE_WALK_TABLE_PRE |
927928
KVM_PGTABLE_WALK_LEAF,
928929
.arg = &map_data,
929930
};

arch/arm64/kvm/mmu.c

Lines changed: 7 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -861,7 +861,7 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
861861

862862
write_lock(&kvm->mmu_lock);
863863
ret = kvm_pgtable_stage2_map(pgt, addr, PAGE_SIZE, pa, prot,
864-
&cache);
864+
&cache, 0);
865865
write_unlock(&kvm->mmu_lock);
866866
if (ret)
867867
break;
@@ -1156,7 +1156,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
11561156
gfn_t gfn;
11571157
kvm_pfn_t pfn;
11581158
bool logging_active = memslot_is_logging(memslot);
1159-
bool use_read_lock = false;
11601159
unsigned long fault_level = kvm_vcpu_trap_get_fault_level(vcpu);
11611160
unsigned long vma_pagesize, fault_granule;
11621161
enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
@@ -1191,8 +1190,6 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
11911190
if (logging_active) {
11921191
force_pte = true;
11931192
vma_shift = PAGE_SHIFT;
1194-
use_read_lock = (fault_status == FSC_PERM && write_fault &&
1195-
fault_granule == PAGE_SIZE);
11961193
} else {
11971194
vma_shift = get_vma_page_shift(vma, hva);
11981195
}
@@ -1291,15 +1288,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
12911288
if (exec_fault && device)
12921289
return -ENOEXEC;
12931290

1294-
/*
1295-
* To reduce MMU contentions and enhance concurrency during dirty
1296-
* logging dirty logging, only acquire read lock for permission
1297-
* relaxation.
1298-
*/
1299-
if (use_read_lock)
1300-
read_lock(&kvm->mmu_lock);
1301-
else
1302-
write_lock(&kvm->mmu_lock);
1291+
read_lock(&kvm->mmu_lock);
13031292
pgt = vcpu->arch.hw_mmu->pgt;
13041293
if (mmu_invalidate_retry(kvm, mmu_seq))
13051294
goto out_unlock;
@@ -1343,15 +1332,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
13431332
* permissions only if vma_pagesize equals fault_granule. Otherwise,
13441333
* kvm_pgtable_stage2_map() should be called to change block size.
13451334
*/
1346-
if (fault_status == FSC_PERM && vma_pagesize == fault_granule) {
1335+
if (fault_status == FSC_PERM && vma_pagesize == fault_granule)
13471336
ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot);
1348-
} else {
1349-
WARN_ONCE(use_read_lock, "Attempted stage-2 map outside of write lock\n");
1350-
1337+
else
13511338
ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
13521339
__pfn_to_phys(pfn), prot,
1353-
memcache);
1354-
}
1340+
memcache, KVM_PGTABLE_WALK_SHARED);
13551341

13561342
/* Mark the page dirty only if the fault is handled successfully */
13571343
if (writable && !ret) {
@@ -1360,10 +1346,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
13601346
}
13611347

13621348
out_unlock:
1363-
if (use_read_lock)
1364-
read_unlock(&kvm->mmu_lock);
1365-
else
1366-
write_unlock(&kvm->mmu_lock);
1349+
read_unlock(&kvm->mmu_lock);
13671350
kvm_set_pfn_accessed(pfn);
13681351
kvm_release_pfn_clean(pfn);
13691352
return ret != -EAGAIN ? ret : 0;
@@ -1569,7 +1552,7 @@ bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
15691552
*/
15701553
kvm_pgtable_stage2_map(kvm->arch.mmu.pgt, range->start << PAGE_SHIFT,
15711554
PAGE_SIZE, __pfn_to_phys(pfn),
1572-
KVM_PGTABLE_PROT_R, NULL);
1555+
KVM_PGTABLE_PROT_R, NULL, 0);
15731556

15741557
return false;
15751558
}

0 commit comments

Comments (0)