Skip to content

Commit 6d674e2

Browse files
author
Marc Zyngier
committed
KVM: arm/arm64: Properly handle faulting of device mappings
A device mapping is normally always mapped at Stage-2, since there is very little gain in having it faulted in. Nonetheless, it is possible to end up in a situation where the device mapping has been removed from Stage-2 (userspace munmapped the VFIO region, and the MMU notifier did its job), but is still present in a userspace mapping (userspace has mapped it back at the same address). In such a situation, the device mapping will be demand-paged as the guest performs memory accesses. This requires us to be careful when dealing with mapping size and cache management, and to handle potential execution of a device mapping. Reported-by: Alexandru Elisei <[email protected]> Signed-off-by: Marc Zyngier <[email protected]> Tested-by: Alexandru Elisei <[email protected]> Reviewed-by: James Morse <[email protected]> Cc: [email protected] Link: https://lore.kernel.org/r/[email protected]
1 parent 1ce74e9 commit 6d674e2

File tree

1 file changed

+17
-4
lines changed

1 file changed

+17
-4
lines changed

virt/kvm/arm/mmu.c

Lines changed: 17 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -38,6 +38,11 @@ static unsigned long io_map_base;
3838
/* Stage-2 mapping flags threaded through the fault-handling path. */
#define KVM_S2PTE_FLAG_IS_IOMAP		(1UL << 0)
#define KVM_S2_FLAG_LOGGING_ACTIVE	(1UL << 1)

/*
 * is_iomap - test whether the mapping flags mark a device (I/O) mapping.
 *
 * Device mappings need special treatment on the fault path: no dcache
 * maintenance and no execution permission.
 */
static bool is_iomap(unsigned long flags)
{
	return (flags & KVM_S2PTE_FLAG_IS_IOMAP) != 0;
}
45+
4146
static bool memslot_is_logging(struct kvm_memory_slot *memslot)
4247
{
4348
return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
@@ -1698,6 +1703,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
16981703

16991704
vma_pagesize = vma_kernel_pagesize(vma);
17001705
if (logging_active ||
1706+
(vma->vm_flags & VM_PFNMAP) ||
17011707
!fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) {
17021708
force_pte = true;
17031709
vma_pagesize = PAGE_SIZE;
@@ -1760,6 +1766,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
17601766
writable = false;
17611767
}
17621768

1769+
if (exec_fault && is_iomap(flags))
1770+
return -ENOEXEC;
1771+
17631772
spin_lock(&kvm->mmu_lock);
17641773
if (mmu_notifier_retry(kvm, mmu_seq))
17651774
goto out_unlock;
@@ -1781,7 +1790,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
17811790
if (writable)
17821791
kvm_set_pfn_dirty(pfn);
17831792

1784-
if (fault_status != FSC_PERM)
1793+
if (fault_status != FSC_PERM && !is_iomap(flags))
17851794
clean_dcache_guest_page(pfn, vma_pagesize);
17861795

17871796
if (exec_fault)
@@ -1948,9 +1957,8 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
19481957
if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
19491958
if (is_iabt) {
19501959
/* Prefetch Abort on I/O address */
1951-
kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
1952-
ret = 1;
1953-
goto out_unlock;
1960+
ret = -ENOEXEC;
1961+
goto out;
19541962
}
19551963

19561964
/*
@@ -1992,6 +2000,11 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
19922000
ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
19932001
if (ret == 0)
19942002
ret = 1;
2003+
out:
2004+
if (ret == -ENOEXEC) {
2005+
kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
2006+
ret = 1;
2007+
}
19952008
out_unlock:
19962009
srcu_read_unlock(&vcpu->kvm->srcu, idx);
19972010
return ret;

0 commit comments

Comments
 (0)