Skip to content

Commit b1a3a94

Browse files
Marc Zyngier authored and oupton committed
KVM: arm64: nv: Tag shadow S2 entries with guest's leaf S2 level
Populate bits [56:55] of the leaf entry with the level provided by the guest's S2 translation. This will allow us to better scope the invalidation by remembering the mapping size. Of course, this assumes that the guest will issue an invalidation with an address that falls into the same leaf. If the guest doesn't, we'll over-invalidate. Signed-off-by: Marc Zyngier <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Oliver Upton <[email protected]>
1 parent d1de157 commit b1a3a94

File tree

2 files changed

+25
-2
lines changed

2 files changed

+25
-2
lines changed

arch/arm64/include/asm/kvm_nested.h

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@
55
#include <linux/bitfield.h>
66
#include <linux/kvm_host.h>
77
#include <asm/kvm_emulate.h>
8+
#include <asm/kvm_pgtable.h>
89

910
static inline bool vcpu_has_nv(const struct kvm_vcpu *vcpu)
1011
{
@@ -195,4 +196,11 @@ static inline bool kvm_auth_eretax(struct kvm_vcpu *vcpu, u64 *elr)
195196
}
196197
#endif
197198

199+
/* SW bits of the shadow S2 leaf PTE (bits [56:55] per the commit message)
 * repurposed to stash the guest's own S2 translation level. */
#define KVM_NV_GUEST_MAP_SZ (KVM_PGTABLE_PROT_SW1 | KVM_PGTABLE_PROT_SW0)
200+
201+
/* Encode the level of the guest's leaf S2 translation into the PTE SW
 * bits, as a proxy for the span of that mapping; retrieved on guest TLB
 * invalidation to limit the invalidation scope when no TTL hint or range
 * is provided (see the mmu.c hunks in this commit). */
static inline u64 kvm_encode_nested_level(struct kvm_s2_trans *trans)
202+
{
203+
	return FIELD_PREP(KVM_NV_GUEST_MAP_SZ, trans->level);
204+
}
205+
198206
#endif /* __ARM64_KVM_NESTED_H */

arch/arm64/kvm/mmu.c

Lines changed: 17 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1598,11 +1598,19 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
15981598
* Potentially reduce shadow S2 permissions to match the guest's own
15991599
* S2. For exec faults, we'd only reach this point if the guest
16001600
* actually allowed it (see kvm_s2_handle_perm_fault).
1601+
*
1602+
* Also encode the level of the original translation in the SW bits
1603+
* of the leaf entry as a proxy for the span of that translation.
1604+
* This will be retrieved on TLB invalidation from the guest and
1605+
* used to limit the invalidation scope if a TTL hint or a range
1606+
* isn't provided.
16011607
*/
16021608
if (nested) {
16031609
writable &= kvm_s2_trans_writable(nested);
16041610
if (!kvm_s2_trans_readable(nested))
16051611
prot &= ~KVM_PGTABLE_PROT_R;
1612+
1613+
prot |= kvm_encode_nested_level(nested);
16061614
}
16071615

16081616
read_lock(&kvm->mmu_lock);
@@ -1661,14 +1669,21 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
16611669
* permissions only if vma_pagesize equals fault_granule. Otherwise,
16621670
* kvm_pgtable_stage2_map() should be called to change block size.
16631671
*/
1664-
if (fault_is_perm && vma_pagesize == fault_granule)
1672+
if (fault_is_perm && vma_pagesize == fault_granule) {
1673+
/*
1674+
* Drop the SW bits in favour of those stored in the
1675+
* PTE, which will be preserved.
1676+
*/
1677+
prot &= ~KVM_NV_GUEST_MAP_SZ;
16651678
ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot);
1666-
else
1679+
} else {
16671680
ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
16681681
__pfn_to_phys(pfn), prot,
16691682
memcache,
16701683
KVM_PGTABLE_WALK_HANDLE_FAULT |
16711684
KVM_PGTABLE_WALK_SHARED);
1685+
}
1686+
16721687
out_unlock:
16731688
read_unlock(&kvm->mmu_lock);
16741689

0 commit comments

Comments (0)