Commit df6556a
KVM: arm64: Correctly handle page aging notifiers for unaligned memslot
Userspace is allowed to select any PAGE_SIZE aligned hva to back guest memory.
This is even the case with hugepages, although it is a rather suboptimal
configuration as PTE level mappings are used at stage-2.

The arm64 page aging handlers have an assumption that the specified range is
exactly one page/block of memory, which in the aforementioned case is not
necessarily true. All together, this leads to the WARN() in kvm_age_gfn()
firing.

However, the WARN is only part of the issue, as the table walkers visit at
most a single leaf PTE. For hugepage-backed memory in a memslot that isn't
hugepage-aligned, page aging entirely misses accesses to the hugepage beyond
the first page in the memslot.

Add a new walker dedicated to handling page aging MMU notifiers capable of
walking a range of PTEs. Convert kvm(_test)_age_gfn() over to the new walker
and drop the WARN that caught the issue in the first place. The implementation
of this walker was inspired by the test_clear_young() implementation by
Yu Zhao [*], but repurposed to address a bug in the existing aging
implementation.

Cc: [email protected] # v5.15
Fixes: 056aad6 ("kvm: arm/arm64: Rework gpa callback handlers")
Link: https://lore.kernel.org/kvmarm/[email protected]/
Co-developed-by: Yu Zhao <[email protected]>
Signed-off-by: Yu Zhao <[email protected]>
Reported-by: Reiji Watanabe <[email protected]>
Reviewed-by: Marc Zyngier <[email protected]>
Reviewed-by: Shaoqin Huang <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Oliver Upton <[email protected]>
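For context, the configuration described in the commit message can be set up from userspace with nothing beyond the standard KVM memslot API. The sketch below is not part of this commit; it is a hypothetical, minimal illustration that registers a memslot whose userspace_addr is PAGE_SIZE aligned but deliberately offset by one base page so it is no longer 2MiB aligned. It assumes a 4KiB base page size and transparent hugepage backing, and omits error handling.

#include <fcntl.h>
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#define HUGEPAGE_SIZE	(2UL << 20)	/* assumed 2MiB PMD-level hugepage */
#define BASE_PAGE_SIZE	4096UL		/* assumed 4KiB base page */
#define SLOT_SIZE	(4UL << 20)

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	int vm = ioctl(kvm, KVM_CREATE_VM, 0);

	/* THP-eligible anonymous mapping; the host MMU may back it with 2MiB pages. */
	void *mem = mmap(NULL, SLOT_SIZE + HUGEPAGE_SIZE, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	madvise(mem, SLOT_SIZE + HUGEPAGE_SIZE, MADV_HUGEPAGE);

	/*
	 * Offset the hva by one base page: still PAGE_SIZE aligned (legal),
	 * but no longer aligned to the 2MiB hugepage boundary.
	 */
	struct kvm_userspace_memory_region region = {
		.slot = 0,
		.guest_phys_addr = 0x80000000UL,
		.memory_size = SLOT_SIZE,
		.userspace_addr = (unsigned long)mem + BASE_PAGE_SIZE,
	};
	ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region);

	/*
	 * If the primary MMU later ages a 2MiB THP overlapping this slot, the
	 * KVM notifier receives a PMD_SIZE range that stage-2 maps with
	 * base-page PTEs, so the aging walk must cover a range of leaf entries.
	 */
	close(vm);
	close(kvm);
	return 0;
}

With such a layout, kvm_age_gfn() previously saw a PMD_SIZE range but visited at most one stage-2 leaf PTE, which is the miss the commit message describes.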
Parent: 970dee0

3 files changed, 55 insertions(+), 36 deletions(-)

arch/arm64/include/asm/kvm_pgtable.h (9 additions, 17 deletions)

@@ -608,22 +608,26 @@ int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size);
 kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr);
 
 /**
- * kvm_pgtable_stage2_mkold() - Clear the access flag in a page-table entry.
+ * kvm_pgtable_stage2_test_clear_young() - Test and optionally clear the access
+ *					    flag in a page-table entry.
  * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
  * @addr:	Intermediate physical address to identify the page-table entry.
+ * @size:	Size of the address range to visit.
+ * @mkold:	True if the access flag should be cleared.
  *
  * The offset of @addr within a page is ignored.
  *
- * If there is a valid, leaf page-table entry used to translate @addr, then
- * clear the access flag in that entry.
+ * Tests and conditionally clears the access flag for every valid, leaf
+ * page-table entry used to translate the range [@addr, @addr + @size).
  *
  * Note that it is the caller's responsibility to invalidate the TLB after
  * calling this function to ensure that the updated permissions are visible
  * to the CPUs.
  *
- * Return: The old page-table entry prior to clearing the flag, 0 on failure.
+ * Return: True if any of the visited PTEs had the access flag set.
  */
-kvm_pte_t kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr);
+bool kvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr,
+					 u64 size, bool mkold);
 
 /**
  * kvm_pgtable_stage2_relax_perms() - Relax the permissions enforced by a
@@ -645,18 +649,6 @@ kvm_pte_t kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr);
 int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
 				   enum kvm_pgtable_prot prot);
 
-/**
- * kvm_pgtable_stage2_is_young() - Test whether a page-table entry has the
- *				   access flag set.
- * @pgt:	Page-table structure initialised by kvm_pgtable_stage2_init*().
- * @addr:	Intermediate physical address to identify the page-table entry.
- *
- * The offset of @addr within a page is ignored.
- *
- * Return: True if the page-table entry has the access flag set, false otherwise.
- */
-bool kvm_pgtable_stage2_is_young(struct kvm_pgtable *pgt, u64 addr);
-
 /**
  * kvm_pgtable_stage2_flush_range() - Clean and invalidate data cache to Point
  *				      of Coherency for guest stage-2 address

arch/arm64/kvm/hyp/pgtable.c (38 additions, 9 deletions)

@@ -1195,25 +1195,54 @@ kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr)
 	return pte;
 }
 
-kvm_pte_t kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr)
+struct stage2_age_data {
+	bool	mkold;
+	bool	young;
+};
+
+static int stage2_age_walker(const struct kvm_pgtable_visit_ctx *ctx,
+			     enum kvm_pgtable_walk_flags visit)
 {
-	kvm_pte_t pte = 0;
-	stage2_update_leaf_attrs(pgt, addr, 1, 0, KVM_PTE_LEAF_ATTR_LO_S2_AF,
-				 &pte, NULL, 0);
+	kvm_pte_t new = ctx->old & ~KVM_PTE_LEAF_ATTR_LO_S2_AF;
+	struct stage2_age_data *data = ctx->arg;
+
+	if (!kvm_pte_valid(ctx->old) || new == ctx->old)
+		return 0;
+
+	data->young = true;
+
+	/*
+	 * stage2_age_walker() is always called while holding the MMU lock for
+	 * write, so this will always succeed. Nonetheless, this deliberately
+	 * follows the race detection pattern of the other stage-2 walkers in
+	 * case the locking mechanics of the MMU notifiers is ever changed.
+	 */
+	if (data->mkold && !stage2_try_set_pte(ctx, new))
+		return -EAGAIN;
+
 	/*
 	 * "But where's the TLBI?!", you scream.
 	 * "Over in the core code", I sigh.
 	 *
 	 * See the '->clear_flush_young()' callback on the KVM mmu notifier.
 	 */
-	return pte;
+	return 0;
 }
 
-bool kvm_pgtable_stage2_is_young(struct kvm_pgtable *pgt, u64 addr)
+bool kvm_pgtable_stage2_test_clear_young(struct kvm_pgtable *pgt, u64 addr,
+					 u64 size, bool mkold)
 {
-	kvm_pte_t pte = 0;
-	stage2_update_leaf_attrs(pgt, addr, 1, 0, 0, &pte, NULL, 0);
-	return pte & KVM_PTE_LEAF_ATTR_LO_S2_AF;
+	struct stage2_age_data data = {
+		.mkold = mkold,
+	};
+	struct kvm_pgtable_walker walker = {
+		.cb	= stage2_age_walker,
+		.arg	= &data,
+		.flags	= KVM_PGTABLE_WALK_LEAF,
+	};
+
+	WARN_ON(kvm_pgtable_walk(pgt, addr, size, &walker));
+	return data.young;
 }
 
 int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,

arch/arm64/kvm/mmu.c (8 additions, 10 deletions)

@@ -1756,27 +1756,25 @@ bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 {
 	u64 size = (range->end - range->start) << PAGE_SHIFT;
-	kvm_pte_t kpte;
-	pte_t pte;
 
 	if (!kvm->arch.mmu.pgt)
 		return false;
 
-	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
-
-	kpte = kvm_pgtable_stage2_mkold(kvm->arch.mmu.pgt,
-					range->start << PAGE_SHIFT);
-	pte = __pte(kpte);
-	return pte_valid(pte) && pte_young(pte);
+	return kvm_pgtable_stage2_test_clear_young(kvm->arch.mmu.pgt,
+						   range->start << PAGE_SHIFT,
+						   size, true);
 }
 
 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 {
+	u64 size = (range->end - range->start) << PAGE_SHIFT;
+
 	if (!kvm->arch.mmu.pgt)
 		return false;
 
-	return kvm_pgtable_stage2_is_young(kvm->arch.mmu.pgt,
-					   range->start << PAGE_SHIFT);
+	return kvm_pgtable_stage2_test_clear_young(kvm->arch.mmu.pgt,
+						   range->start << PAGE_SHIFT,
+						   size, false);
 }
 
 phys_addr_t kvm_mmu_get_httbr(void)
