Skip to content

Commit 0ab12f3

Browse files
oupton authored and Marc Zyngier committed
KVM: arm64: Make block->table PTE changes parallel-aware
In order to service stage-2 faults in parallel, stage-2 table walkers must take exclusive ownership of the PTE being worked on. An additional requirement of the architecture is that software must perform a 'break-before-make' operation when changing the block size used for mapping memory. Roll these two concepts together into helpers for performing a 'break-before-make' sequence. Use a special PTE value to indicate a PTE has been locked by a software walker. Additionally, use an atomic compare-exchange to 'break' the PTE when the stage-2 page tables are possibly shared with another software walker. Elide the DSB + TLBI if the evicted PTE was invalid (and thus not subject to break-before-make). All of the atomics do nothing for now, as the stage-2 walker isn't fully ready to perform parallel walks. Signed-off-by: Oliver Upton <[email protected]> Signed-off-by: Marc Zyngier <[email protected]> Link: https://lore.kernel.org/r/[email protected]
1 parent 331aa3a commit 0ab12f3

File tree

1 file changed

+75
-5
lines changed

1 file changed

+75
-5
lines changed

arch/arm64/kvm/hyp/pgtable.c

Lines changed: 75 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -49,6 +49,12 @@
4949
#define KVM_INVALID_PTE_OWNER_MASK GENMASK(9, 2)
5050
#define KVM_MAX_OWNER_ID 1
5151

52+
/*
53+
* Used to indicate a pte for which a 'break-before-make' sequence is in
54+
* progress.
55+
*/
56+
#define KVM_INVALID_PTE_LOCKED BIT(10)
57+
5258
struct kvm_pgtable_walk_data {
5359
struct kvm_pgtable_walker *walker;
5460

@@ -674,6 +680,11 @@ static bool stage2_pte_is_counted(kvm_pte_t pte)
674680
return !!pte;
675681
}
676682

683+
/*
 * A locked pte is an invalid pte that carries KVM_INVALID_PTE_LOCKED,
 * i.e. a software walker currently owns it mid break-before-make.
 */
static bool stage2_pte_is_locked(kvm_pte_t pte)
{
	if (kvm_pte_valid(pte))
		return false;

	return pte & KVM_INVALID_PTE_LOCKED;
}
687+
677688
static bool stage2_try_set_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_t new)
678689
{
679690
if (!kvm_pgtable_walk_shared(ctx)) {
@@ -684,6 +695,64 @@ static bool stage2_try_set_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_
684695
return cmpxchg(ctx->ptep, ctx->old, new) == ctx->old;
685696
}
686697

698+
/**
 * stage2_try_break_pte() - Invalidates a pte according to the
 *			    'break-before-make' requirements of the
 *			    architecture.
 *
 * @ctx: context of the visited pte.
 * @mmu: stage-2 mmu
 *
 * Returns: true if the pte was successfully broken.
 *
 * If the removed pte was valid, performs the necessary serialization and TLB
 * invalidation for the old value. For counted ptes, drops the reference count
 * on the containing table page.
 */
static bool stage2_try_break_pte(const struct kvm_pgtable_visit_ctx *ctx,
				 struct kvm_s2_mmu *mmu)
{
	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;

	if (stage2_pte_is_locked(ctx->old)) {
		/*
		 * Should never occur if this walker has exclusive access to the
		 * page tables.
		 */
		WARN_ON(!kvm_pgtable_walk_shared(ctx));
		return false;
	}

	/*
	 * Take exclusive ownership of the pte: a shared walk uses cmpxchg()
	 * inside stage2_try_set_pte(), so a concurrent walker that raced us
	 * causes this to fail and the caller must retry.
	 */
	if (!stage2_try_set_pte(ctx, KVM_INVALID_PTE_LOCKED))
		return false;

	/*
	 * Perform the appropriate TLB invalidation based on the evicted pte
	 * value (if any). An invalid old pte needs no invalidation at all;
	 * evicting a table uses a VMID-wide flush, while a valid leaf only
	 * needs its single IPA invalidated.
	 */
	if (kvm_pte_table(ctx->old, ctx->level))
		kvm_call_hyp(__kvm_tlb_flush_vmid, mmu);
	else if (kvm_pte_valid(ctx->old))
		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr, ctx->level);

	/* Drop the reference the evicted pte held on its table page. */
	if (stage2_pte_is_counted(ctx->old))
		mm_ops->put_page(ctx->ptep);

	return true;
}
743+
744+
/*
 * stage2_make_pte() - Installs @new into the pte, completing the 'make' half
 * of a break-before-make sequence.
 *
 * The pte must already be locked (broken) by stage2_try_break_pte(); the
 * WARN_ON re-reads the live pte to catch callers that skipped the 'break'.
 * For counted ptes, the reference on the containing table page is taken
 * before publication, and smp_store_release() orders any prior
 * initialization of the new mapping before it becomes visible to other
 * walkers.
 */
static void stage2_make_pte(const struct kvm_pgtable_visit_ctx *ctx, kvm_pte_t new)
{
	struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;

	WARN_ON(!stage2_pte_is_locked(*ctx->ptep));

	if (stage2_pte_is_counted(new))
		mm_ops->get_page(ctx->ptep);

	smp_store_release(ctx->ptep, new);
}
755+
687756
static void stage2_put_pte(const struct kvm_pgtable_visit_ctx *ctx, struct kvm_s2_mmu *mmu,
688757
struct kvm_pgtable_mm_ops *mm_ops)
689758
{
@@ -812,17 +881,18 @@ static int stage2_map_walk_leaf(const struct kvm_pgtable_visit_ctx *ctx,
812881
if (!childp)
813882
return -ENOMEM;
814883

884+
if (!stage2_try_break_pte(ctx, data->mmu)) {
885+
mm_ops->put_page(childp);
886+
return -EAGAIN;
887+
}
888+
815889
/*
816890
* If we've run into an existing block mapping then replace it with
817891
* a table. Accesses beyond 'end' that fall within the new table
818892
* will be mapped lazily.
819893
*/
820-
if (stage2_pte_is_counted(ctx->old))
821-
stage2_put_pte(ctx, data->mmu, mm_ops);
822-
823894
new = kvm_init_table_pte(childp, mm_ops);
824-
mm_ops->get_page(ctx->ptep);
825-
smp_store_release(ctx->ptep, new);
895+
stage2_make_pte(ctx, new);
826896

827897
return 0;
828898
}

0 commit comments

Comments (0)