Commit 4efc0ed
Author: Marc Zyngier (committed)

KVM: arm64: Unify stage-2 programming behind __load_stage2()

The protected mode relies on a separate helper to load the S2 context.
Move over to the __load_guest_stage2() helper instead, and rename it to
__load_stage2() to present a unified interface.

Cc: Catalin Marinas <[email protected]>
Cc: Jade Alglave <[email protected]>
Cc: Shameer Kolothum <[email protected]>
Signed-off-by: Marc Zyngier <[email protected]>
Signed-off-by: Will Deacon <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent 923a547 commit 4efc0ed

File tree: 7 files changed, +13 −18 lines changed
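For orientation, here is the call pattern the hunks below converge on. This is a minimal sketch assembled from this commit's own diff (mmu, host_kvm, vcpu, and kern_hyp_va() are as used in the respective files), not a complete listing:

    /* nVHE guest entry (arch/arm64/kvm/hyp/nvhe/switch.c): */
    __load_stage2(mmu, kern_hyp_va(mmu->arch));

    /* Protected-mode host stage-2 (nvhe/mem_protect.h and mem_protect.c): */
    __load_stage2(&host_kvm.arch.mmu, &host_kvm.arch);

    /* VHE guest entry (arch/arm64/kvm/hyp/vhe/switch.c): */
    __load_stage2(vcpu->arch.hw_mmu, vcpu->arch.hw_mmu->arch);

Every caller now passes a struct kvm_arch pointer and lets the helper read arch->vtcr itself, rather than some callers pre-extracting the VTCR value.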

arch/arm64/include/asm/kvm_mmu.h

Lines changed: 3 additions & 8 deletions
@@ -267,9 +267,10 @@ static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
  * Must be called from hyp code running at EL2 with an updated VTTBR
  * and interrupts disabled.
  */
-static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu, unsigned long vtcr)
+static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu,
+					  struct kvm_arch *arch)
 {
-	write_sysreg(vtcr, vtcr_el2);
+	write_sysreg(arch->vtcr, vtcr_el2);
 	write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);
 
 	/*
@@ -280,12 +281,6 @@ static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu, unsigned long
 	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
 }
 
-static __always_inline void __load_guest_stage2(struct kvm_s2_mmu *mmu,
-						struct kvm_arch *arch)
-{
-	__load_stage2(mmu, arch->vtcr);
-}
-
 static inline struct kvm *kvm_s2_mmu_to_kvm(struct kvm_s2_mmu *mmu)
 {
 	return container_of(mmu->arch, struct kvm, arch);

arch/arm64/kvm/hyp/include/nvhe/mem_protect.h

Lines changed: 1 addition & 1 deletion
@@ -29,7 +29,7 @@ void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt);
 static __always_inline void __load_host_stage2(void)
 {
 	if (static_branch_likely(&kvm_protected_mode_initialized))
-		__load_stage2(&host_kvm.arch.mmu, host_kvm.arch.vtcr);
+		__load_stage2(&host_kvm.arch.mmu, &host_kvm.arch);
 	else
 		write_sysreg(0, vttbr_el2);
 }

arch/arm64/kvm/hyp/nvhe/mem_protect.c

Lines changed: 1 addition & 1 deletion
@@ -126,7 +126,7 @@ int __pkvm_prot_finalize(void)
 	kvm_flush_dcache_to_poc(params, sizeof(*params));
 
 	write_sysreg(params->hcr_el2, hcr_el2);
-	__load_stage2(&host_kvm.arch.mmu, host_kvm.arch.vtcr);
+	__load_stage2(&host_kvm.arch.mmu, &host_kvm.arch);
 
 	/*
 	 * Make sure to have an ISB before the TLB maintenance below but only

arch/arm64/kvm/hyp/nvhe/switch.c

Lines changed: 1 addition & 1 deletion
@@ -215,7 +215,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 	__sysreg_restore_state_nvhe(guest_ctxt);
 
 	mmu = kern_hyp_va(vcpu->arch.hw_mmu);
-	__load_guest_stage2(mmu, kern_hyp_va(mmu->arch));
+	__load_stage2(mmu, kern_hyp_va(mmu->arch));
 	__activate_traps(vcpu);
 
 	__hyp_vgic_restore_state(vcpu);

arch/arm64/kvm/hyp/nvhe/tlb.c

Lines changed: 2 additions & 2 deletions
@@ -34,12 +34,12 @@ static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
 	}
 
 	/*
-	 * __load_guest_stage2() includes an ISB only when the AT
+	 * __load_stage2() includes an ISB only when the AT
 	 * workaround is applied. Take care of the opposite condition,
 	 * ensuring that we always have an ISB, but not two ISBs back
 	 * to back.
 	 */
-	__load_guest_stage2(mmu, kern_hyp_va(mmu->arch));
+	__load_stage2(mmu, kern_hyp_va(mmu->arch));
 	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
 }

arch/arm64/kvm/hyp/vhe/switch.c

Lines changed: 3 additions & 3 deletions
@@ -124,11 +124,11 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 	 *
 	 * We have already configured the guest's stage 1 translation in
 	 * kvm_vcpu_load_sysregs_vhe above. We must now call
-	 * __load_guest_stage2 before __activate_traps, because
-	 * __load_guest_stage2 configures stage 2 translation, and
+	 * __load_stage2 before __activate_traps, because
+	 * __load_stage2 configures stage 2 translation, and
 	 * __activate_traps clear HCR_EL2.TGE (among other things).
 	 */
-	__load_guest_stage2(vcpu->arch.hw_mmu, vcpu->arch.hw_mmu->arch);
+	__load_stage2(vcpu->arch.hw_mmu, vcpu->arch.hw_mmu->arch);
 	__activate_traps(vcpu);
 
 	__kvm_adjust_pc(vcpu);

arch/arm64/kvm/hyp/vhe/tlb.c

Lines changed: 2 additions & 2 deletions
@@ -50,10 +50,10 @@ static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
 	 *
 	 * ARM erratum 1165522 requires some special handling (again),
 	 * as we need to make sure both stages of translation are in
-	 * place before clearing TGE. __load_guest_stage2() already
+	 * place before clearing TGE. __load_stage2() already
 	 * has an ISB in order to deal with this.
 	 */
-	__load_guest_stage2(mmu, mmu->arch);
+	__load_stage2(mmu, mmu->arch);
 	val = read_sysreg(hcr_el2);
 	val &= ~HCR_TGE;
 	write_sysreg(val, hcr_el2);
