Skip to content

Commit deb151a

Browse files
author
Marc Zyngier
committed
Merge branch kvm-arm64/mmu/vmid-cleanups into kvmarm-master/next
* kvm-arm64/mmu/vmid-cleanups:
  : Cleanup the stage-2 configuration by providing a single helper,
  : and tidy up some of the ordering requirements for the VMID
  : allocator.
  KVM: arm64: Upgrade VMID accesses to {READ,WRITE}_ONCE
  KVM: arm64: Unify stage-2 programming behind __load_stage2()
  KVM: arm64: Move kern_hyp_va() usage in __load_guest_stage2() into the callers

Signed-off-by: Marc Zyngier <[email protected]>
2 parents ca3385a + cf364e0 commit deb151a

File tree

9 files changed

+25
-22
lines changed

9 files changed

+25
-22
lines changed

arch/arm64/include/asm/kvm_mmu.h

Lines changed: 9 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -252,24 +252,30 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
252252

253253
#define kvm_phys_to_vttbr(addr) phys_to_ttbr(addr)
254254

255+
/*
256+
* When this is (directly or indirectly) used on the TLB invalidation
257+
* path, we rely on a previously issued DSB so that page table updates
258+
* and VMID reads are correctly ordered.
259+
*/
255260
static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
256261
{
257262
struct kvm_vmid *vmid = &mmu->vmid;
258263
u64 vmid_field, baddr;
259264
u64 cnp = system_supports_cnp() ? VTTBR_CNP_BIT : 0;
260265

261266
baddr = mmu->pgd_phys;
262-
vmid_field = (u64)vmid->vmid << VTTBR_VMID_SHIFT;
267+
vmid_field = (u64)READ_ONCE(vmid->vmid) << VTTBR_VMID_SHIFT;
263268
return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
264269
}
265270

266271
/*
267272
* Must be called from hyp code running at EL2 with an updated VTTBR
268273
* and interrupts disabled.
269274
*/
270-
static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu, unsigned long vtcr)
275+
static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu,
276+
struct kvm_arch *arch)
271277
{
272-
write_sysreg(vtcr, vtcr_el2);
278+
write_sysreg(arch->vtcr, vtcr_el2);
273279
write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);
274280

275281
/*
@@ -280,11 +286,6 @@ static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu, unsigned long
280286
asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
281287
}
282288

283-
static __always_inline void __load_guest_stage2(struct kvm_s2_mmu *mmu)
284-
{
285-
__load_stage2(mmu, kern_hyp_va(mmu->arch)->vtcr);
286-
}
287-
288289
static inline struct kvm *kvm_s2_mmu_to_kvm(struct kvm_s2_mmu *mmu)
289290
{
290291
return container_of(mmu->arch, struct kvm, arch);

arch/arm64/kvm/arm.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -573,7 +573,7 @@ static void update_vmid(struct kvm_vmid *vmid)
573573
kvm_call_hyp(__kvm_flush_vm_context);
574574
}
575575

576-
vmid->vmid = kvm_next_vmid;
576+
WRITE_ONCE(vmid->vmid, kvm_next_vmid);
577577
kvm_next_vmid++;
578578
kvm_next_vmid &= (1 << kvm_get_vmid_bits()) - 1;
579579

arch/arm64/kvm/hyp/include/nvhe/mem_protect.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -60,7 +60,7 @@ void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt);
6060
static __always_inline void __load_host_stage2(void)
6161
{
6262
if (static_branch_likely(&kvm_protected_mode_initialized))
63-
__load_stage2(&host_kvm.arch.mmu, host_kvm.arch.vtcr);
63+
__load_stage2(&host_kvm.arch.mmu, &host_kvm.arch);
6464
else
6565
write_sysreg(0, vttbr_el2);
6666
}

arch/arm64/kvm/hyp/nvhe/mem_protect.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -112,8 +112,8 @@ int kvm_host_prepare_stage2(void *pgt_pool_base)
112112
mmu->pgd_phys = __hyp_pa(host_kvm.pgt.pgd);
113113
mmu->arch = &host_kvm.arch;
114114
mmu->pgt = &host_kvm.pgt;
115-
mmu->vmid.vmid_gen = 0;
116-
mmu->vmid.vmid = 0;
115+
WRITE_ONCE(mmu->vmid.vmid_gen, 0);
116+
WRITE_ONCE(mmu->vmid.vmid, 0);
117117

118118
return 0;
119119
}
@@ -129,7 +129,7 @@ int __pkvm_prot_finalize(void)
129129
kvm_flush_dcache_to_poc(params, sizeof(*params));
130130

131131
write_sysreg(params->hcr_el2, hcr_el2);
132-
__load_stage2(&host_kvm.arch.mmu, host_kvm.arch.vtcr);
132+
__load_stage2(&host_kvm.arch.mmu, &host_kvm.arch);
133133

134134
/*
135135
* Make sure to have an ISB before the TLB maintenance below but only

arch/arm64/kvm/hyp/nvhe/switch.c

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -170,6 +170,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
170170
{
171171
struct kvm_cpu_context *host_ctxt;
172172
struct kvm_cpu_context *guest_ctxt;
173+
struct kvm_s2_mmu *mmu;
173174
bool pmu_switch_needed;
174175
u64 exit_code;
175176

@@ -213,7 +214,8 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
213214
__sysreg32_restore_state(vcpu);
214215
__sysreg_restore_state_nvhe(guest_ctxt);
215216

216-
__load_guest_stage2(kern_hyp_va(vcpu->arch.hw_mmu));
217+
mmu = kern_hyp_va(vcpu->arch.hw_mmu);
218+
__load_stage2(mmu, kern_hyp_va(mmu->arch));
217219
__activate_traps(vcpu);
218220

219221
__hyp_vgic_restore_state(vcpu);

arch/arm64/kvm/hyp/nvhe/tlb.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -34,12 +34,12 @@ static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
3434
}
3535

3636
/*
37-
* __load_guest_stage2() includes an ISB only when the AT
37+
* __load_stage2() includes an ISB only when the AT
3838
* workaround is applied. Take care of the opposite condition,
3939
* ensuring that we always have an ISB, but not two ISBs back
4040
* to back.
4141
*/
42-
__load_guest_stage2(mmu);
42+
__load_stage2(mmu, kern_hyp_va(mmu->arch));
4343
asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
4444
}
4545

arch/arm64/kvm/hyp/vhe/switch.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -124,11 +124,11 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
124124
*
125125
* We have already configured the guest's stage 1 translation in
126126
* kvm_vcpu_load_sysregs_vhe above. We must now call
127-
* __load_guest_stage2 before __activate_traps, because
128-
* __load_guest_stage2 configures stage 2 translation, and
127+
* __load_stage2 before __activate_traps, because
128+
* __load_stage2 configures stage 2 translation, and
129129
* __activate_traps clear HCR_EL2.TGE (among other things).
130130
*/
131-
__load_guest_stage2(vcpu->arch.hw_mmu);
131+
__load_stage2(vcpu->arch.hw_mmu, vcpu->arch.hw_mmu->arch);
132132
__activate_traps(vcpu);
133133

134134
__kvm_adjust_pc(vcpu);

arch/arm64/kvm/hyp/vhe/tlb.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -50,10 +50,10 @@ static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
5050
*
5151
* ARM erratum 1165522 requires some special handling (again),
5252
* as we need to make sure both stages of translation are in
53-
* place before clearing TGE. __load_guest_stage2() already
53+
* place before clearing TGE. __load_stage2() already
5454
* has an ISB in order to deal with this.
5555
*/
56-
__load_guest_stage2(mmu);
56+
__load_stage2(mmu, mmu->arch);
5757
val = read_sysreg(hcr_el2);
5858
val &= ~HCR_TGE;
5959
write_sysreg(val, hcr_el2);

arch/arm64/kvm/mmu.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -532,7 +532,7 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
532532
mmu->arch = &kvm->arch;
533533
mmu->pgt = pgt;
534534
mmu->pgd_phys = __pa(pgt->pgd);
535-
mmu->vmid.vmid_gen = 0;
535+
WRITE_ONCE(mmu->vmid.vmid_gen, 0);
536536
return 0;
537537

538538
out_destroy_pgtable:

0 commit comments

Comments
 (0)