Skip to content

Commit cf364e0

Browse files
author
Marc Zyngier
committed
KVM: arm64: Upgrade VMID accesses to {READ,WRITE}_ONCE
Since TLB invalidation can run in parallel with VMID allocation, we need to be careful and avoid any sort of load/store tearing. Use {READ,WRITE}_ONCE consistently to avoid any surprise. Cc: Catalin Marinas <[email protected]> Cc: Jade Alglave <[email protected]> Cc: Shameer Kolothum <[email protected]> Signed-off-by: Marc Zyngier <[email protected]> Signed-off-by: Will Deacon <[email protected]> Reviewed-by: Quentin Perret <[email protected]> Link: https://lore.kernel.org/r/[email protected]
1 parent 4efc0ed commit cf364e0

File tree

4 files changed

+10
-5
lines changed

4 files changed

+10
-5
lines changed

arch/arm64/include/asm/kvm_mmu.h

Lines changed: 6 additions & 1 deletion
@@ -252,14 +252,19 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
 
 #define kvm_phys_to_vttbr(addr)		phys_to_ttbr(addr)
 
+/*
+ * When this is (directly or indirectly) used on the TLB invalidation
+ * path, we rely on a previously issued DSB so that page table updates
+ * and VMID reads are correctly ordered.
+ */
 static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
 {
 	struct kvm_vmid *vmid = &mmu->vmid;
 	u64 vmid_field, baddr;
 	u64 cnp = system_supports_cnp() ? VTTBR_CNP_BIT : 0;
 
 	baddr = mmu->pgd_phys;
-	vmid_field = (u64)vmid->vmid << VTTBR_VMID_SHIFT;
+	vmid_field = (u64)READ_ONCE(vmid->vmid) << VTTBR_VMID_SHIFT;
 	return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
 }
 

arch/arm64/kvm/arm.c

Lines changed: 1 addition & 1 deletion
@@ -571,7 +571,7 @@ static void update_vmid(struct kvm_vmid *vmid)
 		kvm_call_hyp(__kvm_flush_vm_context);
 	}
 
-	vmid->vmid = kvm_next_vmid;
+	WRITE_ONCE(vmid->vmid, kvm_next_vmid);
 	kvm_next_vmid++;
 	kvm_next_vmid &= (1 << kvm_get_vmid_bits()) - 1;
 

arch/arm64/kvm/hyp/nvhe/mem_protect.c

Lines changed: 2 additions & 2 deletions
@@ -109,8 +109,8 @@ int kvm_host_prepare_stage2(void *pgt_pool_base)
 	mmu->pgd_phys = __hyp_pa(host_kvm.pgt.pgd);
 	mmu->arch = &host_kvm.arch;
 	mmu->pgt = &host_kvm.pgt;
-	mmu->vmid.vmid_gen = 0;
-	mmu->vmid.vmid = 0;
+	WRITE_ONCE(mmu->vmid.vmid_gen, 0);
+	WRITE_ONCE(mmu->vmid.vmid, 0);
 
 	return 0;
 }

arch/arm64/kvm/mmu.c

Lines changed: 1 addition & 1 deletion
@@ -485,7 +485,7 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
 	mmu->arch = &kvm->arch;
 	mmu->pgt = pgt;
 	mmu->pgd_phys = __pa(pgt->pgd);
-	mmu->vmid.vmid_gen = 0;
+	WRITE_ONCE(mmu->vmid.vmid_gen, 0);
 	return 0;
 
 out_destroy_pgtable:

0 commit comments

Comments
 (0)