Skip to content

Commit 4288ff7

Browse files
Marc Zyngier authored and Oliver Upton committed
KVM: arm64: Restore the stage-2 context in VHE's __tlb_switch_to_host()
An MMU notifier could cause us to clobber the stage-2 context loaded on a CPU when we switch to another VM's context to invalidate. This isn't an issue right now as the stage-2 context gets reloaded on every guest entry, but is disastrous when moving __load_stage2() into the vcpu_load() path. Restore the previous stage-2 context on the way out of a TLB invalidation if we installed something else. Deliberately do this after TGE=1 is synchronized to keep things safe in light of the speculative AT errata. Signed-off-by: Marc Zyngier <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Oliver Upton <[email protected]>
1 parent 38ce26b commit 4288ff7

File tree

1 file changed

+14
-3
lines changed
  • arch/arm64/kvm/hyp/vhe/tlb.c

1 file changed

+14
-3
lines changed

arch/arm64/kvm/hyp/vhe/tlb.c

Lines changed: 14 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -11,18 +11,25 @@
1111
#include <asm/tlbflush.h>
1212

1313
struct tlb_inv_context {
14-
unsigned long flags;
15-
u64 tcr;
16-
u64 sctlr;
14+
struct kvm_s2_mmu *mmu;
15+
unsigned long flags;
16+
u64 tcr;
17+
u64 sctlr;
1718
};
1819

1920
static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
2021
struct tlb_inv_context *cxt)
2122
{
23+
struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
2224
u64 val;
2325

2426
local_irq_save(cxt->flags);
2527

28+
if (vcpu && mmu != vcpu->arch.hw_mmu)
29+
cxt->mmu = vcpu->arch.hw_mmu;
30+
else
31+
cxt->mmu = NULL;
32+
2633
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
2734
/*
2835
* For CPUs that are affected by ARM errata 1165522 or 1530923,
@@ -69,6 +76,10 @@ static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
6976
write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
7077
isb();
7178

79+
/* ... and the stage-2 MMU context that we switched away from */
80+
if (cxt->mmu)
81+
__load_stage2(cxt->mmu, cxt->mmu->arch);
82+
7283
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
7384
/* Restore the registers to what they were */
7485
write_sysreg_el1(cxt->tcr, SYS_TCR);

0 commit comments

Comments (0)