Commit e03fa29

David Brazdil authored and Marc Zyngier committed
KVM: arm64: Duplicate hyp/tlb.c for VHE/nVHE
tlb.c contains code for flushing the TLB, with code shared between VHE/nVHE. Because the common code is small, duplicate tlb.c and specialize each copy for VHE/nVHE.

Signed-off-by: David Brazdil <[email protected]>
Signed-off-by: Marc Zyngier <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
1 parent 208243c commit e03fa29
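
Note for context (not part of the patch): the two copies are built by the per-flavour Makefiles changed below, each of which defines its own preprocessor symbol (-D__KVM_VHE_HYPERVISOR__ / -D__KVM_NVHE_HYPERVISOR__). A minimal sketch of how a shared source file could still specialize per flavour under those defines; the helper name my_flavour() is hypothetical:

/* Hypothetical sketch only. The defines come from ccflags-y in
 * arch/arm64/kvm/hyp/{vhe,nvhe}/Makefile; this patch duplicates whole
 * files rather than branching like this.
 */
#ifdef __KVM_NVHE_HYPERVISOR__
static inline const char *my_flavour(void) { return "nVHE"; }
#else	/* __KVM_VHE_HYPERVISOR__ */
static inline const char *my_flavour(void) { return "VHE"; }
#endif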

6 files changed (+177, -100 lines)

arch/arm64/kernel/image-vars.h

Lines changed: 8 additions & 6 deletions
@@ -81,12 +81,6 @@ KVM_NVHE_ALIAS(__kvm_enable_ssbs);
 /* Symbols defined in timer-sr.c (not yet compiled with nVHE build rules). */
 KVM_NVHE_ALIAS(__kvm_timer_set_cntvoff);
 
-/* Symbols defined in tlb.c (not yet compiled with nVHE build rules). */
-KVM_NVHE_ALIAS(__kvm_flush_vm_context);
-KVM_NVHE_ALIAS(__kvm_tlb_flush_local_vmid);
-KVM_NVHE_ALIAS(__kvm_tlb_flush_vmid);
-KVM_NVHE_ALIAS(__kvm_tlb_flush_vmid_ipa);
-
 /* Symbols defined in vgic-v3-sr.c (not yet compiled with nVHE build rules). */
 KVM_NVHE_ALIAS(__vgic_v3_get_ich_vtr_el2);
 KVM_NVHE_ALIAS(__vgic_v3_init_lrs);
@@ -116,6 +110,14 @@ KVM_NVHE_ALIAS(__hyp_stub_vectors);
 /* IDMAP TCR_EL1.T0SZ as computed by the EL1 init code */
 KVM_NVHE_ALIAS(idmap_t0sz);
 
+/* Kernel symbol used by icache_is_vpipt(). */
+KVM_NVHE_ALIAS(__icache_flags);
+
+/* Kernel symbols needed for cpus_have_final/const_caps checks. */
+KVM_NVHE_ALIAS(arm64_const_caps_ready);
+KVM_NVHE_ALIAS(cpu_hwcap_keys);
+KVM_NVHE_ALIAS(cpu_hwcaps);
+
 #endif /* CONFIG_KVM */
 
 #endif /* __ARM64_KERNEL_IMAGE_VARS_H */
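
Reviewer note (not part of the hunk): nVHE objects have their symbols renamed with a __kvm_nvhe_ prefix during the build, so kernel symbols that hyp code still references — such as __icache_flags or the cpu_hwcaps keys added above — need explicit linker aliases. A rough sketch of the macro defined near the top of image-vars.h, quoted from memory and therefore approximate:

/* Approximate sketch. image-vars.h is included by the linker script,
 * so each alias expands to a linker assignment, not C code.
 */
#define KVM_NVHE_ALIAS(sym)	__kvm_nvhe_##sym = sym;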

arch/arm64/kvm/hyp/Makefile

Lines changed: 1 addition & 1 deletion
@@ -14,7 +14,7 @@ obj-$(CONFIG_KVM) += hyp.o vhe/ nvhe/
 obj-$(CONFIG_KVM_INDIRECT_VECTORS) += smccc_wa.o
 
 hyp-y := vgic-v3-sr.o timer-sr.o aarch32.o vgic-v2-cpuif-proxy.o sysreg-sr.o \
-	 debug-sr.o entry.o switch.o fpsimd.o tlb.o
+	 debug-sr.o entry.o switch.o fpsimd.o
 
 # KVM code is run at a different exception code with a different map, so
 # compiler instrumentation that inserts callbacks or checks into the code may

arch/arm64/kvm/hyp/nvhe/Makefile

Lines changed: 1 addition & 1 deletion
@@ -6,7 +6,7 @@
 asflags-y := -D__KVM_NVHE_HYPERVISOR__
 ccflags-y := -D__KVM_NVHE_HYPERVISOR__
 
-obj-y := hyp-init.o ../hyp-entry.o
+obj-y := tlb.o hyp-init.o ../hyp-entry.o
 
 obj-y := $(patsubst %.o,%.hyp.o,$(obj-y))
 extra-y := $(patsubst %.hyp.o,%.hyp.tmp.o,$(obj-y))

arch/arm64/kvm/hyp/tlb.c renamed to arch/arm64/kvm/hyp/nvhe/tlb.c

Lines changed: 5 additions & 91 deletions
@@ -4,64 +4,16 @@
  * Author: Marc Zyngier <[email protected]>
  */
 
-#include <linux/irqflags.h>
-
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
 #include <asm/tlbflush.h>
 
 struct tlb_inv_context {
-	unsigned long flags;
 	u64 tcr;
-	u64 sctlr;
 };
 
-static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
-						 struct tlb_inv_context *cxt)
-{
-	u64 val;
-
-	local_irq_save(cxt->flags);
-
-	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
-		/*
-		 * For CPUs that are affected by ARM errata 1165522 or 1530923,
-		 * we cannot trust stage-1 to be in a correct state at that
-		 * point. Since we do not want to force a full load of the
-		 * vcpu state, we prevent the EL1 page-table walker to
-		 * allocate new TLBs. This is done by setting the EPD bits
-		 * in the TCR_EL1 register. We also need to prevent it to
-		 * allocate IPA->PA walks, so we enable the S1 MMU...
-		 */
-		val = cxt->tcr = read_sysreg_el1(SYS_TCR);
-		val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
-		write_sysreg_el1(val, SYS_TCR);
-		val = cxt->sctlr = read_sysreg_el1(SYS_SCTLR);
-		val |= SCTLR_ELx_M;
-		write_sysreg_el1(val, SYS_SCTLR);
-	}
-
-	/*
-	 * With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
-	 * most TLB operations target EL2/EL0. In order to affect the
-	 * guest TLBs (EL1/EL0), we need to change one of these two
-	 * bits. Changing E2H is impossible (goodbye TTBR1_EL2), so
-	 * let's flip TGE before executing the TLB operation.
-	 *
-	 * ARM erratum 1165522 requires some special handling (again),
-	 * as we need to make sure both stages of translation are in
-	 * place before clearing TGE. __load_guest_stage2() already
-	 * has an ISB in order to deal with this.
-	 */
-	__load_guest_stage2(kvm);
-	val = read_sysreg(hcr_el2);
-	val &= ~HCR_TGE;
-	write_sysreg(val, hcr_el2);
-	isb();
-}
-
-static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
-						  struct tlb_inv_context *cxt)
+static void __hyp_text __tlb_switch_to_guest(struct kvm *kvm,
+					     struct tlb_inv_context *cxt)
 {
 	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
 		u64 val;
@@ -84,37 +36,8 @@ static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
 	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
 }
 
-static void __hyp_text __tlb_switch_to_guest(struct kvm *kvm,
-					     struct tlb_inv_context *cxt)
-{
-	if (has_vhe())
-		__tlb_switch_to_guest_vhe(kvm, cxt);
-	else
-		__tlb_switch_to_guest_nvhe(kvm, cxt);
-}
-
-static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm,
-						struct tlb_inv_context *cxt)
-{
-	/*
-	 * We're done with the TLB operation, let's restore the host's
-	 * view of HCR_EL2.
-	 */
-	write_sysreg(0, vttbr_el2);
-	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
-	isb();
-
-	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
-		/* Restore the registers to what they were */
-		write_sysreg_el1(cxt->tcr, SYS_TCR);
-		write_sysreg_el1(cxt->sctlr, SYS_SCTLR);
-	}
-
-	local_irq_restore(cxt->flags);
-}
-
-static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm,
-						 struct tlb_inv_context *cxt)
+static void __hyp_text __tlb_switch_to_host(struct kvm *kvm,
+					    struct tlb_inv_context *cxt)
 {
 	write_sysreg(0, vttbr_el2);
 
@@ -126,15 +49,6 @@ static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm,
 	}
 }
 
-static void __hyp_text __tlb_switch_to_host(struct kvm *kvm,
-					    struct tlb_inv_context *cxt)
-{
-	if (has_vhe())
-		__tlb_switch_to_host_vhe(kvm, cxt);
-	else
-		__tlb_switch_to_host_nvhe(kvm, cxt);
-}
-
 void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 {
 	struct tlb_inv_context cxt;
@@ -183,7 +97,7 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 	 * The moral of this story is: if you have a VPIPT I-cache, then
 	 * you should be running with VHE enabled.
 	 */
-	if (!has_vhe() && icache_is_vpipt())
+	if (icache_is_vpipt())
 		__flush_icache_all();
 
 	__tlb_switch_to_host(kvm, &cxt);
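
Reviewer note: after the de-duplication, every flush helper in this file keeps the same shape — enter the guest's stage-2 context so TLBIs are scoped to its VMID, invalidate, then restore the host. A condensed sketch of that pattern; my_flush_example() is a hypothetical name mirroring __kvm_tlb_flush_vmid():

/* Hypothetical condensation of the pattern shared by the
 * __kvm_tlb_flush_* helpers in this file.
 */
static void __hyp_text my_flush_example(struct kvm *kvm)
{
	struct tlb_inv_context cxt;

	dsb(ishst);			/* publish PTE updates before the TLBI */
	__tlb_switch_to_guest(kvm, &cxt);	/* TLBIs now target the guest VMID */
	__tlbi(vmalls12e1is);		/* S1+S2 entries, Inner Shareable */
	dsb(ish);			/* wait for the invalidation to finish */
	isb();
	__tlb_switch_to_host(kvm, &cxt);
}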

arch/arm64/kvm/hyp/vhe/Makefile

Lines changed: 1 addition & 1 deletion
@@ -6,7 +6,7 @@
 asflags-y := -D__KVM_VHE_HYPERVISOR__
 ccflags-y := -D__KVM_VHE_HYPERVISOR__
 
-obj-y := ../hyp-entry.o
+obj-y := tlb.o ../hyp-entry.o
 
 # KVM code is run at a different exception code with a different map, so
 # compiler instrumentation that inserts callbacks or checks into the code may

arch/arm64/kvm/hyp/vhe/tlb.c

Lines changed: 161 additions & 0 deletions
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2015 - ARM Ltd
+ * Author: Marc Zyngier <[email protected]>
+ */
+
+#include <linux/irqflags.h>
+
+#include <asm/kvm_hyp.h>
+#include <asm/kvm_mmu.h>
+#include <asm/tlbflush.h>
+
+struct tlb_inv_context {
+	unsigned long flags;
+	u64 tcr;
+	u64 sctlr;
+};
+
+static void __tlb_switch_to_guest(struct kvm *kvm, struct tlb_inv_context *cxt)
+{
+	u64 val;
+
+	local_irq_save(cxt->flags);
+
+	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
+		/*
+		 * For CPUs that are affected by ARM errata 1165522 or 1530923,
+		 * we cannot trust stage-1 to be in a correct state at that
+		 * point. Since we do not want to force a full load of the
+		 * vcpu state, we prevent the EL1 page-table walker to
+		 * allocate new TLBs. This is done by setting the EPD bits
+		 * in the TCR_EL1 register. We also need to prevent it to
+		 * allocate IPA->PA walks, so we enable the S1 MMU...
+		 */
+		val = cxt->tcr = read_sysreg_el1(SYS_TCR);
+		val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
+		write_sysreg_el1(val, SYS_TCR);
+		val = cxt->sctlr = read_sysreg_el1(SYS_SCTLR);
+		val |= SCTLR_ELx_M;
+		write_sysreg_el1(val, SYS_SCTLR);
+	}
+
+	/*
+	 * With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
+	 * most TLB operations target EL2/EL0. In order to affect the
+	 * guest TLBs (EL1/EL0), we need to change one of these two
+	 * bits. Changing E2H is impossible (goodbye TTBR1_EL2), so
+	 * let's flip TGE before executing the TLB operation.
+	 *
+	 * ARM erratum 1165522 requires some special handling (again),
+	 * as we need to make sure both stages of translation are in
+	 * place before clearing TGE. __load_guest_stage2() already
+	 * has an ISB in order to deal with this.
+	 */
+	__load_guest_stage2(kvm);
+	val = read_sysreg(hcr_el2);
+	val &= ~HCR_TGE;
+	write_sysreg(val, hcr_el2);
+	isb();
+}
+
+static void __tlb_switch_to_host(struct kvm *kvm, struct tlb_inv_context *cxt)
+{
+	/*
+	 * We're done with the TLB operation, let's restore the host's
+	 * view of HCR_EL2.
+	 */
+	write_sysreg(0, vttbr_el2);
+	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
+	isb();
+
+	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
+		/* Restore the registers to what they were */
+		write_sysreg_el1(cxt->tcr, SYS_TCR);
+		write_sysreg_el1(cxt->sctlr, SYS_SCTLR);
+	}
+
+	local_irq_restore(cxt->flags);
+}
+
+void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
+{
+	struct tlb_inv_context cxt;
+
+	dsb(ishst);
+
+	/* Switch to requested VMID */
+	__tlb_switch_to_guest(kvm, &cxt);
+
+	/*
+	 * We could do so much better if we had the VA as well.
+	 * Instead, we invalidate Stage-2 for this IPA, and the
+	 * whole of Stage-1. Weep...
+	 */
+	ipa >>= 12;
+	__tlbi(ipas2e1is, ipa);
+
+	/*
+	 * We have to ensure completion of the invalidation at Stage-2,
+	 * since a table walk on another CPU could refill a TLB with a
+	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
+	 * the Stage-1 invalidation happened first.
+	 */
+	dsb(ish);
+	__tlbi(vmalle1is);
+	dsb(ish);
+	isb();
+
+	__tlb_switch_to_host(kvm, &cxt);
+}
+
+void __kvm_tlb_flush_vmid(struct kvm *kvm)
+{
+	struct tlb_inv_context cxt;
+
+	dsb(ishst);
+
+	/* Switch to requested VMID */
+	__tlb_switch_to_guest(kvm, &cxt);
+
+	__tlbi(vmalls12e1is);
+	dsb(ish);
+	isb();
+
+	__tlb_switch_to_host(kvm, &cxt);
+}
+
+void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
+{
+	struct kvm *kvm = vcpu->kvm;
+	struct tlb_inv_context cxt;
+
+	/* Switch to requested VMID */
+	__tlb_switch_to_guest(kvm, &cxt);
+
+	__tlbi(vmalle1);
+	dsb(nsh);
+	isb();
+
+	__tlb_switch_to_host(kvm, &cxt);
+}
+
+void __kvm_flush_vm_context(void)
+{
+	dsb(ishst);
+	__tlbi(alle1is);
+
+	/*
+	 * VIPT and PIPT caches are not affected by VMID, so no maintenance
+	 * is necessary across a VMID rollover.
+	 *
+	 * VPIPT caches constrain lookup and maintenance to the active VMID,
+	 * so we need to invalidate lines with a stale VMID to avoid an ABA
+	 * race after multiple rollovers.
+	 *
+	 */
+	if (icache_is_vpipt())
+		asm volatile("ic ialluis");
+
+	dsb(ish);
+}
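
Reviewer note: one easy-to-miss detail in __kvm_tlb_flush_vmid_ipa() above is that TLBI IPAS2E1IS takes its operand in page-number form (the IPA with the low 12 bits dropped), which is why the address is shifted before __tlbi(). A worked example, with the IPA value assumed purely for illustration:

/* Worked example; the address is illustrative only. */
phys_addr_t ipa = 0x40001000;	/* stage-2 address to invalidate */
ipa >>= 12;			/* 0x40001: the IPA page number */
__tlbi(ipas2e1is, ipa);		/* invalidate by IPA, Inner Shareable */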
