Commit 8cd84b7

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull more KVM updates from Paolo Bonzini:

 "PPC:

   - Improvements and bugfixes for secure VM support, giving reduced
     startup time and memory hotplug support.

   - Locking fixes in nested KVM code

   - Increase number of guests supported by HV KVM to 4094

   - Preliminary POWER10 support

  ARM:

   - Split the VHE and nVHE hypervisor code bases, build the EL2 code
     separately, allowing for the VHE code to now be built with
     instrumentation

   - Level-based TLB invalidation support

   - Restructure of the vcpu register storage to accommodate the NV code

   - Pointer Authentication available for guests on nVHE hosts

   - Simplification of the system register table parsing

   - MMU cleanups and fixes

   - A number of post-32bit cleanups and other fixes

  MIPS:

   - compilation fixes

  x86:

   - bugfixes

   - support for the SERIALIZE instruction"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (70 commits)
  KVM: MIPS/VZ: Fix build error caused by 'kvm_run' cleanup
  x86/kvm/hyper-v: Synic default SCONTROL MSR needs to be enabled
  MIPS: KVM: Convert a fallthrough comment to fallthrough
  MIPS: VZ: Only include loongson_regs.h for CPU_LOONGSON64
  x86: Expose SERIALIZE for supported cpuid
  KVM: x86: Don't attempt to load PDPTRs when 64-bit mode is enabled
  KVM: arm64: Move S1PTW S2 fault logic out of io_mem_abort()
  KVM: arm64: Don't skip cache maintenance for read-only memslots
  KVM: arm64: Handle data and instruction external aborts the same way
  KVM: arm64: Rename kvm_vcpu_dabt_isextabt()
  KVM: arm: Add trace name for ARM_NISV
  KVM: arm64: Ensure that all nVHE hyp code is in .hyp.text
  KVM: arm64: Substitute RANDOMIZE_BASE for HARDEN_EL2_VECTORS
  KVM: arm64: Make nVHE ASLR conditional on RANDOMIZE_BASE
  KVM: PPC: Book3S HV: Rework secure mem slot dropping
  KVM: PPC: Book3S HV: Move kvmppc_svm_page_out up
  KVM: PPC: Book3S HV: Migrate hot plugged memory
  KVM: PPC: Book3S HV: In H_SVM_INIT_DONE, migrate remaining normal-GFNs to secure-GFNs
  KVM: PPC: Book3S HV: Track the state GFNs associated with secure VMs
  KVM: PPC: Book3S HV: Disable page merging in H_SVM_INIT_START
  ...
2 parents: 05a5b5d + e792415

83 files changed: +3479 / -2636 lines

Documentation/powerpc/ultravisor.rst

Lines changed: 3 additions & 0 deletions
@@ -895,6 +895,7 @@ Return values
     One of the following values:
 
 	* H_SUCCESS	on success.
+	* H_STATE	if the VM is not in a position to switch to secure.
 
 Description
 ~~~~~~~~~~~
@@ -933,6 +934,8 @@ Return values
 	* H_UNSUPPORTED	if called from the wrong context (e.g.
 			from an SVM or before an H_SVM_INIT_START
 			hypercall).
+	* H_STATE	if the hypervisor could not successfully
+			transition the VM to Secure VM.
 
 Description
 ~~~~~~~~~~~

arch/arm64/Kconfig

Lines changed: 1 addition & 19 deletions
@@ -1182,22 +1182,6 @@ config HARDEN_BRANCH_PREDICTOR
 
 	  If unsure, say Y.
 
-config HARDEN_EL2_VECTORS
-	bool "Harden EL2 vector mapping against system register leak" if EXPERT
-	default y
-	help
-	  Speculation attacks against some high-performance processors can
-	  be used to leak privileged information such as the vector base
-	  register, resulting in a potential defeat of the EL2 layout
-	  randomization.
-
-	  This config option will map the vectors to a fixed location,
-	  independent of the EL2 code mapping, so that revealing VBAR_EL2
-	  to an attacker does not give away any extra information. This
-	  only gets enabled on affected CPUs.
-
-	  If unsure, say Y.
-
 config ARM64_SSBD
 	bool "Speculative Store Bypass Disable" if EXPERT
 	default y
@@ -1520,7 +1504,6 @@ menu "ARMv8.3 architectural features"
 config ARM64_PTR_AUTH
 	bool "Enable support for pointer authentication"
 	default y
-	depends on !KVM || ARM64_VHE
 	depends on (CC_HAS_SIGN_RETURN_ADDRESS || CC_HAS_BRANCH_PROT_PAC_RET) && AS_HAS_PAC
 	# Modern compilers insert a .note.gnu.property section note for PAC
 	# which is only understood by binutils starting with version 2.33.1.
@@ -1547,8 +1530,7 @@ config ARM64_PTR_AUTH
 
 	  The feature is detected at runtime. If the feature is not present in
 	  hardware it will not be advertised to userspace/KVM guest nor will it
-	  be enabled. However, KVM guest also require VHE mode and hence
-	  CONFIG_ARM64_VHE=y option to use this feature.
+	  be enabled.
 
 	  If the feature is present on the boot CPU but not on a late CPU, then
 	  the late CPU will be parked. Also, if the boot CPU does not have

arch/arm64/include/asm/kvm_asm.h

Lines changed: 61 additions & 14 deletions
@@ -42,33 +42,81 @@
 
 #include <linux/mm.h>
 
-/* Translate a kernel address of @sym into its equivalent linear mapping */
-#define kvm_ksym_ref(sym) \
+/*
+ * Translate name of a symbol defined in nVHE hyp to the name seen
+ * by kernel proper. All nVHE symbols are prefixed by the build system
+ * to avoid clashes with the VHE variants.
+ */
+#define kvm_nvhe_sym(sym)	__kvm_nvhe_##sym
+
+#define DECLARE_KVM_VHE_SYM(sym)	extern char sym[]
+#define DECLARE_KVM_NVHE_SYM(sym)	extern char kvm_nvhe_sym(sym)[]
+
+/*
+ * Define a pair of symbols sharing the same name but one defined in
+ * VHE and the other in nVHE hyp implementations.
+ */
+#define DECLARE_KVM_HYP_SYM(sym) \
+	DECLARE_KVM_VHE_SYM(sym); \
+	DECLARE_KVM_NVHE_SYM(sym)
+
+#define CHOOSE_VHE_SYM(sym)	sym
+#define CHOOSE_NVHE_SYM(sym)	kvm_nvhe_sym(sym)
+
+#ifndef __KVM_NVHE_HYPERVISOR__
+/*
+ * BIG FAT WARNINGS:
+ *
+ * - Don't be tempted to change the following is_kernel_in_hyp_mode()
+ *   to has_vhe(). has_vhe() is implemented as a *final* capability,
+ *   while this is used early at boot time, when the capabilities are
+ *   not final yet....
+ *
+ * - Don't let the nVHE hypervisor have access to this, as it will
+ *   pick the *wrong* symbol (yes, it runs at EL2...).
+ */
+#define CHOOSE_HYP_SYM(sym)	(is_kernel_in_hyp_mode() ? CHOOSE_VHE_SYM(sym) \
+					   : CHOOSE_NVHE_SYM(sym))
+#else
+/* The nVHE hypervisor shouldn't even try to access anything */
+extern void *__nvhe_undefined_symbol;
+#define CHOOSE_HYP_SYM(sym)	__nvhe_undefined_symbol
+#endif
+
+/* Translate a kernel address @ptr into its equivalent linear mapping */
+#define kvm_ksym_ref(ptr) \
 	({ \
-		void *val = &sym; \
+		void *val = (ptr); \
 		if (!is_kernel_in_hyp_mode()) \
-			val = lm_alias(&sym); \
+			val = lm_alias((ptr)); \
 		val; \
 	 })
+#define kvm_ksym_ref_nvhe(sym)	kvm_ksym_ref(kvm_nvhe_sym(sym))
 
 struct kvm;
 struct kvm_vcpu;
+struct kvm_s2_mmu;
 
-extern char __kvm_hyp_init[];
-extern char __kvm_hyp_init_end[];
+DECLARE_KVM_NVHE_SYM(__kvm_hyp_init);
+DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
+#define __kvm_hyp_init		CHOOSE_NVHE_SYM(__kvm_hyp_init)
+#define __kvm_hyp_vector	CHOOSE_HYP_SYM(__kvm_hyp_vector)
 
-extern char __kvm_hyp_vector[];
+#ifdef CONFIG_KVM_INDIRECT_VECTORS
+extern atomic_t arm64_el2_vector_last_slot;
+DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
+#define __bp_harden_hyp_vecs	CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)
+#endif
 
 extern void __kvm_flush_vm_context(void);
-extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
-extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
-extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
+extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
+				     int level);
+extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
+extern void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu);
 
 extern void __kvm_timer_set_cntvoff(u64 cntvoff);
 
-extern int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu);
-
-extern int __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu);
+extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 
 extern void __kvm_enable_ssbs(void);
 
@@ -143,7 +191,6 @@ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
 .macro get_vcpu_ptr vcpu, ctxt
 	get_host_ctxt \ctxt, \vcpu
 	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
-	kern_hyp_va	\vcpu
 .endm
 
 #endif
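
To make the effect of the new symbol-aliasing macros concrete, here is a minimal usage sketch; the symbol name my_hyp_data and the helper pick_hyp_copy() are invented for illustration and are not part of this commit:

/* Illustration only: declare one symbol name with both a VHE and an nVHE copy. */
DECLARE_KVM_HYP_SYM(my_hyp_data);
/* expands to:
 *   extern char my_hyp_data[];             (VHE copy)
 *   extern char __kvm_nvhe_my_hyp_data[];  (nVHE copy, prefix added by the build system)
 */

static void *pick_hyp_copy(void)
{
	/* Kernel proper resolves the right copy at runtime:
	 * VHE host  -> my_hyp_data
	 * nVHE host -> __kvm_nvhe_my_hyp_data (via kvm_nvhe_sym()) */
	return CHOOSE_HYP_SYM(my_hyp_data);
}

Hyp code itself never goes through CHOOSE_HYP_SYM(): when built with __KVM_NVHE_HYPERVISOR__ defined it deliberately resolves to __nvhe_undefined_symbol, so any accidental use fails to link.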

arch/arm64/include/asm/kvm_coproc.h

Lines changed: 0 additions & 8 deletions
@@ -19,14 +19,6 @@ struct kvm_sys_reg_table {
 	size_t num;
 };
 
-struct kvm_sys_reg_target_table {
-	struct kvm_sys_reg_table table64;
-	struct kvm_sys_reg_table table32;
-};
-
-void kvm_register_target_sys_reg_table(unsigned int target,
-				       struct kvm_sys_reg_target_table *table);
-
 int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu);
 int kvm_handle_cp14_32(struct kvm_vcpu *vcpu);
 int kvm_handle_cp14_64(struct kvm_vcpu *vcpu);

arch/arm64/include/asm/kvm_emulate.h

Lines changed: 27 additions & 48 deletions
@@ -124,33 +124,12 @@ static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
 
 static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
 {
-	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
-}
-
-static inline unsigned long *__vcpu_elr_el1(const struct kvm_vcpu *vcpu)
-{
-	return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1;
-}
-
-static inline unsigned long vcpu_read_elr_el1(const struct kvm_vcpu *vcpu)
-{
-	if (vcpu->arch.sysregs_loaded_on_cpu)
-		return read_sysreg_el1(SYS_ELR);
-	else
-		return *__vcpu_elr_el1(vcpu);
-}
-
-static inline void vcpu_write_elr_el1(const struct kvm_vcpu *vcpu, unsigned long v)
-{
-	if (vcpu->arch.sysregs_loaded_on_cpu)
-		write_sysreg_el1(v, SYS_ELR);
-	else
-		*__vcpu_elr_el1(vcpu) = v;
+	return (unsigned long *)&vcpu_gp_regs(vcpu)->pc;
 }
 
 static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
 {
-	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
+	return (unsigned long *)&vcpu_gp_regs(vcpu)->pstate;
 }
 
 static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
@@ -179,14 +158,14 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
 static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
 						  u8 reg_num)
 {
-	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
+	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs[reg_num];
 }
 
 static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
 					 unsigned long val)
 {
 	if (reg_num != 31)
-		vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
+		vcpu_gp_regs(vcpu)->regs[reg_num] = val;
 }
 
 static inline unsigned long vcpu_read_spsr(const struct kvm_vcpu *vcpu)
@@ -197,7 +176,7 @@ static inline unsigned long vcpu_read_spsr(const struct kvm_vcpu *vcpu)
 	if (vcpu->arch.sysregs_loaded_on_cpu)
 		return read_sysreg_el1(SYS_SPSR);
 	else
-		return vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
+		return __vcpu_sys_reg(vcpu, SPSR_EL1);
 }
 
 static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
@@ -210,7 +189,7 @@ static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
 	if (vcpu->arch.sysregs_loaded_on_cpu)
 		write_sysreg_el1(v, SYS_SPSR);
 	else
-		vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1] = v;
+		__vcpu_sys_reg(vcpu, SPSR_EL1) = v;
 }
 
 /*
@@ -259,14 +238,14 @@ static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
 	return mode != PSR_MODE_EL0t;
 }
 
-static __always_inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
+static __always_inline u32 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.fault.esr_el2;
 }
 
 static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
 {
-	u32 esr = kvm_vcpu_get_hsr(vcpu);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
 
 	if (esr & ESR_ELx_CV)
 		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
@@ -291,64 +270,64 @@ static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
 
 static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
+	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
 }
 
 static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
 }
 
 static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
+	return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
 }
 
 static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
 }
 
 static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SF);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
 }
 
 static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
 {
-	return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
+	return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
 }
 
 static __always_inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
 }
 
 static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR) ||
 		kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
 }
 
 static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
 }
 
 static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
 {
-	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
+	return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
 }
 
 /* This one is not specific to Data Abort */
 static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
 }
 
 static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
 {
-	return ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
+	return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
 }
 
 static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
@@ -358,15 +337,15 @@ static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
 
 static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
+	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
 }
 
 static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
+	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
 }
 
-static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
 {
 	switch (kvm_vcpu_trap_get_fault(vcpu)) {
 	case FSC_SEA:
@@ -387,7 +366,7 @@ static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
 
 static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
 {
-	u32 esr = kvm_vcpu_get_hsr(vcpu);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
 	return ESR_ELx_SYS64_ISS_RT(esr);
 }
 
@@ -516,14 +495,14 @@ static __always_inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_i
  * Skip an instruction which has been emulated at hyp while most guest sysregs
  * are live.
  */
-static __always_inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu)
+static __always_inline void __kvm_skip_instr(struct kvm_vcpu *vcpu)
 {
 	*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
-	vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR);
+	vcpu_gp_regs(vcpu)->pstate = read_sysreg_el2(SYS_SPSR);
 
 	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
 
-	write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, SYS_SPSR);
+	write_sysreg_el2(vcpu_gp_regs(vcpu)->pstate, SYS_SPSR);
 	write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
 }
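
For orientation, a rough sketch of what a caller looks like after the kvm_vcpu_get_hsr() to kvm_vcpu_get_esr() rename; the function example_fault_is_write() is hypothetical and only restates the decode already done by kvm_vcpu_dabt_iswrite() above:

/* Hypothetical caller, for illustration only. */
static bool example_fault_is_write(const struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_esr(vcpu);	/* formerly kvm_vcpu_get_hsr() */

	/* WNR set, or the abort came from a stage-1 page-table walk
	 * (AF/DBM update) -- same logic as kvm_vcpu_dabt_iswrite(). */
	return (esr & ESR_ELx_WNR) || (esr & ESR_ELx_S1PTW);
}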
