
Commit 52cd0d9

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull more KVM updates from Paolo Bonzini:
 "The guest side of the asynchronous page fault work has been delayed
  to 5.9 in order to sync with Thomas's interrupt entry rework, but
  here's the rest of the KVM updates for this merge window.

  MIPS:
   - Loongson port

  PPC:
   - Fixes

  ARM:
   - Fixes

  x86:
   - KVM_SET_USER_MEMORY_REGION optimizations
   - Fixes
   - Selftest fixes"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (62 commits)
  KVM: x86: do not pass poisoned hva to __kvm_set_memory_region
  KVM: selftests: fix sync_with_host() in smm_test
  KVM: async_pf: Inject 'page ready' event only if 'page not present' was previously injected
  KVM: async_pf: Cleanup kvm_setup_async_pf()
  kvm: i8254: remove redundant assignment to pointer s
  KVM: x86: respect singlestep when emulating instruction
  KVM: selftests: Don't probe KVM_CAP_HYPERV_ENLIGHTENED_VMCS when nested VMX is unsupported
  KVM: selftests: do not substitute SVM/VMX check with KVM_CAP_NESTED_STATE check
  KVM: nVMX: Consult only the "basic" exit reason when routing nested exit
  KVM: arm64: Move hyp_symbol_addr() to kvm_asm.h
  KVM: arm64: Synchronize sysreg state on injecting an AArch32 exception
  KVM: arm64: Make vcpu_cp1x() work on Big Endian hosts
  KVM: arm64: Remove host_cpu_context member from vcpu structure
  KVM: arm64: Stop sparse from moaning at __hyp_this_cpu_ptr
  KVM: arm64: Handle PtrAuth traps early
  KVM: x86: Unexport x86_fpu_cache and make it static
  KVM: selftests: Ignore KVM 5-level paging support for VM_MODE_PXXV48_4K
  KVM: arm64: Save the host's PtrAuth keys in non-preemptible context
  KVM: arm64: Stop save/restoring ACTLR_EL1
  KVM: arm64: Add emulation for 32bit guests accessing ACTLR2
  ...
2 parents d2d5439 + 49b3dea commit 52cd0d9


83 files changed: +1801 / -740 lines

arch/arm64/include/asm/kvm_asm.h

Lines changed: 30 additions & 3 deletions
@@ -81,12 +81,39 @@ extern u32 __kvm_get_mdcr_el2(void);
 
 extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
 
-/* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */
+/*
+ * Obtain the PC-relative address of a kernel symbol
+ * s: symbol
+ *
+ * The goal of this macro is to return a symbol's address based on a
+ * PC-relative computation, as opposed to a loading the VA from a
+ * constant pool or something similar. This works well for HYP, as an
+ * absolute VA is guaranteed to be wrong. Only use this if trying to
+ * obtain the address of a symbol (i.e. not something you obtained by
+ * following a pointer).
+ */
+#define hyp_symbol_addr(s) \
+	({ \
+		typeof(s) *addr; \
+		asm("adrp %0, %1\n" \
+		    "add %0, %0, :lo12:%1\n" \
+		    : "=r" (addr) : "S" (&s)); \
+		addr; \
+	})
+
+/*
+ * Home-grown __this_cpu_{ptr,read} variants that always work at HYP,
+ * provided that sym is really a *symbol* and not a pointer obtained from
+ * a data structure. As for SHIFT_PERCPU_PTR(), the creative casting keeps
+ * sparse quiet.
+ */
 #define __hyp_this_cpu_ptr(sym) \
 	({ \
-		void *__ptr = hyp_symbol_addr(sym); \
+		void *__ptr; \
+		__verify_pcpu_ptr(&sym); \
+		__ptr = hyp_symbol_addr(sym); \
 		__ptr += read_sysreg(tpidr_el2); \
-		(typeof(&sym))__ptr; \
+		(typeof(sym) __kernel __force *)__ptr; \
 	})
 
 #define __hyp_this_cpu_read(sym) \
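
For orientation, here is a minimal sketch of how HYP code resolves a per-CPU symbol with the relocated helpers. The wrapper name is hypothetical; the expression itself mirrors the callers updated later in this commit (hyp/switch.c and hyp/debug-sr.c), which use the per-CPU kvm_host_data variable.

/*
 * Sketch only: hyp_symbol_addr() yields a PC-relative address for the
 * per-CPU symbol, and __hyp_this_cpu_ptr() offsets it by TPIDR_EL2 to
 * reach this CPU's copy, so no absolute kernel VA is ever dereferenced
 * at EL2.
 */
static struct kvm_cpu_context *__hyp_text get_host_ctxt(void)	/* hypothetical helper */
{
	return &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
}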

arch/arm64/include/asm/kvm_emulate.h

Lines changed: 0 additions & 6 deletions
@@ -112,12 +112,6 @@ static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
 	vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
 }
 
-static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu)
-{
-	if (vcpu_has_ptrauth(vcpu))
-		vcpu_ptrauth_disable(vcpu);
-}
-
 static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.vsesr_el2;

arch/arm64/include/asm/kvm_host.h

Lines changed: 4 additions & 5 deletions
@@ -284,9 +284,6 @@ struct kvm_vcpu_arch {
 	struct kvm_guest_debug_arch vcpu_debug_state;
 	struct kvm_guest_debug_arch external_debug_state;
 
-	/* Pointer to host CPU context */
-	struct kvm_cpu_context *host_cpu_context;
-
 	struct thread_info *host_thread_info;	/* hyp VA */
 	struct user_fpsimd_state *host_fpsimd_state;	/* hyp VA */
 
@@ -404,8 +401,10 @@ void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
  * CP14 and CP15 live in the same array, as they are backed by the
  * same system registers.
  */
-#define vcpu_cp14(v,r)		((v)->arch.ctxt.copro[(r)])
-#define vcpu_cp15(v,r)		((v)->arch.ctxt.copro[(r)])
+#define CPx_BIAS		IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)
+
+#define vcpu_cp14(v,r)		((v)->arch.ctxt.copro[(r) ^ CPx_BIAS])
+#define vcpu_cp15(v,r)		((v)->arch.ctxt.copro[(r) ^ CPx_BIAS])
 
 struct kvm_vm_stat {
 	ulong remote_tlb_flush;
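
The CPx_BIAS change is easiest to see outside the kernel: the copro[] view aliases the 64-bit sys_regs storage as pairs of 32-bit words, so on a big-endian host the low word of each 64-bit register sits at the odd index, and XOR-ing the index with 1 compensates. A standalone sketch of the aliasing (plain C, not kernel code):

#include <stdint.h>
#include <stdio.h>

/*
 * A 64-bit register viewed as two 32-bit words. On little-endian hosts,
 * word index 0 is the low half; on big-endian hosts the halves are
 * swapped, so flipping the low index bit (index ^ 1) recovers the
 * intended 32-bit "copro" register.
 */
union reg {
	uint64_t sysreg;
	uint32_t copro[2];
};

int main(void)
{
	union reg r = { .sysreg = 0x1122334455667788ULL };
	int big_endian = (r.copro[0] == 0x11223344);	/* detected at run time */
	int bias = big_endian ? 1 : 0;			/* mirrors CPx_BIAS */

	/* copro[0 ^ bias] is always the low 32 bits of the 64-bit register */
	printf("low half = 0x%08x\n", (unsigned int)r.copro[0 ^ bias]);
	return 0;
}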

arch/arm64/include/asm/kvm_mmu.h

Lines changed: 0 additions & 20 deletions
@@ -107,26 +107,6 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)
 
 #define kern_hyp_va(v) 	((typeof(v))(__kern_hyp_va((unsigned long)(v))))
 
-/*
- * Obtain the PC-relative address of a kernel symbol
- * s: symbol
- *
- * The goal of this macro is to return a symbol's address based on a
- * PC-relative computation, as opposed to a loading the VA from a
- * constant pool or something similar. This works well for HYP, as an
- * absolute VA is guaranteed to be wrong. Only use this if trying to
- * obtain the address of a symbol (i.e. not something you obtained by
- * following a pointer).
- */
-#define hyp_symbol_addr(s) \
-	({ \
-		typeof(s) *addr; \
-		asm("adrp %0, %1\n" \
-		    "add %0, %0, :lo12:%1\n" \
-		    : "=r" (addr) : "S" (&s)); \
-		addr; \
-	})
-
 /*
  * We currently support using a VM-specified IPA size. For backward
  * compatibility, the default IPA size is fixed to 40bits.

arch/arm64/kvm/aarch32.c

Lines changed: 28 additions & 0 deletions
@@ -33,6 +33,26 @@ static const u8 return_offsets[8][2] = {
 	[7] = { 4, 4 },		/* FIQ, unused */
 };
 
+static bool pre_fault_synchronize(struct kvm_vcpu *vcpu)
+{
+	preempt_disable();
+	if (vcpu->arch.sysregs_loaded_on_cpu) {
+		kvm_arch_vcpu_put(vcpu);
+		return true;
+	}
+
+	preempt_enable();
+	return false;
+}
+
+static void post_fault_synchronize(struct kvm_vcpu *vcpu, bool loaded)
+{
+	if (loaded) {
+		kvm_arch_vcpu_load(vcpu, smp_processor_id());
+		preempt_enable();
+	}
+}
+
 /*
  * When an exception is taken, most CPSR fields are left unchanged in the
  * handler. However, some are explicitly overridden (e.g. M[4:0]).
@@ -155,7 +175,10 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
 
 void kvm_inject_undef32(struct kvm_vcpu *vcpu)
 {
+	bool loaded = pre_fault_synchronize(vcpu);
+
 	prepare_fault32(vcpu, PSR_AA32_MODE_UND, 4);
+	post_fault_synchronize(vcpu, loaded);
 }
 
 /*
@@ -168,6 +191,9 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
 	u32 vect_offset;
 	u32 *far, *fsr;
 	bool is_lpae;
+	bool loaded;
+
+	loaded = pre_fault_synchronize(vcpu);
 
 	if (is_pabt) {
 		vect_offset = 12;
@@ -191,6 +217,8 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
 		/* no need to shuffle FS[4] into DFSR[10] as its 0 */
 		*fsr = DFSR_FSC_EXTABT_nLPAE;
 	}
+
+	post_fault_synchronize(vcpu, loaded);
 }
 
 void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr)
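
The calling pattern these two helpers establish, as a sketch (do_inject() is a hypothetical stand-in for the register updates performed by the real callers, kvm_inject_undef32() and inject_abt32() above):

/*
 * Sketch of the synchronization pattern: if the vCPU's sysregs are live
 * on this CPU (VHE), pre_fault_synchronize() puts them back into the
 * in-memory context and leaves preemption disabled; the fault is then
 * built against the in-memory state, and post_fault_synchronize()
 * reloads the registers and re-enables preemption. If nothing was
 * loaded, preemption is re-enabled immediately and 'loaded' is false.
 */
static void inject_with_sync(struct kvm_vcpu *vcpu)	/* hypothetical */
{
	bool loaded = pre_fault_synchronize(vcpu);

	do_inject(vcpu);	/* hypothetical: modify vcpu->arch.ctxt */

	post_fault_synchronize(vcpu, loaded);
}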

arch/arm64/kvm/arm.c

Lines changed: 12 additions & 13 deletions
@@ -144,11 +144,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	return ret;
 }
 
-int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
-{
-	return 0;
-}
-
 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
 {
 	return VM_FAULT_SIGBUS;
@@ -340,10 +335,8 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	int *last_ran;
-	kvm_host_data_t *cpu_data;
 
 	last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
-	cpu_data = this_cpu_ptr(&kvm_host_data);
 
 	/*
 	 * We might get preempted before the vCPU actually runs, but
@@ -355,7 +348,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	}
 
 	vcpu->cpu = cpu;
-	vcpu->arch.host_cpu_context = &cpu_data->host_ctxt;
 
 	kvm_vgic_load(vcpu);
 	kvm_timer_vcpu_load(vcpu);
@@ -370,7 +362,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	else
 		vcpu_set_wfx_traps(vcpu);
 
-	vcpu_ptrauth_setup_lazy(vcpu);
+	if (vcpu_has_ptrauth(vcpu))
+		vcpu_ptrauth_disable(vcpu);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@@ -990,11 +983,17 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
 	 * Ensure a rebooted VM will fault in RAM pages and detect if the
 	 * guest MMU is turned off and flush the caches as needed.
 	 *
-	 * S2FWB enforces all memory accesses to RAM being cacheable, we
-	 * ensure that the cache is always coherent.
+	 * S2FWB enforces all memory accesses to RAM being cacheable,
+	 * ensuring that the data side is always coherent. We still
+	 * need to invalidate the I-cache though, as FWB does *not*
+	 * imply CTR_EL0.DIC.
 	 */
-	if (vcpu->arch.has_run_once && !cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
-		stage2_unmap_vm(vcpu->kvm);
+	if (vcpu->arch.has_run_once) {
+		if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
+			stage2_unmap_vm(vcpu->kvm);
+		else
+			__flush_icache_all();
+	}
 
 	vcpu_reset_hcr(vcpu);
 
arch/arm64/kvm/handle_exit.c

Lines changed: 3 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -162,40 +162,14 @@ static int handle_sve(struct kvm_vcpu *vcpu, struct kvm_run *run)
162162
return 1;
163163
}
164164

165-
#define __ptrauth_save_key(regs, key) \
166-
({ \
167-
regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \
168-
regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \
169-
})
170-
171-
/*
172-
* Handle the guest trying to use a ptrauth instruction, or trying to access a
173-
* ptrauth register.
174-
*/
175-
void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu)
176-
{
177-
struct kvm_cpu_context *ctxt;
178-
179-
if (vcpu_has_ptrauth(vcpu)) {
180-
vcpu_ptrauth_enable(vcpu);
181-
ctxt = vcpu->arch.host_cpu_context;
182-
__ptrauth_save_key(ctxt->sys_regs, APIA);
183-
__ptrauth_save_key(ctxt->sys_regs, APIB);
184-
__ptrauth_save_key(ctxt->sys_regs, APDA);
185-
__ptrauth_save_key(ctxt->sys_regs, APDB);
186-
__ptrauth_save_key(ctxt->sys_regs, APGA);
187-
} else {
188-
kvm_inject_undefined(vcpu);
189-
}
190-
}
191-
192165
/*
193166
* Guest usage of a ptrauth instruction (which the guest EL1 did not turn into
194-
* a NOP).
167+
* a NOP). If we get here, it is that we didn't fixup ptrauth on exit, and all
168+
* that we can do is give the guest an UNDEF.
195169
*/
196170
static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu, struct kvm_run *run)
197171
{
198-
kvm_arm_vcpu_ptrauth_trap(vcpu);
172+
kvm_inject_undefined(vcpu);
199173
return 1;
200174
}
201175

arch/arm64/kvm/hyp/debug-sr.c

Lines changed: 2 additions & 2 deletions
@@ -185,7 +185,7 @@ void __hyp_text __debug_switch_to_guest(struct kvm_vcpu *vcpu)
 	if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
 		return;
 
-	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
 	guest_ctxt = &vcpu->arch.ctxt;
 	host_dbg = &vcpu->arch.host_debug_state.regs;
 	guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
@@ -207,7 +207,7 @@ void __hyp_text __debug_switch_to_host(struct kvm_vcpu *vcpu)
 	if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
 		return;
 
-	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
 	guest_ctxt = &vcpu->arch.ctxt;
 	host_dbg = &vcpu->arch.host_debug_state.regs;
 	guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);

arch/arm64/kvm/hyp/switch.c

Lines changed: 63 additions & 2 deletions
@@ -490,6 +490,64 @@ static bool __hyp_text handle_tx2_tvm(struct kvm_vcpu *vcpu)
 	return true;
 }
 
+static bool __hyp_text esr_is_ptrauth_trap(u32 esr)
+{
+	u32 ec = ESR_ELx_EC(esr);
+
+	if (ec == ESR_ELx_EC_PAC)
+		return true;
+
+	if (ec != ESR_ELx_EC_SYS64)
+		return false;
+
+	switch (esr_sys64_to_sysreg(esr)) {
+	case SYS_APIAKEYLO_EL1:
+	case SYS_APIAKEYHI_EL1:
+	case SYS_APIBKEYLO_EL1:
+	case SYS_APIBKEYHI_EL1:
+	case SYS_APDAKEYLO_EL1:
+	case SYS_APDAKEYHI_EL1:
+	case SYS_APDBKEYLO_EL1:
+	case SYS_APDBKEYHI_EL1:
+	case SYS_APGAKEYLO_EL1:
+	case SYS_APGAKEYHI_EL1:
+		return true;
+	}
+
+	return false;
+}
+
+#define __ptrauth_save_key(regs, key) \
+({ \
+	regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \
+	regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \
+})
+
+static bool __hyp_text __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpu_context *ctxt;
+	u64 val;
+
+	if (!vcpu_has_ptrauth(vcpu) ||
+	    !esr_is_ptrauth_trap(kvm_vcpu_get_hsr(vcpu)))
+		return false;
+
+	ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+	__ptrauth_save_key(ctxt->sys_regs, APIA);
+	__ptrauth_save_key(ctxt->sys_regs, APIB);
+	__ptrauth_save_key(ctxt->sys_regs, APDA);
+	__ptrauth_save_key(ctxt->sys_regs, APDB);
+	__ptrauth_save_key(ctxt->sys_regs, APGA);
+
+	vcpu_ptrauth_enable(vcpu);
+
+	val = read_sysreg(hcr_el2);
+	val |= (HCR_API | HCR_APK);
+	write_sysreg(val, hcr_el2);
+
+	return true;
+}
+
 /*
  * Return true when we were able to fixup the guest exit and should return to
  * the guest, false when we should restore the host state and return to the
@@ -524,6 +582,9 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
 	if (__hyp_handle_fpsimd(vcpu))
 		return true;
 
+	if (__hyp_handle_ptrauth(vcpu))
+		return true;
+
 	if (!__populate_fault_info(vcpu))
 		return true;
 
@@ -642,7 +703,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 	struct kvm_cpu_context *guest_ctxt;
 	u64 exit_code;
 
-	host_ctxt = vcpu->arch.host_cpu_context;
+	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
 	host_ctxt->__hyp_running_vcpu = vcpu;
 	guest_ctxt = &vcpu->arch.ctxt;
 
@@ -747,7 +808,7 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
 
 	vcpu = kern_hyp_va(vcpu);
 
-	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
 	host_ctxt->__hyp_running_vcpu = vcpu;
 	guest_ctxt = &vcpu->arch.ctxt;
 