Skip to content

Commit 15c9981

Browse files
author
Marc Zyngier
committed
Merge branch 'kvm-arm64/ptrauth-fixes' into kvmarm-master/next
Signed-off-by: Marc Zyngier <[email protected]>
2 parents 0370964 + 304e298 commit 15c9981

File tree

11 files changed

+111
-85
lines changed

11 files changed

+111
-85
lines changed

arch/arm64/include/asm/kvm_asm.h

Lines changed: 30 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -81,12 +81,39 @@ extern u32 __kvm_get_mdcr_el2(void);
8181

8282
extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
8383

84-
/* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */
84+
/*
85+
* Obtain the PC-relative address of a kernel symbol
86+
* s: symbol
87+
*
88+
* The goal of this macro is to return a symbol's address based on a
89+
* PC-relative computation, as opposed to loading the VA from a
90+
* constant pool or something similar. This works well for HYP, as an
91+
* absolute VA is guaranteed to be wrong. Only use this if trying to
92+
* obtain the address of a symbol (i.e. not something you obtained by
93+
* following a pointer).
94+
*/
95+
#define hyp_symbol_addr(s) \
96+
({ \
97+
typeof(s) *addr; \
98+
asm("adrp %0, %1\n" \
99+
"add %0, %0, :lo12:%1\n" \
100+
: "=r" (addr) : "S" (&s)); \
101+
addr; \
102+
})
103+
104+
/*
105+
* Home-grown __this_cpu_{ptr,read} variants that always work at HYP,
106+
* provided that sym is really a *symbol* and not a pointer obtained from
107+
* a data structure. As for SHIFT_PERCPU_PTR(), the creative casting keeps
108+
* sparse quiet.
109+
*/
85110
#define __hyp_this_cpu_ptr(sym) \
86111
({ \
87-
void *__ptr = hyp_symbol_addr(sym); \
112+
void *__ptr; \
113+
__verify_pcpu_ptr(&sym); \
114+
__ptr = hyp_symbol_addr(sym); \
88115
__ptr += read_sysreg(tpidr_el2); \
89-
(typeof(&sym))__ptr; \
116+
(typeof(sym) __kernel __force *)__ptr; \
90117
})
91118

92119
#define __hyp_this_cpu_read(sym) \

arch/arm64/include/asm/kvm_emulate.h

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -112,12 +112,6 @@ static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
112112
vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
113113
}
114114

115-
static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu)
116-
{
117-
if (vcpu_has_ptrauth(vcpu))
118-
vcpu_ptrauth_disable(vcpu);
119-
}
120-
121115
static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
122116
{
123117
return vcpu->arch.vsesr_el2;

arch/arm64/include/asm/kvm_host.h

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -284,9 +284,6 @@ struct kvm_vcpu_arch {
284284
struct kvm_guest_debug_arch vcpu_debug_state;
285285
struct kvm_guest_debug_arch external_debug_state;
286286

287-
/* Pointer to host CPU context */
288-
struct kvm_cpu_context *host_cpu_context;
289-
290287
struct thread_info *host_thread_info; /* hyp VA */
291288
struct user_fpsimd_state *host_fpsimd_state; /* hyp VA */
292289

arch/arm64/include/asm/kvm_mmu.h

Lines changed: 0 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -107,26 +107,6 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)
107107

108108
#define kern_hyp_va(v) ((typeof(v))(__kern_hyp_va((unsigned long)(v))))
109109

110-
/*
111-
* Obtain the PC-relative address of a kernel symbol
112-
* s: symbol
113-
*
114-
* The goal of this macro is to return a symbol's address based on a
115-
* PC-relative computation, as opposed to loading the VA from a
116-
* constant pool or something similar. This works well for HYP, as an
117-
* absolute VA is guaranteed to be wrong. Only use this if trying to
118-
* obtain the address of a symbol (i.e. not something you obtained by
119-
* following a pointer).
120-
*/
121-
#define hyp_symbol_addr(s) \
122-
({ \
123-
typeof(s) *addr; \
124-
asm("adrp %0, %1\n" \
125-
"add %0, %0, :lo12:%1\n" \
126-
: "=r" (addr) : "S" (&s)); \
127-
addr; \
128-
})
129-
130110
/*
131111
* We currently support using a VM-specified IPA size. For backward
132112
* compatibility, the default IPA size is fixed to 40bits.

arch/arm64/kvm/arm.c

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -340,10 +340,8 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
340340
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
341341
{
342342
int *last_ran;
343-
kvm_host_data_t *cpu_data;
344343

345344
last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
346-
cpu_data = this_cpu_ptr(&kvm_host_data);
347345

348346
/*
349347
* We might get preempted before the vCPU actually runs, but
@@ -355,7 +353,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
355353
}
356354

357355
vcpu->cpu = cpu;
358-
vcpu->arch.host_cpu_context = &cpu_data->host_ctxt;
359356

360357
kvm_vgic_load(vcpu);
361358
kvm_timer_vcpu_load(vcpu);
@@ -370,7 +367,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
370367
else
371368
vcpu_set_wfx_traps(vcpu);
372369

373-
vcpu_ptrauth_setup_lazy(vcpu);
370+
if (vcpu_has_ptrauth(vcpu))
371+
vcpu_ptrauth_disable(vcpu);
374372
}
375373

376374
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)

arch/arm64/kvm/handle_exit.c

Lines changed: 3 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -162,40 +162,14 @@ static int handle_sve(struct kvm_vcpu *vcpu, struct kvm_run *run)
162162
return 1;
163163
}
164164

165-
#define __ptrauth_save_key(regs, key) \
166-
({ \
167-
regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \
168-
regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \
169-
})
170-
171-
/*
172-
* Handle the guest trying to use a ptrauth instruction, or trying to access a
173-
* ptrauth register.
174-
*/
175-
void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu)
176-
{
177-
struct kvm_cpu_context *ctxt;
178-
179-
if (vcpu_has_ptrauth(vcpu)) {
180-
vcpu_ptrauth_enable(vcpu);
181-
ctxt = vcpu->arch.host_cpu_context;
182-
__ptrauth_save_key(ctxt->sys_regs, APIA);
183-
__ptrauth_save_key(ctxt->sys_regs, APIB);
184-
__ptrauth_save_key(ctxt->sys_regs, APDA);
185-
__ptrauth_save_key(ctxt->sys_regs, APDB);
186-
__ptrauth_save_key(ctxt->sys_regs, APGA);
187-
} else {
188-
kvm_inject_undefined(vcpu);
189-
}
190-
}
191-
192165
/*
193166
* Guest usage of a ptrauth instruction (which the guest EL1 did not turn into
194-
* a NOP).
167+
* a NOP). If we get here, it means we didn't fix up ptrauth on exit, and all
168+
* that we can do is give the guest an UNDEF.
195169
*/
196170
static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu, struct kvm_run *run)
197171
{
198-
kvm_arm_vcpu_ptrauth_trap(vcpu);
172+
kvm_inject_undefined(vcpu);
199173
return 1;
200174
}
201175

arch/arm64/kvm/hyp/debug-sr.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -185,7 +185,7 @@ void __hyp_text __debug_switch_to_guest(struct kvm_vcpu *vcpu)
185185
if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
186186
return;
187187

188-
host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
188+
host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
189189
guest_ctxt = &vcpu->arch.ctxt;
190190
host_dbg = &vcpu->arch.host_debug_state.regs;
191191
guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
@@ -207,7 +207,7 @@ void __hyp_text __debug_switch_to_host(struct kvm_vcpu *vcpu)
207207
if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
208208
return;
209209

210-
host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
210+
host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
211211
guest_ctxt = &vcpu->arch.ctxt;
212212
host_dbg = &vcpu->arch.host_debug_state.regs;
213213
guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);

arch/arm64/kvm/hyp/switch.c

Lines changed: 63 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -490,6 +490,64 @@ static bool __hyp_text handle_tx2_tvm(struct kvm_vcpu *vcpu)
490490
return true;
491491
}
492492

493+
static bool __hyp_text esr_is_ptrauth_trap(u32 esr)
494+
{
495+
u32 ec = ESR_ELx_EC(esr);
496+
497+
if (ec == ESR_ELx_EC_PAC)
498+
return true;
499+
500+
if (ec != ESR_ELx_EC_SYS64)
501+
return false;
502+
503+
switch (esr_sys64_to_sysreg(esr)) {
504+
case SYS_APIAKEYLO_EL1:
505+
case SYS_APIAKEYHI_EL1:
506+
case SYS_APIBKEYLO_EL1:
507+
case SYS_APIBKEYHI_EL1:
508+
case SYS_APDAKEYLO_EL1:
509+
case SYS_APDAKEYHI_EL1:
510+
case SYS_APDBKEYLO_EL1:
511+
case SYS_APDBKEYHI_EL1:
512+
case SYS_APGAKEYLO_EL1:
513+
case SYS_APGAKEYHI_EL1:
514+
return true;
515+
}
516+
517+
return false;
518+
}
519+
520+
#define __ptrauth_save_key(regs, key) \
521+
({ \
522+
regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \
523+
regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \
524+
})
525+
526+
static bool __hyp_text __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
527+
{
528+
struct kvm_cpu_context *ctxt;
529+
u64 val;
530+
531+
if (!vcpu_has_ptrauth(vcpu) ||
532+
!esr_is_ptrauth_trap(kvm_vcpu_get_hsr(vcpu)))
533+
return false;
534+
535+
ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
536+
__ptrauth_save_key(ctxt->sys_regs, APIA);
537+
__ptrauth_save_key(ctxt->sys_regs, APIB);
538+
__ptrauth_save_key(ctxt->sys_regs, APDA);
539+
__ptrauth_save_key(ctxt->sys_regs, APDB);
540+
__ptrauth_save_key(ctxt->sys_regs, APGA);
541+
542+
vcpu_ptrauth_enable(vcpu);
543+
544+
val = read_sysreg(hcr_el2);
545+
val |= (HCR_API | HCR_APK);
546+
write_sysreg(val, hcr_el2);
547+
548+
return true;
549+
}
550+
493551
/*
494552
* Return true when we were able to fixup the guest exit and should return to
495553
* the guest, false when we should restore the host state and return to the
@@ -524,6 +582,9 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
524582
if (__hyp_handle_fpsimd(vcpu))
525583
return true;
526584

585+
if (__hyp_handle_ptrauth(vcpu))
586+
return true;
587+
527588
if (!__populate_fault_info(vcpu))
528589
return true;
529590

@@ -642,7 +703,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
642703
struct kvm_cpu_context *guest_ctxt;
643704
u64 exit_code;
644705

645-
host_ctxt = vcpu->arch.host_cpu_context;
706+
host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
646707
host_ctxt->__hyp_running_vcpu = vcpu;
647708
guest_ctxt = &vcpu->arch.ctxt;
648709

@@ -747,7 +808,7 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
747808

748809
vcpu = kern_hyp_va(vcpu);
749810

750-
host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
811+
host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
751812
host_ctxt->__hyp_running_vcpu = vcpu;
752813
guest_ctxt = &vcpu->arch.ctxt;
753814

arch/arm64/kvm/hyp/sysreg-sr.c

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -263,12 +263,13 @@ void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu)
263263
*/
264264
void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu)
265265
{
266-
struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context;
267266
struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
267+
struct kvm_cpu_context *host_ctxt;
268268

269269
if (!has_vhe())
270270
return;
271271

272+
host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
272273
__sysreg_save_user_state(host_ctxt);
273274

274275
/*
@@ -299,12 +300,13 @@ void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu)
299300
*/
300301
void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu)
301302
{
302-
struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context;
303303
struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
304+
struct kvm_cpu_context *host_ctxt;
304305

305306
if (!has_vhe())
306307
return;
307308

309+
host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
308310
deactivate_traps_vhe_put();
309311

310312
__sysreg_save_el1_state(guest_ctxt);

arch/arm64/kvm/pmu.c

Lines changed: 2 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -163,15 +163,13 @@ static void kvm_vcpu_pmu_disable_el0(unsigned long events)
163163
*/
164164
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
165165
{
166-
struct kvm_cpu_context *host_ctxt;
167166
struct kvm_host_data *host;
168167
u32 events_guest, events_host;
169168

170169
if (!has_vhe())
171170
return;
172171

173-
host_ctxt = vcpu->arch.host_cpu_context;
174-
host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
172+
host = this_cpu_ptr(&kvm_host_data);
175173
events_guest = host->pmu_events.events_guest;
176174
events_host = host->pmu_events.events_host;
177175

@@ -184,15 +182,13 @@ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
184182
*/
185183
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
186184
{
187-
struct kvm_cpu_context *host_ctxt;
188185
struct kvm_host_data *host;
189186
u32 events_guest, events_host;
190187

191188
if (!has_vhe())
192189
return;
193190

194-
host_ctxt = vcpu->arch.host_cpu_context;
195-
host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
191+
host = this_cpu_ptr(&kvm_host_data);
196192
events_guest = host->pmu_events.events_guest;
197193
events_host = host->pmu_events.events_host;
198194

0 commit comments

Comments
 (0)