Skip to content

Commit 29eb5a3

Browse files
author
Marc Zyngier
committed
KVM: arm64: Handle PtrAuth traps early
The current way we deal with PtrAuth is a bit heavy handed: - We forcefully save the host's keys on each vcpu_load() - Handling the PtrAuth trap forces us to go all the way back to the exit handling code to just set the HCR bits Overall, this is pretty cumbersome. A better approach would be to handle it the same way we deal with the FPSIMD registers: - On vcpu_load() disable PtrAuth for the guest - On first use, save the host's keys, enable PtrAuth in the guest Crucially, this can happen as a fixup, which is done very early on exit. We can then reenter the guest immediately without leaving the hypervisor role. Another thing is that it simplifies the rest of the host handling: exiting all the way to the host means that the only possible outcome for this trap is to inject an UNDEF. Reviewed-by: Mark Rutland <[email protected]> Signed-off-by: Marc Zyngier <[email protected]>
1 parent ef3e40a commit 29eb5a3

File tree

4 files changed

+70
-38
lines changed

4 files changed

+70
-38
lines changed

arch/arm64/kvm/arm.c

Lines changed: 1 addition & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -337,12 +337,6 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
337337
preempt_enable();
338338
}
339339

340-
#define __ptrauth_save_key(regs, key) \
341-
({ \
342-
regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1); \
343-
regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1); \
344-
})
345-
346340
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
347341
{
348342
int *last_ran;
@@ -376,17 +370,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
376370
else
377371
vcpu_set_wfx_traps(vcpu);
378372

379-
if (vcpu_has_ptrauth(vcpu)) {
380-
struct kvm_cpu_context *ctxt = vcpu->arch.host_cpu_context;
381-
382-
__ptrauth_save_key(ctxt->sys_regs, APIA);
383-
__ptrauth_save_key(ctxt->sys_regs, APIB);
384-
__ptrauth_save_key(ctxt->sys_regs, APDA);
385-
__ptrauth_save_key(ctxt->sys_regs, APDB);
386-
__ptrauth_save_key(ctxt->sys_regs, APGA);
387-
373+
if (vcpu_has_ptrauth(vcpu))
388374
vcpu_ptrauth_disable(vcpu);
389-
}
390375
}
391376

392377
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)

arch/arm64/kvm/handle_exit.c

Lines changed: 3 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -162,25 +162,14 @@ static int handle_sve(struct kvm_vcpu *vcpu, struct kvm_run *run)
162162
return 1;
163163
}
164164

165-
/*
166-
* Handle the guest trying to use a ptrauth instruction, or trying to access a
167-
* ptrauth register.
168-
*/
169-
void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu)
170-
{
171-
if (vcpu_has_ptrauth(vcpu))
172-
vcpu_ptrauth_enable(vcpu);
173-
else
174-
kvm_inject_undefined(vcpu);
175-
}
176-
177165
/*
178166
* Guest usage of a ptrauth instruction (which the guest EL1 did not turn into
179-
* a NOP).
167+
* a NOP). If we get here, it is that we didn't fixup ptrauth on exit, and all
168+
* that we can do is give the guest an UNDEF.
180169
*/
181170
static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu, struct kvm_run *run)
182171
{
183-
kvm_arm_vcpu_ptrauth_trap(vcpu);
172+
kvm_inject_undefined(vcpu);
184173
return 1;
185174
}
186175

arch/arm64/kvm/hyp/switch.c

Lines changed: 61 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -490,6 +490,64 @@ static bool __hyp_text handle_tx2_tvm(struct kvm_vcpu *vcpu)
490490
return true;
491491
}
492492

493+
/*
 * Decode an ESR and decide whether it describes a PtrAuth trap: either
 * a directly trapped PAC instruction, or an MSR/MRS access to one of
 * the Pointer Authentication key registers.
 */
static bool __hyp_text esr_is_ptrauth_trap(u32 esr)
{
	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_PAC:
		/* Trapped PtrAuth instruction */
		return true;
	case ESR_ELx_EC_SYS64:
		/* Possibly a key register access - decode further below */
		break;
	default:
		return false;
	}

	switch (esr_sys64_to_sysreg(esr)) {
	case SYS_APIAKEYLO_EL1:
	case SYS_APIAKEYHI_EL1:
	case SYS_APIBKEYLO_EL1:
	case SYS_APIBKEYHI_EL1:
	case SYS_APDAKEYLO_EL1:
	case SYS_APDAKEYHI_EL1:
	case SYS_APDBKEYLO_EL1:
	case SYS_APDBKEYHI_EL1:
	case SYS_APGAKEYLO_EL1:
	case SYS_APGAKEYHI_EL1:
		return true;
	default:
		return false;
	}
}
519+
520+
/*
 * Read the LO/HI halves of one PtrAuth key pair (e.g. APIA) from the
 * CPU into a sys_regs array. 'key' is token-pasted to form both the
 * array index and the SYS_* register name.
 */
#define __ptrauth_save_key(regs, key)						\
({										\
	regs[key ## KEYLO_EL1] = read_sysreg_s(SYS_ ## key ## KEYLO_EL1);	\
	regs[key ## KEYHI_EL1] = read_sysreg_s(SYS_ ## key ## KEYHI_EL1);	\
})
525+
526+
/*
 * Early fixup for guest PtrAuth use: on the first trap, save the host's
 * keys into the host context, grant the guest PtrAuth (vcpu flag plus
 * HCR_EL2.{API,APK}), and return true so the guest is re-entered
 * without leaving hyp. Returns false when this is not a PtrAuth trap
 * (or the vcpu has no PtrAuth), deferring to the normal exit path.
 */
static bool __hyp_text __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	u64 hcr;

	if (!vcpu_has_ptrauth(vcpu))
		return false;
	if (!esr_is_ptrauth_trap(kvm_vcpu_get_hsr(vcpu)))
		return false;

	/* Preserve the host's keys before handing the feature to the guest */
	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
	__ptrauth_save_key(host_ctxt->sys_regs, APIA);
	__ptrauth_save_key(host_ctxt->sys_regs, APIB);
	__ptrauth_save_key(host_ctxt->sys_regs, APDA);
	__ptrauth_save_key(host_ctxt->sys_regs, APDB);
	__ptrauth_save_key(host_ctxt->sys_regs, APGA);

	vcpu_ptrauth_enable(vcpu);

	/* Stop trapping the guest's PtrAuth instructions and key accesses */
	hcr = read_sysreg(hcr_el2);
	hcr |= (HCR_API | HCR_APK);
	write_sysreg(hcr, hcr_el2);

	return true;
}
550+
493551
/*
494552
* Return true when we were able to fixup the guest exit and should return to
495553
* the guest, false when we should restore the host state and return to the
@@ -524,6 +582,9 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
524582
if (__hyp_handle_fpsimd(vcpu))
525583
return true;
526584

585+
if (__hyp_handle_ptrauth(vcpu))
586+
return true;
587+
527588
if (!__populate_fault_info(vcpu))
528589
return true;
529590

arch/arm64/kvm/sys_regs.c

Lines changed: 5 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1034,16 +1034,13 @@ static bool trap_ptrauth(struct kvm_vcpu *vcpu,
10341034
struct sys_reg_params *p,
10351035
const struct sys_reg_desc *rd)
10361036
{
1037-
kvm_arm_vcpu_ptrauth_trap(vcpu);
1038-
10391037
/*
1040-
* Return false for both cases as we never skip the trapped
1041-
* instruction:
1042-
*
1043-
* - Either we re-execute the same key register access instruction
1044-
* after enabling ptrauth.
1045-
* - Or an UNDEF is injected as ptrauth is not supported/enabled.
1038+
* If we land here, that is because we didn't fixup the access on exit
1039+
* by allowing the PtrAuth sysregs. The only way this happens is when
1040+
* the guest does not have PtrAuth support enabled.
10461041
*/
1042+
kvm_inject_undefined(vcpu);
1043+
10471044
return false;
10481045
}
10491046

0 commit comments

Comments
 (0)