Commit 77ee70a

KVM: arm64: nv: Honor SError exception routing / masking
To date KVM has used HCR_EL2.VSE to track the state of a pending SError for the guest. With this bit set, hardware respects the EL1 exception routing / masking rules and injects the vSError when appropriate.

This isn't correct for NV guests, as hardware is oblivious to vEL2's intentions for SErrors. Worse yet, with FEAT_NV2 the guest can change the routing behind our back, since HCR_EL2 is redirected to memory. Cope with this mess by:

- Using a flag (instead of HCR_EL2.VSE) to track the pending SError state when SErrors are unconditionally masked for the current context

- Resampling the routing / masking of a pending SError on every guest entry/exit

- Emulating exception entry when SError routing implies a translation regime change

Reviewed-by: Marc Zyngier <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Oliver Upton <[email protected]>
1 parent 9aba641 commit 77ee70a
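The routing rules above boil down to a three-way decision, implemented in the inject_fault.c hunk further down. The stand-alone C restatement below is purely illustrative: the context struct and helper are hypothetical simplifications, not kernel code; only the decision logic mirrors kvm_serror_target_is_el2() and kvm_serror_undeliverable_at_el2() from the diff.

```c
#include <stdbool.h>

/* Hypothetical, simplified vCPU view -- not the kernel's structures. */
struct vserror_ctx {
	bool is_el2;	/* vCPU is executing at vEL2 */
	bool hcr_amo;	/* vEL2's HCR_EL2.AMO (SError routing to EL2) */
	bool hcr_tge;	/* vEL2's HCR_EL2.TGE */
};

enum vserror_action {
	INJECT_AT_VEL2,		/* emulate exception entry to vEL2 */
	LATCH_PENDING,		/* set NESTED_SERROR_PENDING; resample later */
	PEND_WITH_HCR_VSE,	/* hardware honors the EL1 rules as before */
};

/* Mirrors the decision in kvm_inject_serror_esr() from the diff below. */
static enum vserror_action route_vserror(const struct vserror_ctx *c)
{
	/* vEL1/vEL0 with AMO set: the SError targets vEL2, which
	 * implies a translation regime change KVM must emulate. */
	if (!c->is_el2 && c->hcr_amo)
		return INJECT_AT_VEL2;

	/* At vEL2 with TGE and AMO both clear, SErrors are
	 * unconditionally masked: latch the error in a flag. */
	if (c->is_el2 && !c->hcr_tge && !c->hcr_amo)
		return LATCH_PENDING;

	/* Otherwise HCR_EL2.VSE still works: hardware applies the
	 * EL1 routing / masking rules and injects the vSError. */
	return PEND_WITH_HCR_VSE;
}
```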

File tree: 11 files changed (+144, -38 lines)

arch/arm64/include/asm/kvm_emulate.h
Lines changed: 19 additions & 1 deletion

@@ -45,7 +45,7 @@ bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
 void kvm_skip_instr32(struct kvm_vcpu *vcpu);
 
 void kvm_inject_undefined(struct kvm_vcpu *vcpu);
-void kvm_inject_vabt(struct kvm_vcpu *vcpu);
+int kvm_inject_serror_esr(struct kvm_vcpu *vcpu, u64 esr);
 int kvm_inject_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr);
 void kvm_inject_size_fault(struct kvm_vcpu *vcpu);
 
@@ -59,12 +59,25 @@ static inline int kvm_inject_sea_iabt(struct kvm_vcpu *vcpu, u64 addr)
 	return kvm_inject_sea(vcpu, true, addr);
 }
 
+static inline int kvm_inject_serror(struct kvm_vcpu *vcpu)
+{
+	/*
+	 * ESR_ELx.ISV (later renamed to IDS) indicates whether or not
+	 * ESR_ELx.ISS contains IMPLEMENTATION DEFINED syndrome information.
+	 *
+	 * Set the bit when injecting an SError w/o an ESR to indicate ISS
+	 * does not follow the architected format.
+	 */
+	return kvm_inject_serror_esr(vcpu, ESR_ELx_ISV);
+}
+
 void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);
 
 void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu);
 int kvm_inject_nested_sync(struct kvm_vcpu *vcpu, u64 esr_el2);
 int kvm_inject_nested_irq(struct kvm_vcpu *vcpu);
 int kvm_inject_nested_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr);
+int kvm_inject_nested_serror(struct kvm_vcpu *vcpu, u64 esr);
 
 static inline void kvm_inject_nested_sve_trap(struct kvm_vcpu *vcpu)
 {
@@ -205,6 +218,11 @@ static inline bool vcpu_el2_tge_is_set(const struct kvm_vcpu *vcpu)
 	return ctxt_sys_reg(&vcpu->arch.ctxt, HCR_EL2) & HCR_TGE;
 }
 
+static inline bool vcpu_el2_amo_is_set(const struct kvm_vcpu *vcpu)
+{
+	return ctxt_sys_reg(&vcpu->arch.ctxt, HCR_EL2) & HCR_AMO;
+}
+
 static inline bool is_hyp_ctxt(const struct kvm_vcpu *vcpu)
 {
 	bool e2h, tge;

arch/arm64/include/asm/kvm_host.h
Lines changed: 15 additions & 3 deletions

@@ -817,7 +817,7 @@ struct kvm_vcpu_arch {
 	u8 iflags;
 
 	/* State flags for kernel bookkeeping, unused by the hypervisor code */
-	u8 sflags;
+	u16 sflags;
 
 	/*
 	 * Don't run the guest (internal implementation need).
@@ -953,9 +953,21 @@ struct kvm_vcpu_arch {
 		__vcpu_flags_preempt_enable();			\
 	} while (0)
 
+#define __vcpu_test_and_clear_flag(v, flagset, f, m)		\
+	({							\
+		typeof(v->arch.flagset) set;			\
+								\
+		set = __vcpu_get_flag(v, flagset, f, m);	\
+		__vcpu_clear_flag(v, flagset, f, m);		\
+								\
+		set;						\
+	})
+
 #define vcpu_get_flag(v, ...)	__vcpu_get_flag((v), __VA_ARGS__)
 #define vcpu_set_flag(v, ...)	__vcpu_set_flag((v), __VA_ARGS__)
 #define vcpu_clear_flag(v, ...)	__vcpu_clear_flag((v), __VA_ARGS__)
+#define vcpu_test_and_clear_flag(v, ...)			\
+	__vcpu_test_and_clear_flag((v), __VA_ARGS__)
 
 /* KVM_ARM_VCPU_INIT completed */
 #define VCPU_INITIALIZED	__vcpu_single_flag(cflags, BIT(0))
@@ -1015,6 +1027,8 @@ struct kvm_vcpu_arch {
 #define IN_WFI			__vcpu_single_flag(sflags, BIT(6))
 /* KVM is currently emulating a nested ERET */
 #define IN_NESTED_ERET		__vcpu_single_flag(sflags, BIT(7))
+/* SError pending for nested guest */
+#define NESTED_SERROR_PENDING	__vcpu_single_flag(sflags, BIT(8))
 
 
 /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
@@ -1387,8 +1401,6 @@ static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
 	return (vcpu_arch->steal.base != INVALID_GPA);
 }
 
-void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);
-
 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
 
 DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);
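The new __vcpu_test_and_clear_flag() relies on the GCC/Clang statement-expression extension already used by the flag helpers: the last expression inside ({ ... }) becomes the macro's value, so the old flag state can be returned after the clear. A minimal stand-alone sketch of the same read-then-clear pattern, using a plain bitmask rather than KVM's flag sets:

```c
#include <stdio.h>

/* Statement-expression demo: evaluates to the bit's old value,
 * clearing it as a side effect. Plain bitmask, not kernel code. */
#define test_and_clear_bit(flags, bit)			\
	({						\
		unsigned long __old = (flags) & (bit);	\
		(flags) &= ~(bit);			\
		__old;					\
	})

int main(void)
{
	unsigned long flags = 1UL << 8;	/* e.g. a pending-SError bit */

	if (test_and_clear_bit(flags, 1UL << 8))
		printf("flag was set, now cleared (flags=%#lx)\n", flags);

	if (!test_and_clear_bit(flags, 1UL << 8))
		printf("second test: already clear\n");

	return 0;
}
```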

arch/arm64/include/asm/kvm_nested.h
Lines changed: 2 additions & 0 deletions

@@ -80,6 +80,8 @@ extern void kvm_vcpu_load_hw_mmu(struct kvm_vcpu *vcpu);
 extern void kvm_vcpu_put_hw_mmu(struct kvm_vcpu *vcpu);
 
 extern void check_nested_vcpu_requests(struct kvm_vcpu *vcpu);
+extern void kvm_nested_flush_hwstate(struct kvm_vcpu *vcpu);
+extern void kvm_nested_sync_hwstate(struct kvm_vcpu *vcpu);
 
 struct kvm_s2_trans {
 	phys_addr_t output;

arch/arm64/kvm/arm.c
Lines changed: 4 additions & 0 deletions

@@ -1188,6 +1188,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 	 */
 	preempt_disable();
 
+	kvm_nested_flush_hwstate(vcpu);
+
 	if (kvm_vcpu_has_pmu(vcpu))
 		kvm_pmu_flush_hwstate(vcpu);
 
@@ -1287,6 +1289,8 @@
 	/* Exit types that need handling before we can be preempted */
 	handle_exit_early(vcpu, ret);
 
+	kvm_nested_sync_hwstate(vcpu);
+
 	preempt_enable();
 
 	/*
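The bodies of kvm_nested_flush_hwstate() and kvm_nested_sync_hwstate() live in the eleventh changed file, which is not shown in this excerpt. Going by the commit message, they resample the routing / masking of a latched SError around every guest entry/exit. The following is a purely hypothetical sketch of what the entry-side resample could look like, built only from helpers visible elsewhere in this diff:

```c
/* HYPOTHETICAL sketch -- the real implementation is in a file not
 * shown here. Re-evaluate a latched SError before entering the guest:
 * FEAT_NV2 lets the guest rewrite HCR_EL2 in memory, so the earlier
 * routing decision may be stale by now. */
void kvm_nested_flush_hwstate(struct kvm_vcpu *vcpu)
{
	if (!vcpu_test_and_clear_flag(vcpu, NESTED_SERROR_PENDING))
		return;

	/* Re-run the routing decision with the saved syndrome; if the
	 * SError is still undeliverable this re-latches the flag. */
	kvm_inject_serror_esr(vcpu, vcpu_get_vsesr(vcpu));
}
```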

arch/arm64/kvm/emulate-nested.c
Lines changed: 14 additions & 0 deletions

@@ -2714,6 +2714,9 @@ static void kvm_inject_el2_exception(struct kvm_vcpu *vcpu, u64 esr_el2,
 	case except_type_irq:
 		kvm_pend_exception(vcpu, EXCEPT_AA64_EL2_IRQ);
 		break;
+	case except_type_serror:
+		kvm_pend_exception(vcpu, EXCEPT_AA64_EL2_SERR);
+		break;
 	default:
 		WARN_ONCE(1, "Unsupported EL2 exception injection %d\n", type);
 	}
@@ -2821,3 +2824,14 @@ int kvm_inject_nested_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr)
 	vcpu_write_sys_reg(vcpu, FAR_EL2, addr);
 	return kvm_inject_nested_sync(vcpu, esr);
 }
+
+int kvm_inject_nested_serror(struct kvm_vcpu *vcpu, u64 esr)
+{
+	/*
+	 * Hardware sets up the EC field when propagating ESR as a result of
+	 * vSError injection. Manually populate EC for an emulated SError
+	 * exception.
+	 */
+	esr |= FIELD_PREP(ESR_ELx_EC_MASK, ESR_ELx_EC_SERROR);
+	return kvm_inject_nested(vcpu, esr, except_type_serror);
+}
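The FIELD_PREP() above shifts the SError exception class (0x2F) into ESR_ELx.EC, bits [31:26]. A stand-alone arithmetic check, with the constants restated here for the example:

```c
#include <stdint.h>
#include <stdio.h>

/* Restated from the arm64 headers for illustration. */
#define ESR_ELx_EC_SHIFT	26
#define ESR_ELx_EC_MASK		(0x3FULL << ESR_ELx_EC_SHIFT)	/* bits [31:26] */
#define ESR_ELx_EC_SERROR	0x2F

int main(void)
{
	uint64_t esr = 0;	/* caller-supplied ISS bits would go here */

	/* Equivalent of FIELD_PREP(ESR_ELx_EC_MASK, ESR_ELx_EC_SERROR) */
	esr |= (uint64_t)ESR_ELx_EC_SERROR << ESR_ELx_EC_SHIFT;
	printf("ESR = %#llx\n", (unsigned long long)esr);	/* 0xbc000000 */
	return 0;
}
```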

arch/arm64/kvm/guest.c
Lines changed: 20 additions & 13 deletions

@@ -818,8 +818,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
 			      struct kvm_vcpu_events *events)
 {
-	events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE);
 	events->exception.serror_has_esr = cpus_have_final_cap(ARM64_HAS_RAS_EXTN);
+	events->exception.serror_pending = (vcpu->arch.hcr_el2 & HCR_VSE) ||
+					   vcpu_get_flag(vcpu, NESTED_SERROR_PENDING);
 
 	if (events->exception.serror_pending && events->exception.serror_has_esr)
 		events->exception.serror_esr = vcpu_get_vsesr(vcpu);
@@ -839,23 +840,29 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
 	bool serror_pending = events->exception.serror_pending;
 	bool has_esr = events->exception.serror_has_esr;
 	bool ext_dabt_pending = events->exception.ext_dabt_pending;
+	u64 esr = events->exception.serror_esr;
 	int ret = 0;
 
-	if (serror_pending && has_esr) {
-		if (!cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
-			return -EINVAL;
-
-		if (!((events->exception.serror_esr) & ~ESR_ELx_ISS_MASK))
-			kvm_set_sei_esr(vcpu, events->exception.serror_esr);
-		else
-			return -EINVAL;
-	} else if (serror_pending) {
-		kvm_inject_vabt(vcpu);
-	}
-
 	if (ext_dabt_pending)
 		ret = kvm_inject_sea_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
 
+	if (ret < 0)
+		return ret;
+
+	if (!serror_pending)
+		return 0;
+
+	if (!cpus_have_final_cap(ARM64_HAS_RAS_EXTN) && has_esr)
+		return -EINVAL;
+
+	if (has_esr && (esr & ~ESR_ELx_ISS_MASK))
+		return -EINVAL;
+
+	if (has_esr)
+		ret = kvm_inject_serror_esr(vcpu, esr);
+	else
+		ret = kvm_inject_serror(vcpu);
+
 	return (ret < 0) ? ret : 0;
 }
 
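For reference, this is the ioctl path the rewritten __kvm_arm_vcpu_set_events() serves. A minimal userspace sketch that pends an SError with a caller-supplied syndrome via KVM_SET_VCPU_EVENTS; error handling is trimmed and vcpu_fd is assumed to be an open vCPU descriptor on a RAS-capable host:

```c
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int pend_serror(int vcpu_fd, unsigned long long iss)
{
	struct kvm_vcpu_events events;

	memset(&events, 0, sizeof(events));
	events.exception.serror_pending = 1;
	events.exception.serror_has_esr = 1;
	/* Only ISS bits are accepted; anything else returns -EINVAL. */
	events.exception.serror_esr = iss;

	return ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events);
}
```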

arch/arm64/kvm/handle_exit.c
Lines changed: 2 additions & 2 deletions

@@ -32,7 +32,7 @@ typedef int (*exit_handle_fn)(struct kvm_vcpu *);
 static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u64 esr)
 {
 	if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(NULL, esr))
-		kvm_inject_vabt(vcpu);
+		kvm_inject_serror(vcpu);
 }
 
 static int handle_hvc(struct kvm_vcpu *vcpu)
@@ -490,7 +490,7 @@ void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index)
 
 		kvm_handle_guest_serror(vcpu, disr_to_esr(disr));
 	} else {
-		kvm_inject_vabt(vcpu);
+		kvm_inject_serror(vcpu);
 	}
 
 	return;

arch/arm64/kvm/hyp/exception.c
Lines changed: 5 additions & 1 deletion

@@ -347,9 +347,13 @@ static void kvm_inject_exception(struct kvm_vcpu *vcpu)
 		enter_exception64(vcpu, PSR_MODE_EL2h, except_type_irq);
 		break;
 
+	case unpack_vcpu_flag(EXCEPT_AA64_EL2_SERR):
+		enter_exception64(vcpu, PSR_MODE_EL2h, except_type_serror);
+		break;
+
 	default:
 		/*
-		 * Only EL1_SYNC and EL2_{SYNC,IRQ} makes
+		 * Only EL1_SYNC and EL2_{SYNC,IRQ,SERR} makes
 		 * sense so far. Everything else gets silently
 		 * ignored.
 		 */

arch/arm64/kvm/inject_fault.c
Lines changed: 22 additions & 17 deletions

@@ -219,25 +219,30 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
 	inject_undef64(vcpu);
 }
 
-void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 esr)
+static bool kvm_serror_target_is_el2(struct kvm_vcpu *vcpu)
 {
-	vcpu_set_vsesr(vcpu, esr & ESR_ELx_ISS_MASK);
-	*vcpu_hcr(vcpu) |= HCR_VSE;
+	return is_hyp_ctxt(vcpu) || vcpu_el2_amo_is_set(vcpu);
 }
 
-/**
- * kvm_inject_vabt - inject an async abort / SError into the guest
- * @vcpu: The VCPU to receive the exception
- *
- * It is assumed that this code is called from the VCPU thread and that the
- * VCPU therefore is not currently executing guest code.
- *
- * Systems with the RAS Extensions specify an imp-def ESR (ISV/IDS = 1) with
- * the remaining ISS all-zeros so that this error is not interpreted as an
- * uncategorized RAS error. Without the RAS Extensions we can't specify an ESR
- * value, so the CPU generates an imp-def value.
- */
-void kvm_inject_vabt(struct kvm_vcpu *vcpu)
+static bool kvm_serror_undeliverable_at_el2(struct kvm_vcpu *vcpu)
 {
-	kvm_set_sei_esr(vcpu, ESR_ELx_ISV);
+	return !(vcpu_el2_tge_is_set(vcpu) || vcpu_el2_amo_is_set(vcpu));
+}
+
+int kvm_inject_serror_esr(struct kvm_vcpu *vcpu, u64 esr)
+{
+	lockdep_assert_held(&vcpu->mutex);
+
+	if (is_nested_ctxt(vcpu) && kvm_serror_target_is_el2(vcpu))
+		return kvm_inject_nested_serror(vcpu, esr);
+
+	if (vcpu_is_el2(vcpu) && kvm_serror_undeliverable_at_el2(vcpu)) {
+		vcpu_set_vsesr(vcpu, esr);
+		vcpu_set_flag(vcpu, NESTED_SERROR_PENDING);
+		return 1;
+	}
+
+	vcpu_set_vsesr(vcpu, esr & ESR_ELx_ISS_MASK);
+	*vcpu_hcr(vcpu) |= HCR_VSE;
+	return 1;
 }

arch/arm64/kvm/mmu.c
Lines changed: 1 addition & 1 deletion

@@ -1808,7 +1808,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
 	 * There is no need to pass the error into the guest.
 	 */
 	if (kvm_handle_guest_sea())
-		kvm_inject_vabt(vcpu);
+		return kvm_inject_serror(vcpu);
 
 	return 1;
 }
