Skip to content

Commit d9d7d84

Browse files
author
Marc Zyngier
committed
KVM: arm64: Parametrize exception entry with a target EL
We currently assume that an exception is delivered to EL1, always. Once we emulate EL2, this no longer will be the case. To prepare for this, add a target_mode parameter. While we're at it, merge the computation of the target PC and PSTATE into a single function that updates both PC and CPSR after saving their previous values in the corresponding ELR/SPSR. This ensures that they are updated in the correct order (a pretty common source of bugs...). Reviewed-by: Mark Rutland <[email protected]> Signed-off-by: Marc Zyngier <[email protected]>
1 parent 349c330 commit d9d7d84

File tree

2 files changed

+39
-37
lines changed

2 files changed

+39
-37
lines changed

arch/arm64/include/asm/ptrace.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,7 @@
3535
#define GIC_PRIO_PSR_I_SET (1 << 4)
3636

3737
/* Additional SPSR bits not exposed in the UABI */
38+
#define PSR_MODE_THREAD_BIT (1 << 0)
3839
#define PSR_IL_BIT (1 << 20)
3940

4041
/* AArch32-specific ptrace requests */

arch/arm64/kvm/inject_fault.c

Lines changed: 38 additions & 37 deletions
Original file line numberDiff line numberDiff line change
@@ -26,28 +26,12 @@ enum exception_type {
2626
except_type_serror = 0x180,
2727
};
2828

29-
static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
30-
{
31-
u64 exc_offset;
32-
33-
switch (*vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT)) {
34-
case PSR_MODE_EL1t:
35-
exc_offset = CURRENT_EL_SP_EL0_VECTOR;
36-
break;
37-
case PSR_MODE_EL1h:
38-
exc_offset = CURRENT_EL_SP_ELx_VECTOR;
39-
break;
40-
case PSR_MODE_EL0t:
41-
exc_offset = LOWER_EL_AArch64_VECTOR;
42-
break;
43-
default:
44-
exc_offset = LOWER_EL_AArch32_VECTOR;
45-
}
46-
47-
return vcpu_read_sys_reg(vcpu, VBAR_EL1) + exc_offset + type;
48-
}
49-
5029
/*
30+
* This performs the exception entry at a given EL (@target_mode), stashing PC
31+
* and PSTATE into ELR and SPSR respectively, and computing the new PC/PSTATE.
32+
* The EL passed to this function *must* be a non-secure, privileged mode with
33+
* bit 0 being set (PSTATE.SP == 1).
34+
*
5135
* When an exception is taken, most PSTATE fields are left unchanged in the
5236
* handler. However, some are explicitly overridden (e.g. M[4:0]). Luckily all
5337
* of the inherited bits have the same position in the AArch64/AArch32 SPSR_ELx
@@ -59,10 +43,35 @@ static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
5943
* Here we manipulate the fields in order of the AArch64 SPSR_ELx layout, from
6044
* MSB to LSB.
6145
*/
62-
static unsigned long get_except64_pstate(struct kvm_vcpu *vcpu)
46+
static void enter_exception64(struct kvm_vcpu *vcpu, unsigned long target_mode,
47+
enum exception_type type)
6348
{
64-
unsigned long sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
65-
unsigned long old, new;
49+
unsigned long sctlr, vbar, old, new, mode;
50+
u64 exc_offset;
51+
52+
mode = *vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT);
53+
54+
if (mode == target_mode)
55+
exc_offset = CURRENT_EL_SP_ELx_VECTOR;
56+
else if ((mode | PSR_MODE_THREAD_BIT) == target_mode)
57+
exc_offset = CURRENT_EL_SP_EL0_VECTOR;
58+
else if (!(mode & PSR_MODE32_BIT))
59+
exc_offset = LOWER_EL_AArch64_VECTOR;
60+
else
61+
exc_offset = LOWER_EL_AArch32_VECTOR;
62+
63+
switch (target_mode) {
64+
case PSR_MODE_EL1h:
65+
vbar = vcpu_read_sys_reg(vcpu, VBAR_EL1);
66+
sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
67+
vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu));
68+
break;
69+
default:
70+
/* Don't do that */
71+
BUG();
72+
}
73+
74+
*vcpu_pc(vcpu) = vbar + exc_offset + type;
6675

6776
old = *vcpu_cpsr(vcpu);
6877
new = 0;
@@ -105,9 +114,10 @@ static unsigned long get_except64_pstate(struct kvm_vcpu *vcpu)
105114
new |= PSR_I_BIT;
106115
new |= PSR_F_BIT;
107116

108-
new |= PSR_MODE_EL1h;
117+
new |= target_mode;
109118

110-
return new;
119+
*vcpu_cpsr(vcpu) = new;
120+
vcpu_write_spsr(vcpu, old);
111121
}
112122

113123
static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
@@ -116,11 +126,7 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
116126
bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
117127
u32 esr = 0;
118128

119-
vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu));
120-
*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
121-
122-
*vcpu_cpsr(vcpu) = get_except64_pstate(vcpu);
123-
vcpu_write_spsr(vcpu, cpsr);
129+
enter_exception64(vcpu, PSR_MODE_EL1h, except_type_sync);
124130

125131
vcpu_write_sys_reg(vcpu, addr, FAR_EL1);
126132

@@ -148,14 +154,9 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
148154

149155
static void inject_undef64(struct kvm_vcpu *vcpu)
150156
{
151-
unsigned long cpsr = *vcpu_cpsr(vcpu);
152157
u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);
153158

154-
vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu));
155-
*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
156-
157-
*vcpu_cpsr(vcpu) = get_except64_pstate(vcpu);
158-
vcpu_write_spsr(vcpu, cpsr);
159+
enter_exception64(vcpu, PSR_MODE_EL1h, except_type_sync);
159160

160161
/*
161162
* Build an unknown exception, depending on the instruction

0 commit comments

Comments
 (0)