@@ -26,28 +26,12 @@ enum exception_type {
 	except_type_serror	= 0x180,
 };
 
-static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
-{
-	u64 exc_offset;
-
-	switch (*vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT)) {
-	case PSR_MODE_EL1t:
-		exc_offset = CURRENT_EL_SP_EL0_VECTOR;
-		break;
-	case PSR_MODE_EL1h:
-		exc_offset = CURRENT_EL_SP_ELx_VECTOR;
-		break;
-	case PSR_MODE_EL0t:
-		exc_offset = LOWER_EL_AArch64_VECTOR;
-		break;
-	default:
-		exc_offset = LOWER_EL_AArch32_VECTOR;
-	}
-
-	return vcpu_read_sys_reg(vcpu, VBAR_EL1) + exc_offset + type;
-}
-
 /*
+ * This performs the exception entry at a given EL (@target_mode), stashing PC
+ * and PSTATE into ELR and SPSR respectively, and computing the new PC/PSTATE.
+ * The EL passed to this function *must* be a non-secure, privileged mode with
+ * bit 0 being set (PSTATE.SP == 1).
+ *
  * When an exception is taken, most PSTATE fields are left unchanged in the
  * handler. However, some are explicitly overridden (e.g. M[4:0]). Luckily all
  * of the inherited bits have the same position in the AArch64/AArch32 SPSR_ELx
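For context, the vector offsets this code adds together follow the architectural AArch64 vector-table layout: four groups spaced 0x200 apart, with the four entry types spaced 0x80 apart within each group. A minimal standalone sketch of that arithmetic (the constants mirror the architectural layout; the VBAR_EL1 value is made up):

/* Sketch of the vector-table arithmetic behind VBAR + exc_offset + type.
 * Group and entry offsets are the architectural AArch64 EL1 layout. */
#include <stdint.h>
#include <stdio.h>

#define CURRENT_EL_SP_EL0_VECTOR	0x000	/* current EL, using SP_EL0 */
#define CURRENT_EL_SP_ELx_VECTOR	0x200	/* current EL, using SP_ELx */
#define LOWER_EL_AArch64_VECTOR		0x400	/* lower EL, AArch64 */
#define LOWER_EL_AArch32_VECTOR		0x600	/* lower EL, AArch32 */

enum exception_type {				/* entry offset within a group */
	except_type_sync	= 0x000,
	except_type_irq		= 0x080,
	except_type_fiq		= 0x100,
	except_type_serror	= 0x180,
};

int main(void)
{
	uint64_t vbar = 0xffff800010000000ULL;	/* hypothetical VBAR_EL1 */

	/* a synchronous exception taken from a lower, AArch64 EL lands at
	 * VBAR_EL1 + 0x400 + 0x000 */
	printf("handler: 0x%llx\n", (unsigned long long)
	       (vbar + LOWER_EL_AArch64_VECTOR + except_type_sync));
	return 0;
}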
@@ -59,10 +43,35 @@ static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
  * Here we manipulate the fields in order of the AArch64 SPSR_ELx layout, from
  * MSB to LSB.
  */
-static unsigned long get_except64_pstate(struct kvm_vcpu *vcpu)
+static void enter_exception64(struct kvm_vcpu *vcpu, unsigned long target_mode,
+			      enum exception_type type)
 {
-	unsigned long sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
-	unsigned long old, new;
+	unsigned long sctlr, vbar, old, new, mode;
+	u64 exc_offset;
+
+	mode = *vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT);
+
+	if (mode == target_mode)
+		exc_offset = CURRENT_EL_SP_ELx_VECTOR;
+	else if ((mode | PSR_MODE_THREAD_BIT) == target_mode)
+		exc_offset = CURRENT_EL_SP_EL0_VECTOR;
+	else if (!(mode & PSR_MODE32_BIT))
+		exc_offset = LOWER_EL_AArch64_VECTOR;
+	else
+		exc_offset = LOWER_EL_AArch32_VECTOR;
+
+	switch (target_mode) {
+	case PSR_MODE_EL1h:
+		vbar = vcpu_read_sys_reg(vcpu, VBAR_EL1);
+		sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
+		vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu));
+		break;
+	default:
+		/* Don't do that */
+		BUG();
+	}
+
+	*vcpu_pc(vcpu) = vbar + exc_offset + type;
 
 	old = *vcpu_cpsr(vcpu);
 	new = 0;
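The new if/else chain generalises the mode switch from the deleted get_except_vector(): the target no longer has to be EL1h, and "same EL but on SP_EL0" is detected by OR-ing PSTATE.SP (bit 0 of M) into the current mode and comparing against the target. A self-contained sketch of that selection, assuming the kernel's PSR encodings (M[3:0] selects EL and stack, bit 4 flags AArch32):

/* Standalone model of the offset-selection logic added above. The PSR_*
 * values below are assumed to match the kernel's ptrace.h encodings. */
#include <assert.h>

#define PSR_MODE_EL0t		0x0
#define PSR_MODE_EL1t		0x4	/* EL1, handler runs on SP_EL0 */
#define PSR_MODE_EL1h		0x5	/* EL1, handler runs on SP_EL1 */
#define PSR_MODE_MASK		0xf
#define PSR_MODE32_BIT		0x10
#define PSR_MODE_THREAD_BIT	0x1	/* PSTATE.SP */

#define CURRENT_EL_SP_EL0_VECTOR	0x000
#define CURRENT_EL_SP_ELx_VECTOR	0x200
#define LOWER_EL_AArch64_VECTOR		0x400
#define LOWER_EL_AArch32_VECTOR		0x600

static unsigned long select_offset(unsigned long cpsr, unsigned long target)
{
	unsigned long mode = cpsr & (PSR_MODE_MASK | PSR_MODE32_BIT);

	if (mode == target)				/* same EL, SP_ELx */
		return CURRENT_EL_SP_ELx_VECTOR;
	if ((mode | PSR_MODE_THREAD_BIT) == target)	/* same EL, SP_EL0 */
		return CURRENT_EL_SP_EL0_VECTOR;
	if (!(mode & PSR_MODE32_BIT))			/* lower EL, AArch64 */
		return LOWER_EL_AArch64_VECTOR;
	return LOWER_EL_AArch32_VECTOR;			/* lower EL, AArch32 */
}

int main(void)
{
	assert(select_offset(PSR_MODE_EL1h, PSR_MODE_EL1h) == CURRENT_EL_SP_ELx_VECTOR);
	assert(select_offset(PSR_MODE_EL1t, PSR_MODE_EL1h) == CURRENT_EL_SP_EL0_VECTOR);
	assert(select_offset(PSR_MODE_EL0t, PSR_MODE_EL1h) == LOWER_EL_AArch64_VECTOR);
	assert(select_offset(PSR_MODE_EL0t | PSR_MODE32_BIT, PSR_MODE_EL1h) == LOWER_EL_AArch32_VECTOR);
	return 0;
}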
@@ -105,9 +114,10 @@ static unsigned long get_except64_pstate(struct kvm_vcpu *vcpu)
 	new |= PSR_I_BIT;
 	new |= PSR_F_BIT;
 
-	new |= PSR_MODE_EL1h;
+	new |= target_mode;
 
-	return new;
+	*vcpu_cpsr(vcpu) = new;
+	vcpu_write_spsr(vcpu, old);
 }
 
 static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
@@ -116,11 +126,7 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
 	bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
 	u32 esr = 0;
 
-	vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu));
-	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
-
-	*vcpu_cpsr(vcpu) = get_except64_pstate(vcpu);
-	vcpu_write_spsr(vcpu, cpsr);
+	enter_exception64(vcpu, PSR_MODE_EL1h, except_type_sync);
 
 	vcpu_write_sys_reg(vcpu, addr, FAR_EL1);
@@ -148,14 +154,9 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
 
 static void inject_undef64(struct kvm_vcpu *vcpu)
 {
-	unsigned long cpsr = *vcpu_cpsr(vcpu);
 	u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);
 
-	vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu));
-	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
-
-	*vcpu_cpsr(vcpu) = get_except64_pstate(vcpu);
-	vcpu_write_spsr(vcpu, cpsr);
+	enter_exception64(vcpu, PSR_MODE_EL1h, except_type_sync);
 
 	/*
 	 * Build an unknown exception, depending on the instruction
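Net effect: both injectors now make a single enter_exception64() call instead of open-coding the ELR/PC/PSTATE/SPSR dance, so the ordering constraint (the old PSTATE must be captured before the new one is installed) lives in one place. A toy model of that stash-and-switch sequence; the types and the PSTATE handling are illustrative stand-ins, not KVM's:

/* Toy model of the exception-entry semantics enter_exception64() owns. */
#include <stdint.h>
#include <stdio.h>

struct toy_vcpu {
	uint64_t pc, cpsr;
	uint64_t elr_el1, spsr_el1, vbar_el1;
};

static void toy_enter_exception(struct toy_vcpu *v, uint64_t target_mode,
				uint64_t exc_offset, uint64_t type)
{
	uint64_t old = v->cpsr;		/* capture before overwriting */

	v->elr_el1 = v->pc;		/* return address for ERET */
	v->pc = v->vbar_el1 + exc_offset + type;
	v->cpsr = target_mode;		/* simplified: the real code rebuilds
					 * PSTATE field by field per SCTLR_EL1 */
	v->spsr_el1 = old;		/* restored on ERET */
}

int main(void)
{
	struct toy_vcpu v = { .pc = 0x1000, .cpsr = 0x0 /* EL0t */,
			      .vbar_el1 = 0x8000 };

	/* sync exception from a lower AArch64 EL, targeting EL1h */
	toy_enter_exception(&v, 0x5, 0x400, 0x0);
	printf("pc=0x%llx elr=0x%llx spsr=0x%llx\n",
	       (unsigned long long)v.pc, (unsigned long long)v.elr_el1,
	       (unsigned long long)v.spsr_el1);
	return 0;
}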