Commit a394cf6

Merge branch 'kvm-arm64/misc-5.9' into kvmarm-master/next-WIP

Author/Committer: Marc Zyngier
Signed-off-by: Marc Zyngier <[email protected]>
Parents: c9dc950 + a59a2ed

File tree: 16 files changed (+85, -90 lines)


arch/arm64/Kconfig

Lines changed: 0 additions & 16 deletions
@@ -1182,22 +1182,6 @@ config HARDEN_BRANCH_PREDICTOR
 
 	  If unsure, say Y.
 
-config HARDEN_EL2_VECTORS
-	bool "Harden EL2 vector mapping against system register leak" if EXPERT
-	default y
-	help
-	  Speculation attacks against some high-performance processors can
-	  be used to leak privileged information such as the vector base
-	  register, resulting in a potential defeat of the EL2 layout
-	  randomization.
-
-	  This config option will map the vectors to a fixed location,
-	  independent of the EL2 code mapping, so that revealing VBAR_EL2
-	  to an attacker does not give away any extra information. This
-	  only gets enabled on affected CPUs.
-
-	  If unsure, say Y.
-
 config ARM64_SSBD
 	bool "Speculative Store Bypass Disable" if EXPERT
 	default y

arch/arm64/include/asm/kvm_asm.h

Lines changed: 0 additions & 1 deletion
@@ -191,7 +191,6 @@ extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
 .macro get_vcpu_ptr vcpu, ctxt
 	get_host_ctxt \ctxt, \vcpu
 	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
-	kern_hyp_va	\vcpu
 .endm
 
 #endif

arch/arm64/include/asm/kvm_emulate.h

Lines changed: 17 additions & 17 deletions
@@ -238,14 +238,14 @@ static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
 	return mode != PSR_MODE_EL0t;
 }
 
-static __always_inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
+static __always_inline u32 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
 {
 	return vcpu->arch.fault.esr_el2;
 }
 
 static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
 {
-	u32 esr = kvm_vcpu_get_hsr(vcpu);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
 
 	if (esr & ESR_ELx_CV)
 		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
@@ -270,64 +270,64 @@ static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
 
 static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
+	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
 }
 
 static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
 }
 
 static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
+	return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
 }
 
 static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
 }
 
 static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SF);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
 }
 
 static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
 {
-	return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
+	return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
 }
 
 static __always_inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
 }
 
 static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR) ||
 		kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
 }
 
 static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
 }
 
 static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
 {
-	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
+	return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
 }
 
 /* This one is not specific to Data Abort */
 static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
+	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
 }
 
 static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
 {
-	return ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
+	return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
 }
 
 static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
@@ -337,12 +337,12 @@ static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
 
 static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
+	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
 }
 
 static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
+	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
 }
 
 static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
@@ -366,7 +366,7 @@ static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
 
 static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
 {
-	u32 esr = kvm_vcpu_get_hsr(vcpu);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
 	return ESR_ELx_SYS64_ISS_RT(esr);
 }
 
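The rename above does not change what these helpers compute: each one still masks a field out of the saved ESR_EL2 syndrome. A rough, stand-alone sketch of that field layout (EC in bits [31:26], IL in bit 25, ISS in bits [24:0]) follows; the constants and the sample syndrome value are illustrative stand-ins rather than the kernel's own esr.h definitions:

#include <stdint.h>
#include <stdio.h>

/* Illustrative copies of the architectural ESR_ELx field layout. */
#define ESR_ELx_EC_SHIFT	26
#define ESR_ELx_EC_MASK		(0x3FUL << ESR_ELx_EC_SHIFT)
#define ESR_ELx_IL		(1UL << 25)
#define ESR_ELx_ISS_MASK	0x01FFFFFFUL

/* Same style as the accessors above: pull one field out of a saved syndrome. */
static unsigned int esr_class(uint32_t esr)
{
	return (esr & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT;
}

static int esr_il_is32bit(uint32_t esr)
{
	return !!(esr & ESR_ELx_IL);
}

int main(void)
{
	/* Example syndrome: EC=0x24 (data abort from a lower EL), IL set. */
	uint32_t esr = 0x92000045;

	printf("EC=0x%02x IL=%d ISS=0x%07x\n",
	       esr_class(esr), esr_il_is32bit(esr),
	       (unsigned int)(esr & ESR_ELx_ISS_MASK));
	return 0;
}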

arch/arm64/kernel/cpu_errata.c

Lines changed: 2 additions & 2 deletions
@@ -637,7 +637,7 @@ has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
 	return is_midr_in_range(midr, &range) && has_dic;
 }
 
-#if defined(CONFIG_HARDEN_EL2_VECTORS)
+#ifdef CONFIG_RANDOMIZE_BASE
 
 static const struct midr_range ca57_a72[] = {
 	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
@@ -882,7 +882,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
 		.matches = check_branch_predictor,
 	},
-#ifdef CONFIG_HARDEN_EL2_VECTORS
+#ifdef CONFIG_RANDOMIZE_BASE
 	{
 		.desc = "EL2 vector hardening",
 		.capability = ARM64_HARDEN_EL2_VECTORS,

arch/arm64/kvm/Kconfig

Lines changed: 1 addition & 1 deletion
@@ -58,7 +58,7 @@ config KVM_ARM_PMU
 	  virtual machines.
 
 config KVM_INDIRECT_VECTORS
-	def_bool HARDEN_BRANCH_PREDICTOR || HARDEN_EL2_VECTORS
+	def_bool HARDEN_BRANCH_PREDICTOR || RANDOMIZE_BASE
 
 endif # KVM
 

arch/arm64/kvm/arm.c

Lines changed: 0 additions & 1 deletion
@@ -456,7 +456,6 @@ static bool need_new_vmid_gen(struct kvm_vmid *vmid)
 
 /**
  * update_vmid - Update the vmid with a valid VMID for the current generation
- * @kvm: The guest that struct vmid belongs to
  * @vmid: The stage-2 VMID information struct
  */
 static void update_vmid(struct kvm_vmid *vmid)

arch/arm64/kvm/handle_exit.c

Lines changed: 16 additions & 16 deletions
@@ -89,7 +89,7 @@ static int handle_no_fpsimd(struct kvm_vcpu *vcpu, struct kvm_run *run)
  */
 static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-	if (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
+	if (kvm_vcpu_get_esr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
 		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
 		vcpu->stat.wfe_exit_stat++;
 		kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
@@ -119,13 +119,13 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
  */
 static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
 	int ret = 0;
 
 	run->exit_reason = KVM_EXIT_DEBUG;
-	run->debug.arch.hsr = hsr;
+	run->debug.arch.hsr = esr;
 
-	switch (ESR_ELx_EC(hsr)) {
+	switch (ESR_ELx_EC(esr)) {
 	case ESR_ELx_EC_WATCHPT_LOW:
 		run->debug.arch.far = vcpu->arch.fault.far_el2;
 		/* fall through */
@@ -135,8 +135,8 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	case ESR_ELx_EC_BRK64:
 		break;
 	default:
-		kvm_err("%s: un-handled case hsr: %#08x\n",
-			__func__, (unsigned int) hsr);
+		kvm_err("%s: un-handled case esr: %#08x\n",
+			__func__, (unsigned int) esr);
 		ret = -1;
 		break;
 	}
@@ -146,10 +146,10 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
 static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
 
-	kvm_pr_unimpl("Unknown exception class: hsr: %#08x -- %s\n",
-		      hsr, esr_get_class_string(hsr));
+	kvm_pr_unimpl("Unknown exception class: esr: %#08x -- %s\n",
+		      esr, esr_get_class_string(esr));
 
 	kvm_inject_undefined(vcpu);
 	return 1;
@@ -200,10 +200,10 @@ static exit_handle_fn arm_exit_handlers[] = {
 
 static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
 {
-	u32 hsr = kvm_vcpu_get_hsr(vcpu);
-	u8 hsr_ec = ESR_ELx_EC(hsr);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
+	u8 esr_ec = ESR_ELx_EC(esr);
 
-	return arm_exit_handlers[hsr_ec];
+	return arm_exit_handlers[esr_ec];
 }
 
 /*
@@ -241,15 +241,15 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		       int exception_index)
 {
 	if (ARM_SERROR_PENDING(exception_index)) {
-		u8 hsr_ec = ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
+		u8 esr_ec = ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
 
 		/*
 		 * HVC/SMC already have an adjusted PC, which we need
 		 * to correct in order to return to after having
 		 * injected the SError.
 		 */
-		if (hsr_ec == ESR_ELx_EC_HVC32 || hsr_ec == ESR_ELx_EC_HVC64 ||
-		    hsr_ec == ESR_ELx_EC_SMC32 || hsr_ec == ESR_ELx_EC_SMC64) {
+		if (esr_ec == ESR_ELx_EC_HVC32 || esr_ec == ESR_ELx_EC_HVC64 ||
+		    esr_ec == ESR_ELx_EC_SMC32 || esr_ec == ESR_ELx_EC_SMC64) {
 			u32 adj = kvm_vcpu_trap_il_is32bit(vcpu) ? 4 : 2;
 			*vcpu_pc(vcpu) -= adj;
 		}
@@ -307,5 +307,5 @@ void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
 	exception_index = ARM_EXCEPTION_CODE(exception_index);
 
 	if (exception_index == ARM_EXCEPTION_EL1_SERROR)
-		kvm_handle_guest_serror(vcpu, kvm_vcpu_get_hsr(vcpu));
+		kvm_handle_guest_serror(vcpu, kvm_vcpu_get_esr(vcpu));
 }
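The handle_exit.c changes keep the existing dispatch shape: kvm_get_exit_handler() still extracts the exception class from the syndrome and indexes a handler table with it. Below is a stripped-down, compilable sketch of that pattern, with hypothetical handler names and a toy table standing in for arm_exit_handlers[]:

#include <stdint.h>
#include <stdio.h>

#define ESR_ELx_EC_SHIFT	26
#define ESR_ELx_EC_MASK		(0x3FUL << ESR_ELx_EC_SHIFT)
#define ESR_ELx_EC_MAX		0x3F

typedef int (*exit_handler_fn)(uint32_t esr);

/* Hypothetical handlers standing in for kvm_handle_wfx(), kvm_handle_unknown_ec(), ... */
static int handle_unknown(uint32_t esr)
{
	printf("unknown exception class, esr: %#08x\n", (unsigned int)esr);
	return 1;
}

static int handle_wfx(uint32_t esr)
{
	printf("WFI/WFE trap, esr: %#08x\n", (unsigned int)esr);
	return 1;
}

/* Table indexed by EC; unhandled classes fall back to handle_unknown(). */
static exit_handler_fn exit_handlers[ESR_ELx_EC_MAX + 1] = {
	[0 ... ESR_ELx_EC_MAX]	= handle_unknown,	/* GCC range designator */
	[0x01]			= handle_wfx,		/* EC 0x01: WFI/WFE */
};

static exit_handler_fn get_exit_handler(uint32_t esr)
{
	uint8_t esr_ec = (esr & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT;

	return exit_handlers[esr_ec];
}

int main(void)
{
	uint32_t esr = 0x01U << ESR_ELx_EC_SHIFT;	/* a WFI/WFE syndrome */

	get_exit_handler(esr)(esr);
	return 0;
}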

arch/arm64/kvm/hyp/aarch32.c

Lines changed: 1 addition & 1 deletion
@@ -51,7 +51,7 @@ bool kvm_condition_valid32(const struct kvm_vcpu *vcpu)
 	int cond;
 
 	/* Top two bits non-zero? Unconditional. */
-	if (kvm_vcpu_get_hsr(vcpu) >> 30)
+	if (kvm_vcpu_get_esr(vcpu) >> 30)
 		return true;
 
 	/* Is condition field valid? */

arch/arm64/kvm/hyp/include/hyp/switch.h

Lines changed: 7 additions & 7 deletions
@@ -199,7 +199,7 @@ static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
 static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 {
 	bool vhe, sve_guest, sve_host;
-	u8 hsr_ec;
+	u8 esr_ec;
 
 	if (!system_supports_fpsimd())
 		return false;
@@ -219,14 +219,14 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 		vhe = has_vhe();
 	}
 
-	hsr_ec = kvm_vcpu_trap_get_class(vcpu);
-	if (hsr_ec != ESR_ELx_EC_FP_ASIMD &&
-	    hsr_ec != ESR_ELx_EC_SVE)
+	esr_ec = kvm_vcpu_trap_get_class(vcpu);
+	if (esr_ec != ESR_ELx_EC_FP_ASIMD &&
+	    esr_ec != ESR_ELx_EC_SVE)
 		return false;
 
 	/* Don't handle SVE traps for non-SVE vcpus here: */
 	if (!sve_guest)
-		if (hsr_ec != ESR_ELx_EC_FP_ASIMD)
+		if (esr_ec != ESR_ELx_EC_FP_ASIMD)
 			return false;
 
 	/* Valid trap. Switch the context: */
@@ -284,7 +284,7 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 
 static inline bool handle_tx2_tvm(struct kvm_vcpu *vcpu)
 {
-	u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_hsr(vcpu));
+	u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
 	int rt = kvm_vcpu_sys_get_rt(vcpu);
 	u64 val = vcpu_get_reg(vcpu, rt);
 
@@ -379,7 +379,7 @@ static inline bool __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
 	u64 val;
 
 	if (!vcpu_has_ptrauth(vcpu) ||
-	    !esr_is_ptrauth_trap(kvm_vcpu_get_hsr(vcpu)))
+	    !esr_is_ptrauth_trap(kvm_vcpu_get_esr(vcpu)))
 		return false;
 
 	ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;

arch/arm64/kvm/hyp/vgic-v3-sr.c

Lines changed: 2 additions & 2 deletions
@@ -426,7 +426,7 @@ static int __vgic_v3_bpr_min(void)
 
 static int __vgic_v3_get_group(struct kvm_vcpu *vcpu)
 {
-	u32 esr = kvm_vcpu_get_hsr(vcpu);
+	u32 esr = kvm_vcpu_get_esr(vcpu);
 	u8 crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
 
 	return crm != 8;
@@ -978,7 +978,7 @@ int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
 	bool is_read;
 	u32 sysreg;
 
-	esr = kvm_vcpu_get_hsr(vcpu);
+	esr = kvm_vcpu_get_esr(vcpu);
 	if (vcpu_mode_is_32bit(vcpu)) {
 		if (!kvm_condition_valid(vcpu)) {
 			__kvm_skip_instr(vcpu);
