Skip to content

Commit f69f5aa

Browse files
committed
Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 fixes from Will Deacon:

 - Fix bogus KASAN splat on EFI runtime stack

 - Select JUMP_LABEL unconditionally to avoid boot failure with pKVM
   and the legacy implementation of static keys

 - Avoid touching GCS registers when 'arm64.nogcs' has been passed on
   the command-line

 - Move a 'cpumask_t' off the stack in smp_send_stop()

 - Don't advertise SME-related hwcaps to userspace when
   ID_AA64PFR1_EL1 indicates that SME is not implemented

 - Always check the VMA when handling an Overlay fault

 - Avoid corrupting TCR2_EL1 during boot

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64/mm: Drop wrong writes into TCR2_EL1
  arm64: poe: Handle spurious Overlay faults
  arm64: Filter out SME hwcaps when FEAT_SME isn't implemented
  arm64: move smp_send_stop() cpu mask off stack
  arm64/gcs: Don't try to access GCS registers if arm64.nogcs is enabled
  arm64: Unconditionally select CONFIG_JUMP_LABEL
  arm64: efi: Fix KASAN false positive for EFI runtime stack
2 parents 9adf143 + 9dd1757 commit f69f5aa

File tree

9 files changed

+76
-53
lines changed

9 files changed

+76
-53
lines changed

arch/arm64/Kconfig

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -256,6 +256,7 @@ config ARM64
256256
select HOTPLUG_SMT if HOTPLUG_CPU
257257
select IRQ_DOMAIN
258258
select IRQ_FORCED_THREADING
259+
select JUMP_LABEL
259260
select KASAN_VMALLOC if KASAN
260261
select LOCK_MM_AND_FIND_VMA
261262
select MODULES_USE_ELF_RELA

arch/arm64/include/asm/el2_setup.h

Lines changed: 7 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -287,17 +287,6 @@
287287
.Lskip_fgt2_\@:
288288
.endm
289289

290-
.macro __init_el2_gcs
291-
mrs_s x1, SYS_ID_AA64PFR1_EL1
292-
ubfx x1, x1, #ID_AA64PFR1_EL1_GCS_SHIFT, #4
293-
cbz x1, .Lskip_gcs_\@
294-
295-
/* Ensure GCS is not enabled when we start trying to do BLs */
296-
msr_s SYS_GCSCR_EL1, xzr
297-
msr_s SYS_GCSCRE0_EL1, xzr
298-
.Lskip_gcs_\@:
299-
.endm
300-
301290
/**
302291
* Initialize EL2 registers to sane values. This should be called early on all
303292
* cores that were booted in EL2. Note that everything gets initialised as
@@ -319,7 +308,6 @@
319308
__init_el2_cptr
320309
__init_el2_fgt
321310
__init_el2_fgt2
322-
__init_el2_gcs
323311
.endm
324312

325313
#ifndef __KVM_NVHE_HYPERVISOR__
@@ -371,6 +359,13 @@
371359
msr_s SYS_MPAMHCR_EL2, xzr // clear TRAP_MPAMIDR_EL1 -> EL2
372360

373361
.Lskip_mpam_\@:
362+
check_override id_aa64pfr1, ID_AA64PFR1_EL1_GCS_SHIFT, .Linit_gcs_\@, .Lskip_gcs_\@, x1, x2
363+
364+
.Linit_gcs_\@:
365+
msr_s SYS_GCSCR_EL1, xzr
366+
msr_s SYS_GCSCRE0_EL1, xzr
367+
368+
.Lskip_gcs_\@:
374369
check_override id_aa64pfr0, ID_AA64PFR0_EL1_SVE_SHIFT, .Linit_sve_\@, .Lskip_sve_\@, x1, x2
375370

376371
.Linit_sve_\@: /* SVE register access */

arch/arm64/kernel/Makefile

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,7 @@ obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
3434
cpufeature.o alternative.o cacheinfo.o \
3535
smp.o smp_spin_table.o topology.o smccc-call.o \
3636
syscall.o proton-pack.o idle.o patching.o pi/ \
37-
rsi.o
37+
rsi.o jump_label.o
3838

3939
obj-$(CONFIG_COMPAT) += sys32.o signal32.o \
4040
sys_compat.o
@@ -47,7 +47,6 @@ obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o
4747
obj-$(CONFIG_HARDLOCKUP_DETECTOR_PERF) += watchdog_hld.o
4848
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
4949
obj-$(CONFIG_CPU_PM) += sleep.o suspend.o
50-
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
5150
obj-$(CONFIG_KGDB) += kgdb.o
5251
obj-$(CONFIG_EFI) += efi.o efi-rt-wrapper.o
5352
obj-$(CONFIG_PCI) += pci.o

arch/arm64/kernel/cpufeature.c

Lines changed: 32 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -3135,6 +3135,13 @@ static bool has_sve_feature(const struct arm64_cpu_capabilities *cap, int scope)
31353135
}
31363136
#endif
31373137

3138+
#ifdef CONFIG_ARM64_SME
3139+
static bool has_sme_feature(const struct arm64_cpu_capabilities *cap, int scope)
3140+
{
3141+
return system_supports_sme() && has_user_cpuid_feature(cap, scope);
3142+
}
3143+
#endif
3144+
31383145
static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
31393146
HWCAP_CAP(ID_AA64ISAR0_EL1, AES, PMULL, CAP_HWCAP, KERNEL_HWCAP_PMULL),
31403147
HWCAP_CAP(ID_AA64ISAR0_EL1, AES, AES, CAP_HWCAP, KERNEL_HWCAP_AES),
@@ -3223,31 +3230,31 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
32233230
HWCAP_CAP(ID_AA64ISAR2_EL1, BC, IMP, CAP_HWCAP, KERNEL_HWCAP_HBC),
32243231
#ifdef CONFIG_ARM64_SME
32253232
HWCAP_CAP(ID_AA64PFR1_EL1, SME, IMP, CAP_HWCAP, KERNEL_HWCAP_SME),
3226-
HWCAP_CAP(ID_AA64SMFR0_EL1, FA64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64),
3227-
HWCAP_CAP(ID_AA64SMFR0_EL1, LUTv2, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_LUTV2),
3228-
HWCAP_CAP(ID_AA64SMFR0_EL1, SMEver, SME2p2, CAP_HWCAP, KERNEL_HWCAP_SME2P2),
3229-
HWCAP_CAP(ID_AA64SMFR0_EL1, SMEver, SME2p1, CAP_HWCAP, KERNEL_HWCAP_SME2P1),
3230-
HWCAP_CAP(ID_AA64SMFR0_EL1, SMEver, SME2, CAP_HWCAP, KERNEL_HWCAP_SME2),
3231-
HWCAP_CAP(ID_AA64SMFR0_EL1, I16I64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I64),
3232-
HWCAP_CAP(ID_AA64SMFR0_EL1, F64F64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F64F64),
3233-
HWCAP_CAP(ID_AA64SMFR0_EL1, I16I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I32),
3234-
HWCAP_CAP(ID_AA64SMFR0_EL1, B16B16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16B16),
3235-
HWCAP_CAP(ID_AA64SMFR0_EL1, F16F16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F16),
3236-
HWCAP_CAP(ID_AA64SMFR0_EL1, F8F16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F8F16),
3237-
HWCAP_CAP(ID_AA64SMFR0_EL1, F8F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F8F32),
3238-
HWCAP_CAP(ID_AA64SMFR0_EL1, I8I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I8I32),
3239-
HWCAP_CAP(ID_AA64SMFR0_EL1, F16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F32),
3240-
HWCAP_CAP(ID_AA64SMFR0_EL1, B16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16F32),
3241-
HWCAP_CAP(ID_AA64SMFR0_EL1, BI32I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_BI32I32),
3242-
HWCAP_CAP(ID_AA64SMFR0_EL1, F32F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F32F32),
3243-
HWCAP_CAP(ID_AA64SMFR0_EL1, SF8FMA, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8FMA),
3244-
HWCAP_CAP(ID_AA64SMFR0_EL1, SF8DP4, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8DP4),
3245-
HWCAP_CAP(ID_AA64SMFR0_EL1, SF8DP2, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8DP2),
3246-
HWCAP_CAP(ID_AA64SMFR0_EL1, SBitPerm, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SBITPERM),
3247-
HWCAP_CAP(ID_AA64SMFR0_EL1, AES, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_AES),
3248-
HWCAP_CAP(ID_AA64SMFR0_EL1, SFEXPA, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SFEXPA),
3249-
HWCAP_CAP(ID_AA64SMFR0_EL1, STMOP, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_STMOP),
3250-
HWCAP_CAP(ID_AA64SMFR0_EL1, SMOP4, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SMOP4),
3233+
HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, FA64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_FA64),
3234+
HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, LUTv2, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_LUTV2),
3235+
HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SMEver, SME2p2, CAP_HWCAP, KERNEL_HWCAP_SME2P2),
3236+
HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SMEver, SME2p1, CAP_HWCAP, KERNEL_HWCAP_SME2P1),
3237+
HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SMEver, SME2, CAP_HWCAP, KERNEL_HWCAP_SME2),
3238+
HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, I16I64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I64),
3239+
HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F64F64, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F64F64),
3240+
HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, I16I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I16I32),
3241+
HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, B16B16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16B16),
3242+
HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F16F16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F16),
3243+
HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F8F16, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F8F16),
3244+
HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F8F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F8F32),
3245+
HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, I8I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_I8I32),
3246+
HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F16F32),
3247+
HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, B16F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_B16F32),
3248+
HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, BI32I32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_BI32I32),
3249+
HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, F32F32, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_F32F32),
3250+
HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SF8FMA, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8FMA),
3251+
HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SF8DP4, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8DP4),
3252+
HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SF8DP2, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SF8DP2),
3253+
HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SBitPerm, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SBITPERM),
3254+
HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, AES, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_AES),
3255+
HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SFEXPA, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SFEXPA),
3256+
HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, STMOP, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_STMOP),
3257+
HWCAP_CAP_MATCH_ID(has_sme_feature, ID_AA64SMFR0_EL1, SMOP4, IMP, CAP_HWCAP, KERNEL_HWCAP_SME_SMOP4),
32513258
#endif /* CONFIG_ARM64_SME */
32523259
HWCAP_CAP(ID_AA64FPFR0_EL1, F8CVT, IMP, CAP_HWCAP, KERNEL_HWCAP_F8CVT),
32533260
HWCAP_CAP(ID_AA64FPFR0_EL1, F8FMA, IMP, CAP_HWCAP, KERNEL_HWCAP_F8FMA),

arch/arm64/kernel/efi.c

Lines changed: 8 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@
1515

1616
#include <asm/efi.h>
1717
#include <asm/stacktrace.h>
18+
#include <asm/vmap_stack.h>
1819

1920
static bool region_is_misaligned(const efi_memory_desc_t *md)
2021
{
@@ -214,9 +215,13 @@ static int __init arm64_efi_rt_init(void)
214215
if (!efi_enabled(EFI_RUNTIME_SERVICES))
215216
return 0;
216217

217-
p = __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, GFP_KERNEL,
218-
NUMA_NO_NODE, &&l);
219-
l: if (!p) {
218+
if (!IS_ENABLED(CONFIG_VMAP_STACK)) {
219+
clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
220+
return -ENOMEM;
221+
}
222+
223+
p = arch_alloc_vmap_stack(THREAD_SIZE, NUMA_NO_NODE);
224+
if (!p) {
220225
pr_warn("Failed to allocate EFI runtime stack\n");
221226
clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
222227
return -ENOMEM;

arch/arm64/kernel/process.c

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -673,6 +673,11 @@ static void permission_overlay_switch(struct task_struct *next)
673673
current->thread.por_el0 = read_sysreg_s(SYS_POR_EL0);
674674
if (current->thread.por_el0 != next->thread.por_el0) {
675675
write_sysreg_s(next->thread.por_el0, SYS_POR_EL0);
676+
/*
677+
* No ISB required as we can tolerate spurious Overlay faults -
678+
* the fault handler will check again based on the new value
679+
* of POR_EL0.
680+
*/
676681
}
677682
}
678683

arch/arm64/kernel/smp.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1143,7 +1143,7 @@ static inline unsigned int num_other_online_cpus(void)
11431143
void smp_send_stop(void)
11441144
{
11451145
static unsigned long stop_in_progress;
1146-
cpumask_t mask;
1146+
static cpumask_t mask;
11471147
unsigned long timeout;
11481148

11491149
/*

arch/arm64/mm/fault.c

Lines changed: 21 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -487,17 +487,29 @@ static void do_bad_area(unsigned long far, unsigned long esr,
487487
}
488488
}
489489

490-
static bool fault_from_pkey(unsigned long esr, struct vm_area_struct *vma,
491-
unsigned int mm_flags)
490+
static bool fault_from_pkey(struct vm_area_struct *vma, unsigned int mm_flags)
492491
{
493-
unsigned long iss2 = ESR_ELx_ISS2(esr);
494-
495492
if (!system_supports_poe())
496493
return false;
497494

498-
if (esr_fsc_is_permission_fault(esr) && (iss2 & ESR_ELx_Overlay))
499-
return true;
500-
495+
/*
496+
* We do not check whether an Overlay fault has occurred because we
497+
* cannot make a decision based solely on its value:
498+
*
499+
* - If Overlay is set, a fault did occur due to POE, but it may be
500+
* spurious in those cases where we update POR_EL0 without ISB (e.g.
501+
* on context-switch). We would then need to manually check POR_EL0
502+
* against vma_pkey(vma), which is exactly what
503+
* arch_vma_access_permitted() does.
504+
*
505+
* - If Overlay is not set, we may still need to report a pkey fault.
506+
* This is the case if an access was made within a mapping but with no
507+
* page mapped, and POR_EL0 forbids the access (according to
508+
* vma_pkey()). Such access will result in a SIGSEGV regardless
509+
* because core code checks arch_vma_access_permitted(), but in order
510+
* to report the correct error code - SEGV_PKUERR - we must handle
511+
* that case here.
512+
*/
501513
return !arch_vma_access_permitted(vma,
502514
mm_flags & FAULT_FLAG_WRITE,
503515
mm_flags & FAULT_FLAG_INSTRUCTION,
@@ -635,7 +647,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
635647
goto bad_area;
636648
}
637649

638-
if (fault_from_pkey(esr, vma, mm_flags)) {
650+
if (fault_from_pkey(vma, mm_flags)) {
639651
pkey = vma_pkey(vma);
640652
vma_end_read(vma);
641653
fault = 0;
@@ -679,7 +691,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
679691
goto bad_area;
680692
}
681693

682-
if (fault_from_pkey(esr, vma, mm_flags)) {
694+
if (fault_from_pkey(vma, mm_flags)) {
683695
pkey = vma_pkey(vma);
684696
mmap_read_unlock(mm);
685697
fault = 0;

arch/arm64/mm/proc.S

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -518,7 +518,6 @@ alternative_else_nop_endif
518518
msr REG_PIR_EL1, x0
519519

520520
orr tcr2, tcr2, TCR2_EL1_PIE
521-
msr REG_TCR2_EL1, x0
522521

523522
.Lskip_indirection:
524523

0 commit comments

Comments (0)