Skip to content

Commit 8530684

Browse files
committed
Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 fixes from Will Deacon: "The big fix here is to our vDSO sigreturn trampoline as, after a painfully long stint of debugging, it turned out that fixing some of our CFI directives in the merge window lit up a bunch of logic in libgcc which has been shown to SEGV in some cases during asynchronous pthread cancellation. It looks like we can fix this by extending the directives to restore most of the interrupted register state from the sigcontext, but it's risky and hard to test so we opted to remove the CFI directives for now and rely on the unwinder fallback path like we used to. - Fix unwinding through vDSO sigreturn trampoline - Fix build warnings by raising minimum LD version for PAC - Whitelist some Kryo Cortex-A55 derivatives for Meltdown and SSB - Fix perf register PC reporting for compat tasks - Fix 'make clean' warning for arm64 signal selftests - Fix ftrace when BTI is compiled in - Avoid building the compat vDSO using GCC plugins" * tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: arm64: Add KRYO{3,4}XX silver CPU cores to SSB safelist arm64: perf: Report the PC value in REGS_ABI_32 mode kselftest: arm64: Remove redundant clean target arm64: kpti: Add KRYO{3, 4}XX silver CPU cores to kpti safelist arm64: Don't insert a BTI instruction at inner labels arm64: vdso: Don't use gcc plugins for building vgettimeofday.c arm64: vdso: Only pass --no-eh-frame-hdr when linker supports it arm64: Depend on newer binutils when building PAC arm64: compat: Remove 32-bit sigreturn code from the vDSO arm64: compat: Always use sigpage for sigreturn trampoline arm64: compat: Allow 32-bit vdso and sigpage to co-exist arm64: vdso: Disable dwarf unwinding through the sigreturn trampoline
2 parents 1590a2e + 108447f commit 8530684

File tree

15 files changed

+98
-170
lines changed

15 files changed

+98
-170
lines changed

arch/arm64/Kconfig

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1518,9 +1518,9 @@ config ARM64_PTR_AUTH
15181518
default y
15191519
depends on !KVM || ARM64_VHE
15201520
depends on (CC_HAS_SIGN_RETURN_ADDRESS || CC_HAS_BRANCH_PROT_PAC_RET) && AS_HAS_PAC
1521-
# GCC 9.1 and later inserts a .note.gnu.property section note for PAC
1521+
# Modern compilers insert a .note.gnu.property section note for PAC
15221522
# which is only understood by binutils starting with version 2.33.1.
1523-
depends on !CC_IS_GCC || GCC_VERSION < 90100 || LD_VERSION >= 233010000
1523+
depends on LD_IS_LLD || LD_VERSION >= 233010000 || (CC_IS_GCC && GCC_VERSION < 90100)
15241524
depends on !CC_IS_CLANG || AS_HAS_CFI_NEGATE_RA_STATE
15251525
depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
15261526
help

arch/arm64/include/asm/linkage.h

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,6 @@
1212
* instead.
1313
*/
1414
#define BTI_C hint 34 ;
15-
#define BTI_J hint 36 ;
1615

1716
/*
1817
* When using in-kernel BTI we need to ensure that PCS-conformant assembly
@@ -43,11 +42,6 @@
4342
SYM_START(name, SYM_L_WEAK, SYM_A_NONE) \
4443
BTI_C
4544

46-
#define SYM_INNER_LABEL(name, linkage) \
47-
.type name SYM_T_NONE ASM_NL \
48-
SYM_ENTRY(name, linkage, SYM_A_NONE) \
49-
BTI_J
50-
5145
#endif
5246

5347
/*

arch/arm64/include/asm/mmu.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,9 @@
1919

2020
typedef struct {
2121
atomic64_t id;
22+
#ifdef CONFIG_COMPAT
23+
void *sigpage;
24+
#endif
2225
void *vdso;
2326
unsigned long flags;
2427
} mm_context_t;

arch/arm64/kernel/Makefile

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -29,9 +29,7 @@ $(obj)/%.stub.o: $(obj)/%.o FORCE
2929

3030
obj-$(CONFIG_COMPAT) += sys32.o signal32.o \
3131
sys_compat.o
32-
ifneq ($(CONFIG_COMPAT_VDSO), y)
3332
obj-$(CONFIG_COMPAT) += sigreturn32.o
34-
endif
3533
obj-$(CONFIG_KUSER_HELPERS) += kuser32.o
3634
obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
3735
obj-$(CONFIG_MODULES) += module.o

arch/arm64/kernel/cpu_errata.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -460,6 +460,8 @@ static const struct midr_range arm64_ssb_cpus[] = {
460460
MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
461461
MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
462462
MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
463+
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
464+
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
463465
{},
464466
};
465467

arch/arm64/kernel/cpufeature.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1290,6 +1290,8 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
12901290
MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
12911291
MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
12921292
MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
1293+
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
1294+
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
12931295
{ /* sentinel */ }
12941296
};
12951297
char const *str = "kpti command line option";

arch/arm64/kernel/perf_regs.c

Lines changed: 22 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -15,15 +15,34 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
1515
return 0;
1616

1717
/*
18-
* Compat (i.e. 32 bit) mode:
19-
* - PC has been set in the pt_regs struct in kernel_entry,
20-
* - Handle SP and LR here.
18+
* Our handling of compat tasks (PERF_SAMPLE_REGS_ABI_32) is weird, but
19+
 * we're stuck with it for ABI compatibility reasons.
20+
*
21+
* For a 32-bit consumer inspecting a 32-bit task, then it will look at
22+
* the first 16 registers (see arch/arm/include/uapi/asm/perf_regs.h).
23+
* These correspond directly to a prefix of the registers saved in our
24+
* 'struct pt_regs', with the exception of the PC, so we copy that down
25+
* (x15 corresponds to SP_hyp in the architecture).
26+
*
27+
* So far, so good.
28+
*
29+
* The oddity arises when a 64-bit consumer looks at a 32-bit task and
30+
* asks for registers beyond PERF_REG_ARM_MAX. In this case, we return
31+
* SP_usr, LR_usr and PC in the positions where the AArch64 SP, LR and
32+
* PC registers would normally live. The initial idea was to allow a
33+
* 64-bit unwinder to unwind a 32-bit task and, although it's not clear
34+
* how well that works in practice, somebody might be relying on it.
35+
*
36+
* At the time we make a sample, we don't know whether the consumer is
37+
* 32-bit or 64-bit, so we have to cater for both possibilities.
2138
*/
2239
if (compat_user_mode(regs)) {
2340
if ((u32)idx == PERF_REG_ARM64_SP)
2441
return regs->compat_sp;
2542
if ((u32)idx == PERF_REG_ARM64_LR)
2643
return regs->compat_lr;
44+
if (idx == 15)
45+
return regs->pc;
2746
}
2847

2948
if ((u32)idx == PERF_REG_ARM64_SP)

arch/arm64/kernel/signal32.c

Lines changed: 1 addition & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -342,38 +342,13 @@ static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka,
342342
retcode = ptr_to_compat(ka->sa.sa_restorer);
343343
} else {
344344
/* Set up sigreturn pointer */
345-
#ifdef CONFIG_COMPAT_VDSO
346-
void *vdso_base = current->mm->context.vdso;
347-
void *vdso_trampoline;
348-
349-
if (ka->sa.sa_flags & SA_SIGINFO) {
350-
if (thumb) {
351-
vdso_trampoline = VDSO_SYMBOL(vdso_base,
352-
compat_rt_sigreturn_thumb);
353-
} else {
354-
vdso_trampoline = VDSO_SYMBOL(vdso_base,
355-
compat_rt_sigreturn_arm);
356-
}
357-
} else {
358-
if (thumb) {
359-
vdso_trampoline = VDSO_SYMBOL(vdso_base,
360-
compat_sigreturn_thumb);
361-
} else {
362-
vdso_trampoline = VDSO_SYMBOL(vdso_base,
363-
compat_sigreturn_arm);
364-
}
365-
}
366-
367-
retcode = ptr_to_compat(vdso_trampoline) + thumb;
368-
#else
369345
unsigned int idx = thumb << 1;
370346

371347
if (ka->sa.sa_flags & SA_SIGINFO)
372348
idx += 3;
373349

374-
retcode = (unsigned long)current->mm->context.vdso +
350+
retcode = (unsigned long)current->mm->context.sigpage +
375351
(idx << 2) + thumb;
376-
#endif
377352
}
378353

379354
regs->regs[0] = usig;

arch/arm64/kernel/vdso.c

Lines changed: 28 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -191,15 +191,12 @@ enum aarch32_map {
191191
#ifdef CONFIG_COMPAT_VDSO
192192
AA32_MAP_VVAR,
193193
AA32_MAP_VDSO,
194-
#else
195-
AA32_MAP_SIGPAGE
196194
#endif
195+
AA32_MAP_SIGPAGE
197196
};
198197

199198
static struct page *aarch32_vectors_page __ro_after_init;
200-
#ifndef CONFIG_COMPAT_VDSO
201199
static struct page *aarch32_sig_page __ro_after_init;
202-
#endif
203200

204201
static struct vm_special_mapping aarch32_vdso_maps[] = {
205202
[AA32_MAP_VECTORS] = {
@@ -214,12 +211,11 @@ static struct vm_special_mapping aarch32_vdso_maps[] = {
214211
.name = "[vdso]",
215212
.mremap = aarch32_vdso_mremap,
216213
},
217-
#else
214+
#endif /* CONFIG_COMPAT_VDSO */
218215
[AA32_MAP_SIGPAGE] = {
219216
.name = "[sigpage]", /* ABI */
220217
.pages = &aarch32_sig_page,
221218
},
222-
#endif /* CONFIG_COMPAT_VDSO */
223219
};
224220

225221
static int aarch32_alloc_kuser_vdso_page(void)
@@ -242,27 +238,11 @@ static int aarch32_alloc_kuser_vdso_page(void)
242238
return 0;
243239
}
244240

245-
#ifdef CONFIG_COMPAT_VDSO
246-
static int __aarch32_alloc_vdso_pages(void)
247-
{
248-
int ret;
249-
250-
vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR];
251-
vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];
252-
253-
ret = __vdso_init(VDSO_ABI_AA32);
254-
if (ret)
255-
return ret;
256-
257-
return aarch32_alloc_kuser_vdso_page();
258-
}
259-
#else
260-
static int __aarch32_alloc_vdso_pages(void)
241+
static int aarch32_alloc_sigpage(void)
261242
{
262243
extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
263244
int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
264245
unsigned long sigpage;
265-
int ret;
266246

267247
sigpage = get_zeroed_page(GFP_ATOMIC);
268248
if (!sigpage)
@@ -271,18 +251,34 @@ static int __aarch32_alloc_vdso_pages(void)
271251
memcpy((void *)sigpage, __aarch32_sigret_code_start, sigret_sz);
272252
aarch32_sig_page = virt_to_page(sigpage);
273253
flush_dcache_page(aarch32_sig_page);
254+
return 0;
255+
}
274256

275-
ret = aarch32_alloc_kuser_vdso_page();
276-
if (ret)
277-
free_page(sigpage);
257+
#ifdef CONFIG_COMPAT_VDSO
258+
static int __aarch32_alloc_vdso_pages(void)
259+
{
260+
vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR];
261+
vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];
278262

279-
return ret;
263+
return __vdso_init(VDSO_ABI_AA32);
280264
}
281265
#endif /* CONFIG_COMPAT_VDSO */
282266

283267
static int __init aarch32_alloc_vdso_pages(void)
284268
{
285-
return __aarch32_alloc_vdso_pages();
269+
int ret;
270+
271+
#ifdef CONFIG_COMPAT_VDSO
272+
ret = __aarch32_alloc_vdso_pages();
273+
if (ret)
274+
return ret;
275+
#endif
276+
277+
ret = aarch32_alloc_sigpage();
278+
if (ret)
279+
return ret;
280+
281+
return aarch32_alloc_kuser_vdso_page();
286282
}
287283
arch_initcall(aarch32_alloc_vdso_pages);
288284

@@ -305,7 +301,6 @@ static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
305301
return PTR_ERR_OR_ZERO(ret);
306302
}
307303

308-
#ifndef CONFIG_COMPAT_VDSO
309304
static int aarch32_sigreturn_setup(struct mm_struct *mm)
310305
{
311306
unsigned long addr;
@@ -328,12 +323,11 @@ static int aarch32_sigreturn_setup(struct mm_struct *mm)
328323
if (IS_ERR(ret))
329324
goto out;
330325

331-
mm->context.vdso = (void *)addr;
326+
mm->context.sigpage = (void *)addr;
332327

333328
out:
334329
return PTR_ERR_OR_ZERO(ret);
335330
}
336-
#endif /* !CONFIG_COMPAT_VDSO */
337331

338332
int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
339333
{
@@ -352,10 +346,11 @@ int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
352346
mm,
353347
bprm,
354348
uses_interp);
355-
#else
356-
ret = aarch32_sigreturn_setup(mm);
349+
if (ret)
350+
goto out;
357351
#endif /* CONFIG_COMPAT_VDSO */
358352

353+
ret = aarch32_sigreturn_setup(mm);
359354
out:
360355
mmap_write_unlock(mm);
361356
return ret;

arch/arm64/kernel/vdso/Makefile

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -23,13 +23,14 @@ btildflags-$(CONFIG_ARM64_BTI_KERNEL) += -z force-bti
2323
# potential future proofing if we end up with internal calls to the exported
2424
# routines, as x86 does (see 6f121e548f83 ("x86, vdso: Reimplement vdso.so
2525
# preparation in build-time C")).
26-
ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 --hash-style=sysv \
27-
-Bsymbolic --eh-frame-hdr --build-id -n $(btildflags-y) -T
26+
ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 --hash-style=sysv \
27+
-Bsymbolic $(call ld-option, --no-eh-frame-hdr) --build-id -n \
28+
$(btildflags-y) -T
2829

2930
ccflags-y := -fno-common -fno-builtin -fno-stack-protector -ffixed-x18
3031
ccflags-y += -DDISABLE_BRANCH_PROFILING
3132

32-
CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os $(CC_FLAGS_SCS)
33+
CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os $(CC_FLAGS_SCS) $(GCC_PLUGINS_CFLAGS)
3334
KBUILD_CFLAGS += $(DISABLE_LTO)
3435
KASAN_SANITIZE := n
3536
UBSAN_SANITIZE := n

0 commit comments

Comments (0)