Skip to content

Commit f66d6ac

Browse files
committed
Merge tag 'x86_urgent_for_v6.12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Borislav Petkov:

 - Make sure a kdump kernel with CONFIG_IMA_KEXEC enabled and booted on
   an AMD SME enabled hardware properly decrypts the ima_kexec buffer
   information passed to it from the previous kernel

 - Fix building the kernel with Clang where a non-TLS definition of the
   stack protector guard cookie leads to bogus code generation

 - Clear a wrongly advertised virtualized VMLOAD/VMSAVE feature flag on
   some Zen4 client systems as those insns are not supported on client

* tag 'x86_urgent_for_v6.12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mm: Fix a kdump kernel failure on SME system when CONFIG_IMA_KEXEC=y
  x86/stackprotector: Work around strict Clang TLS symbol requirements
  x86/CPU/AMD: Clear virtualized VMLOAD/VMSAVE on Zen4 client
2 parents 4a5df37 + 8d9ffb2 commit f66d6ac

File tree

7 files changed

+42
-4
lines changed

7 files changed

+42
-4
lines changed

arch/x86/Makefile

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -142,9 +142,10 @@ ifeq ($(CONFIG_X86_32),y)
142142

143143
ifeq ($(CONFIG_STACKPROTECTOR),y)
144144
ifeq ($(CONFIG_SMP),y)
145-
KBUILD_CFLAGS += -mstack-protector-guard-reg=fs -mstack-protector-guard-symbol=__stack_chk_guard
145+
KBUILD_CFLAGS += -mstack-protector-guard-reg=fs \
146+
-mstack-protector-guard-symbol=__ref_stack_chk_guard
146147
else
147-
KBUILD_CFLAGS += -mstack-protector-guard=global
148+
KBUILD_CFLAGS += -mstack-protector-guard=global
148149
endif
149150
endif
150151
else

arch/x86/entry/entry.S

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -51,3 +51,19 @@ EXPORT_SYMBOL_GPL(mds_verw_sel);
5151
.popsection
5252

5353
THUNK warn_thunk_thunk, __warn_thunk
54+
55+
#ifndef CONFIG_X86_64
56+
/*
57+
* Clang's implementation of TLS stack cookies requires the variable in
58+
* question to be a TLS variable. If the variable happens to be defined as an
59+
* ordinary variable with external linkage in the same compilation unit (which
60+
* amounts to the whole of vmlinux with LTO enabled), Clang will drop the
61+
* segment register prefix from the references, resulting in broken code. Work
62+
* around this by avoiding the symbol used in -mstack-protector-guard-symbol=
63+
* entirely in the C code, and use an alias emitted by the linker script
64+
* instead.
65+
*/
66+
#ifdef CONFIG_STACKPROTECTOR
67+
EXPORT_SYMBOL(__ref_stack_chk_guard);
68+
#endif
69+
#endif

arch/x86/include/asm/asm-prototypes.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -20,3 +20,6 @@
2020
extern void cmpxchg8b_emu(void);
2121
#endif
2222

23+
#if defined(__GENKSYMS__) && defined(CONFIG_STACKPROTECTOR)
24+
extern unsigned long __ref_stack_chk_guard;
25+
#endif

arch/x86/kernel/cpu/amd.c

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -924,6 +924,17 @@ static void init_amd_zen4(struct cpuinfo_x86 *c)
924924
{
925925
if (!cpu_has(c, X86_FEATURE_HYPERVISOR))
926926
msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT);
927+
928+
/*
929+
* These Zen4 SoCs advertise support for virtualized VMLOAD/VMSAVE
930+
* in some BIOS versions but they can lead to random host reboots.
931+
*/
932+
switch (c->x86_model) {
933+
case 0x18 ... 0x1f:
934+
case 0x60 ... 0x7f:
935+
clear_cpu_cap(c, X86_FEATURE_V_VMSAVE_VMLOAD);
936+
break;
937+
}
927938
}
928939

929940
static void init_amd_zen5(struct cpuinfo_x86 *c)

arch/x86/kernel/cpu/common.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2089,8 +2089,10 @@ void syscall_init(void)
20892089

20902090
#ifdef CONFIG_STACKPROTECTOR
20912091
DEFINE_PER_CPU(unsigned long, __stack_chk_guard);
2092+
#ifndef CONFIG_SMP
20922093
EXPORT_PER_CPU_SYMBOL(__stack_chk_guard);
20932094
#endif
2095+
#endif
20942096

20952097
#endif /* CONFIG_X86_64 */
20962098

arch/x86/kernel/vmlinux.lds.S

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -491,6 +491,9 @@ SECTIONS
491491
. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
492492
"kernel image bigger than KERNEL_IMAGE_SIZE");
493493

494+
/* needed for Clang - see arch/x86/entry/entry.S */
495+
PROVIDE(__ref_stack_chk_guard = __stack_chk_guard);
496+
494497
#ifdef CONFIG_X86_64
495498
/*
496499
* Per-cpu symbols which need to be offset from __per_cpu_load

arch/x86/mm/ioremap.c

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -656,7 +656,8 @@ static bool memremap_is_setup_data(resource_size_t phys_addr,
656656
paddr_next = data->next;
657657
len = data->len;
658658

659-
if ((phys_addr > paddr) && (phys_addr < (paddr + len))) {
659+
if ((phys_addr > paddr) &&
660+
(phys_addr < (paddr + sizeof(struct setup_data) + len))) {
660661
memunmap(data);
661662
return true;
662663
}
@@ -718,7 +719,8 @@ static bool __init early_memremap_is_setup_data(resource_size_t phys_addr,
718719
paddr_next = data->next;
719720
len = data->len;
720721

721-
if ((phys_addr > paddr) && (phys_addr < (paddr + len))) {
722+
if ((phys_addr > paddr) &&
723+
(phys_addr < (paddr + sizeof(struct setup_data) + len))) {
722724
early_memunmap(data, sizeof(*data));
723725
return true;
724726
}

0 commit comments

Comments
 (0)