Skip to content

Commit 9d8e0d5

Browse files
committed
Merge tag 'x86-boot-2024-05-13' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 boot updates from Ingo Molnar:

- Move the kernel cmdline setup earlier in the boot process (again), to address a split_lock_detect= boot parameter bug

- Ignore relocations in .notes sections

- Simplify boot stack setup

- Re-introduce a bootloader quirk wrt CR4 handling

- Miscellaneous cleanups & fixes

* tag 'x86-boot-2024-05-13' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/boot/64: Clear most of CR4 in startup_64(), except PAE, MCE and LA57
  x86/boot: Move kernel cmdline setup earlier in the boot process (again)
  x86/build: Clean up arch/x86/tools/relocs.c a bit
  x86/boot: Ignore relocations in .notes sections in walk_relocs() too
  x86: Rename __{start,end}_init_task to __{start,end}_init_stack
  x86/boot: Simplify boot stack setup
2 parents d791a4d + a0025f5 commit 9d8e0d5

File tree

9 files changed

+219
-219
lines changed

9 files changed

+219
-219
lines changed

arch/x86/boot/compressed/head_64.S

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -398,6 +398,11 @@ SYM_CODE_START(startup_64)
398398
call sev_enable
399399
#endif
400400

401+
/* Preserve only the CR4 bits that must be preserved, and clear the rest */
402+
movq %cr4, %rax
403+
andl $(X86_CR4_PAE | X86_CR4_MCE | X86_CR4_LA57), %eax
404+
movq %rax, %cr4
405+
401406
/*
402407
* configure_5level_paging() updates the number of paging levels using
403408
* a trampoline in 32-bit addressable memory if the current number does

arch/x86/include/asm/processor.h

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -635,12 +635,10 @@ static __always_inline void prefetchw(const void *x)
635635
#define KSTK_ESP(task) (task_pt_regs(task)->sp)
636636

637637
#else
638-
extern unsigned long __end_init_task[];
638+
extern unsigned long __top_init_kernel_stack[];
639639

640640
#define INIT_THREAD { \
641-
.sp = (unsigned long)&__end_init_task - \
642-
TOP_OF_KERNEL_STACK_PADDING - \
643-
sizeof(struct pt_regs), \
641+
.sp = (unsigned long)&__top_init_kernel_stack, \
644642
}
645643

646644
extern unsigned long KSTK_ESP(struct task_struct *task);

arch/x86/kernel/head_32.S

Lines changed: 1 addition & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -44,9 +44,6 @@
4444
#define X86_CAPABILITY new_cpu_data+CPUINFO_x86_capability
4545
#define X86_VENDOR_ID new_cpu_data+CPUINFO_x86_vendor_id
4646

47-
48-
#define SIZEOF_PTREGS 17*4
49-
5047
/*
5148
* Worst-case size of the kernel mapping we need to make:
5249
* a relocatable kernel can live anywhere in lowmem, so we need to be able
@@ -488,13 +485,7 @@ SYM_DATA_END(initial_page_table)
488485

489486
.data
490487
.balign 4
491-
/*
492-
* The SIZEOF_PTREGS gap is a convention which helps the in-kernel unwinder
493-
* reliably detect the end of the stack.
494-
*/
495-
SYM_DATA(initial_stack,
496-
.long init_thread_union + THREAD_SIZE -
497-
SIZEOF_PTREGS - TOP_OF_KERNEL_STACK_PADDING)
488+
SYM_DATA(initial_stack, .long __top_init_kernel_stack)
498489

499490
__INITRODATA
500491
int_msg:

arch/x86/kernel/head_64.S

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -66,7 +66,7 @@ SYM_CODE_START_NOALIGN(startup_64)
6666
mov %rsi, %r15
6767

6868
/* Set up the stack for verify_cpu() */
69-
leaq (__end_init_task - TOP_OF_KERNEL_STACK_PADDING - PTREGS_SIZE)(%rip), %rsp
69+
leaq __top_init_kernel_stack(%rip), %rsp
7070

7171
/* Setup GSBASE to allow stack canary access for C code */
7272
movl $MSR_GS_BASE, %ecx

arch/x86/kernel/setup.c

Lines changed: 16 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -753,6 +753,22 @@ void __init setup_arch(char **cmdline_p)
753753
boot_cpu_data.x86_phys_bits = MAX_PHYSMEM_BITS;
754754
#endif
755755

756+
#ifdef CONFIG_CMDLINE_BOOL
757+
#ifdef CONFIG_CMDLINE_OVERRIDE
758+
strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
759+
#else
760+
if (builtin_cmdline[0]) {
761+
/* append boot loader cmdline to builtin */
762+
strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
763+
strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
764+
strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
765+
}
766+
#endif
767+
#endif
768+
769+
strscpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
770+
*cmdline_p = command_line;
771+
756772
/*
757773
* If we have OLPC OFW, we might end up relocating the fixmap due to
758774
* reserve_top(), so do this before touching the ioremap area.
@@ -832,22 +848,6 @@ void __init setup_arch(char **cmdline_p)
832848
bss_resource.start = __pa_symbol(__bss_start);
833849
bss_resource.end = __pa_symbol(__bss_stop)-1;
834850

835-
#ifdef CONFIG_CMDLINE_BOOL
836-
#ifdef CONFIG_CMDLINE_OVERRIDE
837-
strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
838-
#else
839-
if (builtin_cmdline[0]) {
840-
/* append boot loader cmdline to builtin */
841-
strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
842-
strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
843-
strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
844-
}
845-
#endif
846-
#endif
847-
848-
strscpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
849-
*cmdline_p = command_line;
850-
851851
/*
852852
* x86_configure_nx() is called before parse_early_param() to detect
853853
* whether hardware doesn't support NX (so that the early EHCI debug

arch/x86/kernel/vmlinux.lds.S

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -172,6 +172,9 @@ SECTIONS
172172
/* init_task */
173173
INIT_TASK_DATA(THREAD_SIZE)
174174

175+
/* equivalent to task_pt_regs(&init_task) */
176+
__top_init_kernel_stack = __end_init_stack - TOP_OF_KERNEL_STACK_PADDING - PTREGS_SIZE;
177+
175178
#ifdef CONFIG_X86_32
176179
/* 32 bit has nosave before _edata */
177180
NOSAVE_DATA

0 commit comments

Comments (0)