Skip to content

Commit 1642285

Browse files
Alexander Gordeev and Vasily Gorbik
authored and committed
s390/boot: Fix KASLR base offset off by __START_KERNEL bytes
Symbol offsets to the KASLR base do not match symbol address in the vmlinux image. That is the result of setting the KASLR base to the beginning of .text section as result of an optimization. Revert that optimization and allocate virtual memory for the whole kernel image including __START_KERNEL bytes as per the linker script. That allows keeping the semantics of the KASLR base offset in sync with other architectures. Rename __START_KERNEL to TEXT_OFFSET, since it represents the offset of the .text section within the kernel image, rather than a virtual address. Still skip mapping TEXT_OFFSET bytes to save memory on pgtables and provoke exceptions in case an attempt to access this area is made, as no kernel symbol may reside there. In case CONFIG_KASAN is enabled the location counter might exceed the value of TEXT_OFFSET, while the decompressor linker script forcefully resets it to TEXT_OFFSET, which leads to a sections overlap link failure. Use MAX() expression to avoid that. Reported-by: Omar Sandoval <[email protected]> Closes: https://lore.kernel.org/linux-s390/[email protected]/ Fixes: 56b1069 ("s390/boot: Rework deployment of the kernel image") Signed-off-by: Alexander Gordeev <[email protected]> Acked-by: Vasily Gorbik <[email protected]> Signed-off-by: Vasily Gorbik <[email protected]>
1 parent d7fd294 commit 1642285

File tree

6 files changed

+52
-31
lines changed

6 files changed

+52
-31
lines changed

arch/s390/boot/startup.c

Lines changed: 30 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -162,7 +162,7 @@ static void kaslr_adjust_relocs(unsigned long min_addr, unsigned long max_addr,
162162
loc = (long)*reloc + phys_offset;
163163
if (loc < min_addr || loc > max_addr)
164164
error("64-bit relocation outside of kernel!\n");
165-
*(u64 *)loc += offset - __START_KERNEL;
165+
*(u64 *)loc += offset;
166166
}
167167
}
168168

@@ -177,7 +177,7 @@ static void kaslr_adjust_got(unsigned long offset)
177177
*/
178178
for (entry = (u64 *)vmlinux.got_start; entry < (u64 *)vmlinux.got_end; entry++) {
179179
if (*entry)
180-
*entry += offset - __START_KERNEL;
180+
*entry += offset;
181181
}
182182
}
183183

@@ -252,7 +252,7 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
252252
vmemmap_size = SECTION_ALIGN_UP(pages) * sizeof(struct page);
253253

254254
/* choose kernel address space layout: 4 or 3 levels. */
255-
BUILD_BUG_ON(!IS_ALIGNED(__START_KERNEL, THREAD_SIZE));
255+
BUILD_BUG_ON(!IS_ALIGNED(TEXT_OFFSET, THREAD_SIZE));
256256
BUILD_BUG_ON(!IS_ALIGNED(__NO_KASLR_START_KERNEL, THREAD_SIZE));
257257
BUILD_BUG_ON(__NO_KASLR_END_KERNEL > _REGION1_SIZE);
258258
vsize = get_vmem_size(ident_map_size, vmemmap_size, vmalloc_size, _REGION3_SIZE);
@@ -389,31 +389,25 @@ static void kaslr_adjust_vmlinux_info(long offset)
389389
#endif
390390
}
391391

392-
static void fixup_vmlinux_info(void)
393-
{
394-
vmlinux.entry -= __START_KERNEL;
395-
kaslr_adjust_vmlinux_info(-__START_KERNEL);
396-
}
397-
398392
void startup_kernel(void)
399393
{
400-
unsigned long kernel_size = vmlinux.image_size + vmlinux.bss_size;
401-
unsigned long nokaslr_offset_phys, kaslr_large_page_offset;
402-
unsigned long amode31_lma = 0;
394+
unsigned long vmlinux_size = vmlinux.image_size + vmlinux.bss_size;
395+
unsigned long nokaslr_text_lma, text_lma = 0, amode31_lma = 0;
396+
unsigned long kernel_size = TEXT_OFFSET + vmlinux_size;
397+
unsigned long kaslr_large_page_offset;
403398
unsigned long max_physmem_end;
404399
unsigned long asce_limit;
405400
unsigned long safe_addr;
406401
psw_t psw;
407402

408-
fixup_vmlinux_info();
409403
setup_lpp();
410404

411405
/*
412406
* Non-randomized kernel physical start address must be _SEGMENT_SIZE
413407
* aligned (see blow).
414408
*/
415-
nokaslr_offset_phys = ALIGN(mem_safe_offset(), _SEGMENT_SIZE);
416-
safe_addr = PAGE_ALIGN(nokaslr_offset_phys + kernel_size);
409+
nokaslr_text_lma = ALIGN(mem_safe_offset(), _SEGMENT_SIZE);
410+
safe_addr = PAGE_ALIGN(nokaslr_text_lma + vmlinux_size);
417411

418412
/*
419413
* Reserve decompressor memory together with decompression heap,
@@ -457,16 +451,27 @@ void startup_kernel(void)
457451
*/
458452
kaslr_large_page_offset = __kaslr_offset & ~_SEGMENT_MASK;
459453
if (kaslr_enabled()) {
460-
unsigned long size = kernel_size + kaslr_large_page_offset;
454+
unsigned long size = vmlinux_size + kaslr_large_page_offset;
461455

462-
__kaslr_offset_phys = randomize_within_range(size, _SEGMENT_SIZE, 0, ident_map_size);
456+
text_lma = randomize_within_range(size, _SEGMENT_SIZE, TEXT_OFFSET, ident_map_size);
463457
}
464-
if (!__kaslr_offset_phys)
465-
__kaslr_offset_phys = nokaslr_offset_phys;
466-
__kaslr_offset_phys |= kaslr_large_page_offset;
458+
if (!text_lma)
459+
text_lma = nokaslr_text_lma;
460+
text_lma |= kaslr_large_page_offset;
461+
462+
/*
463+
* [__kaslr_offset_phys..__kaslr_offset_phys + TEXT_OFFSET] region is
464+
* never accessed via the kernel image mapping as per the linker script:
465+
*
466+
* . = TEXT_OFFSET;
467+
*
468+
* Therefore, this region could be used for something else and does
469+
* not need to be reserved. See how it is skipped in setup_vmem().
470+
*/
471+
__kaslr_offset_phys = text_lma - TEXT_OFFSET;
467472
kaslr_adjust_vmlinux_info(__kaslr_offset_phys);
468-
physmem_reserve(RR_VMLINUX, __kaslr_offset_phys, kernel_size);
469-
deploy_kernel((void *)__kaslr_offset_phys);
473+
physmem_reserve(RR_VMLINUX, text_lma, vmlinux_size);
474+
deploy_kernel((void *)text_lma);
470475

471476
/* vmlinux decompression is done, shrink reserved low memory */
472477
physmem_reserve(RR_DECOMPRESSOR, 0, (unsigned long)_decompressor_end);
@@ -489,7 +494,7 @@ void startup_kernel(void)
489494
amode31_lma = randomize_within_range(vmlinux.amode31_size, PAGE_SIZE, amode31_min, SZ_2G);
490495
}
491496
if (!amode31_lma)
492-
amode31_lma = __kaslr_offset_phys - vmlinux.amode31_size;
497+
amode31_lma = text_lma - vmlinux.amode31_size;
493498
physmem_reserve(RR_AMODE31, amode31_lma, vmlinux.amode31_size);
494499

495500
/*
@@ -505,8 +510,8 @@ void startup_kernel(void)
505510
* - copy_bootdata() must follow setup_vmem() to propagate changes
506511
* to bootdata made by setup_vmem()
507512
*/
508-
clear_bss_section(__kaslr_offset_phys);
509-
kaslr_adjust_relocs(__kaslr_offset_phys, __kaslr_offset_phys + vmlinux.image_size,
513+
clear_bss_section(text_lma);
514+
kaslr_adjust_relocs(text_lma, text_lma + vmlinux.image_size,
510515
__kaslr_offset, __kaslr_offset_phys);
511516
kaslr_adjust_got(__kaslr_offset);
512517
setup_vmem(__kaslr_offset, __kaslr_offset + kernel_size, asce_limit);

arch/s390/boot/vmem.c

Lines changed: 12 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -90,7 +90,7 @@ static void kasan_populate_shadow(unsigned long kernel_start, unsigned long kern
9090
}
9191
memgap_start = end;
9292
}
93-
kasan_populate(kernel_start, kernel_end, POPULATE_KASAN_MAP_SHADOW);
93+
kasan_populate(kernel_start + TEXT_OFFSET, kernel_end, POPULATE_KASAN_MAP_SHADOW);
9494
kasan_populate(0, (unsigned long)__identity_va(0), POPULATE_KASAN_ZERO_SHADOW);
9595
kasan_populate(AMODE31_START, AMODE31_END, POPULATE_KASAN_ZERO_SHADOW);
9696
if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
@@ -475,7 +475,17 @@ void setup_vmem(unsigned long kernel_start, unsigned long kernel_end, unsigned l
475475
(unsigned long)__identity_va(end),
476476
POPULATE_IDENTITY);
477477
}
478-
pgtable_populate(kernel_start, kernel_end, POPULATE_KERNEL);
478+
479+
/*
480+
* [kernel_start..kernel_start + TEXT_OFFSET] region is never
481+
* accessed as per the linker script:
482+
*
483+
* . = TEXT_OFFSET;
484+
*
485+
* Therefore, skip mapping TEXT_OFFSET bytes to prevent access to
486+
* [__kaslr_offset_phys..__kaslr_offset_phys + TEXT_OFFSET] region.
487+
*/
488+
pgtable_populate(kernel_start + TEXT_OFFSET, kernel_end, POPULATE_KERNEL);
479489
pgtable_populate(AMODE31_START, AMODE31_END, POPULATE_DIRECT);
480490
pgtable_populate(__abs_lowcore, __abs_lowcore + sizeof(struct lowcore),
481491
POPULATE_ABS_LOWCORE);

arch/s390/boot/vmlinux.lds.S

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -109,7 +109,12 @@ SECTIONS
109109
#ifdef CONFIG_KERNEL_UNCOMPRESSED
110110
. = ALIGN(PAGE_SIZE);
111111
. += AMODE31_SIZE; /* .amode31 section */
112-
. = ALIGN(1 << 20); /* _SEGMENT_SIZE */
112+
113+
/*
114+
* Make sure the location counter is not less than TEXT_OFFSET.
115+
* _SEGMENT_SIZE is not available, use ALIGN(1 << 20) instead.
116+
*/
117+
. = MAX(TEXT_OFFSET, ALIGN(1 << 20));
113118
#else
114119
. = ALIGN(8);
115120
#endif

arch/s390/include/asm/page.h

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -279,8 +279,9 @@ static inline unsigned long virt_to_pfn(const void *kaddr)
279279
#define AMODE31_SIZE (3 * PAGE_SIZE)
280280

281281
#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024)
282-
#define __START_KERNEL 0x100000
283282
#define __NO_KASLR_START_KERNEL CONFIG_KERNEL_IMAGE_BASE
284283
#define __NO_KASLR_END_KERNEL (__NO_KASLR_START_KERNEL + KERNEL_IMAGE_SIZE)
285284

285+
#define TEXT_OFFSET 0x100000
286+
286287
#endif /* _S390_PAGE_H */

arch/s390/kernel/vmlinux.lds.S

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -39,7 +39,7 @@ PHDRS {
3939

4040
SECTIONS
4141
{
42-
. = __START_KERNEL;
42+
. = TEXT_OFFSET;
4343
.text : {
4444
_stext = .; /* Start of text section */
4545
_text = .; /* Text and read-only data */

arch/s390/tools/relocs.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -280,7 +280,7 @@ static int do_reloc(struct section *sec, Elf_Rel *rel)
280280
case R_390_GOTOFF64:
281281
break;
282282
case R_390_64:
283-
add_reloc(&relocs64, offset - ehdr.e_entry);
283+
add_reloc(&relocs64, offset);
284284
break;
285285
default:
286286
die("Unsupported relocation type: %d\n", r_type);

0 commit comments

Comments (0)