@@ -1,4 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
+#define boot_fmt(fmt)	"startup: " fmt
 #include <linux/string.h>
 #include <linux/elf.h>
 #include <asm/page-states.h>
@@ -223,12 +224,16 @@ static void setup_ident_map_size(unsigned long max_physmem_end)
 	if (oldmem_data.start) {
 		__kaslr_enabled = 0;
 		ident_map_size = min(ident_map_size, oldmem_data.size);
+		boot_debug("kdump memory limit: 0x%016lx\n", oldmem_data.size);
 	} else if (ipl_block_valid && is_ipl_block_dump()) {
 		__kaslr_enabled = 0;
-		if (!sclp_early_get_hsa_size(&hsa_size) && hsa_size)
+		if (!sclp_early_get_hsa_size(&hsa_size) && hsa_size) {
 			ident_map_size = min(ident_map_size, hsa_size);
+			boot_debug("Stand-alone dump limit: 0x%016lx\n", hsa_size);
+		}
 	}
 #endif
+	boot_debug("Identity map size: 0x%016lx\n", ident_map_size);
 }
 
 #define FIXMAP_SIZE	round_up(MEMCPY_REAL_SIZE + ABS_LOWCORE_MAP_SIZE, sizeof(struct lowcore))
@@ -266,6 +271,7 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
 	BUILD_BUG_ON(!IS_ALIGNED(__NO_KASLR_START_KERNEL, THREAD_SIZE));
 	BUILD_BUG_ON(__NO_KASLR_END_KERNEL > _REGION1_SIZE);
 	vsize = get_vmem_size(ident_map_size, vmemmap_size, vmalloc_size, _REGION3_SIZE);
+	boot_debug("vmem size estimated: 0x%016lx\n", vsize);
 	if (IS_ENABLED(CONFIG_KASAN) || __NO_KASLR_END_KERNEL > _REGION2_SIZE ||
 	    (vsize > _REGION2_SIZE && kaslr_enabled())) {
 		asce_limit = _REGION1_SIZE;
@@ -289,8 +295,10 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
 	 * otherwise asce_limit and rte_size would have been adjusted.
 	 */
 	vmax = adjust_to_uv_max(asce_limit);
+	boot_debug("%d level paging 0x%016lx vmax\n", vmax == _REGION1_SIZE ? 4 : 3, vmax);
 #ifdef CONFIG_KASAN
 	BUILD_BUG_ON(__NO_KASLR_END_KERNEL > KASAN_SHADOW_START);
+	boot_debug("KASAN shadow area: 0x%016lx-0x%016lx\n", KASAN_SHADOW_START, KASAN_SHADOW_END);
 	/* force vmalloc and modules below kasan shadow */
 	vmax = min(vmax, KASAN_SHADOW_START);
 #endif
@@ -304,19 +312,27 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
 			pos = 0;
 		kernel_end = vmax - pos * THREAD_SIZE;
 		kernel_start = round_down(kernel_end - kernel_size, THREAD_SIZE);
+		boot_debug("Randomization range: 0x%016lx-0x%016lx\n", vmax - kaslr_len, vmax);
+		boot_debug("kernel image: 0x%016lx-0x%016lx (kaslr)\n", kernel_start,
+			   kernel_start + kernel_size);
 	} else if (vmax < __NO_KASLR_END_KERNEL || vsize > __NO_KASLR_END_KERNEL) {
 		kernel_start = round_down(vmax - kernel_size, THREAD_SIZE);
-		boot_debug("The kernel base address is forced to %lx\n", kernel_start);
+		boot_debug("kernel image: 0x%016lx-0x%016lx (constrained)\n", kernel_start,
+			   kernel_start + kernel_size);
 	} else {
 		kernel_start = __NO_KASLR_START_KERNEL;
+		boot_debug("kernel image: 0x%016lx-0x%016lx (nokaslr)\n", kernel_start,
+			   kernel_start + kernel_size);
 	}
 	__kaslr_offset = kernel_start;
+	boot_debug("__kaslr_offset: 0x%016lx\n", __kaslr_offset);
 
 	MODULES_END = round_down(kernel_start, _SEGMENT_SIZE);
 	MODULES_VADDR = MODULES_END - MODULES_LEN;
 	VMALLOC_END = MODULES_VADDR;
 	if (IS_ENABLED(CONFIG_KMSAN))
 		VMALLOC_END -= MODULES_LEN * 2;
+	boot_debug("modules area: 0x%016lx-0x%016lx\n", MODULES_VADDR, MODULES_END);
 
 	/* allow vmalloc area to occupy up to about 1/2 of the rest virtual space left */
 	vsize = (VMALLOC_END - FIXMAP_SIZE) / 2;
@@ -328,10 +344,15 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
 		VMALLOC_END -= vmalloc_size * 2;
 	}
 	VMALLOC_START = VMALLOC_END - vmalloc_size;
+	boot_debug("vmalloc area: 0x%016lx-0x%016lx\n", VMALLOC_START, VMALLOC_END);
 
 	__memcpy_real_area = round_down(VMALLOC_START - MEMCPY_REAL_SIZE, PAGE_SIZE);
+	boot_debug("memcpy real area: 0x%016lx-0x%016lx\n", __memcpy_real_area,
+		   __memcpy_real_area + MEMCPY_REAL_SIZE);
 	__abs_lowcore = round_down(__memcpy_real_area - ABS_LOWCORE_MAP_SIZE,
 				   sizeof(struct lowcore));
+	boot_debug("abs lowcore: 0x%016lx-0x%016lx\n", __abs_lowcore,
+		   __abs_lowcore + ABS_LOWCORE_MAP_SIZE);
 
 	/* split remaining virtual space between 1:1 mapping & vmemmap array */
 	pages = __abs_lowcore / (PAGE_SIZE + sizeof(struct page));
@@ -353,6 +374,8 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
 	max_mappable = min(max_mappable, vmemmap_start);
 	if (IS_ENABLED(CONFIG_RANDOMIZE_IDENTITY_BASE))
 		__identity_base = round_down(vmemmap_start - max_mappable, rte_size);
+	boot_debug("identity map: 0x%016lx-0x%016lx\n", __identity_base,
+		   __identity_base + ident_map_size);
 
 	return asce_limit;
 }
@@ -542,5 +565,6 @@ void startup_kernel(void)
 	 */
 	psw.addr = __kaslr_offset + vmlinux.entry;
 	psw.mask = PSW_KERNEL_BITS;
+	boot_debug("Starting kernel at: 0x%016lx\n", psw.addr);
 	__load_psw(psw);
 }
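
The boot_fmt() define added at the top of the file follows the same pattern as the kernel's pr_fmt(): each boot_debug() call site wraps its format string with it at preprocessing time, so every message carries the "startup: " prefix without repeating it in each format string. Below is a minimal user-space sketch of that prefix-macro pattern; the printf-based boot_debug() in the sketch is only an illustrative stand-in, not the s390 decompressor's actual implementation.

#include <stdio.h>

/* Per-file message prefix, analogous to the boot_fmt() define in startup.c. */
#define boot_fmt(fmt)	"startup: " fmt

/*
 * Stand-in for illustration only: the real boot_debug() is part of the
 * decompressor's boot console printing, while this one simply writes to
 * stdout. The format string is wrapped by boot_fmt() at expansion time.
 */
#define boot_debug(fmt, ...)	printf(boot_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	unsigned long ident_map_size = 0x20000000000UL;	/* arbitrary example value */

	/* Expands to printf("startup: " "Identity map size: ...", ident_map_size) */
	boot_debug("Identity map size: 0x%016lx\n", ident_map_size);
	return 0;
}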