@@ -1,4 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
+#define boot_fmt(fmt)	"startup: " fmt
 #include <linux/string.h>
 #include <linux/elf.h>
 #include <asm/page-states.h>
@@ -223,12 +224,16 @@ static void setup_ident_map_size(unsigned long max_physmem_end)
 	if (oldmem_data.start) {
 		__kaslr_enabled = 0;
 		ident_map_size = min(ident_map_size, oldmem_data.size);
+		boot_debug("kdump memory limit: 0x%016lx\n", oldmem_data.size);
 	} else if (ipl_block_valid && is_ipl_block_dump()) {
 		__kaslr_enabled = 0;
-		if (!sclp_early_get_hsa_size(&hsa_size) && hsa_size)
+		if (!sclp_early_get_hsa_size(&hsa_size) && hsa_size) {
 			ident_map_size = min(ident_map_size, hsa_size);
+			boot_debug("Stand-alone dump limit: 0x%016lx\n", hsa_size);
+		}
 	}
 #endif
+	boot_debug("Identity map size: 0x%016lx\n", ident_map_size);
 }
 
 #define FIXMAP_SIZE	round_up(MEMCPY_REAL_SIZE + ABS_LOWCORE_MAP_SIZE, sizeof(struct lowcore))
@@ -266,6 +271,7 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
 	BUILD_BUG_ON(!IS_ALIGNED(__NO_KASLR_START_KERNEL, THREAD_SIZE));
 	BUILD_BUG_ON(__NO_KASLR_END_KERNEL > _REGION1_SIZE);
 	vsize = get_vmem_size(ident_map_size, vmemmap_size, vmalloc_size, _REGION3_SIZE);
+	boot_debug("vmem size estimated: 0x%016lx\n", vsize);
 	if (IS_ENABLED(CONFIG_KASAN) || __NO_KASLR_END_KERNEL > _REGION2_SIZE ||
 	    (vsize > _REGION2_SIZE && kaslr_enabled())) {
 		asce_limit = _REGION1_SIZE;
@@ -289,8 +295,10 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
 	 * otherwise asce_limit and rte_size would have been adjusted.
 	 */
 	vmax = adjust_to_uv_max(asce_limit);
+	boot_debug("%d level paging 0x%016lx vmax\n", vmax == _REGION1_SIZE ? 4 : 3, vmax);
 #ifdef CONFIG_KASAN
 	BUILD_BUG_ON(__NO_KASLR_END_KERNEL > KASAN_SHADOW_START);
+	boot_debug("KASAN shadow area: 0x%016lx-0x%016lx\n", KASAN_SHADOW_START, KASAN_SHADOW_END);
 	/* force vmalloc and modules below kasan shadow */
 	vmax = min(vmax, KASAN_SHADOW_START);
 #endif
@@ -304,19 +312,27 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
 			pos = 0;
 		kernel_end = vmax - pos * THREAD_SIZE;
 		kernel_start = round_down(kernel_end - kernel_size, THREAD_SIZE);
+		boot_debug("Randomization range: 0x%016lx-0x%016lx\n", vmax - kaslr_len, vmax);
+		boot_debug("kernel image: 0x%016lx-0x%016lx (kaslr)\n", kernel_start,
+			   kernel_start + kernel_size);
 	} else if (vmax < __NO_KASLR_END_KERNEL || vsize > __NO_KASLR_END_KERNEL) {
 		kernel_start = round_down(vmax - kernel_size, THREAD_SIZE);
-		boot_debug("The kernel base address is forced to %lx\n", kernel_start);
+		boot_debug("kernel image: 0x%016lx-0x%016lx (constrained)\n", kernel_start,
+			   kernel_start + kernel_size);
 	} else {
 		kernel_start = __NO_KASLR_START_KERNEL;
+		boot_debug("kernel image: 0x%016lx-0x%016lx (nokaslr)\n", kernel_start,
+			   kernel_start + kernel_size);
 	}
 	__kaslr_offset = kernel_start;
+	boot_debug("__kaslr_offset: 0x%016lx\n", __kaslr_offset);
 
 	MODULES_END = round_down(kernel_start, _SEGMENT_SIZE);
 	MODULES_VADDR = MODULES_END - MODULES_LEN;
 	VMALLOC_END = MODULES_VADDR;
 	if (IS_ENABLED(CONFIG_KMSAN))
 		VMALLOC_END -= MODULES_LEN * 2;
+	boot_debug("modules area: 0x%016lx-0x%016lx\n", MODULES_VADDR, MODULES_END);
 
 	/* allow vmalloc area to occupy up to about 1/2 of the rest virtual space left */
 	vsize = (VMALLOC_END - FIXMAP_SIZE) / 2;
@@ -328,10 +344,15 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
 		VMALLOC_END -= vmalloc_size * 2;
 	}
 	VMALLOC_START = VMALLOC_END - vmalloc_size;
+	boot_debug("vmalloc area: 0x%016lx-0x%016lx\n", VMALLOC_START, VMALLOC_END);
 
 	__memcpy_real_area = round_down(VMALLOC_START - MEMCPY_REAL_SIZE, PAGE_SIZE);
+	boot_debug("memcpy real area: 0x%016lx-0x%016lx\n", __memcpy_real_area,
+		   __memcpy_real_area + MEMCPY_REAL_SIZE);
 	__abs_lowcore = round_down(__memcpy_real_area - ABS_LOWCORE_MAP_SIZE,
 				   sizeof(struct lowcore));
+	boot_debug("abs lowcore: 0x%016lx-0x%016lx\n", __abs_lowcore,
+		   __abs_lowcore + ABS_LOWCORE_MAP_SIZE);
 
 	/* split remaining virtual space between 1:1 mapping & vmemmap array */
 	pages = __abs_lowcore / (PAGE_SIZE + sizeof(struct page));
@@ -353,6 +374,8 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
 	max_mappable = min(max_mappable, vmemmap_start);
 	if (IS_ENABLED(CONFIG_RANDOMIZE_IDENTITY_BASE))
 		__identity_base = round_down(vmemmap_start - max_mappable, rte_size);
+	boot_debug("identity map: 0x%016lx-0x%016lx\n", __identity_base,
+		   __identity_base + ident_map_size);
 
 	return asce_limit;
 }
@@ -542,5 +565,6 @@ void startup_kernel(void)
 	 */
 	psw.addr = __kaslr_offset + vmlinux.entry;
 	psw.mask = PSW_KERNEL_BITS;
+	boot_debug("Starting kernel at: 0x%016lx\n", psw.addr);
 	__load_psw(psw);
 }
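
Note on the mechanism: the boot_fmt define added at the top of the file only takes effect if boot_debug() expands its format string through boot_fmt(), the same way pr_debug() routes through pr_fmt(). Below is a minimal sketch of that convention, assuming a variadic boot_printk() helper; the macro bodies are illustrative and not taken from this patch:

/*
 * pr_fmt()-style convention assumed by this patch: a translation unit
 * may define boot_fmt() before including the boot header, and every
 * boot_debug() call in that unit then picks up the prefix at compile time.
 */
#ifndef boot_fmt
#define boot_fmt(fmt)		fmt	/* default: no prefix */
#endif
#define boot_debug(fmt, ...)	boot_printk(boot_fmt(fmt), ##__VA_ARGS__)

With "startup: " as the per-file prefix, a call such as boot_debug("Identity map size: 0x%016lx\n", ident_map_size) prints its message under the "startup: " tag, which keeps the debug output added here grep-able as one boot stage.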