@@ -158,8 +158,7 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
 	crash_save_cpu(regs, safe_smp_processor_id());
 }
 
-#ifdef CONFIG_KEXEC_FILE
-
+#if defined(CONFIG_KEXEC_FILE) || defined(CONFIG_CRASH_HOTPLUG)
 static int get_nr_ram_ranges_callback(struct resource *res, void *arg)
 {
 	unsigned int *nr_ranges = arg;
@@ -231,7 +230,7 @@ static int prepare_elf64_ram_headers_callback(struct resource *res, void *arg)
 
 /* Prepare elf headers. Return addr and size */
 static int prepare_elf_headers(struct kimage *image, void **addr,
-			       unsigned long *sz)
+			       unsigned long *sz, unsigned long *nr_mem_ranges)
 {
 	struct crash_mem *cmem;
 	int ret;
@@ -249,14 +248,19 @@ static int prepare_elf_headers(struct kimage *image, void **addr,
 	if (ret)
 		goto out;
 
+	/* Return the computed number of memory ranges, for hotplug usage */
+	*nr_mem_ranges = cmem->nr_ranges;
+
 	/* By default prepare 64bit headers */
 	ret = crash_prepare_elf64_headers(cmem, IS_ENABLED(CONFIG_X86_64), addr, sz);
 
 out:
 	vfree(cmem);
 	return ret;
 }
+#endif
 
+#ifdef CONFIG_KEXEC_FILE
 static int add_e820_entry(struct boot_params *params, struct e820_entry *entry)
 {
 	unsigned int nr_e820_entries;
@@ -371,18 +375,42 @@ int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
 int crash_load_segments(struct kimage *image)
 {
 	int ret;
+	unsigned long pnum = 0;
 	struct kexec_buf kbuf = { .image = image, .buf_min = 0,
 				  .buf_max = ULONG_MAX, .top_down = false };
 
 	/* Prepare elf headers and add a segment */
-	ret = prepare_elf_headers(image, &kbuf.buffer, &kbuf.bufsz);
+	ret = prepare_elf_headers(image, &kbuf.buffer, &kbuf.bufsz, &pnum);
 	if (ret)
 		return ret;
 
-	image->elf_headers = kbuf.buffer;
-	image->elf_headers_sz = kbuf.bufsz;
+	image->elf_headers	= kbuf.buffer;
+	image->elf_headers_sz	= kbuf.bufsz;
+	kbuf.memsz		= kbuf.bufsz;
+
+#ifdef CONFIG_CRASH_HOTPLUG
+	/*
+	 * The elfcorehdr segment size accounts for VMCOREINFO, kernel_map,
+	 * maximum CPUs and maximum memory ranges.
+	 */
+	if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG))
+		pnum = 2 + CONFIG_NR_CPUS_DEFAULT + CONFIG_CRASH_MAX_MEMORY_RANGES;
+	else
+		pnum += 2 + CONFIG_NR_CPUS_DEFAULT;
+
+	if (pnum < (unsigned long)PN_XNUM) {
+		kbuf.memsz = pnum * sizeof(Elf64_Phdr);
+		kbuf.memsz += sizeof(Elf64_Ehdr);
+
+		image->elfcorehdr_index = image->nr_segments;
+
+		/* Mark as usable to crash kernel, else crash kernel fails on boot */
+		image->elf_headers_sz = kbuf.memsz;
+	} else {
+		pr_err("number of Phdrs %lu exceeds max\n", pnum);
+	}
+#endif
 
-	kbuf.memsz = kbuf.bufsz;
 	kbuf.buf_align = ELF_CORE_HEADER_ALIGN;
 	kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
 	ret = kexec_add_buffer(&kbuf);
@@ -395,3 +423,66 @@ int crash_load_segments(struct kimage *image)
 	return ret;
 }
 #endif /* CONFIG_KEXEC_FILE */
+
+#ifdef CONFIG_CRASH_HOTPLUG
+
+#undef pr_fmt
+#define pr_fmt(fmt) "crash hp: " fmt
+
+/**
+ * arch_crash_handle_hotplug_event() - Handle hotplug elfcorehdr changes
+ * @image: a pointer to kexec_crash_image
+ *
+ * Prepare the new elfcorehdr and replace the existing elfcorehdr.
+ */
+void arch_crash_handle_hotplug_event(struct kimage *image)
+{
+	void *elfbuf = NULL, *old_elfcorehdr;
+	unsigned long nr_mem_ranges;
+	unsigned long mem, memsz;
+	unsigned long elfsz = 0;
+
+	/*
+	 * Create the new elfcorehdr reflecting the changes to CPU and/or
+	 * memory resources.
+	 */
+	if (prepare_elf_headers(image, &elfbuf, &elfsz, &nr_mem_ranges)) {
+		pr_err("unable to create new elfcorehdr");
+		goto out;
+	}
+
+	/*
+	 * Obtain address and size of the elfcorehdr segment, and
+	 * check it against the new elfcorehdr buffer.
+	 */
+	mem = image->segment[image->elfcorehdr_index].mem;
+	memsz = image->segment[image->elfcorehdr_index].memsz;
+	if (elfsz > memsz) {
+		pr_err("update elfcorehdr elfsz %lu > memsz %lu",
+			elfsz, memsz);
+		goto out;
+	}
+
+	/*
+	 * Copy new elfcorehdr over the old elfcorehdr at destination.
+	 */
+	old_elfcorehdr = kmap_local_page(pfn_to_page(mem >> PAGE_SHIFT));
+	if (!old_elfcorehdr) {
+		pr_err("mapping elfcorehdr segment failed\n");
+		goto out;
+	}
+
+	/*
+	 * Temporarily invalidate the crash image while the
+	 * elfcorehdr is updated.
+	 */
+	xchg(&kexec_crash_image, NULL);
+	memcpy_flushcache(old_elfcorehdr, elfbuf, elfsz);
+	xchg(&kexec_crash_image, image);
+	kunmap_local(old_elfcorehdr);
+	pr_debug("updated elfcorehdr\n");
+
+out:
+	vfree(elfbuf);
+}
+#endif