24
24
#include <linux/export.h>
25
25
#include <linux/slab.h>
26
26
#include <linux/vmalloc.h>
27
+ #include <linux/memblock.h>
27
28
28
29
#include <asm/processor.h>
29
30
#include <asm/hardirq.h>
39
40
#include <asm/virtext.h>
40
41
#include <asm/intel_pt.h>
41
42
#include <asm/crash.h>
43
+ #include <asm/cmdline.h>
42
44
43
45
/* Used while preparing memory map entries for second kernel */
44
46
struct crash_memmap_data {
@@ -68,6 +70,19 @@ static inline void cpu_crash_vmclear_loaded_vmcss(void)
68
70
rcu_read_unlock ();
69
71
}
70
72
73
/*
 * When the crashkernel option is specified, only use the low
 * 1M for the real mode trampoline.
 *
 * Reserves physical memory [0, 1M) via memblock so that nothing else is
 * allocated there.  Called early in boot (__init); does nothing when
 * "crashkernel" is absent from the kernel command line.
 */
void __init crash_reserve_low_1M(void)
{
	/*
	 * cmdline_find_option() returns a negative value when the option is
	 * not present — presumably meaning no kdump kernel was requested, so
	 * there is nothing to reserve.
	 */
	if (cmdline_find_option(boot_command_line, "crashkernel", NULL, 0) < 0)
		return;

	/* Reserve the first megabyte (1 << 20 bytes) starting at phys addr 0. */
	memblock_reserve(0, 1 << 20);
	pr_info("Reserving the low 1M of memory for crashkernel\n");
}
71
86
#if defined(CONFIG_SMP ) && defined(CONFIG_X86_LOCAL_APIC )
72
87
73
88
static void kdump_nmi_callback (int cpu , struct pt_regs * regs )
@@ -173,8 +188,6 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
173
188
174
189
#ifdef CONFIG_KEXEC_FILE
175
190
176
- static unsigned long crash_zero_bytes ;
177
-
178
191
static int get_nr_ram_ranges_callback (struct resource * res , void * arg )
179
192
{
180
193
unsigned int * nr_ranges = arg ;
@@ -189,8 +202,7 @@ static struct crash_mem *fill_up_crash_elf_data(void)
189
202
unsigned int nr_ranges = 0 ;
190
203
struct crash_mem * cmem ;
191
204
192
- walk_system_ram_res (0 , -1 , & nr_ranges ,
193
- get_nr_ram_ranges_callback );
205
+ walk_system_ram_res (0 , -1 , & nr_ranges , get_nr_ram_ranges_callback );
194
206
if (!nr_ranges )
195
207
return NULL ;
196
208
/*
 * Drop memory ranges that must not be described in the crash kernel's
 * ELF core headers: the always-reserved low 1M, the crashkernel region
 * itself, and (when present) the low crashkernel region.
 *
 * @cmem: set of candidate RAM ranges, modified in place.
 *
 * Returns 0 on success or the first non-zero error from
 * crash_exclude_mem_range().
 */
static int elf_header_exclude_ranges(struct crash_mem *cmem)
{
	int ret = 0;

	/* Exclude the low 1M because it is always reserved */
	ret = crash_exclude_mem_range(cmem, 0, 1 << 20);
	if (ret)
		return ret;

	/* Exclude crashkernel region */
	ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
	if (ret)
		return ret;

	/* A low crashkernel region only exists when crashk_low_res is set. */
	if (crashk_low_res.end)
		ret = crash_exclude_mem_range(cmem, crashk_low_res.start,
					      crashk_low_res.end);

	return ret;
}
@@ -246,16 +262,13 @@ static int prepare_elf_headers(struct kimage *image, void **addr,
246
262
unsigned long * sz )
247
263
{
248
264
struct crash_mem * cmem ;
249
- Elf64_Ehdr * ehdr ;
250
- Elf64_Phdr * phdr ;
251
- int ret , i ;
265
+ int ret ;
252
266
253
267
cmem = fill_up_crash_elf_data ();
254
268
if (!cmem )
255
269
return - ENOMEM ;
256
270
257
- ret = walk_system_ram_res (0 , -1 , cmem ,
258
- prepare_elf64_ram_headers_callback );
271
+ ret = walk_system_ram_res (0 , -1 , cmem , prepare_elf64_ram_headers_callback );
259
272
if (ret )
260
273
goto out ;
261
274
@@ -265,24 +278,8 @@ static int prepare_elf_headers(struct kimage *image, void **addr,
265
278
goto out ;
266
279
267
280
/* By default prepare 64bit headers */
268
- ret = crash_prepare_elf64_headers (cmem ,
269
- IS_ENABLED (CONFIG_X86_64 ), addr , sz );
270
- if (ret )
271
- goto out ;
281
+ ret = crash_prepare_elf64_headers (cmem , IS_ENABLED (CONFIG_X86_64 ), addr , sz );
272
282
273
- /*
274
- * If a range matches backup region, adjust offset to backup
275
- * segment.
276
- */
277
- ehdr = (Elf64_Ehdr * )* addr ;
278
- phdr = (Elf64_Phdr * )(ehdr + 1 );
279
- for (i = 0 ; i < ehdr -> e_phnum ; phdr ++ , i ++ )
280
- if (phdr -> p_type == PT_LOAD &&
281
- phdr -> p_paddr == image -> arch .backup_src_start &&
282
- phdr -> p_memsz == image -> arch .backup_src_sz ) {
283
- phdr -> p_offset = image -> arch .backup_load_addr ;
284
- break ;
285
- }
286
283
out :
287
284
vfree (cmem );
288
285
return ret ;
@@ -296,8 +293,7 @@ static int add_e820_entry(struct boot_params *params, struct e820_entry *entry)
296
293
if (nr_e820_entries >= E820_MAX_ENTRIES_ZEROPAGE )
297
294
return 1 ;
298
295
299
- memcpy (& params -> e820_table [nr_e820_entries ], entry ,
300
- sizeof (struct e820_entry ));
296
+ memcpy (& params -> e820_table [nr_e820_entries ], entry , sizeof (struct e820_entry ));
301
297
params -> e820_entries ++ ;
302
298
return 0 ;
303
299
}
@@ -321,19 +317,11 @@ static int memmap_exclude_ranges(struct kimage *image, struct crash_mem *cmem,
321
317
unsigned long long mend )
322
318
{
323
319
unsigned long start , end ;
324
- int ret = 0 ;
325
320
326
321
cmem -> ranges [0 ].start = mstart ;
327
322
cmem -> ranges [0 ].end = mend ;
328
323
cmem -> nr_ranges = 1 ;
329
324
330
- /* Exclude Backup region */
331
- start = image -> arch .backup_load_addr ;
332
- end = start + image -> arch .backup_src_sz - 1 ;
333
- ret = crash_exclude_mem_range (cmem , start , end );
334
- if (ret )
335
- return ret ;
336
-
337
325
/* Exclude elf header region */
338
326
start = image -> arch .elf_load_addr ;
339
327
end = start + image -> arch .elf_headers_sz - 1 ;
@@ -356,28 +344,28 @@ int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
356
344
memset (& cmd , 0 , sizeof (struct crash_memmap_data ));
357
345
cmd .params = params ;
358
346
359
- /* Add first 640K segment */
360
- ei . addr = image -> arch . backup_src_start ;
361
- ei . size = image -> arch . backup_src_sz ;
362
- ei . type = E820_TYPE_RAM ;
363
- add_e820_entry ( params , & ei );
347
+ /* Add the low 1M */
348
+ cmd . type = E820_TYPE_RAM ;
349
+ flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY ;
350
+ walk_iomem_res_desc ( IORES_DESC_NONE , flags , 0 , ( 1 << 20 ) - 1 , & cmd ,
351
+ memmap_entry_callback );
364
352
365
353
/* Add ACPI tables */
366
354
cmd .type = E820_TYPE_ACPI ;
367
355
flags = IORESOURCE_MEM | IORESOURCE_BUSY ;
368
356
walk_iomem_res_desc (IORES_DESC_ACPI_TABLES , flags , 0 , -1 , & cmd ,
369
- memmap_entry_callback );
357
+ memmap_entry_callback );
370
358
371
359
/* Add ACPI Non-volatile Storage */
372
360
cmd .type = E820_TYPE_NVS ;
373
361
walk_iomem_res_desc (IORES_DESC_ACPI_NV_STORAGE , flags , 0 , -1 , & cmd ,
374
- memmap_entry_callback );
362
+ memmap_entry_callback );
375
363
376
364
/* Add e820 reserved ranges */
377
365
cmd .type = E820_TYPE_RESERVED ;
378
366
flags = IORESOURCE_MEM ;
379
367
walk_iomem_res_desc (IORES_DESC_RESERVED , flags , 0 , -1 , & cmd ,
380
- memmap_entry_callback );
368
+ memmap_entry_callback );
381
369
382
370
/* Add crashk_low_res region */
383
371
if (crashk_low_res .end ) {
@@ -388,8 +376,7 @@ int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
388
376
}
389
377
390
378
/* Exclude some ranges from crashk_res and add rest to memmap */
391
- ret = memmap_exclude_ranges (image , cmem , crashk_res .start ,
392
- crashk_res .end );
379
+ ret = memmap_exclude_ranges (image , cmem , crashk_res .start , crashk_res .end );
393
380
if (ret )
394
381
goto out ;
395
382
@@ -409,55 +396,12 @@ int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
409
396
return ret ;
410
397
}
411
398
412
- static int determine_backup_region (struct resource * res , void * arg )
413
- {
414
- struct kimage * image = arg ;
415
-
416
- image -> arch .backup_src_start = res -> start ;
417
- image -> arch .backup_src_sz = resource_size (res );
418
-
419
- /* Expecting only one range for backup region */
420
- return 1 ;
421
- }
422
-
423
399
int crash_load_segments (struct kimage * image )
424
400
{
425
401
int ret ;
426
402
struct kexec_buf kbuf = { .image = image , .buf_min = 0 ,
427
403
.buf_max = ULONG_MAX , .top_down = false };
428
404
429
- /*
430
- * Determine and load a segment for backup area. First 640K RAM
431
- * region is backup source
432
- */
433
-
434
- ret = walk_system_ram_res (KEXEC_BACKUP_SRC_START , KEXEC_BACKUP_SRC_END ,
435
- image , determine_backup_region );
436
-
437
- /* Zero or postive return values are ok */
438
- if (ret < 0 )
439
- return ret ;
440
-
441
- /* Add backup segment. */
442
- if (image -> arch .backup_src_sz ) {
443
- kbuf .buffer = & crash_zero_bytes ;
444
- kbuf .bufsz = sizeof (crash_zero_bytes );
445
- kbuf .memsz = image -> arch .backup_src_sz ;
446
- kbuf .buf_align = PAGE_SIZE ;
447
- /*
448
- * Ideally there is no source for backup segment. This is
449
- * copied in purgatory after crash. Just add a zero filled
450
- * segment for now to make sure checksum logic works fine.
451
- */
452
- ret = kexec_add_buffer (& kbuf );
453
- if (ret )
454
- return ret ;
455
- image -> arch .backup_load_addr = kbuf .mem ;
456
- pr_debug ("Loaded backup region at 0x%lx backup_start=0x%lx memsz=0x%lx\n" ,
457
- image -> arch .backup_load_addr ,
458
- image -> arch .backup_src_start , kbuf .memsz );
459
- }
460
-
461
405
/* Prepare elf headers and add a segment */
462
406
ret = prepare_elf_headers (image , & kbuf .buffer , & kbuf .bufsz );
463
407
if (ret )
0 commit comments