@@ -408,28 +408,52 @@ static bool find_aligned_area(struct ma_state *mas, unsigned long section_size,
 
 static int vm_module_tags_populate(void)
 {
-	unsigned long phys_size = vm_module_tags->nr_pages << PAGE_SHIFT;
+	unsigned long phys_end = ALIGN_DOWN(module_tags.start_addr, PAGE_SIZE) +
+				 (vm_module_tags->nr_pages << PAGE_SHIFT);
+	unsigned long new_end = module_tags.start_addr + module_tags.size;
 
-	if (phys_size < module_tags.size) {
+	if (phys_end < new_end) {
 		struct page **next_page = vm_module_tags->pages + vm_module_tags->nr_pages;
-		unsigned long addr = module_tags.start_addr + phys_size;
+		unsigned long old_shadow_end = ALIGN(phys_end, MODULE_ALIGN);
+		unsigned long new_shadow_end = ALIGN(new_end, MODULE_ALIGN);
 		unsigned long more_pages;
 		unsigned long nr;
 
-		more_pages = ALIGN(module_tags.size - phys_size, PAGE_SIZE) >> PAGE_SHIFT;
+		more_pages = ALIGN(new_end - phys_end, PAGE_SIZE) >> PAGE_SHIFT;
 		nr = alloc_pages_bulk_array_node(GFP_KERNEL | __GFP_NOWARN,
 						 NUMA_NO_NODE, more_pages, next_page);
 		if (nr < more_pages ||
-		    vmap_pages_range(addr, addr + (nr << PAGE_SHIFT), PAGE_KERNEL,
+		    vmap_pages_range(phys_end, phys_end + (nr << PAGE_SHIFT), PAGE_KERNEL,
 				     next_page, PAGE_SHIFT) < 0) {
 			/* Clean up and error out */
 			for (int i = 0; i < nr; i++)
 				__free_page(next_page[i]);
 			return -ENOMEM;
 		}
+
 		vm_module_tags->nr_pages += nr;
+
+		/*
+		 * Kasan allocates 1 byte of shadow for every 8 bytes of data.
+		 * When kasan_alloc_module_shadow allocates shadow memory,
+		 * its unit of allocation is a page.
+		 * Therefore, here we need to align to MODULE_ALIGN.
+		 */
+		if (old_shadow_end < new_shadow_end)
+			kasan_alloc_module_shadow((void *)old_shadow_end,
+						  new_shadow_end - old_shadow_end,
+						  GFP_KERNEL);
 	}
 
+	/*
+	 * Mark the pages as accessible, now that they are mapped.
+	 * With hardware tag-based KASAN, marking is skipped for
+	 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
+	 */
+	kasan_unpoison_vmalloc((void *)module_tags.start_addr,
+			       new_end - module_tags.start_addr,
+			       KASAN_VMALLOC_PROT_NORMAL);
+
 	return 0;
 }
 
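For reference, below is a minimal user-space sketch of the address arithmetic this hunk introduces: how phys_end and new_end are derived, and why old_shadow_end/new_shadow_end are rounded up to MODULE_ALIGN before deciding whether more KASAN shadow is needed. The concrete values (start address, page counts, MODULE_ALIGN of eight pages) are assumptions chosen for illustration, not taken from any kernel build, and the macros are re-implemented locally; this is not kernel code.

/* Standalone illustration of the phys_end / shadow-end calculation. */
#include <stdio.h>

#define PAGE_SHIFT	12UL
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define MODULE_ALIGN	(8UL * PAGE_SIZE)	/* assumed value for the example */

#define ALIGN(x, a)		(((x) + ((a) - 1)) & ~((a) - 1))
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))

int main(void)
{
	/* Hypothetical stand-ins for module_tags / vm_module_tags fields. */
	unsigned long start_addr = 0x100000UL;		/* assumed start of the tags area */
	unsigned long nr_pages   = 4;			/* pages already populated */
	unsigned long size       = 10 * PAGE_SIZE;	/* new total size required */

	/* End of the physically backed range vs. end of the range now needed. */
	unsigned long phys_end = ALIGN_DOWN(start_addr, PAGE_SIZE) +
				 (nr_pages << PAGE_SHIFT);
	unsigned long new_end  = start_addr + size;

	/* Shadow is allocated in page-sized chunks, so compare MODULE_ALIGN-rounded ends. */
	unsigned long old_shadow_end = ALIGN(phys_end, MODULE_ALIGN);
	unsigned long new_shadow_end = ALIGN(new_end, MODULE_ALIGN);

	printf("phys_end        = %#lx\n", phys_end);
	printf("new_end         = %#lx\n", new_end);
	printf("pages to add    = %lu\n",
	       ALIGN(new_end - phys_end, PAGE_SIZE) >> PAGE_SHIFT);
	printf("shadow grows    = %s\n",
	       old_shadow_end < new_shadow_end ? "yes" : "no");
	return 0;
}

With these example numbers the range grows by six pages and crosses a MODULE_ALIGN boundary, so the shadow region would also need to grow, which is the case the added kasan_alloc_module_shadow() call handles.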