
Commit d98c9e8

aryabinin authored and torvalds committed
kasan: fix crashes on access to memory mapped by vm_map_ram()
With CONFIG_KASAN_VMALLOC=y any use of memory obtained via vm_map_ram()
will crash because there is no shadow backing that memory.

Instead of sprinkling additional kasan_populate_vmalloc() calls all over
the vmalloc code, move it into alloc_vmap_area(). This will fix
vm_map_ram() and simplify the code a bit.

[[email protected]: v2]
  Link: http://lkml.kernel.org/r/[email protected]
Link: http://lkml.kernel.org/r/[email protected]
Fixes: 3c5c3cf ("kasan: support backing vmalloc space with real shadow memory")
Signed-off-by: Andrey Ryabinin <[email protected]>
Reported-by: Dmitry Vyukov <[email protected]>
Reviewed-by: Uladzislau Rezki (Sony) <[email protected]>
Cc: Daniel Axtens <[email protected]>
Cc: Alexander Potapenko <[email protected]>
Cc: Qian Cai <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
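For context, a minimal sketch of the kind of vm_map_ram() user that crashed before this fix. This is illustrative only, not from the patch; the helper name and driver-style flow are assumed. The mapping came from alloc_vmap_area() without kasan_populate_vmalloc() ever running, so the first access dereferenced unbacked shadow memory.

#include <linux/mm.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

/* Hypothetical example: map one page with vm_map_ram() and touch it. */
static int map_ram_demo(void)
{
	struct page *page = alloc_page(GFP_KERNEL);
	void *mem;

	if (!page)
		return -ENOMEM;

	/* Pre-patch: no KASAN shadow was populated for this range. */
	mem = vm_map_ram(&page, 1, NUMA_NO_NODE, PAGE_KERNEL);
	if (!mem) {
		__free_page(page);
		return -ENOMEM;
	}

	/*
	 * Pre-patch: this access crashed because the inline shadow check
	 * read shadow pages that were never mapped. Post-patch,
	 * alloc_vmap_area() has already populated the shadow.
	 */
	memset(mem, 0, PAGE_SIZE);

	vm_unmap_ram(mem, 1);
	__free_page(page);
	return 0;
}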
1 parent 2187f21 commit d98c9e8

3 files changed, +67 -60 lines changed

include/linux/kasan.h

Lines changed: 9 additions & 6 deletions

@@ -205,20 +205,23 @@ static inline void *kasan_reset_tag(const void *addr)
 #endif /* CONFIG_KASAN_SW_TAGS */
 
 #ifdef CONFIG_KASAN_VMALLOC
-int kasan_populate_vmalloc(unsigned long requested_size,
-			   struct vm_struct *area);
-void kasan_poison_vmalloc(void *start, unsigned long size);
+int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
+void kasan_poison_vmalloc(const void *start, unsigned long size);
+void kasan_unpoison_vmalloc(const void *start, unsigned long size);
 void kasan_release_vmalloc(unsigned long start, unsigned long end,
 			   unsigned long free_region_start,
 			   unsigned long free_region_end);
 #else
-static inline int kasan_populate_vmalloc(unsigned long requested_size,
-					 struct vm_struct *area)
+static inline int kasan_populate_vmalloc(unsigned long start,
+					 unsigned long size)
 {
 	return 0;
 }
 
-static inline void kasan_poison_vmalloc(void *start, unsigned long size) {}
+static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
+{ }
+static inline void kasan_unpoison_vmalloc(const void *start, unsigned long size)
+{ }
 static inline void kasan_release_vmalloc(unsigned long start,
 					 unsigned long end,
 					 unsigned long free_region_start,
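The interface change in a nutshell: kasan_populate_vmalloc() now takes a bare (addr, size) pair instead of a vm_struct, so it can run inside alloc_vmap_area() before any vm_struct exists, and the poison/unpoison helpers take const pointers. Illustrative call shapes, with identifiers taken from this patch:

/* before: the caller needed a fully set-up vm_struct */
ret = kasan_populate_vmalloc(requested_size, area);

/* after: any KVA range works, which is what lets alloc_vmap_area()
 * cover every allocation path, including vm_map_ram() */
ret = kasan_populate_vmalloc(addr, size);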

mm/kasan/common.c

Lines changed: 18 additions & 9 deletions

@@ -778,15 +778,17 @@ static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
 	return 0;
 }
 
-int kasan_populate_vmalloc(unsigned long requested_size, struct vm_struct *area)
+int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
 {
 	unsigned long shadow_start, shadow_end;
 	int ret;
 
-	shadow_start = (unsigned long)kasan_mem_to_shadow(area->addr);
+	if (!is_vmalloc_or_module_addr((void *)addr))
+		return 0;
+
+	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)addr);
 	shadow_start = ALIGN_DOWN(shadow_start, PAGE_SIZE);
-	shadow_end = (unsigned long)kasan_mem_to_shadow(area->addr +
-							area->size);
+	shadow_end = (unsigned long)kasan_mem_to_shadow((void *)addr + size);
 	shadow_end = ALIGN(shadow_end, PAGE_SIZE);
 
 	ret = apply_to_page_range(&init_mm, shadow_start,
@@ -797,10 +799,6 @@ int kasan_populate_vmalloc(unsigned long requested_size, struct vm_struct *area)
 
 	flush_cache_vmap(shadow_start, shadow_end);
 
-	kasan_unpoison_shadow(area->addr, requested_size);
-
-	area->flags |= VM_KASAN;
-
 	/*
 	 * We need to be careful about inter-cpu effects here. Consider:
 	 *
@@ -843,12 +841,23 @@ int kasan_populate_vmalloc(unsigned long requested_size, struct vm_struct *area)
  * Poison the shadow for a vmalloc region. Called as part of the
  * freeing process at the time the region is freed.
  */
-void kasan_poison_vmalloc(void *start, unsigned long size)
+void kasan_poison_vmalloc(const void *start, unsigned long size)
 {
+	if (!is_vmalloc_or_module_addr(start))
+		return;
+
 	size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
 	kasan_poison_shadow(start, size, KASAN_VMALLOC_INVALID);
 }
 
+void kasan_unpoison_vmalloc(const void *start, unsigned long size)
+{
+	if (!is_vmalloc_or_module_addr(start))
+		return;
+
+	kasan_unpoison_shadow(start, size);
+}
+
 static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
 					void *unused)
 {
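For reference, the shadow_start/shadow_end arithmetic above relies on generic KASAN's address-to-shadow mapping, one shadow byte per KASAN_SHADOW_SCALE_SIZE (8) bytes of memory, defined in include/linux/kasan.h as:

static inline void *kasan_mem_to_shadow(const void *addr)
{
	/* one shadow byte tracks 8 bytes of real memory */
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}

ALIGN_DOWN()/ALIGN() then widen that shadow range to whole pages so apply_to_page_range() can install real backing pages, and the new is_vmalloc_or_module_addr() checks make the helpers safe to call unconditionally from generic KVA paths.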

mm/vmalloc.c

Lines changed: 40 additions & 45 deletions

@@ -1061,6 +1061,26 @@ __alloc_vmap_area(unsigned long size, unsigned long align,
 	return nva_start_addr;
 }
 
+/*
+ * Free a region of KVA allocated by alloc_vmap_area
+ */
+static void free_vmap_area(struct vmap_area *va)
+{
+	/*
+	 * Remove from the busy tree/list.
+	 */
+	spin_lock(&vmap_area_lock);
+	unlink_va(va, &vmap_area_root);
+	spin_unlock(&vmap_area_lock);
+
+	/*
+	 * Insert/Merge it back to the free tree/list.
+	 */
+	spin_lock(&free_vmap_area_lock);
+	merge_or_add_vmap_area(va, &free_vmap_area_root, &free_vmap_area_list);
+	spin_unlock(&free_vmap_area_lock);
+}
+
 /*
  * Allocate a region of KVA of the specified size and alignment, within the
  * vstart and vend.
@@ -1073,6 +1093,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 	struct vmap_area *va, *pva;
 	unsigned long addr;
 	int purged = 0;
+	int ret;
 
 	BUG_ON(!size);
 	BUG_ON(offset_in_page(size));
@@ -1139,6 +1160,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 	va->va_end = addr + size;
 	va->vm = NULL;
 
+
 	spin_lock(&vmap_area_lock);
 	insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
 	spin_unlock(&vmap_area_lock);
@@ -1147,6 +1169,12 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 	BUG_ON(va->va_start < vstart);
 	BUG_ON(va->va_end > vend);
 
+	ret = kasan_populate_vmalloc(addr, size);
+	if (ret) {
+		free_vmap_area(va);
+		return ERR_PTR(ret);
+	}
+
 	return va;
 
 overflow:
@@ -1185,26 +1213,6 @@ int unregister_vmap_purge_notifier(struct notifier_block *nb)
 }
 EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
 
-/*
- * Free a region of KVA allocated by alloc_vmap_area
- */
-static void free_vmap_area(struct vmap_area *va)
-{
-	/*
-	 * Remove from the busy tree/list.
-	 */
-	spin_lock(&vmap_area_lock);
-	unlink_va(va, &vmap_area_root);
-	spin_unlock(&vmap_area_lock);
-
-	/*
-	 * Insert/Merge it back to the free tree/list.
-	 */
-	spin_lock(&free_vmap_area_lock);
-	merge_or_add_vmap_area(va, &free_vmap_area_root, &free_vmap_area_list);
-	spin_unlock(&free_vmap_area_lock);
-}
-
 /*
  * Clear the pagetable entries of a given vmap_area
  */
@@ -1771,6 +1779,8 @@ void vm_unmap_ram(const void *mem, unsigned int count)
 	BUG_ON(addr > VMALLOC_END);
 	BUG_ON(!PAGE_ALIGNED(addr));
 
+	kasan_poison_vmalloc(mem, size);
+
 	if (likely(count <= VMAP_MAX_ALLOC)) {
 		debug_check_no_locks_freed(mem, size);
 		vb_free(mem, size);
@@ -1821,6 +1831,9 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
 		addr = va->va_start;
 		mem = (void *)addr;
 	}
+
+	kasan_unpoison_vmalloc(mem, size);
+
 	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
 		vm_unmap_ram(mem, count);
 		return NULL;
@@ -2075,6 +2088,7 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
 {
 	struct vmap_area *va;
 	struct vm_struct *area;
+	unsigned long requested_size = size;
 
 	BUG_ON(in_interrupt());
 	size = PAGE_ALIGN(size);
@@ -2098,23 +2112,9 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
 		return NULL;
 	}
 
-	setup_vmalloc_vm(area, va, flags, caller);
+	kasan_unpoison_vmalloc((void *)va->va_start, requested_size);
 
-	/*
-	 * For KASAN, if we are in vmalloc space, we need to cover the shadow
-	 * area with real memory. If we come here through VM_ALLOC, this is
-	 * done by a higher level function that has access to the true size,
-	 * which might not be a full page.
-	 *
-	 * We assume module space comes via VM_ALLOC path.
-	 */
-	if (is_vmalloc_addr(area->addr) && !(area->flags & VM_ALLOC)) {
-		if (kasan_populate_vmalloc(area->size, area)) {
-			unmap_vmap_area(va);
-			kfree(area);
-			return NULL;
-		}
-	}
+	setup_vmalloc_vm(area, va, flags, caller);
 
 	return area;
 }
@@ -2293,8 +2293,7 @@ static void __vunmap(const void *addr, int deallocate_pages)
 	debug_check_no_locks_freed(area->addr, get_vm_area_size(area));
 	debug_check_no_obj_freed(area->addr, get_vm_area_size(area));
 
-	if (area->flags & VM_KASAN)
-		kasan_poison_vmalloc(area->addr, area->size);
+	kasan_poison_vmalloc(area->addr, area->size);
 
 	vm_remove_mappings(area, deallocate_pages);
 
@@ -2539,7 +2538,7 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
 	if (!size || (size >> PAGE_SHIFT) > totalram_pages())
 		goto fail;
 
-	area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
+	area = __get_vm_area_node(real_size, align, VM_ALLOC | VM_UNINITIALIZED |
 				vm_flags, start, end, node, gfp_mask, caller);
 	if (!area)
 		goto fail;
@@ -2548,11 +2547,6 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
 	if (!addr)
 		return NULL;
 
-	if (is_vmalloc_or_module_addr(area->addr)) {
-		if (kasan_populate_vmalloc(real_size, area))
-			return NULL;
-	}
-
 	/*
 	 * In this function, newly allocated vm_struct has VM_UNINITIALIZED
	 * flag. It means that vm_struct is not fully initialized.
@@ -3437,7 +3431,8 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 	/* populate the shadow space outside of the lock */
 	for (area = 0; area < nr_vms; area++) {
 		/* assume success here */
-		kasan_populate_vmalloc(sizes[area], vms[area]);
+		kasan_populate_vmalloc(vas[area]->va_start, sizes[area]);
+		kasan_unpoison_vmalloc((void *)vms[area]->addr, sizes[area]);
 	}
 
 	kfree(vas);
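With population moved into alloc_vmap_area(), a shadow-allocation failure is reported through the returned pointer, and the KVA is rolled back via the relocated free_vmap_area(). A sketch of the resulting caller contract; illustrative only, using the function's signature as of this kernel, with range and flag values assumed:

struct vmap_area *va;

va = alloc_vmap_area(size, PAGE_SIZE, VMALLOC_START, VMALLOC_END,
		     NUMA_NO_NODE, GFP_KERNEL);
if (IS_ERR(va))
	/* includes -ENOMEM from kasan_populate_vmalloc(); the vmap_area
	 * has already been returned to the free tree */
	return ERR_CAST(va);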
