@@ -1061,6 +1061,26 @@ __alloc_vmap_area(unsigned long size, unsigned long align,
 	return nva_start_addr;
 }
 
+/*
+ * Free a region of KVA allocated by alloc_vmap_area
+ */
+static void free_vmap_area(struct vmap_area *va)
+{
+	/*
+	 * Remove from the busy tree/list.
+	 */
+	spin_lock(&vmap_area_lock);
+	unlink_va(va, &vmap_area_root);
+	spin_unlock(&vmap_area_lock);
+
+	/*
+	 * Insert/Merge it back to the free tree/list.
+	 */
+	spin_lock(&free_vmap_area_lock);
+	merge_or_add_vmap_area(va, &free_vmap_area_root, &free_vmap_area_list);
+	spin_unlock(&free_vmap_area_lock);
+}
+
 /*
  * Allocate a region of KVA of the specified size and alignment, within the
  * vstart and vend.
@@ -1073,6 +1093,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 	struct vmap_area *va, *pva;
 	unsigned long addr;
 	int purged = 0;
+	int ret;
 
 	BUG_ON(!size);
 	BUG_ON(offset_in_page(size));
@@ -1139,6 +1160,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 	va->va_end = addr + size;
 	va->vm = NULL;
 
+
 	spin_lock(&vmap_area_lock);
 	insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
 	spin_unlock(&vmap_area_lock);
@@ -1147,6 +1169,12 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 	BUG_ON(va->va_start < vstart);
 	BUG_ON(va->va_end > vend);
 
+	ret = kasan_populate_vmalloc(addr, size);
+	if (ret) {
+		free_vmap_area(va);
+		return ERR_PTR(ret);
+	}
+
 	return va;
 
 overflow:
@@ -1185,26 +1213,6 @@ int unregister_vmap_purge_notifier(struct notifier_block *nb)
 }
 EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
 
-/*
- * Free a region of KVA allocated by alloc_vmap_area
- */
-static void free_vmap_area(struct vmap_area *va)
-{
-	/*
-	 * Remove from the busy tree/list.
-	 */
-	spin_lock(&vmap_area_lock);
-	unlink_va(va, &vmap_area_root);
-	spin_unlock(&vmap_area_lock);
-
-	/*
-	 * Insert/Merge it back to the free tree/list.
-	 */
-	spin_lock(&free_vmap_area_lock);
-	merge_or_add_vmap_area(va, &free_vmap_area_root, &free_vmap_area_list);
-	spin_unlock(&free_vmap_area_lock);
-}
-
 /*
  * Clear the pagetable entries of a given vmap_area
  */
@@ -1771,6 +1779,8 @@ void vm_unmap_ram(const void *mem, unsigned int count)
 	BUG_ON(addr > VMALLOC_END);
 	BUG_ON(!PAGE_ALIGNED(addr));
 
+	kasan_poison_vmalloc(mem, size);
+
 	if (likely(count <= VMAP_MAX_ALLOC)) {
 		debug_check_no_locks_freed(mem, size);
 		vb_free(mem, size);
@@ -1821,6 +1831,9 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t pro
 		addr = va->va_start;
 		mem = (void *)addr;
 	}
+
+	kasan_unpoison_vmalloc(mem, size);
+
 	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
 		vm_unmap_ram(mem, count);
 		return NULL;
@@ -2075,6 +2088,7 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
 {
 	struct vmap_area *va;
 	struct vm_struct *area;
+	unsigned long requested_size = size;
 
 	BUG_ON(in_interrupt());
 	size = PAGE_ALIGN(size);
@@ -2098,23 +2112,9 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
 		return NULL;
 	}
 
-	setup_vmalloc_vm(area, va, flags, caller);
+	kasan_unpoison_vmalloc((void *)va->va_start, requested_size);
 
-	/*
-	 * For KASAN, if we are in vmalloc space, we need to cover the shadow
-	 * area with real memory. If we come here through VM_ALLOC, this is
-	 * done by a higher level function that has access to the true size,
-	 * which might not be a full page.
-	 *
-	 * We assume module space comes via VM_ALLOC path.
-	 */
-	if (is_vmalloc_addr(area->addr) && !(area->flags & VM_ALLOC)) {
-		if (kasan_populate_vmalloc(area->size, area)) {
-			unmap_vmap_area(va);
-			kfree(area);
-			return NULL;
-		}
-	}
+	setup_vmalloc_vm(area, va, flags, caller);
 
 	return area;
 }
@@ -2293,8 +2293,7 @@ static void __vunmap(const void *addr, int deallocate_pages)
 	debug_check_no_locks_freed(area->addr, get_vm_area_size(area));
 	debug_check_no_obj_freed(area->addr, get_vm_area_size(area));
 
-	if (area->flags & VM_KASAN)
-		kasan_poison_vmalloc(area->addr, area->size);
+	kasan_poison_vmalloc(area->addr, area->size);
 
 	vm_remove_mappings(area, deallocate_pages);
 
@@ -2539,7 +2538,7 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
 	if (!size || (size >> PAGE_SHIFT) > totalram_pages())
 		goto fail;
 
-	area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
+	area = __get_vm_area_node(real_size, align, VM_ALLOC | VM_UNINITIALIZED |
 				vm_flags, start, end, node, gfp_mask, caller);
 	if (!area)
 		goto fail;
@@ -2548,11 +2547,6 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
 	if (!addr)
 		return NULL;
 
-	if (is_vmalloc_or_module_addr(area->addr)) {
-		if (kasan_populate_vmalloc(real_size, area))
-			return NULL;
-	}
-
 	/*
 	 * In this function, newly allocated vm_struct has VM_UNINITIALIZED
 	 * flag. It means that vm_struct is not fully initialized.
@@ -3437,7 +3431,8 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 	/* populate the shadow space outside of the lock */
 	for (area = 0; area < nr_vms; area++) {
 		/* assume success here */
-		kasan_populate_vmalloc(sizes[area], vms[area]);
+		kasan_populate_vmalloc(vas[area]->va_start, sizes[area]);
+		kasan_unpoison_vmalloc((void *)vms[area]->addr, sizes[area]);
 	}
 
 	kfree(vas);