@@ -207,14 +207,14 @@ struct vm_stack {
 	struct vm_struct *stack_vm_area;
 };
 
-static bool try_release_thread_stack_to_cache(struct vm_struct *vm)
+static bool try_release_thread_stack_to_cache(struct vm_struct *vm_area)
 {
 	unsigned int i;
 
 	for (i = 0; i < NR_CACHED_STACKS; i++) {
 		struct vm_struct *tmp = NULL;
 
-		if (this_cpu_try_cmpxchg(cached_stacks[i], &tmp, vm))
+		if (this_cpu_try_cmpxchg(cached_stacks[i], &tmp, vm_area))
 			return true;
 	}
 	return false;
@@ -223,11 +223,12 @@ static bool try_release_thread_stack_to_cache(struct vm_struct *vm)
 static void thread_stack_free_rcu(struct rcu_head *rh)
 {
 	struct vm_stack *vm_stack = container_of(rh, struct vm_stack, rcu);
+	struct vm_struct *vm_area = vm_stack->stack_vm_area;
 
 	if (try_release_thread_stack_to_cache(vm_stack->stack_vm_area))
 		return;
 
-	vfree(vm_stack);
+	vfree(vm_area->addr);
 }
 
 static void thread_stack_delayed_free(struct task_struct *tsk)
@@ -240,71 +241,68 @@ static void thread_stack_delayed_free(struct task_struct *tsk)
 
 static int free_vm_stack_cache(unsigned int cpu)
 {
-	struct vm_struct **cached_vm_stacks = per_cpu_ptr(cached_stacks, cpu);
+	struct vm_struct **cached_vm_stack_areas = per_cpu_ptr(cached_stacks, cpu);
 	int i;
 
 	for (i = 0; i < NR_CACHED_STACKS; i++) {
-		struct vm_struct *vm_stack = cached_vm_stacks[i];
+		struct vm_struct *vm_area = cached_vm_stack_areas[i];
 
-		if (!vm_stack)
+		if (!vm_area)
 			continue;
 
-		vfree(vm_stack->addr);
-		cached_vm_stacks[i] = NULL;
+		vfree(vm_area->addr);
+		cached_vm_stack_areas[i] = NULL;
 	}
 
 	return 0;
 }
 
-static int memcg_charge_kernel_stack(struct vm_struct *vm)
+static int memcg_charge_kernel_stack(struct vm_struct *vm_area)
 {
 	int i;
 	int ret;
 	int nr_charged = 0;
 
-	BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE);
+	BUG_ON(vm_area->nr_pages != THREAD_SIZE / PAGE_SIZE);
 
 	for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
-		ret = memcg_kmem_charge_page(vm->pages[i], GFP_KERNEL, 0);
+		ret = memcg_kmem_charge_page(vm_area->pages[i], GFP_KERNEL, 0);
 		if (ret)
 			goto err;
 		nr_charged++;
 	}
 	return 0;
 err:
 	for (i = 0; i < nr_charged; i++)
-		memcg_kmem_uncharge_page(vm->pages[i], 0);
+		memcg_kmem_uncharge_page(vm_area->pages[i], 0);
 	return ret;
 }
 
 static int alloc_thread_stack_node(struct task_struct *tsk, int node)
 {
-	struct vm_struct *vm;
+	struct vm_struct *vm_area;
 	void *stack;
 	int i;
 
 	for (i = 0; i < NR_CACHED_STACKS; i++) {
-		struct vm_struct *s;
-
-		s = this_cpu_xchg(cached_stacks[i], NULL);
-
-		if (!s)
+		vm_area = this_cpu_xchg(cached_stacks[i], NULL);
+		if (!vm_area)
 			continue;
 
 		/* Reset stack metadata. */
-		kasan_unpoison_range(s->addr, THREAD_SIZE);
+		kasan_unpoison_range(vm_area->addr, THREAD_SIZE);
 
-		stack = kasan_reset_tag(s->addr);
+		stack = kasan_reset_tag(vm_area->addr);
 
 		/* Clear stale pointers from reused stack. */
 		memset(stack, 0, THREAD_SIZE);
 
-		if (memcg_charge_kernel_stack(s)) {
-			vfree(s->addr);
+		if (memcg_charge_kernel_stack(vm_area)) {
+			vfree(vm_area->addr);
 			return -ENOMEM;
 		}
 
-		tsk->stack_vm_area = s;
+		tsk->stack_vm_area = vm_area;
 		tsk->stack = stack;
 		return 0;
 	}
@@ -320,8 +318,8 @@ static int alloc_thread_stack_node(struct task_struct *tsk, int node)
 	if (!stack)
 		return -ENOMEM;
 
-	vm = find_vm_area(stack);
-	if (memcg_charge_kernel_stack(vm)) {
+	vm_area = find_vm_area(stack);
+	if (memcg_charge_kernel_stack(vm_area)) {
 		vfree(stack);
 		return -ENOMEM;
 	}
@@ -330,7 +328,7 @@ static int alloc_thread_stack_node(struct task_struct *tsk, int node)
 	 * free_thread_stack() can be called in interrupt context,
 	 * so cache the vm_struct.
 	 */
-	tsk->stack_vm_area = vm;
+	tsk->stack_vm_area = vm_area;
 	stack = kasan_reset_tag(stack);
 	tsk->stack = stack;
 	return 0;
@@ -437,11 +435,11 @@ static struct kmem_cache *mm_cachep;
 static void account_kernel_stack(struct task_struct *tsk, int account)
 {
 	if (IS_ENABLED(CONFIG_VMAP_STACK)) {
-		struct vm_struct *vm = task_stack_vm_area(tsk);
+		struct vm_struct *vm_area = task_stack_vm_area(tsk);
 		int i;
 
 		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
-			mod_lruvec_page_state(vm->pages[i], NR_KERNEL_STACK_KB,
+			mod_lruvec_page_state(vm_area->pages[i], NR_KERNEL_STACK_KB,
 					      account * (PAGE_SIZE / 1024));
 	} else {
 		void *stack = task_stack_page(tsk);
@@ -457,12 +455,12 @@ void exit_task_stack_account(struct task_struct *tsk)
 	account_kernel_stack(tsk, -1);
 
 	if (IS_ENABLED(CONFIG_VMAP_STACK)) {
-		struct vm_struct *vm;
+		struct vm_struct *vm_area;
 		int i;
 
-		vm = task_stack_vm_area(tsk);
+		vm_area = task_stack_vm_area(tsk);
 		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
-			memcg_kmem_uncharge_page(vm->pages[i], 0);
+			memcg_kmem_uncharge_page(vm_area->pages[i], 0);
 	}
 }
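Aside on the pattern these hunks rename: try_release_thread_stack_to_cache() parks a freed vmap stack in the first empty per-CPU cached_stacks slot with this_cpu_try_cmpxchg(), and the first loop in alloc_thread_stack_node() claims one back with this_cpu_xchg(). Below is a minimal user-space sketch of that claim/release protocol using C11 atomics in place of the kernel's per-CPU primitives; the function names, slot count, and malloc() fallback are illustrative stand-ins, not the kernel API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_CACHED_STACKS 5           /* same slot count the kernel keeps per CPU */
#define STACK_SIZE (16 * 1024)       /* stand-in for THREAD_SIZE */

/* One cache of reusable stack allocations; the kernel has one of these per CPU. */
static _Atomic(void *) cached_stacks[NR_CACHED_STACKS];

/*
 * Analogue of try_release_thread_stack_to_cache(): park a stack that is being
 * freed in the first empty slot.  The compare-and-swap only succeeds while the
 * slot still holds NULL, so concurrent releasers cannot overwrite each other.
 */
static bool try_release_stack_to_cache(void *stack)
{
	for (unsigned int i = 0; i < NR_CACHED_STACKS; i++) {
		void *expected = NULL;

		if (atomic_compare_exchange_strong(&cached_stacks[i], &expected, stack))
			return true;
	}
	return false;	/* cache full: the caller must really free the stack */
}

/*
 * Analogue of the cache-hit loop in alloc_thread_stack_node(): take ownership
 * of a cached stack by swapping the slot back to NULL, falling back to a fresh
 * allocation on a cache miss.
 */
static void *alloc_stack(void)
{
	for (unsigned int i = 0; i < NR_CACHED_STACKS; i++) {
		void *stack = atomic_exchange(&cached_stacks[i], NULL);

		if (stack)
			return stack;
	}
	return malloc(STACK_SIZE);
}

int main(void)
{
	void *stack = alloc_stack();

	if (!stack)
		return 1;

	/* ... the stack would be used here ... */

	if (try_release_stack_to_cache(stack))
		printf("stack %p parked for reuse\n", stack);
	else
		free(stack);

	return 0;
}

In the kernel the slots are per CPU and the this_cpu_*() helpers take care of preemption, so no locking is needed; the global atomics above merely make the same idea observable in ordinary C.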