@@ -198,14 +198,14 @@ struct vm_stack {
 	struct vm_struct *stack_vm_area;
 };
 
-static bool try_release_thread_stack_to_cache(struct vm_struct *vm)
+static bool try_release_thread_stack_to_cache(struct vm_struct *vm_area)
 {
 	unsigned int i;
 
 	for (i = 0; i < NR_CACHED_STACKS; i++) {
 		struct vm_struct *tmp = NULL;
 
-		if (this_cpu_try_cmpxchg(cached_stacks[i], &tmp, vm))
+		if (this_cpu_try_cmpxchg(cached_stacks[i], &tmp, vm_area))
 			return true;
 	}
 	return false;
@@ -214,11 +214,12 @@ static bool try_release_thread_stack_to_cache(struct vm_struct *vm)
 static void thread_stack_free_rcu(struct rcu_head *rh)
 {
 	struct vm_stack *vm_stack = container_of(rh, struct vm_stack, rcu);
+	struct vm_struct *vm_area = vm_stack->stack_vm_area;
 
 	if (try_release_thread_stack_to_cache(vm_stack->stack_vm_area))
 		return;
 
-	vfree(vm_stack);
+	vfree(vm_area->addr);
 }
 
 static void thread_stack_delayed_free(struct task_struct *tsk)
@@ -231,71 +232,68 @@ static void thread_stack_delayed_free(struct task_struct *tsk)
 
 static int free_vm_stack_cache(unsigned int cpu)
 {
-	struct vm_struct **cached_vm_stacks = per_cpu_ptr(cached_stacks, cpu);
+	struct vm_struct **cached_vm_stack_areas = per_cpu_ptr(cached_stacks, cpu);
 	int i;
 
 	for (i = 0; i < NR_CACHED_STACKS; i++) {
-		struct vm_struct *vm_stack = cached_vm_stacks[i];
+		struct vm_struct *vm_area = cached_vm_stack_areas[i];
 
-		if (!vm_stack)
+		if (!vm_area)
 			continue;
 
-		vfree(vm_stack->addr);
-		cached_vm_stacks[i] = NULL;
+		vfree(vm_area->addr);
+		cached_vm_stack_areas[i] = NULL;
 	}
 
 	return 0;
 }
 
-static int memcg_charge_kernel_stack(struct vm_struct *vm)
+static int memcg_charge_kernel_stack(struct vm_struct *vm_area)
 {
 	int i;
 	int ret;
 	int nr_charged = 0;
 
-	BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE);
+	BUG_ON(vm_area->nr_pages != THREAD_SIZE / PAGE_SIZE);
 
 	for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
-		ret = memcg_kmem_charge_page(vm->pages[i], GFP_KERNEL, 0);
+		ret = memcg_kmem_charge_page(vm_area->pages[i], GFP_KERNEL, 0);
 		if (ret)
 			goto err;
 		nr_charged++;
 	}
 	return 0;
 err:
 	for (i = 0; i < nr_charged; i++)
-		memcg_kmem_uncharge_page(vm->pages[i], 0);
+		memcg_kmem_uncharge_page(vm_area->pages[i], 0);
 	return ret;
 }
 
 static int alloc_thread_stack_node(struct task_struct *tsk, int node)
 {
-	struct vm_struct *vm;
+	struct vm_struct *vm_area;
 	void *stack;
 	int i;
 
 	for (i = 0; i < NR_CACHED_STACKS; i++) {
-		struct vm_struct *s;
-
-		s = this_cpu_xchg(cached_stacks[i], NULL);
-
-		if (!s)
+		vm_area = this_cpu_xchg(cached_stacks[i], NULL);
+		if (!vm_area)
 			continue;
 
 		/* Reset stack metadata. */
-		kasan_unpoison_range(s->addr, THREAD_SIZE);
+		kasan_unpoison_range(vm_area->addr, THREAD_SIZE);
 
-		stack = kasan_reset_tag(s->addr);
+		stack = kasan_reset_tag(vm_area->addr);
 
 		/* Clear stale pointers from reused stack. */
 		memset(stack, 0, THREAD_SIZE);
 
-		if (memcg_charge_kernel_stack(s)) {
-			vfree(s->addr);
+		if (memcg_charge_kernel_stack(vm_area)) {
+			vfree(vm_area->addr);
 			return -ENOMEM;
 		}
 
-		tsk->stack_vm_area = s;
+		tsk->stack_vm_area = vm_area;
 		tsk->stack = stack;
 		return 0;
 	}
@@ -311,8 +309,8 @@ static int alloc_thread_stack_node(struct task_struct *tsk, int node)
 	if (!stack)
 		return -ENOMEM;
 
-	vm = find_vm_area(stack);
-	if (memcg_charge_kernel_stack(vm)) {
+	vm_area = find_vm_area(stack);
+	if (memcg_charge_kernel_stack(vm_area)) {
 		vfree(stack);
 		return -ENOMEM;
 	}
@@ -321,7 +319,7 @@ static int alloc_thread_stack_node(struct task_struct *tsk, int node)
 	 * free_thread_stack() can be called in interrupt context,
 	 * so cache the vm_struct.
 	 */
-	tsk->stack_vm_area = vm;
+	tsk->stack_vm_area = vm_area;
 	stack = kasan_reset_tag(stack);
 	tsk->stack = stack;
 	return 0;
@@ -517,11 +515,11 @@ void vm_area_free(struct vm_area_struct *vma)
 static void account_kernel_stack(struct task_struct *tsk, int account)
 {
 	if (IS_ENABLED(CONFIG_VMAP_STACK)) {
-		struct vm_struct *vm = task_stack_vm_area(tsk);
+		struct vm_struct *vm_area = task_stack_vm_area(tsk);
 		int i;
 
 		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
-			mod_lruvec_page_state(vm->pages[i], NR_KERNEL_STACK_KB,
+			mod_lruvec_page_state(vm_area->pages[i], NR_KERNEL_STACK_KB,
 					      account * (PAGE_SIZE / 1024));
 	} else {
 		void *stack = task_stack_page(tsk);
@@ -537,12 +535,12 @@ void exit_task_stack_account(struct task_struct *tsk)
 	account_kernel_stack(tsk, -1);
 
 	if (IS_ENABLED(CONFIG_VMAP_STACK)) {
-		struct vm_struct *vm;
+		struct vm_struct *vm_area;
 		int i;
 
-		vm = task_stack_vm_area(tsk);
+		vm_area = task_stack_vm_area(tsk);
 		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
-			memcg_kmem_uncharge_page(vm->pages[i], 0);
+			memcg_kmem_uncharge_page(vm_area->pages[i], 0);
 	}
 }
 