@@ -185,7 +185,13 @@ static inline void free_task_struct(struct task_struct *tsk)
 	kmem_cache_free(task_struct_cachep, tsk);
 }
 
-#ifdef CONFIG_VMAP_STACK
+/*
+ * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
+ * kmemcache based allocator.
+ */
+# if THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)
+
+#  ifdef CONFIG_VMAP_STACK
 /*
  * vmalloc() is a bit slow, and calling vfree() enough times will force a TLB
  * flush. Try to minimize the number of calls by caching stacks.
@@ -198,14 +204,14 @@ struct vm_stack {
 	struct vm_struct *stack_vm_area;
 };
 
-static bool try_release_thread_stack_to_cache(struct vm_struct *vm_area)
+static bool try_release_thread_stack_to_cache(struct vm_struct *vm)
 {
 	unsigned int i;
 
 	for (i = 0; i < NR_CACHED_STACKS; i++) {
 		struct vm_struct *tmp = NULL;
 
-		if (this_cpu_try_cmpxchg(cached_stacks[i], &tmp, vm_area))
+		if (this_cpu_try_cmpxchg(cached_stacks[i], &tmp, vm))
 			return true;
 	}
 	return false;
@@ -214,12 +220,11 @@ static bool try_release_thread_stack_to_cache(struct vm_struct *vm_area)
 static void thread_stack_free_rcu(struct rcu_head *rh)
 {
 	struct vm_stack *vm_stack = container_of(rh, struct vm_stack, rcu);
-	struct vm_struct *vm_area = vm_stack->stack_vm_area;
 
 	if (try_release_thread_stack_to_cache(vm_stack->stack_vm_area))
 		return;
 
-	vfree(vm_area->addr);
+	vfree(vm_stack);
 }
 
 static void thread_stack_delayed_free(struct task_struct *tsk)
@@ -232,68 +237,71 @@ static void thread_stack_delayed_free(struct task_struct *tsk)
 
 static int free_vm_stack_cache(unsigned int cpu)
 {
-	struct vm_struct **cached_vm_stack_areas = per_cpu_ptr(cached_stacks, cpu);
+	struct vm_struct **cached_vm_stacks = per_cpu_ptr(cached_stacks, cpu);
 	int i;
 
 	for (i = 0; i < NR_CACHED_STACKS; i++) {
-		struct vm_struct *vm_area = cached_vm_stack_areas[i];
+		struct vm_struct *vm_stack = cached_vm_stacks[i];
 
-		if (!vm_area)
+		if (!vm_stack)
 			continue;
 
-		vfree(vm_area->addr);
-		cached_vm_stack_areas[i] = NULL;
+		vfree(vm_stack->addr);
+		cached_vm_stacks[i] = NULL;
 	}
 
 	return 0;
 }
 
-static int memcg_charge_kernel_stack(struct vm_struct *vm_area)
+static int memcg_charge_kernel_stack(struct vm_struct *vm)
 {
 	int i;
 	int ret;
 	int nr_charged = 0;
 
-	BUG_ON(vm_area->nr_pages != THREAD_SIZE / PAGE_SIZE);
+	BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE);
 
 	for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
-		ret = memcg_kmem_charge_page(vm_area->pages[i], GFP_KERNEL, 0);
+		ret = memcg_kmem_charge_page(vm->pages[i], GFP_KERNEL, 0);
 		if (ret)
 			goto err;
 		nr_charged++;
 	}
 	return 0;
 err:
 	for (i = 0; i < nr_charged; i++)
-		memcg_kmem_uncharge_page(vm_area->pages[i], 0);
+		memcg_kmem_uncharge_page(vm->pages[i], 0);
 	return ret;
 }
 
 static int alloc_thread_stack_node(struct task_struct *tsk, int node)
 {
-	struct vm_struct *vm_area;
+	struct vm_struct *vm;
 	void *stack;
 	int i;
 
 	for (i = 0; i < NR_CACHED_STACKS; i++) {
-		vm_area = this_cpu_xchg(cached_stacks[i], NULL);
-		if (!vm_area)
-			continue;
+		struct vm_struct *s;
 
-		if (memcg_charge_kernel_stack(vm_area)) {
-			vfree(vm_area->addr);
-			return -ENOMEM;
-		}
+		s = this_cpu_xchg(cached_stacks[i], NULL);
+
+		if (!s)
+			continue;
 
 		/* Reset stack metadata. */
-		kasan_unpoison_range(vm_area->addr, THREAD_SIZE);
+		kasan_unpoison_range(s->addr, THREAD_SIZE);
 
-		stack = kasan_reset_tag(vm_area->addr);
+		stack = kasan_reset_tag(s->addr);
 
 		/* Clear stale pointers from reused stack. */
 		memset(stack, 0, THREAD_SIZE);
 
-		tsk->stack_vm_area = vm_area;
+		if (memcg_charge_kernel_stack(s)) {
+			vfree(s->addr);
+			return -ENOMEM;
+		}
+
+		tsk->stack_vm_area = s;
 		tsk->stack = stack;
 		return 0;
 	}
@@ -309,8 +317,8 @@ static int alloc_thread_stack_node(struct task_struct *tsk, int node)
 	if (!stack)
 		return -ENOMEM;
 
-	vm_area = find_vm_area(stack);
-	if (memcg_charge_kernel_stack(vm_area)) {
+	vm = find_vm_area(stack);
+	if (memcg_charge_kernel_stack(vm)) {
 		vfree(stack);
 		return -ENOMEM;
 	}
@@ -319,7 +327,7 @@ static int alloc_thread_stack_node(struct task_struct *tsk, int node)
 	 * free_thread_stack() can be called in interrupt context,
 	 * so cache the vm_struct.
 	 */
-	tsk->stack_vm_area = vm_area;
+	tsk->stack_vm_area = vm;
 	stack = kasan_reset_tag(stack);
 	tsk->stack = stack;
 	return 0;
@@ -334,13 +342,7 @@ static void free_thread_stack(struct task_struct *tsk)
 	tsk->stack_vm_area = NULL;
 }
 
-#else /* !CONFIG_VMAP_STACK */
-
-/*
- * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
- * kmemcache based allocator.
- */
-#if THREAD_SIZE >= PAGE_SIZE
+#  else /* !CONFIG_VMAP_STACK */
 
 static void thread_stack_free_rcu(struct rcu_head *rh)
 {
@@ -372,7 +374,8 @@ static void free_thread_stack(struct task_struct *tsk)
 	tsk->stack = NULL;
 }
 
-#else /* !(THREAD_SIZE >= PAGE_SIZE) */
+#  endif /* CONFIG_VMAP_STACK */
+# else /* !(THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)) */
 
 static struct kmem_cache *thread_stack_cache;
 
@@ -411,8 +414,7 @@ void thread_stack_cache_init(void)
 	BUG_ON(thread_stack_cache == NULL);
 }
 
-#endif /* THREAD_SIZE >= PAGE_SIZE */
-#endif /* CONFIG_VMAP_STACK */
+# endif /* THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK) */
 
 /* SLAB cache for signal_struct structures (tsk->signal) */
 static struct kmem_cache *signal_cachep;
@@ -515,11 +517,11 @@ void vm_area_free(struct vm_area_struct *vma)
 static void account_kernel_stack(struct task_struct *tsk, int account)
 {
 	if (IS_ENABLED(CONFIG_VMAP_STACK)) {
-		struct vm_struct *vm_area = task_stack_vm_area(tsk);
+		struct vm_struct *vm = task_stack_vm_area(tsk);
 		int i;
 
 		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
-			mod_lruvec_page_state(vm_area->pages[i], NR_KERNEL_STACK_KB,
+			mod_lruvec_page_state(vm->pages[i], NR_KERNEL_STACK_KB,
 					      account * (PAGE_SIZE / 1024));
 	} else {
 		void *stack = task_stack_page(tsk);
@@ -535,12 +537,12 @@ void exit_task_stack_account(struct task_struct *tsk)
 	account_kernel_stack(tsk, -1);
 
 	if (IS_ENABLED(CONFIG_VMAP_STACK)) {
-		struct vm_struct *vm_area;
+		struct vm_struct *vm;
 		int i;
 
-		vm_area = task_stack_vm_area(tsk);
+		vm = task_stack_vm_area(tsk);
 		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
-			memcg_kmem_uncharge_page(vm_area->pages[i], 0);
+			memcg_kmem_uncharge_page(vm->pages[i], 0);
 	}
 }
 
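For readers unfamiliar with the caching scheme these hunks touch: instead of vfree()ing a vmapped stack on task exit, the stack's vm_struct is parked in a per-CPU slot by try_release_thread_stack_to_cache(), and alloc_thread_stack_node() pulls from those slots (clearing stale contents) before falling back to a fresh allocation. Below is a minimal, single-threaded userspace sketch of that reuse pattern; the helper names, the fixed THREAD_SIZE, and the calloc()/free() fallback are illustrative assumptions, not the kernel code, which uses per-CPU slots manipulated with this_cpu_xchg()/this_cpu_try_cmpxchg() and caches the vm_struct describing the vmapped stack rather than a raw pointer.

/*
 * Standalone model of the stack-cache reuse pattern (illustrative only;
 * names and sizes are assumptions, not the kernel implementation).
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NR_CACHED_STACKS 2
#define THREAD_SIZE (16 * 1024)		/* assumed stack size for the model */

static void *cached_stacks[NR_CACHED_STACKS];

/* Park a released stack in an empty cache slot instead of freeing it. */
static int try_release_stack_to_cache(void *stack)
{
	for (int i = 0; i < NR_CACHED_STACKS; i++) {
		if (!cached_stacks[i]) {
			cached_stacks[i] = stack;
			return 1;
		}
	}
	return 0;
}

/* Prefer a cached stack; fall back to a fresh allocation. */
static void *alloc_stack(void)
{
	for (int i = 0; i < NR_CACHED_STACKS; i++) {
		if (cached_stacks[i]) {
			void *stack = cached_stacks[i];

			cached_stacks[i] = NULL;
			/* Clear stale contents from the reused stack. */
			memset(stack, 0, THREAD_SIZE);
			return stack;
		}
	}
	return calloc(1, THREAD_SIZE);
}

static void free_stack(void *stack)
{
	if (!try_release_stack_to_cache(stack))
		free(stack);
}

int main(void)
{
	void *a = alloc_stack();

	free_stack(a);			/* goes back into the cache */
	void *b = alloc_stack();	/* expected to reuse the cached stack */
	printf("reused cached stack: %s\n", a == b ? "yes" : "no");
	free_stack(b);
	return 0;
}

The kernel version achieves the same effect without locks because each CPU owns its own slot array and updates it with this_cpu_xchg()/this_cpu_try_cmpxchg(); the memcg charge in alloc_thread_stack_node() is the extra step a cached stack must still pass before being handed to the new task.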