@@ -188,13 +188,7 @@ static inline void free_task_struct(struct task_struct *tsk)
 	kmem_cache_free(task_struct_cachep, tsk);
 }
 
-/*
- * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
- * kmemcache based allocator.
- */
-# if THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)
-
-#  ifdef CONFIG_VMAP_STACK
+#ifdef CONFIG_VMAP_STACK
 /*
  * vmalloc() is a bit slow, and calling vfree() enough times will force a TLB
  * flush.  Try to minimize the number of calls by caching stacks.
@@ -344,7 +338,13 @@ static void free_thread_stack(struct task_struct *tsk)
 	tsk->stack_vm_area = NULL;
 }
 
-#  else /* !CONFIG_VMAP_STACK */
+#else /* !CONFIG_VMAP_STACK */
+
+/*
+ * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
+ * kmemcache based allocator.
+ */
+#if THREAD_SIZE >= PAGE_SIZE
 
 static void thread_stack_free_rcu(struct rcu_head *rh)
 {
@@ -376,8 +376,7 @@ static void free_thread_stack(struct task_struct *tsk)
 	tsk->stack = NULL;
 }
 
-#  endif /* CONFIG_VMAP_STACK */
-# else /* !(THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK)) */
+#else /* !(THREAD_SIZE >= PAGE_SIZE) */
 
 static struct kmem_cache *thread_stack_cache;
 
@@ -416,7 +415,8 @@ void thread_stack_cache_init(void)
 	BUG_ON(thread_stack_cache == NULL);
 }
 
-# endif /* THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK) */
+#endif /* THREAD_SIZE >= PAGE_SIZE */
+#endif /* CONFIG_VMAP_STACK */
 
 /* SLAB cache for signal_struct structures (tsk->signal) */
 static struct kmem_cache *signal_cachep;
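
For orientation, here is a condensed sketch (mine, not part of the commit) of the preprocessor skeleton kernel/fork.c ends up with after this change; the branch summaries are paraphrased from the comments visible in the diff:

#ifdef CONFIG_VMAP_STACK
	/* Virtually mapped stacks: vmalloc()-backed, with a cache of freed
	 * stacks to limit vmalloc()/vfree() calls and the TLB flushes that
	 * repeated vfree() eventually forces. */
#else /* !CONFIG_VMAP_STACK */
#if THREAD_SIZE >= PAGE_SIZE
	/* Stacks are allocated directly from the page allocator. */
#else /* !(THREAD_SIZE >= PAGE_SIZE) */
	/* Stacks come from the dedicated thread_stack_cache kmem_cache
	 * set up in thread_stack_cache_init(). */
#endif /* THREAD_SIZE >= PAGE_SIZE */
#endif /* CONFIG_VMAP_STACK */

The cleanup replaces the old combined guard (an outer # if THREAD_SIZE >= PAGE_SIZE || defined(CONFIG_VMAP_STACK) with #  ifdef CONFIG_VMAP_STACK nested inside it) with this flat split: the vmap-stack path never uses the kmemcache-based allocator, so the THREAD_SIZE test only needs to exist in the !CONFIG_VMAP_STACK branch.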