Commit 449e0b4

soleen authored and akpm00 committed
fork: clean-up naming of vm_stack/vm_struct variables in vmap stacks code
There are two data types: "struct vm_struct" and "struct vm_stack" that have the same local variable names: vm_stack, or vm, or s, which makes the code confusing to read.

Change the code so the naming is consistent:

struct vm_struct is always called vm_area
struct vm_stack is always called vm_stack

One change, altering vfree(vm_stack) to vfree(vm_area->addr), may look like a semantic change but it is not: vm_area->addr points to the vm_stack. This was done to improve readability.

[[email protected]: rebased and added new users of the variable names, address review comments]
Link: https://lore.kernel.org/[email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Pasha Tatashin <[email protected]>
Signed-off-by: Linus Walleij <[email protected]>
Acked-by: Mike Rapoport (Microsoft) <[email protected]>
Cc: Mateusz Guzik <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
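One way to see why the vfree() substitution is not a behavioural change: per the commit message, the struct vm_stack bookkeeping sits at the base of the vmapped stack, so a pointer to the vm_stack and vm_area->addr hold the same address. Below is a minimal user-space sketch of that layout (hypothetical names, not kernel code) illustrating the equivalence:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical analogue of the kernel layout: the bookkeeping struct sits
 * at the base of the allocation it describes, so a pointer to the struct
 * equals the allocation's base address. */
struct area { void *addr; };              /* stands in for struct vm_struct */
struct stack_hdr { struct area *area; };  /* stands in for struct vm_stack  */

int main(void)
{
	struct area a;

	a.addr = malloc(4096);            /* the "stack" allocation      */
	struct stack_hdr *hdr = a.addr;   /* header lives at its base    */
	hdr->area = &a;

	/* Freeing via the header pointer or via area->addr passes the
	 * exact same address to the allocator. */
	printf("%d\n", (void *)hdr == hdr->area->addr);  /* prints 1 */
	free(hdr->area->addr);
	return 0;
}

Either pointer names the same base address, which is why swapping vfree(vm_stack) for vfree(vm_area->addr) in thread_stack_free_rcu() below is purely a readability change.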
1 parent 08e2153 commit 449e0b4

1 file changed: +29 −31 lines

kernel/fork.c

Lines changed: 29 additions & 31 deletions
@@ -207,14 +207,14 @@ struct vm_stack {
 	struct vm_struct *stack_vm_area;
 };
 
-static bool try_release_thread_stack_to_cache(struct vm_struct *vm)
+static bool try_release_thread_stack_to_cache(struct vm_struct *vm_area)
 {
 	unsigned int i;
 
 	for (i = 0; i < NR_CACHED_STACKS; i++) {
 		struct vm_struct *tmp = NULL;
 
-		if (this_cpu_try_cmpxchg(cached_stacks[i], &tmp, vm))
+		if (this_cpu_try_cmpxchg(cached_stacks[i], &tmp, vm_area))
 			return true;
 	}
 	return false;
@@ -223,11 +223,12 @@ static bool try_release_thread_stack_to_cache(struct vm_struct *vm)
 static void thread_stack_free_rcu(struct rcu_head *rh)
 {
 	struct vm_stack *vm_stack = container_of(rh, struct vm_stack, rcu);
+	struct vm_struct *vm_area = vm_stack->stack_vm_area;
 
 	if (try_release_thread_stack_to_cache(vm_stack->stack_vm_area))
 		return;
 
-	vfree(vm_stack);
+	vfree(vm_area->addr);
 }
 
 static void thread_stack_delayed_free(struct task_struct *tsk)
@@ -240,71 +241,68 @@ static void thread_stack_delayed_free(struct task_struct *tsk)
 
 static int free_vm_stack_cache(unsigned int cpu)
 {
-	struct vm_struct **cached_vm_stacks = per_cpu_ptr(cached_stacks, cpu);
+	struct vm_struct **cached_vm_stack_areas = per_cpu_ptr(cached_stacks, cpu);
 	int i;
 
 	for (i = 0; i < NR_CACHED_STACKS; i++) {
-		struct vm_struct *vm_stack = cached_vm_stacks[i];
+		struct vm_struct *vm_area = cached_vm_stack_areas[i];
 
-		if (!vm_stack)
+		if (!vm_area)
 			continue;
 
-		vfree(vm_stack->addr);
-		cached_vm_stacks[i] = NULL;
+		vfree(vm_area->addr);
+		cached_vm_stack_areas[i] = NULL;
 	}
 
 	return 0;
 }
 
-static int memcg_charge_kernel_stack(struct vm_struct *vm)
+static int memcg_charge_kernel_stack(struct vm_struct *vm_area)
 {
 	int i;
 	int ret;
 	int nr_charged = 0;
 
-	BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE);
+	BUG_ON(vm_area->nr_pages != THREAD_SIZE / PAGE_SIZE);
 
 	for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
-		ret = memcg_kmem_charge_page(vm->pages[i], GFP_KERNEL, 0);
+		ret = memcg_kmem_charge_page(vm_area->pages[i], GFP_KERNEL, 0);
 		if (ret)
 			goto err;
 		nr_charged++;
 	}
 	return 0;
 err:
 	for (i = 0; i < nr_charged; i++)
-		memcg_kmem_uncharge_page(vm->pages[i], 0);
+		memcg_kmem_uncharge_page(vm_area->pages[i], 0);
 	return ret;
 }
 
 static int alloc_thread_stack_node(struct task_struct *tsk, int node)
 {
-	struct vm_struct *vm;
+	struct vm_struct *vm_area;
 	void *stack;
 	int i;
 
 	for (i = 0; i < NR_CACHED_STACKS; i++) {
-		struct vm_struct *s;
-
-		s = this_cpu_xchg(cached_stacks[i], NULL);
-
-		if (!s)
+		vm_area = this_cpu_xchg(cached_stacks[i], NULL);
+		if (!vm_area)
 			continue;
 
 		/* Reset stack metadata. */
-		kasan_unpoison_range(s->addr, THREAD_SIZE);
+		kasan_unpoison_range(vm_area->addr, THREAD_SIZE);
 
-		stack = kasan_reset_tag(s->addr);
+		stack = kasan_reset_tag(vm_area->addr);
 
 		/* Clear stale pointers from reused stack. */
 		memset(stack, 0, THREAD_SIZE);
 
-		if (memcg_charge_kernel_stack(s)) {
-			vfree(s->addr);
+		if (memcg_charge_kernel_stack(vm_area)) {
+			vfree(vm_area->addr);
 			return -ENOMEM;
 		}
 
-		tsk->stack_vm_area = s;
+		tsk->stack_vm_area = vm_area;
 		tsk->stack = stack;
 		return 0;
 	}
@@ -320,8 +318,8 @@ static int alloc_thread_stack_node(struct task_struct *tsk, int node)
 	if (!stack)
 		return -ENOMEM;
 
-	vm = find_vm_area(stack);
-	if (memcg_charge_kernel_stack(vm)) {
+	vm_area = find_vm_area(stack);
+	if (memcg_charge_kernel_stack(vm_area)) {
 		vfree(stack);
 		return -ENOMEM;
 	}
@@ -330,7 +328,7 @@ static int alloc_thread_stack_node(struct task_struct *tsk, int node)
 	 * free_thread_stack() can be called in interrupt context,
 	 * so cache the vm_struct.
 	 */
-	tsk->stack_vm_area = vm;
+	tsk->stack_vm_area = vm_area;
 	stack = kasan_reset_tag(stack);
 	tsk->stack = stack;
 	return 0;
@@ -437,11 +435,11 @@ static struct kmem_cache *mm_cachep;
 static void account_kernel_stack(struct task_struct *tsk, int account)
 {
 	if (IS_ENABLED(CONFIG_VMAP_STACK)) {
-		struct vm_struct *vm = task_stack_vm_area(tsk);
+		struct vm_struct *vm_area = task_stack_vm_area(tsk);
 		int i;
 
 		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
-			mod_lruvec_page_state(vm->pages[i], NR_KERNEL_STACK_KB,
+			mod_lruvec_page_state(vm_area->pages[i], NR_KERNEL_STACK_KB,
 					      account * (PAGE_SIZE / 1024));
 	} else {
 		void *stack = task_stack_page(tsk);
@@ -457,12 +455,12 @@ void exit_task_stack_account(struct task_struct *tsk)
 	account_kernel_stack(tsk, -1);
 
 	if (IS_ENABLED(CONFIG_VMAP_STACK)) {
-		struct vm_struct *vm;
+		struct vm_struct *vm_area;
 		int i;
 
-		vm = task_stack_vm_area(tsk);
+		vm_area = task_stack_vm_area(tsk);
 		for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
-			memcg_kmem_uncharge_page(vm->pages[i], 0);
+			memcg_kmem_uncharge_page(vm_area->pages[i], 0);
 	}
 }
 
