
Commit 90eb270

soleen authored and akpm00 committed
fork: clean-up naming of vm_stack/vm_struct variables in vmap stacks code
There are two data types, "struct vm_struct" and "struct vm_stack", that are held in local variables with overlapping names (vm_stack, vm, or s), which makes the code confusing to read.

Change the code so the naming is consistent:

    struct vm_struct is always called vm_area
    struct vm_stack is always called vm_stack

One change, altering vfree(vm_stack) to vfree(vm_area->addr), may look like a semantic change but it is not: vm_area->addr points to the vm_stack. It was made purely to improve readability.

[[email protected]: rebased and added new users of the variable names, addressed review comments]
Link: https://lore.kernel.org/[email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Pasha Tatashin <[email protected]>
Signed-off-by: Linus Walleij <[email protected]>
Acked-by: Mike Rapoport (Microsoft) <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
1 parent 85e1f75 commit 90eb270
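Why vfree(vm_area->addr) frees the same thing the old vfree(vm_stack) did: thread_stack_delayed_free() overlays struct vm_stack on the stack memory itself, at the start of the vmapped area, so the vm_stack pointer and vm_area->addr name the same allocation. The following is a minimal user-space sketch of that aliasing, not kernel code; plain malloc stands in for the vmap allocation and the structs are trimmed down for illustration:

/* Stand-ins for the kernel types, trimmed down for illustration only. */
#include <stdio.h>
#include <stdlib.h>

struct vm_struct {                      /* describes a vmapped area */
        void *addr;
};

struct vm_stack {                       /* overlaid on the stack memory */
        struct vm_struct *stack_vm_area;
};

int main(void)
{
        /* "vmap" a 16 KiB stack; addr points at the backing memory. */
        struct vm_struct vm_area = { .addr = malloc(16384) };

        if (!vm_area.addr)
                return 1;

        /* The kernel places struct vm_stack at the start of that memory
         * (see thread_stack_delayed_free()), so the two pointers alias. */
        struct vm_stack *vm_stack = vm_area.addr;
        vm_stack->stack_vm_area = &vm_area;

        /* vm_stack == vm_area.addr, so freeing either names the same
         * block; switching from vfree(vm_stack) to vfree(vm_area->addr)
         * is cosmetic. */
        printf("%p == %p\n", (void *)vm_stack, vm_area.addr);
        free(vm_area.addr);
        return 0;
}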

kernel/fork.c

Lines changed: 29 additions & 31 deletions
@@ -198,14 +198,14 @@ struct vm_stack {
         struct vm_struct *stack_vm_area;
 };

-static bool try_release_thread_stack_to_cache(struct vm_struct *vm)
+static bool try_release_thread_stack_to_cache(struct vm_struct *vm_area)
 {
         unsigned int i;

         for (i = 0; i < NR_CACHED_STACKS; i++) {
                 struct vm_struct *tmp = NULL;

-                if (this_cpu_try_cmpxchg(cached_stacks[i], &tmp, vm))
+                if (this_cpu_try_cmpxchg(cached_stacks[i], &tmp, vm_area))
                         return true;
         }
         return false;
@@ -214,11 +214,12 @@ static bool try_release_thread_stack_to_cache(struct vm_struct *vm)
 static void thread_stack_free_rcu(struct rcu_head *rh)
 {
         struct vm_stack *vm_stack = container_of(rh, struct vm_stack, rcu);
+        struct vm_struct *vm_area = vm_stack->stack_vm_area;

         if (try_release_thread_stack_to_cache(vm_stack->stack_vm_area))
                 return;

-        vfree(vm_stack);
+        vfree(vm_area->addr);
 }

 static void thread_stack_delayed_free(struct task_struct *tsk)
@@ -231,71 +232,68 @@ static void thread_stack_delayed_free(struct task_struct *tsk)

 static int free_vm_stack_cache(unsigned int cpu)
 {
-        struct vm_struct **cached_vm_stacks = per_cpu_ptr(cached_stacks, cpu);
+        struct vm_struct **cached_vm_stack_areas = per_cpu_ptr(cached_stacks, cpu);
         int i;

         for (i = 0; i < NR_CACHED_STACKS; i++) {
-                struct vm_struct *vm_stack = cached_vm_stacks[i];
+                struct vm_struct *vm_area = cached_vm_stack_areas[i];

-                if (!vm_stack)
+                if (!vm_area)
                         continue;

-                vfree(vm_stack->addr);
-                cached_vm_stacks[i] = NULL;
+                vfree(vm_area->addr);
+                cached_vm_stack_areas[i] = NULL;
         }

         return 0;
 }

-static int memcg_charge_kernel_stack(struct vm_struct *vm)
+static int memcg_charge_kernel_stack(struct vm_struct *vm_area)
 {
         int i;
         int ret;
         int nr_charged = 0;

-        BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE);
+        BUG_ON(vm_area->nr_pages != THREAD_SIZE / PAGE_SIZE);

         for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++) {
-                ret = memcg_kmem_charge_page(vm->pages[i], GFP_KERNEL, 0);
+                ret = memcg_kmem_charge_page(vm_area->pages[i], GFP_KERNEL, 0);
                 if (ret)
                         goto err;
                 nr_charged++;
         }
         return 0;
 err:
         for (i = 0; i < nr_charged; i++)
-                memcg_kmem_uncharge_page(vm->pages[i], 0);
+                memcg_kmem_uncharge_page(vm_area->pages[i], 0);
         return ret;
 }

 static int alloc_thread_stack_node(struct task_struct *tsk, int node)
 {
-        struct vm_struct *vm;
+        struct vm_struct *vm_area;
         void *stack;
         int i;

         for (i = 0; i < NR_CACHED_STACKS; i++) {
-                struct vm_struct *s;
-
-                s = this_cpu_xchg(cached_stacks[i], NULL);
-
-                if (!s)
+                vm_area = this_cpu_xchg(cached_stacks[i], NULL);
+                if (!vm_area)
                         continue;

                 /* Reset stack metadata. */
-                kasan_unpoison_range(s->addr, THREAD_SIZE);
+                kasan_unpoison_range(vm_area->addr, THREAD_SIZE);

-                stack = kasan_reset_tag(s->addr);
+                stack = kasan_reset_tag(vm_area->addr);

                 /* Clear stale pointers from reused stack. */
                 memset(stack, 0, THREAD_SIZE);

-                if (memcg_charge_kernel_stack(s)) {
-                        vfree(s->addr);
+                if (memcg_charge_kernel_stack(vm_area)) {
+                        vfree(vm_area->addr);
                         return -ENOMEM;
                 }

-                tsk->stack_vm_area = s;
+                tsk->stack_vm_area = vm_area;
                 tsk->stack = stack;
                 return 0;
         }
@@ -311,8 +309,8 @@ static int alloc_thread_stack_node(struct task_struct *tsk, int node)
         if (!stack)
                 return -ENOMEM;

-        vm = find_vm_area(stack);
-        if (memcg_charge_kernel_stack(vm)) {
+        vm_area = find_vm_area(stack);
+        if (memcg_charge_kernel_stack(vm_area)) {
                 vfree(stack);
                 return -ENOMEM;
         }
@@ -321,7 +319,7 @@ static int alloc_thread_stack_node(struct task_struct *tsk, int node)
          * free_thread_stack() can be called in interrupt context,
          * so cache the vm_struct.
          */
-        tsk->stack_vm_area = vm;
+        tsk->stack_vm_area = vm_area;
         stack = kasan_reset_tag(stack);
         tsk->stack = stack;
         return 0;
@@ -517,11 +515,11 @@ void vm_area_free(struct vm_area_struct *vma)
 static void account_kernel_stack(struct task_struct *tsk, int account)
 {
         if (IS_ENABLED(CONFIG_VMAP_STACK)) {
-                struct vm_struct *vm = task_stack_vm_area(tsk);
+                struct vm_struct *vm_area = task_stack_vm_area(tsk);
                 int i;

                 for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
-                        mod_lruvec_page_state(vm->pages[i], NR_KERNEL_STACK_KB,
+                        mod_lruvec_page_state(vm_area->pages[i], NR_KERNEL_STACK_KB,
                                               account * (PAGE_SIZE / 1024));
         } else {
                 void *stack = task_stack_page(tsk);
@@ -537,12 +535,12 @@ void exit_task_stack_account(struct task_struct *tsk)
         account_kernel_stack(tsk, -1);

         if (IS_ENABLED(CONFIG_VMAP_STACK)) {
-                struct vm_struct *vm;
+                struct vm_struct *vm_area;
                 int i;

-                vm = task_stack_vm_area(tsk);
+                vm_area = task_stack_vm_area(tsk);
                 for (i = 0; i < THREAD_SIZE / PAGE_SIZE; i++)
-                        memcg_kmem_uncharge_page(vm->pages[i], 0);
+                        memcg_kmem_uncharge_page(vm_area->pages[i], 0);
         }
 }