
Commit 4340984

fgraph: Give ret_stack its own kmem cache
The ret_stack (shadow stack used by the function graph infrastructure) is created for every task on the system when function graph is enabled. Give it its own kmem_cache. This will make it easier to see how much memory is being used specifically for function graph shadow stacks.

In the future, this size may change and may not be a power of two. Having its own cache can also keep it from fragmenting memory.

Cc: Mark Rutland <[email protected]>
Cc: Mathieu Desnoyers <[email protected]>
Cc: Ryan Roberts <[email protected]>
Link: https://lore.kernel.org/[email protected]
Acked-by: Masami Hiramatsu (Google) <[email protected]>
Signed-off-by: Steven Rostedt (Google) <[email protected]>
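For context (not part of the commit itself): the change adopts the standard dedicated slab cache pattern, where a named cache is created once with kmem_cache_create() and fixed-size objects are then allocated and freed through it. The stand-alone module below is a minimal sketch of that lifecycle under stated assumptions, not code from this commit; the names "demo_stack", DEMO_STACK_SIZE, and the demo_* functions are hypothetical stand-ins (the commit uses "fgraph_stack" and SHADOW_STACK_SIZE, creates its cache lazily in register_ftrace_graph(), and never destroys it).

/*
 * Minimal sketch of the dedicated kmem_cache pattern this commit
 * applies to the fgraph shadow stacks.  All names here are
 * hypothetical stand-ins, not the commit's own identifiers.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>

#define DEMO_STACK_SIZE 4096	/* stand-in for SHADOW_STACK_SIZE */

static struct kmem_cache *demo_stack_cachep;

static int __init demo_init(void)
{
	unsigned long *stack;

	/*
	 * A named cache gets its own row in /proc/slabinfo, which is
	 * what makes the memory usage visible.  Size and alignment are
	 * both DEMO_STACK_SIZE here, mirroring the commit's call.
	 */
	demo_stack_cachep = kmem_cache_create("demo_stack",
					      DEMO_STACK_SIZE,
					      DEMO_STACK_SIZE, 0, NULL);
	if (!demo_stack_cachep)
		return -ENOMEM;

	/* Objects come from and return to the dedicated cache. */
	stack = kmem_cache_alloc(demo_stack_cachep, GFP_KERNEL);
	if (!stack) {
		kmem_cache_destroy(demo_stack_cachep);
		return -ENOMEM;
	}
	kmem_cache_free(demo_stack_cachep, stack);
	return 0;
}

static void __exit demo_exit(void)
{
	kmem_cache_destroy(demo_stack_cachep);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_DESCRIPTION("Dedicated kmem_cache lifecycle demo");
MODULE_LICENSE("GPL");

Because the commit's cache is named, its footprint should appear as its own slabinfo row once function graph tracing has allocated from it, e.g. grep fgraph_stack /proc/slabinfo (assuming slabinfo is available in the running kernel).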
1 parent 6ea8b69 commit 4340984

1 file changed (+28, -5)

kernel/trace/fgraph.c (28 additions, 5 deletions)

@@ -172,6 +172,8 @@ enum {
 DEFINE_STATIC_KEY_FALSE(kill_ftrace_graph);
 int ftrace_graph_active;
 
+static struct kmem_cache *fgraph_stack_cachep;
+
 static struct fgraph_ops *fgraph_array[FGRAPH_ARRAY_SIZE];
 static unsigned long fgraph_array_bitmask;
 
@@ -1022,8 +1024,11 @@ static int alloc_retstack_tasklist(unsigned long **ret_stack_list)
 	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
 	struct task_struct *g, *t;
 
+	if (WARN_ON_ONCE(!fgraph_stack_cachep))
+		return -ENOMEM;
+
 	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
-		ret_stack_list[i] = kmalloc(SHADOW_STACK_SIZE, GFP_KERNEL);
+		ret_stack_list[i] = kmem_cache_alloc(fgraph_stack_cachep, GFP_KERNEL);
 		if (!ret_stack_list[i]) {
 			start = 0;
 			end = i;
@@ -1054,7 +1059,7 @@ static int alloc_retstack_tasklist(unsigned long **ret_stack_list)
 	rcu_read_unlock();
 free:
 	for (i = start; i < end; i++)
-		kfree(ret_stack_list[i]);
+		kmem_cache_free(fgraph_stack_cachep, ret_stack_list[i]);
 	return ret;
 }
 
@@ -1117,9 +1122,12 @@ void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
 	if (ftrace_graph_active) {
 		unsigned long *ret_stack;
 
+		if (WARN_ON_ONCE(!fgraph_stack_cachep))
+			return;
+
 		ret_stack = per_cpu(idle_ret_stack, cpu);
 		if (!ret_stack) {
-			ret_stack = kmalloc(SHADOW_STACK_SIZE, GFP_KERNEL);
+			ret_stack = kmem_cache_alloc(fgraph_stack_cachep, GFP_KERNEL);
 			if (!ret_stack)
 				return;
 			per_cpu(idle_ret_stack, cpu) = ret_stack;
@@ -1139,7 +1147,10 @@ void ftrace_graph_init_task(struct task_struct *t)
 	if (ftrace_graph_active) {
 		unsigned long *ret_stack;
 
-		ret_stack = kmalloc(SHADOW_STACK_SIZE, GFP_KERNEL);
+		if (WARN_ON_ONCE(!fgraph_stack_cachep))
+			return;
+
+		ret_stack = kmem_cache_alloc(fgraph_stack_cachep, GFP_KERNEL);
 		if (!ret_stack)
 			return;
 		graph_init_task(t, ret_stack);
@@ -1154,7 +1165,11 @@ void ftrace_graph_exit_task(struct task_struct *t)
 	/* NULL must become visible to IRQs before we free it: */
 	barrier();
 
-	kfree(ret_stack);
+	if (ret_stack) {
+		if (WARN_ON_ONCE(!fgraph_stack_cachep))
+			return;
+		kmem_cache_free(fgraph_stack_cachep, ret_stack);
+	}
 }
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -1294,6 +1309,14 @@ int register_ftrace_graph(struct fgraph_ops *gops)
 
 	guard(mutex)(&ftrace_lock);
 
+	if (!fgraph_stack_cachep) {
+		fgraph_stack_cachep = kmem_cache_create("fgraph_stack",
+							SHADOW_STACK_SIZE,
+							SHADOW_STACK_SIZE, 0, NULL);
+		if (!fgraph_stack_cachep)
+			return -ENOMEM;
+	}
+
 	if (!fgraph_initialized) {
 		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "fgraph:online",
 					fgraph_cpu_init, NULL);
