Commit f62e3de
ftrace: Do not disable function graph based on "disabled" field
The per CPU "disabled" value was the original way to disable tracing when the tracing subsystem was first created. Today, the ring buffer infrastructure has its own way to disable tracing. In fact, things have changed so much since 2008 that many things ignore the disable flag.

Do not bother disabling the function graph tracer if the per CPU "disabled" field is set. Just record as normal. If tracing is disabled in the ring buffer, it will not be recorded. Also, when tracing is enabled again, it will not drop the return call of the function.

Cc: Masami Hiramatsu <[email protected]>
Cc: Mark Rutland <[email protected]>
Cc: Mathieu Desnoyers <[email protected]>
Cc: Andrew Morton <[email protected]>
Link: https://lore.kernel.org/[email protected]
Signed-off-by: Steven Rostedt (Google) <[email protected]>
1 parent a9839d2 commit f62e3de
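The shape of the change is easiest to see distilled from the diff below: the old code wrapped every event in per-CPU "disabled" bookkeeping, while the new code records unconditionally and lets the ring buffer drop the write itself. A side-by-side sketch, extracted from the trace_graph_return() hunks of this diff (not a standalone program):

/* Before: per-CPU "disabled" gate around each recorded event. */
preempt_disable_notrace();
cpu = raw_smp_processor_id();
data = per_cpu_ptr(tr->array_buffer.data, cpu);
disabled = atomic_read(&data->disabled);
if (likely(!disabled)) {
	trace_ctx = tracing_gen_ctx();
	__trace_graph_return(tr, trace, trace_ctx, calltime, rettime);
}
preempt_enable_notrace();

/*
 * After: record unconditionally. If the ring buffer has recording
 * disabled, the write is rejected at that layer, and a function's
 * return event is no longer dropped when tracing is re-enabled
 * between entry and return.
 */
trace_ctx = tracing_gen_ctx();
__trace_graph_return(tr, trace, trace_ctx, calltime, rettime);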

File tree: 1 file changed, +9 −29 lines

kernel/trace/trace_functions_graph.c

Lines changed: 9 additions & 29 deletions
@@ -202,12 +202,9 @@ static int graph_entry(struct ftrace_graph_ent *trace,
 {
 	unsigned long *task_var = fgraph_get_task_var(gops);
 	struct trace_array *tr = gops->private;
-	struct trace_array_cpu *data;
 	struct fgraph_times *ftimes;
 	unsigned int trace_ctx;
-	long disabled;
 	int ret = 0;
-	int cpu;
 
 	if (*task_var & TRACE_GRAPH_NOTRACE)
 		return 0;
@@ -257,21 +254,14 @@ static int graph_entry(struct ftrace_graph_ent *trace,
 	if (tracing_thresh)
 		return 1;
 
-	preempt_disable_notrace();
-	cpu = raw_smp_processor_id();
-	data = per_cpu_ptr(tr->array_buffer.data, cpu);
-	disabled = atomic_read(&data->disabled);
-	if (likely(!disabled)) {
-		trace_ctx = tracing_gen_ctx();
-		if (IS_ENABLED(CONFIG_FUNCTION_GRAPH_RETADDR) &&
-		    tracer_flags_is_set(TRACE_GRAPH_PRINT_RETADDR)) {
-			unsigned long retaddr = ftrace_graph_top_ret_addr(current);
-			ret = __trace_graph_retaddr_entry(tr, trace, trace_ctx, retaddr);
-		} else {
-			ret = __graph_entry(tr, trace, trace_ctx, fregs);
-		}
+	trace_ctx = tracing_gen_ctx();
+	if (IS_ENABLED(CONFIG_FUNCTION_GRAPH_RETADDR) &&
+	    tracer_flags_is_set(TRACE_GRAPH_PRINT_RETADDR)) {
+		unsigned long retaddr = ftrace_graph_top_ret_addr(current);
+		ret = __trace_graph_retaddr_entry(tr, trace, trace_ctx, retaddr);
+	} else {
+		ret = __graph_entry(tr, trace, trace_ctx, fregs);
 	}
-	preempt_enable_notrace();
 
 	return ret;
 }
@@ -351,13 +341,10 @@ void trace_graph_return(struct ftrace_graph_ret *trace,
 {
 	unsigned long *task_var = fgraph_get_task_var(gops);
 	struct trace_array *tr = gops->private;
-	struct trace_array_cpu *data;
 	struct fgraph_times *ftimes;
 	unsigned int trace_ctx;
 	u64 calltime, rettime;
-	long disabled;
 	int size;
-	int cpu;
 
 	rettime = trace_clock_local();
 
@@ -376,15 +363,8 @@ void trace_graph_return(struct ftrace_graph_ret *trace,
 
 	calltime = ftimes->calltime;
 
-	preempt_disable_notrace();
-	cpu = raw_smp_processor_id();
-	data = per_cpu_ptr(tr->array_buffer.data, cpu);
-	disabled = atomic_read(&data->disabled);
-	if (likely(!disabled)) {
-		trace_ctx = tracing_gen_ctx();
-		__trace_graph_return(tr, trace, trace_ctx, calltime, rettime);
-	}
-	preempt_enable_notrace();
+	trace_ctx = tracing_gen_ctx();
+	__trace_graph_return(tr, trace, trace_ctx, calltime, rettime);
 }
 
 static void trace_graph_thresh_return(struct ftrace_graph_ret *trace,
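For background on why the per-CPU gate is redundant: the modern disable mechanism lives in the ring buffer write path, where a failed event reservation means nothing is written at all. A minimal sketch of that path, paraphrased from memory of the helpers in kernel/trace (trace_buffer_lock_reserve(), ring_buffer_event_data(), trace_buffer_unlock_commit_nostack()) rather than quoted verbatim, so treat the exact entry field layout as an assumption:

/*
 * Sketch: __trace_graph_return() reserves space in the ring buffer;
 * trace_buffer_lock_reserve() returns NULL when recording is disabled
 * (or the buffer is full), so the event is simply never written.
 */
void __trace_graph_return(struct trace_array *tr, struct ftrace_graph_ret *trace,
			  unsigned int trace_ctx, u64 calltime, u64 rettime)
{
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;
	struct ring_buffer_event *event;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), trace_ctx);
	if (!event)
		return;			/* disabled: drop here, not in the tracer */
	entry = ring_buffer_event_data(event);
	entry->ret = *trace;		/* assumed ftrace_graph_ret_entry layout */
	entry->calltime = calltime;
	entry->rettime = rettime;
	trace_buffer_unlock_commit_nostack(buffer, event);
}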
