
Commit f692a6c

Merge tag 'trace-v6.13-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace
Pull tracing fixes from Steven Rostedt:

 - Fix a regression in the irqsoff and wakeup latency tracing

   The function graph tracer infrastructure has become generic so that
   fprobes and BPF can be based on it. As it used to only handle function
   graph tracing, it would always record the time a function was entered
   so that, at function exit, it could calculate how long the function
   executed. But this is not needed for the other users (fprobes and
   BPF), and reading the clock adds non-negligible overhead, so the
   calculation was moved into the function graph tracer logic.

   The irqsoff and wakeup latency tracers, however, use the function
   graph tracer to time functions during the latency when the
   "display-graph" option is set. The movement of the calltime
   calculation left the value zero for these tracers, so the output no
   longer showed the duration of each function but instead the absolute
   timestamp of when the function returned (rettime - calltime, where
   calltime is now zero). Have the irqsoff and wakeup latency tracers do
   the calltime calculation themselves, as the function graph tracer
   does, and report the proper function timings.

 - Update the tracing display to reflect the new preempt lazy model

   When the system is configured with lazy preemption, the trace output
   would state "unknown" for the current preemption model. Because the
   lazy preemption model was only recently added, make it known to the
   tracing subsystem too. This is just a one-line change.

 - Document that multiple function graph tracers may show slightly
   different timings

   Now that the function graph tracer infrastructure is separate, the
   function graph tracer can run in multiple instances (it was not able
   to do so before). If two instances run the function graph tracer and
   trace the same functions, their timings will differ slightly because
   each takes its own timestamps and collects them at different moments.
   Document this so people are not confused by it.

* tag 'trace-v6.13-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace:
  ftrace: Document that multiple function_graph tracing may have different times
  tracing: Print lazy preemption model
  tracing: Fix irqsoff and wakeup latency tracers when using function graph
2 parents ce69b40 + 94d529a commit f692a6c
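
For orientation before the hunks below: the fix has each latency tracer save its own entry timestamp in the per-callback storage that the generic function graph infrastructure provides, then read it back on return so that rettime - calltime is once again the function's duration. A minimal sketch of that pattern, using the same fgraph_reserve_data(), fgraph_retrieve_data() and trace_clock_local() calls as the hunks below, but with hypothetical callback names and the tracer-specific prolog checks omitted:

/* Sketch only: hypothetical fgraph callbacks showing the calltime pattern
 * this fix applies in irqsoff and wakeup; the tracer-specific checks and
 * the actual event recording are omitted.
 */
static int my_graph_entry(struct ftrace_graph_ent *trace,
			  struct fgraph_ops *gops)
{
	u64 *calltime;

	/* Reserve per-entry storage from the fgraph infrastructure. */
	calltime = fgraph_reserve_data(gops->idx, sizeof(*calltime));
	if (!calltime)
		return 0;

	/* Remember when this function was entered. */
	*calltime = trace_clock_local();

	return 1;
}

static void my_graph_return(struct ftrace_graph_ret *trace,
			    struct fgraph_ops *gops)
{
	u64 *calltime;
	int size;

	/* Fetch the timestamp saved by the entry callback. */
	calltime = fgraph_retrieve_data(gops->idx, &size);
	if (!calltime)
		return;

	/* Restore calltime so rettime - calltime is the real duration. */
	trace->calltime = *calltime;
}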

File tree: 4 files changed, +35 −0 lines

Documentation/trace/ftrace.rst

Lines changed: 6 additions & 0 deletions
@@ -810,6 +810,12 @@ Here is the list of current tracers that may be configured.
 		to draw a graph of function calls similar to C code
 		source.
 
+		Note that the function graph calculates the timings of when the
+		function starts and returns internally and for each instance. If
+		there are two instances that run function graph tracer and traces
+		the same functions, the length of the timings may be slightly off as
+		each read the timestamp separately and not at the same time.
+
   "blk"
 
 		The block tracer. The tracer used by the blktrace user
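
To observe the behaviour this new paragraph describes, two function graph instances have to trace the same functions at once. A small userspace sketch is shown here, assuming tracefs is mounted at /sys/kernel/tracing, root privileges, and a hypothetical instance name "foo"; comparing the per-function timings in the two "trace" files should show the slight differences the documentation now calls out.

/* Sketch only: create a second trace instance ("foo" is a hypothetical
 * name) and run function_graph in both it and the top-level instance,
 * assuming tracefs is mounted at /sys/kernel/tracing.
 */
#include <errno.h>
#include <stdio.h>
#include <sys/stat.h>

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -errno;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	/* Making a directory under instances/ creates a new trace instance. */
	if (mkdir("/sys/kernel/tracing/instances/foo", 0755) && errno != EEXIST) {
		perror("mkdir instance");
		return 1;
	}

	/* Enable the function graph tracer in both instances. */
	write_str("/sys/kernel/tracing/current_tracer", "function_graph");
	write_str("/sys/kernel/tracing/instances/foo/current_tracer", "function_graph");

	/* Compare timings by reading each instance's "trace" file. */
	return 0;
}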

kernel/trace/trace.c

Lines changed: 1 addition & 0 deletions
@@ -4122,6 +4122,7 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
 		   preempt_model_none()      ? "server" :
 		   preempt_model_voluntary() ? "desktop" :
 		   preempt_model_full()      ? "preempt" :
+		   preempt_model_lazy()      ? "lazy" :
 		   preempt_model_rt()        ? "preempt_rt" :
 		   "unknown",
 		   /* These are reserved for later use */

kernel/trace/trace_irqsoff.c

Lines changed: 14 additions & 0 deletions
@@ -182,6 +182,7 @@ static int irqsoff_graph_entry(struct ftrace_graph_ent *trace,
 	struct trace_array_cpu *data;
 	unsigned long flags;
 	unsigned int trace_ctx;
+	u64 *calltime;
 	int ret;
 
 	if (ftrace_graph_ignore_func(gops, trace))
@@ -199,6 +200,12 @@ static int irqsoff_graph_entry(struct ftrace_graph_ent *trace,
 	if (!func_prolog_dec(tr, &data, &flags))
 		return 0;
 
+	calltime = fgraph_reserve_data(gops->idx, sizeof(*calltime));
+	if (!calltime)
+		return 0;
+
+	*calltime = trace_clock_local();
+
 	trace_ctx = tracing_gen_ctx_flags(flags);
 	ret = __trace_graph_entry(tr, trace, trace_ctx);
 	atomic_dec(&data->disabled);
@@ -213,12 +220,19 @@ static void irqsoff_graph_return(struct ftrace_graph_ret *trace,
 	struct trace_array_cpu *data;
 	unsigned long flags;
 	unsigned int trace_ctx;
+	u64 *calltime;
+	int size;
 
 	ftrace_graph_addr_finish(gops, trace);
 
 	if (!func_prolog_dec(tr, &data, &flags))
 		return;
 
+	calltime = fgraph_retrieve_data(gops->idx, &size);
+	if (!calltime)
+		return;
+	trace->calltime = *calltime;
+
 	trace_ctx = tracing_gen_ctx_flags(flags);
 	__trace_graph_return(tr, trace, trace_ctx);
 	atomic_dec(&data->disabled);

kernel/trace/trace_sched_wakeup.c

Lines changed: 14 additions & 0 deletions
@@ -118,6 +118,7 @@ static int wakeup_graph_entry(struct ftrace_graph_ent *trace,
 	struct trace_array *tr = wakeup_trace;
 	struct trace_array_cpu *data;
 	unsigned int trace_ctx;
+	u64 *calltime;
 	int ret = 0;
 
 	if (ftrace_graph_ignore_func(gops, trace))
@@ -135,6 +136,12 @@ static int wakeup_graph_entry(struct ftrace_graph_ent *trace,
 	if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
 		return 0;
 
+	calltime = fgraph_reserve_data(gops->idx, sizeof(*calltime));
+	if (!calltime)
+		return 0;
+
+	*calltime = trace_clock_local();
+
 	ret = __trace_graph_entry(tr, trace, trace_ctx);
 	atomic_dec(&data->disabled);
 	preempt_enable_notrace();
@@ -148,12 +155,19 @@ static void wakeup_graph_return(struct ftrace_graph_ret *trace,
 	struct trace_array *tr = wakeup_trace;
 	struct trace_array_cpu *data;
 	unsigned int trace_ctx;
+	u64 *calltime;
+	int size;
 
 	ftrace_graph_addr_finish(gops, trace);
 
 	if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
 		return;
 
+	calltime = fgraph_retrieve_data(gops->idx, &size);
+	if (!calltime)
+		return;
+	trace->calltime = *calltime;
+
 	__trace_graph_return(tr, trace, trace_ctx);
 	atomic_dec(&data->disabled);
