@@ -181,10 +181,9 @@ int trace_graph_entry(struct ftrace_graph_ent *trace,
 	struct trace_array *tr = gops->private;
 	struct trace_array_cpu *data;
 	struct fgraph_times *ftimes;
-	unsigned long flags;
 	unsigned int trace_ctx;
 	long disabled;
-	int ret;
+	int ret = 0;
 	int cpu;
 
 	if (*task_var & TRACE_GRAPH_NOTRACE)
@@ -235,25 +234,21 @@ int trace_graph_entry(struct ftrace_graph_ent *trace,
 	if (tracing_thresh)
 		return 1;
 
-	local_irq_save(flags);
+	preempt_disable_notrace();
 	cpu = raw_smp_processor_id();
 	data = per_cpu_ptr(tr->array_buffer.data, cpu);
-	disabled = atomic_inc_return(&data->disabled);
-	if (likely(disabled == 1)) {
-		trace_ctx = tracing_gen_ctx_flags(flags);
-		if (unlikely(IS_ENABLED(CONFIG_FUNCTION_GRAPH_RETADDR) &&
-			     tracer_flags_is_set(TRACE_GRAPH_PRINT_RETADDR))) {
+	disabled = atomic_read(&data->disabled);
+	if (likely(!disabled)) {
+		trace_ctx = tracing_gen_ctx();
+		if (IS_ENABLED(CONFIG_FUNCTION_GRAPH_RETADDR) &&
+		    tracer_flags_is_set(TRACE_GRAPH_PRINT_RETADDR)) {
 			unsigned long retaddr = ftrace_graph_top_ret_addr(current);
-
 			ret = __trace_graph_retaddr_entry(tr, trace, trace_ctx, retaddr);
-		} else
+		} else {
 			ret = __trace_graph_entry(tr, trace, trace_ctx);
-	} else {
-		ret = 0;
+		}
 	}
-
-	atomic_dec(&data->disabled);
-	local_irq_restore(flags);
+	preempt_enable_notrace();
 
 	return ret;
 }
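
Note: both hunks of trace_graph_entry() swap the local_irq_save()/atomic_inc_return() critical section for the lighter preempt_disable_notrace()/atomic_read() pattern, and with ret pre-initialized to 0 the old else-branch that cleared it can go. Disabling preemption is enough to keep raw_smp_processor_id() and the per_cpu_ptr() result stable, and the lockless ring buffer already handles writers nesting in from interrupt context, so data->disabled only needs to be read as an on/off flag rather than incremented as a recursion guard. A minimal sketch of the resulting shape — illustrative only; record_event() and write_event() are hypothetical stand-ins, while the guard calls are the real kernel APIs:

/* Sketch: guarding a per-CPU buffer by disabling preemption only.
 * record_event() and write_event() are hypothetical stand-ins. */
static int record_event(struct trace_array *tr, void *event)
{
	struct trace_array_cpu *data;
	int ret = 0;

	preempt_disable_notrace();	/* _notrace: don't recurse into ftrace */
	data = per_cpu_ptr(tr->array_buffer.data, raw_smp_processor_id());

	/* "disabled" is consulted as an on/off switch; nested writers
	 * (IRQs, NMIs) are handled by the lockless ring buffer itself. */
	if (likely(!atomic_read(&data->disabled)))
		ret = write_event(data, event);	/* hypothetical writer */

	preempt_enable_notrace();
	return ret;
}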
@@ -320,7 +315,6 @@ void trace_graph_return(struct ftrace_graph_ret *trace,
 	struct trace_array *tr = gops->private;
 	struct trace_array_cpu *data;
 	struct fgraph_times *ftimes;
-	unsigned long flags;
 	unsigned int trace_ctx;
 	long disabled;
 	int size;
@@ -341,16 +335,15 @@ void trace_graph_return(struct ftrace_graph_ret *trace,
 
 	trace->calltime = ftimes->calltime;
 
-	local_irq_save(flags);
+	preempt_disable_notrace();
 	cpu = raw_smp_processor_id();
 	data = per_cpu_ptr(tr->array_buffer.data, cpu);
-	disabled = atomic_inc_return(&data->disabled);
-	if (likely(disabled == 1)) {
-		trace_ctx = tracing_gen_ctx_flags(flags);
+	disabled = atomic_read(&data->disabled);
+	if (likely(!disabled)) {
+		trace_ctx = tracing_gen_ctx();
 		__trace_graph_return(tr, trace, trace_ctx);
 	}
-	atomic_dec(&data->disabled);
-	local_irq_restore(flags);
+	preempt_enable_notrace();
 }
 
 static void trace_graph_thresh_return(struct ftrace_graph_ret *trace,
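
Note: the switch from tracing_gen_ctx_flags(flags) to tracing_gen_ctx() in both functions falls out of dropping local_irq_save(): there is no saved flags word left to pass, so the helper samples the live interrupt state itself. Roughly, paraphrasing the helper in kernel/trace/trace.h (the exact definition varies with CONFIG_TRACE_IRQFLAGS_SUPPORT):

/* Paraphrased sketch, not the verbatim kernel source. */
static inline unsigned int tracing_gen_ctx(void)
{
	unsigned long irqflags;

	local_save_flags(irqflags);	/* sample current IRQ state */
	return tracing_gen_ctx_flags(irqflags);
}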