Skip to content

Commit 90633c3

Browse files
committed
tracing: Convert the per CPU "disabled" counter to local from atomic
The per CPU "disabled" counter is used for the latency tracers and stack tracers to make sure that their accounting isn't messed up by an NMI or interrupt coming in and affecting the same CPU data. But the counter is an atomic_t type. As it only needs to synchronize against the current CPU, switch it over to local_t type.

Cc: Masami Hiramatsu <[email protected]>
Cc: Mark Rutland <[email protected]>
Cc: Mathieu Desnoyers <[email protected]>
Cc: Andrew Morton <[email protected]>
Link: https://lore.kernel.org/[email protected]
Signed-off-by: Steven Rostedt (Google) <[email protected]>
1 parent cf64792 commit 90633c3

File tree

4 files changed

+25
-25
lines changed

4 files changed

+25
-25
lines changed

kernel/trace/trace.h

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -183,7 +183,7 @@ struct trace_array;
183183
* the trace, etc.)
184184
*/
185185
struct trace_array_cpu {
186-
atomic_t disabled;
186+
local_t disabled;
187187
void *buffer_page; /* ring buffer spare */
188188

189189
unsigned long entries;

kernel/trace/trace_functions.c

Lines changed: 4 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -291,7 +291,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
291291
parent_ip = function_get_true_parent_ip(parent_ip, fregs);
292292
cpu = raw_smp_processor_id();
293293
data = per_cpu_ptr(tr->array_buffer.data, cpu);
294-
disabled = atomic_inc_return(&data->disabled);
294+
disabled = local_inc_return(&data->disabled);
295295

296296
if (likely(disabled == 1)) {
297297
trace_ctx = tracing_gen_ctx_flags(flags);
@@ -303,7 +303,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
303303
__trace_stack(tr, trace_ctx, skip);
304304
}
305305

306-
atomic_dec(&data->disabled);
306+
local_dec(&data->disabled);
307307
local_irq_restore(flags);
308308
}
309309

@@ -402,7 +402,7 @@ function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
402402
parent_ip = function_get_true_parent_ip(parent_ip, fregs);
403403
cpu = raw_smp_processor_id();
404404
data = per_cpu_ptr(tr->array_buffer.data, cpu);
405-
disabled = atomic_inc_return(&data->disabled);
405+
disabled = local_inc_return(&data->disabled);
406406

407407
if (likely(disabled == 1)) {
408408
last_info = per_cpu_ptr(tr->last_func_repeats, cpu);
@@ -417,7 +417,7 @@ function_stack_no_repeats_trace_call(unsigned long ip, unsigned long parent_ip,
417417
}
418418

419419
out:
420-
atomic_dec(&data->disabled);
420+
local_dec(&data->disabled);
421421
local_irq_restore(flags);
422422
}
423423

kernel/trace/trace_irqsoff.c

Lines changed: 11 additions & 11 deletions
Original file line number | Diff line number | Diff line change
@@ -123,12 +123,12 @@ static int func_prolog_dec(struct trace_array *tr,
123123
return 0;
124124

125125
*data = per_cpu_ptr(tr->array_buffer.data, cpu);
126-
disabled = atomic_inc_return(&(*data)->disabled);
126+
disabled = local_inc_return(&(*data)->disabled);
127127

128128
if (likely(disabled == 1))
129129
return 1;
130130

131-
atomic_dec(&(*data)->disabled);
131+
local_dec(&(*data)->disabled);
132132

133133
return 0;
134134
}
@@ -152,7 +152,7 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
152152

153153
trace_function(tr, ip, parent_ip, trace_ctx, fregs);
154154

155-
atomic_dec(&data->disabled);
155+
local_dec(&data->disabled);
156156
}
157157
#endif /* CONFIG_FUNCTION_TRACER */
158158

@@ -209,7 +209,7 @@ static int irqsoff_graph_entry(struct ftrace_graph_ent *trace,
209209

210210
trace_ctx = tracing_gen_ctx_flags(flags);
211211
ret = __trace_graph_entry(tr, trace, trace_ctx);
212-
atomic_dec(&data->disabled);
212+
local_dec(&data->disabled);
213213

214214
return ret;
215215
}
@@ -238,7 +238,7 @@ static void irqsoff_graph_return(struct ftrace_graph_ret *trace,
238238

239239
trace_ctx = tracing_gen_ctx_flags(flags);
240240
__trace_graph_return(tr, trace, trace_ctx, *calltime, rettime);
241-
atomic_dec(&data->disabled);
241+
local_dec(&data->disabled);
242242
}
243243

244244
static struct fgraph_ops fgraph_ops = {
@@ -408,10 +408,10 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
408408

409409
data = per_cpu_ptr(tr->array_buffer.data, cpu);
410410

411-
if (unlikely(!data) || atomic_read(&data->disabled))
411+
if (unlikely(!data) || local_read(&data->disabled))
412412
return;
413413

414-
atomic_inc(&data->disabled);
414+
local_inc(&data->disabled);
415415

416416
data->critical_sequence = max_sequence;
417417
data->preempt_timestamp = ftrace_now(cpu);
@@ -421,7 +421,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
421421

422422
per_cpu(tracing_cpu, cpu) = 1;
423423

424-
atomic_dec(&data->disabled);
424+
local_dec(&data->disabled);
425425
}
426426

427427
static nokprobe_inline void
@@ -445,16 +445,16 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
445445
data = per_cpu_ptr(tr->array_buffer.data, cpu);
446446

447447
if (unlikely(!data) ||
448-
!data->critical_start || atomic_read(&data->disabled))
448+
!data->critical_start || local_read(&data->disabled))
449449
return;
450450

451-
atomic_inc(&data->disabled);
451+
local_inc(&data->disabled);
452452

453453
trace_ctx = tracing_gen_ctx();
454454
__trace_function(tr, ip, parent_ip, trace_ctx);
455455
check_critical_timing(tr, data, parent_ip ? : ip, cpu);
456456
data->critical_start = 0;
457-
atomic_dec(&data->disabled);
457+
local_dec(&data->disabled);
458458
}
459459

460460
/* start and stop critical timings used to for stoppage (in idle) */

kernel/trace/trace_sched_wakeup.c

Lines changed: 9 additions & 9 deletions
Original file line number | Diff line number | Diff line change
@@ -83,14 +83,14 @@ func_prolog_preempt_disable(struct trace_array *tr,
8383
goto out_enable;
8484

8585
*data = per_cpu_ptr(tr->array_buffer.data, cpu);
86-
disabled = atomic_inc_return(&(*data)->disabled);
86+
disabled = local_inc_return(&(*data)->disabled);
8787
if (unlikely(disabled != 1))
8888
goto out;
8989

9090
return 1;
9191

9292
out:
93-
atomic_dec(&(*data)->disabled);
93+
local_dec(&(*data)->disabled);
9494

9595
out_enable:
9696
preempt_enable_notrace();
@@ -144,7 +144,7 @@ static int wakeup_graph_entry(struct ftrace_graph_ent *trace,
144144
*calltime = trace_clock_local();
145145

146146
ret = __trace_graph_entry(tr, trace, trace_ctx);
147-
atomic_dec(&data->disabled);
147+
local_dec(&data->disabled);
148148
preempt_enable_notrace();
149149

150150
return ret;
@@ -173,7 +173,7 @@ static void wakeup_graph_return(struct ftrace_graph_ret *trace,
173173
return;
174174

175175
__trace_graph_return(tr, trace, trace_ctx, *calltime, rettime);
176-
atomic_dec(&data->disabled);
176+
local_dec(&data->disabled);
177177

178178
preempt_enable_notrace();
179179
return;
@@ -243,7 +243,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
243243
trace_function(tr, ip, parent_ip, trace_ctx, fregs);
244244
local_irq_restore(flags);
245245

246-
atomic_dec(&data->disabled);
246+
local_dec(&data->disabled);
247247
preempt_enable_notrace();
248248
}
249249

@@ -471,7 +471,7 @@ probe_wakeup_sched_switch(void *ignore, bool preempt,
471471

472472
/* disable local data, not wakeup_cpu data */
473473
cpu = raw_smp_processor_id();
474-
disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
474+
disabled = local_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
475475
if (likely(disabled != 1))
476476
goto out;
477477

@@ -508,7 +508,7 @@ probe_wakeup_sched_switch(void *ignore, bool preempt,
508508
arch_spin_unlock(&wakeup_lock);
509509
local_irq_restore(flags);
510510
out:
511-
atomic_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
511+
local_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
512512
}
513513

514514
static void __wakeup_reset(struct trace_array *tr)
@@ -563,7 +563,7 @@ probe_wakeup(void *ignore, struct task_struct *p)
563563
(!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio)))
564564
return;
565565

566-
disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
566+
disabled = local_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
567567
if (unlikely(disabled != 1))
568568
goto out;
569569

@@ -610,7 +610,7 @@ probe_wakeup(void *ignore, struct task_struct *p)
610610
out_locked:
611611
arch_spin_unlock(&wakeup_lock);
612612
out:
613-
atomic_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
613+
local_dec(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
614614
}
615615

616616
static void start_wakeup_tracer(struct trace_array *tr)

0 commit comments

Comments (0)