Skip to content

Commit 9d6a414

Browse files
committed
Merge tag 'trace-v6.13-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace
Pull tracing fixes from Steven Rostedt: - Fix trace histogram sort function cmp_entries_dup() The sort function cmp_entries_dup() returns either 1 or 0, and not -1 if parameter "a" is less than "b" by memcmp(). - Fix archs that call trace_hardirqs_off() without RCU watching Both x86 and arm64 no longer call any tracepoints with RCU not watching. It was assumed that it was safe to get rid of the trace_*_rcuidle() version of the tracepoint calls. This was needed to get rid of the SRCU protection and be able to implement features like faultable tracepoints and add Rust tracepoints. Unfortunately, there were a few architectures that still relied on that logic. There's only one file that has tracepoints that are called without RCU watching. Add macro logic around those tracepoints: architectures that do not have CONFIG_ARCH_WANTS_NO_INSTR defined will check if the code is in the idle path (the only place RCU isn't watching), and enable RCU around calling the tracepoint, but only do so if the tracepoint is enabled. * tag 'trace-v6.13-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace: tracing: Fix archs that still call tracepoints without RCU watching tracing: Fix cmp_entries_dup() to respect sort() comparison rules
2 parents 2a770b4 + dc1b157 commit 9d6a414

File tree

2 files changed

+38
-11
lines changed

2 files changed

+38
-11
lines changed

kernel/trace/trace_preemptirq.c

Lines changed: 37 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -10,11 +10,42 @@
1010
#include <linux/module.h>
1111
#include <linux/ftrace.h>
1212
#include <linux/kprobes.h>
13+
#include <linux/hardirq.h>
1314
#include "trace.h"
1415

1516
#define CREATE_TRACE_POINTS
1617
#include <trace/events/preemptirq.h>
1718

19+
/*
 * Use regular trace points on architectures that implement noinstr
 * tooling: these calls will only happen with RCU enabled, which can
 * use a regular tracepoint.
 *
 * On older architectures, RCU may not be watching in idle. In that
 * case, wake up RCU to watch while calling the tracepoint. These
 * aren't NMI-safe - so exclude NMI contexts:
 */
#ifdef CONFIG_ARCH_WANTS_NO_INSTR
#define trace(point, args)	trace_##point(args)
#else
#define trace(point, args)					\
	do {							\
		/* Only pay the RCU-wakeup cost if someone is listening */ \
		if (trace_##point##_enabled()) {		\
			bool exit_rcu = false;			\
			/* ct_irq_enter()/exit() are not NMI-safe */ \
			if (in_nmi())				\
				break;				\
			if (!IS_ENABLED(CONFIG_TINY_RCU) &&	\
			    is_idle_task(current)) {		\
				ct_irq_enter();			\
				exit_rcu = true;		\
			}					\
			trace_##point(args);			\
			if (exit_rcu)				\
				ct_irq_exit();			\
		}						\
	} while (0)
#endif
48+
1849
#ifdef CONFIG_TRACE_IRQFLAGS
1950
/* Per-cpu variable to prevent redundant calls when IRQs already off */
2051
static DEFINE_PER_CPU(int, tracing_irq_cpu);
@@ -28,7 +59,7 @@ static DEFINE_PER_CPU(int, tracing_irq_cpu);
2859
void trace_hardirqs_on_prepare(void)
2960
{
3061
if (this_cpu_read(tracing_irq_cpu)) {
31-
trace_irq_enable(CALLER_ADDR0, CALLER_ADDR1);
62+
trace(irq_enable, TP_ARGS(CALLER_ADDR0, CALLER_ADDR1));
3263
tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
3364
this_cpu_write(tracing_irq_cpu, 0);
3465
}
@@ -39,7 +70,7 @@ NOKPROBE_SYMBOL(trace_hardirqs_on_prepare);
3970
void trace_hardirqs_on(void)
4071
{
4172
if (this_cpu_read(tracing_irq_cpu)) {
42-
trace_irq_enable(CALLER_ADDR0, CALLER_ADDR1);
73+
trace(irq_enable, TP_ARGS(CALLER_ADDR0, CALLER_ADDR1));
4374
tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
4475
this_cpu_write(tracing_irq_cpu, 0);
4576
}
@@ -61,7 +92,7 @@ void trace_hardirqs_off_finish(void)
6192
if (!this_cpu_read(tracing_irq_cpu)) {
6293
this_cpu_write(tracing_irq_cpu, 1);
6394
tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
64-
trace_irq_disable(CALLER_ADDR0, CALLER_ADDR1);
95+
trace(irq_disable, TP_ARGS(CALLER_ADDR0, CALLER_ADDR1));
6596
}
6697

6798
}
@@ -75,7 +106,7 @@ void trace_hardirqs_off(void)
75106
if (!this_cpu_read(tracing_irq_cpu)) {
76107
this_cpu_write(tracing_irq_cpu, 1);
77108
tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
78-
trace_irq_disable(CALLER_ADDR0, CALLER_ADDR1);
109+
trace(irq_disable, TP_ARGS(CALLER_ADDR0, CALLER_ADDR1));
79110
}
80111
}
81112
EXPORT_SYMBOL(trace_hardirqs_off);
@@ -86,13 +117,13 @@ NOKPROBE_SYMBOL(trace_hardirqs_off);
86117

87118
void trace_preempt_on(unsigned long a0, unsigned long a1)
88119
{
89-
trace_preempt_enable(a0, a1);
120+
trace(preempt_enable, TP_ARGS(a0, a1));
90121
tracer_preempt_on(a0, a1);
91122
}
92123

93124
void trace_preempt_off(unsigned long a0, unsigned long a1)
94125
{
95-
trace_preempt_disable(a0, a1);
126+
trace(preempt_disable, TP_ARGS(a0, a1));
96127
tracer_preempt_off(a0, a1);
97128
}
98129
#endif

kernel/trace/tracing_map.c

Lines changed: 1 addition & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -845,15 +845,11 @@ int tracing_map_init(struct tracing_map *map)
845845
static int cmp_entries_dup(const void *A, const void *B)
846846
{
847847
const struct tracing_map_sort_entry *a, *b;
848-
int ret = 0;
849848

850849
a = *(const struct tracing_map_sort_entry **)A;
851850
b = *(const struct tracing_map_sort_entry **)B;
852851

853-
if (memcmp(a->key, b->key, a->elt->map->key_size))
854-
ret = 1;
855-
856-
return ret;
852+
return memcmp(a->key, b->key, a->elt->map->key_size);
857853
}
858854

859855
static int cmp_entries_sum(const void *A, const void *B)

0 commit comments

Comments
 (0)