
Commit 70045bf

Merge tag 'ftrace-v6.11' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace
Pull ftrace updates from Steven Rostedt:
 "Rewrite of function graph tracer to allow multiple users

  Up until now, the function graph tracer could only have a single user
  attached to it. If another user tried to attach while one was already
  attached, it would fail. Allowing the function graph tracer to have
  more than one user has been asked for since 2009, but it required a
  rewrite of the logic to pull it off, so it never happened. Until now!

  There are three systems that trace the return of a function:
  kretprobes, the function graph tracer, and BPF. kretprobes and
  function graph tracing do it similarly; the difference is that
  kretprobes uses a shadow stack per callback, while the function graph
  tracer creates a shadow stack for all tasks. The function graph
  tracer's method makes it possible to trace the return of all
  functions. As kretprobes now needs that feature too, it should be
  allowed to use the function graph tracer. BPF also wants to trace the
  return of many probes, and its method doesn't scale either; having it
  use the function graph tracer would improve that.

  Allowing the function graph tracer to have multiple users lets both
  kretprobes and BPF use it in these cases. This will allow the
  kretprobes version of the code to be removed in the future, as it
  will no longer be needed.

  Note, the function graph tracer is limited to 16 simultaneous users,
  due to shadow stack size and allocated slots"

* tag 'ftrace-v6.11' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace: (49 commits)
  fgraph: Use str_plural() in test_graph_storage_single()
  function_graph: Add READ_ONCE() when accessing fgraph_array[]
  ftrace: Add missing kerneldoc parameters to unregister_ftrace_direct()
  function_graph: Everyone uses HAVE_FUNCTION_GRAPH_RET_ADDR_PTR, remove it
  function_graph: Fix up ftrace_graph_ret_addr()
  function_graph: Make fgraph_update_pid_func() a stub for !DYNAMIC_FTRACE
  function_graph: Rename BYTE_NUMBER to CHAR_NUMBER in selftests
  fgraph: Remove some unused functions
  ftrace: Hide one more entry in stack trace when ftrace_pid is enabled
  function_graph: Do not update pid func if CONFIG_DYNAMIC_FTRACE not enabled
  function_graph: Make fgraph_do_direct static key static
  ftrace: Fix prototypes for ftrace_startup/shutdown_subops()
  ftrace: Assign RCU list variable with rcu_assign_ptr()
  ftrace: Assign ftrace_list_end to ftrace_ops_list type cast to RCU
  ftrace: Declare function_trace_op in header to quiet sparse warning
  ftrace: Add comments to ftrace_hash_move() and friends
  ftrace: Convert "inc" parameter to bool in ftrace_hash_rec_update_modify()
  ftrace: Add comments to ftrace_hash_rec_disable/enable()
  ftrace: Remove "filter_hash" parameter from __ftrace_hash_rec_update()
  ftrace: Rename dup_hash() and comment it
  ...
2 parents: 2fd4130 + b576d37
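To make the headline change concrete before the diffs: below is a minimal sketch, not taken from the series, of what an in-kernel fgraph user looks like after this merge. The callback signatures match the updated prototypes in include/linux/ftrace.h further down; the my_entry/my_return names and the pr_debug output are hypothetical, and the code is written as built-in kernel code, assuming the usual register_ftrace_graph()/unregister_ftrace_graph() entry points.

#include <linux/ftrace.h>
#include <linux/init.h>
#include <linux/printk.h>

/* Entry handler: return 0 to skip tracing this function's return. */
static int my_entry(struct ftrace_graph_ent *trace, struct fgraph_ops *gops)
{
        return 1;
}

/* Return handler: called when the traced function returns. */
static void my_return(struct ftrace_graph_ret *trace, struct fgraph_ops *gops)
{
        pr_debug("return from %ps, depth %d\n", (void *)trace->func, trace->depth);
}

static struct fgraph_ops my_gops = {
        .entryfunc = my_entry,
        .retfunc   = my_return,
};

static int __init my_fgraph_init(void)
{
        /*
         * Before this merge, registering while another user was attached
         * simply failed; now up to 16 users can be attached at once.
         */
        return register_ftrace_graph(&my_gops);
}

A matching unregister_ftrace_graph(&my_gops) detaches the user again.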

22 files changed: +2055 / -441 lines

Documentation/trace/ftrace-design.rst

Lines changed: 0 additions & 12 deletions
@@ -217,18 +217,6 @@ along to ftrace_push_return_trace() instead of a stub value of 0.
 
 Similarly, when you call ftrace_return_to_handler(), pass it the frame pointer.
 
-HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
---------------------------------
-
-An arch may pass in a pointer to the return address on the stack. This
-prevents potential stack unwinding issues where the unwinder gets out of
-sync with ret_stack and the wrong addresses are reported by
-ftrace_graph_ret_addr().
-
-Adding support for it is easy: just define the macro in asm/ftrace.h and
-pass the return address pointer as the 'retp' argument to
-ftrace_push_return_trace().
-
 HAVE_SYSCALL_TRACEPOINTS
 ------------------------
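The documentation removed above describes the 'retp' convention that is now unconditional: every architecture passes the address of the stack slot holding the return address down to the fgraph core, so ftrace_graph_ret_addr() can stay in sync with the shadow stack. A rough sketch of that arch-side hook, modeled on the x86 implementation (names and details vary per architecture, and pause/error checks are omitted):

extern void return_to_handler(void);

/*
 * 'parent' points at the stack slot holding the real return address.
 * That same pointer is handed to the core as 'retp', the last argument
 * of function_graph_enter().
 */
void prepare_ftrace_return(unsigned long ip, unsigned long *parent,
                           unsigned long frame_pointer)
{
        unsigned long old = *parent;    /* original return address */

        if (!function_graph_enter(old, ip, frame_pointer, parent))
                *parent = (unsigned long)&return_to_handler;    /* hijack return */
}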

arch/arm64/include/asm/ftrace.h

Lines changed: 0 additions & 11 deletions
@@ -12,17 +12,6 @@
 
 #define HAVE_FUNCTION_GRAPH_FP_TEST
 
-/*
- * HAVE_FUNCTION_GRAPH_RET_ADDR_PTR means that the architecture can provide a
- * "return address pointer" which can be used to uniquely identify a return
- * address which has been overwritten.
- *
- * On arm64 we use the address of the caller's frame record, which remains the
- * same for the lifetime of the instrumented function, unlike the return
- * address in the LR.
- */
-#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
-
 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
 #define ARCH_SUPPORTS_FTRACE_OPS 1
 #else

arch/csky/include/asm/ftrace.h

Lines changed: 0 additions & 2 deletions
@@ -7,8 +7,6 @@
 
 #define HAVE_FUNCTION_GRAPH_FP_TEST
 
-#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
-
 #define ARCH_SUPPORTS_FTRACE_OPS 1
 
 #define MCOUNT_ADDR ((unsigned long)_mcount)

arch/loongarch/include/asm/ftrace.h

Lines changed: 0 additions & 1 deletion
@@ -28,7 +28,6 @@ struct dyn_ftrace;
 struct dyn_arch_ftrace { };
 
 #define ARCH_SUPPORTS_FTRACE_OPS 1
-#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
 
 #define ftrace_init_nop ftrace_init_nop
 int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);

arch/powerpc/include/asm/ftrace.h

Lines changed: 0 additions & 2 deletions
@@ -8,8 +8,6 @@
 #define MCOUNT_ADDR ((unsigned long)(_mcount))
 #define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */
 
-#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
-
 /* Ignore unused weak functions which will have larger offsets */
 #if defined(CONFIG_MPROFILE_KERNEL) || defined(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY)
 #define FTRACE_MCOUNT_MAX_OFFSET 16

arch/riscv/include/asm/ftrace.h

Lines changed: 0 additions & 1 deletion
@@ -11,7 +11,6 @@
 #if defined(CONFIG_FUNCTION_GRAPH_TRACER) && defined(CONFIG_FRAME_POINTER)
 #define HAVE_FUNCTION_GRAPH_FP_TEST
 #endif
-#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
 
 #define ARCH_SUPPORTS_FTRACE_OPS 1
 #ifndef __ASSEMBLY__

arch/s390/include/asm/ftrace.h

Lines changed: 0 additions & 1 deletion
@@ -2,7 +2,6 @@
 #ifndef _ASM_S390_FTRACE_H
 #define _ASM_S390_FTRACE_H
 
-#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
 #define ARCH_SUPPORTS_FTRACE_OPS 1
 #define MCOUNT_INSN_SIZE 6

arch/x86/include/asm/ftrace.h

Lines changed: 0 additions & 2 deletions
@@ -20,8 +20,6 @@
 #define ARCH_SUPPORTS_FTRACE_OPS 1
 #endif
 
-#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
-
 #ifndef __ASSEMBLY__
 extern void __fentry__(void);

include/linux/ftrace.h

Lines changed: 33 additions & 15 deletions
@@ -227,6 +227,7 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
  * ftrace_enabled.
  * DIRECT - Used by the direct ftrace_ops helper for direct functions
  *            (internal ftrace only, should not be used by others)
+ * SUBOP - Is controlled by another op in field managed.
  */
 enum {
         FTRACE_OPS_FL_ENABLED = BIT(0),
@@ -247,6 +248,7 @@
         FTRACE_OPS_FL_TRACE_ARRAY = BIT(15),
         FTRACE_OPS_FL_PERMANENT = BIT(16),
         FTRACE_OPS_FL_DIRECT = BIT(17),
+        FTRACE_OPS_FL_SUBOP = BIT(18),
 };
 
 #ifndef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
@@ -334,7 +336,9 @@ struct ftrace_ops {
         unsigned long trampoline;
         unsigned long trampoline_size;
         struct list_head list;
+        struct list_head subop_list;
         ftrace_ops_func_t ops_func;
+        struct ftrace_ops *managed;
 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
         unsigned long direct_call;
 #endif
@@ -509,6 +513,15 @@ static inline void stack_tracer_disable(void) { }
 static inline void stack_tracer_enable(void) { }
 #endif
 
+enum {
+        FTRACE_UPDATE_CALLS = (1 << 0),
+        FTRACE_DISABLE_CALLS = (1 << 1),
+        FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
+        FTRACE_START_FUNC_RET = (1 << 3),
+        FTRACE_STOP_FUNC_RET = (1 << 4),
+        FTRACE_MAY_SLEEP = (1 << 5),
+};
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 void ftrace_arch_code_modify_prepare(void);
@@ -603,15 +616,6 @@ void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
 void ftrace_free_filter(struct ftrace_ops *ops);
 void ftrace_ops_set_global_filter(struct ftrace_ops *ops);
 
-enum {
-        FTRACE_UPDATE_CALLS = (1 << 0),
-        FTRACE_DISABLE_CALLS = (1 << 1),
-        FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
-        FTRACE_START_FUNC_RET = (1 << 3),
-        FTRACE_STOP_FUNC_RET = (1 << 4),
-        FTRACE_MAY_SLEEP = (1 << 5),
-};
-
 /*
  * The FTRACE_UPDATE_* enum is used to pass information back
  * from the ftrace_update_record() and ftrace_test_record()
@@ -1027,19 +1031,31 @@ struct ftrace_graph_ret {
         unsigned long long rettime;
 } __packed;
 
+struct fgraph_ops;
+
 /* Type of the callback handlers for tracing function graph*/
-typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
-typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
+typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *,
+                                       struct fgraph_ops *); /* return */
+typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *,
+                                      struct fgraph_ops *); /* entry */
 
-extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);
+extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace, struct fgraph_ops *gops);
+bool ftrace_pids_enabled(struct ftrace_ops *ops);
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
 struct fgraph_ops {
         trace_func_graph_ent_t entryfunc;
         trace_func_graph_ret_t retfunc;
+        struct ftrace_ops ops; /* for the hash lists */
+        void *private;
+        trace_func_graph_ent_t saved_func;
+        int idx;
 };
 
+void *fgraph_reserve_data(int idx, int size_bytes);
+void *fgraph_retrieve_data(int idx, int *size_bytes);
+
 /*
  * Stack of return addresses for functions
  * of a thread.
@@ -1055,9 +1071,7 @@ struct ftrace_ret_stack {
 #ifdef HAVE_FUNCTION_GRAPH_FP_TEST
         unsigned long fp;
 #endif
-#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
         unsigned long *retp;
-#endif
 };
 
 /*
@@ -1072,10 +1086,11 @@ function_graph_enter(unsigned long ret, unsigned long func,
                      unsigned long frame_pointer, unsigned long *retp);
 
 struct ftrace_ret_stack *
-ftrace_graph_get_ret_stack(struct task_struct *task, int idx);
+ftrace_graph_get_ret_stack(struct task_struct *task, int skip);
 
 unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
                                     unsigned long ret, unsigned long *retp);
+unsigned long *fgraph_get_task_var(struct fgraph_ops *gops);
 
 /*
  * Sometimes we don't want to trace a function with the function
@@ -1114,6 +1129,9 @@ extern void ftrace_graph_init_task(struct task_struct *t);
 extern void ftrace_graph_exit_task(struct task_struct *t);
 extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);
 
+/* Used by assembly, but to quiet sparse warnings */
+extern struct ftrace_ops *function_trace_op;
+
 static inline void pause_graph_tracing(void)
 {
         atomic_inc(&current->tracing_graph_pause);
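Two of the prototypes added above, fgraph_reserve_data() and fgraph_retrieve_data(), let a callback stash per-function-call data on the task's shadow stack at entry and read it back at return. The sketch below shows one possible use, timing a call; the ts_entry/ts_return names and the use of trace_clock_local() are illustrative, and passing gops->idx as the index is an assumption based on the new idx field in struct fgraph_ops.

#include <linux/ftrace.h>
#include <linux/printk.h>
#include <linux/trace_clock.h>

/* Entry: reserve room on the shadow stack and store a timestamp there. */
static int ts_entry(struct ftrace_graph_ent *trace, struct fgraph_ops *gops)
{
        u64 *ts = fgraph_reserve_data(gops->idx, sizeof(*ts));

        if (!ts)
                return 0;       /* no room left: skip tracing this return */

        *ts = trace_clock_local();
        return 1;
}

/* Return: read the timestamp back and report the duration. */
static void ts_return(struct ftrace_graph_ret *trace, struct fgraph_ops *gops)
{
        int size;
        u64 *ts = fgraph_retrieve_data(gops->idx, &size);

        if (ts)
                pr_debug("%ps took %llu ns\n", (void *)trace->func,
                         trace_clock_local() - *ts);
}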

include/linux/sched.h

Lines changed: 1 addition & 1 deletion
@@ -1413,7 +1413,7 @@ struct task_struct {
         int curr_ret_depth;
 
         /* Stack of return addresses for return function tracing: */
-        struct ftrace_ret_stack *ret_stack;
+        unsigned long *ret_stack;
 
         /* Timestamp for last schedule: */
         unsigned long long ftrace_timestamp;
