Skip to content

Commit b5ecc19

Browse files
mrutland-arm authored and willdeacon committed
arm64: stacktrace: always inline core stacktrace functions
The arm64 stacktrace code can be used in kprobe context, and so cannot be safely probed. Some (but not all) of the unwind functions are annotated with `NOKPROBE_SYMBOL()` to ensure this, with others marked as `__always_inline`, relying on the top-level unwind function being marked as `noinstr`. This patch has stacktrace.c consistently mark the internal stacktrace functions as `__always_inline`, removing the need for NOKPROBE_SYMBOL() as the top-level unwind function (arch_stack_walk()) is marked as `noinstr`. This is more consistent and is a simpler pattern to follow for future additions to stacktrace.c. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland <[email protected]> Reviewed-by: Kalesh Singh <[email protected]> Cc: Catalin Marinas <[email protected]> Cc: Madhavan T. Venkataraman <[email protected]> Cc: Mark Brown <[email protected]> Cc: Will Deacon <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Will Deacon <[email protected]>
1 parent ead6122 commit b5ecc19

File tree

1 file changed

+13
-10
lines changed

1 file changed

+13
-10
lines changed

arch/arm64/kernel/stacktrace.c

Lines changed: 13 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -25,8 +25,9 @@
2525
*
2626
* The regs must be on a stack currently owned by the calling task.
2727
*/
28-
static __always_inline void unwind_init_from_regs(struct unwind_state *state,
29-
struct pt_regs *regs)
28+
static __always_inline void
29+
unwind_init_from_regs(struct unwind_state *state,
30+
struct pt_regs *regs)
3031
{
3132
unwind_init_common(state, current);
3233

@@ -42,7 +43,8 @@ static __always_inline void unwind_init_from_regs(struct unwind_state *state,
4243
*
4344
* The function which invokes this must be noinline.
4445
*/
45-
static __always_inline void unwind_init_from_caller(struct unwind_state *state)
46+
static __always_inline void
47+
unwind_init_from_caller(struct unwind_state *state)
4648
{
4749
unwind_init_common(state, current);
4850

@@ -60,8 +62,9 @@ static __always_inline void unwind_init_from_caller(struct unwind_state *state)
6062
* duration of the unwind, or the unwind will be bogus. It is never valid to
6163
* call this for the current task.
6264
*/
63-
static __always_inline void unwind_init_from_task(struct unwind_state *state,
64-
struct task_struct *task)
65+
static __always_inline void
66+
unwind_init_from_task(struct unwind_state *state,
67+
struct task_struct *task)
6568
{
6669
unwind_init_common(state, task);
6770

@@ -102,7 +105,8 @@ unwind_recover_return_address(struct unwind_state *state)
102105
* records (e.g. a cycle), determined based on the location and fp value of A
103106
* and the location (but not the fp value) of B.
104107
*/
105-
static int notrace unwind_next(struct unwind_state *state)
108+
static __always_inline int
109+
unwind_next(struct unwind_state *state)
106110
{
107111
struct task_struct *tsk = state->task;
108112
unsigned long fp = state->fp;
@@ -120,10 +124,10 @@ static int notrace unwind_next(struct unwind_state *state)
120124

121125
return unwind_recover_return_address(state);
122126
}
123-
NOKPROBE_SYMBOL(unwind_next);
124127

125-
static void notrace unwind(struct unwind_state *state,
126-
stack_trace_consume_fn consume_entry, void *cookie)
128+
static __always_inline void
129+
unwind(struct unwind_state *state, stack_trace_consume_fn consume_entry,
130+
void *cookie)
127131
{
128132
if (unwind_recover_return_address(state))
129133
return;
@@ -138,7 +142,6 @@ static void notrace unwind(struct unwind_state *state,
138142
break;
139143
}
140144
}
141-
NOKPROBE_SYMBOL(unwind);
142145

143146
/*
144147
* Per-cpu stacks are only accessible when unwinding the current task in a

0 commit comments

Comments (0)