  * @kr_cur:      When KRETPROBES is selected, holds the kretprobe instance
  *               associated with the most recently encountered replacement lr
  *               value.
+ *
+ * @task:        The task being unwound.
  */
 struct unwind_state {
 	unsigned long fp;
@@ -48,13 +50,13 @@ struct unwind_state {
 #ifdef CONFIG_KRETPROBES
 	struct llist_node *kr_cur;
 #endif
+	struct task_struct *task;
 };
 
-static notrace void unwind_init(struct unwind_state *state, unsigned long fp,
-				unsigned long pc)
+static void unwind_init_common(struct unwind_state *state,
+			       struct task_struct *task)
 {
-	state->fp = fp;
-	state->pc = pc;
+	state->task = task;
 #ifdef CONFIG_KRETPROBES
 	state->kr_cur = NULL;
 #endif
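
With this split, unwind_init_common() no longer seeds fp/pc; it only records the task and resets the unwinder's accounting state, while the fp/pc seeding moves into the purpose-specific initializers added below. A condensed view of the resulting function (a sketch; it assumes the elided middle of the function still zeroes stacks_done with bitmap_zero(), as in the surrounding file):

static void unwind_init_common(struct unwind_state *state,
			       struct task_struct *task)
{
	state->task = task;
#ifdef CONFIG_KRETPROBES
	state->kr_cur = NULL;
#endif
	/* No stack has been fully unwound yet. */
	bitmap_zero(state->stacks_done, __NR_STACK_TYPES);
	/* Synthetic values: there is no previous frame record to check. */
	state->prev_fp = 0;
	state->prev_type = STACK_TYPE_UNKNOWN;
}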
@@ -72,7 +74,57 @@ static notrace void unwind_init(struct unwind_state *state, unsigned long fp,
 	state->prev_fp = 0;
 	state->prev_type = STACK_TYPE_UNKNOWN;
 }
-NOKPROBE_SYMBOL(unwind_init);
+
+/*
+ * Start an unwind from a pt_regs.
+ *
+ * The unwind will begin at the PC within the regs.
+ *
+ * The regs must be on a stack currently owned by the calling task.
+ */
+static inline void unwind_init_from_regs(struct unwind_state *state,
+					 struct pt_regs *regs)
+{
+	unwind_init_common(state, current);
+
+	state->fp = regs->regs[29];
+	state->pc = regs->pc;
+}
+
+/*
+ * Start an unwind from a caller.
+ *
+ * The unwind will begin at the caller of whichever function this is inlined
+ * into.
+ *
+ * The function which invokes this must be noinline.
+ */
+static __always_inline void unwind_init_from_caller(struct unwind_state *state)
+{
+	unwind_init_common(state, current);
+
+	state->fp = (unsigned long)__builtin_frame_address(1);
+	state->pc = (unsigned long)__builtin_return_address(0);
+}
+
+/*
+ * Start an unwind from a blocked task.
+ *
+ * The unwind will begin at the blocked task's saved PC (i.e. the caller of
+ * cpu_switch_to()).
+ *
+ * The caller should ensure the task is blocked in cpu_switch_to() for the
+ * duration of the unwind, or the unwind will be bogus. It is never valid to
+ * call this for the current task.
+ */
+static inline void unwind_init_from_task(struct unwind_state *state,
+					 struct task_struct *task)
+{
+	unwind_init_common(state, task);
+
+	state->fp = thread_saved_fp(task);
+	state->pc = thread_saved_pc(task);
+}
 
 /*
  * Unwind from one frame record (A) to the next frame record (B).
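
The noinline requirement documented above matters because unwind_init_from_caller() is __always_inline: __builtin_frame_address(1) and __builtin_return_address(0) are evaluated in the invoking function's frame, and that frame must really exist. A hypothetical user (illustration only; example_dump_caller is not part of this patch):

static noinline void example_dump_caller(void)
{
	struct unwind_state state;

	/*
	 * Inlined here, so frame_address(1)/return_address(0) refer to
	 * this function's caller. If this function were itself inlined,
	 * the trace would skip an unpredictable number of frames.
	 */
	unwind_init_from_caller(&state);

	pr_info("called from %pS\n", (void *)state.pc);
}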
@@ -81,9 +133,9 @@ NOKPROBE_SYMBOL(unwind_init);
  * records (e.g. a cycle), determined based on the location and fp value of A
  * and the location (but not the fp value) of B.
  */
-static int notrace unwind_next(struct task_struct *tsk,
-			       struct unwind_state *state)
+static int notrace unwind_next(struct unwind_state *state)
 {
+	struct task_struct *tsk = state->task;
 	unsigned long fp = state->fp;
 	struct stack_info info;
 
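
unwind_next() now derives the task from the state instead of taking it as a parameter, so the task chosen by the initializer cannot drift from the one used for validation. In the body elided between this hunk and the next, tsk feeds the stack-range checks; roughly (a sketch of the pre-existing logic, not new code in this patch):

	/* The frame record must lie on a stack this task can own. */
	if (!on_accessible_stack(tsk, fp, 16, &info))
		return -EINVAL;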
@@ -117,15 +169,15 @@ static int notrace unwind_next(struct task_struct *tsk,
 		if (fp <= state->prev_fp)
 			return -EINVAL;
 	} else {
-		set_bit(state->prev_type, state->stacks_done);
+		__set_bit(state->prev_type, state->stacks_done);
 	}
 
 	/*
 	 * Record this frame record's values and location. The prev_fp and
 	 * prev_type are only meaningful to the next unwind_next() invocation.
 	 */
-	state->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
-	state->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));
+	state->fp = READ_ONCE(*(unsigned long *)(fp));
+	state->pc = READ_ONCE(*(unsigned long *)(fp + 8));
 	state->prev_fp = fp;
 	state->prev_type = info.type;
 
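
Two details in this hunk are worth noting. __set_bit() is the non-atomic variant, which is safe because stacks_done lives in the on-stack unwind_state and is never shared. And the fp/fp + 8 offsets follow the AAPCS64 frame record layout that the two reads above depend on; schematically (illustration only, frame_record_layout is a hypothetical name):

/*
 * An AArch64 frame record: two adjacent 64-bit words pointed to by x29.
 *
 *	fp     -> saved x29 (previous fp) -- becomes state->fp
 *	fp + 8 -> saved x30 (lr)          -- becomes state->pc
 */
struct frame_record_layout {
	u64 fp;		/* offset 0 */
	u64 lr;		/* offset 8 */
};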
@@ -157,16 +209,15 @@ static int notrace unwind_next(struct task_struct *tsk,
 }
 NOKPROBE_SYMBOL(unwind_next);
 
-static void notrace unwind(struct task_struct *tsk,
-			   struct unwind_state *state,
+static void notrace unwind(struct unwind_state *state,
 			   stack_trace_consume_fn consume_entry, void *cookie)
 {
 	while (1) {
 		int ret;
 
 		if (!consume_entry(cookie, state->pc))
 			break;
-		ret = unwind_next(tsk, state);
+		ret = unwind_next(state);
 		if (ret < 0)
 			break;
 	}
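
unwind() drives the walk through a stack_trace_consume_fn, which can terminate the trace early by returning false. A hypothetical consumer (illustration only; example_cookie and example_consume_entry are not from this patch):

struct example_cookie {
	unsigned int printed;
	unsigned int max;
};

static bool example_consume_entry(void *cookie, unsigned long pc)
{
	struct example_cookie *c = cookie;

	pr_info("  %pS\n", (void *)pc);

	/* Returning false makes unwind() stop before the next frame. */
	return ++c->printed < c->max;
}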
@@ -212,15 +263,15 @@ noinline notrace void arch_stack_walk(stack_trace_consume_fn consume_entry,
212
263
{
213
264
struct unwind_state state ;
214
265
215
- if (regs )
216
- unwind_init ( & state , regs -> regs [ 29 ], regs -> pc );
217
- else if ( task == current )
218
- unwind_init (& state ,
219
- ( unsigned long ) __builtin_frame_address ( 1 ),
220
- ( unsigned long ) __builtin_return_address ( 0 ) );
221
- else
222
- unwind_init (& state , thread_saved_fp ( task ),
223
- thread_saved_pc ( task ));
224
-
225
- unwind (task , & state , consume_entry , cookie );
266
+ if (regs ) {
267
+ if ( task != current )
268
+ return ;
269
+ unwind_init_from_regs (& state , regs );
270
+ } else if ( task == current ) {
271
+ unwind_init_from_caller ( & state );
272
+ } else {
273
+ unwind_init_from_task (& state , task );
274
+ }
275
+
276
+ unwind (& state , consume_entry , cookie );
226
277
}
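
Note the new early return: a regs-based unwind is now honoured only for the current task, whose regs can be trusted to sit on a live stack, whereas the removed code silently ignored the task argument on that path. Tying the pieces together, a hypothetical direct caller (illustration only; most code would go through the generic stack_trace_* helpers instead):

static void example_walk_current(void)
{
	struct example_cookie c = { .printed = 0, .max = 16 };

	/* task == current, regs == NULL: uses unwind_init_from_caller(). */
	arch_stack_walk(example_consume_entry, &c, current, NULL);
}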