@@ -12,6 +12,31 @@
 #include <linux/slab.h>
 #include <linux/mm.h>
 
+/*
+ * Requesting a deferred user space stack trace from NMI context requires
+ * that the architecture support a safe cmpxchg in NMI context. An
+ * architecture without that support cannot request a deferred user space
+ * stack trace from NMI context; if it tries, it gets -EINVAL.
+ */
+#if defined(CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG)
+# define CAN_USE_IN_NMI 1
+static inline bool try_assign_cnt(struct unwind_task_info *info, u32 cnt)
+{
+	u32 old = 0;
+
+	return try_cmpxchg(&info->id.cnt, &old, cnt);
+}
+#else
+# define CAN_USE_IN_NMI 0
+/* When NMIs are not allowed, this always succeeds */
+static inline bool try_assign_cnt(struct unwind_task_info *info, u32 cnt)
+{
+	info->id.cnt = cnt;
+	return true;
+}
+#endif
+
 /* Make the cache fit in a 4K page */
 #define UNWIND_MAX_ENTRIES			\
 	((SZ_4K - sizeof(struct unwind_cache)) / sizeof(long))
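
The try_cmpxchg() path is what makes the NMI case safe: the count is installed only if the field is still zero, so an NMI that interrupts the assignment midway cannot double-assign it. For readers unfamiliar with the helper, below is a minimal user space analogue of its semantics using C11 atomics; `task_info` and `claim_cnt` are illustrative names, not kernel code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-in for the cnt field of info->id */
struct task_info {
	_Atomic uint32_t cnt;
};

/*
 * User space analogue of try_assign_cnt() on architectures with
 * NMI-safe cmpxchg: atomically install @cnt only if the field is
 * still 0. Returns true if this caller won the race, false if a
 * concurrent context (e.g. an interrupting NMI) got there first.
 */
static bool claim_cnt(struct task_info *info, uint32_t cnt)
{
	uint32_t old = 0;

	/* compare-and-swap: succeeds only if info->cnt still equals old */
	return atomic_compare_exchange_strong(&info->cnt, &old, cnt);
}

The fallback variant in the #else branch can skip the atomic entirely: when no NMI can race with the assignment, a plain store cannot lose.
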
@@ -42,14 +67,13 @@ static DEFINE_PER_CPU(u32, unwind_ctx_ctr);
 static u64 get_cookie(struct unwind_task_info *info)
 {
 	u32 cnt = 1;
-	u32 old = 0;
 
 	if (info->id.cpu)
 		return info->id.id;
 
 	/* LSB is always set to ensure 0 is an invalid value */
 	cnt |= __this_cpu_read(unwind_ctx_ctr) + 2;
-	if (try_cmpxchg(&info->id.cnt, &old, cnt)) {
+	if (try_assign_cnt(info, cnt)) {
 		/* Update the per cpu counter */
 		__this_cpu_write(unwind_ctx_ctr, cnt);
 	}
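
The arithmetic in get_cookie() keeps the low bit of the count permanently set: the per-CPU counter advances by 2 on each successful assignment, so successive counts run 3, 5, 7, ... and zero can never be handed out as a valid count. A standalone sketch of just that arithmetic (plain C, with a hypothetical loop standing in for repeated tasks on one CPU):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ctr = 0;	/* models the per-CPU unwind_ctx_ctr */

	for (int i = 0; i < 5; i++) {
		uint32_t cnt = 1;

		/* same arithmetic as get_cookie(): advance by 2, force LSB */
		cnt |= ctr + 2;
		ctr = cnt;	/* models __this_cpu_write() on success */

		printf("cookie cnt %u\n", cnt);	/* prints 3, 5, 7, 9, 11 */
	}
	return 0;
}
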
@@ -167,31 +191,43 @@ static void unwind_deferred_task_work(struct callback_head *head)
 int unwind_deferred_request(struct unwind_work *work, u64 *cookie)
 {
 	struct unwind_task_info *info = &current->unwind_info;
+	long pending;
 	int ret;
 
 	*cookie = 0;
 
-	if (WARN_ON_ONCE(in_nmi()))
-		return -EINVAL;
-
 	if ((current->flags & (PF_KTHREAD | PF_EXITING)) ||
 	    !user_mode(task_pt_regs(current)))
 		return -EINVAL;
 
+	/*
+	 * Requests from NMI require NMI-safe cmpxchg operations.
+	 * Trigger a warning to make it obvious when an architecture
+	 * uses this in NMI without that support.
+	 */
+	if (WARN_ON_ONCE(!CAN_USE_IN_NMI && in_nmi()))
+		return -EINVAL;
+
 	guard(irqsave)();
 
 	*cookie = get_cookie(info);
 
 	/* callback already pending? */
-	if (info->pending)
+	pending = READ_ONCE(info->pending);
+	if (pending)
+		return 1;
+
+	/* Claim the work unless an NMI just now swooped in to do so. */
+	if (!try_cmpxchg(&info->pending, &pending, 1))
 		return 1;
 
 	/* The work has been claimed, now schedule it. */
 	ret = task_work_add(current, &info->work, TWA_RESUME);
-	if (WARN_ON_ONCE(ret))
+	if (WARN_ON_ONCE(ret)) {
+		WRITE_ONCE(info->pending, 0);
 		return ret;
+	}
 
-	info->pending = 1;
 	return 0;
 }
 
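
With these changes, a caller interprets the return value as follows. This is a hedged sketch of a hypothetical in-kernel user (a tracer, say); `my_unwind_work` and `record_cookie` are assumed names, not part of this patch, and `my_unwind_work` is assumed to have been initialized elsewhere with the callback that consumes the deferred trace.

/* Sketch of a hypothetical caller of unwind_deferred_request() */
static void request_user_stacktrace(void)
{
	u64 cookie;
	int ret;

	ret = unwind_deferred_request(&my_unwind_work, &cookie);
	if (ret < 0) {
		/*
		 * -EINVAL: kthread, exiting task, a task that did not
		 * enter from user mode, or NMI context on an
		 * architecture without NMI-safe cmpxchg.
		 */
		return;
	}

	/*
	 * ret == 1: a callback was already pending; ret == 0: this
	 * call claimed it. Either way @cookie identifies the trace
	 * that will be generated when the task returns to user space.
	 */
	record_cookie(cookie);	/* hypothetical consumer */
}

Note that the cookie is assigned before the pending check, so even the "already pending" path hands back a valid identifier, and the new READ_ONCE()/try_cmpxchg() pair ensures that exactly one context (task, IRQ, or NMI) wins the right to call task_work_add().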