
Commit b5e683d

eventfd: track eventfd_signal() recursion depth
eventfd use cases from aio and io_uring can deadlock due to circular or recursive calling, when eventfd_signal() tries to grab the waitqueue lock. On top of that, it's also possible to construct notification chains that are deep enough that we could blow the stack. Add a percpu counter that tracks the per-cpu recursion depth, and warn if we exceed it. The counter is also exposed so that users of eventfd_signal() can do the right thing if it's non-zero in the context where it is called.

Cc: [email protected] # 4.19+
Signed-off-by: Jens Axboe <[email protected]>
1 parent d7f62e8 commit b5e683d

File tree

fs/eventfd.c
include/linux/eventfd.h

2 files changed, 29 insertions(+), 0 deletions(-)
fs/eventfd.c

Lines changed: 15 additions & 0 deletions
@@ -24,6 +24,8 @@
 #include <linux/seq_file.h>
 #include <linux/idr.h>
 
+DEFINE_PER_CPU(int, eventfd_wake_count);
+
 static DEFINE_IDA(eventfd_ida);
 
 struct eventfd_ctx {
@@ -60,12 +62,25 @@ __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
 {
 	unsigned long flags;
 
+	/*
+	 * Deadlock or stack overflow issues can happen if we recurse here
+	 * through waitqueue wakeup handlers. If the caller uses potentially
+	 * nested waitqueues with custom wakeup handlers, then it should
+	 * check eventfd_signal_count() before calling this function. If
+	 * it returns true, the eventfd_signal() call should be deferred to a
+	 * safe context.
+	 */
+	if (WARN_ON_ONCE(this_cpu_read(eventfd_wake_count)))
+		return 0;
+
 	spin_lock_irqsave(&ctx->wqh.lock, flags);
+	this_cpu_inc(eventfd_wake_count);
 	if (ULLONG_MAX - ctx->count < n)
 		n = ULLONG_MAX - ctx->count;
 	ctx->count += n;
 	if (waitqueue_active(&ctx->wqh))
 		wake_up_locked_poll(&ctx->wqh, EPOLLIN);
+	this_cpu_dec(eventfd_wake_count);
 	spin_unlock_irqrestore(&ctx->wqh.lock, flags);
 
 	return n;

include/linux/eventfd.h

Lines changed: 14 additions & 0 deletions
@@ -12,6 +12,8 @@
 #include <linux/fcntl.h>
 #include <linux/wait.h>
 #include <linux/err.h>
+#include <linux/percpu-defs.h>
+#include <linux/percpu.h>
 
 /*
  * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
@@ -40,6 +42,13 @@ __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n);
 int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
 				  __u64 *cnt);
 
+DECLARE_PER_CPU(int, eventfd_wake_count);
+
+static inline bool eventfd_signal_count(void)
+{
+	return this_cpu_read(eventfd_wake_count);
+}
+
 #else /* CONFIG_EVENTFD */
 
 /*
@@ -68,6 +77,11 @@ static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx,
 	return -ENOSYS;
 }
 
+static inline bool eventfd_signal_count(void)
+{
+	return false;
+}
+
 #endif
 
 #endif /* _LINUX_EVENTFD_H */
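
For illustration only (not part of this commit): a minimal caller-side sketch of how code that signals an eventfd from a potentially nested waitqueue wakeup handler could use the newly exported eventfd_signal_count(). The foo_ctx, foo_signal() and foo_signal_work() names, and the workqueue-based deferral, are assumptions for this example, not code taken from aio or io_uring.

#include <linux/eventfd.h>
#include <linux/workqueue.h>

/*
 * Hypothetical context owning an eventfd. ev_work is assumed to have
 * been set up with INIT_WORK(&ctx->ev_work, foo_signal_work).
 */
struct foo_ctx {
	struct eventfd_ctx	*ev_fd;
	struct work_struct	ev_work;
};

static void foo_signal_work(struct work_struct *work)
{
	struct foo_ctx *ctx = container_of(work, struct foo_ctx, ev_work);

	/* Workqueue context: no eventfd wakeup is active on this CPU. */
	eventfd_signal(ctx->ev_fd, 1);
}

static void foo_signal(struct foo_ctx *ctx)
{
	/*
	 * If we are already inside an eventfd_signal() wakeup on this CPU,
	 * calling it again would trip the new WARN_ON_ONCE and the event
	 * would be dropped; defer to a safe context instead.
	 */
	if (eventfd_signal_count())
		schedule_work(&ctx->ev_work);
	else
		eventfd_signal(ctx->ev_fd, 1);
}

A workqueue is just one possible safe context; the point is that the signal gets re-issued from somewhere that is not itself running inside an eventfd wakeup.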
