Commit b5e683d5 authored by Jens Axboe

eventfd: track eventfd_signal() recursion depth

eventfd use cases from aio and io_uring can deadlock due to circular
or recursive calling when eventfd_signal() tries to grab the waitqueue
lock. On top of that, it's also possible to construct notification
chains that are deep enough to blow the stack.

Add a percpu counter that tracks the recursion depth of eventfd_signal()
on each CPU, and warn if we recurse. The counter is also exposed so that
users of eventfd_signal() can do the right thing if it's non-zero in the
context where it is called.

Cc: stable@vger.kernel.org # 4.19+
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent d7f62e82
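
The commit message above asks callers that might run inside a wakeup handler to check eventfd_signal_count() and defer the signal to a safe context. Below is a minimal sketch of that deferral pattern; the deferred_signal struct and the signal_event()/deferred_signal_fn() helpers are hypothetical illustrations, not code from this commit (aio adopted a similar workqueue-based deferral in a follow-up fix):

#include <linux/eventfd.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Hypothetical wrapper that defers eventfd_signal() when nesting is detected. */
struct deferred_signal {
	struct work_struct work;
	struct eventfd_ctx *ctx;
};

static void deferred_signal_fn(struct work_struct *work)
{
	struct deferred_signal *ds =
		container_of(work, struct deferred_signal, work);

	/* Workqueue context: no eventfd wakeup is on the stack here. */
	eventfd_signal(ds->ctx, 1);
	kfree(ds);
}

static void signal_event(struct deferred_signal *ds)
{
	if (eventfd_signal_count()) {
		/* Already inside eventfd_signal() on this CPU: punt to a workqueue. */
		INIT_WORK(&ds->work, deferred_signal_fn);
		schedule_work(&ds->work);
	} else {
		eventfd_signal(ds->ctx, 1);
		kfree(ds);
	}
}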
fs/eventfd.c
@@ -24,6 +24,8 @@
 #include <linux/seq_file.h>
 #include <linux/idr.h>
 
+DEFINE_PER_CPU(int, eventfd_wake_count);
+
 static DEFINE_IDA(eventfd_ida);
 
 struct eventfd_ctx {
@@ -60,12 +62,25 @@ __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
 {
 	unsigned long flags;
 
+	/*
+	 * Deadlock or stack overflow issues can happen if we recurse here
+	 * through waitqueue wakeup handlers. If the caller uses potentially
+	 * nested waitqueues with custom wakeup handlers, then it should
+	 * check eventfd_signal_count() before calling this function. If
+	 * it returns true, the eventfd_signal() call should be deferred to a
+	 * safe context.
+	 */
+	if (WARN_ON_ONCE(this_cpu_read(eventfd_wake_count)))
+		return 0;
+
 	spin_lock_irqsave(&ctx->wqh.lock, flags);
+	this_cpu_inc(eventfd_wake_count);
 	if (ULLONG_MAX - ctx->count < n)
 		n = ULLONG_MAX - ctx->count;
 	ctx->count += n;
 	if (waitqueue_active(&ctx->wqh))
 		wake_up_locked_poll(&ctx->wqh, EPOLLIN);
+	this_cpu_dec(eventfd_wake_count);
 	spin_unlock_irqrestore(&ctx->wqh.lock, flags);
 
 	return n;
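
For illustration, this is the kind of nesting the new counter catches: a custom wakeup handler, attached to one eventfd's waitqueue, that signals a second eventfd while the first eventfd_signal() still holds its waitqueue lock. The chain struct and chained_wakeup() below are a hypothetical example, not code from this commit:

#include <linux/eventfd.h>
#include <linux/kernel.h>
#include <linux/wait.h>

/* Hypothetical chained wakeup: one eventfd's wakeup signals another. */
struct chain {
	wait_queue_entry_t wait;
	struct eventfd_ctx *next;
};

static int chained_wakeup(wait_queue_entry_t *wait, unsigned mode,
			  int sync, void *key)
{
	struct chain *c = container_of(wait, struct chain, wait);

	/*
	 * We run from wake_up_locked_poll() above, so eventfd_wake_count
	 * is non-zero on this CPU. This nested call now trips the
	 * WARN_ON_ONCE() and returns 0 instead of recursing further (or
	 * deadlocking if c->next is the eventfd that is waking us).
	 */
	eventfd_signal(c->next, 1);
	return 0;
}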
include/linux/eventfd.h
@@ -12,6 +12,8 @@
 #include <linux/fcntl.h>
 #include <linux/wait.h>
 #include <linux/err.h>
+#include <linux/percpu-defs.h>
+#include <linux/percpu.h>
 
 /*
  * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
@@ -40,6 +42,13 @@ __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n);
 int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
				   __u64 *cnt);
 
+DECLARE_PER_CPU(int, eventfd_wake_count);
+
+static inline bool eventfd_signal_count(void)
+{
+	return this_cpu_read(eventfd_wake_count);
+}
+
 #else /* CONFIG_EVENTFD */
 
 /*
@@ -68,6 +77,11 @@ static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx,
 	return -ENOSYS;
 }
 
+static inline bool eventfd_signal_count(void)
+{
+	return false;
+}
+
 #endif
 
 #endif /* _LINUX_EVENTFD_H */