Commit ebdfefc0 authored by Jens Axboe

io_uring/sqpoll: fix io-wq affinity when IORING_SETUP_SQPOLL is used

If we set up the ring with SQPOLL, then that polling thread has its
own io-wq setup. This means that if the application uses
IORING_REGISTER_IOWQ_AFF to set the io-wq affinity, we should be
setting it not for the invoking task, but rather for the sqpoll task.

Add an sqpoll helper that parks the thread and updates the affinity,
and use it when the ring is set up with SQPOLL.
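For illustration only (not part of the patch), here is a minimal
userspace sketch of the path this change corrects. It assumes a
liburing build that provides io_uring_register_iowq_aff() and
io_uring_unregister_iowq_aff(), and a kernel that permits SQPOLL for
the calling user. The application sets up an SQPOLL ring and then
restricts io-wq workers to CPUs 0-1; with this fix the mask is applied
to the sqpoll thread's io-wq rather than to the submitting task's.

/*
 * Hedged sketch: register io-wq CPU affinity on an SQPOLL ring via liburing.
 * Assumes io_uring_register_iowq_aff()/io_uring_unregister_iowq_aff() are
 * available; with this kernel fix the mask lands on the sqpoll thread's
 * io-wq rather than on the io-wq of the task issuing the register call.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <string.h>
#include <liburing.h>

int main(void)
{
	struct io_uring_params p = {
		.flags = IORING_SETUP_SQPOLL,
		.sq_thread_idle = 2000,	/* ms before the sqpoll thread idles */
	};
	struct io_uring ring;
	cpu_set_t mask;
	int ret;

	ret = io_uring_queue_init_params(8, &ring, &p);
	if (ret < 0) {
		fprintf(stderr, "queue_init: %s\n", strerror(-ret));
		return 1;
	}

	/* Restrict io-wq workers to CPUs 0 and 1 */
	CPU_ZERO(&mask);
	CPU_SET(0, &mask);
	CPU_SET(1, &mask);
	ret = io_uring_register_iowq_aff(&ring, sizeof(mask), &mask);
	if (ret < 0)
		fprintf(stderr, "register_iowq_aff: %s\n", strerror(-ret));

	/* Clear the registered affinity before tearing the ring down */
	io_uring_unregister_iowq_aff(&ring);
	io_uring_queue_exit(&ring);
	return ret < 0;
}

Before this fix, the same call on an SQPOLL ring would update the
io-wq of the task calling io_uring_register_iowq_aff(), which is not
the io-wq doing the work when SQPOLL is in use.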

Fixes: fe76421d ("io_uring: allow user configurable IO thread CPU affinity")
Cc: stable@vger.kernel.org # 5.10+
Link: https://github.com/axboe/liburing/discussions/884
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent d246c759
--- a/io_uring/io-wq.c
+++ b/io_uring/io-wq.c
@@ -1306,13 +1306,16 @@ static int io_wq_cpu_offline(unsigned int cpu, struct hlist_node *node)
 	return __io_wq_cpu_online(wq, cpu, false);
 }
 
-int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask)
+int io_wq_cpu_affinity(struct io_uring_task *tctx, cpumask_var_t mask)
 {
+	if (!tctx || !tctx->io_wq)
+		return -EINVAL;
+
 	rcu_read_lock();
 	if (mask)
-		cpumask_copy(wq->cpu_mask, mask);
+		cpumask_copy(tctx->io_wq->cpu_mask, mask);
 	else
-		cpumask_copy(wq->cpu_mask, cpu_possible_mask);
+		cpumask_copy(tctx->io_wq->cpu_mask, cpu_possible_mask);
 	rcu_read_unlock();
 
 	return 0;
--- a/io_uring/io-wq.h
+++ b/io_uring/io-wq.h
@@ -50,7 +50,7 @@ void io_wq_put_and_exit(struct io_wq *wq);
 void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
 void io_wq_hash_work(struct io_wq_work *work, void *val);
 
-int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask);
+int io_wq_cpu_affinity(struct io_uring_task *tctx, cpumask_var_t mask);
 int io_wq_max_workers(struct io_wq *wq, int *new_count);
 
 static inline bool io_wq_is_hashed(struct io_wq_work *work)
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -4183,16 +4183,28 @@ static int io_register_enable_rings(struct io_ring_ctx *ctx)
 	return 0;
 }
 
+static __cold int __io_register_iowq_aff(struct io_ring_ctx *ctx,
+					 cpumask_var_t new_mask)
+{
+	int ret;
+
+	if (!(ctx->flags & IORING_SETUP_SQPOLL)) {
+		ret = io_wq_cpu_affinity(current->io_uring, new_mask);
+	} else {
+		mutex_unlock(&ctx->uring_lock);
+		ret = io_sqpoll_wq_cpu_affinity(ctx, new_mask);
+		mutex_lock(&ctx->uring_lock);
+	}
+
+	return ret;
+}
+
 static __cold int io_register_iowq_aff(struct io_ring_ctx *ctx,
 				       void __user *arg, unsigned len)
 {
-	struct io_uring_task *tctx = current->io_uring;
 	cpumask_var_t new_mask;
 	int ret;
 
-	if (!tctx || !tctx->io_wq)
-		return -EINVAL;
-
 	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
 		return -ENOMEM;
 
@@ -4213,19 +4225,14 @@ static __cold int io_register_iowq_aff(struct io_ring_ctx *ctx,
 		return -EFAULT;
 	}
 
-	ret = io_wq_cpu_affinity(tctx->io_wq, new_mask);
+	ret = __io_register_iowq_aff(ctx, new_mask);
 	free_cpumask_var(new_mask);
 	return ret;
 }
 
 static __cold int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
 {
-	struct io_uring_task *tctx = current->io_uring;
-
-	if (!tctx || !tctx->io_wq)
-		return -EINVAL;
-
-	return io_wq_cpu_affinity(tctx->io_wq, NULL);
+	return __io_register_iowq_aff(ctx, NULL);
 }
 
 static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
--- a/io_uring/sqpoll.c
+++ b/io_uring/sqpoll.c
@@ -421,3 +421,18 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx,
 	io_sq_thread_finish(ctx);
 	return ret;
 }
+
+__cold int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx,
+				     cpumask_var_t mask)
+{
+	struct io_sq_data *sqd = ctx->sq_data;
+	int ret = -EINVAL;
+
+	if (sqd) {
+		io_sq_thread_park(sqd);
+		ret = io_wq_cpu_affinity(sqd->thread->io_uring, mask);
+		io_sq_thread_unpark(sqd);
+	}
+
+	return ret;
+}
--- a/io_uring/sqpoll.h
+++ b/io_uring/sqpoll.h
@@ -27,3 +27,4 @@ void io_sq_thread_park(struct io_sq_data *sqd);
 void io_sq_thread_unpark(struct io_sq_data *sqd);
 void io_put_sq_data(struct io_sq_data *sqd);
 void io_sqpoll_wait_sq(struct io_ring_ctx *ctx);
+int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx, cpumask_var_t mask);