Commit 75c668cd authored by Pavel Begunkov, committed by Jens Axboe

io_uring: treat NONBLOCK and RWF_NOWAIT similarly

Make the decision of whether a read/write needs to be retried the same for
O_NONBLOCK and RWF_NOWAIT: set REQ_F_NOWAIT when either is specified and use
that flag for all relevant checks. Also fix resubmission of NOWAIT requests
via io_rw_reissue().
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent b23df91b
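
For reference, the two userspace paths this commit unifies can be exercised as below. This is a minimal sketch, not part of the patch: it assumes liburing is installed, "testfile" is a placeholder path, and most error handling is omitted. One read goes to an O_NONBLOCK file descriptor, the other to a plain descriptor with RWF_NOWAIT set on the sqe; after this change the kernel marks both requests REQ_F_NOWAIT and handles them the same way.

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/uio.h>        /* RWF_NOWAIT on recent glibc; otherwise <linux/fs.h> */
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	static char buf[4096];
	int fd_nonblock, fd_nowait, i;

	if (io_uring_queue_init(4, &ring, 0) < 0)
		return 1;

	/* Path 1: the whole file descriptor is non-blocking (O_NONBLOCK). */
	fd_nonblock = open("testfile", O_RDONLY | O_NONBLOCK);
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, fd_nonblock, buf, sizeof(buf), 0);

	/* Path 2: a plain file descriptor, but this single request sets RWF_NOWAIT. */
	fd_nowait = open("testfile", O_RDONLY);
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, fd_nowait, buf, sizeof(buf), 0);
	sqe->rw_flags = RWF_NOWAIT;

	io_uring_submit(&ring);

	/*
	 * With this patch both requests carry REQ_F_NOWAIT in the kernel:
	 * neither is punted to an io-wq worker or retried, so either may
	 * complete with -EAGAIN if the data is not immediately available.
	 */
	for (i = 0; i < 2; i++) {
		if (io_uring_wait_cqe(&ring, &cqe) == 0) {
			printf("res = %d%s\n", cqe->res,
			       cqe->res == -EAGAIN ? " (would block)" : "");
			io_uring_cqe_seen(&ring, cqe);
		}
	}
	io_uring_queue_exit(&ring);
	return 0;
}

Build with the usual liburing link flag, e.g. cc -O2 example.c -luring.
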
fs/io_uring.c
@@ -2734,7 +2734,9 @@ static bool io_rw_reissue(struct io_kiocb *req, long res)
 	if (res != -EAGAIN && res != -EOPNOTSUPP)
 		return false;
 	mode = file_inode(req->file)->i_mode;
-	if ((!S_ISBLK(mode) && !S_ISREG(mode)) || io_wq_current_is_worker())
+	if (!S_ISBLK(mode) && !S_ISREG(mode))
+		return false;
+	if ((req->flags & REQ_F_NOWAIT) || io_wq_current_is_worker())
 		return false;
 
 	lockdep_assert_held(&req->ctx->uring_lock);
@@ -2907,16 +2909,17 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 	struct kiocb *kiocb = &req->rw.kiocb;
+	struct file *file = req->file;
 	unsigned ioprio;
 	int ret;
 
-	if (S_ISREG(file_inode(req->file)->i_mode))
+	if (S_ISREG(file_inode(file)->i_mode))
 		req->flags |= REQ_F_ISREG;
 
 	kiocb->ki_pos = READ_ONCE(sqe->off);
-	if (kiocb->ki_pos == -1 && !(req->file->f_mode & FMODE_STREAM)) {
+	if (kiocb->ki_pos == -1 && !(file->f_mode & FMODE_STREAM)) {
 		req->flags |= REQ_F_CUR_POS;
-		kiocb->ki_pos = req->file->f_pos;
+		kiocb->ki_pos = file->f_pos;
 	}
 	kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
 	kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
@@ -2924,6 +2927,10 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	if (unlikely(ret))
 		return ret;
 
+	/* don't allow async punt for O_NONBLOCK or RWF_NOWAIT */
+	if ((kiocb->ki_flags & IOCB_NOWAIT) || (file->f_flags & O_NONBLOCK))
+		req->flags |= REQ_F_NOWAIT;
+
 	ioprio = READ_ONCE(sqe->ioprio);
 	if (ioprio) {
 		ret = ioprio_check_cap(ioprio);
@@ -2934,10 +2941,6 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	} else
 		kiocb->ki_ioprio = get_current_ioprio();
 
-	/* don't allow async punt if RWF_NOWAIT was requested */
-	if (kiocb->ki_flags & IOCB_NOWAIT)
-		req->flags |= REQ_F_NOWAIT;
-
 	if (ctx->flags & IORING_SETUP_IOPOLL) {
 		if (!(kiocb->ki_flags & IOCB_DIRECT) ||
 		    !kiocb->ki_filp->f_op->iopoll)
@@ -3546,15 +3549,14 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
 		/* IOPOLL retry should happen for io-wq threads */
 		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
 			goto done;
-		/* no retry on NONBLOCK marked file */
-		if (req->file->f_flags & O_NONBLOCK)
+		/* no retry on NONBLOCK nor RWF_NOWAIT */
+		if (req->flags & REQ_F_NOWAIT)
 			goto done;
 		/* some cases will consume bytes even on error returns */
 		iov_iter_revert(iter, io_size - iov_iter_count(iter));
 		ret = 0;
 	} else if (ret <= 0 || ret == io_size || !force_nonblock ||
-		   (req->file->f_flags & O_NONBLOCK) ||
-		   !(req->flags & REQ_F_ISREG)) {
+		   (req->flags & REQ_F_NOWAIT) || !(req->flags & REQ_F_ISREG)) {
 		/* read all, failed, already did sync or don't want to retry */
 		goto done;
 	}
@@ -3675,8 +3677,8 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
 	 */
 	if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
 		ret2 = -EAGAIN;
-	/* no retry on NONBLOCK marked file */
-	if (ret2 == -EAGAIN && (req->file->f_flags & O_NONBLOCK))
+	/* no retry on NONBLOCK nor RWF_NOWAIT */
+	if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
 		goto done;
 	if (!force_nonblock || ret2 != -EAGAIN) {
 		/* IOPOLL retry should happen for io-wq threads */