Commit d63d1b5e authored by Jens Axboe's avatar Jens Axboe

io_uring: add support for fallocate()

This exposes fallocate(2) through io_uring.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 4d927483
@@ -319,6 +319,7 @@ struct io_sync {
	loff_t len;
	loff_t off;
	int flags;
	int mode;
};

struct io_cancel {
@@ -2101,6 +2102,54 @@ static int io_fsync(struct io_kiocb *req, struct io_kiocb **nxt,
	return 0;
}
/*
 * io-wq work handler that performs the actual fallocate() in a context
 * that is allowed to block. Posts the result as a completion event and,
 * if finishing this request produced a follow-up request, hands that
 * request back to io-wq through *workptr.
 */
static void io_fallocate_finish(struct io_wq_work **workptr)
{
	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
	struct io_kiocb *nxt = NULL;
	int ret;

	/* off/len/mode were stashed in req->sync by io_fallocate_prep() */
	ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
				req->sync.len);
	if (ret < 0)
		req_set_fail_links(req);
	io_cqring_add_event(req, ret);
	/* drop our ref; may surface a next request (e.g. a link) in nxt */
	io_put_req_find_next(req, &nxt);
	if (nxt)
		io_wq_assign_next(workptr, nxt);
}
/*
 * Prepare an IORING_OP_FALLOCATE request: validate that unused SQE
 * fields are clear and stash offset, length and mode in the request
 * for later execution.
 *
 * Returns 0 on success, -EINVAL if an unsupported SQE field is set.
 */
static int io_fallocate_prep(struct io_kiocb *req,
			     const struct io_uring_sqe *sqe)
{
	/* fallocate does not use these SQE fields; reject if any are set */
	if (sqe->ioprio)
		return -EINVAL;
	if (sqe->buf_index)
		return -EINVAL;
	if (sqe->rw_flags)
		return -EINVAL;

	/* fallocate(fd, mode, off, len): mode travels in sqe->len,
	 * length in sqe->addr */
	req->sync.mode = READ_ONCE(sqe->len);
	req->sync.off = READ_ONCE(sqe->off);
	req->sync.len = READ_ONCE(sqe->addr);
	return 0;
}
/*
 * Issue an IORING_OP_FALLOCATE request. fallocate() may block, so it
 * can never run on the non-blocking submission path: in that case the
 * request is punted to io-wq by installing io_fallocate_finish() as the
 * work handler and returning -EAGAIN. Otherwise the work is run inline,
 * and any follow-up request the finish handler swapped into the work
 * pointer is returned through *nxt.
 */
static int io_fallocate(struct io_kiocb *req, struct io_kiocb **nxt,
			bool force_nonblock)
{
	struct io_wq_work *work, *old_work;

	/* fallocate always requires a blocking context */
	if (force_nonblock) {
		/* drop a reference before punting to async context;
		 * NOTE(review): matches the io_fsync punt path of this era */
		io_put_req(req);
		req->work.func = io_fallocate_finish;
		return -EAGAIN;
	}

	work = old_work = &req->work;
	io_fallocate_finish(&work);
	/* finish handler replaces work when a next request is queued */
	if (work && work != old_work)
		*nxt = container_of(work, struct io_kiocb, work);

	return 0;
}
static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_ring_ctx *ctx = req->ctx;
@@ -3123,6 +3172,9 @@ static int io_req_defer_prep(struct io_kiocb *req,
	case IORING_OP_ACCEPT:
		ret = io_accept_prep(req, sqe);
		break;
	case IORING_OP_FALLOCATE:
		ret = io_fallocate_prep(req, sqe);
		break;
	default:
		printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
				req->opcode);
@@ -3277,6 +3329,14 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
		}
		ret = io_async_cancel(req, nxt);
		break;
	case IORING_OP_FALLOCATE:
		if (sqe) {
			ret = io_fallocate_prep(req, sqe);
			if (ret)
				break;
		}
		ret = io_fallocate(req, nxt, force_nonblock);
		break;
	default:
		ret = -EINVAL;
		break;
......
@@ -76,6 +76,7 @@ enum {
	IORING_OP_ASYNC_CANCEL,
	IORING_OP_LINK_TIMEOUT,
	IORING_OP_CONNECT,
	IORING_OP_FALLOCATE,

	/* this goes last, obviously */
	IORING_OP_LAST,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment