Commit 552ef802 authored by Christoph Hellwig, committed by Theodore Ts'o

direct-io: move aio_complete into ->end_io

Filesystems with unwritten extent support must not complete an AIO request
until the transaction to convert the extent has been committed.  That means
the aio_complete call needs to be moved into the ->end_io callback so
that the filesystem can control exactly when to call it.

This makes a bit of a mess out of dio_complete and makes the ->end_io
callback prototype even more complicated.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
parent 5c521830
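
The practical effect is that every dio_iodone_t implementation now receives the request's return value and an is_async flag, and is responsible for completing the iocb itself; dio_complete() only calls aio_complete() directly when no ->end_io callback is registered. A minimal sketch of the new contract (the filesystem name and function below are illustrative, not part of this patch):

static void examplefs_end_io_dio(struct kiocb *iocb, loff_t offset,
				 ssize_t size, void *private, int ret,
				 bool is_async)
{
	/*
	 * Per-filesystem completion work goes here, e.g. kicking off the
	 * unwritten extent conversion for this range.
	 */

	/*
	 * The generic direct-io code no longer completes async requests on
	 * our behalf, so do it here, or defer it until the conversion
	 * transaction has committed.
	 */
	if (is_async)
		aio_complete(iocb, ret, 0);
}
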
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -218,7 +218,7 @@ static struct page *dio_get_page(struct dio *dio)
  * filesystems can use it to hold additional state between get_block calls and
  * dio_complete.
  */
-static int dio_complete(struct dio *dio, loff_t offset, int ret)
+static int dio_complete(struct dio *dio, loff_t offset, int ret, bool is_async)
 {
 	ssize_t transferred = 0;
@@ -239,14 +239,6 @@ static int dio_complete(struct dio *dio, loff_t offset, int ret)
 			transferred = dio->i_size - offset;
 	}
 
-	if (dio->end_io && dio->result)
-		dio->end_io(dio->iocb, offset, transferred,
-			    dio->map_bh.b_private);
-
-	if (dio->flags & DIO_LOCKING)
-		/* lockdep: non-owner release */
-		up_read_non_owner(&dio->inode->i_alloc_sem);
-
 	if (ret == 0)
 		ret = dio->page_errors;
 	if (ret == 0)
@@ -254,6 +246,17 @@ static int dio_complete(struct dio *dio, loff_t offset, int ret)
 	if (ret == 0)
 		ret = transferred;
 
+	if (dio->end_io && dio->result) {
+		dio->end_io(dio->iocb, offset, transferred,
+			    dio->map_bh.b_private, ret, is_async);
+	} else if (is_async) {
+		aio_complete(dio->iocb, ret, 0);
+	}
+
+	if (dio->flags & DIO_LOCKING)
+		/* lockdep: non-owner release */
+		up_read_non_owner(&dio->inode->i_alloc_sem);
+
 	return ret;
 }
@@ -277,8 +280,7 @@ static void dio_bio_end_aio(struct bio *bio, int error)
 	spin_unlock_irqrestore(&dio->bio_lock, flags);
 
 	if (remaining == 0) {
-		int ret = dio_complete(dio, dio->iocb->ki_pos, 0);
-		aio_complete(dio->iocb, ret, 0);
+		dio_complete(dio, dio->iocb->ki_pos, 0, true);
 		kfree(dio);
 	}
 }
@@ -1126,7 +1128,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
 	spin_unlock_irqrestore(&dio->bio_lock, flags);
 
 	if (ret2 == 0) {
-		ret = dio_complete(dio, offset, ret);
+		ret = dio_complete(dio, offset, ret, false);
 		kfree(dio);
 	} else
 		BUG_ON(ret != -EIOCBQUEUED);
......@@ -3775,7 +3775,8 @@ static ext4_io_end_t *ext4_init_io_end (struct inode *inode, gfp_t flags)
}
static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
ssize_t size, void *private)
ssize_t size, void *private, int ret,
bool is_async)
{
ext4_io_end_t *io_end = iocb->private;
struct workqueue_struct *wq;
......@@ -3784,7 +3785,7 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
/* if not async direct IO or dio with 0 bytes write, just return */
if (!io_end || !size)
return;
goto out;
ext_debug("ext4_end_io_dio(): io_end 0x%p"
"for inode %lu, iocb 0x%p, offset %llu, size %llu\n",
......@@ -3795,7 +3796,7 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
if (io_end->flag != EXT4_IO_UNWRITTEN){
ext4_free_io_end(io_end);
iocb->private = NULL;
return;
goto out;
}
io_end->offset = offset;
......@@ -3812,6 +3813,9 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
list_add_tail(&io_end->list, &ei->i_completed_io_list);
spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
iocb->private = NULL;
out:
if (is_async)
aio_complete(iocb, ret, 0);
}
static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate)
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -609,7 +609,9 @@ static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
 static void ocfs2_dio_end_io(struct kiocb *iocb,
			     loff_t offset,
			     ssize_t bytes,
-			     void *private)
+			     void *private,
+			     int ret,
+			     bool is_async)
 {
 	struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
 	int level;
@@ -623,6 +625,9 @@ static void ocfs2_dio_end_io(struct kiocb *iocb,
 	if (!level)
 		up_read(&inode->i_alloc_sem);
 
 	ocfs2_rw_unlock(inode, level);
+
+	if (is_async)
+		aio_complete(iocb, ret, 0);
 }
 
 /*
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -1599,7 +1599,9 @@ xfs_end_io_direct(
 	struct kiocb	*iocb,
 	loff_t		offset,
 	ssize_t		size,
-	void		*private)
+	void		*private,
+	int		ret,
+	bool		is_async)
 {
 	xfs_ioend_t	*ioend = iocb->private;
@@ -1645,6 +1647,9 @@ xfs_end_io_direct(
 	 * against double-freeing.
 	 */
 	iocb->private = NULL;
+
+	if (is_async)
+		aio_complete(iocb, ret, 0);
 }
 
 STATIC ssize_t
--- a/fs/xfs/linux-2.6/xfs_aops.h
+++ b/fs/xfs/linux-2.6/xfs_aops.h
@@ -37,6 +37,8 @@ typedef struct xfs_ioend {
 	size_t			io_size;	/* size of the extent */
 	xfs_off_t		io_offset;	/* offset in the file */
 	struct work_struct	io_work;	/* xfsdatad work queue */
+	struct kiocb		*io_iocb;
+	int			io_result;
 } xfs_ioend_t;
 
 extern const struct address_space_operations xfs_address_space_operations;
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -415,7 +415,8 @@ struct buffer_head;
 typedef int (get_block_t)(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create);
 typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
-	ssize_t bytes, void *private);
+	ssize_t bytes, void *private, int ret,
+	bool is_async);
 
 /*
  * Attribute flags.  These should be or-ed together to figure out what
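
The io_iocb and io_result fields added to xfs_ioend above exist so that completion can be postponed until the unwritten extent conversion has been committed. A rough sketch of that deferral pattern, written against a hypothetical filesystem and ioend structure rather than the actual XFS code; it assumes the ioend is allocated and its io_work initialized at submission time, and that a conversion workqueue exists:

static struct workqueue_struct *examplefs_conv_wq;	/* illustrative only */

struct examplefs_ioend {
	struct work_struct	io_work;	/* conversion runs from a workqueue */
	struct kiocb		*io_iocb;	/* set when an async iocb is deferred */
	int			io_result;	/* value to pass to aio_complete() */
	loff_t			io_offset;
	ssize_t			io_size;
};

/* ->end_io callback: record what to complete, but do not complete it yet. */
static void examplefs_end_io_dio(struct kiocb *iocb, loff_t offset,
				 ssize_t size, void *private, int ret,
				 bool is_async)
{
	struct examplefs_ioend *ioend = iocb->private;

	ioend->io_offset = offset;
	ioend->io_size = size;
	ioend->io_result = ret;
	ioend->io_iocb = is_async ? iocb : NULL;	/* only async requests are deferred */

	iocb->private = NULL;
	queue_work(examplefs_conv_wq, &ioend->io_work);
}

/* Work item: convert the unwritten extent, then finish the aio. */
static void examplefs_end_io_work(struct work_struct *work)
{
	struct examplefs_ioend *ioend =
		container_of(work, struct examplefs_ioend, io_work);

	/* ... convert the extent and commit the conversion transaction ... */

	if (ioend->io_iocb)
		aio_complete(ioend->io_iocb, ioend->io_result, 0);
	kfree(ioend);
}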