Commit bbc5a740 authored by Christoph Hellwig, committed by Dave Chinner

xfs: split xfs_file_read_iter into buffered and direct I/O helpers

Similar to what we did on the write side a while ago.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Dave Chinner <david@fromorbit.com>
parent cf810712
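
With this change, xfs_file_read_iter() becomes a thin dispatcher over the two new helpers. Condensed from the new-side code in the diff below, the read path now branches once on IOCB_DIRECT:

	if (iocb->ki_flags & IOCB_DIRECT)
		ret = xfs_file_dio_aio_read(iocb, to);		/* sector-alignment check + page cache flush */
	else
		ret = xfs_file_buffered_aio_read(iocb, to);	/* shared iolock around generic_file_read_iter() */

The per-call bookkeeping (the xs_read_calls counter, the forced-shutdown check, the xs_read_bytes accounting) stays in the dispatcher, so both helpers remain minimal.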
@@ -282,35 +282,33 @@ xfs_file_fsync(
 }
 
 STATIC ssize_t
-xfs_file_read_iter(
+xfs_file_dio_aio_read(
 	struct kiocb		*iocb,
 	struct iov_iter		*to)
 {
-	struct file		*file = iocb->ki_filp;
-	struct inode		*inode = file->f_mapping->host;
+	struct address_space	*mapping = iocb->ki_filp->f_mapping;
+	struct inode		*inode = mapping->host;
 	struct xfs_inode	*ip = XFS_I(inode);
-	struct xfs_mount	*mp = ip->i_mount;
-	size_t			size = iov_iter_count(to);
+	size_t			count = iov_iter_count(to);
+	struct xfs_buftarg	*target;
 	ssize_t			ret = 0;
-	loff_t			pos = iocb->ki_pos;
 
-	XFS_STATS_INC(mp, xs_read_calls);
+	trace_xfs_file_direct_read(ip, count, iocb->ki_pos);
 
-	if ((iocb->ki_flags & IOCB_DIRECT) && !IS_DAX(inode)) {
-		xfs_buftarg_t	*target =
-			XFS_IS_REALTIME_INODE(ip) ?
-				mp->m_rtdev_targp : mp->m_ddev_targp;
+	if (XFS_IS_REALTIME_INODE(ip))
+		target = ip->i_mount->m_rtdev_targp;
+	else
+		target = ip->i_mount->m_ddev_targp;
+
+	if (!IS_DAX(inode)) {
 		/* DIO must be aligned to device logical sector size */
-		if ((pos | size) & target->bt_logical_sectormask) {
-			if (pos == i_size_read(inode))
+		if ((iocb->ki_pos | count) & target->bt_logical_sectormask) {
+			if (iocb->ki_pos == i_size_read(inode))
 				return 0;
 			return -EINVAL;
 		}
 	}
 
-	if (XFS_FORCED_SHUTDOWN(mp))
-		return -EIO;
-
 	/*
 	 * Locking is a bit tricky here. If we take an exclusive lock for direct
 	 * IO, we effectively serialise all new concurrent read IO to this file
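
An aside on the alignment test above: bt_logical_sectormask is the device's logical sector size minus one, so OR-ing the offset and the byte count and masking detects a misalignment in either value with a single branch. A minimal userspace sketch of the same bit trick (the helper name and the 512-byte sector size are illustrative, not XFS code):

#include <stdbool.h>
#include <stddef.h>
#include <sys/types.h>

/* Illustrative stand-in for the DIO alignment check; not XFS code. */
static bool dio_aligned(off_t pos, size_t count, unsigned long sector_size)
{
	unsigned long mask = sector_size - 1;	/* e.g. 511 for 512-byte sectors */

	/* A low bit set in either pos or count means one of them is unaligned. */
	return ((pos | count) & mask) == 0;
}

/*
 * dio_aligned(4096, 8192, 512) -> true
 * dio_aligned(4100, 8192, 512) -> false: the offset is not sector-aligned
 */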
@@ -322,7 +320,7 @@ xfs_file_read_iter(
 	 * serialisation.
 	 */
 	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
-	if ((iocb->ki_flags & IOCB_DIRECT) && inode->i_mapping->nrpages) {
+	if (mapping->nrpages) {
 		xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
 		xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);
 
@@ -337,8 +335,8 @@ xfs_file_read_iter(
 		 * flush and reduce the chances of repeated iolock cycles going
 		 * forward.
 		 */
-		if (inode->i_mapping->nrpages) {
-			ret = filemap_write_and_wait(VFS_I(ip)->i_mapping);
+		if (mapping->nrpages) {
+			ret = filemap_write_and_wait(mapping);
 			if (ret) {
 				xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
 				return ret;
@@ -349,23 +347,56 @@ xfs_file_read_iter(
 			 * we fail to invalidate a page, but this should never
 			 * happen on XFS.  Warn if it does fail.
 			 */
-			ret = invalidate_inode_pages2(VFS_I(ip)->i_mapping);
+			ret = invalidate_inode_pages2(mapping);
 			WARN_ON_ONCE(ret);
 			ret = 0;
 		}
 		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
 	}
 
+	ret = generic_file_read_iter(iocb, to);
+	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
+
+	return ret;
+}
+
+STATIC ssize_t
+xfs_file_buffered_aio_read(
+	struct kiocb		*iocb,
+	struct iov_iter		*to)
+{
+	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
+	ssize_t			ret;
+
+	trace_xfs_file_buffered_read(ip, iov_iter_count(to), iocb->ki_pos);
+
+	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
+	ret = generic_file_read_iter(iocb, to);
+	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
+
+	return ret;
+}
+
+STATIC ssize_t
+xfs_file_read_iter(
+	struct kiocb		*iocb,
+	struct iov_iter		*to)
+{
+	struct xfs_mount	*mp = XFS_I(file_inode(iocb->ki_filp))->i_mount;
+	ssize_t			ret = 0;
+
+	XFS_STATS_INC(mp, xs_read_calls);
+
+	if (XFS_FORCED_SHUTDOWN(mp))
+		return -EIO;
+
 	if (iocb->ki_flags & IOCB_DIRECT)
-		trace_xfs_file_direct_read(ip, size, pos);
+		ret = xfs_file_dio_aio_read(iocb, to);
 	else
-		trace_xfs_file_buffered_read(ip, size, pos);
+		ret = xfs_file_buffered_aio_read(iocb, to);
 
-	ret = generic_file_read_iter(iocb, to);
 	if (ret > 0)
 		XFS_STATS_ADD(mp, xs_read_bytes, ret);
 
-	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
 	return ret;
 }
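
Pieced together from the hunks above, the locking in the new xfs_file_dio_aio_read() has this shape (a condensed outline of the diff's own code with error handling elided, not a compilable excerpt):

	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
	if (mapping->nrpages) {
		/* cached pages exist: hold out new IO with the exclusive iolock */
		xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
		xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);
		if (mapping->nrpages) {		/* recheck under the exclusive lock */
			filemap_write_and_wait(mapping);	/* flush dirty pages */
			invalidate_inode_pages2(mapping);	/* then invalidate them */
		}
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);	/* drop back to shared */
	}
	ret = generic_file_read_iter(iocb, to);
	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);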
@@ -747,7 +778,7 @@ xfs_file_dio_aio_write(
 	end = iocb->ki_pos + count - 1;
 
 	/*
-	 * See xfs_file_read_iter() for why we do a full-file flush here.
+	 * See xfs_file_dio_aio_read() for why we do a full-file flush here.
 	 */
 	if (mapping->nrpages) {
 		ret = filemap_write_and_wait(VFS_I(ip)->i_mapping);