Commit eaebb515 authored by Brian Foster, committed by Darrick J. Wong

xfs: refactor buffer submission into a common helper

Sync and async buffer submission both do generally similar things
with a couple odd exceptions. Refactor the core buffer submission
code into a common helper to isolate buffer submission from
completion handling of synchronous buffer I/O.

This patch does not change behavior. It is a step towards support
for using synchronous buffer I/O via synchronous delwri queue
submission.
Designed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Brian Foster <bfoster@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
parent 5fdd9794
...@@ -1458,22 +1458,20 @@ _xfs_buf_ioapply( ...@@ -1458,22 +1458,20 @@ _xfs_buf_ioapply(
* a call to this function unless the caller holds an additional reference * a call to this function unless the caller holds an additional reference
* itself. * itself.
*/ */
void static int
xfs_buf_submit( __xfs_buf_submit(
struct xfs_buf *bp) struct xfs_buf *bp)
{ {
trace_xfs_buf_submit(bp, _RET_IP_); trace_xfs_buf_submit(bp, _RET_IP_);
ASSERT(!(bp->b_flags & _XBF_DELWRI_Q)); ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
ASSERT(bp->b_flags & XBF_ASYNC);
/* on shutdown we stale and complete the buffer immediately */ /* on shutdown we stale and complete the buffer immediately */
if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) { if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
xfs_buf_ioerror(bp, -EIO); xfs_buf_ioerror(bp, -EIO);
bp->b_flags &= ~XBF_DONE; bp->b_flags &= ~XBF_DONE;
xfs_buf_stale(bp); xfs_buf_stale(bp);
xfs_buf_ioend(bp); return -EIO;
return;
} }
if (bp->b_flags & XBF_WRITE) if (bp->b_flags & XBF_WRITE)
...@@ -1482,22 +1480,13 @@ xfs_buf_submit( ...@@ -1482,22 +1480,13 @@ xfs_buf_submit(
/* clear the internal error state to avoid spurious errors */ /* clear the internal error state to avoid spurious errors */
bp->b_io_error = 0; bp->b_io_error = 0;
/*
* The caller's reference is released during I/O completion.
* This occurs some time after the last b_io_remaining reference is
* released, so after we drop our Io reference we have to have some
* other reference to ensure the buffer doesn't go away from underneath
* us. Take a direct reference to ensure we have safe access to the
* buffer until we are finished with it.
*/
xfs_buf_hold(bp);
/* /*
* Set the count to 1 initially, this will stop an I/O completion * Set the count to 1 initially, this will stop an I/O completion
* callout which happens before we have started all the I/O from calling * callout which happens before we have started all the I/O from calling
* xfs_buf_ioend too early. * xfs_buf_ioend too early.
*/ */
atomic_set(&bp->b_io_remaining, 1); atomic_set(&bp->b_io_remaining, 1);
if (bp->b_flags & XBF_ASYNC)
xfs_buf_ioacct_inc(bp); xfs_buf_ioacct_inc(bp);
_xfs_buf_ioapply(bp); _xfs_buf_ioapply(bp);
...@@ -1507,14 +1496,39 @@ xfs_buf_submit( ...@@ -1507,14 +1496,39 @@ xfs_buf_submit(
* that we don't return to the caller with completion still pending. * that we don't return to the caller with completion still pending.
*/ */
if (atomic_dec_and_test(&bp->b_io_remaining) == 1) { if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
if (bp->b_error) if (bp->b_error || !(bp->b_flags & XBF_ASYNC))
xfs_buf_ioend(bp); xfs_buf_ioend(bp);
else else
xfs_buf_ioend_async(bp); xfs_buf_ioend_async(bp);
} }
xfs_buf_rele(bp); return 0;
}
void
xfs_buf_submit(
struct xfs_buf *bp)
{
int error;
ASSERT(bp->b_flags & XBF_ASYNC);
/*
* The caller's reference is released during I/O completion.
* This occurs some time after the last b_io_remaining reference is
* released, so after we drop our Io reference we have to have some
* other reference to ensure the buffer doesn't go away from underneath
* us. Take a direct reference to ensure we have safe access to the
* buffer until we are finished with it.
*/
xfs_buf_hold(bp);
error = __xfs_buf_submit(bp);
if (error)
xfs_buf_ioend(bp);
/* Note: it is not safe to reference bp now we've dropped our ref */ /* Note: it is not safe to reference bp now we've dropped our ref */
xfs_buf_rele(bp);
} }
/* /*
...@@ -1526,22 +1540,7 @@ xfs_buf_submit_wait( ...@@ -1526,22 +1540,7 @@ xfs_buf_submit_wait(
{ {
int error; int error;
trace_xfs_buf_submit_wait(bp, _RET_IP_); ASSERT(!(bp->b_flags & XBF_ASYNC));
ASSERT(!(bp->b_flags & (_XBF_DELWRI_Q | XBF_ASYNC)));
if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
xfs_buf_ioerror(bp, -EIO);
xfs_buf_stale(bp);
bp->b_flags &= ~XBF_DONE;
return -EIO;
}
if (bp->b_flags & XBF_WRITE)
xfs_buf_wait_unpin(bp);
/* clear the internal error state to avoid spurious errors */
bp->b_io_error = 0;
/* /*
* For synchronous IO, the IO does not inherit the submitters reference * For synchronous IO, the IO does not inherit the submitters reference
...@@ -1551,20 +1550,9 @@ xfs_buf_submit_wait( ...@@ -1551,20 +1550,9 @@ xfs_buf_submit_wait(
*/ */
xfs_buf_hold(bp); xfs_buf_hold(bp);
/* error = __xfs_buf_submit(bp);
* Set the count to 1 initially, this will stop an I/O completion if (error)
* callout which happens before we have started all the I/O from calling goto out;
* xfs_buf_ioend too early.
*/
atomic_set(&bp->b_io_remaining, 1);
_xfs_buf_ioapply(bp);
/*
* make sure we run completion synchronously if it raced with us and is
* already complete.
*/
if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
xfs_buf_ioend(bp);
/* wait for completion before gathering the error from the buffer */ /* wait for completion before gathering the error from the buffer */
trace_xfs_buf_iowait(bp, _RET_IP_); trace_xfs_buf_iowait(bp, _RET_IP_);
...@@ -1572,6 +1560,7 @@ xfs_buf_submit_wait( ...@@ -1572,6 +1560,7 @@ xfs_buf_submit_wait(
trace_xfs_buf_iowait_done(bp, _RET_IP_); trace_xfs_buf_iowait_done(bp, _RET_IP_);
error = bp->b_error; error = bp->b_error;
out:
/* /*
* all done now, we can release the hold that keeps the buffer * all done now, we can release the hold that keeps the buffer
* referenced for the entire IO. * referenced for the entire IO.
......
...@@ -310,7 +310,6 @@ DEFINE_BUF_EVENT(xfs_buf_hold); ...@@ -310,7 +310,6 @@ DEFINE_BUF_EVENT(xfs_buf_hold);
DEFINE_BUF_EVENT(xfs_buf_rele); DEFINE_BUF_EVENT(xfs_buf_rele);
DEFINE_BUF_EVENT(xfs_buf_iodone); DEFINE_BUF_EVENT(xfs_buf_iodone);
DEFINE_BUF_EVENT(xfs_buf_submit); DEFINE_BUF_EVENT(xfs_buf_submit);
DEFINE_BUF_EVENT(xfs_buf_submit_wait);
DEFINE_BUF_EVENT(xfs_buf_lock); DEFINE_BUF_EVENT(xfs_buf_lock);
DEFINE_BUF_EVENT(xfs_buf_lock_done); DEFINE_BUF_EVENT(xfs_buf_lock_done);
DEFINE_BUF_EVENT(xfs_buf_trylock_fail); DEFINE_BUF_EVENT(xfs_buf_trylock_fail);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment