Commit 6af88cda authored by Brian Foster, committed by Darrick J. Wong

xfs: combine [a]sync buffer submission apis

The buffer I/O submission path consists of separate function calls
per type. The buffer I/O type is already controlled via buffer
state (XBF_ASYNC), however, so there is no real need for separate
submission functions.

Combine the buffer submission functions into a single function that
processes the buffer appropriately based on XBF_ASYNC. Retain an
internal helper with a conditional wait parameter to continue to
support batched !XBF_ASYNC submission/completion required by delwri
queues.
Suggested-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Brian Foster <bfoster@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
parent e339dd8d
...@@ -757,11 +757,7 @@ _xfs_buf_read( ...@@ -757,11 +757,7 @@ _xfs_buf_read(
bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD); bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD); bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
if (flags & XBF_ASYNC) { return xfs_buf_submit(bp);
xfs_buf_submit(bp);
return 0;
}
return xfs_buf_submit_wait(bp);
} }
xfs_buf_t * xfs_buf_t *
...@@ -846,7 +842,7 @@ xfs_buf_read_uncached( ...@@ -846,7 +842,7 @@ xfs_buf_read_uncached(
bp->b_flags |= XBF_READ; bp->b_flags |= XBF_READ;
bp->b_ops = ops; bp->b_ops = ops;
xfs_buf_submit_wait(bp); xfs_buf_submit(bp);
if (bp->b_error) { if (bp->b_error) {
int error = bp->b_error; int error = bp->b_error;
xfs_buf_relse(bp); xfs_buf_relse(bp);
...@@ -1249,7 +1245,7 @@ xfs_bwrite( ...@@ -1249,7 +1245,7 @@ xfs_bwrite(
bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q | bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q |
XBF_WRITE_FAIL | XBF_DONE); XBF_WRITE_FAIL | XBF_DONE);
error = xfs_buf_submit_wait(bp); error = xfs_buf_submit(bp);
if (error) { if (error) {
xfs_force_shutdown(bp->b_target->bt_mount, xfs_force_shutdown(bp->b_target->bt_mount,
SHUTDOWN_META_IO_ERROR); SHUTDOWN_META_IO_ERROR);
...@@ -1459,7 +1455,7 @@ _xfs_buf_ioapply( ...@@ -1459,7 +1455,7 @@ _xfs_buf_ioapply(
* itself. * itself.
*/ */
static int static int
__xfs_buf_submit( __xfs_buf_submit_common(
struct xfs_buf *bp) struct xfs_buf *bp)
{ {
trace_xfs_buf_submit(bp, _RET_IP_); trace_xfs_buf_submit(bp, _RET_IP_);
...@@ -1505,32 +1501,6 @@ __xfs_buf_submit( ...@@ -1505,32 +1501,6 @@ __xfs_buf_submit(
return 0; return 0;
} }
void
xfs_buf_submit(
struct xfs_buf *bp)
{
int error;
ASSERT(bp->b_flags & XBF_ASYNC);
/*
* The caller's reference is released during I/O completion.
* This occurs some time after the last b_io_remaining reference is
* released, so after we drop our Io reference we have to have some
* other reference to ensure the buffer doesn't go away from underneath
* us. Take a direct reference to ensure we have safe access to the
* buffer until we are finished with it.
*/
xfs_buf_hold(bp);
error = __xfs_buf_submit(bp);
if (error)
xfs_buf_ioend(bp);
/* Note: it is not safe to reference bp now we've dropped our ref */
xfs_buf_rele(bp);
}
/* /*
* Wait for I/O completion of a sync buffer and return the I/O error code. * Wait for I/O completion of a sync buffer and return the I/O error code.
*/ */
...@@ -1538,6 +1508,8 @@ static int ...@@ -1538,6 +1508,8 @@ static int
xfs_buf_iowait( xfs_buf_iowait(
struct xfs_buf *bp) struct xfs_buf *bp)
{ {
ASSERT(!(bp->b_flags & XBF_ASYNC));
trace_xfs_buf_iowait(bp, _RET_IP_); trace_xfs_buf_iowait(bp, _RET_IP_);
wait_for_completion(&bp->b_iowait); wait_for_completion(&bp->b_iowait);
trace_xfs_buf_iowait_done(bp, _RET_IP_); trace_xfs_buf_iowait_done(bp, _RET_IP_);
...@@ -1549,30 +1521,33 @@ xfs_buf_iowait( ...@@ -1549,30 +1521,33 @@ xfs_buf_iowait(
* Synchronous buffer IO submission path, read or write. * Synchronous buffer IO submission path, read or write.
*/ */
int int
xfs_buf_submit_wait( __xfs_buf_submit(
struct xfs_buf *bp) struct xfs_buf *bp,
bool wait)
{ {
int error; int error;
ASSERT(!(bp->b_flags & XBF_ASYNC));
/* /*
* For synchronous IO, the IO does not inherit the submitters reference * Grab a reference so the buffer does not go away underneath us. For
* count, nor the buffer lock. Hence we cannot release the reference we * async buffers, I/O completion drops the callers reference, which
* are about to take until we've waited for all IO completion to occur, * could occur before submission returns.
* including any xfs_buf_ioend_async() work that may be pending.
*/ */
xfs_buf_hold(bp); xfs_buf_hold(bp);
error = __xfs_buf_submit(bp); error = __xfs_buf_submit_common(bp);
if (error) if (error) {
if (bp->b_flags & XBF_ASYNC)
xfs_buf_ioend(bp);
goto out; goto out;
error = xfs_buf_iowait(bp); }
if (wait)
error = xfs_buf_iowait(bp);
out: out:
/* /*
* all done now, we can release the hold that keeps the buffer * Release the hold that keeps the buffer referenced for the entire
* referenced for the entire IO. * I/O. Note that if the buffer is async, it is not safe to reference
* after this release.
*/ */
xfs_buf_rele(bp); xfs_buf_rele(bp);
return error; return error;
...@@ -2026,12 +2001,11 @@ xfs_buf_delwri_submit_buffers( ...@@ -2026,12 +2001,11 @@ xfs_buf_delwri_submit_buffers(
if (wait_list) { if (wait_list) {
bp->b_flags &= ~XBF_ASYNC; bp->b_flags &= ~XBF_ASYNC;
list_move_tail(&bp->b_list, wait_list); list_move_tail(&bp->b_list, wait_list);
__xfs_buf_submit(bp);
} else { } else {
bp->b_flags |= XBF_ASYNC; bp->b_flags |= XBF_ASYNC;
list_del_init(&bp->b_list); list_del_init(&bp->b_list);
xfs_buf_submit(bp);
} }
__xfs_buf_submit(bp, false);
} }
blk_finish_plug(&plug); blk_finish_plug(&plug);
......
...@@ -297,8 +297,14 @@ extern void __xfs_buf_ioerror(struct xfs_buf *bp, int error, ...@@ -297,8 +297,14 @@ extern void __xfs_buf_ioerror(struct xfs_buf *bp, int error,
xfs_failaddr_t failaddr); xfs_failaddr_t failaddr);
#define xfs_buf_ioerror(bp, err) __xfs_buf_ioerror((bp), (err), __this_address) #define xfs_buf_ioerror(bp, err) __xfs_buf_ioerror((bp), (err), __this_address)
extern void xfs_buf_ioerror_alert(struct xfs_buf *, const char *func); extern void xfs_buf_ioerror_alert(struct xfs_buf *, const char *func);
extern void xfs_buf_submit(struct xfs_buf *bp);
extern int xfs_buf_submit_wait(struct xfs_buf *bp); extern int __xfs_buf_submit(struct xfs_buf *bp, bool);
static inline int xfs_buf_submit(struct xfs_buf *bp)
{
bool wait = bp->b_flags & XBF_ASYNC ? false : true;
return __xfs_buf_submit(bp, wait);
}
extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *, extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *,
xfs_buf_rw_t); xfs_buf_rw_t);
#define xfs_buf_zero(bp, off, len) \ #define xfs_buf_zero(bp, off, len) \
......
...@@ -196,7 +196,7 @@ xlog_bread_noalign( ...@@ -196,7 +196,7 @@ xlog_bread_noalign(
bp->b_io_length = nbblks; bp->b_io_length = nbblks;
bp->b_error = 0; bp->b_error = 0;
error = xfs_buf_submit_wait(bp); error = xfs_buf_submit(bp);
if (error && !XFS_FORCED_SHUTDOWN(log->l_mp)) if (error && !XFS_FORCED_SHUTDOWN(log->l_mp))
xfs_buf_ioerror_alert(bp, __func__); xfs_buf_ioerror_alert(bp, __func__);
return error; return error;
...@@ -5706,7 +5706,7 @@ xlog_do_recover( ...@@ -5706,7 +5706,7 @@ xlog_do_recover(
bp->b_flags |= XBF_READ; bp->b_flags |= XBF_READ;
bp->b_ops = &xfs_sb_buf_ops; bp->b_ops = &xfs_sb_buf_ops;
error = xfs_buf_submit_wait(bp); error = xfs_buf_submit(bp);
if (error) { if (error) {
if (!XFS_FORCED_SHUTDOWN(mp)) { if (!XFS_FORCED_SHUTDOWN(mp)) {
xfs_buf_ioerror_alert(bp, __func__); xfs_buf_ioerror_alert(bp, __func__);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment