Commit 26f1fe85 authored by Dave Chinner's avatar Dave Chinner Committed by Dave Chinner

xfs: reduce lock hold times in buffer writeback

When we have a lot of metadata to flush from the AIL, the buffer
list can get very long. The current submission code tries to batch
submission to optimise IO order of the metadata (i.e. ascending
block order) to maximise block layer merging or IO to adjacent
metadata blocks.

Unfortunately, the method used can result in long lock times
occurring as buffers locked early on in the buffer list might not be
dispatched until the end of the IO list processing. This is because
sorting does not occur until after the buffer list has been processed
and the buffers that are going to be submitted are locked. Hence
when the buffer list is several thousand buffers long, the lock hold
times before IO dispatch can be significant.

To fix this, sort the buffer list before we start trying to lock and
submit buffers. This means we can now submit buffers immediately
after they are locked, allowing merging to occur immediately on the
plug and dispatch to occur as quickly as possible. This means there
is minimal delay between locking the buffer and IO submission
occurring, hence reducing the worst-case lock hold times seen during
delayed write buffer IO submission significantly.
Signed-off-by: default avatarDave Chinner <dchinner@redhat.com>
Reviewed-by: default avatarChristoph Hellwig <hch@lst.de>
Reviewed-by: default avatarCarlos Maiolino <cmaiolino@redhat.com>
Signed-off-by: default avatarDave Chinner <david@fromorbit.com>
parent 4478fb1f
...@@ -1774,18 +1774,33 @@ xfs_buf_cmp( ...@@ -1774,18 +1774,33 @@ xfs_buf_cmp(
return 0; return 0;
} }
/*
* submit buffers for write.
*
* When we have a large buffer list, we do not want to hold all the buffers
* locked while we block on the request queue waiting for IO dispatch. To avoid
* this problem, we lock and submit buffers in groups of 50, thereby minimising
* the lock hold times for lists which may contain thousands of objects.
*
* To do this, we sort the buffer list before we walk the list to lock and
* submit buffers, and we plug and unplug around each group of buffers we
* submit.
*/
static int static int
__xfs_buf_delwri_submit( xfs_buf_delwri_submit_buffers(
struct list_head *buffer_list, struct list_head *buffer_list,
struct list_head *io_list, struct list_head *wait_list)
bool wait)
{ {
struct blk_plug plug;
struct xfs_buf *bp, *n; struct xfs_buf *bp, *n;
LIST_HEAD (submit_list);
int pinned = 0; int pinned = 0;
struct blk_plug plug;
list_sort(NULL, buffer_list, xfs_buf_cmp);
blk_start_plug(&plug);
list_for_each_entry_safe(bp, n, buffer_list, b_list) { list_for_each_entry_safe(bp, n, buffer_list, b_list) {
if (!wait) { if (!wait_list) {
if (xfs_buf_ispinned(bp)) { if (xfs_buf_ispinned(bp)) {
pinned++; pinned++;
continue; continue;
...@@ -1808,25 +1823,21 @@ __xfs_buf_delwri_submit( ...@@ -1808,25 +1823,21 @@ __xfs_buf_delwri_submit(
continue; continue;
} }
list_move_tail(&bp->b_list, io_list);
trace_xfs_buf_delwri_split(bp, _RET_IP_); trace_xfs_buf_delwri_split(bp, _RET_IP_);
}
list_sort(NULL, io_list, xfs_buf_cmp);
blk_start_plug(&plug);
list_for_each_entry_safe(bp, n, io_list, b_list) {
bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC | XBF_WRITE_FAIL);
bp->b_flags |= XBF_WRITE | XBF_ASYNC;
/* /*
* we do all Io submission async. This means if we need to wait * We do all IO submission async. This means if we need
* for IO completion we need to take an extra reference so the * to wait for IO completion we need to take an extra
* buffer is still valid on the other side. * reference so the buffer is still valid on the other
* side. We need to move the buffer onto the io_list
* at this point so the caller can still access it.
*/ */
if (wait) bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC | XBF_WRITE_FAIL);
bp->b_flags |= XBF_WRITE | XBF_ASYNC;
if (wait_list) {
xfs_buf_hold(bp); xfs_buf_hold(bp);
else list_move_tail(&bp->b_list, wait_list);
} else
list_del_init(&bp->b_list); list_del_init(&bp->b_list);
xfs_buf_submit(bp); xfs_buf_submit(bp);
...@@ -1849,8 +1860,7 @@ int ...@@ -1849,8 +1860,7 @@ int
xfs_buf_delwri_submit_nowait( xfs_buf_delwri_submit_nowait(
struct list_head *buffer_list) struct list_head *buffer_list)
{ {
LIST_HEAD (io_list); return xfs_buf_delwri_submit_buffers(buffer_list, NULL);
return __xfs_buf_delwri_submit(buffer_list, &io_list, false);
} }
/* /*
...@@ -1865,15 +1875,15 @@ int ...@@ -1865,15 +1875,15 @@ int
xfs_buf_delwri_submit( xfs_buf_delwri_submit(
struct list_head *buffer_list) struct list_head *buffer_list)
{ {
LIST_HEAD (io_list); LIST_HEAD (wait_list);
int error = 0, error2; int error = 0, error2;
struct xfs_buf *bp; struct xfs_buf *bp;
__xfs_buf_delwri_submit(buffer_list, &io_list, true); xfs_buf_delwri_submit_buffers(buffer_list, &wait_list);
/* Wait for IO to complete. */ /* Wait for IO to complete. */
while (!list_empty(&io_list)) { while (!list_empty(&wait_list)) {
bp = list_first_entry(&io_list, struct xfs_buf, b_list); bp = list_first_entry(&wait_list, struct xfs_buf, b_list);
list_del_init(&bp->b_list); list_del_init(&bp->b_list);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment