Commit 9aa05000 authored by Dave Chinner, committed by Ben Myers

xfs: xfs_sync_data is redundant.

We don't do any data writeback from XFS any more - the VFS is
completely responsible for that, including for freeze. We can
replace the remaining caller with a VFS level function that
achieves the same thing, but without conflicting with current
writeback work.

This means we can remove the flush_work and xfs_flush_inodes() - the
VFS functionality completely replaces the internal flush queue for
doing this writeback work in a separate context to avoid stack
overruns.

This does have one complication - it cannot be called with page
locks held. Hence move the flushing of delalloc space when ENOSPC
occurs back up into xfs_file_buffered_aio_write(), where we don't
hold any locks that will stall writeback.

Unfortunately, writeback_inodes_sb_if_idle() is not sufficient to
trigger delalloc conversion fast enough to prevent spurious ENOSPC
when there are hundreds of writers, thousands of small files and GBs
of free RAM. Hence we need to use sync_inodes_sb() to block callers
while we wait for writeback, like the previous xfs_flush_inodes
implementation did.
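
For context, here is a minimal sketch of the two VFS options weighed
above. It is illustrative only and not part of the patch; the
flush_delalloc_* wrapper names are invented, while the two writeback
calls are the VFS interfaces of this kernel generation:

	#include <linux/fs.h>
	#include <linux/writeback.h>

	/* Kicks background writeback only if none is already in flight
	 * and returns immediately, so delalloc conversion can lag far
	 * behind hundreds of allocating writers. */
	static void flush_delalloc_lazy(struct super_block *sb)
	{
		writeback_inodes_sb_if_idle(sb, WB_REASON_FS_FREE_SPACE);
	}

	/* Writes back all dirty inodes and waits for the IO to complete,
	 * throttling concurrent callers to the IO completion rate - the
	 * behaviour the old xfs_flush_inodes() provided. Note that
	 * sync_inodes_sb() must be called with s_umount held. */
	static void flush_delalloc_blocking(struct super_block *sb)
	{
		down_read(&sb->s_umount);
		sync_inodes_sb(sb);
		up_read(&sb->s_umount);
	}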

That means we have to hold the s_umount lock here, but because this
call can nest inside i_mutex (the parent directory in the create
case, held by the VFS), we have to use down_read_trylock() to avoid
potential deadlocks. In practice, this trylock will succeed on
almost every attempt as unmount/remount type operations are
exceedingly rare.
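
As a sketch, the resulting pattern looks like the following; it mirrors
the new xfs_flush_inodes() in the diff below, and the deadlock scenario
in the comment is one plausible inversion rather than a trace taken
from this patch:

	/*
	 * Called with the parent directory i_mutex held in the create
	 * path. A blocking down_read(&sb->s_umount) could deadlock if,
	 * say, a remount holds s_umount for write while its own
	 * writeback needs the i_mutex we hold. On trylock failure we
	 * just skip the flush; the caller retries and, at worst, fails
	 * with ENOSPC.
	 */
	if (down_read_trylock(&sb->s_umount)) {
		sync_inodes_sb(sb);
		up_read(&sb->s_umount);
	}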

Note: we always need to pass a count of zero to
generic_file_buffered_write() as the previously written byte count.
Before this patch we only did so by accident, by virtue of ret
always being zero when there are no errors. Make this explicit
rather than needing to specifically zero ret in the ENOSPC retry
case.
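
For reference, the prototype being called (as it reads in kernels of
this generation, before the iov_iter conversion; shown for
illustration):

	/* 'written' is the byte count already completed by an earlier
	 * write attempt (e.g. a short direct IO) and is folded into the
	 * return value. This buffered path has no earlier attempt, so it
	 * must be an explicit 0 on the first pass and on the ENOSPC
	 * retry alike. */
	ssize_t generic_file_buffered_write(struct kiocb *iocb,
			const struct iovec *iov, unsigned long nr_segs,
			loff_t pos, loff_t *ppos, size_t count,
			ssize_t written);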
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Tested-by: Brian Foster <bfoster@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ben Myers <bpm@sgi.com>
parent cf2931db
@@ -728,16 +728,17 @@ xfs_file_buffered_aio_write(
 write_retry:
 	trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0);
 	ret = generic_file_buffered_write(iocb, iovp, nr_segs,
-			pos, &iocb->ki_pos, count, ret);
+			pos, &iocb->ki_pos, count, 0);
+
 	/*
-	 * if we just got an ENOSPC, flush the inode now we aren't holding any
-	 * page locks and retry *once*
+	 * If we just got an ENOSPC, try to write back all dirty inodes to
+	 * convert delalloc space to free up some of the excess reserved
+	 * metadata space.
	 */
 	if (ret == -ENOSPC && !enospc) {
 		enospc = 1;
-		ret = -xfs_flush_pages(ip, 0, -1, 0, FI_NONE);
-		if (!ret)
-			goto write_retry;
+		xfs_flush_inodes(ip->i_mount);
+		goto write_retry;
 	}

 	current->backing_dev_info = NULL;
...
...@@ -373,7 +373,7 @@ xfs_iomap_write_delay( ...@@ -373,7 +373,7 @@ xfs_iomap_write_delay(
xfs_extlen_t extsz; xfs_extlen_t extsz;
int nimaps; int nimaps;
xfs_bmbt_irec_t imap[XFS_WRITE_IMAPS]; xfs_bmbt_irec_t imap[XFS_WRITE_IMAPS];
int prealloc, flushed = 0; int prealloc;
int error; int error;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
...@@ -434,26 +434,17 @@ xfs_iomap_write_delay( ...@@ -434,26 +434,17 @@ xfs_iomap_write_delay(
} }
/* /*
* If bmapi returned us nothing, we got either ENOSPC or EDQUOT. For * If bmapi returned us nothing, we got either ENOSPC or EDQUOT. Retry
* ENOSPC, * flush all other inodes with delalloc blocks to free up
* some of the excess reserved metadata space. For both cases, retry
* without EOF preallocation. * without EOF preallocation.
*/ */
if (nimaps == 0) { if (nimaps == 0) {
trace_xfs_delalloc_enospc(ip, offset, count); trace_xfs_delalloc_enospc(ip, offset, count);
if (flushed) if (prealloc) {
return XFS_ERROR(error ? error : ENOSPC); prealloc = 0;
error = 0;
if (error == ENOSPC) { goto retry;
xfs_iunlock(ip, XFS_ILOCK_EXCL);
xfs_flush_inodes(ip);
xfs_ilock(ip, XFS_ILOCK_EXCL);
} }
return XFS_ERROR(error ? error : ENOSPC);
flushed = 1;
error = 0;
prealloc = 0;
goto retry;
} }
if (!(imap[0].br_startblock || XFS_IS_REALTIME_INODE(ip))) if (!(imap[0].br_startblock || XFS_IS_REALTIME_INODE(ip)))
......
...@@ -198,7 +198,6 @@ typedef struct xfs_mount { ...@@ -198,7 +198,6 @@ typedef struct xfs_mount {
#endif #endif
struct xfs_mru_cache *m_filestream; /* per-mount filestream data */ struct xfs_mru_cache *m_filestream; /* per-mount filestream data */
struct delayed_work m_reclaim_work; /* background inode reclaim */ struct delayed_work m_reclaim_work; /* background inode reclaim */
struct work_struct m_flush_work; /* background inode flush */
__int64_t m_update_flags; /* sb flags we need to update __int64_t m_update_flags; /* sb flags we need to update
on the next remount,rw */ on the next remount,rw */
struct shrinker m_inode_shrink; /* inode reclaim shrinker */ struct shrinker m_inode_shrink; /* inode reclaim shrinker */
......
...@@ -882,6 +882,24 @@ xfs_destroy_mount_workqueues( ...@@ -882,6 +882,24 @@ xfs_destroy_mount_workqueues(
destroy_workqueue(mp->m_unwritten_workqueue); destroy_workqueue(mp->m_unwritten_workqueue);
} }
/*
* Flush all dirty data to disk. Must not be called while holding an XFS_ILOCK
* or a page lock. We use sync_inodes_sb() here to ensure we block while waiting
* for IO to complete so that we effectively throttle multiple callers to the
* rate at which IO is completing.
*/
void
xfs_flush_inodes(
struct xfs_mount *mp)
{
struct super_block *sb = mp->m_super;
if (down_read_trylock(&sb->s_umount)) {
sync_inodes_sb(sb);
up_read(&sb->s_umount);
}
}
/* Catch misguided souls that try to use this interface on XFS */ /* Catch misguided souls that try to use this interface on XFS */
STATIC struct inode * STATIC struct inode *
xfs_fs_alloc_inode( xfs_fs_alloc_inode(
...@@ -1005,8 +1023,6 @@ xfs_fs_put_super( ...@@ -1005,8 +1023,6 @@ xfs_fs_put_super(
{ {
struct xfs_mount *mp = XFS_M(sb); struct xfs_mount *mp = XFS_M(sb);
cancel_work_sync(&mp->m_flush_work);
xfs_filestream_unmount(mp); xfs_filestream_unmount(mp);
xfs_unmountfs(mp); xfs_unmountfs(mp);
...@@ -1324,7 +1340,6 @@ xfs_fs_fill_super( ...@@ -1324,7 +1340,6 @@ xfs_fs_fill_super(
spin_lock_init(&mp->m_sb_lock); spin_lock_init(&mp->m_sb_lock);
mutex_init(&mp->m_growlock); mutex_init(&mp->m_growlock);
atomic_set(&mp->m_active_trans, 0); atomic_set(&mp->m_active_trans, 0);
INIT_WORK(&mp->m_flush_work, xfs_flush_worker);
INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker); INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
mp->m_super = sb; mp->m_super = sb;
......
...@@ -74,6 +74,7 @@ struct block_device; ...@@ -74,6 +74,7 @@ struct block_device;
extern __uint64_t xfs_max_file_offset(unsigned int); extern __uint64_t xfs_max_file_offset(unsigned int);
extern void xfs_flush_inodes(struct xfs_mount *mp);
extern void xfs_blkdev_issue_flush(struct xfs_buftarg *); extern void xfs_blkdev_issue_flush(struct xfs_buftarg *);
extern xfs_agnumber_t xfs_set_inode32(struct xfs_mount *); extern xfs_agnumber_t xfs_set_inode32(struct xfs_mount *);
extern xfs_agnumber_t xfs_set_inode64(struct xfs_mount *); extern xfs_agnumber_t xfs_set_inode64(struct xfs_mount *);
......
...@@ -216,51 +216,6 @@ xfs_inode_ag_iterator( ...@@ -216,51 +216,6 @@ xfs_inode_ag_iterator(
return XFS_ERROR(last_error); return XFS_ERROR(last_error);
} }
STATIC int
xfs_sync_inode_data(
struct xfs_inode *ip,
struct xfs_perag *pag,
int flags)
{
struct inode *inode = VFS_I(ip);
struct address_space *mapping = inode->i_mapping;
int error = 0;
if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
return 0;
if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) {
if (flags & SYNC_TRYLOCK)
return 0;
xfs_ilock(ip, XFS_IOLOCK_SHARED);
}
error = xfs_flush_pages(ip, 0, -1, (flags & SYNC_WAIT) ?
0 : XBF_ASYNC, FI_NONE);
xfs_iunlock(ip, XFS_IOLOCK_SHARED);
return error;
}
/*
* Write out pagecache data for the whole filesystem.
*/
STATIC int
xfs_sync_data(
struct xfs_mount *mp,
int flags)
{
int error;
ASSERT((flags & ~(SYNC_TRYLOCK|SYNC_WAIT)) == 0);
error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags);
if (error)
return XFS_ERROR(error);
xfs_log_force(mp, (flags & SYNC_WAIT) ? XFS_LOG_SYNC : 0);
return 0;
}
STATIC int STATIC int
xfs_sync_fsdata( xfs_sync_fsdata(
struct xfs_mount *mp) struct xfs_mount *mp)
...@@ -415,39 +370,6 @@ xfs_reclaim_worker( ...@@ -415,39 +370,6 @@ xfs_reclaim_worker(
xfs_syncd_queue_reclaim(mp); xfs_syncd_queue_reclaim(mp);
} }
/*
* Flush delayed allocate data, attempting to free up reserved space
* from existing allocations. At this point a new allocation attempt
* has failed with ENOSPC and we are in the process of scratching our
* heads, looking about for more room.
*
* Queue a new data flush if there isn't one already in progress and
* wait for completion of the flush. This means that we only ever have one
* inode flush in progress no matter how many ENOSPC events are occurring and
* so will prevent the system from bogging down due to every concurrent
* ENOSPC event scanning all the active inodes in the system for writeback.
*/
void
xfs_flush_inodes(
struct xfs_inode *ip)
{
struct xfs_mount *mp = ip->i_mount;
queue_work(xfs_syncd_wq, &mp->m_flush_work);
flush_work(&mp->m_flush_work);
}
void
xfs_flush_worker(
struct work_struct *work)
{
struct xfs_mount *mp = container_of(work,
struct xfs_mount, m_flush_work);
xfs_sync_data(mp, SYNC_TRYLOCK);
xfs_sync_data(mp, SYNC_TRYLOCK | SYNC_WAIT);
}
void void
__xfs_inode_set_reclaim_tag( __xfs_inode_set_reclaim_tag(
struct xfs_perag *pag, struct xfs_perag *pag,
......
...@@ -26,14 +26,11 @@ struct xfs_perag; ...@@ -26,14 +26,11 @@ struct xfs_perag;
extern struct workqueue_struct *xfs_syncd_wq; /* sync workqueue */ extern struct workqueue_struct *xfs_syncd_wq; /* sync workqueue */
void xfs_flush_worker(struct work_struct *work);
void xfs_reclaim_worker(struct work_struct *work); void xfs_reclaim_worker(struct work_struct *work);
int xfs_quiesce_data(struct xfs_mount *mp); int xfs_quiesce_data(struct xfs_mount *mp);
void xfs_quiesce_attr(struct xfs_mount *mp); void xfs_quiesce_attr(struct xfs_mount *mp);
void xfs_flush_inodes(struct xfs_inode *ip);
int xfs_reclaim_inodes(struct xfs_mount *mp, int mode); int xfs_reclaim_inodes(struct xfs_mount *mp, int mode);
int xfs_reclaim_inodes_count(struct xfs_mount *mp); int xfs_reclaim_inodes_count(struct xfs_mount *mp);
void xfs_reclaim_inodes_nr(struct xfs_mount *mp, int nr_to_scan); void xfs_reclaim_inodes_nr(struct xfs_mount *mp, int nr_to_scan);
......
...@@ -777,7 +777,7 @@ xfs_create( ...@@ -777,7 +777,7 @@ xfs_create(
XFS_TRANS_PERM_LOG_RES, log_count); XFS_TRANS_PERM_LOG_RES, log_count);
if (error == ENOSPC) { if (error == ENOSPC) {
/* flush outstanding delalloc blocks and retry */ /* flush outstanding delalloc blocks and retry */
xfs_flush_inodes(dp); xfs_flush_inodes(mp);
error = xfs_trans_reserve(tp, resblks, log_res, 0, error = xfs_trans_reserve(tp, resblks, log_res, 0,
XFS_TRANS_PERM_LOG_RES, log_count); XFS_TRANS_PERM_LOG_RES, log_count);
} }
......