Commit 5889608d authored by Dave Chinner, committed by Ben Myers

xfs: syncd workqueue is no more

With the syncd functions moved to the log and/or removed, the syncd
workqueue is the only piece remaining. It is used by the log
covering/AIL pushing work, as well as by the inode reclaim work.

Given how cheap workqueues are these days, give the log and inode
reclaim work their own work queues and kill the syncd work queue.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Mark Tinguely <tinguely@sgi.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ben Myers <bpm@sgi.com>
parent 9aa05000
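
Before the diff, a minimal self-contained sketch (not part of the commit; names such as demo_mount, demo_init_mount_workqueues and the "demo-*" workqueue names are made up for illustration) of the per-mount delayed-work pattern the patch moves to: each mount owns its workqueues and reschedules its periodic work on them, instead of sharing one global xfssyncd workqueue.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct demo_mount {				/* stand-in for struct xfs_mount */
	struct workqueue_struct	*log_wq;	/* stand-in for m_log_workqueue */
	struct workqueue_struct	*reclaim_wq;	/* stand-in for m_reclaim_workqueue */
	struct delayed_work	reclaim_work;	/* stand-in for m_reclaim_work */
	const char		*fsname;
};

/* one reclaim pass, then reschedule ourselves on the per-mount queue */
static void demo_reclaim_worker(struct work_struct *work)
{
	struct demo_mount *dm = container_of(to_delayed_work(work),
					     struct demo_mount, reclaim_work);

	/* ... reclaim inodes here ... */
	queue_delayed_work(dm->reclaim_wq, &dm->reclaim_work,
			   msecs_to_jiffies(5000));
}

static int demo_init_mount_workqueues(struct demo_mount *dm)
{
	/* WQ_NON_REENTRANT matches the era of this patch; later kernels dropped the flag */
	dm->reclaim_wq = alloc_workqueue("demo-reclaim/%s",
					 WQ_NON_REENTRANT, 0, dm->fsname);
	if (!dm->reclaim_wq)
		return -ENOMEM;

	dm->log_wq = alloc_workqueue("demo-log/%s",
				     WQ_NON_REENTRANT, 0, dm->fsname);
	if (!dm->log_wq) {
		destroy_workqueue(dm->reclaim_wq);
		return -ENOMEM;
	}

	INIT_DELAYED_WORK(&dm->reclaim_work, demo_reclaim_worker);
	return 0;
}

static void demo_destroy_mount_workqueues(struct demo_mount *dm)
{
	cancel_delayed_work_sync(&dm->reclaim_work);
	destroy_workqueue(dm->log_wq);
	destroy_workqueue(dm->reclaim_wq);
}
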
@@ -1193,7 +1193,7 @@ void
 xfs_log_work_queue(
 	struct xfs_mount	*mp)
 {
-	queue_delayed_work(xfs_syncd_wq, &mp->m_log->l_work,
+	queue_delayed_work(mp->m_log_workqueue, &mp->m_log->l_work,
 				msecs_to_jiffies(xfs_syncd_centisecs * 10));
 }
...
@@ -207,6 +207,8 @@ typedef struct xfs_mount {
 	struct workqueue_struct	*m_data_workqueue;
 	struct workqueue_struct	*m_unwritten_workqueue;
 	struct workqueue_struct	*m_cil_workqueue;
+	struct workqueue_struct	*m_reclaim_workqueue;
+	struct workqueue_struct	*m_log_workqueue;
 } xfs_mount_t;
 
 /*
...
@@ -863,8 +863,23 @@ xfs_init_mount_workqueues(
 			WQ_MEM_RECLAIM, 0, mp->m_fsname);
 	if (!mp->m_cil_workqueue)
 		goto out_destroy_unwritten;
+
+	mp->m_reclaim_workqueue = alloc_workqueue("xfs-reclaim/%s",
+			WQ_NON_REENTRANT, 0, mp->m_fsname);
+	if (!mp->m_reclaim_workqueue)
+		goto out_destroy_cil;
+
+	mp->m_log_workqueue = alloc_workqueue("xfs-log/%s",
+			WQ_NON_REENTRANT, 0, mp->m_fsname);
+	if (!mp->m_log_workqueue)
+		goto out_destroy_reclaim;
+
 	return 0;
 
+out_destroy_reclaim:
+	destroy_workqueue(mp->m_reclaim_workqueue);
+out_destroy_cil:
+	destroy_workqueue(mp->m_cil_workqueue);
 out_destroy_unwritten:
 	destroy_workqueue(mp->m_unwritten_workqueue);
 out_destroy_data_iodone_queue:
@@ -877,6 +892,8 @@ STATIC void
 xfs_destroy_mount_workqueues(
 	struct xfs_mount	*mp)
 {
+	destroy_workqueue(mp->m_log_workqueue);
+	destroy_workqueue(mp->m_reclaim_workqueue);
 	destroy_workqueue(mp->m_cil_workqueue);
 	destroy_workqueue(mp->m_data_workqueue);
 	destroy_workqueue(mp->m_unwritten_workqueue);
@@ -1391,10 +1408,6 @@ xfs_fs_fill_super(
 	/*
 	 * we must configure the block size in the superblock before we run the
 	 * full mount process as the mount process can lookup and cache inodes.
-	 * For the same reason we must also initialise the syncd and register
-	 * the inode cache shrinker so that inodes can be reclaimed during
-	 * operations like a quotacheck that iterate all inodes in the
-	 * filesystem.
	 */
 	sb->s_magic = XFS_SB_MAGIC;
 	sb->s_blocksize = mp->m_sb.sb_blocksize;
@@ -1638,16 +1651,6 @@ xfs_destroy_zones(void)
 STATIC int __init
 xfs_init_workqueues(void)
 {
-	/*
-	 * We never want to the same work item to run twice, reclaiming inodes
-	 * or idling the log is not going to get any faster by multiple CPUs
-	 * competing for ressources. Use the default large max_active value
-	 * so that even lots of filesystems can perform these task in parallel.
-	 */
-	xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_NON_REENTRANT, 0);
-	if (!xfs_syncd_wq)
-		return -ENOMEM;
-
 	/*
 	 * The allocation workqueue can be used in memory reclaim situations
 	 * (writepage path), and parallelism is only limited by the number of
@@ -1656,20 +1659,15 @@ xfs_init_workqueues(void)
 	 */
 	xfs_alloc_wq = alloc_workqueue("xfsalloc", WQ_MEM_RECLAIM, 0);
 	if (!xfs_alloc_wq)
-		goto out_destroy_syncd;
+		return -ENOMEM;
 
 	return 0;
-
-out_destroy_syncd:
-	destroy_workqueue(xfs_syncd_wq);
-	return -ENOMEM;
 }
 
 STATIC void
 xfs_destroy_workqueues(void)
 {
 	destroy_workqueue(xfs_alloc_wq);
-	destroy_workqueue(xfs_syncd_wq);
 }
 
 STATIC int __init
...
@@ -40,8 +40,6 @@
 #include <linux/kthread.h>
 #include <linux/freezer.h>
 
-struct workqueue_struct *xfs_syncd_wq;	/* sync workqueue */
-
 /*
  * The inode lookup is done in batches to keep the amount of lock traffic and
  * radix tree lookups to a minimum. The batch size is a trade off between
@@ -335,18 +333,18 @@ xfs_quiesce_attr(
 /*
  * Queue a new inode reclaim pass if there are reclaimable inodes and there
  * isn't a reclaim pass already in progress. By default it runs every 5s based
- * on the xfs syncd work default of 30s. Perhaps this should have it's own
+ * on the xfs periodic sync default of 30s. Perhaps this should have it's own
  * tunable, but that can be done if this method proves to be ineffective or too
  * aggressive.
  */
 static void
-xfs_syncd_queue_reclaim(
+xfs_reclaim_work_queue(
 	struct xfs_mount	*mp)
 {
 	rcu_read_lock();
 	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
-		queue_delayed_work(xfs_syncd_wq, &mp->m_reclaim_work,
+		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
 			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
 	}
 	rcu_read_unlock();
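
As a side note on the "every 5s" figure in the comment above: xfs_syncd_centisecs defaults to 3000 centiseconds (30 seconds), so the delay works out as in the hypothetical helper below (not part of the commit).

#include <linux/jiffies.h>

/* hypothetical helper; 3000 centisecs is the xfs_syncd_centisecs default */
static inline unsigned long demo_reclaim_delay(unsigned long syncd_centisecs)
{
	/* 3000 / 6 * 10 = 5000 ms, i.e. a reclaim pass every 5 seconds */
	return msecs_to_jiffies(syncd_centisecs / 6 * 10);
}
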
@@ -367,7 +365,7 @@ xfs_reclaim_worker(
 					struct xfs_mount, m_reclaim_work);
 
 	xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
-	xfs_syncd_queue_reclaim(mp);
+	xfs_reclaim_work_queue(mp);
 }
 
 void
@@ -388,7 +386,7 @@ __xfs_inode_set_reclaim_tag(
 		spin_unlock(&ip->i_mount->m_perag_lock);
 
 		/* schedule periodic background inode reclaim */
-		xfs_syncd_queue_reclaim(ip->i_mount);
+		xfs_reclaim_work_queue(ip->i_mount);
 
 		trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
 							-1, _RET_IP_);
@@ -646,9 +644,9 @@ xfs_reclaim_inode(
 	/*
 	 * We could return EAGAIN here to make reclaim rescan the inode tree in
 	 * a short while. However, this just burns CPU time scanning the tree
-	 * waiting for IO to complete and xfssyncd never goes back to the idle
-	 * state. Instead, return 0 to let the next scheduled background reclaim
-	 * attempt to reclaim the inode again.
+	 * waiting for IO to complete and the reclaim work never goes back to
+	 * the idle state. Instead, return 0 to let the next scheduled
+	 * background reclaim attempt to reclaim the inode again.
 	 */
 	return 0;
 }
@@ -804,7 +802,7 @@ xfs_reclaim_inodes_nr(
 	int			nr_to_scan)
 {
 	/* kick background reclaimer and push the AIL */
-	xfs_syncd_queue_reclaim(mp);
+	xfs_reclaim_work_queue(mp);
 	xfs_ail_push_all(mp->m_ail);
 	xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan);
...
@@ -24,8 +24,6 @@ struct xfs_perag;
 #define SYNC_WAIT		0x0001	/* wait for i/o to complete */
 #define SYNC_TRYLOCK		0x0002	/* only try to lock inodes */
 
-extern struct workqueue_struct *xfs_syncd_wq;	/* sync workqueue */
-
 void xfs_reclaim_worker(struct work_struct *work);
 
 int xfs_quiesce_data(struct xfs_mount *mp);