Commit fd074841 authored by Dave Chinner, committed by Dave Chinner

xfs: push the AIL from memory reclaim and periodic sync

When we are short on memory, we want to expedite the cleaning of
dirty objects. Hence we need to kick the AIL flushing into action to
clean as many dirty objects as quickly as possible. To implement
this, sample the lsn of the log item at the head of the AIL (i.e.
the highest lsn in the AIL) and use that as the push target for the
AIL flush.

Further, the AIL holds dirty items that are not tracked any other
way, so objects can sit in the AIL and not get written back until
the AIL is pushed. Hence, to get the filesystem to the idle state,
we may need to push the AIL to flush out any remaining dirty objects
sitting in it. This requires the same push mechanism as the reclaim
push.

This patch also renames xfs_trans_ail_tail() to xfs_ail_min_lsn()
to match the new xfs_ail_max_lsn() function it introduces, and
similarly renames xfs_trans_ail_push() to xfs_ail_push().
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Alex Elder <aelder@sgi.com>
parent cd4a3c50
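
Before reading the diff, here is a minimal user-space sketch of the
mechanism the patch adds: sample the highest lsn currently held in the
AIL under the AIL lock, then hand that lsn to the existing push code as
the target so everything now in the AIL gets written back. All names
and types below (ail, ail_item, the pthread mutex, ail_push_all) are
simplified stand-ins for illustration, not the kernel API; the real
implementation is xfs_ail_max_lsn()/xfs_ail_push_all() in the diff.

/*
 * Standalone sketch of the push-all mechanism. Simplified stand-ins,
 * not the kernel types: the AIL is modelled as a singly linked list
 * in ascending lsn order with cached min/max pointers.
 */
#include <stdio.h>
#include <stdint.h>
#include <pthread.h>

typedef uint64_t lsn_t;

struct ail_item {
	lsn_t		lsn;
	struct ail_item	*next;		/* ascending lsn order */
};

struct ail {
	pthread_mutex_t	lock;
	struct ail_item	*min;		/* lowest lsn: the log tail */
	struct ail_item	*max;		/* highest lsn: the log head */
	lsn_t		push_target;	/* worker flushes up to here */
};

/* Sample the highest lsn in the AIL; 0 means the AIL is empty. */
static lsn_t ail_max_lsn(struct ail *ailp)
{
	lsn_t lsn = 0;

	pthread_mutex_lock(&ailp->lock);
	if (ailp->max)
		lsn = ailp->max->lsn;
	pthread_mutex_unlock(&ailp->lock);
	return lsn;
}

/*
 * Stand-in for xfs_ail_push(): record the push target. The real code
 * also kicks the AIL worker via the workqueue.
 */
static void ail_push(struct ail *ailp, lsn_t threshold_lsn)
{
	ailp->push_target = threshold_lsn;
}

/* Stand-in for xfs_ail_push_all(): push everything currently dirty. */
static void ail_push_all(struct ail *ailp)
{
	lsn_t threshold_lsn = ail_max_lsn(ailp);

	if (threshold_lsn)
		ail_push(ailp, threshold_lsn);
}

int main(void)
{
	struct ail_item a = { 100, NULL }, b = { 200, NULL }, c = { 300, NULL };
	struct ail ailp = { PTHREAD_MUTEX_INITIALIZER, &a, &c, 0 };

	a.next = &b;
	b.next = &c;
	ail_push_all(&ailp);
	printf("push target lsn: %llu\n",
	       (unsigned long long)ailp.push_target);	/* prints 300 */
	return 0;
}

Note that the target is sampled once rather than tracked: items that
enter the AIL after the sample are deliberately left for the next
push, which bounds the amount of work a single push-all can generate.
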
@@ -22,6 +22,7 @@
 #include "xfs_log.h"
 #include "xfs_inum.h"
 #include "xfs_trans.h"
+#include "xfs_trans_priv.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_mount.h"
@@ -462,6 +463,9 @@ xfs_sync_worker(
 		else
 			xfs_log_force(mp, 0);
 		error = xfs_qm_sync(mp, SYNC_TRYLOCK);
+
+		/* start pushing all the metadata that is currently dirty */
+		xfs_ail_push_all(mp->m_ail);
 	}

 	/* queue us up again */
@@ -1027,8 +1031,9 @@ xfs_reclaim_inode_shrink(
 	mp = container_of(shrink, struct xfs_mount, m_inode_shrink);
 	if (nr_to_scan) {
-		/* kick background reclaimer */
+		/* kick background reclaimer and push the AIL */
 		xfs_syncd_queue_reclaim(mp);
+		xfs_ail_push_all(mp->m_ail);

 		if (!(gfp_mask & __GFP_FS))
 			return -1;
...
...@@ -761,7 +761,7 @@ xfs_log_need_covered(xfs_mount_t *mp) ...@@ -761,7 +761,7 @@ xfs_log_need_covered(xfs_mount_t *mp)
break; break;
case XLOG_STATE_COVER_NEED: case XLOG_STATE_COVER_NEED:
case XLOG_STATE_COVER_NEED2: case XLOG_STATE_COVER_NEED2:
if (!xfs_trans_ail_tail(log->l_ailp) && if (!xfs_ail_min_lsn(log->l_ailp) &&
xlog_iclogs_empty(log)) { xlog_iclogs_empty(log)) {
if (log->l_covered_state == XLOG_STATE_COVER_NEED) if (log->l_covered_state == XLOG_STATE_COVER_NEED)
log->l_covered_state = XLOG_STATE_COVER_DONE; log->l_covered_state = XLOG_STATE_COVER_DONE;
...@@ -801,7 +801,7 @@ xlog_assign_tail_lsn( ...@@ -801,7 +801,7 @@ xlog_assign_tail_lsn(
xfs_lsn_t tail_lsn; xfs_lsn_t tail_lsn;
struct log *log = mp->m_log; struct log *log = mp->m_log;
tail_lsn = xfs_trans_ail_tail(mp->m_ail); tail_lsn = xfs_ail_min_lsn(mp->m_ail);
if (!tail_lsn) if (!tail_lsn)
tail_lsn = atomic64_read(&log->l_last_sync_lsn); tail_lsn = atomic64_read(&log->l_last_sync_lsn);
...@@ -1239,7 +1239,7 @@ xlog_grant_push_ail( ...@@ -1239,7 +1239,7 @@ xlog_grant_push_ail(
* the filesystem is shutting down. * the filesystem is shutting down.
*/ */
if (!XLOG_FORCED_SHUTDOWN(log)) if (!XLOG_FORCED_SHUTDOWN(log))
xfs_trans_ail_push(log->l_ailp, threshold_lsn); xfs_ail_push(log->l_ailp, threshold_lsn);
} }
/* /*
......
@@ -90,6 +90,20 @@ xfs_ail_min(
 	return list_first_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
 }

+/*
+ * Return a pointer to the last item in the AIL.  If the AIL is empty, then
+ * return NULL.
+ */
+static xfs_log_item_t *
+xfs_ail_max(
+	struct xfs_ail	*ailp)
+{
+	if (list_empty(&ailp->xa_ail))
+		return NULL;
+
+	return list_entry(ailp->xa_ail.prev, xfs_log_item_t, li_ail);
+}
+
 /*
  * Return a pointer to the item which follows the given item in the AIL.  If
  * the given item is the last item in the list, then return NULL.
@@ -114,7 +128,7 @@ xfs_ail_next(
  * item in the AIL.
  */
 xfs_lsn_t
-xfs_trans_ail_tail(
+xfs_ail_min_lsn(
 	struct xfs_ail	*ailp)
 {
 	xfs_lsn_t	lsn = 0;
@@ -129,6 +143,25 @@ xfs_trans_ail_tail(
 	return lsn;
 }

+/*
+ * Return the maximum lsn held in the AIL, or zero if the AIL is empty.
+ */
+static xfs_lsn_t
+xfs_ail_max_lsn(
+	struct xfs_ail	*ailp)
+{
+	xfs_lsn_t	lsn = 0;
+	xfs_log_item_t	*lip;
+
+	spin_lock(&ailp->xa_lock);
+	lip = xfs_ail_max(ailp);
+	if (lip)
+		lsn = lip->li_lsn;
+	spin_unlock(&ailp->xa_lock);
+
+	return lsn;
+}
+
 /*
  * AIL traversal cursor initialisation.
  *
@@ -504,7 +537,7 @@ xfs_ail_worker(
  * any of the objects, so the lock is not needed.
  */
 void
-xfs_trans_ail_push(
+xfs_ail_push(
 	struct xfs_ail	*ailp,
 	xfs_lsn_t	threshold_lsn)
 {
@@ -525,6 +558,19 @@ xfs_trans_ail_push(
 	queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0);
 }

+/*
+ * Push out all items in the AIL immediately
+ */
+void
+xfs_ail_push_all(
+	struct xfs_ail	*ailp)
+{
+	xfs_lsn_t	threshold_lsn = xfs_ail_max_lsn(ailp);
+
+	if (threshold_lsn)
+		xfs_ail_push(ailp, threshold_lsn);
+}
+
 /*
  * This is to be called when an item is unlocked that may have
  * been in the AIL.  It will wake up the first member of the AIL
...
@@ -104,12 +104,13 @@ xfs_trans_ail_delete(
 	xfs_trans_ail_delete_bulk(ailp, &lip, 1);
 }

-void			xfs_trans_ail_push(struct xfs_ail *, xfs_lsn_t);
+void			xfs_ail_push(struct xfs_ail *, xfs_lsn_t);
+void			xfs_ail_push_all(struct xfs_ail *);
+xfs_lsn_t		xfs_ail_min_lsn(struct xfs_ail *ailp);
 void			xfs_trans_unlocked_item(struct xfs_ail *,
 					xfs_log_item_t *);
-xfs_lsn_t		xfs_trans_ail_tail(struct xfs_ail *ailp);
-
 struct xfs_log_item	*xfs_trans_ail_cursor_first(struct xfs_ail *ailp,
 					struct xfs_ail_cursor *cur,
 					xfs_lsn_t lsn);
...