Commit d4d12c02 authored by Dave Chinner, committed by Dave Chinner

xfs: collect errors from inodegc for unlinked inode recovery

Unlinked list recovery requires that errors removing the inode from
the unlinked list be fed back to the main recovery loop. Now that
we offload the unlinking to the inodegc work, we don't get errors
being fed back when we trip over a corruption that prevents the
inode from being removed from the unlinked list.

This means we never clear the corrupt unlinked list bucket,
resulting in runtime operations eventually tripping over it and
shutting down.

Fix this by collecting inodegc worker errors and feeding them
back to the flush caller. This is largely best effort - the only
context that really cares is log recovery, and it only flushes a
single inode at a time so we don't need complex synchronised
handling. Essentially the inodegc workers will capture the first
error that occurs and the next flush will gather them and clear
them. The flush itself will only report the first gathered error.
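
As a standalone illustration of that scheme, here is a minimal
userspace C sketch of the capture/gather/clear pattern. The fixed
array and the helper names (worker_record_error(), flush_gather_errors(),
NR_WORKERS) are illustrative stand-ins for the kernel's per-cpu
xfs_inodegc state, not the kernel API:

/*
 * Minimal userspace sketch of "capture the first error, gather and
 * clear on flush".  The array stands in for the per-cpu structures.
 */
#include <stdio.h>

#define NR_WORKERS	4

static int worker_error[NR_WORKERS];	/* one slot per worker, like gc->error */

/* Worker side: latch only the first error seen, later ones are dropped. */
static void worker_record_error(int cpu, int error)
{
	if (error && !worker_error[cpu])
		worker_error[cpu] = error;
}

/* Flush side: report the first gathered error and zero every slot. */
static int flush_gather_errors(void)
{
	int cpu, error = 0;

	for (cpu = 0; cpu < NR_WORKERS; cpu++) {
		if (worker_error[cpu] && !error)
			error = worker_error[cpu];
		worker_error[cpu] = 0;
	}
	return error;
}

int main(void)
{
	worker_record_error(1, -117);	/* -EFSCORRUPTED (== -EUCLEAN) wins */
	worker_record_error(1, -5);	/* later -EIO on the same slot is dropped */

	printf("flush -> %d\n", flush_gather_errors());	/* -117 */
	printf("flush -> %d\n", flush_gather_errors());	/* 0: slots were cleared */
	return 0;
}

Because each slot only ever latches the first error and the flush
zeroes the slots, a second flush reports success - exactly the
best-effort semantic described above.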

In the cases where callers can return errors, propagate the
collected inodegc flush error up the error handling chain.

In the case of inode unlinked list recovery, there are several
superfluous calls to flush queued unlinked inodes -
xlog_recover_iunlink_bucket() guarantees that it has flushed the
inodegc and collected errors before it returns. Hence nothing in the
calling path needs to run a flush, even when an error is returned.
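
Under the same assumptions (stand-in names, userspace types), the
caller-side convention that xlog_recover_iunlink_bucket() adopts in the
diff below can be sketched as: flush exactly once before returning and
let the first error win, so a flush failure never masks an earlier walk
failure. walk_unlinked_list() and flush_inodegc() are hypothetical
stand-ins, not the kernel functions:

#include <stdio.h>

static int pending_error;		/* stands in for the gathered gc->error */

static int flush_inodegc(void)		/* stand-in for xfs_inodegc_flush() */
{
	int error = pending_error;

	pending_error = 0;
	return error;
}

static int walk_unlinked_list(void)	/* stand-in for the per-inode walk */
{
	pending_error = -117;		/* a queued inactivation failed */
	return 0;			/* the walk itself completed */
}

static int recover_bucket(void)
{
	int error, error2;

	error = walk_unlinked_list();

	/* Flush queued work; don't let its error mask an earlier one. */
	error2 = flush_inodegc();
	if (error2 && !error)
		error = error2;
	return error;
}

int main(void)
{
	printf("recover_bucket -> %d\n", recover_bucket());	/* -117 */
	return 0;
}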
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Dave Chinner <david@fromorbit.com>
parent 7dfee17b
@@ -454,6 +454,27 @@ xfs_inodegc_queue_all(
 	return ret;
 }
 
+/* Wait for all queued work and collect errors */
+static int
+xfs_inodegc_wait_all(
+	struct xfs_mount	*mp)
+{
+	int			cpu;
+	int			error = 0;
+
+	flush_workqueue(mp->m_inodegc_wq);
+	for_each_online_cpu(cpu) {
+		struct xfs_inodegc	*gc;
+
+		gc = per_cpu_ptr(mp->m_inodegc, cpu);
+		if (gc->error && !error)
+			error = gc->error;
+		gc->error = 0;
+	}
+
+	return error;
+}
+
 /*
  * Check the validity of the inode we just found it the cache
  */
@@ -1491,15 +1512,14 @@ xfs_blockgc_free_space(
 	if (error)
 		return error;
 
-	xfs_inodegc_flush(mp);
-	return 0;
+	return xfs_inodegc_flush(mp);
 }
 
 /*
  * Reclaim all the free space that we can by scheduling the background blockgc
  * and inodegc workers immediately and waiting for them all to clear.
  */
-void
+int
 xfs_blockgc_flush_all(
 	struct xfs_mount	*mp)
 {
@@ -1520,7 +1540,7 @@ xfs_blockgc_flush_all(
 	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
 		flush_delayed_work(&pag->pag_blockgc_work);
 
-	xfs_inodegc_flush(mp);
+	return xfs_inodegc_flush(mp);
 }
 
 /*
@@ -1842,13 +1862,17 @@ xfs_inodegc_set_reclaimable(
  * This is the last chance to make changes to an otherwise unreferenced file
  * before incore reclamation happens.
  */
-static void
+static int
 xfs_inodegc_inactivate(
 	struct xfs_inode *ip)
 {
+	int			error;
+
 	trace_xfs_inode_inactivating(ip);
-	xfs_inactive(ip);
+	error = xfs_inactive(ip);
 	xfs_inodegc_set_reclaimable(ip);
+
+	return error;
 }
 
 void
@@ -1880,8 +1904,12 @@ xfs_inodegc_worker(
 	WRITE_ONCE(gc->shrinker_hits, 0);
 
 	llist_for_each_entry_safe(ip, n, node, i_gclist) {
+		int	error;
+
 		xfs_iflags_set(ip, XFS_INACTIVATING);
-		xfs_inodegc_inactivate(ip);
+		error = xfs_inodegc_inactivate(ip);
+		if (error && !gc->error)
+			gc->error = error;
 	}
 
 	memalloc_nofs_restore(nofs_flag);
@@ -1905,13 +1933,13 @@ xfs_inodegc_push(
  * Force all currently queued inode inactivation work to run immediately and
 * wait for the work to finish.
 */
-void
+int
 xfs_inodegc_flush(
 	struct xfs_mount	*mp)
 {
 	xfs_inodegc_push(mp);
 	trace_xfs_inodegc_flush(mp, __return_address);
-	flush_workqueue(mp->m_inodegc_wq);
+	return xfs_inodegc_wait_all(mp);
 }
 
 /*
...
@@ -62,7 +62,7 @@ int xfs_blockgc_free_dquots(struct xfs_mount *mp, struct xfs_dquot *udqp,
 		unsigned int iwalk_flags);
 int xfs_blockgc_free_quota(struct xfs_inode *ip, unsigned int iwalk_flags);
 int xfs_blockgc_free_space(struct xfs_mount *mp, struct xfs_icwalk *icm);
-void xfs_blockgc_flush_all(struct xfs_mount *mp);
+int xfs_blockgc_flush_all(struct xfs_mount *mp);
 
 void xfs_inode_set_eofblocks_tag(struct xfs_inode *ip);
 void xfs_inode_clear_eofblocks_tag(struct xfs_inode *ip);
@@ -80,7 +80,7 @@ void xfs_blockgc_start(struct xfs_mount *mp);
 
 void xfs_inodegc_worker(struct work_struct *work);
 void xfs_inodegc_push(struct xfs_mount *mp);
-void xfs_inodegc_flush(struct xfs_mount *mp);
+int xfs_inodegc_flush(struct xfs_mount *mp);
 void xfs_inodegc_stop(struct xfs_mount *mp);
 void xfs_inodegc_start(struct xfs_mount *mp);
 void xfs_inodegc_cpu_dead(struct xfs_mount *mp, unsigned int cpu);
...
@@ -1620,16 +1620,7 @@ xfs_inactive_ifree(
 	 */
 	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
 
-	/*
-	 * Just ignore errors at this point. There is nothing we can do except
-	 * to try to keep going. Make sure it's not a silent error.
-	 */
-	error = xfs_trans_commit(tp);
-	if (error)
-		xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
-			__func__, error);
-
-	return 0;
+	return xfs_trans_commit(tp);
 }
 
 /*
@@ -1693,12 +1684,12 @@ xfs_inode_needs_inactive(
  * now be truncated. Also, we clear all of the read-ahead state
  * kept for the inode here since the file is now closed.
  */
-void
+int
 xfs_inactive(
 	xfs_inode_t	*ip)
 {
 	struct xfs_mount	*mp;
-	int			error;
+	int			error = 0;
 	int			truncate = 0;
 
 	/*
@@ -1736,7 +1727,7 @@ xfs_inactive(
 		 * reference to the inode at this point anyways.
 		 */
 		if (xfs_can_free_eofblocks(ip, true))
-			xfs_free_eofblocks(ip);
+			error = xfs_free_eofblocks(ip);
 
 		goto out;
 	}
@@ -1773,7 +1764,7 @@ xfs_inactive(
 	/*
 	 * Free the inode.
 	 */
-	xfs_inactive_ifree(ip);
+	error = xfs_inactive_ifree(ip);
 
 out:
 	/*
@@ -1781,6 +1772,7 @@ xfs_inactive(
 	 * the attached dquots.
 	 */
 	xfs_qm_dqdetach(ip);
+	return error;
 }
 
 /*
...
@@ -470,7 +470,7 @@ enum layout_break_reason {
 	(xfs_has_grpid((pip)->i_mount) || (VFS_I(pip)->i_mode & S_ISGID))
 
 int		xfs_release(struct xfs_inode *ip);
-void		xfs_inactive(struct xfs_inode *ip);
+int		xfs_inactive(struct xfs_inode *ip);
 int		xfs_lookup(struct xfs_inode *dp, const struct xfs_name *name,
 			struct xfs_inode **ipp, struct xfs_name *ci_name);
 int		xfs_create(struct mnt_idmap *idmap,
...
@@ -2711,7 +2711,9 @@ xlog_recover_iunlink_bucket(
 			 * just to flush the inodegc queue and wait for it to
 			 * complete.
 			 */
-			xfs_inodegc_flush(mp);
+			error = xfs_inodegc_flush(mp);
+			if (error)
+				break;
 		}
 
 		prev_agino = agino;
@@ -2719,10 +2721,15 @@ xlog_recover_iunlink_bucket(
 	}
 
 	if (prev_ip) {
+		int	error2;
+
 		ip->i_prev_unlinked = prev_agino;
 		xfs_irele(prev_ip);
+
+		error2 = xfs_inodegc_flush(mp);
+		if (error2 && !error)
+			return error2;
 	}
-	xfs_inodegc_flush(mp);
 	return error;
 }
 
@@ -2789,7 +2796,6 @@ xlog_recover_iunlink_ag(
 			 * bucket and remaining inodes on it unreferenced and
 			 * unfreeable.
 			 */
-			xfs_inodegc_flush(pag->pag_mount);
 			xlog_recover_clear_agi_bucket(pag, bucket);
 		}
 	}
@@ -2806,13 +2812,6 @@ xlog_recover_process_iunlinks(
 
 	for_each_perag(log->l_mp, agno, pag)
 		xlog_recover_iunlink_ag(pag);
-
-	/*
-	 * Flush the pending unlinked inodes to ensure that the inactivations
-	 * are fully completed on disk and the incore inodes can be reclaimed
-	 * before we signal that recovery is complete.
-	 */
-	xfs_inodegc_flush(log->l_mp);
 }
 
 STATIC void
...
@@ -62,6 +62,7 @@ struct xfs_error_cfg {
 struct xfs_inodegc {
 	struct llist_head	list;
 	struct delayed_work	work;
+	int			error;
 
 	/* approximate count of inodes in the list */
 	unsigned int		items;
...
@@ -1100,6 +1100,7 @@ xfs_inodegc_init_percpu(
 #endif
 		init_llist_head(&gc->list);
 		gc->items = 0;
+		gc->error = 0;
 		INIT_DELAYED_WORK(&gc->work, xfs_inodegc_worker);
 	}
 	return 0;
...
@@ -290,7 +290,9 @@ xfs_trans_alloc(
 		 * Do not perform a synchronous scan because callers can hold
 		 * other locks.
 		 */
-		xfs_blockgc_flush_all(mp);
+		error = xfs_blockgc_flush_all(mp);
+		if (error)
+			return error;
 		want_retry = false;
 		goto retry;
 	}
...