Commit 1ad2cfe0 authored by Darrick J. Wong

xfs: move the quotaoff dqrele inode walk into xfs_icache.c

The only external caller of xfs_inode_walk* happens in quotaoff, when we
want to walk all the incore inodes to detach the dquots.  Move this code
to xfs_icache.c so that we can hide xfs_inode_walk as the starting step
in more cleanups of inode walks.
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
parent 977ec4dd
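For readers skimming the diff below: the patch routes the quotaoff dquot-release walk through the generic incore inode walker by carrying walk-private flag bits in the top of the shared eof_flags word, guarded by a compile-time check against the public XFS_EOF_FLAGS_* space. What follows is a minimal, hypothetical userspace sketch of that pattern; every name in it (EOF_FLAGS_*, ICWALK_*, struct eofblocks, inode_walk, dqrele_inode) is an illustrative stand-in modelled on the patch, not the kernel API.

#include <stdio.h>

/* Public flag bits (stand-ins for XFS_EOF_FLAGS_*) occupy the low bits. */
#define EOF_FLAGS_SYNC          (1U << 0)
#define EOF_FLAGS_UID           (1U << 1)
#define EOF_FLAGS_VALID         (EOF_FLAGS_SYNC | EOF_FLAGS_UID)

/* Private walk-only bits are allocated from the top down, as in the patch. */
#define ICWALK_FLAG_DROP_UDQUOT (1U << 31)
#define ICWALK_FLAG_DROP_GDQUOT (1U << 30)
#define ICWALK_FLAG_DROP_PDQUOT (1U << 29)
#define ICWALK_PRIVATE_FLAGS    (ICWALK_FLAG_DROP_UDQUOT | \
                                 ICWALK_FLAG_DROP_GDQUOT | \
                                 ICWALK_FLAG_DROP_PDQUOT)

/* Compile-time guarantee that the two flag namespaces never collide. */
_Static_assert((ICWALK_PRIVATE_FLAGS & EOF_FLAGS_VALID) == 0,
               "private walk flags overlap public EOF flags");

struct eofblocks { unsigned int eof_flags; };
struct inode { int has_udquot, has_gdquot, has_pdquot; };

/* Per-inode callback: drop whichever dquot references the flags select. */
static int dqrele_inode(struct inode *ip, void *priv)
{
        struct eofblocks *eofb = priv;

        if (eofb->eof_flags & ICWALK_FLAG_DROP_UDQUOT)
                ip->has_udquot = 0;
        if (eofb->eof_flags & ICWALK_FLAG_DROP_GDQUOT)
                ip->has_gdquot = 0;
        if (eofb->eof_flags & ICWALK_FLAG_DROP_PDQUOT)
                ip->has_pdquot = 0;
        return 0;
}

/* Generic walk: apply the callback to every cached inode, keep last error. */
static int inode_walk(struct inode *cache, int n,
                      int (*execute)(struct inode *, void *), void *priv)
{
        int i, error, last_error = 0;

        for (i = 0; i < n; i++) {
                error = execute(&cache[i], priv);
                if (error)
                        last_error = error;
        }
        return last_error;
}

int main(void)
{
        struct inode cache[2] = { { 1, 1, 1 }, { 1, 1, 1 } };
        struct eofblocks eofb = { .eof_flags = ICWALK_FLAG_DROP_UDQUOT |
                                               ICWALK_FLAG_DROP_GDQUOT };

        inode_walk(cache, 2, dqrele_inode, &eofb);
        printf("inode0: u=%d g=%d p=%d\n",
               cache[0].has_udquot, cache[0].has_gdquot, cache[0].has_pdquot);
        return 0;
}

Running the model drops the "u" and "g" references on every cached inode while leaving "p" attached, mirroring how the new xfs_dqrele_all_inodes translates its qflags argument into per-walk drop bits.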
@@ -26,6 +26,18 @@
 #include <linux/iversion.h>
 
+/*
+ * Private inode cache walk flags for struct xfs_eofblocks.  Must not coincide
+ * with XFS_EOF_FLAGS_*.
+ */
+#define XFS_ICWALK_FLAG_DROP_UDQUOT (1U << 31)
+#define XFS_ICWALK_FLAG_DROP_GDQUOT (1U << 30)
+#define XFS_ICWALK_FLAG_DROP_PDQUOT (1U << 29)
+
+#define XFS_ICWALK_PRIVATE_FLAGS (XFS_ICWALK_FLAG_DROP_UDQUOT | \
+                                  XFS_ICWALK_FLAG_DROP_GDQUOT | \
+                                  XFS_ICWALK_FLAG_DROP_PDQUOT)
+
 /*
  * Allocate and initialise an xfs_inode.
  */
@@ -890,7 +902,7 @@ xfs_inode_walk_get_perag(
 /*
  * Call the @execute function on all incore inodes matching the radix tree
  * @tag.
  */
-int
+static int
 xfs_inode_walk(
        struct xfs_mount *mp,
        int iter_flags,
@@ -915,7 +927,58 @@ xfs_inode_walk(
                }
        }
        return last_error;
 }
 
+#ifdef CONFIG_XFS_QUOTA
+/* Drop this inode's dquots. */
+static int
+xfs_dqrele_inode(
+       struct xfs_inode *ip,
+       void *priv)
+{
+       struct xfs_eofblocks *eofb = priv;
+
+       xfs_ilock(ip, XFS_ILOCK_EXCL);
+       if (eofb->eof_flags & XFS_ICWALK_FLAG_DROP_UDQUOT) {
+               xfs_qm_dqrele(ip->i_udquot);
+               ip->i_udquot = NULL;
+       }
+       if (eofb->eof_flags & XFS_ICWALK_FLAG_DROP_GDQUOT) {
+               xfs_qm_dqrele(ip->i_gdquot);
+               ip->i_gdquot = NULL;
+       }
+       if (eofb->eof_flags & XFS_ICWALK_FLAG_DROP_PDQUOT) {
+               xfs_qm_dqrele(ip->i_pdquot);
+               ip->i_pdquot = NULL;
+       }
+       xfs_iunlock(ip, XFS_ILOCK_EXCL);
+       return 0;
+}
+
+/*
+ * Detach all dquots from incore inodes if we can.  The caller must already
+ * have dropped the relevant XFS_[UGP]QUOTA_ACTIVE flags so that dquots will
+ * not get reattached.
+ */
+int
+xfs_dqrele_all_inodes(
+       struct xfs_mount *mp,
+       unsigned int qflags)
+{
+       struct xfs_eofblocks eofb = { .eof_flags = 0 };
+
+       BUILD_BUG_ON(XFS_ICWALK_PRIVATE_FLAGS & XFS_EOF_FLAGS_VALID);
+
+       if (qflags & XFS_UQUOTA_ACCT)
+               eofb.eof_flags |= XFS_ICWALK_FLAG_DROP_UDQUOT;
+       if (qflags & XFS_GQUOTA_ACCT)
+               eofb.eof_flags |= XFS_ICWALK_FLAG_DROP_GDQUOT;
+       if (qflags & XFS_PQUOTA_ACCT)
+               eofb.eof_flags |= XFS_ICWALK_FLAG_DROP_PDQUOT;
+
+       return xfs_inode_walk(mp, XFS_INODE_WALK_INEW_WAIT, xfs_dqrele_inode,
+                       &eofb, XFS_ICI_NO_TAG);
+}
+#endif /* CONFIG_XFS_QUOTA */
+
 /*
  * Grab the inode for reclaim exclusively.
...
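The comment added on xfs_dqrele_all_inodes above stresses an ordering contract: the caller must already have cleared the relevant XFS_[UGP]QUOTA_ACTIVE flags before the walk runs, otherwise inodes could reattach dquots behind it. Below is a tiny, hypothetical single-threaded model of that contract; all names are invented for illustration and nothing here is kernel code.

#include <stdio.h>

#define QUOTA_UACTIVE  (1U << 0)   /* stand-in for XFS_UQUOTA_ACTIVE */
#define QUOTA_UACCT    (1U << 1)   /* stand-in for XFS_UQUOTA_ACCT */

struct mount { unsigned int qflags; };
struct inode { int udquot_attached; };

/* Inodes only (re)attach a dquot while the ACTIVE bit is still set. */
static void maybe_attach_udquot(struct mount *mp, struct inode *ip)
{
        if (mp->qflags & QUOTA_UACTIVE)
                ip->udquot_attached = 1;
}

/* quotaoff: clear ACTIVE first so the release walk below cannot be undone. */
static void quotaoff(struct mount *mp, struct inode *inodes, int n)
{
        int i;

        mp->qflags &= ~QUOTA_UACTIVE;           /* step 1: stop reattachment */
        for (i = 0; i < n; i++)                 /* step 2: dqrele-style walk */
                inodes[i].udquot_attached = 0;
}

int main(void)
{
        struct mount mp = { .qflags = QUOTA_UACTIVE | QUOTA_UACCT };
        struct inode inodes[2] = { { 1 }, { 1 } };

        quotaoff(&mp, inodes, 2);
        maybe_attach_udquot(&mp, &inodes[0]);   /* refused: ACTIVE is gone */
        printf("inode0 attached=%d\n", inodes[0].udquot_attached);  /* prints 0 */
        return 0;
}

If the two steps in quotaoff() were reversed, the later attach attempt would succeed and the walk's work would be lost, which is exactly the race the comment warns against.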
@@ -68,9 +68,11 @@ void xfs_inode_clear_cowblocks_tag(struct xfs_inode *ip);
 
 void xfs_blockgc_worker(struct work_struct *work);
 
-int xfs_inode_walk(struct xfs_mount *mp, int iter_flags,
-       int (*execute)(struct xfs_inode *ip, void *args),
-       void *args, int tag);
+#ifdef CONFIG_XFS_QUOTA
+int xfs_dqrele_all_inodes(struct xfs_mount *mp, unsigned int qflags);
+#else
+# define xfs_dqrele_all_inodes(mp, qflags) (0)
+#endif
 
 int xfs_icache_inode_is_allocated(struct xfs_mount *mp, struct xfs_trans *tp,
        xfs_ino_t ino, bool *inuse);
...
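The header change above also uses the usual compiled-out stub pattern: with CONFIG_XFS_QUOTA disabled, xfs_dqrele_all_inodes becomes a macro that evaluates to 0, so callers keep a plain, unconditional call site. A generic, hypothetical illustration of the same pattern (names invented, not part of XFS):

#include <stdio.h>

/* Toggle this to model building with or without the optional subsystem. */
/* #define CONFIG_FEATURE_X 1 */

struct mount { int dummy; };

#ifdef CONFIG_FEATURE_X
static int feature_x_scan(struct mount *mp, unsigned int flags)
{
        /* Real work would walk mp's caches here; the model just succeeds. */
        (void)mp;
        (void)flags;
        return 0;
}
#else
/* Compiled-out stub: the call site stays unconditional and reports success. */
# define feature_x_scan(mp, flags)      (0)
#endif

int main(void)
{
        struct mount mp = { 0 };
        int error;

        (void)mp;
        error = feature_x_scan(&mp, 1U);
        printf("error=%d\n", error);    /* 0 whether or not the feature is built */
        return 0;
}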
@@ -142,7 +142,6 @@ extern void xfs_qm_destroy_quotainfo(struct xfs_mount *);
 
 /* dquot stuff */
 extern void xfs_qm_dqpurge_all(struct xfs_mount *, uint);
-extern void xfs_qm_dqrele_all_inodes(struct xfs_mount *, uint);
 
 /* quota ops */
 extern int xfs_qm_scall_trunc_qfiles(struct xfs_mount *, uint);
...
@@ -201,7 +201,8 @@ xfs_qm_scall_quotaoff(
         * depend on the quota inodes (and other things) being valid as long as
         * we keep the lock(s).
         */
-       xfs_qm_dqrele_all_inodes(mp, flags);
+       error = xfs_dqrele_all_inodes(mp, flags);
+       ASSERT(!error);
 
        /*
         * Next we make the changes in the quota flag in the mount struct.
@@ -747,54 +748,3 @@ xfs_qm_scall_getquota_next(
        xfs_qm_dqput(dqp);
        return error;
 }
-
-STATIC int
-xfs_dqrele_inode(
-       struct xfs_inode *ip,
-       void *args)
-{
-       uint *flags = args;
-
-       /* skip quota inodes */
-       if (ip == ip->i_mount->m_quotainfo->qi_uquotaip ||
-           ip == ip->i_mount->m_quotainfo->qi_gquotaip ||
-           ip == ip->i_mount->m_quotainfo->qi_pquotaip) {
-               ASSERT(ip->i_udquot == NULL);
-               ASSERT(ip->i_gdquot == NULL);
-               ASSERT(ip->i_pdquot == NULL);
-               return 0;
-       }
-
-       xfs_ilock(ip, XFS_ILOCK_EXCL);
-       if ((*flags & XFS_UQUOTA_ACCT) && ip->i_udquot) {
-               xfs_qm_dqrele(ip->i_udquot);
-               ip->i_udquot = NULL;
-       }
-       if ((*flags & XFS_GQUOTA_ACCT) && ip->i_gdquot) {
-               xfs_qm_dqrele(ip->i_gdquot);
-               ip->i_gdquot = NULL;
-       }
-       if ((*flags & XFS_PQUOTA_ACCT) && ip->i_pdquot) {
-               xfs_qm_dqrele(ip->i_pdquot);
-               ip->i_pdquot = NULL;
-       }
-       xfs_iunlock(ip, XFS_ILOCK_EXCL);
-       return 0;
-}
-
-/*
- * Go thru all the inodes in the file system, releasing their dquots.
- *
- * Note that the mount structure gets modified to indicate that quotas are off
- * AFTER this, in the case of quotaoff.
- */
-void
-xfs_qm_dqrele_all_inodes(
-       struct xfs_mount *mp,
-       uint flags)
-{
-       ASSERT(mp->m_quotainfo);
-       xfs_inode_walk(mp, XFS_INODE_WALK_INEW_WAIT, xfs_dqrele_inode,
-                       &flags, XFS_ICI_NO_TAG);
-}