Commit ea7b0820 authored by Darrick J. Wong

xfs: move xfs_rmap_update_defer_add to xfs_rmap_item.c

Move the code that adds the incore xfs_rmap_update_item deferred work
data to a transaction to live with the RUI log item code.  This means
that the rmap code no longer has to know about the inner workings of the
RUI log items.

As a consequence, we can get rid of the _{get,put}_group helpers.
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
parent 905af726
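
The change is mechanical. Condensed from the hunks below (a reading aid, not a compilable unit), the call site in __xfs_rmap_add() goes from open-coding the RUI bookkeeping to a single helper call:

	/* Before: xfs_rmap.c reaches into RUI internals. */
	trace_xfs_rmap_defer(tp->t_mountp, ri);
	xfs_rmap_update_get_group(tp->t_mountp, ri);
	xfs_defer_add(tp, &ri->ri_list, &xfs_rmap_update_defer_type);

	/* After: tracing, the perag intent reference, and the defer type
	 * stay private to xfs_rmap_item.c.
	 */
	xfs_rmap_defer_add(tp, ri);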
--- a/fs/xfs/libxfs/xfs_rmap.c
+++ b/fs/xfs/libxfs/xfs_rmap.c
@@ -24,6 +24,7 @@
 #include "xfs_inode.h"
 #include "xfs_ag.h"
 #include "xfs_health.h"
+#include "xfs_rmap_item.h"
 
 struct kmem_cache	*xfs_rmap_intent_cache;
@@ -2656,10 +2657,7 @@ __xfs_rmap_add(
 	ri->ri_whichfork = whichfork;
 	ri->ri_bmap = *bmap;
 
-	trace_xfs_rmap_defer(tp->t_mountp, ri);
-
-	xfs_rmap_update_get_group(tp->t_mountp, ri);
-	xfs_defer_add(tp, &ri->ri_list, &xfs_rmap_update_defer_type);
+	xfs_rmap_defer_add(tp, ri);
 }
 
 /* Map an extent into a file. */
--- a/fs/xfs/libxfs/xfs_rmap.h
+++ b/fs/xfs/libxfs/xfs_rmap.h
@@ -176,9 +176,6 @@ struct xfs_rmap_intent {
 	struct xfs_perag		*ri_pag;
 };
 
-void xfs_rmap_update_get_group(struct xfs_mount *mp,
-		struct xfs_rmap_intent *ri);
-
 /* functions for updating the rmapbt based on bmbt map/unmap operations */
 void xfs_rmap_map_extent(struct xfs_trans *tp, struct xfs_inode *ip,
 		int whichfork, struct xfs_bmbt_irec *imap);
--- a/fs/xfs/xfs_rmap_item.c
+++ b/fs/xfs/xfs_rmap_item.c
@@ -22,6 +22,7 @@
 #include "xfs_log_recover.h"
 #include "xfs_ag.h"
 #include "xfs_btree.h"
+#include "xfs_trace.h"
 
 struct kmem_cache	*xfs_rui_cache;
 struct kmem_cache	*xfs_rud_cache;
@@ -342,21 +343,18 @@ xfs_rmap_update_create_done(
 	return &rudp->rud_item;
 }
 
-/* Take a passive ref to the AG containing the space we're rmapping. */
+/* Add this deferred RUI to the transaction. */
 void
-xfs_rmap_update_get_group(
-	struct xfs_mount	*mp,
+xfs_rmap_defer_add(
+	struct xfs_trans	*tp,
 	struct xfs_rmap_intent	*ri)
 {
-	ri->ri_pag = xfs_perag_intent_get(mp, ri->ri_bmap.br_startblock);
-}
+	struct xfs_mount	*mp = tp->t_mountp;
 
-/* Release a passive AG ref after finishing rmapping work. */
-static inline void
-xfs_rmap_update_put_group(
-	struct xfs_rmap_intent	*ri)
-{
-	xfs_perag_intent_put(ri->ri_pag);
+	trace_xfs_rmap_defer(mp, ri);
+
+	ri->ri_pag = xfs_perag_intent_get(mp, ri->ri_bmap.br_startblock);
+	xfs_defer_add(tp, &ri->ri_list, &xfs_rmap_update_defer_type);
 }
 
 /* Cancel a deferred rmap update. */
@@ -366,7 +364,7 @@ xfs_rmap_update_cancel_item(
 {
 	struct xfs_rmap_intent		*ri = ri_entry(item);
 
-	xfs_rmap_update_put_group(ri);
+	xfs_perag_intent_put(ri->ri_pag);
 	kmem_cache_free(xfs_rmap_intent_cache, ri);
 }
@@ -496,7 +494,7 @@ xfs_rui_recover_work(
 	ri->ri_bmap.br_blockcount = map->me_len;
 	ri->ri_bmap.br_state = (map->me_flags & XFS_RMAP_EXTENT_UNWRITTEN) ?
 			XFS_EXT_UNWRITTEN : XFS_EXT_NORM;
-	xfs_rmap_update_get_group(mp, ri);
+	ri->ri_pag = xfs_perag_intent_get(mp, map->me_startblock);
 
 	xfs_defer_add_item(dfp, &ri->ri_list);
 }
--- a/fs/xfs/xfs_rmap_item.h
+++ b/fs/xfs/xfs_rmap_item.h
@@ -71,4 +71,8 @@ struct xfs_rud_log_item {
 extern struct kmem_cache	*xfs_rui_cache;
 extern struct kmem_cache	*xfs_rud_cache;
 
+struct xfs_rmap_intent;
+
+void xfs_rmap_defer_add(struct xfs_trans *tp, struct xfs_rmap_intent *ri);
+
 #endif	/* __XFS_RMAP_ITEM_H__ */
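
For readability, the new helper as it reads after this patch, assembled from the xfs_rmap_item.c hunk above:

	/* Add this deferred RUI to the transaction. */
	void
	xfs_rmap_defer_add(
		struct xfs_trans	*tp,
		struct xfs_rmap_intent	*ri)
	{
		struct xfs_mount	*mp = tp->t_mountp;

		trace_xfs_rmap_defer(mp, ri);

		ri->ri_pag = xfs_perag_intent_get(mp, ri->ri_bmap.br_startblock);
		xfs_defer_add(tp, &ri->ri_list, &xfs_rmap_update_defer_type);
	}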