Commit f3c799c2 authored by Darrick J. Wong

xfs: create slab caches for frequently-used deferred items

Create slab caches for the high-level structures that coordinate
deferred intent items, since they're used fairly heavily.
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Chandan Babu R <chandan.babu@oracle.com>
parent 9e253954
...@@ -37,7 +37,7 @@ ...@@ -37,7 +37,7 @@
#include "xfs_icache.h" #include "xfs_icache.h"
#include "xfs_iomap.h" #include "xfs_iomap.h"
struct kmem_cache *xfs_bmap_intent_cache;
struct kmem_cache *xfs_bmap_free_item_cache; struct kmem_cache *xfs_bmap_free_item_cache;
/* /*
...@@ -6190,7 +6190,7 @@ __xfs_bmap_add( ...@@ -6190,7 +6190,7 @@ __xfs_bmap_add(
bmap->br_blockcount, bmap->br_blockcount,
bmap->br_state); bmap->br_state);
bi = kmem_alloc(sizeof(struct xfs_bmap_intent), KM_NOFS); bi = kmem_cache_alloc(xfs_bmap_intent_cache, GFP_NOFS | __GFP_NOFAIL);
INIT_LIST_HEAD(&bi->bi_list); INIT_LIST_HEAD(&bi->bi_list);
bi->bi_type = type; bi->bi_type = type;
bi->bi_owner = ip; bi->bi_owner = ip;
...@@ -6301,3 +6301,20 @@ xfs_bmap_validate_extent( ...@@ -6301,3 +6301,20 @@ xfs_bmap_validate_extent(
return __this_address; return __this_address;
return NULL; return NULL;
} }
/*
 * Create the slab cache backing struct xfs_bmap_intent allocations.
 * Returns 0 on success or -ENOMEM if the cache could not be created.
 */
int __init
xfs_bmap_intent_init_cache(void)
{
	xfs_bmap_intent_cache = kmem_cache_create("xfs_bmap_intent",
			sizeof(struct xfs_bmap_intent), 0, 0, NULL);
	if (!xfs_bmap_intent_cache)
		return -ENOMEM;
	return 0;
}
/* Tear down the bmap intent cache and clear the global pointer. */
void
xfs_bmap_intent_destroy_cache(void)
{
	struct kmem_cache	*cache = xfs_bmap_intent_cache;

	xfs_bmap_intent_cache = NULL;
	kmem_cache_destroy(cache);
}
...@@ -290,4 +290,9 @@ int xfs_bmapi_remap(struct xfs_trans *tp, struct xfs_inode *ip, ...@@ -290,4 +290,9 @@ int xfs_bmapi_remap(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_fileoff_t bno, xfs_filblks_t len, xfs_fsblock_t startblock, xfs_fileoff_t bno, xfs_filblks_t len, xfs_fsblock_t startblock,
int flags); int flags);
extern struct kmem_cache *xfs_bmap_intent_cache;
int __init xfs_bmap_intent_init_cache(void);
void xfs_bmap_intent_destroy_cache(void);
#endif /* __XFS_BMAP_H__ */ #endif /* __XFS_BMAP_H__ */
...@@ -18,6 +18,11 @@ ...@@ -18,6 +18,11 @@
#include "xfs_trace.h" #include "xfs_trace.h"
#include "xfs_icache.h" #include "xfs_icache.h"
#include "xfs_log.h" #include "xfs_log.h"
#include "xfs_rmap.h"
#include "xfs_refcount.h"
#include "xfs_bmap.h"
static struct kmem_cache *xfs_defer_pending_cache;
/* /*
* Deferred Operations in XFS * Deferred Operations in XFS
...@@ -365,7 +370,7 @@ xfs_defer_cancel_list( ...@@ -365,7 +370,7 @@ xfs_defer_cancel_list(
ops->cancel_item(pwi); ops->cancel_item(pwi);
} }
ASSERT(dfp->dfp_count == 0); ASSERT(dfp->dfp_count == 0);
kmem_free(dfp); kmem_cache_free(xfs_defer_pending_cache, dfp);
} }
} }
...@@ -462,7 +467,7 @@ xfs_defer_finish_one( ...@@ -462,7 +467,7 @@ xfs_defer_finish_one(
/* Done with the dfp, free it. */ /* Done with the dfp, free it. */
list_del(&dfp->dfp_list); list_del(&dfp->dfp_list);
kmem_free(dfp); kmem_cache_free(xfs_defer_pending_cache, dfp);
out: out:
if (ops->finish_cleanup) if (ops->finish_cleanup)
ops->finish_cleanup(tp, state, error); ops->finish_cleanup(tp, state, error);
...@@ -596,8 +601,8 @@ xfs_defer_add( ...@@ -596,8 +601,8 @@ xfs_defer_add(
dfp = NULL; dfp = NULL;
} }
if (!dfp) { if (!dfp) {
dfp = kmem_alloc(sizeof(struct xfs_defer_pending), dfp = kmem_cache_zalloc(xfs_defer_pending_cache,
KM_NOFS); GFP_NOFS | __GFP_NOFAIL);
dfp->dfp_type = type; dfp->dfp_type = type;
dfp->dfp_intent = NULL; dfp->dfp_intent = NULL;
dfp->dfp_done = NULL; dfp->dfp_done = NULL;
...@@ -809,3 +814,55 @@ xfs_defer_resources_rele( ...@@ -809,3 +814,55 @@ xfs_defer_resources_rele(
dres->dr_bufs = 0; dres->dr_bufs = 0;
dres->dr_ordered = 0; dres->dr_ordered = 0;
} }
/*
 * Create the slab cache for struct xfs_defer_pending tracking items.
 * Returns 0 on success or -ENOMEM if the cache could not be created.
 */
static inline int __init
xfs_defer_init_cache(void)
{
	xfs_defer_pending_cache = kmem_cache_create("xfs_defer_pending",
			sizeof(struct xfs_defer_pending), 0, 0, NULL);
	if (!xfs_defer_pending_cache)
		return -ENOMEM;
	return 0;
}
/* Tear down the deferred-pending cache and clear the static pointer. */
static inline void
xfs_defer_destroy_cache(void)
{
	struct kmem_cache	*cache = xfs_defer_pending_cache;

	xfs_defer_pending_cache = NULL;
	kmem_cache_destroy(cache);
}
/* Set up caches for deferred work items. */
int __init
xfs_defer_init_item_caches(void)
{
	int			error;

	/* The pending-item cache must come up before any intent caches. */
	error = xfs_defer_init_cache();
	if (error)
		return error;
	error = xfs_rmap_intent_init_cache();
	if (error)
		goto err;
	error = xfs_refcount_intent_init_cache();
	if (error)
		goto err;
	error = xfs_bmap_intent_init_cache();
	if (error)
		goto err;
	return 0;
err:
	/*
	 * Unwind everything created so far.  The destroy helpers tolerate
	 * caches that were never created, since kmem_cache_destroy(NULL)
	 * is a no-op.
	 */
	xfs_defer_destroy_item_caches();
	return error;
}
/* Destroy all the deferred work item caches, if they've been allocated. */
void
xfs_defer_destroy_item_caches(void)
{
	/* Tear down in reverse of creation order. */
	xfs_bmap_intent_destroy_cache();
	xfs_refcount_intent_destroy_cache();
	xfs_rmap_intent_destroy_cache();
	xfs_defer_destroy_cache();
}
...@@ -122,4 +122,7 @@ void xfs_defer_ops_capture_free(struct xfs_mount *mp, ...@@ -122,4 +122,7 @@ void xfs_defer_ops_capture_free(struct xfs_mount *mp,
struct xfs_defer_capture *d); struct xfs_defer_capture *d);
void xfs_defer_resources_rele(struct xfs_defer_resources *dres); void xfs_defer_resources_rele(struct xfs_defer_resources *dres);
int __init xfs_defer_init_item_caches(void);
void xfs_defer_destroy_item_caches(void);
#endif /* __XFS_DEFER_H__ */ #endif /* __XFS_DEFER_H__ */
...@@ -24,6 +24,8 @@ ...@@ -24,6 +24,8 @@
#include "xfs_rmap.h" #include "xfs_rmap.h"
#include "xfs_ag.h" #include "xfs_ag.h"
struct kmem_cache *xfs_refcount_intent_cache;
/* Allowable refcount adjustment amounts. */ /* Allowable refcount adjustment amounts. */
enum xfs_refc_adjust_op { enum xfs_refc_adjust_op {
XFS_REFCOUNT_ADJUST_INCREASE = 1, XFS_REFCOUNT_ADJUST_INCREASE = 1,
...@@ -1235,8 +1237,8 @@ __xfs_refcount_add( ...@@ -1235,8 +1237,8 @@ __xfs_refcount_add(
type, XFS_FSB_TO_AGBNO(tp->t_mountp, startblock), type, XFS_FSB_TO_AGBNO(tp->t_mountp, startblock),
blockcount); blockcount);
ri = kmem_alloc(sizeof(struct xfs_refcount_intent), ri = kmem_cache_alloc(xfs_refcount_intent_cache,
KM_NOFS); GFP_NOFS | __GFP_NOFAIL);
INIT_LIST_HEAD(&ri->ri_list); INIT_LIST_HEAD(&ri->ri_list);
ri->ri_type = type; ri->ri_type = type;
ri->ri_startblock = startblock; ri->ri_startblock = startblock;
...@@ -1782,3 +1784,20 @@ xfs_refcount_has_record( ...@@ -1782,3 +1784,20 @@ xfs_refcount_has_record(
return xfs_btree_has_record(cur, &low, &high, exists); return xfs_btree_has_record(cur, &low, &high, exists);
} }
/*
 * Create the slab cache backing struct xfs_refcount_intent allocations.
 * Returns 0 on success or -ENOMEM if the cache could not be created.
 */
int __init
xfs_refcount_intent_init_cache(void)
{
	xfs_refcount_intent_cache = kmem_cache_create("xfs_refc_intent",
			sizeof(struct xfs_refcount_intent), 0, 0, NULL);
	if (!xfs_refcount_intent_cache)
		return -ENOMEM;
	return 0;
}
/* Tear down the refcount intent cache and clear the global pointer. */
void
xfs_refcount_intent_destroy_cache(void)
{
	struct kmem_cache	*cache = xfs_refcount_intent_cache;

	xfs_refcount_intent_cache = NULL;
	kmem_cache_destroy(cache);
}
...@@ -83,4 +83,9 @@ extern void xfs_refcount_btrec_to_irec(const union xfs_btree_rec *rec, ...@@ -83,4 +83,9 @@ extern void xfs_refcount_btrec_to_irec(const union xfs_btree_rec *rec,
extern int xfs_refcount_insert(struct xfs_btree_cur *cur, extern int xfs_refcount_insert(struct xfs_btree_cur *cur,
struct xfs_refcount_irec *irec, int *stat); struct xfs_refcount_irec *irec, int *stat);
extern struct kmem_cache *xfs_refcount_intent_cache;
int __init xfs_refcount_intent_init_cache(void);
void xfs_refcount_intent_destroy_cache(void);
#endif /* __XFS_REFCOUNT_H__ */ #endif /* __XFS_REFCOUNT_H__ */
...@@ -24,6 +24,8 @@ ...@@ -24,6 +24,8 @@
#include "xfs_inode.h" #include "xfs_inode.h"
#include "xfs_ag.h" #include "xfs_ag.h"
struct kmem_cache *xfs_rmap_intent_cache;
/* /*
* Lookup the first record less than or equal to [bno, len, owner, offset] * Lookup the first record less than or equal to [bno, len, owner, offset]
* in the btree given by cur. * in the btree given by cur.
...@@ -2485,7 +2487,7 @@ __xfs_rmap_add( ...@@ -2485,7 +2487,7 @@ __xfs_rmap_add(
bmap->br_blockcount, bmap->br_blockcount,
bmap->br_state); bmap->br_state);
ri = kmem_alloc(sizeof(struct xfs_rmap_intent), KM_NOFS); ri = kmem_cache_alloc(xfs_rmap_intent_cache, GFP_NOFS | __GFP_NOFAIL);
INIT_LIST_HEAD(&ri->ri_list); INIT_LIST_HEAD(&ri->ri_list);
ri->ri_type = type; ri->ri_type = type;
ri->ri_owner = owner; ri->ri_owner = owner;
...@@ -2779,3 +2781,20 @@ const struct xfs_owner_info XFS_RMAP_OINFO_REFC = { ...@@ -2779,3 +2781,20 @@ const struct xfs_owner_info XFS_RMAP_OINFO_REFC = {
const struct xfs_owner_info XFS_RMAP_OINFO_COW = { const struct xfs_owner_info XFS_RMAP_OINFO_COW = {
.oi_owner = XFS_RMAP_OWN_COW, .oi_owner = XFS_RMAP_OWN_COW,
}; };
/*
 * Create the slab cache backing struct xfs_rmap_intent allocations.
 * Returns 0 on success or -ENOMEM if the cache could not be created.
 */
int __init
xfs_rmap_intent_init_cache(void)
{
	xfs_rmap_intent_cache = kmem_cache_create("xfs_rmap_intent",
			sizeof(struct xfs_rmap_intent), 0, 0, NULL);
	if (!xfs_rmap_intent_cache)
		return -ENOMEM;
	return 0;
}
/* Tear down the rmap intent cache and clear the global pointer. */
void
xfs_rmap_intent_destroy_cache(void)
{
	struct kmem_cache	*cache = xfs_rmap_intent_cache;

	xfs_rmap_intent_cache = NULL;
	kmem_cache_destroy(cache);
}
...@@ -215,4 +215,9 @@ extern const struct xfs_owner_info XFS_RMAP_OINFO_INODES; ...@@ -215,4 +215,9 @@ extern const struct xfs_owner_info XFS_RMAP_OINFO_INODES;
extern const struct xfs_owner_info XFS_RMAP_OINFO_REFC; extern const struct xfs_owner_info XFS_RMAP_OINFO_REFC;
extern const struct xfs_owner_info XFS_RMAP_OINFO_COW; extern const struct xfs_owner_info XFS_RMAP_OINFO_COW;
extern struct kmem_cache *xfs_rmap_intent_cache;
int __init xfs_rmap_intent_init_cache(void);
void xfs_rmap_intent_destroy_cache(void);
#endif /* __XFS_RMAP_H__ */ #endif /* __XFS_RMAP_H__ */
...@@ -384,7 +384,7 @@ xfs_bmap_update_finish_item( ...@@ -384,7 +384,7 @@ xfs_bmap_update_finish_item(
bmap->bi_bmap.br_blockcount = count; bmap->bi_bmap.br_blockcount = count;
return -EAGAIN; return -EAGAIN;
} }
kmem_free(bmap); kmem_cache_free(xfs_bmap_intent_cache, bmap);
return error; return error;
} }
...@@ -404,7 +404,7 @@ xfs_bmap_update_cancel_item( ...@@ -404,7 +404,7 @@ xfs_bmap_update_cancel_item(
struct xfs_bmap_intent *bmap; struct xfs_bmap_intent *bmap;
bmap = container_of(item, struct xfs_bmap_intent, bi_list); bmap = container_of(item, struct xfs_bmap_intent, bi_list);
kmem_free(bmap); kmem_cache_free(xfs_bmap_intent_cache, bmap);
} }
const struct xfs_defer_op_type xfs_bmap_update_defer_type = { const struct xfs_defer_op_type xfs_bmap_update_defer_type = {
......
...@@ -384,7 +384,7 @@ xfs_refcount_update_finish_item( ...@@ -384,7 +384,7 @@ xfs_refcount_update_finish_item(
refc->ri_blockcount = new_aglen; refc->ri_blockcount = new_aglen;
return -EAGAIN; return -EAGAIN;
} }
kmem_free(refc); kmem_cache_free(xfs_refcount_intent_cache, refc);
return error; return error;
} }
...@@ -404,7 +404,7 @@ xfs_refcount_update_cancel_item( ...@@ -404,7 +404,7 @@ xfs_refcount_update_cancel_item(
struct xfs_refcount_intent *refc; struct xfs_refcount_intent *refc;
refc = container_of(item, struct xfs_refcount_intent, ri_list); refc = container_of(item, struct xfs_refcount_intent, ri_list);
kmem_free(refc); kmem_cache_free(xfs_refcount_intent_cache, refc);
} }
const struct xfs_defer_op_type xfs_refcount_update_defer_type = { const struct xfs_defer_op_type xfs_refcount_update_defer_type = {
......
...@@ -427,7 +427,7 @@ xfs_rmap_update_finish_item( ...@@ -427,7 +427,7 @@ xfs_rmap_update_finish_item(
rmap->ri_bmap.br_startoff, rmap->ri_bmap.br_startblock, rmap->ri_bmap.br_startoff, rmap->ri_bmap.br_startblock,
rmap->ri_bmap.br_blockcount, rmap->ri_bmap.br_state, rmap->ri_bmap.br_blockcount, rmap->ri_bmap.br_state,
state); state);
kmem_free(rmap); kmem_cache_free(xfs_rmap_intent_cache, rmap);
return error; return error;
} }
...@@ -447,7 +447,7 @@ xfs_rmap_update_cancel_item( ...@@ -447,7 +447,7 @@ xfs_rmap_update_cancel_item(
struct xfs_rmap_intent *rmap; struct xfs_rmap_intent *rmap;
rmap = container_of(item, struct xfs_rmap_intent, ri_list); rmap = container_of(item, struct xfs_rmap_intent, ri_list);
kmem_free(rmap); kmem_cache_free(xfs_rmap_intent_cache, rmap);
} }
const struct xfs_defer_op_type xfs_rmap_update_defer_type = { const struct xfs_defer_op_type xfs_rmap_update_defer_type = {
......
...@@ -38,6 +38,7 @@ ...@@ -38,6 +38,7 @@
#include "xfs_pwork.h" #include "xfs_pwork.h"
#include "xfs_ag.h" #include "xfs_ag.h"
#include "xfs_btree.h" #include "xfs_btree.h"
#include "xfs_defer.h"
#include <linux/magic.h> #include <linux/magic.h>
#include <linux/fs_context.h> #include <linux/fs_context.h>
...@@ -1972,11 +1973,15 @@ xfs_init_caches(void) ...@@ -1972,11 +1973,15 @@ xfs_init_caches(void)
if (error) if (error)
goto out_destroy_bmap_free_item_cache; goto out_destroy_bmap_free_item_cache;
error = xfs_defer_init_item_caches();
if (error)
goto out_destroy_btree_cur_cache;
xfs_da_state_cache = kmem_cache_create("xfs_da_state", xfs_da_state_cache = kmem_cache_create("xfs_da_state",
sizeof(struct xfs_da_state), sizeof(struct xfs_da_state),
0, 0, NULL); 0, 0, NULL);
if (!xfs_da_state_cache) if (!xfs_da_state_cache)
goto out_destroy_btree_cur_cache; goto out_destroy_defer_item_cache;
xfs_ifork_cache = kmem_cache_create("xfs_ifork", xfs_ifork_cache = kmem_cache_create("xfs_ifork",
sizeof(struct xfs_ifork), sizeof(struct xfs_ifork),
...@@ -2106,6 +2111,8 @@ xfs_init_caches(void) ...@@ -2106,6 +2111,8 @@ xfs_init_caches(void)
kmem_cache_destroy(xfs_ifork_cache); kmem_cache_destroy(xfs_ifork_cache);
out_destroy_da_state_cache: out_destroy_da_state_cache:
kmem_cache_destroy(xfs_da_state_cache); kmem_cache_destroy(xfs_da_state_cache);
out_destroy_defer_item_cache:
xfs_defer_destroy_item_caches();
out_destroy_btree_cur_cache: out_destroy_btree_cur_cache:
xfs_btree_destroy_cur_caches(); xfs_btree_destroy_cur_caches();
out_destroy_bmap_free_item_cache: out_destroy_bmap_free_item_cache:
...@@ -2139,6 +2146,7 @@ xfs_destroy_caches(void) ...@@ -2139,6 +2146,7 @@ xfs_destroy_caches(void)
kmem_cache_destroy(xfs_trans_cache); kmem_cache_destroy(xfs_trans_cache);
kmem_cache_destroy(xfs_ifork_cache); kmem_cache_destroy(xfs_ifork_cache);
kmem_cache_destroy(xfs_da_state_cache); kmem_cache_destroy(xfs_da_state_cache);
xfs_defer_destroy_item_caches();
xfs_btree_destroy_cur_caches(); xfs_btree_destroy_cur_caches();
kmem_cache_destroy(xfs_bmap_free_item_cache); kmem_cache_destroy(xfs_bmap_free_item_cache);
kmem_cache_destroy(xfs_log_ticket_cache); kmem_cache_destroy(xfs_log_ticket_cache);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment