Commit 8a072a4d authored by Christoph Hellwig, committed by Alex Elder

xfs: reduce the number of pagb_lock roundtrips in xfs_alloc_clear_busy

Instead of finding the per-ag structure and taking and releasing the pagb_lock
for every single busy extent completed, sort the list of busy extents and
only switch between AGs where necessary.  This becomes especially important
with the online discard support, which will hit this lock more often.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Alex Elder <aelder@sgi.com>
parent 97d3ac75
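
The change is easiest to see as a generic batching pattern: sort the pending
items by the group they belong to, then walk the sorted list and take each
group's lock once per run of adjacent items instead of once per item. Below is
a minimal, self-contained userspace C sketch of that pattern; all names in it
(struct group, struct item, item_group_cmp, clear_items) are hypothetical
illustrations, not the XFS code, which instead uses list_sort() on an
intrusive list and the per-AG pagb_lock as shown in the diff.

    #include <pthread.h>
    #include <stdlib.h>

    struct group {
            pthread_mutex_t lock;
            /* per-group state would live here */
    };

    struct item {
            unsigned int groupno;
            struct group *group;
    };

    /* qsort comparator: order items by group number. */
    static int item_group_cmp(const void *a, const void *b)
    {
            const struct item *ia = *(const struct item *const *)a;
            const struct item *ib = *(const struct item *const *)b;

            return (ia->groupno > ib->groupno) - (ia->groupno < ib->groupno);
    }

    static void clear_items(struct item **items, size_t n)
    {
            struct group *locked = NULL;
            size_t i;

            /* Sort so that items of the same group become adjacent. */
            qsort(items, n, sizeof(items[0]), item_group_cmp);

            for (i = 0; i < n; i++) {
                    /* Only switch locks when the group actually changes. */
                    if (items[i]->group != locked) {
                            if (locked)
                                    pthread_mutex_unlock(&locked->lock);
                            locked = items[i]->group;
                            pthread_mutex_lock(&locked->lock);
                    }
                    /* ... remove items[i] from per-group structures ... */
                    free(items[i]);
            }
            if (locked)
                    pthread_mutex_unlock(&locked->lock);
    }

The trade-off is the same in the kernel: list_sort() is an O(n log n) merge
sort over the linked list, and that one-time cost is repaid by taking each
AG's pagb_lock once per batch of extents rather than once per extent.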
@@ -33,7 +33,6 @@
 #include <linux/migrate.h>
 #include <linux/backing-dev.h>
 #include <linux/freezer.h>
-#include <linux/list_sort.h>
 #include "xfs_sb.h"
 #include "xfs_inum.h"
@@ -70,6 +70,7 @@
 #include <linux/ctype.h>
 #include <linux/writeback.h>
 #include <linux/capability.h>
+#include <linux/list_sort.h>
 #include <asm/page.h>
 #include <asm/div64.h>
@@ -2964,24 +2964,60 @@ xfs_alloc_busy_trim(
 	*rlen = 0;
 }
 
-void
-xfs_alloc_busy_clear(
+static void
+xfs_alloc_busy_clear_one(
 	struct xfs_mount	*mp,
+	struct xfs_perag	*pag,
 	struct xfs_busy_extent	*busyp)
 {
-	struct xfs_perag	*pag;
-
-	list_del_init(&busyp->list);
-
-	pag = xfs_perag_get(mp, busyp->agno);
-	spin_lock(&pag->pagb_lock);
 	if (busyp->length) {
 		trace_xfs_alloc_busy_clear(mp, busyp->agno, busyp->bno,
 						busyp->length);
 		rb_erase(&busyp->rb_node, &pag->pagb_tree);
 	}
-	spin_unlock(&pag->pagb_lock);
-	xfs_perag_put(pag);
 
+	list_del_init(&busyp->list);
 	kmem_free(busyp);
 }
 
+void
+xfs_alloc_busy_clear(
+	struct xfs_mount	*mp,
+	struct list_head	*list)
+{
+	struct xfs_busy_extent	*busyp, *n;
+	struct xfs_perag	*pag = NULL;
+	xfs_agnumber_t		agno = NULLAGNUMBER;
+
+	list_for_each_entry_safe(busyp, n, list, list) {
+		if (busyp->agno != agno) {
+			if (pag) {
+				spin_unlock(&pag->pagb_lock);
+				xfs_perag_put(pag);
+			}
+			pag = xfs_perag_get(mp, busyp->agno);
+			spin_lock(&pag->pagb_lock);
+			agno = busyp->agno;
+		}
+
+		xfs_alloc_busy_clear_one(mp, pag, busyp);
+	}
+
+	if (pag) {
+		spin_unlock(&pag->pagb_lock);
+		xfs_perag_put(pag);
+	}
+}
+
+/*
+ * Callback for list_sort to sort busy extents by the AG they reside in.
+ */
+int
+xfs_busy_extent_ag_cmp(
+	void			*priv,
+	struct list_head	*a,
+	struct list_head	*b)
+{
+	return container_of(a, struct xfs_busy_extent, list)->agno -
+		container_of(b, struct xfs_busy_extent, list)->agno;
+}
@@ -140,7 +140,7 @@ xfs_alloc_busy_insert(struct xfs_trans *tp, xfs_agnumber_t agno,
 	xfs_agblock_t bno, xfs_extlen_t len);
 
 void
-xfs_alloc_busy_clear(struct xfs_mount *mp, struct xfs_busy_extent *busyp);
+xfs_alloc_busy_clear(struct xfs_mount *mp, struct list_head *list);
 
 int
 xfs_alloc_busy_search(struct xfs_mount *mp, xfs_agnumber_t agno,
@@ -149,6 +149,15 @@ xfs_alloc_busy_search(struct xfs_mount *mp, xfs_agnumber_t agno,
 void
 xfs_alloc_busy_reuse(struct xfs_mount *mp, xfs_agnumber_t agno,
 	xfs_agblock_t fbno, xfs_extlen_t flen, bool userdata);
+
+int
+xfs_busy_extent_ag_cmp(void *priv, struct list_head *a, struct list_head *b);
+
+static inline void xfs_alloc_busy_sort(struct list_head *list)
+{
+	list_sort(NULL, list, xfs_busy_extent_ag_cmp);
+}
+
 #endif	/* __KERNEL__ */
 
 /*
@@ -361,13 +361,12 @@ xlog_cil_committed(
 	int	abort)
 {
 	struct xfs_cil_ctx	*ctx = args;
-	struct xfs_busy_extent	*busyp, *n;
 
 	xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain,
 					ctx->start_lsn, abort);
 
-	list_for_each_entry_safe(busyp, n, &ctx->busy_extents, list)
-		xfs_alloc_busy_clear(ctx->cil->xc_log->l_mp, busyp);
+	xfs_alloc_busy_sort(&ctx->busy_extents);
+	xfs_alloc_busy_clear(ctx->cil->xc_log->l_mp, &ctx->busy_extents);
 
 	spin_lock(&ctx->cil->xc_cil_lock);
 	list_del(&ctx->committing);
@@ -608,10 +608,8 @@ STATIC void
 xfs_trans_free(
 	struct xfs_trans	*tp)
 {
-	struct xfs_busy_extent	*busyp, *n;
-
-	list_for_each_entry_safe(busyp, n, &tp->t_busy, list)
-		xfs_alloc_busy_clear(tp->t_mountp, busyp);
+	xfs_alloc_busy_sort(&tp->t_busy);
+	xfs_alloc_busy_clear(tp->t_mountp, &tp->t_busy);
 
 	atomic_dec(&tp->t_mountp->m_active_trans);
 	xfs_trans_free_dqinfo(tp);