Commit 306195f3 authored by Darrick J. Wong

xfs: pivot online scrub away from kmem.[ch]

Convert all the online scrub code to use the Linux slab allocator
functions directly instead of going through the kmem wrappers.
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
parent fcd2a434
...@@ -685,7 +685,7 @@ xrep_agfl_init_header( ...@@ -685,7 +685,7 @@ xrep_agfl_init_header(
if (br->len) if (br->len)
break; break;
list_del(&br->list); list_del(&br->list);
kmem_free(br); kfree(br);
} }
/* Write new AGFL to disk. */ /* Write new AGFL to disk. */
......
...@@ -49,7 +49,7 @@ xchk_setup_xattr_buf( ...@@ -49,7 +49,7 @@ xchk_setup_xattr_buf(
if (ab) { if (ab) {
if (sz <= ab->sz) if (sz <= ab->sz)
return 0; return 0;
kmem_free(ab); kvfree(ab);
sc->buf = NULL; sc->buf = NULL;
} }
......
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
#include "xfs_trans_resv.h" #include "xfs_trans_resv.h"
#include "xfs_mount.h" #include "xfs_mount.h"
#include "xfs_btree.h" #include "xfs_btree.h"
#include "scrub/scrub.h"
#include "scrub/bitmap.h" #include "scrub/bitmap.h"
/* /*
...@@ -25,7 +26,7 @@ xbitmap_set( ...@@ -25,7 +26,7 @@ xbitmap_set(
{ {
struct xbitmap_range *bmr; struct xbitmap_range *bmr;
bmr = kmem_alloc(sizeof(struct xbitmap_range), KM_MAYFAIL); bmr = kmalloc(sizeof(struct xbitmap_range), XCHK_GFP_FLAGS);
if (!bmr) if (!bmr)
return -ENOMEM; return -ENOMEM;
...@@ -47,7 +48,7 @@ xbitmap_destroy( ...@@ -47,7 +48,7 @@ xbitmap_destroy(
for_each_xbitmap_extent(bmr, n, bitmap) { for_each_xbitmap_extent(bmr, n, bitmap) {
list_del(&bmr->list); list_del(&bmr->list);
kmem_free(bmr); kfree(bmr);
} }
} }
...@@ -174,15 +175,15 @@ xbitmap_disunion( ...@@ -174,15 +175,15 @@ xbitmap_disunion(
/* Total overlap, just delete ex. */ /* Total overlap, just delete ex. */
lp = lp->next; lp = lp->next;
list_del(&br->list); list_del(&br->list);
kmem_free(br); kfree(br);
break; break;
case 0: case 0:
/* /*
* Deleting from the middle: add the new right extent * Deleting from the middle: add the new right extent
* and then shrink the left extent. * and then shrink the left extent.
*/ */
new_br = kmem_alloc(sizeof(struct xbitmap_range), new_br = kmalloc(sizeof(struct xbitmap_range),
KM_MAYFAIL); XCHK_GFP_FLAGS);
if (!new_br) { if (!new_br) {
error = -ENOMEM; error = -ENOMEM;
goto out; goto out;
......
...@@ -432,8 +432,7 @@ xchk_btree_check_owner( ...@@ -432,8 +432,7 @@ xchk_btree_check_owner(
if (cur->bc_btnum == XFS_BTNUM_BNO || cur->bc_btnum == XFS_BTNUM_RMAP) { if (cur->bc_btnum == XFS_BTNUM_BNO || cur->bc_btnum == XFS_BTNUM_RMAP) {
struct check_owner *co; struct check_owner *co;
co = kmem_alloc(sizeof(struct check_owner), co = kmalloc(sizeof(struct check_owner), XCHK_GFP_FLAGS);
KM_MAYFAIL);
if (!co) if (!co)
return -ENOMEM; return -ENOMEM;
...@@ -652,7 +651,7 @@ xchk_btree( ...@@ -652,7 +651,7 @@ xchk_btree(
xchk_btree_set_corrupt(sc, cur, 0); xchk_btree_set_corrupt(sc, cur, 0);
return 0; return 0;
} }
bs = kmem_zalloc(cur_sz, KM_NOFS | KM_MAYFAIL); bs = kzalloc(cur_sz, XCHK_GFP_FLAGS);
if (!bs) if (!bs)
return -ENOMEM; return -ENOMEM;
bs->cur = cur; bs->cur = cur;
...@@ -743,9 +742,9 @@ xchk_btree( ...@@ -743,9 +742,9 @@ xchk_btree(
error = xchk_btree_check_block_owner(bs, co->level, error = xchk_btree_check_block_owner(bs, co->level,
co->daddr); co->daddr);
list_del(&co->list); list_del(&co->list);
kmem_free(co); kfree(co);
} }
kmem_free(bs); kfree(bs);
return error; return error;
} }
...@@ -486,7 +486,7 @@ xchk_da_btree( ...@@ -486,7 +486,7 @@ xchk_da_btree(
return 0; return 0;
/* Set up initial da state. */ /* Set up initial da state. */
ds = kmem_zalloc(sizeof(struct xchk_da_btree), KM_NOFS | KM_MAYFAIL); ds = kzalloc(sizeof(struct xchk_da_btree), XCHK_GFP_FLAGS);
if (!ds) if (!ds)
return -ENOMEM; return -ENOMEM;
ds->dargs.dp = sc->ip; ds->dargs.dp = sc->ip;
...@@ -591,6 +591,6 @@ xchk_da_btree( ...@@ -591,6 +591,6 @@ xchk_da_btree(
out_state: out_state:
xfs_da_state_free(ds->state); xfs_da_state_free(ds->state);
kmem_free(ds); kfree(ds);
return error; return error;
} }
...@@ -116,7 +116,7 @@ xchk_setup_fscounters( ...@@ -116,7 +116,7 @@ xchk_setup_fscounters(
struct xchk_fscounters *fsc; struct xchk_fscounters *fsc;
int error; int error;
sc->buf = kmem_zalloc(sizeof(struct xchk_fscounters), 0); sc->buf = kzalloc(sizeof(struct xchk_fscounters), XCHK_GFP_FLAGS);
if (!sc->buf) if (!sc->buf)
return -ENOMEM; return -ENOMEM;
fsc = sc->buf; fsc = sc->buf;
......
...@@ -127,8 +127,8 @@ xchk_refcountbt_rmap_check( ...@@ -127,8 +127,8 @@ xchk_refcountbt_rmap_check(
* is healthy each rmap_irec we see will be in agbno order * is healthy each rmap_irec we see will be in agbno order
* so we don't need insertion sort here. * so we don't need insertion sort here.
*/ */
frag = kmem_alloc(sizeof(struct xchk_refcnt_frag), frag = kmalloc(sizeof(struct xchk_refcnt_frag),
KM_MAYFAIL); XCHK_GFP_FLAGS);
if (!frag) if (!frag)
return -ENOMEM; return -ENOMEM;
memcpy(&frag->rm, rec, sizeof(frag->rm)); memcpy(&frag->rm, rec, sizeof(frag->rm));
...@@ -215,7 +215,7 @@ xchk_refcountbt_process_rmap_fragments( ...@@ -215,7 +215,7 @@ xchk_refcountbt_process_rmap_fragments(
continue; continue;
} }
list_del(&frag->list); list_del(&frag->list);
kmem_free(frag); kfree(frag);
nr++; nr++;
} }
...@@ -257,11 +257,11 @@ xchk_refcountbt_process_rmap_fragments( ...@@ -257,11 +257,11 @@ xchk_refcountbt_process_rmap_fragments(
/* Delete fragments and work list. */ /* Delete fragments and work list. */
list_for_each_entry_safe(frag, n, &worklist, list) { list_for_each_entry_safe(frag, n, &worklist, list) {
list_del(&frag->list); list_del(&frag->list);
kmem_free(frag); kfree(frag);
} }
list_for_each_entry_safe(frag, n, &refchk->fragments, list) { list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
list_del(&frag->list); list_del(&frag->list);
kmem_free(frag); kfree(frag);
} }
} }
...@@ -306,7 +306,7 @@ xchk_refcountbt_xref_rmap( ...@@ -306,7 +306,7 @@ xchk_refcountbt_xref_rmap(
out_free: out_free:
list_for_each_entry_safe(frag, n, &refchk.fragments, list) { list_for_each_entry_safe(frag, n, &refchk.fragments, list) {
list_del(&frag->list); list_del(&frag->list);
kmem_free(frag); kfree(frag);
} }
} }
......
...@@ -174,7 +174,7 @@ xchk_teardown( ...@@ -174,7 +174,7 @@ xchk_teardown(
if (sc->flags & XCHK_REAPING_DISABLED) if (sc->flags & XCHK_REAPING_DISABLED)
xchk_start_reaping(sc); xchk_start_reaping(sc);
if (sc->buf) { if (sc->buf) {
kmem_free(sc->buf); kvfree(sc->buf);
sc->buf = NULL; sc->buf = NULL;
} }
return error; return error;
...@@ -467,7 +467,7 @@ xfs_scrub_metadata( ...@@ -467,7 +467,7 @@ xfs_scrub_metadata(
xfs_warn_mount(mp, XFS_OPSTATE_WARNED_SCRUB, xfs_warn_mount(mp, XFS_OPSTATE_WARNED_SCRUB,
"EXPERIMENTAL online scrub feature in use. Use at your own risk!"); "EXPERIMENTAL online scrub feature in use. Use at your own risk!");
sc = kmem_zalloc(sizeof(struct xfs_scrub), KM_NOFS | KM_MAYFAIL); sc = kzalloc(sizeof(struct xfs_scrub), XCHK_GFP_FLAGS);
if (!sc) { if (!sc) {
error = -ENOMEM; error = -ENOMEM;
goto out; goto out;
...@@ -557,7 +557,7 @@ xfs_scrub_metadata( ...@@ -557,7 +557,7 @@ xfs_scrub_metadata(
out_teardown: out_teardown:
error = xchk_teardown(sc, error); error = xchk_teardown(sc, error);
out_sc: out_sc:
kmem_free(sc); kfree(sc);
out: out:
trace_xchk_done(XFS_I(file_inode(file)), sm, error); trace_xchk_done(XFS_I(file_inode(file)), sm, error);
if (error == -EFSCORRUPTED || error == -EFSBADCRC) { if (error == -EFSCORRUPTED || error == -EFSBADCRC) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment