Commit 58c4e173 authored by David Sterba

btrfs: scrub: use GFP_KERNEL on the submission path

Scrub is not on the critical writeback path, so we don't need to use
GFP_NOFS for all allocations. The failures are handled and stats passed
back to userspace.

Let's use GFP_KERNEL on the paths where everything is ok, ie. setup the
global structures and the IO submission paths.

Functions that do the repair and fixups still use GFP_NOFS as we might
want to skip any other filesystem activity if we encounter an error.
This could turn out to be unnecessary, but requires more review compared
to the easy cases in this patch.
Signed-off-by: David Sterba <dsterba@suse.com>
parent ed0244fa
...@@ -802,7 +802,7 @@ static int btrfs_dev_replace_kthread(void *data) ...@@ -802,7 +802,7 @@ static int btrfs_dev_replace_kthread(void *data)
struct btrfs_ioctl_dev_replace_args *status_args; struct btrfs_ioctl_dev_replace_args *status_args;
u64 progress; u64 progress;
status_args = kzalloc(sizeof(*status_args), GFP_NOFS); status_args = kzalloc(sizeof(*status_args), GFP_KERNEL);
if (status_args) { if (status_args) {
btrfs_dev_replace_status(fs_info, status_args); btrfs_dev_replace_status(fs_info, status_args);
progress = status_args->status.progress_1000; progress = status_args->status.progress_1000;
......
...@@ -461,7 +461,7 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace) ...@@ -461,7 +461,7 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
struct btrfs_fs_info *fs_info = dev->dev_root->fs_info; struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
int ret; int ret;
sctx = kzalloc(sizeof(*sctx), GFP_NOFS); sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
if (!sctx) if (!sctx)
goto nomem; goto nomem;
atomic_set(&sctx->refs, 1); atomic_set(&sctx->refs, 1);
...@@ -472,7 +472,7 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace) ...@@ -472,7 +472,7 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) { for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
struct scrub_bio *sbio; struct scrub_bio *sbio;
sbio = kzalloc(sizeof(*sbio), GFP_NOFS); sbio = kzalloc(sizeof(*sbio), GFP_KERNEL);
if (!sbio) if (!sbio)
goto nomem; goto nomem;
sctx->bios[i] = sbio; sctx->bios[i] = sbio;
...@@ -1654,7 +1654,7 @@ static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx, ...@@ -1654,7 +1654,7 @@ static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
again: again:
if (!wr_ctx->wr_curr_bio) { if (!wr_ctx->wr_curr_bio) {
wr_ctx->wr_curr_bio = kzalloc(sizeof(*wr_ctx->wr_curr_bio), wr_ctx->wr_curr_bio = kzalloc(sizeof(*wr_ctx->wr_curr_bio),
GFP_NOFS); GFP_KERNEL);
if (!wr_ctx->wr_curr_bio) { if (!wr_ctx->wr_curr_bio) {
mutex_unlock(&wr_ctx->wr_lock); mutex_unlock(&wr_ctx->wr_lock);
return -ENOMEM; return -ENOMEM;
...@@ -1671,7 +1671,8 @@ static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx, ...@@ -1671,7 +1671,8 @@ static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
sbio->dev = wr_ctx->tgtdev; sbio->dev = wr_ctx->tgtdev;
bio = sbio->bio; bio = sbio->bio;
if (!bio) { if (!bio) {
bio = btrfs_io_bio_alloc(GFP_NOFS, wr_ctx->pages_per_wr_bio); bio = btrfs_io_bio_alloc(GFP_KERNEL,
wr_ctx->pages_per_wr_bio);
if (!bio) { if (!bio) {
mutex_unlock(&wr_ctx->wr_lock); mutex_unlock(&wr_ctx->wr_lock);
return -ENOMEM; return -ENOMEM;
...@@ -2076,7 +2077,8 @@ static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx, ...@@ -2076,7 +2077,8 @@ static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
sbio->dev = spage->dev; sbio->dev = spage->dev;
bio = sbio->bio; bio = sbio->bio;
if (!bio) { if (!bio) {
bio = btrfs_io_bio_alloc(GFP_NOFS, sctx->pages_per_rd_bio); bio = btrfs_io_bio_alloc(GFP_KERNEL,
sctx->pages_per_rd_bio);
if (!bio) if (!bio)
return -ENOMEM; return -ENOMEM;
sbio->bio = bio; sbio->bio = bio;
...@@ -2241,7 +2243,7 @@ static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len, ...@@ -2241,7 +2243,7 @@ static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
struct scrub_block *sblock; struct scrub_block *sblock;
int index; int index;
sblock = kzalloc(sizeof(*sblock), GFP_NOFS); sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
if (!sblock) { if (!sblock) {
spin_lock(&sctx->stat_lock); spin_lock(&sctx->stat_lock);
sctx->stat.malloc_errors++; sctx->stat.malloc_errors++;
...@@ -2259,7 +2261,7 @@ static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len, ...@@ -2259,7 +2261,7 @@ static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
struct scrub_page *spage; struct scrub_page *spage;
u64 l = min_t(u64, len, PAGE_SIZE); u64 l = min_t(u64, len, PAGE_SIZE);
spage = kzalloc(sizeof(*spage), GFP_NOFS); spage = kzalloc(sizeof(*spage), GFP_KERNEL);
if (!spage) { if (!spage) {
leave_nomem: leave_nomem:
spin_lock(&sctx->stat_lock); spin_lock(&sctx->stat_lock);
...@@ -2286,7 +2288,7 @@ static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len, ...@@ -2286,7 +2288,7 @@ static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
spage->have_csum = 0; spage->have_csum = 0;
} }
sblock->page_count++; sblock->page_count++;
spage->page = alloc_page(GFP_NOFS); spage->page = alloc_page(GFP_KERNEL);
if (!spage->page) if (!spage->page)
goto leave_nomem; goto leave_nomem;
len -= l; len -= l;
...@@ -2541,7 +2543,7 @@ static int scrub_pages_for_parity(struct scrub_parity *sparity, ...@@ -2541,7 +2543,7 @@ static int scrub_pages_for_parity(struct scrub_parity *sparity,
struct scrub_block *sblock; struct scrub_block *sblock;
int index; int index;
sblock = kzalloc(sizeof(*sblock), GFP_NOFS); sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
if (!sblock) { if (!sblock) {
spin_lock(&sctx->stat_lock); spin_lock(&sctx->stat_lock);
sctx->stat.malloc_errors++; sctx->stat.malloc_errors++;
...@@ -2561,7 +2563,7 @@ static int scrub_pages_for_parity(struct scrub_parity *sparity, ...@@ -2561,7 +2563,7 @@ static int scrub_pages_for_parity(struct scrub_parity *sparity,
struct scrub_page *spage; struct scrub_page *spage;
u64 l = min_t(u64, len, PAGE_SIZE); u64 l = min_t(u64, len, PAGE_SIZE);
spage = kzalloc(sizeof(*spage), GFP_NOFS); spage = kzalloc(sizeof(*spage), GFP_KERNEL);
if (!spage) { if (!spage) {
leave_nomem: leave_nomem:
spin_lock(&sctx->stat_lock); spin_lock(&sctx->stat_lock);
...@@ -2591,7 +2593,7 @@ static int scrub_pages_for_parity(struct scrub_parity *sparity, ...@@ -2591,7 +2593,7 @@ static int scrub_pages_for_parity(struct scrub_parity *sparity,
spage->have_csum = 0; spage->have_csum = 0;
} }
sblock->page_count++; sblock->page_count++;
spage->page = alloc_page(GFP_NOFS); spage->page = alloc_page(GFP_KERNEL);
if (!spage->page) if (!spage->page)
goto leave_nomem; goto leave_nomem;
len -= l; len -= l;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment