Commit 55b2598e authored by Christoph Hellwig, committed by Jens Axboe

bdi: initialize ->ra_pages and ->io_pages in bdi_init

Set up a readahead size by default, as very few users have a good
reason to change it.  This means coda, ecryptfs, and orangefs now
get the values set up, which they were previously missing, while
ubifs, mtd and vboxsf manually set them to 0 to avoid readahead.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jan Kara <jack@suse.cz>
Acked-by: David Sterba <dsterba@suse.com> [btrfs]
Acked-by: Richard Weinberger <richard@nod.at> [ubifs, mtd]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 9e82d35b
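For context: VM_READAHEAD_PAGES is the kernel's stock readahead window expressed in pages (SZ_128K / PAGE_SIZE in include/linux/mm.h, i.e. 32 pages with 4 KiB pages). The hunks below move that default out of blk_alloc_queue() and the individual filesystems into bdi_alloc(), and filesystems that want no readahead zero the fields afterwards. The following stand-alone sketch only illustrates the arithmetic; it is ordinary user-space C, not kernel code, and the 4 KiB page size and the three-device btrfs case are assumed example values.

	#include <stdio.h>

	#define SZ_128K (128UL * 1024)       /* default readahead window in bytes */
	#define SZ_4M   (4UL * 1024 * 1024)  /* btrfs lower bound, see the open_ctree hunk */

	int main(void)
	{
		unsigned long page_size = 4096;                 /* assumed PAGE_SIZE */
		unsigned long ra_pages = SZ_128K / page_size;   /* VM_READAHEAD_PAGES */

		printf("default ra_pages: %lu pages (%lu KiB)\n",
		       ra_pages, ra_pages * page_size / 1024);

		/* btrfs scales the default by the device count but never drops
		 * below 4 MiB (mirrors the open_ctree hunk below). */
		unsigned long num_devices = 3;                  /* example value */
		unsigned long btrfs_ra = ra_pages * num_devices;
		if (btrfs_ra < SZ_4M / page_size)
			btrfs_ra = SZ_4M / page_size;

		printf("btrfs ra_pages with %lu devices: %lu pages (%lu KiB)\n",
		       num_devices, btrfs_ra, btrfs_ra * page_size / 1024);
		return 0;
	}

With the stock 32-page window, the 4 MiB floor dominates until a btrfs filesystem spans 32 or more devices.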
@@ -538,8 +538,6 @@ struct request_queue *blk_alloc_queue(int node_id)
 	if (!q->stats)
 		goto fail_stats;
-	q->backing_dev_info->ra_pages = VM_READAHEAD_PAGES;
-	q->backing_dev_info->io_pages = VM_READAHEAD_PAGES;
 	q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
 	q->node = node_id;
@@ -2196,6 +2196,8 @@ static struct backing_dev_info * __init mtd_bdi_init(char *name)
 	bdi = bdi_alloc(NUMA_NO_NODE);
 	if (!bdi)
 		return ERR_PTR(-ENOMEM);
+	bdi->ra_pages = 0;
+	bdi->io_pages = 0;
 	/*
 	 * We put '-0' suffix to the name to get the same name format as we
@@ -80,8 +80,10 @@ v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses,
 	if (ret)
 		return ret;
-	if (v9ses->cache)
-		sb->s_bdi->ra_pages = VM_READAHEAD_PAGES;
+	if (!v9ses->cache) {
+		sb->s_bdi->ra_pages = 0;
+		sb->s_bdi->io_pages = 0;
+	}
 	sb->s_flags |= SB_ACTIVE | SB_DIRSYNC;
 	if (!v9ses->cache)
@@ -456,7 +456,6 @@ static int afs_fill_super(struct super_block *sb, struct afs_fs_context *ctx)
 	ret = super_setup_bdi(sb);
 	if (ret)
 		return ret;
-	sb->s_bdi->ra_pages = VM_READAHEAD_PAGES;
 	/* allocate the root inode and dentry */
 	if (as->dyn_root) {
@@ -3092,7 +3092,6 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices
 	}
 	sb->s_bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK;
-	sb->s_bdi->ra_pages = VM_READAHEAD_PAGES;
 	sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super);
 	sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE);
@@ -1049,7 +1049,6 @@ static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb)
 	if (err)
 		return err;
-	sb->s_bdi->ra_pages = VM_READAHEAD_PAGES;
 	/* fuse does it's own writeback accounting */
 	sb->s_bdi->capabilities = BDI_CAP_NO_ACCT_WB | BDI_CAP_STRICTLIMIT;
@@ -1200,13 +1200,6 @@ static void nfs_get_cache_cookie(struct super_block *sb,
 	}
 #endif
-static void nfs_set_readahead(struct backing_dev_info *bdi,
-			      unsigned long iomax_pages)
-{
-	bdi->ra_pages = VM_READAHEAD_PAGES;
-	bdi->io_pages = iomax_pages;
-}
 int nfs_get_tree_common(struct fs_context *fc)
 {
 	struct nfs_fs_context *ctx = nfs_fc2context(fc);
@@ -1251,7 +1244,7 @@ int nfs_get_tree_common(struct fs_context *fc)
 			  MINOR(server->s_dev));
 	if (error)
 		goto error_splat_super;
-	nfs_set_readahead(s->s_bdi, server->rpages);
+	s->s_bdi->io_pages = server->rpages;
 	server->super = s;
 	}
@@ -2177,6 +2177,8 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
 			   c->vi.vol_id);
 	if (err)
 		goto out_close;
+	sb->s_bdi->ra_pages = 0;
+	sb->s_bdi->io_pages = 0;
 	sb->s_fs_info = c;
 	sb->s_magic = UBIFS_SUPER_MAGIC;
@@ -167,6 +167,8 @@ static int vboxsf_fill_super(struct super_block *sb, struct fs_context *fc)
 	err = super_setup_bdi_name(sb, "vboxsf-%d", sbi->bdi_id);
 	if (err)
 		goto fail_free;
+	sb->s_bdi->ra_pages = 0;
+	sb->s_bdi->io_pages = 0;
 	/* Turn source into a shfl_string and map the folder */
 	size = strlen(fc->source) + 1;
@@ -746,6 +746,8 @@ struct backing_dev_info *bdi_alloc(int node_id)
 		kfree(bdi);
 		return NULL;
 	}
+	bdi->ra_pages = VM_READAHEAD_PAGES;
+	bdi->io_pages = VM_READAHEAD_PAGES;
 	return bdi;
 }
 EXPORT_SYMBOL(bdi_alloc);