Commit a0102bda authored by Jeff Layton, committed by Ilya Dryomov

ceph: move sb->wb_pagevec_pool to be a global mempool

When doing some testing recently, I hit some page allocation failures
on mount, when creating the wb_pagevec_pool for the mount. That
requires 128k (32 contiguous pages), and after thrashing the memory
during an xfstests run, sometimes that would fail.

128k for each mount seems like a lot to hold in reserve for a rainy
day, so let's change this to a global mempool that gets allocated
when the module is plugged in.
Signed-off-by: Jeff Layton <jlayton@kernel.org>
Reviewed-by: Ilya Dryomov <idryomov@gmail.com>
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
parent b748fc7a
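
For readers unfamiliar with the pattern being replaced: the per-superblock pool element had to hold one struct page pointer per page of the largest possible write, so the 128k figure above corresponds to a 64M wsize with 4k pages (16384 pointers * 8 bytes = 128k, an order-5, 32-page kmalloc) reserved for every mount. The sketch below shows the general shape of the replacement — one module-global, kmalloc-backed mempool created at module load, a plain kmalloc fast path, and the pool as a cannot-fail fallback. This is illustrative only; my_pvec_pool, MY_PVEC_PAGES and the helper names are made up for the example and are not the ceph code.

// Illustrative sketch of a module-global kmalloc-backed mempool.
// All identifiers here are hypothetical, not taken from fs/ceph.
#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/mm.h>

#define MY_PVEC_PAGES	32	/* page pointers per vector (example value) */

static mempool_t *my_pvec_pool;	/* one reserve shared by all users */

static struct page **my_alloc_pagevec(bool *from_pool)
{
	struct page **pages;

	/* Try a normal allocation first; fall back to the mempool.
	 * With GFP_NOFS (which can direct-reclaim), mempool_alloc()
	 * waits for a free element rather than returning NULL. */
	pages = kmalloc_array(MY_PVEC_PAGES, sizeof(*pages), GFP_NOFS);
	if (pages) {
		*from_pool = false;
		return pages;
	}
	*from_pool = true;
	return mempool_alloc(my_pvec_pool, GFP_NOFS);
}

static void my_free_pagevec(struct page **pages, bool from_pool)
{
	/* Return the vector to wherever it came from. */
	if (from_pool)
		mempool_free(pages, my_pvec_pool);
	else
		kfree(pages);
}

static int __init my_init(void)
{
	/* Ten vectors held in reserve for the whole module, instead of
	 * ten per mount; each element is MY_PVEC_PAGES page pointers. */
	my_pvec_pool = mempool_create_kmalloc_pool(10,
				MY_PVEC_PAGES * sizeof(struct page *));
	return my_pvec_pool ? 0 : -ENOMEM;
}

static void __exit my_exit(void)
{
	mempool_destroy(my_pvec_pool);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");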
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -862,8 +862,7 @@ static void writepages_finish(struct ceph_osd_request *req)
 	osd_data = osd_req_op_extent_osd_data(req, 0);
 	if (osd_data->pages_from_pool)
-		mempool_free(osd_data->pages,
-			     ceph_sb_to_client(inode->i_sb)->wb_pagevec_pool);
+		mempool_free(osd_data->pages, ceph_wb_pagevec_pool);
 	else
 		kfree(osd_data->pages);
 	ceph_osdc_put_request(req);
@@ -955,10 +954,10 @@ static int ceph_writepages_start(struct address_space *mapping,
 	int num_ops = 0, op_idx;
 	unsigned i, pvec_pages, max_pages, locked_pages = 0;
 	struct page **pages = NULL, **data_pages;
-	mempool_t *pool = NULL;	/* Becomes non-null if mempool used */
 	struct page *page;
 	pgoff_t strip_unit_end = 0;
 	u64 offset = 0, len = 0;
+	bool from_pool = false;
 
 	max_pages = wsize >> PAGE_SHIFT;
@@ -1057,16 +1056,16 @@ static int ceph_writepages_start(struct address_space *mapping,
 						     sizeof(*pages),
 						     GFP_NOFS);
 			if (!pages) {
-				pool = fsc->wb_pagevec_pool;
-				pages = mempool_alloc(pool, GFP_NOFS);
+				from_pool = true;
+				pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS);
 				BUG_ON(!pages);
 			}
 
 			len = 0;
 		} else if (page->index !=
 			   (offset + len) >> PAGE_SHIFT) {
-			if (num_ops >= (pool ? CEPH_OSD_SLAB_OPS :
+			if (num_ops >= (from_pool ? CEPH_OSD_SLAB_OPS :
 					CEPH_OSD_MAX_OPS)) {
 				redirty_page_for_writepage(wbc, page);
 				unlock_page(page);
 				break;
@@ -1161,7 +1160,7 @@ static int ceph_writepages_start(struct address_space *mapping,
 						offset, len);
 			osd_req_op_extent_osd_data_pages(req, op_idx,
 						data_pages, len, 0,
-						!!pool, false);
+						from_pool, false);
 			osd_req_op_extent_update(req, op_idx, len);
 
 			len = 0;
@@ -1188,12 +1187,12 @@ static int ceph_writepages_start(struct address_space *mapping,
 		dout("writepages got pages at %llu~%llu\n", offset, len);
 
 		osd_req_op_extent_osd_data_pages(req, op_idx, data_pages, len,
-						 0, !!pool, false);
+						 0, from_pool, false);
 		osd_req_op_extent_update(req, op_idx, len);
 
 		BUG_ON(op_idx + 1 != req->r_num_ops);
 
-		pool = NULL;
+		from_pool = false;
 		if (i < locked_pages) {
 			BUG_ON(num_ops <= req->r_num_ops);
 			num_ops -= req->r_num_ops;
@@ -1204,8 +1203,8 @@ static int ceph_writepages_start(struct address_space *mapping,
 		pages = kmalloc_array(locked_pages, sizeof(*pages),
 				      GFP_NOFS);
 		if (!pages) {
-			pool = fsc->wb_pagevec_pool;
-			pages = mempool_alloc(pool, GFP_NOFS);
+			from_pool = true;
+			pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS);
 			BUG_ON(!pages);
 		}
 		memcpy(pages, data_pages + i,
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -637,8 +637,6 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
 					struct ceph_options *opt)
 {
 	struct ceph_fs_client *fsc;
-	int page_count;
-	size_t size;
 	int err;
 
 	fsc = kzalloc(sizeof(*fsc), GFP_KERNEL);
@@ -686,22 +684,12 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
 	if (!fsc->cap_wq)
 		goto fail_inode_wq;
 
-	/* set up mempools */
-	err = -ENOMEM;
-	page_count = fsc->mount_options->wsize >> PAGE_SHIFT;
-	size = sizeof (struct page *) * (page_count ? page_count : 1);
-	fsc->wb_pagevec_pool = mempool_create_kmalloc_pool(10, size);
-	if (!fsc->wb_pagevec_pool)
-		goto fail_cap_wq;
-
 	spin_lock(&ceph_fsc_lock);
 	list_add_tail(&fsc->metric_wakeup, &ceph_fsc_list);
 	spin_unlock(&ceph_fsc_lock);
 
 	return fsc;
 
-fail_cap_wq:
-	destroy_workqueue(fsc->cap_wq);
 fail_inode_wq:
 	destroy_workqueue(fsc->inode_wq);
 fail_client:
@@ -732,8 +720,6 @@ static void destroy_fs_client(struct ceph_fs_client *fsc)
 	destroy_workqueue(fsc->inode_wq);
 	destroy_workqueue(fsc->cap_wq);
 
-	mempool_destroy(fsc->wb_pagevec_pool);
-
 	destroy_mount_options(fsc->mount_options);
 	ceph_destroy_client(fsc->client);
@@ -752,6 +738,7 @@ struct kmem_cache *ceph_dentry_cachep;
 struct kmem_cache *ceph_file_cachep;
 struct kmem_cache *ceph_dir_file_cachep;
 struct kmem_cache *ceph_mds_request_cachep;
+mempool_t *ceph_wb_pagevec_pool;
 
 static void ceph_inode_init_once(void *foo)
 {
@@ -796,6 +783,10 @@ static int __init init_caches(void)
 	if (!ceph_mds_request_cachep)
 		goto bad_mds_req;
+	ceph_wb_pagevec_pool = mempool_create_kmalloc_pool(10,
+	    (CEPH_MAX_WRITE_SIZE >> PAGE_SHIFT) * sizeof(struct page *));
+	if (!ceph_wb_pagevec_pool)
+		goto bad_pagevec_pool;
 
 	error = ceph_fscache_register();
 	if (error)
 		goto bad_fscache;
@@ -804,6 +795,8 @@ static int __init init_caches(void)
 bad_fscache:
 	kmem_cache_destroy(ceph_mds_request_cachep);
+bad_pagevec_pool:
+	mempool_destroy(ceph_wb_pagevec_pool);
 bad_mds_req:
 	kmem_cache_destroy(ceph_dir_file_cachep);
 bad_dir_file:
@@ -834,6 +827,7 @@ static void destroy_caches(void)
 	kmem_cache_destroy(ceph_file_cachep);
 	kmem_cache_destroy(ceph_dir_file_cachep);
 	kmem_cache_destroy(ceph_mds_request_cachep);
+	mempool_destroy(ceph_wb_pagevec_pool);
 
 	ceph_fscache_unregister();
 }
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -118,8 +118,6 @@ struct ceph_fs_client {
 	struct ceph_mds_client *mdsc;
 
-	/* writeback */
-	mempool_t *wb_pagevec_pool;
 	atomic_long_t writeback_count;
 
 	struct workqueue_struct *inode_wq;
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -282,6 +282,7 @@ extern struct kmem_cache *ceph_dentry_cachep;
 extern struct kmem_cache *ceph_file_cachep;
 extern struct kmem_cache *ceph_dir_file_cachep;
 extern struct kmem_cache *ceph_mds_request_cachep;
+extern mempool_t *ceph_wb_pagevec_pool;
 
 /* ceph_common.c */
 extern bool libceph_compatible(void *data);