Commit 49d1ec85 authored by Ming Lei, committed by Jens Axboe

block: manage bio slab cache by xarray

Manage the bio slab cache via an xarray, using the slab cache size as the
xarray index and storing the 'struct bio_slab' instance in the xarray.

This simplifies the code considerably and makes it more readable than before.
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Pavel Begunkov <asml.silence@gmail.com>
Tested-by: Pavel Begunkov <asml.silence@gmail.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 1a23e06c
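
For context: an xarray maps an unsigned long index to a pointer and grows on
demand, which is what lets this patch delete the manually resized bio_slabs
array and its bookkeeping. Below is a minimal sketch of the three calls the
patch relies on; the demo_cache type and the demo_* names are hypothetical,
for illustration only:

#include <linux/xarray.h>
#include <linux/slab.h>

struct demo_cache {
        unsigned int size;              /* hypothetical payload */
};

static DEFINE_XARRAY(demo_caches);      /* starts empty, no preallocation */

/* Return the entry stored at index @size, or store @new there on a miss. */
static struct demo_cache *demo_get(unsigned int size, struct demo_cache *new)
{
        struct demo_cache *c = xa_load(&demo_caches, size);    /* NULL on miss */

        if (c)
                return c;
        /* xa_store() returns the previous entry or an xa_err()-encoded errno. */
        if (xa_err(xa_store(&demo_caches, size, new, GFP_KERNEL)))
                return NULL;
        return new;
}

/* Remove the entry at index @size; xa_erase() returns the removed pointer. */
static void demo_del(unsigned int size)
{
        xa_erase(&demo_caches, size);
}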
--- a/block/bio.c
+++ b/block/bio.c
@@ -19,6 +19,7 @@
 #include <linux/highmem.h>
 #include <linux/sched/sysctl.h>
 #include <linux/blk-crypto.h>
+#include <linux/xarray.h>
 
 #include <trace/events/block.h>
 #include "blk.h"
@@ -58,89 +59,80 @@ struct bio_slab {
 	char name[8];
 };
 static DEFINE_MUTEX(bio_slab_lock);
-static struct bio_slab *bio_slabs;
-static unsigned int bio_slab_nr, bio_slab_max;
+static DEFINE_XARRAY(bio_slabs);
 
-static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
+static struct bio_slab *create_bio_slab(unsigned int size)
 {
-	unsigned int sz = sizeof(struct bio) + extra_size;
-	struct kmem_cache *slab = NULL;
-	struct bio_slab *bslab, *new_bio_slabs;
-	unsigned int new_bio_slab_max;
-	unsigned int i, entry = -1;
+	struct bio_slab *bslab = kzalloc(sizeof(*bslab), GFP_KERNEL);
 
-	mutex_lock(&bio_slab_lock);
+	if (!bslab)
+		return NULL;
 
-	i = 0;
-	while (i < bio_slab_nr) {
-		bslab = &bio_slabs[i];
-
-		if (!bslab->slab && entry == -1)
-			entry = i;
-		else if (bslab->slab_size == sz) {
-			slab = bslab->slab;
-			bslab->slab_ref++;
-			break;
-		}
-		i++;
-	}
+	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", size);
+	bslab->slab = kmem_cache_create(bslab->name, size,
+			ARCH_KMALLOC_MINALIGN, SLAB_HWCACHE_ALIGN, NULL);
+	if (!bslab->slab)
+		goto fail_alloc_slab;
 
-	if (slab)
-		goto out_unlock;
+	bslab->slab_ref = 1;
+	bslab->slab_size = size;
 
-	if (bio_slab_nr == bio_slab_max && entry == -1) {
-		new_bio_slab_max = bio_slab_max << 1;
-		new_bio_slabs = krealloc(bio_slabs,
-					 new_bio_slab_max * sizeof(struct bio_slab),
-					 GFP_KERNEL);
-		if (!new_bio_slabs)
-			goto out_unlock;
-		bio_slab_max = new_bio_slab_max;
-		bio_slabs = new_bio_slabs;
-	}
-	if (entry == -1)
-		entry = bio_slab_nr++;
+	if (!xa_err(xa_store(&bio_slabs, size, bslab, GFP_KERNEL)))
+		return bslab;
 
-	bslab = &bio_slabs[entry];
+	kmem_cache_destroy(bslab->slab);
 
-	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
-	slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN,
-				 SLAB_HWCACHE_ALIGN, NULL);
-	if (!slab)
-		goto out_unlock;
+fail_alloc_slab:
+	kfree(bslab);
+	return NULL;
+}
 
-	bslab->slab = slab;
-	bslab->slab_ref = 1;
-	bslab->slab_size = sz;
-out_unlock:
+static inline unsigned int bs_bio_slab_size(struct bio_set *bs)
+{
+	return bs->front_pad + sizeof(struct bio) +
+		BIO_INLINE_VECS * sizeof(struct bio_vec);
+}
+
+static struct kmem_cache *bio_find_or_create_slab(struct bio_set *bs)
+{
+	unsigned int size = bs_bio_slab_size(bs);
+	struct bio_slab *bslab;
+
+	mutex_lock(&bio_slab_lock);
+	bslab = xa_load(&bio_slabs, size);
+	if (bslab)
+		bslab->slab_ref++;
+	else
+		bslab = create_bio_slab(size);
 	mutex_unlock(&bio_slab_lock);
-	return slab;
+
+	if (bslab)
+		return bslab->slab;
+	return NULL;
 }
 
 static void bio_put_slab(struct bio_set *bs)
 {
 	struct bio_slab *bslab = NULL;
-	unsigned int i;
+	unsigned int slab_size = bs_bio_slab_size(bs);
 
 	mutex_lock(&bio_slab_lock);
 
-	for (i = 0; i < bio_slab_nr; i++) {
-		if (bs->bio_slab == bio_slabs[i].slab) {
-			bslab = &bio_slabs[i];
-			break;
-		}
-	}
+	bslab = xa_load(&bio_slabs, slab_size);
 
 	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
 		goto out;
 
+	WARN_ON_ONCE(bslab->slab != bs->bio_slab);
+
 	WARN_ON(!bslab->slab_ref);
 
 	if (--bslab->slab_ref)
 		goto out;
 
+	xa_erase(&bio_slabs, slab_size);
+
 	kmem_cache_destroy(bslab->slab);
-	bslab->slab = NULL;
+	kfree(bslab);
 
 out:
 	mutex_unlock(&bio_slab_lock);
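
To make the refcounting concrete, here is a hypothetical call sequence (not
part of the patch) for two bio_sets whose bs_bio_slab_size() works out to the
same value, so they share a single bio_slab:

struct bio_set a, b;

bioset_init(&a, 16, 0, 0);      /* miss: create_bio_slab(), slab_ref = 1 */
bioset_init(&b, 16, 0, 0);      /* same front_pad, so same size: xa_load() hits, slab_ref = 2 */

bioset_exit(&a);                /* bio_put_slab(): slab_ref drops to 1, slab kept */
bioset_exit(&b);                /* slab_ref hits 0: xa_erase() + kmem_cache_destroy() */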
@@ -1570,15 +1562,13 @@ int bioset_init(struct bio_set *bs,
 		unsigned int front_pad,
 		int flags)
 {
-	unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
-
 	bs->front_pad = front_pad;
 
 	spin_lock_init(&bs->rescue_lock);
 	bio_list_init(&bs->rescue_list);
 	INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
 
-	bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
+	bs->bio_slab = bio_find_or_create_slab(bs);
 	if (!bs->bio_slab)
 		return -ENOMEM;
@@ -1642,16 +1632,8 @@ static void __init biovec_init_slabs(void)
 
 static int __init init_bio(void)
 {
-	bio_slab_max = 2;
-	bio_slab_nr = 0;
-	bio_slabs = kcalloc(bio_slab_max, sizeof(struct bio_slab),
-			    GFP_KERNEL);
-
 	BUILD_BUG_ON(BIO_FLAG_LAST > BVEC_POOL_OFFSET);
-	if (!bio_slabs)
-		panic("bio: can't allocate bios\n");
 
 	bio_integrity_init();
 	biovec_init_slabs();
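
A design note on locking: the xarray's internal spinlock only makes individual
operations safe, so bio_slab_lock is still needed to make the xa_load() miss,
the slab creation, and the slab_ref update atomic as a unit. A lock-free
insertion would instead use xa_cmpxchg(); a hypothetical sketch of that
alternative (not what the patch does, and it would still leave the refcount
unprotected):

/* Publish @new at index @size only if the slot is still empty. */
static struct bio_slab *try_publish(struct bio_slab *new, unsigned int size)
{
        struct bio_slab *old = xa_cmpxchg(&bio_slabs, size, NULL, new,
                                          GFP_KERNEL);

        if (!old)
                return new;     /* we won the race; @new is installed */
        if (xa_is_err(old))
                return NULL;    /* internal allocation failure */
        return old;             /* another CPU installed an entry first */
}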