Commit cb6fc943 authored by Kent Overstreet

bcachefs: kill kvpmalloc()

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 0225bdfa
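Editorial context (not part of the commit message): kvpmalloc()/kvpfree() were bcachefs-local wrappers that used kmalloc() below PAGE_SIZE and a page/vmalloc path above it, which forced every free site to remember the allocation size. The stock kvmalloc()/kvfree() pair covers the same cases, and kvfree() detects vmalloc memory itself (via is_vmalloc_addr()), so the size argument disappears at every call site converted below. A minimal sketch of the resulting pattern, with hypothetical example_* names:

```c
#include <linux/slab.h>		/* kvmalloc(), kvfree() */

/* example_alloc()/example_free() are illustrative names, not bcachefs API. */
static void *example_alloc(size_t size)
{
	/* kmalloc first, vmalloc fallback for large/fragmented requests */
	return kvmalloc(size, GFP_KERNEL);
}

static void example_free(void *p)
{
	/* NULL-safe; inspects the address, so no size argument is needed */
	kvfree(p);
}
```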
@@ -60,7 +60,7 @@ static void btree_node_data_free(struct bch_fs *c, struct btree *b)
 	clear_btree_node_just_written(b);
-	kvpfree(b->data, btree_buf_bytes(b));
+	kvfree(b->data);
 	b->data = NULL;
 #ifdef __KERNEL__
 	kvfree(b->aux_data);
@@ -94,7 +94,7 @@ static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
 {
 	BUG_ON(b->data || b->aux_data);
-	b->data = kvpmalloc(btree_buf_bytes(b), gfp);
+	b->data = kvmalloc(btree_buf_bytes(b), gfp);
 	if (!b->data)
 		return -BCH_ERR_ENOMEM_btree_node_mem_alloc;
 #ifdef __KERNEL__
@@ -107,7 +107,7 @@ static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
 	b->aux_data = NULL;
 #endif
 	if (!b->aux_data) {
-		kvpfree(b->data, btree_buf_bytes(b));
+		kvfree(b->data);
 		b->data = NULL;
 		return -BCH_ERR_ENOMEM_btree_node_mem_alloc;
 	}
@@ -408,7 +408,7 @@ void bch2_fs_btree_cache_exit(struct bch_fs *c)
 	if (c->verify_data)
 		list_move(&c->verify_data->list, &bc->live);
-	kvpfree(c->verify_ondisk, c->opts.btree_node_size);
+	kvfree(c->verify_ondisk);
 	for (i = 0; i < btree_id_nr_alive(c); i++) {
 		struct btree_root *r = bch2_btree_id_root(c, i);
...
@@ -1193,9 +1193,7 @@ static void bch2_gc_free(struct bch_fs *c)
 	genradix_free(&c->gc_stripes);
 	for_each_member_device(c, ca) {
-		kvpfree(rcu_dereference_protected(ca->buckets_gc, 1),
-			sizeof(struct bucket_array) +
-			ca->mi.nbuckets * sizeof(struct bucket));
+		kvfree(rcu_dereference_protected(ca->buckets_gc, 1));
 		ca->buckets_gc = NULL;
 		free_percpu(ca->usage_gc);
@@ -1494,7 +1492,7 @@ static int bch2_gc_alloc_done(struct bch_fs *c, bool metadata_only)
 static int bch2_gc_alloc_start(struct bch_fs *c, bool metadata_only)
 {
 	for_each_member_device(c, ca) {
-		struct bucket_array *buckets = kvpmalloc(sizeof(struct bucket_array) +
+		struct bucket_array *buckets = kvmalloc(sizeof(struct bucket_array) +
 				ca->mi.nbuckets * sizeof(struct bucket),
 				GFP_KERNEL|__GFP_ZERO);
 		if (!buckets) {
...
@@ -103,7 +103,7 @@ static void btree_bounce_free(struct bch_fs *c, size_t size,
 	if (used_mempool)
 		mempool_free(p, &c->btree_bounce_pool);
 	else
-		vpfree(p, size);
+		kvfree(p);
 }
 
 static void *btree_bounce_alloc(struct bch_fs *c, size_t size,
@@ -115,7 +115,7 @@ static void *btree_bounce_alloc(struct bch_fs *c, size_t size,
 	BUG_ON(size > c->opts.btree_node_size);
 	*used_mempool = false;
-	p = vpmalloc(size, __GFP_NOWARN|GFP_NOWAIT);
+	p = kvmalloc(size, __GFP_NOWARN|GFP_NOWAIT);
 	if (!p) {
 		*used_mempool = true;
 		p = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
...
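The two hunks above keep the bounce-buffer strategy intact; only the allocator changes. The pattern is worth spelling out: try a cheap, non-blocking allocation first, and fall back to a preallocated mempool, which can always serve a request once its reserve is filled. A generic sketch of that pattern (hypothetical names, not the bcachefs functions):

```c
#include <linux/mempool.h>
#include <linux/slab.h>

/* Opportunistic allocation with guaranteed-forward-progress fallback. */
static void *bounce_alloc(mempool_t *pool, size_t size, bool *used_mempool)
{
	/* fast path: never blocks, never warns on failure */
	void *p = kvmalloc(size, __GFP_NOWARN|GFP_NOWAIT);

	*used_mempool = false;
	if (!p) {
		*used_mempool = true;
		p = mempool_alloc(pool, GFP_NOFS);	/* may sleep, cannot fail */
	}
	return p;
}

static void bounce_free(mempool_t *pool, void *p, bool used_mempool)
{
	if (used_mempool)
		mempool_free(p, pool);
	else
		kvfree(p);
}
```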
@@ -447,9 +447,7 @@ void bch2_journal_entries_free(struct bch_fs *c)
 	struct genradix_iter iter;
 
 	genradix_for_each(&c->journal_entries, iter, i)
-		if (*i)
-			kvpfree(*i, offsetof(struct journal_replay, j) +
-				vstruct_bytes(&(*i)->j));
+		kvfree(*i);
 	genradix_free(&c->journal_entries);
 }
...
@@ -1335,7 +1335,7 @@ static void bucket_gens_free_rcu(struct rcu_head *rcu)
 	struct bucket_gens *buckets =
 		container_of(rcu, struct bucket_gens, rcu);
 
-	kvpfree(buckets, sizeof(*buckets) + buckets->nbuckets);
+	kvfree(buckets);
 }
 
 int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
@@ -1345,16 +1345,16 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
 	bool resize = ca->bucket_gens != NULL;
 	int ret;
 
-	if (!(bucket_gens = kvpmalloc(sizeof(struct bucket_gens) + nbuckets,
-				      GFP_KERNEL|__GFP_ZERO))) {
+	if (!(bucket_gens = kvmalloc(sizeof(struct bucket_gens) + nbuckets,
+				     GFP_KERNEL|__GFP_ZERO))) {
 		ret = -BCH_ERR_ENOMEM_bucket_gens;
 		goto err;
 	}
 	if ((c->opts.buckets_nouse &&
-	     !(buckets_nouse = kvpmalloc(BITS_TO_LONGS(nbuckets) *
-					 sizeof(unsigned long),
-					 GFP_KERNEL|__GFP_ZERO)))) {
+	     !(buckets_nouse = kvmalloc(BITS_TO_LONGS(nbuckets) *
+					sizeof(unsigned long),
+					GFP_KERNEL|__GFP_ZERO)))) {
 		ret = -BCH_ERR_ENOMEM_buckets_nouse;
 		goto err;
 	}
@@ -1397,8 +1397,7 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
 	ret = 0;
 err:
-	kvpfree(buckets_nouse,
-		BITS_TO_LONGS(nbuckets) * sizeof(unsigned long));
+	kvfree(buckets_nouse);
 	if (bucket_gens)
 		call_rcu(&bucket_gens->rcu, bucket_gens_free_rcu);
@@ -1407,27 +1406,21 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
 
 void bch2_dev_buckets_free(struct bch_dev *ca)
 {
-	unsigned i;
-
-	kvpfree(ca->buckets_nouse,
-		BITS_TO_LONGS(ca->mi.nbuckets) * sizeof(unsigned long));
-	kvpfree(rcu_dereference_protected(ca->bucket_gens, 1),
-		sizeof(struct bucket_gens) + ca->mi.nbuckets);
+	kvfree(ca->buckets_nouse);
+	kvfree(rcu_dereference_protected(ca->bucket_gens, 1));
 
-	for (i = 0; i < ARRAY_SIZE(ca->usage); i++)
+	for (unsigned i = 0; i < ARRAY_SIZE(ca->usage); i++)
 		free_percpu(ca->usage[i]);
 	kfree(ca->usage_base);
 }
 
 int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
 {
-	unsigned i;
-
 	ca->usage_base = kzalloc(sizeof(struct bch_dev_usage), GFP_KERNEL);
 	if (!ca->usage_base)
 		return -BCH_ERR_ENOMEM_usage_init;
 
-	for (i = 0; i < ARRAY_SIZE(ca->usage); i++) {
+	for (unsigned i = 0; i < ARRAY_SIZE(ca->usage); i++) {
 		ca->usage[i] = alloc_percpu(struct bch_dev_usage);
 		if (!ca->usage[i])
 			return -BCH_ERR_ENOMEM_usage_init;
...
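The bucket_gens hunks show why kvfree() particularly simplifies RCU-deferred frees: the RCU callback only receives the rcu_head, so with kvpfree() the object had to carry enough state (here nbuckets) for the callback to recompute its own allocation size. A generic sketch of the pattern, with hypothetical names:

```c
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* A kvmalloc()'d object retired via RCU; names are illustrative only. */
struct gens {
	struct rcu_head	rcu;
	u8		data[];		/* flexible array, size chosen at alloc time */
};

static void gens_free_rcu(struct rcu_head *rcu)
{
	/* recover the enclosing object; kvfree() needs no size argument */
	kvfree(container_of(rcu, struct gens, rcu));
}

/*
 * Retire with: call_rcu(&old->rcu, gens_free_rcu);
 * Readers under rcu_read_lock() keep using the old copy until the
 * grace period ends, after which gens_free_rcu() runs.
 */
```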
@@ -601,13 +601,13 @@ static int __bch2_fs_compress_init(struct bch_fs *c, u64 features)
 		return 0;
 
 	if (!mempool_initialized(&c->compression_bounce[READ]) &&
-	    mempool_init_kvpmalloc_pool(&c->compression_bounce[READ],
-					1, c->opts.encoded_extent_max))
+	    mempool_init_kvmalloc_pool(&c->compression_bounce[READ],
+				       1, c->opts.encoded_extent_max))
 		return -BCH_ERR_ENOMEM_compression_bounce_read_init;
 
 	if (!mempool_initialized(&c->compression_bounce[WRITE]) &&
-	    mempool_init_kvpmalloc_pool(&c->compression_bounce[WRITE],
-					1, c->opts.encoded_extent_max))
+	    mempool_init_kvmalloc_pool(&c->compression_bounce[WRITE],
+				       1, c->opts.encoded_extent_max))
 		return -BCH_ERR_ENOMEM_compression_bounce_write_init;
 
 	for (i = compression_types;
@@ -622,15 +622,15 @@ static int __bch2_fs_compress_init(struct bch_fs *c, u64 features)
 		if (mempool_initialized(&c->compress_workspace[i->type]))
 			continue;
 
-		if (mempool_init_kvpmalloc_pool(
+		if (mempool_init_kvmalloc_pool(
 				&c->compress_workspace[i->type],
 				1, i->compress_workspace))
 			return -BCH_ERR_ENOMEM_compression_workspace_init;
 	}
 
 	if (!mempool_initialized(&c->decompress_workspace) &&
-	    mempool_init_kvpmalloc_pool(&c->decompress_workspace,
-					1, decompress_workspace_size))
+	    mempool_init_kvmalloc_pool(&c->decompress_workspace,
+				       1, decompress_workspace_size))
 		return -BCH_ERR_ENOMEM_decompression_workspace_init;
 
 	return 0;
...
@@ -137,7 +137,7 @@ void __bch2_btree_verify(struct bch_fs *c, struct btree *b)
 	mutex_lock(&c->verify_lock);
 
 	if (!c->verify_ondisk) {
-		c->verify_ondisk = kvpmalloc(btree_buf_bytes(b), GFP_KERNEL);
+		c->verify_ondisk = kvmalloc(btree_buf_bytes(b), GFP_KERNEL);
 		if (!c->verify_ondisk)
 			goto out;
 	}
@@ -199,7 +199,7 @@ void bch2_btree_node_ondisk_to_text(struct printbuf *out, struct bch_fs *c,
 		return;
 	}
 
-	n_ondisk = kvpmalloc(btree_buf_bytes(b), GFP_KERNEL);
+	n_ondisk = kvmalloc(btree_buf_bytes(b), GFP_KERNEL);
 	if (!n_ondisk) {
 		prt_printf(out, "memory allocation failure\n");
 		goto out;
@@ -293,7 +293,7 @@ void bch2_btree_node_ondisk_to_text(struct printbuf *out, struct bch_fs *c,
 out:
 	if (bio)
 		bio_put(bio);
-	kvpfree(n_ondisk, btree_buf_bytes(b));
+	kvfree(n_ondisk);
 	percpu_ref_put(&ca->io_ref);
 }
...
@@ -504,7 +504,7 @@ static void ec_stripe_buf_exit(struct ec_stripe_buf *buf)
 		unsigned i;
 
 		for (i = 0; i < s->v.nr_blocks; i++) {
-			kvpfree(buf->data[i], buf->size << 9);
+			kvfree(buf->data[i]);
 			buf->data[i] = NULL;
 		}
 	}
@@ -531,7 +531,7 @@ static int ec_stripe_buf_init(struct ec_stripe_buf *buf,
 	memset(buf->valid, 0xFF, sizeof(buf->valid));
 
 	for (i = 0; i < v->nr_blocks; i++) {
-		buf->data[i] = kvpmalloc(buf->size << 9, GFP_KERNEL);
+		buf->data[i] = kvmalloc(buf->size << 9, GFP_KERNEL);
 		if (!buf->data[i])
 			goto err;
 	}
...
@@ -24,12 +24,12 @@ struct {						\
 	(fifo)->mask	= (fifo)->size				\
 		? roundup_pow_of_two((fifo)->size) - 1		\
 		: 0;						\
-	(fifo)->data	= kvpmalloc(fifo_buf_size(fifo), (_gfp));\
+	(fifo)->data	= kvmalloc(fifo_buf_size(fifo), (_gfp));\
 })
 
 #define free_fifo(fifo)						\
 do {								\
-	kvpfree((fifo)->data, fifo_buf_size(fifo));		\
+	kvfree((fifo)->data);					\
 	(fifo)->data = NULL;					\
 } while (0)
...
@@ -1343,7 +1343,7 @@ void bch2_fs_journal_exit(struct journal *j)
 	darray_exit(&j->early_journal_entries);
 
 	for (unsigned i = 0; i < ARRAY_SIZE(j->buf); i++)
-		kvpfree(j->buf[i].data, j->buf[i].buf_size);
+		kvfree(j->buf[i].data);
 	free_fifo(&j->pin);
 }
@@ -1372,7 +1372,7 @@ int bch2_fs_journal_init(struct journal *j)
 
 	for (unsigned i = 0; i < ARRAY_SIZE(j->buf); i++) {
 		j->buf[i].buf_size = JOURNAL_ENTRY_SIZE_MIN;
-		j->buf[i].data = kvpmalloc(j->buf[i].buf_size, GFP_KERNEL);
+		j->buf[i].data = kvmalloc(j->buf[i].buf_size, GFP_KERNEL);
 		if (!j->buf[i].data)
 			return -BCH_ERR_ENOMEM_journal_buf;
 		j->buf[i].idx = i;
...
@@ -84,8 +84,7 @@ static void __journal_replay_free(struct bch_fs *c,
 	BUG_ON(*p != i);
 
 	*p = NULL;
-	kvpfree(i, offsetof(struct journal_replay, j) +
-		vstruct_bytes(&i->j));
+	kvfree(i);
 }
 
 static void journal_replay_free(struct bch_fs *c, struct journal_replay *i)
@@ -196,7 +195,7 @@ static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
 		goto out;
 	}
 replace:
-	i = kvpmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL);
+	i = kvmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL);
 	if (!i)
 		return -BCH_ERR_ENOMEM_journal_entry_add;
@@ -965,11 +964,11 @@ static int journal_read_buf_realloc(struct journal_read_buf *b,
 		return -BCH_ERR_ENOMEM_journal_read_buf_realloc;
 
 	new_size = roundup_pow_of_two(new_size);
-	n = kvpmalloc(new_size, GFP_KERNEL);
+	n = kvmalloc(new_size, GFP_KERNEL);
 	if (!n)
 		return -BCH_ERR_ENOMEM_journal_read_buf_realloc;
 
-	kvpfree(b->data, b->size);
+	kvfree(b->data);
 	b->data = n;
 	b->size = new_size;
 	return 0;
@@ -1195,7 +1194,7 @@ static CLOSURE_CALLBACK(bch2_journal_read_device)
 	ja->dirty_idx = (ja->cur_idx + 1) % ja->nr;
 out:
 	bch_verbose(c, "journal read done on device %s, ret %i", ca->name, ret);
-	kvpfree(buf.data, buf.size);
+	kvfree(buf.data);
 	percpu_ref_put(&ca->io_ref);
 	closure_return(cl);
 	return;
@@ -1576,7 +1575,7 @@ static void journal_buf_realloc(struct journal *j, struct journal_buf *buf)
 	if (bch2_btree_write_buffer_resize(c, btree_write_buffer_size))
 		return;
 
-	new_buf = kvpmalloc(new_size, GFP_NOFS|__GFP_NOWARN);
+	new_buf = kvmalloc(new_size, GFP_NOFS|__GFP_NOWARN);
 	if (!new_buf)
 		return;
@@ -1587,7 +1586,7 @@ static void journal_buf_realloc(struct journal *j, struct journal_buf *buf)
 	swap(buf->buf_size, new_size);
 	spin_unlock(&j->lock);
 
-	kvpfree(new_buf, new_size);
+	kvfree(new_buf);
 }
 
 static inline struct journal_buf *journal_last_unwritten_buf(struct journal *j)
...
@@ -576,7 +576,7 @@ static void __bch2_fs_free(struct bch_fs *c)
 	destroy_workqueue(c->btree_update_wq);
 
 	bch2_free_super(&c->disk_sb);
-	kvpfree(c, sizeof(*c));
+	kvfree(c);
 	module_put(THIS_MODULE);
 }
@@ -715,7 +715,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
 	unsigned i, iter_size;
 	int ret = 0;
 
-	c = kvpmalloc(sizeof(struct bch_fs), GFP_KERNEL|__GFP_ZERO);
+	c = kvmalloc(sizeof(struct bch_fs), GFP_KERNEL|__GFP_ZERO);
 	if (!c) {
 		c = ERR_PTR(-BCH_ERR_ENOMEM_fs_alloc);
 		goto out;
@@ -882,8 +882,8 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
 			BIOSET_NEED_BVECS) ||
 	    !(c->pcpu = alloc_percpu(struct bch_fs_pcpu)) ||
 	    !(c->online_reserved = alloc_percpu(u64)) ||
-	    mempool_init_kvpmalloc_pool(&c->btree_bounce_pool, 1,
-					c->opts.btree_node_size) ||
+	    mempool_init_kvmalloc_pool(&c->btree_bounce_pool, 1,
+				       c->opts.btree_node_size) ||
 	    mempool_init_kmalloc_pool(&c->large_bkey_pool, 1, 2048) ||
 	    !(c->unused_inode_hints = kcalloc(1U << c->inode_shard_bits,
 					      sizeof(u64), GFP_KERNEL))) {
...
@@ -1007,28 +1007,6 @@ void sort_cmp_size(void *base, size_t num, size_t size,
 	}
 }
 
-static void mempool_free_vp(void *element, void *pool_data)
-{
-	size_t size = (size_t) pool_data;
-
-	vpfree(element, size);
-}
-
-static void *mempool_alloc_vp(gfp_t gfp_mask, void *pool_data)
-{
-	size_t size = (size_t) pool_data;
-
-	return vpmalloc(size, gfp_mask);
-}
-
-int mempool_init_kvpmalloc_pool(mempool_t *pool, int min_nr, size_t size)
-{
-	return size < PAGE_SIZE
-		? mempool_init_kmalloc_pool(pool, min_nr, size)
-		: mempool_init(pool, min_nr, mempool_alloc_vp,
-			       mempool_free_vp, (void *) size);
-}
-
 #if 0
 void eytzinger1_test(void)
 {
...
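The private mempool glue above can go because the callers converted earlier in this diff now use mempool_init_kvmalloc_pool(). Conceptually it is the same construction with kvmalloc()/kvfree() as the element callbacks and the element size smuggled through pool_data; a sketch under that assumption (hypothetical names, not the in-tree definition):

```c
#include <linux/mempool.h>
#include <linux/slab.h>

/* Illustrative equivalent of a kvmalloc-backed mempool. */
static void *kvmalloc_element(gfp_t gfp_mask, void *pool_data)
{
	return kvmalloc((size_t) pool_data, gfp_mask);
}

static void kvfree_element(void *element, void *pool_data)
{
	kvfree(element);	/* address-based free, pool_data unused */
}

static int init_kvmalloc_pool(mempool_t *pool, int min_nr, size_t size)
{
	/* preallocate min_nr elements of 'size' bytes as the reserve */
	return mempool_init(pool, min_nr, kvmalloc_element,
			    kvfree_element, (void *) size);
}
```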
@@ -53,38 +53,6 @@ static inline size_t buf_pages(void *p, size_t len)
 			    PAGE_SIZE);
 }
 
-static inline void vpfree(void *p, size_t size)
-{
-	if (is_vmalloc_addr(p))
-		vfree(p);
-	else
-		free_pages((unsigned long) p, get_order(size));
-}
-
-static inline void *vpmalloc(size_t size, gfp_t gfp_mask)
-{
-	return (void *) __get_free_pages(gfp_mask|__GFP_NOWARN,
-					 get_order(size)) ?:
-		__vmalloc(size, gfp_mask);
-}
-
-static inline void kvpfree(void *p, size_t size)
-{
-	if (size < PAGE_SIZE)
-		kfree(p);
-	else
-		vpfree(p, size);
-}
-
-static inline void *kvpmalloc(size_t size, gfp_t gfp_mask)
-{
-	return size < PAGE_SIZE
-		? kmalloc(size, gfp_mask)
-		: vpmalloc(size, gfp_mask);
-}
-
-int mempool_init_kvpmalloc_pool(mempool_t *, int, size_t);
-
 #define HEAP(type)						\
 struct {							\
 	size_t size, used;					\
@@ -97,13 +65,13 @@ struct {						\
 ({								\
 	(heap)->used = 0;					\
 	(heap)->size = (_size);					\
-	(heap)->data = kvpmalloc((heap)->size * sizeof((heap)->data[0]),\
-				 (gfp));			\
+	(heap)->data = kvmalloc((heap)->size * sizeof((heap)->data[0]),\
+				(gfp));				\
 })
 
 #define free_heap(heap)						\
 do {								\
-	kvpfree((heap)->data, (heap)->size * sizeof((heap)->data[0]));	\
+	kvfree((heap)->data);					\
 	(heap)->data = NULL;					\
 } while (0)
...
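A hypothetical usage sketch of the converted heap macros: init_heap() evaluates to the freshly assigned data pointer, so callers can test it directly, and free_heap() no longer has to recompute the allocation size for the free path:

```c
#include <linux/slab.h>
#include <linux/types.h>

/* heap_demo() and u64_heap are illustrative names, not bcachefs API. */
typedef HEAP(u64) u64_heap;	/* struct { size_t size, used; u64 *data; } */

static int heap_demo(void)
{
	u64_heap h;

	if (!init_heap(&h, 128, GFP_KERNEL))	/* kvmalloc(128 * sizeof(u64), ...) */
		return -ENOMEM;
	/* ... use h.data[0 .. h.used) ... */
	free_heap(&h);				/* kvfree() + NULL the pointer */
	return 0;
}
```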