Commit f0f41a6d authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Add error messages for memory allocation failures

This adds some diagnostics that were missing from rare, but annoying-to-debug,
runtime allocation failure paths.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
parent 5ba2fd11
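
Every hunk below applies the same pattern: an allocation failure path that previously returned -ENOMEM silently now logs a bch_err() message naming the structure that could not be allocated and, where available, the btree and request size; in several helpers this means threading a struct bch_fs pointer through. A minimal sketch of that pattern, using a hypothetical helper name (alloc_key_buf() is not part of this commit; bch_err(), bch2_btree_ids[] and the message text are taken from the hunks below):

/* Sketch only: alloc_key_buf() is illustrative, not a function in this commit. */
static struct bkey_i *alloc_key_buf(struct bch_fs *c, enum btree_id btree_id,
                                    unsigned u64s)
{
        struct bkey_i *new_k = kmalloc(u64s * sizeof(u64), GFP_NOFS);

        if (!new_k) {
                /* Say which btree and how large the request was before failing */
                bch_err(c, "error allocating memory for key cache key, btree %s u64s %u",
                        bch2_btree_ids[btree_id], u64s);
                return NULL;
        }

        return new_k;
}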
@@ -147,19 +147,23 @@ bkey_cached_reuse(struct btree_key_cache *c)
 }
 
 static struct bkey_cached *
-btree_key_cache_create(struct btree_key_cache *c,
+btree_key_cache_create(struct bch_fs *c,
                        enum btree_id btree_id,
                        struct bpos pos)
 {
+        struct btree_key_cache *bc = &c->btree_key_cache;
         struct bkey_cached *ck;
         bool was_new = true;
 
-        ck = bkey_cached_alloc(c);
+        ck = bkey_cached_alloc(bc);
 
         if (unlikely(!ck)) {
-                ck = bkey_cached_reuse(c);
-                if (unlikely(!ck))
+                ck = bkey_cached_reuse(bc);
+                if (unlikely(!ck)) {
+                        bch_err(c, "error allocating memory for key cache item, btree %s",
+                                bch2_btree_ids[btree_id]);
                         return ERR_PTR(-ENOMEM);
+                }
 
                 was_new = false;
         }
@@ -176,7 +180,7 @@ btree_key_cache_create(struct btree_key_cache *c,
         ck->valid               = false;
         ck->flags               = 1U << BKEY_CACHED_ACCESSED;
 
-        if (unlikely(rhashtable_lookup_insert_fast(&c->table,
+        if (unlikely(rhashtable_lookup_insert_fast(&bc->table,
                                           &ck->hash,
                                           bch2_btree_key_cache_params))) {
                 /* We raced with another fill: */
@@ -186,15 +190,15 @@ btree_key_cache_create(struct btree_key_cache *c,
                         six_unlock_intent(&ck->c.lock);
                         kfree(ck);
                 } else {
-                        mutex_lock(&c->lock);
-                        bkey_cached_free(c, ck);
-                        mutex_unlock(&c->lock);
+                        mutex_lock(&bc->lock);
+                        bkey_cached_free(bc, ck);
+                        mutex_unlock(&bc->lock);
                 }
 
                 return NULL;
         }
 
-        atomic_long_inc(&c->nr_keys);
+        atomic_long_inc(&bc->nr_keys);
 
         six_unlock_write(&ck->c.lock);
@@ -205,6 +209,7 @@ static int btree_key_cache_fill(struct btree_trans *trans,
                              struct btree_path *ck_path,
                              struct bkey_cached *ck)
 {
+        struct bch_fs *c = trans->c;
         struct btree_iter iter;
         struct bkey_s_c k;
         unsigned new_u64s = 0;
@@ -234,6 +239,8 @@ static int btree_key_cache_fill(struct btree_trans *trans,
                 new_u64s        = roundup_pow_of_two(new_u64s);
                 new_k           = kmalloc(new_u64s * sizeof(u64), GFP_NOFS);
                 if (!new_k) {
+                        bch_err(c, "error allocating memory for key cache key, btree %s u64s %u",
+                                bch2_btree_ids[ck->key.btree_id], new_u64s);
                         ret = -ENOMEM;
                         goto err;
                 }
@@ -294,8 +301,7 @@ int bch2_btree_path_traverse_cached(struct btree_trans *trans, struct btree_path
                 return 0;
         }
 
-        ck = btree_key_cache_create(&c->btree_key_cache,
-                                    path->btree_id, path->pos);
+        ck = btree_key_cache_create(c, path->btree_id, path->pos);
         ret = PTR_ERR_OR_ZERO(ck);
         if (ret)
                 goto err;
...
@@ -308,6 +308,7 @@ btree_key_can_insert_cached(struct btree_trans *trans,
                             struct btree_path *path,
                             unsigned u64s)
 {
+        struct bch_fs *c = trans->c;
         struct bkey_cached *ck = (void *) path->l[0].b;
         unsigned new_u64s;
         struct bkey_i *new_k;
@@ -315,7 +316,7 @@ btree_key_can_insert_cached(struct btree_trans *trans,
         EBUG_ON(path->level);
 
         if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags) &&
-            bch2_btree_key_cache_must_wait(trans->c) &&
+            bch2_btree_key_cache_must_wait(c) &&
             !(trans->flags & BTREE_INSERT_JOURNAL_RECLAIM))
                 return BTREE_INSERT_NEED_JOURNAL_RECLAIM;
@@ -330,8 +331,11 @@ btree_key_can_insert_cached(struct btree_trans *trans,
         new_u64s        = roundup_pow_of_two(u64s);
         new_k           = krealloc(ck->k, new_u64s * sizeof(u64), GFP_NOFS);
-        if (!new_k)
+        if (!new_k) {
+                bch_err(c, "error allocating memory for key cache key, btree %s u64s %u",
+                        bch2_btree_ids[path->btree_id], new_u64s);
                 return -ENOMEM;
+        }
 
         ck->u64s        = new_u64s;
         ck->k           = new_k;
...
@@ -926,9 +926,11 @@ static int bch2_mark_stripe_ptr(struct btree_trans *trans,
         BUG_ON(!(flags & BTREE_TRIGGER_GC));
 
         m = genradix_ptr_alloc(&c->gc_stripes, p.idx, GFP_KERNEL);
-
-        if (!m)
+        if (!m) {
+                bch_err(c, "error allocating memory for gc_stripes, idx %llu",
+                        (u64) p.idx);
                 return -ENOMEM;
+        }
 
         spin_lock(&c->ec_stripes_heap_lock);
@@ -1039,7 +1041,7 @@ static int bch2_mark_stripe(struct btree_trans *trans,
         bool gc = flags & BTREE_TRIGGER_GC;
         u64 journal_seq = trans->journal_res.seq;
         struct bch_fs *c = trans->c;
-        size_t idx = new.k->p.offset;
+        u64 idx = new.k->p.offset;
         const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe
                 ? bkey_s_c_to_stripe(old).v : NULL;
         const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
@@ -1057,7 +1059,7 @@ static int bch2_mark_stripe(struct btree_trans *trans,
                 bch2_bkey_val_to_text(&PBUF(buf1), c, old);
                 bch2_bkey_val_to_text(&PBUF(buf2), c, new);
-                bch_err_ratelimited(c, "error marking nonexistent stripe %zu while marking\n"
+                bch_err_ratelimited(c, "error marking nonexistent stripe %llu while marking\n"
                                     "old %s\n"
                                     "new %s", idx, buf1, buf2);
                 bch2_inconsistent_error(c);
@@ -1089,9 +1091,11 @@ static int bch2_mark_stripe(struct btree_trans *trans,
                 struct gc_stripe *m =
                         genradix_ptr_alloc(&c->gc_stripes, idx, GFP_KERNEL);
-                if (!m)
+                if (!m) {
+                        bch_err(c, "error allocating memory for gc_stripes, idx %llu",
+                                idx);
                         return -ENOMEM;
+                }
                 /*
                  * This will be wrong when we bring back runtime gc: we should
                  * be unmarking the old key and then marking the new key
...
@@ -564,14 +564,17 @@ static struct inode_walker inode_walker_init(void)
         return (struct inode_walker) { 0, };
 }
 
-static int inode_walker_realloc(struct inode_walker *w)
+static int inode_walker_realloc(struct bch_fs *c, struct inode_walker *w)
 {
         if (w->nr == w->size) {
                 size_t new_size = max_t(size_t, 8UL, w->size * 2);
                 void *d = krealloc(w->d, new_size * sizeof(w->d[0]),
                                    GFP_KERNEL);
-                if (!d)
+                if (!d) {
+                        bch_err(c, "fsck: error allocating memory for inode_walker, size %zu",
+                                new_size);
                         return -ENOMEM;
+                }
 
                 w->d = d;
                 w->size = new_size;
@@ -586,7 +589,7 @@ static int add_inode(struct bch_fs *c, struct inode_walker *w,
         struct bch_inode_unpacked u;
         int ret;
 
-        ret = inode_walker_realloc(w);
+        ret = inode_walker_realloc(c, w);
         if (ret)
                 return ret;
@@ -647,7 +650,7 @@ static int __walk_inode(struct btree_trans *trans,
         while (i && w->d[i - 1].snapshot > pos.snapshot)
                 --i;
 
-        ret = inode_walker_realloc(w);
+        ret = inode_walker_realloc(c, w);
         if (ret)
                 return ret;
@@ -1812,7 +1815,8 @@ static bool path_is_dup(struct pathbuf *p, u64 inum, u32 snapshot)
         return false;
 }
 
-static int path_down(struct pathbuf *p, u64 inum, u32 snapshot)
+static int path_down(struct bch_fs *c, struct pathbuf *p,
+                     u64 inum, u32 snapshot)
 {
         if (p->nr == p->size) {
                 size_t new_size = max_t(size_t, 256UL, p->size * 2);
@@ -1820,6 +1824,8 @@ static int path_down(struct pathbuf *p, u64 inum, u32 snapshot)
                               new_size * sizeof(p->entries[0]),
                               GFP_KERNEL);
                 if (!n) {
+                        bch_err(c, "fsck: error allocating memory for pathbuf, size %zu",
+                                new_size);
                         return -ENOMEM;
                 }
@@ -1893,7 +1899,7 @@ static int check_path(struct btree_trans *trans,
                 if (!S_ISDIR(inode->bi_mode))
                         break;
 
-                ret = path_down(p, inode->bi_inum, snapshot);
+                ret = path_down(c, p, inode->bi_inum, snapshot);
                 if (ret) {
                         bch_err(c, "memory allocation failure");
                         return ret;
@@ -1998,12 +2004,15 @@ struct nlink_table {
         } *d;
 };
 
-static int add_nlink(struct nlink_table *t, u64 inum, u32 snapshot)
+static int add_nlink(struct bch_fs *c, struct nlink_table *t,
+                     u64 inum, u32 snapshot)
 {
         if (t->nr == t->size) {
                 size_t new_size = max_t(size_t, 128UL, t->size * 2);
                 void *d = kvmalloc(new_size * sizeof(t->d[0]), GFP_KERNEL);
                 if (!d) {
+                        bch_err(c, "fsck: error allocating memory for nlink_table, size %zu",
+                                new_size);
                         return -ENOMEM;
                 }
@@ -2093,7 +2102,7 @@ static int check_nlinks_find_hardlinks(struct bch_fs *c,
                 if (!u.bi_nlink)
                         continue;
 
-                ret = add_nlink(t, k.k->p.offset, k.k->p.snapshot);
+                ret = add_nlink(c, t, k.k->p.offset, k.k->p.snapshot);
                 if (ret) {
                         *end = k.k->p.offset;
                         ret = 0;
...