Commit 5d4c8513 authored by Linus Torvalds

Merge tag 'bcachefs-2024-07-12' of https://evilpiepirate.org/git/bcachefs

Pull more bcachefs fixes from Kent Overstreet:

 - revert the SLAB_ACCOUNT patch, something crazy is going on in memcg
   and someone forgot to test

 - minor fixes: a missing rcu_read_lock(), and a scheduling-while-atomic in
   an emergency shutdown path (a sketch of the RCU accessor pattern follows
   this list)

 - two lockdep fixes; these could have gone earlier, but were left to
   bake awhile (one of them sets PF_MEMALLOC_NOFS while a btree_trans
   holds locks; a sketch of that pattern also follows this list)
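
The RCU fix adds bucket_gen_get(), which copies the bucket generation out
under rcu_read_lock() instead of dereferencing the RCU-protected gens array
with no read lock held. A minimal standalone sketch of that pattern follows;
the struct and names here are illustrative stand-ins, not the bcachefs types
(the real helper, bucket_gen_get(), appears in the diff below):

    #include <linux/rcupdate.h>
    #include <linux/types.h>

    /* Stand-in for an RCU-protected generation array */
    struct example_gens {
            size_t  nbuckets;
            u8      b[];
    };

    struct example_dev {
            struct example_gens __rcu *gens;  /* swapped out and freed via RCU on resize */
    };

    /* Copy the value out inside the read-side section; don't return a pointer */
    static inline u8 example_gen_get(struct example_dev *ca, size_t b)
    {
            rcu_read_lock();
            u8 gen = rcu_dereference(ca->gens)->b[b];
            rcu_read_unlock();
            return gen;
    }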
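
The PF_MEMALLOC_NOFS lockdep fix below wraps the trans->locked transitions in
trans_set_locked()/trans_set_unlocked(), which keep PF_MEMALLOC_NOFS set on
the task for as long as a btree_trans may hold node locks, so an allocation
made while locked cannot recurse into filesystem reclaim and deadlock on those
same locks. The generic kernel interface for the same idea is
memalloc_nofs_save()/memalloc_nofs_restore(); a minimal sketch of that pattern
(the function below is made up for illustration; the bcachefs helpers in the
diff flip current->flags by hand so the saved state can live in struct
btree_trans):

    #include <linux/sched/mm.h>

    static void example_locked_section(void)
    {
            /* From here on, this task's allocations behave as if GFP_NOFS */
            unsigned int nofs_flags = memalloc_nofs_save();

            /* ... take and hold locks that reclaim could otherwise recurse on ... */

            /* Restore PF_MEMALLOC_NOFS to whatever the caller had */
            memalloc_nofs_restore(nofs_flags);
    }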

* tag 'bcachefs-2024-07-12' of https://evilpiepirate.org/git/bcachefs:
  bcachefs: bch2_gc_btree() should not use btree_root_lock
  bcachefs: Set PF_MEMALLOC_NOFS when trans->locked
  bcachefs: Use trans_unlock_long() when waiting on allocator
  Revert "bcachefs: Mark bch_inode_info as SLAB_ACCOUNT"
  bcachefs: fix scheduling while atomic in break_cycle()
  bcachefs: Fix RCU splat
parents 43db1e03 1841027c
@@ -641,16 +641,30 @@ static int bch2_gc_btree(struct btree_trans *trans, enum btree_id btree, bool in
 		target_depth = 0;

 	/* root */
-	mutex_lock(&c->btree_root_lock);
-	struct btree *b = bch2_btree_id_root(c, btree)->b;
-	if (!btree_node_fake(b)) {
+	do {
+retry_root:
+		bch2_trans_begin(trans);
+
+		struct btree_iter iter;
+		bch2_trans_node_iter_init(trans, &iter, btree, POS_MIN,
+					  0, bch2_btree_id_root(c, btree)->b->c.level, 0);
+		struct btree *b = bch2_btree_iter_peek_node(&iter);
+		ret = PTR_ERR_OR_ZERO(b);
+		if (ret)
+			goto err_root;
+
+		if (b != btree_node_root(c, b)) {
+			bch2_trans_iter_exit(trans, &iter);
+			goto retry_root;
+		}
+
 		gc_pos_set(c, gc_pos_btree(btree, b->c.level + 1, SPOS_MAX));
-		ret = lockrestart_do(trans,
-			bch2_gc_mark_key(trans, b->c.btree_id, b->c.level + 1,
-					 NULL, NULL, bkey_i_to_s_c(&b->key), initial));
-		level = b->c.level;
-	}
-	mutex_unlock(&c->btree_root_lock);
+
+		struct bkey_s_c k = bkey_i_to_s_c(&b->key);
+		ret = bch2_gc_mark_key(trans, btree, b->c.level + 1, NULL, NULL, k, initial);
+err_root:
+		bch2_trans_iter_exit(trans, &iter);
+	} while (bch2_err_matches(ret, BCH_ERR_transaction_restart));

 	if (ret)
 		return ret;
@@ -996,7 +996,7 @@ static int bch2_btree_path_traverse_all(struct btree_trans *trans)
 	bch2_trans_unlock(trans);
 	cond_resched();
-	trans->locked = true;
+	trans_set_locked(trans);

 	if (unlikely(trans->memory_allocation_failure)) {
 		struct closure cl;
@@ -3089,7 +3089,8 @@ u32 bch2_trans_begin(struct btree_trans *trans)
 		bch2_trans_srcu_unlock(trans);

 	trans->last_begin_ip = _RET_IP_;
-	trans->locked = true;
+
+	trans_set_locked(trans);

 	if (trans->restarted) {
 		bch2_btree_path_traverse_all(trans);
@@ -3159,7 +3160,6 @@ struct btree_trans *__bch2_trans_get(struct bch_fs *c, unsigned fn_idx)
 	trans->last_begin_time = local_clock();
 	trans->fn_idx = fn_idx;
 	trans->locking_wait.task = current;
-	trans->locked = true;
 	trans->journal_replay_not_finished =
 		unlikely(!test_bit(JOURNAL_replay_done, &c->journal.flags)) &&
 		atomic_inc_not_zero(&c->journal_keys.ref);
@@ -3193,6 +3193,7 @@ struct btree_trans *__bch2_trans_get(struct bch_fs *c, unsigned fn_idx)
 	trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
 	trans->srcu_lock_time = jiffies;
 	trans->srcu_held = true;
+	trans_set_locked(trans);

 	closure_init_stack_release(&trans->ref);
 	return trans;
@@ -231,7 +231,7 @@ static noinline int break_cycle(struct lock_graph *g, struct printbuf *cycle)
 			prt_newline(&buf);
 		}

-		bch2_print_string_as_lines(KERN_ERR, buf.buf);
+		bch2_print_string_as_lines_nonblocking(KERN_ERR, buf.buf);
 		printbuf_exit(&buf);
 		BUG();
 	}
@@ -792,7 +792,7 @@ static inline int __bch2_trans_relock(struct btree_trans *trans, bool trace)
 			return bch2_trans_relock_fail(trans, path, &f, trace);
 	}

-	trans->locked = true;
+	trans_set_locked(trans);
out:
 	bch2_trans_verify_locks(trans);
 	return 0;
@@ -812,16 +812,14 @@ void bch2_trans_unlock_noassert(struct btree_trans *trans)
 {
 	__bch2_trans_unlock(trans);

-	trans->locked = false;
-	trans->last_unlock_ip = _RET_IP_;
+	trans_set_unlocked(trans);
 }

 void bch2_trans_unlock(struct btree_trans *trans)
 {
 	__bch2_trans_unlock(trans);

-	trans->locked = false;
-	trans->last_unlock_ip = _RET_IP_;
+	trans_set_unlocked(trans);
 }

 void bch2_trans_unlock_long(struct btree_trans *trans)
@@ -193,6 +193,28 @@ int bch2_six_check_for_deadlock(struct six_lock *lock, void *p);

/* lock: */

+static inline void trans_set_locked(struct btree_trans *trans)
+{
+	if (!trans->locked) {
+		trans->locked = true;
+		trans->last_unlock_ip = 0;
+
+		trans->pf_memalloc_nofs = (current->flags & PF_MEMALLOC_NOFS) != 0;
+		current->flags |= PF_MEMALLOC_NOFS;
+	}
+}
+
+static inline void trans_set_unlocked(struct btree_trans *trans)
+{
+	if (trans->locked) {
+		trans->locked = false;
+		trans->last_unlock_ip = _RET_IP_;
+
+		if (!trans->pf_memalloc_nofs)
+			current->flags &= ~PF_MEMALLOC_NOFS;
+	}
+}
+
 static inline int __btree_node_lock_nopath(struct btree_trans *trans,
 					   struct btree_bkey_cached_common *b,
 					   enum six_lock_type type,
@@ -484,6 +484,7 @@ struct btree_trans {
 	bool lock_may_not_fail:1;
 	bool srcu_held:1;
 	bool locked:1;
+	bool pf_memalloc_nofs:1;
 	bool write_locked:1;
 	bool used_mempool:1;
 	bool in_traverse_all:1;
@@ -805,7 +805,7 @@ int bch2_bucket_ref_update(struct btree_trans *trans, struct bch_dev *ca,
 			"bucket %u:%zu gen %u (mem gen %u) data type %s: stale dirty ptr (gen %u)\n"
 			"while marking %s",
 			ptr->dev, bucket_nr, b_gen,
-			*bucket_gen(ca, bucket_nr),
+			bucket_gen_get(ca, bucket_nr),
 			bch2_data_type_str(bucket_data_type ?: ptr_data_type),
 			ptr->gen,
 			(printbuf_reset(&buf),
@@ -116,6 +116,14 @@ static inline u8 *bucket_gen(struct bch_dev *ca, size_t b)
 	return gens->b + b;
 }

+static inline u8 bucket_gen_get(struct bch_dev *ca, size_t b)
+{
+	rcu_read_lock();
+	u8 gen = *bucket_gen(ca, b);
+	rcu_read_unlock();
+	return gen;
+}
+
 static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca,
 				   const struct bch_extent_ptr *ptr)
 {
@@ -2073,8 +2073,7 @@ int __init bch2_vfs_init(void)
 {
 	int ret = -ENOMEM;

-	bch2_inode_cache = KMEM_CACHE(bch_inode_info, SLAB_RECLAIM_ACCOUNT |
-				      SLAB_ACCOUNT);
+	bch2_inode_cache = KMEM_CACHE(bch_inode_info, SLAB_RECLAIM_ACCOUNT);
 	if (!bch2_inode_cache)
 		goto err;
@@ -125,7 +125,7 @@ int bch2_extent_fallocate(struct btree_trans *trans,
 	bch2_bkey_buf_exit(&old, c);

 	if (closure_nr_remaining(&cl) != 1) {
-		bch2_trans_unlock(trans);
+		bch2_trans_unlock_long(trans);
 		closure_sync(&cl);
 	}
@@ -252,8 +252,10 @@ void bch2_prt_u64_base2(struct printbuf *out, u64 v)
 	bch2_prt_u64_base2_nbits(out, v, fls64(v) ?: 1);
 }

-void bch2_print_string_as_lines(const char *prefix, const char *lines)
+static void __bch2_print_string_as_lines(const char *prefix, const char *lines,
+					 bool nonblocking)
 {
+	bool locked = false;
 	const char *p;

 	if (!lines) {
@@ -261,7 +263,13 @@ void bch2_print_string_as_lines(const char *prefix, const char *lines)
 		return;
 	}

-	console_lock();
+	if (!nonblocking) {
+		console_lock();
+		locked = true;
+	} else {
+		locked = console_trylock();
+	}
+
 	while (1) {
 		p = strchrnul(lines, '\n');
 		printk("%s%.*s\n", prefix, (int) (p - lines), lines);
@@ -269,7 +277,18 @@ void bch2_print_string_as_lines(const char *prefix, const char *lines)
 			break;
 		lines = p + 1;
 	}
-	console_unlock();
+	if (locked)
+		console_unlock();
 }

+void bch2_print_string_as_lines(const char *prefix, const char *lines)
+{
+	return __bch2_print_string_as_lines(prefix, lines, false);
+}
+
+void bch2_print_string_as_lines_nonblocking(const char *prefix, const char *lines)
+{
+	return __bch2_print_string_as_lines(prefix, lines, true);
+}
+
 int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *task, unsigned skipnr,
@@ -315,6 +315,7 @@ void bch2_prt_u64_base2_nbits(struct printbuf *, u64, unsigned);
 void bch2_prt_u64_base2(struct printbuf *, u64);

 void bch2_print_string_as_lines(const char *prefix, const char *lines);
+void bch2_print_string_as_lines_nonblocking(const char *prefix, const char *lines);

 typedef DARRAY(unsigned long) bch_stacktrace;
 int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *, unsigned, gfp_t);