Commit cd63a278 authored by Linus Torvalds

Merge tag 'bcachefs-2024-06-28' of https://evilpiepirate.org/git/bcachefs

Pull bcachefs fixes from Kent Overstreet:
 "Simple stuff:

   - NULL ptr/err ptr deref fixes

   - fix for getting wedged on shutdown after journal error

   - fix missing recalc_capacity() call, so capacity now changes correctly
     after a device goes read-only

     however, our capacity calculation still doesn't take into account
     mixed ro/rw devices where the ro devices have data on them; that's
     going to be a more involved fix, separating accounting for "capacity
     used on ro devices" and "capacity used on rw devices"

   - boring syzbot stuff

  Slightly more involved:

   - discard, invalidate workers are now per device

     this has the effect of simplifying how we take device refs in these
     paths, and the device ref cleanup fixes a longstanding race between
     the device removal path and the discard path

   - fixes for how the debugfs code takes refs on btree_trans objects: we
     have debugfs code that prints in-use btree_trans objects.

     It uses closure_get() on trans->ref, which is mainly for the cycle
     detector, but the debugfs code was using it on a closure that may
     have hit 0, which is not allowed; for performance reasons we cannot
     avoid having not-in-use transactions on the global list.

     Introduce some new primitives to fix this and make the
     synchronization here a whole lot saner"

* tag 'bcachefs-2024-06-28' of https://evilpiepirate.org/git/bcachefs:
  bcachefs: Fix kmalloc bug in __snapshot_t_mut
  bcachefs: Discard, invalidate workers are now per device
  bcachefs: Fix shift-out-of-bounds in bch2_blacklist_entries_gc
  bcachefs: slab-use-after-free Read in bch2_sb_errors_from_cpu
  bcachefs: Add missing bch2_journal_do_writes() call
  bcachefs: Fix null ptr deref in journal_pins_to_text()
  bcachefs: Add missing recalc_capacity() call
  bcachefs: Fix btree_trans list ordering
  bcachefs: Fix race between trans_put() and btree_transactions_read()
  closures: closure_get_not_zero(), closure_return_sync()
  bcachefs: Make btree_deadlock_to_text() clearer
  bcachefs: fix seqmutex_relock()
  bcachefs: Fix freeing of error pointers
parents cd17613f 64cd7de9
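The closure changes in this pull are easiest to see as a pattern. Below is a minimal, illustrative sketch -- not code from this series -- of how a reader such as the debugfs code can now safely inspect objects on a global list. "struct obj", "obj_list" and "visit_live_objects()" are made-up names; only closure_get_not_zero(), closure_put(), closure_init_stack_release() and closure_return_sync() come from the patches below.

/*
 * Illustrative sketch only.  The real code walks c->btree_trans_list under
 * c->btree_trans_lock (see the debug.c hunks below); here the list, the
 * locking and the object type are simplified placeholders.
 */
#include <linux/closure.h>
#include <linux/list.h>

struct obj {
	struct closure		ref;
	struct list_head	list;
};

static void visit_live_objects(struct list_head *obj_list)
{
	struct obj *o;

	/* list locking elided for brevity */
	list_for_each_entry(o, obj_list, list) {
		/*
		 * closure_get() may only be used on a ref known to be nonzero;
		 * objects being torn down can still be on the list, so try-get
		 * instead and skip the dead ones.
		 */
		if (!closure_get_not_zero(&o->ref))
			continue;

		/* o is pinned here: safe to print/inspect it */

		closure_put(&o->ref);
	}
}

On the owner's side the object is published with closure_init_stack_release() (a release store, pairing with the acquire cmpxchg in closure_get_not_zero()) and torn down with closure_return_sync(), after which the try-get above fails and readers simply skip the object.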
(One file's diff is collapsed and not shown.)
@@ -275,6 +275,7 @@ int bch2_trigger_alloc(struct btree_trans *, enum btree_id, unsigned,
 			enum btree_iter_update_trigger_flags);
 int bch2_check_alloc_info(struct bch_fs *);
 int bch2_check_alloc_to_lru_refs(struct bch_fs *);
+void bch2_dev_do_discards(struct bch_dev *);
 void bch2_do_discards(struct bch_fs *);
 static inline u64 should_invalidate_buckets(struct bch_dev *ca,
@@ -289,6 +290,7 @@ static inline u64 should_invalidate_buckets(struct bch_dev *ca,
 	return clamp_t(s64, want_free - free, 0, u.d[BCH_DATA_cached].buckets);
 }
+void bch2_dev_do_invalidates(struct bch_dev *);
 void bch2_do_invalidates(struct bch_fs *);
 static inline struct bch_backpointer *alloc_v4_backpointers(struct bch_alloc_v4 *a)
@@ -312,7 +314,9 @@ u64 bch2_min_rw_member_capacity(struct bch_fs *);
 void bch2_dev_allocator_remove(struct bch_fs *, struct bch_dev *);
 void bch2_dev_allocator_add(struct bch_fs *, struct bch_dev *);
-void bch2_fs_allocator_background_exit(struct bch_fs *);
+void bch2_dev_allocator_background_exit(struct bch_dev *);
+void bch2_dev_allocator_background_init(struct bch_dev *);
 void bch2_fs_allocator_background_init(struct bch_fs *);
 #endif /* _BCACHEFS_ALLOC_BACKGROUND_H */
@@ -621,13 +621,13 @@ static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
 	avail = dev_buckets_free(ca, *usage, watermark);
 	if (usage->d[BCH_DATA_need_discard].buckets > avail)
-		bch2_do_discards(c);
+		bch2_dev_do_discards(ca);
 	if (usage->d[BCH_DATA_need_gc_gens].buckets > avail)
 		bch2_gc_gens_async(c);
 	if (should_invalidate_buckets(ca, *usage))
-		bch2_do_invalidates(c);
+		bch2_dev_do_invalidates(ca);
 	if (!avail) {
 		if (cl && !waiting) {
...
@@ -493,6 +493,11 @@ struct io_count {
 	u64			sectors[2][BCH_DATA_NR];
 };
+struct discard_in_flight {
+	bool			in_progress:1;
+	u64			bucket:63;
+};
 struct bch_dev {
 	struct kobject		kobj;
 #ifdef CONFIG_BCACHEFS_DEBUG
@@ -554,6 +559,12 @@ struct bch_dev {
 	size_t			inc_gen_really_needs_gc;
 	size_t			buckets_waiting_on_journal;
+	struct work_struct	invalidate_work;
+	struct work_struct	discard_work;
+	struct mutex		discard_buckets_in_flight_lock;
+	DARRAY(struct discard_in_flight)	discard_buckets_in_flight;
+	struct work_struct	discard_fast_work;
 	atomic64_t		rebalance_work;
 	struct journal_device	journal;
@@ -915,11 +926,6 @@ struct bch_fs {
 	unsigned		write_points_nr;
 	struct buckets_waiting_for_journal buckets_waiting_for_journal;
-	struct work_struct	invalidate_work;
-	struct work_struct	discard_work;
-	struct mutex		discard_buckets_in_flight_lock;
-	DARRAY(struct bpos)	discard_buckets_in_flight;
-	struct work_struct	discard_fast_work;
 	/* GARBAGE COLLECTION */
 	struct work_struct	gc_gens_work;
...
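The struct changes above move the discard/invalidate machinery from bch_fs into bch_dev. The worker implementation itself lives in alloc_background.c, whose diff is collapsed in this view, so the following is only a hedged sketch of the usual "pin the device, then queue its work item" shape such a per-device kick takes; the function name, the workqueue and the ref-handling details are assumptions, and only the discard_work/io_ref fields appear in the diffs shown here.

/*
 * Hedged sketch, NOT the code from this series (the real bch2_dev_do_discards()
 * is in the collapsed alloc_background.c diff).  It only illustrates why a
 * per-device work item simplifies ref handling: the ref is taken once, right
 * next to the queue_work() call, and ownership passes to the worker.
 */
static void sketch_dev_do_discards(struct bch_dev *ca)
{
	/* keep the device alive while its work item is queued/running */
	if (!percpu_ref_tryget_live(&ca->io_ref))
		return;

	/* system_long_wq is a stand-in; the real code uses a bcachefs workqueue */
	if (queue_work(system_long_wq, &ca->discard_work))
		return;		/* the worker now owns the ref and puts it when done */

	/* work was already pending: drop the ref we just took */
	percpu_ref_put(&ca->io_ref);
}

Tying the work item and its ref to the device's own lifetime, rather than to the filesystem-wide one, is the ref cleanup the pull message credits with fixing the race between device removal and the discard path.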
@@ -3130,7 +3130,6 @@ struct btree_trans *__bch2_trans_get(struct bch_fs *c, unsigned fn_idx)
 	trans = mempool_alloc(&c->btree_trans_pool, GFP_NOFS);
 	memset(trans, 0, sizeof(*trans));
-	closure_init_stack(&trans->ref);
 	seqmutex_lock(&c->btree_trans_lock);
 	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
@@ -3150,18 +3149,12 @@ struct btree_trans *__bch2_trans_get(struct bch_fs *c, unsigned fn_idx)
 			BUG_ON(pos_task &&
 			       pid == pos_task->pid &&
 			       pos->locked);
-			if (pos_task && pid < pos_task->pid) {
-				list_add_tail(&trans->list, &pos->list);
-				goto list_add_done;
-			}
 		}
 	}
-	list_add_tail(&trans->list, &c->btree_trans_list);
-list_add_done:
+	list_add(&trans->list, &c->btree_trans_list);
 	seqmutex_unlock(&c->btree_trans_lock);
 got_trans:
-	trans->ref.closure_get_happened = false;
 	trans->c = c;
 	trans->last_begin_time = local_clock();
 	trans->fn_idx = fn_idx;
@@ -3200,6 +3193,8 @@ struct btree_trans *__bch2_trans_get(struct bch_fs *c, unsigned fn_idx)
 	trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
 	trans->srcu_lock_time = jiffies;
 	trans->srcu_held = true;
+	closure_init_stack_release(&trans->ref);
 	return trans;
 }
@@ -3257,10 +3252,10 @@ void bch2_trans_put(struct btree_trans *trans)
 		bch2_journal_keys_put(c);
 	/*
-	 * trans->ref protects trans->locking_wait.task, btree_paths arary; used
+	 * trans->ref protects trans->locking_wait.task, btree_paths array; used
 	 * by cycle detector
 	 */
-	closure_sync(&trans->ref);
+	closure_return_sync(&trans->ref);
 	trans->locking_wait.task = NULL;
 	unsigned long *paths_allocated = trans->paths_allocated;
@@ -3385,8 +3380,6 @@ void bch2_fs_btree_iter_exit(struct bch_fs *c)
 			per_cpu_ptr(c->btree_trans_bufs, cpu)->trans;
 		if (trans) {
-			closure_sync(&trans->ref);
 			seqmutex_lock(&c->btree_trans_lock);
 			list_del(&trans->list);
 			seqmutex_unlock(&c->btree_trans_lock);
...
@@ -216,7 +216,8 @@ static long bch2_ioctl_fsck_offline(struct bch_ioctl_fsck_offline __user *user_a
 		ret = PTR_ERR_OR_ZERO(optstr) ?:
 			bch2_parse_mount_opts(NULL, &thr->opts, optstr);
-		kfree(optstr);
+		if (!IS_ERR(optstr))
+			kfree(optstr);
 		if (ret)
 			goto err;
@@ -319,7 +320,8 @@ static long bch2_ioctl_disk_add(struct bch_fs *c, struct bch_ioctl_disk arg)
 		return ret;
 	ret = bch2_dev_add(c, path);
-	kfree(path);
+	if (!IS_ERR(path))
+		kfree(path);
 	return ret;
 }
@@ -850,7 +852,8 @@ static long bch2_ioctl_fsck_online(struct bch_fs *c,
 		ret = PTR_ERR_OR_ZERO(optstr) ?:
 			bch2_parse_mount_opts(c, &thr->opts, optstr);
-		kfree(optstr);
+		if (!IS_ERR(optstr))
+			kfree(optstr);
 		if (ret)
 			goto err;
...
@@ -568,6 +568,32 @@ static const struct file_operations cached_btree_nodes_ops = {
 	.read		= bch2_cached_btree_nodes_read,
 };
+typedef int (*list_cmp_fn)(const struct list_head *l, const struct list_head *r);
+
+static void list_sort(struct list_head *head, list_cmp_fn cmp)
+{
+	struct list_head *pos;
+
+	list_for_each(pos, head)
+		while (!list_is_last(pos, head) &&
+		       cmp(pos, pos->next) > 0) {
+			struct list_head *pos2, *next = pos->next;
+
+			list_del(next);
+			list_for_each(pos2, head)
+				if (cmp(next, pos2) < 0)
+					goto pos_found;
+			BUG();
+pos_found:
+			list_add_tail(next, pos2);
+		}
+}
+
+static int list_ptr_order_cmp(const struct list_head *l, const struct list_head *r)
+{
+	return cmp_int(l, r);
+}
 static ssize_t bch2_btree_transactions_read(struct file *file, char __user *buf,
 					    size_t size, loff_t *ppos)
 {
@@ -575,41 +601,39 @@ static ssize_t bch2_btree_transactions_read(struct file *file, char __user *buf,
 	struct bch_fs *c = i->c;
 	struct btree_trans *trans;
 	ssize_t ret = 0;
-	u32 seq;
 	i->ubuf = buf;
 	i->size = size;
 	i->ret = 0;
 restart:
 	seqmutex_lock(&c->btree_trans_lock);
-	list_for_each_entry(trans, &c->btree_trans_list, list) {
-		struct task_struct *task = READ_ONCE(trans->locking_wait.task);
-		if (!task || task->pid <= i->iter)
+	list_sort(&c->btree_trans_list, list_ptr_order_cmp);
+	list_for_each_entry(trans, &c->btree_trans_list, list) {
+		if ((ulong) trans < i->iter)
 			continue;
-		closure_get(&trans->ref);
-		seq = seqmutex_seq(&c->btree_trans_lock);
-		seqmutex_unlock(&c->btree_trans_lock);
-		ret = flush_buf(i);
-		if (ret) {
-			closure_put(&trans->ref);
-			goto unlocked;
-		}
+		i->iter = (ulong) trans;
+		if (!closure_get_not_zero(&trans->ref))
+			continue;
+		u32 seq = seqmutex_unlock(&c->btree_trans_lock);
 		bch2_btree_trans_to_text(&i->buf, trans);
 		prt_printf(&i->buf, "backtrace:\n");
 		printbuf_indent_add(&i->buf, 2);
-		bch2_prt_task_backtrace(&i->buf, task, 0, GFP_KERNEL);
+		bch2_prt_task_backtrace(&i->buf, trans->locking_wait.task, 0, GFP_KERNEL);
 		printbuf_indent_sub(&i->buf, 2);
 		prt_newline(&i->buf);
-		i->iter = task->pid;
 		closure_put(&trans->ref);
+		ret = flush_buf(i);
+		if (ret)
+			goto unlocked;
 		if (!seqmutex_relock(&c->btree_trans_lock, seq))
 			goto restart;
 	}
@@ -804,50 +828,55 @@ static const struct file_operations btree_transaction_stats_op = {
 	.read		= btree_transaction_stats_read,
 };
-static ssize_t bch2_btree_deadlock_read(struct file *file, char __user *buf,
-					size_t size, loff_t *ppos)
+/* walk btree transactions until we find a deadlock and print it */
+static void btree_deadlock_to_text(struct printbuf *out, struct bch_fs *c)
 {
-	struct dump_iter *i = file->private_data;
-	struct bch_fs *c = i->c;
 	struct btree_trans *trans;
-	ssize_t ret = 0;
-	u32 seq;
-	i->ubuf = buf;
-	i->size = size;
-	i->ret = 0;
-	if (i->iter)
-		goto out;
+	pid_t iter = 0;
 restart:
 	seqmutex_lock(&c->btree_trans_lock);
 	list_for_each_entry(trans, &c->btree_trans_list, list) {
 		struct task_struct *task = READ_ONCE(trans->locking_wait.task);
-		if (!task || task->pid <= i->iter)
+		if (!task || task->pid <= iter)
 			continue;
-		closure_get(&trans->ref);
-		seq = seqmutex_seq(&c->btree_trans_lock);
-		seqmutex_unlock(&c->btree_trans_lock);
-		ret = flush_buf(i);
-		if (ret) {
-			closure_put(&trans->ref);
-			goto out;
-		}
-		bch2_check_for_deadlock(trans, &i->buf);
-		i->iter = task->pid;
+		iter = task->pid;
+		if (!closure_get_not_zero(&trans->ref))
+			continue;
+		u32 seq = seqmutex_unlock(&c->btree_trans_lock);
+		bool found = bch2_check_for_deadlock(trans, out) != 0;
 		closure_put(&trans->ref);
+		if (found)
+			return;
 		if (!seqmutex_relock(&c->btree_trans_lock, seq))
 			goto restart;
 	}
 	seqmutex_unlock(&c->btree_trans_lock);
-out:
+}
+static ssize_t bch2_btree_deadlock_read(struct file *file, char __user *buf,
+					size_t size, loff_t *ppos)
+{
+	struct dump_iter *i = file->private_data;
+	struct bch_fs *c = i->c;
+	ssize_t ret = 0;
+	i->ubuf = buf;
+	i->size = size;
+	i->ret = 0;
+	if (!i->iter) {
+		btree_deadlock_to_text(&i->buf, c);
+		i->iter++;
+	}
 	if (i->buf.allocation_failure)
 		ret = -ENOMEM;
...
@@ -1521,6 +1521,11 @@ bool bch2_journal_seq_pins_to_text(struct printbuf *out, struct journal *j, u64
 	struct journal_entry_pin *pin;
 	spin_lock(&j->lock);
+	if (!test_bit(JOURNAL_running, &j->flags)) {
+		spin_unlock(&j->lock);
+		return true;
+	}
 	*seq = max(*seq, j->pin.front);
 	if (*seq >= j->pin.back) {
...
@@ -1677,6 +1677,13 @@ static CLOSURE_CALLBACK(journal_write_done)
 		mod_delayed_work(j->wq, &j->write_work, max(0L, delta));
 	}
+	/*
+	 * We don't typically trigger journal writes from her - the next journal
+	 * write will be triggered immediately after the previous one is
+	 * allocated, in bch2_journal_write() - but the journal write error path
+	 * is special:
+	 */
+	bch2_journal_do_writes(j);
 	spin_unlock(&j->lock);
 }
...
@@ -232,7 +232,7 @@ bool bch2_blacklist_entries_gc(struct bch_fs *c)
 	BUG_ON(nr != t->nr);
 	unsigned i;
-	for (src = bl->start, i = eytzinger0_first(t->nr);
+	for (src = bl->start, i = t->nr == 0 ? 0 : eytzinger0_first(t->nr);
 	     src < bl->start + nr;
 	     src++, i = eytzinger0_next(i, nr)) {
 		BUG_ON(t->entries[i].start != le64_to_cpu(src->start));
...
@@ -110,19 +110,25 @@ void bch2_sb_error_count(struct bch_fs *c, enum bch_sb_error_id err)
 void bch2_sb_errors_from_cpu(struct bch_fs *c)
 {
 	bch_sb_errors_cpu *src = &c->fsck_error_counts;
-	struct bch_sb_field_errors *dst =
-		bch2_sb_field_resize(&c->disk_sb, errors,
-				     bch2_sb_field_errors_u64s(src->nr));
+	struct bch_sb_field_errors *dst;
 	unsigned i;
+	mutex_lock(&c->fsck_error_counts_lock);
+	dst = bch2_sb_field_resize(&c->disk_sb, errors,
+				   bch2_sb_field_errors_u64s(src->nr));
 	if (!dst)
-		return;
+		goto err;
 	for (i = 0; i < src->nr; i++) {
 		SET_BCH_SB_ERROR_ENTRY_ID(&dst->entries[i], src->data[i].id);
 		SET_BCH_SB_ERROR_ENTRY_NR(&dst->entries[i], src->data[i].nr);
 		dst->entries[i].last_error_time = cpu_to_le64(src->data[i].last_error_time);
 	}
+err:
+	mutex_unlock(&c->fsck_error_counts_lock);
 }
 static int bch2_sb_errors_to_cpu(struct bch_fs *c)
...
@@ -19,17 +19,14 @@ static inline bool seqmutex_trylock(struct seqmutex *lock)
 static inline void seqmutex_lock(struct seqmutex *lock)
 {
 	mutex_lock(&lock->lock);
-}
-static inline void seqmutex_unlock(struct seqmutex *lock)
-{
 	lock->seq++;
-	mutex_unlock(&lock->lock);
 }
-static inline u32 seqmutex_seq(struct seqmutex *lock)
+static inline u32 seqmutex_unlock(struct seqmutex *lock)
 {
-	return lock->seq;
+	u32 seq = lock->seq;
+	mutex_unlock(&lock->lock);
+	return seq;
 }
 static inline bool seqmutex_relock(struct seqmutex *lock, u32 seq)
...
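Since seqmutex_lock() now bumps the sequence number and seqmutex_unlock() hands it back, the drop-the-lock-and-revalidate pattern used by the debugfs code no longer needs a separate seqmutex_seq() call. A minimal usage sketch follows; walk_list_dropping_lock() is a made-up name and the per-object work is elided.

#include <linux/list.h>
#include "seqmutex.h"	/* the bcachefs header being patched above */

static void walk_list_dropping_lock(struct seqmutex *lock, struct list_head *head)
{
	struct list_head *pos;
restart:
	seqmutex_lock(lock);
	list_for_each(pos, head) {
		u32 seq = seqmutex_unlock(lock);

		/*
		 * ... slow work on 'pos' here, without the lock held ...
		 * (the debugfs code also pins the object first, via
		 * closure_get_not_zero(), before dropping the lock)
		 */

		if (!seqmutex_relock(lock, seq))
			goto restart;	/* someone else took the lock: list may have changed */
	}
	seqmutex_unlock(lock);
}

Because the sequence number is incremented at lock time, a successful seqmutex_relock() means nobody else held the mutex in between, so continuing the iteration from 'pos' is safe; that is the seqmutex_relock() fix in the shortlog.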
@@ -168,6 +168,9 @@ static noinline struct snapshot_t *__snapshot_t_mut(struct bch_fs *c, u32 id)
 	size_t new_bytes = kmalloc_size_roundup(struct_size(new, s, idx + 1));
 	size_t new_size = (new_bytes - sizeof(*new)) / sizeof(new->s[0]);
+	if (unlikely(new_bytes > INT_MAX))
+		return NULL;
 	new = kvzalloc(new_bytes, GFP_KERNEL);
 	if (!new)
 		return NULL;
...
@@ -536,7 +536,6 @@ static void __bch2_fs_free(struct bch_fs *c)
 	bch2_find_btree_nodes_exit(&c->found_btree_nodes);
 	bch2_free_pending_node_rewrites(c);
-	bch2_fs_allocator_background_exit(c);
 	bch2_fs_sb_errors_exit(c);
 	bch2_fs_counters_exit(c);
 	bch2_fs_snapshots_exit(c);
@@ -1195,6 +1194,7 @@ static void bch2_dev_free(struct bch_dev *ca)
 	kfree(ca->buckets_nouse);
 	bch2_free_super(&ca->disk_sb);
+	bch2_dev_allocator_background_exit(ca);
 	bch2_dev_journal_exit(ca);
 	free_percpu(ca->io_done);
@@ -1317,6 +1317,8 @@ static struct bch_dev *__bch2_dev_alloc(struct bch_fs *c,
 	atomic_long_set(&ca->ref, 1);
 #endif
+	bch2_dev_allocator_background_init(ca);
 	if (percpu_ref_init(&ca->io_ref, bch2_dev_io_ref_complete,
 			    PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
 	    !(ca->sb_read_scratch = (void *) __get_free_page(GFP_KERNEL)) ||
@@ -1529,6 +1531,7 @@ static void __bch2_dev_read_only(struct bch_fs *c, struct bch_dev *ca)
 	 * The allocator thread itself allocates btree nodes, so stop it first:
 	 */
 	bch2_dev_allocator_remove(c, ca);
+	bch2_recalc_capacity(c);
 	bch2_dev_journal_stop(&c->journal, ca);
 }
@@ -1540,6 +1543,7 @@ static void __bch2_dev_read_write(struct bch_fs *c, struct bch_dev *ca)
 	bch2_dev_allocator_add(c, ca);
 	bch2_recalc_capacity(c);
+	bch2_dev_do_discards(ca);
 }
 int __bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca,
...
@@ -284,6 +284,21 @@ static inline void closure_get(struct closure *cl)
 #endif
 }
+/**
+ * closure_get_not_zero
+ */
+static inline bool closure_get_not_zero(struct closure *cl)
+{
+	unsigned old = atomic_read(&cl->remaining);
+	do {
+		if (!(old & CLOSURE_REMAINING_MASK))
+			return false;
+	} while (!atomic_try_cmpxchg_acquire(&cl->remaining, &old, old + 1));
+	return true;
+}
 /**
  * closure_init - Initialize a closure, setting the refcount to 1
  * @cl: closure to initialize
@@ -310,6 +325,12 @@ static inline void closure_init_stack(struct closure *cl)
 	atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
 }
+static inline void closure_init_stack_release(struct closure *cl)
+{
+	memset(cl, 0, sizeof(struct closure));
+	atomic_set_release(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
+}
 /**
  * closure_wake_up - wake up all closures on a wait list,
  * with memory barrier
@@ -355,6 +376,8 @@ do { \
  */
 #define closure_return(_cl)	continue_at((_cl), NULL, NULL)
+void closure_return_sync(struct closure *cl);
 /**
  * continue_at_nobarrier - jump to another function without barrier
  *
...
@@ -13,7 +13,7 @@
 #include <linux/seq_file.h>
 #include <linux/sched/debug.h>
-static inline void closure_put_after_sub(struct closure *cl, int flags)
+static inline void closure_put_after_sub_checks(int flags)
 {
 	int r = flags & CLOSURE_REMAINING_MASK;
@@ -22,12 +22,17 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
 		 flags & CLOSURE_GUARD_MASK, (unsigned) __fls(r)))
 		r &= ~CLOSURE_GUARD_MASK;
-	if (!r) {
-		smp_acquire__after_ctrl_dep();
-		WARN(flags & ~CLOSURE_DESTRUCTOR,
-		     "closure ref hit 0 with incorrect flags set: %x (%u)",
-		     flags & ~CLOSURE_DESTRUCTOR, (unsigned) __fls(flags));
+	WARN(!r && (flags & ~CLOSURE_DESTRUCTOR),
+	     "closure ref hit 0 with incorrect flags set: %x (%u)",
+	     flags & ~CLOSURE_DESTRUCTOR, (unsigned) __fls(flags));
+}
+static inline void closure_put_after_sub(struct closure *cl, int flags)
+{
+	closure_put_after_sub_checks(flags);
+	if (!(flags & CLOSURE_REMAINING_MASK)) {
+		smp_acquire__after_ctrl_dep();
 		cl->closure_get_happened = false;
@@ -145,6 +150,41 @@ void __sched __closure_sync(struct closure *cl)
 }
 EXPORT_SYMBOL(__closure_sync);
+/*
+ * closure_return_sync - finish running a closure, synchronously (i.e. waiting
+ * for outstanding get()s to finish) and returning once closure refcount is 0.
+ *
+ * Unlike closure_sync() this doesn't reinit the ref to 1; subsequent
+ * closure_get_not_zero() calls waill fail.
+ */
+void __sched closure_return_sync(struct closure *cl)
+{
+	struct closure_syncer s = { .task = current };
+	cl->s = &s;
+	set_closure_fn(cl, closure_sync_fn, NULL);
+	unsigned flags = atomic_sub_return_release(1 + CLOSURE_RUNNING - CLOSURE_DESTRUCTOR,
+						   &cl->remaining);
+	closure_put_after_sub_checks(flags);
+	if (unlikely(flags & CLOSURE_REMAINING_MASK)) {
+		while (1) {
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			if (s.done)
+				break;
+			schedule();
+		}
+		__set_current_state(TASK_RUNNING);
+	}
+	if (cl->parent)
+		closure_put(cl->parent);
+}
+EXPORT_SYMBOL(closure_return_sync);
 int __sched __closure_sync_timeout(struct closure *cl, unsigned long timeout)
 {
 	struct closure_syncer s = { .task = current };
...