Commit 80db4e47 authored by Coly Li, committed by Jens Axboe

bcache: remove incremental dirty sector counting for bch_sectors_dirty_init()

After making bch_sectors_dirty_init() multithreaded, the existing
incremental dirty sector counting in bch_root_node_dirty_init() no
longer releases the btree after iterating 500000 (INIT_KEYS_EACH_TIME)
bkeys. Because a read lock is taken on the btree root node to prevent
the btree from being split during the dirty sector counting, other I/O
requesters have no chance to gain the write lock, even when
bcache_btree() restarts.
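
To make the starvation concrete, here is a minimal userspace sketch of
the same situation, using plain pthreads rather than bcache code;
dirty_counter, io_requester, and root_lock are illustrative stand-ins
for the init thread holding the root node's read lock and an I/O path
waiting to split the btree:

/* Illustration only: a reader holding a rwlock starves a writer. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_rwlock_t root_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Stands in for the dirty-sector-counting thread. */
static void *dirty_counter(void *arg)
{
	pthread_rwlock_rdlock(&root_lock);	/* read lock on the "root node" */
	sleep(3);				/* a long key iteration */
	pthread_rwlock_unlock(&root_lock);
	return NULL;
}

/* Stands in for an I/O requester that needs the write lock. */
static void *io_requester(void *arg)
{
	while (pthread_rwlock_trywrlock(&root_lock) != 0) {
		printf("writer starved, retrying\n");
		sleep(1);
	}
	pthread_rwlock_unlock(&root_lock);
	return NULL;
}

int main(void)
{
	pthread_t r, w;

	pthread_create(&r, NULL, dirty_counter, NULL);
	sleep(1);	/* let the reader take the lock first */
	pthread_create(&w, NULL, io_requester, NULL);
	pthread_join(r, NULL);
	pthread_join(w, NULL);
	return 0;
}

While dirty_counter holds the read lock, io_requester cannot make
progress, which is the writer-side starvation the paragraph above
describes.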

That is to say, the incremental dirty sector counting is incompatible
with the multithreaded bch_sectors_dirty_init(). We have to choose one
and drop the other.

In my testing, with 512-byte random writes, I generated 1.2T of dirty
data and a btree with 400K nodes. With a single thread and incremental
dirty sector counting, it takes 30+ minutes to register the backing
device. With multithreaded dirty sector counting, the backing device
registration can be accomplished within 2 minutes.

The difference between 30+ minutes and 2 minutes made me decide to keep
the multithreaded bch_sectors_dirty_init() and drop the incremental
dirty sector counting. This is what this patch does.

But INIT_KEYS_EACH_TIME is kept: in sectors_dirty_init_fn() the CPU is
released by cond_resched() after every INIT_KEYS_EACH_TIME keys
iterated. This avoids the watchdog reporting a bogus soft lockup
warning.
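
As a rough userspace analogue of that pattern (not the kernel code:
sched_yield() stands in for cond_resched(), and process_keys() with its
counts is made up for illustration):

/* Yield the CPU every INIT_KEYS_EACH_TIME iterations so a long scan
 * does not look like a soft lockup. */
#include <sched.h>
#include <stddef.h>

#define INIT_KEYS_EACH_TIME 500000

static size_t process_keys(size_t nr_keys)
{
	size_t count = 0;

	for (size_t i = 0; i < nr_keys; i++) {
		/* ...per-key work, e.g. accounting a dirty sector... */
		count++;
		if (!(count % INIT_KEYS_EACH_TIME))
			sched_yield();	/* the kernel uses cond_resched() */
	}
	return count;
}

int main(void)
{
	return process_keys(2000000) == 2000000 ? 0 : 1;
}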

Fixes: b144e45f ("bcache: make bch_sectors_dirty_init() to be multithreaded")
Signed-off-by: Coly Li <colyli@suse.de>
Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/r/20220524102336.10684-4-colyli@suse.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 4dc34ae1
drivers/md/bcache/writeback.c
@@ -805,13 +805,11 @@ static int bch_writeback_thread(void *arg)
 
 /* Init */
 #define INIT_KEYS_EACH_TIME	500000
-#define INIT_KEYS_SLEEP_MS	100
 
 struct sectors_dirty_init {
 	struct btree_op	op;
 	unsigned int	inode;
 	size_t		count;
-	struct bkey	start;
 };
 
 static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
@@ -827,11 +825,8 @@ static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
 					     KEY_START(k), KEY_SIZE(k));
 
 	op->count++;
-	if (atomic_read(&b->c->search_inflight) &&
-	    !(op->count % INIT_KEYS_EACH_TIME)) {
-		bkey_copy_key(&op->start, k);
-		return -EAGAIN;
-	}
+	if (!(op->count % INIT_KEYS_EACH_TIME))
+		cond_resched();
 
 	return MAP_CONTINUE;
 }
@@ -846,24 +841,16 @@ static int bch_root_node_dirty_init(struct cache_set *c,
 	bch_btree_op_init(&op.op, -1);
 	op.inode = d->id;
 	op.count = 0;
-	op.start = KEY(op.inode, 0, 0);
-
-	do {
-		ret = bcache_btree(map_keys_recurse,
-				   k,
-				   c->root,
-				   &op.op,
-				   &op.start,
-				   sectors_dirty_init_fn,
-				   0);
-		if (ret == -EAGAIN)
-			schedule_timeout_interruptible(
-				msecs_to_jiffies(INIT_KEYS_SLEEP_MS));
-		else if (ret < 0) {
-			pr_warn("sectors dirty init failed, ret=%d!\n", ret);
-			break;
-		}
-	} while (ret == -EAGAIN);
+
+	ret = bcache_btree(map_keys_recurse,
+			   k,
+			   c->root,
+			   &op.op,
+			   &KEY(op.inode, 0, 0),
+			   sectors_dirty_init_fn,
+			   0);
+	if (ret < 0)
+		pr_warn("sectors dirty init failed, ret=%d!\n", ret);
 
 	return ret;
 }
@@ -907,7 +894,6 @@ static int bch_dirty_init_thread(void *arg)
 				goto out;
 			}
 			skip_nr--;
-			cond_resched();
 		}
 
 		if (p) {
@@ -917,7 +903,6 @@ static int bch_dirty_init_thread(void *arg)
 		p = NULL;
 		prev_idx = cur_idx;
-		cond_resched();
 	}
 
 out:
@@ -956,11 +941,11 @@ void bch_sectors_dirty_init(struct bcache_device *d)
 		bch_btree_op_init(&op.op, -1);
 		op.inode = d->id;
 		op.count = 0;
-		op.start = KEY(op.inode, 0, 0);
 
 		for_each_key_filter(&c->root->keys,
 				    k, &iter, bch_ptr_invalid)
 			sectors_dirty_init_fn(&op.op, c->root, k);
 
 		rw_unlock(0, c->root);
 		return;
 	}