Commit 5ceaaad7 authored by Kent Overstreet

bcache: Bypass torture test

More testing ftw! Also, now verify mode doesn't break if you read dirty
data.
Signed-off-by: Kent Overstreet <kmo@daterainc.com>
parent 098fb254
...@@ -364,6 +364,7 @@ struct cached_dev { ...@@ -364,6 +364,7 @@ struct cached_dev {
unsigned readahead; unsigned readahead;
unsigned verify:1; unsigned verify:1;
unsigned bypass_torture_test:1;
unsigned partial_stripes_expensive:1; unsigned partial_stripes_expensive:1;
unsigned writeback_metadata:1; unsigned writeback_metadata:1;
......
...@@ -189,13 +189,14 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio) ...@@ -189,13 +189,14 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
void *p1 = kmap_atomic(bv->bv_page); void *p1 = kmap_atomic(bv->bv_page);
void *p2 = page_address(check->bi_io_vec[i].bv_page); void *p2 = page_address(check->bi_io_vec[i].bv_page);
if (memcmp(p1 + bv->bv_offset, cache_set_err_on(memcmp(p1 + bv->bv_offset,
p2 + bv->bv_offset, p2 + bv->bv_offset,
bv->bv_len)) bv->bv_len),
printk(KERN_ERR dc->disk.c,
"bcache (%s): verify failed at sector %llu\n", "verify failed at dev %s sector %llu",
bdevname(dc->bdev, name), bdevname(dc->bdev, name),
(uint64_t) bio->bi_sector); (uint64_t) bio->bi_sector);
kunmap_atomic(p1); kunmap_atomic(p1);
} }
......
...@@ -16,6 +16,7 @@ void bch_btree_iter_next_check(struct btree_iter *); ...@@ -16,6 +16,7 @@ void bch_btree_iter_next_check(struct btree_iter *);
#define EBUG_ON(cond) BUG_ON(cond) #define EBUG_ON(cond) BUG_ON(cond)
#define expensive_debug_checks(c) ((c)->expensive_debug_checks) #define expensive_debug_checks(c) ((c)->expensive_debug_checks)
#define key_merging_disabled(c) ((c)->key_merging_disabled) #define key_merging_disabled(c) ((c)->key_merging_disabled)
#define bypass_torture_test(d) ((d)->bypass_torture_test)
#else /* DEBUG */ #else /* DEBUG */
...@@ -28,6 +29,7 @@ static inline void bch_btree_iter_next_check(struct btree_iter *iter) {} ...@@ -28,6 +29,7 @@ static inline void bch_btree_iter_next_check(struct btree_iter *iter) {}
#define EBUG_ON(cond) do { if (cond); } while (0) #define EBUG_ON(cond) do { if (cond); } while (0)
#define expensive_debug_checks(c) 0 #define expensive_debug_checks(c) 0
#define key_merging_disabled(c) 0 #define key_merging_disabled(c) 0
#define bypass_torture_test(d) 0
#endif #endif
......
...@@ -528,6 +528,13 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio) ...@@ -528,6 +528,13 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
goto skip; goto skip;
} }
if (bypass_torture_test(dc)) {
if ((get_random_int() & 3) == 3)
goto skip;
else
goto rescale;
}
if (!congested && !dc->sequential_cutoff) if (!congested && !dc->sequential_cutoff)
goto rescale; goto rescale;
...@@ -601,6 +608,7 @@ struct search { ...@@ -601,6 +608,7 @@ struct search {
unsigned recoverable:1; unsigned recoverable:1;
unsigned unaligned_bvec:1; unsigned unaligned_bvec:1;
unsigned write:1; unsigned write:1;
unsigned read_dirty_data:1;
unsigned long start_time; unsigned long start_time;
...@@ -669,6 +677,9 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k) ...@@ -669,6 +677,9 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO; PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;
if (KEY_DIRTY(k))
s->read_dirty_data = true;
n = bch_bio_split(bio, min_t(uint64_t, INT_MAX, n = bch_bio_split(bio, min_t(uint64_t, INT_MAX,
KEY_OFFSET(k) - bio->bi_sector), KEY_OFFSET(k) - bio->bi_sector),
GFP_NOIO, s->d->bio_split); GFP_NOIO, s->d->bio_split);
...@@ -894,7 +905,8 @@ static void cached_dev_read_done(struct closure *cl) ...@@ -894,7 +905,8 @@ static void cached_dev_read_done(struct closure *cl)
s->cache_miss = NULL; s->cache_miss = NULL;
} }
if (verify(dc, &s->bio.bio) && s->recoverable && !s->unaligned_bvec) if (verify(dc, &s->bio.bio) && s->recoverable &&
!s->unaligned_bvec && !s->read_dirty_data)
bch_data_verify(dc, s->orig_bio); bch_data_verify(dc, s->orig_bio);
bio_complete(s); bio_complete(s);
......
...@@ -99,6 +99,7 @@ rw_attribute(errors); ...@@ -99,6 +99,7 @@ rw_attribute(errors);
rw_attribute(io_error_limit); rw_attribute(io_error_limit);
rw_attribute(io_error_halflife); rw_attribute(io_error_halflife);
rw_attribute(verify); rw_attribute(verify);
rw_attribute(bypass_torture_test);
rw_attribute(key_merging_disabled); rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite); rw_attribute(gc_always_rewrite);
rw_attribute(expensive_debug_checks); rw_attribute(expensive_debug_checks);
...@@ -123,6 +124,7 @@ SHOW(__bch_cached_dev) ...@@ -123,6 +124,7 @@ SHOW(__bch_cached_dev)
sysfs_printf(data_csum, "%i", dc->disk.data_csum); sysfs_printf(data_csum, "%i", dc->disk.data_csum);
var_printf(verify, "%i"); var_printf(verify, "%i");
var_printf(bypass_torture_test, "%i");
var_printf(writeback_metadata, "%i"); var_printf(writeback_metadata, "%i");
var_printf(writeback_running, "%i"); var_printf(writeback_running, "%i");
var_print(writeback_delay); var_print(writeback_delay);
...@@ -191,6 +193,7 @@ STORE(__cached_dev) ...@@ -191,6 +193,7 @@ STORE(__cached_dev)
sysfs_strtoul(data_csum, dc->disk.data_csum); sysfs_strtoul(data_csum, dc->disk.data_csum);
d_strtoul(verify); d_strtoul(verify);
d_strtoul(bypass_torture_test);
d_strtoul(writeback_metadata); d_strtoul(writeback_metadata);
d_strtoul(writeback_running); d_strtoul(writeback_running);
d_strtoul(writeback_delay); d_strtoul(writeback_delay);
...@@ -323,6 +326,7 @@ static struct attribute *bch_cached_dev_files[] = { ...@@ -323,6 +326,7 @@ static struct attribute *bch_cached_dev_files[] = {
&sysfs_readahead, &sysfs_readahead,
#ifdef CONFIG_BCACHE_DEBUG #ifdef CONFIG_BCACHE_DEBUG
&sysfs_verify, &sysfs_verify,
&sysfs_bypass_torture_test,
#endif #endif
NULL NULL
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment