Commit 829a60b9 authored by Kent Overstreet

bcache: Move insert_fixup() to btree_keys_ops

Now handling overlapping extents/keys is a method that's specific to what the
btree node contains.
Signed-off-by: Kent Overstreet <kmo@daterainc.com>
parent 89ebb4a2
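In short: the insert path in btree.c no longer hard-codes extent overlap handling. The generic bch_btree_insert_key() added to bset.c dispatches through a new insert_fixup() method on btree_keys_ops, with one implementation for btree pointer nodes and one for extents. As a minimal sketch of the new interface, here is a hypothetical caller (insert_one() is illustrative only, not part of the patch) consuming the status codes declared in bset.h below:

    /*
     * Hypothetical wrapper: bch_btree_insert_key() returns a
     * BTREE_INSERT_STATUS_* code; NO_INSERT means the node type's
     * ops->insert_fixup() reported an insert collision.
     */
    static bool insert_one(struct btree_keys *b, struct bkey *k,
                           struct bkey *replace_key)
    {
            unsigned status = bch_btree_insert_key(b, k, replace_key);

            return status != BTREE_INSERT_STATUS_NO_INSERT;
    }

This mirrors what the rewritten btree_insert_key() in btree.c does with the return value.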
drivers/md/bcache/bset.c

@@ -676,6 +676,8 @@ void bch_bset_build_written_tree(struct btree_keys *b)
 }
 EXPORT_SYMBOL(bch_bset_build_written_tree);
 
+/* Insert */
+
 void bch_bset_fix_invalidated_key(struct btree_keys *b, struct bkey *k)
 {
 	struct bset_tree *t;
@@ -791,6 +793,54 @@ void bch_bset_insert(struct btree_keys *b, struct bkey *where,
 }
 EXPORT_SYMBOL(bch_bset_insert);
 
+unsigned bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
+			      struct bkey *replace_key)
+{
+	unsigned status = BTREE_INSERT_STATUS_NO_INSERT;
+	struct bset *i = bset_tree_last(b)->data;
+	struct bkey *m, *prev = NULL;
+	struct btree_iter iter;
+
+	BUG_ON(b->ops->is_extents && !KEY_SIZE(k));
+
+	m = bch_btree_iter_init(b, &iter, b->ops->is_extents
+				? PRECEDING_KEY(&START_KEY(k))
+				: PRECEDING_KEY(k));
+
+	if (b->ops->insert_fixup(b, k, &iter, replace_key))
+		return status;
+
+	status = BTREE_INSERT_STATUS_INSERT;
+
+	while (m != bset_bkey_last(i) &&
+	       bkey_cmp(k, b->ops->is_extents ? &START_KEY(m) : m) > 0)
+		prev = m, m = bkey_next(m);
+
+	/* prev is in the tree, if we merge we're done */
+	status = BTREE_INSERT_STATUS_BACK_MERGE;
+	if (prev &&
+	    bch_bkey_try_merge(b, prev, k))
+		goto merged;
+#if 0
+	status = BTREE_INSERT_STATUS_OVERWROTE;
+	if (m != bset_bkey_last(i) &&
+	    KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m))
+		goto copy;
+#endif
+	status = BTREE_INSERT_STATUS_FRONT_MERGE;
+	if (m != bset_bkey_last(i) &&
+	    bch_bkey_try_merge(b, k, m))
+		goto copy;
+
+	bch_bset_insert(b, m, k);
+copy:	bkey_copy(m, k);
+merged:
+	return status;
+}
+EXPORT_SYMBOL(bch_btree_insert_key);
+
+/* Lookup */
+
 struct bset_search_iter {
 	struct bkey *l, *r;
 };
drivers/md/bcache/bset.h

@@ -189,6 +189,8 @@ struct btree_keys_ops {
 	bool		(*sort_cmp)(struct btree_iter_set,
 				    struct btree_iter_set);
 	struct bkey	*(*sort_fixup)(struct btree_iter *, struct bkey *);
+	bool		(*insert_fixup)(struct btree_keys *, struct bkey *,
+					struct btree_iter *, struct bkey *);
 	bool		(*key_invalid)(struct btree_keys *,
 				       const struct bkey *);
 	bool		(*key_bad)(struct btree_keys *, const struct bkey *);
@@ -286,6 +288,16 @@ void bch_bset_init_next(struct btree_keys *, struct bset *, uint64_t);
 void bch_bset_build_written_tree(struct btree_keys *);
 void bch_bset_fix_invalidated_key(struct btree_keys *, struct bkey *);
 void bch_bset_insert(struct btree_keys *, struct bkey *, struct bkey *);
+unsigned bch_btree_insert_key(struct btree_keys *, struct bkey *,
+			      struct bkey *);
+
+enum {
+	BTREE_INSERT_STATUS_NO_INSERT = 0,
+	BTREE_INSERT_STATUS_INSERT,
+	BTREE_INSERT_STATUS_BACK_MERGE,
+	BTREE_INSERT_STATUS_OVERWROTE,
+	BTREE_INSERT_STATUS_FRONT_MERGE,
+};
 
 /*
  * Tries to merge l and r: l should be lower than r
drivers/md/bcache/btree.c

@@ -24,7 +24,6 @@
 #include "btree.h"
 #include "debug.h"
 #include "extents.h"
-#include "writeback.h"
 
 #include <linux/slab.h>
 #include <linux/bitops.h>
@@ -90,13 +89,6 @@
  * Test module load/unload
  */
 
-enum {
-	BTREE_INSERT_STATUS_INSERT,
-	BTREE_INSERT_STATUS_BACK_MERGE,
-	BTREE_INSERT_STATUS_OVERWROTE,
-	BTREE_INSERT_STATUS_FRONT_MERGE,
-};
-
 #define MAX_NEED_GC		64
 #define MAX_SAVE_PRIO		72
@@ -1792,230 +1784,23 @@ int bch_btree_check(struct cache_set *c)
 
 /* Btree insertion */
 
-static bool fix_overlapping_extents(struct btree *b, struct bkey *insert,
-				    struct btree_iter *iter,
-				    struct bkey *replace_key)
-{
-	void subtract_dirty(struct bkey *k, uint64_t offset, int sectors)
-	{
-		if (KEY_DIRTY(k))
-			bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
-						     offset, -sectors);
-	}
-
-	uint64_t old_offset;
-	unsigned old_size, sectors_found = 0;
-
-	while (1) {
-		struct bkey *k = bch_btree_iter_next(iter);
-		if (!k)
-			break;
-
-		if (bkey_cmp(&START_KEY(k), insert) >= 0) {
-			if (KEY_SIZE(k))
-				break;
-			else
-				continue;
-		}
-
-		if (bkey_cmp(k, &START_KEY(insert)) <= 0)
-			continue;
-
-		old_offset = KEY_START(k);
-		old_size = KEY_SIZE(k);
-
-		/*
-		 * We might overlap with 0 size extents; we can't skip these
-		 * because if they're in the set we're inserting to we have to
-		 * adjust them so they don't overlap with the key we're
-		 * inserting. But we don't want to check them for replace
-		 * operations.
-		 */
-
-		if (replace_key && KEY_SIZE(k)) {
-			/*
-			 * k might have been split since we inserted/found the
-			 * key we're replacing
-			 */
-			unsigned i;
-			uint64_t offset = KEY_START(k) -
-				KEY_START(replace_key);
-
-			/* But it must be a subset of the replace key */
-			if (KEY_START(k) < KEY_START(replace_key) ||
-			    KEY_OFFSET(k) > KEY_OFFSET(replace_key))
-				goto check_failed;
-
-			/* We didn't find a key that we were supposed to */
-			if (KEY_START(k) > KEY_START(insert) + sectors_found)
-				goto check_failed;
-
-			if (KEY_PTRS(k) != KEY_PTRS(replace_key) ||
-			    KEY_DIRTY(k) != KEY_DIRTY(replace_key))
-				goto check_failed;
-
-			/* skip past gen */
-			offset <<= 8;
-
-			BUG_ON(!KEY_PTRS(replace_key));
-
-			for (i = 0; i < KEY_PTRS(replace_key); i++)
-				if (k->ptr[i] != replace_key->ptr[i] + offset)
-					goto check_failed;
-
-			sectors_found = KEY_OFFSET(k) - KEY_START(insert);
-		}
-
-		if (bkey_cmp(insert, k) < 0 &&
-		    bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) {
-			/*
-			 * We overlapped in the middle of an existing key: that
-			 * means we have to split the old key. But we have to do
-			 * slightly different things depending on whether the
-			 * old key has been written out yet.
-			 */
-
-			struct bkey *top;
-
-			subtract_dirty(k, KEY_START(insert), KEY_SIZE(insert));
-
-			if (bkey_written(&b->keys, k)) {
-				/*
-				 * We insert a new key to cover the top of the
-				 * old key, and the old key is modified in place
-				 * to represent the bottom split.
-				 *
-				 * It's completely arbitrary whether the new key
-				 * is the top or the bottom, but it has to match
-				 * up with what btree_sort_fixup() does - it
-				 * doesn't check for this kind of overlap, it
-				 * depends on us inserting a new key for the top
-				 * here.
-				 */
-				top = bch_bset_search(&b->keys,
-						      bset_tree_last(&b->keys),
-						      insert);
-				bch_bset_insert(&b->keys, top, k);
-			} else {
-				BKEY_PADDED(key) temp;
-				bkey_copy(&temp.key, k);
-				bch_bset_insert(&b->keys, k, &temp.key);
-				top = bkey_next(k);
-			}
-
-			bch_cut_front(insert, top);
-			bch_cut_back(&START_KEY(insert), k);
-			bch_bset_fix_invalidated_key(&b->keys, k);
-			return false;
-		}
-
-		if (bkey_cmp(insert, k) < 0) {
-			bch_cut_front(insert, k);
-		} else {
-			if (bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0)
-				old_offset = KEY_START(insert);
-
-			if (bkey_written(&b->keys, k) &&
-			    bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) {
-				/*
-				 * Completely overwrote, so we don't have to
-				 * invalidate the binary search tree
-				 */
-				bch_cut_front(k, k);
-			} else {
-				__bch_cut_back(&START_KEY(insert), k);
-				bch_bset_fix_invalidated_key(&b->keys, k);
-			}
-		}
-
-		subtract_dirty(k, old_offset, old_size - KEY_SIZE(k));
-	}
-
-check_failed:
-	if (replace_key) {
-		if (!sectors_found) {
-			return true;
-		} else if (sectors_found < KEY_SIZE(insert)) {
-			SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
-				       (KEY_SIZE(insert) - sectors_found));
-			SET_KEY_SIZE(insert, sectors_found);
-		}
-	}
-
-	return false;
-}
-
-static bool btree_insert_key(struct btree *b, struct btree_op *op,
-			     struct bkey *k, struct bkey *replace_key)
+static bool btree_insert_key(struct btree *b, struct bkey *k,
+			     struct bkey *replace_key)
 {
-	struct bset *i = btree_bset_last(b);
-	struct bkey *m, *prev;
-	unsigned status = BTREE_INSERT_STATUS_INSERT;
+	unsigned status;
 
 	BUG_ON(bkey_cmp(k, &b->key) > 0);
-	BUG_ON(b->level && !KEY_PTRS(k));
-	BUG_ON(!b->level && !KEY_OFFSET(k));
 
-	if (!b->level) {
-		struct btree_iter iter;
-
-		/*
-		 * bset_search() returns the first key that is strictly greater
-		 * than the search key - but for back merging, we want to find
-		 * the previous key.
-		 */
-		prev = NULL;
-		m = bch_btree_iter_init(&b->keys, &iter,
-					PRECEDING_KEY(&START_KEY(k)));
-
-		if (fix_overlapping_extents(b, k, &iter, replace_key)) {
-			op->insert_collision = true;
-			return false;
-		}
-
-		if (KEY_DIRTY(k))
-			bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
-						     KEY_START(k), KEY_SIZE(k));
-
-		while (m != bset_bkey_last(i) &&
-		       bkey_cmp(k, &START_KEY(m)) > 0)
-			prev = m, m = bkey_next(m);
-
-		if (key_merging_disabled(b->c))
-			goto insert;
-
-		/* prev is in the tree, if we merge we're done */
-		status = BTREE_INSERT_STATUS_BACK_MERGE;
-		if (prev &&
-		    bch_bkey_try_merge(&b->keys, prev, k))
-			goto merged;
-
-		status = BTREE_INSERT_STATUS_OVERWROTE;
-		if (m != bset_bkey_last(i) &&
-		    KEY_PTRS(m) == KEY_PTRS(k) && !KEY_SIZE(m))
-			goto copy;
-
-		status = BTREE_INSERT_STATUS_FRONT_MERGE;
-		if (m != bset_bkey_last(i) &&
-		    bch_bkey_try_merge(&b->keys, k, m))
-			goto copy;
-	} else {
-		BUG_ON(replace_key);
-		m = bch_bset_search(&b->keys, bset_tree_last(&b->keys), k);
-	}
-
-insert:	bch_bset_insert(&b->keys, m, k);
-copy:	bkey_copy(m, k);
-merged:
-	bch_check_keys(&b->keys, "%u for %s", status,
-		       replace_key ? "replace" : "insert");
-
-	if (b->level && !KEY_OFFSET(k))
-		btree_current_write(b)->prio_blocked++;
-
-	trace_bcache_btree_insert_key(b, k, replace_key != NULL, status);
-
-	return true;
+	status = bch_btree_insert_key(&b->keys, k, replace_key);
+	if (status != BTREE_INSERT_STATUS_NO_INSERT) {
+		bch_check_keys(&b->keys, "%u for %s", status,
+			       replace_key ? "replace" : "insert");
+
+		trace_bcache_btree_insert_key(b, k, replace_key != NULL,
+					      status);
+		return true;
+	} else
+		return false;
 }
 
 static size_t insert_u64s_remaining(struct btree *b)
@@ -2048,7 +1833,7 @@ static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
 			if (!b->level)
 				bkey_put(b->c, k);
 
-			ret |= btree_insert_key(b, op, k, replace_key);
+			ret |= btree_insert_key(b, k, replace_key);
 			bch_keylist_pop_front(insert_keys);
 		} else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
 			BKEY_PADDED(key) temp;
@@ -2057,13 +1842,16 @@ static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
 			bch_cut_back(&b->key, &temp.key);
 			bch_cut_front(&b->key, insert_keys->keys);
 
-			ret |= btree_insert_key(b, op, &temp.key, replace_key);
+			ret |= btree_insert_key(b, &temp.key, replace_key);
 			break;
 		} else {
 			break;
 		}
 	}
 
+	if (!ret)
+		op->insert_collision = true;
+
 	BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
 
 	BUG_ON(bch_count_data(&b->keys) < oldsize);
drivers/md/bcache/extents.c

@@ -222,8 +222,22 @@ static bool bch_btree_ptr_bad(struct btree_keys *bk, const struct bkey *k)
 	return false;
 }
 
+static bool bch_btree_ptr_insert_fixup(struct btree_keys *bk,
+				       struct bkey *insert,
+				       struct btree_iter *iter,
+				       struct bkey *replace_key)
+{
+	struct btree *b = container_of(bk, struct btree, keys);
+
+	if (!KEY_OFFSET(insert))
+		btree_current_write(b)->prio_blocked++;
+
+	return false;
+}
+
 const struct btree_keys_ops bch_btree_keys_ops = {
 	.sort_cmp	= bch_key_sort_cmp,
+	.insert_fixup	= bch_btree_ptr_insert_fixup,
 	.key_invalid	= bch_btree_ptr_invalid,
 	.key_bad	= bch_btree_ptr_bad,
 	.key_to_text	= bch_extent_to_text,
@@ -294,6 +308,169 @@ static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,
 	return NULL;
 }
 
+static bool bch_extent_insert_fixup(struct btree_keys *b,
+				    struct bkey *insert,
+				    struct btree_iter *iter,
+				    struct bkey *replace_key)
+{
+	struct cache_set *c = container_of(b, struct btree, keys)->c;
+
+	void subtract_dirty(struct bkey *k, uint64_t offset, int sectors)
+	{
+		if (KEY_DIRTY(k))
+			bcache_dev_sectors_dirty_add(c, KEY_INODE(k),
+						     offset, -sectors);
+	}
+
+	uint64_t old_offset;
+	unsigned old_size, sectors_found = 0;
+
+	BUG_ON(!KEY_OFFSET(insert));
+	BUG_ON(!KEY_SIZE(insert));
+
+	while (1) {
+		struct bkey *k = bch_btree_iter_next(iter);
+		if (!k)
+			break;
+
+		if (bkey_cmp(&START_KEY(k), insert) >= 0) {
+			if (KEY_SIZE(k))
+				break;
+			else
+				continue;
+		}
+
+		if (bkey_cmp(k, &START_KEY(insert)) <= 0)
+			continue;
+
+		old_offset = KEY_START(k);
+		old_size = KEY_SIZE(k);
+
+		/*
+		 * We might overlap with 0 size extents; we can't skip these
+		 * because if they're in the set we're inserting to we have to
+		 * adjust them so they don't overlap with the key we're
+		 * inserting. But we don't want to check them for replace
+		 * operations.
+		 */
+
+		if (replace_key && KEY_SIZE(k)) {
+			/*
+			 * k might have been split since we inserted/found the
+			 * key we're replacing
+			 */
+			unsigned i;
+			uint64_t offset = KEY_START(k) -
+				KEY_START(replace_key);
+
+			/* But it must be a subset of the replace key */
+			if (KEY_START(k) < KEY_START(replace_key) ||
+			    KEY_OFFSET(k) > KEY_OFFSET(replace_key))
+				goto check_failed;
+
+			/* We didn't find a key that we were supposed to */
+			if (KEY_START(k) > KEY_START(insert) + sectors_found)
+				goto check_failed;
+
+			if (KEY_PTRS(k) != KEY_PTRS(replace_key) ||
+			    KEY_DIRTY(k) != KEY_DIRTY(replace_key))
+				goto check_failed;
+
+			/* skip past gen */
+			offset <<= 8;
+
+			BUG_ON(!KEY_PTRS(replace_key));
+
+			for (i = 0; i < KEY_PTRS(replace_key); i++)
+				if (k->ptr[i] != replace_key->ptr[i] + offset)
+					goto check_failed;
+
+			sectors_found = KEY_OFFSET(k) - KEY_START(insert);
+		}
+
+		if (bkey_cmp(insert, k) < 0 &&
+		    bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0) {
+			/*
+			 * We overlapped in the middle of an existing key: that
+			 * means we have to split the old key. But we have to do
+			 * slightly different things depending on whether the
+			 * old key has been written out yet.
+			 */
+
+			struct bkey *top;
+
+			subtract_dirty(k, KEY_START(insert), KEY_SIZE(insert));
+
+			if (bkey_written(b, k)) {
+				/*
+				 * We insert a new key to cover the top of the
+				 * old key, and the old key is modified in place
+				 * to represent the bottom split.
+				 *
+				 * It's completely arbitrary whether the new key
+				 * is the top or the bottom, but it has to match
+				 * up with what btree_sort_fixup() does - it
+				 * doesn't check for this kind of overlap, it
+				 * depends on us inserting a new key for the top
+				 * here.
+				 */
+				top = bch_bset_search(b, bset_tree_last(b),
+						      insert);
+				bch_bset_insert(b, top, k);
+			} else {
+				BKEY_PADDED(key) temp;
+				bkey_copy(&temp.key, k);
+				bch_bset_insert(b, k, &temp.key);
+				top = bkey_next(k);
+			}
+
+			bch_cut_front(insert, top);
+			bch_cut_back(&START_KEY(insert), k);
+			bch_bset_fix_invalidated_key(b, k);
+			goto out;
+		}
+
+		if (bkey_cmp(insert, k) < 0) {
+			bch_cut_front(insert, k);
+		} else {
+			if (bkey_cmp(&START_KEY(insert), &START_KEY(k)) > 0)
+				old_offset = KEY_START(insert);
+
+			if (bkey_written(b, k) &&
+			    bkey_cmp(&START_KEY(insert), &START_KEY(k)) <= 0) {
+				/*
+				 * Completely overwrote, so we don't have to
+				 * invalidate the binary search tree
+				 */
+				bch_cut_front(k, k);
+			} else {
+				__bch_cut_back(&START_KEY(insert), k);
+				bch_bset_fix_invalidated_key(b, k);
+			}
+		}
+
+		subtract_dirty(k, old_offset, old_size - KEY_SIZE(k));
+	}
+
+check_failed:
+	if (replace_key) {
+		if (!sectors_found) {
+			return true;
+		} else if (sectors_found < KEY_SIZE(insert)) {
+			SET_KEY_OFFSET(insert, KEY_OFFSET(insert) -
+				       (KEY_SIZE(insert) - sectors_found));
+			SET_KEY_SIZE(insert, sectors_found);
+		}
+	}
+out:
+	if (KEY_DIRTY(insert))
+		bcache_dev_sectors_dirty_add(c, KEY_INODE(insert),
+					     KEY_START(insert),
+					     KEY_SIZE(insert));
+
+	return false;
+}
+
 static bool bch_extent_invalid(struct btree_keys *bk, const struct bkey *k)
 {
 	struct btree *b = container_of(bk, struct btree, keys);
@@ -435,6 +612,7 @@ static bool bch_extent_merge(struct btree_keys *bk, struct bkey *l, struct bkey
 const struct btree_keys_ops bch_extent_keys_ops = {
 	.sort_cmp	= bch_extent_sort_cmp,
 	.sort_fixup	= bch_extent_sort_fixup,
+	.insert_fixup	= bch_extent_insert_fixup,
 	.key_invalid	= bch_extent_invalid,
 	.key_bad	= bch_extent_bad,
 	.key_merge	= bch_extent_merge,