Commit 911c9610 authored by Kent Overstreet

bcache: Split out sort_extent_cmp()

Only use extent comparison for comparing extents, so we're not using
START_KEY() on other key types (i.e. btree pointers)
Signed-off-by: Kent Overstreet <kmo@daterainc.com>
parent fafff81c
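The distinction this patch draws can be illustrated with a small standalone sketch. This is not bcache code: the struct and function names below (demo_key, demo_key_cmp, demo_extent_cmp) are invented for illustration. The point it shows is that an extent sorts by where it starts (offset minus size, which is what START_KEY() computes), while a plain key comparison sorts by the key itself, and the two orderings can disagree. Only the former is meaningful for extents; for other key types such as btree pointers, only the plain comparison makes sense.

/* Illustrative sketch only, not bcache code. */
#include <stdint.h>
#include <stdio.h>

struct demo_key {
	uint64_t inode;
	uint64_t offset;	/* for an extent: end of the range */
	uint64_t size;		/* for an extent: length of the range */
};

/* Order by the key itself (inode, offset), like comparing whole bkeys. */
static int64_t demo_key_cmp(const struct demo_key *l, const struct demo_key *r)
{
	if (l->inode != r->inode)
		return l->inode < r->inode ? -1 : 1;
	if (l->offset != r->offset)
		return l->offset < r->offset ? -1 : 1;
	return 0;
}

/* Order extents by their start (offset - size), the START_KEY() idea.
 * (The newest-to-oldest tie break used by the real comparator is omitted.) */
static int64_t demo_extent_cmp(const struct demo_key *l, const struct demo_key *r)
{
	struct demo_key ls = { l->inode, l->offset - l->size, 0 };
	struct demo_key rs = { r->inode, r->offset - r->size, 0 };

	return demo_key_cmp(&ls, &rs);
}

int main(void)
{
	/* Extent a covers [0, 16), extent b covers [8, 12). */
	struct demo_key a = { 1, 16, 16 };
	struct demo_key b = { 1, 12, 4 };

	/* Extent order puts a first (start 0 < start 8); plain key order
	 * puts b first (offset 12 < offset 16). */
	printf("extent order: %s first\n",
	       demo_extent_cmp(&a, &b) < 0 ? "a" : "b");
	printf("key order:    %s first\n",
	       demo_key_cmp(&a, &b) < 0 ? "a" : "b");
	return 0;
}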
@@ -855,19 +855,13 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
 
 /* Btree iterator */
 
-/*
- * Returns true if l > r - unless l == r, in which case returns true if l is
- * older than r.
- *
- * Necessary for btree_sort_fixup() - if there are multiple keys that compare
- * equal in different sets, we have to process them newest to oldest.
- */
+typedef bool (btree_iter_cmp_fn)(struct btree_iter_set,
+				 struct btree_iter_set);
+
 static inline bool btree_iter_cmp(struct btree_iter_set l,
 				  struct btree_iter_set r)
 {
-	int64_t c = bkey_cmp(&START_KEY(l.k), &START_KEY(r.k));
-
-	return c ? c > 0 : l.k < r.k;
+	return bkey_cmp(l.k, r.k) > 0;
 }
 
 static inline bool btree_iter_end(struct btree_iter *iter)
@@ -884,8 +878,10 @@ void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
 			 btree_iter_cmp));
 }
 
-struct bkey *__bch_btree_iter_init(struct btree *b, struct btree_iter *iter,
-				   struct bkey *search, struct bset_tree *start)
+static struct bkey *__bch_btree_iter_init(struct btree *b,
+					  struct btree_iter *iter,
+					  struct bkey *search,
+					  struct bset_tree *start)
 {
 	struct bkey *ret = NULL;
 	iter->size = ARRAY_SIZE(iter->data);
@@ -903,7 +899,15 @@ struct bkey *__bch_btree_iter_init(struct btree *b, struct btree_iter *iter,
 	return ret;
 }
 
-struct bkey *bch_btree_iter_next(struct btree_iter *iter)
+struct bkey *bch_btree_iter_init(struct btree *b,
+				 struct btree_iter *iter,
+				 struct bkey *search)
+{
+	return __bch_btree_iter_init(b, iter, search, b->sets);
+}
+
+static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
+						 btree_iter_cmp_fn *cmp)
 {
 	struct btree_iter_set unused;
 	struct bkey *ret = NULL;
@@ -920,14 +924,20 @@ struct bkey *bch_btree_iter_next(struct btree_iter *iter)
 		}
 
 		if (iter->data->k == iter->data->end)
-			heap_pop(iter, unused, btree_iter_cmp);
+			heap_pop(iter, unused, cmp);
 		else
-			heap_sift(iter, 0, btree_iter_cmp);
+			heap_sift(iter, 0, cmp);
 	}
 
 	return ret;
 }
 
+struct bkey *bch_btree_iter_next(struct btree_iter *iter)
+{
+	return __bch_btree_iter_next(iter, btree_iter_cmp);
+}
+
+
 struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,
 					struct btree *b, ptr_filter_fn fn)
 {
@@ -951,13 +961,37 @@ static void sort_key_next(struct btree_iter *iter,
 	*i = iter->data[--iter->used];
 }
 
-static struct bkey *btree_sort_fixup(struct btree_iter *iter, struct bkey *tmp)
+/*
+ * Returns true if l > r - unless l == r, in which case returns true if l is
+ * older than r.
+ *
+ * Necessary for btree_sort_fixup() - if there are multiple keys that compare
+ * equal in different sets, we have to process them newest to oldest.
+ */
+static inline bool sort_extent_cmp(struct btree_iter_set l,
+				   struct btree_iter_set r)
+{
+	int64_t c = bkey_cmp(&START_KEY(l.k), &START_KEY(r.k));
+
+	return c ? c > 0 : l.k < r.k;
+}
+
+static inline bool sort_cmp(struct btree_iter_set l,
+			    struct btree_iter_set r)
+{
+	int64_t c = bkey_cmp(l.k, r.k);
+
+	return c ? c > 0 : l.k < r.k;
+}
+
+static struct bkey *btree_sort_fixup_extents(struct btree_iter *iter,
+					     struct bkey *tmp)
 {
 	while (iter->used > 1) {
 		struct btree_iter_set *top = iter->data, *i = top + 1;
 
 		if (iter->used > 2 &&
-		    btree_iter_cmp(i[0], i[1]))
+		    sort_extent_cmp(i[0], i[1]))
 			i++;
 
 		if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0)
@@ -965,7 +999,7 @@ static struct bkey *btree_sort_fixup(struct btree_iter *iter, struct bkey *tmp)
 
 		if (!KEY_SIZE(i->k)) {
 			sort_key_next(iter, i);
-			heap_sift(iter, i - top, btree_iter_cmp);
+			heap_sift(iter, i - top, sort_extent_cmp);
 			continue;
 		}
@@ -975,7 +1009,7 @@ static struct bkey *btree_sort_fixup(struct btree_iter *iter, struct bkey *tmp)
 			else
 				bch_cut_front(top->k, i->k);
 
-			heap_sift(iter, i - top, btree_iter_cmp);
+			heap_sift(iter, i - top, sort_extent_cmp);
 		} else {
 			/* can't happen because of comparison func */
 			BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k)));
@@ -1001,20 +1035,28 @@ static void btree_mergesort(struct btree *b, struct bset *out,
 			      struct btree_iter *iter,
 			      bool fixup, bool remove_stale)
 {
+	int i;
 	struct bkey *k, *last = NULL;
 	BKEY_PADDED(k) tmp;
+	btree_iter_cmp_fn *cmp = b->level
+		? sort_cmp
+		: sort_extent_cmp;
 	bool (*bad)(struct btree *, const struct bkey *) = remove_stale
 		? bch_ptr_bad
 		: bch_ptr_invalid;
 
+	/* Heapify the iterator, using our comparison function */
+	for (i = iter->used / 2 - 1; i >= 0; --i)
+		heap_sift(iter, i, cmp);
+
 	while (!btree_iter_end(iter)) {
 		if (fixup && !b->level)
-			k = btree_sort_fixup(iter, &tmp.k);
+			k = btree_sort_fixup_extents(iter, &tmp.k);
 		else
 			k = NULL;
 
 		if (!k)
-			k = bch_btree_iter_next(iter);
+			k = __bch_btree_iter_next(iter, cmp);
 
 		if (bad(b, k))
 			continue;
@@ -305,8 +305,8 @@ struct bkey *bch_btree_iter_next_filter(struct btree_iter *,
 					struct btree *, ptr_filter_fn);
 
 void bch_btree_iter_push(struct btree_iter *, struct bkey *, struct bkey *);
-struct bkey *__bch_btree_iter_init(struct btree *, struct btree_iter *,
-				   struct bkey *, struct bset_tree *);
+struct bkey *bch_btree_iter_init(struct btree *, struct btree_iter *,
+				 struct bkey *);
 
 /* 32 bits total: */
 #define BKEY_MID_BITS		3
@@ -1854,10 +1854,16 @@ static bool fix_overlapping_extents(struct btree *b, struct bkey *insert,
 
 	while (1) {
 		struct bkey *k = bch_btree_iter_next(iter);
-		if (!k ||
-		    bkey_cmp(&START_KEY(k), insert) >= 0)
+		if (!k)
 			break;
 
+		if (bkey_cmp(&START_KEY(k), insert) >= 0) {
+			if (KEY_SIZE(k))
+				break;
+			else
+				continue;
+		}
+
 		if (bkey_cmp(k, &START_KEY(insert)) <= 0)
 			continue;
 
@@ -225,13 +225,6 @@ static inline void set_gc_sectors(struct cache_set *c)
 	atomic_set(&c->sectors_to_gc, c->sb.bucket_size * c->nbuckets / 16);
 }
 
-static inline struct bkey *bch_btree_iter_init(struct btree *b,
-					       struct btree_iter *iter,
-					       struct bkey *search)
-{
-	return __bch_btree_iter_init(b, iter, search, b->sets);
-}
-
 static inline bool bch_ptr_invalid(struct btree *b, const struct bkey *k)
 {
 	if (b->level)