Commit 198d6700 authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: add functionality for heaps to update backpointers

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 2252aa27
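
What this change does: every heap macro in util.h (heap_add(), heap_add_or_replace(), heap_pop(), heap_del(), heap_sift_up(), heap_sift_down(), heap_resort()) gains a set_backpointer argument. When non-NULL, the callback is invoked with the heap and a slot index each time an element is placed into or swapped within that slot, so an element type can track its own position in the heap and later be found or removed by index without a linear search. All existing callers pass NULL and behave exactly as before.

A minimal kernel-style sketch of the intended usage pattern, assuming the HEAP() type from util.h; struct job, job_cmp() and job_set_backpointer() are hypothetical names for illustration, not code from this commit:

struct job {
	u64	deadline;
	size_t	heap_idx;	/* slot in the heap, kept current by the hook */
};

typedef HEAP(struct job *) job_heap;

/* comparison follows the cmp(h, l, r) convention the heap macros expect */
static inline int job_cmp(job_heap *h, struct job *l, struct job *r)
{
	return (l->deadline > r->deadline) - (l->deadline < r->deadline);
}

/* invoked by heap_swap()/__heap_add() whenever data[i] changes */
static void job_set_backpointer(job_heap *h, size_t i)
{
	h->data[i]->heap_idx = i;
}

static bool job_queue(job_heap *h, struct job *j)
{
	return heap_add(h, j, job_cmp, job_set_backpointer);
}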
@@ -583,7 +583,8 @@ static void find_reclaimable_buckets_lru(struct bch_fs *c, struct bch_dev *ca)
 			e.nr++;
 		} else {
 			if (e.nr)
-				heap_add_or_replace(&ca->alloc_heap, e, -bucket_alloc_cmp);
+				heap_add_or_replace(&ca->alloc_heap, e,
+						    -bucket_alloc_cmp, NULL);
 
 			e = (struct alloc_heap_entry) {
 				.bucket = b,
@@ -596,14 +597,15 @@ static void find_reclaimable_buckets_lru(struct bch_fs *c, struct bch_dev *ca)
 	}
 
 	if (e.nr)
-		heap_add_or_replace(&ca->alloc_heap, e, -bucket_alloc_cmp);
+		heap_add_or_replace(&ca->alloc_heap, e,
+				    -bucket_alloc_cmp, NULL);
 
 	for (i = 0; i < ca->alloc_heap.used; i++)
 		nr += ca->alloc_heap.data[i].nr;
 
 	while (nr - ca->alloc_heap.data[0].nr >= ALLOC_SCAN_BATCH(ca)) {
 		nr -= ca->alloc_heap.data[0].nr;
-		heap_pop(&ca->alloc_heap, e, -bucket_alloc_cmp);
+		heap_pop(&ca->alloc_heap, e, -bucket_alloc_cmp, NULL);
 	}
 
 	up_read(&ca->bucket_lock);
@@ -633,7 +635,7 @@ static void find_reclaimable_buckets_fifo(struct bch_fs *c, struct bch_dev *ca)
 		if (bch2_can_invalidate_bucket(ca, b, m)) {
 			struct alloc_heap_entry e = { .bucket = b, .nr = 1, };
 
-			heap_add(&ca->alloc_heap, e, bucket_alloc_cmp);
+			heap_add(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
 			if (heap_full(&ca->alloc_heap))
 				break;
 		}
@@ -660,7 +662,7 @@ static void find_reclaimable_buckets_random(struct bch_fs *c, struct bch_dev *ca)
 		if (bch2_can_invalidate_bucket(ca, b, m)) {
 			struct alloc_heap_entry e = { .bucket = b, .nr = 1, };
 
-			heap_add(&ca->alloc_heap, e, bucket_alloc_cmp);
+			heap_add(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
 			if (heap_full(&ca->alloc_heap))
 				break;
 		}
@@ -698,7 +700,7 @@ static size_t find_reclaimable_buckets(struct bch_fs *c, struct bch_dev *ca)
 			break;
 	}
 
-	heap_resort(&ca->alloc_heap, bucket_alloc_cmp);
+	heap_resort(&ca->alloc_heap, bucket_alloc_cmp, NULL);
 
 	for (i = 0; i < ca->alloc_heap.used; i++)
 		nr += ca->alloc_heap.data[i].nr;
@@ -719,7 +721,7 @@ static inline long next_alloc_bucket(struct bch_dev *ca)
 			return b;
 		}
 
-		heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp);
+		heap_pop(&ca->alloc_heap, e, bucket_alloc_cmp, NULL);
 	}
 
 	return -1;
...
@@ -35,7 +35,7 @@ void bch2_btree_node_iter_large_push(struct btree_node_iter_large *iter,
 			__btree_node_key_to_offset(b, end)
 		});
 
-		__heap_add(iter, n, btree_node_iter_cmp_heap);
+		__heap_add(iter, n, btree_node_iter_cmp_heap, NULL);
 	}
 }
 
@@ -48,9 +48,9 @@ void bch2_btree_node_iter_large_advance(struct btree_node_iter_large *iter,
 	EBUG_ON(iter->data->k > iter->data->end);
 
 	if (iter->data->k == iter->data->end)
-		heap_del(iter, 0, btree_node_iter_cmp_heap);
+		heap_del(iter, 0, btree_node_iter_cmp_heap, NULL);
 	else
-		heap_sift_down(iter, 0, btree_node_iter_cmp_heap);
+		heap_sift_down(iter, 0, btree_node_iter_cmp_heap, NULL);
 }
 
 static void verify_no_dups(struct btree *b,
...
@@ -22,7 +22,7 @@ void bch2_io_timer_add(struct io_clock *clock, struct io_timer *timer)
 		if (clock->timers.data[i] == timer)
 			goto out;
 
-	BUG_ON(!heap_add(&clock->timers, timer, io_timer_cmp));
+	BUG_ON(!heap_add(&clock->timers, timer, io_timer_cmp, NULL));
 out:
 	spin_unlock(&clock->timer_lock);
 }
 
@@ -35,7 +35,7 @@ void bch2_io_timer_del(struct io_clock *clock, struct io_timer *timer)
 	for (i = 0; i < clock->timers.used; i++)
 		if (clock->timers.data[i] == timer) {
-			heap_del(&clock->timers, i, io_timer_cmp);
+			heap_del(&clock->timers, i, io_timer_cmp, NULL);
 			break;
 		}
 
@@ -128,7 +128,7 @@ static struct io_timer *get_expired_timer(struct io_clock *clock,
 	if (clock->timers.used &&
 	    time_after_eq(now, clock->timers.data[0]->expire))
-		heap_pop(&clock->timers, ret, io_timer_cmp);
+		heap_pop(&clock->timers, ret, io_timer_cmp, NULL);
 
 	spin_unlock(&clock->timer_lock);
...
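
bch2_io_timer_del() above still locates the timer with a linear scan before calling heap_del(), and passes NULL because struct io_timer carries no index field. For comparison, a hypothetical backpointer-based variant could skip the scan. This sketch is not part of the commit; it assumes struct io_timer gained a size_t heap_idx field and that clock->timers is a HEAP(struct io_timer *), spelled io_timer_heap here:

static void io_timer_set_backpointer(io_timer_heap *h, size_t i)
{
	h->data[i]->heap_idx = i;	/* record the timer's slot */
}

static void io_timer_del_by_handle(struct io_clock *clock,
				   struct io_timer *timer)
{
	spin_lock(&clock->timer_lock);
	/* the stored index replaces the linear scan over clock->timers */
	if (timer->heap_idx < clock->timers.used &&
	    clock->timers.data[timer->heap_idx] == timer)
		heap_del(&clock->timers, timer->heap_idx,
			 io_timer_cmp, io_timer_set_backpointer);
	spin_unlock(&clock->timer_lock);
}

For the stored index to be valid, bch2_io_timer_add() would also have to pass the hook instead of NULL when inserting.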
@@ -88,7 +88,7 @@ struct btree_nr_keys bch2_key_sort_fix_overlapping(struct bset *dst,
 
 	memset(&nr, 0, sizeof(nr));
 
-	heap_resort(iter, key_sort_cmp);
+	heap_resort(iter, key_sort_cmp, NULL);
 
 	while (!bch2_btree_node_iter_large_end(iter)) {
 		if (!should_drop_next_key(iter, b)) {
@@ -101,7 +101,7 @@ struct btree_nr_keys bch2_key_sort_fix_overlapping(struct bset *dst,
 		}
 
 		sort_key_next(iter, b, iter->data);
-		heap_sift_down(iter, 0, key_sort_cmp);
+		heap_sift_down(iter, 0, key_sort_cmp, NULL);
 	}
 
 	dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
@@ -841,7 +841,7 @@ static bool extent_i_save(struct btree *b, struct bkey_packed *dst,
 static inline void extent_sort_sift(struct btree_node_iter_large *iter,
 				    struct btree *b, size_t i)
 {
-	heap_sift_down(iter, i, extent_sort_cmp);
+	heap_sift_down(iter, i, extent_sort_cmp, NULL);
 }
 
 static inline void extent_sort_next(struct btree_node_iter_large *iter,
@@ -849,7 +849,7 @@ static inline void extent_sort_next(struct btree_node_iter_large *iter,
 				    struct btree_node_iter_set *i)
 {
 	sort_key_next(iter, b, i);
-	heap_sift_down(iter, i - iter->data, extent_sort_cmp);
+	heap_sift_down(iter, i - iter->data, extent_sort_cmp, NULL);
 }
 
 static void extent_sort_append(struct bch_fs *c,
@@ -897,7 +897,7 @@ struct btree_nr_keys bch2_extent_sort_fix_overlapping(struct bch_fs *c,
 
 	memset(&nr, 0, sizeof(nr));
 
-	heap_resort(iter, extent_sort_cmp);
+	heap_resort(iter, extent_sort_cmp, NULL);
 
 	while (!bch2_btree_node_iter_large_end(iter)) {
 		lk = __btree_node_offset_to_key(b, _l->k);
...
@@ -161,7 +161,7 @@ static void bch2_copygc(struct bch_fs *c, struct bch_dev *ca)
 				.sectors	= bucket_sectors_used(m),
 				.offset		= bucket_to_sector(ca, b),
 			};
-			heap_add_or_replace(h, e, -sectors_used_cmp);
+			heap_add_or_replace(h, e, -sectors_used_cmp, NULL);
 		}
 	up_read(&ca->bucket_lock);
 	up_read(&c->gc_lock);
@@ -170,7 +170,7 @@ static void bch2_copygc(struct bch_fs *c, struct bch_dev *ca)
 		sectors_to_move += i->sectors;
 
 	while (sectors_to_move > COPYGC_SECTORS_PER_ITER(ca)) {
-		BUG_ON(!heap_pop(h, e, -sectors_used_cmp));
+		BUG_ON(!heap_pop(h, e, -sectors_used_cmp, NULL));
 		sectors_to_move -= e.sectors;
 	}
...
@@ -127,7 +127,19 @@ do {								\
 	(heap)->data = NULL;					\
 } while (0)
 
-#define heap_swap(h, i, j)	swap((h)->data[i], (h)->data[j])
+#define heap_set_backpointer(h, i, _fn)				\
+do {								\
+	void (*fn)(typeof(h), size_t) = _fn;			\
+	if (fn)							\
+		fn(h, i);					\
+} while (0)
+
+#define heap_swap(h, i, j, set_backpointer)			\
+do {								\
+	swap((h)->data[i], (h)->data[j]);			\
+	heap_set_backpointer(h, i, set_backpointer);		\
+	heap_set_backpointer(h, j, set_backpointer);		\
+} while (0)
 
 #define heap_peek(h)						\
 ({								\
@@ -137,7 +149,7 @@ do {								\
 
 #define heap_full(h)	((h)->used == (h)->size)
 
-#define heap_sift_down(h, i, cmp)				\
+#define heap_sift_down(h, i, cmp, set_backpointer)		\
 do {								\
 	size_t _c, _j = i;					\
 								\
@@ -149,72 +161,75 @@ do {								\
 								\
 		if (cmp(h, (h)->data[_c], (h)->data[_j]) >= 0)	\
 			break;					\
-		heap_swap(h, _c, _j);				\
+		heap_swap(h, _c, _j, set_backpointer);		\
 	}							\
 } while (0)
 
-#define heap_sift_up(h, i, cmp)					\
+#define heap_sift_up(h, i, cmp, set_backpointer)		\
 do {								\
 	while (i) {						\
 		size_t p = (i - 1) / 2;				\
 		if (cmp(h, (h)->data[i], (h)->data[p]) >= 0)	\
 			break;					\
-		heap_swap(h, i, p);				\
+		heap_swap(h, i, p, set_backpointer);		\
 		i = p;						\
 	}							\
 } while (0)
 
-#define __heap_add(h, d, cmp)					\
-do {								\
+#define __heap_add(h, d, cmp, set_backpointer)			\
+({								\
 	size_t _i = (h)->used++;				\
 	(h)->data[_i] = d;					\
+	heap_set_backpointer(h, _i, set_backpointer);		\
 								\
-	heap_sift_up(h, _i, cmp);				\
-} while (0)
+	heap_sift_up(h, _i, cmp, set_backpointer);		\
+	_i;							\
+})
 
-#define heap_add(h, d, cmp)					\
+#define heap_add(h, d, cmp, set_backpointer)			\
 ({								\
 	bool _r = !heap_full(h);				\
 	if (_r)							\
-		__heap_add(h, d, cmp);				\
+		__heap_add(h, d, cmp, set_backpointer);		\
 	_r;							\
 })
 
-#define heap_add_or_replace(h, new, cmp)			\
+#define heap_add_or_replace(h, new, cmp, set_backpointer)	\
 do {								\
-	if (!heap_add(h, new, cmp) &&				\
+	if (!heap_add(h, new, cmp, set_backpointer) &&		\
 	    cmp(h, new, heap_peek(h)) >= 0) {			\
 		(h)->data[0] = new;				\
-		heap_sift_down(h, 0, cmp);			\
+		heap_set_backpointer(h, 0, set_backpointer);	\
+		heap_sift_down(h, 0, cmp, set_backpointer);	\
 	}							\
 } while (0)
 
-#define heap_del(h, i, cmp)					\
+#define heap_del(h, i, cmp, set_backpointer)			\
 do {								\
 	size_t _i = (i);					\
 								\
 	BUG_ON(_i >= (h)->used);				\
 	(h)->used--;						\
-	heap_swap(h, _i, (h)->used);				\
-	heap_sift_up(h, _i, cmp);				\
-	heap_sift_down(h, _i, cmp);				\
+	heap_swap(h, _i, (h)->used, set_backpointer);		\
+	heap_sift_up(h, _i, cmp, set_backpointer);		\
+	heap_sift_down(h, _i, cmp, set_backpointer);		\
 } while (0)
 
-#define heap_pop(h, d, cmp)					\
+#define heap_pop(h, d, cmp, set_backpointer)			\
 ({								\
 	bool _r = (h)->used;					\
 	if (_r) {						\
 		(d) = (h)->data[0];				\
-		heap_del(h, 0, cmp);				\
+		heap_del(h, 0, cmp, set_backpointer);		\
 	}							\
 	_r;							\
 })
 
-#define heap_resort(heap, cmp)					\
+#define heap_resort(heap, cmp, set_backpointer)			\
 do {								\
 	ssize_t _i;						\
 	for (_i = (ssize_t) (heap)->used / 2 - 1; _i >= 0; --_i)	\
-		heap_sift_down(heap, _i, cmp);			\
+		heap_sift_down(heap, _i, cmp, set_backpointer);	\
 } while (0)
 
 #define ANYSINT_MAX(t)						\
...
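
With the backpointer kept current by every heap_swap(), an element can be removed by handle in O(log n) instead of being searched for first. A sketch continuing the hypothetical job example from above:

/* cancel a queued job directly: its heap_idx is always current */
static void job_cancel(job_heap *h, struct job *j)
{
	heap_del(h, j->heap_idx, job_cmp, job_set_backpointer);
}

/* pop jobs in deadline order; the hook must be threaded through here too */
static struct job *job_next(job_heap *h)
{
	struct job *j;

	return heap_pop(h, j, job_cmp, job_set_backpointer) ? j : NULL;
}

Note that heap_del() swaps the victim with the last element and then sifts in both directions, so the hook fires for every slot that changes and no stale index survives a removal.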