Commit 691f2cba authored by Kent Overstreet

bcachefs: btree cache counters should be size_t

32 bits won't overflow any time soon, but size_t is the correct type for
counting objects in memory.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent ad5dbe3c
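As context for the diff below, here is an illustrative sketch (not part of the commit; `demo_cache` and its helpers are hypothetical stand-ins for `struct btree_cache` and its accessors) of the conversion pattern: plain counters move from `unsigned` to `size_t`, and the dirty counter moves from `atomic_t`/`atomic_read()` to `atomic_long_t`/`atomic_long_read()`, so every object count is machine-word sized and printed with `%zu`.

#include <linux/atomic.h>
#include <linux/types.h>

/* Hypothetical stand-in for the counters being converted in struct btree_cache. */
struct demo_cache {
	size_t		nr_used;	/* was: unsigned used */
	atomic_long_t	nr_dirty;	/* was: atomic_t dirty */
};

static inline void demo_set_dirty(struct demo_cache *dc)
{
	atomic_long_inc(&dc->nr_dirty);	/* was: atomic_inc(&dc->dirty) */
}

static inline bool demo_mostly_dirty(struct demo_cache *dc)
{
	/* size_t/long arithmetic throughout; printed with %zu, not %u */
	return atomic_long_read(&dc->nr_dirty) * 2 > dc->nr_used;
}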
@@ -32,24 +32,24 @@ const char * const bch2_btree_node_flags[] = {
 void bch2_recalc_btree_reserve(struct bch_fs *c)
 {
-	unsigned i, reserve = 16;
+	unsigned reserve = 16;
 	if (!c->btree_roots_known[0].b)
 		reserve += 8;
-	for (i = 0; i < btree_id_nr_alive(c); i++) {
+	for (unsigned i = 0; i < btree_id_nr_alive(c); i++) {
 		struct btree_root *r = bch2_btree_id_root(c, i);
 		if (r->b)
 			reserve += min_t(unsigned, 1, r->b->c.level) * 8;
 	}
-	c->btree_cache.reserve = reserve;
+	c->btree_cache.nr_reserve = reserve;
 }
-static inline unsigned btree_cache_can_free(struct btree_cache *bc)
+static inline size_t btree_cache_can_free(struct btree_cache *bc)
 {
-	return max_t(int, 0, bc->used - bc->reserve);
+	return max_t(int, 0, bc->nr_used - bc->nr_reserve);
 }
 static void btree_node_to_freedlist(struct btree_cache *bc, struct btree *b)
@@ -87,7 +87,7 @@ static void btree_node_data_free(struct bch_fs *c, struct btree *b)
 #endif
 	b->aux_data = NULL;
-	bc->used--;
+	bc->nr_used--;
 	btree_node_to_freedlist(bc, b);
 }
@@ -167,7 +167,7 @@ struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *c)
 	bch2_btree_lock_init(&b->c, 0);
-	bc->used++;
+	bc->nr_used++;
 	list_add(&b->list, &bc->freeable);
 	return b;
 }
@@ -194,7 +194,7 @@ void bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b)
 	b->hash_val = 0;
 	if (b->c.btree_id < BTREE_ID_NR)
-		--bc->used_by_btree[b->c.btree_id];
+		--bc->nr_by_btree[b->c.btree_id];
 }
 int __bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b)
@@ -205,7 +205,7 @@ int __bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b)
 	int ret = rhashtable_lookup_insert_fast(&bc->table, &b->hash,
 						bch_btree_cache_params);
 	if (!ret && b->c.btree_id < BTREE_ID_NR)
-		bc->used_by_btree[b->c.btree_id]++;
+		bc->nr_by_btree[b->c.btree_id]++;
 	return ret;
 }
@@ -401,8 +401,8 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
 	unsigned long touched = 0;
 	unsigned i, flags;
 	unsigned long ret = SHRINK_STOP;
-	bool trigger_writes = atomic_read(&bc->dirty) + nr >=
-		bc->used * 3 / 4;
+	bool trigger_writes = atomic_long_read(&bc->nr_dirty) + nr >=
+		bc->nr_used * 3 / 4;
 	if (bch2_btree_shrinker_disabled)
 		return SHRINK_STOP;
@@ -439,7 +439,7 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
 			six_unlock_write(&b->c.lock);
 			six_unlock_intent(&b->c.lock);
 			freed++;
-			bc->freed++;
+			bc->nr_freed++;
 		}
 	}
 restart:
@@ -453,7 +453,7 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
 		} else if (!btree_node_reclaim(c, b, true)) {
 			freed++;
 			btree_node_data_free(c, b);
-			bc->freed++;
+			bc->nr_freed++;
 			bch2_btree_node_hash_remove(bc, b);
 			six_unlock_write(&b->c.lock);
@@ -539,7 +539,7 @@ void bch2_fs_btree_cache_exit(struct bch_fs *c)
 	}
 	BUG_ON(!bch2_journal_error(&c->journal) &&
-	       atomic_read(&c->btree_cache.dirty));
+	       atomic_long_read(&c->btree_cache.nr_dirty));
 	list_splice(&bc->freed_pcpu, &bc->freed_nonpcpu);
@@ -572,7 +572,7 @@ int bch2_fs_btree_cache_init(struct bch_fs *c)
 	bch2_recalc_btree_reserve(c);
-	for (i = 0; i < bc->reserve; i++)
+	for (i = 0; i < bc->nr_reserve; i++)
 		if (!__bch2_btree_node_mem_alloc(c))
 			goto err;
@@ -739,7 +739,7 @@ struct btree *bch2_btree_node_mem_alloc(struct btree_trans *trans, bool pcpu_rea
 	}
 	mutex_lock(&bc->lock);
-	bc->used++;
+	bc->nr_used++;
 got_mem:
 	mutex_unlock(&bc->lock);
@@ -1353,11 +1353,11 @@ void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c, const struc
 }
 static void prt_btree_cache_line(struct printbuf *out, const struct bch_fs *c,
-				 const char *label, unsigned nr)
+				 const char *label, size_t nr)
 {
 	prt_printf(out, "%s\t", label);
 	prt_human_readable_u64(out, nr * c->opts.btree_node_size);
-	prt_printf(out, " (%u)\n", nr);
+	prt_printf(out, " (%zu)\n", nr);
 }
 static const char * const bch2_btree_cache_not_freed_reasons_strs[] = {
@@ -1374,16 +1374,16 @@ void bch2_btree_cache_to_text(struct printbuf *out, const struct btree_cache *bc
 	if (!out->nr_tabstops)
 		printbuf_tabstop_push(out, 32);
-	prt_btree_cache_line(out, c, "total:", bc->used);
-	prt_btree_cache_line(out, c, "nr dirty:", atomic_read(&bc->dirty));
+	prt_btree_cache_line(out, c, "total:", bc->nr_used);
+	prt_btree_cache_line(out, c, "nr dirty:", atomic_long_read(&bc->nr_dirty));
 	prt_printf(out, "cannibalize lock:\t%p\n", bc->alloc_lock);
 	prt_newline(out);
-	for (unsigned i = 0; i < ARRAY_SIZE(bc->used_by_btree); i++)
-		prt_btree_cache_line(out, c, bch2_btree_id_str(i), bc->used_by_btree[i]);
+	for (unsigned i = 0; i < ARRAY_SIZE(bc->nr_by_btree); i++)
+		prt_btree_cache_line(out, c, bch2_btree_id_str(i), bc->nr_by_btree[i]);
 	prt_newline(out);
-	prt_printf(out, "freed:\t%u\n", bc->freed);
+	prt_printf(out, "freed:\t%zu\n", bc->nr_freed);
 	prt_printf(out, "not freed:\n");
 	for (unsigned i = 0; i < ARRAY_SIZE(bc->not_freed); i++)
@@ -2031,7 +2031,7 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
 do_write:
 	BUG_ON((type == BTREE_WRITE_initial) != (b->written == 0));
-	atomic_dec(&c->btree_cache.dirty);
+	atomic_long_dec(&c->btree_cache.nr_dirty);
 	BUG_ON(btree_node_fake(b));
 	BUG_ON((b->will_make_reachable != 0) != !b->written);
@@ -18,13 +18,13 @@ struct btree_node_read_all;
 static inline void set_btree_node_dirty_acct(struct bch_fs *c, struct btree *b)
 {
 	if (!test_and_set_bit(BTREE_NODE_dirty, &b->flags))
-		atomic_inc(&c->btree_cache.dirty);
+		atomic_long_inc(&c->btree_cache.nr_dirty);
 }
 static inline void clear_btree_node_dirty_acct(struct bch_fs *c, struct btree *b)
 {
 	if (test_and_clear_bit(BTREE_NODE_dirty, &b->flags))
-		atomic_dec(&c->btree_cache.dirty);
+		atomic_long_dec(&c->btree_cache.nr_dirty);
 }
 static inline unsigned btree_ptr_sectors_written(struct bkey_s_c k)
@@ -180,15 +180,16 @@ struct btree_cache {
 	struct list_head	freed_nonpcpu;
 	/* Number of elements in live + freeable lists */
-	unsigned		used;
-	unsigned		reserve;
-	unsigned		freed;
-	atomic_t		dirty;
+	size_t			nr_used;
+	size_t			nr_reserve;
+	size_t			nr_by_btree[BTREE_ID_NR];
+	atomic_long_t		nr_dirty;
+	/* shrinker stats */
+	size_t			nr_freed;
 	u64			not_freed[BCH_BTREE_CACHE_NOT_FREED_REASONS_NR];
 	struct shrinker		*shrink;
-	unsigned		used_by_btree[BTREE_ID_NR];
 	/*
 	 * If we need to allocate memory for a new btree node and that
 	 * allocation fails, we can cannibalize another node in the btree cache
@@ -681,7 +681,7 @@ static int __bch2_journal_reclaim(struct journal *j, bool direct, bool kicked)
 	if (j->watermark != BCH_WATERMARK_stripe)
 		min_nr = 1;
-	if (atomic_read(&c->btree_cache.dirty) * 2 > c->btree_cache.used)
+	if (atomic_long_read(&c->btree_cache.nr_dirty) * 2 > c->btree_cache.nr_used)
 		min_nr = 1;
 	min_key_cache = min(bch2_nr_btree_keys_need_flush(c), (size_t) 128);
@@ -689,8 +689,8 @@ static int __bch2_journal_reclaim(struct journal *j, bool direct, bool kicked)
 	trace_and_count(c, journal_reclaim_start, c,
 			direct, kicked,
 			min_nr, min_key_cache,
-			atomic_read(&c->btree_cache.dirty),
-			c->btree_cache.used,
+			atomic_long_read(&c->btree_cache.nr_dirty),
+			c->btree_cache.nr_used,
 			atomic_long_read(&c->btree_key_cache.nr_dirty),
 			atomic_long_read(&c->btree_key_cache.nr_keys));
@@ -370,7 +370,7 @@ void bch2_fs_read_only(struct bch_fs *c)
 	    test_bit(BCH_FS_clean_shutdown, &c->flags) &&
 	    c->recovery_pass_done >= BCH_RECOVERY_PASS_journal_replay) {
 		BUG_ON(c->journal.last_empty_seq != journal_cur_seq(&c->journal));
-		BUG_ON(atomic_read(&c->btree_cache.dirty));
+		BUG_ON(atomic_long_read(&c->btree_cache.nr_dirty));
 		BUG_ON(atomic_long_read(&c->btree_key_cache.nr_dirty));
 		BUG_ON(c->btree_write_buffer.inc.keys.nr);
 		BUG_ON(c->btree_write_buffer.flushing.keys.nr);