Commit 46bf2e9c authored by Kent Overstreet

bcachefs: Fix excess transaction restarts in __bchfs_fallocate()

drop_locks_do() should not be used in a fastpath without first trying
_do in nonblocking mode - the unlock and relock will cause excessive
transaction restarts and can potentially livelock with other threads
contending for the same locks.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 1a503904
@@ -819,6 +819,11 @@ __bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
 #define for_each_btree_key_continue_norestart(_iter, _flags, _k, _ret)		\
 	for_each_btree_key_upto_continue_norestart(_iter, SPOS_MAX, _flags, _k, _ret)
 
+/*
+ * This should not be used in a fastpath, without first trying _do in
+ * nonblocking mode - it will cause excessive transaction restarts and
+ * potentially livelocking:
+ */
 #define drop_locks_do(_trans, _do)						\
 ({										\
 	bch2_trans_unlock(_trans);						\
...
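To make the new comment concrete, here is a minimal userspace sketch of the intended calling pattern, using pthread mutexes in place of btree node locks. All names here are hypothetical, not bcachefs API: 'outer' stands in for the locks a btree transaction holds, 'inner' for the lock the operation passed as _do needs to take.

/* Hypothetical userspace analogue - not bcachefs code. */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER;

static bool work_nonblocking(void)
{
	/* Fail instead of sleeping if the inner lock is contended: */
	if (pthread_mutex_trylock(&inner))
		return false;
	/* ... do the work ... */
	pthread_mutex_unlock(&inner);
	return true;
}

static void fastpath(void)
{
	pthread_mutex_lock(&outer);

	if (!work_nonblocking()) {
		/*
		 * Slowpath, the analogue of drop_locks_do(): release
		 * the outer lock before blocking on the inner one, so
		 * other threads that need 'outer' aren't stalled:
		 */
		pthread_mutex_unlock(&outer);
		pthread_mutex_lock(&inner);
		/* ... do the work ... */
		pthread_mutex_unlock(&inner);
		pthread_mutex_lock(&outer);
	}

	/* ... rest of the fastpath, 'outer' still held ... */
	pthread_mutex_unlock(&outer);
}

Falling back to the slowpath only on contention keeps the common case free of the unlock/relock cycle that was triggering transaction restarts.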
...@@ -309,39 +309,49 @@ void bch2_mark_pagecache_unallocated(struct bch_inode_info *inode, ...@@ -309,39 +309,49 @@ void bch2_mark_pagecache_unallocated(struct bch_inode_info *inode,
} }
} }
void bch2_mark_pagecache_reserved(struct bch_inode_info *inode, int bch2_mark_pagecache_reserved(struct bch_inode_info *inode,
u64 start, u64 end) u64 *start, u64 end,
bool nonblocking)
{ {
struct bch_fs *c = inode->v.i_sb->s_fs_info; struct bch_fs *c = inode->v.i_sb->s_fs_info;
pgoff_t index = start >> PAGE_SECTORS_SHIFT; pgoff_t index = *start >> PAGE_SECTORS_SHIFT;
pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT; pgoff_t end_index = (end - 1) >> PAGE_SECTORS_SHIFT;
struct folio_batch fbatch; struct folio_batch fbatch;
s64 i_sectors_delta = 0; s64 i_sectors_delta = 0;
unsigned i, j; int ret = 0;
if (end <= start) if (end <= *start)
return; return 0;
folio_batch_init(&fbatch); folio_batch_init(&fbatch);
while (filemap_get_folios(inode->v.i_mapping, while (filemap_get_folios(inode->v.i_mapping,
&index, end_index, &fbatch)) { &index, end_index, &fbatch)) {
for (i = 0; i < folio_batch_count(&fbatch); i++) { for (unsigned i = 0; i < folio_batch_count(&fbatch); i++) {
struct folio *folio = fbatch.folios[i]; struct folio *folio = fbatch.folios[i];
if (!nonblocking)
folio_lock(folio);
else if (!folio_trylock(folio)) {
folio_batch_release(&fbatch);
ret = -EAGAIN;
break;
}
u64 folio_start = folio_sector(folio); u64 folio_start = folio_sector(folio);
u64 folio_end = folio_end_sector(folio); u64 folio_end = folio_end_sector(folio);
unsigned folio_offset = max(start, folio_start) - folio_start;
unsigned folio_len = min(end, folio_end) - folio_offset - folio_start;
struct bch_folio *s;
BUG_ON(end <= folio_start); BUG_ON(end <= folio_start);
folio_lock(folio); *start = min(end, folio_end);
s = bch2_folio(folio);
struct bch_folio *s = bch2_folio(folio);
if (s) { if (s) {
unsigned folio_offset = max(*start, folio_start) - folio_start;
unsigned folio_len = min(end, folio_end) - folio_offset - folio_start;
spin_lock(&s->lock); spin_lock(&s->lock);
for (j = folio_offset; j < folio_offset + folio_len; j++) { for (unsigned j = folio_offset; j < folio_offset + folio_len; j++) {
i_sectors_delta -= s->s[j].state == SECTOR_dirty; i_sectors_delta -= s->s[j].state == SECTOR_dirty;
bch2_folio_sector_set(folio, s, j, bch2_folio_sector_set(folio, s, j,
folio_sector_reserve(s->s[j].state)); folio_sector_reserve(s->s[j].state));
...@@ -356,6 +366,7 @@ void bch2_mark_pagecache_reserved(struct bch_inode_info *inode, ...@@ -356,6 +366,7 @@ void bch2_mark_pagecache_reserved(struct bch_inode_info *inode,
} }
bch2_i_sectors_acct(c, inode, NULL, i_sectors_delta); bch2_i_sectors_acct(c, inode, NULL, i_sectors_delta);
return ret;
} }
static inline unsigned sectors_to_reserve(struct bch_folio_sector *s, static inline unsigned sectors_to_reserve(struct bch_folio_sector *s,
......
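The function now takes 'u64 *start' so the nonblocking pass can record how far it got: a blocking retry then resumes at the first unprocessed folio instead of re-walking the whole range. A minimal sketch of that resumable-cursor shape, again as hypothetical userspace C with pthread mutexes in place of folio locks:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

#define NR_ITEMS 16

struct item {
	pthread_mutex_t lock;
};

/* GNU range initializer, in the spirit of kernel C: */
static struct item items[NR_ITEMS] = {
	[0 ... NR_ITEMS - 1] = { .lock = PTHREAD_MUTEX_INITIALIZER },
};

/*
 * Walk items in [*start, end), taking each item's lock.  In
 * nonblocking mode, contention aborts the walk with -EAGAIN - but
 * *start has already advanced past everything processed, so the
 * caller's blocking retry picks up exactly where this pass stopped.
 */
static int mark_range(uint64_t *start, uint64_t end, bool nonblocking)
{
	while (*start < end) {
		struct item *it = &items[*start];

		if (!nonblocking)
			pthread_mutex_lock(&it->lock);
		else if (pthread_mutex_trylock(&it->lock))
			return -EAGAIN;

		/* ... mark the item ... */

		(*start)++;		/* record progress for a retry */
		pthread_mutex_unlock(&it->lock);
	}
	return 0;
}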
...@@ -143,7 +143,7 @@ int bch2_folio_set(struct bch_fs *, subvol_inum, struct folio **, unsigned); ...@@ -143,7 +143,7 @@ int bch2_folio_set(struct bch_fs *, subvol_inum, struct folio **, unsigned);
void bch2_bio_page_state_set(struct bio *, struct bkey_s_c); void bch2_bio_page_state_set(struct bio *, struct bkey_s_c);
void bch2_mark_pagecache_unallocated(struct bch_inode_info *, u64, u64); void bch2_mark_pagecache_unallocated(struct bch_inode_info *, u64, u64);
void bch2_mark_pagecache_reserved(struct bch_inode_info *, u64, u64); int bch2_mark_pagecache_reserved(struct bch_inode_info *, u64 *, u64, bool);
int bch2_get_folio_disk_reservation(struct bch_fs *, int bch2_get_folio_disk_reservation(struct bch_fs *,
struct bch_inode_info *, struct bch_inode_info *,
......
...@@ -675,8 +675,11 @@ static int __bchfs_fallocate(struct bch_inode_info *inode, int mode, ...@@ -675,8 +675,11 @@ static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
bch2_i_sectors_acct(c, inode, &quota_res, i_sectors_delta); bch2_i_sectors_acct(c, inode, &quota_res, i_sectors_delta);
if (bch2_mark_pagecache_reserved(inode, &hole_start,
iter.pos.offset, true))
drop_locks_do(trans, drop_locks_do(trans,
(bch2_mark_pagecache_reserved(inode, hole_start, iter.pos.offset), 0)); bch2_mark_pagecache_reserved(inode, &hole_start,
iter.pos.offset, false));
bkey_err: bkey_err:
bch2_quota_reservation_put(c, inode, &quota_res); bch2_quota_reservation_put(c, inode, &quota_res);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
......
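Putting the two halves together, the new caller in __bchfs_fallocate() reduces to the shape below. This continues the hypothetical userspace sketch above, reusing its mark_range(), with 'trans_locks' standing in for the btree transaction's locks: one nonblocking attempt with all locks held, and only on contention an unlock, a blocking retry from the recorded cursor, and a relock.

static pthread_mutex_t trans_locks = PTHREAD_MUTEX_INITIALIZER;

static void fallocate_like(uint64_t start, uint64_t end)
{
	pthread_mutex_lock(&trans_locks);

	/* Fastpath: nonblocking attempt, no unlock/relock cycle: */
	if (mark_range(&start, end, true)) {
		/* Analogue of drop_locks_do() around the blocking retry: */
		pthread_mutex_unlock(&trans_locks);
		mark_range(&start, end, false);
		pthread_mutex_lock(&trans_locks);
	}

	pthread_mutex_unlock(&trans_locks);
}

Because mark_range() advanced 'start' during the nonblocking pass, the blocking retry only touches the items the first pass could not lock.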