Commit 2341ccd1 authored by Josef Bacik, committed by David Sterba

btrfs: rework wake_all_tickets

Now that we no longer partially fill tickets, we need to rework
wake_all_tickets() to call btrfs_try_granting_tickets() in order to see
if any subsequent tickets can be satisfied.  If our tickets_id changes we
know something happened and we can keep flushing.

Also, if we find a ticket that is smaller than the first ticket in our
queue, we want to retry the flushing loop in case may_commit_transaction()
decides we could satisfy the smaller ticket by committing the transaction.

Rename this to maybe_fail_all_tickets() while we're at it, to better
reflect what the function is actually doing.
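
For illustration, below is a minimal, self-contained userspace sketch of the
behaviour described above.  The struct queue, the avail budget, try_granting()
and maybe_fail_all() names are simplified stand-ins invented for this example,
not the btrfs structures or APIs; the real implementation is in the diff
further down.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct ticket {
		uint64_t bytes;		/* space this waiter still needs */
		struct ticket *next;
		int error;		/* stand-in for the real -ENOSPC error field */
	};

	struct queue {
		struct ticket *head;	/* waiters are served strictly in order */
		uint64_t avail;		/* free space, stand-in for the real accounting */
		uint64_t tickets_id;	/* bumped every time a ticket is granted */
	};

	/* Stand-in for btrfs_try_granting_tickets(): grant in-order waiters that fit. */
	static void try_granting(struct queue *q)
	{
		while (q->head && q->head->bytes <= q->avail) {
			q->avail -= q->head->bytes;
			q->head = q->head->next;
			q->tickets_id++;
		}
	}

	/*
	 * Returns true if the caller should run another flush cycle instead of
	 * giving up: either granting made progress (tickets_id moved), or a later
	 * ticket is smaller than the first one and might still be satisfiable.
	 */
	static bool maybe_fail_all(struct queue *q)
	{
		uint64_t start_id = q->tickets_id;
		uint64_t first_bytes = 0;

		while (q->head && q->tickets_id == start_id) {
			struct ticket *t = q->head;

			if (first_bytes == 0)
				first_bytes = t->bytes;
			else if (t->bytes < first_bytes)
				return true;	/* retry flushing for the smaller ticket */

			/* Fail the head ticket... */
			t->error = -1;
			q->head = t->next;

			/* ...then see if the next waiter can be granted now. */
			try_granting(q);
		}

		return q->tickets_id != start_id;
	}

	int main(void)
	{
		/* A huge ticket blocks two small ones that would fit in 64 bytes. */
		struct ticket small2 = { .bytes = 16 };
		struct ticket small1 = { .bytes = 32, .next = &small2 };
		struct ticket big    = { .bytes = 1ULL << 20, .next = &small1 };
		struct queue q = { .head = &big, .avail = 64 };

		printf("keep flushing: %d\n", maybe_fail_all(&q));	/* prints 1 */
		return 0;
	}

In this sketch, failing the oversized head ticket lets the stand-in
try_granting() satisfy the two smaller waiters, so the function reports
progress and its caller would restart the flush cycle instead of failing
every waiter.
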
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 18fa2284
@@ -679,19 +679,61 @@ static inline int need_do_async_reclaim(struct btrfs_fs_info *fs_info,
 		!test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
 }
 
-static bool wake_all_tickets(struct list_head *head)
+/*
+ * maybe_fail_all_tickets - we've exhausted our flushing, start failing tickets
+ * @fs_info - fs_info for this fs
+ * @space_info - the space info we were flushing
+ *
+ * We call this when we've exhausted our flushing ability and haven't made
+ * progress in satisfying tickets. The reservation code handles tickets in
+ * order, so if there is a large ticket first and then smaller ones we could
+ * very well satisfy the smaller tickets. This will attempt to wake up any
+ * tickets in the list to catch this case.
+ *
+ * This function returns true if it was able to make progress by clearing out
+ * other tickets, or if it stumbles across a ticket that was smaller than the
+ * first ticket.
+ */
+static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info,
+				   struct btrfs_space_info *space_info)
 {
 	struct reserve_ticket *ticket;
+	u64 tickets_id = space_info->tickets_id;
+	u64 first_ticket_bytes = 0;
+
+	while (!list_empty(&space_info->tickets) &&
+	       tickets_id == space_info->tickets_id) {
+		ticket = list_first_entry(&space_info->tickets,
+					  struct reserve_ticket, list);
+
+		/*
+		 * may_commit_transaction will avoid committing the transaction
+		 * if it doesn't feel like the space reclaimed by the commit
+		 * would result in the ticket succeeding. However if we have a
+		 * smaller ticket in the queue it may be small enough to be
+		 * satisfied by committing the transaction, so if any
+		 * subsequent ticket is smaller than the first ticket go ahead
+		 * and send us back for another loop through the enospc flushing
+		 * code.
+		 */
+		if (first_ticket_bytes == 0)
+			first_ticket_bytes = ticket->bytes;
+		else if (first_ticket_bytes > ticket->bytes)
+			return true;
 
-	while (!list_empty(head)) {
-		ticket = list_first_entry(head, struct reserve_ticket, list);
 		list_del_init(&ticket->list);
 		ticket->error = -ENOSPC;
 		wake_up(&ticket->wait);
-		if (ticket->bytes != ticket->orig_bytes)
-			return true;
+
+		/*
+		 * We're just throwing tickets away, so more flushing may not
+		 * trip over btrfs_try_granting_tickets, so we need to call it
+		 * here to see if we can make progress with the next ticket in
+		 * the list.
+		 */
+		btrfs_try_granting_tickets(fs_info, space_info);
 	}
-	return false;
+	return (tickets_id != space_info->tickets_id);
 }
 
 /*

@@ -759,7 +801,7 @@ static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
 		if (flush_state > COMMIT_TRANS) {
 			commit_cycles++;
 			if (commit_cycles > 2) {
-				if (wake_all_tickets(&space_info->tickets)) {
+				if (maybe_fail_all_tickets(fs_info, space_info)) {
 					flush_state = FLUSH_DELAYED_ITEMS_NR;
 					commit_cycles--;
 				} else {