// SPDX-License-Identifier: GPL-2.0

#include "misc.h"
#include "ctree.h"
#include "space-info.h"
#include "sysfs.h"
#include "volumes.h"
#include "free-space-cache.h"
#include "ordered-data.h"
#include "transaction.h"
#include "block-group.h"
#include "zoned.h"

/*
 * HOW DOES SPACE RESERVATION WORK
 *
 * If you want to know about delalloc specifically, there is a separate comment
 * for that with the delalloc code.  This comment is about how the whole system
 * works generally.
 *
 * BASIC CONCEPTS
 *
 *   1) space_info.  This is the ultimate arbiter of how much space we can use.
 *   There's a description of the bytes_ fields with the struct declaration,
 *   refer to that for specifics on each field.  Suffice it to say that for
 *   reservations we care about total_bytes - SUM(space_info->bytes_) when
 *   determining if there is space to make an allocation.  There is a space_info
 *   for METADATA, SYSTEM, and DATA areas.
 *
 *   2) block_rsv's.  These are basically buckets for every different type of
 *   metadata reservation we have.  You can see the comment in the block_rsv
 *   code on the rules for each type, but generally block_rsv->reserved is how
 *   much space is accounted for in space_info->bytes_may_use.
 *
 *   3) btrfs_calc*_size.  These are the worst case calculations we use, based
 *   on the number of items we will want to modify.  We have one for changing
 *   items, and one for inserting new items.  Generally we use these helpers to
 *   determine the size of the block reserves, and then use the actual bytes
 *   values to adjust the space_info counters.
 *
 * MAKING RESERVATIONS, THE NORMAL CASE
 *
 *   We call into either btrfs_reserve_data_bytes() or
 *   btrfs_reserve_metadata_bytes(), depending on which we're looking for, with
 *   num_bytes we want to reserve.
 *
 *   ->reserve
 *     space_info->bytes_may_use += num_bytes
 *
 *   ->extent allocation
 *     Call btrfs_add_reserved_bytes() which does
 *     space_info->bytes_may_use -= num_bytes
 *     space_info->bytes_reserved += extent_bytes
 *
 *   ->insert reference
 *     Call btrfs_update_block_group() which does
 *     space_info->bytes_reserved -= extent_bytes
 *     space_info->bytes_used += extent_bytes
 *
 * MAKING RESERVATIONS, FLUSHING NORMALLY (non-priority)
 *
 *   Assume we are unable to simply make the reservation because we do not have
 *   enough space
 *
 *   -> __reserve_bytes
 *     create a reserve_ticket with ->bytes set to our reservation, add it to
 *     the tail of space_info->tickets, kick async flush thread
 *
 *   ->handle_reserve_ticket
 *     wait on ticket->wait for ->bytes to be reduced to 0, or ->error to be set
 *     on the ticket.
 *
 *   -> btrfs_async_reclaim_metadata_space/btrfs_async_reclaim_data_space
 *     Flushes various things attempting to free up space.
 *
 *   -> btrfs_try_granting_tickets()
 *     This is called by anything that either subtracts space from
 *     space_info->bytes_may_use, ->bytes_pinned, etc, or adds to the
 *     space_info->total_bytes.  This loops through the ->priority_tickets and
 *     then the ->tickets list checking to see if the reservation can be
 *     completed.  If it can the space is added to space_info->bytes_may_use and
 *     the ticket is woken up.
 *
 *   -> ticket wakeup
 *     Check if ->bytes == 0, if it does we got our reservation and we can carry
 *     on, if not return the appropriate error (ENOSPC, but can be EINTR if we
 *     were interrupted.)
 *
 * MAKING RESERVATIONS, FLUSHING HIGH PRIORITY
 *
 *   Same as the above, except we add ourselves to the
 *   space_info->priority_tickets, and we do not use ticket->wait, we simply
 *   call flush_space() ourselves for the states that are safe for us to call
 *   without deadlocking and hope for the best.
 *
 * THE FLUSHING STATES
 *
 *   Generally speaking we will have two cases for each state, a "nice" state
 *   and an "ALL THE THINGS" state.  In btrfs we delay a lot of work in order to
 *   reduce the locking overhead on the various trees, and even to keep from
 *   doing any work at all in the case of delayed refs.  Each of these delayed
 *   things however hold reservations, and so letting them run allows us to
 *   reclaim space so we can make new reservations.
 *
 *   FLUSH_DELAYED_ITEMS
 *     Every inode has a delayed item to update the inode.  Take a simple write
 *     for example, we would update the inode item at write time to update the
 *     mtime, and then again at finish_ordered_io() time in order to update the
 *     isize or bytes.  We keep these delayed items to coalesce these operations
 *     into a single operation done on demand.  These are an easy way to reclaim
 *     metadata space.
 *
 *   FLUSH_DELALLOC
 *     Look at the delalloc comment to get an idea of how much space is reserved
 *     for delayed allocation.  We can reclaim some of this space simply by
 *     running delalloc, but usually we need to wait for ordered extents to
 *     reclaim the bulk of this space.
 *
 *   FLUSH_DELAYED_REFS
 *     We have a block reserve for the outstanding delayed refs space, and every
 *     delayed ref operation holds a reservation.  Running these is a quick way
 *     to reclaim space, but we want to hold this until the end because COW can
 *     churn a lot and we can avoid making some extent tree modifications if we
 *     are able to delay for as long as possible.
 *
 *   ALLOC_CHUNK
 *     We will skip this the first time through space reservation, because of
 *     overcommit and we don't want to have a lot of useless metadata space when
 *     our worst case reservations will likely never come true.
 *
 *   RUN_DELAYED_IPUTS
 *     If we're freeing inodes we're likely freeing checksums, file extent
 *     items, and extent tree items.  Loads of space could be freed up by these
 *     operations, however they won't be usable until the transaction commits.
 *
 *   COMMIT_TRANS
 *     This will commit the transaction.  Historically we had a lot of logic
 *     surrounding whether or not we'd commit the transaction, but this was born
 *     out of a pre-tickets era where we could end up committing the transaction
 *     thousands of times in a row without making progress.  Now thanks to our
 *     ticketing system we know if we're not making progress and can error
 *     everybody out after a few commits rather than burning the disk hoping for
 *     a different answer.
 *
 * OVERCOMMIT
 *
 *   Because we hold so many reservations for metadata we will allow you to
 *   reserve more space than is currently free in the currently allocated
 *   metadata space.  This only happens with metadata, data does not allow
 *   overcommitting.
 *
 *   You can see the current logic for when we allow overcommit in
 *   btrfs_can_overcommit(), but it only applies to unallocated space.  If there
 *   is no unallocated space to be had, all reservations are kept within the
 *   free space in the allocated metadata chunks.
 *
 *   Because of overcommitting, you generally want to use the
 *   btrfs_can_overcommit() logic for metadata allocations, as it does the right
 *   thing with or without extra unallocated space.
 */

u64 __pure btrfs_space_info_used(struct btrfs_space_info *s_info,
			  bool may_use_included)
{
	ASSERT(s_info);
	return s_info->bytes_used + s_info->bytes_reserved +
		s_info->bytes_pinned + s_info->bytes_readonly +
		s_info->bytes_zone_unusable +
		(may_use_included ? s_info->bytes_may_use : 0);
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	list_for_each_entry(found, head, list)
		found->full = 0;
}

/*
 * Block groups with more than this value (percent) of unusable space will be
 * scheduled for background reclaim.
 */
#define BTRFS_DEFAULT_ZONED_RECLAIM_THRESH			(75)

/*
 * Calculate chunk size depending on volume type (regular or zoned).
 */
static u64 calc_chunk_size(const struct btrfs_fs_info *fs_info, u64 flags)
{
	if (btrfs_is_zoned(fs_info))
		return fs_info->zone_size;

	ASSERT(flags & BTRFS_BLOCK_GROUP_TYPE_MASK);

	if (flags & BTRFS_BLOCK_GROUP_DATA)
		return BTRFS_MAX_DATA_CHUNK_SIZE;
	else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
		return SZ_32M;

	/* Handle BTRFS_BLOCK_GROUP_METADATA */
	if (fs_info->fs_devices->total_rw_bytes > 50ULL * SZ_1G)
		return SZ_1G;

	return SZ_256M;
}

/*
 * Update default chunk size.
 */
void btrfs_update_space_info_chunk_size(struct btrfs_space_info *space_info,
					u64 chunk_size)
{
	WRITE_ONCE(space_info->chunk_size, chunk_size);
}

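/*
 * Allocate a new space_info for the given block group type flags, initialize
 * its lists and locks, register it in sysfs and add it to the fs_info list.
 */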
static int create_space_info(struct btrfs_fs_info *info, u64 flags)
{
	struct btrfs_space_info *space_info;
	int i;
	int ret;

	space_info = kzalloc(sizeof(*space_info), GFP_NOFS);
	if (!space_info)
		return -ENOMEM;

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		INIT_LIST_HEAD(&space_info->block_groups[i]);
	init_rwsem(&space_info->groups_sem);
	spin_lock_init(&space_info->lock);
	space_info->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
	space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
	INIT_LIST_HEAD(&space_info->ro_bgs);
	INIT_LIST_HEAD(&space_info->tickets);
	INIT_LIST_HEAD(&space_info->priority_tickets);
	space_info->clamp = 1;
	btrfs_update_space_info_chunk_size(space_info, calc_chunk_size(info, flags));

	if (btrfs_is_zoned(info))
		space_info->bg_reclaim_threshold = BTRFS_DEFAULT_ZONED_RECLAIM_THRESH;

	ret = btrfs_sysfs_add_space_info_type(info, space_info);
	if (ret)
		return ret;

	list_add(&space_info->list, &info->space_info);
	if (flags & BTRFS_BLOCK_GROUP_DATA)
		info->data_sinfo = space_info;

	return ret;
}

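/*
 * Create the space_infos needed at mount time: SYSTEM, plus either a mixed
 * METADATA|DATA space_info or separate METADATA and DATA ones depending on
 * the MIXED_GROUPS incompat flag.
 */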
int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
{
	struct btrfs_super_block *disk_super;
	u64 features;
	u64 flags;
	int mixed = 0;
	int ret;

	disk_super = fs_info->super_copy;
	if (!btrfs_super_root(disk_super))
		return -EINVAL;

	features = btrfs_super_incompat_flags(disk_super);
	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	flags = BTRFS_BLOCK_GROUP_SYSTEM;
	ret = create_space_info(fs_info, flags);
	if (ret)
		goto out;

	if (mixed) {
		flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
		ret = create_space_info(fs_info, flags);
	} else {
		flags = BTRFS_BLOCK_GROUP_METADATA;
		ret = create_space_info(fs_info, flags);
		if (ret)
			goto out;

		flags = BTRFS_BLOCK_GROUP_DATA;
		ret = create_space_info(fs_info, flags);
	}
out:
	return ret;
}

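/*
 * Account a newly added block group in its matching space_info (total, used,
 * readonly and zone_unusable bytes), retry any waiting tickets and link the
 * group into the per-RAID-level list.
 */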
void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info,
				struct btrfs_block_group *block_group)
{
	struct btrfs_space_info *found;
	int factor, index;

	factor = btrfs_bg_type_to_factor(block_group->flags);

	found = btrfs_find_space_info(info, block_group->flags);
	ASSERT(found);
	spin_lock(&found->lock);
	found->total_bytes += block_group->length;
	if (block_group->zone_is_active)
		found->active_total_bytes += block_group->length;
	found->disk_total += block_group->length * factor;
	found->bytes_used += block_group->used;
	found->disk_used += block_group->used * factor;
	found->bytes_readonly += block_group->bytes_super;
	found->bytes_zone_unusable += block_group->zone_unusable;
	if (block_group->length > 0)
		found->full = 0;
	btrfs_try_granting_tickets(info, found);
	spin_unlock(&found->lock);

	block_group->space_info = found;

	index = btrfs_bg_flags_to_raid_index(block_group->flags);
	down_write(&found->groups_sem);
	list_add_tail(&block_group->list, &found->block_groups[index]);
	up_write(&found->groups_sem);
}

struct btrfs_space_info *btrfs_find_space_info(struct btrfs_fs_info *info,
					       u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

	list_for_each_entry(found, head, list) {
		if (found->flags & flags)
			return found;
	}
	return NULL;
}

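/*
 * Estimate how much of the currently unallocated space could still be used
 * for this space_info's profile.  The raw free_chunk_space is scaled down by
 * the RAID factor and then reduced further depending on how hard the caller
 * is allowed to flush, to keep overcommit conservative.
 */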
static u64 calc_available_free_space(struct btrfs_fs_info *fs_info,
			  struct btrfs_space_info *space_info,
			  enum btrfs_reserve_flush_enum flush)
{
	u64 profile;
	u64 avail;
	int factor;

	if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM)
		profile = btrfs_system_alloc_profile(fs_info);
	else
		profile = btrfs_metadata_alloc_profile(fs_info);

	avail = atomic64_read(&fs_info->free_chunk_space);

	/*
	 * If we have dup, raid1 or raid10 then only half of the free
	 * space is actually usable.  For raid56, the space info used
	 * doesn't include the parity drive, so we don't have to
	 * change the math
	 */
	factor = btrfs_bg_type_to_factor(profile);
	avail = div_u64(avail, factor);

	/*
	 * If we aren't flushing all things, let us overcommit up to
	 * 1/2 of the space. If we can flush, don't let us overcommit
	 * too much, let it overcommit up to 1/8 of the space.
	 */
	if (flush == BTRFS_RESERVE_FLUSH_ALL)
		avail >>= 3;
	else
		avail >>= 1;
	return avail;
}

static inline u64 writable_total_bytes(struct btrfs_fs_info *fs_info,
				       struct btrfs_space_info *space_info)
{
	/*
	 * On regular filesystem, all total_bytes are always writable. On zoned
	 * filesystem, there may be a limitation imposed by max_active_zones.
	 * For metadata allocation, we cannot finish an existing active block
	 * group to avoid a deadlock. Thus, we need to consider only the active
	 * groups to be writable for metadata space.
	 */
	if (!btrfs_is_zoned(fs_info) || (space_info->flags & BTRFS_BLOCK_GROUP_DATA))
		return space_info->total_bytes;

	return space_info->active_total_bytes;
}

int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
			 struct btrfs_space_info *space_info, u64 bytes,
			 enum btrfs_reserve_flush_enum flush)
{
	u64 avail;
	u64 used;

	/* Don't overcommit when in mixed mode */
	if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
		return 0;

	used = btrfs_space_info_used(space_info, true);
	if (btrfs_is_zoned(fs_info) && (space_info->flags & BTRFS_BLOCK_GROUP_METADATA))
		avail = 0;
	else
		avail = calc_available_free_space(fs_info, space_info, flush);

	if (used + bytes < writable_total_bytes(fs_info, space_info) + avail)
		return 1;
	return 0;
}

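/* Unlink a ticket and drop its bytes from the space_info's reclaim_size. */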
static void remove_ticket(struct btrfs_space_info *space_info,
			  struct reserve_ticket *ticket)
{
	if (!list_empty(&ticket->list)) {
		list_del_init(&ticket->list);
		ASSERT(space_info->reclaim_size >= ticket->bytes);
		space_info->reclaim_size -= ticket->bytes;
	}
}

/*
 * This is for space we already have accounted in space_info->bytes_may_use, so
 * basically when we're returning space from block_rsv's.
 */
void btrfs_try_granting_tickets(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info)
{
	struct list_head *head;
	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;

	lockdep_assert_held(&space_info->lock);

	head = &space_info->priority_tickets;
again:
	while (!list_empty(head)) {
		struct reserve_ticket *ticket;
		u64 used = btrfs_space_info_used(space_info, true);

		ticket = list_first_entry(head, struct reserve_ticket, list);

		/* Check and see if our ticket can be satisfied now. */
		if ((used + ticket->bytes <= writable_total_bytes(fs_info, space_info)) ||
		    btrfs_can_overcommit(fs_info, space_info, ticket->bytes,
					 flush)) {
			btrfs_space_info_update_bytes_may_use(fs_info,
							      space_info,
							      ticket->bytes);
			remove_ticket(space_info, ticket);
			ticket->bytes = 0;
			space_info->tickets_id++;
			wake_up(&ticket->wait);
		} else {
			break;
		}
	}

	if (head == &space_info->priority_tickets) {
		head = &space_info->tickets;
		flush = BTRFS_RESERVE_FLUSH_ALL;
		goto again;
	}
}

#define DUMP_BLOCK_RSV(fs_info, rsv_name)				\
do {									\
	struct btrfs_block_rsv *__rsv = &(fs_info)->rsv_name;		\
	spin_lock(&__rsv->lock);					\
	btrfs_info(fs_info, #rsv_name ": size %llu reserved %llu",	\
		   __rsv->size, __rsv->reserved);			\
	spin_unlock(&__rsv->lock);					\
} while (0)

static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
				    struct btrfs_space_info *info)
{
	lockdep_assert_held(&info->lock);

	/* The free space could be negative in case of overcommit */
	btrfs_info(fs_info, "space_info %llu has %lld free, is %sfull",
		   info->flags,
		   (s64)(info->total_bytes - btrfs_space_info_used(info, true)),
		   info->full ? "" : "not ");
	btrfs_info(fs_info,
		"space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu zone_unusable=%llu",
		info->total_bytes, info->bytes_used, info->bytes_pinned,
		info->bytes_reserved, info->bytes_may_use,
		info->bytes_readonly, info->bytes_zone_unusable);

	DUMP_BLOCK_RSV(fs_info, global_block_rsv);
	DUMP_BLOCK_RSV(fs_info, trans_block_rsv);
	DUMP_BLOCK_RSV(fs_info, chunk_block_rsv);
	DUMP_BLOCK_RSV(fs_info, delayed_block_rsv);
	DUMP_BLOCK_RSV(fs_info, delayed_refs_rsv);

}

void btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
			   struct btrfs_space_info *info, u64 bytes,
			   int dump_block_groups)
{
	struct btrfs_block_group *cache;
	int index = 0;

	spin_lock(&info->lock);
	__btrfs_dump_space_info(fs_info, info);
	spin_unlock(&info->lock);

	if (!dump_block_groups)
		return;

	down_read(&info->groups_sem);
again:
	list_for_each_entry(cache, &info->block_groups[index], list) {
		spin_lock(&cache->lock);
		btrfs_info(fs_info,
			"block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %llu zone_unusable %s",
			cache->start, cache->length, cache->used, cache->pinned,
			cache->reserved, cache->zone_unusable,
			cache->ro ? "[readonly]" : "");
		spin_unlock(&cache->lock);
		btrfs_dump_free_space(cache, bytes);
	}
	if (++index < BTRFS_NR_RAID_TYPES)
		goto again;
	up_read(&info->groups_sem);
}

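/*
 * Convert a byte amount to reclaim into a number of metadata items, using the
 * worst case insertion size as the per-item cost.
 */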
static inline u64 calc_reclaim_items_nr(struct btrfs_fs_info *fs_info,
					u64 to_reclaim)
{
	u64 bytes;
	u64 nr;

	bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
	nr = div64_u64(to_reclaim, bytes);
	if (!nr)
		nr = 1;
	return nr;
}

#define EXTENT_SIZE_PER_ITEM	SZ_256K

/*
 * shrink metadata reservation for delalloc
 */
static void shrink_delalloc(struct btrfs_fs_info *fs_info,
			    struct btrfs_space_info *space_info,
			    u64 to_reclaim, bool wait_ordered,
			    bool for_preempt)
{
	struct btrfs_trans_handle *trans;
	u64 delalloc_bytes;
	u64 ordered_bytes;
	u64 items;
	long time_left;
	int loops;

	delalloc_bytes = percpu_counter_sum_positive(&fs_info->delalloc_bytes);
	ordered_bytes = percpu_counter_sum_positive(&fs_info->ordered_bytes);
	if (delalloc_bytes == 0 && ordered_bytes == 0)
		return;

	/* Calc the number of pages we need to flush for space reservation */
	if (to_reclaim == U64_MAX) {
		items = U64_MAX;
	} else {
		/*
		 * to_reclaim is set to however much metadata we need to
		 * reclaim, but reclaiming that much data doesn't really track
		 * exactly.  What we really want to do is reclaim full inode's
		 * worth of reservations, however that's not available to us
		 * here.  We will take a fraction of the delalloc bytes for our
		 * flushing loops and hope for the best.  Delalloc will expand
		 * the amount we write to cover an entire dirty extent, which
		 * will reclaim the metadata reservation for that range.  If
		 * it's not enough subsequent flush stages will be more
		 * aggressive.
		 */
		to_reclaim = max(to_reclaim, delalloc_bytes >> 3);
		items = calc_reclaim_items_nr(fs_info, to_reclaim) * 2;
	}

	trans = current->journal_info;

	/*
	 * If we are doing more ordered than delalloc we need to just wait on
	 * ordered extents, otherwise we'll waste time trying to flush delalloc
	 * that likely won't give us the space back we need.
	 */
	if (ordered_bytes > delalloc_bytes && !for_preempt)
		wait_ordered = true;

	loops = 0;
	while ((delalloc_bytes || ordered_bytes) && loops < 3) {
		u64 temp = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;
		long nr_pages = min_t(u64, temp, LONG_MAX);
		int async_pages;

		btrfs_start_delalloc_roots(fs_info, nr_pages, true);

		/*
		 * We need to make sure any outstanding async pages are now
		 * processed before we continue.  This is because things like
		 * sync_inode() try to be smart and skip writing if the inode is
		 * marked clean.  We don't use filemap_fdatawrite() for flushing
		 * because we want to control how many pages we write out at a
		 * time, thus this is the only safe way to make sure we've
		 * waited for outstanding compressed workers to have started
		 * their jobs and thus have ordered extents set up properly.
		 *
		 * This exists because we do not want to wait for each
		 * individual inode to finish its async work, we simply want to
		 * start the IO on everybody, and then come back here and wait
		 * for all of the async work to catch up.  Once we're done with
		 * that we know we'll have ordered extents for everything and we
		 * can decide if we wait for that or not.
		 *
		 * If we choose to replace this in the future, make absolutely
		 * sure that the proper waiting is being done in the async case,
		 * as there have been bugs in that area before.
		 */
		async_pages = atomic_read(&fs_info->async_delalloc_pages);
		if (!async_pages)
			goto skip_async;

		/*
		 * We don't want to wait forever, if we wrote less pages in this
		 * loop than we have outstanding, only wait for that number of
		 * pages, otherwise we can wait for all async pages to finish
		 * before continuing.
		 */
		if (async_pages > nr_pages)
			async_pages -= nr_pages;
		else
			async_pages = 0;
		wait_event(fs_info->async_submit_wait,
			   atomic_read(&fs_info->async_delalloc_pages) <=
			   async_pages);
skip_async:
		loops++;
		if (wait_ordered && !trans) {
			btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
		} else {
			time_left = schedule_timeout_killable(1);
			if (time_left)
				break;
		}

		/*
		 * If we are for preemption we just want a one-shot of delalloc
		 * flushing so we can stop flushing if we decide we don't need
		 * to anymore.
		 */
		if (for_preempt)
			break;

		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets) &&
		    list_empty(&space_info->priority_tickets)) {
			spin_unlock(&space_info->lock);
			break;
		}
		spin_unlock(&space_info->lock);

		delalloc_bytes = percpu_counter_sum_positive(
						&fs_info->delalloc_bytes);
		ordered_bytes = percpu_counter_sum_positive(
						&fs_info->ordered_bytes);
	}
}

/*
 * Try to flush some data based on policy set by @state. This is only advisory
 * and may fail for various reasons. The caller is supposed to examine the
 * state of @space_info to detect the outcome.
 */
static void flush_space(struct btrfs_fs_info *fs_info,
		       struct btrfs_space_info *space_info, u64 num_bytes,
		       enum btrfs_flush_state state, bool for_preempt)
{
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_trans_handle *trans;
	int nr;
	int ret = 0;

	switch (state) {
	case FLUSH_DELAYED_ITEMS_NR:
	case FLUSH_DELAYED_ITEMS:
		if (state == FLUSH_DELAYED_ITEMS_NR)
			nr = calc_reclaim_items_nr(fs_info, num_bytes) * 2;
		else
			nr = -1;

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_run_delayed_items_nr(trans, nr);
		btrfs_end_transaction(trans);
		break;
	case FLUSH_DELALLOC:
	case FLUSH_DELALLOC_WAIT:
	case FLUSH_DELALLOC_FULL:
		if (state == FLUSH_DELALLOC_FULL)
			num_bytes = U64_MAX;
		shrink_delalloc(fs_info, space_info, num_bytes,
				state != FLUSH_DELALLOC, for_preempt);
		break;
	case FLUSH_DELAYED_REFS_NR:
	case FLUSH_DELAYED_REFS:
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		if (state == FLUSH_DELAYED_REFS_NR)
			nr = calc_reclaim_items_nr(fs_info, num_bytes);
		else
			nr = 0;
		btrfs_run_delayed_refs(trans, nr);
		btrfs_end_transaction(trans);
		break;
	case ALLOC_CHUNK:
	case ALLOC_CHUNK_FORCE:
		/*
		 * For metadata space on zoned filesystem, reaching here means we
		 * don't have enough space left in active_total_bytes. Try to
		 * activate a block group first, because we may have inactive
		 * block group already allocated.
		 */
		ret = btrfs_zoned_activate_one_bg(fs_info, space_info, false);
		if (ret < 0)
			break;
		else if (ret == 1)
			break;

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_chunk_alloc(trans,
				btrfs_get_alloc_profile(fs_info, space_info->flags),
				(state == ALLOC_CHUNK) ? CHUNK_ALLOC_NO_FORCE :
					CHUNK_ALLOC_FORCE);
		btrfs_end_transaction(trans);

		/*
		 * For metadata space on zoned filesystem, allocating a new chunk
		 * is not enough. We still need to activate the block group.
		 * Activate the newly allocated block group by (maybe) finishing
		 * a block group.
		 */
		if (ret == 1) {
			ret = btrfs_zoned_activate_one_bg(fs_info, space_info, true);
			/*
			 * Revert to the original ret regardless we could finish
			 * one block group or not.
			 */
			if (ret >= 0)
				ret = 1;
		}

		if (ret > 0 || ret == -ENOSPC)
			ret = 0;
		break;
	case RUN_DELAYED_IPUTS:
		/*
		 * If we have pending delayed iputs then we could free up a
		 * bunch of pinned space, so make sure we run the iputs before
		 * we do our pinned bytes check below.
		 */
		btrfs_run_delayed_iputs(fs_info);
		btrfs_wait_on_delayed_iputs(fs_info);
		break;
	case COMMIT_TRANS:
		ASSERT(current->journal_info == NULL);
		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			break;
		}
		ret = btrfs_commit_transaction(trans);
		break;
	default:
		ret = -ENOSPC;
		break;
	}

	trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state,
				ret, for_preempt);
	return;
}

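/*
 * Work out how much the async flusher should try to reclaim: the sum of all
 * queued tickets (reclaim_size), plus any amount by which current usage
 * already exceeds the writable total plus the estimated available space.
 */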
static inline u64
btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
				 struct btrfs_space_info *space_info)
{
	u64 used;
	u64 avail;
	u64 total;
	u64 to_reclaim = space_info->reclaim_size;

	lockdep_assert_held(&space_info->lock);

	avail = calc_available_free_space(fs_info, space_info,
					  BTRFS_RESERVE_FLUSH_ALL);
	used = btrfs_space_info_used(space_info, true);

	/*
	 * We may be flushing because suddenly we have less space than we had
	 * before, and now we're well over-committed based on our current free
	 * space.  If that's the case add in our overage so we make sure to put
	 * appropriate pressure on the flushing state machine.
	 */
	total = writable_total_bytes(fs_info, space_info);
	if (total + avail < used)
		to_reclaim += used - (total + avail);

	return to_reclaim;
}

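/*
 * Decide whether the preemptive background flusher should run (or keep
 * running).  Skip it when the space_info is essentially full, when most of
 * the flushable reservations belong to the global rsv, or when tickets are
 * already queued; otherwise compare the reclaimable reservations against a
 * clamped free space threshold.
 */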
static bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
				    struct btrfs_space_info *space_info)
{
	u64 global_rsv_size = fs_info->global_block_rsv.reserved;
	u64 ordered, delalloc;
	u64 total = writable_total_bytes(fs_info, space_info);
	u64 thresh;
	u64 used;

	thresh = div_factor_fine(total, 90);

	lockdep_assert_held(&space_info->lock);

	/* If we're just plain full then async reclaim just slows us down. */
	if ((space_info->bytes_used + space_info->bytes_reserved +
	     global_rsv_size) >= thresh)
		return false;

	used = space_info->bytes_may_use + space_info->bytes_pinned;

	/* The total flushable belongs to the global rsv, don't flush. */
	if (global_rsv_size >= used)
		return false;

	/*
	 * 128MiB is 1/4 of the maximum global rsv size.  If we have less than
	 * that devoted to other reservations then there's no sense in flushing,
	 * we don't have a lot of things that need flushing.
	 */
	if (used - global_rsv_size <= SZ_128M)
		return false;

	/*
	 * We have tickets queued, bail so we don't compete with the async
	 * flushers.
	 */
	if (space_info->reclaim_size)
		return false;

	/*
	 * If we have over half of the free space occupied by reservations or
	 * pinned then we want to start flushing.
	 *
	 * We do not do the traditional thing here, which is to say
	 *
	 *   if (used >= ((total_bytes + avail) / 2))
	 *     return 1;
	 *
	 * because this doesn't quite work how we want.  If we had more than 50%
	 * of the space_info used by bytes_used and we had 0 available we'd just
	 * constantly run the background flusher.  Instead we want it to kick in
	 * if our reclaimable space exceeds our clamped free space.
	 *
	 * Our clamping range is 2^1 -> 2^8.  Practically speaking that means
	 * the following:
	 *
	 * Amount of RAM        Minimum threshold       Maximum threshold
	 *
	 *        256GiB                     1GiB                  128GiB
	 *        128GiB                   512MiB                   64GiB
	 *         64GiB                   256MiB                   32GiB
	 *         32GiB                   128MiB                   16GiB
	 *         16GiB                    64MiB                    8GiB
	 *
	 * These are the range our thresholds will fall in, corresponding to how
	 * much delalloc we need for the background flusher to kick in.
	 */

	thresh = calc_available_free_space(fs_info, space_info,
					   BTRFS_RESERVE_FLUSH_ALL);
	used = space_info->bytes_used + space_info->bytes_reserved +
	       space_info->bytes_readonly + global_rsv_size;
	if (used < total)
		thresh += total - used;
	thresh >>= space_info->clamp;

	used = space_info->bytes_pinned;

	/*
	 * If we have more ordered bytes than delalloc bytes then we're either
	 * doing a lot of DIO, or we simply don't have a lot of delalloc waiting
	 * around.  Preemptive flushing is only useful in that it can free up
	 * space before tickets need to wait for things to finish.  In the case
	 * of ordered extents, preemptively waiting on ordered extents gets us
	 * nothing, if our reservations are tied up in ordered extents we'll
	 * simply have to slow down writers by forcing them to wait on ordered
	 * extents.
	 *
	 * In the case that ordered is larger than delalloc, only include the
	 * block reserves that we would actually be able to directly reclaim
	 * from.  In this case if we're heavy on metadata operations this will
	 * clearly be heavy enough to warrant preemptive flushing.  In the case
	 * of heavy DIO or ordered reservations, preemptive flushing will just
	 * waste time and cause us to slow down.
	 *
	 * We want to make sure we truly are maxed out on ordered however, so
	 * cut ordered in half, and if it's still higher than delalloc then we
	 * can keep flushing.  This is to avoid the case where we start
	 * flushing, and now delalloc == ordered and we stop preemptively
	 * flushing when we could still have several gigs of delalloc to flush.
	 */
	ordered = percpu_counter_read_positive(&fs_info->ordered_bytes) >> 1;
	delalloc = percpu_counter_read_positive(&fs_info->delalloc_bytes);
	if (ordered >= delalloc)
		used += fs_info->delayed_refs_rsv.reserved +
			fs_info->delayed_block_rsv.reserved;
	else
		used += space_info->bytes_may_use - global_rsv_size;

	return (used >= thresh && !btrfs_fs_closing(fs_info) &&
		!test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
}

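/*
 * Try to satisfy a ticket directly from the global block reserve, but only if
 * the ticket allows stealing and the global rsv would not be drained below a
 * minimal fraction of its size.
 */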
static bool steal_from_global_rsv(struct btrfs_fs_info *fs_info,
				  struct btrfs_space_info *space_info,
				  struct reserve_ticket *ticket)
{
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	u64 min_bytes;

	if (!ticket->steal)
		return false;

	if (global_rsv->space_info != space_info)
		return false;

	spin_lock(&global_rsv->lock);
	min_bytes = div_factor(global_rsv->size, 1);
	if (global_rsv->reserved < min_bytes + ticket->bytes) {
		spin_unlock(&global_rsv->lock);
		return false;
	}
	global_rsv->reserved -= ticket->bytes;
	remove_ticket(space_info, ticket);
	ticket->bytes = 0;
	wake_up(&ticket->wait);
	space_info->tickets_id++;
	if (global_rsv->reserved < global_rsv->size)
		global_rsv->full = 0;
	spin_unlock(&global_rsv->lock);

	return true;
}

/*
 * maybe_fail_all_tickets - we've exhausted our flushing, start failing tickets
 * @fs_info - fs_info for this fs
 * @space_info - the space info we were flushing
 *
 * We call this when we've exhausted our flushing ability and haven't made
 * progress in satisfying tickets.  The reservation code handles tickets in
 * order, so if there is a large ticket first and then smaller ones we could
 * very well satisfy the smaller tickets.  This will attempt to wake up any
 * tickets in the list to catch this case.
 *
 * This function returns true if it was able to make progress by clearing out
 * other tickets, or if it stumbles across a ticket that was smaller than the
 * first ticket.
 */
static bool maybe_fail_all_tickets(struct btrfs_fs_info *fs_info,
				   struct btrfs_space_info *space_info)
{
	struct reserve_ticket *ticket;
	u64 tickets_id = space_info->tickets_id;
	const bool aborted = BTRFS_FS_ERROR(fs_info);

	trace_btrfs_fail_all_tickets(fs_info, space_info);

	if (btrfs_test_opt(fs_info, ENOSPC_DEBUG)) {
		btrfs_info(fs_info, "cannot satisfy tickets, dumping space info");
		__btrfs_dump_space_info(fs_info, space_info);
	}

	while (!list_empty(&space_info->tickets) &&
	       tickets_id == space_info->tickets_id) {
		ticket = list_first_entry(&space_info->tickets,
					  struct reserve_ticket, list);

		if (!aborted && steal_from_global_rsv(fs_info, space_info, ticket))
			return true;

		if (!aborted && btrfs_test_opt(fs_info, ENOSPC_DEBUG))
			btrfs_info(fs_info, "failing ticket with %llu bytes",
				   ticket->bytes);

		remove_ticket(space_info, ticket);
		if (aborted)
			ticket->error = -EIO;
		else
			ticket->error = -ENOSPC;
		wake_up(&ticket->wait);

		/*
		 * We're just throwing tickets away, so more flushing may not
		 * trip over btrfs_try_granting_tickets, so we need to call it
		 * here to see if we can make progress with the next ticket in
		 * the list.
		 */
		if (!aborted)
			btrfs_try_granting_tickets(fs_info, space_info);
	}
	return (tickets_id != space_info->tickets_id);
}

/*
 * This is for normal flushers, we can wait all goddamned day if we want to.  We
 * will loop and continuously try to flush as long as we are making progress.
 * We count progress as clearing off tickets each time we have to loop.
 */
static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_space_info *space_info;
	u64 to_reclaim;
	enum btrfs_flush_state flush_state;
	int commit_cycles = 0;
	u64 last_tickets_id;

	fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);

	spin_lock(&space_info->lock);
	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
	if (!to_reclaim) {
		space_info->flush = 0;
		spin_unlock(&space_info->lock);
		return;
	}
	last_tickets_id = space_info->tickets_id;
	spin_unlock(&space_info->lock);

	flush_state = FLUSH_DELAYED_ITEMS_NR;
	do {
		flush_space(fs_info, space_info, to_reclaim, flush_state, false);
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets)) {
			space_info->flush = 0;
			spin_unlock(&space_info->lock);
			return;
		}
		to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info,
							      space_info);
		if (last_tickets_id == space_info->tickets_id) {
			flush_state++;
		} else {
			last_tickets_id = space_info->tickets_id;
			flush_state = FLUSH_DELAYED_ITEMS_NR;
			if (commit_cycles)
				commit_cycles--;
		}

		/*
		 * We do not want to empty the system of delalloc unless we're
		 * under heavy pressure, so allow one trip through the flushing
		 * logic before we start doing a FLUSH_DELALLOC_FULL.
		 */
		if (flush_state == FLUSH_DELALLOC_FULL && !commit_cycles)
			flush_state++;

		/*
		 * We don't want to force a chunk allocation until we've tried
		 * pretty hard to reclaim space.  Think of the case where we
		 * freed up a bunch of space and so have a lot of pinned space
		 * to reclaim.  We would rather use that than possibly create a
		 * underutilized metadata chunk.  So if this is our first run
		 * through the flushing state machine skip ALLOC_CHUNK_FORCE and
		 * commit the transaction.  If nothing has changed the next go
		 * around then we can force a chunk allocation.
		 */
		if (flush_state == ALLOC_CHUNK_FORCE && !commit_cycles)
			flush_state++;

		if (flush_state > COMMIT_TRANS) {
			commit_cycles++;
			if (commit_cycles > 2) {
				if (maybe_fail_all_tickets(fs_info, space_info)) {
					flush_state = FLUSH_DELAYED_ITEMS_NR;
					commit_cycles--;
				} else {
					space_info->flush = 0;
				}
			} else {
				flush_state = FLUSH_DELAYED_ITEMS_NR;
			}
		}
		spin_unlock(&space_info->lock);
	} while (flush_state <= COMMIT_TRANS);
}

/*
 * This handles pre-flushing of metadata space before we get to the point that
 * we need to start blocking threads on tickets.  The logic here is different
 * from the other flush paths because it doesn't rely on tickets to tell us how
 * much we need to flush, instead it attempts to keep us below the 80% full
 * watermark of space by flushing whichever reservation pool is currently the
 * largest.
 */
static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_space_info *space_info;
	struct btrfs_block_rsv *delayed_block_rsv;
	struct btrfs_block_rsv *delayed_refs_rsv;
	struct btrfs_block_rsv *global_rsv;
	struct btrfs_block_rsv *trans_rsv;
	int loops = 0;

	fs_info = container_of(work, struct btrfs_fs_info,
			       preempt_reclaim_work);
	space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
	delayed_block_rsv = &fs_info->delayed_block_rsv;
	delayed_refs_rsv = &fs_info->delayed_refs_rsv;
	global_rsv = &fs_info->global_block_rsv;
	trans_rsv = &fs_info->trans_block_rsv;

	spin_lock(&space_info->lock);
	while (need_preemptive_reclaim(fs_info, space_info)) {
		enum btrfs_flush_state flush;
		u64 delalloc_size = 0;
		u64 to_reclaim, block_rsv_size;
		u64 global_rsv_size = global_rsv->reserved;

		loops++;

		/*
		 * We don't have a precise counter for the metadata being
		 * reserved for delalloc, so we'll approximate it by subtracting
		 * out the block rsv's space from the bytes_may_use.  If that
		 * amount is higher than the individual reserves, then we can
		 * assume it's tied up in delalloc reservations.
		 */
		block_rsv_size = global_rsv_size +
			delayed_block_rsv->reserved +
			delayed_refs_rsv->reserved +
			trans_rsv->reserved;
		if (block_rsv_size < space_info->bytes_may_use)
			delalloc_size = space_info->bytes_may_use - block_rsv_size;

		/*
		 * We don't want to include the global_rsv in our calculation,
		 * because that's space we can't touch.  Subtract it from the
		 * block_rsv_size for the next checks.
		 */
		block_rsv_size -= global_rsv_size;

		/*
		 * We really want to avoid flushing delalloc too much, as it
		 * could result in poor allocation patterns, so only flush it if
		 * it's larger than the rest of the pools combined.
		 */
		if (delalloc_size > block_rsv_size) {
			to_reclaim = delalloc_size;
			flush = FLUSH_DELALLOC;
		} else if (space_info->bytes_pinned >
			   (delayed_block_rsv->reserved +
			    delayed_refs_rsv->reserved)) {
			to_reclaim = space_info->bytes_pinned;
			flush = COMMIT_TRANS;
		} else if (delayed_block_rsv->reserved >
			   delayed_refs_rsv->reserved) {
			to_reclaim = delayed_block_rsv->reserved;
			flush = FLUSH_DELAYED_ITEMS_NR;
		} else {
			to_reclaim = delayed_refs_rsv->reserved;
			flush = FLUSH_DELAYED_REFS_NR;
		}

		spin_unlock(&space_info->lock);

		/*
		 * We don't want to reclaim everything, just a portion, so scale
		 * down the to_reclaim by 1/4.  If it takes us down to 0,
		 * reclaim 1 item's worth.
		 */
		to_reclaim >>= 2;
		if (!to_reclaim)
			to_reclaim = btrfs_calc_insert_metadata_size(fs_info, 1);
		flush_space(fs_info, space_info, to_reclaim, flush, true);
		cond_resched();
		spin_lock(&space_info->lock);
	}

	/* We only went through once, back off our clamping. */
	if (loops == 1 && !space_info->reclaim_size)
		space_info->clamp = max(1, space_info->clamp - 1);
	trace_btrfs_done_preemptive_reclaim(fs_info, space_info);
	spin_unlock(&space_info->lock);
}

/*
 * FLUSH_DELALLOC_WAIT:
 *   Space is freed from flushing delalloc in one of two ways.
 *
 *   1) compression is on and we allocate less space than we reserved
 *   2) we are overwriting existing space
 *
 *   For #1 that extra space is reclaimed as soon as the delalloc pages are
 *   COWed, by way of btrfs_add_reserved_bytes() which adds the actual extent
 *   length to ->bytes_reserved, and subtracts the reserved space from
 *   ->bytes_may_use.
 *
 *   For #2 this is trickier.  Once the ordered extent runs we will drop the
 *   extent in the range we are overwriting, which creates a delayed ref for
 *   that freed extent.  This however is not reclaimed until the transaction
 *   commits, thus the next stages.
 *
 * RUN_DELAYED_IPUTS
 *   If we are freeing inodes, we want to make sure all delayed iputs have
 *   completed, because they could have been on an inode with i_nlink == 0, and
 *   thus have been truncated and freed up space.  But again this space is not
 *   immediately re-usable, it comes in the form of a delayed ref, which must be
 *   run and then the transaction must be committed.
 *
 * COMMIT_TRANS
 *   This is where we reclaim all of the pinned space generated by running the
 *   iputs
 *
 * ALLOC_CHUNK_FORCE
 *   For data we start with alloc chunk force, however we could have been full
 *   before, and then the transaction commit could have freed new block groups,
 *   so if we now have space to allocate do the force chunk allocation.
 */
static const enum btrfs_flush_state data_flush_states[] = {
	FLUSH_DELALLOC_FULL,
	RUN_DELAYED_IPUTS,
	COMMIT_TRANS,
	ALLOC_CHUNK_FORCE,
};

static void btrfs_async_reclaim_data_space(struct work_struct *work)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_space_info *space_info;
	u64 last_tickets_id;
	enum btrfs_flush_state flush_state = 0;

	fs_info = container_of(work, struct btrfs_fs_info, async_data_reclaim_work);
	space_info = fs_info->data_sinfo;

	spin_lock(&space_info->lock);
	if (list_empty(&space_info->tickets)) {
		space_info->flush = 0;
		spin_unlock(&space_info->lock);
		return;
	}
	last_tickets_id = space_info->tickets_id;
	spin_unlock(&space_info->lock);

	while (!space_info->full) {
		flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets)) {
			space_info->flush = 0;
			spin_unlock(&space_info->lock);
			return;
		}

		/* Something happened, fail everything and bail. */
		if (BTRFS_FS_ERROR(fs_info))
			goto aborted_fs;
		last_tickets_id = space_info->tickets_id;
		spin_unlock(&space_info->lock);
	}

	while (flush_state < ARRAY_SIZE(data_flush_states)) {
		flush_space(fs_info, space_info, U64_MAX,
			    data_flush_states[flush_state], false);
		spin_lock(&space_info->lock);
		if (list_empty(&space_info->tickets)) {
			space_info->flush = 0;
			spin_unlock(&space_info->lock);
			return;
		}

		if (last_tickets_id == space_info->tickets_id) {
			flush_state++;
		} else {
			last_tickets_id = space_info->tickets_id;
			flush_state = 0;
		}

		if (flush_state >= ARRAY_SIZE(data_flush_states)) {
			if (space_info->full) {
				if (maybe_fail_all_tickets(fs_info, space_info))
					flush_state = 0;
				else
					space_info->flush = 0;
			} else {
				flush_state = 0;
			}

			/* Something happened, fail everything and bail. */
			if (BTRFS_FS_ERROR(fs_info))
				goto aborted_fs;

		}
		spin_unlock(&space_info->lock);
	}
	return;

aborted_fs:
	maybe_fail_all_tickets(fs_info, space_info);
	space_info->flush = 0;
	spin_unlock(&space_info->lock);
}

void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info)
{
	INIT_WORK(&fs_info->async_reclaim_work, btrfs_async_reclaim_metadata_space);
	INIT_WORK(&fs_info->async_data_reclaim_work, btrfs_async_reclaim_data_space);
	INIT_WORK(&fs_info->preempt_reclaim_work,
		  btrfs_preempt_reclaim_metadata_space);
}

static const enum btrfs_flush_state priority_flush_states[] = {
	FLUSH_DELAYED_ITEMS_NR,
	FLUSH_DELAYED_ITEMS,
	ALLOC_CHUNK,
};

static const enum btrfs_flush_state evict_flush_states[] = {
	FLUSH_DELAYED_ITEMS_NR,
	FLUSH_DELAYED_ITEMS,
	FLUSH_DELAYED_REFS_NR,
	FLUSH_DELAYED_REFS,
	FLUSH_DELALLOC,
	FLUSH_DELALLOC_WAIT,
	FLUSH_DELALLOC_FULL,
	ALLOC_CHUNK,
	COMMIT_TRANS,
};

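/*
 * Priority reclaim: walk the given list of flush states ourselves instead of
 * waiting on the async flusher, stopping as soon as our ticket is satisfied.
 * If it never is, try stealing from the global rsv before failing the ticket
 * with -ENOSPC.
 */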
static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info,
				struct reserve_ticket *ticket,
				const enum btrfs_flush_state *states,
				int states_nr)
{
	u64 to_reclaim;
	int flush_state = 0;

	spin_lock(&space_info->lock);
	to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info, space_info);
	/*
	 * This is the priority reclaim path, so to_reclaim could be >0 still
	 * because we may have only satisfied the priority tickets and still
	 * left non priority tickets on the list.  We would then have
	 * to_reclaim but ->bytes == 0.
	 */
	if (ticket->bytes == 0) {
		spin_unlock(&space_info->lock);
		return;
	}

	while (flush_state < states_nr) {
		spin_unlock(&space_info->lock);
		flush_space(fs_info, space_info, to_reclaim, states[flush_state],
			    false);
		flush_state++;
		spin_lock(&space_info->lock);
		if (ticket->bytes == 0) {
			spin_unlock(&space_info->lock);
			return;
		}
	}

	/* Attempt to steal from the global rsv if we can. */
	if (!steal_from_global_rsv(fs_info, space_info, ticket)) {
		ticket->error = -ENOSPC;
		remove_ticket(space_info, ticket);
	}

	/*
	 * We must run try_granting_tickets here because we could be a large
	 * ticket in front of a smaller ticket that can now be satisfied with
	 * the available space.
	 */
	btrfs_try_granting_tickets(fs_info, space_info);
	spin_unlock(&space_info->lock);
}

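/*
 * Priority reclaim for data reservations: keep force-allocating data chunks
 * until the ticket is satisfied or the space_info is full, then fail the
 * ticket with -ENOSPC.
 */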
static void priority_reclaim_data_space(struct btrfs_fs_info *fs_info,
					struct btrfs_space_info *space_info,
					struct reserve_ticket *ticket)
{
	spin_lock(&space_info->lock);

	/* We could have been granted before we got here. */
	if (ticket->bytes == 0) {
		spin_unlock(&space_info->lock);
		return;
	}

	while (!space_info->full) {
		spin_unlock(&space_info->lock);
		flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE, false);
		spin_lock(&space_info->lock);
		if (ticket->bytes == 0) {
			spin_unlock(&space_info->lock);
			return;
		}
	}

	ticket->error = -ENOSPC;
	remove_ticket(space_info, ticket);
	btrfs_try_granting_tickets(fs_info, space_info);
	spin_unlock(&space_info->lock);
}

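/*
 * Sleep until the ticket is satisfied (ticket->bytes == 0) or an error is set
 * on it.  If the task is killed while waiting, remove the ticket and set
 * -EINTR so the reservation is not granted behind our back and leaked.
 */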
static void wait_reserve_ticket(struct btrfs_fs_info *fs_info,
				struct btrfs_space_info *space_info,
				struct reserve_ticket *ticket)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock(&space_info->lock);
	while (ticket->bytes > 0 && ticket->error == 0) {
		ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
		if (ret) {
			/*
			 * Delete us from the list. After we unlock the space
			 * info, we don't want the async reclaim job to reserve
			 * space for this ticket. If that would happen, then the
			 * ticket's task would not know that space was reserved
			 * despite getting an error, resulting in a space leak
			 * (bytes_may_use counter of our space_info).
			 */
			remove_ticket(space_info, ticket);
			ticket->error = -EINTR;
			break;
		}
		spin_unlock(&space_info->lock);

		schedule();

		finish_wait(&ticket->wait, &wait);
		spin_lock(&space_info->lock);
	}
	spin_unlock(&space_info->lock);
}

/**
 * Do the appropriate flushing and waiting for a ticket
 *
 * @fs_info:    the filesystem
 * @space_info: space info for the reservation
 * @ticket:     ticket for the reservation
 * @start_ns:   timestamp when the reservation started
 * @orig_bytes: amount of bytes originally reserved
 * @flush:      how much we can flush
 *
 * This does the work of figuring out how to flush for the ticket, waiting for
 * the reservation, and returning the appropriate error if there is one.
 */
static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
				 struct btrfs_space_info *space_info,
				 struct reserve_ticket *ticket,
				 u64 start_ns, u64 orig_bytes,
				 enum btrfs_reserve_flush_enum flush)
{
	int ret;

	switch (flush) {
	case BTRFS_RESERVE_FLUSH_DATA:
	case BTRFS_RESERVE_FLUSH_ALL:
	case BTRFS_RESERVE_FLUSH_ALL_STEAL:
		wait_reserve_ticket(fs_info, space_info, ticket);
		break;
	case BTRFS_RESERVE_FLUSH_LIMIT:
		priority_reclaim_metadata_space(fs_info, space_info, ticket,
						priority_flush_states,
						ARRAY_SIZE(priority_flush_states));
		break;
	case BTRFS_RESERVE_FLUSH_EVICT:
		priority_reclaim_metadata_space(fs_info, space_info, ticket,
						evict_flush_states,
						ARRAY_SIZE(evict_flush_states));
		break;
	case BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE:
		priority_reclaim_data_space(fs_info, space_info, ticket);
		break;
	default:
		ASSERT(0);
		break;
	}

	ret = ticket->error;
	ASSERT(list_empty(&ticket->list));
	/*
	 * Check that we can't have an error set if the reservation succeeded,
	 * as that would confuse tasks and lead them to error out without
	 * releasing reserved space (if an error happens the expectation is that
	 * space wasn't reserved at all).
	 */
	ASSERT(!(ticket->bytes == 0 && ticket->error));
	trace_btrfs_reserve_ticket(fs_info, space_info->flags, orig_bytes,
				   start_ns, flush, ticket->error);
	return ret;
}

/*
 * This returns true if this flush state will go through the ordinary flushing
 * code.
 */
static inline bool is_normal_flushing(enum btrfs_reserve_flush_enum flush)
{
	return	(flush == BTRFS_RESERVE_FLUSH_ALL) ||
		(flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
}

static inline void maybe_clamp_preempt(struct btrfs_fs_info *fs_info,
				       struct btrfs_space_info *space_info)
{
	u64 ordered = percpu_counter_sum_positive(&fs_info->ordered_bytes);
	u64 delalloc = percpu_counter_sum_positive(&fs_info->delalloc_bytes);

	/*
	 * If we're heavy on ordered operations then clamping won't help us.  We
	 * need to clamp specifically to keep up with dirty'ing buffered
	 * writers, because there's not a 1:1 correlation of writing delalloc
	 * and freeing space, like there is with flushing delayed refs or
	 * delayed nodes.  If we're already more ordered than delalloc then
	 * we're keeping up, otherwise we aren't and should probably clamp.
	 */
	if (ordered < delalloc)
		space_info->clamp = min(space_info->clamp + 1, 8);
}
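
/*
 * Rough illustration of the effect: the clamp is bumped by one every time a
 * ticket has to be queued while ordered bytes trail delalloc bytes, and caps
 * at 8.  need_preemptive_reclaim() uses the clamp value as a shift on its
 * free-space threshold, so each bump makes the preemptive background flushing
 * kick in earlier for workloads that keep outrunning it.
 */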

static inline bool can_steal(enum btrfs_reserve_flush_enum flush)
{
	return (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL ||
		flush == BTRFS_RESERVE_FLUSH_EVICT);
}
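
/*
 * "Stealing" here refers to the last-resort step in the flushing code where a
 * ticket is allowed to take space from the global block reserve just before it
 * would otherwise be failed.  Only FLUSH_ALL_STEAL and FLUSH_EVICT
 * reservations, where returning ENOSPC is especially painful, get that
 * treatment.
 */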

/**
 * Try to reserve bytes from the given space_info
 *
 * @fs_info:    the filesystem
 * @space_info: space info we want to allocate from
 * @orig_bytes: number of bytes we want
 * @flush:      whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the given space_info.  If
 * there is not enough space it will make an attempt to flush out space to make
 * room.  It will do this by flushing delalloc if possible or committing the
 * transaction.  If flush is BTRFS_RESERVE_NO_FLUSH then no attempts to regain
 * reservations will be made and this will fail if there is not enough space
 * already.
 */
static int __reserve_bytes(struct btrfs_fs_info *fs_info,
			   struct btrfs_space_info *space_info, u64 orig_bytes,
			   enum btrfs_reserve_flush_enum flush)
{
	struct work_struct *async_work;
	struct reserve_ticket ticket;
	u64 start_ns = 0;
	u64 used;
	int ret = 0;
	bool pending_tickets;

	ASSERT(orig_bytes);
	ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL);

	if (flush == BTRFS_RESERVE_FLUSH_DATA)
		async_work = &fs_info->async_data_reclaim_work;
	else
		async_work = &fs_info->async_reclaim_work;

	spin_lock(&space_info->lock);
	ret = -ENOSPC;
	used = btrfs_space_info_used(space_info, true);

	/*
	 * We don't want NO_FLUSH allocations to jump ahead of everybody else;
	 * they can generally handle ENOSPC in a different way, so treat them
	 * the same as normal flushers when it comes to skipping pending
	 * tickets.
	 */
	if (is_normal_flushing(flush) || (flush == BTRFS_RESERVE_NO_FLUSH))
		pending_tickets = !list_empty(&space_info->tickets) ||
			!list_empty(&space_info->priority_tickets);
	else
		pending_tickets = !list_empty(&space_info->priority_tickets);

	/*
	 * Carry on if we have enough space (short-circuit), otherwise call
	 * btrfs_can_overcommit() to check whether we can overcommit and
	 * continue.
	 */
	if (!pending_tickets &&
	    ((used + orig_bytes <= writable_total_bytes(fs_info, space_info)) ||
	     btrfs_can_overcommit(fs_info, space_info, orig_bytes, flush))) {
		btrfs_space_info_update_bytes_may_use(fs_info, space_info,
						      orig_bytes);
		ret = 0;
	}

	/*
	 * If we couldn't make a reservation then setup our reservation ticket
	 * and kick the async worker if it's not already running.
	 *
	 * If we are a priority flusher then we just need to add our ticket to
	 * the list and we will do our own flushing further down.
	 */
	if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
		ticket.bytes = orig_bytes;
		ticket.error = 0;
		space_info->reclaim_size += ticket.bytes;
		init_waitqueue_head(&ticket.wait);
		ticket.steal = can_steal(flush);
		if (trace_btrfs_reserve_ticket_enabled())
			start_ns = ktime_get_ns();

		if (flush == BTRFS_RESERVE_FLUSH_ALL ||
		    flush == BTRFS_RESERVE_FLUSH_ALL_STEAL ||
		    flush == BTRFS_RESERVE_FLUSH_DATA) {
			list_add_tail(&ticket.list, &space_info->tickets);
			if (!space_info->flush) {
				/*
				 * We were forced to add a reserve ticket, so
				 * our preemptive flushing is unable to keep
				 * up.  Clamp down on the threshold for the
				 * preemptive flushing in order to keep up with
				 * the workload.
				 */
				maybe_clamp_preempt(fs_info, space_info);

				space_info->flush = 1;
				trace_btrfs_trigger_flush(fs_info,
							  space_info->flags,
							  orig_bytes, flush,
							  "enospc");
				queue_work(system_unbound_wq, async_work);
			}
		} else {
			list_add_tail(&ticket.list,
				      &space_info->priority_tickets);
		}
	} else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
		used += orig_bytes;
		/*
		 * We will do the space reservation dance during log replay,
		 * which means we won't have fs_info->fs_root set, so don't do
		 * the async reclaim as we will panic.
		 */
		if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
		    !work_busy(&fs_info->preempt_reclaim_work) &&
		    need_preemptive_reclaim(fs_info, space_info)) {
			trace_btrfs_trigger_flush(fs_info, space_info->flags,
						  orig_bytes, flush, "preempt");
			queue_work(system_unbound_wq,
				   &fs_info->preempt_reclaim_work);
		}
	}
	spin_unlock(&space_info->lock);
	if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
		return ret;

	return handle_reserve_ticket(fs_info, space_info, &ticket, start_ns,
				     orig_bytes, flush);
}
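
/*
 * In short, there are three ways out of __reserve_bytes(): the reservation
 * fits (bytes_may_use is bumped and 0 is returned right away), a ticket is
 * queued and we wait/flush for it in handle_reserve_ticket(), or, for
 * BTRFS_RESERVE_NO_FLUSH, -ENOSPC is returned immediately and the caller
 * deals with it.
 */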

/**
 * Try to reserve metadata bytes from the block_rsv's space
 *
 * @fs_info:    the filesystem
 * @block_rsv:  block_rsv we're allocating for
 * @orig_bytes: number of bytes we want
 * @flush:      whether or not we can flush to make our reservation
 *
 * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv.  If there is not enough space it will make an attempt to
 * flush out space to make room.  It will do this by flushing delalloc if
 * possible or committing the transaction.  If flush is BTRFS_RESERVE_NO_FLUSH
 * then no attempts to regain reservations will be made and this will fail if
 * there is not enough space already.
 */
int btrfs_reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
				 struct btrfs_block_rsv *block_rsv,
				 u64 orig_bytes,
				 enum btrfs_reserve_flush_enum flush)
{
	int ret;

	ret = __reserve_bytes(fs_info, block_rsv->space_info, orig_bytes, flush);
	if (ret == -ENOSPC) {
		trace_btrfs_space_reservation(fs_info, "space_info:enospc",
					      block_rsv->space_info->flags,
					      orig_bytes, 1);

		if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
			btrfs_dump_space_info(fs_info, block_rsv->space_info,
					      orig_bytes, 0);
	}
	return ret;
}
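
/*
 * Usage sketch (simplified, roughly what btrfs_block_rsv_add() does): reserve
 * worst case metadata space for inserting one item and credit it to a block
 * reserve:
 *
 *	u64 num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
 *	int ret;
 *
 *	ret = btrfs_reserve_metadata_bytes(fs_info, rsv, num_bytes,
 *					   BTRFS_RESERVE_FLUSH_ALL);
 *	if (!ret)
 *		btrfs_block_rsv_add_bytes(rsv, num_bytes, true);
 */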

/**
 * Try to reserve data bytes for an allocation
 *
 * @fs_info: the filesystem
 * @bytes:   number of bytes we need
 * @flush:   how we are allowed to flush
 *
 * This will reserve bytes from the data space info.  If there is not enough
 * space then we will attempt to flush space as specified by flush.
 */
int btrfs_reserve_data_bytes(struct btrfs_fs_info *fs_info, u64 bytes,
			     enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_space_info *data_sinfo = fs_info->data_sinfo;
	int ret;

	ASSERT(flush == BTRFS_RESERVE_FLUSH_DATA ||
	       flush == BTRFS_RESERVE_FLUSH_FREE_SPACE_INODE);
	ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_DATA);

	ret = __reserve_bytes(fs_info, data_sinfo, bytes, flush);
	if (ret == -ENOSPC) {
		trace_btrfs_space_reservation(fs_info, "space_info:enospc",
					      data_sinfo->flags, bytes, 1);
		if (btrfs_test_opt(fs_info, ENOSPC_DEBUG))
			btrfs_dump_space_info(fs_info, data_sinfo, bytes, 0);
	}
	return ret;
}
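
/*
 * Usage sketch for the data path (simplified, close to what
 * btrfs_alloc_data_chunk_ondemand() does): reserve sectorsize aligned data
 * space before dirtying pages, and hand it back with
 * btrfs_free_reserved_data_space_noquota() if the write never happens:
 *
 *	u64 bytes = ALIGN(len, fs_info->sectorsize);
 *	int ret;
 *
 *	ret = btrfs_reserve_data_bytes(fs_info, bytes,
 *				       BTRFS_RESERVE_FLUSH_DATA);
 *	if (ret < 0)
 *		return ret;
 */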