// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Code for managing the extent btree and dynamically updating the writeback
 * dirty sector count.
 */

#include "bcachefs.h"
#include "bkey_methods.h"
#include "btree_gc.h"
#include "btree_iter.h"
#include "buckets.h"
#include "checksum.h"
#include "debug.h"
#include "disk_groups.h"
#include "error.h"
#include "extents.h"
#include "inode.h"
#include "journal.h"
#include "replicas.h"
#include "super.h"
#include "super-io.h"
#include "trace.h"
#include "util.h"

/*
 * Largest size value representable by each packed crc entry format,
 * indexed by extent entry type; used to bound merges of checksummed
 * extents (see bch2_extent_merge()):
 */
static unsigned bch2_crc_field_size_max[] = {
	[BCH_EXTENT_ENTRY_crc32] = CRC32_SIZE_MAX,
	[BCH_EXTENT_ENTRY_crc64] = CRC64_SIZE_MAX,
	[BCH_EXTENT_ENTRY_crc128] = CRC128_SIZE_MAX,
};

static void bch2_extent_crc_pack(union bch_extent_crc *,
				 struct bch_extent_crc_unpacked,
				 enum bch_extent_entry_type);

37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138
static struct bch_dev_io_failures *dev_io_failures(struct bch_io_failures *f,
						   unsigned dev)
{
	struct bch_dev_io_failures *i;

	for (i = f->devs; i < f->devs + f->nr; i++)
		if (i->dev == dev)
			return i;

	return NULL;
}

/*
 * Record an IO failure for the device/replica index in @p, creating a new
 * entry in @failed if this device hasn't failed before.
 */
void bch2_mark_io_failure(struct bch_io_failures *failed,
			  struct extent_ptr_decoded *p)
{
	struct bch_dev_io_failures *f = dev_io_failures(failed, p->ptr.dev);

	if (f && f->idx == p->idx) {
		/* Another failure at the same replica index: just count it */
		f->nr_failed++;
		return;
	}

	if (!f) {
		/* First recorded failure for this device */
		BUG_ON(failed->nr >= ARRAY_SIZE(failed->devs));

		f = &failed->devs[failed->nr++];
		f->dev		= p->ptr.dev;
	}

	/* New device entry, or first failure at a new replica index */
	f->idx		= p->idx;
	f->nr_failed	= 1;
	f->nr_retries	= 0;
}

/*
 * returns true if p1 is better than p2:
 */
static inline bool ptr_better(struct bch_fs *c,
			      const struct extent_ptr_decoded p1,
			      const struct extent_ptr_decoded p2)
{
	u64 l1, l2;

	if (p1.idx || p2.idx) {
		/*
		 * When testing reconstruct reads, prefer the higher idx
		 * (i.e. the erasure coded reconstruct path); otherwise
		 * prefer the lower one:
		 */
		if (force_reconstruct_read(c))
			return p1.idx > p2.idx;

		return p1.idx < p2.idx;
	}

	l1 = atomic64_read(&bch_dev_bkey_exists(c, p1.ptr.dev)->cur_latency[READ]);
	l2 = atomic64_read(&bch_dev_bkey_exists(c, p2.ptr.dev)->cur_latency[READ]);

	/* Pick at random, biased in favor of the faster device: */
	return bch2_rand_range(l1 + l2) > l1;
}

/*
 * This picks a non-stale pointer, preferably from a device other than @avoid.
 * Avoid can be NULL, meaning pick any. If there are no non-stale pointers to
 * other devices, it will still pick a pointer from avoid.
 *
 * Returns -EIO if the key is an error key or has dirty pointers but nothing
 * readable, 0 if nothing was picked, 1 if *pick was filled in.
 */
int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
			       struct bch_io_failures *failed,
			       struct extent_ptr_decoded *pick)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	struct bch_dev_io_failures *f;
	struct bch_dev *ca;
	int ret = 0;

	if (k.k->type == KEY_TYPE_error)
		return -EIO;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
		ca = bch_dev_bkey_exists(c, p.ptr.dev);

		/*
		 * If there are any dirty pointers it's an error if we can't
		 * read:
		 */
		if (!ret && !p.ptr.cached)
			ret = -EIO;

		/* Stale cached pointers are simply skipped: */
		if (p.ptr.cached && ptr_stale(ca, &p.ptr))
			continue;

		/* Advance past replica indices that already failed: */
		f = failed ? dev_io_failures(failed, p.ptr.dev) : NULL;
		if (f)
			p.idx = f->nr_failed < f->nr_retries
				? f->idx
				: f->idx + 1;

		/* Device not readable: fall back to the next replica */
		if (!p.idx &&
		    !bch2_dev_is_readable(ca))
			p.idx++;

		if (force_reconstruct_read(c) &&
		    !p.idx && p.has_ec)
			p.idx++;

		/*
		 * idx 0 is the pointer itself; idx 1 presumably selects
		 * erasure-coded reconstruct (only valid when p.has_ec):
		 */
		if (p.idx >= (unsigned) p.has_ec + 1)
			continue;

		if (ret > 0 && !ptr_better(c, p, *pick))
			continue;

		*pick = p;
		ret = 1;
	}

	return ret;
}

/* KEY_TYPE_btree_ptr: */

/*
 * Validate a btree node pointer key; returns an error string describing the
 * problem, or NULL if the key is valid.
 */
const char *bch2_btree_ptr_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	if (bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX)
		return "value too big";

	return bch2_bkey_ptrs_invalid(c, k);
}

/*
 * Debug-mode consistency check for a btree node pointer key: verifies that
 * replicas are marked in the superblock and that each pointer's bucket is
 * marked as containing btree data of at least btree_node_size, and is not
 * stale. Inconsistencies are reported via bch2_fs_inconsistent*().
 */
void bch2_btree_ptr_debugcheck(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;
	const char *err;
	char buf[160];
	struct bucket_mark mark;
	struct bch_dev *ca;

	/* Bucket marks aren't valid until initial GC has run: */
	if (!test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags))
		return;

	/* Best effort only - skip the check rather than block: */
	if (!percpu_down_read_trylock(&c->mark_lock))
		return;

	bch2_fs_inconsistent_on(!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
		!bch2_bkey_replicas_marked(c, k, false), c,
		"btree key bad (replicas not marked in superblock):\n%s",
		(bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));

	bkey_for_each_ptr(ptrs, ptr) {
		ca = bch_dev_bkey_exists(c, ptr->dev);

		mark = ptr_bucket_mark(ca, ptr);

		err = "stale";
		if (gen_after(mark.gen, ptr->gen))
			goto err;

		err = "inconsistent";
		if (mark.data_type != BCH_DATA_BTREE ||
		    mark.dirty_sectors < c->opts.btree_node_size)
			goto err;
	}
out:
	percpu_up_read(&c->mark_lock);
	return;
err:
	bch2_fs_inconsistent(c, "%s btree pointer %s: bucket %zi gen %i mark %08x",
		err, (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf),
		PTR_BUCKET_NR(ca, ptr),
		mark.gen, (unsigned) mark.v.counter);
	goto out;
}

/* Print a human-readable representation of a btree pointer key to @out. */
void bch2_btree_ptr_to_text(struct printbuf *out, struct bch_fs *c,
			    struct bkey_s_c k)
{
	bch2_bkey_ptrs_to_text(out, c, k);
}

/* KEY_TYPE_extent: */

/*
 * Validate an extent key; returns an error string, or NULL if valid.
 * All the real work is done by the generic pointer validation.
 */
const char *bch2_extent_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	return bch2_bkey_ptrs_invalid(c, k);
}

/*
 * Debug-mode consistency check for an extent key: verifies replicas are
 * marked in the superblock and that each pointer's bucket mark agrees with
 * the extent (correct data type, enough sectors marked, not overly stale).
 */
void bch2_extent_debugcheck(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	char buf[160];

	/* Marks aren't meaningful until journal replay + initial GC: */
	if (!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags) ||
	    !test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags))
		return;

	/* Best effort only - skip the check rather than block: */
	if (!percpu_down_read_trylock(&c->mark_lock))
		return;

	bch2_fs_inconsistent_on(!test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) &&
		!bch2_bkey_replicas_marked_locked(c, e.s_c, false), c,
		"extent key bad (replicas not marked in superblock):\n%s",
		(bch2_bkey_val_to_text(&PBUF(buf), c, e.s_c), buf));

	extent_for_each_ptr_decode(e, p, entry) {
		struct bch_dev *ca	= bch_dev_bkey_exists(c, p.ptr.dev);
		struct bucket_mark mark = ptr_bucket_mark(ca, &p.ptr);
		unsigned stale		= gen_after(mark.gen, p.ptr.gen);
		unsigned disk_sectors	= ptr_disk_sectors(p);
		/* cached pointers are accounted separately from dirty ones: */
		unsigned mark_sectors	= p.ptr.cached
			? mark.cached_sectors
			: mark.dirty_sectors;

		bch2_fs_inconsistent_on(stale && !p.ptr.cached, c,
			"stale dirty pointer (ptr gen %u bucket %u",
			p.ptr.gen, mark.gen);

		bch2_fs_inconsistent_on(stale > 96, c,
			"key too stale: %i", stale);

		bch2_fs_inconsistent_on(!stale &&
			(mark.data_type != BCH_DATA_USER ||
			 mark_sectors < disk_sectors), c,
			"extent pointer not marked: %s:\n"
			"type %u sectors %u < %u",
			(bch2_bkey_val_to_text(&PBUF(buf), c, e.s_c), buf),
			mark.data_type,
			mark_sectors, disk_sectors);
	}

	percpu_up_read(&c->mark_lock);
}

/* Print a human-readable representation of an extent key to @out. */
void bch2_extent_to_text(struct printbuf *out, struct bch_fs *c,
			 struct bkey_s_c k)
{
	bch2_bkey_ptrs_to_text(out, c, k);
}

/*
 * Attempt to merge two adjacent extents into one.
 *
 * Pass 1 walks both values entry-by-entry and bails with NOMERGE unless
 * every pair of entries is mergeable; pass 2 then merges the checksum
 * entries in place (l is modified, r is not). Returns BCH_MERGE_MERGE on
 * success, after extending l's size to cover both extents.
 */
enum merge_result bch2_extent_merge(struct bch_fs *c,
				    struct bkey_s _l, struct bkey_s _r)
{
	struct bkey_s_extent l = bkey_s_to_extent(_l);
	struct bkey_s_extent r = bkey_s_to_extent(_r);
	union bch_extent_entry *en_l = l.v->start;
	union bch_extent_entry *en_r = r.v->start;
	struct bch_extent_crc_unpacked crc_l, crc_r;

	/* Values must have the same layout to merge entry-by-entry: */
	if (bkey_val_u64s(l.k) != bkey_val_u64s(r.k))
		return BCH_MERGE_NOMERGE;

	crc_l = bch2_extent_crc_unpack(l.k, NULL);

	/* Pass 1: verify every entry pair is mergeable (no modification): */
	extent_for_each_entry(l, en_l) {
		/* Corresponding entry in r, at the same value offset: */
		en_r = vstruct_idx(r.v, (u64 *) en_l - l.v->_data);

		if (extent_entry_type(en_l) != extent_entry_type(en_r))
			return BCH_MERGE_NOMERGE;

		switch (extent_entry_type(en_l)) {
		case BCH_EXTENT_ENTRY_ptr: {
			const struct bch_extent_ptr *lp = &en_l->ptr;
			const struct bch_extent_ptr *rp = &en_r->ptr;
			struct bch_dev *ca;

			/* r's data must start right where l's ends: */
			if (lp->offset + crc_l.compressed_size != rp->offset ||
			    lp->dev			!= rp->dev ||
			    lp->gen			!= rp->gen)
				return BCH_MERGE_NOMERGE;

			/* We don't allow extents to straddle buckets: */
			ca = bch_dev_bkey_exists(c, lp->dev);

			if (PTR_BUCKET_NR(ca, lp) != PTR_BUCKET_NR(ca, rp))
				return BCH_MERGE_NOMERGE;

			break;
		}
		case BCH_EXTENT_ENTRY_stripe_ptr:
			if (en_l->stripe_ptr.block	!= en_r->stripe_ptr.block ||
			    en_l->stripe_ptr.idx	!= en_r->stripe_ptr.idx)
				return BCH_MERGE_NOMERGE;
			break;
		case BCH_EXTENT_ENTRY_crc32:
		case BCH_EXTENT_ENTRY_crc64:
		case BCH_EXTENT_ENTRY_crc128:
			crc_l = bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
			crc_r = bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));

			if (crc_l.csum_type		!= crc_r.csum_type ||
			    crc_l.compression_type	!= crc_r.compression_type ||
			    crc_l.nonce			!= crc_r.nonce)
				return BCH_MERGE_NOMERGE;

			/* Both checksums must cover exactly their live data: */
			if (crc_l.offset + crc_l.live_size != crc_l.compressed_size ||
			    crc_r.offset)
				return BCH_MERGE_NOMERGE;

			/* Checksums must be combinable without the data: */
			if (!bch2_checksum_mergeable(crc_l.csum_type))
				return BCH_MERGE_NOMERGE;

			if (crc_is_compressed(crc_l))
				return BCH_MERGE_NOMERGE;

			if (crc_l.csum_type &&
			    crc_l.uncompressed_size +
			    crc_r.uncompressed_size > c->sb.encoded_extent_max)
				return BCH_MERGE_NOMERGE;

			/* Merged size must fit in the packed crc format: */
			if (crc_l.uncompressed_size + crc_r.uncompressed_size >
			    bch2_crc_field_size_max[extent_entry_type(en_l)])
				return BCH_MERGE_NOMERGE;

			break;
		default:
			return BCH_MERGE_NOMERGE;
		}
	}

	/* Pass 2: merge checksum entries in place (modifies l only): */
	extent_for_each_entry(l, en_l) {
		struct bch_extent_crc_unpacked crc_l, crc_r;

		en_r = vstruct_idx(r.v, (u64 *) en_l - l.v->_data);

		if (!extent_entry_is_crc(en_l))
			continue;

		crc_l = bch2_extent_crc_unpack(l.k, entry_to_crc(en_l));
		crc_r = bch2_extent_crc_unpack(r.k, entry_to_crc(en_r));

		/* size argument is in bytes: uncompressed_size is sectors */
		crc_l.csum = bch2_checksum_merge(crc_l.csum_type,
						 crc_l.csum,
						 crc_r.csum,
						 crc_r.uncompressed_size << 9);

		crc_l.uncompressed_size	+= crc_r.uncompressed_size;
		crc_l.compressed_size	+= crc_r.compressed_size;

		bch2_extent_crc_pack(entry_to_crc(en_l), crc_l,
				     extent_entry_type(en_l));
	}

	bch2_key_resize(l.k, l.k->size + r.k->size);

	return BCH_MERGE_MERGE;
}

/* KEY_TYPE_reservation: */

const char *bch2_reservation_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);

	if (bkey_val_bytes(k.k) != sizeof(struct bch_reservation))
		return "incorrect value size";

	if (!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX)
		return "invalid nr_replicas";

	return NULL;
}

/* Print a human-readable representation of a reservation key to @out. */
void bch2_reservation_to_text(struct printbuf *out, struct bch_fs *c,
			      struct bkey_s_c k)
{
	struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);

	pr_buf(out, "generation %u replicas %u",
	       le32_to_cpu(r.v->generation),
	       r.v->nr_replicas);
}

/*
 * Merge two adjacent reservation keys. Only reservations with identical
 * generation and replica counts merge; if the combined size would overflow
 * KEY_SIZE_MAX, l is grown to the max and r trimmed from the front
 * (partial merge).
 */
enum merge_result bch2_reservation_merge(struct bch_fs *c,
					 struct bkey_s _l, struct bkey_s _r)
{
	struct bkey_s_reservation l = bkey_s_to_reservation(_l);
	struct bkey_s_reservation r = bkey_s_to_reservation(_r);

	if (l.v->generation != r.v->generation ||
	    l.v->nr_replicas != r.v->nr_replicas)
		return BCH_MERGE_NOMERGE;

	if ((u64) l.k->size + r.k->size > KEY_SIZE_MAX) {
		bch2_key_resize(l.k, KEY_SIZE_MAX);
		bch2_cut_front_s(l.k->p, r.s);
		return BCH_MERGE_PARTIAL;
	}

	bch2_key_resize(l.k, l.k->size + r.k->size);

	return BCH_MERGE_MERGE;
}

/* Extent checksum entries: */

/* returns true if not equal */
static inline bool bch2_crc_unpacked_cmp(struct bch_extent_crc_unpacked l,
					 struct bch_extent_crc_unpacked r)
{
	return (l.csum_type		!= r.csum_type ||
		l.compression_type	!= r.compression_type ||
		l.compressed_size	!= r.compressed_size ||
		l.uncompressed_size	!= r.uncompressed_size ||
		l.offset		!= r.offset ||
		l.live_size		!= r.live_size ||
		l.nonce			!= r.nonce ||
		bch2_crc_cmp(l.csum, r.csum));
445 446 447 448 449
}

static inline bool can_narrow_crc(struct bch_extent_crc_unpacked u,
				  struct bch_extent_crc_unpacked n)
{
450
	return !crc_is_compressed(u) &&
451 452 453 454 455 456
		u.csum_type &&
		u.uncompressed_size > u.live_size &&
		bch2_csum_type_is_encryption(u.csum_type) ==
		bch2_csum_type_is_encryption(n.csum_type);
}

/*
 * True if any checksum entry in @k could be narrowed to @n
 * (see bch2_bkey_narrow_crcs() below).
 */
bool bch2_can_narrow_extent_crcs(struct bkey_s_c k,
				 struct bch_extent_crc_unpacked n)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	struct bch_extent_crc_unpacked crc;
	const union bch_extent_entry *i;

	if (!n.csum_type)
		return false;

	bkey_for_each_crc(k.k, ptrs, crc, i)
		if (can_narrow_crc(crc, n))
			return true;

	return false;
}

/*
 * We're writing another replica for this extent, so while we've got the data in
 * memory we'll be computing a new checksum for the currently live data.
 *
 * If there are other replicas we aren't moving, and they are checksummed but
 * not compressed, we can modify them to point to only the data that is
 * currently live (so that readers won't have to bounce) while we've got the
 * checksum we need:
 */
bool bch2_bkey_narrow_crcs(struct bkey_i *k, struct bch_extent_crc_unpacked n)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
	struct bch_extent_crc_unpacked u;
	struct extent_ptr_decoded p;
	union bch_extent_entry *i;
	bool ret = false;

	/* Find a checksum entry that covers only live data: */
	if (!n.csum_type) {
		bkey_for_each_crc(&k->k, ptrs, u, i)
			if (!crc_is_compressed(u) &&
			    u.csum_type &&
			    u.live_size == u.uncompressed_size) {
				n = u;
				goto found;
			}
		return false;
	}
found:
	BUG_ON(crc_is_compressed(n));
	BUG_ON(n.offset);
	BUG_ON(n.live_size != k->k.size);

restart_narrow_pointers:
	/*
	 * Dropping/appending entries invalidates our iterator, so re-fetch
	 * the value layout and rescan from the start after every change:
	 */
	ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));

	bkey_for_each_ptr_decode(&k->k, ptrs, p, i)
		if (can_narrow_crc(p.crc, n)) {
			bch2_bkey_drop_ptr(bkey_i_to_s(k), &i->ptr);
			/* Point directly at the live data: */
			p.ptr.offset += p.crc.offset;
			p.crc = n;
			bch2_extent_ptr_decoded_append(k, &p);
			ret = true;
			goto restart_narrow_pointers;
		}

	return ret;
}

/*
 * Pack an unpacked crc @src into @dst using the on-disk entry format given
 * by @type (crc32/crc64/crc128). The caller is responsible for ensuring the
 * fields of @src fit in the chosen format.
 */
static void bch2_extent_crc_pack(union bch_extent_crc *dst,
				 struct bch_extent_crc_unpacked src,
				 enum bch_extent_entry_type type)
{
/* Fields shared by all three packed formats (sizes are stored biased by 1): */
#define set_common_fields(_dst, _src)					\
		_dst.type		= 1 << type;			\
		_dst.csum_type		= _src.csum_type,		\
		_dst.compression_type	= _src.compression_type,	\
		_dst._compressed_size	= _src.compressed_size - 1,	\
		_dst._uncompressed_size	= _src.uncompressed_size - 1,	\
		_dst.offset		= _src.offset

	switch (type) {
	case BCH_EXTENT_ENTRY_crc32:
		/* crc32 only stores the low 32 bits of the checksum: */
		set_common_fields(dst->crc32, src);
		dst->crc32.csum	 = *((__le32 *) &src.csum.lo);
		break;
	case BCH_EXTENT_ENTRY_crc64:
		/* crc64 stores 80 bits of checksum plus the nonce: */
		set_common_fields(dst->crc64, src);
		dst->crc64.nonce	= src.nonce;
		dst->crc64.csum_lo	= src.csum.lo;
		dst->crc64.csum_hi	= *((__le16 *) &src.csum.hi);
		break;
	case BCH_EXTENT_ENTRY_crc128:
		set_common_fields(dst->crc128, src);
		dst->crc128.nonce	= src.nonce;
		dst->crc128.csum	= src.csum;
		break;
	default:
		BUG();
	}
#undef set_common_fields
}

/*
 * Append crc entry @new to the end of @k's value, using the narrowest
 * packed format whose fields can represent it.
 */
void bch2_extent_crc_append(struct bkey_i *k,
			    struct bch_extent_crc_unpacked new)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
	union bch_extent_crc *crc = (void *) ptrs.end;
	enum bch_extent_entry_type type;

	/* Pick the smallest format that can hold csum, size and nonce: */
	if (bch_crc_bytes[new.csum_type]	<= 4 &&
	    new.uncompressed_size		<= CRC32_SIZE_MAX &&
	    new.nonce				<= CRC32_NONCE_MAX)
		type = BCH_EXTENT_ENTRY_crc32;
	else if (bch_crc_bytes[new.csum_type]	<= 10 &&
		   new.uncompressed_size	<= CRC64_SIZE_MAX &&
		   new.nonce			<= CRC64_NONCE_MAX)
		type = BCH_EXTENT_ENTRY_crc64;
	else if (bch_crc_bytes[new.csum_type]	<= 16 &&
		   new.uncompressed_size	<= CRC128_SIZE_MAX &&
		   new.nonce			<= CRC128_NONCE_MAX)
		type = BCH_EXTENT_ENTRY_crc128;
	else
		BUG();

	bch2_extent_crc_pack(crc, new, type);

	k->k.u64s += extent_entry_u64s(ptrs.end);

	EBUG_ON(bkey_val_u64s(&k->k) > BKEY_EXTENT_VAL_U64s_MAX);
}

/* Generic code for keys with pointers: */

/* Number of pointers in @k (cached or dirty). */
unsigned bch2_bkey_nr_ptrs(struct bkey_s_c k)
{
	return bch2_bkey_devs(k).nr;
}

/*
 * Number of allocated replicas: for reservations, the reserved replica
 * count; otherwise the number of dirty (non-cached) pointers.
 */
unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c k)
{
	return k.k->type == KEY_TYPE_reservation
		? bkey_s_c_to_reservation(k).v->nr_replicas
		: bch2_bkey_dirty_devs(k).nr;
}

/*
 * Number of fully-allocated replicas: reserved replicas for reservations,
 * otherwise dirty pointers whose data is not compressed (compressed data
 * counts as not fully allocated here).
 */
unsigned bch2_bkey_nr_ptrs_fully_allocated(struct bkey_s_c k)
{
	unsigned ret = 0;

	if (k.k->type == KEY_TYPE_reservation) {
		ret = bkey_s_c_to_reservation(k).v->nr_replicas;
	} else {
		struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
		const union bch_extent_entry *entry;
		struct extent_ptr_decoded p;

		bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
			ret += !p.ptr.cached && !crc_is_compressed(p.crc);
	}

	return ret;
}

/*
 * Total on-disk (compressed) sectors referenced by @k's dirty pointers to
 * compressed data.
 */
unsigned bch2_bkey_sectors_compressed(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned ret = 0;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		if (!p.ptr.cached && crc_is_compressed(p.crc))
			ret += p.crc.compressed_size;

	return ret;
}

/*
 * True if any checksum entry in @k is marked incompressible, i.e.
 * compression has already been tried and failed for this data.
 */
bool bch2_bkey_is_incompressible(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct bch_extent_crc_unpacked crc;

	bkey_for_each_crc(k.k, ptrs, crc, entry)
		if (crc.compression_type == BCH_COMPRESSION_TYPE_incompressible)
			return true;
	return false;
}

/*
 * Check that the extent btree range [pos, pos + size) is fully allocated
 * with at least @nr_replicas fully-allocated replicas per extent.
 * Returns true if so.
 */
bool bch2_check_range_allocated(struct bch_fs *c, struct bpos pos, u64 size,
				unsigned nr_replicas)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	struct bpos end = pos;
	struct bkey_s_c k;
	bool ret = true;
	int err;

	end.offset += size;

	bch2_trans_init(&trans, c, 0, 0);

	/* BTREE_ITER_SLOTS so holes are returned too, not just extents: */
	for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, pos,
			   BTREE_ITER_SLOTS, k, err) {
		if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
			break;

		if (nr_replicas > bch2_bkey_nr_ptrs_fully_allocated(k)) {
			ret = false;
			break;
		}
	}
	bch2_trans_exit(&trans);

	return ret;
}

/*
 * Durability contributed by a single pointer: 0 for cached pointers,
 * otherwise the device's durability (unless the device has failed),
 * possibly raised by erasure coding redundancy.
 */
static unsigned bch2_extent_ptr_durability(struct bch_fs *c,
					   struct extent_ptr_decoded p)
{
	unsigned durability = 0;
	struct bch_dev *ca;

	/* Cached copies provide no durability: */
	if (p.ptr.cached)
		return 0;

	ca = bch_dev_bkey_exists(c, p.ptr.dev);

	if (ca->mi.state != BCH_MEMBER_STATE_FAILED)
		durability = max_t(unsigned, durability, ca->mi.durability);

	if (p.has_ec) {
		struct stripe *s =
			genradix_ptr(&c->stripes[0], p.ec.idx);

		/* Stripe should always exist for an EC pointer: */
		if (WARN_ON(!s))
			goto out;

		durability = max_t(unsigned, durability, s->nr_redundant);
	}
out:
	return durability;
}

/* Total durability of @k: sum of per-pointer durabilities. */
unsigned bch2_bkey_durability(struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	unsigned durability = 0;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		durability += bch2_extent_ptr_durability(c, p);

	return durability;
}

/*
 * Reduce @k's durability to @nr_desired_replicas by flipping excess dirty
 * pointers to cached, preferring to demote pointers outside @target first
 * (if @target is nonzero), then any remaining pointers.
 */
void bch2_bkey_mark_replicas_cached(struct bch_fs *c, struct bkey_s k,
				    unsigned target,
				    unsigned nr_desired_replicas)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
	union bch_extent_entry *entry;
	struct extent_ptr_decoded p;
	/* How much durability we have beyond what's desired: */
	int extra = bch2_bkey_durability(c, k.s_c) - nr_desired_replicas;

	/* First pass: demote only pointers outside the target: */
	if (target && extra > 0)
		bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
			int n = bch2_extent_ptr_durability(c, p);

			if (n && n <= extra &&
			    !bch2_dev_in_target(c, p.ptr.dev, target)) {
				entry->ptr.cached = true;
				extra -= n;
			}
		}

	/* Second pass: demote any pointer if still over target durability: */
	if (extra > 0)
		bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
			int n = bch2_extent_ptr_durability(c, p);

			if (n && n <= extra) {
				entry->ptr.cached = true;
				extra -= n;
			}
		}
}
/*
 * Append a bare pointer entry to @k's value. The key must not already have
 * a pointer to the same device.
 */
void bch2_bkey_append_ptr(struct bkey_i *k,
			  struct bch_extent_ptr ptr)
{
	EBUG_ON(bch2_bkey_has_device(bkey_i_to_s_c(k), ptr.dev));

	switch (k->k.type) {
	case KEY_TYPE_btree_ptr:
	case KEY_TYPE_btree_ptr_v2:
	case KEY_TYPE_extent:
		EBUG_ON(bkey_val_u64s(&k->k) >= BKEY_EXTENT_VAL_U64s_MAX);

		/* Tag the entry so its type can be identified when decoding: */
		ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;

		memcpy((void *) &k->v + bkey_val_bytes(&k->k),
		       &ptr,
		       sizeof(ptr));
		k->u64s++;
		break;
	default:
		BUG();
	}
}
/*
 * Insert extent entry @new before @dst within @k's value, shifting
 * everything from @dst onward up to make room.
 */
static inline void __extent_entry_insert(struct bkey_i *k,
					 union bch_extent_entry *dst,
					 union bch_extent_entry *new)
{
	union bch_extent_entry *end = bkey_val_end(bkey_i_to_s(k));

	/* Shift the tail up by the size of the new entry: */
	memmove_u64s_up_small((u64 *) dst + extent_entry_u64s(new),
			      dst, (u64 *) end - (u64 *) dst);
	k->k.u64s += extent_entry_u64s(new);
	memcpy_u64s_small(dst, new, extent_entry_u64s(new));
}
778

779
void bch2_extent_ptr_decoded_append(struct bkey_i *k,
780 781
				    struct extent_ptr_decoded *p)
{
782 783 784
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(k));
	struct bch_extent_crc_unpacked crc =
		bch2_extent_crc_unpack(&k->k, NULL);
785
	union bch_extent_entry *pos;
786

787
	if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
788
		pos = ptrs.start;
789 790 791
		goto found;
	}

792
	bkey_for_each_crc(&k->k, ptrs, crc, pos)
793 794 795 796 797
		if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
			pos = extent_entry_next(pos);
			goto found;
		}

798 799
	bch2_extent_crc_append(k, p->crc);
	pos = bkey_val_end(bkey_i_to_s(k));
800 801
found:
	p->ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
802
	__extent_entry_insert(k, pos, to_entry(&p->ptr));
803

804 805 806
	if (p->has_ec) {
		p->ec.type = 1 << BCH_EXTENT_ENTRY_stripe_ptr;
		__extent_entry_insert(k, pos, to_entry(&p->ec));
807
	}
808 809
}

810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910
static union bch_extent_entry *extent_entry_prev(struct bkey_ptrs ptrs,
					  union bch_extent_entry *entry)
{
	union bch_extent_entry *i = ptrs.start;

	if (i == entry)
		return NULL;

	while (extent_entry_next(i) != entry)
		i = extent_entry_next(i);
	return i;
}

/*
 * Remove pointer entry @ptr from @k, also dropping a preceding crc entry
 * (and any stripe_ptr entries between them) when no other pointer still
 * uses that crc. Returns the position where the dropped span began.
 */
union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s k,
					   struct bch_extent_ptr *ptr)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
	union bch_extent_entry *dst, *src, *prev;
	bool drop_crc = true;

	EBUG_ON(ptr < &ptrs.start->ptr ||
		ptr >= &ptrs.end->ptr);
	EBUG_ON(ptr->type != 1 << BCH_EXTENT_ENTRY_ptr);

	/*
	 * If the next entry isn't a crc (or the end), another pointer still
	 * shares the preceding crc entry - keep it:
	 */
	src = extent_entry_next(to_entry(ptr));
	if (src != ptrs.end &&
	    !extent_entry_is_crc(src))
		drop_crc = false;

	/* Walk back to include associated crc/stripe entries in the drop: */
	dst = to_entry(ptr);
	while ((prev = extent_entry_prev(ptrs, dst))) {
		if (extent_entry_is_ptr(prev))
			break;

		if (extent_entry_is_crc(prev)) {
			if (drop_crc)
				dst = prev;
			break;
		}

		dst = prev;
	}

	memmove_u64s_down(dst, src,
			  (u64 *) ptrs.end - (u64 *) src);
	k.k->u64s -= (u64 *) src - (u64 *) dst;

	return dst;
}

/* Drop all pointers in @k that point to device @dev. */
void bch2_bkey_drop_device(struct bkey_s k, unsigned dev)
{
	struct bch_extent_ptr *ptr;

	bch2_bkey_drop_ptrs(k, ptr, ptr->dev == dev);
}

/* Return @k's pointer to device @dev, or NULL if there is none. */
const struct bch_extent_ptr *
bch2_bkey_has_device(struct bkey_s_c k, unsigned dev)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;

	bkey_for_each_ptr(ptrs, ptr)
		if (ptr->dev == dev)
			return ptr;

	return NULL;
}

/*
 * True if @k has a usable pointer to a device in @target (dirty, or cached
 * but not stale).
 */
bool bch2_bkey_has_target(struct bch_fs *c, struct bkey_s_c k, unsigned target)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr;

	bkey_for_each_ptr(ptrs, ptr)
		if (bch2_dev_in_target(c, ptr->dev, target) &&
		    (!ptr->cached ||
		     !ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr)))
			return true;

	return false;
}

/*
 * True if @k has a pointer matching @m: same device and generation, and
 * referring to the same on-disk data (accounting for the crc offset and
 * the difference between @k's start and @offset).
 */
bool bch2_bkey_matches_ptr(struct bch_fs *c, struct bkey_s_c k,
			   struct bch_extent_ptr m, u64 offset)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct extent_ptr_decoded p;

	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
		if (p.ptr.dev	== m.dev &&
		    p.ptr.gen	== m.gen &&
		    (s64) p.ptr.offset + p.crc.offset - bkey_start_offset(k.k) ==
		    (s64) m.offset  - offset)
			return true;

	return false;
}

/*
 * bch_extent_normalize - clean up an extent, dropping stale pointers etc.
 *
 * Returns true if @k should be dropped entirely
 *
 * For existing keys, only called when btree nodes are being rewritten, not when
 * they're merely being compacted/resorted in memory.
 */
bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k)
{
	struct bch_extent_ptr *ptr;

	/* Drop cached pointers whose bucket has since been reused: */
	bch2_bkey_drop_ptrs(k, ptr,
		ptr->cached &&
		ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr));

	/* will only happen if all pointers were cached: */
	if (!bch2_bkey_nr_ptrs(k.s_c))
		k.k->type = KEY_TYPE_discard;

	return bkey_whiteout(k.k);
}

/*
 * Print every extent entry in @k (pointers, crc entries, stripe pointers)
 * in human-readable form to @out, space separated.
 */
void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
			    struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct bch_extent_crc_unpacked crc;
	const struct bch_extent_ptr *ptr;
	const struct bch_extent_stripe_ptr *ec;
	struct bch_dev *ca;
	bool first = true;

	bkey_extent_entry_for_each(ptrs, entry) {
		if (!first)
			pr_buf(out, " ");

		switch (__extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
			ptr = entry_to_ptr(entry);
			/* Device may no longer exist - guard the lookup: */
			ca = ptr->dev < c->sb.nr_devices && c->devs[ptr->dev]
				? bch_dev_bkey_exists(c, ptr->dev)
				: NULL;

			pr_buf(out, "ptr: %u:%llu gen %u%s%s", ptr->dev,
			       (u64) ptr->offset, ptr->gen,
			       ptr->cached ? " cached" : "",
			       ca && ptr_stale(ca, ptr)
			       ? " stale" : "");
			break;
		case BCH_EXTENT_ENTRY_crc32:
		case BCH_EXTENT_ENTRY_crc64:
		case BCH_EXTENT_ENTRY_crc128:
			crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));

			pr_buf(out, "crc: c_size %u size %u offset %u nonce %u csum %u compress %u",
			       crc.compressed_size,
			       crc.uncompressed_size,
			       crc.offset, crc.nonce,
			       crc.csum_type,
			       crc.compression_type);
			break;
		case BCH_EXTENT_ENTRY_stripe_ptr:
			ec = &entry->stripe_ptr;

			pr_buf(out, "ec: idx %llu block %u",
			       (u64) ec->idx, ec->block);
			break;
		default:
			/* Unknown entry type: can't parse past it, stop here */
			pr_buf(out, "(invalid extent entry %.16llx)", *((u64 *) entry));
			return;
		}

		first = false;
	}
}

/*
 * Validate a single pointer entry: device must exist, no duplicate device
 * pointers, and [offset, offset + size_ondisk) must lie within one bucket
 * of the device. Returns an error string, or NULL if valid.
 */
static const char *extent_ptr_invalid(const struct bch_fs *c,
				      struct bkey_s_c k,
				      const struct bch_extent_ptr *ptr,
				      unsigned size_ondisk,
				      bool metadata)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const struct bch_extent_ptr *ptr2;
	struct bch_dev *ca;

	if (!bch2_dev_exists2(c, ptr->dev))
		return "pointer to invalid device";

	ca = bch_dev_bkey_exists(c, ptr->dev);
	if (!ca)
		return "pointer to invalid device";

	bkey_for_each_ptr(ptrs, ptr2)
		if (ptr != ptr2 && ptr->dev == ptr2->dev)
			return "multiple pointers to same device";

	if (ptr->offset + size_ondisk > bucket_to_sector(ca, ca->mi.nbuckets))
		return "offset past end of device";

	if (ptr->offset < bucket_to_sector(ca, ca->mi.first_bucket))
		return "offset before first bucket";

	if (bucket_remainder(ca, ptr->offset) +
	    size_ondisk > ca->mi.bucket_size)
		return "spans multiple buckets";

	return NULL;
}
1022

1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033
const char *bch2_bkey_ptrs_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	struct bch_extent_crc_unpacked crc;
	unsigned size_ondisk = k.k->size;
	const char *reason;
	unsigned nonce = UINT_MAX;

	if (k.k->type == KEY_TYPE_btree_ptr)
		size_ondisk = c->opts.btree_node_size;
Kent Overstreet's avatar
Kent Overstreet committed
1034 1035
	if (k.k->type == KEY_TYPE_btree_ptr_v2)
		size_ondisk = le16_to_cpu(bkey_s_c_to_btree_ptr_v2(k).v->sectors);
1036 1037 1038 1039

	bkey_extent_entry_for_each(ptrs, entry) {
		if (__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX)
			return "invalid extent entry type";
1040

1041 1042 1043 1044 1045 1046 1047 1048 1049 1050
		if (k.k->type == KEY_TYPE_btree_ptr &&
		    !extent_entry_is_ptr(entry))
			return "has non ptr field";

		switch (extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
			reason = extent_ptr_invalid(c, k, &entry->ptr,
						    size_ondisk, false);
			if (reason)
				return reason;
1051 1052 1053 1054
			break;
		case BCH_EXTENT_ENTRY_crc32:
		case BCH_EXTENT_ENTRY_crc64:
		case BCH_EXTENT_ENTRY_crc128:
1055
			crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));
1056

1057 1058 1059
			if (crc.offset + crc.live_size >
			    crc.uncompressed_size)
				return "checksum offset + key size > uncompressed size";
1060

1061
			size_ondisk = crc.compressed_size;
1062

1063 1064
			if (!bch2_checksum_type_valid(c, crc.csum_type))
				return "invalid checksum type";
1065

1066
			if (crc.compression_type >= BCH_COMPRESSION_TYPE_NR)
1067
				return "invalid compression type";
1068

1069 1070 1071 1072 1073 1074 1075 1076
			if (bch2_csum_type_is_encryption(crc.csum_type)) {
				if (nonce == UINT_MAX)
					nonce = crc.offset + crc.nonce;
				else if (nonce != crc.offset + crc.nonce)
					return "incorrect nonce";
			}
			break;
		case BCH_EXTENT_ENTRY_stripe_ptr:
1077 1078
			break;
		}
1079 1080
	}

1081 1082
	return NULL;
}
/*
 * Byte-swap @k's value for endianness conversion: first swab every u64,
 * then fix up the sub-u64 checksum fields in crc entries, which are not
 * laid out on u64 boundaries.
 */
void bch2_ptr_swab(struct bkey_s k)
{
	struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
	union bch_extent_entry *entry;
	u64 *d;

	/* Pass 1: swab everything at u64 granularity: */
	for (d =  (u64 *) ptrs.start;
	     d != (u64 *) ptrs.end;
	     d++)
		*d = swab64(*d);

	/* Pass 2: re-swab checksum fields that aren't u64 sized/aligned: */
	for (entry = ptrs.start;
	     entry < ptrs.end;
	     entry = extent_entry_next(entry)) {
		switch (extent_entry_type(entry)) {
		case BCH_EXTENT_ENTRY_ptr:
			break;
		case BCH_EXTENT_ENTRY_crc32:
			entry->crc32.csum = swab32(entry->crc32.csum);
			break;
		case BCH_EXTENT_ENTRY_crc64:
			entry->crc64.csum_hi = swab16(entry->crc64.csum_hi);
			entry->crc64.csum_lo = swab64(entry->crc64.csum_lo);
			break;
		case BCH_EXTENT_ENTRY_crc128:
			entry->crc128.csum.hi = (__force __le64)
				swab64((__force u64) entry->crc128.csum.hi);
			entry->crc128.csum.lo = (__force __le64)
				swab64((__force u64) entry->crc128.csum.lo);
			break;
		case BCH_EXTENT_ENTRY_stripe_ptr:
			break;
		}
	}
}

1120 1121 1122
/* Generic extent code: */

int bch2_cut_front_s(struct bpos where, struct bkey_s k)
1123
{
1124 1125 1126
	unsigned new_val_u64s = bkey_val_u64s(k.k);
	int val_u64s_delta;
	u64 sub;
1127

1128 1129
	if (bkey_cmp(where, bkey_start_pos(k.k)) <= 0)
		return 0;
1130

1131
	EBUG_ON(bkey_cmp(where, k.k->p) > 0);
1132

1133
	sub = where.offset - bkey_start_offset(k.k);
1134

1135 1136 1137 1138 1139
	k.k->size -= sub;

	if (!k.k->size) {
		k.k->type = KEY_TYPE_deleted;
		new_val_u64s = 0;
1140 1141
	}

1142 1143 1144 1145 1146 1147
	switch (k.k->type) {
	case KEY_TYPE_extent:
	case KEY_TYPE_reflink_v: {
		struct bkey_ptrs ptrs = bch2_bkey_ptrs(k);
		union bch_extent_entry *entry;
		bool seen_crc = false;
1148

1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166
		bkey_extent_entry_for_each(ptrs, entry) {
			switch (extent_entry_type(entry)) {
			case BCH_EXTENT_ENTRY_ptr:
				if (!seen_crc)
					entry->ptr.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_crc32:
				entry->crc32.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_crc64:
				entry->crc64.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_crc128:
				entry->crc128.offset += sub;
				break;
			case BCH_EXTENT_ENTRY_stripe_ptr:
				break;
			}
1167

1168 1169 1170
			if (extent_entry_is_crc(entry))
				seen_crc = true;
		}
1171 1172 1173

		break;
	}
1174 1175 1176 1177
	case KEY_TYPE_reflink_p: {
		struct bkey_s_reflink_p p = bkey_s_to_reflink_p(k);

		le64_add_cpu(&p.v->idx, sub);
1178 1179
		break;
	}
1180 1181
	case KEY_TYPE_inline_data: {
		struct bkey_s_inline_data d = bkey_s_to_inline_data(k);
1182

1183
		sub = min_t(u64, sub << 9, bkey_val_bytes(d.k));
1184

1185 1186 1187
		memmove(d.v->data,
			d.v->data + sub,
			bkey_val_bytes(d.k) - sub);
1188

1189 1190 1191 1192
		new_val_u64s -= sub >> 3;
		break;
	}
	}
1193

1194 1195
	val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
	BUG_ON(val_u64s_delta < 0);
1196

1197 1198 1199
	set_bkey_val_u64s(k.k, new_val_u64s);
	memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
	return -val_u64s_delta;
1200 1201
}

1202
int bch2_cut_back_s(struct bpos where, struct bkey_s k)
1203
{
1204 1205 1206
	unsigned new_val_u64s = bkey_val_u64s(k.k);
	int val_u64s_delta;
	u64 len = 0;
1207

1208 1209
	if (bkey_cmp(where, k.k->p) >= 0)
		return 0;
1210

1211
	EBUG_ON(bkey_cmp(where, bkey_start_pos(k.k)) < 0);
1212

1213
	len = where.offset - bkey_start_offset(k.k);
1214

1215 1216 1217 1218 1219 1220
	k.k->p = where;
	k.k->size = len;

	if (!len) {
		k.k->type = KEY_TYPE_deleted;
		new_val_u64s = 0;
1221 1222
	}

1223 1224 1225 1226 1227
	switch (k.k->type) {
	case KEY_TYPE_inline_data:
		new_val_u64s = min(new_val_u64s, k.k->size << 6);
		break;
	}
1228

1229 1230 1231 1232 1233 1234
	val_u64s_delta = bkey_val_u64s(k.k) - new_val_u64s;
	BUG_ON(val_u64s_delta < 0);

	set_bkey_val_u64s(k.k, new_val_u64s);
	memset(bkey_val_end(k), 0, val_u64s_delta * sizeof(u64));
	return -val_u64s_delta;
1235
}