Commit a8b3a677 authored by Kent Overstreet

bcachefs: Nocow support

This adds support for nocow mode, where we do writes in-place when
possible. Patch components:

 - New boolean filesystem and inode option, nocow: note that when nocow
   is enabled, data checksumming and compression are implicitly disabled

 - To prevent in-place writes from racing with data moves
   (data_update.c) or bucket reuse (i.e. a bucket being reused and
   re-allocated while a nocow write is in flight), we have a new locking
   mechanism.

   Buckets can be locked against either a nocow write or a data
   update/move, using a fixed size hash table of two_state_shared locks.
   chaining, meaning updates and moves to different buckets that hash to
   the same lock will wait unnecessarily - we'll want to watch for this
   becoming an issue.

 - The allocator path also needs to check for in-place writes in flight
   to a given bucket before giving it out: thus we add another counter
   to bucket_alloc_state so we can track this.

 - Fsync now may need to issue cache flushes to block devices instead of
   flushing the journal. We add a device bitmask to bch_inode_info,
   ei_devs_need_flush, which tracks devices that need to have flushes
   issued - note that this will lead to unnecessary flushes when other
   codepaths have already issued flushes; we may want to replace this
   with a sequence number.

 - New nocow write path: look up extents, and if they're writable, write
   to them in place - otherwise fall back to the normal COW write path
   (sketched below).
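
   As a rough sketch of that decision (illustrative only - the real
   implementation is in the io.c diff, which is collapsed below;
   nocow_write_possible() is a made-up name, while the two predicates
   are existing helpers that appear elsewhere in this patch):

	/* Sketch: can this extent be overwritten in place? */
	static bool nocow_write_possible(struct bkey_s_c k)
	{
		/* Unwritten extents must first be converted - see
		 * BCH_WRITE_CONVERT_UNWRITTEN below: */
		if (bkey_extent_is_unwritten(k))
			return false;

		/* Only keys pointing directly at data can be written in place: */
		return bkey_extent_is_direct_data(k.k);
	}

   When the check passes, the write path takes the nocow locks on the
   extent's buckets, submits the write in place and records each device
   written to in op->devs_need_flush; otherwise it falls back to COW.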

XXX: switch to sequence numbers instead of bitmask for devs needing
journal flush

XXX: ei_quota_lock being a mutex means bch2_nocow_write_done() needs to
run in process context - see if we can improve this
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 4dcd1cae
......@@ -52,6 +52,7 @@ bcachefs-y := \
migrate.o \
move.o \
movinggc.o \
nocow_locking.o \
opts.o \
printbuf.o \
quota.o \
......
......@@ -227,6 +227,11 @@ static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *
return NULL;
}
if (bch2_bucket_nocow_is_locked(&c->nocow_locks, POS(ca->dev_idx, bucket))) {
s->skipped_nocow++;
return NULL;
}
spin_lock(&c->freelist_lock);
if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(reserve))) {
......
......@@ -12,6 +12,7 @@ struct bucket_alloc_state {
u64 buckets_seen;
u64 skipped_open;
u64 skipped_need_journal_commit;
u64 skipped_nocow;
u64 skipped_nouse;
};
......
......@@ -206,6 +206,7 @@
#include "bcachefs_format.h"
#include "errcode.h"
#include "fifo.h"
#include "nocow_locking.h"
#include "opts.h"
#include "util.h"
......@@ -383,7 +384,8 @@ BCH_DEBUG_PARAMS_DEBUG()
x(journal_flush_seq) \
x(blocked_journal) \
x(blocked_allocate) \
-x(blocked_allocate_open_bucket)
+x(blocked_allocate_open_bucket) \
+x(nocow_lock_contended)
enum bch_time_stats {
#define x(name) BCH_TIME_##name,
......@@ -483,6 +485,7 @@ struct bch_dev {
struct bch_sb *sb_read_scratch;
int sb_write_error;
dev_t dev;
atomic_t flush_seq;
struct bch_devs_mask self;
......@@ -897,7 +900,9 @@ struct bch_fs {
struct bio_set bio_read_split;
struct bio_set bio_write;
struct mutex bio_bounce_pages_lock;
mempool_t bio_bounce_pages;
+struct bucket_nocow_lock_table
+nocow_locks;
struct rhashtable promote_table;
mempool_t compression_bounce[2];
......@@ -959,6 +964,7 @@ mempool_t bio_bounce_pages;
struct bio_set writepage_bioset;
struct bio_set dio_write_bioset;
struct bio_set dio_read_bioset;
struct bio_set nocow_flush_bioset;
/* ERRORS */
struct list_head fsck_errors;
......
......@@ -798,7 +798,8 @@ struct bch_inode_generation {
x(bi_dir, 64) \
x(bi_dir_offset, 64) \
x(bi_subvol, 32) \
-x(bi_parent_subvol, 32)
+x(bi_parent_subvol, 32) \
+x(bi_nocow, 8)
/* subset of BCH_INODE_FIELDS */
#define BCH_INODE_OPTS() \
......@@ -810,7 +811,8 @@ struct bch_inode_generation {
x(promote_target, 16) \
x(foreground_target, 16) \
x(background_target, 16) \
-x(erasure_code, 16)
+x(erasure_code, 16) \
+x(nocow, 8)
enum inode_opt_id {
#define x(name, ...) \
......@@ -1548,7 +1550,8 @@ struct bch_sb_field_journal_seq_blacklist {
x(alloc_v4, 20) \
x(new_data_types, 21) \
x(backpointers, 22) \
-x(inode_v3, 23)
+x(inode_v3, 23) \
+x(unwritten_extents, 24)
enum bcachefs_metadata_version {
bcachefs_metadata_version_min = 9,
......@@ -1696,6 +1699,7 @@ LE64_BITMASK(BCH_SB_JOURNAL_FLUSH_DELAY,struct bch_sb, flags[3], 30, 62);
LE64_BITMASK(BCH_SB_JOURNAL_FLUSH_DISABLED,struct bch_sb, flags[3], 62, 63);
LE64_BITMASK(BCH_SB_JOURNAL_RECLAIM_DELAY,struct bch_sb, flags[4], 0, 32);
LE64_BITMASK(BCH_SB_JOURNAL_TRANSACTION_NAMES,struct bch_sb, flags[4], 32, 33);
LE64_BITMASK(BCH_SB_NOCOW, struct bch_sb, flags[4], 33, 34);
LE64_BITMASK(BCH_SB_WRITE_BUFFER_SIZE, struct bch_sb, flags[4], 34, 54);
/*
......
......@@ -1832,7 +1832,8 @@ static void btree_write_submit(struct work_struct *work)
bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&tmp.k)), ptr)
ptr->offset += wbio->sector_offset;
-bch2_submit_wbio_replicas(&wbio->wbio, wbio->wbio.c, BCH_DATA_btree, &tmp.k);
+bch2_submit_wbio_replicas(&wbio->wbio, wbio->wbio.c, BCH_DATA_btree,
+&tmp.k, false);
}
void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
......
......@@ -99,14 +99,17 @@ static inline enum bch_csum_type bch2_csum_opt_to_type(enum bch_csum_opts type,
}
static inline enum bch_csum_type bch2_data_checksum_type(struct bch_fs *c,
-unsigned opt)
+struct bch_io_opts opts)
{
+if (opts.nocow)
+return 0;
if (c->sb.encryption_type)
return c->opts.wide_macs
? BCH_CSUM_chacha20_poly1305_128
: BCH_CSUM_chacha20_poly1305_80;
-return bch2_csum_opt_to_type(opt, true);
+return bch2_csum_opt_to_type(opts.data_checksum, true);
}
static inline enum bch_csum_type bch2_meta_checksum_type(struct bch_fs *c)
......
......@@ -303,6 +303,13 @@ void bch2_data_update_read_done(struct data_update *m,
void bch2_data_update_exit(struct data_update *update)
{
struct bch_fs *c = update->op.c;
struct bkey_ptrs_c ptrs =
bch2_bkey_ptrs_c(bkey_i_to_s_c(update->k.k));
const struct bch_extent_ptr *ptr;
bkey_for_each_ptr(ptrs, ptr)
bch2_bucket_nocow_unlock(&c->nocow_locks,
PTR_BUCKET_POS(c, ptr), 0);
bch2_bkey_buf_exit(&update->k, c);
bch2_disk_reservation_put(c, &update->op.res);
......@@ -451,6 +458,9 @@ int bch2_data_update_init(struct bch_fs *c, struct data_update *m,
m->op.incompressible = true;
i++;
bch2_bucket_nocow_lock(&c->nocow_locks,
PTR_BUCKET_POS(c, &p.ptr), 0);
}
if (reserve_sectors) {
......
......@@ -664,22 +664,21 @@ unsigned bch2_bkey_replicas(struct bch_fs *c, struct bkey_s_c k)
return replicas;
}
-static unsigned bch2_extent_ptr_durability(struct bch_fs *c,
-struct extent_ptr_decoded p)
+unsigned bch2_extent_ptr_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
{
unsigned durability = 0;
struct bch_dev *ca;
-if (p.ptr.cached)
+if (p->ptr.cached)
return 0;
-ca = bch_dev_bkey_exists(c, p.ptr.dev);
+ca = bch_dev_bkey_exists(c, p->ptr.dev);
if (ca->mi.state != BCH_MEMBER_STATE_failed)
durability = max_t(unsigned, durability, ca->mi.durability);
-if (p.has_ec)
-durability += p.ec.redundancy;
+if (p->has_ec)
+durability += p->ec.redundancy;
return durability;
}
......@@ -692,7 +691,7 @@ unsigned bch2_bkey_durability(struct bch_fs *c, struct bkey_s_c k)
unsigned durability = 0;
bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
-durability += bch2_extent_ptr_durability(c, p);
+durability += bch2_extent_ptr_durability(c, &p);
return durability;
}
......@@ -907,23 +906,31 @@ bool bch2_bkey_matches_ptr(struct bch_fs *c, struct bkey_s_c k,
*/
bool bch2_extents_match(struct bkey_s_c k1, struct bkey_s_c k2)
{
-struct bkey_ptrs_c ptrs1 = bch2_bkey_ptrs_c(k1);
-struct bkey_ptrs_c ptrs2 = bch2_bkey_ptrs_c(k2);
-const union bch_extent_entry *entry1, *entry2;
-struct extent_ptr_decoded p1, p2;
-if (bkey_extent_is_unwritten(k1) != bkey_extent_is_unwritten(k2))
+if (k1.k->type != k2.k->type)
return false;
-bkey_for_each_ptr_decode(k1.k, ptrs1, p1, entry1)
-bkey_for_each_ptr_decode(k2.k, ptrs2, p2, entry2)
+if (bkey_extent_is_direct_data(k1.k)) {
+struct bkey_ptrs_c ptrs1 = bch2_bkey_ptrs_c(k1);
+struct bkey_ptrs_c ptrs2 = bch2_bkey_ptrs_c(k2);
+const union bch_extent_entry *entry1, *entry2;
+struct extent_ptr_decoded p1, p2;
+if (bkey_extent_is_unwritten(k1) != bkey_extent_is_unwritten(k2))
+return false;
+bkey_for_each_ptr_decode(k1.k, ptrs1, p1, entry1)
+bkey_for_each_ptr_decode(k2.k, ptrs2, p2, entry2)
if (p1.ptr.dev == p2.ptr.dev &&
p1.ptr.gen == p2.ptr.gen &&
(s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) ==
(s64) p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k))
return true;
return false;
+} else {
+/* KEY_TYPE_deleted, etc. */
+return true;
+}
}
bool bch2_extent_has_ptr(struct bkey_s_c k1, struct extent_ptr_decoded p1,
......
......@@ -596,6 +596,7 @@ bool bch2_bkey_is_incompressible(struct bkey_s_c);
unsigned bch2_bkey_sectors_compressed(struct bkey_s_c);
unsigned bch2_bkey_replicas(struct bch_fs *, struct bkey_s_c);
unsigned bch2_extent_ptr_durability(struct bch_fs *, struct extent_ptr_decoded *);
unsigned bch2_bkey_durability(struct bch_fs *, struct bkey_s_c);
void bch2_bkey_drop_device(struct bkey_s, unsigned);
......
......@@ -35,6 +35,72 @@
#include <trace/events/writeback.h>
struct nocow_flush {
struct closure *cl;
struct bch_dev *ca;
struct bio bio;
};
static void nocow_flush_endio(struct bio *_bio)
{
struct nocow_flush *bio = container_of(_bio, struct nocow_flush, bio);
closure_put(bio->cl);
percpu_ref_put(&bio->ca->io_ref);
bio_put(&bio->bio);
}
static void bch2_inode_flush_nocow_writes_async(struct bch_fs *c,
struct bch_inode_info *inode,
struct closure *cl)
{
struct nocow_flush *bio;
struct bch_dev *ca;
struct bch_devs_mask devs;
unsigned dev;
dev = find_first_bit(inode->ei_devs_need_flush.d, BCH_SB_MEMBERS_MAX);
if (dev == BCH_SB_MEMBERS_MAX)
return;
devs = inode->ei_devs_need_flush;
memset(&inode->ei_devs_need_flush, 0, sizeof(inode->ei_devs_need_flush));
for_each_set_bit(dev, devs.d, BCH_SB_MEMBERS_MAX) {
rcu_read_lock();
ca = rcu_dereference(c->devs[dev]);
if (ca && !percpu_ref_tryget(&ca->io_ref))
ca = NULL;
rcu_read_unlock();
if (!ca)
continue;
bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev, 0,
REQ_OP_FLUSH,
GFP_KERNEL,
&c->nocow_flush_bioset),
struct nocow_flush, bio);
bio->cl = cl;
bio->ca = ca;
bio->bio.bi_end_io = nocow_flush_endio;
closure_bio_submit(&bio->bio, cl);
}
}
static int bch2_inode_flush_nocow_writes(struct bch_fs *c,
struct bch_inode_info *inode)
{
struct closure cl;
closure_init_stack(&cl);
bch2_inode_flush_nocow_writes_async(c, inode, &cl);
closure_sync(&cl);
return 0;
}
static inline bool bio_full(struct bio *bio, unsigned len)
{
if (bio->bi_vcnt >= bio->bi_max_vecs)
......@@ -1327,6 +1393,7 @@ static void bch2_writepage_io_alloc(struct bch_fs *c,
op->subvol = inode->ei_subvol;
op->pos = POS(inode->v.i_ino, sector);
op->end_io = bch2_writepage_io_done;
op->devs_need_flush = &inode->ei_devs_need_flush;
op->wbio.bio.bi_iter.bi_sector = sector;
op->wbio.bio.bi_opf = wbc_to_write_flags(wbc);
}
......@@ -2148,10 +2215,12 @@ static noinline void bch2_dio_write_flush(struct dio_write *dio)
if (!dio->op.error) {
ret = bch2_inode_find_by_inum(c, inode_inum(dio->inode), &inode);
-if (ret)
+if (ret) {
dio->op.error = ret;
-else
+} else {
bch2_journal_flush_seq_async(&c->journal, inode.bi_journal_seq, &dio->op.cl);
+bch2_inode_flush_nocow_writes_async(c, dio->inode, &dio->op.cl);
+}
}
if (dio->sync) {
......@@ -2296,6 +2365,7 @@ static __always_inline long bch2_dio_write_loop(struct dio_write *dio)
dio->op.nr_replicas = dio->op.opts.data_replicas;
dio->op.subvol = inode->ei_subvol;
dio->op.pos = POS(inode->v.i_ino, (u64) req->ki_pos >> 9);
dio->op.devs_need_flush = &inode->ei_devs_need_flush;
if (sync)
dio->op.flags |= BCH_WRITE_SYNC;
......@@ -2495,19 +2565,21 @@ ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
* inode->ei_inode.bi_journal_seq won't be up to date since it's set in an
* insert trigger: look up the btree inode instead
*/
-static int bch2_flush_inode(struct bch_fs *c, subvol_inum inum)
+static int bch2_flush_inode(struct bch_fs *c,
+struct bch_inode_info *inode)
{
-struct bch_inode_unpacked inode;
+struct bch_inode_unpacked u;
int ret;
if (c->opts.journal_flush_disabled)
return 0;
-ret = bch2_inode_find_by_inum(c, inum, &inode);
+ret = bch2_inode_find_by_inum(c, inode_inum(inode), &u);
if (ret)
return ret;
-return bch2_journal_flush_seq(&c->journal, inode.bi_journal_seq);
+return bch2_journal_flush_seq(&c->journal, u.bi_journal_seq) ?:
+bch2_inode_flush_nocow_writes(c, inode);
}
int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
......@@ -2518,7 +2590,7 @@ int bch2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
ret = file_write_and_wait_range(file, start, end);
ret2 = sync_inode_metadata(&inode->v, 1);
-ret3 = bch2_flush_inode(c, inode_inum(inode));
+ret3 = bch2_flush_inode(c, inode);
return bch2_err_class(ret ?: ret2 ?: ret3);
}
......@@ -3105,6 +3177,11 @@ static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
continue;
}
/*
* XXX: for nocow mode, we should promote shared extents to
* unshared here
*/
sectors = bpos_min(k.k->p, end_pos).offset - iter.pos.offset;
if (!bkey_extent_is_allocation(k.k)) {
......@@ -3368,7 +3445,7 @@ loff_t bch2_remap_file_range(struct file *file_src, loff_t pos_src,
if ((file_dst->f_flags & (__O_SYNC | O_DSYNC)) ||
IS_SYNC(file_inode(file_dst)))
-ret = bch2_flush_inode(c, inode_inum(dst));
+ret = bch2_flush_inode(c, dst);
err:
bch2_quota_reservation_put(c, dst, &quota_res);
bch2_unlock_inodes(INODE_LOCK|INODE_PAGECACHE_BLOCK, src, dst);
......@@ -3622,6 +3699,7 @@ loff_t bch2_llseek(struct file *file, loff_t offset, int whence)
void bch2_fs_fsio_exit(struct bch_fs *c)
{
bioset_exit(&c->nocow_flush_bioset);
bioset_exit(&c->dio_write_bioset);
bioset_exit(&c->dio_read_bioset);
bioset_exit(&c->writepage_bioset);
......@@ -3641,7 +3719,9 @@ int bch2_fs_fsio_init(struct bch_fs *c)
BIOSET_NEED_BVECS) ||
bioset_init(&c->dio_write_bioset,
4, offsetof(struct dio_write, op.wbio.bio),
-BIOSET_NEED_BVECS))
+BIOSET_NEED_BVECS) ||
+bioset_init(&c->nocow_flush_bioset,
+1, offsetof(struct nocow_flush, bio), 0))
ret = -ENOMEM;
pr_verbose_init(c->opts, "ret %i", ret);
......
......@@ -25,6 +25,17 @@ struct bch_inode_info {
u32 ei_subvol;
/*
* When we've been doing nocow writes we'll need to issue flushes to the
* underlying block devices
*
* XXX: a device may have had a flush issued by some other codepath. It
* would be better to keep for each device a sequence number that's
* incremented when we issue a cache flush, and track here the sequence
* number that needs flushing.
*/
struct bch_devs_mask ei_devs_need_flush;
/* copy of inode in btree: */
struct bch_inode_unpacked ei_inode;
};
......
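For context, a sketch of the producer side of ei_devs_need_flush (an
assumption, since the write-path diff (io.c) is collapsed below; only the
fields used here come from this patch):

	/* Sketch: when a nocow write to device ptr->dev completes, record
	 * that the device's volatile write cache must be flushed before
	 * fsync can return: */
	if (op->devs_need_flush)
		__set_bit(ptr->dev, op->devs_need_flush->d);

bch2_inode_flush_nocow_writes() in fs-io.c above then snapshots and clears
this mask and submits one REQ_OP_FLUSH bio per device whose bit is set.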
......@@ -892,4 +892,7 @@ void bch2_inode_opts_get(struct bch_io_opts *opts, struct bch_fs *c,
#define x(_name, _bits) opts->_name = inode_opt_get(c, inode, _name);
BCH_INODE_OPTS()
#undef x
if (opts->nocow)
opts->compression = opts->background_compression = opts->data_checksum = opts->erasure_code = 0;
}
(The diff for this file is collapsed and not shown.)
......@@ -22,7 +22,7 @@ static inline void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw
#endif
void bch2_submit_wbio_replicas(struct bch_write_bio *, struct bch_fs *,
-enum bch_data_type, const struct bkey_i *);
+enum bch_data_type, const struct bkey_i *, bool);
#define BLK_STS_REMOVED ((__force blk_status_t)128)
......@@ -43,6 +43,7 @@ enum bch_write_flags {
__BCH_WRITE_IN_WORKER,
__BCH_WRITE_DONE,
__BCH_WRITE_IO_ERROR,
__BCH_WRITE_CONVERT_UNWRITTEN,
};
#define BCH_WRITE_ALLOC_NOWAIT (1U << __BCH_WRITE_ALLOC_NOWAIT)
......@@ -61,6 +62,7 @@ enum bch_write_flags {
#define BCH_WRITE_IN_WORKER (1U << __BCH_WRITE_IN_WORKER)
#define BCH_WRITE_DONE (1U << __BCH_WRITE_DONE)
#define BCH_WRITE_IO_ERROR (1U << __BCH_WRITE_IO_ERROR)
#define BCH_WRITE_CONVERT_UNWRITTEN (1U << __BCH_WRITE_CONVERT_UNWRITTEN)
static inline struct workqueue_struct *index_update_wq(struct bch_write_op *op)
{
......@@ -90,7 +92,7 @@ static inline void bch2_write_op_init(struct bch_write_op *op, struct bch_fs *c,
op->flags = 0;
op->written = 0;
op->error = 0;
-op->csum_type = bch2_data_checksum_type(c, opts.data_checksum);
+op->csum_type = bch2_data_checksum_type(c, opts);
op->compression_type = bch2_compression_opt_to_type[opts.compression];
op->nr_replicas = 0;
op->nr_replicas_required = c->opts.data_replicas_required;
......@@ -107,6 +109,7 @@ static inline void bch2_write_op_init(struct bch_write_op *op, struct bch_fs *c,
op->res = (struct disk_reservation) { 0 };
op->new_i_size = U64_MAX;
op->i_sectors_delta = 0;
op->devs_need_flush = NULL;
}
void bch2_write(struct closure *);
......
......@@ -97,6 +97,7 @@ struct bch_write_bio {
bounce:1,
put_bio:1,
have_ioref:1,
nocow:1,
used_mempool:1,
first_btree_write:1;
);
......@@ -151,6 +152,12 @@ struct bch_write_op {
struct keylist insert_keys;
u64 inline_keys[BKEY_EXTENT_U64s_MAX * 2];
/*
* Bitmask of devices that have had nocow writes issued to them since
* last flush:
*/
struct bch_devs_mask *devs_need_flush;
/* Must be last: */
struct bch_write_bio wbio;
};
......
......@@ -260,6 +260,12 @@ static int bch2_move_extent(struct btree_trans *trans,
if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_move))
return -BCH_ERR_erofs_no_writes;
/*
* Before memory allocations & taking nocow locks in
* bch2_data_update_init():
*/
bch2_trans_unlock(trans);
/* write path might have to decompress data: */
bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
sectors = max_t(unsigned, sectors, p.crc.uncompressed_size);
......@@ -506,6 +512,7 @@ static int __bch2_move_data(struct moving_context *ctxt,
*/
bch2_bkey_buf_reassemble(&sk, c, k);
k = bkey_i_to_s_c(sk.k);
bch2_trans_unlock(&trans);
ret2 = bch2_move_extent(&trans, &iter, ctxt, io_opts,
btree_id, k, data_opts);
......
// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "nocow_locking.h"
#include "util.h"
void __bch2_bucket_nocow_lock(struct bucket_nocow_lock_table *t,
two_state_lock_t *l, int flags)
{
struct bch_fs *c = container_of(t, struct bch_fs, nocow_locks);
u64 start_time = local_clock();
bch2_two_state_lock(l, flags & BUCKET_NOCOW_LOCK_UPDATE);
bch2_time_stats_update(&c->times[BCH_TIME_nocow_lock_contended], start_time);
}
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_NOCOW_LOCKING_H
#define _BCACHEFS_NOCOW_LOCKING_H
#include "bcachefs_format.h"
#include "two_state_shared_lock.h"
#include <linux/hash.h>
#define BUCKET_NOCOW_LOCKS_BITS 10
#define BUCKET_NOCOW_LOCKS (1U << BUCKET_NOCOW_LOCKS_BITS)
struct bucket_nocow_lock_table {
two_state_lock_t l[BUCKET_NOCOW_LOCKS];
};
#define BUCKET_NOCOW_LOCK_UPDATE (1 << 0)
static inline two_state_lock_t *bucket_nocow_lock(struct bucket_nocow_lock_table *t,
struct bpos bucket)
{
u64 dev_bucket = bucket.inode << 56 | bucket.offset;
unsigned h = hash_64(dev_bucket, BUCKET_NOCOW_LOCKS_BITS);
return t->l + (h & (BUCKET_NOCOW_LOCKS - 1));
}
static inline bool bch2_bucket_nocow_is_locked(struct bucket_nocow_lock_table *t,
struct bpos bucket)
{
two_state_lock_t *l = bucket_nocow_lock(t, bucket);
return atomic_long_read(&l->v) != 0;
}
static inline void bch2_bucket_nocow_unlock(struct bucket_nocow_lock_table *t,
struct bpos bucket, int flags)
{
two_state_lock_t *l = bucket_nocow_lock(t, bucket);
bch2_two_state_unlock(l, flags & BUCKET_NOCOW_LOCK_UPDATE);
}
void __bch2_bucket_nocow_lock(struct bucket_nocow_lock_table *, two_state_lock_t *, int);
static inline void bch2_bucket_nocow_lock(struct bucket_nocow_lock_table *t,
struct bpos bucket, int flags)
{
two_state_lock_t *l = bucket_nocow_lock(t, bucket);
if (!bch2_two_state_trylock(l, flags & BUCKET_NOCOW_LOCK_UPDATE))
__bch2_bucket_nocow_lock(t, l, flags);
}
#endif /* _BCACHEFS_NOCOW_LOCKING_H */
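A minimal usage sketch, based only on the declarations above (which side
passes BUCKET_NOCOW_LOCK_UPDATE is a convention between the write path and
the data update path - the data_update.c hunk above passes 0):

	struct bpos bucket = PTR_BUCKET_POS(c, ptr);

	/* Blocks while the lock is held in the opposite state, i.e. while
	 * a nocow write to this bucket is in flight; holders of the same
	 * state proceed concurrently: */
	bch2_bucket_nocow_lock(&c->nocow_locks, bucket, 0);

	/* ... safe to move data out of, or reallocate, this bucket ... */

	bch2_bucket_nocow_unlock(&c->nocow_locks, bucket, 0);

Because the table has no chaining, two unrelated buckets whose hash_64()
values collide in the 1024-slot table will serialize against each other -
the tradeoff called out in the commit message.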
......@@ -392,6 +392,13 @@ enum opt_type {
OPT_BOOL(), \
BCH2_NO_SB_OPT, false, \
NULL, NULL) \
x(nocow, u8, \
OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME|OPT_INODE, \
OPT_BOOL(), \
BCH_SB_NOCOW, false, \
NULL, "Nocow mode: Writes will be done in place when possible.\n"\
"Snapshots and reflink will still caused writes to be COW\n"\
"Implicitly disables data checksumming, compression and encryption")\
x(no_data_io, u8, \
OPT_MOUNT, \
OPT_BOOL(), \
......
......@@ -88,9 +88,10 @@ static inline void bch2_dev_list_drop_dev(struct bch_devs_list *devs,
static inline void bch2_dev_list_add_dev(struct bch_devs_list *devs,
unsigned dev)
{
-BUG_ON(bch2_dev_list_has_dev(*devs, dev));
-BUG_ON(devs->nr >= ARRAY_SIZE(devs->devs));
-devs->devs[devs->nr++] = dev;
+if (!bch2_dev_list_has_dev(*devs, dev)) {
+BUG_ON(devs->nr >= ARRAY_SIZE(devs->devs));
+devs->devs[devs->nr++] = dev;
+}
}
static inline struct bch_devs_list bch2_dev_list_single(unsigned dev)
......
......@@ -543,6 +543,7 @@ DECLARE_EVENT_CLASS(bucket_alloc,
__field(u64, need_journal_commit )
__field(u64, nouse )
__field(bool, nonblocking )
__field(u64, nocow )
__array(char, err, 32 )
),
......@@ -560,10 +561,11 @@ DECLARE_EVENT_CLASS(bucket_alloc,
__entry->need_journal_commit = s->skipped_need_journal_commit;
__entry->nouse = s->skipped_nouse;
__entry->nonblocking = nonblocking;
__entry->nocow = s->skipped_nocow;
strscpy(__entry->err, err, sizeof(__entry->err));
),
-TP_printk("%d,%d reserve %s user %u bucket %llu free %llu avail %llu copygc_wait %llu/%lli seen %llu open %llu need_journal_commit %llu nouse %llu nonblocking %u err %s",
+TP_printk("%d,%d reserve %s user %u bucket %llu free %llu avail %llu copygc_wait %llu/%lli seen %llu open %llu need_journal_commit %llu nouse %llu nocow %llu nonblocking %u err %s",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->reserve,
__entry->user,
......@@ -576,6 +578,7 @@ DECLARE_EVENT_CLASS(bucket_alloc,
__entry->open,
__entry->need_journal_commit,
__entry->nouse,
__entry->nocow,
__entry->nonblocking,
__entry->err)
);
......