Commit 655f3aad authored by Mike Snitzer

dm: switch dm_target_io booleans over to proper flags

Add flags to dm_target_io and manage them using the same pattern used
for bi_flags in struct bio.
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
parent 82f6cdcc
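
For context on the pattern the commit message refers to, here is a minimal, userspace-compilable sketch of the same test/set helpers operating on a pared-down struct. The two helper bodies mirror dm_tio_flagged() and dm_tio_set_flag() exactly as added in dm-core.h below; the trimmed struct and main() are illustrative only, not part of the commit.

    /* Minimal userspace sketch of the bi_flags-style pattern this commit
     * adopts. The helpers mirror dm_tio_flagged()/dm_tio_set_flag() from
     * the diff below; the pared-down struct and main() are illustration. */
    #include <stdbool.h>
    #include <stdio.h>

    struct dm_target_io {
            unsigned short flags;   /* replaces the old bool ...:1 members */
    };

    /* Each enum value is a bit position, not a mask. */
    enum {
            DM_TIO_INSIDE_DM_IO,    /* bit 0 */
            DM_TIO_IS_DUPLICATE_BIO /* bit 1 */
    };

    static inline bool dm_tio_flagged(struct dm_target_io *tio, unsigned int bit)
    {
            return (tio->flags & (1U << bit)) != 0;
    }

    static inline void dm_tio_set_flag(struct dm_target_io *tio, unsigned int bit)
    {
            tio->flags |= (1U << bit);
    }

    int main(void)
    {
            struct dm_target_io tio = { .flags = 0 };

            dm_tio_set_flag(&tio, DM_TIO_IS_DUPLICATE_BIO);
            /* prints: inside_dm_io=0 is_duplicate_bio=1 */
            printf("inside_dm_io=%d is_duplicate_bio=%d\n",
                   dm_tio_flagged(&tio, DM_TIO_INSIDE_DM_IO),
                   dm_tio_flagged(&tio, DM_TIO_IS_DUPLICATE_BIO));
            return 0;
    }

Note that, like the bi_flags helpers in struct bio, these use plain non-atomic loads and stores rather than set_bit()/test_bit(); that relies on the flags being updated only while a single context owns the tio.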
drivers/md/dm-core.h
@@ -215,12 +215,29 @@ struct dm_target_io {
 	struct dm_io *io;
 	struct dm_target *ti;
 	unsigned int *len_ptr;
-	bool inside_dm_io:1;
-	bool is_duplicate_bio:1;
+	unsigned short flags;
 	sector_t old_sector;
 	struct bio clone;
 };
 
+/*
+ * dm_target_io flags
+ */
+enum {
+	DM_TIO_INSIDE_DM_IO,
+	DM_TIO_IS_DUPLICATE_BIO
+};
+
+static inline bool dm_tio_flagged(struct dm_target_io *tio, unsigned int bit)
+{
+	return (tio->flags & (1U << bit)) != 0;
+}
+
+static inline void dm_tio_set_flag(struct dm_target_io *tio, unsigned int bit)
+{
+	tio->flags |= (1U << bit);
+}
+
 /*
  * One of these is allocated per original bio.
  * It contains the first clone used for that original.

drivers/md/dm.c
@@ -94,7 +94,7 @@ static inline struct dm_target_io *clone_to_tio(struct bio *clone)
 
 void *dm_per_bio_data(struct bio *bio, size_t data_size)
 {
-	if (!clone_to_tio(bio)->inside_dm_io)
+	if (!dm_tio_flagged(clone_to_tio(bio), DM_TIO_INSIDE_DM_IO))
 		return (char *)bio - DM_TARGET_IO_BIO_OFFSET - data_size;
 	return (char *)bio - DM_IO_BIO_OFFSET - data_size;
 }
@@ -538,9 +538,10 @@ static void dm_start_io_acct(struct dm_io *io, struct bio *clone)
 	/*
 	 * Ensure IO accounting is only ever started once.
-	 * Expect no possibility for race unless is_duplicate_bio.
+	 * Expect no possibility for race unless DM_TIO_IS_DUPLICATE_BIO.
 	 */
-	if (!clone || likely(!clone_to_tio(clone)->is_duplicate_bio)) {
+	if (!clone ||
+	    likely(!dm_tio_flagged(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO))) {
 		if (WARN_ON_ONCE(dm_io_flagged(io, DM_IO_ACCOUNTED)))
 			return;
 		dm_io_set_flag(io, DM_IO_ACCOUNTED);
@@ -548,7 +549,7 @@ static void dm_start_io_acct(struct dm_io *io, struct bio *clone)
 		unsigned long flags;
 		if (dm_io_flagged(io, DM_IO_ACCOUNTED))
 			return;
-		/* Can afford locking given is_duplicate_bio */
+		/* Can afford locking given DM_TIO_IS_DUPLICATE_BIO */
 		spin_lock_irqsave(&io->startio_lock, flags);
 		dm_io_set_flag(io, DM_IO_ACCOUNTED);
 		spin_unlock_irqrestore(&io->startio_lock, flags);
@@ -571,7 +572,8 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
 	clone = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO, &md->io_bs);
 	tio = clone_to_tio(clone);
-	tio->inside_dm_io = true;
+	tio->flags = 0;
+	dm_tio_set_flag(tio, DM_TIO_INSIDE_DM_IO);
 	tio->io = NULL;
 
 	io = container_of(tio, struct dm_io, tio);
@@ -618,14 +620,13 @@ static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti,
 		clone->bi_opf &= ~REQ_DM_POLL_LIST;
 
 		tio = clone_to_tio(clone);
-		tio->inside_dm_io = false;
+		tio->flags = 0; /* also clears DM_TIO_INSIDE_DM_IO */
 	}
 
 	tio->magic = DM_TIO_MAGIC;
 	tio->io = ci->io;
 	tio->ti = ti;
 	tio->target_bio_nr = target_bio_nr;
-	tio->is_duplicate_bio = false;
 	tio->len_ptr = len;
 	tio->old_sector = 0;
@@ -640,7 +641,7 @@ static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti,
 static void free_tio(struct bio *clone)
 {
-	if (clone_to_tio(clone)->inside_dm_io)
+	if (dm_tio_flagged(clone_to_tio(clone), DM_TIO_INSIDE_DM_IO))
 		return;
 	bio_put(clone);
 }
@@ -917,6 +918,12 @@ static void dm_io_complete(struct dm_io *io)
 	}
 }
 
+static inline bool dm_tio_is_normal(struct dm_target_io *tio)
+{
+	return (dm_tio_flagged(tio, DM_TIO_INSIDE_DM_IO) &&
+		!dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO));
+}
+
 /*
  * Decrements the number of outstanding ios that a bio has been
  * cloned into, completing the original io if necc.
@@ -1180,7 +1187,7 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
 	struct dm_target_io *tio = clone_to_tio(bio);
 	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
 
-	BUG_ON(tio->is_duplicate_bio);
+	BUG_ON(dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO));
 	BUG_ON(op_is_zone_mgmt(bio_op(bio)));
 	BUG_ON(bio_op(bio) == REQ_OP_ZONE_APPEND);
 	BUG_ON(bi_size > *tio->len_ptr);
@@ -1362,13 +1369,13 @@ static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
 		break;
 	case 1:
 		clone = alloc_tio(ci, ti, 0, len, GFP_NOIO);
-		clone_to_tio(clone)->is_duplicate_bio = true;
+		dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO);
 		__map_bio(clone);
 		break;
 	default:
 		alloc_multiple_bios(&blist, ci, ti, num_bios, len);
 		while ((clone = bio_list_pop(&blist))) {
-			clone_to_tio(clone)->is_duplicate_bio = true;
+			dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO);
 			__map_bio(clone);
 		}
 		break;
@@ -1648,7 +1655,7 @@ static void dm_submit_bio(struct bio *bio)
 static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob,
 			  unsigned int flags)
 {
-	WARN_ON_ONCE(!io->tio.inside_dm_io);
+	WARN_ON_ONCE(!dm_tio_is_normal(&io->tio));
 
 	/* don't poll if the mapped io is done */
 	if (atomic_read(&io->io_count) > 1)
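
The commit only needs test and set operations. Purely as a hypothetical extension of the sketch above (no such helper is added by this commit), a clear operation would follow the same shape:

    /* Hypothetical helper, NOT part of this commit: clears a flag bit,
     * mirroring the test/set pattern above. */
    static inline void dm_tio_clear_flag(struct dm_target_io *tio, unsigned int bit)
    {
            tio->flags &= ~(1U << bit);
    }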