Commit 5ee54061 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block layer fixes from Jens Axboe:
 "A small collection of fixes for the current series. It contains:

   - A fix for a use-after-free of a request in blk-mq.  From Ming Lei

   - A fix for a blk-mq bug that could attempt to dereference a NULL rq
     if allocation failed

   - Two xen-blkfront small fixes

   - Cleanup of submit_bio_wait() type uses in the kernel, unifying
     that.  From Kent

   - A fix for 32-bit blkg_rwstat reading.  I apologize for this one
     looking mangled in the shortlog, it's entirely my fault for missing
     an empty line between the description and body of the text"

* 'for-linus' of git://git.kernel.dk/linux-block:
  blk-mq: fix use-after-free of request
  blk-mq: fix dereference of rq->mq_ctx if allocation fails
  block: xen-blkfront: Fix possible NULL ptr dereference
  xen-blkfront: Silence pfn maybe-uninitialized warning
  block: submit_bio_wait() conversions
  Update of blkg_stat and blkg_rwstat may happen in bh context
parents 29be6345 0d11e6ac
...@@ -435,9 +435,9 @@ static inline uint64_t blkg_stat_read(struct blkg_stat *stat) ...@@ -435,9 +435,9 @@ static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
uint64_t v; uint64_t v;
do { do {
start = u64_stats_fetch_begin(&stat->syncp); start = u64_stats_fetch_begin_bh(&stat->syncp);
v = stat->cnt; v = stat->cnt;
} while (u64_stats_fetch_retry(&stat->syncp, start)); } while (u64_stats_fetch_retry_bh(&stat->syncp, start));
return v; return v;
} }
...@@ -508,9 +508,9 @@ static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat) ...@@ -508,9 +508,9 @@ static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
struct blkg_rwstat tmp; struct blkg_rwstat tmp;
do { do {
start = u64_stats_fetch_begin(&rwstat->syncp); start = u64_stats_fetch_begin_bh(&rwstat->syncp);
tmp = *rwstat; tmp = *rwstat;
} while (u64_stats_fetch_retry(&rwstat->syncp, start)); } while (u64_stats_fetch_retry_bh(&rwstat->syncp, start));
return tmp; return tmp;
} }
......
...@@ -502,15 +502,6 @@ void blk_abort_flushes(struct request_queue *q) ...@@ -502,15 +502,6 @@ void blk_abort_flushes(struct request_queue *q)
} }
} }
/*
 * Completion callback for the flush bio issued by blkdev_issue_flush().
 * On error, clear BIO_UPTODATE so the issuer can report -EIO; if a waiter
 * parked a struct completion in bi_private (see blkdev_issue_flush(), which
 * sets it to an on-stack completion), wake it; then drop the bio reference.
 */
static void bio_end_flush(struct bio *bio, int err)
{
if (err)
clear_bit(BIO_UPTODATE, &bio->bi_flags);
if (bio->bi_private)
complete(bio->bi_private);
bio_put(bio);
}
/** /**
* blkdev_issue_flush - queue a flush * blkdev_issue_flush - queue a flush
* @bdev: blockdev to issue flush for * @bdev: blockdev to issue flush for
...@@ -526,7 +517,6 @@ static void bio_end_flush(struct bio *bio, int err) ...@@ -526,7 +517,6 @@ static void bio_end_flush(struct bio *bio, int err)
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask, int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
sector_t *error_sector) sector_t *error_sector)
{ {
DECLARE_COMPLETION_ONSTACK(wait);
struct request_queue *q; struct request_queue *q;
struct bio *bio; struct bio *bio;
int ret = 0; int ret = 0;
...@@ -548,13 +538,9 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask, ...@@ -548,13 +538,9 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
return -ENXIO; return -ENXIO;
bio = bio_alloc(gfp_mask, 0); bio = bio_alloc(gfp_mask, 0);
bio->bi_end_io = bio_end_flush;
bio->bi_bdev = bdev; bio->bi_bdev = bdev;
bio->bi_private = &wait;
bio_get(bio); ret = submit_bio_wait(WRITE_FLUSH, bio);
submit_bio(WRITE_FLUSH, bio);
wait_for_completion_io(&wait);
/* /*
* The driver must store the error location in ->bi_sector, if * The driver must store the error location in ->bi_sector, if
...@@ -564,9 +550,6 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask, ...@@ -564,9 +550,6 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
if (error_sector) if (error_sector)
*error_sector = bio->bi_sector; *error_sector = bio->bi_sector;
if (!bio_flagged(bio, BIO_UPTODATE))
ret = -EIO;
bio_put(bio); bio_put(bio);
return ret; return ret;
} }
......
...@@ -202,10 +202,12 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q, ...@@ -202,10 +202,12 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
if (rq) { if (rq) {
blk_mq_rq_ctx_init(q, ctx, rq, rw); blk_mq_rq_ctx_init(q, ctx, rq, rw);
break; break;
} else if (!(gfp & __GFP_WAIT)) }
break;
blk_mq_put_ctx(ctx); blk_mq_put_ctx(ctx);
if (!(gfp & __GFP_WAIT))
break;
__blk_mq_run_hw_queue(hctx); __blk_mq_run_hw_queue(hctx);
blk_mq_wait_for_tags(hctx->tags); blk_mq_wait_for_tags(hctx->tags);
} while (1); } while (1);
...@@ -222,6 +224,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, ...@@ -222,6 +224,7 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
return NULL; return NULL;
rq = blk_mq_alloc_request_pinned(q, rw, gfp, reserved); rq = blk_mq_alloc_request_pinned(q, rw, gfp, reserved);
if (rq)
blk_mq_put_ctx(rq->mq_ctx); blk_mq_put_ctx(rq->mq_ctx);
return rq; return rq;
} }
...@@ -235,6 +238,7 @@ struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, ...@@ -235,6 +238,7 @@ struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw,
return NULL; return NULL;
rq = blk_mq_alloc_request_pinned(q, rw, gfp, true); rq = blk_mq_alloc_request_pinned(q, rw, gfp, true);
if (rq)
blk_mq_put_ctx(rq->mq_ctx); blk_mq_put_ctx(rq->mq_ctx);
return rq; return rq;
} }
...@@ -308,12 +312,12 @@ void blk_mq_complete_request(struct request *rq, int error) ...@@ -308,12 +312,12 @@ void blk_mq_complete_request(struct request *rq, int error)
blk_account_io_completion(rq, bytes); blk_account_io_completion(rq, bytes);
blk_account_io_done(rq);
if (rq->end_io) if (rq->end_io)
rq->end_io(rq, error); rq->end_io(rq, error);
else else
blk_mq_free_request(rq); blk_mq_free_request(rq);
blk_account_io_done(rq);
} }
void __blk_mq_end_io(struct request *rq, int error) void __blk_mq_end_io(struct request *rq, int error)
......
...@@ -489,7 +489,7 @@ static int blkif_queue_request(struct request *req) ...@@ -489,7 +489,7 @@ static int blkif_queue_request(struct request *req)
if ((ring_req->operation == BLKIF_OP_INDIRECT) && if ((ring_req->operation == BLKIF_OP_INDIRECT) &&
(i % SEGS_PER_INDIRECT_FRAME == 0)) { (i % SEGS_PER_INDIRECT_FRAME == 0)) {
unsigned long pfn; unsigned long uninitialized_var(pfn);
if (segments) if (segments)
kunmap_atomic(segments); kunmap_atomic(segments);
...@@ -2011,6 +2011,10 @@ static void blkif_release(struct gendisk *disk, fmode_t mode) ...@@ -2011,6 +2011,10 @@ static void blkif_release(struct gendisk *disk, fmode_t mode)
bdev = bdget_disk(disk, 0); bdev = bdget_disk(disk, 0);
if (!bdev) {
WARN(1, "Block device %s yanked out from us!\n", disk->disk_name);
goto out_mutex;
}
if (bdev->bd_openers) if (bdev->bd_openers)
goto out; goto out;
...@@ -2041,6 +2045,7 @@ static void blkif_release(struct gendisk *disk, fmode_t mode) ...@@ -2041,6 +2045,7 @@ static void blkif_release(struct gendisk *disk, fmode_t mode)
out: out:
bdput(bdev); bdput(bdev);
out_mutex:
mutex_unlock(&blkfront_mutex); mutex_unlock(&blkfront_mutex);
} }
......
...@@ -776,16 +776,10 @@ void md_super_wait(struct mddev *mddev) ...@@ -776,16 +776,10 @@ void md_super_wait(struct mddev *mddev)
finish_wait(&mddev->sb_wait, &wq); finish_wait(&mddev->sb_wait, &wq);
} }
/*
 * bio end_io callback used by sync_page_io(): signal the on-stack
 * completion stashed in bi_private so the submitter can stop waiting.
 */
static void bi_complete(struct bio *bio, int error)
{
complete((struct completion*)bio->bi_private);
}
int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
struct page *page, int rw, bool metadata_op) struct page *page, int rw, bool metadata_op)
{ {
struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev); struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
struct completion event;
int ret; int ret;
rw |= REQ_SYNC; rw |= REQ_SYNC;
...@@ -801,11 +795,7 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, ...@@ -801,11 +795,7 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
else else
bio->bi_sector = sector + rdev->data_offset; bio->bi_sector = sector + rdev->data_offset;
bio_add_page(bio, page, size, 0); bio_add_page(bio, page, size, 0);
init_completion(&event); submit_bio_wait(rw, bio);
bio->bi_private = &event;
bio->bi_end_io = bi_complete;
submit_bio(rw, bio);
wait_for_completion(&event);
ret = test_bit(BIO_UPTODATE, &bio->bi_flags); ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
bio_put(bio); bio_put(bio);
......
...@@ -333,7 +333,6 @@ static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx); ...@@ -333,7 +333,6 @@ static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx);
static int btrfsic_read_block(struct btrfsic_state *state, static int btrfsic_read_block(struct btrfsic_state *state,
struct btrfsic_block_data_ctx *block_ctx); struct btrfsic_block_data_ctx *block_ctx);
static void btrfsic_dump_database(struct btrfsic_state *state); static void btrfsic_dump_database(struct btrfsic_state *state);
static void btrfsic_complete_bio_end_io(struct bio *bio, int err);
static int btrfsic_test_for_metadata(struct btrfsic_state *state, static int btrfsic_test_for_metadata(struct btrfsic_state *state,
char **datav, unsigned int num_pages); char **datav, unsigned int num_pages);
static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state, static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
...@@ -1687,7 +1686,6 @@ static int btrfsic_read_block(struct btrfsic_state *state, ...@@ -1687,7 +1686,6 @@ static int btrfsic_read_block(struct btrfsic_state *state,
for (i = 0; i < num_pages;) { for (i = 0; i < num_pages;) {
struct bio *bio; struct bio *bio;
unsigned int j; unsigned int j;
DECLARE_COMPLETION_ONSTACK(complete);
bio = btrfs_io_bio_alloc(GFP_NOFS, num_pages - i); bio = btrfs_io_bio_alloc(GFP_NOFS, num_pages - i);
if (!bio) { if (!bio) {
...@@ -1698,8 +1696,6 @@ static int btrfsic_read_block(struct btrfsic_state *state, ...@@ -1698,8 +1696,6 @@ static int btrfsic_read_block(struct btrfsic_state *state,
} }
bio->bi_bdev = block_ctx->dev->bdev; bio->bi_bdev = block_ctx->dev->bdev;
bio->bi_sector = dev_bytenr >> 9; bio->bi_sector = dev_bytenr >> 9;
bio->bi_end_io = btrfsic_complete_bio_end_io;
bio->bi_private = &complete;
for (j = i; j < num_pages; j++) { for (j = i; j < num_pages; j++) {
ret = bio_add_page(bio, block_ctx->pagev[j], ret = bio_add_page(bio, block_ctx->pagev[j],
...@@ -1712,12 +1708,7 @@ static int btrfsic_read_block(struct btrfsic_state *state, ...@@ -1712,12 +1708,7 @@ static int btrfsic_read_block(struct btrfsic_state *state,
"btrfsic: error, failed to add a single page!\n"); "btrfsic: error, failed to add a single page!\n");
return -1; return -1;
} }
submit_bio(READ, bio); if (submit_bio_wait(READ, bio)) {
/* this will also unplug the queue */
wait_for_completion(&complete);
if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
printk(KERN_INFO printk(KERN_INFO
"btrfsic: read error at logical %llu dev %s!\n", "btrfsic: read error at logical %llu dev %s!\n",
block_ctx->start, block_ctx->dev->name); block_ctx->start, block_ctx->dev->name);
...@@ -1740,11 +1731,6 @@ static int btrfsic_read_block(struct btrfsic_state *state, ...@@ -1740,11 +1731,6 @@ static int btrfsic_read_block(struct btrfsic_state *state,
return block_ctx->len; return block_ctx->len;
} }
/*
 * bio end_io callback used by btrfsic_read_block(): wake the waiter
 * whose completion was placed in bi_private before submit_bio().
 */
static void btrfsic_complete_bio_end_io(struct bio *bio, int err)
{
complete((struct completion *)bio->bi_private);
}
static void btrfsic_dump_database(struct btrfsic_state *state) static void btrfsic_dump_database(struct btrfsic_state *state)
{ {
struct list_head *elem_all; struct list_head *elem_all;
...@@ -3008,14 +2994,12 @@ int btrfsic_submit_bh(int rw, struct buffer_head *bh) ...@@ -3008,14 +2994,12 @@ int btrfsic_submit_bh(int rw, struct buffer_head *bh)
return submit_bh(rw, bh); return submit_bh(rw, bh);
} }
void btrfsic_submit_bio(int rw, struct bio *bio) static void __btrfsic_submit_bio(int rw, struct bio *bio)
{ {
struct btrfsic_dev_state *dev_state; struct btrfsic_dev_state *dev_state;
if (!btrfsic_is_initialized) { if (!btrfsic_is_initialized)
submit_bio(rw, bio);
return; return;
}
mutex_lock(&btrfsic_mutex); mutex_lock(&btrfsic_mutex);
/* since btrfsic_submit_bio() is also called before /* since btrfsic_submit_bio() is also called before
...@@ -3106,10 +3090,20 @@ void btrfsic_submit_bio(int rw, struct bio *bio) ...@@ -3106,10 +3090,20 @@ void btrfsic_submit_bio(int rw, struct bio *bio)
} }
leave: leave:
mutex_unlock(&btrfsic_mutex); mutex_unlock(&btrfsic_mutex);
}
void btrfsic_submit_bio(int rw, struct bio *bio)
{
__btrfsic_submit_bio(rw, bio);
submit_bio(rw, bio); submit_bio(rw, bio);
} }
/*
 * Synchronous counterpart of btrfsic_submit_bio(): run the integrity
 * checker over the bio via __btrfsic_submit_bio(), then submit it and
 * block until completion, propagating submit_bio_wait()'s status.
 */
int btrfsic_submit_bio_wait(int rw, struct bio *bio)
{
__btrfsic_submit_bio(rw, bio);
return submit_bio_wait(rw, bio);
}
int btrfsic_mount(struct btrfs_root *root, int btrfsic_mount(struct btrfs_root *root,
struct btrfs_fs_devices *fs_devices, struct btrfs_fs_devices *fs_devices,
int including_extent_data, u32 print_mask) int including_extent_data, u32 print_mask)
......
...@@ -22,9 +22,11 @@ ...@@ -22,9 +22,11 @@
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
int btrfsic_submit_bh(int rw, struct buffer_head *bh); int btrfsic_submit_bh(int rw, struct buffer_head *bh);
void btrfsic_submit_bio(int rw, struct bio *bio); void btrfsic_submit_bio(int rw, struct bio *bio);
int btrfsic_submit_bio_wait(int rw, struct bio *bio);
#else #else
#define btrfsic_submit_bh submit_bh #define btrfsic_submit_bh submit_bh
#define btrfsic_submit_bio submit_bio #define btrfsic_submit_bio submit_bio
#define btrfsic_submit_bio_wait submit_bio_wait
#endif #endif
int btrfsic_mount(struct btrfs_root *root, int btrfsic_mount(struct btrfs_root *root,
......
...@@ -1952,11 +1952,6 @@ static int free_io_failure(struct inode *inode, struct io_failure_record *rec, ...@@ -1952,11 +1952,6 @@ static int free_io_failure(struct inode *inode, struct io_failure_record *rec,
return err; return err;
} }
/*
 * bio end_io callback used by repair_io_failure(): signal the on-stack
 * completion held in bi_private so the repair path can proceed.
 */
static void repair_io_failure_callback(struct bio *bio, int err)
{
complete(bio->bi_private);
}
/* /*
* this bypasses the standard btrfs submit functions deliberately, as * this bypasses the standard btrfs submit functions deliberately, as
* the standard behavior is to write all copies in a raid setup. here we only * the standard behavior is to write all copies in a raid setup. here we only
...@@ -1973,7 +1968,6 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start, ...@@ -1973,7 +1968,6 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
{ {
struct bio *bio; struct bio *bio;
struct btrfs_device *dev; struct btrfs_device *dev;
DECLARE_COMPLETION_ONSTACK(compl);
u64 map_length = 0; u64 map_length = 0;
u64 sector; u64 sector;
struct btrfs_bio *bbio = NULL; struct btrfs_bio *bbio = NULL;
...@@ -1990,8 +1984,6 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start, ...@@ -1990,8 +1984,6 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
bio = btrfs_io_bio_alloc(GFP_NOFS, 1); bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
if (!bio) if (!bio)
return -EIO; return -EIO;
bio->bi_private = &compl;
bio->bi_end_io = repair_io_failure_callback;
bio->bi_size = 0; bio->bi_size = 0;
map_length = length; map_length = length;
...@@ -2012,10 +2004,8 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start, ...@@ -2012,10 +2004,8 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
} }
bio->bi_bdev = dev->bdev; bio->bi_bdev = dev->bdev;
bio_add_page(bio, page, length, start - page_offset(page)); bio_add_page(bio, page, length, start - page_offset(page));
btrfsic_submit_bio(WRITE_SYNC, bio);
wait_for_completion(&compl);
if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) { if (btrfsic_submit_bio_wait(WRITE_SYNC, bio)) {
/* try to remap that extent elsewhere? */ /* try to remap that extent elsewhere? */
bio_put(bio); bio_put(bio);
btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS); btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
......
...@@ -208,7 +208,6 @@ static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info, ...@@ -208,7 +208,6 @@ static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
int is_metadata, int have_csum, int is_metadata, int have_csum,
const u8 *csum, u64 generation, const u8 *csum, u64 generation,
u16 csum_size); u16 csum_size);
static void scrub_complete_bio_end_io(struct bio *bio, int err);
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad, static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
struct scrub_block *sblock_good, struct scrub_block *sblock_good,
int force_write); int force_write);
...@@ -1294,7 +1293,6 @@ static void scrub_recheck_block(struct btrfs_fs_info *fs_info, ...@@ -1294,7 +1293,6 @@ static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
for (page_num = 0; page_num < sblock->page_count; page_num++) { for (page_num = 0; page_num < sblock->page_count; page_num++) {
struct bio *bio; struct bio *bio;
struct scrub_page *page = sblock->pagev[page_num]; struct scrub_page *page = sblock->pagev[page_num];
DECLARE_COMPLETION_ONSTACK(complete);
if (page->dev->bdev == NULL) { if (page->dev->bdev == NULL) {
page->io_error = 1; page->io_error = 1;
...@@ -1311,18 +1309,11 @@ static void scrub_recheck_block(struct btrfs_fs_info *fs_info, ...@@ -1311,18 +1309,11 @@ static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
} }
bio->bi_bdev = page->dev->bdev; bio->bi_bdev = page->dev->bdev;
bio->bi_sector = page->physical >> 9; bio->bi_sector = page->physical >> 9;
bio->bi_end_io = scrub_complete_bio_end_io;
bio->bi_private = &complete;
bio_add_page(bio, page->page, PAGE_SIZE, 0); bio_add_page(bio, page->page, PAGE_SIZE, 0);
btrfsic_submit_bio(READ, bio); if (btrfsic_submit_bio_wait(READ, bio))
/* this will also unplug the queue */
wait_for_completion(&complete);
page->io_error = !test_bit(BIO_UPTODATE, &bio->bi_flags);
if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
sblock->no_io_error_seen = 0; sblock->no_io_error_seen = 0;
bio_put(bio); bio_put(bio);
} }
...@@ -1391,11 +1382,6 @@ static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info, ...@@ -1391,11 +1382,6 @@ static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
sblock->checksum_error = 1; sblock->checksum_error = 1;
} }
/*
 * Shared bio end_io callback for the scrub paths (recheck, repair-copy,
 * nocow write): wake the waiter whose completion sits in bi_private.
 */
static void scrub_complete_bio_end_io(struct bio *bio, int err)
{
complete((struct completion *)bio->bi_private);
}
static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad, static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
struct scrub_block *sblock_good, struct scrub_block *sblock_good,
int force_write) int force_write)
...@@ -1430,7 +1416,6 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad, ...@@ -1430,7 +1416,6 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
sblock_bad->checksum_error || page_bad->io_error) { sblock_bad->checksum_error || page_bad->io_error) {
struct bio *bio; struct bio *bio;
int ret; int ret;
DECLARE_COMPLETION_ONSTACK(complete);
if (!page_bad->dev->bdev) { if (!page_bad->dev->bdev) {
printk_ratelimited(KERN_WARNING printk_ratelimited(KERN_WARNING
...@@ -1443,19 +1428,14 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad, ...@@ -1443,19 +1428,14 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
return -EIO; return -EIO;
bio->bi_bdev = page_bad->dev->bdev; bio->bi_bdev = page_bad->dev->bdev;
bio->bi_sector = page_bad->physical >> 9; bio->bi_sector = page_bad->physical >> 9;
bio->bi_end_io = scrub_complete_bio_end_io;
bio->bi_private = &complete;
ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0); ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
if (PAGE_SIZE != ret) { if (PAGE_SIZE != ret) {
bio_put(bio); bio_put(bio);
return -EIO; return -EIO;
} }
btrfsic_submit_bio(WRITE, bio);
/* this will also unplug the queue */ if (btrfsic_submit_bio_wait(WRITE, bio)) {
wait_for_completion(&complete);
if (!bio_flagged(bio, BIO_UPTODATE)) {
btrfs_dev_stat_inc_and_print(page_bad->dev, btrfs_dev_stat_inc_and_print(page_bad->dev,
BTRFS_DEV_STAT_WRITE_ERRS); BTRFS_DEV_STAT_WRITE_ERRS);
btrfs_dev_replace_stats_inc( btrfs_dev_replace_stats_inc(
...@@ -3375,7 +3355,6 @@ static int write_page_nocow(struct scrub_ctx *sctx, ...@@ -3375,7 +3355,6 @@ static int write_page_nocow(struct scrub_ctx *sctx,
struct bio *bio; struct bio *bio;
struct btrfs_device *dev; struct btrfs_device *dev;
int ret; int ret;
DECLARE_COMPLETION_ONSTACK(compl);
dev = sctx->wr_ctx.tgtdev; dev = sctx->wr_ctx.tgtdev;
if (!dev) if (!dev)
...@@ -3392,8 +3371,6 @@ static int write_page_nocow(struct scrub_ctx *sctx, ...@@ -3392,8 +3371,6 @@ static int write_page_nocow(struct scrub_ctx *sctx,
spin_unlock(&sctx->stat_lock); spin_unlock(&sctx->stat_lock);
return -ENOMEM; return -ENOMEM;
} }
bio->bi_private = &compl;
bio->bi_end_io = scrub_complete_bio_end_io;
bio->bi_size = 0; bio->bi_size = 0;
bio->bi_sector = physical_for_dev_replace >> 9; bio->bi_sector = physical_for_dev_replace >> 9;
bio->bi_bdev = dev->bdev; bio->bi_bdev = dev->bdev;
...@@ -3404,10 +3381,8 @@ static int write_page_nocow(struct scrub_ctx *sctx, ...@@ -3404,10 +3381,8 @@ static int write_page_nocow(struct scrub_ctx *sctx,
btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS); btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
return -EIO; return -EIO;
} }
btrfsic_submit_bio(WRITE_SYNC, bio);
wait_for_completion(&compl);
if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) if (btrfsic_submit_bio_wait(WRITE_SYNC, bio))
goto leave_with_eio; goto leave_with_eio;
bio_put(bio); bio_put(bio);
......
...@@ -24,13 +24,6 @@ struct hfsplus_wd { ...@@ -24,13 +24,6 @@ struct hfsplus_wd {
u16 embed_count; u16 embed_count;
}; };
/*
 * bio end_io callback for hfsplus_submit_bio(): on error, clear
 * BIO_UPTODATE so the caller turns it into -EIO, then wake the waiter
 * whose on-stack completion was stored in bi_private.
 */
static void hfsplus_end_io_sync(struct bio *bio, int err)
{
if (err)
clear_bit(BIO_UPTODATE, &bio->bi_flags);
complete(bio->bi_private);
}
/* /*
* hfsplus_submit_bio - Perfrom block I/O * hfsplus_submit_bio - Perfrom block I/O
* @sb: super block of volume for I/O * @sb: super block of volume for I/O
...@@ -53,7 +46,6 @@ static void hfsplus_end_io_sync(struct bio *bio, int err) ...@@ -53,7 +46,6 @@ static void hfsplus_end_io_sync(struct bio *bio, int err)
int hfsplus_submit_bio(struct super_block *sb, sector_t sector, int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
void *buf, void **data, int rw) void *buf, void **data, int rw)
{ {
DECLARE_COMPLETION_ONSTACK(wait);
struct bio *bio; struct bio *bio;
int ret = 0; int ret = 0;
u64 io_size; u64 io_size;
...@@ -73,8 +65,6 @@ int hfsplus_submit_bio(struct super_block *sb, sector_t sector, ...@@ -73,8 +65,6 @@ int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
bio = bio_alloc(GFP_NOIO, 1); bio = bio_alloc(GFP_NOIO, 1);
bio->bi_sector = sector; bio->bi_sector = sector;
bio->bi_bdev = sb->s_bdev; bio->bi_bdev = sb->s_bdev;
bio->bi_end_io = hfsplus_end_io_sync;
bio->bi_private = &wait;
if (!(rw & WRITE) && data) if (!(rw & WRITE) && data)
*data = (u8 *)buf + offset; *data = (u8 *)buf + offset;
...@@ -93,12 +83,7 @@ int hfsplus_submit_bio(struct super_block *sb, sector_t sector, ...@@ -93,12 +83,7 @@ int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
buf = (u8 *)buf + len; buf = (u8 *)buf + len;
} }
submit_bio(rw, bio); ret = submit_bio_wait(rw, bio);
wait_for_completion(&wait);
if (!bio_flagged(bio, BIO_UPTODATE))
ret = -EIO;
out: out:
bio_put(bio); bio_put(bio);
return ret < 0 ? ret : 0; return ret < 0 ? ret : 0;
......
...@@ -14,16 +14,10 @@ ...@@ -14,16 +14,10 @@
#define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1)) #define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1))
/*
 * bio end_io callback used by sync_request(): signal the completion
 * placed in bi_private so the synchronous submitter can resume.
 */
static void request_complete(struct bio *bio, int err)
{
complete((struct completion *)bio->bi_private);
}
static int sync_request(struct page *page, struct block_device *bdev, int rw) static int sync_request(struct page *page, struct block_device *bdev, int rw)
{ {
struct bio bio; struct bio bio;
struct bio_vec bio_vec; struct bio_vec bio_vec;
struct completion complete;
bio_init(&bio); bio_init(&bio);
bio.bi_max_vecs = 1; bio.bi_max_vecs = 1;
...@@ -35,13 +29,8 @@ static int sync_request(struct page *page, struct block_device *bdev, int rw) ...@@ -35,13 +29,8 @@ static int sync_request(struct page *page, struct block_device *bdev, int rw)
bio.bi_size = PAGE_SIZE; bio.bi_size = PAGE_SIZE;
bio.bi_bdev = bdev; bio.bi_bdev = bdev;
bio.bi_sector = page->index * (PAGE_SIZE >> 9); bio.bi_sector = page->index * (PAGE_SIZE >> 9);
init_completion(&complete);
bio.bi_private = &complete;
bio.bi_end_io = request_complete;
submit_bio(rw, &bio); return submit_bio_wait(rw, &bio);
wait_for_completion(&complete);
return test_bit(BIO_UPTODATE, &bio.bi_flags) ? 0 : -EIO;
} }
static int bdev_readpage(void *_sb, struct page *page) static int bdev_readpage(void *_sb, struct page *page)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment