Commit 5a6c35f9 authored by Christoph Hellwig, committed by Jens Axboe

block: remove direct_make_request

Now that submit_bio_noacct has a decent blk-mq fast path there is no
more need for this bypass.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent ff93ea0c
...@@ -1211,34 +1211,6 @@ blk_qc_t submit_bio_noacct(struct bio *bio) ...@@ -1211,34 +1211,6 @@ blk_qc_t submit_bio_noacct(struct bio *bio)
} }
EXPORT_SYMBOL(submit_bio_noacct); EXPORT_SYMBOL(submit_bio_noacct);
/**
 * direct_make_request - hand a buffer directly to its device driver for I/O
 * @bio: The bio describing the location in memory and on the device.
 *
 * This function behaves like submit_bio_noacct(), but does not protect
 * against recursion. Must only be used if the called driver is known
 * to be blk-mq based.
 *
 * Return: the queue cookie from blk_mq_submit_bio() on success, or
 * BLK_QC_T_NONE on any failure path below.
 */
blk_qc_t direct_make_request(struct bio *bio)
{
struct gendisk *disk = bio->bi_disk;
/*
 * This fast path bypasses the bio-based recursion protection, so it is
 * only valid for blk-mq queues; for anything else, error the bio
 * instead of dispatching it.
 */
if (WARN_ON_ONCE(!disk->queue->mq_ops)) {
bio_io_error(bio);
return BLK_QC_T_NONE;
}
/*
 * Validate the bio against queue limits/state.
 * NOTE(review): presumably submit_bio_checks() completes the bio
 * itself on failure (no bio_io_error() here) — confirm in blk-core.
 */
if (!submit_bio_checks(bio))
return BLK_QC_T_NONE;
/*
 * Take a reference on the request queue; from here on, failure paths
 * must drop it via blk_queue_exit().
 */
if (unlikely(bio_queue_enter(bio)))
return BLK_QC_T_NONE;
/*
 * Inline-encryption prep; takes &bio because it may replace the bio.
 * On failure, release the queue reference acquired above.
 */
if (!blk_crypto_bio_prep(&bio)) {
blk_queue_exit(disk->queue);
return BLK_QC_T_NONE;
}
/* Dispatch straight to blk-mq; it consumes the queue reference. */
return blk_mq_submit_bio(bio);
}
EXPORT_SYMBOL_GPL(direct_make_request);
/** /**
* submit_bio - submit a bio to the block device layer for I/O * submit_bio - submit a bio to the block device layer for I/O
* @bio: The &struct bio which describes the I/O * @bio: The &struct bio which describes the I/O
......
...@@ -1302,9 +1302,6 @@ static blk_qc_t __map_bio(struct dm_target_io *tio) ...@@ -1302,9 +1302,6 @@ static blk_qc_t __map_bio(struct dm_target_io *tio)
/* the bio has been remapped so dispatch it */ /* the bio has been remapped so dispatch it */
trace_block_bio_remap(clone->bi_disk->queue, clone, trace_block_bio_remap(clone->bi_disk->queue, clone,
bio_dev(io->orig_bio), sector); bio_dev(io->orig_bio), sector);
if (md->type == DM_TYPE_NVME_BIO_BASED)
ret = direct_make_request(clone);
else
ret = submit_bio_noacct(clone); ret = submit_bio_noacct(clone);
break; break;
case DM_MAPIO_KILL: case DM_MAPIO_KILL:
......
...@@ -314,7 +314,7 @@ blk_qc_t nvme_ns_head_submit_bio(struct bio *bio) ...@@ -314,7 +314,7 @@ blk_qc_t nvme_ns_head_submit_bio(struct bio *bio)
trace_block_bio_remap(bio->bi_disk->queue, bio, trace_block_bio_remap(bio->bi_disk->queue, bio,
disk_devt(ns->head->disk), disk_devt(ns->head->disk),
bio->bi_iter.bi_sector); bio->bi_iter.bi_sector);
ret = direct_make_request(bio); ret = submit_bio_noacct(bio);
} else if (nvme_available_path(head)) { } else if (nvme_available_path(head)) {
dev_warn_ratelimited(dev, "no usable path - requeuing I/O\n"); dev_warn_ratelimited(dev, "no usable path - requeuing I/O\n");
......
...@@ -853,7 +853,6 @@ static inline void rq_flush_dcache_pages(struct request *rq) ...@@ -853,7 +853,6 @@ static inline void rq_flush_dcache_pages(struct request *rq)
extern int blk_register_queue(struct gendisk *disk); extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk); extern void blk_unregister_queue(struct gendisk *disk);
blk_qc_t submit_bio_noacct(struct bio *bio); blk_qc_t submit_bio_noacct(struct bio *bio);
extern blk_qc_t direct_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq); extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *); extern void blk_put_request(struct request *);
extern struct request *blk_get_request(struct request_queue *, unsigned int op, extern struct request *blk_get_request(struct request_queue *, unsigned int op,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment