Commit abd45c15 authored by Jens Axboe

block: handle fast path of bio splitting inline

The fast path is the case where no splitting is needed. Separate the
handling into a check part we can inline, and an out-of-line handling
path for when we do need to split.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 09ce8744
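The pattern this commit applies is generic enough to show outside the kernel: keep a cheap predicate inline at every call site so the common case pays only for a branch, and move the rare, heavy work out of line. The stand-alone C sketch below illustrates that shape; may_split(), do_split_out_of_line(), struct buf, and CHUNK_LIMIT are hypothetical stand-ins for blk_may_split(), __blk_queue_split(), struct bio, and PAGE_SIZE, not kernel APIs.

/*
 * A minimal stand-alone sketch of the inline-check / out-of-line-split
 * pattern. Names here are hypothetical stand-ins, not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

#define CHUNK_LIMIT 4096u		/* stand-in for PAGE_SIZE */

struct buf {
	unsigned int nr_chunks;		/* stand-in for bio->bi_vcnt */
	unsigned int chunk_bytes;	/* stand-in for bv_len + bv_offset */
};

/*
 * Cheap predicate, small enough for the compiler to inline at every
 * call site: returns true only when splitting *might* be needed.
 */
static inline bool may_split(const struct buf *b)
{
	return b->nr_chunks != 1 || b->chunk_bytes > CHUNK_LIMIT;
}

/* Out-of-line slow path, reached only when may_split() says so. */
static void do_split_out_of_line(struct buf *b, unsigned int *nr_segs)
{
	/* ... the actual splitting work would live here ... */
	*nr_segs = b->nr_chunks;
}

static void submit(struct buf *b)
{
	/* Fast-path default: a bio that can't split is one segment. */
	unsigned int nr_segs = 1;

	if (may_split(b))
		do_split_out_of_line(b, &nr_segs);

	printf("submitting with %u segment(s)\n", nr_segs);
}

int main(void)
{
	struct buf fast = { .nr_chunks = 1, .chunk_bytes = 512 };
	struct buf slow = { .nr_chunks = 3, .chunk_bytes = 4096 };

	submit(&fast);	/* branch only, slow path never entered */
	submit(&slow);	/* takes the out-of-line path */
	return 0;
}

In the common case the check folds into a couple of compares at the call site and the out-of-line function is never entered, which is the effect the commit gets from pairing blk_may_split() with __blk_queue_split().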
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -324,6 +324,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 /**
  * __blk_queue_split - split a bio and submit the second half
+ * @q:       [in] request_queue new bio is being queued at
  * @bio:     [in, out] bio to be split
  * @nr_segs: [out] number of segments in the first bio
  *
@@ -334,9 +335,9 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
  * of the caller to ensure that q->bio_split is only released after processing
  * of the split bio has finished.
  */
-void __blk_queue_split(struct bio **bio, unsigned int *nr_segs)
+void __blk_queue_split(struct request_queue *q, struct bio **bio,
+		       unsigned int *nr_segs)
 {
-	struct request_queue *q = (*bio)->bi_bdev->bd_disk->queue;
 	struct bio *split = NULL;

 	switch (bio_op(*bio)) {
@@ -353,21 +354,6 @@ void __blk_queue_split(struct bio **bio, unsigned int *nr_segs)
 						 nr_segs);
 		break;
 	default:
-		/*
-		 * All drivers must accept single-segments bios that are <=
-		 * PAGE_SIZE. This is a quick and dirty check that relies on
-		 * the fact that bi_io_vec[0] is always valid if a bio has data.
-		 * The check might lead to occasional false negatives when bios
-		 * are cloned, but compared to the performance impact of cloned
-		 * bios themselves the loop below doesn't matter anyway.
-		 */
-		if (!q->limits.chunk_sectors &&
-		    (*bio)->bi_vcnt == 1 &&
-		    ((*bio)->bi_io_vec[0].bv_len +
-		     (*bio)->bi_io_vec[0].bv_offset) <= PAGE_SIZE) {
-			*nr_segs = 1;
-			break;
-		}
 		split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs);
 		break;
 	}
@@ -397,9 +383,11 @@ void __blk_queue_split(struct bio **bio, unsigned int *nr_segs)
  */
 void blk_queue_split(struct bio **bio)
 {
+	struct request_queue *q = (*bio)->bi_bdev->bd_disk->queue;
 	unsigned int nr_segs;

-	__blk_queue_split(bio, &nr_segs);
+	if (blk_may_split(q, *bio))
+		__blk_queue_split(q, bio, &nr_segs);
 }
 EXPORT_SYMBOL(blk_queue_split);
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2259,11 +2259,12 @@ void blk_mq_submit_bio(struct bio *bio)
 	struct request *rq;
 	struct blk_plug *plug;
 	struct request *same_queue_rq = NULL;
-	unsigned int nr_segs;
+	unsigned int nr_segs = 1;
 	blk_status_t ret;

 	blk_queue_bounce(q, &bio);
-	__blk_queue_split(&bio, &nr_segs);
+	if (blk_may_split(q, bio))
+		__blk_queue_split(q, &bio, &nr_segs);

 	if (!bio_integrity_prep(bio))
 		goto queue_exit;
--- a/block/blk.h
+++ b/block/blk.h
@@ -266,7 +266,32 @@ ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
 ssize_t part_timeout_store(struct device *, struct device_attribute *,
 		const char *, size_t);

-void __blk_queue_split(struct bio **bio, unsigned int *nr_segs);
+static inline bool blk_may_split(struct request_queue *q, struct bio *bio)
+{
+	switch (bio_op(bio)) {
+	case REQ_OP_DISCARD:
+	case REQ_OP_SECURE_ERASE:
+	case REQ_OP_WRITE_ZEROES:
+	case REQ_OP_WRITE_SAME:
+		return true; /* non-trivial splitting decisions */
+	default:
+		break;
+	}
+
+	/*
+	 * All drivers must accept single-segments bios that are <= PAGE_SIZE.
+	 * This is a quick and dirty check that relies on the fact that
+	 * bi_io_vec[0] is always valid if a bio has data. The check might
+	 * lead to occasional false negatives when bios are cloned, but compared
+	 * to the performance impact of cloned bios themselves the loop below
+	 * doesn't matter anyway.
+	 */
+	return q->limits.chunk_sectors || bio->bi_vcnt != 1 ||
+		bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;
+}
+
+void __blk_queue_split(struct request_queue *q, struct bio **bio,
+		unsigned int *nr_segs);
 int ll_back_merge_fn(struct request *req, struct bio *bio,
 		unsigned int nr_segs);
 bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
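One detail worth calling out in the blk-mq.c hunk: nr_segs is now initialized to 1 instead of being left for __blk_queue_split() to fill in. When blk_may_split() returns false the out-of-line helper never runs and never writes *nr_segs, and a bio that fails every splitting condition (a single bvec whose length plus offset fits in PAGE_SIZE) is by definition a single segment, so the default is already correct.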