Commit eded341c authored by Christoph Hellwig, committed by Jens Axboe

block: don't decrement nr_phys_segments for physically contiguous segments

Currently ll_merge_requests_fn, unlike all other merge functions,
reduces nr_phys_segments by one if the last segment of the previous
request and the first segment of the next request are physically
contiguous.  While this seems like a nice solution to avoid building
requests that are smaller than they could be, it causes a mismatch
between the segments actually present in the request and those iterated
over by the bvec iterators, including __rq_for_each_bio.  This can for
example mistrigger the single segment optimization in the nvme-pci
driver, and might lead to a mismatching nr_phys_segments count when
recalculating the number of segments while inserting a cloned request.
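
To illustrate the mismatch, here is a minimal user-space C model (not the
kernel code); the struct, helper and example addresses below are hypothetical,
and only the accounting rule comes from the description above: the merge path
decremented the stored count when two adjacent segments were physically
contiguous, while the bvec iterators still visit every segment.

#include <stdbool.h>
#include <stdio.h>

/* A stand-in for one physical segment of a request. */
struct segment {
	unsigned long phys_addr;	/* physical start address */
	unsigned int len;		/* length in bytes */
};

/*
 * What an iterator such as __rq_for_each_bio effectively sees: every
 * segment, one by one, with no notion of merge-time decrements.
 */
static unsigned int count_by_iteration(unsigned int nr_req, unsigned int nr_next)
{
	return nr_req + nr_next;
}

int main(void)
{
	/* The last segment of "req" ends exactly where "next" begins. */
	struct segment req[]  = { { 0x1000, 512 }, { 0x3000, 512 } };
	struct segment next[] = { { 0x3200, 512 } };	/* 0x3000 + 512 == 0x3200 */

	bool contiguous = req[1].phys_addr + req[1].len == next[0].phys_addr;

	/* Old ll_merge_requests_fn behaviour: decrement on contiguity. */
	unsigned int nr_phys_segments = 2 + 1 - (contiguous ? 1 : 0);

	/* Iterating the merged request still visits all three segments. */
	unsigned int iterated = count_by_iteration(2, 1);

	/* Prints 2 vs 3: the stored count and the iterated count disagree. */
	printf("nr_phys_segments=%u iterated=%u\n", nr_phys_segments, iterated);
	return 0;
}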

We could possibly work around this by making the bvec iterators take
the front and back segment size into account, but that would require
moving them from the bio to the bio_iter and spreading this mess
over all users of bvecs.  Or we could simply remove this optimization
under the assumption that most users already build good enough bvecs,
and that the bio merge path never cared about this optimization
either.  The latter is what this patch does.

dff824b2 ("nvme-pci: optimize mapping of small single segment requests").
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent a0934fd2
block/blk-merge.c
@@ -358,7 +358,6 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 	unsigned front_seg_size;
 	struct bio *fbio, *bbio;
 	struct bvec_iter iter;
-	bool new_bio = false;
 
 	if (!bio)
 		return 0;
@@ -379,31 +378,12 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
 	nr_phys_segs = 0;
 	for_each_bio(bio) {
 		bio_for_each_bvec(bv, bio, iter) {
-			if (new_bio) {
-				if (seg_size + bv.bv_len
-				    > queue_max_segment_size(q))
-					goto new_segment;
-				if (!biovec_phys_mergeable(q, &bvprv, &bv))
-					goto new_segment;
-
-				seg_size += bv.bv_len;
-
-				if (nr_phys_segs == 1 && seg_size >
-						front_seg_size)
-					front_seg_size = seg_size;
-
-				continue;
-			}
-new_segment:
 			bvec_split_segs(q, &bv, &nr_phys_segs, &seg_size,
 					&front_seg_size, NULL, UINT_MAX);
-			new_bio = false;
 		}
 		bbio = bio;
-		if (likely(bio->bi_iter.bi_size)) {
+		if (likely(bio->bi_iter.bi_size))
 			bvprv = bv;
-			new_bio = true;
-		}
 	}
 
 	fbio->bi_seg_front_size = front_seg_size;
@@ -725,7 +705,6 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 			req->bio->bi_seg_front_size = seg_size;
 		if (next->nr_phys_segments == 1)
 			next->biotail->bi_seg_back_size = seg_size;
-		total_phys_segments--;
 	}
 
 	if (total_phys_segments > queue_max_segments(q))
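
After the change, the recalculation can be sketched in user-space C as a
simplified stand-in for bvec_split_segs() (it ignores the segment boundary
mask; the names and the 64 KiB limit below are illustrative assumptions):
each bvec contributes ceil(len / max_segment_size) physical segments, and
nothing is subtracted for contiguity across bio boundaries any more.

#include <stdio.h>

struct bvec {
	unsigned int len;	/* bytes covered by this vector entry */
};

/* Simplified model: split each bvec on the max segment size only. */
static unsigned int recalc_segments(const struct bvec *bvs, unsigned int nr,
				    unsigned int max_segment_size)
{
	unsigned int i, nr_phys_segs = 0;

	for (i = 0; i < nr; i++)
		nr_phys_segs += (bvs[i].len + max_segment_size - 1) /
				max_segment_size;
	return nr_phys_segs;
}

int main(void)
{
	struct bvec bvs[] = { { 4096 }, { 131072 }, { 512 } };

	/* With a 64 KiB max segment size: 1 + 2 + 1 = 4 segments. */
	printf("%u\n", recalc_segments(bvs, 3, 65536));
	return 0;
}

This model leans on the same assumption the commit message accepts: callers
already build good enough bvecs, so no cross-bio merging is attempted.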