Commit 9dfa5283 authored by NeilBrown, committed by Jens Axboe

Merge blk_recount_segments into blk_recalc_rq_segments

blk_recalc_rq_segments calls blk_recount_segments on each bio,
then does some extra calculations to handle segments that overlap
two bios.

If we merge the code from blk_recount_segments into
blk_recalc_rq_segments, we can process the whole request one bio_vec
at a time, and not need the messy cross-bio calculations.

Then blk_recount_segments can be implemented by calling
blk_recalc_rq_segments, passing it a simple on-stack request which
stores just the bio.
Signed-off-by: Neil Brown <neilb@suse.de>

diff .prev/block/ll_rw_blk.c ./block/ll_rw_blk.c
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent bbf25010
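
The on-stack-request trick described in the commit message is easiest to see in isolation. The snippet below is a condensed restatement of the new blk_recount_segments() added in the hunk at line 1221 of ll_rw_blk.c (field names as in the 2.6.23-era struct request and struct bio); it is offered only as a reading aid for the diff that follows, not as a separate change:

void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	struct request rq;                 /* throwaway request, lives on the stack */
	struct bio *nxt = bio->bi_next;    /* remember the rest of the bio chain */

	rq.q = q;
	rq.bio = rq.biotail = bio;         /* the "request" holds just this one bio */
	bio->bi_next = NULL;               /* so rq_for_each_bio() stops after it */
	blk_recalc_rq_segments(&rq);       /* does the real counting, one bio_vec at a time */
	bio->bi_next = nxt;                /* splice the chain back together */

	/* copy the request-level totals back into the bio */
	bio->bi_phys_segments = rq.nr_phys_segments;
	bio->bi_hw_segments = rq.nr_hw_segments;
	bio->bi_flags |= (1 << BIO_SEG_VALID);
}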
@@ -42,6 +42,7 @@ static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
 static void init_request_from_bio(struct request *req, struct bio *bio);
 static int __make_request(struct request_queue *q, struct bio *bio);
 static struct io_context *current_io_context(gfp_t gfp_flags, int node);
+static void blk_recalc_rq_segments(struct request *rq);
 
 /*
  * For the allocated request tables
@@ -1220,15 +1221,41 @@ EXPORT_SYMBOL(blk_dump_rq_flags);
 
 void blk_recount_segments(struct request_queue *q, struct bio *bio)
 {
+	struct request rq;
+	struct bio *nxt = bio->bi_next;
+	rq.q = q;
+	rq.bio = rq.biotail = bio;
+	bio->bi_next = NULL;
+	blk_recalc_rq_segments(&rq);
+	bio->bi_next = nxt;
+	bio->bi_phys_segments = rq.nr_phys_segments;
+	bio->bi_hw_segments = rq.nr_hw_segments;
+	bio->bi_flags |= (1 << BIO_SEG_VALID);
+}
+EXPORT_SYMBOL(blk_recount_segments);
+
+static void blk_recalc_rq_segments(struct request *rq)
+{
+	int nr_phys_segs;
+	int nr_hw_segs;
+	unsigned int phys_size;
+	unsigned int hw_size;
 	struct bio_vec *bv, *bvprv = NULL;
-	int i, nr_phys_segs, nr_hw_segs, seg_size, hw_seg_size, cluster;
+	int seg_size;
+	int hw_seg_size;
+	int cluster;
+	struct bio *bio;
+	int i;
 	int high, highprv = 1;
+	struct request_queue *q = rq->q;
 
-	if (unlikely(!bio->bi_io_vec))
+	if (!rq->bio)
 		return;
 
 	cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
-	hw_seg_size = seg_size = nr_phys_segs = nr_hw_segs = 0;
+	hw_seg_size = seg_size = 0;
+	phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
+	rq_for_each_bio(bio, rq)
 	bio_for_each_segment(bv, bio, i) {
 		/*
 		 * the trick here is making sure that a high page is never
@@ -1255,12 +1282,13 @@ void blk_recount_segments(struct request_queue *q, struct bio *bio)
 		}
 new_segment:
 		if (BIOVEC_VIRT_MERGEABLE(bvprv, bv) &&
-		    !BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len)) {
+		    !BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
 			hw_seg_size += bv->bv_len;
-		} else {
+		else {
 new_hw_segment:
-			if (hw_seg_size > bio->bi_hw_front_size)
-				bio->bi_hw_front_size = hw_seg_size;
+			if (nr_hw_segs == 1 &&
+			    hw_seg_size > rq->bio->bi_hw_front_size)
+				rq->bio->bi_hw_front_size = hw_seg_size;
 			hw_seg_size = BIOVEC_VIRT_START_SIZE(bv) + bv->bv_len;
 			nr_hw_segs++;
 		}
@@ -1270,15 +1298,15 @@ void blk_recount_segments(struct request_queue *q, struct bio *bio)
 		seg_size = bv->bv_len;
 		highprv = high;
 	}
-	if (hw_seg_size > bio->bi_hw_back_size)
-		bio->bi_hw_back_size = hw_seg_size;
-	if (nr_hw_segs == 1 && hw_seg_size > bio->bi_hw_front_size)
-		bio->bi_hw_front_size = hw_seg_size;
-	bio->bi_phys_segments = nr_phys_segs;
-	bio->bi_hw_segments = nr_hw_segs;
-	bio->bi_flags |= (1 << BIO_SEG_VALID);
+
+	if (nr_hw_segs == 1 &&
+	    hw_seg_size > rq->bio->bi_hw_front_size)
+		rq->bio->bi_hw_front_size = hw_seg_size;
+	if (hw_seg_size > rq->biotail->bi_hw_back_size)
+		rq->biotail->bi_hw_back_size = hw_seg_size;
+	rq->nr_phys_segments = nr_phys_segs;
+	rq->nr_hw_segments = nr_hw_segs;
 }
-EXPORT_SYMBOL(blk_recount_segments);
 
 static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 				   struct bio *nxt)
@@ -3329,48 +3357,6 @@ void submit_bio(int rw, struct bio *bio)
 
 EXPORT_SYMBOL(submit_bio);
 
-static void blk_recalc_rq_segments(struct request *rq)
-{
-	struct bio *bio, *prevbio = NULL;
-	int nr_phys_segs, nr_hw_segs;
-	unsigned int phys_size, hw_size;
-	struct request_queue *q = rq->q;
-
-	if (!rq->bio)
-		return;
-
-	phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
-	rq_for_each_bio(bio, rq) {
-		/* Force bio hw/phys segs to be recalculated. */
-		bio->bi_flags &= ~(1 << BIO_SEG_VALID);
-
-		nr_phys_segs += bio_phys_segments(q, bio);
-		nr_hw_segs += bio_hw_segments(q, bio);
-		if (prevbio) {
-			int pseg = phys_size + prevbio->bi_size + bio->bi_size;
-			int hseg = hw_size + prevbio->bi_size + bio->bi_size;
-
-			if (blk_phys_contig_segment(q, prevbio, bio) &&
-			    pseg <= q->max_segment_size) {
-				nr_phys_segs--;
-				phys_size += prevbio->bi_size + bio->bi_size;
-			} else
-				phys_size = 0;
-
-			if (blk_hw_contig_segment(q, prevbio, bio) &&
-			    hseg <= q->max_segment_size) {
-				nr_hw_segs--;
-				hw_size += prevbio->bi_size + bio->bi_size;
-			} else
-				hw_size = 0;
-		}
-		prevbio = bio;
-	}
-
-	rq->nr_phys_segments = nr_phys_segs;
-	rq->nr_hw_segments = nr_hw_segs;
-}
-
 static void blk_recalc_rq_sectors(struct request *rq, int nsect)
 {
 	if (blk_fs_request(rq)) {