Commit 89de1504 authored by Christoph Hellwig, committed by Jens Axboe

block: provide a blk_rq_map_sg variant that returns the last element

To be able to move some of the special-purpose hacks in blk_rq_map_sg
into the callers, we need a variant that returns the last mapped
S/G list element to the caller.  Add that variant as __blk_rq_map_sg
and make blk_rq_map_sg a trivial inline wrapper around it.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent e64a0e16
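
For illustration, a caller that wants to append its own trailing element after the mapped list (the kind of special-purpose hack this change lets move out of the core) might look like the sketch below. example_map_data(), drain_buf and drain_len are hypothetical names, not part of this commit; the pattern mirrors the dma_drain handling visible in the hunks that follow.

	#include <linux/blkdev.h>
	#include <linux/scatterlist.h>

	/*
	 * Illustrative only: the caller must make sure sglist can hold
	 * nsegs + 1 entries before appending the extra element.
	 */
	static int example_map_data(struct request_queue *q, struct request *rq,
			struct scatterlist *sglist, void *drain_buf,
			unsigned int drain_len)
	{
		struct scatterlist *last_sg = NULL;
		int nsegs;

		nsegs = __blk_rq_map_sg(q, rq, sglist, &last_sg);
		if (nsegs <= 0)
			return nsegs;

		/* __blk_rq_map_sg() already terminated the list; reopen it. */
		sg_unmark_end(last_sg);
		last_sg = sg_next(last_sg);
		sg_set_buf(last_sg, drain_buf, drain_len);
		sg_mark_end(last_sg);

		return nsegs + 1;
	}
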
block/blk-merge.c
@@ -519,24 +519,23 @@ static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
  * map a request to scatterlist, return number of sg entries setup. Caller
  * must make sure sg can hold rq->nr_phys_segments entries
  */
-int blk_rq_map_sg(struct request_queue *q, struct request *rq,
-		  struct scatterlist *sglist)
+int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
+		struct scatterlist *sglist, struct scatterlist **last_sg)
 {
-	struct scatterlist *sg = NULL;
 	int nsegs = 0;
 
 	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
-		nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, &sg);
+		nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, last_sg);
 	else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
-		nsegs = __blk_bvec_map_sg(bio_iovec(rq->bio), sglist, &sg);
+		nsegs = __blk_bvec_map_sg(bio_iovec(rq->bio), sglist, last_sg);
 	else if (rq->bio)
-		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);
+		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg);
 
 	if (blk_rq_bytes(rq) && (blk_rq_bytes(rq) & q->dma_pad_mask)) {
 		unsigned int pad_len =
 			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
 
-		sg->length += pad_len;
+		(*last_sg)->length += pad_len;
 		rq->extra_len += pad_len;
 	}
 
@@ -544,9 +543,9 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 		if (op_is_write(req_op(rq)))
 			memset(q->dma_drain_buffer, 0, q->dma_drain_size);
 
-		sg_unmark_end(sg);
-		sg = sg_next(sg);
-		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
+		sg_unmark_end(*last_sg);
+		*last_sg = sg_next(*last_sg);
+		sg_set_page(*last_sg, virt_to_page(q->dma_drain_buffer),
 			    q->dma_drain_size,
 			    ((unsigned long)q->dma_drain_buffer) &
 			    (PAGE_SIZE - 1));
@@ -554,8 +553,8 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 		rq->extra_len += q->dma_drain_size;
 	}
 
-	if (sg)
-		sg_mark_end(sg);
+	if (*last_sg)
+		sg_mark_end(*last_sg);
 
 	/*
 	 * Something must have been wrong if the figured number of
@@ -565,7 +564,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 
 	return nsegs;
 }
-EXPORT_SYMBOL(blk_rq_map_sg);
+EXPORT_SYMBOL(__blk_rq_map_sg);
 
 static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
 		unsigned int nr_phys_segs)
include/linux/blkdev.h
@@ -1136,7 +1136,15 @@ static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
 	return max_t(unsigned short, rq->nr_phys_segments, 1);
 }
 
-extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
+int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
+		struct scatterlist *sglist, struct scatterlist **last_sg);
+static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq,
+		struct scatterlist *sglist)
+{
+	struct scatterlist *last_sg = NULL;
+
+	return __blk_rq_map_sg(q, rq, sglist, &last_sg);
+}
 extern void blk_dump_rq_flags(struct request *, char *);
 extern long nr_blockdev_pages(void);
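
Existing callers need no changes: the old blk_rq_map_sg() signature survives as the inline wrapper above, which simply discards the last element. A minimal usage sketch, assuming q, rq and the table size come from the driver context:

	struct scatterlist sgl[16];	/* illustrative fixed size; real callers
					   size this from rq->nr_phys_segments */
	int nsegs;

	sg_init_table(sgl, ARRAY_SIZE(sgl));
	nsegs = blk_rq_map_sg(q, rq, sgl);	/* last_sg stays internal */
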