Commit 2fb98e84 authored by Tejun Heo, committed by Jens Axboe

block: implement request_queue->dma_drain_needed

Draining shouldn't be done for commands where overflow may indicate
data integrity issues.  Add a dma_drain_needed callback to
request_queue.  The drain buffer is appended iff this function returns
non-zero.
Signed-off-by: Tejun Heo <htejun@gmail.com>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent 6b00769f
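
For illustration, a driver using this interface registers a per-request
predicate along with its drain buffer.  In the sketch below,
my_drain_needed, my_setup_queue, my_drain_buf and MY_DRAIN_SIZE are
hypothetical driver-side names, not part of this commit:

	/* Illustrative only: my_drain_needed, my_drain_buf and
	 * MY_DRAIN_SIZE are hypothetical driver-side names.
	 */
	static int my_drain_needed(struct request *rq)
	{
		/* Drain only packet commands, where the device may
		 * return more data than asked for; for normal
		 * reads/writes an overflow would mask a real error,
		 * so no drain there.
		 */
		return blk_pc_request(rq);
	}

	static int my_setup_queue(struct request_queue *q, void *my_drain_buf)
	{
		/* Registers callback and buffer; reserves one segment. */
		return blk_queue_dma_drain(q, my_drain_needed, my_drain_buf,
					   MY_DRAIN_SIZE);
	}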
@@ -220,7 +220,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 		bvprv = bvec;
 	} /* segments in rq */
 
-	if (q->dma_drain_size) {
+	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
 		sg->page_link &= ~0x02;
 		sg = sg_next(sg);
 		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
...
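With the change above, the tail of blk_rq_map_sg() reads roughly as
follows (a readability paraphrase of the surrounding kernel code of
this era, not part of the diff; nsegs is the segment counter the
function returns).  The ~0x02 clears the scatterlist end-of-list
marker on what was the final entry so the drain page can be chained on:

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		/* bit 0x02 in page_link marks the list end; clear it
		 * so one more entry can be appended */
		sg->page_link &= ~0x02;
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
	}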
@@ -296,6 +296,7 @@ EXPORT_SYMBOL(blk_queue_stack_limits);
  * blk_queue_dma_drain - Set up a drain buffer for excess dma.
  *
  * @q: the request queue for the device
+ * @dma_drain_needed: fn which returns non-zero if drain is necessary
  * @buf: physically contiguous buffer
  * @size: size of the buffer in bytes
  *
@@ -315,14 +316,16 @@ EXPORT_SYMBOL(blk_queue_stack_limits);
  * device can support otherwise there won't be room for the drain
  * buffer.
  */
-int blk_queue_dma_drain(struct request_queue *q, void *buf,
-		unsigned int size)
+extern int blk_queue_dma_drain(struct request_queue *q,
+			       dma_drain_needed_fn *dma_drain_needed,
+			       void *buf, unsigned int size)
 {
 	if (q->max_hw_segments < 2 || q->max_phys_segments < 2)
 		return -EINVAL;
 
 	/* make room for appending the drain */
 	--q->max_hw_segments;
 	--q->max_phys_segments;
+	q->dma_drain_needed = dma_drain_needed;
 	q->dma_drain_buffer = buf;
 	q->dma_drain_size = size;
...
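One usage note, sketched under the same hypothetical names as above:
because blk_queue_dma_drain() reserves a segment for the drain entry,
a driver must configure its segment limits first, and the call fails
with -EINVAL if fewer than two segments are available:

	blk_queue_max_hw_segments(q, MY_MAX_SEGS);	/* MY_MAX_SEGS: hypothetical */
	blk_queue_max_phys_segments(q, MY_MAX_SEGS);
	if (blk_queue_dma_drain(q, my_drain_needed, my_drain_buf,
				MY_DRAIN_SIZE))
		return -EINVAL;	/* fewer than two segments configured */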
@@ -259,6 +259,7 @@ struct bio_vec;
 typedef int (merge_bvec_fn) (struct request_queue *, struct bio *, struct bio_vec *);
 typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
 typedef void (softirq_done_fn)(struct request *);
+typedef int (dma_drain_needed_fn)(struct request *);
 
 enum blk_queue_state {
 	Queue_down,
@@ -295,6 +296,7 @@ struct request_queue
 	merge_bvec_fn		*merge_bvec_fn;
 	prepare_flush_fn	*prepare_flush_fn;
 	softirq_done_fn		*softirq_done_fn;
+	dma_drain_needed_fn	*dma_drain_needed;
 
 	/*
 	 * Dispatch queue sorting
@@ -699,8 +701,9 @@ extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
-extern int blk_queue_dma_drain(struct request_queue *q, void *buf,
-			unsigned int size);
+extern int blk_queue_dma_drain(struct request_queue *q,
+			       dma_drain_needed_fn *dma_drain_needed,
+			       void *buf, unsigned int size);
 extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
 extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
 extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
...