Commit 2e46e8b2 authored by Tejun Heo's avatar Tejun Heo Committed by Jens Axboe

block: drop request->hard_* and *nr_sectors

struct request has had a few different ways to represent some
properties of a request.  ->hard_* represent block layer's view of the
request progress (completion cursor) and the ones without the prefix
are supposed to represent the issue cursor and allowed to be updated
as necessary by the low level drivers.  The thing is that as block
layer supports partial completion, the two cursors really aren't
necessary and only cause confusion.  In addition, manual management of
request detail from low level drivers is cumbersome and error-prone at
the very least.

Other interesting duplicate fields are rq->[hard_]nr_sectors and
rq->{hard_cur|current}_nr_sectors against rq->data_len and
rq->bio->bi_size.  This is more convoluted than the hard_ case.

rq->[hard_]nr_sectors are initialized for requests with bio but
blk_rq_bytes() uses it only for !pc requests.  rq->data_len is
initialized for all requests but blk_rq_bytes() uses it only for pc
requests.  This causes a good amount of confusion throughout the block
layer and its drivers, and determining the request length has been a
bit of black magic which may or may not work depending on circumstances
and what the specific LLD is actually doing.

rq->{hard_cur|current}_nr_sectors represent the number of sectors in
the contiguous data area at the front.  This is mainly used by drivers
which transfer data by walking the request segment-by-segment.  This
value always equals rq->bio->bi_size >> 9.  However, data length for
pc requests may not be a multiple of 512 bytes and using this field
becomes a bit confusing.

In general, having multiple fields to represent the same property
leads only to confusion and subtle bugs.  With recent block low level
driver cleanups, no driver is accessing or manipulating these
duplicate fields directly.  Drop all the duplicates.  Now rq->sector
means the current sector, rq->data_len the current total length and
rq->bio->bi_size the current segment length.  Everything else is
defined in terms of these three and available only through accessors.

* blk_recalc_rq_sectors() is collapsed into blk_update_request() and
  now handles pc and fs requests equally other than rq->sector update.
  This means that now pc requests can use partial completion too (no
  in-kernel user yet, though).

* bio_cur_sectors() is replaced with bio_cur_bytes() as block layer
  now uses byte count as the primary data length.

* blk_rq_pos() is now guaranteed to be always correct.  In-block users
  converted.

* blk_rq_bytes() is now guaranteed to be always valid as is
  blk_rq_sectors().  In-block users converted.

* blk_rq_sectors() is now guaranteed to equal blk_rq_bytes() >> 9.
  More convenient one is used.

* blk_rq_bytes() and blk_rq_cur_bytes() are now inlined and take const
  pointer to request.

[ Impact: API cleanup, single way to represent one property of a request ]
Signed-off-by: default avatarTejun Heo <tj@kernel.org>
Cc: Boaz Harrosh <bharrosh@panasas.com>
Signed-off-by: default avatarJens Axboe <jens.axboe@oracle.com>
parent 9780e2dd
...@@ -127,7 +127,7 @@ void blk_rq_init(struct request_queue *q, struct request *rq) ...@@ -127,7 +127,7 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
INIT_LIST_HEAD(&rq->timeout_list); INIT_LIST_HEAD(&rq->timeout_list);
rq->cpu = -1; rq->cpu = -1;
rq->q = q; rq->q = q;
rq->sector = rq->hard_sector = (sector_t) -1; rq->sector = (sector_t) -1;
INIT_HLIST_NODE(&rq->hash); INIT_HLIST_NODE(&rq->hash);
RB_CLEAR_NODE(&rq->rb_node); RB_CLEAR_NODE(&rq->rb_node);
rq->cmd = rq->__cmd; rq->cmd = rq->__cmd;
...@@ -189,8 +189,7 @@ void blk_dump_rq_flags(struct request *rq, char *msg) ...@@ -189,8 +189,7 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
(unsigned long long)blk_rq_pos(rq), (unsigned long long)blk_rq_pos(rq),
blk_rq_sectors(rq), blk_rq_cur_sectors(rq)); blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
printk(KERN_INFO " bio %p, biotail %p, buffer %p, len %u\n", printk(KERN_INFO " bio %p, biotail %p, buffer %p, len %u\n",
rq->bio, rq->biotail, rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq));
rq->buffer, rq->data_len);
if (blk_pc_request(rq)) { if (blk_pc_request(rq)) {
printk(KERN_INFO " cdb: "); printk(KERN_INFO " cdb: ");
...@@ -1096,7 +1095,7 @@ void init_request_from_bio(struct request *req, struct bio *bio) ...@@ -1096,7 +1095,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
req->cmd_flags |= REQ_NOIDLE; req->cmd_flags |= REQ_NOIDLE;
req->errors = 0; req->errors = 0;
req->hard_sector = req->sector = bio->bi_sector; req->sector = bio->bi_sector;
req->ioprio = bio_prio(bio); req->ioprio = bio_prio(bio);
blk_rq_bio_prep(req->q, req, bio); blk_rq_bio_prep(req->q, req, bio);
} }
...@@ -1113,14 +1112,13 @@ static inline bool queue_should_plug(struct request_queue *q) ...@@ -1113,14 +1112,13 @@ static inline bool queue_should_plug(struct request_queue *q)
static int __make_request(struct request_queue *q, struct bio *bio) static int __make_request(struct request_queue *q, struct bio *bio)
{ {
struct request *req; struct request *req;
int el_ret, nr_sectors; int el_ret;
unsigned int bytes = bio->bi_size;
const unsigned short prio = bio_prio(bio); const unsigned short prio = bio_prio(bio);
const int sync = bio_sync(bio); const int sync = bio_sync(bio);
const int unplug = bio_unplug(bio); const int unplug = bio_unplug(bio);
int rw_flags; int rw_flags;
nr_sectors = bio_sectors(bio);
/* /*
* low level driver can indicate that it wants pages above a * low level driver can indicate that it wants pages above a
* certain limit bounced to low memory (ie for highmem, or even * certain limit bounced to low memory (ie for highmem, or even
...@@ -1145,7 +1143,7 @@ static int __make_request(struct request_queue *q, struct bio *bio) ...@@ -1145,7 +1143,7 @@ static int __make_request(struct request_queue *q, struct bio *bio)
req->biotail->bi_next = bio; req->biotail->bi_next = bio;
req->biotail = bio; req->biotail = bio;
req->nr_sectors = req->hard_nr_sectors += nr_sectors; req->data_len += bytes;
req->ioprio = ioprio_best(req->ioprio, prio); req->ioprio = ioprio_best(req->ioprio, prio);
if (!blk_rq_cpu_valid(req)) if (!blk_rq_cpu_valid(req))
req->cpu = bio->bi_comp_cpu; req->cpu = bio->bi_comp_cpu;
...@@ -1171,10 +1169,8 @@ static int __make_request(struct request_queue *q, struct bio *bio) ...@@ -1171,10 +1169,8 @@ static int __make_request(struct request_queue *q, struct bio *bio)
* not touch req->buffer either... * not touch req->buffer either...
*/ */
req->buffer = bio_data(bio); req->buffer = bio_data(bio);
req->current_nr_sectors = bio_cur_sectors(bio); req->sector = bio->bi_sector;
req->hard_cur_sectors = req->current_nr_sectors; req->data_len += bytes;
req->sector = req->hard_sector = bio->bi_sector;
req->nr_sectors = req->hard_nr_sectors += nr_sectors;
req->ioprio = ioprio_best(req->ioprio, prio); req->ioprio = ioprio_best(req->ioprio, prio);
if (!blk_rq_cpu_valid(req)) if (!blk_rq_cpu_valid(req))
req->cpu = bio->bi_comp_cpu; req->cpu = bio->bi_comp_cpu;
...@@ -1557,7 +1553,7 @@ EXPORT_SYMBOL(submit_bio); ...@@ -1557,7 +1553,7 @@ EXPORT_SYMBOL(submit_bio);
int blk_rq_check_limits(struct request_queue *q, struct request *rq) int blk_rq_check_limits(struct request_queue *q, struct request *rq)
{ {
if (blk_rq_sectors(rq) > q->max_sectors || if (blk_rq_sectors(rq) > q->max_sectors ||
rq->data_len > q->max_hw_sectors << 9) { blk_rq_bytes(rq) > q->max_hw_sectors << 9) {
printk(KERN_ERR "%s: over max size limit.\n", __func__); printk(KERN_ERR "%s: over max size limit.\n", __func__);
return -EIO; return -EIO;
} }
...@@ -1675,35 +1671,6 @@ static void blk_account_io_done(struct request *req) ...@@ -1675,35 +1671,6 @@ static void blk_account_io_done(struct request *req)
} }
} }
/**
* blk_rq_bytes - Returns bytes left to complete in the entire request
* @rq: the request being processed
**/
unsigned int blk_rq_bytes(struct request *rq)
{
if (blk_fs_request(rq))
return blk_rq_sectors(rq) << 9;
return rq->data_len;
}
EXPORT_SYMBOL_GPL(blk_rq_bytes);
/**
* blk_rq_cur_bytes - Returns bytes left to complete in the current segment
* @rq: the request being processed
**/
unsigned int blk_rq_cur_bytes(struct request *rq)
{
if (blk_fs_request(rq))
return rq->current_nr_sectors << 9;
if (rq->bio)
return rq->bio->bi_size;
return rq->data_len;
}
EXPORT_SYMBOL_GPL(blk_rq_cur_bytes);
struct request *elv_next_request(struct request_queue *q) struct request *elv_next_request(struct request_queue *q)
{ {
struct request *rq; struct request *rq;
...@@ -1736,7 +1703,7 @@ struct request *elv_next_request(struct request_queue *q) ...@@ -1736,7 +1703,7 @@ struct request *elv_next_request(struct request_queue *q)
if (rq->cmd_flags & REQ_DONTPREP) if (rq->cmd_flags & REQ_DONTPREP)
break; break;
if (q->dma_drain_size && rq->data_len) { if (q->dma_drain_size && blk_rq_bytes(rq)) {
/* /*
* make sure space for the drain appears we * make sure space for the drain appears we
* know we can do this because max_hw_segments * know we can do this because max_hw_segments
...@@ -1759,7 +1726,7 @@ struct request *elv_next_request(struct request_queue *q) ...@@ -1759,7 +1726,7 @@ struct request *elv_next_request(struct request_queue *q)
* avoid resource deadlock. REQ_STARTED will * avoid resource deadlock. REQ_STARTED will
* prevent other fs requests from passing this one. * prevent other fs requests from passing this one.
*/ */
if (q->dma_drain_size && rq->data_len && if (q->dma_drain_size && blk_rq_bytes(rq) &&
!(rq->cmd_flags & REQ_DONTPREP)) { !(rq->cmd_flags & REQ_DONTPREP)) {
/* /*
* remove the space for the drain we added * remove the space for the drain we added
...@@ -1911,8 +1878,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) ...@@ -1911,8 +1878,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
* can find how many bytes remain in the request * can find how many bytes remain in the request
* later. * later.
*/ */
req->nr_sectors = req->hard_nr_sectors = 0; req->data_len = 0;
req->current_nr_sectors = req->hard_cur_sectors = 0;
return false; return false;
} }
...@@ -1926,8 +1892,25 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) ...@@ -1926,8 +1892,25 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
bio_iovec(bio)->bv_len -= nr_bytes; bio_iovec(bio)->bv_len -= nr_bytes;
} }
blk_recalc_rq_sectors(req, total_bytes >> 9); req->data_len -= total_bytes;
req->buffer = bio_data(req->bio);
/* update sector only for requests with clear definition of sector */
if (blk_fs_request(req) || blk_discard_rq(req))
req->sector += total_bytes >> 9;
/*
* If total number of sectors is less than the first segment
* size, something has gone terribly wrong.
*/
if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) {
printk(KERN_ERR "blk: request botched\n");
req->data_len = blk_rq_cur_bytes(req);
}
/* recalculate the number of segments */
blk_recalc_rq_segments(req); blk_recalc_rq_segments(req);
return true; return true;
} }
EXPORT_SYMBOL_GPL(blk_update_request); EXPORT_SYMBOL_GPL(blk_update_request);
...@@ -2049,11 +2032,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq, ...@@ -2049,11 +2032,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
rq->nr_phys_segments = bio_phys_segments(q, bio); rq->nr_phys_segments = bio_phys_segments(q, bio);
rq->buffer = bio_data(bio); rq->buffer = bio_data(bio);
} }
rq->current_nr_sectors = bio_cur_sectors(bio);
rq->hard_cur_sectors = rq->current_nr_sectors;
rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
rq->data_len = bio->bi_size; rq->data_len = bio->bi_size;
rq->bio = rq->biotail = bio; rq->bio = rq->biotail = bio;
if (bio->bi_bdev) if (bio->bi_bdev)
......
...@@ -9,35 +9,6 @@ ...@@ -9,35 +9,6 @@
#include "blk.h" #include "blk.h"
void blk_recalc_rq_sectors(struct request *rq, int nsect)
{
if (blk_fs_request(rq) || blk_discard_rq(rq)) {
rq->hard_sector += nsect;
rq->hard_nr_sectors -= nsect;
/*
* Move the I/O submission pointers ahead if required.
*/
if ((rq->nr_sectors >= rq->hard_nr_sectors) &&
(rq->sector <= rq->hard_sector)) {
rq->sector = rq->hard_sector;
rq->nr_sectors = rq->hard_nr_sectors;
rq->hard_cur_sectors = bio_cur_sectors(rq->bio);
rq->current_nr_sectors = rq->hard_cur_sectors;
rq->buffer = bio_data(rq->bio);
}
/*
* if total number of sectors is less than the first segment
* size, something has gone terribly wrong
*/
if (rq->nr_sectors < rq->current_nr_sectors) {
printk(KERN_ERR "blk: request botched\n");
rq->nr_sectors = rq->current_nr_sectors;
}
}
}
static unsigned int __blk_recalc_rq_segments(struct request_queue *q, static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
struct bio *bio) struct bio *bio)
{ {
...@@ -199,8 +170,9 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq, ...@@ -199,8 +170,9 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
if (unlikely(rq->cmd_flags & REQ_COPY_USER) && if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
(rq->data_len & q->dma_pad_mask)) { (blk_rq_bytes(rq) & q->dma_pad_mask)) {
unsigned int pad_len = (q->dma_pad_mask & ~rq->data_len) + 1; unsigned int pad_len =
(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;
sg->length += pad_len; sg->length += pad_len;
rq->extra_len += pad_len; rq->extra_len += pad_len;
...@@ -398,7 +370,7 @@ static int attempt_merge(struct request_queue *q, struct request *req, ...@@ -398,7 +370,7 @@ static int attempt_merge(struct request_queue *q, struct request *req,
req->biotail->bi_next = next->bio; req->biotail->bi_next = next->bio;
req->biotail = next->biotail; req->biotail = next->biotail;
req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors; req->data_len += blk_rq_bytes(next);
elv_merge_requests(q, req, next); elv_merge_requests(q, req, next);
......
...@@ -101,7 +101,6 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req, ...@@ -101,7 +101,6 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
int attempt_back_merge(struct request_queue *q, struct request *rq); int attempt_back_merge(struct request_queue *q, struct request *rq);
int attempt_front_merge(struct request_queue *q, struct request *rq); int attempt_front_merge(struct request_queue *q, struct request *rq);
void blk_recalc_rq_segments(struct request *rq); void blk_recalc_rq_segments(struct request *rq);
void blk_recalc_rq_sectors(struct request *rq, int nsect);
void blk_queue_congestion_threshold(struct request_queue *q); void blk_queue_congestion_threshold(struct request_queue *q);
......
...@@ -579,9 +579,9 @@ cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root, ...@@ -579,9 +579,9 @@ cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
* Sort strictly based on sector. Smallest to the left, * Sort strictly based on sector. Smallest to the left,
* largest to the right. * largest to the right.
*/ */
if (sector > cfqq->next_rq->sector) if (sector > blk_rq_pos(cfqq->next_rq))
n = &(*p)->rb_right; n = &(*p)->rb_right;
else if (sector < cfqq->next_rq->sector) else if (sector < blk_rq_pos(cfqq->next_rq))
n = &(*p)->rb_left; n = &(*p)->rb_left;
else else
break; break;
...@@ -611,8 +611,8 @@ static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq) ...@@ -611,8 +611,8 @@ static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
return; return;
cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio]; cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
__cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root, cfqq->next_rq->sector, __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
&parent, &p); blk_rq_pos(cfqq->next_rq), &parent, &p);
if (!__cfqq) { if (!__cfqq) {
rb_link_node(&cfqq->p_node, parent, p); rb_link_node(&cfqq->p_node, parent, p);
rb_insert_color(&cfqq->p_node, cfqq->p_root); rb_insert_color(&cfqq->p_node, cfqq->p_root);
...@@ -996,7 +996,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd, ...@@ -996,7 +996,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
if (cfq_rq_close(cfqd, __cfqq->next_rq)) if (cfq_rq_close(cfqd, __cfqq->next_rq))
return __cfqq; return __cfqq;
if (__cfqq->next_rq->sector < sector) if (blk_rq_pos(__cfqq->next_rq) < sector)
node = rb_next(&__cfqq->p_node); node = rb_next(&__cfqq->p_node);
else else
node = rb_prev(&__cfqq->p_node); node = rb_prev(&__cfqq->p_node);
......
...@@ -218,12 +218,12 @@ struct bio { ...@@ -218,12 +218,12 @@ struct bio {
#define bio_sectors(bio) ((bio)->bi_size >> 9) #define bio_sectors(bio) ((bio)->bi_size >> 9)
#define bio_empty_barrier(bio) (bio_barrier(bio) && !bio_has_data(bio) && !bio_discard(bio)) #define bio_empty_barrier(bio) (bio_barrier(bio) && !bio_has_data(bio) && !bio_discard(bio))
static inline unsigned int bio_cur_sectors(struct bio *bio) static inline unsigned int bio_cur_bytes(struct bio *bio)
{ {
if (bio->bi_vcnt) if (bio->bi_vcnt)
return bio_iovec(bio)->bv_len >> 9; return bio_iovec(bio)->bv_len;
else /* dataless requests such as discard */ else /* dataless requests such as discard */
return bio->bi_size >> 9; return bio->bi_size;
} }
static inline void *bio_data(struct bio *bio) static inline void *bio_data(struct bio *bio)
......
...@@ -166,19 +166,8 @@ struct request { ...@@ -166,19 +166,8 @@ struct request {
enum rq_cmd_type_bits cmd_type; enum rq_cmd_type_bits cmd_type;
unsigned long atomic_flags; unsigned long atomic_flags;
/* Maintain bio traversal state for part by part I/O submission. sector_t sector; /* sector cursor */
* hard_* are block layer internals, no driver should touch them! unsigned int data_len; /* total data len, don't access directly */
*/
sector_t sector; /* next sector to submit */
sector_t hard_sector; /* next sector to complete */
unsigned long nr_sectors; /* no. of sectors left to submit */
unsigned long hard_nr_sectors; /* no. of sectors left to complete */
/* no. of sectors left to submit in the current segment */
unsigned int current_nr_sectors;
/* no. of sectors left to complete in the current segment */
unsigned int hard_cur_sectors;
struct bio *bio; struct bio *bio;
struct bio *biotail; struct bio *biotail;
...@@ -226,7 +215,6 @@ struct request { ...@@ -226,7 +215,6 @@ struct request {
unsigned char __cmd[BLK_MAX_CDB]; unsigned char __cmd[BLK_MAX_CDB];
unsigned char *cmd; unsigned char *cmd;
unsigned int data_len;
unsigned int extra_len; /* length of alignment and padding */ unsigned int extra_len; /* length of alignment and padding */
unsigned int sense_len; unsigned int sense_len;
unsigned int resid_len; /* residual count */ unsigned int resid_len; /* residual count */
...@@ -840,20 +828,27 @@ extern void blkdev_dequeue_request(struct request *req); ...@@ -840,20 +828,27 @@ extern void blkdev_dequeue_request(struct request *req);
*/ */
static inline sector_t blk_rq_pos(const struct request *rq) static inline sector_t blk_rq_pos(const struct request *rq)
{ {
return rq->hard_sector; return rq->sector;
}
static inline unsigned int blk_rq_bytes(const struct request *rq)
{
return rq->data_len;
} }
extern unsigned int blk_rq_bytes(struct request *rq); static inline int blk_rq_cur_bytes(const struct request *rq)
extern unsigned int blk_rq_cur_bytes(struct request *rq); {
return rq->bio ? bio_cur_bytes(rq->bio) : 0;
}
static inline unsigned int blk_rq_sectors(const struct request *rq) static inline unsigned int blk_rq_sectors(const struct request *rq)
{ {
return rq->hard_nr_sectors; return blk_rq_bytes(rq) >> 9;
} }
static inline unsigned int blk_rq_cur_sectors(const struct request *rq) static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{ {
return rq->hard_cur_sectors; return blk_rq_cur_bytes(rq) >> 9;
} }
/* /*
...@@ -928,7 +923,7 @@ static inline void blk_end_request_all(struct request *rq, int error) ...@@ -928,7 +923,7 @@ static inline void blk_end_request_all(struct request *rq, int error)
*/ */
static inline bool blk_end_request_cur(struct request *rq, int error) static inline bool blk_end_request_cur(struct request *rq, int error)
{ {
return blk_end_request(rq, error, rq->hard_cur_sectors << 9); return blk_end_request(rq, error, blk_rq_cur_bytes(rq));
} }
/** /**
...@@ -981,7 +976,7 @@ static inline void __blk_end_request_all(struct request *rq, int error) ...@@ -981,7 +976,7 @@ static inline void __blk_end_request_all(struct request *rq, int error)
*/ */
static inline bool __blk_end_request_cur(struct request *rq, int error) static inline bool __blk_end_request_cur(struct request *rq, int error)
{ {
return __blk_end_request(rq, error, rq->hard_cur_sectors << 9); return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
} }
extern void blk_complete_request(struct request *); extern void blk_complete_request(struct request *);
......
...@@ -171,7 +171,7 @@ enum { ...@@ -171,7 +171,7 @@ enum {
ELV_MQUEUE_MUST, ELV_MQUEUE_MUST,
}; };
#define rq_end_sector(rq) ((rq)->sector + (rq)->nr_sectors) #define rq_end_sector(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))
#define rb_entry_rq(node) rb_entry((node), struct request, rb_node) #define rb_entry_rq(node) rb_entry((node), struct request, rb_node)
/* /*
......
...@@ -642,12 +642,12 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq, ...@@ -642,12 +642,12 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
if (blk_pc_request(rq)) { if (blk_pc_request(rq)) {
what |= BLK_TC_ACT(BLK_TC_PC); what |= BLK_TC_ACT(BLK_TC_PC);
__blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, __blk_add_trace(bt, 0, blk_rq_bytes(rq), rw,
rq->cmd_len, rq->cmd); what, rq->errors, rq->cmd_len, rq->cmd);
} else { } else {
what |= BLK_TC_ACT(BLK_TC_FS); what |= BLK_TC_ACT(BLK_TC_FS);
__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_sectors(rq) << 9, __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), rw,
rw, what, rq->errors, 0, NULL); what, rq->errors, 0, NULL);
} }
} }
...@@ -854,11 +854,11 @@ void blk_add_driver_data(struct request_queue *q, ...@@ -854,11 +854,11 @@ void blk_add_driver_data(struct request_queue *q,
return; return;
if (blk_pc_request(rq)) if (blk_pc_request(rq))
__blk_add_trace(bt, 0, rq->data_len, 0, BLK_TA_DRV_DATA, __blk_add_trace(bt, 0, blk_rq_bytes(rq), 0,
rq->errors, len, data); BLK_TA_DRV_DATA, rq->errors, len, data);
else else
__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_sectors(rq) << 9, __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), 0,
0, BLK_TA_DRV_DATA, rq->errors, len, data); BLK_TA_DRV_DATA, rq->errors, len, data);
} }
EXPORT_SYMBOL_GPL(blk_add_driver_data); EXPORT_SYMBOL_GPL(blk_add_driver_data);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment