Commit 8a5ecdd4 authored by Tejun Heo, committed by Jens Axboe

block: add q->nr_rqs[] and move q->rq.elvpriv to q->nr_rqs_elvpriv

Add q->nr_rqs[] which currently behaves the same as q->rq.count[] and
move q->rq.elvpriv to q->nr_rqs_elvpriv.  blk_drain_queue() is updated
to use q->nr_rqs[] instead of q->rq.count[].

These counters separate queue-wide request statistics from the
request list and allow implementation of per-queue request allocation.

While at it, properly indent fields of struct request_list.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent b1208b56
...@@ -387,7 +387,7 @@ void blk_drain_queue(struct request_queue *q, bool drain_all) ...@@ -387,7 +387,7 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
if (!list_empty(&q->queue_head) && q->request_fn) if (!list_empty(&q->queue_head) && q->request_fn)
__blk_run_queue(q); __blk_run_queue(q);
drain |= q->rq.elvpriv; drain |= q->nr_rqs_elvpriv;
/* /*
* Unfortunately, requests are queued at and tracked from * Unfortunately, requests are queued at and tracked from
...@@ -397,7 +397,7 @@ void blk_drain_queue(struct request_queue *q, bool drain_all) ...@@ -397,7 +397,7 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
if (drain_all) { if (drain_all) {
drain |= !list_empty(&q->queue_head); drain |= !list_empty(&q->queue_head);
for (i = 0; i < 2; i++) { for (i = 0; i < 2; i++) {
drain |= q->rq.count[i]; drain |= q->nr_rqs[i];
drain |= q->in_flight[i]; drain |= q->in_flight[i];
drain |= !list_empty(&q->flush_queue[i]); drain |= !list_empty(&q->flush_queue[i]);
} }
...@@ -526,7 +526,6 @@ static int blk_init_free_list(struct request_queue *q) ...@@ -526,7 +526,6 @@ static int blk_init_free_list(struct request_queue *q)
rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0; rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0; rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
rl->elvpriv = 0;
init_waitqueue_head(&rl->wait[BLK_RW_SYNC]); init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]); init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);
...@@ -791,9 +790,10 @@ static void freed_request(struct request_queue *q, unsigned int flags) ...@@ -791,9 +790,10 @@ static void freed_request(struct request_queue *q, unsigned int flags)
struct request_list *rl = &q->rq; struct request_list *rl = &q->rq;
int sync = rw_is_sync(flags); int sync = rw_is_sync(flags);
q->nr_rqs[sync]--;
rl->count[sync]--; rl->count[sync]--;
if (flags & REQ_ELVPRIV) if (flags & REQ_ELVPRIV)
rl->elvpriv--; q->nr_rqs_elvpriv--;
__freed_request(q, sync); __freed_request(q, sync);
...@@ -902,6 +902,7 @@ static struct request *__get_request(struct request_queue *q, int rw_flags, ...@@ -902,6 +902,7 @@ static struct request *__get_request(struct request_queue *q, int rw_flags,
if (rl->count[is_sync] >= (3 * q->nr_requests / 2)) if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
return NULL; return NULL;
q->nr_rqs[is_sync]++;
rl->count[is_sync]++; rl->count[is_sync]++;
rl->starved[is_sync] = 0; rl->starved[is_sync] = 0;
...@@ -917,7 +918,7 @@ static struct request *__get_request(struct request_queue *q, int rw_flags, ...@@ -917,7 +918,7 @@ static struct request *__get_request(struct request_queue *q, int rw_flags,
*/ */
if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) { if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
rw_flags |= REQ_ELVPRIV; rw_flags |= REQ_ELVPRIV;
rl->elvpriv++; q->nr_rqs_elvpriv++;
if (et->icq_cache && ioc) if (et->icq_cache && ioc)
icq = ioc_lookup_icq(ioc, q); icq = ioc_lookup_icq(ioc, q);
} }
...@@ -978,7 +979,7 @@ static struct request *__get_request(struct request_queue *q, int rw_flags, ...@@ -978,7 +979,7 @@ static struct request *__get_request(struct request_queue *q, int rw_flags,
rq->elv.icq = NULL; rq->elv.icq = NULL;
spin_lock_irq(q->queue_lock); spin_lock_irq(q->queue_lock);
rl->elvpriv--; q->nr_rqs_elvpriv--;
spin_unlock_irq(q->queue_lock); spin_unlock_irq(q->queue_lock);
goto out; goto out;
......
...@@ -51,11 +51,10 @@ struct request_list { ...@@ -51,11 +51,10 @@ struct request_list {
* count[], starved[], and wait[] are indexed by * count[], starved[], and wait[] are indexed by
* BLK_RW_SYNC/BLK_RW_ASYNC * BLK_RW_SYNC/BLK_RW_ASYNC
*/ */
int count[2]; int count[2];
int starved[2]; int starved[2];
int elvpriv; mempool_t *rq_pool;
mempool_t *rq_pool; wait_queue_head_t wait[2];
wait_queue_head_t wait[2];
}; };
/* /*
...@@ -282,6 +281,8 @@ struct request_queue { ...@@ -282,6 +281,8 @@ struct request_queue {
struct list_head queue_head; struct list_head queue_head;
struct request *last_merge; struct request *last_merge;
struct elevator_queue *elevator; struct elevator_queue *elevator;
int nr_rqs[2]; /* # allocated [a]sync rqs */
int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */
/* /*
* the queue request freelist, one for reads and one for writes * the queue request freelist, one for reads and one for writes
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment