Commit 5b788ce3 authored by Tejun Heo, committed by Jens Axboe

block: prepare for multiple request_lists

Request allocation is about to be made per-blkg meaning that there'll
be multiple request lists.

* Make queue full state per request_list.  blk_*queue_full() functions
  are renamed to blk_*rl_full() and take @rl instead of @q.

* Rename blk_init_free_list() to blk_init_rl() and make it take @rl
  instead of @q.  Also add @gfp_mask parameter.

* Add blk_exit_rl() instead of destroying rl directly from
  blk_release_queue().

* Add request_list->q and make request alloc/free functions -
  blk_free_request(), [__]freed_request(), __get_request() - take @rl
  instead of @q.

This patch doesn't introduce any functional difference.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 8a5ecdd4
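
For orientation only, and not part of the patch itself: a minimal sketch of how the reworked request_list interface could be used once request lists stop being embedded solely in the queue, e.g. by a per-blkg owner as the commit message anticipates. The struct my_blkg container and the my_* helpers below are hypothetical stand-ins introduced purely for illustration; only blk_init_rl(), blk_exit_rl(), and the blk_*rl_full() helpers come from this patch.

/*
 * Illustrative sketch only -- "struct my_blkg" and the my_* helpers are
 * hypothetical; this patch itself only converts the embedded q->rq list.
 */
struct my_blkg {
        struct request_list rl;         /* a standalone, per-group request list */
};

static int my_blkg_init(struct my_blkg *blkg, struct request_queue *q)
{
        /* sets rl->q and creates the per-rl request mempool on q's node */
        return blk_init_rl(&blkg->rl, q, GFP_KERNEL);
}

static void my_blkg_exit(struct my_blkg *blkg)
{
        /* destroys the rl's request mempool, if one was created */
        blk_exit_rl(&blkg->rl);
}

static void my_track_full(struct request_list *rl, int sync, bool full)
{
        /* full/batching state now lives in rl->flags, not in queue flags */
        if (full)
                blk_set_rl_full(rl, sync);
        else
                blk_clear_rl_full(rl, sync);
}

Callers that keep using the queue-embedded list simply pass &q->rq, exactly as blk_init_allocated_queue() and __blk_put_request() do in the diff below.
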
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -517,13 +517,13 @@ void blk_cleanup_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_cleanup_queue);
 
-static int blk_init_free_list(struct request_queue *q)
+int blk_init_rl(struct request_list *rl, struct request_queue *q,
+                gfp_t gfp_mask)
 {
-        struct request_list *rl = &q->rq;
-
         if (unlikely(rl->rq_pool))
                 return 0;
 
+        rl->q = q;
         rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
         rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
         init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
@@ -531,13 +531,19 @@ static int blk_init_free_list(struct request_queue *q)
         rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
                                           mempool_free_slab, request_cachep,
-                                          GFP_KERNEL, q->node);
+                                          gfp_mask, q->node);
         if (!rl->rq_pool)
                 return -ENOMEM;
 
         return 0;
 }
 
+void blk_exit_rl(struct request_list *rl)
+{
+        if (rl->rq_pool)
+                mempool_destroy(rl->rq_pool);
+}
+
 struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
 {
         return blk_alloc_queue_node(gfp_mask, -1);
@@ -679,7 +685,7 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
         if (!q)
                 return NULL;
 
-        if (blk_init_free_list(q))
+        if (blk_init_rl(&q->rq, q, GFP_KERNEL))
                 return NULL;
 
         q->request_fn = rfn;
@@ -721,15 +727,15 @@ bool blk_get_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_get_queue);
 
-static inline void blk_free_request(struct request_queue *q, struct request *rq)
+static inline void blk_free_request(struct request_list *rl, struct request *rq)
 {
         if (rq->cmd_flags & REQ_ELVPRIV) {
-                elv_put_request(q, rq);
+                elv_put_request(rl->q, rq);
                 if (rq->elv.icq)
                         put_io_context(rq->elv.icq->ioc);
         }
 
-        mempool_free(rq, q->rq.rq_pool);
+        mempool_free(rq, rl->rq_pool);
 }
 
 /*
@@ -766,9 +772,9 @@ static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
         ioc->last_waited = jiffies;
 }
 
-static void __freed_request(struct request_queue *q, int sync)
+static void __freed_request(struct request_list *rl, int sync)
 {
-        struct request_list *rl = &q->rq;
+        struct request_queue *q = rl->q;
 
         if (rl->count[sync] < queue_congestion_off_threshold(q))
                 blk_clear_queue_congested(q, sync);
@@ -777,7 +783,7 @@ static void __freed_request(struct request_queue *q, int sync)
                 if (waitqueue_active(&rl->wait[sync]))
                         wake_up(&rl->wait[sync]);
 
-                blk_clear_queue_full(q, sync);
+                blk_clear_rl_full(rl, sync);
         }
 }
@@ -785,9 +791,9 @@ static void __freed_request(struct request_queue *q, int sync)
  * A request has just been released.  Account for it, update the full and
  * congestion status, wake up any waiters.  Called under q->queue_lock.
  */
-static void freed_request(struct request_queue *q, unsigned int flags)
+static void freed_request(struct request_list *rl, unsigned int flags)
 {
-        struct request_list *rl = &q->rq;
+        struct request_queue *q = rl->q;
         int sync = rw_is_sync(flags);
 
         q->nr_rqs[sync]--;
@@ -795,10 +801,10 @@ static void freed_request(struct request_queue *q, unsigned int flags)
         if (flags & REQ_ELVPRIV)
                 q->nr_rqs_elvpriv--;
 
-        __freed_request(q, sync);
+        __freed_request(rl, sync);
 
         if (unlikely(rl->starved[sync ^ 1]))
-                __freed_request(q, sync ^ 1);
+                __freed_request(rl, sync ^ 1);
 }
 
 /*
@@ -838,7 +844,7 @@ static struct io_context *rq_ioc(struct bio *bio)
 /**
  * __get_request - get a free request
- * @q: request_queue to allocate request from
+ * @rl: request list to allocate from
  * @rw_flags: RW and SYNC flags
  * @bio: bio to allocate request for (can be %NULL)
  * @gfp_mask: allocation mask
@@ -850,11 +856,11 @@ static struct io_context *rq_ioc(struct bio *bio)
  * Returns %NULL on failure, with @q->queue_lock held.
 * Returns !%NULL on success, with @q->queue_lock *not held*.
 */
-static struct request *__get_request(struct request_queue *q, int rw_flags,
+static struct request *__get_request(struct request_list *rl, int rw_flags,
                                      struct bio *bio, gfp_t gfp_mask)
 {
+        struct request_queue *q = rl->q;
         struct request *rq;
-        struct request_list *rl = &q->rq;
         struct elevator_type *et = q->elevator->type;
         struct io_context *ioc = rq_ioc(bio);
         struct io_cq *icq = NULL;
@@ -876,9 +882,9 @@ static struct request *__get_request(struct request_queue *q, int rw_flags,
                          * This process will be allowed to complete a batch of
                          * requests, others will be blocked.
                          */
-                        if (!blk_queue_full(q, is_sync)) {
+                        if (!blk_rl_full(rl, is_sync)) {
                                 ioc_set_batching(q, ioc);
-                                blk_set_queue_full(q, is_sync);
+                                blk_set_rl_full(rl, is_sync);
                         } else {
                                 if (may_queue != ELV_MQUEUE_MUST
                                                 && !ioc_batching(q, ioc)) {
@@ -928,7 +934,7 @@ static struct request *__get_request(struct request_queue *q, int rw_flags,
         spin_unlock_irq(q->queue_lock);
 
         /* allocate and init request */
-        rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
+        rq = mempool_alloc(rl->rq_pool, gfp_mask);
         if (!rq)
                 goto fail_alloc;
@@ -992,7 +998,7 @@ static struct request *__get_request(struct request_queue *q, int rw_flags,
          * queue, but this is pretty rare.
          */
         spin_lock_irq(q->queue_lock);
-        freed_request(q, rw_flags);
+        freed_request(rl, rw_flags);
 
         /*
          * in the very unlikely event that allocation failed and no
@@ -1029,7 +1035,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
         struct request_list *rl = &q->rq;
         struct request *rq;
 retry:
-        rq = __get_request(q, rw_flags, bio, gfp_mask);
+        rq = __get_request(&q->rq, rw_flags, bio, gfp_mask);
         if (rq)
                 return rq;
@@ -1229,8 +1235,8 @@ void __blk_put_request(struct request_queue *q, struct request *req)
                 BUG_ON(!list_empty(&req->queuelist));
                 BUG_ON(!hlist_unhashed(&req->hash));
 
-                blk_free_request(q, req);
-                freed_request(q, flags);
+                blk_free_request(&q->rq, req);
+                freed_request(&q->rq, flags);
         }
 }
 EXPORT_SYMBOL_GPL(__blk_put_request);
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -66,16 +66,16 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
                 blk_clear_queue_congested(q, BLK_RW_ASYNC);
 
         if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
-                blk_set_queue_full(q, BLK_RW_SYNC);
+                blk_set_rl_full(rl, BLK_RW_SYNC);
         } else {
-                blk_clear_queue_full(q, BLK_RW_SYNC);
+                blk_clear_rl_full(rl, BLK_RW_SYNC);
                 wake_up(&rl->wait[BLK_RW_SYNC]);
         }
 
         if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
-                blk_set_queue_full(q, BLK_RW_ASYNC);
+                blk_set_rl_full(rl, BLK_RW_ASYNC);
         } else {
-                blk_clear_queue_full(q, BLK_RW_ASYNC);
+                blk_clear_rl_full(rl, BLK_RW_ASYNC);
                 wake_up(&rl->wait[BLK_RW_ASYNC]);
         }
 
         spin_unlock_irq(q->queue_lock);
@@ -476,7 +476,6 @@ static void blk_release_queue(struct kobject *kobj)
 {
         struct request_queue *q =
                 container_of(kobj, struct request_queue, kobj);
-        struct request_list *rl = &q->rq;
 
         blk_sync_queue(q);
@@ -489,8 +488,7 @@ static void blk_release_queue(struct kobject *kobj)
                 elevator_exit(q->elevator);
         }
 
-        if (rl->rq_pool)
-                mempool_destroy(rl->rq_pool);
+        blk_exit_rl(&q->rq);
 
         if (q->queue_tags)
                 __blk_queue_free_tags(q);
--- a/block/blk.h
+++ b/block/blk.h
@@ -18,6 +18,9 @@ static inline void __blk_get_queue(struct request_queue *q)
         kobject_get(&q->kobj);
 }
 
+int blk_init_rl(struct request_list *rl, struct request_queue *q,
+                gfp_t gfp_mask);
+void blk_exit_rl(struct request_list *rl);
 void init_request_from_bio(struct request *req, struct bio *bio);
 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
                         struct bio *bio);
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -46,7 +46,12 @@ struct blkcg_gq;
 struct request;
 typedef void (rq_end_io_fn)(struct request *, int);
 
+#define BLK_RL_SYNCFULL         (1U << 0)
+#define BLK_RL_ASYNCFULL        (1U << 1)
+
 struct request_list {
+        struct request_queue    *q;     /* the queue this rl belongs to */
+
         /*
          * count[], starved[], and wait[] are indexed by
          * BLK_RW_SYNC/BLK_RW_ASYNC
@@ -55,6 +60,7 @@ struct request_list {
         int starved[2];
         mempool_t *rq_pool;
         wait_queue_head_t wait[2];
+        unsigned int flags;
 };
 
 /*
@@ -562,27 +568,25 @@ static inline bool rq_is_sync(struct request *rq)
         return rw_is_sync(rq->cmd_flags);
 }
 
-static inline int blk_queue_full(struct request_queue *q, int sync)
+static inline bool blk_rl_full(struct request_list *rl, bool sync)
 {
-        if (sync)
-                return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags);
-        return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags);
+        unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
+
+        return rl->flags & flag;
 }
 
-static inline void blk_set_queue_full(struct request_queue *q, int sync)
+static inline void blk_set_rl_full(struct request_list *rl, bool sync)
 {
-        if (sync)
-                queue_flag_set(QUEUE_FLAG_SYNCFULL, q);
-        else
-                queue_flag_set(QUEUE_FLAG_ASYNCFULL, q);
+        unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
+
+        rl->flags |= flag;
 }
 
-static inline void blk_clear_queue_full(struct request_queue *q, int sync)
+static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
 {
-        if (sync)
-                queue_flag_clear(QUEUE_FLAG_SYNCFULL, q);
-        else
-                queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q);
+        unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
+
+        rl->flags &= ~flag;
 }