Commit 3abbd8ff authored by Andrew Morton, committed by Linus Torvalds

[PATCH] bring back the batch_requests function

From: Nick Piggin <piggin@cyberone.com.au>

The batch_requests function got lost during the merge of the dynamic request
allocation patch.

We need it for the anticipatory scheduler - when the number of threads
exceeds the number of requests, the anticipated-upon task will undesirably
sleep in get_request_wait().

And apparently some block devices which use small requests need it so they
can string a decent number of them together.

Jens has acked this patch.
parent 3faa61fe
@@ -51,6 +51,11 @@ static int queue_nr_requests;
 unsigned long blk_max_low_pfn, blk_max_pfn;
 static wait_queue_head_t congestion_wqh[2];
 
+static inline int batch_requests(void)
+{
+        return min(BLKDEV_MAX_RQ / 8, 8);
+}
+
 /*
  * Return the threshold (number of free requests) at which the queue is
  * considered to be congested.  It include a little hysteresis to keep the
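For scale, the new helper's threshold is small by design. A minimal userspace sketch of the arithmetic, assuming BLKDEV_MAX_RQ is 128 (a common value in kernels of this era) and substituting a plain macro for the kernel's min():

#include <stdio.h>

/* Stand-ins for the kernel definitions; BLKDEV_MAX_RQ == 128 is an
 * assumption here, not taken from this patch. */
#define BLKDEV_MAX_RQ   128
#define min(a, b)       ((a) < (b) ? (a) : (b))

static inline int batch_requests(void)
{
        return min(BLKDEV_MAX_RQ / 8, 8);
}

int main(void)
{
        /* min(128 / 8, 8) == 8: the batch size is capped at eight
         * regardless of how deep the queue is. */
        printf("batch threshold: %d\n", batch_requests());
        return 0;
}

In other words, a sleeping task is left alone until at least eight requests are free again, at which point it can submit a decent run of them in one go.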
@@ -1180,6 +1185,8 @@ static int blk_init_free_list(request_queue_t *q)
         struct request_list *rl = &q->rq;
 
         rl->count[READ] = rl->count[WRITE] = 0;
+        init_waitqueue_head(&rl->wait[READ]);
+        init_waitqueue_head(&rl->wait[WRITE]);
 
         rl->rq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab, request_cachep);
@@ -1325,18 +1332,33 @@ static struct request *get_request(request_queue_t *q, int rw, int gfp_mask)
 }
 
 /*
- * No available requests for this queue, unplug the device.
+ * No available requests for this queue, unplug the device and wait for some
+ * requests to become available.
  */
 static struct request *get_request_wait(request_queue_t *q, int rw)
 {
+        DEFINE_WAIT(wait);
         struct request *rq;
 
         generic_unplug_device(q);
         do {
                 rq = get_request(q, rw, GFP_NOIO);
 
-                if (!rq)
-                        blk_congestion_wait(rw, HZ / 50);
+                if (!rq) {
+                        struct request_list *rl = &q->rq;
+
+                        prepare_to_wait_exclusive(&rl->wait[rw], &wait,
+                                        TASK_UNINTERRUPTIBLE);
+                        /*
+                         * If _all_ the requests were suddenly returned then
+                         * no wakeup will be delivered.  So now we're on the
+                         * waitqueue, go check for that.
+                         */
+                        rq = get_request(q, rw, GFP_ATOMIC & ~__GFP_HIGH);
+                        if (!rq)
+                                io_schedule();
+                        finish_wait(&rl->wait[rw], &wait);
+                }
         } while (!rq);
 
         return rq;
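The rewritten loop is the standard lost-wakeup guard: queue yourself first, re-check the condition, and only then go to sleep. Here is a userspace analogue of the same discipline using POSIX condition variables; every name in it is hypothetical, and the mutex stands in for the ordering that prepare_to_wait_exclusive()/finish_wait() provide in the kernel:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t more_requests = PTHREAD_COND_INITIALIZER;
static int free_requests;               /* protected by lock */

/* Take one request if any are free; caller must hold lock. */
static bool try_get_request(void)
{
        if (free_requests > 0) {
                free_requests--;
                return true;
        }
        return false;
}

static void get_request_wait_analogue(void)
{
        pthread_mutex_lock(&lock);
        /*
         * Re-check the condition every time before sleeping, so a
         * wakeup that raced with the previous failed check cannot be
         * lost. This mirrors the second get_request() call the patch
         * adds between prepare_to_wait_exclusive() and io_schedule().
         */
        while (!try_get_request())
                pthread_cond_wait(&more_requests, &lock);
        pthread_mutex_unlock(&lock);
}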
@@ -1498,8 +1520,12 @@ void __blk_put_request(request_queue_t *q, struct request *req)
                 blk_free_request(q, req);
 
                 rl->count[rw]--;
-                if ((BLKDEV_MAX_RQ - rl->count[rw]) >= queue_congestion_off_threshold())
+                if ((BLKDEV_MAX_RQ - rl->count[rw]) >=
+                                queue_congestion_off_threshold())
                         clear_queue_congested(q, rw);
+                if ((BLKDEV_MAX_RQ - rl->count[rw]) >= batch_requests() &&
+                                waitqueue_active(&rl->wait[rw]))
+                        wake_up(&rl->wait[rw]);
         }
 }
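On the release side, a freed request only produces a wakeup once a whole batch is available, and the waitqueue_active() test skips the wake_up() entirely when nobody is asleep. Continuing the hypothetical sketch above, with pthread_cond_signal() playing the role of wake_up() on an exclusive waitqueue:

/* BATCH_REQUESTS mirrors min(BLKDEV_MAX_RQ / 8, 8), i.e. 8 under the
 * BLKDEV_MAX_RQ == 128 assumption used earlier. */
#define BATCH_REQUESTS 8

static void put_request_analogue(void)
{
        pthread_mutex_lock(&lock);
        free_requests++;
        /* Wake a sleeper only once a decent run of requests is free,
         * so it can string several together on a single trip. */
        if (free_requests >= BATCH_REQUESTS)
                pthread_cond_signal(&more_requests);
        pthread_mutex_unlock(&lock);
}

A condvar needs no waitqueue_active() counterpart, since signalling with no waiters is already cheap; the kernel's check simply avoids the cost of a wake_up() call on a hot path.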
@@ -27,6 +27,7 @@ struct request_pm_state;
 struct request_list {
         int count[2];
         mempool_t *rq_pool;
+        wait_queue_head_t wait[2];
 };
 
 /*