Commit 4f292a21 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] Fix IO scheduler regression

From: Nick Piggin <piggin@cyberone.com.au>

Randy has just reported that this patch fixes his regression: with it applied,
his results are as good as test5. He is also seeing quite large increases
(above test5) when IO scheduler barriers are disabled (this patch doesn't do
that). Perhaps something is using them too liberally.

Anyway, this reverts AS to its old default of not anticipating IO for a
program that has just submitted its first request (anticipating there really
hurts find | xargs grep sorts of workloads).  I am working on a proper fix.
That can go in after 2.6.0 anyway.
parent 91d61a0f
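
For context on the hunks below: the removed heuristic kept one per-queue
counter, new_success, scoring how often anticipation paid off for brand-new
processes (those with no seek samples yet). What follows is a minimal
user-space sketch of that arithmetic, reconstructed from the diff; the
new_success name, the 3/4 decay, and the 256/512 constants come from the
removed lines, while the loop counts are purely illustrative and nothing
here is kernel code:

#include <stdio.h>

int main(void)
{
	/* as_init() started the score at 512 */
	unsigned long new_success = 512;
	int i;

	/* as_antic_stop(): each anticipation "win" for a new process
	 * pulled the score toward its fixed point of 1024 */
	for (i = 0; i < 5; i++) {
		new_success = (new_success * 3) / 4 + 256;
		printf("after win %d:     %lu\n", i + 1, new_success);
	}

	/* as_antic_timeout(): each timed-out anticipation decayed the
	 * score toward 0; once it fell below 256,
	 * as_can_break_anticipation() stopped anticipating on new
	 * processes altogether */
	for (i = 0; i < 5; i++) {
		new_success = (new_success * 3) / 4;
		printf("after timeout %d: %lu%s\n", i + 1, new_success,
		       new_success < 256 ? "  <- heuristic off" : "");
	}
	return 0;
}

The fixed point of x = 3x/4 + 256 is x = 1024, so a queue whose anticipation
kept succeeding stayed well above the cutoff, while three straight timeouts
from the initial 512 (512 -> 384 -> 288 -> 216) fell below 256 and switched
the heuristic off. The revert removes this feedback loop and makes "never
anticipate a process's first request" unconditional.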
@@ -99,7 +99,6 @@ struct as_data {
 	sector_t last_sector[2];	/* last REQ_SYNC & REQ_ASYNC sectors */
 	struct list_head *dispatch;	/* driver dispatch queue */
 	struct list_head *hash;		/* request hash */
-	unsigned long new_success;	/* anticipation success on new proc */
 	unsigned long current_batch_expires;
 	unsigned long last_check_fifo[2];
 	int changed_batch;		/* 1: waiting for old batch to end */
@@ -585,18 +584,11 @@ static void as_antic_stop(struct as_data *ad)
 	int status = ad->antic_status;
 
 	if (status == ANTIC_WAIT_REQ || status == ANTIC_WAIT_NEXT) {
-		struct as_io_context *aic;
-
 		if (status == ANTIC_WAIT_NEXT)
 			del_timer(&ad->antic_timer);
 		ad->antic_status = ANTIC_FINISHED;
 		/* see as_work_handler */
 		kblockd_schedule_work(&ad->antic_work);
-
-		aic = ad->io_context->aic;
-		if (aic->seek_samples == 0)
-			/* new process */
-			ad->new_success = (ad->new_success * 3) / 4 + 256;
 	}
 }
@@ -612,14 +604,8 @@ static void as_antic_timeout(unsigned long data)
 	spin_lock_irqsave(q->queue_lock, flags);
 	if (ad->antic_status == ANTIC_WAIT_REQ
 			|| ad->antic_status == ANTIC_WAIT_NEXT) {
-		struct as_io_context *aic;
 		ad->antic_status = ANTIC_FINISHED;
 		kblockd_schedule_work(&ad->antic_work);
-
-		aic = ad->io_context->aic;
-		if (aic->seek_samples == 0)
-			/* new process */
-			ad->new_success = (ad->new_success * 3) / 4;
 	}
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
@@ -708,11 +694,10 @@ static int as_can_break_anticipation(struct as_data *ad, struct as_rq *arq)
 		return 1;
 	}
 
-	if (ad->new_success < 256 &&
-			(aic->seek_samples == 0 || aic->ttime_samples == 0)) {
+	if (aic->seek_samples == 0 || aic->ttime_samples == 0) {
 		/*
-		 * Process has just started IO and we have a bad history of
-		 * success anticipating on new processes!
+		 * Process has just started IO. Don't anticipate.
+		 * TODO! Must fix this up.
 		 */
 		return 1;
 	}
@@ -1292,7 +1277,7 @@ static int as_dispatch_request(struct as_data *ad)
 		ad->changed_batch = 0;
 
-		arq->request->flags |= REQ_SOFTBARRIER;
+//		arq->request->flags |= REQ_SOFTBARRIER;
 	}
 
 	/*
@@ -1391,6 +1376,7 @@ static void as_add_request(struct as_data *ad, struct as_rq *arq)
 	} else {
 		as_add_aliased_request(ad, arq, alias);
+
 		/*
 		 * have we been anticipating this request?
 		 * or does it come from the same process as the one we are
@@ -1427,8 +1413,6 @@ static void as_requeue_request(request_queue_t *q, struct request *rq)
 	/* Stop anticipating - let this request get through */
 	as_antic_stop(ad);
-
-	return;
 }
 
 static void
@@ -1437,10 +1421,12 @@ as_insert_request(request_queue_t *q, struct request *rq, int where)
 	struct as_data *ad = q->elevator.elevator_data;
 	struct as_rq *arq = RQ_DATA(rq);
 
+#if 0
 	/* barriers must flush the reorder queue */
 	if (unlikely(rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)
 			&& where == ELEVATOR_INSERT_SORT))
 		where = ELEVATOR_INSERT_BACK;
+#endif
 
 	switch (where) {
 		case ELEVATOR_INSERT_BACK:
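
For reference on the hunk above: under the 2.6 elevator interface,
ELEVATOR_INSERT_BACK appends a request behind everything already queued, and
the AS insert path first drains its sorted lists to the dispatch queue, which
is how a barrier "flushes the reorder queue". With the #if 0 in place,
barrier requests are instead sorted like ordinary requests (this reading of
the INSERT_BACK path is inferred from the scheduler code of that era, not
spelled out in the patch itself).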
@@ -1823,8 +1809,6 @@ static int as_init(request_queue_t *q, elevator_t *e)
 	if (ad->write_batch_count < 2)
 		ad->write_batch_count = 2;
 
-	ad->new_success = 512;
-
 	return 0;
 }