Commit 6936b17e authored by Linus Torvalds

Merge branch 'cfq' of git://git.kernel.dk/data/git/linux-2.6-block

* 'cfq' of git://git.kernel.dk/data/git/linux-2.6-block:
  cfq: Write-only stuff in CFQ data structures
  cfq: async queue allocation per priority
parents 5c72fc5c 8350163a
block/cfq-iosched.c
@@ -92,7 +92,11 @@ struct cfq_data {
 	struct cfq_queue *active_queue;
 	struct cfq_io_context *active_cic;
 
-	struct cfq_queue *async_cfqq[IOPRIO_BE_NR];
+	/*
+	 * async queue for each priority case
+	 */
+	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
+	struct cfq_queue *async_idle_cfqq;
 
 	struct timer_list idle_class_timer;
@@ -111,9 +115,6 @@ struct cfq_data {
 	unsigned int cfq_slice_idle;
 
 	struct list_head cic_list;
-
-	sector_t new_seek_mean;
-	u64 new_seek_total;
 };
 
 /*
@@ -153,8 +154,6 @@ struct cfq_queue {
 
 	/* various state flags, see below */
 	unsigned int flags;
-
-	sector_t last_request_pos;
 };
 
 enum cfqq_state_flags {
@@ -1414,24 +1413,44 @@ cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
 	return cfqq;
 }
 
+static struct cfq_queue **
+cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
+{
+	switch(ioprio_class) {
+	case IOPRIO_CLASS_RT:
+		return &cfqd->async_cfqq[0][ioprio];
+	case IOPRIO_CLASS_BE:
+		return &cfqd->async_cfqq[1][ioprio];
+	case IOPRIO_CLASS_IDLE:
+		return &cfqd->async_idle_cfqq;
+	default:
+		BUG();
+	}
+}
+
 static struct cfq_queue *
 cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
 	      gfp_t gfp_mask)
 {
 	const int ioprio = task_ioprio(tsk);
+	const int ioprio_class = task_ioprio_class(tsk);
+	struct cfq_queue **async_cfqq = NULL;
 	struct cfq_queue *cfqq = NULL;
 
-	if (!is_sync)
-		cfqq = cfqd->async_cfqq[ioprio];
+	if (!is_sync) {
+		async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
+		cfqq = *async_cfqq;
+	}
 
 	if (!cfqq)
 		cfqq = cfq_find_alloc_queue(cfqd, is_sync, tsk, gfp_mask);
 
 	/*
 	 * pin the queue now that it's allocated, scheduler exit will prune it
 	 */
-	if (!is_sync && !cfqd->async_cfqq[ioprio]) {
+	if (!is_sync && !(*async_cfqq)) {
 		atomic_inc(&cfqq->ref);
-		cfqd->async_cfqq[ioprio] = cfqq;
+		*async_cfqq = cfqq;
 	}
 
 	atomic_inc(&cfqq->ref);
@@ -1597,11 +1616,6 @@ cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
 	else
 		sdist = cic->last_request_pos - rq->sector;
 
-	if (!cic->seek_samples) {
-		cfqd->new_seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
-		cfqd->new_seek_mean = cfqd->new_seek_total / 256;
-	}
-
 	/*
 	 * Don't allow the seek distance to get too large from the
 	 * odd fragment, pagein, etc
@@ -1737,7 +1751,6 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	cfq_update_idle_window(cfqd, cfqq, cic);
 
 	cic->last_request_pos = rq->sector + rq->nr_sectors;
-	cfqq->last_request_pos = cic->last_request_pos;
 
 	if (cfqq == cfqd->active_queue) {
 		/*
@@ -2042,11 +2055,24 @@ static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
 	blk_sync_queue(cfqd->queue);
 }
 
+static void cfq_put_async_queues(struct cfq_data *cfqd)
+{
+	int i;
+
+	for (i = 0; i < IOPRIO_BE_NR; i++) {
+		if (cfqd->async_cfqq[0][i])
+			cfq_put_queue(cfqd->async_cfqq[0][i]);
+		if (cfqd->async_cfqq[1][i])
+			cfq_put_queue(cfqd->async_cfqq[1][i]);
+		if (cfqd->async_idle_cfqq)
+			cfq_put_queue(cfqd->async_idle_cfqq);
+	}
+}
+
 static void cfq_exit_queue(elevator_t *e)
 {
 	struct cfq_data *cfqd = e->elevator_data;
 	request_queue_t *q = cfqd->queue;
-	int i;
 
 	cfq_shutdown_timer_wq(cfqd);
@@ -2063,12 +2089,7 @@ static void cfq_exit_queue(elevator_t *e)
 		__cfq_exit_single_io_context(cfqd, cic);
 	}
 
-	/*
-	 * Put the async queues
-	 */
-	for (i = 0; i < IOPRIO_BE_NR; i++)
-		if (cfqd->async_cfqq[i])
-			cfq_put_queue(cfqd->async_cfqq[i]);
+	cfq_put_async_queues(cfqd);
 
 	spin_unlock_irq(q->queue_lock);
...

include/linux/ioprio.h
@@ -53,6 +53,14 @@ static inline int task_ioprio(struct task_struct *task)
 	return IOPRIO_NORM;
 }
 
+static inline int task_ioprio_class(struct task_struct *task)
+{
+	if (ioprio_valid(task->ioprio))
+		return IOPRIO_PRIO_CLASS(task->ioprio);
+
+	return IOPRIO_CLASS_BE;
+}
+
 static inline int task_nice_ioprio(struct task_struct *task)
 {
 	return (task_nice(task) + 20) / 5;
...
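For readers skimming the diff, the core of the "async queue allocation per priority" change is the new indexing scheme: async queues for the RT class live in row 0 of async_cfqq, BE queues in row 1, and the IDLE class collapses to a single shared queue since it has no per-level service distinction. Below is a minimal userspace sketch of that lookup. It is not kernel code: the IOPRIO constants mirror include/linux/ioprio.h, but the struct definitions, the helper name async_queue_slot (a stand-in for cfq_async_queue_prio), and the main() harness are illustrative assumptions only.

/*
 * Illustrative userspace sketch of the per-priority async queue lookup
 * introduced by this merge. Constants mirror the kernel's ioprio.h;
 * the structs and main() harness are stand-ins, not kernel code.
 */
#include <assert.h>
#include <stdio.h>

#define IOPRIO_BE_NR	8	/* priority levels within the BE class */

enum {
	IOPRIO_CLASS_NONE,
	IOPRIO_CLASS_RT,
	IOPRIO_CLASS_BE,
	IOPRIO_CLASS_IDLE,
};

struct cfq_queue { int dummy; };	/* stand-in for the real structure */

struct cfq_data {
	/* one async queue per priority level: row [0] = RT, row [1] = BE */
	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
	/* the idle class shares a single async queue */
	struct cfq_queue *async_idle_cfqq;
};

/* Mirrors cfq_async_queue_prio(): map (class, prio) to a table slot. */
static struct cfq_queue **
async_queue_slot(struct cfq_data *cfqd, int ioprio_class, int ioprio)
{
	switch (ioprio_class) {
	case IOPRIO_CLASS_RT:
		return &cfqd->async_cfqq[0][ioprio];
	case IOPRIO_CLASS_BE:
		return &cfqd->async_cfqq[1][ioprio];
	case IOPRIO_CLASS_IDLE:
		return &cfqd->async_idle_cfqq;
	default:
		assert(0);	/* BUG() in the kernel version */
		return NULL;
	}
}

int main(void)
{
	struct cfq_data cfqd = { { { 0 } } };
	static struct cfq_queue q;

	/* Fill the slot for BE priority 4, much as cfq_get_queue() pins it. */
	struct cfq_queue **slot = async_queue_slot(&cfqd, IOPRIO_CLASS_BE, 4);
	if (!*slot)
		*slot = &q;

	printf("BE/4 slot filled: %s\n",
	       cfqd.async_cfqq[1][4] == &q ? "yes" : "no");
	return 0;
}

The point of the two-row table is that each (class, priority) pair gets exactly one long-lived async queue, pinned once and released at elevator exit by cfq_put_async_queues(), instead of the old flat async_cfqq[IOPRIO_BE_NR] array that ignored the priority class.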