Commit 65b32a57 authored by Vivek Goyal, committed by Jens Axboe

cfq-iosched: Remove prio_change logic for workload selection

o CFQ now internally divides cfq queues into three workload categories:
  sync-idle, sync-noidle and async. Which workload to run depends primarily
  on the rb_key offset across the three service trees, which is a combination
  of multiple things, including the time at which the queue was queued on the
  service tree.

  There is one exception though: if we switched the prio class, say we served
  some RT tasks and then started serving the BE class again, then within the
  BE class we always started with the sync-noidle workload, irrespective of
  the rb_key offset in the service trees.

  This can provide better latencies for the sync-noidle workload in the
  presence of RT tasks.

o This patch gets rid of that exception, so which workload to run within a
  class always depends on the lowest rb_key across the service trees (see the
  sketch below). The reason is that we now have multiple BE class groups, and
  if we always switched to the sync-noidle workload within a group, we could
  potentially starve a sync-idle workload within that group. The same is true
  for the async workload, which will be in the root group. Also, workload
  switching within a group would become very unpredictable, as it would depend
  on whether some RT workload was running in the system or not.
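
  For illustration, below is a minimal user-space sketch (not kernel code) of
  the rb_key-based selection described above. struct fake_queue and
  key_before() are simplified stand-ins for the kernel's service trees and
  time_before(); the enum values mirror cfq's wl_type_t ordering, and the
  example data in main() is made up.

	/* Standalone sketch of "pick the workload with the lowest rb_key". */
	#include <stdio.h>
	#include <stdbool.h>

	enum wl_type_t { ASYNC_WORKLOAD, SYNC_NOIDLE_WORKLOAD, SYNC_WORKLOAD };

	struct fake_queue {
		bool present;		/* does this service tree have a queue? */
		unsigned long rb_key;	/* rb_key of the tree's leftmost queue */
	};

	/* crude stand-in for time_before(): wrap-safe "a is earlier than b" */
	static bool key_before(unsigned long a, unsigned long b)
	{
		return (long)(a - b) < 0;
	}

	/*
	 * Mirrors the new cfq_choose_wl() loop: scan all three workload
	 * types of the serving class and keep the one queued earliest.
	 */
	static enum wl_type_t choose_wl(struct fake_queue trees[3])
	{
		bool key_valid = false;
		unsigned long lowest_key = 0;
		enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
		int i;

		for (i = 0; i <= SYNC_WORKLOAD; ++i) {
			if (trees[i].present &&
			    (!key_valid || key_before(trees[i].rb_key, lowest_key))) {
				lowest_key = trees[i].rb_key;
				cur_best = i;
				key_valid = true;
			}
		}
		return cur_best;
	}

	int main(void)
	{
		/*
		 * The sync-idle tree was queued earliest, so it wins even
		 * right after an RT episode; the old prio_change logic would
		 * have forced sync-noidle here.
		 */
		struct fake_queue trees[3] = {
			[ASYNC_WORKLOAD]       = { true, 300 },
			[SYNC_NOIDLE_WORKLOAD] = { true, 200 },
			[SYNC_WORKLOAD]        = { true, 100 },
		};

		printf("chosen workload type: %d\n", choose_wl(trees));
		return 0;
	}
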
Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Reviewed-by: Gui Jianfeng <guijianfeng@cn.fujitsu.com>
Acked-by: Corrado Zoccolo <czoccolo@gmail.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent fb104db4
@@ -292,8 +292,7 @@ static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
 static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
 					    enum wl_prio_t prio,
-					    enum wl_type_t type,
-					    struct cfq_data *cfqd)
+					    enum wl_type_t type)
 {
 	if (!cfqg)
 		return NULL;
@@ -1146,7 +1145,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 #endif
 	service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
-						cfqq_type(cfqq), cfqd);
+						cfqq_type(cfqq));
 	if (cfq_class_idle(cfqq)) {
 		rb_key = CFQ_IDLE_DELAY;
 		parent = rb_last(&service_tree->rb);
@@ -1609,7 +1608,7 @@ static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
 {
 	struct cfq_rb_root *service_tree =
 		service_tree_for(cfqd->serving_group, cfqd->serving_prio,
-					cfqd->serving_type, cfqd);
+					cfqd->serving_type);
 	if (!cfqd->rq_queued)
 		return NULL;
@@ -1956,8 +1955,7 @@ static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
 }
 static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
-				struct cfq_group *cfqg, enum wl_prio_t prio,
-				bool prio_changed)
+				struct cfq_group *cfqg, enum wl_prio_t prio)
 {
 	struct cfq_queue *queue;
 	int i;
@@ -1965,24 +1963,9 @@ static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
 	unsigned long lowest_key = 0;
 	enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
-	if (prio_changed) {
-		/*
-		 * When priorities switched, we prefer starting
-		 * from SYNC_NOIDLE (first choice), or just SYNC
-		 * over ASYNC
-		 */
-		if (service_tree_for(cfqg, prio, cur_best, cfqd)->count)
-			return cur_best;
-		cur_best = SYNC_WORKLOAD;
-		if (service_tree_for(cfqg, prio, cur_best, cfqd)->count)
-			return cur_best;
-		return ASYNC_WORKLOAD;
-	}
-	for (i = 0; i < 3; ++i) {
-		/* otherwise, select the one with lowest rb_key */
-		queue = cfq_rb_first(service_tree_for(cfqg, prio, i, cfqd));
+	for (i = 0; i <= SYNC_WORKLOAD; ++i) {
+		/* select the one with lowest rb_key */
+		queue = cfq_rb_first(service_tree_for(cfqg, prio, i));
 		if (queue &&
 		    (!key_valid || time_before(queue->rb_key, lowest_key))) {
 			lowest_key = queue->rb_key;
@@ -1996,8 +1979,6 @@ static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
 static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
 {
-	enum wl_prio_t previous_prio = cfqd->serving_prio;
-	bool prio_changed;
 	unsigned slice;
 	unsigned count;
 	struct cfq_rb_root *st;
@@ -2025,24 +2006,19 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
 	 * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
 	 * expiration time
 	 */
-	prio_changed = (cfqd->serving_prio != previous_prio);
-	st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type,
-				cfqd);
+	st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
 	count = st->count;
 	/*
-	 * If priority didn't change, check workload expiration,
-	 * and that we still have other queues ready
+	 * check workload expiration, and that we still have other queues ready
 	 */
-	if (!prio_changed && count &&
-	    !time_after(jiffies, cfqd->workload_expires))
+	if (count && !time_after(jiffies, cfqd->workload_expires))
 		return;
 	/* otherwise select new workload type */
 	cfqd->serving_type =
-		cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio, prio_changed);
-	st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type,
-				cfqd);
+		cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
+	st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
 	count = st->count;
 	/*
...