Commit db292ca3 authored by Ingo Molnar

sched: default to more aggressive yield for SCHED_BATCH tasks

Do a more aggressive yield for SCHED_BATCH tuned tasks: they are all
about throughput anyway. This allows a gentler migration path for
any apps that relied on the stronger yield.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 77034937
@@ -799,8 +799,9 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
 	 */
 static void yield_task_fair(struct rq *rq)
 {
-	struct cfs_rq *cfs_rq = task_cfs_rq(rq->curr);
-	struct sched_entity *rightmost, *se = &rq->curr->se;
+	struct task_struct *curr = rq->curr;
+	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
+	struct sched_entity *rightmost, *se = &curr->se;
 
 	/*
 	 * Are we the only task in the tree?
@@ -808,7 +809,7 @@ static void yield_task_fair(struct rq *rq)
 	if (unlikely(cfs_rq->nr_running == 1))
 		return;
 
-	if (likely(!sysctl_sched_compat_yield)) {
+	if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
 		__update_rq_clock(rq);
 		/*
 		 * Update run-time statistics of the 'current'.
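
For applications, the migration path mentioned in the commit message amounts to marking a throughput-oriented task as SCHED_BATCH rather than enabling the global sched_compat_yield sysctl. A minimal userspace sketch of that opt-in (illustrative only, not part of this commit):

#define _GNU_SOURCE	/* SCHED_BATCH is Linux-specific */
#include <sched.h>
#include <stdio.h>

int main(void)
{
	/* SCHED_BATCH requires a static priority of 0 */
	struct sched_param param = { .sched_priority = 0 };

	/* pid 0 means the calling thread */
	if (sched_setscheduler(0, SCHED_BATCH, &param) == -1) {
		perror("sched_setscheduler");
		return 1;
	}

	/*
	 * After this commit, sched_yield() from a SCHED_BATCH task
	 * takes the aggressive compat-yield path even when the
	 * sched_compat_yield sysctl is left at 0.
	 */
	sched_yield();
	return 0;
}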