Commit 95158a89 authored by Peter Zijlstra

sched,rt: Use the full cpumask for balancing

We want migrate_disable() tasks to get PULLs in order for them to PUSH
away the higher priority task.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
Reviewed-by: Daniel Bristot de Oliveira <bristot@redhat.com>
Link: https://lkml.kernel.org/r/20201023102347.310519774@infradead.org
parent 14e292f8
...@@ -120,7 +120,7 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p, ...@@ -120,7 +120,7 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
const struct sched_dl_entity *dl_se = &p->dl; const struct sched_dl_entity *dl_se = &p->dl;
if (later_mask && if (later_mask &&
cpumask_and(later_mask, cp->free_cpus, p->cpus_ptr)) { cpumask_and(later_mask, cp->free_cpus, &p->cpus_mask)) {
unsigned long cap, max_cap = 0; unsigned long cap, max_cap = 0;
int cpu, max_cpu = -1; int cpu, max_cpu = -1;
...@@ -151,7 +151,7 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p, ...@@ -151,7 +151,7 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
WARN_ON(best_cpu != -1 && !cpu_present(best_cpu)); WARN_ON(best_cpu != -1 && !cpu_present(best_cpu));
if (cpumask_test_cpu(best_cpu, p->cpus_ptr) && if (cpumask_test_cpu(best_cpu, &p->cpus_mask) &&
dl_time_before(dl_se->deadline, cp->elements[0].dl)) { dl_time_before(dl_se->deadline, cp->elements[0].dl)) {
if (later_mask) if (later_mask)
cpumask_set_cpu(best_cpu, later_mask); cpumask_set_cpu(best_cpu, later_mask);
......
...@@ -73,11 +73,11 @@ static inline int __cpupri_find(struct cpupri *cp, struct task_struct *p, ...@@ -73,11 +73,11 @@ static inline int __cpupri_find(struct cpupri *cp, struct task_struct *p,
if (skip) if (skip)
return 0; return 0;
if (cpumask_any_and(p->cpus_ptr, vec->mask) >= nr_cpu_ids) if (cpumask_any_and(&p->cpus_mask, vec->mask) >= nr_cpu_ids)
return 0; return 0;
if (lowest_mask) { if (lowest_mask) {
cpumask_and(lowest_mask, p->cpus_ptr, vec->mask); cpumask_and(lowest_mask, &p->cpus_mask, vec->mask);
/* /*
* We have to ensure that we have at least one bit * We have to ensure that we have at least one bit
......
...@@ -1912,7 +1912,7 @@ static void task_fork_dl(struct task_struct *p) ...@@ -1912,7 +1912,7 @@ static void task_fork_dl(struct task_struct *p)
static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu) static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
{ {
if (!task_running(rq, p) && if (!task_running(rq, p) &&
cpumask_test_cpu(cpu, p->cpus_ptr)) cpumask_test_cpu(cpu, &p->cpus_mask))
return 1; return 1;
return 0; return 0;
} }
...@@ -2062,7 +2062,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq) ...@@ -2062,7 +2062,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
/* Retry if something changed. */ /* Retry if something changed. */
if (double_lock_balance(rq, later_rq)) { if (double_lock_balance(rq, later_rq)) {
if (unlikely(task_rq(task) != rq || if (unlikely(task_rq(task) != rq ||
!cpumask_test_cpu(later_rq->cpu, task->cpus_ptr) || !cpumask_test_cpu(later_rq->cpu, &task->cpus_mask) ||
task_running(rq, task) || task_running(rq, task) ||
!dl_task(task) || !dl_task(task) ||
!task_on_rq_queued(task))) { !task_on_rq_queued(task))) {
......
...@@ -1658,7 +1658,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p) ...@@ -1658,7 +1658,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{ {
if (!task_running(rq, p) && if (!task_running(rq, p) &&
cpumask_test_cpu(cpu, p->cpus_ptr)) cpumask_test_cpu(cpu, &p->cpus_mask))
return 1; return 1;
return 0; return 0;
...@@ -1811,7 +1811,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) ...@@ -1811,7 +1811,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
* Also make sure that it wasn't scheduled on its rq. * Also make sure that it wasn't scheduled on its rq.
*/ */
if (unlikely(task_rq(task) != rq || if (unlikely(task_rq(task) != rq ||
!cpumask_test_cpu(lowest_rq->cpu, task->cpus_ptr) || !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_mask) ||
task_running(rq, task) || task_running(rq, task) ||
!rt_task(task) || !rt_task(task) ||
!task_on_rq_queued(task))) { !task_on_rq_queued(task))) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment