Commit 60ffd5ed authored by Luca Abeni, committed by Peter Zijlstra

sched/deadline: Improve admission control for asymmetric CPU capacities

The current SCHED_DEADLINE (DL) admission control ensures that

    sum of reserved CPU bandwidth < x * M

where

    x = sched_rt_runtime_us / sched_rt_period_us (/proc/sys/kernel/)
    M = # CPUs in root domain.

DL admission control works well for homogeneous systems, where all CPUs
have the same capacity (1024): bounded tardiness for DL tasks and
non-starvation of non-DL tasks are guaranteed.

But on heterogeneous systems, where CPU capacities differ, it can fail
by over-allocating CPU time on smaller-capacity CPUs.
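
As an illustration (capacities chosen for the example, not taken from
any particular SoC): on a root domain with four big CPUs of capacity
1024 and four little CPUs of capacity 512, and the default x = 0.95,
the CPU-count based test admits up to

    0.95 * 8 = 7.6

CPUs' worth of bandwidth, while the hardware can only deliver

    (4 * 1024 + 4 * 512) / 1024 = 6

CPUs' worth of work, so the little CPUs can end up over-committed.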

On an Arm big.LITTLE/DynamIQ system, DL tasks can easily starve other
tasks, making the system unusable.

Fix this by explicitly considering CPU capacity in the DL admission
test, replacing M with the root domain's CPU capacity sum.
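
In other words, the admission test effectively becomes

    sum of reserved CPU bandwidth < x * C / 1024

where

    C = sum of CPU capacities in the root domain
        (SCHED_CAPACITY_SCALE = 1024 per fully-capable CPU)

so, for the example system above, the admissible bandwidth drops from
0.95 * 8 = 7.6 to 0.95 * 6 = 5.7 CPUs' worth.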
Signed-off-by: Luca Abeni <luca.abeni@santannapisa.it>
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Juri Lelli <juri.lelli@redhat.com>
Link: https://lkml.kernel.org/r/20200520134243.19352-4-dietmar.eggemann@arm.com
parent fc9dc698
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -2590,11 +2590,12 @@ void sched_dl_do_global(void)
 int sched_dl_overflow(struct task_struct *p, int policy,
                       const struct sched_attr *attr)
 {
-        struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
         u64 period = attr->sched_period ?: attr->sched_deadline;
         u64 runtime = attr->sched_runtime;
         u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
-        int cpus, err = -1;
+        int cpus, err = -1, cpu = task_cpu(p);
+        struct dl_bw *dl_b = dl_bw_of(cpu);
+        unsigned long cap;
 
         if (attr->sched_flags & SCHED_FLAG_SUGOV)
                 return 0;
@@ -2609,15 +2610,17 @@ int sched_dl_overflow(struct task_struct *p, int policy,
          * allocated bandwidth of the container.
          */
         raw_spin_lock(&dl_b->lock);
-        cpus = dl_bw_cpus(task_cpu(p));
+        cpus = dl_bw_cpus(cpu);
+        cap = dl_bw_capacity(cpu);
+
         if (dl_policy(policy) && !task_has_dl_policy(p) &&
-            !__dl_overflow(dl_b, cpus, 0, new_bw)) {
+            !__dl_overflow(dl_b, cap, 0, new_bw)) {
                 if (hrtimer_active(&p->dl.inactive_timer))
                         __dl_sub(dl_b, p->dl.dl_bw, cpus);
                 __dl_add(dl_b, new_bw, cpus);
                 err = 0;
         } else if (dl_policy(policy) && task_has_dl_policy(p) &&
-                   !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
+                   !__dl_overflow(dl_b, cap, p->dl.dl_bw, new_bw)) {
                 /*
                  * XXX this is slightly incorrect: when the task
                  * utilization decreases, we should delay the total
@@ -2772,19 +2775,19 @@ bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
 #ifdef CONFIG_SMP
 int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed)
 {
+        unsigned long flags, cap;
         unsigned int dest_cpu;
         struct dl_bw *dl_b;
         bool overflow;
-        int cpus, ret;
-        unsigned long flags;
+        int ret;
 
         dest_cpu = cpumask_any_and(cpu_active_mask, cs_cpus_allowed);
 
         rcu_read_lock_sched();
         dl_b = dl_bw_of(dest_cpu);
         raw_spin_lock_irqsave(&dl_b->lock, flags);
-        cpus = dl_bw_cpus(dest_cpu);
-        overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);
+        cap = dl_bw_capacity(dest_cpu);
+        overflow = __dl_overflow(dl_b, cap, 0, p->dl.dl_bw);
         if (overflow) {
                 ret = -EBUSY;
         } else {
@@ -2794,6 +2797,8 @@ int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed)
                  * We will free resources in the source root_domain
                  * later on (see set_cpus_allowed_dl()).
                  */
+                int cpus = dl_bw_cpus(dest_cpu);
+
                 __dl_add(dl_b, p->dl.dl_bw, cpus);
                 ret = 0;
         }
@@ -2826,16 +2831,15 @@ int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
 bool dl_cpu_busy(unsigned int cpu)
 {
-        unsigned long flags;
+        unsigned long flags, cap;
         struct dl_bw *dl_b;
         bool overflow;
-        int cpus;
 
         rcu_read_lock_sched();
         dl_b = dl_bw_of(cpu);
         raw_spin_lock_irqsave(&dl_b->lock, flags);
-        cpus = dl_bw_cpus(cpu);
-        overflow = __dl_overflow(dl_b, cpus, 0, 0);
+        cap = dl_bw_capacity(cpu);
+        overflow = __dl_overflow(dl_b, cap, 0, 0);
         raw_spin_unlock_irqrestore(&dl_b->lock, flags);
         rcu_read_unlock_sched();

--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -310,11 +310,11 @@ void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
         __dl_update(dl_b, -((s32)tsk_bw / cpus));
 }
 
-static inline
-bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
+static inline bool __dl_overflow(struct dl_bw *dl_b, unsigned long cap,
+                                 u64 old_bw, u64 new_bw)
 {
         return dl_b->bw != -1 &&
-               dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
+               cap_scale(dl_b->bw, cap) < dl_b->total_bw - old_bw + new_bw;
 }
 
 extern void init_dl_bw(struct dl_bw *dl_b);
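
To see the arithmetic end to end, here is a minimal user-space sketch (not
kernel code) of the capacity-based admission test above. It assumes
cap_scale() behaves as (bw * cap) >> SCHED_CAPACITY_SHIFT with
SCHED_CAPACITY_SHIFT = 10, and that bandwidths are runtime/period ratios in
BW_SHIFT = 20 fixed point, as to_ratio() computes them; the root domain
capacities and the task parameters are made up for the example.

#include <stdint.h>
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT    10      /* capacity 1024 == one full-speed CPU */
#define BW_SHIFT                20      /* fixed-point shift used for bandwidths */

/* runtime/period ratio in BW_SHIFT fixed point, like to_ratio() */
static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
        if (!period)
                return 0;
        return (runtime << BW_SHIFT) / period;
}

/* capacity-aware overflow check, mirroring the new __dl_overflow() */
static int dl_overflow(uint64_t dl_bw, unsigned long cap,
                       uint64_t total_bw, uint64_t new_bw)
{
        return ((dl_bw * cap) >> SCHED_CAPACITY_SHIFT) < total_bw + new_bw;
}

int main(void)
{
        unsigned long cap = 4 * 1024 + 4 * 512;         /* 4 big + 4 little CPUs */
        uint64_t dl_bw = to_ratio(1000000, 950000);     /* default 95% global limit */
        uint64_t task_bw = to_ratio(100000, 80000);     /* each task: 80% of one CPU */
        uint64_t total_bw = 0;

        for (int i = 0; i < 8; i++) {
                if (dl_overflow(dl_bw, cap, total_bw, task_bw)) {
                        printf("task %d rejected\n", i);
                        break;
                }
                total_bw += task_bw;
                printf("task %d admitted\n", i);
        }
        return 0;
}

With these numbers the capacity limit is 0.95 * 6 = 5.7 CPUs' worth, so the
eighth task is rejected; the old CPU-count based check (0.95 * 8 = 7.6) would
have admitted all eight even though the root domain only provides six CPUs'
worth of capacity.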