Commit 2b4cf585 authored by Linus Torvalds

Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar:
 "A deadline scheduler warning/race fix, and a cfs_period_us quota
  calculation workaround where the real fix looks too involved to merge
  immediately"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/deadline: Correctly handle active 0-lag timers
  sched/fair: Limit sched_cfs_period_timer() loop to avoid hard lockup
parents de3af9a9 1b02cd6a
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -252,7 +252,6 @@ static void task_non_contending(struct task_struct *p)
 	if (dl_entity_is_special(dl_se))
 		return;
 
-	WARN_ON(hrtimer_active(&dl_se->inactive_timer));
 	WARN_ON(dl_se->dl_non_contending);
 
 	zerolag_time = dl_se->deadline -
@@ -269,7 +268,7 @@ static void task_non_contending(struct task_struct *p)
 	 * If the "0-lag time" already passed, decrease the active
 	 * utilization now, instead of starting a timer
 	 */
-	if (zerolag_time < 0) {
+	if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
 		if (dl_task(p))
 			sub_running_bw(dl_se, dl_rq);
 		if (!dl_task(p) || p->state == TASK_DEAD) {
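For context on the first fix: the "0-lag time" is the instant at which the task, consuming its remaining runtime at its reserved bandwidth (dl_runtime / dl_period), would reach zero lag; the inactive_timer is armed to release the task's bandwidth at that instant. The patch drops the WARN_ON and instead treats a still-active timer from a previous deactivation like an already-passed 0-lag time, releasing the bandwidth immediately. Below is a minimal standalone C sketch of that decision with hypothetical numbers; the kernel does this in task_non_contending() using rq_clock() and div64_long():

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical values, all in nanoseconds */
	int64_t deadline   = 10000000;	/* absolute deadline of the task */
	int64_t runtime    =  2000000;	/* remaining runtime */
	int64_t dl_runtime =  3000000;	/* reserved runtime per period */
	int64_t dl_period  = 10000000;	/* reservation period */
	int64_t now        =  5000000;	/* current clock */
	int timer_active   = 0;		/* stand-in for hrtimer_active() */

	/*
	 * 0-lag time: the deadline minus the time needed to consume the
	 * remaining runtime at bandwidth dl_runtime / dl_period.
	 */
	int64_t zerolag = deadline - (runtime * dl_period) / dl_runtime;

	if (zerolag - now < 0 || timer_active)
		printf("release bandwidth now (sub_running_bw)\n");
	else
		printf("arm inactive_timer to fire in %lld ns\n",
		       (long long)(zerolag - now));
	return 0;
}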
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4885,6 +4885,8 @@ static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
 	return HRTIMER_NORESTART;
 }
 
+extern const u64 max_cfs_quota_period;
+
 static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
 {
 	struct cfs_bandwidth *cfs_b =
@@ -4892,6 +4894,7 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
 	unsigned long flags;
 	int overrun;
 	int idle = 0;
+	int count = 0;
 
 	raw_spin_lock_irqsave(&cfs_b->lock, flags);
 	for (;;) {
@@ -4899,6 +4902,28 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
 		if (!overrun)
 			break;
 
+		if (++count > 3) {
+			u64 new, old = ktime_to_ns(cfs_b->period);
+
+			new = (old * 147) / 128; /* ~115% */
+			new = min(new, max_cfs_quota_period);
+
+			cfs_b->period = ns_to_ktime(new);
+
+			/* since max is 1s, this is limited to 1e9^2, which fits in u64 */
+			cfs_b->quota *= new;
+			cfs_b->quota = div64_u64(cfs_b->quota, old);
+
+			pr_warn_ratelimited(
+	"cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us %lld, cfs_quota_us = %lld)\n",
+				smp_processor_id(),
+				div_u64(new, NSEC_PER_USEC),
+				div_u64(cfs_b->quota, NSEC_PER_USEC));
+
+			/* reset count so we don't come right back in here */
+			count = 0;
+		}
+
 		idle = do_sched_cfs_period_timer(cfs_b, overrun, flags);
 	}
 	if (idle)
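The quota/period rescaling above can be checked in isolation. The following standalone C sketch is a userspace approximation, not kernel code, and its starting values are hypothetical: it applies the same ~115% growth factor of 147/128, rescales the quota so the permitted bandwidth quota/period stays constant, and clamps the period at max_cfs_quota_period, which is 1 second in the kernel:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_USEC 1000ULL

/* in the kernel, max_cfs_quota_period is 1 second */
static const uint64_t max_cfs_quota_period = 1000000000ULL;

static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

int main(void)
{
	/* hypothetical starting values: cfs_period_us = 100, cfs_quota_us = 50 */
	uint64_t period = 100 * NSEC_PER_USEC;
	uint64_t quota  = 50 * NSEC_PER_USEC;
	int step;

	for (step = 1; step <= 5; step++) {
		uint64_t old = period;
		uint64_t new = min_u64((old * 147) / 128, max_cfs_quota_period);

		/* rescale quota so quota/period (the allowed bandwidth) is unchanged */
		quota = (quota * new) / old;
		period = new;

		printf("step %d: cfs_period_us = %llu, cfs_quota_us = %llu\n",
		       step,
		       (unsigned long long)(period / NSEC_PER_USEC),
		       (unsigned long long)(quota / NSEC_PER_USEC));
	}
	return 0;
}

Because the period grows geometrically and count is reset after each adjustment, a period too short for the timer to keep up with is enlarged within a few iterations, instead of the loop spinning until the hard-lockup detector fires.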