Commit f707e40d authored by Linus Torvalds

Merge tag 'sched-urgent-2023-10-08' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull misc scheduler fixes from Ingo Molnar:

 - Two EEVDF fixes: one to fix sysctl_sched_base_slice propagation, and
   one to fix an avg_vruntime() corner case.

 - A cpufreq frequency scaling fix

* tag 'sched-urgent-2023-10-08' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  cpufreq: schedutil: Update next_freq when cpufreq_limits change
  sched/eevdf: Fix avg_vruntime()
  sched/eevdf: Also update slice on placement
parents 7e20d344 9e0bc36a
@@ -350,7 +350,8 @@ static void sugov_update_single_freq(struct update_util_data *hook, u64 time,
 	 * Except when the rq is capped by uclamp_max.
 	 */
 	if (!uclamp_rq_is_capped(cpu_rq(sg_cpu->cpu)) &&
-	    sugov_cpu_is_busy(sg_cpu) && next_f < sg_policy->next_freq) {
+	    sugov_cpu_is_busy(sg_cpu) && next_f < sg_policy->next_freq &&
+	    !sg_policy->need_freq_update) {
 		next_f = sg_policy->next_freq;
 
 		/* Restore cached freq as next_freq has changed */
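For context, a minimal userspace sketch (hypothetical types and helper names, not the schedutil implementation) of the decision this hunk changes: the cached next_freq is only restored while no limits update is pending, so a freshly computed frequency that already reflects new cpufreq limits is not overwritten by a stale cached value.

/*
 * Standalone illustration only; struct policy and pick_freq() are
 * simplified stand-ins for the schedutil state and decision.
 */
#include <stdbool.h>
#include <stdio.h>

struct policy {
	unsigned int next_freq;		/* cached frequency, kHz */
	bool need_freq_update;		/* set when cpufreq limits change */
};

static unsigned int pick_freq(struct policy *p, unsigned int next_f, bool busy)
{
	if (busy && next_f < p->next_freq && !p->need_freq_update)
		return p->next_freq;	/* keep the cached, higher frequency */
	return next_f;			/* honour the new request / new limits */
}

int main(void)
{
	struct policy p = { .next_freq = 2000000, .need_freq_update = true };

	/* limits just changed: do not restore the stale cached value */
	printf("%u kHz\n", pick_freq(&p, 1200000, true));	/* 1200000 */
	return 0;
}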
@@ -664,6 +664,10 @@ void avg_vruntime_update(struct cfs_rq *cfs_rq, s64 delta)
 	cfs_rq->avg_vruntime -= cfs_rq->avg_load * delta;
 }
 
+/*
+ * Specifically: avg_runtime() + 0 must result in entity_eligible() := true
+ * For this to be so, the result of this function must have a left bias.
+ */
 u64 avg_vruntime(struct cfs_rq *cfs_rq)
 {
 	struct sched_entity *curr = cfs_rq->curr;
@@ -677,8 +681,12 @@ u64 avg_vruntime(struct cfs_rq *cfs_rq)
 		load += weight;
 	}
 
-	if (load)
+	if (load) {
+		/* sign flips effective floor / ceil */
+		if (avg < 0)
+			avg -= (load - 1);
 		avg = div_s64(avg, load);
+	}
 
 	return cfs_rq->min_vruntime + avg;
 }
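As an aside, a small standalone C program (not part of the patch) showing the rounding behaviour the new branch compensates for: signed division in C truncates toward zero, so for a negative weighted sum the plain division would round up; subtracting (load - 1) first turns the truncation into a floor and preserves the left bias that entity_eligible() depends on.

/*
 * Standalone illustration: -7 / 4 == -1 under C truncation, while the
 * floor is -2. Pre-biasing the numerator for negative values turns
 * truncation into a floor, which is what the avg_vruntime() fix relies on.
 */
#include <stdio.h>

static long long div_floor(long long avg, long long load)
{
	if (avg < 0)
		avg -= (load - 1);	/* sign flips effective floor / ceil */
	return avg / load;
}

int main(void)
{
	long long avg = -7, load = 4;

	printf("truncating: %lld\n", avg / load);		/* -1 */
	printf("floor:      %lld\n", div_floor(avg, load));	/* -2 */
	return 0;
}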
@@ -4919,10 +4927,12 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
 static void
 place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
-	u64 vslice = calc_delta_fair(se->slice, se);
-	u64 vruntime = avg_vruntime(cfs_rq);
+	u64 vslice, vruntime = avg_vruntime(cfs_rq);
 	s64 lag = 0;
 
+	se->slice = sysctl_sched_base_slice;
+	vslice = calc_delta_fair(se->slice, se);
+
 	/*
 	 * Due to how V is constructed as the weighted average of entities,
 	 * adding tasks with positive lag, or removing tasks with negative lag
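Finally, a brief userspace sketch (hypothetical simplified types; calc_delta() is a stand-in for calc_delta_fair()) of the propagation issue the last hunk addresses: if vslice is derived from whatever se->slice happened to hold, a later change to sysctl_sched_base_slice never reaches placement; refreshing the slice first makes the new base slice take effect on the next place_entity().

/*
 * Standalone illustration only; base_slice stands in for
 * sysctl_sched_base_slice and place() for the relevant part of
 * place_entity().
 */
#include <stdio.h>

static unsigned long long base_slice = 3000000ULL;	/* ns */

struct entity {
	unsigned long long slice;
	unsigned long weight;
};

/* stand-in for calc_delta_fair(): scale the slice by a nominal weight of 1024 */
static unsigned long long calc_delta(unsigned long long slice, const struct entity *se)
{
	return slice * 1024 / se->weight;
}

static unsigned long long place(struct entity *se)
{
	se->slice = base_slice;		/* refresh before deriving vslice (the fix) */
	return calc_delta(se->slice, se);
}

int main(void)
{
	struct entity se = { .slice = 750000ULL, .weight = 1024 };

	base_slice = 6000000ULL;	/* base slice raised at runtime */
	printf("vslice = %llu ns\n", place(&se));	/* 6000000: new value propagates */
	return 0;
}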