Commit d07f09a1 authored by Peter Zijlstra, committed by Ingo Molnar

sched/fair: Propagate enqueue flags into place_entity()

This allows place_entity() to consider ENQUEUE_WAKEUP and
ENQUEUE_MIGRATED.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20230531124604.274010996@infradead.org
parent e4ec3318
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4909,7 +4909,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq) {}
 #endif /* CONFIG_SMP */
 
 static void
-place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
+place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
         u64 vslice = calc_delta_fair(se->slice, se);
         u64 vruntime = avg_vruntime(cfs_rq);
@@ -4998,7 +4998,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
          * on average, halfway through their slice, as such start tasks
          * off with half a slice to ease into the competition.
          */
-        if (sched_feat(PLACE_DEADLINE_INITIAL) && initial)
+        if (sched_feat(PLACE_DEADLINE_INITIAL) && (flags & ENQUEUE_INITIAL))
                 vslice /= 2;
 
         /*
@@ -5022,7 +5022,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
          * update_curr().
          */
         if (curr)
-                place_entity(cfs_rq, se, 0);
+                place_entity(cfs_rq, se, flags);
 
         update_curr(cfs_rq);
@@ -5049,7 +5049,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
          * we can place the entity.
          */
         if (!curr)
-                place_entity(cfs_rq, se, 0);
+                place_entity(cfs_rq, se, flags);
 
         account_entity_enqueue(cfs_rq, se);
@@ -12280,7 +12280,7 @@ static void task_fork_fair(struct task_struct *p)
         curr = cfs_rq->curr;
         if (curr)
                 update_curr(cfs_rq);
-        place_entity(cfs_rq, se, 1);
+        place_entity(cfs_rq, se, ENQUEUE_INITIAL);
         rq_unlock(rq, &rf);
 }

--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2199,6 +2199,7 @@ extern const u32 sched_prio_to_wmult[40];
 #else
 #define ENQUEUE_MIGRATED        0x00
 #endif
+#define ENQUEUE_INITIAL         0x80
 
 #define RETRY_TASK              ((void *)-1UL)
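What the extra information buys is easiest to see outside the kernel. The following standalone sketch is an illustration only, not kernel code: struct toy_entity and toy_place_entity() are made-up stand-ins, and the wakeup/migration branch is a hypothetical use of the newly visible flags. Only the half-slice treatment of ENQUEUE_INITIAL and the flag bit values are taken from the patch and the kernel's ENQUEUE_* definitions.

/*
 * Standalone illustration (not kernel code) of why place_entity() wants the
 * enqueue flags: the placement policy can now distinguish a fork
 * (ENQUEUE_INITIAL) from a wakeup or a cross-CPU migration.
 */
#include <stdio.h>
#include <stdint.h>

#define ENQUEUE_WAKEUP    0x01
#define ENQUEUE_MIGRATED  0x40
#define ENQUEUE_INITIAL   0x80   /* added by this patch */

struct toy_entity {
        int64_t vruntime;    /* virtual runtime at which the entity is placed */
        int64_t vdeadline;   /* virtual deadline = vruntime + (part of) slice */
        int64_t vslice;      /* requested virtual slice */
};

/* Place @se around @avg_vruntime, consulting the enqueue flags. */
static void toy_place_entity(struct toy_entity *se, int64_t avg_vruntime, int flags)
{
        int64_t vslice = se->vslice;

        /* Forked tasks ease in with half a slice, as in the patched code path. */
        if (flags & ENQUEUE_INITIAL)
                vslice /= 2;

        /*
         * Hypothetical use of the flags: a wakeup or an incoming migration
         * could be placed differently from a plain re-enqueue. Before this
         * patch, place_entity() could not tell these cases apart.
         */
        if (flags & (ENQUEUE_WAKEUP | ENQUEUE_MIGRATED))
                printf("wakeup/migration placement path taken\n");

        se->vruntime  = avg_vruntime;
        se->vdeadline = se->vruntime + vslice;
}

int main(void)
{
        struct toy_entity se = { .vruntime = 0, .vdeadline = 0, .vslice = 3000 };

        /* Fork-time placement: deadline only half a slice past the average. */
        toy_place_entity(&se, 1000, ENQUEUE_INITIAL);
        printf("fork:   vruntime=%lld vdeadline=%lld\n",
               (long long)se.vruntime, (long long)se.vdeadline);

        /* Wakeup after migration: full slice. */
        toy_place_entity(&se, 2000, ENQUEUE_WAKEUP | ENQUEUE_MIGRATED);
        printf("wakeup: vruntime=%lld vdeadline=%lld\n",
               (long long)se.vruntime, (long long)se.vdeadline);
        return 0;
}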