Commit 94b548a1 authored by Peter Zijlstra, committed by Ingo Molnar

sched: Simplify set_user_nice()

Use guards to reduce gotos and simplify control flow.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 0bb80ecc
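
The "guards" referred to here are the scope-based cleanup helpers from include/linux/cleanup.h, which are built on the compiler's cleanup attribute: a variable annotated with __attribute__((cleanup(fn))) has fn() run automatically whenever the variable goes out of scope, on every return path. A minimal userspace sketch of that underlying mechanism, using a pthread mutex purely for illustration (none of this is kernel code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Cleanup handler: receives a pointer to the annotated variable. */
static void unlock_cleanup(pthread_mutex_t **l)
{
	pthread_mutex_unlock(*l);
	printf("lock released\n");
}

static int do_work(int bail_early)
{
	/* The compiler calls unlock_cleanup(&guard) at every scope exit. */
	__attribute__((cleanup(unlock_cleanup))) pthread_mutex_t *guard = &lock;

	pthread_mutex_lock(guard);

	if (bail_early)
		return -1;	/* no goto/unlock boilerplate needed here... */

	printf("did work\n");
	return 0;		/* ...or here */
}

int main(void)
{
	do_work(1);
	do_work(0);
	return 0;
}

Because the unlock runs at every scope exit, the explicit goto out_unlock / task_rq_unlock() pairing that this commit removes becomes unnecessary.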
kernel/sched/core.c

@@ -7187,9 +7187,8 @@ static inline int rt_effective_prio(struct task_struct *p, int prio)
 void set_user_nice(struct task_struct *p, long nice)
 {
 	bool queued, running;
-	int old_prio;
-	struct rq_flags rf;
 	struct rq *rq;
+	int old_prio;
 
 	if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
 		return;
@@ -7197,7 +7196,9 @@ void set_user_nice(struct task_struct *p, long nice)
 	 * We have to be careful, if called from sys_setpriority(),
 	 * the task might be in the middle of scheduling on another CPU.
 	 */
-	rq = task_rq_lock(p, &rf);
+	CLASS(task_rq_lock, rq_guard)(p);
+	rq = rq_guard.rq;
+
 	update_rq_clock(rq);
 
 	/*
@@ -7208,8 +7209,9 @@ void set_user_nice(struct task_struct *p, long nice)
 	 */
 	if (task_has_dl_policy(p) || task_has_rt_policy(p)) {
 		p->static_prio = NICE_TO_PRIO(nice);
-		goto out_unlock;
+		return;
 	}
+
 	queued = task_on_rq_queued(p);
 	running = task_current(rq, p);
 	if (queued)
@@ -7232,9 +7234,6 @@ void set_user_nice(struct task_struct *p, long nice)
 	 * lowered its priority, then reschedule its CPU:
 	 */
 	p->sched_class->prio_changed(rq, p, old_prio);
-
-out_unlock:
-	task_rq_unlock(rq, p, &rf);
 }
 EXPORT_SYMBOL(set_user_nice);
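
With the guard in place, both the early return for RT/DL tasks and the implicit return at the end of the function drop the rq lock automatically, which is what allows the out_unlock label to go away. Conceptually, the CLASS() line declares a guard object whose constructor takes the lock and whose destructor releases it. A simplified sketch of roughly what the use site expands to (the class_task_rq_lock_* names follow the cleanup.h naming convention, but this is an approximation, not the literal preprocessor output):

	/* CLASS(task_rq_lock, rq_guard)(p); becomes, roughly: */
	class_task_rq_lock_t rq_guard
		__attribute__((cleanup(class_task_rq_lock_destructor))) =
		class_task_rq_lock_constructor(p);
	rq = rq_guard.rq;

	/*
	 * ... rest of the function body ... every return now implicitly
	 * runs class_task_rq_lock_destructor(&rq_guard), i.e. the
	 * task_rq_unlock() that the out_unlock label used to provide.
	 */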
kernel/sched/sched.h

@@ -1658,6 +1658,11 @@ task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
 	raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
 }
 
+DEFINE_LOCK_GUARD_1(task_rq_lock, struct task_struct,
+		    _T->rq = task_rq_lock(_T->lock, &_T->rf),
+		    task_rq_unlock(_T->rq, _T->lock, &_T->rf),
+		    struct rq *rq; struct rq_flags rf)
+
 static inline void
 rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
 	__acquires(rq->lock)
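
For reference, DEFINE_LOCK_GUARD_1() generates the guard class used above: a struct carrying a pointer to the guarded object plus the extra members named in the macro's last argument, together with constructor/destructor helpers that run the supplied lock and unlock expressions with _T bound to the guard. A simplified sketch of what this particular invocation produces, modeled on include/linux/cleanup.h (details such as the destructor's NULL-lock check are omitted):

typedef struct {
	struct task_struct *lock;	/* the guarded object (_T->lock) */
	struct rq *rq;			/* extra members from the macro's */
	struct rq_flags rf;		/* last argument */
} class_task_rq_lock_t;

/* Constructor: runs the lock expression with _T pointing at the new guard. */
static inline class_task_rq_lock_t
class_task_rq_lock_constructor(struct task_struct *l)
{
	class_task_rq_lock_t _t = { .lock = l }, *_T = &_t;

	_T->rq = task_rq_lock(_T->lock, &_T->rf);
	return _t;
}

/* Destructor: invoked via __attribute__((cleanup)) when the guard dies. */
static inline void class_task_rq_lock_destructor(class_task_rq_lock_t *_T)
{
	task_rq_unlock(_T->rq, _T->lock, &_T->rf);
}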