Commit 92c2ec5b authored by Peter Zijlstra, committed by Ingo Molnar

sched: Simplify sched_{set,get}affinity()

Use guards to reduce gotos and simplify control flow.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent febe162d
...@@ -8347,39 +8347,24 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) ...@@ -8347,39 +8347,24 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
{ {
struct affinity_context ac; struct affinity_context ac;
struct cpumask *user_mask; struct cpumask *user_mask;
struct task_struct *p;
int retval; int retval;
rcu_read_lock(); CLASS(find_get_task, p)(pid);
if (!p)
p = find_process_by_pid(pid);
if (!p) {
rcu_read_unlock();
return -ESRCH; return -ESRCH;
}
/* Prevent p going away */
get_task_struct(p);
rcu_read_unlock();
if (p->flags & PF_NO_SETAFFINITY) { if (p->flags & PF_NO_SETAFFINITY)
retval = -EINVAL; return -EINVAL;
goto out_put_task;
}
if (!check_same_owner(p)) { if (!check_same_owner(p)) {
rcu_read_lock(); guard(rcu)();
if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) { if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE))
rcu_read_unlock(); return -EPERM;
retval = -EPERM;
goto out_put_task;
}
rcu_read_unlock();
} }
retval = security_task_setscheduler(p); retval = security_task_setscheduler(p);
if (retval) if (retval)
goto out_put_task; return retval;
/* /*
* With non-SMP configs, user_cpus_ptr/user_mask isn't used and * With non-SMP configs, user_cpus_ptr/user_mask isn't used and
...@@ -8389,8 +8374,7 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) ...@@ -8389,8 +8374,7 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
if (user_mask) { if (user_mask) {
cpumask_copy(user_mask, in_mask); cpumask_copy(user_mask, in_mask);
} else if (IS_ENABLED(CONFIG_SMP)) { } else if (IS_ENABLED(CONFIG_SMP)) {
retval = -ENOMEM; return -ENOMEM;
goto out_put_task;
} }
ac = (struct affinity_context){ ac = (struct affinity_context){
...@@ -8402,8 +8386,6 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) ...@@ -8402,8 +8386,6 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
retval = __sched_setaffinity(p, &ac); retval = __sched_setaffinity(p, &ac);
kfree(ac.user_mask); kfree(ac.user_mask);
out_put_task:
put_task_struct(p);
return retval; return retval;
} }
...@@ -8445,28 +8427,21 @@ SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, ...@@ -8445,28 +8427,21 @@ SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
/**
 * sched_getaffinity - fetch a task's CPU affinity mask
 * @pid:  PID of the task to query (0 queries the current task via
 *        find_process_by_pid()).
 * @mask: output; set to the task's allowed CPUs intersected with
 *        cpu_active_mask.
 *
 * Returns 0 on success, -ESRCH if no such task exists, or a negative
 * error code from the LSM security hook.
 */
long sched_getaffinity(pid_t pid, struct cpumask *mask)
{
	struct task_struct *p;
	int retval;

	/* RCU keeps the looked-up task alive; dropped automatically at scope exit. */
	guard(rcu)();
	p = find_process_by_pid(pid);
	if (!p)
		return -ESRCH;

	retval = security_task_getscheduler(p);
	if (retval)
		return retval;

	/*
	 * pi_lock stabilizes p->cpus_mask against a concurrent setaffinity;
	 * report only CPUs that are currently active.
	 */
	guard(raw_spinlock_irqsave)(&p->pi_lock);
	cpumask_and(mask, &p->cpus_mask, cpu_active_mask);

	return 0;
}
/** /**
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment