Commit 3a5c359a authored by Andi Kleen, committed by Ingo Molnar

sched: cleanup: remove unnecessary gotos

Replace loops implemented with gotos with real loops.
Replace the "err = ...; goto x; x: return err;" pattern with a direct "return ...;".

No functional changes.
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent d274a4ce
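The hunks below apply these two rewrites mechanically across the scheduler code. As a standalone reference, here is a compilable sketch of both patterns. It is illustrative only and not part of the patch: struct rq, task_rq(), rqs and cur are simplified stand-ins for the per-CPU runqueue lookup, pthread mutexes stand in for the runqueue spinlock, and the check_pid_*() helpers mimic the error-return cleanup in the syscall paths.

#include <pthread.h>
#include <stdio.h>

/* Illustrative stand-ins, not kernel code: two "runqueues" and a task
 * that may migrate between them (cur names the one it is on now). */
struct rq { pthread_mutex_t lock; };
static struct rq rqs[2] = {
        { PTHREAD_MUTEX_INITIALIZER },
        { PTHREAD_MUTEX_INITIALIZER },
};
static volatile int cur;

static struct rq *task_rq(void)
{
        return &rqs[cur];
}

/* Old style: a retry loop built from a label and a goto. */
static struct rq *rq_lock_goto(void)
{
        struct rq *rq;

repeat_lock_task:
        rq = task_rq();
        pthread_mutex_lock(&rq->lock);
        if (rq != task_rq()) {          /* task moved while we waited */
                pthread_mutex_unlock(&rq->lock);
                goto repeat_lock_task;
        }
        return rq;
}

/* New style: the same retry expressed as a real loop. */
static struct rq *rq_lock_loop(void)
{
        for (;;) {
                struct rq *rq = task_rq();

                pthread_mutex_lock(&rq->lock);
                if (rq == task_rq())
                        return rq;      /* still the right rq: done */
                pthread_mutex_unlock(&rq->lock);
        }
}

/* The second cleanup: "err = ...; goto out; out: return err;" ... */
static int check_pid_goto(int pid)
{
        int retval = -22;               /* -EINVAL */

        if (pid < 0)
                goto out_nounlock;
        retval = 0;
out_nounlock:
        return retval;
}

/* ... becomes a direct return. */
static int check_pid_direct(int pid)
{
        if (pid < 0)
                return -22;             /* -EINVAL */
        return 0;
}

int main(void)
{
        pthread_mutex_unlock(&rq_lock_goto()->lock);
        pthread_mutex_unlock(&rq_lock_loop()->lock);
        printf("%d %d\n", check_pid_goto(-1), check_pid_direct(-1));
        return 0;
}

The loop form keeps the lock/unlock pairing in one visible block and drops the backward goto and the error-label indirection; as the changelog says, behaviour is unchanged.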
@@ -562,16 +562,13 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 static inline struct rq *__task_rq_lock(struct task_struct *p)
         __acquires(rq->lock)
 {
-        struct rq *rq;
-
-repeat_lock_task:
-        rq = task_rq(p);
-        spin_lock(&rq->lock);
-        if (unlikely(rq != task_rq(p))) {
-                spin_unlock(&rq->lock);
-                goto repeat_lock_task;
+        for (;;) {
+                struct rq *rq = task_rq(p);
+                spin_lock(&rq->lock);
+                if (likely(rq == task_rq(p)))
+                        return rq;
+                spin_unlock(&rq->lock);
         }
-        return rq;
 }
 
 /*
@@ -584,15 +581,14 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
 {
         struct rq *rq;
 
-repeat_lock_task:
-        local_irq_save(*flags);
-        rq = task_rq(p);
-        spin_lock(&rq->lock);
-        if (unlikely(rq != task_rq(p))) {
+        for (;;) {
+                local_irq_save(*flags);
+                rq = task_rq(p);
+                spin_lock(&rq->lock);
+                if (likely(rq == task_rq(p)))
+                        return rq;
                 spin_unlock_irqrestore(&rq->lock, *flags);
-                goto repeat_lock_task;
         }
-        return rq;
 }
 
 static void __task_rq_unlock(struct rq *rq)
@@ -1083,69 +1079,71 @@ void wait_task_inactive(struct task_struct *p)
         int running, on_rq;
         struct rq *rq;
 
-repeat:
+        for (;;) {
         /*
          * We do the initial early heuristics without holding
          * any task-queue locks at all. We'll only try to get
          * the runqueue lock when things look like they will
          * work out!
          */
         rq = task_rq(p);
 
         /*
          * If the task is actively running on another CPU
          * still, just relax and busy-wait without holding
          * any locks.
          *
          * NOTE! Since we don't hold any locks, it's not
          * even sure that "rq" stays as the right runqueue!
          * But we don't care, since "task_running()" will
          * return false if the runqueue has changed and p
          * is actually now running somewhere else!
          */
         while (task_running(rq, p))
                 cpu_relax();
 
         /*
          * Ok, time to look more closely! We need the rq
          * lock now, to be *sure*. If we're wrong, we'll
          * just go back and repeat.
          */
         rq = task_rq_lock(p, &flags);
         running = task_running(rq, p);
         on_rq = p->se.on_rq;
         task_rq_unlock(rq, &flags);
 
         /*
          * Was it really running after all now that we
          * checked with the proper locks actually held?
          *
          * Oops. Go back and try again..
          */
         if (unlikely(running)) {
                 cpu_relax();
-                goto repeat;
+                continue;
         }
 
         /*
          * It's not enough that it's not actively running,
          * it must be off the runqueue _entirely_, and not
          * preempted!
          *
          * So if it wa still runnable (but just not actively
          * running right now), it's preempted, and we should
          * yield - it could be a while.
          */
         if (unlikely(on_rq)) {
                 schedule_timeout_uninterruptible(1);
-                goto repeat;
+                continue;
         }
 
         /*
          * Ahh, all good. It wasn't running, and it wasn't
          * runnable, which means that it will never become
          * running in the future either. We're all done!
          */
+                break;
+        }
 }
 
 /***
@@ -1236,7 +1234,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
 
                 /* Skip over this group if it has no CPUs allowed */
                 if (!cpus_intersects(group->cpumask, p->cpus_allowed))
-                        goto nextgroup;
+                        continue;
 
                 local_group = cpu_isset(this_cpu, group->cpumask);
@@ -1264,9 +1262,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
                         min_load = avg_load;
                         idlest = group;
                 }
-nextgroup:
-                group = group->next;
-        } while (group != sd->groups);
+        } while (group = group->next, group != sd->groups);
 
         if (!idlest || 100*this_load < imbalance*min_load)
                 return NULL;
@@ -3517,27 +3513,30 @@ asmlinkage void __sched preempt_schedule(void)
         if (likely(ti->preempt_count || irqs_disabled()))
                 return;
 
-need_resched:
-        add_preempt_count(PREEMPT_ACTIVE);
-        /*
-         * We keep the big kernel semaphore locked, but we
-         * clear ->lock_depth so that schedule() doesnt
-         * auto-release the semaphore:
-         */
+        do {
+                add_preempt_count(PREEMPT_ACTIVE);
+
+                /*
+                 * We keep the big kernel semaphore locked, but we
+                 * clear ->lock_depth so that schedule() doesnt
+                 * auto-release the semaphore:
+                 */
 #ifdef CONFIG_PREEMPT_BKL
-        saved_lock_depth = task->lock_depth;
-        task->lock_depth = -1;
+                saved_lock_depth = task->lock_depth;
+                task->lock_depth = -1;
 #endif
-        schedule();
+                schedule();
 #ifdef CONFIG_PREEMPT_BKL
-        task->lock_depth = saved_lock_depth;
+                task->lock_depth = saved_lock_depth;
 #endif
-        sub_preempt_count(PREEMPT_ACTIVE);
+                sub_preempt_count(PREEMPT_ACTIVE);
 
-        /* we could miss a preemption opportunity between schedule and now */
-        barrier();
-        if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
-                goto need_resched;
+                /*
+                 * Check again in case we missed a preemption opportunity
+                 * between schedule and now.
+                 */
+                barrier();
+        } while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
 }
 EXPORT_SYMBOL(preempt_schedule);
@@ -3557,29 +3556,32 @@ asmlinkage void __sched preempt_schedule_irq(void)
         /* Catch callers which need to be fixed */
         BUG_ON(ti->preempt_count || !irqs_disabled());
 
-need_resched:
-        add_preempt_count(PREEMPT_ACTIVE);
-        /*
-         * We keep the big kernel semaphore locked, but we
-         * clear ->lock_depth so that schedule() doesnt
-         * auto-release the semaphore:
-         */
+        do {
+                add_preempt_count(PREEMPT_ACTIVE);
+
+                /*
+                 * We keep the big kernel semaphore locked, but we
+                 * clear ->lock_depth so that schedule() doesnt
+                 * auto-release the semaphore:
+                 */
 #ifdef CONFIG_PREEMPT_BKL
-        saved_lock_depth = task->lock_depth;
-        task->lock_depth = -1;
+                saved_lock_depth = task->lock_depth;
+                task->lock_depth = -1;
 #endif
-        local_irq_enable();
-        schedule();
-        local_irq_disable();
+                local_irq_enable();
+                schedule();
+                local_irq_disable();
 #ifdef CONFIG_PREEMPT_BKL
-        task->lock_depth = saved_lock_depth;
+                task->lock_depth = saved_lock_depth;
 #endif
-        sub_preempt_count(PREEMPT_ACTIVE);
+                sub_preempt_count(PREEMPT_ACTIVE);
 
-        /* we could miss a preemption opportunity between schedule and now */
-        barrier();
-        if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
-                goto need_resched;
+                /*
+                 * Check again in case we missed a preemption opportunity
+                 * between schedule and now.
+                 */
+                barrier();
+        } while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
 }
 
 #endif /* CONFIG_PREEMPT */
@@ -4324,10 +4326,10 @@ asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param)
 asmlinkage long sys_sched_getscheduler(pid_t pid)
 {
         struct task_struct *p;
-        int retval = -EINVAL;
+        int retval;
 
         if (pid < 0)
-                goto out_nounlock;
+                return -EINVAL;
 
         retval = -ESRCH;
         read_lock(&tasklist_lock);
@@ -4338,8 +4340,6 @@ asmlinkage long sys_sched_getscheduler(pid_t pid)
                 retval = p->policy;
         }
         read_unlock(&tasklist_lock);
-
-out_nounlock:
         return retval;
 }
@@ -4352,10 +4352,10 @@ asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
 {
         struct sched_param lp;
         struct task_struct *p;
-        int retval = -EINVAL;
+        int retval;
 
         if (!param || pid < 0)
-                goto out_nounlock;
+                return -EINVAL;
 
         read_lock(&tasklist_lock);
         p = find_process_by_pid(pid);
@@ -4375,7 +4375,6 @@ asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
          */
         retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
 
-out_nounlock:
         return retval;
 
 out_unlock:
@@ -4731,11 +4730,11 @@ long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
 {
         struct task_struct *p;
         unsigned int time_slice;
-        int retval = -EINVAL;
+        int retval;
         struct timespec t;
 
         if (pid < 0)
-                goto out_nounlock;
+                return -EINVAL;
 
         retval = -ESRCH;
         read_lock(&tasklist_lock);
@@ -4763,8 +4762,8 @@ long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
         read_unlock(&tasklist_lock);
         jiffies_to_timespec(time_slice, &t);
         retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
-out_nounlock:
         return retval;
+
 out_unlock:
         read_unlock(&tasklist_lock);
         return retval;
@@ -5070,35 +5069,34 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
         struct rq *rq;
         int dest_cpu;
 
-restart:
+        do {
         /* On same node? */
         mask = node_to_cpumask(cpu_to_node(dead_cpu));
         cpus_and(mask, mask, p->cpus_allowed);
         dest_cpu = any_online_cpu(mask);
 
         /* On any allowed CPU? */
         if (dest_cpu == NR_CPUS)
                 dest_cpu = any_online_cpu(p->cpus_allowed);
 
         /* No more Mr. Nice Guy. */
         if (dest_cpu == NR_CPUS) {
                 rq = task_rq_lock(p, &flags);
                 cpus_setall(p->cpus_allowed);
                 dest_cpu = any_online_cpu(p->cpus_allowed);
                 task_rq_unlock(rq, &flags);
 
                 /*
                  * Don't tell them about moving exiting tasks or
                  * kernel threads (both mm NULL), since they never
                  * leave kernel.
                  */
                 if (p->mm && printk_ratelimit())
                         printk(KERN_INFO "process %d (%s) no "
                                 "longer affine to cpu%d\n",
                                 p->pid, p->comm, dead_cpu);
         }
-        if (!__migrate_task(p, dead_cpu, dest_cpu))
-                goto restart;
+        } while (!__migrate_task(p, dead_cpu, dest_cpu));
 }
 
 /*
@@ -5913,24 +5911,23 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
         if (!sg)
                 return;
-next_sg:
+        do {
         for_each_cpu_mask(j, sg->cpumask) {
                 struct sched_domain *sd;
 
                 sd = &per_cpu(phys_domains, j);
                 if (j != first_cpu(sd->groups->cpumask)) {
                         /*
                          * Only add "power" once for each
                          * physical package.
                          */
                         continue;
                 }
 
                 sg_inc_cpu_power(sg, sd->groups->__cpu_power);
         }
         sg = sg->next;
-        if (sg != group_head)
-                goto next_sg;
+        } while (sg != group_head);
 }
 #endif