Commit 95cdf3b7 authored by Ingo Molnar, committed by Linus Torvalds

[PATCH] sched cleanups

Whitespace cleanups.
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent da5a5522
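
All of the hunks below are pure whitespace/style fixes, applying two conventions
from the kernel's Documentation/CodingStyle: the '*' in a pointer declaration
binds to the identifier rather than the type (no space after the '*'), and
over-long function signatures are wrapped so lines stay within 80 columns.
A minimal sketch of both rules (illustrative only, not a hunk from this patch):

	/* Pointer style: '*' hugs the name, not the type. */
	void wait_task_inactive(task_t * p);	/* old style    */
	void wait_task_inactive(task_t *p);	/* kernel style */

	/* Long signatures: either break the parameter list and align the
	 * continuation, or put the return type on its own line. */
	static int
	find_idlest_cpu(struct sched_group *group, struct task_struct *p,
			int this_cpu);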
@@ -875,7 +875,7 @@ static int migrate_task(task_t *p, int dest_cpu, migration_req_t *req)
  * smp_call_function() if an IPI is sent by the same process we are
  * waiting to become inactive.
  */
-void wait_task_inactive(task_t * p)
+void wait_task_inactive(task_t *p)
 {
 	unsigned long flags;
 	runqueue_t *rq;
@@ -1007,8 +1007,8 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
 /*
  * find_idlest_queue - find the idlest runqueue among the cpus in group.
  */
-static int find_idlest_cpu(struct sched_group *group,
-			struct task_struct *p, int this_cpu)
+static int
+find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 {
 	cpumask_t tmp;
 	unsigned long load, min_load = ULONG_MAX;
@@ -1136,7 +1136,7 @@ static inline int wake_idle(int cpu, task_t *p)
  *
  * returns failure only if the task is already active.
  */
-static int try_to_wake_up(task_t * p, unsigned int state, int sync)
+static int try_to_wake_up(task_t *p, unsigned int state, int sync)
 {
 	int cpu, this_cpu, success = 0;
 	unsigned long flags;
@@ -1283,7 +1283,7 @@ static int try_to_wake_up(task_t * p, unsigned int state, int sync)
 	return success;
 }
-int fastcall wake_up_process(task_t * p)
+int fastcall wake_up_process(task_t *p)
 {
 	return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED |
 				 TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
@@ -1362,7 +1362,7 @@ void fastcall sched_fork(task_t *p, int clone_flags)
  * that must be done for every newly created context, then puts the task
  * on the runqueue and wakes it.
  */
-void fastcall wake_up_new_task(task_t * p, unsigned long clone_flags)
+void fastcall wake_up_new_task(task_t *p, unsigned long clone_flags)
 {
 	unsigned long flags;
 	int this_cpu, cpu;
@@ -1445,7 +1445,7 @@ void fastcall wake_up_new_task(task_t * p, unsigned long clone_flags)
  * artificially, because any timeslice recovered here
  * was given away by the parent in the first place.)
  */
-void fastcall sched_exit(task_t * p)
+void fastcall sched_exit(task_t *p)
 {
 	unsigned long flags;
 	runqueue_t *rq;
@@ -1766,7 +1766,8 @@ void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p,
  */
 static inline
 int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu,
-		     struct sched_domain *sd, enum idle_type idle, int *all_pinned)
+		     struct sched_domain *sd, enum idle_type idle,
+		     int *all_pinned)
 {
 	/*
 	 * We do not migrate tasks that are:
@@ -3058,7 +3059,8 @@ asmlinkage void __sched preempt_schedule_irq(void)
 #endif /* CONFIG_PREEMPT */
-int default_wake_function(wait_queue_t *curr, unsigned mode, int sync, void *key)
+int default_wake_function(wait_queue_t *curr, unsigned mode, int sync,
+			  void *key)
 {
 	task_t *p = curr->private;
 	return try_to_wake_up(p, mode, sync);
@@ -3132,7 +3134,8 @@ void fastcall __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
  *
  * On UP it can prevent extra preemption.
  */
-void fastcall __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
+void fastcall
+__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
 {
 	unsigned long flags;
 	int sync = 1;
@@ -3323,7 +3326,8 @@ void fastcall __sched interruptible_sleep_on(wait_queue_head_t *q)
 EXPORT_SYMBOL(interruptible_sleep_on);
-long fastcall __sched interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
+long fastcall __sched
+interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
 {
 	SLEEP_ON_VAR
@@ -3542,7 +3546,8 @@ static void __setscheduler(struct task_struct *p, int policy, int prio)
  * @policy: new policy.
  * @param: structure containing the new RT priority.
  */
-int sched_setscheduler(struct task_struct *p, int policy, struct sched_param *param)
+int sched_setscheduler(struct task_struct *p, int policy,
+		       struct sched_param *param)
 {
 	int retval;
 	int oldprio, oldpolicy = -1;
@@ -3625,7 +3630,8 @@ int sched_setscheduler(struct task_struct *p, int policy, struct sched_param *pa
 }
 EXPORT_SYMBOL_GPL(sched_setscheduler);
-static int do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
+static int
+do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
 {
 	int retval;
 	struct sched_param lparam;
@@ -3956,7 +3962,7 @@ EXPORT_SYMBOL(cond_resched);
  * operations here to prevent schedule() from being called twice (once via
  * spin_unlock(), once by hand).
  */
-int cond_resched_lock(spinlock_t * lock)
+int cond_resched_lock(spinlock_t *lock)
 {
 	int ret = 0;
@@ -4139,7 +4145,7 @@ static inline struct task_struct *younger_sibling(struct task_struct *p)
 	return list_entry(p->sibling.next,struct task_struct,sibling);
 }
-static void show_task(task_t * p)
+static void show_task(task_t *p)
 {
 	task_t *relative;
 	unsigned state;
@@ -4165,7 +4171,7 @@ static void show_task(task_t * p)
 #endif
 #ifdef CONFIG_DEBUG_STACK_USAGE
 	{
-		unsigned long * n = (unsigned long *) (p->thread_info+1);
+		unsigned long *n = (unsigned long *) (p->thread_info+1);
 		while (!*n)
 			n++;
 		free = (unsigned long) n - (unsigned long)(p->thread_info+1);
@@ -4374,7 +4380,7 @@ static void __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
  * thread migration by bumping thread off CPU then 'pushing' onto
  * another runqueue.
  */
-static int migration_thread(void * data)
+static int migration_thread(void *data)
 {
 	runqueue_t *rq;
 	int cpu = (long)data;
...