sched.c 70.7 KB
Newer Older
Linus Torvalds's avatar
Linus Torvalds committed
1
/*
2
 *  kernel/sched.c
Linus Torvalds's avatar
Linus Torvalds committed
3 4 5
 *
 *  Kernel scheduler and related syscalls
 *
6
 *  Copyright (C) 1991-2002  Linus Torvalds
Linus Torvalds's avatar
Linus Torvalds committed
7 8
 *
 *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
9
 *		make semaphores SMP safe
Linus Torvalds's avatar
Linus Torvalds committed
10 11
 *  1998-11-19	Implemented schedule_timeout() and related stuff
 *		by Andrea Arcangeli
12
 *  2002-01-04	New ultra-scalable O(1) scheduler by Ingo Molnar:
13 14
 *		hybrid priority-list and round-robin design with
 *		an array-switch method of distributing timeslices
Ingo Molnar's avatar
Ingo Molnar committed
15 16
 *		and per-CPU runqueues.  Cleanups and useful suggestions
 *		by Davide Libenzi, preemptible kernel bits by Robert Love.
17
 *  2003-09-03	Interactivity tuning by Con Kolivas.
Linus Torvalds's avatar
Linus Torvalds committed
18 19 20
 */

#include <linux/mm.h>
21
#include <linux/module.h>
Linus Torvalds's avatar
Linus Torvalds committed
22
#include <linux/nmi.h>
Linus Torvalds's avatar
Linus Torvalds committed
23
#include <linux/init.h>
24
#include <asm/uaccess.h>
25
#include <linux/highmem.h>
Linus Torvalds's avatar
Linus Torvalds committed
26
#include <linux/smp_lock.h>
27
#include <asm/mmu_context.h>
Linus Torvalds's avatar
Linus Torvalds committed
28
#include <linux/interrupt.h>
29
#include <linux/completion.h>
Ingo Molnar's avatar
Ingo Molnar committed
30
#include <linux/kernel_stat.h>
31 32
#include <linux/security.h>
#include <linux/notifier.h>
33
#include <linux/suspend.h>
34
#include <linux/blkdev.h>
35
#include <linux/delay.h>
36
#include <linux/timer.h>
37
#include <linux/rcupdate.h>
38
#include <linux/cpu.h>
39
#include <linux/percpu.h>
Linus Torvalds's avatar
Linus Torvalds committed
40

41 42 43 44 45 46
#ifdef CONFIG_NUMA
#define cpu_to_node_mask(cpu) node_to_cpumask(cpu_to_node(cpu))
#else
#define cpu_to_node_mask(cpu) (cpu_online_map)
#endif

Ingo Molnar's avatar
Ingo Molnar committed
47 48
/*
 * Convert user-nice values [ -20 ... 0 ... 19 ]
49
 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
Ingo Molnar's avatar
Ingo Molnar committed
50 51 52 53 54 55 56 57 58 59 60 61 62 63
 * and back.
 */
#define NICE_TO_PRIO(nice)	(MAX_RT_PRIO + (nice) + 20)
#define PRIO_TO_NICE(prio)	((prio) - MAX_RT_PRIO - 20)
#define TASK_NICE(p)		PRIO_TO_NICE((p)->static_prio)

/*
 * 'User priority' is the nice value converted to something we
 * can work with better when scaling various scheduler parameters,
 * it's a [ 0 ... 39 ] range.
 */
#define USER_PRIO(p)		((p)-MAX_RT_PRIO)
#define TASK_USER_PRIO(p)	USER_PRIO((p)->static_prio)
#define MAX_USER_PRIO		(USER_PRIO(MAX_PRIO))
64 65 66 67 68 69 70 71
#define AVG_TIMESLICE	(MIN_TIMESLICE + ((MAX_TIMESLICE - MIN_TIMESLICE) *\
			(MAX_PRIO-1-NICE_TO_PRIO(0))/(MAX_USER_PRIO - 1)))

/*
 * Some helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)	((TIME) / (1000000000 / HZ))
#define JIFFIES_TO_NS(TIME)	((TIME) * (1000000000 / HZ))
Ingo Molnar's avatar
Ingo Molnar committed
72 73 74 75

/*
 * These are the 'tuning knobs' of the scheduler:
 *
76 77
 * Minimum timeslice is 10 msecs, default timeslice is 100 msecs,
 * maximum timeslice is 200 msecs. Timeslices get refilled after
Ingo Molnar's avatar
Ingo Molnar committed
78 79 80
 * they expire.
 */
#define MIN_TIMESLICE		( 10 * HZ / 1000)
81
#define MAX_TIMESLICE		(200 * HZ / 1000)
82 83
#define ON_RUNQUEUE_WEIGHT	30
#define CHILD_PENALTY		95
Ingo Molnar's avatar
Ingo Molnar committed
84 85 86
#define PARENT_PENALTY		100
#define EXIT_WEIGHT		3
#define PRIO_BONUS_RATIO	25
87
#define MAX_BONUS		(MAX_USER_PRIO * PRIO_BONUS_RATIO / 100)
Ingo Molnar's avatar
Ingo Molnar committed
88
#define INTERACTIVE_DELTA	2
89 90 91
#define MAX_SLEEP_AVG		(AVG_TIMESLICE * MAX_BONUS)
#define STARVATION_LIMIT	(MAX_SLEEP_AVG)
#define NS_MAX_SLEEP_AVG	(JIFFIES_TO_NS(MAX_SLEEP_AVG))
92
#define NODE_THRESHOLD		125
93
#define CREDIT_LIMIT		100
Ingo Molnar's avatar
Ingo Molnar committed
94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122

/*
 * If a task is 'interactive' then we reinsert it in the active
 * array after it has expired its current timeslice. (it will not
 * continue to run immediately, it will still roundrobin with
 * other interactive tasks.)
 *
 * This part scales the interactivity limit depending on niceness.
 *
 * We scale it linearly, offset by the INTERACTIVE_DELTA delta.
 * Here are a few examples of different nice levels:
 *
 *  TASK_INTERACTIVE(-20): [1,1,1,1,1,1,1,1,1,0,0]
 *  TASK_INTERACTIVE(-10): [1,1,1,1,1,1,1,0,0,0,0]
 *  TASK_INTERACTIVE(  0): [1,1,1,1,0,0,0,0,0,0,0]
 *  TASK_INTERACTIVE( 10): [1,1,0,0,0,0,0,0,0,0,0]
 *  TASK_INTERACTIVE( 19): [0,0,0,0,0,0,0,0,0,0,0]
 *
 * (the X axis represents the possible -5 ... 0 ... +5 dynamic
 *  priority range a task can explore, a value of '1' means the
 *  task is rated interactive.)
 *
 * Ie. nice +19 tasks can never get 'interactive' enough to be
 * reinserted into the active array. And only heavily CPU-hog nice -20
 * tasks will be expired. Default nice 0 tasks are somewhere between,
 * it takes some effort for them to get interactive, but it's not
 * too hard.
 */

123 124 125 126 127 128 129 130 131 132 133 134 135
#define CURRENT_BONUS(p) \
	(NS_TO_JIFFIES((p)->sleep_avg) * MAX_BONUS / \
		MAX_SLEEP_AVG)

#ifdef CONFIG_SMP
#define TIMESLICE_GRANULARITY(p)	(MIN_TIMESLICE * \
		(1 << (((MAX_BONUS - CURRENT_BONUS(p)) ? : 1) - 1)) * \
			num_online_cpus())
#else
#define TIMESLICE_GRANULARITY(p)	(MIN_TIMESLICE * \
		(1 << (((MAX_BONUS - CURRENT_BONUS(p)) ? : 1) - 1)))
#endif

Ingo Molnar's avatar
Ingo Molnar committed
136 137 138 139 140 141 142 143 144 145
#define SCALE(v1,v1_max,v2_max) \
	(v1) * (v2_max) / (v1_max)

#define DELTA(p) \
	(SCALE(TASK_NICE(p), 40, MAX_USER_PRIO*PRIO_BONUS_RATIO/100) + \
		INTERACTIVE_DELTA)

#define TASK_INTERACTIVE(p) \
	((p)->prio <= (p)->static_prio - DELTA(p))

146 147 148 149 150 151 152 153 154 155
#define JUST_INTERACTIVE_SLEEP(p) \
	(JIFFIES_TO_NS(MAX_SLEEP_AVG * \
		(MAX_BONUS / 2 + DELTA((p)) + 1) / MAX_BONUS - 1))

#define HIGH_CREDIT(p) \
	((p)->interactive_credit > CREDIT_LIMIT)

#define LOW_CREDIT(p) \
	((p)->interactive_credit < -CREDIT_LIMIT)

156
#define TASK_PREEMPTS_CURR(p, rq) \
157
	((p)->prio < (rq)->curr->prio)
158

Ingo Molnar's avatar
Ingo Molnar committed
159
/*
Ingo Molnar's avatar
Ingo Molnar committed
160
 * BASE_TIMESLICE scales user-nice values [ -20 ... 19 ]
Ingo Molnar's avatar
Ingo Molnar committed
161 162
 * to time slice values.
 *
Ingo Molnar's avatar
Ingo Molnar committed
163
 * The higher a thread's priority, the bigger timeslices
Ingo Molnar's avatar
Ingo Molnar committed
164
 * it gets during one round of execution. But even the lowest
Ingo Molnar's avatar
Ingo Molnar committed
165
 * priority thread gets MIN_TIMESLICE worth of execution time.
Ingo Molnar's avatar
Ingo Molnar committed
166 167
 *
 * task_timeslice() is the interface that is used by the scheduler.
Ingo Molnar's avatar
Ingo Molnar committed
168 169
 */

Ingo Molnar's avatar
Ingo Molnar committed
170 171 172 173 174 175 176
#define BASE_TIMESLICE(p) (MIN_TIMESLICE + \
	((MAX_TIMESLICE - MIN_TIMESLICE) * (MAX_PRIO-1-(p)->static_prio)/(MAX_USER_PRIO - 1)))

static inline unsigned int task_timeslice(task_t *p)
{
	return BASE_TIMESLICE(p);
}
Ingo Molnar's avatar
Ingo Molnar committed
177 178 179 180 181 182

/*
 * These are the runqueue data structures:
 */

#define BITMAP_SIZE ((((MAX_PRIO+1+7)/8)+sizeof(long)-1)/sizeof(long))
Linus Torvalds's avatar
Linus Torvalds committed
183

Linus Torvalds's avatar
Linus Torvalds committed
184
typedef struct runqueue runqueue_t;
Linus Torvalds's avatar
Linus Torvalds committed
185

Linus Torvalds's avatar
Linus Torvalds committed
186 187
struct prio_array {
	int nr_active;
Linus Torvalds's avatar
Linus Torvalds committed
188
	unsigned long bitmap[BITMAP_SIZE];
Rusty Russell's avatar
Rusty Russell committed
189
	struct list_head queue[MAX_PRIO];
Linus Torvalds's avatar
Linus Torvalds committed
190
};
Linus Torvalds's avatar
Linus Torvalds committed
191 192

/*
Linus Torvalds's avatar
Linus Torvalds committed
193
 * This is the main, per-CPU runqueue data structure.
Linus Torvalds's avatar
Linus Torvalds committed
194
 *
Linus Torvalds's avatar
Linus Torvalds committed
195
 * Locking rule: those places that want to lock multiple runqueues
Ingo Molnar's avatar
Ingo Molnar committed
196
 * (such as the load balancing or the thread migration code), lock
Linus Torvalds's avatar
Linus Torvalds committed
197
 * acquire operations must be ordered by ascending &runqueue.
Linus Torvalds's avatar
Linus Torvalds committed
198
 */
Linus Torvalds's avatar
Linus Torvalds committed
199
struct runqueue {
Linus Torvalds's avatar
Linus Torvalds committed
200
	spinlock_t lock;
Ingo Molnar's avatar
Ingo Molnar committed
201 202
	unsigned long nr_running, nr_switches, expired_timestamp,
			nr_uninterruptible;
Linus Torvalds's avatar
Linus Torvalds committed
203
	task_t *curr, *idle;
204
	struct mm_struct *prev_mm;
Linus Torvalds's avatar
Linus Torvalds committed
205
	prio_array_t *active, *expired, arrays[2];
206
	int prev_cpu_load[NR_CPUS];
207 208
#ifdef CONFIG_NUMA
	atomic_t *node_nr_running;
209
	int prev_node_load[MAX_NUMNODES];
210
#endif
211
	task_t *migration_thread;
Rusty Russell's avatar
Rusty Russell committed
212
	struct list_head migration_queue;
Ingo Molnar's avatar
Ingo Molnar committed
213

Andrew Morton's avatar
Andrew Morton committed
214
	atomic_t nr_iowait;
215
};
Linus Torvalds's avatar
Linus Torvalds committed
216

217
static DEFINE_PER_CPU(struct runqueue, runqueues);
Linus Torvalds's avatar
Linus Torvalds committed
218

219 220
#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define this_rq()		(&__get_cpu_var(runqueues))
221
#define task_rq(p)		cpu_rq(task_cpu(p))
Linus Torvalds's avatar
Linus Torvalds committed
222 223
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)

Ingo Molnar's avatar
Ingo Molnar committed
224 225 226 227 228 229 230 231 232
/*
 * Default context-switch locking:
 */
#ifndef prepare_arch_switch
# define prepare_arch_switch(rq, next)	do { } while(0)
# define finish_arch_switch(rq, next)	spin_unlock_irq(&(rq)->lock)
# define task_running(rq, p)		((rq)->curr == (p))
#endif

233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262
#ifdef CONFIG_NUMA

/*
 * Keep track of running tasks.
 */

static atomic_t node_nr_running[MAX_NUMNODES] ____cacheline_maxaligned_in_smp =
	{[0 ...MAX_NUMNODES-1] = ATOMIC_INIT(0)};

static inline void nr_running_init(struct runqueue *rq)
{
	rq->node_nr_running = &node_nr_running[0];
}

static inline void nr_running_inc(runqueue_t *rq)
{
	atomic_inc(rq->node_nr_running);
	rq->nr_running++;
}

static inline void nr_running_dec(runqueue_t *rq)
{
	atomic_dec(rq->node_nr_running);
	rq->nr_running--;
}

__init void node_nr_running_init(void)
{
	int i;

Andrew Morton's avatar
Andrew Morton committed
263 264 265 266 267
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_possible(i))
			cpu_rq(i)->node_nr_running =
				&node_nr_running[cpu_to_node(i)];
	}
268 269 270 271 272 273 274 275 276 277
}

#else /* !CONFIG_NUMA */

# define nr_running_init(rq)   do { } while (0)
# define nr_running_inc(rq)    do { (rq)->nr_running++; } while (0)
# define nr_running_dec(rq)    do { (rq)->nr_running--; } while (0)

#endif /* CONFIG_NUMA */

278 279 280 281 282
/*
 * task_rq_lock - lock the runqueue a given task resides on and disable
 * interrupts.  Note the ordering: we can safely lookup the task_rq without
 * explicitly disabling preemption.
 */
283
static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
Linus Torvalds's avatar
Linus Torvalds committed
284
{
285
	struct runqueue *rq;
Linus Torvalds's avatar
Linus Torvalds committed
286 287

repeat_lock_task:
288
	local_irq_save(*flags);
289
	rq = task_rq(p);
290
	spin_lock(&rq->lock);
291
	if (unlikely(rq != task_rq(p))) {
292
		spin_unlock_irqrestore(&rq->lock, *flags);
Linus Torvalds's avatar
Linus Torvalds committed
293 294
		goto repeat_lock_task;
	}
295
	return rq;
Linus Torvalds's avatar
Linus Torvalds committed
296
}
Linus Torvalds's avatar
Linus Torvalds committed
297

298
static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags)
Linus Torvalds's avatar
Linus Torvalds committed
299
{
300
	spin_unlock_irqrestore(&rq->lock, *flags);
301 302 303 304 305
}

/*
 * rq_lock - lock a given runqueue and disable interrupts.
 */
306
static inline runqueue_t *this_rq_lock(void)
307
{
308 309
	runqueue_t *rq;

310 311 312
	local_irq_disable();
	rq = this_rq();
	spin_lock(&rq->lock);
313

314 315 316 317 318
	return rq;
}

static inline void rq_unlock(runqueue_t *rq)
{
319
	spin_unlock_irq(&rq->lock);
Linus Torvalds's avatar
Linus Torvalds committed
320
}
Ingo Molnar's avatar
Ingo Molnar committed
321

Linus Torvalds's avatar
Linus Torvalds committed
322
/*
Linus Torvalds's avatar
Linus Torvalds committed
323
 * Adding/removing a task to/from a priority array:
Linus Torvalds's avatar
Linus Torvalds committed
324
 */
Linus Torvalds's avatar
Linus Torvalds committed
325 326 327
static inline void dequeue_task(struct task_struct *p, prio_array_t *array)
{
	array->nr_active--;
328
	list_del(&p->run_list);
Linus Torvalds's avatar
Linus Torvalds committed
329
	if (list_empty(array->queue + p->prio))
Linus Torvalds's avatar
Linus Torvalds committed
330
		__clear_bit(p->prio, array->bitmap);
Linus Torvalds's avatar
Linus Torvalds committed
331
}
Linus Torvalds's avatar
Linus Torvalds committed
332

Linus Torvalds's avatar
Linus Torvalds committed
333 334 335
static inline void enqueue_task(struct task_struct *p, prio_array_t *array)
{
	list_add_tail(&p->run_list, array->queue + p->prio);
Linus Torvalds's avatar
Linus Torvalds committed
336
	__set_bit(p->prio, array->bitmap);
Linus Torvalds's avatar
Linus Torvalds committed
337 338 339
	array->nr_active++;
	p->array = array;
}
Linus Torvalds's avatar
Linus Torvalds committed
340

Ingo Molnar's avatar
Ingo Molnar committed
341 342 343 344 345 346 347 348 349 350 351 352 353 354
/*
 * effective_prio - return the priority that is based on the static
 * priority but is modified by bonuses/penalties.
 *
 * We scale the actual sleep average [0 .... MAX_SLEEP_AVG]
 * into the -5 ... 0 ... +5 bonus/penalty range.
 *
 * We use 25% of the full 0...39 priority range so that:
 *
 * 1) nice +19 interactive tasks do not preempt nice 0 CPU hogs.
 * 2) nice -20 CPU hogs do not get preempted by nice 0 tasks.
 *
 * Both properties are important to certain workloads.
 */
355
static int effective_prio(task_t *p)
Linus Torvalds's avatar
Linus Torvalds committed
356 357 358
{
	int bonus, prio;

359 360 361
	if (rt_task(p))
		return p->prio;

362
	bonus = CURRENT_BONUS(p) - MAX_BONUS / 2;
Linus Torvalds's avatar
Linus Torvalds committed
363

Ingo Molnar's avatar
Ingo Molnar committed
364
	prio = p->static_prio - bonus;
Linus Torvalds's avatar
Linus Torvalds committed
365 366 367 368 369 370 371
	if (prio < MAX_RT_PRIO)
		prio = MAX_RT_PRIO;
	if (prio > MAX_PRIO-1)
		prio = MAX_PRIO-1;
	return prio;
}

Ingo Molnar's avatar
Ingo Molnar committed
372
/*
373
 * __activate_task - move a task to the runqueue.
Ingo Molnar's avatar
Ingo Molnar committed
374
 */
375 376 377
static inline void __activate_task(task_t *p, runqueue_t *rq)
{
	enqueue_task(p, rq->active);
378
	nr_running_inc(rq);
379 380
}

381
static void recalc_task_prio(task_t *p, unsigned long long now)
Linus Torvalds's avatar
Linus Torvalds committed
382
{
383 384 385
	unsigned long long __sleep_time = now - p->timestamp;
	unsigned long sleep_time;

386 387
	if (__sleep_time > NS_MAX_SLEEP_AVG)
		sleep_time = NS_MAX_SLEEP_AVG;
388 389
	else
		sleep_time = (unsigned long)__sleep_time;
Linus Torvalds's avatar
Linus Torvalds committed
390

391
	if (likely(sleep_time > 0)) {
392
		/*
393 394 395 396
		 * User tasks that sleep a long time are categorised as
		 * idle and will get just interactive status to stay active &
		 * prevent them suddenly becoming cpu hogs and starving
		 * other processes.
397
		 */
398 399 400 401 402 403 404 405 406 407 408 409
		if (p->mm && p->activated != -1 &&
			sleep_time > JUST_INTERACTIVE_SLEEP(p)){
				p->sleep_avg = JIFFIES_TO_NS(MAX_SLEEP_AVG -
						AVG_TIMESLICE);
				if (!HIGH_CREDIT(p))
					p->interactive_credit++;
		} else {
			/*
			 * The lower the sleep avg a task has the more
			 * rapidly it will rise with sleep time.
			 */
			sleep_time *= (MAX_BONUS - CURRENT_BONUS(p)) ? : 1;
410

411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450
			/*
			 * Tasks with low interactive_credit are limited to
			 * one timeslice worth of sleep avg bonus.
			 */
			if (LOW_CREDIT(p) &&
				sleep_time > JIFFIES_TO_NS(task_timeslice(p)))
					sleep_time =
						JIFFIES_TO_NS(task_timeslice(p));

			/*
			 * Non high_credit tasks waking from uninterruptible
			 * sleep are limited in their sleep_avg rise as they
			 * are likely to be cpu hogs waiting on I/O
			 */
			if (p->activated == -1 && !HIGH_CREDIT(p) && p->mm){
				if (p->sleep_avg >= JUST_INTERACTIVE_SLEEP(p))
					sleep_time = 0;
				else if (p->sleep_avg + sleep_time >=
					JUST_INTERACTIVE_SLEEP(p)){
						p->sleep_avg =
							JUST_INTERACTIVE_SLEEP(p);
						sleep_time = 0;
					}
			}

			/*
			 * This code gives a bonus to interactive tasks.
			 *
			 * The boost works by updating the 'average sleep time'
			 * value here, based on ->timestamp. The more time a task
			 * spends sleeping, the higher the average gets - and the
			 * higher the priority boost gets as well.
			 */
			p->sleep_avg += sleep_time;

			if (p->sleep_avg > NS_MAX_SLEEP_AVG){
				p->sleep_avg = NS_MAX_SLEEP_AVG;
				if (!HIGH_CREDIT(p))
					p->interactive_credit++;
			}
451
		}
Linus Torvalds's avatar
Linus Torvalds committed
452
	}
453 454

	p->prio = effective_prio(p);
455 456
}

457 458 459 460 461 462
/*
 * activate_task - move a task to the runqueue and do priority recalculation
 *
 * Update all the scheduling statistics stuff. (sleep average
 * calculation, priority modifiers, etc.)
 */
Ingo Molnar's avatar
Ingo Molnar committed
463
static inline void activate_task(task_t *p, runqueue_t *rq)
Linus Torvalds's avatar
Linus Torvalds committed
464
{
465
	unsigned long long now = sched_clock();
Linus Torvalds's avatar
Linus Torvalds committed
466

467
	recalc_task_prio(p, now);
468

469
	/*
470 471
	 * This checks to make sure it's not an uninterruptible task
	 * that is now waking up.
472
	 */
473
	if (!p->activated){
474
		/*
475 476 477 478 479
		 * Tasks which were woken up by interrupts (ie. hw events)
		 * are most likely of interactive nature. So we give them
		 * the credit of extending their sleep time to the period
		 * of time they spend on the runqueue, waiting for execution
		 * on a CPU, first time around:
480
		 */
481 482 483
		if (in_interrupt())
			p->activated = 2;
		else
Linus Torvalds's avatar
Linus Torvalds committed
484
		/*
485 486
		 * Normal first-time wakeups get a credit too for on-runqueue
		 * time, but it will be weighted down:
Linus Torvalds's avatar
Linus Torvalds committed
487
		 */
488
			p->activated = 1;
489
		}
490 491
	p->timestamp = now;

492
	__activate_task(p, rq);
Linus Torvalds's avatar
Linus Torvalds committed
493
}
Linus Torvalds's avatar
Linus Torvalds committed
494

Ingo Molnar's avatar
Ingo Molnar committed
495 496 497
/*
 * deactivate_task - remove a task from the runqueue.
 */
Linus Torvalds's avatar
Linus Torvalds committed
498 499
static inline void deactivate_task(struct task_struct *p, runqueue_t *rq)
{
500
	nr_running_dec(rq);
501 502
	if (p->state == TASK_UNINTERRUPTIBLE)
		rq->nr_uninterruptible++;
Linus Torvalds's avatar
Linus Torvalds committed
503 504 505
	dequeue_task(p, p->array);
	p->array = NULL;
}
Linus Torvalds's avatar
Linus Torvalds committed
506

Ingo Molnar's avatar
Ingo Molnar committed
507 508 509 510 511 512 513
/*
 * resched_task - mark a task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
Linus Torvalds's avatar
Linus Torvalds committed
514 515
static inline void resched_task(task_t *p)
{
516 517 518
#ifdef CONFIG_SMP
	int need_resched, nrpolling;

519
	preempt_disable();
520 521 522 523
	/* minimise the chance of sending an interrupt to poll_idle() */
	nrpolling = test_tsk_thread_flag(p,TIF_POLLING_NRFLAG);
	need_resched = test_and_set_tsk_thread_flag(p,TIF_NEED_RESCHED);
	nrpolling |= test_tsk_thread_flag(p,TIF_POLLING_NRFLAG);
Linus Torvalds's avatar
Linus Torvalds committed
524

525 526
	if (!need_resched && !nrpolling && (task_cpu(p) != smp_processor_id()))
		smp_send_reschedule(task_cpu(p));
527
	preempt_enable();
528 529 530
#else
	set_tsk_need_resched(p);
#endif
Linus Torvalds's avatar
Linus Torvalds committed
531 532
}

Linus Torvalds's avatar
Linus Torvalds committed
533 534
#ifdef CONFIG_SMP

Linus Torvalds's avatar
Linus Torvalds committed
535
/*
Ingo Molnar's avatar
Ingo Molnar committed
536 537 538
 * wait_task_inactive - wait for a thread to unschedule.
 *
 * The caller must ensure that the task *will* unschedule sometime soon,
539 540 541 542
 * else this function might spin for a *long* time. This function can't
 * be called with interrupts off, or it may introduce deadlock with
 * smp_call_function() if an IPI is sent by the same process we are
 * waiting to become inactive.
Linus Torvalds's avatar
Linus Torvalds committed
543
 */
Linus Torvalds's avatar
Linus Torvalds committed
544
void wait_task_inactive(task_t * p)
Linus Torvalds's avatar
Linus Torvalds committed
545
{
Linus Torvalds's avatar
Linus Torvalds committed
546 547
	unsigned long flags;
	runqueue_t *rq;
Linus Torvalds's avatar
Linus Torvalds committed
548

Linus Torvalds's avatar
Linus Torvalds committed
549
repeat:
550
	preempt_disable();
Linus Torvalds's avatar
Linus Torvalds committed
551
	rq = task_rq(p);
Ingo Molnar's avatar
Ingo Molnar committed
552
	if (unlikely(task_running(rq, p))) {
Linus Torvalds's avatar
Linus Torvalds committed
553
		cpu_relax();
554 555 556 557 558 559 560
		/*
		 * enable/disable preemption just to make this
		 * a preemption point - we are busy-waiting
		 * anyway.
		 */
		preempt_enable();
		goto repeat;
Linus Torvalds's avatar
Linus Torvalds committed
561
	}
562
	rq = task_rq_lock(p, &flags);
Ingo Molnar's avatar
Ingo Molnar committed
563
	if (unlikely(task_running(rq, p))) {
564
		task_rq_unlock(rq, &flags);
565
		preempt_enable();
Linus Torvalds's avatar
Linus Torvalds committed
566 567
		goto repeat;
	}
568
	task_rq_unlock(rq, &flags);
569
	preempt_enable();
Linus Torvalds's avatar
Linus Torvalds committed
570
}
Ingo Molnar's avatar
Ingo Molnar committed
571
#endif
Linus Torvalds's avatar
Linus Torvalds committed
572

Ingo Molnar's avatar
Ingo Molnar committed
573 574 575
/***
 * try_to_wake_up - wake up a thread
 * @p: the to-be-woken-up thread
576
 * @state: the mask of task states that can be woken
Ingo Molnar's avatar
Ingo Molnar committed
577
 * @sync: do a synchronous wakeup?
Ingo Molnar's avatar
Ingo Molnar committed
578
 * @kick: kick the CPU if the task is already running?
Ingo Molnar's avatar
Ingo Molnar committed
579 580 581 582 583 584
 *
 * Put it on the run-queue if it's not already there. The "current"
 * thread is always on the run-queue (except when the actual
 * re-schedule is in progress), and as such you're allowed to do
 * the simpler "current->state = TASK_RUNNING" to mark yourself
 * runnable without the overhead of this.
Ingo Molnar's avatar
Ingo Molnar committed
585 586
 *
 * returns failure only if the task is already active.
Linus Torvalds's avatar
Linus Torvalds committed
587
 */
Ingo Molnar's avatar
Ingo Molnar committed
588
static int try_to_wake_up(task_t * p, unsigned int state, int sync, int kick)
Linus Torvalds's avatar
Linus Torvalds committed
589 590
{
	unsigned long flags;
Ingo Molnar's avatar
Ingo Molnar committed
591
	int success = 0;
592
	long old_state;
Linus Torvalds's avatar
Linus Torvalds committed
593
	runqueue_t *rq;
Linus Torvalds's avatar
Linus Torvalds committed
594

595
repeat_lock_task:
596
	rq = task_rq_lock(p, &flags);
597
	old_state = p->state;
598 599 600 601 602 603 604 605
	if (old_state & state) {
		if (!p->array) {
			/*
			 * Fast-migrate the task if it's not running or runnable
			 * currently. Do not violate hard affinity.
			 */
			if (unlikely(sync && !task_running(rq, p) &&
				(task_cpu(p) != smp_processor_id()) &&
606
				cpu_isset(smp_processor_id(), p->cpus_allowed))) {
607 608 609 610 611

				set_task_cpu(p, smp_processor_id());
				task_rq_unlock(rq, &flags);
				goto repeat_lock_task;
			}
612
			if (old_state == TASK_UNINTERRUPTIBLE){
613
				rq->nr_uninterruptible--;
614 615 616 617 618 619
				/*
				 * Tasks on involuntary sleep don't earn
				 * sleep_avg beyond just interactive state.
				 */
				p->activated = -1;
			}
620 621 622
			if (sync)
				__activate_task(p, rq);
			else {
Ingo Molnar's avatar
Ingo Molnar committed
623
				activate_task(p, rq);
624
				if (TASK_PREEMPTS_CURR(p, rq))
625 626
					resched_task(rq->curr);
			}
627
			success = 1;
628
		}
629
#ifdef CONFIG_SMP
630
	       	else
631 632
			if (unlikely(kick) && task_running(rq, p) && (task_cpu(p) != smp_processor_id()))
				smp_send_reschedule(task_cpu(p));
633
#endif
634
		p->state = TASK_RUNNING;
Linus Torvalds's avatar
Linus Torvalds committed
635
	}
636
	task_rq_unlock(rq, &flags);
637

Linus Torvalds's avatar
Linus Torvalds committed
638
	return success;
Linus Torvalds's avatar
Linus Torvalds committed
639 640
}

Linus Torvalds's avatar
Linus Torvalds committed
641
int wake_up_process(task_t * p)
Linus Torvalds's avatar
Linus Torvalds committed
642
{
Ingo Molnar's avatar
Ingo Molnar committed
643 644 645 646 647 648
	return try_to_wake_up(p, TASK_STOPPED | TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0, 0);
}

int wake_up_process_kick(task_t * p)
{
	return try_to_wake_up(p, TASK_STOPPED | TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0, 1);
649 650 651 652
}

int wake_up_state(task_t *p, unsigned int state)
{
Ingo Molnar's avatar
Ingo Molnar committed
653
	return try_to_wake_up(p, state, 0, 0);
Linus Torvalds's avatar
Linus Torvalds committed
654 655
}

Ingo Molnar's avatar
Ingo Molnar committed
656 657 658 659 660 661
/*
 * wake_up_forked_process - wake up a freshly forked process.
 *
 * This function will do some initial scheduler statistics housekeeping
 * that must be done for every newly created process.
 */
Linus Torvalds's avatar
Linus Torvalds committed
662
void wake_up_forked_process(task_t * p)
Linus Torvalds's avatar
Linus Torvalds committed
663
{
664 665
	unsigned long flags;
	runqueue_t *rq = task_rq_lock(current, &flags);
Linus Torvalds's avatar
Linus Torvalds committed
666

Linus Torvalds's avatar
Linus Torvalds committed
667
	p->state = TASK_RUNNING;
668 669 670 671 672
	/*
	 * We decrease the sleep average of forking parents
	 * and children as well, to keep max-interactive tasks
	 * from forking tasks that are max-interactive.
	 */
673 674 675 676 677 678 679 680
	current->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(current) *
		PARENT_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS);

	p->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(p) *
		CHILD_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS);

	p->interactive_credit = 0;

681
	p->prio = effective_prio(p);
682
	set_task_cpu(p, smp_processor_id());
683

684 685 686 687 688 689 690
	if (unlikely(!current->array))
		__activate_task(p, rq);
	else {
		p->prio = current->prio;
		list_add_tail(&p->run_list, &current->run_list);
		p->array = current->array;
		p->array->nr_active++;
691
		nr_running_inc(rq);
692
	}
693
	task_rq_unlock(rq, &flags);
Linus Torvalds's avatar
Linus Torvalds committed
694
}
Linus Torvalds's avatar
Linus Torvalds committed
695

Ingo Molnar's avatar
Ingo Molnar committed
696 697 698
/*
 * Potentially available exiting-child timeslices are
 * retrieved here - this way the parent does not get
Ingo Molnar's avatar
Ingo Molnar committed
699
 * penalized for creating too many threads.
Ingo Molnar's avatar
Ingo Molnar committed
700 701 702 703 704 705 706
 *
 * (this cannot be used to 'generate' timeslices
 * artificially, because any timeslice recovered here
 * was given away by the parent in the first place.)
 */
void sched_exit(task_t * p)
{
Ingo Molnar's avatar
Ingo Molnar committed
707 708 709
	unsigned long flags;

	local_irq_save(flags);
Ingo Molnar's avatar
Ingo Molnar committed
710
	if (p->first_time_slice) {
711 712 713
		p->parent->time_slice += p->time_slice;
		if (unlikely(p->parent->time_slice > MAX_TIMESLICE))
			p->parent->time_slice = MAX_TIMESLICE;
Ingo Molnar's avatar
Ingo Molnar committed
714
	}
Ingo Molnar's avatar
Ingo Molnar committed
715
	local_irq_restore(flags);
Ingo Molnar's avatar
Ingo Molnar committed
716 717 718 719
	/*
	 * If the child was a (relative-) CPU hog then decrease
	 * the sleep_avg of the parent as well.
	 */
720
	if (p->sleep_avg < p->parent->sleep_avg)
721 722 723
		p->parent->sleep_avg = p->parent->sleep_avg /
		(EXIT_WEIGHT + 1) * EXIT_WEIGHT + p->sleep_avg /
		(EXIT_WEIGHT + 1);
Ingo Molnar's avatar
Ingo Molnar committed
724 725
}

726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742
/**
 * finish_task_switch - clean up after a task-switch
 * @prev: the thread we just switched away from.
 *
 * We enter this with the runqueue still locked, and finish_arch_switch()
 * will unlock it along with doing any other architecture-specific cleanup
 * actions.
 *
 * Note that we may have delayed dropping an mm in context_switch(). If
 * so, we finish that here outside of the runqueue lock.  (Doing it
 * with the lock held can cause deadlocks; see schedule() for
 * details.)
 */
static inline void finish_task_switch(task_t *prev)
{
	runqueue_t *rq = this_rq();
	struct mm_struct *mm = rq->prev_mm;
743
	int drop_task_ref;
744 745

	rq->prev_mm = NULL;
746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761

	/*
	 * A task struct has one reference for the use as "current".
	 * If a task dies, then it sets TASK_ZOMBIE in tsk->state and calls
	 * schedule one last time. The schedule call will never return,
	 * and the scheduled task must drop that reference.
	 * The test for TASK_ZOMBIE must occur while the runqueue locks are
	 * still held, otherwise prev could be scheduled on another cpu, die
	 * there before we look at prev->state, and then the reference would
	 * be dropped twice.
	 * 		Manfred Spraul <manfred@colorfullife.com>
	 */
	drop_task_ref = 0;
	if (unlikely(prev->state & (TASK_DEAD | TASK_ZOMBIE)))
		drop_task_ref = 1;

762 763 764
	finish_arch_switch(rq, prev);
	if (mm)
		mmdrop(mm);
765
	if (drop_task_ref)
766
		put_task_struct(prev);
767 768
}

Ingo Molnar's avatar
Ingo Molnar committed
769 770 771 772
/**
 * schedule_tail - first thing a freshly forked thread must call.
 * @prev: the thread we just switched away from.
 */
773
asmlinkage void schedule_tail(task_t *prev)
Linus Torvalds's avatar
Linus Torvalds committed
774
{
775 776
	finish_task_switch(prev);

777 778
	if (current->set_child_tid)
		put_user(current->pid, current->set_child_tid);
Linus Torvalds's avatar
Linus Torvalds committed
779 780
}

Ingo Molnar's avatar
Ingo Molnar committed
781 782 783 784
/*
 * context_switch - switch to the new MM and the new
 * thread's register state.
 */
785
static inline task_t * context_switch(runqueue_t *rq, task_t *prev, task_t *next)
Linus Torvalds's avatar
Linus Torvalds committed
786
{
Linus Torvalds's avatar
Linus Torvalds committed
787 788
	struct mm_struct *mm = next->mm;
	struct mm_struct *oldmm = prev->active_mm;
Linus Torvalds's avatar
Linus Torvalds committed
789

Linus Torvalds's avatar
Linus Torvalds committed
790
	if (unlikely(!mm)) {
Linus Torvalds's avatar
Linus Torvalds committed
791 792
		next->active_mm = oldmm;
		atomic_inc(&oldmm->mm_count);
793
		enter_lazy_tlb(oldmm, next);
Linus Torvalds's avatar
Linus Torvalds committed
794
	} else
795
		switch_mm(oldmm, mm, next);
Linus Torvalds's avatar
Linus Torvalds committed
796

Linus Torvalds's avatar
Linus Torvalds committed
797
	if (unlikely(!prev->mm)) {
Linus Torvalds's avatar
Linus Torvalds committed
798
		prev->active_mm = NULL;
799 800
		WARN_ON(rq->prev_mm);
		rq->prev_mm = oldmm;
Linus Torvalds's avatar
Linus Torvalds committed
801
	}
Linus Torvalds's avatar
Linus Torvalds committed
802

803
	/* Here we just switch the register state and the stack. */
804 805 806
	switch_to(prev, next, prev);

	return prev;
Linus Torvalds's avatar
Linus Torvalds committed
807
}
Linus Torvalds's avatar
Linus Torvalds committed
808

Ingo Molnar's avatar
Ingo Molnar committed
809 810 811 812 813 814 815
/*
 * nr_running, nr_uninterruptible and nr_context_switches:
 *
 * externally visible scheduler statistics: current number of runnable
 * threads, current number of uninterruptible-sleeping threads, total
 * number of context switches performed since bootup.
 */
Linus Torvalds's avatar
Linus Torvalds committed
816 817 818
unsigned long nr_running(void)
{
	unsigned long i, sum = 0;
Linus Torvalds's avatar
Linus Torvalds committed
819

820 821
	for (i = 0; i < NR_CPUS; i++)
		sum += cpu_rq(i)->nr_running;
Linus Torvalds's avatar
Linus Torvalds committed
822 823

	return sum;
Linus Torvalds's avatar
Linus Torvalds committed
824 825
}

826 827 828 829
unsigned long nr_uninterruptible(void)
{
	unsigned long i, sum = 0;

Andrew Morton's avatar
Andrew Morton committed
830 831 832
	for (i = 0; i < NR_CPUS; i++) {
		if (!cpu_online(i))
			continue;
833
		sum += cpu_rq(i)->nr_uninterruptible;
Andrew Morton's avatar
Andrew Morton committed
834
	}
835 836 837
	return sum;
}

Linus Torvalds's avatar
Linus Torvalds committed
838
unsigned long nr_context_switches(void)
Linus Torvalds's avatar
Linus Torvalds committed
839
{
Linus Torvalds's avatar
Linus Torvalds committed
840 841
	unsigned long i, sum = 0;

Andrew Morton's avatar
Andrew Morton committed
842 843 844
	for (i = 0; i < NR_CPUS; i++) {
		if (!cpu_online(i))
			continue;
845
		sum += cpu_rq(i)->nr_switches;
Andrew Morton's avatar
Andrew Morton committed
846 847 848 849 850 851 852
	}
	return sum;
}

unsigned long nr_iowait(void)
{
	unsigned long i, sum = 0;
Linus Torvalds's avatar
Linus Torvalds committed
853

Andrew Morton's avatar
Andrew Morton committed
854 855 856 857 858
	for (i = 0; i < NR_CPUS; ++i) {
		if (!cpu_online(i))
			continue;
		sum += atomic_read(&cpu_rq(i)->nr_iowait);
	}
Linus Torvalds's avatar
Linus Torvalds committed
859
	return sum;
Linus Torvalds's avatar
Linus Torvalds committed
860 861
}

Ingo Molnar's avatar
Ingo Molnar committed
862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895
/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock,
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(runqueue_t *rq1, runqueue_t *rq2)
{
	if (rq1 == rq2)
		spin_lock(&rq1->lock);
	else {
		if (rq1 < rq2) {
			spin_lock(&rq1->lock);
			spin_lock(&rq2->lock);
		} else {
			spin_lock(&rq2->lock);
			spin_lock(&rq1->lock);
		}
	}
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock,
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(runqueue_t *rq1, runqueue_t *rq2)
{
	spin_unlock(&rq1->lock);
	if (rq1 != rq2)
		spin_unlock(&rq2->lock);
}

896
#ifdef CONFIG_NUMA
897 898 899 900 901 902 903 904
/*
 * If dest_cpu is allowed for this process, migrate the task to it.
 * This is accomplished by forcing the cpu_allowed mask to only
 * allow dest_cpu, which will force the cpu onto dest_cpu.  Then
 * the cpu_allowed mask is restored.
 */
static void sched_migrate_task(task_t *p, int dest_cpu)
{
905
	cpumask_t old_mask;
906 907

	old_mask = p->cpus_allowed;
908
	if (!cpu_isset(dest_cpu, old_mask))
909 910
		return;
	/* force the process onto the specified CPU */
911
	set_cpus_allowed(p, cpumask_of_cpu(dest_cpu));
912 913 914 915 916 917 918 919 920 921 922 923

	/* restore the cpus allowed mask */
	set_cpus_allowed(p, old_mask);
}

/*
 * Find the least loaded CPU.  Slightly favor the current CPU by
 * setting its runqueue length as the minimum to start.
 */
static int sched_best_cpu(struct task_struct *p)
{
	int i, minload, load, best_cpu, node = 0;
924
	cpumask_t cpumask;
925 926 927 928 929 930

	best_cpu = task_cpu(p);
	if (cpu_rq(best_cpu)->nr_running <= 2)
		return best_cpu;

	minload = 10000000;
931
	for_each_node_with_cpus(i) {
932 933 934 935 936 937 938
		/*
		 * Node load is always divided by nr_cpus_node to normalise 
		 * load values in case cpu count differs from node to node.
		 * We first multiply node_nr_running by 10 to get a little
		 * better resolution.   
		 */
		load = 10 * atomic_read(&node_nr_running[i]) / nr_cpus_node(i);
939 940 941 942 943 944 945
		if (load < minload) {
			minload = load;
			node = i;
		}
	}

	minload = 10000000;
946
	cpumask = node_to_cpumask(node);
947
	for (i = 0; i < NR_CPUS; ++i) {
948
		if (!cpu_isset(i, cpumask))
949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967
			continue;
		if (cpu_rq(i)->nr_running < minload) {
			best_cpu = i;
			minload = cpu_rq(i)->nr_running;
		}
	}
	return best_cpu;
}

void sched_balance_exec(void)
{
	int new_cpu;

	if (numnodes > 1) {
		new_cpu = sched_best_cpu(current);
		if (new_cpu != smp_processor_id())
			sched_migrate_task(current, new_cpu);
	}
}
968

969
/*
Ingo Molnar's avatar
Ingo Molnar committed
970
 * Find the busiest node. All previous node loads contribute with a
971 972 973
 * geometrically deccaying weight to the load measure:
 *      load_{t} = load_{t-1}/2 + nr_node_running_{t}
 * This way sudden load peaks are flattened out a bit.
974 975 976
 * Node load is divided by nr_cpus_node() in order to compare nodes
 * of different cpu count but also [first] multiplied by 10 to 
 * provide better resolution.
977 978 979 980
 */
static int find_busiest_node(int this_node)
{
	int i, node = -1, load, this_load, maxload;
Ingo Molnar's avatar
Ingo Molnar committed
981

982 983
	if (!nr_cpus_node(this_node))
		return node;
984
	this_load = maxload = (this_rq()->prev_node_load[this_node] >> 1)
985 986
		+ (10 * atomic_read(&node_nr_running[this_node])
		/ nr_cpus_node(this_node));
987
	this_rq()->prev_node_load[this_node] = this_load;
988
	for_each_node_with_cpus(i) {
989 990 991
		if (i == this_node)
			continue;
		load = (this_rq()->prev_node_load[i] >> 1)
992 993
			+ (10 * atomic_read(&node_nr_running[i])
			/ nr_cpus_node(i));
994 995 996 997 998 999 1000 1001 1002
		this_rq()->prev_node_load[i] = load;
		if (load > maxload && (100*load > NODE_THRESHOLD*this_load)) {
			maxload = load;
			node = i;
		}
	}
	return node;
}

1003 1004
#endif /* CONFIG_NUMA */

1005
#ifdef CONFIG_SMP
Ingo Molnar's avatar
Ingo Molnar committed
1006

Linus Torvalds's avatar
Linus Torvalds committed
1007
/*
Ingo Molnar's avatar
Ingo Molnar committed
1008 1009 1010 1011
 * double_lock_balance - lock the busiest runqueue
 *
 * this_rq is locked already. Recalculate nr_running if we have to
 * drop the runqueue lock.
Linus Torvalds's avatar
Linus Torvalds committed
1012 1013 1014 1015 1016 1017 1018 1019 1020 1021
 */
static inline unsigned int double_lock_balance(runqueue_t *this_rq,
	runqueue_t *busiest, int this_cpu, int idle, unsigned int nr_running)
{
	if (unlikely(!spin_trylock(&busiest->lock))) {
		if (busiest < this_rq) {
			spin_unlock(&this_rq->lock);
			spin_lock(&busiest->lock);
			spin_lock(&this_rq->lock);
			/* Need to recalculate nr_running */
1022
			if (idle || (this_rq->nr_running > this_rq->prev_cpu_load[this_cpu]))
Linus Torvalds's avatar
Linus Torvalds committed
1023 1024
				nr_running = this_rq->nr_running;
			else
1025
				nr_running = this_rq->prev_cpu_load[this_cpu];
Linus Torvalds's avatar
Linus Torvalds committed
1026 1027 1028 1029 1030
		} else
			spin_lock(&busiest->lock);
	}
	return nr_running;
}
Ingo Molnar's avatar
Ingo Molnar committed
1031

Ingo Molnar's avatar
Ingo Molnar committed
1032
/*
1033
 * find_busiest_queue - find the busiest runqueue among the cpus in cpumask.
Ingo Molnar's avatar
Ingo Molnar committed
1034
 */
1035
static inline runqueue_t *find_busiest_queue(runqueue_t *this_rq, int this_cpu, int idle, int *imbalance, cpumask_t cpumask)
Linus Torvalds's avatar
Linus Torvalds committed
1036
{
Ingo Molnar's avatar
Ingo Molnar committed
1037
	int nr_running, load, max_load, i;
Linus Torvalds's avatar
Linus Torvalds committed
1038
	runqueue_t *busiest, *rq_src;
Linus Torvalds's avatar
Linus Torvalds committed
1039 1040 1041 1042

	/*
	 * We search all runqueues to find the most busy one.
	 * We do this lockless to reduce cache-bouncing overhead,
Linus Torvalds's avatar
Linus Torvalds committed
1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060
	 * we re-check the 'best' source CPU later on again, with
	 * the lock held.
	 *
	 * We fend off statistical fluctuations in runqueue lengths by
	 * saving the runqueue length during the previous load-balancing
	 * operation and using the smaller one the current and saved lengths.
	 * If a runqueue is long enough for a longer amount of time then
	 * we recognize it and pull tasks from it.
	 *
	 * The 'current runqueue length' is a statistical maximum variable,
	 * for that one we take the longer one - to avoid fluctuations in
	 * the other direction. So for a load-balance to happen it needs
	 * stable long runqueue on the target CPU and stable short runqueue
	 * on the local runqueue.
	 *
	 * We make an exception if this CPU is about to become idle - in
	 * that case we are less picky about moving a task across CPUs and
	 * take what can be taken.
Linus Torvalds's avatar
Linus Torvalds committed
1061
	 */
1062
	if (idle || (this_rq->nr_running > this_rq->prev_cpu_load[this_cpu]))
Linus Torvalds's avatar
Linus Torvalds committed
1063 1064
		nr_running = this_rq->nr_running;
	else
1065
		nr_running = this_rq->prev_cpu_load[this_cpu];
Linus Torvalds's avatar
Linus Torvalds committed
1066

Linus Torvalds's avatar
Linus Torvalds committed
1067
	busiest = NULL;
Linus Torvalds's avatar
Linus Torvalds committed
1068
	max_load = 1;
1069
	for (i = 0; i < NR_CPUS; i++) {
1070
		if (!cpu_isset(i, cpumask))
1071
			continue;
1072

1073
		rq_src = cpu_rq(i);
1074
		if (idle || (rq_src->nr_running < this_rq->prev_cpu_load[i]))
Linus Torvalds's avatar
Linus Torvalds committed
1075 1076
			load = rq_src->nr_running;
		else
1077 1078
			load = this_rq->prev_cpu_load[i];
		this_rq->prev_cpu_load[i] = rq_src->nr_running;
Linus Torvalds's avatar
Linus Torvalds committed
1079

Linus Torvalds's avatar
Linus Torvalds committed
1080
		if ((load > max_load) && (rq_src != this_rq)) {
Linus Torvalds's avatar
Linus Torvalds committed
1081
			busiest = rq_src;
Linus Torvalds's avatar
Linus Torvalds committed
1082
			max_load = load;
Linus Torvalds's avatar
Linus Torvalds committed
1083
		}
Linus Torvalds's avatar
Linus Torvalds committed
1084
	}
Linus Torvalds's avatar
Linus Torvalds committed
1085 1086

	if (likely(!busiest))
Ingo Molnar's avatar
Ingo Molnar committed
1087
		goto out;
Linus Torvalds's avatar
Linus Torvalds committed
1088

1089
	*imbalance = max_load - nr_running;
Linus Torvalds's avatar
Linus Torvalds committed
1090

Linus Torvalds's avatar
Linus Torvalds committed
1091
	/* It needs an at least ~25% imbalance to trigger balancing. */
1092
	if (!idle && ((*imbalance)*4 < max_load)) {
Ingo Molnar's avatar
Ingo Molnar committed
1093 1094 1095
		busiest = NULL;
		goto out;
	}
Linus Torvalds's avatar
Linus Torvalds committed
1096

Linus Torvalds's avatar
Linus Torvalds committed
1097
	nr_running = double_lock_balance(this_rq, busiest, this_cpu, idle, nr_running);
Linus Torvalds's avatar
Linus Torvalds committed
1098 1099 1100 1101
	/*
	 * Make sure nothing changed since we checked the
	 * runqueue length.
	 */
1102
	if (busiest->nr_running <= nr_running) {
Ingo Molnar's avatar
Ingo Molnar committed
1103 1104 1105 1106 1107 1108 1109 1110
		spin_unlock(&busiest->lock);
		busiest = NULL;
	}
out:
	return busiest;
}

/*
Ingo Molnar's avatar
Ingo Molnar committed
1111
 * pull_task - move a task from a remote runqueue to the local runqueue.
Ingo Molnar's avatar
Ingo Molnar committed
1112 1113 1114 1115 1116
 * Both runqueues must be locked.
 */
static inline void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p, runqueue_t *this_rq, int this_cpu)
{
	dequeue_task(p, src_array);
1117
	nr_running_dec(src_rq);
Ingo Molnar's avatar
Ingo Molnar committed
1118
	set_task_cpu(p, this_cpu);
1119
	nr_running_inc(this_rq);
Ingo Molnar's avatar
Ingo Molnar committed
1120 1121 1122 1123 1124
	enqueue_task(p, this_rq->active);
	/*
	 * Note that idle threads have a prio of MAX_PRIO, for this test
	 * to be always true for them.
	 */
1125
	if (TASK_PREEMPTS_CURR(p, this_rq))
Ingo Molnar's avatar
Ingo Molnar committed
1126 1127 1128
		set_need_resched();
}

1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149
/*
 * Previously:
 *
 * #define CAN_MIGRATE_TASK(p,rq,this_cpu)	\
 *	((!idle || (NS_TO_JIFFIES(now - (p)->timestamp) > \
 *		cache_decay_ticks)) && !task_running(rq, p) && \
 *			cpu_isset(this_cpu, (p)->cpus_allowed))
 */

static inline int
can_migrate_task(task_t *tsk, runqueue_t *rq, int this_cpu, int idle)
{
	unsigned long delta = sched_clock() - tsk->timestamp;

	if (!idle && (delta <= JIFFIES_TO_NS(cache_decay_ticks)))
		return 0;
	if (task_running(rq, tsk))
		return 0;
	if (!cpu_isset(this_cpu, tsk->cpus_allowed))
		return 0;
	return 1;
Ingo Molnar's avatar
Ingo Molnar committed
1150 1151 1152 1153 1154 1155 1156 1157 1158 1159
}

/*
 * Current runqueue is empty, or rebalance tick: if there is an
 * inbalance (current runqueue is too short) then pull from
 * busiest runqueue(s).
 *
 * We call this with the current runqueue locked,
 * irqs disabled.
 */
1160
static void load_balance(runqueue_t *this_rq, int idle, cpumask_t cpumask)
Ingo Molnar's avatar
Ingo Molnar committed
1161 1162 1163 1164
{
	int imbalance, idx, this_cpu = smp_processor_id();
	runqueue_t *busiest;
	prio_array_t *array;
Rusty Russell's avatar
Rusty Russell committed
1165
	struct list_head *head, *curr;
Ingo Molnar's avatar
Ingo Molnar committed
1166 1167
	task_t *tmp;

1168
	busiest = find_busiest_queue(this_rq, this_cpu, idle, &imbalance, cpumask);
Ingo Molnar's avatar
Ingo Molnar committed
1169 1170
	if (!busiest)
		goto out;
Linus Torvalds's avatar
Linus Torvalds committed
1171

1172 1173 1174 1175 1176 1177
	/*
	 * We only want to steal a number of tasks equal to 1/2 the imbalance,
	 * otherwise we'll just shift the imbalance to the new queue:
	 */
	imbalance /= 2;

Linus Torvalds's avatar
Linus Torvalds committed
1178
	/*
Linus Torvalds's avatar
Linus Torvalds committed
1179 1180 1181
	 * We first consider expired tasks. Those will likely not be
	 * executed in the near future, and they are most likely to
	 * be cache-cold, thus switching CPUs has the least effect
Linus Torvalds's avatar
Linus Torvalds committed
1182
	 * on them.
Linus Torvalds's avatar
Linus Torvalds committed
1183
	 */
Linus Torvalds's avatar
Linus Torvalds committed
1184 1185 1186 1187
	if (busiest->expired->nr_active)
		array = busiest->expired;
	else
		array = busiest->active;
Linus Torvalds's avatar
Linus Torvalds committed
1188

Linus Torvalds's avatar
Linus Torvalds committed
1189
new_array:
Ingo Molnar's avatar
Ingo Molnar committed
1190 1191
	/* Start searching at priority 0: */
	idx = 0;
Linus Torvalds's avatar
Linus Torvalds committed
1192
skip_bitmap:
Ingo Molnar's avatar
Ingo Molnar committed
1193 1194 1195 1196
	if (!idx)
		idx = sched_find_first_bit(array->bitmap);
	else
		idx = find_next_bit(array->bitmap, MAX_PRIO, idx);
1197
	if (idx >= MAX_PRIO) {
Linus Torvalds's avatar
Linus Torvalds committed
1198 1199 1200
		if (array == busiest->expired) {
			array = busiest->active;
			goto new_array;
Linus Torvalds's avatar
Linus Torvalds committed
1201
		}
Linus Torvalds's avatar
Linus Torvalds committed
1202
		goto out_unlock;
Linus Torvalds's avatar
Linus Torvalds committed
1203 1204
	}

Linus Torvalds's avatar
Linus Torvalds committed
1205
	head = array->queue + idx;
Linus Torvalds's avatar
Linus Torvalds committed
1206
	curr = head->prev;
Linus Torvalds's avatar
Linus Torvalds committed
1207 1208
skip_queue:
	tmp = list_entry(curr, task_t, run_list);
Linus Torvalds's avatar
Linus Torvalds committed
1209 1210 1211 1212 1213 1214 1215 1216

	/*
	 * We do not migrate tasks that are:
	 * 1) running (obviously), or
	 * 2) cannot be migrated to this CPU due to cpus_allowed, or
	 * 3) are cache-hot on their current CPU.
	 */

Ingo Molnar's avatar
Ingo Molnar committed
1217 1218
	curr = curr->prev;

1219
	if (!can_migrate_task(tmp, busiest, this_cpu, idle)) {
Linus Torvalds's avatar
Linus Torvalds committed
1220 1221 1222 1223
		if (curr != head)
			goto skip_queue;
		idx++;
		goto skip_bitmap;
Linus Torvalds's avatar
Linus Torvalds committed
1224
	}
Ingo Molnar's avatar
Ingo Molnar committed
1225
	pull_task(busiest, array, tmp, this_rq, this_cpu);
Linus Torvalds's avatar
Linus Torvalds committed
1226
	if (!idle && --imbalance) {
Ingo Molnar's avatar
Ingo Molnar committed
1227 1228 1229 1230
		if (curr != head)
			goto skip_queue;
		idx++;
		goto skip_bitmap;
Linus Torvalds's avatar
Linus Torvalds committed
1231
	}
Linus Torvalds's avatar
Linus Torvalds committed
1232 1233
out_unlock:
	spin_unlock(&busiest->lock);
Ingo Molnar's avatar
Ingo Molnar committed
1234 1235
out:
	;
Linus Torvalds's avatar
Linus Torvalds committed
1236
}
Linus Torvalds's avatar
Linus Torvalds committed
1237

Linus Torvalds's avatar
Linus Torvalds committed
1238
/*
Ingo Molnar's avatar
Ingo Molnar committed
1239 1240
 * One of the idle_cpu_tick() and busy_cpu_tick() functions will
 * get called every timer tick, on every CPU. Our balancing action
Linus Torvalds's avatar
Linus Torvalds committed
1241 1242 1243
 * frequency and balancing agressivity depends on whether the CPU is
 * idle or not.
 *
1244
 * busy-rebalance every 200 msecs. idle-rebalance every 1 msec. (or on
Linus Torvalds's avatar
Linus Torvalds committed
1245
 * systems with HZ=100, every 10 msecs.)
1246 1247
 *
 * On NUMA, do a node-rebalance every 400 msecs.
Linus Torvalds's avatar
Linus Torvalds committed
1248 1249
 */
#define IDLE_REBALANCE_TICK (HZ/1000 ?: 1)
1250 1251
#define BUSY_REBALANCE_TICK (HZ/5 ?: 1)
#define IDLE_NODE_REBALANCE_TICK (IDLE_REBALANCE_TICK * 5)
1252
#define BUSY_NODE_REBALANCE_TICK (BUSY_REBALANCE_TICK * 2)
Linus Torvalds's avatar
Linus Torvalds committed
1253

1254
#ifdef CONFIG_NUMA
1255
static void balance_node(runqueue_t *this_rq, int idle, int this_cpu)
Linus Torvalds's avatar
Linus Torvalds committed
1256
{
1257 1258 1259
	int node = find_busiest_node(cpu_to_node(this_cpu));

	if (node >= 0) {
1260 1261
		cpumask_t cpumask = node_to_cpumask(node);
		cpu_set(this_cpu, cpumask);
1262 1263 1264 1265
		spin_lock(&this_rq->lock);
		load_balance(this_rq, idle, cpumask);
		spin_unlock(&this_rq->lock);
	}
Linus Torvalds's avatar
Linus Torvalds committed
1266
}
1267 1268 1269 1270
#endif

static void rebalance_tick(runqueue_t *this_rq, int idle)
{
1271
#ifdef CONFIG_NUMA
1272 1273 1274
	int this_cpu = smp_processor_id();
#endif
	unsigned long j = jiffies;
Linus Torvalds's avatar
Linus Torvalds committed
1275

1276 1277 1278 1279 1280 1281 1282 1283 1284
	/*
	 * First do inter-node rebalancing, then intra-node rebalancing,
	 * if both events happen in the same tick. The inter-node
	 * rebalancing does not necessarily have to create a perfect
	 * balance within the node, since we load-balance the most loaded
	 * node with the current CPU. (ie. other CPUs in the local node
	 * are not balanced.)
	 */
	if (idle) {
1285
#ifdef CONFIG_NUMA
1286 1287 1288 1289 1290
		if (!(j % IDLE_NODE_REBALANCE_TICK))
			balance_node(this_rq, idle, this_cpu);
#endif
		if (!(j % IDLE_REBALANCE_TICK)) {
			spin_lock(&this_rq->lock);
1291
			load_balance(this_rq, idle, cpu_to_node_mask(this_cpu));
1292 1293 1294 1295
			spin_unlock(&this_rq->lock);
		}
		return;
	}
1296
#ifdef CONFIG_NUMA
1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312
	if (!(j % BUSY_NODE_REBALANCE_TICK))
		balance_node(this_rq, idle, this_cpu);
#endif
	if (!(j % BUSY_REBALANCE_TICK)) {
		spin_lock(&this_rq->lock);
		load_balance(this_rq, idle, cpu_to_node_mask(this_cpu));
		spin_unlock(&this_rq->lock);
	}
}
#else
/*
 * on UP we do not need to balance between CPUs:
 */
static inline void rebalance_tick(runqueue_t *this_rq, int idle)
{
}
Linus Torvalds's avatar
Linus Torvalds committed
1313 1314
#endif

1315
DEFINE_PER_CPU(struct kernel_stat, kstat) = { { 0 } };
1316

Kai Germaschewski's avatar
Kai Germaschewski committed
1317 1318
EXPORT_PER_CPU_SYMBOL(kstat);

Ingo Molnar's avatar
Ingo Molnar committed
1319 1320 1321 1322 1323 1324 1325 1326 1327 1328
/*
 * We place interactive tasks back into the active array, if possible.
 *
 * To guarantee that this does not starve expired tasks we ignore the
 * interactivity of a task if the first expired task had to wait more
 * than a 'reasonable' amount of time. This deadline timeout is
 * load-dependent, as the frequency of array switched decreases with
 * increasing number of running tasks:
 */
#define EXPIRED_STARVING(rq) \
1329
		(STARVATION_LIMIT && ((rq)->expired_timestamp && \
Ingo Molnar's avatar
Ingo Molnar committed
1330
		(jiffies - (rq)->expired_timestamp >= \
1331
			STARVATION_LIMIT * ((rq)->nr_running) + 1)))
Ingo Molnar's avatar
Ingo Molnar committed
1332

Linus Torvalds's avatar
Linus Torvalds committed
1333 1334 1335
/*
 * This function gets called by the timer code, with HZ frequency.
 * We call it with interrupts disabled.
Ingo Molnar's avatar
Ingo Molnar committed
1336 1337 1338
 *
 * It also gets called by the fork code, when changing the parent's
 * timeslices.
Linus Torvalds's avatar
Linus Torvalds committed
1339
 */
Ingo Molnar's avatar
Ingo Molnar committed
1340
void scheduler_tick(int user_ticks, int sys_ticks)
Linus Torvalds's avatar
Linus Torvalds committed
1341
{
Ingo Molnar's avatar
Ingo Molnar committed
1342
	int cpu = smp_processor_id();
1343
	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
Linus Torvalds's avatar
Linus Torvalds committed
1344
	runqueue_t *rq = this_rq();
Ingo Molnar's avatar
Ingo Molnar committed
1345
	task_t *p = current;
Linus Torvalds's avatar
Linus Torvalds committed
1346

Ingo Molnar's avatar
Ingo Molnar committed
1347 1348
	if (rcu_pending(cpu))
		rcu_check_callbacks(cpu, user_ticks);
1349

1350 1351 1352 1353 1354 1355 1356 1357 1358
	/* note: this timer irq context must be accounted for as well */
	if (hardirq_count() - HARDIRQ_OFFSET) {
		cpustat->irq += sys_ticks;
		sys_ticks = 0;
	} else if (softirq_count()) {
		cpustat->softirq += sys_ticks;
		sys_ticks = 0;
	}

Ingo Molnar's avatar
Ingo Molnar committed
1359
	if (p == rq->idle) {
1360
		if (atomic_read(&rq->nr_iowait) > 0)
1361
			cpustat->iowait += sys_ticks;
1362
		else
1363
			cpustat->idle += sys_ticks;
1364
		rebalance_tick(rq, 1);
Ingo Molnar's avatar
Ingo Molnar committed
1365 1366 1367
		return;
	}
	if (TASK_NICE(p) > 0)
1368
		cpustat->nice += user_ticks;
Ingo Molnar's avatar
Ingo Molnar committed
1369
	else
1370 1371
		cpustat->user += user_ticks;
	cpustat->system += sys_ticks;
Ingo Molnar's avatar
Ingo Molnar committed
1372

Linus Torvalds's avatar
Linus Torvalds committed
1373
	/* Task might have expired already, but not scheduled off yet */
Linus Torvalds's avatar
Linus Torvalds committed
1374
	if (p->array != rq->active) {
1375
		set_tsk_need_resched(p);
1376
		goto out;
Linus Torvalds's avatar
Linus Torvalds committed
1377
	}
Linus Torvalds's avatar
Linus Torvalds committed
1378
	spin_lock(&rq->lock);
1379 1380
	/*
	 * The task was running during this tick - update the
1381 1382 1383 1384
	 * time slice counter. Note: we do not update a thread's
	 * priority until it either goes to sleep or uses up its
	 * timeslice. This makes it possible for interactive tasks
	 * to use up their timeslices at their highest priority levels.
1385
	 */
Linus Torvalds's avatar
Linus Torvalds committed
1386 1387 1388 1389 1390 1391
	if (unlikely(rt_task(p))) {
		/*
		 * RR tasks need a special form of timeslice management.
		 * FIFO tasks have no timeslices.
		 */
		if ((p->policy == SCHED_RR) && !--p->time_slice) {
Ingo Molnar's avatar
Ingo Molnar committed
1392 1393
			p->time_slice = task_timeslice(p);
			p->first_time_slice = 0;
1394
			set_tsk_need_resched(p);
Linus Torvalds's avatar
Linus Torvalds committed
1395 1396 1397 1398 1399

			/* put it at the end of the queue: */
			dequeue_task(p, rq->active);
			enqueue_task(p, rq->active);
		}
1400
		goto out_unlock;
Linus Torvalds's avatar
Linus Torvalds committed
1401 1402
	}
	if (!--p->time_slice) {
Linus Torvalds's avatar
Linus Torvalds committed
1403
		dequeue_task(p, rq->active);
1404
		set_tsk_need_resched(p);
Linus Torvalds's avatar
Linus Torvalds committed
1405
		p->prio = effective_prio(p);
Ingo Molnar's avatar
Ingo Molnar committed
1406 1407
		p->time_slice = task_timeslice(p);
		p->first_time_slice = 0;
Linus Torvalds's avatar
Linus Torvalds committed
1408

1409 1410
		if (!rq->expired_timestamp)
			rq->expired_timestamp = jiffies;
Linus Torvalds's avatar
Linus Torvalds committed
1411 1412 1413 1414
		if (!TASK_INTERACTIVE(p) || EXPIRED_STARVING(rq)) {
			enqueue_task(p, rq->expired);
		} else
			enqueue_task(p, rq->active);
1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427
	} else {
		/*
		 * Prevent a too long timeslice allowing a task to monopolize
		 * the CPU. We do this by splitting up the timeslice into
		 * smaller pieces.
		 *
		 * Note: this does not mean the task's timeslices expire or
		 * get lost in any way, they just might be preempted by
		 * another task of equal priority. (one with higher
		 * priority would have preempted this task already.) We
		 * requeue this task to the end of the list on this priority
		 * level, which is in essence a round-robin of tasks with
		 * equal priority.
1428 1429 1430
		 *
		 * This only applies to tasks in the interactive
		 * delta range with at least TIMESLICE_GRANULARITY to requeue.
1431
		 */
1432 1433 1434 1435 1436
		if (TASK_INTERACTIVE(p) && !((task_timeslice(p) -
			p->time_slice) % TIMESLICE_GRANULARITY(p)) &&
			(p->time_slice >= TIMESLICE_GRANULARITY(p)) &&
			(p->array == rq->active)) {

1437 1438 1439 1440 1441
			dequeue_task(p, rq->active);
			set_tsk_need_resched(p);
			p->prio = effective_prio(p);
			enqueue_task(p, rq->active);
		}
Linus Torvalds's avatar
Linus Torvalds committed
1442
	}
1443
out_unlock:
Linus Torvalds's avatar
Linus Torvalds committed
1444
	spin_unlock(&rq->lock);
1445
out:
1446
	rebalance_tick(rq, 0);
Linus Torvalds's avatar
Linus Torvalds committed
1447
}
Linus Torvalds's avatar
Linus Torvalds committed
1448

Linus Torvalds's avatar
Linus Torvalds committed
1449 1450 1451
void scheduling_functions_start_here(void) { }

/*
Ingo Molnar's avatar
Ingo Molnar committed
1452
 * schedule() is the main scheduler function.
Linus Torvalds's avatar
Linus Torvalds committed
1453 1454 1455
 */
asmlinkage void schedule(void)
{
1456 1457
	task_t *prev, *next;
	runqueue_t *rq;
Linus Torvalds's avatar
Linus Torvalds committed
1458
	prio_array_t *array;
Rusty Russell's avatar
Rusty Russell committed
1459
	struct list_head *queue;
1460 1461
	unsigned long long now;
	unsigned long run_time;
Linus Torvalds's avatar
Linus Torvalds committed
1462 1463
	int idx;

1464 1465 1466 1467 1468
	/*
	 * Test if we are atomic.  Since do_exit() needs to call into
	 * schedule() atomically, we ignore that path for now.
	 * Otherwise, whine if we are scheduling when we should not be.
	 */
1469
	if (likely(!(current->state & (TASK_DEAD | TASK_ZOMBIE)))) {
1470 1471 1472 1473 1474
		if (unlikely(in_atomic())) {
			printk(KERN_ERR "bad: scheduling while atomic!\n");
			dump_stack();
		}
	}
1475

1476
need_resched:
1477 1478 1479
	preempt_disable();
	prev = current;
	rq = this_rq();
Ingo Molnar's avatar
Ingo Molnar committed
1480

1481
	release_kernel_lock(prev);
1482
	now = sched_clock();
1483
	if (likely(now - prev->timestamp < NS_MAX_SLEEP_AVG))
1484 1485
		run_time = now - prev->timestamp;
	else
1486 1487 1488 1489 1490 1491 1492 1493 1494 1495
		run_time = NS_MAX_SLEEP_AVG;

	/*
	 * Tasks with interactive credits get charged less run_time
	 * at high sleep_avg to delay them losing their interactive
	 * status
	 */
	if (HIGH_CREDIT(prev))
		run_time /= (CURRENT_BONUS(prev) ? : 1);

Linus Torvalds's avatar
Linus Torvalds committed
1496 1497
	spin_lock_irq(&rq->lock);

1498
	/*
1499
	 * if entering off of a kernel preemption go straight
1500
	 * to picking the next task.
1501
	 */
1502
	if (unlikely(preempt_count() & PREEMPT_ACTIVE))
1503
		goto pick_next_task;
Robert Love's avatar
Robert Love committed
1504

Linus Torvalds's avatar
Linus Torvalds committed
1505
	switch (prev->state) {
Linus Torvalds's avatar
Linus Torvalds committed
1506 1507 1508 1509 1510 1511 1512
	case TASK_INTERRUPTIBLE:
		if (unlikely(signal_pending(prev))) {
			prev->state = TASK_RUNNING;
			break;
		}
	default:
		deactivate_task(prev, rq);
1513 1514
		prev->nvcsw++;
		break;
Ingo Molnar's avatar
Ingo Molnar committed
1515
	case TASK_RUNNING:
1516
		prev->nivcsw++;
Linus Torvalds's avatar
Linus Torvalds committed
1517 1518 1519
	}
pick_next_task:
	if (unlikely(!rq->nr_running)) {
1520
#ifdef CONFIG_SMP
1521
		load_balance(rq, 1, cpu_to_node_mask(smp_processor_id()));
Linus Torvalds's avatar
Linus Torvalds committed
1522 1523
		if (rq->nr_running)
			goto pick_next_task;
Linus Torvalds's avatar
Linus Torvalds committed
1524
#endif
Linus Torvalds's avatar
Linus Torvalds committed
1525
		next = rq->idle;
Linus Torvalds's avatar
Linus Torvalds committed
1526
		rq->expired_timestamp = 0;
Linus Torvalds's avatar
Linus Torvalds committed
1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537
		goto switch_tasks;
	}

	array = rq->active;
	if (unlikely(!array->nr_active)) {
		/*
		 * Switch the active and expired arrays.
		 */
		rq->active = rq->expired;
		rq->expired = array;
		array = rq->active;
Linus Torvalds's avatar
Linus Torvalds committed
1538
		rq->expired_timestamp = 0;
Linus Torvalds's avatar
Linus Torvalds committed
1539 1540
	}

Linus Torvalds's avatar
Linus Torvalds committed
1541
	idx = sched_find_first_bit(array->bitmap);
Linus Torvalds's avatar
Linus Torvalds committed
1542 1543 1544
	queue = array->queue + idx;
	next = list_entry(queue->next, task_t, run_list);

1545
	if (next->activated > 0) {
1546 1547 1548 1549 1550 1551 1552 1553 1554 1555
		unsigned long long delta = now - next->timestamp;

		if (next->activated == 1)
			delta = delta * (ON_RUNQUEUE_WEIGHT * 128 / 100) / 128;

		array = next->array;
		dequeue_task(next, array);
		recalc_task_prio(next, next->timestamp + delta);
		enqueue_task(next, array);
	}
1556
	next->activated = 0;
Linus Torvalds's avatar
Linus Torvalds committed
1557
switch_tasks:
Linus Torvalds's avatar
Linus Torvalds committed
1558
	prefetch(next);
1559
	clear_tsk_need_resched(prev);
1560
	RCU_qsctr(task_cpu(prev))++;
Linus Torvalds's avatar
Linus Torvalds committed
1561

1562
	prev->sleep_avg -= run_time;
1563
	if ((long)prev->sleep_avg <= 0){
1564
		prev->sleep_avg = 0;
1565 1566 1567
		if (!(HIGH_CREDIT(prev) || LOW_CREDIT(prev)))
			prev->interactive_credit--;
	}
1568 1569
	prev->timestamp = now;

Linus Torvalds's avatar
Linus Torvalds committed
1570
	if (likely(prev != next)) {
1571
		next->timestamp = now;
Linus Torvalds's avatar
Linus Torvalds committed
1572 1573
		rq->nr_switches++;
		rq->curr = next;
Ingo Molnar's avatar
Ingo Molnar committed
1574

Ingo Molnar's avatar
Ingo Molnar committed
1575
		prepare_arch_switch(rq, next);
1576
		prev = context_switch(rq, prev, next);
1577
		barrier();
1578 1579

		finish_task_switch(prev);
1580 1581
	} else
		spin_unlock_irq(&rq->lock);
Linus Torvalds's avatar
Linus Torvalds committed
1582 1583

	reacquire_kernel_lock(current);
1584
	preempt_enable_no_resched();
1585 1586
	if (test_thread_flag(TIF_NEED_RESCHED))
		goto need_resched;
Linus Torvalds's avatar
Linus Torvalds committed
1587 1588
}

1589 1590
#ifdef CONFIG_PREEMPT
/*
1591 1592 1593
 * this is is the entry point to schedule() from in-kernel preemption
 * off of preempt_enable.  Kernel preemptions off return from interrupt
 * occur there and call schedule directly.
1594 1595 1596
 */
asmlinkage void preempt_schedule(void)
{
1597 1598
	struct thread_info *ti = current_thread_info();

1599 1600 1601 1602 1603
	/*
	 * If there is a non-zero preempt_count or interrupts are disabled,
	 * we do not want to preempt the current task.  Just return..
	 */
	if (unlikely(ti->preempt_count || irqs_disabled()))
1604
		return;
1605

1606
need_resched:
1607
	ti->preempt_count = PREEMPT_ACTIVE;
1608
	schedule();
1609
	ti->preempt_count = 0;
1610

1611
	/* we could miss a preemption opportunity between schedule and now */
1612 1613 1614
	barrier();
	if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
		goto need_resched;
1615 1616 1617
}
#endif /* CONFIG_PREEMPT */

1618 1619 1620
int default_wake_function(wait_queue_t *curr, unsigned mode, int sync)
{
	task_t *p = curr->task;
Ingo Molnar's avatar
Ingo Molnar committed
1621
	return try_to_wake_up(p, mode, sync, 0);
1622 1623
}

Linus Torvalds's avatar
Linus Torvalds committed
1624
/*
Linus Torvalds's avatar
Linus Torvalds committed
1625 1626 1627
 * The core wakeup function.  Non-exclusive wakeups (nr_exclusive == 0) just
 * wake everything up.  If it's an exclusive wakeup (nr_exclusive == small +ve
 * number) then we wake all the non-exclusive tasks and one exclusive task.
Linus Torvalds's avatar
Linus Torvalds committed
1628 1629
 *
 * There are circumstances in which we can try to wake a task which has already
Linus Torvalds's avatar
Linus Torvalds committed
1630 1631
 * started to run but is not in state TASK_RUNNING.  try_to_wake_up() returns
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
Linus Torvalds's avatar
Linus Torvalds committed
1632
 */
1633
static void __wake_up_common(wait_queue_head_t *q, unsigned int mode, int nr_exclusive, int sync)
Linus Torvalds's avatar
Linus Torvalds committed
1634
{
1635
	struct list_head *tmp, *next;
Linus Torvalds's avatar
Linus Torvalds committed
1636

1637 1638 1639
	list_for_each_safe(tmp, next, &q->task_list) {
		wait_queue_t *curr;
		unsigned flags;
1640
		curr = list_entry(tmp, wait_queue_t, task_list);
1641 1642 1643 1644 1645
		flags = curr->flags;
		if (curr->func(curr, mode, sync) &&
		    (flags & WQ_FLAG_EXCLUSIVE) &&
		    !--nr_exclusive)
			break;
Linus Torvalds's avatar
Linus Torvalds committed
1646 1647 1648
	}
}

Ingo Molnar's avatar
Ingo Molnar committed
1649 1650 1651 1652 1653 1654
/**
 * __wake_up - wake up threads blocked on a waitqueue.
 * @q: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 */
1655
void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
Linus Torvalds's avatar
Linus Torvalds committed
1656
{
1657
	unsigned long flags;
Linus Torvalds's avatar
Linus Torvalds committed
1658

Robert Love's avatar
Robert Love committed
1659
	spin_lock_irqsave(&q->lock, flags);
1660
	__wake_up_common(q, mode, nr_exclusive, 0);
Robert Love's avatar
Robert Love committed
1661
	spin_unlock_irqrestore(&q->lock, flags);
Linus Torvalds's avatar
Linus Torvalds committed
1662 1663
}

1664 1665 1666 1667 1668 1669 1670 1671
/*
 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
 */
void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
{
	__wake_up_common(q, mode, 1, 0);
}

Ingo Molnar's avatar
Ingo Molnar committed
1672 1673 1674 1675 1676 1677 1678 1679 1680 1681
/**
 * __wake_up - sync- wake up threads blocked on a waitqueue.
 * @q: the waitqueue
 * @mode: which threads
 * @nr_exclusive: how many wake-one or wake-many threads to wake up
 *
 * The sync wakeup differs that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - ie. the two threads are 'synchronized'
 * with each other. This can prevent needless bouncing between CPUs.
Ingo Molnar's avatar
Ingo Molnar committed
1682 1683
 *
 * On UP it can prevent extra preemption.
Ingo Molnar's avatar
Ingo Molnar committed
1684
 */
1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699
void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
{
	unsigned long flags;

	if (unlikely(!q))
		return;

	spin_lock_irqsave(&q->lock, flags);
	if (likely(nr_exclusive))
		__wake_up_common(q, mode, nr_exclusive, 1);
	else
		__wake_up_common(q, mode, nr_exclusive, 0);
	spin_unlock_irqrestore(&q->lock, flags);
}

Linus Torvalds's avatar
Linus Torvalds committed
1700 1701 1702 1703 1704 1705
void complete(struct completion *x)
{
	unsigned long flags;

	spin_lock_irqsave(&x->wait.lock, flags);
	x->done++;
1706
	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, 0);
Linus Torvalds's avatar
Linus Torvalds committed
1707 1708 1709
	spin_unlock_irqrestore(&x->wait.lock, flags);
}

Ingo Molnar's avatar
Ingo Molnar committed
1710 1711 1712 1713 1714 1715 1716 1717 1718 1719
void complete_all(struct completion *x)
{
	unsigned long flags;

	spin_lock_irqsave(&x->wait.lock, flags);
	x->done += UINT_MAX/2;
	__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0, 0);
	spin_unlock_irqrestore(&x->wait.lock, flags);
}

Linus Torvalds's avatar
Linus Torvalds committed
1720 1721
void wait_for_completion(struct completion *x)
{
1722
	might_sleep();
Linus Torvalds's avatar
Linus Torvalds committed
1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740
	spin_lock_irq(&x->wait.lock);
	if (!x->done) {
		DECLARE_WAITQUEUE(wait, current);

		wait.flags |= WQ_FLAG_EXCLUSIVE;
		__add_wait_queue_tail(&x->wait, &wait);
		do {
			__set_current_state(TASK_UNINTERRUPTIBLE);
			spin_unlock_irq(&x->wait.lock);
			schedule();
			spin_lock_irq(&x->wait.lock);
		} while (!x->done);
		__remove_wait_queue(&x->wait, &wait);
	}
	x->done--;
	spin_unlock_irq(&x->wait.lock);
}

Linus Torvalds's avatar
Linus Torvalds committed
1741 1742 1743 1744 1745
#define	SLEEP_ON_VAR				\
	unsigned long flags;			\
	wait_queue_t wait;			\
	init_waitqueue_entry(&wait, current);

1746
#define SLEEP_ON_HEAD					\
Robert Love's avatar
Robert Love committed
1747
	spin_lock_irqsave(&q->lock,flags);		\
Linus Torvalds's avatar
Linus Torvalds committed
1748
	__add_wait_queue(q, &wait);			\
Robert Love's avatar
Robert Love committed
1749
	spin_unlock(&q->lock);
Linus Torvalds's avatar
Linus Torvalds committed
1750 1751

#define	SLEEP_ON_TAIL						\
Robert Love's avatar
Robert Love committed
1752
	spin_lock_irq(&q->lock);				\
Linus Torvalds's avatar
Linus Torvalds committed
1753
	__remove_wait_queue(q, &wait);				\
Robert Love's avatar
Robert Love committed
1754
	spin_unlock_irqrestore(&q->lock, flags);
Linus Torvalds's avatar
Linus Torvalds committed
1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782

void interruptible_sleep_on(wait_queue_head_t *q)
{
	SLEEP_ON_VAR

	current->state = TASK_INTERRUPTIBLE;

	SLEEP_ON_HEAD
	schedule();
	SLEEP_ON_TAIL
}

long interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
{
	SLEEP_ON_VAR

	current->state = TASK_INTERRUPTIBLE;

	SLEEP_ON_HEAD
	timeout = schedule_timeout(timeout);
	SLEEP_ON_TAIL

	return timeout;
}

void sleep_on(wait_queue_head_t *q)
{
	SLEEP_ON_VAR
Ingo Molnar's avatar
Ingo Molnar committed
1783

Linus Torvalds's avatar
Linus Torvalds committed
1784 1785 1786 1787 1788 1789 1790 1791 1792 1793
	current->state = TASK_UNINTERRUPTIBLE;

	SLEEP_ON_HEAD
	schedule();
	SLEEP_ON_TAIL
}

long sleep_on_timeout(wait_queue_head_t *q, long timeout)
{
	SLEEP_ON_VAR
Ingo Molnar's avatar
Ingo Molnar committed
1794

Linus Torvalds's avatar
Linus Torvalds committed
1795 1796 1797 1798 1799 1800 1801 1802 1803
	current->state = TASK_UNINTERRUPTIBLE;

	SLEEP_ON_HEAD
	timeout = schedule_timeout(timeout);
	SLEEP_ON_TAIL

	return timeout;
}

1804 1805
void scheduling_functions_end_here(void) { }

Linus Torvalds's avatar
Linus Torvalds committed
1806 1807 1808 1809 1810
void set_user_nice(task_t *p, long nice)
{
	unsigned long flags;
	prio_array_t *array;
	runqueue_t *rq;
1811
	int old_prio, new_prio, delta;
Linus Torvalds's avatar
Linus Torvalds committed
1812

Ingo Molnar's avatar
Ingo Molnar committed
1813
	if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
Linus Torvalds's avatar
Linus Torvalds committed
1814 1815 1816 1817 1818
		return;
	/*
	 * We have to be careful, if called from sys_setpriority(),
	 * the task might be in the middle of scheduling on another CPU.
	 */
1819
	rq = task_rq_lock(p, &flags);
1820 1821 1822 1823 1824 1825
	/*
	 * The RT priorities are set via setscheduler(), but we still
	 * allow the 'normal' nice value to be set - but as expected
	 * it wont have any effect on scheduling until the task is
	 * not SCHED_NORMAL:
	 */
Linus Torvalds's avatar
Linus Torvalds committed
1826
	if (rt_task(p)) {
Ingo Molnar's avatar
Ingo Molnar committed
1827
		p->static_prio = NICE_TO_PRIO(nice);
Linus Torvalds's avatar
Linus Torvalds committed
1828 1829 1830
		goto out_unlock;
	}
	array = p->array;
Linus Torvalds's avatar
Linus Torvalds committed
1831
	if (array)
Linus Torvalds's avatar
Linus Torvalds committed
1832
		dequeue_task(p, array);
1833 1834 1835 1836

	old_prio = p->prio;
	new_prio = NICE_TO_PRIO(nice);
	delta = new_prio - old_prio;
Ingo Molnar's avatar
Ingo Molnar committed
1837
	p->static_prio = NICE_TO_PRIO(nice);
1838 1839
	p->prio += delta;

Linus Torvalds's avatar
Linus Torvalds committed
1840 1841 1842
	if (array) {
		enqueue_task(p, array);
		/*
1843 1844
		 * If the task increased its priority or is running and
		 * lowered its priority, then reschedule its CPU:
Linus Torvalds's avatar
Linus Torvalds committed
1845
		 */
1846
		if (delta < 0 || (delta > 0 && task_running(rq, p)))
Linus Torvalds's avatar
Linus Torvalds committed
1847 1848 1849
			resched_task(rq->curr);
	}
out_unlock:
1850
	task_rq_unlock(rq, &flags);
Linus Torvalds's avatar
Linus Torvalds committed
1851 1852
}

Linus Torvalds's avatar
Linus Torvalds committed
1853 1854 1855
#ifndef __alpha__

/*
Ingo Molnar's avatar
Ingo Molnar committed
1856 1857 1858 1859 1860
 * sys_nice - change the priority of the current process.
 * @increment: priority increment
 *
 * sys_setpriority is a more generic, but much slower function that
 * does similar things.
Linus Torvalds's avatar
Linus Torvalds committed
1861 1862 1863
 */
asmlinkage long sys_nice(int increment)
{
1864
	int retval;
Linus Torvalds's avatar
Linus Torvalds committed
1865
	long nice;
Linus Torvalds's avatar
Linus Torvalds committed
1866 1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880

	/*
	 *	Setpriority might change our priority at the same moment.
	 *	We don't have to worry. Conceptually one call occurs first
	 *	and we have a single winner.
	 */
	if (increment < 0) {
		if (!capable(CAP_SYS_NICE))
			return -EPERM;
		if (increment < -40)
			increment = -40;
	}
	if (increment > 40)
		increment = 40;

Ingo Molnar's avatar
Ingo Molnar committed
1881
	nice = PRIO_TO_NICE(current->static_prio) + increment;
Linus Torvalds's avatar
Linus Torvalds committed
1882 1883 1884 1885
	if (nice < -20)
		nice = -20;
	if (nice > 19)
		nice = 19;
1886

1887 1888
	retval = security_task_setnice(current, nice);
	if (retval)
1889 1890
		return retval;

Linus Torvalds's avatar
Linus Torvalds committed
1891
	set_user_nice(current, nice);
Linus Torvalds's avatar
Linus Torvalds committed
1892 1893 1894 1895 1896
	return 0;
}

#endif

Ingo Molnar's avatar
Ingo Molnar committed
1897 1898 1899
/**
 * task_prio - return the priority value of a given task.
 * @p: the task in question.
Ingo Molnar's avatar
Ingo Molnar committed
1900
 *
Ingo Molnar's avatar
Ingo Molnar committed
1901
 * This is the priority value as seen by users in /proc.
Ingo Molnar's avatar
Ingo Molnar committed
1902 1903 1904 1905 1906
 * RT tasks are offset by -200. Normal tasks are centered
 * around 0, value goes from -16 to +15.
 */
int task_prio(task_t *p)
{
1907
	return p->prio - MAX_RT_PRIO;
Ingo Molnar's avatar
Ingo Molnar committed
1908 1909
}

Ingo Molnar's avatar
Ingo Molnar committed
1910 1911 1912 1913
/**
 * task_nice - return the nice value of a given task.
 * @p: the task in question.
 */
Ingo Molnar's avatar
Ingo Molnar committed
1914 1915 1916 1917 1918
int task_nice(task_t *p)
{
	return TASK_NICE(p);
}

Ingo Molnar's avatar
Ingo Molnar committed
1919 1920 1921 1922 1923 1924 1925 1926 1927
/**
 * task_curr - is this task currently executing on a CPU?
 * @p: the task in question.
 */
int task_curr(task_t *p)
{
	return cpu_curr(task_cpu(p)) == p;
}

Ingo Molnar's avatar
Ingo Molnar committed
1928 1929 1930 1931
/**
 * idle_cpu - is a given cpu idle currently?
 * @cpu: the processor in question.
 */
Ingo Molnar's avatar
Ingo Molnar committed
1932 1933 1934 1935 1936
int idle_cpu(int cpu)
{
	return cpu_curr(cpu) == cpu_rq(cpu)->idle;
}

Ingo Molnar's avatar
Ingo Molnar committed
1937 1938 1939 1940
/**
 * find_process_by_pid - find a process with a matching PID value.
 * @pid: the pid in question.
 */
Linus Torvalds's avatar
Linus Torvalds committed
1941
static inline task_t *find_process_by_pid(pid_t pid)
Linus Torvalds's avatar
Linus Torvalds committed
1942
{
Linus Torvalds's avatar
Linus Torvalds committed
1943
	return pid ? find_task_by_pid(pid) : current;
Linus Torvalds's avatar
Linus Torvalds committed
1944 1945
}

Ingo Molnar's avatar
Ingo Molnar committed
1946 1947 1948
/*
 * setscheduler - change the scheduling policy and/or RT priority of a thread.
 */
1949
static int setscheduler(pid_t pid, int policy, struct sched_param __user *param)
Linus Torvalds's avatar
Linus Torvalds committed
1950 1951
{
	struct sched_param lp;
1952
	int retval = -EINVAL;
1953
	int oldprio;
Linus Torvalds's avatar
Linus Torvalds committed
1954 1955 1956 1957
	prio_array_t *array;
	unsigned long flags;
	runqueue_t *rq;
	task_t *p;
Linus Torvalds's avatar
Linus Torvalds committed
1958 1959 1960 1961 1962 1963 1964 1965 1966 1967 1968 1969 1970 1971 1972 1973 1974

	if (!param || pid < 0)
		goto out_nounlock;

	retval = -EFAULT;
	if (copy_from_user(&lp, param, sizeof(struct sched_param)))
		goto out_nounlock;

	/*
	 * We play safe to avoid deadlocks.
	 */
	read_lock_irq(&tasklist_lock);

	p = find_process_by_pid(pid);

	retval = -ESRCH;
	if (!p)
Linus Torvalds's avatar
Linus Torvalds committed
1975 1976 1977 1978 1979 1980
		goto out_unlock_tasklist;

	/*
	 * To be able to change p->policy safely, the apropriate
	 * runqueue lock must be held.
	 */
1981
	rq = task_rq_lock(p, &flags);
Linus Torvalds's avatar
Linus Torvalds committed
1982

Linus Torvalds's avatar
Linus Torvalds committed
1983 1984 1985 1986 1987
	if (policy < 0)
		policy = p->policy;
	else {
		retval = -EINVAL;
		if (policy != SCHED_FIFO && policy != SCHED_RR &&
Ingo Molnar's avatar
Ingo Molnar committed
1988
				policy != SCHED_NORMAL)
Linus Torvalds's avatar
Linus Torvalds committed
1989 1990
			goto out_unlock;
	}
Robert Love's avatar
Robert Love committed
1991

Linus Torvalds's avatar
Linus Torvalds committed
1992
	/*
1993
	 * Valid priorities for SCHED_FIFO and SCHED_RR are
Ingo Molnar's avatar
Ingo Molnar committed
1994
	 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL is 0.
Linus Torvalds's avatar
Linus Torvalds committed
1995 1996
	 */
	retval = -EINVAL;
1997
	if (lp.sched_priority < 0 || lp.sched_priority > MAX_USER_RT_PRIO-1)
Linus Torvalds's avatar
Linus Torvalds committed
1998
		goto out_unlock;
Ingo Molnar's avatar
Ingo Molnar committed
1999
	if ((policy == SCHED_NORMAL) != (lp.sched_priority == 0))
Linus Torvalds's avatar
Linus Torvalds committed
2000 2001 2002
		goto out_unlock;

	retval = -EPERM;
Linus Torvalds's avatar
Linus Torvalds committed
2003
	if ((policy == SCHED_FIFO || policy == SCHED_RR) &&
Linus Torvalds's avatar
Linus Torvalds committed
2004 2005 2006 2007 2008 2009
	    !capable(CAP_SYS_NICE))
		goto out_unlock;
	if ((current->euid != p->euid) && (current->euid != p->uid) &&
	    !capable(CAP_SYS_NICE))
		goto out_unlock;

2010 2011
	retval = security_task_setscheduler(p, policy, &lp);
	if (retval)
2012 2013
		goto out_unlock;

Linus Torvalds's avatar
Linus Torvalds committed
2014 2015 2016
	array = p->array;
	if (array)
		deactivate_task(p, task_rq(p));
Linus Torvalds's avatar
Linus Torvalds committed
2017 2018 2019
	retval = 0;
	p->policy = policy;
	p->rt_priority = lp.sched_priority;
2020
	oldprio = p->prio;
Ingo Molnar's avatar
Ingo Molnar committed
2021
	if (policy != SCHED_NORMAL)
2022
		p->prio = MAX_USER_RT_PRIO-1 - p->rt_priority;
Linus Torvalds's avatar
Linus Torvalds committed
2023
	else
Ingo Molnar's avatar
Ingo Molnar committed
2024
		p->prio = p->static_prio;
2025
	if (array) {
2026
		__activate_task(p, task_rq(p));
2027 2028 2029 2030 2031 2032 2033 2034 2035 2036 2037
		/*
		 * Reschedule if we are currently running on this runqueue and
		 * our priority decreased, or if we are not currently running on
		 * this runqueue and our priority is higher than the current's
		 */
		if (rq->curr == p) {
			if (p->prio > oldprio)
				resched_task(rq->curr);
		} else if (p->prio < rq->curr->prio)
			resched_task(rq->curr);
	}
Linus Torvalds's avatar
Linus Torvalds committed
2038 2039

out_unlock:
2040
	task_rq_unlock(rq, &flags);
Linus Torvalds's avatar
Linus Torvalds committed
2041
out_unlock_tasklist:
Linus Torvalds's avatar
Linus Torvalds committed
2042 2043 2044 2045 2046 2047
	read_unlock_irq(&tasklist_lock);

out_nounlock:
	return retval;
}

Ingo Molnar's avatar
Ingo Molnar committed
2048 2049 2050 2051 2052 2053
/**
 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
 * @pid: the pid in question.
 * @policy: new policy
 * @param: structure containing the new RT priority.
 */
Linus Torvalds's avatar
Linus Torvalds committed
2054
asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
2055
				      struct sched_param __user *param)
Linus Torvalds's avatar
Linus Torvalds committed
2056 2057 2058 2059
{
	return setscheduler(pid, policy, param);
}

Ingo Molnar's avatar
Ingo Molnar committed
2060 2061 2062 2063 2064
/**
 * sys_sched_setparam - set/change the RT priority of a thread
 * @pid: the pid in question.
 * @param: structure containing the new RT priority.
 */
2065
asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param)
Linus Torvalds's avatar
Linus Torvalds committed
2066 2067 2068 2069
{
	return setscheduler(pid, -1, param);
}

Ingo Molnar's avatar
Ingo Molnar committed
2070 2071 2072 2073
/**
 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
 * @pid: the pid in question.
 */
Linus Torvalds's avatar
Linus Torvalds committed
2074 2075
asmlinkage long sys_sched_getscheduler(pid_t pid)
{
2076
	int retval = -EINVAL;
Linus Torvalds's avatar
Linus Torvalds committed
2077
	task_t *p;
Linus Torvalds's avatar
Linus Torvalds committed
2078 2079 2080 2081 2082 2083 2084

	if (pid < 0)
		goto out_nounlock;

	retval = -ESRCH;
	read_lock(&tasklist_lock);
	p = find_process_by_pid(pid);
2085
	if (p) {
2086 2087
		retval = security_task_getscheduler(p);
		if (!retval)
2088 2089
			retval = p->policy;
	}
Linus Torvalds's avatar
Linus Torvalds committed
2090 2091 2092 2093 2094 2095
	read_unlock(&tasklist_lock);

out_nounlock:
	return retval;
}

Ingo Molnar's avatar
Ingo Molnar committed
2096 2097 2098 2099 2100
/**
 * sys_sched_getscheduler - get the RT priority of a thread
 * @pid: the pid in question.
 * @param: structure containing the RT priority.
 */
2101
asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
Linus Torvalds's avatar
Linus Torvalds committed
2102 2103
{
	struct sched_param lp;
2104 2105
	int retval = -EINVAL;
	task_t *p;
Linus Torvalds's avatar
Linus Torvalds committed
2106 2107 2108 2109 2110 2111 2112 2113 2114

	if (!param || pid < 0)
		goto out_nounlock;

	read_lock(&tasklist_lock);
	p = find_process_by_pid(pid);
	retval = -ESRCH;
	if (!p)
		goto out_unlock;
2115

2116 2117
	retval = security_task_getscheduler(p);
	if (retval)
2118 2119
		goto out_unlock;

Linus Torvalds's avatar
Linus Torvalds committed
2120 2121 2122 2123 2124 2125 2126 2127 2128 2129 2130 2131 2132 2133 2134 2135
	lp.sched_priority = p->rt_priority;
	read_unlock(&tasklist_lock);

	/*
	 * This one might sleep, we cannot do it with a spinlock held ...
	 */
	retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;

out_nounlock:
	return retval;

out_unlock:
	read_unlock(&tasklist_lock);
	return retval;
}

Robert Love's avatar
Robert Love committed
2136 2137 2138
/**
 * sys_sched_setaffinity - set the cpu affinity of a process
 * @pid: pid of the process
2139
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
2140
 * @user_mask_ptr: user-space pointer to the new cpu mask
Robert Love's avatar
Robert Love committed
2141
 */
2142
asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
2143
				      unsigned long __user *user_mask_ptr)
Robert Love's avatar
Robert Love committed
2144
{
2145
	cpumask_t new_mask;
Robert Love's avatar
Robert Love committed
2146
	int retval;
2147
	task_t *p;
Robert Love's avatar
Robert Love committed
2148 2149 2150 2151

	if (len < sizeof(new_mask))
		return -EINVAL;

2152
	if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
Robert Love's avatar
Robert Love committed
2153 2154 2155 2156 2157 2158 2159 2160 2161 2162 2163 2164 2165 2166 2167 2168 2169 2170 2171 2172 2173 2174 2175
		return -EFAULT;

	read_lock(&tasklist_lock);

	p = find_process_by_pid(pid);
	if (!p) {
		read_unlock(&tasklist_lock);
		return -ESRCH;
	}

	/*
	 * It is not safe to call set_cpus_allowed with the
	 * tasklist_lock held.  We will bump the task_struct's
	 * usage count and then drop tasklist_lock.
	 */
	get_task_struct(p);
	read_unlock(&tasklist_lock);

	retval = -EPERM;
	if ((current->euid != p->euid) && (current->euid != p->uid) &&
			!capable(CAP_SYS_NICE))
		goto out_unlock;

2176
	retval = set_cpus_allowed(p, new_mask);
Robert Love's avatar
Robert Love committed
2177 2178 2179 2180 2181 2182 2183 2184 2185

out_unlock:
	put_task_struct(p);
	return retval;
}

/**
 * sys_sched_getaffinity - get the cpu affinity of a process
 * @pid: pid of the process
2186
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
2187
 * @user_mask_ptr: user-space pointer to hold the current cpu mask
Robert Love's avatar
Robert Love committed
2188
 */
2189
asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
2190
				      unsigned long __user *user_mask_ptr)
Robert Love's avatar
Robert Love committed
2191 2192
{
	unsigned int real_len;
2193
	cpumask_t mask;
Robert Love's avatar
Robert Love committed
2194
	int retval;
2195
	task_t *p;
Robert Love's avatar
Robert Love committed
2196 2197 2198 2199 2200 2201 2202 2203 2204 2205 2206 2207 2208

	real_len = sizeof(mask);
	if (len < real_len)
		return -EINVAL;

	read_lock(&tasklist_lock);

	retval = -ESRCH;
	p = find_process_by_pid(pid);
	if (!p)
		goto out_unlock;

	retval = 0;
2209
	cpus_and(mask, p->cpus_allowed, cpu_online_map);
Robert Love's avatar
Robert Love committed
2210 2211 2212 2213 2214 2215 2216 2217 2218 2219

out_unlock:
	read_unlock(&tasklist_lock);
	if (retval)
		return retval;
	if (copy_to_user(user_mask_ptr, &mask, real_len))
		return -EFAULT;
	return real_len;
}

Ingo Molnar's avatar
Ingo Molnar committed
2220 2221 2222 2223 2224 2225 2226
/**
 * sys_sched_yield - yield the current processor to other threads.
 *
 * this function yields the current CPU by moving the calling thread
 * to the expired array. If there are no other threads running on this
 * CPU then this function will return.
 */
Linus Torvalds's avatar
Linus Torvalds committed
2227 2228
asmlinkage long sys_sched_yield(void)
{
2229
	runqueue_t *rq = this_rq_lock();
2230
	prio_array_t *array = current->array;
2231

Linus Torvalds's avatar
Linus Torvalds committed
2232
	/*
Ingo Molnar's avatar
Ingo Molnar committed
2233 2234
	 * We implement yielding by moving the task into the expired
	 * queue.
Linus Torvalds's avatar
Linus Torvalds committed
2235
	 *
Ingo Molnar's avatar
Ingo Molnar committed
2236 2237
	 * (special rule: RT tasks will just roundrobin in the active
	 *  array.)
Linus Torvalds's avatar
Linus Torvalds committed
2238
	 */
Ingo Molnar's avatar
Ingo Molnar committed
2239 2240 2241
	if (likely(!rt_task(current))) {
		dequeue_task(current, array);
		enqueue_task(current, rq->expired);
Linus Torvalds's avatar
Linus Torvalds committed
2242 2243 2244 2245
	} else {
		list_del(&current->run_list);
		list_add_tail(&current->run_list, array->queue + current->prio);
	}
2246 2247 2248 2249 2250 2251
	/*
	 * Since we are going to call schedule() anyway, there's
	 * no need to preempt:
	 */
	_raw_spin_unlock(&rq->lock);
	preempt_enable_no_resched();
Linus Torvalds's avatar
Linus Torvalds committed
2252

Linus Torvalds's avatar
Linus Torvalds committed
2253
	schedule();
Linus Torvalds's avatar
Linus Torvalds committed
2254

Linus Torvalds's avatar
Linus Torvalds committed
2255 2256
	return 0;
}
Robert Love's avatar
Robert Love committed
2257

2258 2259 2260 2261 2262 2263
void __cond_resched(void)
{
	set_current_state(TASK_RUNNING);
	schedule();
}

Ingo Molnar's avatar
Ingo Molnar committed
2264 2265 2266 2267 2268 2269
/**
 * yield - yield the current processor to other threads.
 *
 * this is a shortcut for kernel-space yielding - it marks the
 * thread runnable and calls sys_sched_yield().
 */
2270 2271 2272 2273 2274 2275
void yield(void)
{
	set_current_state(TASK_RUNNING);
	sys_sched_yield();
}

Andrew Morton's avatar
Andrew Morton committed
2276 2277 2278 2279 2280 2281 2282 2283 2284 2285 2286 2287 2288 2289 2290 2291
/*
 * This task is about to go to sleep on IO.  Increment rq->nr_iowait so
 * that process accounting knows that this is a task in IO wait state.
 *
 * But don't do that if it is a deliberate, throttling IO wait (this task
 * has set its backing_dev_info: the queue against which it should throttle)
 */
void io_schedule(void)
{
	struct runqueue *rq = this_rq();

	atomic_inc(&rq->nr_iowait);
	schedule();
	atomic_dec(&rq->nr_iowait);
}

2292
long io_schedule_timeout(long timeout)
Andrew Morton's avatar
Andrew Morton committed
2293 2294
{
	struct runqueue *rq = this_rq();
2295
	long ret;
Andrew Morton's avatar
Andrew Morton committed
2296 2297

	atomic_inc(&rq->nr_iowait);
2298
	ret = schedule_timeout(timeout);
Andrew Morton's avatar
Andrew Morton committed
2299
	atomic_dec(&rq->nr_iowait);
2300
	return ret;
Andrew Morton's avatar
Andrew Morton committed
2301 2302
}

Ingo Molnar's avatar
Ingo Molnar committed
2303 2304 2305 2306 2307 2308 2309
/**
 * sys_sched_get_priority_max - return maximum RT priority.
 * @policy: scheduling class.
 *
 * this syscall returns the maximum rt_priority that can be used
 * by a given scheduling class.
 */
Linus Torvalds's avatar
Linus Torvalds committed
2310 2311 2312 2313 2314 2315 2316
asmlinkage long sys_sched_get_priority_max(int policy)
{
	int ret = -EINVAL;

	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
2317
		ret = MAX_USER_RT_PRIO-1;
Linus Torvalds's avatar
Linus Torvalds committed
2318
		break;
Ingo Molnar's avatar
Ingo Molnar committed
2319
	case SCHED_NORMAL:
Linus Torvalds's avatar
Linus Torvalds committed
2320 2321 2322 2323 2324 2325
		ret = 0;
		break;
	}
	return ret;
}

Ingo Molnar's avatar
Ingo Molnar committed
2326
/**
2327
 * sys_sched_get_priority_min - return minimum RT priority.
Ingo Molnar's avatar
Ingo Molnar committed
2328 2329 2330 2331 2332
 * @policy: scheduling class.
 *
 * this syscall returns the minimum rt_priority that can be used
 * by a given scheduling class.
 */
Linus Torvalds's avatar
Linus Torvalds committed
2333 2334 2335 2336 2337 2338 2339 2340 2341
asmlinkage long sys_sched_get_priority_min(int policy)
{
	int ret = -EINVAL;

	switch (policy) {
	case SCHED_FIFO:
	case SCHED_RR:
		ret = 1;
		break;
Ingo Molnar's avatar
Ingo Molnar committed
2342
	case SCHED_NORMAL:
Linus Torvalds's avatar
Linus Torvalds committed
2343 2344 2345 2346 2347
		ret = 0;
	}
	return ret;
}

Ingo Molnar's avatar
Ingo Molnar committed
2348 2349 2350 2351 2352 2353 2354 2355
/**
 * sys_sched_rr_get_interval - return the default timeslice of a process.
 * @pid: pid of the process.
 * @interval: userspace pointer to the timeslice value.
 *
 * this syscall writes the default timeslice value of a given process
 * into the user-space timespec buffer. A value of '0' means infinity.
 */
2356
asmlinkage long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
Linus Torvalds's avatar
Linus Torvalds committed
2357
{
2358
	int retval = -EINVAL;
Linus Torvalds's avatar
Linus Torvalds committed
2359
	struct timespec t;
Linus Torvalds's avatar
Linus Torvalds committed
2360
	task_t *p;
Linus Torvalds's avatar
Linus Torvalds committed
2361 2362 2363 2364 2365 2366 2367

	if (pid < 0)
		goto out_nounlock;

	retval = -ESRCH;
	read_lock(&tasklist_lock);
	p = find_process_by_pid(pid);
2368 2369 2370
	if (!p)
		goto out_unlock;

2371 2372
	retval = security_task_getscheduler(p);
	if (retval)
2373 2374 2375
		goto out_unlock;

	jiffies_to_timespec(p->policy & SCHED_FIFO ?
Ingo Molnar's avatar
Ingo Molnar committed
2376
				0 : task_timeslice(p), &t);
Linus Torvalds's avatar
Linus Torvalds committed
2377
	read_unlock(&tasklist_lock);
2378
	retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
Linus Torvalds's avatar
Linus Torvalds committed
2379 2380
out_nounlock:
	return retval;
2381 2382 2383
out_unlock:
	read_unlock(&tasklist_lock);
	return retval;
Linus Torvalds's avatar
Linus Torvalds committed
2384 2385
}

2386 2387 2388 2389 2390 2391 2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403
static inline struct task_struct *eldest_child(struct task_struct *p)
{
	if (list_empty(&p->children)) return NULL;
	return list_entry(p->children.next,struct task_struct,sibling);
}

static inline struct task_struct *older_sibling(struct task_struct *p)
{
	if (p->sibling.prev==&p->parent->children) return NULL;
	return list_entry(p->sibling.prev,struct task_struct,sibling);
}

static inline struct task_struct *younger_sibling(struct task_struct *p)
{
	if (p->sibling.next==&p->parent->children) return NULL;
	return list_entry(p->sibling.next,struct task_struct,sibling);
}

Linus Torvalds's avatar
Linus Torvalds committed
2404
static void show_task(task_t * p)
Linus Torvalds's avatar
Linus Torvalds committed
2405 2406
{
	unsigned long free = 0;
2407
	task_t *relative;
Linus Torvalds's avatar
Linus Torvalds committed
2408
	int state;
2409
	static const char * stat_nam[] = { "R", "S", "D", "T", "Z", "W" };
Linus Torvalds's avatar
Linus Torvalds committed
2410

Linus Torvalds's avatar
Linus Torvalds committed
2411
	printk("%-13.13s ", p->comm);
Linus Torvalds's avatar
Linus Torvalds committed
2412
	state = p->state ? __ffs(p->state) + 1 : 0;
Linus Torvalds's avatar
Linus Torvalds committed
2413 2414 2415 2416 2417 2418 2419 2420
	if (((unsigned) state) < sizeof(stat_nam)/sizeof(char *))
		printk(stat_nam[state]);
	else
		printk(" ");
#if (BITS_PER_LONG == 32)
	if (p == current)
		printk(" current  ");
	else
2421
		printk(" %08lX ", thread_saved_pc(p));
Linus Torvalds's avatar
Linus Torvalds committed
2422 2423 2424 2425
#else
	if (p == current)
		printk("   current task   ");
	else
2426
		printk(" %016lx ", thread_saved_pc(p));
Linus Torvalds's avatar
Linus Torvalds committed
2427 2428
#endif
	{
Andrew Morton's avatar
Andrew Morton committed
2429
		unsigned long * n = (unsigned long *) (p->thread_info+1);
Linus Torvalds's avatar
Linus Torvalds committed
2430 2431 2432 2433
		while (!*n)
			n++;
		free = (unsigned long) n - (unsigned long)(p+1);
	}
2434 2435 2436
	printk("%5lu %5d %6d ", free, p->pid, p->parent->pid);
	if ((relative = eldest_child(p)))
		printk("%5d ", relative->pid);
Linus Torvalds's avatar
Linus Torvalds committed
2437 2438
	else
		printk("      ");
2439 2440
	if ((relative = younger_sibling(p)))
		printk("%7d", relative->pid);
Linus Torvalds's avatar
Linus Torvalds committed
2441 2442
	else
		printk("       ");
2443 2444
	if ((relative = older_sibling(p)))
		printk(" %5d", relative->pid);
Linus Torvalds's avatar
Linus Torvalds committed
2445 2446 2447 2448
	else
		printk("      ");
	if (!p->mm)
		printk(" (L-TLB)\n");
Linus Torvalds's avatar
Linus Torvalds committed
2449
	else
Linus Torvalds's avatar
Linus Torvalds committed
2450
		printk(" (NOTLB)\n");
Linus Torvalds's avatar
Linus Torvalds committed
2451

2452
	show_stack(p, NULL);
Linus Torvalds's avatar
Linus Torvalds committed
2453 2454 2455 2456
}

void show_state(void)
{
Ingo Molnar's avatar
Ingo Molnar committed
2457
	task_t *g, *p;
Linus Torvalds's avatar
Linus Torvalds committed
2458 2459 2460 2461 2462 2463 2464 2465 2466 2467 2468

#if (BITS_PER_LONG == 32)
	printk("\n"
	       "                         free                        sibling\n");
	printk("  task             PC    stack   pid father child younger older\n");
#else
	printk("\n"
	       "                                 free                        sibling\n");
	printk("  task                 PC        stack   pid father child younger older\n");
#endif
	read_lock(&tasklist_lock);
Ingo Molnar's avatar
Ingo Molnar committed
2469
	do_each_thread(g, p) {
Linus Torvalds's avatar
Linus Torvalds committed
2470 2471 2472 2473 2474
		/*
		 * reset the NMI-timeout, listing all files on a slow
		 * console might take alot of time:
		 */
		touch_nmi_watchdog();
Linus Torvalds's avatar
Linus Torvalds committed
2475
		show_task(p);
Ingo Molnar's avatar
Ingo Molnar committed
2476 2477
	} while_each_thread(g, p);

Linus Torvalds's avatar
Linus Torvalds committed
2478 2479 2480
	read_unlock(&tasklist_lock);
}

Linus Torvalds's avatar
Linus Torvalds committed
2481
void __init init_idle(task_t *idle, int cpu)
Linus Torvalds's avatar
Linus Torvalds committed
2482
{
2483
	runqueue_t *idle_rq = cpu_rq(cpu), *rq = cpu_rq(task_cpu(idle));
Linus Torvalds's avatar
Linus Torvalds committed
2484
	unsigned long flags;
Linus Torvalds's avatar
Linus Torvalds committed
2485

2486
	local_irq_save(flags);
Linus Torvalds's avatar
Linus Torvalds committed
2487 2488 2489 2490 2491 2492 2493
	double_rq_lock(idle_rq, rq);

	idle_rq->curr = idle_rq->idle = idle;
	deactivate_task(idle, rq);
	idle->array = NULL;
	idle->prio = MAX_PRIO;
	idle->state = TASK_RUNNING;
2494
	set_task_cpu(idle, cpu);
Linus Torvalds's avatar
Linus Torvalds committed
2495
	double_rq_unlock(idle_rq, rq);
2496
	set_tsk_need_resched(idle);
2497
	local_irq_restore(flags);
Linus Torvalds's avatar
Linus Torvalds committed
2498 2499

	/* Set the preempt count _outside_ the spinlocks! */
2500
#ifdef CONFIG_PREEMPT
Linus Torvalds's avatar
Linus Torvalds committed
2501
	idle->thread_info->preempt_count = (idle->lock_depth >= 0);
2502 2503
#else
	idle->thread_info->preempt_count = 0;
2504
#endif
Linus Torvalds's avatar
Linus Torvalds committed
2505 2506
}

2507
#ifdef CONFIG_SMP
2508 2509 2510 2511 2512 2513 2514 2515 2516 2517 2518 2519 2520 2521 2522 2523 2524
/*
 * This is how migration works:
 *
 * 1) we queue a migration_req_t structure in the source CPU's
 *    runqueue and wake up that CPU's migration thread.
 * 2) we down() the locked semaphore => thread blocks.
 * 3) migration thread wakes up (implicitly it forces the migrated
 *    thread off the CPU)
 * 4) it gets the migration request and checks whether the migrated
 *    task is still in the wrong runqueue.
 * 5) if it's in the wrong runqueue then the migration thread removes
 *    it and puts it into the right queue.
 * 6) migration thread up()s the semaphore.
 * 7) we wake up and the migration is done.
 */

typedef struct {
Rusty Russell's avatar
Rusty Russell committed
2525
	struct list_head list;
2526
	task_t *task;
2527
	struct completion done;
2528 2529 2530
} migration_req_t;

/*
Ingo Molnar's avatar
Ingo Molnar committed
2531
 * Change a given task's CPU affinity. Migrate the thread to a
2532 2533 2534 2535
 * proper CPU and schedule it away if the CPU it's executing on
 * is removed from the allowed bitmask.
 *
 * NOTE: the caller must have a valid reference to the task, the
2536 2537
 * task must not exit() & deallocate itself prematurely.  The
 * call is not atomic; no spinlocks may be held.
2538
 */
2539
int set_cpus_allowed(task_t *p, cpumask_t new_mask)
2540 2541 2542 2543 2544
{
	unsigned long flags;
	migration_req_t req;
	runqueue_t *rq;

2545 2546
	if (any_online_cpu(new_mask) == NR_CPUS)
		return -EINVAL;
2547 2548 2549 2550 2551

	rq = task_rq_lock(p, &flags);
	p->cpus_allowed = new_mask;
	/*
	 * Can the task run on the task's current CPU? If not then
Ingo Molnar's avatar
Ingo Molnar committed
2552
	 * migrate the thread off to a proper CPU.
2553
	 */
2554
	if (cpu_isset(task_cpu(p), new_mask)) {
2555
		task_rq_unlock(rq, &flags);
2556
		return 0;
2557
	}
2558 2559 2560 2561
	/*
	 * If the task is not on a runqueue (and not running), then
	 * it is sufficient to simply update the task's cpu field.
	 */
Ingo Molnar's avatar
Ingo Molnar committed
2562
	if (!p->array && !task_running(rq, p)) {
2563
		set_task_cpu(p, any_online_cpu(p->cpus_allowed));
2564
		task_rq_unlock(rq, &flags);
2565
		return 0;
2566
	}
2567
	init_completion(&req.done);
2568 2569 2570
	req.task = p;
	list_add(&req.list, &rq->migration_queue);
	task_rq_unlock(rq, &flags);
Ingo Molnar's avatar
Ingo Molnar committed
2571

2572 2573
	wake_up_process(rq->migration_thread);

2574
	wait_for_completion(&req.done);
2575
	return 0;
2576 2577
}

2578 2579 2580 2581 2582 2583 2584 2585 2586 2587 2588 2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 2600 2601 2602
/* Move (not current) task off this cpu, onto dest cpu. */
static void move_task_away(struct task_struct *p, int dest_cpu)
{
	runqueue_t *rq_dest;
	unsigned long flags;

	rq_dest = cpu_rq(dest_cpu);

	local_irq_save(flags);
	double_rq_lock(this_rq(), rq_dest);
	if (task_cpu(p) != smp_processor_id())
		goto out; /* Already moved */

	set_task_cpu(p, dest_cpu);
	if (p->array) {
		deactivate_task(p, this_rq());
		activate_task(p, rq_dest);
		if (p->prio < rq_dest->curr->prio)
			resched_task(rq_dest->curr);
	}
 out:
	double_rq_unlock(this_rq(), rq_dest);
	local_irq_restore(flags);
}

2603 2604 2605 2606 2607 2608
typedef struct {
	int cpu;
	struct completion startup_done;
	task_t *task;
} migration_startup_t;

Ingo Molnar's avatar
Ingo Molnar committed
2609 2610
/*
 * migration_thread - this is a highprio system thread that performs
2611 2612
 * thread migration by bumping thread off CPU then 'pushing' onto
 * another runqueue.
Ingo Molnar's avatar
Ingo Molnar committed
2613 2614
 */
static int migration_thread(void * data)
2615
{
2616 2617
	/* Marking "param" __user is ok, since we do a set_fs(KERNEL_DS); */
	struct sched_param __user param = { .sched_priority = MAX_RT_PRIO-1 };
2618 2619
	migration_startup_t *startup = data;
	int cpu = startup->cpu;
2620
	runqueue_t *rq;
2621
	int ret;
2622

2623 2624 2625 2626 2627 2628 2629
	startup->task = current;
	complete(&startup->startup_done);
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule();

	BUG_ON(smp_processor_id() != cpu);

2630
	daemonize("migration/%d", cpu);
2631
	set_fs(KERNEL_DS);
2632

2633 2634
	ret = setscheduler(0, SCHED_FIFO, &param);

2635 2636 2637 2638 2639
	rq = this_rq();
	rq->migration_thread = current;

	for (;;) {
		struct list_head *head;
2640
		migration_req_t *req;
2641

2642 2643 2644
		if (current->flags & PF_FREEZE)
			refrigerator(PF_IOTHREAD);

2645
		spin_lock_irq(&rq->lock);
2646
		head = &rq->migration_queue;
2647
		current->state = TASK_INTERRUPTIBLE;
2648
		if (list_empty(head)) {
2649
			spin_unlock_irq(&rq->lock);
2650 2651 2652
			schedule();
			continue;
		}
2653
		req = list_entry(head->next, migration_req_t, list);
2654
		list_del_init(head->next);
2655
		spin_unlock_irq(&rq->lock);
2656

2657 2658
		move_task_away(req->task,
			       any_online_cpu(req->task->cpus_allowed));
2659
		complete(&req->done);
2660 2661 2662
	}
}

Ingo Molnar's avatar
Ingo Molnar committed
2663 2664 2665 2666
/*
 * migration_call - callback that gets triggered when a CPU is added.
 * Here we can start up the necessary migration thread for the new CPU.
 */
2667 2668 2669
static int migration_call(struct notifier_block *nfb,
			  unsigned long action,
			  void *hcpu)
2670
{
2671 2672 2673
	long cpu = (long) hcpu;
	migration_startup_t startup;

2674 2675
	switch (action) {
	case CPU_ONLINE:
2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2686 2687 2688 2689 2690 2691 2692

		printk("Starting migration thread for cpu %li\n", cpu);

		startup.cpu = cpu;
		startup.task = NULL;
		init_completion(&startup.startup_done);

		kernel_thread(migration_thread, &startup, CLONE_KERNEL);
		wait_for_completion(&startup.startup_done);
		wait_task_inactive(startup.task);

		startup.task->thread_info->cpu = cpu;
		startup.task->cpus_allowed = cpumask_of_cpu(cpu);

		wake_up_process(startup.task);

		while (!cpu_rq(cpu)->migration_thread)
2693
			yield();
2694

2695
		break;
2696
	}
2697 2698
	return NOTIFY_OK;
}
2699

2700 2701
static struct notifier_block migration_notifier = { &migration_call, NULL, 0 };

2702
__init int migration_init(void)
2703 2704 2705
{
	/* Start one for boot CPU. */
	migration_call(&migration_notifier, CPU_ONLINE,
2706
		       (void *)(long)smp_processor_id());
2707 2708
	register_cpu_notifier(&migration_notifier);
	return 0;
2709
}
2710

2711
#endif
Ingo Molnar's avatar
Ingo Molnar committed
2712

2713
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
2714 2715 2716 2717 2718 2719 2720 2721 2722 2723 2724 2725 2726
/*
 * The 'big kernel lock'
 *
 * This spinlock is taken and released recursively by lock_kernel()
 * and unlock_kernel().  It is transparently dropped and reaquired
 * over schedule().  It is used to protect legacy code that hasn't
 * been migrated to a proper locking design yet.
 *
 * Don't use in new code.
 */
spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
#endif

2727 2728
static void kstat_init_cpu(int cpu)
{
Ingo Molnar's avatar
Ingo Molnar committed
2729 2730
	/* Add any initialisation to kstat here */
	/* Useful when cpu offlining logic is added.. */
2731 2732 2733
}

static int __devinit kstat_cpu_notify(struct notifier_block *self,
Ingo Molnar's avatar
Ingo Molnar committed
2734
					unsigned long action, void *hcpu)
2735 2736 2737 2738 2739 2740 2741 2742 2743 2744 2745
{
	int cpu = (unsigned long)hcpu;
	switch(action) {
	case CPU_UP_PREPARE:
		kstat_init_cpu(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}
Ingo Molnar's avatar
Ingo Molnar committed
2746

2747 2748 2749 2750 2751 2752 2753 2754
static struct notifier_block __devinitdata kstat_nb = {
	.notifier_call  = kstat_cpu_notify,
	.next           = NULL,
};

__init static void init_kstat(void) {
	kstat_cpu_notify(&kstat_nb, (unsigned long)CPU_UP_PREPARE,
			(void *)(long)smp_processor_id());
Ingo Molnar's avatar
Ingo Molnar committed
2755
	register_cpu_notifier(&kstat_nb);
2756 2757
}

Ingo Molnar's avatar
Ingo Molnar committed
2758 2759 2760 2761 2762
void __init sched_init(void)
{
	runqueue_t *rq;
	int i, j, k;

2763 2764
	/* Init the kstat counters */
	init_kstat();
Ingo Molnar's avatar
Ingo Molnar committed
2765 2766 2767 2768 2769 2770 2771 2772
	for (i = 0; i < NR_CPUS; i++) {
		prio_array_t *array;

		rq = cpu_rq(i);
		rq->active = rq->arrays;
		rq->expired = rq->arrays + 1;
		spin_lock_init(&rq->lock);
		INIT_LIST_HEAD(&rq->migration_queue);
Andrew Morton's avatar
Andrew Morton committed
2773
		atomic_set(&rq->nr_iowait, 0);
2774
		nr_running_init(rq);
Ingo Molnar's avatar
Ingo Molnar committed
2775 2776 2777 2778 2779 2780 2781 2782 2783 2784 2785 2786 2787

		for (j = 0; j < 2; j++) {
			array = rq->arrays + j;
			for (k = 0; k < MAX_PRIO; k++) {
				INIT_LIST_HEAD(array->queue + k);
				__clear_bit(k, array->bitmap);
			}
			// delimiter for bitsearch
			__set_bit(MAX_PRIO, array->bitmap);
		}
	}
	/*
	 * We have to do a little magic to get the first
Ingo Molnar's avatar
Ingo Molnar committed
2788
	 * thread right in SMP mode.
Ingo Molnar's avatar
Ingo Molnar committed
2789 2790 2791 2792 2793
	 */
	rq = this_rq();
	rq->curr = current;
	rq->idle = current;
	set_task_cpu(current, smp_processor_id());
2794
	wake_up_forked_process(current);
Ingo Molnar's avatar
Ingo Molnar committed
2795

2796
	init_timers();
Ingo Molnar's avatar
Ingo Molnar committed
2797 2798 2799 2800 2801

	/*
	 * The boot idle thread does lazy MMU switching as well:
	 */
	atomic_inc(&init_mm.mm_count);
2802
	enter_lazy_tlb(&init_mm, current);
Ingo Molnar's avatar
Ingo Molnar committed
2803 2804
}

2805
#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
2806 2807 2808 2809 2810
void __might_sleep(char *file, int line)
{
#if defined(in_atomic)
	static unsigned long prev_jiffy;	/* ratelimiting */

2811
	if (in_atomic() || irqs_disabled()) {
2812 2813 2814
		if (time_before(jiffies, prev_jiffy + HZ))
			return;
		prev_jiffy = jiffies;
2815
		printk(KERN_ERR "Debug: sleeping function called from invalid"
2816
				" context at %s:%d\n", file, line);
2817 2818
		printk("in_atomic():%d, irqs_disabled():%d\n",
				in_atomic(), irqs_disabled());
2819 2820 2821 2822 2823
		dump_stack();
	}
#endif
}
#endif
2824 2825 2826 2827 2828 2829 2830 2831 2832 2833 2834 2835 2836 2837 2838 2839 2840 2841 2842


#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT)
/*
 * This could be a long-held lock.  If another CPU holds it for a long time,
 * and that CPU is not asked to reschedule then *this* CPU will spin on the
 * lock for a long time, even if *this* CPU is asked to reschedule.
 *
 * So what we do here, in the slow (contended) path is to spin on the lock by
 * hand while permitting preemption.
 *
 * Called inside preempt_disable().
 */
void __preempt_spin_lock(spinlock_t *lock)
{
	if (preempt_count() > 1) {
		_raw_spin_lock(lock);
		return;
	}
2843 2844 2845 2846 2847 2848
	do {
		preempt_enable();
		while (spin_is_locked(lock))
			cpu_relax();
		preempt_disable();
	} while (!_raw_spin_trylock(lock));
2849 2850 2851 2852 2853 2854 2855 2856 2857
}

void __preempt_write_lock(rwlock_t *lock)
{
	if (preempt_count() > 1) {
		_raw_write_lock(lock);
		return;
	}

2858 2859 2860 2861 2862 2863
	do {
		preempt_enable();
		while (rwlock_is_locked(lock))
			cpu_relax();
		preempt_disable();
	} while (!_raw_write_trylock(lock));
2864 2865
}
#endif
2866 2867 2868 2869 2870 2871 2872 2873 2874 2875 2876 2877 2878 2879 2880 2881 2882 2883 2884 2885 2886 2887 2888 2889 2890 2891 2892 2893 2894 2895

EXPORT_SYMBOL(__cond_resched);
EXPORT_SYMBOL(__wake_up);
EXPORT_SYMBOL(__wake_up_sync);
EXPORT_SYMBOL(complete);
EXPORT_SYMBOL(default_wake_function);
EXPORT_SYMBOL(idle_cpu);
EXPORT_SYMBOL(interruptible_sleep_on);
EXPORT_SYMBOL(interruptible_sleep_on_timeout);
EXPORT_SYMBOL(io_schedule);
EXPORT_SYMBOL(schedule);
EXPORT_SYMBOL(set_cpus_allowed);
EXPORT_SYMBOL(set_user_nice);
EXPORT_SYMBOL(sleep_on);
EXPORT_SYMBOL(sleep_on_timeout);
EXPORT_SYMBOL(task_nice);
EXPORT_SYMBOL(wait_for_completion);
EXPORT_SYMBOL(wake_up_process);
EXPORT_SYMBOL(yield);

#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
EXPORT_SYMBOL(__might_sleep);
#endif
#ifdef CONFIG_PREEMPT
#ifdef CONFIG_SMP
EXPORT_SYMBOL(__preempt_spin_lock);
EXPORT_SYMBOL(__preempt_write_lock);
#endif
EXPORT_SYMBOL(preempt_schedule);
#endif