sched_fair.c
/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 *  Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 *  Interactivity improvements by Mike Galbraith
 *  (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 *  Various enhancements by Dmitry Adamushko.
 *  (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 *  Group scheduling enhancements by Srivatsa Vaddagiri
 *  Copyright IBM Corporation, 2007
 *  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 *  Scaled math optimizations by Thomas Gleixner
 *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 20ms, units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length.
 * (to see the precise effective timeslice length of your workload,
 *  run vmstat and monitor the context-switches field)
 *
 * On SMP systems the value of this is multiplied by the log2 of the
 * number of CPUs. (i.e. factor 2x on 2-way systems, 3x on 4-way
 * systems, 4x on 8-way systems, 5x on 16-way systems, etc.)
 */
const_debug unsigned int sysctl_sched_latency = 20000000ULL;
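
/*
 * Illustrative numbers for the SMP scaling described above: with the
 * 20ms default, a 2-way machine ends up targeting ~40ms of scheduling
 * latency and an 8-way machine ~80ms, while a UP machine keeps the
 * raw 20ms.
 */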

/*
 * After fork, child runs first. (default) If set to 0 then
 * parent will (try to) run first.
 */
const_debug unsigned int sysctl_sched_child_runs_first = 1;

/*
 * Minimal preemption granularity for CPU-bound tasks:
 * (default: 2 msec, units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity __read_mostly = 2000000ULL;

/*
 * sys_sched_yield() compat mode
 *
 * This option switches the aggressive yield implementation of the
 * old scheduler back on.
 */
unsigned int __read_mostly sysctl_sched_compat_yield;

/*
 * SCHED_BATCH wake-up granularity.
 * (default: 25 msec, units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
const_debug unsigned int sysctl_sched_batch_wakeup_granularity = 25000000UL;

/*
 * SCHED_OTHER wake-up granularity.
 * (default: 2 msec, units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
const_debug unsigned int sysctl_sched_wakeup_granularity = 2000000UL;

unsigned int sysctl_sched_runtime_limit __read_mostly;

extern struct sched_class fair_sched_class;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rq;
}

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)	(!se->my_q)

#else	/* CONFIG_FAIR_GROUP_SCHED */

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return container_of(cfs_rq, struct rq, cfs);
}

#define entity_is_task(se)	1

#endif	/* CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
	return container_of(se, struct task_struct, se);
}


/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

static inline void
set_leftmost(struct cfs_rq *cfs_rq, struct rb_node *leftmost)
{
	struct sched_entity *se;

	cfs_rq->rb_leftmost = leftmost;
	if (leftmost) {
		se = rb_entry(leftmost, struct sched_entity, run_node);
		cfs_rq->min_vruntime = max(se->vruntime,
						cfs_rq->min_vruntime);
	}
}

/*
 * Enqueue an entity into the rb-tree:
 */
static void
__enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
	struct rb_node *parent = NULL;
	struct sched_entity *entry;
	s64 key = se->fair_key;
	int leftmost = 1;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_entity, run_node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same key stay together.
		 */
		if (key - entry->fair_key < 0) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Maintain a cache of leftmost tree entries (it is frequently
	 * used):
	 */
	if (leftmost)
		set_leftmost(cfs_rq, &se->run_node);

	rb_link_node(&se->run_node, parent, link);
	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
	update_load_add(&cfs_rq->load, se->load.weight);
	cfs_rq->nr_running++;
	se->on_rq = 1;

	schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
}
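
/*
 * Note on the leftmost cache maintained above: "which entity runs next?"
 * becomes an O(1) pointer dereference, and only insertion/removal pay the
 * O(log n) rbtree cost.
 */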

static void
__dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (cfs_rq->rb_leftmost == &se->run_node)
		set_leftmost(cfs_rq, rb_next(&se->run_node));

	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
	update_load_sub(&cfs_rq->load, se->load.weight);
	cfs_rq->nr_running--;
	se->on_rq = 0;

	schedstat_add(cfs_rq, wait_runtime, -se->wait_runtime);
}

static inline struct rb_node *first_fair(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rb_leftmost;
}

static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
{
	return rb_entry(first_fair(cfs_rq), struct sched_entity, run_node);
}

/**************************************************************
 * Scheduling class statistics methods:
 */

static u64 __sched_period(unsigned long nr_running)
{
	u64 period = sysctl_sched_latency;
	unsigned long nr_latency =
		sysctl_sched_latency / sysctl_sched_min_granularity;

	if (unlikely(nr_running > nr_latency)) {
		period *= nr_running;
		do_div(period, nr_latency);
	}

	return period;
}
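
/*
 * Worked example with the default tunables (latency = 20ms,
 * min_granularity = 2ms): nr_latency = 10.  Up to 10 runnable tasks
 * share the plain 20ms period; with 15 tasks the period is stretched
 * to 20ms * 15 / 10 = 30ms, so no task's slice drops below ~2ms.
 */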

static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	u64 period = __sched_period(cfs_rq->nr_running);

	period *= se->load.weight;
	do_div(period, cfs_rq->load.weight);

	return period;
}
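
/*
 * Worked example: two runnable nice-0 tasks split a 20ms period into
 * two 10ms slices.  If one task carries three times the weight of the
 * other, it gets 3/4 of the period (15ms) and the lighter task 1/4
 * (5ms) - the slice is simply the entity's share of the runqueue load
 * times the period.
 */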

static inline void
limit_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	long limit = sysctl_sched_runtime_limit;

	/*
	 * Niced tasks have the same history dynamic range as
	 * non-niced tasks:
	 */
	if (unlikely(se->wait_runtime > limit)) {
		se->wait_runtime = limit;
		schedstat_inc(se, wait_runtime_overruns);
		schedstat_inc(cfs_rq, wait_runtime_overruns);
	}
	if (unlikely(se->wait_runtime < -limit)) {
		se->wait_runtime = -limit;
		schedstat_inc(se, wait_runtime_underruns);
		schedstat_inc(cfs_rq, wait_runtime_underruns);
	}
}

static inline void
__add_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se, long delta)
{
	se->wait_runtime += delta;
	schedstat_add(se, sum_wait_runtime, delta);
	limit_wait_runtime(cfs_rq, se);
}

static void
add_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se, long delta)
{
	schedstat_add(cfs_rq, wait_runtime, -se->wait_runtime);
	__add_wait_runtime(cfs_rq, se, delta);
	schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
}

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void
__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
	      unsigned long delta_exec)
{
	unsigned long delta, delta_fair, delta_mine, delta_exec_weighted;
	struct load_weight *lw = &cfs_rq->load;
	unsigned long load = lw->weight;

	schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));

	curr->sum_exec_runtime += delta_exec;
	cfs_rq->exec_clock += delta_exec;
	delta_exec_weighted = delta_exec;
	if (unlikely(curr->load.weight != NICE_0_LOAD)) {
		delta_exec_weighted = calc_delta_fair(delta_exec_weighted,
							&curr->load);
	}
	curr->vruntime += delta_exec_weighted;

	if (!sched_feat(FAIR_SLEEPERS))
		return;

	if (unlikely(!load))
		return;

	delta_fair = calc_delta_fair(delta_exec, lw);
	delta_mine = calc_delta_mine(delta_exec, curr->load.weight, lw);

	if (cfs_rq->sleeper_bonus > sysctl_sched_min_granularity) {
		delta = min((u64)delta_mine, cfs_rq->sleeper_bonus);
		delta = min(delta, (unsigned long)(
			(long)sysctl_sched_runtime_limit - curr->wait_runtime));
		cfs_rq->sleeper_bonus -= delta;
		delta_mine -= delta;
	}

	cfs_rq->fair_clock += delta_fair;
	/*
	 * We executed delta_exec amount of time on the CPU,
	 * but we were only entitled to delta_mine amount of
	 * time during that period (if nr_running == 1 then
	 * the two values are equal)
	 * [Note: delta_mine - delta_exec is negative]:
	 */
	add_wait_runtime(cfs_rq, curr, delta_mine - delta_exec);
}
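
/*
 * Effect of the weighting above: a nice-0 task's vruntime advances 1:1
 * with real execution time, while an entity with twice NICE_0_LOAD
 * accrues only about half the vruntime for the same delta_exec, so it
 * sorts earlier in the timeline and ends up with a proportionally
 * larger share of the CPU.
 */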

static void update_curr(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;
	u64 now = rq_of(cfs_rq)->clock;
	unsigned long delta_exec;

	if (unlikely(!curr))
		return;

	/*
	 * Get the amount of time the current task was running
	 * since the last time we changed load (this cannot
	 * overflow on 32 bits):
	 */
	delta_exec = (unsigned long)(now - curr->exec_start);

	__update_curr(cfs_rq, curr, delta_exec);
	curr->exec_start = now;
}

static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	se->wait_start_fair = cfs_rq->fair_clock;
	schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
}

static inline unsigned long
calc_weighted(unsigned long delta, struct sched_entity *se)
{
	unsigned long weight = se->load.weight;

	if (unlikely(weight != NICE_0_LOAD))
		return (u64)delta * se->load.weight >> NICE_0_SHIFT;
	else
		return delta;
}
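
/*
 * Since NICE_0_LOAD == 1 << NICE_0_SHIFT, a nice-0 entity gets delta back
 * unchanged - hence the fast path above.  Heavier entities have their
 * delta scaled up and lighter ones scaled down, in proportion to
 * load.weight / NICE_0_LOAD.
 */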

/*
 * Task is being enqueued - update stats:
 */
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Are we enqueueing a waiting task? (for current tasks
	 * a dequeue/enqueue event is a NOP)
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_start(cfs_rq, se);
	/*
	 * Update the key:
	 */
	se->fair_key = se->vruntime;
}

/*
 * Note: must be called with a freshly updated rq->fair_clock.
 */
static inline void
__update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se,
			unsigned long delta_fair)
{
	schedstat_set(se->wait_max, max(se->wait_max,
			rq_of(cfs_rq)->clock - se->wait_start));

	delta_fair = calc_weighted(delta_fair, se);

	add_wait_runtime(cfs_rq, se, delta_fair);
}

static void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	unsigned long delta_fair;

	if (unlikely(!se->wait_start_fair))
		return;

	delta_fair = (unsigned long)min((u64)(2*sysctl_sched_runtime_limit),
			(u64)(cfs_rq->fair_clock - se->wait_start_fair));

	__update_stats_wait_end(cfs_rq, se, delta_fair);

	se->wait_start_fair = 0;
	schedstat_set(se->wait_start, 0);
}

static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_curr(cfs_rq);
	/*
	 * Mark the end of the wait period if dequeueing a
	 * waiting task:
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_end(cfs_rq, se);
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * We are starting a new run period:
	 */
	se->exec_start = rq_of(cfs_rq)->clock;
}

/*
 * We are descheduling a task - update its stats:
 */
static inline void
update_stats_curr_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	se->exec_start = 0;
}

/**************************************************
 * Scheduling class queueing methods:
 */

static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se,
			      unsigned long delta_fair)
{
	unsigned long load = cfs_rq->load.weight;
	long prev_runtime;

	/*
	 * Do not boost sleepers if there's too much bonus 'in flight'
	 * already:
	 */
	if (unlikely(cfs_rq->sleeper_bonus > sysctl_sched_runtime_limit))
		return;

	if (sched_feat(SLEEPER_LOAD_AVG))
		load = rq_of(cfs_rq)->cpu_load[2];

	/*
	 * Fix up delta_fair with the effect of us running
	 * during the whole sleep period:
	 */
	if (sched_feat(SLEEPER_AVG))
		delta_fair = div64_likely32((u64)delta_fair * load,
						load + se->load.weight);

	delta_fair = calc_weighted(delta_fair, se);

	prev_runtime = se->wait_runtime;
	__add_wait_runtime(cfs_rq, se, delta_fair);
	delta_fair = se->wait_runtime - prev_runtime;

	/*
	 * Track the amount of bonus we've given to sleepers:
	 */
	cfs_rq->sleeper_bonus += delta_fair;
}
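
/*
 * Rough sketch of the net effect: a task that slept gets (part of) its
 * sleep period, measured in fair-clock time, credited as wait_runtime on
 * wakeup.  The credit is bounded by sysctl_sched_runtime_limit and by the
 * bonus already "in flight", so a burst of wakeups cannot mint unbounded
 * credit.
 */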

static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct task_struct *tsk = task_of(se);
	unsigned long delta_fair;

	if ((entity_is_task(se) && tsk->policy == SCHED_BATCH) ||
			 !sched_feat(FAIR_SLEEPERS))
		return;

	delta_fair = (unsigned long)min((u64)(2*sysctl_sched_runtime_limit),
		(u64)(cfs_rq->fair_clock - se->sleep_start_fair));

	__enqueue_sleeper(cfs_rq, se, delta_fair);

	se->sleep_start_fair = 0;

#ifdef CONFIG_SCHEDSTATS
	if (se->sleep_start) {
		u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > se->sleep_max))
			se->sleep_max = delta;

		se->sleep_start = 0;
		se->sum_sleep_runtime += delta;
	}
	if (se->block_start) {
		u64 delta = rq_of(cfs_rq)->clock - se->block_start;

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > se->block_max))
			se->block_max = delta;

		se->block_start = 0;
		se->sum_sleep_runtime += delta;

		/*
		 * Blocking time is in units of nanosecs, so shift by 20 to
		 * get a milliseconds-range estimation of the amount of
		 * time that the task spent sleeping:
		 */
		if (unlikely(prof_on == SLEEP_PROFILING)) {
			profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk),
				     delta >> 20);
		}
	}
#endif
}

static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
{
	/*
	 * Update the fair clock.
	 */
	update_curr(cfs_rq);

	if (wakeup) {
		u64 min_runtime, latency;

		min_runtime = cfs_rq->min_vruntime;
		min_runtime += sysctl_sched_latency/2;

		if (sched_feat(NEW_FAIR_SLEEPERS)) {
			latency = calc_weighted(sysctl_sched_latency, se);
			if (min_runtime > latency)
				min_runtime -= latency;
		}

		se->vruntime = max(se->vruntime, min_runtime);

		enqueue_sleeper(cfs_rq, se);
	}

	update_stats_enqueue(cfs_rq, se);
	__enqueue_entity(cfs_rq, se);
}
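
/*
 * Placement sketch for the wakeup path above: without NEW_FAIR_SLEEPERS a
 * waking entity is floored at min_vruntime + latency/2, i.e. it gets no
 * credit for having slept.  With NEW_FAIR_SLEEPERS the weight-scaled
 * latency is subtracted back out, so a long sleeper comes in roughly
 * latency/2 ahead of min_vruntime and can preempt soon after waking; the
 * max() against its old vruntime keeps short sleepers from gaining.
 */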

static void
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
{
	update_stats_dequeue(cfs_rq, se);
	if (sleep) {
		se->sleep_start_fair = cfs_rq->fair_clock;
#ifdef CONFIG_SCHEDSTATS
		if (entity_is_task(se)) {
			struct task_struct *tsk = task_of(se);

			if (tsk->state & TASK_INTERRUPTIBLE)
				se->sleep_start = rq_of(cfs_rq)->clock;
			if (tsk->state & TASK_UNINTERRUPTIBLE)
				se->block_start = rq_of(cfs_rq)->clock;
		}
#endif
	}
	__dequeue_entity(cfs_rq, se);
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void
check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
	unsigned long ideal_runtime, delta_exec;

	ideal_runtime = sched_slice(cfs_rq, curr);
	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
	if (delta_exec > ideal_runtime)
		resched_task(rq_of(cfs_rq)->curr);
}
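
/*
 * Example: with two runnable nice-0 tasks and a 20ms period, each task's
 * ideal slice is 10ms.  Once the running task has executed more than 10ms
 * since it was last picked, the tick path above marks it for reschedule.
 */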

static inline void
set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Any task has to be enqueued before it get to execute on
	 * a CPU. So account for the time it spent waiting on the
	 * runqueue. (note, here we rely on pick_next_task() having
	 * done a put_prev_task_fair() shortly before this, which
	 * updated rq->fair_clock - used by update_stats_wait_end())
	 */
	update_stats_wait_end(cfs_rq, se);
	update_stats_curr_start(cfs_rq, se);
	cfs_rq->curr = se;
#ifdef CONFIG_SCHEDSTATS
	/*
	 * Track our maximum slice length, if the CPU's load is at
	 * least twice that of our own weight (i.e. don't track it
	 * when there are only lesser-weight tasks around):
	 */
	if (rq_of(cfs_rq)->ls.load.weight >= 2*se->load.weight) {
		se->slice_max = max(se->slice_max,
			se->sum_exec_runtime - se->prev_sum_exec_runtime);
	}
#endif
	se->prev_sum_exec_runtime = se->sum_exec_runtime;
}
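
/*
 * Snapshotting prev_sum_exec_runtime above is what lets check_preempt_tick()
 * compute "time run since last picked" as a plain subtraction.
 */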

static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
{
	struct sched_entity *se = __pick_next_entity(cfs_rq);

	set_next_entity(cfs_rq, se);

	return se;
}

static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
{
	/*
	 * If still on the runqueue then deactivate_task()
	 * was not called and update_curr() has to be done:
	 */
	if (prev->on_rq)
		update_curr(cfs_rq);

	update_stats_curr_end(cfs_rq, prev);

	if (prev->on_rq)
		update_stats_wait_start(cfs_rq, prev);
	cfs_rq->curr = NULL;
}

static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
	/*
	 * Dequeue and enqueue the task to update its
	 * position within the tree:
	 */
	dequeue_entity(cfs_rq, curr, 0);
	enqueue_entity(cfs_rq, curr, 0);

	if (cfs_rq->nr_running > 1)
		check_preempt_tick(cfs_rq, curr);
}

/**************************************************
 * CFS operations on tasks:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
		for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return grp->my_q;
}

/* Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
 * another cpu ('this_cpu')
 */
static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
	/* A later patch will take group into account */
	return &cpu_rq(this_cpu)->cfs;
}

/* Iterate through all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
	list_for_each_entry(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)

/* Do the two (enqueued) tasks belong to the same group ? */
static inline int is_same_group(struct task_struct *curr, struct task_struct *p)
{
	if (curr->se.cfs_rq == p->se.cfs_rq)
		return 1;

	return 0;
}

#else	/* CONFIG_FAIR_GROUP_SCHED */

#define for_each_sched_entity(se) \
		for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	struct task_struct *p = task_of(se);
	struct rq *rq = task_rq(p);

	return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return NULL;
}

static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
	return &cpu_rq(this_cpu)->cfs;
}

#define for_each_leaf_cfs_rq(rq, cfs_rq) \
		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

static inline int is_same_group(struct task_struct *curr, struct task_struct *p)
{
	return 1;
}

#endif	/* CONFIG_FAIR_GROUP_SCHED */

/*
 * The enqueue_task method is called before nr_running is
 * increased. Here we update the fair scheduling stats and
 * then put the task into the rbtree:
 */
static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se;

	for_each_sched_entity(se) {
		if (se->on_rq)
			break;
		cfs_rq = cfs_rq_of(se);
		enqueue_entity(cfs_rq, se, wakeup);
	}
}

/*
 * The dequeue_task method is called before nr_running is
 * decreased. We remove the task from the rbtree and
 * update the fair scheduling stats:
 */
static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		dequeue_entity(cfs_rq, se, sleep);
		/* Don't dequeue parent if it has other entities besides us */
		if (cfs_rq->load.weight)
			break;
	}
}

/*
 * sched_yield() support is very simple - we dequeue and enqueue.
 *
 * If compat_yield is turned on then we requeue to the end of the tree.
 */
static void yield_task_fair(struct rq *rq, struct task_struct *p)
{
	struct cfs_rq *cfs_rq = task_cfs_rq(p);
	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
	struct sched_entity *rightmost, *se = &p->se;
	struct rb_node *parent;

	/*
	 * Are we the only task in the tree?
	 */
	if (unlikely(cfs_rq->nr_running == 1))
		return;

	if (likely(!sysctl_sched_compat_yield)) {
		__update_rq_clock(rq);
		/*
		 * Dequeue and enqueue the task to update its
		 * position within the tree:
		 */
		dequeue_entity(cfs_rq, &p->se, 0);
		enqueue_entity(cfs_rq, &p->se, 0);

		return;
	}
	/*
	 * Find the rightmost entry in the rbtree:
	 */
	do {
		parent = *link;
		link = &parent->rb_right;
	} while (*link);

	rightmost = rb_entry(parent, struct sched_entity, run_node);
	/*
	 * Already in the rightmost position?
	 */
	if (unlikely(rightmost == se))
		return;

	/*
	 * Minimally necessary key value to be last in the tree:
	 */
	se->fair_key = rightmost->fair_key + 1;

	if (cfs_rq->rb_leftmost == &se->run_node)
		cfs_rq->rb_leftmost = rb_next(&se->run_node);
	/*
	 * Relink the task to the rightmost position:
	 */
	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
	rb_link_node(&se->run_node, parent, link);
	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}
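
/*
 * In compat mode the yielding task is keyed just past the current
 * rightmost entity, so a tight sched_yield() loop really does fall behind
 * every other runnable task instead of being picked again almost
 * immediately.
 */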

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
{
	struct task_struct *curr = rq->curr;
	struct cfs_rq *cfs_rq = task_cfs_rq(curr);

	if (unlikely(rt_prio(p->prio))) {
		update_rq_clock(rq);
		update_curr(cfs_rq);
		resched_task(curr);
		return;
	}
	if (is_same_group(curr, p)) {
		s64 delta = curr->se.vruntime - p->se.vruntime;

		if (delta > (s64)sysctl_sched_wakeup_granularity)
			resched_task(curr);
	}
}
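
/*
 * Example: the woken task p preempts current only when current's vruntime
 * exceeds p's by more than sysctl_sched_wakeup_granularity (2ms by
 * default), which damps ping-pong preemption between tasks that are
 * already nearly even.
 */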

static struct task_struct *pick_next_task_fair(struct rq *rq)
{
	struct cfs_rq *cfs_rq = &rq->cfs;
	struct sched_entity *se;

	if (unlikely(!cfs_rq->nr_running))
		return NULL;

	do {
		se = pick_next_entity(cfs_rq);
		cfs_rq = group_cfs_rq(se);
	} while (cfs_rq);

	return task_of(se);
}

/*
 * Account for a descheduled task:
 */
static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
{
	struct sched_entity *se = &prev->se;
	struct cfs_rq *cfs_rq;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		put_prev_entity(cfs_rq, se);
	}
}

/**************************************************
 * Fair scheduling class load-balancing methods:
 */

/*
 * Load-balancing iterator. Note: while the runqueue stays locked
 * during the whole iteration, the current task might be
 * dequeued so the iterator has to be dequeue-safe. Here we
 * achieve that by always pre-iterating before returning
 * the current task:
 */
static inline struct task_struct *
__load_balance_iterator(struct cfs_rq *cfs_rq, struct rb_node *curr)
{
	struct task_struct *p;

	if (!curr)
		return NULL;

	p = rb_entry(curr, struct task_struct, se.run_node);
	cfs_rq->rb_load_balance_curr = rb_next(curr);

	return p;
}

static struct task_struct *load_balance_start_fair(void *arg)
{
	struct cfs_rq *cfs_rq = arg;

	return __load_balance_iterator(cfs_rq, first_fair(cfs_rq));
}

static struct task_struct *load_balance_next_fair(void *arg)
{
	struct cfs_rq *cfs_rq = arg;

	return __load_balance_iterator(cfs_rq, cfs_rq->rb_load_balance_curr);
}

#ifdef CONFIG_FAIR_GROUP_SCHED
static int cfs_rq_best_prio(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr;
	struct task_struct *p;

	if (!cfs_rq->nr_running)
		return MAX_PRIO;

	curr = __pick_next_entity(cfs_rq);
	p = task_of(curr);

	return p->prio;
}
#endif

static unsigned long
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
		  unsigned long max_nr_move, unsigned long max_load_move,
		  struct sched_domain *sd, enum cpu_idle_type idle,
		  int *all_pinned, int *this_best_prio)
{
	struct cfs_rq *busy_cfs_rq;
	unsigned long load_moved, total_nr_moved = 0, nr_moved;
	long rem_load_move = max_load_move;
	struct rq_iterator cfs_rq_iterator;

	cfs_rq_iterator.start = load_balance_start_fair;
	cfs_rq_iterator.next = load_balance_next_fair;

	for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
#ifdef CONFIG_FAIR_GROUP_SCHED
		struct cfs_rq *this_cfs_rq;
		long imbalance;
		unsigned long maxload;

		this_cfs_rq = cpu_cfs_rq(busy_cfs_rq, this_cpu);

		imbalance = busy_cfs_rq->load.weight - this_cfs_rq->load.weight;
		/* Don't pull if this_cfs_rq has more load than busy_cfs_rq */
		if (imbalance <= 0)
			continue;

		/* Don't pull more than imbalance/2 */
		imbalance /= 2;
		maxload = min(rem_load_move, imbalance);

		*this_best_prio = cfs_rq_best_prio(this_cfs_rq);
#else
# define maxload rem_load_move
#endif
		/* pass busy_cfs_rq argument into
		 * load_balance_[start|next]_fair iterators
		 */
		cfs_rq_iterator.arg = busy_cfs_rq;
		nr_moved = balance_tasks(this_rq, this_cpu, busiest,
				max_nr_move, maxload, sd, idle, all_pinned,
				&load_moved, this_best_prio, &cfs_rq_iterator);

		total_nr_moved += nr_moved;
		max_nr_move -= nr_moved;
		rem_load_move -= load_moved;

		if (max_nr_move <= 0 || rem_load_move <= 0)
			break;
	}

	return max_load_move - rem_load_move;
}
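
/*
 * With FAIR_GROUP_SCHED the amount pulled from each group runqueue is
 * capped at half of the weight imbalance between the busiest and the
 * local cfs_rq, so one balancing pass moves the pair toward - but not
 * past - equal load.
 */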

/*
 * scheduler tick hitting a task of our scheduling class:
 */
static void task_tick_fair(struct rq *rq, struct task_struct *curr)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &curr->se;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		entity_tick(cfs_rq, se);
	}
}

#define swap(a,b) do { typeof(a) tmp = (a); (a) = (b); (b) = tmp; } while (0)

/*
 * Share the fairness runtime between parent and child, thus the
 * total amount of pressure for CPU stays equal - new tasks
 * get a chance to run but frequent forkers are not allowed to
 * monopolize the CPU. Note: the parent runqueue is locked,
 * the child is not running yet.
 */
static void task_new_fair(struct rq *rq, struct task_struct *p)
{
	struct cfs_rq *cfs_rq = task_cfs_rq(p);
	struct sched_entity *se = &p->se, *curr = cfs_rq->curr;

	sched_info_queued(p);

	update_curr(cfs_rq);
	se->vruntime = cfs_rq->min_vruntime;
	update_stats_enqueue(cfs_rq, se);

	/*
	 * The first wait is dominated by the child-runs-first logic,
	 * so do not credit it with that waiting time yet:
	 */
	if (sched_feat(SKIP_INITIAL))
		se->wait_start_fair = 0;

	/*
	 * The statistical average of wait_runtime is about
	 * -granularity/2, so initialize the task with that:
	 */
	if (sched_feat(START_DEBIT))
		se->wait_runtime = -(__sched_period(cfs_rq->nr_running+1) / 2);

	if (sysctl_sched_child_runs_first &&
			curr->vruntime < se->vruntime) {

		dequeue_entity(cfs_rq, curr, 0);
		swap(curr->vruntime, se->vruntime);
		enqueue_entity(cfs_rq, curr, 0);
	}

	update_stats_enqueue(cfs_rq, se);
	__enqueue_entity(cfs_rq, se);
	resched_task(rq->curr);
}
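
/*
 * Fork example: the child starts at the runqueue's min_vruntime.  With
 * child_runs_first set (the default), if the parent would still sort
 * ahead of the child their vruntimes are swapped, so the child runs
 * first and frequent forkers cannot monopolize the CPU.
 */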

#ifdef CONFIG_FAIR_GROUP_SCHED
/* Account for a task changing its policy or group.
 *
 * This routine is mostly called to set cfs_rq->curr field when a task
 * migrates between groups/classes.
 */
static void set_curr_task_fair(struct rq *rq)
{
	struct sched_entity *se = &rq->curr->se;

	for_each_sched_entity(se)
		set_next_entity(cfs_rq_of(se), se);
}
#else
static void set_curr_task_fair(struct rq *rq)
{
}
#endif

/*
 * All the scheduling class methods:
 */
struct sched_class fair_sched_class __read_mostly = {
	.enqueue_task		= enqueue_task_fair,
	.dequeue_task		= dequeue_task_fair,
	.yield_task		= yield_task_fair,

	.check_preempt_curr	= check_preempt_wakeup,

	.pick_next_task		= pick_next_task_fair,
	.put_prev_task		= put_prev_task_fair,

	.load_balance		= load_balance_fair,

	.set_curr_task          = set_curr_task_fair,
	.task_tick		= task_tick_fair,
	.task_new		= task_new_fair,
};

#ifdef CONFIG_SCHED_DEBUG
static void print_cfs_stats(struct seq_file *m, int cpu)
{
	struct cfs_rq *cfs_rq;

	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
		print_cfs_rq(m, cpu, cfs_rq);
}
#endif