cpuidle34xx.c
/*
 * linux/arch/arm/mach-omap2/cpuidle34xx.c
 *
 * OMAP3 CPU IDLE Routines
 *
 * Copyright (C) 2008 Texas Instruments, Inc.
 * Rajendra Nayak <rnayak@ti.com>
 *
 * Copyright (C) 2007 Texas Instruments, Inc.
 * Karthik Dasu <karthik-dp@ti.com>
 *
 * Copyright (C) 2006 Nokia Corporation
 * Tony Lindgren <tony@atomide.com>
 *
 * Copyright (C) 2005 Texas Instruments, Inc.
 * Richard Woodruff <r-woodruff2@ti.com>
 *
 * Based on pm.c for omap2
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/sched.h>
#include <linux/cpuidle.h>

#include <plat/prcm.h>
#include <plat/irqs.h>
#include <plat/powerdomain.h>
#include "clockdomain.h"
#include <plat/serial.h>

#include "pm.h"
#include "control.h"

#ifdef CONFIG_CPU_IDLE

#define OMAP3_MAX_STATES 7
#define OMAP3_STATE_C1 0 /* C1 - MPU WFI + Core active */
#define OMAP3_STATE_C2 1 /* C2 - MPU WFI + Core inactive */
#define OMAP3_STATE_C3 2 /* C3 - MPU CSWR + Core inactive */
#define OMAP3_STATE_C4 3 /* C4 - MPU OFF + Core inactive */
#define OMAP3_STATE_C5 4 /* C5 - MPU RET + Core RET */
#define OMAP3_STATE_C6 5 /* C6 - MPU OFF + Core RET */
#define OMAP3_STATE_C7 6 /* C7 - MPU OFF + Core OFF */

#define OMAP3_STATE_MAX OMAP3_STATE_C7

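/**
 * struct omap3_processor_cx - OMAP3 cpuidle C-state description
 * @valid:		whether this C-state is currently usable
 * @type:		C-state index (OMAP3_STATE_C1 .. OMAP3_STATE_C7)
 * @sleep_latency:	time to enter this state, in microseconds
 * @wakeup_latency:	time to exit this state, in microseconds
 * @mpu_state:		target MPU powerdomain state (PWRDM_POWER_*)
 * @core_state:		target CORE powerdomain state (PWRDM_POWER_*)
 * @threshold:		minimum residency for this state to pay off, in microseconds
 * @flags:		CPUidle state flags (CPUIDLE_FLAG_*)
 */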
struct omap3_processor_cx {
	u8 valid;
	u8 type;
	u32 sleep_latency;
	u32 wakeup_latency;
	u32 mpu_state;
	u32 core_state;
	u32 threshold;
	u32 flags;
};

struct omap3_processor_cx omap3_power_states[OMAP3_MAX_STATES];
struct omap3_processor_cx current_cx_state;
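/* Powerdomains looked up once at init and used in the idle path */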
struct powerdomain *mpu_pd, *core_pd, *per_pd;
struct powerdomain *cam_pd;

/*
 * The latencies/thresholds for various C states have
 * to be configured from the respective board files.
 * These are some default values (which might not provide
 * the best power savings) used on boards which do not
 * pass these details from the board file.
 */
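/* Each entry: {valid, sleep latency, wake latency, threshold}; times in usec */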
static struct cpuidle_params cpuidle_params_table[] = {
	/* C1 */
	{1, 2, 2, 5},
	/* C2 */
	{1, 10, 10, 30},
	/* C3 */
	{1, 50, 50, 300},
	/* C4 */
	{1, 1500, 1800, 4000},
	/* C5 */
	{1, 2500, 7500, 12000},
	/* C6 */
	{1, 3000, 8500, 15000},
	/* C7 */
	{1, 10000, 30000, 300000},
};

/*
 * omap3_idle_bm_check - return non-zero when deeper C-states must be avoided
 *
 * Used by C-states flagged with CPUIDLE_FLAG_CHECK_BM: when the system
 * cannot sleep, the caller falls back to the safe state.
 */
static int omap3_idle_bm_check(void)
{
	if (!omap3_can_sleep())
		return 1;
	return 0;
}

static int _cpuidle_allow_idle(struct powerdomain *pwrdm,
				struct clockdomain *clkdm)
{
	omap2_clkdm_allow_idle(clkdm);
	return 0;
}

static int _cpuidle_deny_idle(struct powerdomain *pwrdm,
				struct clockdomain *clkdm)
{
	omap2_clkdm_deny_idle(clkdm);
	return 0;
}

/**
 * omap3_enter_idle - Programs OMAP3 to enter the specified state
 * @dev: cpuidle device
 * @state: The target state to be programmed
 *
 * Called from the CPUidle framework to program the device to the
 * specified target state selected by the governor.
 */
static int omap3_enter_idle(struct cpuidle_device *dev,
			struct cpuidle_state *state)
{
	struct omap3_processor_cx *cx = cpuidle_get_statedata(state);
	struct timespec ts_preidle, ts_postidle, ts_idle;
	u32 mpu_state = cx->mpu_state, core_state = cx->core_state;

	current_cx_state = *cx;

	/* Used to keep track of the total time in idle */
	getnstimeofday(&ts_preidle);

	local_irq_disable();
	local_fiq_disable();

	pwrdm_set_next_pwrst(mpu_pd, mpu_state);
	pwrdm_set_next_pwrst(core_pd, core_state);

	if (omap_irq_pending() || need_resched())
		goto return_sleep_time;

	/* Keep the MPU and CORE clockdomains from idling during C1 */
	if (cx->type == OMAP3_STATE_C1) {
		pwrdm_for_each_clkdm(mpu_pd, _cpuidle_deny_idle);
		pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle);
	}

	/* Execute ARM wfi */
	omap_sram_idle();

	/* Re-allow automatic clockdomain idle after C1 */
	if (cx->type == OMAP3_STATE_C1) {
		pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle);
		pwrdm_for_each_clkdm(core_pd, _cpuidle_allow_idle);
	}

return_sleep_time:
	getnstimeofday(&ts_postidle);
	ts_idle = timespec_sub(ts_postidle, ts_preidle);

	local_irq_enable();
	local_fiq_enable();

	return ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * USEC_PER_SEC;
}

/**
 * next_valid_state - Find next valid c-state
 * @dev: cpuidle device
 * @state: Currently selected c-state
 *
 * If the current state is valid, it is returned to the caller.
 * Else, this function searches for a lower c-state which is still
 * valid (as defined in omap3_power_states[]).
 */
static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev,
						struct cpuidle_state *curr)
{
	struct cpuidle_state *next = NULL;
	struct omap3_processor_cx *cx;

	cx = (struct omap3_processor_cx *)cpuidle_get_statedata(curr);

	/* Check if current state is valid */
	if (cx->valid) {
		return curr;
	} else {
		u8 idx = OMAP3_STATE_MAX;

		/*
		 * Find the index of the current state, starting at the highest C-state
		 */
		for (; idx >= OMAP3_STATE_C1; idx--) {
			if (&dev->states[idx] == curr) {
				next = &dev->states[idx];
				break;
			}
		}

		/*
		 * Should never hit this condition.
		 */
		WARN_ON(next == NULL);

		/*
		 * Drop to next valid state.
		 * Start search from the next (lower) state.
		 */
		idx--;
		for (; idx >= OMAP3_STATE_C1; idx--) {
			struct omap3_processor_cx *cx;

			cx = cpuidle_get_statedata(&dev->states[idx]);
			if (cx->valid) {
				next = &dev->states[idx];
				break;
			}
		}
		/*
		 * C1 and C2 are always valid.
		 * So, no need to check for 'next==NULL' outside this loop.
		 */
	}

	return next;
}

/**
 * omap3_enter_idle_bm - Checks for any bus activity
 * @dev: cpuidle device
 * @state: The target state to be programmed
 *
 * Used for C states with CPUIDLE_FLAG_CHECK_BM flag set. This
 * function checks for any pending activity and then programs the
 * device to the specified or a safer state.
 */
static int omap3_enter_idle_bm(struct cpuidle_device *dev,
			       struct cpuidle_state *state)
{
	struct cpuidle_state *new_state = next_valid_state(dev, state);
	u32 core_next_state, per_next_state = 0, per_saved_state = 0;
	u32 cam_state;
	struct omap3_processor_cx *cx;
	int ret;

	if ((state->flags & CPUIDLE_FLAG_CHECK_BM) && omap3_idle_bm_check()) {
		BUG_ON(!dev->safe_state);
		new_state = dev->safe_state;
		goto select_state;
	}

	cx = cpuidle_get_statedata(state);
	core_next_state = cx->core_state;

	/*
	 * FIXME: we currently manage device-specific idle states
	 *        for PER and CORE in combination with CPU-specific
	 *        idle states.  This is wrong, and device-specific
	 *        idle management needs to be separated out into
	 *        its own code.
	 */

	/*
	 * Prevent idle completely if CAM is active.
	 * CAM does not have wakeup capability in OMAP3.
	 */
	cam_state = pwrdm_read_pwrst(cam_pd);
	if (cam_state == PWRDM_POWER_ON) {
		new_state = dev->safe_state;
		goto select_state;
	}

	/*
	 * Prevent PER off if CORE is not in retention or off as this
	 * would disable PER wakeups completely.
	 */
	per_next_state = per_saved_state = pwrdm_read_next_pwrst(per_pd);
	if ((per_next_state == PWRDM_POWER_OFF) &&
	    (core_next_state > PWRDM_POWER_RET))
		per_next_state = PWRDM_POWER_RET;

	/* Are we changing PER target state? */
	if (per_next_state != per_saved_state)
		pwrdm_set_next_pwrst(per_pd, per_next_state);

select_state:
	dev->last_state = new_state;
	ret = omap3_enter_idle(dev, new_state);

	/* Restore original PER state if it was modified */
	if (per_next_state != per_saved_state)
		pwrdm_set_next_pwrst(per_pd, per_saved_state);

	return ret;
}

DEFINE_PER_CPU(struct cpuidle_device, omap3_idle_dev);

/**
 * omap3_cpuidle_update_states() - Update the cpuidle states
 * @mpu_deepest_state:	Enable states up to and including this one for the MPU domain
 * @core_deepest_state:	Enable states up to and including this one for the CORE domain
 *
 * This goes through the list of states available and marks each C-state
 * valid or invalid, based on the deepest state that can be achieved for
 * the respective domain.
 */
void omap3_cpuidle_update_states(u32 mpu_deepest_state, u32 core_deepest_state)
{
	int i;

	for (i = OMAP3_STATE_C1; i < OMAP3_MAX_STATES; i++) {
		struct omap3_processor_cx *cx = &omap3_power_states[i];

		if ((cx->mpu_state >= mpu_deepest_state) &&
		    (cx->core_state >= core_deepest_state)) {
			cx->valid = 1;
		} else {
			cx->valid = 0;
		}
	}
}

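/**
 * omap3_pm_init_cpuidle - override the default C-state latencies/thresholds
 * @cpuidle_board_params: board-supplied table with OMAP3_MAX_STATES entries
 *
 * Called from board files that want to provide their own C-state
 * parameters; copies the board values over the defaults in
 * cpuidle_params_table.  Does nothing if @cpuidle_board_params is NULL.
 */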
void omap3_pm_init_cpuidle(struct cpuidle_params *cpuidle_board_params)
{
	int i;

	if (!cpuidle_board_params)
		return;

	for (i = OMAP3_STATE_C1; i < OMAP3_MAX_STATES; i++) {
		cpuidle_params_table[i].valid =
			cpuidle_board_params[i].valid;
		cpuidle_params_table[i].sleep_latency =
			cpuidle_board_params[i].sleep_latency;
		cpuidle_params_table[i].wake_latency =
			cpuidle_board_params[i].wake_latency;
		cpuidle_params_table[i].threshold =
			cpuidle_board_params[i].threshold;
	}
	return;
}

/* omap_init_power_states - Initialises the OMAP3 specific C states.
 *
 * Below is the description of each C state.
 *	C1 . MPU WFI + Core active
 *	C2 . MPU WFI + Core inactive
 *	C3 . MPU CSWR + Core inactive
 *	C4 . MPU OFF + Core inactive
 *	C5 . MPU CSWR + Core CSWR
 *	C6 . MPU OFF + Core CSWR
 *	C7 . MPU OFF + Core OFF
 */
void omap_init_power_states(void)
{
	/* C1 . MPU WFI + Core active */
	omap3_power_states[OMAP3_STATE_C1].valid =
			cpuidle_params_table[OMAP3_STATE_C1].valid;
	omap3_power_states[OMAP3_STATE_C1].type = OMAP3_STATE_C1;
	omap3_power_states[OMAP3_STATE_C1].sleep_latency =
			cpuidle_params_table[OMAP3_STATE_C1].sleep_latency;
	omap3_power_states[OMAP3_STATE_C1].wakeup_latency =
			cpuidle_params_table[OMAP3_STATE_C1].wake_latency;
	omap3_power_states[OMAP3_STATE_C1].threshold =
			cpuidle_params_table[OMAP3_STATE_C1].threshold;
	omap3_power_states[OMAP3_STATE_C1].mpu_state = PWRDM_POWER_ON;
	omap3_power_states[OMAP3_STATE_C1].core_state = PWRDM_POWER_ON;
	omap3_power_states[OMAP3_STATE_C1].flags = CPUIDLE_FLAG_TIME_VALID;

	/* C2 . MPU WFI + Core inactive */
	omap3_power_states[OMAP3_STATE_C2].valid =
			cpuidle_params_table[OMAP3_STATE_C2].valid;
	omap3_power_states[OMAP3_STATE_C2].type = OMAP3_STATE_C2;
	omap3_power_states[OMAP3_STATE_C2].sleep_latency =
			cpuidle_params_table[OMAP3_STATE_C2].sleep_latency;
	omap3_power_states[OMAP3_STATE_C2].wakeup_latency =
			cpuidle_params_table[OMAP3_STATE_C2].wake_latency;
	omap3_power_states[OMAP3_STATE_C2].threshold =
			cpuidle_params_table[OMAP3_STATE_C2].threshold;
	omap3_power_states[OMAP3_STATE_C2].mpu_state = PWRDM_POWER_ON;
	omap3_power_states[OMAP3_STATE_C2].core_state = PWRDM_POWER_ON;
	omap3_power_states[OMAP3_STATE_C2].flags = CPUIDLE_FLAG_TIME_VALID |
				CPUIDLE_FLAG_CHECK_BM;

	/* C3 . MPU CSWR + Core inactive */
	omap3_power_states[OMAP3_STATE_C3].valid =
			cpuidle_params_table[OMAP3_STATE_C3].valid;
	omap3_power_states[OMAP3_STATE_C3].type = OMAP3_STATE_C3;
	omap3_power_states[OMAP3_STATE_C3].sleep_latency =
			cpuidle_params_table[OMAP3_STATE_C3].sleep_latency;
	omap3_power_states[OMAP3_STATE_C3].wakeup_latency =
			cpuidle_params_table[OMAP3_STATE_C3].wake_latency;
	omap3_power_states[OMAP3_STATE_C3].threshold =
			cpuidle_params_table[OMAP3_STATE_C3].threshold;
	omap3_power_states[OMAP3_STATE_C3].mpu_state = PWRDM_POWER_RET;
	omap3_power_states[OMAP3_STATE_C3].core_state = PWRDM_POWER_ON;
	omap3_power_states[OMAP3_STATE_C3].flags = CPUIDLE_FLAG_TIME_VALID |
				CPUIDLE_FLAG_CHECK_BM;

	/* C4 . MPU OFF + Core inactive */
	omap3_power_states[OMAP3_STATE_C4].valid =
			cpuidle_params_table[OMAP3_STATE_C4].valid;
	omap3_power_states[OMAP3_STATE_C4].type = OMAP3_STATE_C4;
	omap3_power_states[OMAP3_STATE_C4].sleep_latency =
			cpuidle_params_table[OMAP3_STATE_C4].sleep_latency;
	omap3_power_states[OMAP3_STATE_C4].wakeup_latency =
			cpuidle_params_table[OMAP3_STATE_C4].wake_latency;
	omap3_power_states[OMAP3_STATE_C4].threshold =
			cpuidle_params_table[OMAP3_STATE_C4].threshold;
	omap3_power_states[OMAP3_STATE_C4].mpu_state = PWRDM_POWER_OFF;
	omap3_power_states[OMAP3_STATE_C4].core_state = PWRDM_POWER_ON;
	omap3_power_states[OMAP3_STATE_C4].flags = CPUIDLE_FLAG_TIME_VALID |
				CPUIDLE_FLAG_CHECK_BM;

	/* C5 . MPU CSWR + Core CSWR */
	omap3_power_states[OMAP3_STATE_C5].valid =
			cpuidle_params_table[OMAP3_STATE_C5].valid;
	omap3_power_states[OMAP3_STATE_C5].type = OMAP3_STATE_C5;
	omap3_power_states[OMAP3_STATE_C5].sleep_latency =
			cpuidle_params_table[OMAP3_STATE_C5].sleep_latency;
	omap3_power_states[OMAP3_STATE_C5].wakeup_latency =
			cpuidle_params_table[OMAP3_STATE_C5].wake_latency;
	omap3_power_states[OMAP3_STATE_C5].threshold =
			cpuidle_params_table[OMAP3_STATE_C5].threshold;
	omap3_power_states[OMAP3_STATE_C5].mpu_state = PWRDM_POWER_RET;
	omap3_power_states[OMAP3_STATE_C5].core_state = PWRDM_POWER_RET;
	omap3_power_states[OMAP3_STATE_C5].flags = CPUIDLE_FLAG_TIME_VALID |
				CPUIDLE_FLAG_CHECK_BM;

	/* C6 . MPU OFF + Core CSWR */
	omap3_power_states[OMAP3_STATE_C6].valid =
			cpuidle_params_table[OMAP3_STATE_C6].valid;
	omap3_power_states[OMAP3_STATE_C6].type = OMAP3_STATE_C6;
	omap3_power_states[OMAP3_STATE_C6].sleep_latency =
			cpuidle_params_table[OMAP3_STATE_C6].sleep_latency;
	omap3_power_states[OMAP3_STATE_C6].wakeup_latency =
			cpuidle_params_table[OMAP3_STATE_C6].wake_latency;
	omap3_power_states[OMAP3_STATE_C6].threshold =
			cpuidle_params_table[OMAP3_STATE_C6].threshold;
	omap3_power_states[OMAP3_STATE_C6].mpu_state = PWRDM_POWER_OFF;
	omap3_power_states[OMAP3_STATE_C6].core_state = PWRDM_POWER_RET;
	omap3_power_states[OMAP3_STATE_C6].flags = CPUIDLE_FLAG_TIME_VALID |
				CPUIDLE_FLAG_CHECK_BM;

	/* C7 . MPU OFF + Core OFF */
	omap3_power_states[OMAP3_STATE_C7].valid =
			cpuidle_params_table[OMAP3_STATE_C7].valid;
	omap3_power_states[OMAP3_STATE_C7].type = OMAP3_STATE_C7;
	omap3_power_states[OMAP3_STATE_C7].sleep_latency =
			cpuidle_params_table[OMAP3_STATE_C7].sleep_latency;
	omap3_power_states[OMAP3_STATE_C7].wakeup_latency =
			cpuidle_params_table[OMAP3_STATE_C7].wake_latency;
	omap3_power_states[OMAP3_STATE_C7].threshold =
			cpuidle_params_table[OMAP3_STATE_C7].threshold;
	omap3_power_states[OMAP3_STATE_C7].mpu_state = PWRDM_POWER_OFF;
	omap3_power_states[OMAP3_STATE_C7].core_state = PWRDM_POWER_OFF;
	omap3_power_states[OMAP3_STATE_C7].flags = CPUIDLE_FLAG_TIME_VALID |
				CPUIDLE_FLAG_CHECK_BM;

	/*
	 * Erratum i583: implementation for ES rev < ES1.2 on 3630. We cannot
	 * enable OFF mode in a stable form for previous revisions.
	 * We disable the C7 state as a result.
	 */
	if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583)) {
		omap3_power_states[OMAP3_STATE_C7].valid = 0;
		cpuidle_params_table[OMAP3_STATE_C7].valid = 0;
		WARN_ONCE(1, "%s: core off state C7 disabled due to i583\n",
				__func__);
	}
}

struct cpuidle_driver omap3_idle_driver = {
	.name = 	"omap3_idle",
	.owner = 	THIS_MODULE,
};

/**
 * omap3_idle_init - Init routine for OMAP3 idle
 *
 * Registers the OMAP3 specific cpuidle driver with the cpuidle
 * framework with the valid set of states.
 */
int __init omap3_idle_init(void)
{
	int i, count = 0;
	struct omap3_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_device *dev;

	mpu_pd = pwrdm_lookup("mpu_pwrdm");
	core_pd = pwrdm_lookup("core_pwrdm");
	per_pd = pwrdm_lookup("per_pwrdm");
	cam_pd = pwrdm_lookup("cam_pwrdm");

	omap_init_power_states();
	cpuidle_register_driver(&omap3_idle_driver);

	dev = &per_cpu(omap3_idle_dev, smp_processor_id());

	for (i = OMAP3_STATE_C1; i < OMAP3_MAX_STATES; i++) {
		cx = &omap3_power_states[i];
		state = &dev->states[count];

		if (!cx->valid)
			continue;
		cpuidle_set_statedata(state, cx);
		state->exit_latency = cx->sleep_latency + cx->wakeup_latency;
		state->target_residency = cx->threshold;
		state->flags = cx->flags;
		state->enter = (state->flags & CPUIDLE_FLAG_CHECK_BM) ?
			omap3_enter_idle_bm : omap3_enter_idle;
		if (cx->type == OMAP3_STATE_C1)
			dev->safe_state = state;
		sprintf(state->name, "C%d", count+1);
		count++;
	}

	if (!count)
		return -EINVAL;
	dev->state_count = count;

	if (enable_off_mode)
		omap3_cpuidle_update_states(PWRDM_POWER_OFF, PWRDM_POWER_OFF);
	else
		omap3_cpuidle_update_states(PWRDM_POWER_RET, PWRDM_POWER_RET);

	if (cpuidle_register_device(dev)) {
		printk(KERN_ERR "%s: CPUidle register device failed\n",
		       __func__);
		return -EIO;
	}

	return 0;
}
#else
int __init omap3_idle_init(void)
{
	return 0;
}
#endif /* CONFIG_CPU_IDLE */