Commit efb90582 authored by Len Brown

Merge branches 'acpi', 'idle', 'mrst-pmu' and 'pm-tools' into next

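Across the driver hunks below, the common thread is the cpuidle core rework carried by this merge: idle-state definitions and their parameters move from struct cpuidle_device into struct cpuidle_driver, and the state ->enter() callback changes from taking a state pointer and returning the measured residency to taking a driver pointer plus a state index, returning the index of the state actually entered while storing the residency in dev->last_residency. A minimal sketch of the converted callback, modelled on the ARM drivers in this diff (foo_enter_idle is a hypothetical name):

static int foo_enter_idle(struct cpuidle_device *dev,
			  struct cpuidle_driver *drv, int index)
{
	struct timeval before, after;
	int idle_time;

	local_irq_disable();
	do_gettimeofday(&before);
	cpu_do_idle();			/* WFI for the simplest state */
	do_gettimeofday(&after);
	local_irq_enable();

	idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
		    (after.tv_usec - before.tv_usec);

	dev->last_residency = idle_time;	/* residency is now reported via the device */
	return index;				/* index of the state actually entered */
}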
......@@ -33,7 +33,8 @@ static struct cpuidle_driver at91_idle_driver = {
/* Actual code that puts the SoC in different idle states */
static int at91_enter_idle(struct cpuidle_device *dev,
struct cpuidle_state *state)
struct cpuidle_driver *drv,
int index)
{
struct timeval before, after;
int idle_time;
......@@ -41,10 +42,10 @@ static int at91_enter_idle(struct cpuidle_device *dev,
local_irq_disable();
do_gettimeofday(&before);
if (state == &dev->states[0])
if (index == 0)
/* Wait for interrupt state */
cpu_do_idle();
else if (state == &dev->states[1]) {
else if (index == 1) {
asm("b 1f; .align 5; 1:");
asm("mcr p15, 0, r0, c7, c10, 4"); /* drain write buffer */
saved_lpr = sdram_selfrefresh_enable();
......@@ -55,34 +56,38 @@ static int at91_enter_idle(struct cpuidle_device *dev,
local_irq_enable();
idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
(after.tv_usec - before.tv_usec);
return idle_time;
dev->last_residency = idle_time;
return index;
}
/* Initialize CPU idle by registering the idle states */
static int at91_init_cpuidle(void)
{
struct cpuidle_device *device;
cpuidle_register_driver(&at91_idle_driver);
struct cpuidle_driver *driver = &at91_idle_driver;
device = &per_cpu(at91_cpuidle_device, smp_processor_id());
device->state_count = AT91_MAX_STATES;
driver->state_count = AT91_MAX_STATES;
/* Wait for interrupt state */
device->states[0].enter = at91_enter_idle;
device->states[0].exit_latency = 1;
device->states[0].target_residency = 10000;
device->states[0].flags = CPUIDLE_FLAG_TIME_VALID;
strcpy(device->states[0].name, "WFI");
strcpy(device->states[0].desc, "Wait for interrupt");
driver->states[0].enter = at91_enter_idle;
driver->states[0].exit_latency = 1;
driver->states[0].target_residency = 10000;
driver->states[0].flags = CPUIDLE_FLAG_TIME_VALID;
strcpy(driver->states[0].name, "WFI");
strcpy(driver->states[0].desc, "Wait for interrupt");
/* Wait for interrupt and RAM self refresh state */
device->states[1].enter = at91_enter_idle;
device->states[1].exit_latency = 10;
device->states[1].target_residency = 10000;
device->states[1].flags = CPUIDLE_FLAG_TIME_VALID;
strcpy(device->states[1].name, "RAM_SR");
strcpy(device->states[1].desc, "WFI and RAM Self Refresh");
driver->states[1].enter = at91_enter_idle;
driver->states[1].exit_latency = 10;
driver->states[1].target_residency = 10000;
driver->states[1].flags = CPUIDLE_FLAG_TIME_VALID;
strcpy(driver->states[1].name, "RAM_SR");
strcpy(driver->states[1].desc, "WFI and RAM Self Refresh");
cpuidle_register_driver(&at91_idle_driver);
if (cpuidle_register_device(device)) {
printk(KERN_ERR "at91_init_cpuidle: Failed registering\n");
......
......@@ -78,9 +78,11 @@ static struct davinci_ops davinci_states[DAVINCI_CPUIDLE_MAX_STATES] = {
/* Actual code that puts the SoC in different idle states */
static int davinci_enter_idle(struct cpuidle_device *dev,
struct cpuidle_state *state)
struct cpuidle_driver *drv,
int index)
{
struct davinci_ops *ops = cpuidle_get_statedata(state);
struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
struct davinci_ops *ops = cpuidle_get_statedata(state_usage);
struct timeval before, after;
int idle_time;
......@@ -98,13 +100,17 @@ static int davinci_enter_idle(struct cpuidle_device *dev,
local_irq_enable();
idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
(after.tv_usec - before.tv_usec);
return idle_time;
dev->last_residency = idle_time;
return index;
}
static int __init davinci_cpuidle_probe(struct platform_device *pdev)
{
int ret;
struct cpuidle_device *device;
struct cpuidle_driver *driver = &davinci_idle_driver;
struct davinci_cpuidle_config *pdata = pdev->dev.platform_data;
device = &per_cpu(davinci_cpuidle_device, smp_processor_id());
......@@ -116,32 +122,33 @@ static int __init davinci_cpuidle_probe(struct platform_device *pdev)
ddr2_reg_base = pdata->ddr2_ctlr_base;
ret = cpuidle_register_driver(&davinci_idle_driver);
if (ret) {
dev_err(&pdev->dev, "failed to register driver\n");
return ret;
}
/* Wait for interrupt state */
device->states[0].enter = davinci_enter_idle;
device->states[0].exit_latency = 1;
device->states[0].target_residency = 10000;
device->states[0].flags = CPUIDLE_FLAG_TIME_VALID;
strcpy(device->states[0].name, "WFI");
strcpy(device->states[0].desc, "Wait for interrupt");
driver->states[0].enter = davinci_enter_idle;
driver->states[0].exit_latency = 1;
driver->states[0].target_residency = 10000;
driver->states[0].flags = CPUIDLE_FLAG_TIME_VALID;
strcpy(driver->states[0].name, "WFI");
strcpy(driver->states[0].desc, "Wait for interrupt");
/* Wait for interrupt and DDR self refresh state */
device->states[1].enter = davinci_enter_idle;
device->states[1].exit_latency = 10;
device->states[1].target_residency = 10000;
device->states[1].flags = CPUIDLE_FLAG_TIME_VALID;
strcpy(device->states[1].name, "DDR SR");
strcpy(device->states[1].desc, "WFI and DDR Self Refresh");
driver->states[1].enter = davinci_enter_idle;
driver->states[1].exit_latency = 10;
driver->states[1].target_residency = 10000;
driver->states[1].flags = CPUIDLE_FLAG_TIME_VALID;
strcpy(driver->states[1].name, "DDR SR");
strcpy(driver->states[1].desc, "WFI and DDR Self Refresh");
if (pdata->ddr2_pdown)
davinci_states[1].flags |= DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN;
cpuidle_set_statedata(&device->states[1], &davinci_states[1]);
cpuidle_set_statedata(&device->states_usage[1], &davinci_states[1]);
device->state_count = DAVINCI_CPUIDLE_MAX_STATES;
driver->state_count = DAVINCI_CPUIDLE_MAX_STATES;
ret = cpuidle_register_driver(&davinci_idle_driver);
if (ret) {
dev_err(&pdev->dev, "failed to register driver\n");
return ret;
}
ret = cpuidle_register_device(device);
if (ret) {
......
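The davinci hunks above also show where per-state private data now lives: the statedata helpers operate on the per-device struct cpuidle_state_usage rather than on struct cpuidle_state, so driver data is attached and looked up by state index. A compressed sketch of both ends of that path, using a hypothetical foo_ops type:

struct foo_ops { unsigned int flags; };	/* hypothetical per-state private data */
static struct foo_ops foo_states[2];

/* probe/init: attach the private data to the per-device usage slot */
cpuidle_set_statedata(&device->states_usage[1], &foo_states[1]);

/* inside ->enter(dev, drv, index): fetch it back by index */
struct foo_ops *ops = cpuidle_get_statedata(&dev->states_usage[index]);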
......@@ -16,7 +16,8 @@
#include <asm/proc-fns.h>
static int exynos4_enter_idle(struct cpuidle_device *dev,
struct cpuidle_state *state);
struct cpuidle_driver *drv,
int index);
static struct cpuidle_state exynos4_cpuidle_set[] = {
[0] = {
......@@ -37,7 +38,8 @@ static struct cpuidle_driver exynos4_idle_driver = {
};
static int exynos4_enter_idle(struct cpuidle_device *dev,
struct cpuidle_state *state)
struct cpuidle_driver *drv,
int index)
{
struct timeval before, after;
int idle_time;
......@@ -52,29 +54,31 @@ static int exynos4_enter_idle(struct cpuidle_device *dev,
idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
(after.tv_usec - before.tv_usec);
return idle_time;
dev->last_residency = idle_time;
return index;
}
static int __init exynos4_init_cpuidle(void)
{
int i, max_cpuidle_state, cpu_id;
struct cpuidle_device *device;
struct cpuidle_driver *drv = &exynos4_idle_driver;
/* Setup cpuidle driver */
drv->state_count = (sizeof(exynos4_cpuidle_set) /
sizeof(struct cpuidle_state));
max_cpuidle_state = drv->state_count;
for (i = 0; i < max_cpuidle_state; i++) {
memcpy(&drv->states[i], &exynos4_cpuidle_set[i],
sizeof(struct cpuidle_state));
}
cpuidle_register_driver(&exynos4_idle_driver);
for_each_cpu(cpu_id, cpu_online_mask) {
device = &per_cpu(exynos4_cpuidle_device, cpu_id);
device->cpu = cpu_id;
device->state_count = (sizeof(exynos4_cpuidle_set) /
sizeof(struct cpuidle_state));
max_cpuidle_state = device->state_count;
for (i = 0; i < max_cpuidle_state; i++) {
memcpy(&device->states[i], &exynos4_cpuidle_set[i],
sizeof(struct cpuidle_state));
}
device->state_count = drv->state_count;
if (cpuidle_register_device(device)) {
printk(KERN_ERR "CPUidle register device failed\n,");
......
......@@ -32,17 +32,18 @@ static DEFINE_PER_CPU(struct cpuidle_device, kirkwood_cpuidle_device);
/* Actual code that puts the SoC in different idle states */
static int kirkwood_enter_idle(struct cpuidle_device *dev,
struct cpuidle_state *state)
struct cpuidle_driver *drv,
int index)
{
struct timeval before, after;
int idle_time;
local_irq_disable();
do_gettimeofday(&before);
if (state == &dev->states[0])
if (index == 0)
/* Wait for interrupt state */
cpu_do_idle();
else if (state == &dev->states[1]) {
else if (index == 1) {
/*
* Following write will put DDR in self refresh.
* Note that we have 256 cycles before DDR puts it
......@@ -57,35 +58,40 @@ static int kirkwood_enter_idle(struct cpuidle_device *dev,
local_irq_enable();
idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
(after.tv_usec - before.tv_usec);
return idle_time;
/* Update last residency */
dev->last_residency = idle_time;
return index;
}
/* Initialize CPU idle by registering the idle states */
static int kirkwood_init_cpuidle(void)
{
struct cpuidle_device *device;
cpuidle_register_driver(&kirkwood_idle_driver);
struct cpuidle_driver *driver = &kirkwood_idle_driver;
device = &per_cpu(kirkwood_cpuidle_device, smp_processor_id());
device->state_count = KIRKWOOD_MAX_STATES;
driver->state_count = KIRKWOOD_MAX_STATES;
/* Wait for interrupt state */
device->states[0].enter = kirkwood_enter_idle;
device->states[0].exit_latency = 1;
device->states[0].target_residency = 10000;
device->states[0].flags = CPUIDLE_FLAG_TIME_VALID;
strcpy(device->states[0].name, "WFI");
strcpy(device->states[0].desc, "Wait for interrupt");
driver->states[0].enter = kirkwood_enter_idle;
driver->states[0].exit_latency = 1;
driver->states[0].target_residency = 10000;
driver->states[0].flags = CPUIDLE_FLAG_TIME_VALID;
strcpy(driver->states[0].name, "WFI");
strcpy(driver->states[0].desc, "Wait for interrupt");
/* Wait for interrupt and DDR self refresh state */
device->states[1].enter = kirkwood_enter_idle;
device->states[1].exit_latency = 10;
device->states[1].target_residency = 10000;
device->states[1].flags = CPUIDLE_FLAG_TIME_VALID;
strcpy(device->states[1].name, "DDR SR");
strcpy(device->states[1].desc, "WFI and DDR Self Refresh");
driver->states[1].enter = kirkwood_enter_idle;
driver->states[1].exit_latency = 10;
driver->states[1].target_residency = 10000;
driver->states[1].flags = CPUIDLE_FLAG_TIME_VALID;
strcpy(driver->states[1].name, "DDR SR");
strcpy(driver->states[1].desc, "WFI and DDR Self Refresh");
cpuidle_register_driver(&kirkwood_idle_driver);
if (cpuidle_register_device(device)) {
printk(KERN_ERR "kirkwood_init_cpuidle: Failed registering\n");
return -EIO;
......
......@@ -25,11 +25,12 @@ static unsigned long cpuidle_mode[] = {
};
static int cpuidle_sleep_enter(struct cpuidle_device *dev,
struct cpuidle_state *state)
struct cpuidle_driver *drv,
int index)
{
unsigned long allowed_mode = arch_hwblk_sleep_mode();
ktime_t before, after;
int requested_state = state - &dev->states[0];
int requested_state = index;
int allowed_state;
int k;
......@@ -46,11 +47,13 @@ static int cpuidle_sleep_enter(struct cpuidle_device *dev,
*/
k = min_t(int, allowed_state, requested_state);
dev->last_state = &dev->states[k];
before = ktime_get();
sh_mobile_call_standby(cpuidle_mode[k]);
after = ktime_get();
return ktime_to_ns(ktime_sub(after, before)) >> 10;
dev->last_residency = (int)ktime_to_ns(ktime_sub(after, before)) >> 10;
return k;
}
static struct cpuidle_device cpuidle_dev;
......@@ -62,19 +65,19 @@ static struct cpuidle_driver cpuidle_driver = {
void sh_mobile_setup_cpuidle(void)
{
struct cpuidle_device *dev = &cpuidle_dev;
struct cpuidle_driver *drv = &cpuidle_driver;
struct cpuidle_state *state;
int i;
cpuidle_register_driver(&cpuidle_driver);
for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
dev->states[i].name[0] = '\0';
dev->states[i].desc[0] = '\0';
drv->states[i].name[0] = '\0';
drv->states[i].desc[0] = '\0';
}
i = CPUIDLE_DRIVER_STATE_START;
state = &dev->states[i++];
state = &drv->states[i++];
snprintf(state->name, CPUIDLE_NAME_LEN, "C1");
strncpy(state->desc, "SuperH Sleep Mode", CPUIDLE_DESC_LEN);
state->exit_latency = 1;
......@@ -84,10 +87,10 @@ void sh_mobile_setup_cpuidle(void)
state->flags |= CPUIDLE_FLAG_TIME_VALID;
state->enter = cpuidle_sleep_enter;
dev->safe_state = state;
drv->safe_state_index = i-1;
if (sh_mobile_sleep_supported & SUSP_SH_SF) {
state = &dev->states[i++];
state = &drv->states[i++];
snprintf(state->name, CPUIDLE_NAME_LEN, "C2");
strncpy(state->desc, "SuperH Sleep Mode [SF]",
CPUIDLE_DESC_LEN);
......@@ -100,7 +103,7 @@ void sh_mobile_setup_cpuidle(void)
}
if (sh_mobile_sleep_supported & SUSP_SH_STANDBY) {
state = &dev->states[i++];
state = &drv->states[i++];
snprintf(state->name, CPUIDLE_NAME_LEN, "C3");
strncpy(state->desc, "SuperH Mobile Standby Mode [SF]",
CPUIDLE_DESC_LEN);
......@@ -112,7 +115,10 @@ void sh_mobile_setup_cpuidle(void)
state->enter = cpuidle_sleep_enter;
}
drv->state_count = i;
dev->state_count = i;
cpuidle_register_driver(&cpuidle_driver);
cpuidle_register_device(dev);
}
......@@ -70,7 +70,7 @@ static struct mrst_device mrst_devs[] = {
/* 24 */ { 0x4110, 0 }, /* Lincroft */
};
/* n.b. We ignore PCI-id 0x815 in LSS9 b/c MeeGo has no driver for it */
/* n.b. We ignore PCI-id 0x815 in LSS9 b/c Linux has no driver for it */
static u16 mrst_lss9_pci_ids[] = {0x080a, 0x0814, 0};
static u16 mrst_lss10_pci_ids[] = {0x0800, 0x0801, 0x0802, 0x0803,
0x0804, 0x0805, 0x080f, 0};
......
......@@ -426,7 +426,7 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
if (action == CPU_ONLINE && pr) {
acpi_processor_ppc_has_changed(pr, 0);
acpi_processor_cst_has_changed(pr);
acpi_processor_hotplug(pr);
acpi_processor_reevaluate_tstate(pr, action);
acpi_processor_tstate_has_changed(pr);
}
......@@ -503,8 +503,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
acpi_processor_get_throttling_info(pr);
acpi_processor_get_limit_info(pr);
if (cpuidle_get_driver() == &acpi_idle_driver)
if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver)
acpi_processor_power_init(pr, device);
pr->cdev = thermal_cooling_device_register("Processor", device,
......@@ -800,17 +799,9 @@ static int __init acpi_processor_init(void)
memset(&errata, 0, sizeof(errata));
if (!cpuidle_register_driver(&acpi_idle_driver)) {
printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
acpi_idle_driver.name);
} else {
printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s\n",
cpuidle_get_driver()->name);
}
result = acpi_bus_register_driver(&acpi_processor_driver);
if (result < 0)
goto out_cpuidle;
return result;
acpi_processor_install_hotplug_notify();
......@@ -821,11 +812,6 @@ static int __init acpi_processor_init(void)
acpi_processor_throttling_init();
return 0;
out_cpuidle:
cpuidle_unregister_driver(&acpi_idle_driver);
return result;
}
static void __exit acpi_processor_exit(void)
......
......@@ -61,8 +61,9 @@ static int __cpuidle_register_device(struct cpuidle_device *dev);
int cpuidle_idle_call(void)
{
struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
struct cpuidle_driver *drv = cpuidle_get_driver();
struct cpuidle_state *target_state;
int next_state;
int next_state, entered_state;
if (off)
return -ENODEV;
......@@ -83,45 +84,36 @@ int cpuidle_idle_call(void)
hrtimer_peek_ahead_timers();
#endif
/*
* Call the device's prepare function before calling the
* governor's select function. ->prepare gives the device's
* cpuidle driver a chance to update any dynamic information
* of its cpuidle states for the current idle period, e.g.
* state availability, latencies, residencies, etc.
*/
if (dev->prepare)
dev->prepare(dev);
/* ask the governor for the next state */
next_state = cpuidle_curr_governor->select(dev);
next_state = cpuidle_curr_governor->select(drv, dev);
if (need_resched()) {
local_irq_enable();
return 0;
}
target_state = &dev->states[next_state];
/* enter the state and update stats */
dev->last_state = target_state;
target_state = &drv->states[next_state];
trace_power_start(POWER_CSTATE, next_state, dev->cpu);
trace_cpu_idle(next_state, dev->cpu);
dev->last_residency = target_state->enter(dev, target_state);
entered_state = target_state->enter(dev, drv, next_state);
trace_power_end(dev->cpu);
trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu);
if (dev->last_state)
target_state = dev->last_state;
target_state->time += (unsigned long long)dev->last_residency;
target_state->usage++;
if (entered_state >= 0) {
/* Update cpuidle counters */
/* This can be moved to within driver enter routine
* but that results in multiple copies of same code.
*/
dev->states_usage[entered_state].time +=
(unsigned long long)dev->last_residency;
dev->states_usage[entered_state].usage++;
}
/* give the governor an opportunity to reflect on the outcome */
if (cpuidle_curr_governor->reflect)
cpuidle_curr_governor->reflect(dev);
cpuidle_curr_governor->reflect(dev, entered_state);
return 0;
}
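Condensed from the hunk above, the core idle path now follows this contract (comments added for orientation; this restates the code shown, it is not additional logic):

next_state = cpuidle_curr_governor->select(drv, dev);	/* governor picks a state index */
target_state = &drv->states[next_state];		/* state parameters live in the driver */
entered_state = target_state->enter(dev, drv, next_state);
if (entered_state >= 0) {				/* driver may report a different state */
	dev->states_usage[entered_state].time += dev->last_residency;
	dev->states_usage[entered_state].usage++;
}
cpuidle_curr_governor->reflect(dev, entered_state);	/* governor learns what actually happened */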
......@@ -172,11 +164,11 @@ void cpuidle_resume_and_unlock(void)
EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);
#ifdef CONFIG_ARCH_HAS_CPU_RELAX
static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st)
static int poll_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
ktime_t t1, t2;
s64 diff;
int ret;
t1 = ktime_get();
local_irq_enable();
......@@ -188,15 +180,14 @@ static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st)
if (diff > INT_MAX)
diff = INT_MAX;
ret = (int) diff;
return ret;
dev->last_residency = (int) diff;
return index;
}
static void poll_idle_init(struct cpuidle_device *dev)
static void poll_idle_init(struct cpuidle_driver *drv)
{
struct cpuidle_state *state = &dev->states[0];
cpuidle_set_statedata(state, NULL);
struct cpuidle_state *state = &drv->states[0];
snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
......@@ -207,7 +198,7 @@ static void poll_idle_init(struct cpuidle_device *dev)
state->enter = poll_idle;
}
#else
static void poll_idle_init(struct cpuidle_device *dev) {}
static void poll_idle_init(struct cpuidle_driver *drv) {}
#endif /* CONFIG_ARCH_HAS_CPU_RELAX */
/**
......@@ -234,21 +225,20 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
return ret;
}
poll_idle_init(dev);
poll_idle_init(cpuidle_get_driver());
if ((ret = cpuidle_add_state_sysfs(dev)))
return ret;
if (cpuidle_curr_governor->enable &&
(ret = cpuidle_curr_governor->enable(dev)))
(ret = cpuidle_curr_governor->enable(cpuidle_get_driver(), dev)))
goto fail_sysfs;
for (i = 0; i < dev->state_count; i++) {
dev->states[i].usage = 0;
dev->states[i].time = 0;
dev->states_usage[i].usage = 0;
dev->states_usage[i].time = 0;
}
dev->last_residency = 0;
dev->last_state = NULL;
smp_wmb();
......@@ -282,7 +272,7 @@ void cpuidle_disable_device(struct cpuidle_device *dev)
dev->enabled = 0;
if (cpuidle_curr_governor->disable)
cpuidle_curr_governor->disable(dev);
cpuidle_curr_governor->disable(cpuidle_get_driver(), dev);
cpuidle_remove_state_sysfs(dev);
enabled_devices--;
......@@ -310,26 +300,6 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
init_completion(&dev->kobj_unregister);
/*
* cpuidle driver should set the dev->power_specified bit
* before registering the device if the driver provides
* power_usage numbers.
*
* For those devices whose ->power_specified is not set,
* we fill in power_usage with decreasing values as the
* cpuidle code has an implicit assumption that state Cn
* uses less power than C(n-1).
*
* With CONFIG_ARCH_HAS_CPU_RELAX, C0 is already assigned
* an power value of -1. So we use -2, -3, etc, for other
* c-states.
*/
if (!dev->power_specified) {
int i;
for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++)
dev->states[i].power_usage = -1 - i;
}
per_cpu(cpuidle_devices, dev->cpu) = dev;
list_add(&dev->device_list, &cpuidle_detected_devices);
if ((ret = cpuidle_add_sysfs(sys_dev))) {
......
......@@ -17,6 +17,30 @@
static struct cpuidle_driver *cpuidle_curr_driver;
DEFINE_SPINLOCK(cpuidle_driver_lock);
static void __cpuidle_register_driver(struct cpuidle_driver *drv)
{
int i;
/*
* cpuidle driver should set the drv->power_specified bit
* before registering if the driver provides
* power_usage numbers.
*
* If power_specified is not set,
* we fill in power_usage with decreasing values as the
* cpuidle code has an implicit assumption that state Cn
* uses less power than C(n-1).
*
* With CONFIG_ARCH_HAS_CPU_RELAX, C0 is already assigned
* an power value of -1. So we use -2, -3, etc, for other
* c-states.
*/
if (!drv->power_specified) {
for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++)
drv->states[i].power_usage = -1 - i;
}
}
/**
* cpuidle_register_driver - registers a driver
* @drv: the driver
......@@ -34,6 +58,7 @@ int cpuidle_register_driver(struct cpuidle_driver *drv)
spin_unlock(&cpuidle_driver_lock);
return -EBUSY;
}
__cpuidle_register_driver(drv);
cpuidle_curr_driver = drv;
spin_unlock(&cpuidle_driver_lock);
......
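The helper added above encodes the core's ordering assumption numerically: when a driver does not set power_specified, deeper states receive increasingly negative placeholder power_usage values, so state Cn always compares as cheaper than C(n-1). As a worked example, assuming CPUIDLE_DRIVER_STATE_START is 1 (the CONFIG_ARCH_HAS_CPU_RELAX case, where the poll state at index 0 is already -1):

drv->states[1].power_usage = -1 - 1;	/* -2 */
drv->states[2].power_usage = -1 - 2;	/* -3 */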
......@@ -60,9 +60,11 @@ static inline void ladder_do_selection(struct ladder_device *ldev,
/**
* ladder_select_state - selects the next state to enter
* @drv: cpuidle driver
* @dev: the CPU
*/
static int ladder_select_state(struct cpuidle_device *dev)
static int ladder_select_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{
struct ladder_device *ldev = &__get_cpu_var(ladder_devices);
struct ladder_device_state *last_state;
......@@ -77,15 +79,17 @@ static int ladder_select_state(struct cpuidle_device *dev)
last_state = &ldev->states[last_idx];
if (dev->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID)
last_residency = cpuidle_get_last_residency(dev) - dev->states[last_idx].exit_latency;
if (drv->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID) {
last_residency = cpuidle_get_last_residency(dev) - \
drv->states[last_idx].exit_latency;
}
else
last_residency = last_state->threshold.promotion_time + 1;
/* consider promotion */
if (last_idx < dev->state_count - 1 &&
if (last_idx < drv->state_count - 1 &&
last_residency > last_state->threshold.promotion_time &&
dev->states[last_idx + 1].exit_latency <= latency_req) {
drv->states[last_idx + 1].exit_latency <= latency_req) {
last_state->stats.promotion_count++;
last_state->stats.demotion_count = 0;
if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) {
......@@ -96,11 +100,11 @@ static int ladder_select_state(struct cpuidle_device *dev)
/* consider demotion */
if (last_idx > CPUIDLE_DRIVER_STATE_START &&
dev->states[last_idx].exit_latency > latency_req) {
drv->states[last_idx].exit_latency > latency_req) {
int i;
for (i = last_idx - 1; i > CPUIDLE_DRIVER_STATE_START; i--) {
if (dev->states[i].exit_latency <= latency_req)
if (drv->states[i].exit_latency <= latency_req)
break;
}
ladder_do_selection(ldev, last_idx, i);
......@@ -123,9 +127,11 @@ static int ladder_select_state(struct cpuidle_device *dev)
/**
* ladder_enable_device - setup for the governor
* @drv: cpuidle driver
* @dev: the CPU
*/
static int ladder_enable_device(struct cpuidle_device *dev)
static int ladder_enable_device(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{
int i;
struct ladder_device *ldev = &per_cpu(ladder_devices, dev->cpu);
......@@ -134,8 +140,8 @@ static int ladder_enable_device(struct cpuidle_device *dev)
ldev->last_state_idx = CPUIDLE_DRIVER_STATE_START;
for (i = 0; i < dev->state_count; i++) {
state = &dev->states[i];
for (i = 0; i < drv->state_count; i++) {
state = &drv->states[i];
lstate = &ldev->states[i];
lstate->stats.promotion_count = 0;
......@@ -144,7 +150,7 @@ static int ladder_enable_device(struct cpuidle_device *dev)
lstate->threshold.promotion_count = PROMOTION_COUNT;
lstate->threshold.demotion_count = DEMOTION_COUNT;
if (i < dev->state_count - 1)
if (i < drv->state_count - 1)
lstate->threshold.promotion_time = state->exit_latency;
if (i > 0)
lstate->threshold.demotion_time = state->exit_latency;
......@@ -153,11 +159,24 @@ static int ladder_enable_device(struct cpuidle_device *dev)
return 0;
}
/**
* ladder_reflect - update the correct last_state_idx
* @dev: the CPU
* @index: the index of actual state entered
*/
static void ladder_reflect(struct cpuidle_device *dev, int index)
{
struct ladder_device *ldev = &__get_cpu_var(ladder_devices);
if (index > 0)
ldev->last_state_idx = index;
}
static struct cpuidle_governor ladder_governor = {
.name = "ladder",
.rating = 10,
.enable = ladder_enable_device,
.select = ladder_select_state,
.reflect = ladder_reflect,
.owner = THIS_MODULE,
};
......
......@@ -182,7 +182,7 @@ static inline int performance_multiplier(void)
static DEFINE_PER_CPU(struct menu_device, menu_devices);
static void menu_update(struct cpuidle_device *dev);
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);
/* This implements DIV_ROUND_CLOSEST but avoids 64 bit division */
static u64 div_round64(u64 dividend, u32 divisor)
......@@ -228,9 +228,10 @@ static void detect_repeating_patterns(struct menu_device *data)
/**
* menu_select - selects the next idle state to enter
* @drv: cpuidle driver containing state data
* @dev: the CPU
*/
static int menu_select(struct cpuidle_device *dev)
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
struct menu_device *data = &__get_cpu_var(menu_devices);
int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
......@@ -240,7 +241,7 @@ static int menu_select(struct cpuidle_device *dev)
struct timespec t;
if (data->needs_update) {
menu_update(dev);
menu_update(drv, dev);
data->needs_update = 0;
}
......@@ -285,11 +286,9 @@ static int menu_select(struct cpuidle_device *dev)
* Find the idle state with the lowest power while satisfying
* our constraints.
*/
for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++) {
struct cpuidle_state *s = &dev->states[i];
for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
struct cpuidle_state *s = &drv->states[i];
if (s->flags & CPUIDLE_FLAG_IGNORE)
continue;
if (s->target_residency > data->predicted_us)
continue;
if (s->exit_latency > latency_req)
......@@ -310,26 +309,30 @@ static int menu_select(struct cpuidle_device *dev)
/**
* menu_reflect - records that data structures need update
* @dev: the CPU
* @index: the index of actual entered state
*
* NOTE: it's important to be fast here because this operation will add to
* the overall exit latency.
*/
static void menu_reflect(struct cpuidle_device *dev)
static void menu_reflect(struct cpuidle_device *dev, int index)
{
struct menu_device *data = &__get_cpu_var(menu_devices);
data->last_state_idx = index;
if (index >= 0)
data->needs_update = 1;
}
/**
* menu_update - attempts to guess what happened after entry
* @drv: cpuidle driver containing state data
* @dev: the CPU
*/
static void menu_update(struct cpuidle_device *dev)
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
struct menu_device *data = &__get_cpu_var(menu_devices);
int last_idx = data->last_state_idx;
unsigned int last_idle_us = cpuidle_get_last_residency(dev);
struct cpuidle_state *target = &dev->states[last_idx];
struct cpuidle_state *target = &drv->states[last_idx];
unsigned int measured_us;
u64 new_factor;
......@@ -383,9 +386,11 @@ static void menu_update(struct cpuidle_device *dev)
/**
* menu_enable_device - scans a CPU's states and does setup
* @drv: cpuidle driver
* @dev: the CPU
*/
static int menu_enable_device(struct cpuidle_device *dev)
static int menu_enable_device(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{
struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
......
......@@ -216,7 +216,8 @@ static struct kobj_type ktype_cpuidle = {
struct cpuidle_state_attr {
struct attribute attr;
ssize_t (*show)(struct cpuidle_state *, char *);
ssize_t (*show)(struct cpuidle_state *, \
struct cpuidle_state_usage *, char *);
ssize_t (*store)(struct cpuidle_state *, const char *, size_t);
};
......@@ -224,19 +225,22 @@ struct cpuidle_state_attr {
static struct cpuidle_state_attr attr_##_name = __ATTR(_name, 0444, show, NULL)
#define define_show_state_function(_name) \
static ssize_t show_state_##_name(struct cpuidle_state *state, char *buf) \
static ssize_t show_state_##_name(struct cpuidle_state *state, \
struct cpuidle_state_usage *state_usage, char *buf) \
{ \
return sprintf(buf, "%u\n", state->_name);\
}
#define define_show_state_ull_function(_name) \
static ssize_t show_state_##_name(struct cpuidle_state *state, char *buf) \
static ssize_t show_state_##_name(struct cpuidle_state *state, \
struct cpuidle_state_usage *state_usage, char *buf) \
{ \
return sprintf(buf, "%llu\n", state->_name);\
return sprintf(buf, "%llu\n", state_usage->_name);\
}
#define define_show_state_str_function(_name) \
static ssize_t show_state_##_name(struct cpuidle_state *state, char *buf) \
static ssize_t show_state_##_name(struct cpuidle_state *state, \
struct cpuidle_state_usage *state_usage, char *buf) \
{ \
if (state->_name[0] == '\0')\
return sprintf(buf, "<null>\n");\
......@@ -269,16 +273,18 @@ static struct attribute *cpuidle_state_default_attrs[] = {
#define kobj_to_state_obj(k) container_of(k, struct cpuidle_state_kobj, kobj)
#define kobj_to_state(k) (kobj_to_state_obj(k)->state)
#define kobj_to_state_usage(k) (kobj_to_state_obj(k)->state_usage)
#define attr_to_stateattr(a) container_of(a, struct cpuidle_state_attr, attr)
static ssize_t cpuidle_state_show(struct kobject * kobj,
struct attribute * attr ,char * buf)
{
int ret = -EIO;
struct cpuidle_state *state = kobj_to_state(kobj);
struct cpuidle_state_usage *state_usage = kobj_to_state_usage(kobj);
struct cpuidle_state_attr * cattr = attr_to_stateattr(attr);
if (cattr->show)
ret = cattr->show(state, buf);
ret = cattr->show(state, state_usage, buf);
return ret;
}
......@@ -316,13 +322,15 @@ int cpuidle_add_state_sysfs(struct cpuidle_device *device)
{
int i, ret = -ENOMEM;
struct cpuidle_state_kobj *kobj;
struct cpuidle_driver *drv = cpuidle_get_driver();
/* state statistics */
for (i = 0; i < device->state_count; i++) {
kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL);
if (!kobj)
goto error_state;
kobj->state = &device->states[i];
kobj->state = &drv->states[i];
kobj->state_usage = &device->states_usage[i];
init_completion(&kobj->kobj_unregister);
ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle, &device->kobj,
......
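The sysfs show macros now receive both the state (driver-owned parameters) and the per-device usage record, and the *_ull variants read their field from the latter. Under the new definition, a hypothetical instantiation such as define_show_state_ull_function(usage) would expand to roughly:

static ssize_t show_state_usage(struct cpuidle_state *state,
				struct cpuidle_state_usage *state_usage, char *buf)
{
	return sprintf(buf, "%llu\n", state_usage->usage);
}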
......@@ -81,7 +81,8 @@ static unsigned int mwait_substates;
static unsigned int lapic_timer_reliable_states = (1 << 1); /* Default to only C1 */
static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state);
static int intel_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index);
static struct cpuidle_state *cpuidle_state_table;
......@@ -109,7 +110,6 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
{ /* MWAIT C1 */
.name = "C1-NHM",
.desc = "MWAIT 0x00",
.driver_data = (void *) 0x00,
.flags = CPUIDLE_FLAG_TIME_VALID,
.exit_latency = 3,
.target_residency = 6,
......@@ -117,7 +117,6 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
{ /* MWAIT C2 */
.name = "C3-NHM",
.desc = "MWAIT 0x10",
.driver_data = (void *) 0x10,
.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 20,
.target_residency = 80,
......@@ -125,7 +124,6 @@ static struct cpuidle_state nehalem_cstates[MWAIT_MAX_NUM_CSTATES] = {
{ /* MWAIT C3 */
.name = "C6-NHM",
.desc = "MWAIT 0x20",
.driver_data = (void *) 0x20,
.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 200,
.target_residency = 800,
......@@ -137,7 +135,6 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
{ /* MWAIT C1 */
.name = "C1-SNB",
.desc = "MWAIT 0x00",
.driver_data = (void *) 0x00,
.flags = CPUIDLE_FLAG_TIME_VALID,
.exit_latency = 1,
.target_residency = 1,
......@@ -145,7 +142,6 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
{ /* MWAIT C2 */
.name = "C3-SNB",
.desc = "MWAIT 0x10",
.driver_data = (void *) 0x10,
.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 80,
.target_residency = 211,
......@@ -153,7 +149,6 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
{ /* MWAIT C3 */
.name = "C6-SNB",
.desc = "MWAIT 0x20",
.driver_data = (void *) 0x20,
.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 104,
.target_residency = 345,
......@@ -161,7 +156,6 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = {
{ /* MWAIT C4 */
.name = "C7-SNB",
.desc = "MWAIT 0x30",
.driver_data = (void *) 0x30,
.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 109,
.target_residency = 345,
......@@ -173,7 +167,6 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
{ /* MWAIT C1 */
.name = "C1-ATM",
.desc = "MWAIT 0x00",
.driver_data = (void *) 0x00,
.flags = CPUIDLE_FLAG_TIME_VALID,
.exit_latency = 1,
.target_residency = 4,
......@@ -181,7 +174,6 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
{ /* MWAIT C2 */
.name = "C2-ATM",
.desc = "MWAIT 0x10",
.driver_data = (void *) 0x10,
.flags = CPUIDLE_FLAG_TIME_VALID,
.exit_latency = 20,
.target_residency = 80,
......@@ -190,7 +182,6 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
{ /* MWAIT C4 */
.name = "C4-ATM",
.desc = "MWAIT 0x30",
.driver_data = (void *) 0x30,
.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 100,
.target_residency = 400,
......@@ -199,23 +190,55 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
{ /* MWAIT C6 */
.name = "C6-ATM",
.desc = "MWAIT 0x52",
.driver_data = (void *) 0x52,
.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 140,
.target_residency = 560,
.enter = &intel_idle },
};
static int get_driver_data(int cstate)
{
int driver_data;
switch (cstate) {
case 1: /* MWAIT C1 */
driver_data = 0x00;
break;
case 2: /* MWAIT C2 */
driver_data = 0x10;
break;
case 3: /* MWAIT C3 */
driver_data = 0x20;
break;
case 4: /* MWAIT C4 */
driver_data = 0x30;
break;
case 5: /* MWAIT C5 */
driver_data = 0x40;
break;
case 6: /* MWAIT C6 */
driver_data = 0x52;
break;
default:
driver_data = 0x00;
}
return driver_data;
}
/**
* intel_idle
* @dev: cpuidle_device
* @state: cpuidle state
* @drv: cpuidle driver
* @index: index of cpuidle state
*
*/
static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state)
static int intel_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
unsigned long ecx = 1; /* break on interrupt flag */
unsigned long eax = (unsigned long)cpuidle_get_statedata(state);
struct cpuidle_state *state = &drv->states[index];
struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
unsigned long eax = (unsigned long)cpuidle_get_statedata(state_usage);
unsigned int cstate;
ktime_t kt_before, kt_after;
s64 usec_delta;
......@@ -256,7 +279,10 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state)
if (!(lapic_timer_reliable_states & (1 << (cstate))))
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
return usec_delta;
/* Update cpuidle counters */
dev->last_residency = (int)usec_delta;
return index;
}
static void __setup_broadcast_timer(void *arg)
......@@ -396,6 +422,60 @@ static void intel_idle_cpuidle_devices_uninit(void)
free_percpu(intel_idle_cpuidle_devices);
return;
}
/*
* intel_idle_cpuidle_driver_init()
* allocate, initialize cpuidle_states
*/
static int intel_idle_cpuidle_driver_init(void)
{
int cstate;
struct cpuidle_driver *drv = &intel_idle_driver;
drv->state_count = 1;
for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) {
int num_substates;
if (cstate > max_cstate) {
printk(PREFIX "max_cstate %d reached\n",
max_cstate);
break;
}
/* does the state exist in CPUID.MWAIT? */
num_substates = (mwait_substates >> ((cstate) * 4))
& MWAIT_SUBSTATE_MASK;
if (num_substates == 0)
continue;
/* is the state not enabled? */
if (cpuidle_state_table[cstate].enter == NULL) {
/* does the driver not know about the state? */
if (*cpuidle_state_table[cstate].name == '\0')
pr_debug(PREFIX "unaware of model 0x%x"
" MWAIT %d please"
" contact lenb@kernel.org",
boot_cpu_data.x86_model, cstate);
continue;
}
if ((cstate > 2) &&
!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
mark_tsc_unstable("TSC halts in idle"
" states deeper than C2");
drv->states[drv->state_count] = /* structure copy */
cpuidle_state_table[cstate];
drv->state_count += 1;
}
if (auto_demotion_disable_flags)
smp_call_function(auto_demotion_disable, NULL, 1);
return 0;
}
/*
* intel_idle_cpuidle_devices_init()
* allocate, initialize, register cpuidle_devices
......@@ -430,22 +510,11 @@ static int intel_idle_cpuidle_devices_init(void)
continue;
/* is the state not enabled? */
if (cpuidle_state_table[cstate].enter == NULL) {
/* does the driver not know about the state? */
if (*cpuidle_state_table[cstate].name == '\0')
pr_debug(PREFIX "unaware of model 0x%x"
" MWAIT %d please"
" contact lenb@kernel.org",
boot_cpu_data.x86_model, cstate);
continue;
}
if ((cstate > 2) &&
!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
mark_tsc_unstable("TSC halts in idle"
" states deeper than C2");
dev->states[dev->state_count] = /* structure copy */
cpuidle_state_table[cstate];
dev->states_usage[dev->state_count].driver_data =
(void *)get_driver_data(cstate);
dev->state_count += 1;
}
......@@ -458,8 +527,6 @@ static int intel_idle_cpuidle_devices_init(void)
return -EIO;
}
}
if (auto_demotion_disable_flags)
smp_call_function(auto_demotion_disable, NULL, 1);
return 0;
}
......@@ -477,6 +544,7 @@ static int __init intel_idle_init(void)
if (retval)
return retval;
intel_idle_cpuidle_driver_init();
retval = cpuidle_register_driver(&intel_idle_driver);
if (retval) {
printk(KERN_DEBUG PREFIX "intel_idle yielding to %s",
......
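Because .driver_data leaves struct cpuidle_state, intel_idle recreates the per-state MWAIT hint at device-init time: get_driver_data() maps the C-state index back to the hint, the result is stashed in the per-device states_usage slot, and intel_idle() retrieves it as the EAX argument for MWAIT. The two ends of that path, condensed from the hunks above:

/* device init: store the MWAIT hint per device, keyed by state index */
dev->states_usage[dev->state_count].driver_data =
	(void *)get_driver_data(cstate);

/* idle entry: recover the hint for the selected index */
struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
unsigned long eax = (unsigned long)cpuidle_get_statedata(state_usage);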
......@@ -329,6 +329,7 @@ extern void acpi_processor_throttling_init(void);
int acpi_processor_power_init(struct acpi_processor *pr,
struct acpi_device *device);
int acpi_processor_cst_has_changed(struct acpi_processor *pr);
int acpi_processor_hotplug(struct acpi_processor *pr);
int acpi_processor_power_exit(struct acpi_processor *pr,
struct acpi_device *device);
int acpi_processor_suspend(struct acpi_device * device, pm_message_t state);
......
......@@ -22,57 +22,62 @@
#define CPUIDLE_DESC_LEN 32
struct cpuidle_device;
struct cpuidle_driver;
/****************************
* CPUIDLE DEVICE INTERFACE *
****************************/
struct cpuidle_state_usage {
void *driver_data;
unsigned long long usage;
unsigned long long time; /* in US */
};
struct cpuidle_state {
char name[CPUIDLE_NAME_LEN];
char desc[CPUIDLE_DESC_LEN];
void *driver_data;
unsigned int flags;
unsigned int exit_latency; /* in US */
unsigned int power_usage; /* in mW */
unsigned int target_residency; /* in US */
unsigned long long usage;
unsigned long long time; /* in US */
int (*enter) (struct cpuidle_device *dev,
struct cpuidle_state *state);
struct cpuidle_driver *drv,
int index);
};
/* Idle State Flags */
#define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */
#define CPUIDLE_FLAG_IGNORE (0x100) /* ignore during this idle period */
#define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000)
/**
* cpuidle_get_statedata - retrieves private driver state data
* @state: the state
* @st_usage: the state usage statistics
*/
static inline void * cpuidle_get_statedata(struct cpuidle_state *state)
static inline void *cpuidle_get_statedata(struct cpuidle_state_usage *st_usage)
{
return state->driver_data;
return st_usage->driver_data;
}
/**
* cpuidle_set_statedata - stores private driver state data
* @state: the state
* @st_usage: the state usage statistics
* @data: the private data
*/
static inline void
cpuidle_set_statedata(struct cpuidle_state *state, void *data)
cpuidle_set_statedata(struct cpuidle_state_usage *st_usage, void *data)
{
state->driver_data = data;
st_usage->driver_data = data;
}
struct cpuidle_state_kobj {
struct cpuidle_state *state;
struct cpuidle_state_usage *state_usage;
struct completion kobj_unregister;
struct kobject kobj;
};
......@@ -80,22 +85,17 @@ struct cpuidle_state_kobj {
struct cpuidle_device {
unsigned int registered:1;
unsigned int enabled:1;
unsigned int power_specified:1;
unsigned int cpu;
int last_residency;
int state_count;
struct cpuidle_state states[CPUIDLE_STATE_MAX];
struct cpuidle_state_usage states_usage[CPUIDLE_STATE_MAX];
struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX];
struct cpuidle_state *last_state;
struct list_head device_list;
struct kobject kobj;
struct completion kobj_unregister;
void *governor_data;
struct cpuidle_state *safe_state;
int (*prepare) (struct cpuidle_device *dev);
};
DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
......@@ -119,6 +119,11 @@ static inline int cpuidle_get_last_residency(struct cpuidle_device *dev)
struct cpuidle_driver {
char name[CPUIDLE_NAME_LEN];
struct module *owner;
unsigned int power_specified:1;
struct cpuidle_state states[CPUIDLE_STATE_MAX];
int state_count;
int safe_state_index;
};
#ifdef CONFIG_CPU_IDLE
......@@ -165,11 +170,14 @@ struct cpuidle_governor {
struct list_head governor_list;
unsigned int rating;
int (*enable) (struct cpuidle_device *dev);
void (*disable) (struct cpuidle_device *dev);
int (*enable) (struct cpuidle_driver *drv,
struct cpuidle_device *dev);
void (*disable) (struct cpuidle_driver *drv,
struct cpuidle_device *dev);
int (*select) (struct cpuidle_device *dev);
void (*reflect) (struct cpuidle_device *dev);
int (*select) (struct cpuidle_driver *drv,
struct cpuidle_device *dev);
void (*reflect) (struct cpuidle_device *dev, int index);
struct module *owner;
};
......
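Under the new header layout a platform can describe its states once, in the driver, and keep only per-CPU bookkeeping in struct cpuidle_device. A minimal sketch with hypothetical names, mirroring the at91 and exynos4 conversions above:

static struct cpuidle_driver foo_idle_driver = {
	.name		= "foo_idle",
	.owner		= THIS_MODULE,
	.state_count	= 2,
	.states = {
		[0] = {
			.name			= "WFI",
			.desc			= "Wait for interrupt",
			.exit_latency		= 1,
			.target_residency	= 10000,
			.flags			= CPUIDLE_FLAG_TIME_VALID,
			.enter			= foo_enter_idle,
		},
		[1] = {
			.name			= "RAM_SR",
			.desc			= "WFI and RAM self refresh",
			.exit_latency		= 10,
			.target_residency	= 10000,
			.flags			= CPUIDLE_FLAG_TIME_VALID,
			.enter			= foo_enter_idle,
		},
	},
};

/* registration order, as in the converted drivers: driver first, then each device */
cpuidle_register_driver(&foo_idle_driver);
cpuidle_register_device(dev);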
......@@ -162,19 +162,21 @@ void print_header(void)
void dump_cnt(struct counters *cnt)
{
fprintf(stderr, "package: %d ", cnt->pkg);
fprintf(stderr, "core:: %d ", cnt->core);
fprintf(stderr, "CPU: %d ", cnt->cpu);
fprintf(stderr, "TSC: %016llX\n", cnt->tsc);
fprintf(stderr, "c3: %016llX\n", cnt->c3);
fprintf(stderr, "c6: %016llX\n", cnt->c6);
fprintf(stderr, "c7: %016llX\n", cnt->c7);
fprintf(stderr, "aperf: %016llX\n", cnt->aperf);
fprintf(stderr, "pc2: %016llX\n", cnt->pc2);
fprintf(stderr, "pc3: %016llX\n", cnt->pc3);
fprintf(stderr, "pc6: %016llX\n", cnt->pc6);
fprintf(stderr, "pc7: %016llX\n", cnt->pc7);
fprintf(stderr, "msr0x%x: %016llX\n", extra_msr_offset, cnt->extra_msr);
if (!cnt)
return;
if (cnt->pkg) fprintf(stderr, "package: %d ", cnt->pkg);
if (cnt->core) fprintf(stderr, "core:: %d ", cnt->core);
if (cnt->cpu) fprintf(stderr, "CPU: %d ", cnt->cpu);
if (cnt->tsc) fprintf(stderr, "TSC: %016llX\n", cnt->tsc);
if (cnt->c3) fprintf(stderr, "c3: %016llX\n", cnt->c3);
if (cnt->c6) fprintf(stderr, "c6: %016llX\n", cnt->c6);
if (cnt->c7) fprintf(stderr, "c7: %016llX\n", cnt->c7);
if (cnt->aperf) fprintf(stderr, "aperf: %016llX\n", cnt->aperf);
if (cnt->pc2) fprintf(stderr, "pc2: %016llX\n", cnt->pc2);
if (cnt->pc3) fprintf(stderr, "pc3: %016llX\n", cnt->pc3);
if (cnt->pc6) fprintf(stderr, "pc6: %016llX\n", cnt->pc6);
if (cnt->pc7) fprintf(stderr, "pc7: %016llX\n", cnt->pc7);
if (cnt->extra_msr) fprintf(stderr, "msr0x%x: %016llX\n", extra_msr_offset, cnt->extra_msr);
}
void dump_list(struct counters *cnt)
......