Commit 95b00786 authored by Len Brown

Pull cpuidle into release branch

parents 22201f74 ddc081a1
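In short, this merge pulls in the cpuidle fix from parent ddc081a1. Three things change in the ACPI processor idle code. First, acpi_safe_halt() moves above the cpuidle entry points so acpi_idle_enter_c1() can call it instead of open-coding the TS_POLLING/halt sequence. Second, acpi_idle_enter_bm() now tests for bus-master activity up front and, while BM is active, demotes to the new dev->safe_state (the deepest C1/C2 state registered by acpi_processor_setup_cpuidle()) or falls back to acpi_safe_halt(), replacing the precomputed pr->power.bm_state. Third, the C2/C3 paths now report slept time to the scheduler clock via sched_clock_idle_sleep_event()/sched_clock_idle_wakeup_event().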
@@ -197,6 +197,19 @@ static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2)
 	return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
 }
 
+static void acpi_safe_halt(void)
+{
+	current_thread_info()->status &= ~TS_POLLING;
+	/*
+	 * TS_POLLING-cleared state must be visible before we
+	 * test NEED_RESCHED:
+	 */
+	smp_mb();
+	if (!need_resched())
+		safe_halt();
+	current_thread_info()->status |= TS_POLLING;
+}
+
 #ifndef CONFIG_CPU_IDLE
 static void
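Why the helper is written this way: the scheduler does not send a resched IPI to a CPU whose TS_POLLING flag is set, on the assumption that its idle loop is polling need_resched(). Since this path halts rather than polls, the flag must be cleared, and the clearing made globally visible by smp_mb(), before the need_resched() test; otherwise a wakeup landing between the test and safe_halt() would neither set need_resched() in time nor trigger an IPI, leaving the CPU halted with work pending.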
@@ -239,19 +252,6 @@ acpi_processor_power_activate(struct acpi_processor *pr,
 	return;
 }
 
-static void acpi_safe_halt(void)
-{
-	current_thread_info()->status &= ~TS_POLLING;
-	/*
-	 * TS_POLLING-cleared state must be visible before we
-	 * test NEED_RESCHED:
-	 */
-	smp_mb();
-	if (!need_resched())
-		safe_halt();
-	current_thread_info()->status |= TS_POLLING;
-}
-
 static atomic_t c3_cpu_count;
 
 /* Common C-state entry for C2, C3, .. */
@@ -1373,15 +1373,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
 	if (pr->flags.bm_check)
 		acpi_idle_update_bm_rld(pr, cx);
 
-	current_thread_info()->status &= ~TS_POLLING;
-	/*
-	 * TS_POLLING-cleared state must be visible before we test
-	 * NEED_RESCHED:
-	 */
-	smp_mb();
-	if (!need_resched())
-		safe_halt();
-	current_thread_info()->status |= TS_POLLING;
+	acpi_safe_halt();
 
 	cx->usage++;
@@ -1399,6 +1391,8 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 	struct acpi_processor *pr;
 	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
 	u32 t1, t2;
+	int sleep_ticks = 0;
+
 	pr = processors[smp_processor_id()];
 
 	if (unlikely(!pr))
@@ -1428,6 +1422,8 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 	ACPI_FLUSH_CPU_CACHE();
 
 	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+	/* Tell the scheduler that we are going deep-idle: */
+	sched_clock_idle_sleep_event();
 	acpi_state_timer_broadcast(pr, cx, 1);
 	acpi_idle_do_entry(cx);
 	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
@@ -1436,6 +1432,10 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 	/* TSC could halt in idle, so notify users */
 	mark_tsc_unstable("TSC halts in idle");;
 #endif
+	sleep_ticks = ticks_elapsed(t1, t2);
+
+	/* Tell the scheduler how much we idled: */
+	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
 
 	local_irq_enable();
 	current_thread_info()->status |= TS_POLLING;
@@ -1443,7 +1443,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 	cx->usage++;
 
 	acpi_state_timer_broadcast(pr, cx, 0);
-	cx->time += ticks_elapsed(t1, t2);
+	cx->time += sleep_ticks;
 
 	return ticks_elapsed_in_us(t1, t2);
 }
@@ -1463,6 +1463,8 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 	struct acpi_processor *pr;
 	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
 	u32 t1, t2;
+	int sleep_ticks = 0;
+
 	pr = processors[smp_processor_id()];
 
 	if (unlikely(!pr))
@@ -1471,6 +1473,15 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 	if (acpi_idle_suspend)
 		return(acpi_idle_enter_c1(dev, state));
 
+	if (acpi_idle_bm_check()) {
+		if (dev->safe_state) {
+			return dev->safe_state->enter(dev, dev->safe_state);
+		} else {
+			acpi_safe_halt();
+			return 0;
+		}
+	}
+
 	local_irq_disable();
 	current_thread_info()->status &= ~TS_POLLING;
 	/*
@@ -1485,38 +1496,45 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 		return 0;
 	}
 
+	/* Tell the scheduler that we are going deep-idle: */
+	sched_clock_idle_sleep_event();
+
 	/*
 	 * Must be done before busmaster disable as we might need to
 	 * access HPET !
 	 */
 	acpi_state_timer_broadcast(pr, cx, 1);
 
-	if (acpi_idle_bm_check()) {
-		cx = pr->power.bm_state;
-
-		acpi_idle_update_bm_rld(pr, cx);
-
-		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
-		acpi_idle_do_entry(cx);
-		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
-	} else {
-		acpi_idle_update_bm_rld(pr, cx);
+	acpi_idle_update_bm_rld(pr, cx);
 
+	/*
+	 * disable bus master
+	 * bm_check implies we need ARB_DIS
+	 * !bm_check implies we need cache flush
+	 * bm_control implies whether we can do ARB_DIS
+	 *
+	 * That leaves a case where bm_check is set and bm_control is
+	 * not set. In that case we cannot do much, we enter C3
+	 * without doing anything.
+	 */
+	if (pr->flags.bm_check && pr->flags.bm_control) {
 		spin_lock(&c3_lock);
 		c3_cpu_count++;
 		/* Disable bus master arbitration when all CPUs are in C3 */
 		if (c3_cpu_count == num_online_cpus())
 			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
 		spin_unlock(&c3_lock);
+	} else if (!pr->flags.bm_check) {
+		ACPI_FLUSH_CPU_CACHE();
+	}
 
 	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
 	acpi_idle_do_entry(cx);
 	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
 
+	/* Re-enable bus master arbitration */
+	if (pr->flags.bm_check && pr->flags.bm_control) {
 		spin_lock(&c3_lock);
-		/* Re-enable bus master arbitration */
-		if (c3_cpu_count == num_online_cpus())
-			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
+		acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
 		c3_cpu_count--;
 		spin_unlock(&c3_lock);
 	}
@@ -1525,6 +1543,9 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 	/* TSC could halt in idle, so notify users */
 	mark_tsc_unstable("TSC halts in idle");
 #endif
+	sleep_ticks = ticks_elapsed(t1, t2);
+	/* Tell the scheduler how much we idled: */
+	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
 
 	local_irq_enable();
 	current_thread_info()->status |= TS_POLLING;
@@ -1532,7 +1553,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 	cx->usage++;
 
 	acpi_state_timer_broadcast(pr, cx, 0);
-	cx->time += ticks_elapsed(t1, t2);
+	cx->time += sleep_ticks;
 
 	return ticks_elapsed_in_us(t1, t2);
 }
@@ -1584,12 +1605,14 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
 		case ACPI_STATE_C1:
 			state->flags |= CPUIDLE_FLAG_SHALLOW;
 			state->enter = acpi_idle_enter_c1;
+			dev->safe_state = state;
 			break;
 
 		case ACPI_STATE_C2:
 			state->flags |= CPUIDLE_FLAG_BALANCED;
 			state->flags |= CPUIDLE_FLAG_TIME_VALID;
 			state->enter = acpi_idle_enter_simple;
+			dev->safe_state = state;
 			break;
 
 		case ACPI_STATE_C3:
@@ -1610,14 +1633,6 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
 	if (!count)
 		return -EINVAL;
 
-	/* find the deepest state that can handle active BM */
-	if (pr->flags.bm_check) {
-		for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++)
-			if (pr->power.states[i].type == ACPI_STATE_C3)
-				break;
-		pr->power.bm_state = &pr->power.states[i-1];
-	}
-
 	return 0;
 }
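The two hunks that follow adjust the supporting data structures: struct acpi_processor_power drops the bm_state pointer made obsolete above (in the ACPI processor header), and struct cpuidle_device gains the safe_state pointer the new fallback dispatches through (in the cpuidle header). The user-space sketch below mocks that dispatch to show its shape; the structs are pared-down stand-ins for the kernel types, and bm_check() and main() are invented for illustration.

```c
#include <stdio.h>

/* Simplified stand-ins for the kernel's cpuidle types */
struct cpuidle_device;

struct cpuidle_state {
	const char *name;
	int (*enter)(struct cpuidle_device *dev, struct cpuidle_state *state);
};

struct cpuidle_device {
	struct cpuidle_state *safe_state;	/* shallowest state safe under BM activity */
};

/* Stand-in for acpi_idle_bm_check(): pretend bus mastering is active */
static int bm_check(void)
{
	return 1;
}

static int enter_c1(struct cpuidle_device *dev, struct cpuidle_state *state)
{
	printf("entered %s\n", state->name);
	return 0;
}

static int enter_c3(struct cpuidle_device *dev, struct cpuidle_state *state)
{
	/* Mirrors the new acpi_idle_enter_bm() prologue: demote, don't enter C3 */
	if (bm_check()) {
		if (dev->safe_state)
			return dev->safe_state->enter(dev, dev->safe_state);
		return 0;	/* the kernel calls acpi_safe_halt() here instead */
	}
	printf("entered %s\n", state->name);
	return 0;
}

int main(void)
{
	struct cpuidle_state c1 = { "C1", enter_c1 };
	struct cpuidle_state c3 = { "C3", enter_c3 };
	struct cpuidle_device dev = { .safe_state = &c1 };

	return c3.enter(&dev, &c3);	/* prints "entered C1" */
}
```

Dispatching through a registered cpuidle state keeps usage and residency accounting consistent, where the removed code side-stepped cpuidle through the private bm_state pointer.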
@@ -78,7 +78,6 @@ struct acpi_processor_cx {
 struct acpi_processor_power {
 	struct cpuidle_device dev;
 	struct acpi_processor_cx *state;
-	struct acpi_processor_cx *bm_state;
 	unsigned long bm_check_timestamp;
 	u32 default_state;
 	u32 bm_activity;
@@ -92,6 +92,7 @@ struct cpuidle_device {
 	struct kobject		kobj;
 	struct completion	kobj_unregister;
 	void			*governor_data;
+	struct cpuidle_state	*safe_state;
 };
 
 DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
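On the accounting side, both rewritten entry points hand the scheduler the idle residency as sleep_ticks*PM_TIMER_TICK_NS. Below is a minimal sketch of that conversion, assuming PM_TIMER_TICK_NS is the plain nanoseconds-per-tick quotient of the ACPI PM timer's fixed 3.579545 MHz clock, and omitting the 24-bit-timer variant of the wrap handling that the kernel's ticks_elapsed() also covers.

```c
#include <stdio.h>
#include <stdint.h>

#define PM_TIMER_FREQUENCY	3579545					/* Hz, fixed by ACPI */
#define PM_TIMER_TICK_NS	(1000000000ULL / PM_TIMER_FREQUENCY)	/* ~279 ns */

/* Same wraparound handling as ticks_elapsed_in_us() above, 32-bit case only */
static uint32_t ticks_elapsed(uint32_t t1, uint32_t t2)
{
	if (t2 >= t1)
		return t2 - t1;
	return (0xFFFFFFFF - t1) + t2;	/* timer wrapped between the two reads */
}

int main(void)
{
	/* Two PM-timer reads straddling a wrap: 511 ticks apart */
	uint32_t t1 = 0xFFFFFF00, t2 = 0x00000100;
	uint64_t ns = ticks_elapsed(t1, t2) * PM_TIMER_TICK_NS;

	printf("slept %llu ns\n", (unsigned long long)ns);	/* ~143 us */
	return 0;
}
```

Assuming the kernel macro matches this quotient, the int sleep_ticks in the patch promotes to 64 bits in the product, so sched_clock_idle_wakeup_event() receives a full-width nanosecond value.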