Commit 7ff0d134 authored by Zwane Mwaikambo's avatar Zwane Mwaikambo Committed by Linus Torvalds

[PATCH] Close race with preempt and modular pm_idle callbacks

The following patch from Shaohua Li fixes a race with preempt enabled when
a module containing a pm_idle callback is unloaded.  Cached values in local
variables need to be protected as RCU critical sections so that the
synchronize_kernel() call in the unload path waits for all processors.
The original bugzilla entry can be found at

Shaohua, i had to make a small change (variable declaration after code in
code block) so that it compiles with geriatric compilers such as the ones
Andrew is attached to ;)

http://bugzilla.kernel.org/show_bug.cgi?id=1716

Signed-off-by: Li Shaohua <shaohua.li@intel.com>
Signed-off-by: Zwane Mwaikambo <zwane@linuxpower.ca>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent d40cb3a6
...@@ -2362,8 +2362,15 @@ static void __exit apm_exit(void) ...@@ -2362,8 +2362,15 @@ static void __exit apm_exit(void)
{ {
int error; int error;
if (set_pm_idle) if (set_pm_idle) {
pm_idle = original_pm_idle; pm_idle = original_pm_idle;
/*
* We are about to unload the current idle thread pm callback
* (pm_idle), Wait for all processors to update cached/local
* copies of pm_idle before proceeding.
*/
synchronize_kernel();
}
if (((apm_info.bios.flags & APM_BIOS_DISENGAGED) == 0) if (((apm_info.bios.flags & APM_BIOS_DISENGAGED) == 0)
&& (apm_info.connection_version > 0x0100)) { && (apm_info.connection_version > 0x0100)) {
error = apm_engage_power_management(APM_DEVICE_ALL, 0); error = apm_engage_power_management(APM_DEVICE_ALL, 0);
......
...@@ -142,13 +142,21 @@ void cpu_idle (void) ...@@ -142,13 +142,21 @@ void cpu_idle (void)
/* endless idle loop with no priority at all */ /* endless idle loop with no priority at all */
while (1) { while (1) {
while (!need_resched()) { while (!need_resched()) {
void (*idle)(void) = pm_idle; void (*idle)(void);
/*
* Mark this as an RCU critical section so that
* synchronize_kernel() in the unload path waits
* for our completion.
*/
rcu_read_lock();
idle = pm_idle;
if (!idle) if (!idle)
idle = default_idle; idle = default_idle;
irq_stat[smp_processor_id()].idle_timestamp = jiffies; irq_stat[smp_processor_id()].idle_timestamp = jiffies;
idle(); idle();
rcu_read_unlock();
} }
schedule(); schedule();
} }
......
...@@ -228,18 +228,26 @@ cpu_idle (void *unused) ...@@ -228,18 +228,26 @@ cpu_idle (void *unused)
/* endless idle loop with no priority at all */ /* endless idle loop with no priority at all */
while (1) { while (1) {
void (*idle)(void) = pm_idle;
if (!idle)
idle = default_idle;
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
if (!need_resched()) if (!need_resched())
min_xtp(); min_xtp();
#endif #endif
while (!need_resched()) { while (!need_resched()) {
void (*idle)(void);
if (mark_idle) if (mark_idle)
(*mark_idle)(1); (*mark_idle)(1);
/*
* Mark this as an RCU critical section so that
* synchronize_kernel() in the unload path waits
* for our completion.
*/
rcu_read_lock();
idle = pm_idle;
if (!idle)
idle = default_idle;
(*idle)(); (*idle)();
rcu_read_unlock();
} }
if (mark_idle) if (mark_idle)
......
...@@ -130,11 +130,20 @@ void cpu_idle (void) ...@@ -130,11 +130,20 @@ void cpu_idle (void)
{ {
/* endless idle loop with no priority at all */ /* endless idle loop with no priority at all */
while (1) { while (1) {
void (*idle)(void) = pm_idle; while (!need_resched()) {
void (*idle)(void);
/*
* Mark this as an RCU critical section so that
* synchronize_kernel() in the unload path waits
* for our completion.
*/
rcu_read_lock();
idle = pm_idle;
if (!idle) if (!idle)
idle = default_idle; idle = default_idle;
while (!need_resched())
idle(); idle();
rcu_read_unlock();
}
schedule(); schedule();
} }
} }
......
...@@ -2419,6 +2419,11 @@ acpi_processor_remove ( ...@@ -2419,6 +2419,11 @@ acpi_processor_remove (
/* Unregister the idle handler when processor #0 is removed. */ /* Unregister the idle handler when processor #0 is removed. */
if (pr->id == 0) { if (pr->id == 0) {
pm_idle = pm_idle_save; pm_idle = pm_idle_save;
/*
* We are about to unload the current idle thread pm callback
* (pm_idle), Wait for all processors to update cached/local
* copies of pm_idle before proceeding.
*/
synchronize_kernel(); synchronize_kernel();
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment