Commit 15316ba8 authored by Christoph Lameter, committed by Linus Torvalds

[PATCH] add schedule_on_each_cpu()

swap migration's isolate_lru_page() currently uses an IPI to notify other
processors that the LRU caches need to be drained if the page cannot be
found on the LRU.  The IPI may interrupt a processor that is in the middle
of processing its LRU caches and cause a race condition.

This patch introduces a new function schedule_on_each_cpu() that uses
keventd to run the LRU draining on each processor.  Processors disable
preemption while dealing with the LRU caches (these are per-processor), so
executing the LRU draining from another process is safe.
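
For illustration only (not part of this diff), a caller such as the swap
migration code could replace its IPI-based drain with something along these
lines; the names lru_drain_per_cpu and drain_all_lru_caches are made up for
this sketch, only lru_add_drain() and schedule_on_each_cpu() come from the
kernel:

    #include <linux/swap.h>		/* lru_add_drain() */
    #include <linux/workqueue.h>	/* schedule_on_each_cpu() */

    /* Hypothetical wrapper: runs in keventd (process) context on each CPU,
     * so that CPU's per-processor LRU pagevecs can be drained without
     * sending an IPI. */
    static void lru_drain_per_cpu(void *dummy)
    {
    	lru_add_drain();	/* drains the calling CPU's pagevecs */
    }

    /* Hypothetical caller sketch: drain the LRU caches on every online CPU
     * and wait for completion before retrying isolate_lru_page(). */
    static int drain_all_lru_caches(void)
    {
    	return schedule_on_each_cpu(lru_drain_per_cpu, NULL);
    }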

Thanks to Lee Schermerhorn <lee.schermerhorn@hp.com> for finding this race
condition.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 48db57f8
@@ -65,6 +65,7 @@ extern int FASTCALL(schedule_work(struct work_struct *work));
 extern int FASTCALL(schedule_delayed_work(struct work_struct *work, unsigned long delay));
 extern int schedule_delayed_work_on(int cpu, struct work_struct *work, unsigned long delay);
+extern int schedule_on_each_cpu(void (*func)(void *info), void *info);
 extern void flush_scheduled_work(void);
 extern int current_is_keventd(void);
 extern int keventd_up(void);
@@ -419,6 +419,25 @@ int schedule_delayed_work_on(int cpu,
 	return ret;
 }
 
+int schedule_on_each_cpu(void (*func) (void *info), void *info)
+{
+	int cpu;
+	struct work_struct *work;
+
+	work = kmalloc(NR_CPUS * sizeof(struct work_struct), GFP_KERNEL);
+
+	if (!work)
+		return -ENOMEM;
+	for_each_online_cpu(cpu) {
+		INIT_WORK(work + cpu, func, info);
+		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
+				work + cpu);
+	}
+	flush_workqueue(keventd_wq);
+	kfree(work);
+	return 0;
+}
+
 void flush_scheduled_work(void)
 {
 	flush_workqueue(keventd_wq);