Commit a8dd6484 authored by Andrew Morton, committed by David S. Miller

[PATCH] fix preempt-issues with smp_call_function()

Patch from Thomas Schlichter <schlicht@uni-mannheim.de>

Based on a patch from Dave Jones.

It converts a large number of instances of:

	smp_call_function(foo);
	foo();

into

	on_each_cpu(foo);

and in doing so fixes up the preempt-unsafeness of the first version.
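
For illustration only (foo is the same placeholder used above): with
CONFIG_PREEMPT enabled, the old sequence can be preempted between the
cross-CPU call and the local call, and the task can migrate to another
CPU in that window, so one CPU may run foo() twice while another never
runs it at all.  The helper closes that window:

	/* before: preempt-unsafe -- the task can migrate between the
	 * IPI broadcast to the other CPUs and the local invocation */
	smp_call_function(foo, NULL, 1, 1);
	foo(NULL);

	/* after: on_each_cpu() performs the same two steps inside
	 * preempt_disable()/preempt_enable(), so every CPU runs
	 * foo() exactly once */
	on_each_cpu(foo, NULL, 1, 1);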
parent e8375b8c
@@ -155,10 +155,7 @@ common_shutdown(int mode, char *restart_cmd)
 	struct halt_info args;
 	args.mode = mode;
 	args.restart_cmd = restart_cmd;
-#ifdef CONFIG_SMP
-	smp_call_function(common_shutdown_1, &args, 1, 0);
-#endif
-	common_shutdown_1(&args);
+	on_each_cpu(common_shutdown_1, &args, 1, 0);
 }
 
 void
......
@@ -899,10 +899,8 @@ void
 smp_imb(void)
 {
 	/* Must wait other processors to flush their icache before continue. */
-	if (smp_call_function(ipi_imb, NULL, 1, 1))
+	if (on_each_cpu(ipi_imb, NULL, 1, 1))
 		printk(KERN_CRIT "smp_imb: timed out\n");
-	imb();
 }
 
 static void
@@ -916,11 +914,9 @@ flush_tlb_all(void)
 {
 	/* Although we don't have any data to pass, we do want to
 	   synchronize with the other processors. */
-	if (smp_call_function(ipi_flush_tlb_all, NULL, 1, 1)) {
+	if (on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1)) {
 		printk(KERN_CRIT "flush_tlb_all: timed out\n");
 	}
-	tbia();
 }
 
 #define asn_locked() (cpu_data[smp_processor_id()].asn_lock)
@@ -938,6 +934,8 @@ ipi_flush_tlb_mm(void *x)
 void
 flush_tlb_mm(struct mm_struct *mm)
 {
+	preempt_disable();
+
 	if (mm == current->active_mm) {
 		flush_tlb_current(mm);
 		if (atomic_read(&mm->mm_users) <= 1) {
@@ -948,6 +946,7 @@ flush_tlb_mm(struct mm_struct *mm)
 				if (mm->context[cpu])
 					mm->context[cpu] = 0;
 			}
+			preempt_enable();
 			return;
 		}
 	}
@@ -955,6 +954,8 @@ flush_tlb_mm(struct mm_struct *mm)
 	if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1)) {
 		printk(KERN_CRIT "flush_tlb_mm: timed out\n");
 	}
+
+	preempt_enable();
 }
 
 struct flush_tlb_page_struct {
@@ -981,6 +982,8 @@ flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 	struct flush_tlb_page_struct data;
 	struct mm_struct *mm = vma->vm_mm;
 
+	preempt_disable();
+
 	if (mm == current->active_mm) {
 		flush_tlb_current_page(mm, vma, addr);
 		if (atomic_read(&mm->mm_users) <= 1) {
@@ -991,6 +994,7 @@ flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 				if (mm->context[cpu])
 					mm->context[cpu] = 0;
 			}
+			preempt_enable();
 			return;
 		}
 	}
@@ -1002,6 +1006,8 @@ flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 	if (smp_call_function(ipi_flush_tlb_page, &data, 1, 1)) {
 		printk(KERN_CRIT "flush_tlb_page: timed out\n");
 	}
+
+	preempt_enable();
 }
 
 void
@@ -1030,6 +1036,8 @@ flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
 	if ((vma->vm_flags & VM_EXEC) == 0)
 		return;
 
+	preempt_disable();
+
 	if (mm == current->active_mm) {
 		__load_new_mm_context(mm);
 		if (atomic_read(&mm->mm_users) <= 1) {
@@ -1040,6 +1048,7 @@ flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
 				if (mm->context[cpu])
 					mm->context[cpu] = 0;
 			}
+			preempt_enable();
 			return;
 		}
 	}
@@ -1047,6 +1056,8 @@ flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
 	if (smp_call_function(ipi_flush_icache_page, mm, 1, 1)) {
 		printk(KERN_CRIT "flush_icache_page: timed out\n");
 	}
+
+	preempt_enable();
 }
 
 #ifdef CONFIG_DEBUG_SPINLOCK
......
@@ -1376,8 +1376,7 @@ void /*__init*/ print_local_APIC(void * dummy)
 
 void print_all_local_APICs (void)
 {
-	smp_call_function(print_local_APIC, NULL, 1, 1);
-	print_local_APIC(NULL);
+	on_each_cpu(print_local_APIC, NULL, 1, 1);
 }
 
 void /*__init*/ print_PIC(void)
@@ -1843,8 +1842,7 @@ static void setup_nmi (void)
 	 */
 	printk(KERN_INFO "activating NMI Watchdog ...");
-	smp_call_function(enable_NMI_through_LVT0, NULL, 1, 1);
-	enable_NMI_through_LVT0(NULL);
+	on_each_cpu(enable_NMI_through_LVT0, NULL, 1, 1);
 	printk(" done.\n");
 }
......
@@ -55,12 +55,14 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
 	wmb();
 	if (reload) {
-		load_LDT(pc);
 #ifdef CONFIG_SMP
 		preempt_disable();
+		load_LDT(pc);
 		if (current->mm->cpu_vm_mask != (1 << smp_processor_id()))
 			smp_call_function(flush_ldt, 0, 1, 1);
 		preempt_enable();
+#else
+		load_LDT(pc);
 #endif
 	}
 	if (oldsize) {
......
@@ -183,11 +183,10 @@ static int do_microcode_update(void)
 	int i, error = 0, err;
 	struct microcode *m;
 
-	if (smp_call_function(do_update_one, NULL, 1, 1) != 0) {
+	if (on_each_cpu(do_update_one, NULL, 1, 1) != 0) {
 		printk(KERN_ERR "microcode: IPI timeout, giving up\n");
 		return -EIO;
 	}
-	do_update_one(NULL);
 
 	for (i=0; i<NR_CPUS; i++) {
 		err = update_req[i].err;
......
@@ -436,7 +436,7 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
 	preempt_enable();
 }
 
-static inline void do_flush_tlb_all_local(void)
+static void do_flush_tlb_all(void* info)
 {
 	unsigned long cpu = smp_processor_id();
 
@@ -445,18 +445,9 @@ static inline void do_flush_tlb_all_local(void)
 		leave_mm(cpu);
 }
 
-static void flush_tlb_all_ipi(void* info)
-{
-	do_flush_tlb_all_local();
-}
-
 void flush_tlb_all(void)
 {
-	preempt_disable();
-	smp_call_function (flush_tlb_all_ipi,0,1,1);
-	do_flush_tlb_all_local();
-	preempt_enable();
+	on_each_cpu(do_flush_tlb_all, 0, 1, 1);
 }
 
 /*
......
@@ -95,8 +95,7 @@ static int __init sysenter_setup(void)
 		return 0;
 	memcpy((void *) page, sysent, sizeof(sysent));
 
-	enable_sep_cpu(NULL);
-	smp_call_function(enable_sep_cpu, NULL, 1, 1);
+	on_each_cpu(enable_sep_cpu, NULL, 1, 1);
 	return 0;
 }
......
@@ -1209,8 +1209,8 @@ smp_vic_cpi_interrupt(struct pt_regs regs)
 		smp_call_function_interrupt();
 }
 
-static inline void
-do_flush_tlb_all_local(void)
+static void
+do_flush_tlb_all(void* info)
 {
 	unsigned long cpu = smp_processor_id();
 
@@ -1220,20 +1220,11 @@ do_flush_tlb_all_local(void)
 		leave_mm(cpu);
 }
 
-static void
-flush_tlb_all_function(void* info)
-{
-	do_flush_tlb_all_local();
-}
-
 /* flush the TLB of every active CPU in the system */
 void
 flush_tlb_all(void)
 {
-	preempt_disable();
-	smp_call_function (flush_tlb_all_function, 0, 1, 1);
-	do_flush_tlb_all_local();
-	preempt_enable();
+	on_each_cpu(do_flush_tlb_all, 0, 1, 1);
 }
 
 /* used to set up the trampoline for other CPUs when the memory manager
......
@@ -130,11 +130,8 @@ __change_page_attr(struct page *page, pgprot_t prot, struct page **oldpage)
 }
 
 static inline void flush_map(void)
 {
-#ifdef CONFIG_SMP
-	smp_call_function(flush_kernel_map, NULL, 1, 1);
-#endif
-	flush_kernel_map(NULL);
+	on_each_cpu(flush_kernel_map, NULL, 1, 1);
 }
 
 struct deferred_page {
......
@@ -95,8 +95,7 @@ static int nmi_setup(void)
 	 * without actually triggering any NMIs as this will
 	 * break the core code horrifically.
 	 */
-	smp_call_function(nmi_cpu_setup, NULL, 0, 1);
-	nmi_cpu_setup(0);
+	on_each_cpu(nmi_cpu_setup, NULL, 0, 1);
 	set_nmi_callback(nmi_callback);
 	oprofile_pmdev = set_nmi_pm_callback(oprofile_pm_callback);
 	return 0;
@@ -148,8 +147,7 @@ static void nmi_shutdown(void)
 {
 	unset_nmi_pm_callback(oprofile_pmdev);
 	unset_nmi_callback();
-	smp_call_function(nmi_cpu_shutdown, NULL, 0, 1);
-	nmi_cpu_shutdown(0);
+	on_each_cpu(nmi_cpu_shutdown, NULL, 0, 1);
 }
@@ -162,8 +160,7 @@ static void nmi_cpu_start(void * dummy)
 static int nmi_start(void)
 {
-	smp_call_function(nmi_cpu_start, NULL, 0, 1);
-	nmi_cpu_start(0);
+	on_each_cpu(nmi_cpu_start, NULL, 0, 1);
 	return 0;
 }
@@ -177,8 +174,7 @@ static void nmi_cpu_stop(void * dummy)
 static void nmi_stop(void)
 {
-	smp_call_function(nmi_cpu_stop, NULL, 0, 1);
-	nmi_cpu_stop(0);
+	on_each_cpu(nmi_cpu_stop, NULL, 0, 1);
 }
......
@@ -206,18 +206,18 @@ smp_send_reschedule_all (void)
 void
 smp_flush_tlb_all (void)
 {
-	smp_call_function((void (*)(void *))local_flush_tlb_all, 0, 1, 1);
-	local_flush_tlb_all();
+	on_each_cpu((void (*)(void *))local_flush_tlb_all, 0, 1, 1);
 }
 
 void
 smp_flush_tlb_mm (struct mm_struct *mm)
 {
-	local_finish_flush_tlb_mm(mm);
-
 	/* this happens for the common case of a single-threaded fork(): */
 	if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1))
+	{
+		local_finish_flush_tlb_mm(mm);
 		return;
+	}
 
 	/*
 	 * We could optimize this further by using mm->cpu_vm_mask to track which CPUs
@@ -226,7 +226,7 @@ smp_flush_tlb_mm (struct mm_struct *mm)
 	 * anyhow, and once a CPU is interrupted, the cost of local_flush_tlb_all() is
 	 * rather trivial.
 	 */
-	smp_call_function((void (*)(void *))local_finish_flush_tlb_mm, mm, 1, 1);
+	on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1, 1);
 }
 
 /*
......
@@ -195,8 +195,7 @@ static void flush_tlb_all_ipi(void *info)
 
 void flush_tlb_all(void)
 {
-	smp_call_function(flush_tlb_all_ipi, 0, 1, 1);
-	_flush_tlb_all();
+	on_each_cpu(flush_tlb_all_ipi, 0, 1, 1);
 }
 
 static void flush_tlb_mm_ipi(void *mm)
@@ -219,6 +218,8 @@ static void flush_tlb_mm_ipi(void *mm)
 
 void flush_tlb_mm(struct mm_struct *mm)
 {
+	preempt_disable();
+
 	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
 		smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1, 1);
 	} else {
@@ -228,6 +229,8 @@ void flush_tlb_mm(struct mm_struct *mm)
 			CPU_CONTEXT(i, mm) = 0;
 	}
 	_flush_tlb_mm(mm);
+
+	preempt_enable();
 }
 
 struct flush_tlb_data {
@@ -246,6 +249,8 @@ static void flush_tlb_range_ipi(void *info)
 
 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
+	preempt_disable();
+
 	if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
 		struct flush_tlb_data fd;
 
@@ -260,6 +265,8 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 			CPU_CONTEXT(i, mm) = 0;
 	}
 	_flush_tlb_range(mm, start, end);
+
+	preempt_enable();
 }
 
 static void flush_tlb_page_ipi(void *info)
@@ -271,6 +278,8 @@ static void flush_tlb_page_ipi(void *info)
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 {
+	preempt_disable();
+
 	if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
 		struct flush_tlb_data fd;
 
@@ -284,5 +293,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 			CPU_CONTEXT(i, vma->vm_mm) = 0;
 	}
 	_flush_tlb_page(vma, page);
+
+	preempt_enable();
 }
@@ -39,8 +39,7 @@ static struct pdc_btlb_info btlb_info;
 void
 flush_data_cache(void)
 {
-	smp_call_function((void (*)(void *))flush_data_cache_local, NULL, 1, 1);
-	flush_data_cache_local();
+	on_each_cpu((void (*)(void *))flush_data_cache_local, NULL, 1, 1);
 }
 #endif
......
@@ -61,20 +61,17 @@ static volatile unsigned long cpu_eiem = 0;
 
 static spinlock_t irq_lock = SPIN_LOCK_UNLOCKED;  /* protect IRQ regions */
 
-#ifdef CONFIG_SMP
 static void cpu_set_eiem(void *info)
 {
 	set_eiem((unsigned long) info);
 }
-#endif
 
 static inline void disable_cpu_irq(void *unused, int irq)
 {
 	unsigned long eirr_bit = EIEM_MASK(irq);
 
 	cpu_eiem &= ~eirr_bit;
-	set_eiem(cpu_eiem);
-	smp_call_function(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
+	on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
 }
 
 static void enable_cpu_irq(void *unused, int irq)
@@ -83,8 +80,7 @@ static void enable_cpu_irq(void *unused, int irq)
 
 	mtctl(eirr_bit, 23);	/* clear EIRR bit before unmasking */
 	cpu_eiem |= eirr_bit;
-	smp_call_function(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
-	set_eiem(cpu_eiem);
+	on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
 }
 
 /* mask and disable are the same at the CPU level
@@ -100,8 +96,7 @@ static inline void unmask_cpu_irq(void *unused, int irq)
 	** handle *any* unmasked pending interrupts.
 	** ie We don't need to check for pending interrupts here.
 	*/
-	smp_call_function(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
-	set_eiem(cpu_eiem);
+	on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
 }
 
 /*
......
@@ -401,7 +401,7 @@ static int __init maxcpus(char *str)
 __setup("maxcpus=", maxcpus);
 
 /*
- * Flush all other CPU's tlb and then mine.  Do this with smp_call_function()
+ * Flush all other CPU's tlb and then mine.  Do this with on_each_cpu()
  * as we want to ensure all TLB's flushed before proceeding.
 */
 
@@ -410,8 +410,7 @@ extern void flush_tlb_all_local(void);
 void
 smp_flush_tlb_all(void)
 {
-	smp_call_function((void (*)(void *))flush_tlb_all_local, NULL, 1, 1);
-	flush_tlb_all_local();
+	on_each_cpu((void (*)(void *))flush_tlb_all_local, NULL, 1, 1);
 }
......
@@ -974,8 +974,7 @@ void flush_tlb_all(void)
 		do_recycle++;
 	}
 	spin_unlock(&sid_lock);
-	smp_call_function((void (*)(void *))flush_tlb_all_local, NULL, 1, 1);
-	flush_tlb_all_local();
+	on_each_cpu((void (*)(void *))flush_tlb_all_local, NULL, 1, 1);
 	if (do_recycle) {
 		spin_lock(&sid_lock);
 		recycle_sids(recycle_ndirty,recycle_dirty_array);
......
@@ -194,10 +194,7 @@ static void tau_timeout_smp(unsigned long unused)
 
 	/* schedule ourselves to be run again */
 	mod_timer(&tau_timer, jiffies + shrink_timer) ;
-#ifdef CONFIG_SMP
-	smp_call_function(tau_timeout, NULL, 1, 0);
-#endif
-	tau_timeout(NULL);
+	on_each_cpu(tau_timeout, NULL, 1, 0);
 }
 
 /*
@@ -239,10 +236,7 @@ int __init TAU_init(void)
 	tau_timer.expires = jiffies + shrink_timer;
 	add_timer(&tau_timer);
 
-#ifdef CONFIG_SMP
-	smp_call_function(TAU_init_smp, NULL, 1, 0);
-#endif
-	TAU_init_smp(NULL);
+	on_each_cpu(TAU_init_smp, NULL, 1, 0);
 
 	printk("Thermal assist unit ");
 #ifdef CONFIG_TAU_INT
......
@@ -228,8 +228,7 @@ static void do_machine_restart(void * __unused)
 void machine_restart_smp(char * __unused)
 {
 	cpu_restart_map = cpu_online_map;
-	smp_call_function(do_machine_restart, NULL, 0, 0);
-	do_machine_restart(NULL);
+	on_each_cpu(do_machine_restart, NULL, 0, 0);
 }
 
 static void do_machine_halt(void * __unused)
@@ -247,8 +246,7 @@ static void do_machine_halt(void * __unused)
 void machine_halt_smp(void)
 {
-	smp_call_function(do_machine_halt, NULL, 0, 0);
-	do_machine_halt(NULL);
+	on_each_cpu(do_machine_halt, NULL, 0, 0);
 }
 
 static void do_machine_power_off(void * __unused)
@@ -266,8 +264,7 @@ static void do_machine_power_off(void * __unused)
 void machine_power_off_smp(void)
 {
-	smp_call_function(do_machine_power_off, NULL, 0, 0);
-	do_machine_power_off(NULL);
+	on_each_cpu(do_machine_power_off, NULL, 0, 0);
 }
 
 /*
@@ -339,8 +336,7 @@ void smp_ptlb_callback(void *info)
 void smp_ptlb_all(void)
 {
-	smp_call_function(smp_ptlb_callback, NULL, 0, 1);
-	local_flush_tlb();
+	on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
 }
 
 /*
@@ -400,8 +396,10 @@ void smp_ctl_set_bit(int cr, int bit) {
 	parms.end_ctl = cr;
 	parms.orvals[cr] = 1 << bit;
 	parms.andvals[cr] = 0xFFFFFFFF;
+	preempt_disable();
 	smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
 	__ctl_set_bit(cr, bit);
+	preempt_enable();
 }
 
 /*
@@ -414,8 +412,10 @@ void smp_ctl_clear_bit(int cr, int bit) {
 	parms.end_ctl = cr;
 	parms.orvals[cr] = 0x00000000;
 	parms.andvals[cr] = ~(1 << bit);
+	preempt_disable();
 	smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
 	__ctl_clear_bit(cr, bit);
+	preempt_enable();
 }
 
 /*
......
@@ -227,8 +227,7 @@ static void do_machine_restart(void * __unused)
 void machine_restart_smp(char * __unused)
 {
 	cpu_restart_map = cpu_online_map;
-	smp_call_function(do_machine_restart, NULL, 0, 0);
-	do_machine_restart(NULL);
+	on_each_cpu(do_machine_restart, NULL, 0, 0);
 }
 
 static void do_machine_halt(void * __unused)
@@ -246,8 +245,7 @@ static void do_machine_halt(void * __unused)
 void machine_halt_smp(void)
 {
-	smp_call_function(do_machine_halt, NULL, 0, 0);
-	do_machine_halt(NULL);
+	on_each_cpu(do_machine_halt, NULL, 0, 0);
 }
 
 static void do_machine_power_off(void * __unused)
@@ -265,8 +263,7 @@ static void do_machine_power_off(void * __unused)
 void machine_power_off_smp(void)
 {
-	smp_call_function(do_machine_power_off, NULL, 0, 0);
-	do_machine_power_off(NULL);
+	on_each_cpu(do_machine_power_off, NULL, 0, 0);
 }
 
 /*
@@ -383,8 +380,10 @@ void smp_ctl_set_bit(int cr, int bit) {
 	parms.end_ctl = cr;
 	parms.orvals[cr] = 1 << bit;
 	parms.andvals[cr] = -1L;
+	preempt_disable();
 	smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
 	__ctl_set_bit(cr, bit);
+	preempt_enable();
 }
 
 /*
@@ -397,8 +396,10 @@ void smp_ctl_clear_bit(int cr, int bit) {
 	parms.end_ctl = cr;
 	parms.orvals[cr] = 0;
 	parms.andvals[cr] = ~(1L << bit);
+	preempt_disable();
 	smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
 	__ctl_clear_bit(cr, bit);
+	preempt_enable();
 }
......
@@ -111,11 +111,7 @@ static void mce_checkregs (void *info)
 {
 	u32 low, high;
 	int i;
-	unsigned int *cpu = info;
-
-	BUG_ON (*cpu != smp_processor_id());
-
-	preempt_disable();
 
 	for (i=0; i<banks; i++) {
 		rdmsr(MSR_IA32_MC0_STATUS+i*4, low, high);
@@ -130,20 +126,12 @@ static void mce_checkregs (void *info)
 			wmb();
 		}
 	}
-
-	preempt_enable();
 }
 
 static void mce_timerfunc (unsigned long data)
 {
-	unsigned int i;
-
-	for (i=0; i<smp_num_cpus; i++) {
-		if (i == smp_processor_id())
-			mce_checkregs(&i);
-		else
-			smp_call_function (mce_checkregs, &i, 1, 1);
-	}
+	on_each_cpu (mce_checkregs, NULL, 1, 1);
 
 	/* Refresh the timer. */
 	mce_timer.expires = jiffies + MCE_RATE;
......
@@ -928,8 +928,7 @@ void /*__init*/ print_local_APIC(void * dummy)
 
 void print_all_local_APICs (void)
 {
-	smp_call_function(print_local_APIC, NULL, 1, 1);
-	print_local_APIC(NULL);
+	on_each_cpu(print_local_APIC, NULL, 1, 1);
 }
 
 void /*__init*/ print_PIC(void)
......
@@ -60,12 +60,14 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
 	pc->size = mincount;
 	wmb();
 	if (reload) {
-		load_LDT(pc);
 #ifdef CONFIG_SMP
 		preempt_disable();
+		load_LDT(pc);
 		if (current->mm->cpu_vm_mask != (1<<smp_processor_id()))
 			smp_call_function(flush_ldt, 0, 1, 1);
 		preempt_enable();
+#else
+		load_LDT(pc);
 #endif
 	}
 	if (oldsize) {
......
@@ -328,7 +328,7 @@ void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
 	preempt_enable();
 }
 
-static inline void do_flush_tlb_all_local(void)
+static void do_flush_tlb_all(void* info)
 {
 	unsigned long cpu = smp_processor_id();
 
@@ -337,18 +337,9 @@ static inline void do_flush_tlb_all_local(void)
 		leave_mm(cpu);
 }
 
-static void flush_tlb_all_ipi(void* info)
-{
-	do_flush_tlb_all_local();
-}
-
 void flush_tlb_all(void)
 {
-	preempt_disable();
-	smp_call_function (flush_tlb_all_ipi,0,1,1);
-	do_flush_tlb_all_local();
-	preempt_enable();
+	on_each_cpu(do_flush_tlb_all, 0, 1, 1);
 }
 
 void smp_kdb_stop(void)
......
@@ -123,12 +123,7 @@ __change_page_attr(unsigned long address, struct page *page, pgprot_t prot,
 
 static inline void flush_map(unsigned long address)
 {
-	preempt_disable();
-#ifdef CONFIG_SMP
-	smp_call_function(flush_kernel_map, (void *)address, 1, 1);
-#endif
-	flush_kernel_map((void *)address);
-	preempt_enable();
+	on_each_cpu(flush_kernel_map, (void *)address, 1, 1);
 }
 
 struct deferred_page {
......
@@ -42,9 +42,8 @@ static void ipi_handler(void *null)
 
 static void __attribute__((unused)) global_cache_flush(void)
 {
-	if (smp_call_function(ipi_handler, NULL, 1, 1) != 0)
+	if (on_each_cpu(ipi_handler, NULL, 1, 1) != 0)
 		panic(PFX "timed out waiting for the other CPUs!\n");
-	flush_agp_cache();
 }
 #else
 static inline void global_cache_flush(void)
......
@@ -481,8 +481,7 @@ static void
 do_machine_quiesce(void)
 {
 	cpu_quiesce_map = cpu_online_map;
-	smp_call_function(do_load_quiesce_psw, NULL, 0, 0);
-	do_load_quiesce_psw(NULL);
+	on_each_cpu(do_load_quiesce_psw, NULL, 0, 0);
 }
 #else
 static void
......
@@ -1403,10 +1403,7 @@ static void invalidate_bh_lru(void *arg)
 
 static void invalidate_bh_lrus(void)
 {
-	preempt_disable();
-	invalidate_bh_lru(NULL);
-	smp_call_function(invalidate_bh_lru, NULL, 1, 1);
-	preempt_enable();
+	on_each_cpu(invalidate_bh_lru, NULL, 1, 1);
 }
 
 void set_bh_page(struct buffer_head *bh,
......
@@ -25,16 +25,10 @@ flush_page_to_ram(struct page *page)
 
 extern void flush_cache_all_local(void);
 
-#ifdef CONFIG_SMP
 static inline void flush_cache_all(void)
 {
-	smp_call_function((void (*)(void *))flush_cache_all_local, NULL, 1, 1);
-	flush_cache_all_local();
+	on_each_cpu((void (*)(void *))flush_cache_all_local, NULL, 1, 1);
 }
-#else
-#define flush_cache_all flush_cache_all_local
-#endif
 
 /* The following value needs to be tuned and probably scaled with the
  * cache size.
......
@@ -10,9 +10,10 @@
 
 #ifdef CONFIG_SMP
 
+#include <linux/preempt.h>
 #include <linux/kernel.h>
 #include <linux/compiler.h>
-#include <linux/threads.h>
+#include <linux/thread_info.h>
 #include <asm/smp.h>
 #include <asm/bug.h>
 
@@ -53,6 +54,21 @@ extern void smp_cpus_done(unsigned int max_cpus);
 extern int smp_call_function (void (*func) (void *info), void *info,
 			      int retry, int wait);
 
+/*
+ * Call a function on all processors
+ */
+static inline int on_each_cpu(void (*func) (void *info), void *info,
+			      int retry, int wait)
+{
+	int ret = 0;
+
+	preempt_disable();
+	ret = smp_call_function(func, info, retry, wait);
+	func(info);
+	preempt_enable();
+	return ret;
+}
+
 /*
  * True once the per process idle is forked
 */
@@ -96,6 +112,7 @@ void smp_prepare_boot_cpu(void);
 #define hard_smp_processor_id()			0
 #define smp_threads_ready			1
 #define smp_call_function(func,info,retry,wait)	({ 0; })
+#define on_each_cpu(func,info,retry,wait)	({ func(info); 0; })
 static inline void smp_send_reschedule(int cpu) { }
 static inline void smp_send_reschedule_all(void) { }
 #define cpu_online_map				1
......
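As a usage sketch of the new helper (the function names below are hypothetical, not part of the patch), a caller no longer needs the open-coded pair and still sees the return value of smp_call_function():

	static void do_flush(void *unused)
	{
		/* invoked exactly once on every online CPU */
	}

	static void flush_everywhere(void)
	{
		/* retry = 1, wait = 1: wait for the other CPUs to finish */
		if (on_each_cpu(do_flush, NULL, 1, 1))
			printk(KERN_CRIT "flush_everywhere: timed out\n");
	}

On uniprocessor builds the !CONFIG_SMP stub above simply expands to func(info) and evaluates to 0.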
@@ -1116,12 +1116,16 @@ static inline void check_spinlock_acquired(kmem_cache_t *cachep)
 static void smp_call_function_all_cpus(void (*func) (void *arg), void *arg)
 {
 	check_irq_on();
+	preempt_disable();
+
 	local_irq_disable();
 	func(arg);
 	local_irq_enable();
 
 	if (smp_call_function(func, arg, 1, 1))
 		BUG();
+
+	preempt_enable();
 }
 
 static void free_block (kmem_cache_t* cachep, void** objpp, int len);
......