Commit 5d2b86d9 authored by Don Zickus, committed by Ingo Molnar

Revert "x86, reboot: Use NMI instead of REBOOT_VECTOR to stop cpus"

This reverts commit 3603a251.

Originally I wanted a better hammer to shutdown cpus during
panic. However, this really steps on the toes of various
spinlocks in the panic path.  Sometimes it is easier to wait for
the IRQ to become re-enabled to indicate the cpu left the
critical region and then shutdown the cpu.

The next patch moves the NMI addition after the IRQ part.  To
make it easier to see the logic of everything, revert this patch
and apply the next simpler patch.
Signed-off-by: Don Zickus <dzickus@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1336761675-24296-2-git-send-email-dzickus@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 144d102b
...@@ -29,7 +29,6 @@ ...@@ -29,7 +29,6 @@
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include <asm/proto.h> #include <asm/proto.h>
#include <asm/apic.h> #include <asm/apic.h>
#include <asm/nmi.h>
/* /*
* Some notes on x86 processor bugs affecting SMP operation: * Some notes on x86 processor bugs affecting SMP operation:
* *
...@@ -149,60 +148,6 @@ void native_send_call_func_ipi(const struct cpumask *mask) ...@@ -149,60 +148,6 @@ void native_send_call_func_ipi(const struct cpumask *mask)
free_cpumask_var(allbutself); free_cpumask_var(allbutself);
} }
/*
 * CPU id of the CPU that initiated the shutdown (set via atomic_cmpxchg in
 * native_nmi_stop_other_cpus), or -1 while no shutdown is in progress.
 */
static atomic_t stopping_cpu = ATOMIC_INIT(-1);
/*
 * NMI handler used to park a CPU during shutdown.
 *
 * The handler is registered on the initiating CPU as well, so its own
 * (spurious) NMI must be ignored rather than parking the initiator.
 * Always reports the NMI as handled.
 */
static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
{
	/* Park this CPU unless we are the CPU driving the shutdown. */
	if (raw_smp_processor_id() != atomic_read(&stopping_cpu))
		stop_this_cpu(NULL);

	return NMI_HANDLED;
}
/*
 * Stop all other CPUs by sending them an NMI, then disable the local APIC.
 *
 * @wait: if non-zero, spin until every other CPU has gone offline;
 *        otherwise give up after roughly one second.
 *
 * Ordering matters here: claim stopping_cpu, register the NMI handler,
 * publish both with wmb(), and only then send the NMI IPI.
 */
static void native_nmi_stop_other_cpus(int wait)
{
unsigned long flags;
unsigned long timeout;
/* Forced reboot requested: skip the orderly CPU shutdown entirely. */
if (reboot_force)
return;
/*
 * Use an own vector here because smp_call_function
 * does lots of things not suitable in a panic situation.
 */
if (num_online_cpus() > 1) {
/* did someone beat us here? Only one CPU may drive the shutdown. */
if (atomic_cmpxchg(&stopping_cpu, -1, safe_smp_processor_id()) != -1)
return;
if (register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback,
NMI_FLAG_FIRST, "smp_stop"))
/* Note: we ignore failures here */
return;
/* sync above data (stopping_cpu, handler) before sending NMI */
wmb();
apic->send_IPI_allbutself(NMI_VECTOR);
/*
 * Don't wait longer than a second if the caller
 * didn't ask us to wait.
 */
timeout = USEC_PER_SEC;
while (num_online_cpus() > 1 && (wait || timeout--))
udelay(1);
}
/* Finally take this CPU's APIC offline with interrupts masked. */
local_irq_save(flags);
disable_local_APIC();
local_irq_restore(flags);
}
/* /*
* this function calls the 'stop' function on all other CPUs in the system. * this function calls the 'stop' function on all other CPUs in the system.
*/ */
...@@ -215,7 +160,7 @@ asmlinkage void smp_reboot_interrupt(void) ...@@ -215,7 +160,7 @@ asmlinkage void smp_reboot_interrupt(void)
irq_exit(); irq_exit();
} }
static void native_irq_stop_other_cpus(int wait) static void native_stop_other_cpus(int wait)
{ {
unsigned long flags; unsigned long flags;
unsigned long timeout; unsigned long timeout;
...@@ -298,7 +243,7 @@ struct smp_ops smp_ops = { ...@@ -298,7 +243,7 @@ struct smp_ops smp_ops = {
.smp_prepare_cpus = native_smp_prepare_cpus, .smp_prepare_cpus = native_smp_prepare_cpus,
.smp_cpus_done = native_smp_cpus_done, .smp_cpus_done = native_smp_cpus_done,
.stop_other_cpus = native_nmi_stop_other_cpus, .stop_other_cpus = native_stop_other_cpus,
.smp_send_reschedule = native_smp_send_reschedule, .smp_send_reschedule = native_smp_send_reschedule,
.cpu_up = native_cpu_up, .cpu_up = native_cpu_up,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment