Commit 2335a836 authored by Linus Torvalds

Merge branch 'x86-reboot-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 reboot changes from Ingo Molnar:
 "The biggest change is a gentler method of rebooting/stopping via IRQs
  first and then via NMIs.  There are several cleanups in the tree as
  well."

* 'x86-reboot-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/reboot: Update nonmi_ipi parameter
  x86/reboot: Use NMI to assist in shutting down if IRQ fails
  Revert "x86, reboot: Use NMI instead of REBOOT_VECTOR to stop cpus"
  x86/reboot: Clean up coding style
  x86/reboot: Reduce to a single DMI table for reboot quirks
parents 44bc40e1 3aac27ab
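The pattern the merge message describes is the classic two-stage shutdown: ask the other CPUs to stop via a normal interrupt, give them a grace period to quiesce, and only then force the stragglers off with an NMI. As a rough userspace analogue of that pattern (all names hypothetical; pthreads stand in for CPUs, a polled flag for the REBOOT_VECTOR IRQ, pthread_cancel for the NMI; build with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

#define NWORKERS 4

static atomic_bool please_stop;                /* stand-in for REBOOT_VECTOR */
static atomic_int  still_running = NWORKERS;

static void *worker(void *arg)
{
        /* allow forced termination, our stand-in for the NMI */
        pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);

        while (!atomic_load(&please_stop))
                usleep(1000);                  /* polite path: poll and comply */

        atomic_fetch_sub(&still_running, 1);
        return NULL;
}

int main(void)
{
        pthread_t tids[NWORKERS];
        long timeout = 1000000;                /* ~1s grace, cf. USEC_PER_SEC */
        int i;

        for (i = 0; i < NWORKERS; i++)
                pthread_create(&tids[i], NULL, worker, NULL);

        /* stage 1: ask nicely and give everyone time to comply */
        atomic_store(&please_stop, true);
        while (atomic_load(&still_running) > 0 && timeout--)
                usleep(1);

        /* stage 2: force off anyone still running */
        if (atomic_load(&still_running) > 0)
                for (i = 0; i < NWORKERS; i++)
                        pthread_cancel(tids[i]);

        for (i = 0; i < NWORKERS; i++)
                pthread_join(tids[i], NULL);

        puts("all workers stopped");
        return 0;
}

The analogy breaks exactly where the kernel change matters: a REBOOT_VECTOR IPI is only handled once the target CPU re-enables interrupts, which is what makes it a safe sync point, and also why a CPU spinning with interrupts off needs the NMI fallback.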
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -109,6 +109,9 @@
  * about nothing of note with C stepping upwards.
  */
 
+static atomic_t stopping_cpu = ATOMIC_INIT(-1);
+static bool smp_no_nmi_ipi = false;
+
 /*
  * this function sends a 'reschedule' IPI to another CPU.
  * it goes straight through and wastes no time serializing
@@ -149,8 +152,6 @@ void native_send_call_func_ipi(const struct cpumask *mask)
 	free_cpumask_var(allbutself);
 }
 
-static atomic_t stopping_cpu = ATOMIC_INIT(-1);
-
 static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
 {
 	/* We are registered on stopping cpu too, avoid spurious NMI */
@@ -162,7 +163,19 @@ static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
 	return NMI_HANDLED;
 }
 
-static void native_nmi_stop_other_cpus(int wait)
+/*
+ * this function calls the 'stop' function on all other CPUs in the system.
+ */
+asmlinkage void smp_reboot_interrupt(void)
+{
+	ack_APIC_irq();
+	irq_enter();
+	stop_this_cpu(NULL);
+	irq_exit();
+}
+
+static void native_stop_other_cpus(int wait)
 {
 	unsigned long flags;
 	unsigned long timeout;
@@ -174,20 +187,25 @@ static void native_nmi_stop_other_cpus(int wait)
 	/*
 	 * Use an own vector here because smp_call_function
 	 * does lots of things not suitable in a panic situation.
 	 */
+
+	/*
+	 * We start by using the REBOOT_VECTOR irq.
+	 * The irq is treated as a sync point to allow critical
+	 * regions of code on other cpus to release their spin locks
+	 * and re-enable irqs. Jumping straight to an NMI might
+	 * accidentally cause deadlocks with further shutdown/panic
+	 * code. By syncing, we give the cpus up to one second to
+	 * finish their work before we force them off with the NMI.
+	 */
 	if (num_online_cpus() > 1) {
 		/* did someone beat us here? */
 		if (atomic_cmpxchg(&stopping_cpu, -1, safe_smp_processor_id()) != -1)
 			return;
 
-		if (register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback,
-					 NMI_FLAG_FIRST, "smp_stop"))
-			/* Note: we ignore failures here */
-			return;
-
-		/* sync above data before sending NMI */
+		/* sync above data before sending IRQ */
 		wmb();
 
-		apic->send_IPI_allbutself(NMI_VECTOR);
+		apic->send_IPI_allbutself(REBOOT_VECTOR);
 
 		/*
 		 * Don't wait longer than a second if the caller
@@ -197,63 +215,37 @@ static void native_nmi_stop_other_cpus(int wait)
 		while (num_online_cpus() > 1 && (wait || timeout--))
 			udelay(1);
 	}
 
+	/* if the REBOOT_VECTOR didn't work, try with the NMI */
+	if ((num_online_cpus() > 1) && (!smp_no_nmi_ipi)) {
+		if (register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback,
+					 NMI_FLAG_FIRST, "smp_stop"))
+			/* Note: we ignore failures here */
+			/* Hope the REBOOT_IRQ is good enough */
+			goto finish;
-	local_irq_save(flags);
-	disable_local_APIC();
-	local_irq_restore(flags);
-}
-
-/*
- * this function calls the 'stop' function on all other CPUs in the system.
- */
-asmlinkage void smp_reboot_interrupt(void)
-{
-	ack_APIC_irq();
-	irq_enter();
-	stop_this_cpu(NULL);
-	irq_exit();
-}
-
-static void native_irq_stop_other_cpus(int wait)
-{
-	unsigned long flags;
-	unsigned long timeout;
 
+		/* sync above data before sending IRQ */
+		wmb();
 
-	if (reboot_force)
-		return;
+		pr_emerg("Shutting down cpus with NMI\n");
 
-	/*
-	 * Use an own vector here because smp_call_function
-	 * does lots of things not suitable in a panic situation.
-	 * On most systems we could also use an NMI here,
-	 * but there are a few systems around where NMI
-	 * is problematic so stay with an non NMI for now
-	 * (this implies we cannot stop CPUs spinning with irq off
-	 * currently)
-	 */
-	if (num_online_cpus() > 1) {
-		apic->send_IPI_allbutself(REBOOT_VECTOR);
+		apic->send_IPI_allbutself(NMI_VECTOR);
 
 		/*
-		 * Don't wait longer than a second if the caller
+		 * Don't wait longer than a 10 ms if the caller
 		 * didn't ask us to wait.
 		 */
-		timeout = USEC_PER_SEC;
+		timeout = USEC_PER_MSEC * 10;
 		while (num_online_cpus() > 1 && (wait || timeout--))
 			udelay(1);
 	}
 
+finish:
 	local_irq_save(flags);
 	disable_local_APIC();
 	local_irq_restore(flags);
 }
 
-static void native_smp_disable_nmi_ipi(void)
-{
-	smp_ops.stop_other_cpus = native_irq_stop_other_cpus;
-}
-
 /*
  * Reschedule call back.
  */
@@ -287,8 +279,8 @@ void smp_call_function_single_interrupt(struct pt_regs *regs)
 
 static int __init nonmi_ipi_setup(char *str)
 {
-	native_smp_disable_nmi_ipi();
-	return 1;
+	smp_no_nmi_ipi = true;
+	return 1;
 }
 __setup("nonmi_ipi", nonmi_ipi_setup);
@@ -298,7 +290,7 @@ struct smp_ops smp_ops = {
 	.smp_prepare_cpus	= native_smp_prepare_cpus,
 	.smp_cpus_done		= native_smp_cpus_done,
 
-	.stop_other_cpus	= native_nmi_stop_other_cpus,
+	.stop_other_cpus	= native_stop_other_cpus,
 	.smp_send_reschedule	= native_smp_send_reschedule,
 	.cpu_up			= native_cpu_up,
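For readability, here is the stop path as it stands after the merge, pieced together from the hunks above. Context lines that the diff elides are filled in from the surrounding file, so treat this as a reading aid rather than an authoritative copy of arch/x86/kernel/smp.c:

static void native_stop_other_cpus(int wait)
{
	unsigned long flags;
	unsigned long timeout;

	if (reboot_force)
		return;

	/*
	 * Stage 1: the REBOOT_VECTOR irq, treated as a sync point so
	 * other cpus can release spin locks and re-enable irqs before
	 * being forced off.
	 */
	if (num_online_cpus() > 1) {
		/* did someone beat us here? */
		if (atomic_cmpxchg(&stopping_cpu, -1, safe_smp_processor_id()) != -1)
			return;

		/* sync above data before sending IRQ */
		wmb();

		apic->send_IPI_allbutself(REBOOT_VECTOR);

		/* wait up to one second unless the caller asked to wait */
		timeout = USEC_PER_SEC;
		while (num_online_cpus() > 1 && (wait || timeout--))
			udelay(1);
	}

	/* Stage 2: if the REBOOT_VECTOR didn't work, try with the NMI */
	if ((num_online_cpus() > 1) && (!smp_no_nmi_ipi)) {
		if (register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback,
					 NMI_FLAG_FIRST, "smp_stop"))
			/* registration failed; hope the IRQ was enough */
			goto finish;

		/* sync above data before sending NMI */
		wmb();

		pr_emerg("Shutting down cpus with NMI\n");

		apic->send_IPI_allbutself(NMI_VECTOR);

		/* NMIs either land quickly or not at all: 10 ms is plenty */
		timeout = USEC_PER_MSEC * 10;
		while (num_online_cpus() > 1 && (wait || timeout--))
			udelay(1);
	}

finish:
	local_irq_save(flags);
	disable_local_APIC();
	local_irq_restore(flags);
}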