Commit 1e740163 authored by travis@sgi.com, committed by Ingo Molnar

x86/platform/UV: Clean up the NMI code to match current coding style

Update UV NMI to current coding style.
Signed-off-by: Mike Travis <travis@sgi.com>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russ Anderson <rja@hpe.com>
Link: http://lkml.kernel.org/r/20170125163518.419094259@asylum.americas.sgi.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 9ec808a0
--- a/arch/x86/platform/uv/uv_nmi.c
+++ b/arch/x86/platform/uv/uv_nmi.c
@@ -45,8 +45,8 @@
  *
  * Handle system-wide NMI events generated by the global 'power nmi' command.
  *
- * Basic operation is to field the NMI interrupt on each cpu and wait
- * until all cpus have arrived into the nmi handler. If some cpus do not
+ * Basic operation is to field the NMI interrupt on each CPU and wait
+ * until all CPU's have arrived into the nmi handler. If some CPU's do not
  * make it into the handler, try and force them in with the IPI(NMI) signal.
  *
  * We also have to lessen UV Hub MMR accesses as much as possible as this
@@ -56,7 +56,7 @@
  * To do this we register our primary NMI notifier on the NMI_UNKNOWN
  * chain. This reduces the number of false NMI calls when the perf
  * tools are running which generate an enormous number of NMIs per
- * second (~4M/s for 1024 cpu threads). Our secondary NMI handler is
+ * second (~4M/s for 1024 CPU threads). Our secondary NMI handler is
  * very short as it only checks that if it has been "pinged" with the
  * IPI(NMI) signal as mentioned above, and does not read the UV Hub's MMR.
  *
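The two-chain arrangement described in the comment above is the heart of the design: the primary handler sits on the NMI_UNKNOWN chain, so it is consulted only after every other subsystem (perf included) has declined the NMI, while the short secondary handler sits on the regular local chain to field the IPI(NMI) pings. A minimal sketch of such a split registration, reusing the two handler names that appear later in this diff (the flags, error handling, and message text are illustrative assumptions, not this file's verbatim code):

    #include <asm/nmi.h>

    static void uv_register_nmi_notifier(void)
    {
            /* Primary handler: runs only for NMIs no other subsystem claimed */
            if (register_nmi_handler(NMI_UNKNOWN, uv_handle_nmi, 0, "uv"))
                    pr_warn("UV: NMI handler failed to register\n");

            /* Secondary handler: fields the IPI(NMI) "ping" on the local chain */
            if (register_nmi_handler(NMI_LOCAL, uv_handle_nmi_ping, 0, "uvping"))
                    pr_warn("UV: PING NMI handler failed to register\n");
    }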
@@ -113,7 +113,7 @@ static int param_get_local64(char *buffer, const struct kernel_param *kp)
 static int param_set_local64(const char *val, const struct kernel_param *kp)
 {
-        /* clear on any write */
+        /* Clear on any write */
         local64_set((local64_t *)kp->arg, 0);
         return 0;
 }
@@ -322,7 +322,7 @@ static struct init_nmi {
                 .data = 0x0,    /* ACPI Mode */
         },
-        /* clear status */
+        /* Clear status: */
         {       /* GPI_INT_STS_GPP_D_0 */
                 .offset = 0x104,
                 .mask = 0x0,
@@ -344,29 +344,29 @@ static struct init_nmi {
                 .data = 0x1,    /* Clear Status */
         },
-        /* disable interrupts */
+        /* Disable interrupts: */
         {       /* GPI_INT_EN_GPP_D_0 */
                 .offset = 0x114,
                 .mask = 0x1,
-                .data = 0x0,    /* disable interrupt generation */
+                .data = 0x0,    /* Disable interrupt generation */
         },
         {       /* GPI_GPE_EN_GPP_D_0 */
                 .offset = 0x134,
                 .mask = 0x1,
-                .data = 0x0,    /* disable interrupt generation */
+                .data = 0x0,    /* Disable interrupt generation */
         },
         {       /* GPI_SMI_EN_GPP_D_0 */
                 .offset = 0x154,
                 .mask = 0x1,
-                .data = 0x0,    /* disable interrupt generation */
+                .data = 0x0,    /* Disable interrupt generation */
         },
         {       /* GPI_NMI_EN_GPP_D_0 */
                 .offset = 0x174,
                 .mask = 0x1,
-                .data = 0x0,    /* disable interrupt generation */
+                .data = 0x0,    /* Disable interrupt generation */
         },
-        /* setup GPP_D_0 Pad Config */
+        /* Setup GPP_D_0 Pad Config: */
         {       /* PAD_CFG_DW0_GPP_D_0 */
                 .offset = 0x4c0,
                 .mask = 0xffffffff,
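The init_nmi[] table above encodes the PCH GPP_D_0 reconfiguration as data: each entry names a register offset, the bits to change, and their new value, with mask == 0x0 marking the write-1-to-clear status entries. A minimal sketch of how such a table can be applied with read-modify-write accesses (pch_base, the entry type name, and the mask == 0 handling are assumptions for illustration; the helper in this file differs in detail):

    #include <linux/io.h>

    struct init_nmi_entry {
            unsigned int offset;    /* Register offset within the GPIO community */
            unsigned int mask;      /* Bits to update; 0 marks a status-clear entry */
            unsigned int data;      /* New value for the masked bits */
    };

    static void __iomem *pch_base;  /* Assumed: ioremapped PCH GPP register base */

    static void apply_init_table(const struct init_nmi_entry *tab, int entries)
    {
            int i;

            for (i = 0; i < entries; i++) {
                    u32 val = readl(pch_base + tab[i].offset);

                    if (tab[i].mask)                /* Update only the masked bits */
                            writel((val & ~tab[i].mask) | tab[i].data,
                                   pch_base + tab[i].offset);
                    else if (val & tab[i].data)     /* Assumed write-1-to-clear status */
                            writel(tab[i].data, pch_base + tab[i].offset);
            }
            (void)readl(pch_base);                  /* Flush posted writes */
    }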
@@ -444,7 +444,7 @@ static int uv_nmi_test_hubless(struct uv_hub_nmi_s *hub_nmi)
                 return 0;

         *pstat = STS_GPP_D_0_MASK;      /* Is a UV NMI: clear GPP_D_0 status */
-        (void)*pstat;                   /* flush write */
+        (void)*pstat;                   /* Flush write */

         return 1;
 }
@@ -461,8 +461,8 @@ static int uv_test_nmi(struct uv_hub_nmi_s *hub_nmi)
 }

 /*
- * If first cpu in on this hub, set hub_nmi "in_nmi" and "owner" values and
- * return true. If first cpu in on the system, set global "in_nmi" flag.
+ * If first CPU in on this hub, set hub_nmi "in_nmi" and "owner" values and
+ * return true. If first CPU in on the system, set global "in_nmi" flag.
  */
 static int uv_set_in_nmi(int cpu, struct uv_hub_nmi_s *hub_nmi)
 {
@@ -496,7 +496,7 @@ static int uv_check_nmi(struct uv_hub_nmi_s *hub_nmi)
                 if (raw_spin_trylock(&hub_nmi->nmi_lock)) {
                         nmi_detected = uv_test_nmi(hub_nmi);

-                        /* check flag for UV external NMI */
+                        /* Check flag for UV external NMI */
                         if (nmi_detected > 0) {
                                 uv_set_in_nmi(cpu, hub_nmi);
                                 nmi = 1;
@@ -516,7 +516,7 @@ static int uv_check_nmi(struct uv_hub_nmi_s *hub_nmi)
 slave_wait:             cpu_relax();
                         udelay(uv_nmi_slave_delay);

-                        /* re-check hub in_nmi flag */
+                        /* Re-check hub in_nmi flag */
                         nmi = atomic_read(&hub_nmi->in_nmi);
                         if (nmi)
                                 break;
@@ -560,7 +560,7 @@ static inline void uv_clear_nmi(int cpu)
         }
 }

-/* Ping non-responding cpus attempting to force them into the NMI handler */
+/* Ping non-responding CPU's attempting to force them into the NMI handler */
 static void uv_nmi_nr_cpus_ping(void)
 {
         int cpu;
@@ -571,7 +571,7 @@ static void uv_nmi_nr_cpus_ping(void)
         apic->send_IPI_mask(uv_nmi_cpu_mask, APIC_DM_NMI);
 }

-/* Clean up flags for cpus that ignored both NMI and ping */
+/* Clean up flags for CPU's that ignored both NMI and ping */
 static void uv_nmi_cleanup_mask(void)
 {
         int cpu;
@@ -583,7 +583,7 @@ static void uv_nmi_cleanup_mask(void)
         }
 }

-/* Loop waiting as cpus enter NMI handler */
+/* Loop waiting as CPU's enter NMI handler */
 static int uv_nmi_wait_cpus(int first)
 {
         int i, j, k, n = num_online_cpus();
@@ -597,7 +597,7 @@ static int uv_nmi_wait_cpus(int first)
                 k = n - cpumask_weight(uv_nmi_cpu_mask);
         }

-        /* PCH NMI causes only one cpu to respond */
+        /* PCH NMI causes only one CPU to respond */
         if (first && uv_pch_intr_now_enabled) {
                 cpumask_clear_cpu(cpu, uv_nmi_cpu_mask);
                 return n - k - 1;
@@ -618,13 +618,13 @@ static int uv_nmi_wait_cpus(int first)
                         k = n;
                         break;
                 }
-                if (last_k != k) {      /* abort if no new cpus coming in */
+                if (last_k != k) {      /* abort if no new CPU's coming in */
                         last_k = k;
                         waiting = 0;
                 } else if (++waiting > uv_nmi_wait_count)
                         break;

-                /* extend delay if waiting only for cpu 0 */
+                /* Extend delay if waiting only for CPU 0: */
                 if (waiting && (n - k) == 1 &&
                     cpumask_test_cpu(0, uv_nmi_cpu_mask))
                         loop_delay *= 100;
@@ -635,29 +635,29 @@ static int uv_nmi_wait_cpus(int first)
         return n - k;
 }

-/* Wait until all slave cpus have entered UV NMI handler */
+/* Wait until all slave CPU's have entered UV NMI handler */
 static void uv_nmi_wait(int master)
 {
-        /* indicate this cpu is in */
+        /* Indicate this CPU is in: */
         this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_IN);

-        /* if not the first cpu in (the master), then we are a slave cpu */
+        /* If not the first CPU in (the master), then we are a slave CPU */
         if (!master)
                 return;

         do {
-                /* wait for all other cpus to gather here */
+                /* Wait for all other CPU's to gather here */
                 if (!uv_nmi_wait_cpus(1))
                         break;

-                /* if not all made it in, send IPI NMI to them */
+                /* If not all made it in, send IPI NMI to them */
                 pr_alert("UV: Sending NMI IPI to %d CPUs: %*pbl\n",
                          cpumask_weight(uv_nmi_cpu_mask),
                          cpumask_pr_args(uv_nmi_cpu_mask));

                 uv_nmi_nr_cpus_ping();

-                /* if all cpus are in, then done */
+                /* If all CPU's are in, then done */
                 if (!uv_nmi_wait_cpus(0))
                         break;
@@ -709,7 +709,7 @@ static void uv_nmi_dump_state_cpu(int cpu, struct pt_regs *regs)
         this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_DUMP_DONE);
 }

-/* Trigger a slave cpu to dump its state */
+/* Trigger a slave CPU to dump its state */
 static void uv_nmi_trigger_dump(int cpu)
 {
         int retry = uv_nmi_trigger_delay;
@@ -730,7 +730,7 @@ static void uv_nmi_trigger_dump(int cpu)
         uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP_DONE;
 }

-/* Wait until all cpus ready to exit */
+/* Wait until all CPU's ready to exit */
 static void uv_nmi_sync_exit(int master)
 {
         atomic_dec(&uv_nmi_cpus_in_nmi);
@@ -760,7 +760,7 @@ static void uv_nmi_action_health(int cpu, struct pt_regs *regs, int master)
         uv_nmi_sync_exit(master);
 }

-/* Walk through cpu list and dump state of each */
+/* Walk through CPU list and dump state of each */
 static void uv_nmi_dump_state(int cpu, struct pt_regs *regs, int master)
 {
         if (master) {
@@ -872,7 +872,7 @@ static void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
                 if (reason < 0)
                         return;

-                /* call KGDB NMI handler as MASTER */
+                /* Call KGDB NMI handler as MASTER */
                 ret = kgdb_nmicallin(cpu, X86_TRAP_NMI, regs, reason,
                                 &uv_nmi_slave_continue);
                 if (ret) {
@@ -880,7 +880,7 @@ static void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
                         atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT);
                 }
         } else {
-                /* wait for KGDB signal that it's ready for slaves to enter */
+                /* Wait for KGDB signal that it's ready for slaves to enter */
                 int sig;

                 do {
@@ -888,7 +888,7 @@ static void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
                         sig = atomic_read(&uv_nmi_slave_continue);
                 } while (!sig);

-                /* call KGDB as slave */
+                /* Call KGDB as slave */
                 if (sig == SLAVE_CONTINUE)
                         kgdb_nmicallback(cpu, regs);
         }
@@ -932,7 +932,7 @@ int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
                 strncpy(uv_nmi_action, "dump", strlen(uv_nmi_action));
         }

-        /* Pause as all cpus enter the NMI handler */
+        /* Pause as all CPU's enter the NMI handler */
         uv_nmi_wait(master);

         /* Process actions other than "kdump": */
@@ -972,7 +972,7 @@ int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
 }

 /*
- * NMI handler for pulling in CPUs when perf events are grabbing our NMI
+ * NMI handler for pulling in CPU's when perf events are grabbing our NMI
  */
 static int uv_handle_nmi_ping(unsigned int reason, struct pt_regs *regs)
 {
@@ -1005,7 +1005,7 @@ void uv_nmi_init(void)
         unsigned int value;

         /*
-         * Unmask NMI on all cpus
+         * Unmask NMI on all CPU's
          */
         value = apic_read(APIC_LVT1) | APIC_DM_NMI;
         value &= ~APIC_LVT_MASKED;
...
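The last hunks above touch uv_handle_nmi_ping(), the secondary handler that the header comment promises will stay short and avoid UV Hub MMR reads. A minimal sketch of that idea, using names visible in this diff (the per-CPU pinging flag and the bookkeeping are simplified, not the verbatim kernel code):

    /*
     * Slave-side ping handler: claim the NMI only if this CPU was pinged
     * via IPI(NMI) by uv_nmi_nr_cpus_ping(); otherwise pass it to the
     * next handler on the chain.  No UV Hub MMR access on this path.
     */
    static int uv_handle_nmi_ping(unsigned int reason, struct pt_regs *regs)
    {
            if (!this_cpu_read(uv_cpu_nmi.pinging))
                    return NMI_DONE;                /* Not ours: let others see it */

            uv_handle_nmi(reason, regs);            /* Join the NMI rendezvous */
            this_cpu_write(uv_cpu_nmi.pinging, 0);
            return NMI_HANDLED;
    }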