Commit 7ccb4a66 authored by Mohan Kumar M, committed by Paul Mackerras

[POWERPC] Fix interrupt distribution in ppc970

In some of the PPC970 based systems, interrupt would be distributed to
offline cpus also even when booted with "maxcpus=1".  So check whether
cpu online map and cpu present map are equal or not.  If they are equal
default_distrib_server is used as interrupt server otherwise boot cpu
(default_server) used as interrupt server.

In addition to this, if an interrupt is assigned to a specific cpu (ie
smp affinity) and if that cpu is not online, the earlier code used to
return the default_distrib_server as interrupt server.  This
introduces an additional parameter to the get_irq function, called
strict_check.  Based on this parameter, if the cpu is not online
either default_distrib_server or -1 is returned.
Signed-off-by: Mohan Kumar M <mohan@in.ibm.com>
Cc: Michael Ellerman <michael@ellerman.id.au>
Acked-by: Milton Miller <miltonm@bga.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
parent 5a26f6bb
...@@ -156,9 +156,9 @@ static inline void lpar_qirr_info(int n_cpu , u8 value) ...@@ -156,9 +156,9 @@ static inline void lpar_qirr_info(int n_cpu , u8 value)
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
static int get_irq_server(unsigned int virq) static int get_irq_server(unsigned int virq, unsigned int strict_check)
{ {
unsigned int server; int server;
/* For the moment only implement delivery to all cpus or one cpu */ /* For the moment only implement delivery to all cpus or one cpu */
cpumask_t cpumask = irq_desc[virq].affinity; cpumask_t cpumask = irq_desc[virq].affinity;
cpumask_t tmp = CPU_MASK_NONE; cpumask_t tmp = CPU_MASK_NONE;
...@@ -166,22 +166,25 @@ static int get_irq_server(unsigned int virq) ...@@ -166,22 +166,25 @@ static int get_irq_server(unsigned int virq)
if (!distribute_irqs) if (!distribute_irqs)
return default_server; return default_server;
if (cpus_equal(cpumask, CPU_MASK_ALL)) { if (!cpus_equal(cpumask, CPU_MASK_ALL)) {
server = default_distrib_server;
} else {
cpus_and(tmp, cpu_online_map, cpumask); cpus_and(tmp, cpu_online_map, cpumask);
if (cpus_empty(tmp)) server = first_cpu(tmp);
server = default_distrib_server;
else if (server < NR_CPUS)
server = get_hard_smp_processor_id(first_cpu(tmp)); return get_hard_smp_processor_id(server);
if (strict_check)
return -1;
} }
return server; if (cpus_equal(cpu_online_map, cpu_present_map))
return default_distrib_server;
return default_server;
} }
#else #else
static int get_irq_server(unsigned int virq) static int get_irq_server(unsigned int virq, unsigned int strict_check)
{ {
return default_server; return default_server;
} }
...@@ -192,7 +195,7 @@ static void xics_unmask_irq(unsigned int virq) ...@@ -192,7 +195,7 @@ static void xics_unmask_irq(unsigned int virq)
{ {
unsigned int irq; unsigned int irq;
int call_status; int call_status;
unsigned int server; int server;
pr_debug("xics: unmask virq %d\n", virq); pr_debug("xics: unmask virq %d\n", virq);
...@@ -201,7 +204,7 @@ static void xics_unmask_irq(unsigned int virq) ...@@ -201,7 +204,7 @@ static void xics_unmask_irq(unsigned int virq)
if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
return; return;
server = get_irq_server(virq); server = get_irq_server(virq, 0);
call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server, call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server,
DEFAULT_PRIORITY); DEFAULT_PRIORITY);
...@@ -398,8 +401,7 @@ static void xics_set_affinity(unsigned int virq, cpumask_t cpumask) ...@@ -398,8 +401,7 @@ static void xics_set_affinity(unsigned int virq, cpumask_t cpumask)
unsigned int irq; unsigned int irq;
int status; int status;
int xics_status[2]; int xics_status[2];
unsigned long newmask; int irq_server;
cpumask_t tmp = CPU_MASK_NONE;
irq = (unsigned int)irq_map[virq].hwirq; irq = (unsigned int)irq_map[virq].hwirq;
if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
...@@ -413,18 +415,21 @@ static void xics_set_affinity(unsigned int virq, cpumask_t cpumask) ...@@ -413,18 +415,21 @@ static void xics_set_affinity(unsigned int virq, cpumask_t cpumask)
return; return;
} }
/* For the moment only implement delivery to all cpus or one cpu */ /*
if (cpus_equal(cpumask, CPU_MASK_ALL)) { * For the moment only implement delivery to all cpus or one cpu.
newmask = default_distrib_server; * Get current irq_server for the given irq
} else { */
cpus_and(tmp, cpu_online_map, cpumask); irq_server = get_irq_server(irq, 1);
if (cpus_empty(tmp)) if (irq_server == -1) {
return; char cpulist[128];
newmask = get_hard_smp_processor_id(first_cpu(tmp)); cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask);
printk(KERN_WARNING "xics_set_affinity: No online cpus in "
"the mask %s for irq %d\n", cpulist, virq);
return;
} }
status = rtas_call(ibm_set_xive, 3, 1, NULL, status = rtas_call(ibm_set_xive, 3, 1, NULL,
irq, newmask, xics_status[1]); irq, irq_server, xics_status[1]);
if (status) { if (status) {
printk(KERN_ERR "xics_set_affinity: irq=%u ibm,set-xive " printk(KERN_ERR "xics_set_affinity: irq=%u ibm,set-xive "
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.