Commit 3a8280de authored by Andrew Morton's avatar Andrew Morton Committed by Linus Torvalds

[PATCH] fix kirq for clustered apic mode

Patch from Dave Hansen <haveblue@us.ibm.com>

The new kirq patch assumes flat addressing APIC mode where apicid = (1
<< cpu).  This isn't true for clustered mode.

 - Change name/type of irq_balance_mask.  The type of apicid seems to
   be int.
 - Change instance of (1<<cpu) to cpu_to_logical_apicid()
 - Don't use target_cpu_mask, use min_loaded, and convert the real way

Tested on Summit, and plain SMP.  Martin Bligh and I figured this out
together, and he agrees.
parent 97ed1b30
@@ -222,7 +222,7 @@ static void set_ioapic_affinity (unsigned int irq, unsigned long mask)
 # endif
 extern unsigned long irq_affinity [NR_IRQS];
-unsigned long __cacheline_aligned irq_balance_mask [NR_IRQS];
+int __cacheline_aligned pending_irq_balance_apicid [NR_IRQS];
 static int irqbalance_disabled __initdata = 0;
 static int physical_balance = 0;
@@ -441,7 +441,7 @@ static void do_irq_balance(void)
 	Dprintk("irq = %d moved to cpu = %d\n", selected_irq, min_loaded);
 	/* mark for change destination */
 	spin_lock(&desc->lock);
-	irq_balance_mask[selected_irq] = target_cpu_mask;
+	pending_irq_balance_apicid[selected_irq] = cpu_to_logical_apicid(min_loaded);
 	spin_unlock(&desc->lock);
 	/* Since we made a change, come back sooner to
 	 * check for more variation.
@@ -500,7 +500,7 @@ static inline void balance_irq (int cpu, int irq)
 	if (cpu != new_cpu) {
 		irq_desc_t *desc = irq_desc + irq;
 		spin_lock(&desc->lock);
-		irq_balance_mask[irq] = cpu_to_logical_apicid(new_cpu);
+		pending_irq_balance_apicid[irq] = cpu_to_logical_apicid(new_cpu);
 		spin_unlock(&desc->lock);
 	}
 }
@@ -515,7 +515,7 @@ int balanced_irq(void *unused)
 	/* push everything to CPU 0 to give us a starting point. */
 	for (i = 0 ; i < NR_IRQS ; i++)
-		irq_balance_mask[i] = 1 << 0;
+		pending_irq_balance_apicid[i] = cpu_to_logical_apicid(0);
 	for (;;) {
 		set_current_state(TASK_INTERRUPTIBLE);
 		time_remaining = schedule_timeout(time_remaining);
@@ -580,9 +580,9 @@ static void set_ioapic_affinity (unsigned int irq, unsigned long mask);
 static inline void move_irq(int irq)
 {
 	/* note - we hold the desc->lock */
-	if (unlikely(irq_balance_mask[irq])) {
-		set_ioapic_affinity(irq, irq_balance_mask[irq]);
-		irq_balance_mask[irq] = 0;
+	if (unlikely(pending_irq_balance_apicid[irq])) {
+		set_ioapic_affinity(irq, pending_irq_balance_apicid[irq]);
+		pending_irq_balance_apicid[irq] = 0;
 	}
 }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment