Commit ea46825c authored by Andrew Morton, committed by Linus Torvalds

[PATCH] i386 IRQ balancing cleanup

General coding cleanups in io_apic.c:

- make pending_irq_balance_apicid[] static

- reorder several functions so that forward decls are not needed and so
  that `inline' actually works (see the sketch after this list).

- 80 col fixes.
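
As a quick illustration of the `inline' point: gcc of this era compiled one
function at a time, so a call that precedes the callee's definition is emitted
as an ordinary out-of-line call even when the prototype says `inline'. A
minimal sketch with hypothetical names (not code from this patch):

/*
 * If helper() were only forward-declared here, e.g.
 *	static inline int helper(int x);
 * the compiler would reach the call in caller() before seeing a body
 * to expand, and the `inline' hint would be ignored.  Defining
 * helper() ahead of its caller makes the body visible at the call
 * site, so the hint can actually take effect.
 */
static inline int helper(int x)
{
	return x * 2;
}

static int caller(int x)
{
	return helper(x) + 1;	/* body already seen: can be inlined */
}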
parent 61f84a1e
@@ -221,8 +221,9 @@ static void set_ioapic_affinity (unsigned int irq, unsigned long mask)
 # define Dprintk(x...)
 # endif

-extern unsigned long irq_affinity [NR_IRQS];
-int __cacheline_aligned pending_irq_balance_apicid [NR_IRQS];
+extern unsigned long irq_affinity[NR_IRQS];
+static int __cacheline_aligned pending_irq_balance_apicid[NR_IRQS];

 static int irqbalance_disabled = NO_BALANCE_IRQ;
 static int physical_balance = 0;
@@ -252,7 +253,52 @@ struct irq_cpu_info {
 long balanced_irq_interval = MAX_BALANCED_IRQ_INTERVAL;

-static inline void balance_irq(int cpu, int irq);
+static unsigned long move(int curr_cpu, unsigned long allowed_mask,
+			unsigned long now, int direction)
+{
+	int search_idle = 1;
+	int cpu = curr_cpu;
+
+	goto inside;
+
+	do {
+		if (unlikely(cpu == curr_cpu))
+			search_idle = 0;
+inside:
+		if (direction == 1) {
+			cpu++;
+			if (cpu >= NR_CPUS)
+				cpu = 0;
+		} else {
+			cpu--;
+			if (cpu == -1)
+				cpu = NR_CPUS-1;
+		}
+	} while (!cpu_online(cpu) || !IRQ_ALLOWED(cpu,allowed_mask) ||
+			(search_idle && !IDLE_ENOUGH(cpu,now)));
+
+	return cpu;
+}
+
+static inline void balance_irq(int cpu, int irq)
+{
+	unsigned long now = jiffies;
+	unsigned long allowed_mask;
+	unsigned int new_cpu;
+
+	if (irqbalance_disabled)
+		return;
+
+	allowed_mask = cpu_online_map & irq_affinity[irq];
+	new_cpu = move(cpu, allowed_mask, now, 1);
+	if (cpu != new_cpu) {
+		irq_desc_t *desc = irq_desc + irq;
+
+		spin_lock(&desc->lock);
+		pending_irq_balance_apicid[irq]=cpu_to_logical_apicid(new_cpu);
+		spin_unlock(&desc->lock);
+	}
+}
+
 static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold)
 {
@@ -263,7 +309,8 @@ static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold)
 		if (!irq_desc[j].action)
 			continue;
 		/* Is it a significant load ?  */
-		if (IRQ_DELTA(CPU_TO_PACKAGEINDEX(i),j) < useful_load_threshold)
+		if (IRQ_DELTA(CPU_TO_PACKAGEINDEX(i),j) <
+					useful_load_threshold)
 			continue;
 		balance_irq(i, j);
 	}
@@ -430,7 +477,8 @@ static void do_irq_balance(void)
 	 * We seek the least loaded sibling by making the comparison
 	 * (A+B)/2 vs B
 	 */
-	if (physical_balance && (CPU_IRQ(min_loaded) >> 1) > CPU_IRQ(cpu_sibling_map[min_loaded]))
+	if (physical_balance && (CPU_IRQ(min_loaded) >> 1) >
+			CPU_IRQ(cpu_sibling_map[min_loaded]))
 		min_loaded = cpu_sibling_map[min_loaded];

 	allowed_mask = cpu_online_map & irq_affinity[selected_irq];
@@ -438,10 +486,12 @@ static void do_irq_balance(void)
 	if (target_cpu_mask & allowed_mask) {
 		irq_desc_t *desc = irq_desc + selected_irq;

-		Dprintk("irq = %d moved to cpu = %d\n", selected_irq, min_loaded);
+		Dprintk("irq = %d moved to cpu = %d\n",
+				selected_irq, min_loaded);
 		/* mark for change destination */
 		spin_lock(&desc->lock);
-		pending_irq_balance_apicid[selected_irq] = cpu_to_logical_apicid(min_loaded);
+		pending_irq_balance_apicid[selected_irq] =
+				cpu_to_logical_apicid(min_loaded);
 		spin_unlock(&desc->lock);
 		/* Since we made a change, come back sooner to
 		 * check for more variation.
@@ -453,58 +503,16 @@ static void do_irq_balance(void)
 	goto tryanotherirq;

 not_worth_the_effort:
-	/* if we did not find an IRQ to move, then adjust the time interval upward */
+	/*
+	 * if we did not find an IRQ to move, then adjust the time interval
+	 * upward
+	 */
 	balanced_irq_interval = min((long)MAX_BALANCED_IRQ_INTERVAL,
 		balanced_irq_interval + BALANCED_IRQ_MORE_DELTA);
 	Dprintk("IRQ worth rotating not found\n");
 	return;
 }

-static unsigned long move(int curr_cpu, unsigned long allowed_mask, unsigned long now, int direction)
-{
-	int search_idle = 1;
-	int cpu = curr_cpu;
-
-	goto inside;
-
-	do {
-		if (unlikely(cpu == curr_cpu))
-			search_idle = 0;
-inside:
-		if (direction == 1) {
-			cpu++;
-			if (cpu >= NR_CPUS)
-				cpu = 0;
-		} else {
-			cpu--;
-			if (cpu == -1)
-				cpu = NR_CPUS-1;
-		}
-	} while (!cpu_online(cpu) || !IRQ_ALLOWED(cpu,allowed_mask) ||
-			(search_idle && !IDLE_ENOUGH(cpu,now)));
-
-	return cpu;
-}
-
-static inline void balance_irq (int cpu, int irq)
-{
-	unsigned long now = jiffies;
-	unsigned long allowed_mask;
-	unsigned int new_cpu;
-
-	if (irqbalance_disabled)
-		return;
-
-	allowed_mask = cpu_online_map & irq_affinity[irq];
-	new_cpu = move(cpu, allowed_mask, now, 1);
-	if (cpu != new_cpu) {
-		irq_desc_t *desc = irq_desc + irq;
-
-		spin_lock(&desc->lock);
-		pending_irq_balance_apicid[irq] = cpu_to_logical_apicid(new_cpu);
-		spin_unlock(&desc->lock);
-	}
-}
-
 int balanced_irq(void *unused)
 {
 	int i;
@@ -516,26 +524,32 @@ int balanced_irq(void *unused)
 	/* push everything to CPU 0 to give us a starting point. */
 	for (i = 0 ; i < NR_IRQS ; i++)
 		pending_irq_balance_apicid[i] = cpu_to_logical_apicid(0);

-	for (;;) {
+repeat:
 	set_current_state(TASK_INTERRUPTIBLE);
 	time_remaining = schedule_timeout(time_remaining);
 	if (time_after(jiffies, prev_balance_time+balanced_irq_interval)) {
-		Dprintk("balanced_irq: calling do_irq_balance() %lu\n", jiffies);
+		Dprintk("balanced_irq: calling do_irq_balance() %lu\n",
+			jiffies);
 		do_irq_balance();
 		prev_balance_time = jiffies;
 		time_remaining = balanced_irq_interval;
 	}
-	}
+	goto repeat;
 }

 static int __init balanced_irq_init(void)
 {
 	int i;
 	struct cpuinfo_x86 *c;

 	c = &boot_cpu_data;
 	if (irqbalance_disabled)
 		return 0;

-	/* Enable physical balance only if more than 1 physical processor is present */
+	/*
+	 * Enable physical balance only if more than 1 physical processor
+	 * is present
+	 */
 	if (smp_num_siblings > 1 && cpu_online_map >> 2)
 		physical_balance = 1;
......
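
For readers skimming the diff: the relocated move() helper in the second hunk
walks the CPU numbers as a ring starting next to the current CPU, insisting on
an idle-enough target for the first full lap and settling for any allowed
online CPU once it wraps. Below is a minimal userspace sketch of that scan;
NR_CPUS and the three stand-in predicates (replacing the kernel's
cpu_online()/IRQ_ALLOWED()/IDLE_ENOUGH() macros, with the jiffies `now'
argument dropped) are made up for illustration.

#include <stdio.h>

#define NR_CPUS 8

/* Stand-ins for the kernel predicates; arbitrary sample policies. */
static int cpu_online(int cpu)  { return cpu != 3; }         /* CPU 3 offline */
static int irq_allowed(int cpu, unsigned long mask) { return (mask >> cpu) & 1; }
static int idle_enough(int cpu) { return cpu >= 4; }         /* CPUs 4-7 idle */

static int move(int curr_cpu, unsigned long allowed_mask, int direction)
{
	int search_idle = 1;            /* first lap: idle-enough CPUs only */
	int cpu = curr_cpu;

	goto inside;
	do {
		if (cpu == curr_cpu)    /* wrapped around: relax the idle test */
			search_idle = 0;
inside:
		if (direction == 1) {
			cpu++;
			if (cpu >= NR_CPUS)
				cpu = 0;
		} else {
			cpu--;
			if (cpu == -1)
				cpu = NR_CPUS - 1;
		}
	} while (!cpu_online(cpu) || !irq_allowed(cpu, allowed_mask) ||
			(search_idle && !idle_enough(cpu)));
	return cpu;
}

int main(void)
{
	/* IRQ bound to CPUs 0-5; the first idle-enough online hit is CPU 4. */
	printf("chosen cpu = %d\n", move(0, 0x3f, 1));
	return 0;
}

Note the termination assumption: as in the kernel version, where balance_irq()
computes allowed_mask as cpu_online_map & irq_affinity[irq], the scan only
terminates if the mask contains at least one online CPU; with an empty mask it
would spin forever.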