Commit f4fbfb0d authored by Tony Luck

Pull vector-domain into release branch

parents ffc72040 bf903d0a
@@ -1885,6 +1885,9 @@ and is between 256 and 4096 characters. It is defined in the file
 			vdso=1: enable VDSO (default)
 			vdso=0: disable VDSO mapping
 
+	vector=		[IA-64,SMP]
+			vector=percpu: enable percpu vector domain
+
 	video=		[FB] Frame buffer configuration
 			See Documentation/fb/modedb.txt.
......
@@ -118,15 +118,25 @@ static DEFINE_SPINLOCK(iosapic_lock);
  * vector.
  */
 
-struct iosapic_rte_info {
-	struct list_head rte_list;	/* node in list of RTEs sharing the
-					 * same vector */
+#define NO_REF_RTE	0
+
+static struct iosapic {
 	char __iomem	*addr;		/* base address of IOSAPIC */
-	unsigned int	gsi_base;	/* first GSI assigned to this
-					 * IOSAPIC */
+	unsigned int	gsi_base;	/* GSI base */
+	unsigned short	num_rte;	/* # of RTEs on this IOSAPIC */
+	int		rtes_inuse;	/* # of RTEs in use on this IOSAPIC */
+#ifdef CONFIG_NUMA
+	unsigned short	node;		/* numa node association via pxm */
+#endif
+	spinlock_t	lock;		/* lock for indirect reg access */
+} iosapic_lists[NR_IOSAPICS];
+
+struct iosapic_rte_info {
+	struct list_head rte_list;	/* RTEs sharing the same vector */
 	char		rte_index;	/* IOSAPIC RTE index */
 	int		refcnt;		/* reference counter */
 	unsigned int	flags;		/* flags */
+	struct iosapic	*iosapic;
 } ____cacheline_aligned;
 
 static struct iosapic_intr_info {
@@ -140,24 +150,23 @@ static struct iosapic_intr_info {
 	unsigned char	polarity: 1;	/* interrupt polarity
 					 * (see iosapic.h) */
 	unsigned char	trigger	: 1;	/* trigger mode (see iosapic.h) */
-} iosapic_intr_info[IA64_NUM_VECTORS];
-
-static struct iosapic {
-	char __iomem	*addr;		/* base address of IOSAPIC */
-	unsigned int	gsi_base;	/* first GSI assigned to this
-					 * IOSAPIC */
-	unsigned short	num_rte;	/* # of RTEs on this IOSAPIC */
-	int		rtes_inuse;	/* # of RTEs in use on this IOSAPIC */
-#ifdef CONFIG_NUMA
-	unsigned short	node;		/* numa node association via pxm */
-#endif
-} iosapic_lists[NR_IOSAPICS];
+} iosapic_intr_info[NR_IRQS];
 
 static unsigned char pcat_compat __devinitdata;	/* 8259 compatibility flag */
 
 static int iosapic_kmalloc_ok;
 static LIST_HEAD(free_rte_list);
 
+static inline void
+iosapic_write(struct iosapic *iosapic, unsigned int reg, u32 val)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&iosapic->lock, flags);
+	__iosapic_write(iosapic->addr, reg, val);
+	spin_unlock_irqrestore(&iosapic->lock, flags);
+}
+
 /*
  * Find an IOSAPIC associated with a GSI
  */
@@ -175,17 +184,18 @@ find_iosapic (unsigned int gsi)
 	return -1;
 }
 
-static inline int
-_gsi_to_vector (unsigned int gsi)
+static inline int __gsi_to_irq(unsigned int gsi)
 {
+	int irq;
 	struct iosapic_intr_info *info;
 	struct iosapic_rte_info *rte;
 
-	for (info = iosapic_intr_info; info <
-		     iosapic_intr_info + IA64_NUM_VECTORS; ++info)
+	for (irq = 0; irq < NR_IRQS; irq++) {
+		info = &iosapic_intr_info[irq];
 		list_for_each_entry(rte, &info->rtes, rte_list)
-			if (rte->gsi_base + rte->rte_index == gsi)
-				return info - iosapic_intr_info;
+			if (rte->iosapic->gsi_base + rte->rte_index == gsi)
+				return irq;
+	}
 	return -1;
 }
 
@@ -196,7 +206,10 @@ _gsi_to_vector (unsigned int gsi)
 inline int
 gsi_to_vector (unsigned int gsi)
 {
-	return _gsi_to_vector(gsi);
+	int irq = __gsi_to_irq(gsi);
+	if (check_irq_used(irq) < 0)
+		return -1;
+	return irq_to_vector(irq);
 }
 
 int
...@@ -204,66 +217,48 @@ gsi_to_irq (unsigned int gsi) ...@@ -204,66 +217,48 @@ gsi_to_irq (unsigned int gsi)
{ {
unsigned long flags; unsigned long flags;
int irq; int irq;
/*
* XXX fix me: this assumes an identity mapping between IA-64 vector
* and Linux irq numbers...
*/
spin_lock_irqsave(&iosapic_lock, flags); spin_lock_irqsave(&iosapic_lock, flags);
{ irq = __gsi_to_irq(gsi);
irq = _gsi_to_vector(gsi);
}
spin_unlock_irqrestore(&iosapic_lock, flags); spin_unlock_irqrestore(&iosapic_lock, flags);
return irq; return irq;
} }
static struct iosapic_rte_info *gsi_vector_to_rte(unsigned int gsi, static struct iosapic_rte_info *find_rte(unsigned int irq, unsigned int gsi)
unsigned int vec)
{ {
struct iosapic_rte_info *rte; struct iosapic_rte_info *rte;
list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list) list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list)
if (rte->gsi_base + rte->rte_index == gsi) if (rte->iosapic->gsi_base + rte->rte_index == gsi)
return rte; return rte;
return NULL; return NULL;
} }
static void static void
set_rte (unsigned int gsi, unsigned int vector, unsigned int dest, int mask) set_rte (unsigned int gsi, unsigned int irq, unsigned int dest, int mask)
{ {
unsigned long pol, trigger, dmode; unsigned long pol, trigger, dmode;
u32 low32, high32; u32 low32, high32;
char __iomem *addr;
int rte_index; int rte_index;
char redir; char redir;
struct iosapic_rte_info *rte; struct iosapic_rte_info *rte;
ia64_vector vector = irq_to_vector(irq);
DBG(KERN_DEBUG"IOSAPIC: routing vector %d to 0x%x\n", vector, dest); DBG(KERN_DEBUG"IOSAPIC: routing vector %d to 0x%x\n", vector, dest);
rte = gsi_vector_to_rte(gsi, vector); rte = find_rte(irq, gsi);
if (!rte) if (!rte)
return; /* not an IOSAPIC interrupt */ return; /* not an IOSAPIC interrupt */
rte_index = rte->rte_index; rte_index = rte->rte_index;
addr = rte->addr; pol = iosapic_intr_info[irq].polarity;
pol = iosapic_intr_info[vector].polarity; trigger = iosapic_intr_info[irq].trigger;
trigger = iosapic_intr_info[vector].trigger; dmode = iosapic_intr_info[irq].dmode;
dmode = iosapic_intr_info[vector].dmode;
redir = (dmode == IOSAPIC_LOWEST_PRIORITY) ? 1 : 0; redir = (dmode == IOSAPIC_LOWEST_PRIORITY) ? 1 : 0;
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
{ set_irq_affinity_info(irq, (int)(dest & 0xffff), redir);
unsigned int irq;
for (irq = 0; irq < NR_IRQS; ++irq)
if (irq_to_vector(irq) == vector) {
set_irq_affinity_info(irq,
(int)(dest & 0xffff),
redir);
break;
}
}
#endif #endif
low32 = ((pol << IOSAPIC_POLARITY_SHIFT) | low32 = ((pol << IOSAPIC_POLARITY_SHIFT) |
...@@ -275,10 +270,10 @@ set_rte (unsigned int gsi, unsigned int vector, unsigned int dest, int mask) ...@@ -275,10 +270,10 @@ set_rte (unsigned int gsi, unsigned int vector, unsigned int dest, int mask)
/* dest contains both id and eid */ /* dest contains both id and eid */
high32 = (dest << IOSAPIC_DEST_SHIFT); high32 = (dest << IOSAPIC_DEST_SHIFT);
iosapic_write(addr, IOSAPIC_RTE_HIGH(rte_index), high32); iosapic_write(rte->iosapic, IOSAPIC_RTE_HIGH(rte_index), high32);
iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32); iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
iosapic_intr_info[vector].low32 = low32; iosapic_intr_info[irq].low32 = low32;
iosapic_intr_info[vector].dest = dest; iosapic_intr_info[irq].dest = dest;
} }
static void static void
...@@ -294,15 +289,18 @@ kexec_disable_iosapic(void) ...@@ -294,15 +289,18 @@ kexec_disable_iosapic(void)
{ {
struct iosapic_intr_info *info; struct iosapic_intr_info *info;
struct iosapic_rte_info *rte; struct iosapic_rte_info *rte;
u8 vec = 0; ia64_vector vec;
for (info = iosapic_intr_info; info < int irq;
iosapic_intr_info + IA64_NUM_VECTORS; ++info, ++vec) {
for (irq = 0; irq < NR_IRQS; irq++) {
info = &iosapic_intr_info[irq];
vec = irq_to_vector(irq);
list_for_each_entry(rte, &info->rtes, list_for_each_entry(rte, &info->rtes,
rte_list) { rte_list) {
iosapic_write(rte->addr, iosapic_write(rte->iosapic,
IOSAPIC_RTE_LOW(rte->rte_index), IOSAPIC_RTE_LOW(rte->rte_index),
IOSAPIC_MASK|vec); IOSAPIC_MASK|vec);
iosapic_eoi(rte->addr, vec); iosapic_eoi(rte->iosapic->addr, vec);
} }
} }
} }
...@@ -311,54 +309,36 @@ kexec_disable_iosapic(void) ...@@ -311,54 +309,36 @@ kexec_disable_iosapic(void)
static void static void
mask_irq (unsigned int irq) mask_irq (unsigned int irq)
{ {
unsigned long flags;
char __iomem *addr;
u32 low32; u32 low32;
int rte_index; int rte_index;
ia64_vector vec = irq_to_vector(irq);
struct iosapic_rte_info *rte; struct iosapic_rte_info *rte;
if (list_empty(&iosapic_intr_info[vec].rtes)) if (list_empty(&iosapic_intr_info[irq].rtes))
return; /* not an IOSAPIC interrupt! */ return; /* not an IOSAPIC interrupt! */
spin_lock_irqsave(&iosapic_lock, flags); /* set only the mask bit */
{ low32 = iosapic_intr_info[irq].low32 |= IOSAPIC_MASK;
/* set only the mask bit */ list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) {
low32 = iosapic_intr_info[vec].low32 |= IOSAPIC_MASK; rte_index = rte->rte_index;
list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
rte_list) {
addr = rte->addr;
rte_index = rte->rte_index;
iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
}
} }
spin_unlock_irqrestore(&iosapic_lock, flags);
} }
static void static void
unmask_irq (unsigned int irq) unmask_irq (unsigned int irq)
{ {
unsigned long flags;
char __iomem *addr;
u32 low32; u32 low32;
int rte_index; int rte_index;
ia64_vector vec = irq_to_vector(irq);
struct iosapic_rte_info *rte; struct iosapic_rte_info *rte;
if (list_empty(&iosapic_intr_info[vec].rtes)) if (list_empty(&iosapic_intr_info[irq].rtes))
return; /* not an IOSAPIC interrupt! */ return; /* not an IOSAPIC interrupt! */
spin_lock_irqsave(&iosapic_lock, flags); low32 = iosapic_intr_info[irq].low32 &= ~IOSAPIC_MASK;
{ list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) {
low32 = iosapic_intr_info[vec].low32 &= ~IOSAPIC_MASK; rte_index = rte->rte_index;
list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
rte_list) {
addr = rte->addr;
rte_index = rte->rte_index;
iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
}
} }
spin_unlock_irqrestore(&iosapic_lock, flags);
} }
...@@ -366,23 +346,24 @@ static void ...@@ -366,23 +346,24 @@ static void
iosapic_set_affinity (unsigned int irq, cpumask_t mask) iosapic_set_affinity (unsigned int irq, cpumask_t mask)
{ {
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
unsigned long flags;
u32 high32, low32; u32 high32, low32;
int dest, rte_index; int dest, rte_index;
char __iomem *addr;
int redir = (irq & IA64_IRQ_REDIRECTED) ? 1 : 0; int redir = (irq & IA64_IRQ_REDIRECTED) ? 1 : 0;
ia64_vector vec;
struct iosapic_rte_info *rte; struct iosapic_rte_info *rte;
struct iosapic *iosapic;
irq &= (~IA64_IRQ_REDIRECTED); irq &= (~IA64_IRQ_REDIRECTED);
vec = irq_to_vector(irq);
cpus_and(mask, mask, cpu_online_map);
if (cpus_empty(mask)) if (cpus_empty(mask))
return; return;
if (reassign_irq_vector(irq, first_cpu(mask)))
return;
dest = cpu_physical_id(first_cpu(mask)); dest = cpu_physical_id(first_cpu(mask));
if (list_empty(&iosapic_intr_info[vec].rtes)) if (list_empty(&iosapic_intr_info[irq].rtes))
return; /* not an IOSAPIC interrupt */ return; /* not an IOSAPIC interrupt */
set_irq_affinity_info(irq, dest, redir); set_irq_affinity_info(irq, dest, redir);
...@@ -390,31 +371,24 @@ iosapic_set_affinity (unsigned int irq, cpumask_t mask) ...@@ -390,31 +371,24 @@ iosapic_set_affinity (unsigned int irq, cpumask_t mask)
/* dest contains both id and eid */ /* dest contains both id and eid */
high32 = dest << IOSAPIC_DEST_SHIFT; high32 = dest << IOSAPIC_DEST_SHIFT;
spin_lock_irqsave(&iosapic_lock, flags); low32 = iosapic_intr_info[irq].low32 & ~(7 << IOSAPIC_DELIVERY_SHIFT);
{ if (redir)
low32 = iosapic_intr_info[vec].low32 & /* change delivery mode to lowest priority */
~(7 << IOSAPIC_DELIVERY_SHIFT); low32 |= (IOSAPIC_LOWEST_PRIORITY << IOSAPIC_DELIVERY_SHIFT);
else
if (redir) /* change delivery mode to fixed */
/* change delivery mode to lowest priority */ low32 |= (IOSAPIC_FIXED << IOSAPIC_DELIVERY_SHIFT);
low32 |= (IOSAPIC_LOWEST_PRIORITY << low32 &= IOSAPIC_VECTOR_MASK;
IOSAPIC_DELIVERY_SHIFT); low32 |= irq_to_vector(irq);
else
/* change delivery mode to fixed */ iosapic_intr_info[irq].low32 = low32;
low32 |= (IOSAPIC_FIXED << IOSAPIC_DELIVERY_SHIFT); iosapic_intr_info[irq].dest = dest;
list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list) {
iosapic_intr_info[vec].low32 = low32; iosapic = rte->iosapic;
iosapic_intr_info[vec].dest = dest; rte_index = rte->rte_index;
list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, iosapic_write(iosapic, IOSAPIC_RTE_HIGH(rte_index), high32);
rte_list) { iosapic_write(iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
addr = rte->addr;
rte_index = rte->rte_index;
iosapic_write(addr, IOSAPIC_RTE_HIGH(rte_index),
high32);
iosapic_write(addr, IOSAPIC_RTE_LOW(rte_index), low32);
}
} }
spin_unlock_irqrestore(&iosapic_lock, flags);
#endif #endif
} }
...@@ -434,10 +408,20 @@ iosapic_end_level_irq (unsigned int irq) ...@@ -434,10 +408,20 @@ iosapic_end_level_irq (unsigned int irq)
{ {
ia64_vector vec = irq_to_vector(irq); ia64_vector vec = irq_to_vector(irq);
struct iosapic_rte_info *rte; struct iosapic_rte_info *rte;
int do_unmask_irq = 0;
move_native_irq(irq); if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) {
list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list) do_unmask_irq = 1;
iosapic_eoi(rte->addr, vec); mask_irq(irq);
}
list_for_each_entry(rte, &iosapic_intr_info[irq].rtes, rte_list)
iosapic_eoi(rte->iosapic->addr, vec);
if (unlikely(do_unmask_irq)) {
move_masked_irq(irq);
unmask_irq(irq);
}
} }
#define iosapic_shutdown_level_irq mask_irq #define iosapic_shutdown_level_irq mask_irq
...@@ -519,13 +503,12 @@ iosapic_version (char __iomem *addr) ...@@ -519,13 +503,12 @@ iosapic_version (char __iomem *addr)
* unsigned int reserved2 : 8; * unsigned int reserved2 : 8;
* } * }
*/ */
return iosapic_read(addr, IOSAPIC_VERSION); return __iosapic_read(addr, IOSAPIC_VERSION);
} }
static int iosapic_find_sharable_vector (unsigned long trigger, static int iosapic_find_sharable_irq(unsigned long trigger, unsigned long pol)
unsigned long pol)
{ {
int i, vector = -1, min_count = -1; int i, irq = -ENOSPC, min_count = -1;
struct iosapic_intr_info *info; struct iosapic_intr_info *info;
/* /*
...@@ -533,21 +516,21 @@ static int iosapic_find_sharable_vector (unsigned long trigger, ...@@ -533,21 +516,21 @@ static int iosapic_find_sharable_vector (unsigned long trigger,
* supported yet * supported yet
*/ */
if (trigger == IOSAPIC_EDGE) if (trigger == IOSAPIC_EDGE)
return -1; return -EINVAL;
for (i = IA64_FIRST_DEVICE_VECTOR; i <= IA64_LAST_DEVICE_VECTOR; i++) { for (i = 0; i <= NR_IRQS; i++) {
info = &iosapic_intr_info[i]; info = &iosapic_intr_info[i];
if (info->trigger == trigger && info->polarity == pol && if (info->trigger == trigger && info->polarity == pol &&
(info->dmode == IOSAPIC_FIXED || info->dmode == (info->dmode == IOSAPIC_FIXED ||
IOSAPIC_LOWEST_PRIORITY)) { info->dmode == IOSAPIC_LOWEST_PRIORITY) &&
can_request_irq(i, IRQF_SHARED)) {
if (min_count == -1 || info->count < min_count) { if (min_count == -1 || info->count < min_count) {
vector = i; irq = i;
min_count = info->count; min_count = info->count;
} }
} }
} }
return irq;
return vector;
} }
/* /*
...@@ -555,25 +538,25 @@ static int iosapic_find_sharable_vector (unsigned long trigger, ...@@ -555,25 +538,25 @@ static int iosapic_find_sharable_vector (unsigned long trigger,
* assign a new vector for the other and make the vector available * assign a new vector for the other and make the vector available
*/ */
static void __init static void __init
iosapic_reassign_vector (int vector) iosapic_reassign_vector (int irq)
{ {
int new_vector; int new_irq;
if (!list_empty(&iosapic_intr_info[vector].rtes)) { if (!list_empty(&iosapic_intr_info[irq].rtes)) {
new_vector = assign_irq_vector(AUTO_ASSIGN); new_irq = create_irq();
if (new_vector < 0) if (new_irq < 0)
panic("%s: out of interrupt vectors!\n", __FUNCTION__); panic("%s: out of interrupt vectors!\n", __FUNCTION__);
printk(KERN_INFO "Reassigning vector %d to %d\n", printk(KERN_INFO "Reassigning vector %d to %d\n",
vector, new_vector); irq_to_vector(irq), irq_to_vector(new_irq));
memcpy(&iosapic_intr_info[new_vector], &iosapic_intr_info[vector], memcpy(&iosapic_intr_info[new_irq], &iosapic_intr_info[irq],
sizeof(struct iosapic_intr_info)); sizeof(struct iosapic_intr_info));
INIT_LIST_HEAD(&iosapic_intr_info[new_vector].rtes); INIT_LIST_HEAD(&iosapic_intr_info[new_irq].rtes);
list_move(iosapic_intr_info[vector].rtes.next, list_move(iosapic_intr_info[irq].rtes.next,
&iosapic_intr_info[new_vector].rtes); &iosapic_intr_info[new_irq].rtes);
memset(&iosapic_intr_info[vector], 0, memset(&iosapic_intr_info[irq], 0,
sizeof(struct iosapic_intr_info)); sizeof(struct iosapic_intr_info));
iosapic_intr_info[vector].low32 = IOSAPIC_MASK; iosapic_intr_info[irq].low32 = IOSAPIC_MASK;
INIT_LIST_HEAD(&iosapic_intr_info[vector].rtes); INIT_LIST_HEAD(&iosapic_intr_info[irq].rtes);
} }
} }
...@@ -610,29 +593,18 @@ static struct iosapic_rte_info *iosapic_alloc_rte (void) ...@@ -610,29 +593,18 @@ static struct iosapic_rte_info *iosapic_alloc_rte (void)
return rte; return rte;
} }
static void iosapic_free_rte (struct iosapic_rte_info *rte) static inline int irq_is_shared (int irq)
{ {
if (rte->flags & RTE_PREALLOCATED) return (iosapic_intr_info[irq].count > 1);
list_add_tail(&rte->rte_list, &free_rte_list);
else
kfree(rte);
}
static inline int vector_is_shared (int vector)
{
return (iosapic_intr_info[vector].count > 1);
} }
static int static int
register_intr (unsigned int gsi, int vector, unsigned char delivery, register_intr (unsigned int gsi, int irq, unsigned char delivery,
unsigned long polarity, unsigned long trigger) unsigned long polarity, unsigned long trigger)
{ {
irq_desc_t *idesc; irq_desc_t *idesc;
struct hw_interrupt_type *irq_type; struct hw_interrupt_type *irq_type;
int rte_index;
int index; int index;
unsigned long gsi_base;
void __iomem *iosapic_address;
struct iosapic_rte_info *rte; struct iosapic_rte_info *rte;
index = find_iosapic(gsi); index = find_iosapic(gsi);
...@@ -642,10 +614,7 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery, ...@@ -642,10 +614,7 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery,
return -ENODEV; return -ENODEV;
} }
iosapic_address = iosapic_lists[index].addr; rte = find_rte(irq, gsi);
gsi_base = iosapic_lists[index].gsi_base;
rte = gsi_vector_to_rte(gsi, vector);
if (!rte) { if (!rte) {
rte = iosapic_alloc_rte(); rte = iosapic_alloc_rte();
if (!rte) { if (!rte) {
...@@ -654,40 +623,42 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery, ...@@ -654,40 +623,42 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery,
return -ENOMEM; return -ENOMEM;
} }
rte_index = gsi - gsi_base; rte->iosapic = &iosapic_lists[index];
rte->rte_index = rte_index; rte->rte_index = gsi - rte->iosapic->gsi_base;
rte->addr = iosapic_address;
rte->gsi_base = gsi_base;
rte->refcnt++; rte->refcnt++;
list_add_tail(&rte->rte_list, &iosapic_intr_info[vector].rtes); list_add_tail(&rte->rte_list, &iosapic_intr_info[irq].rtes);
iosapic_intr_info[vector].count++; iosapic_intr_info[irq].count++;
iosapic_lists[index].rtes_inuse++; iosapic_lists[index].rtes_inuse++;
} }
else if (vector_is_shared(vector)) { else if (rte->refcnt == NO_REF_RTE) {
struct iosapic_intr_info *info = &iosapic_intr_info[vector]; struct iosapic_intr_info *info = &iosapic_intr_info[irq];
if (info->trigger != trigger || info->polarity != polarity) { if (info->count > 0 &&
(info->trigger != trigger || info->polarity != polarity)){
printk (KERN_WARNING printk (KERN_WARNING
"%s: cannot override the interrupt\n", "%s: cannot override the interrupt\n",
__FUNCTION__); __FUNCTION__);
return -EINVAL; return -EINVAL;
} }
rte->refcnt++;
iosapic_intr_info[irq].count++;
iosapic_lists[index].rtes_inuse++;
} }
iosapic_intr_info[vector].polarity = polarity; iosapic_intr_info[irq].polarity = polarity;
iosapic_intr_info[vector].dmode = delivery; iosapic_intr_info[irq].dmode = delivery;
iosapic_intr_info[vector].trigger = trigger; iosapic_intr_info[irq].trigger = trigger;
if (trigger == IOSAPIC_EDGE) if (trigger == IOSAPIC_EDGE)
irq_type = &irq_type_iosapic_edge; irq_type = &irq_type_iosapic_edge;
else else
irq_type = &irq_type_iosapic_level; irq_type = &irq_type_iosapic_level;
idesc = irq_desc + vector; idesc = irq_desc + irq;
if (idesc->chip != irq_type) { if (idesc->chip != irq_type) {
if (idesc->chip != &no_irq_type) if (idesc->chip != &no_irq_type)
printk(KERN_WARNING printk(KERN_WARNING
"%s: changing vector %d from %s to %s\n", "%s: changing vector %d from %s to %s\n",
__FUNCTION__, vector, __FUNCTION__, irq_to_vector(irq),
idesc->chip->name, irq_type->name); idesc->chip->name, irq_type->name);
idesc->chip = irq_type; idesc->chip = irq_type;
} }
...@@ -695,18 +666,19 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery, ...@@ -695,18 +666,19 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery,
} }
static unsigned int static unsigned int
get_target_cpu (unsigned int gsi, int vector) get_target_cpu (unsigned int gsi, int irq)
{ {
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
static int cpu = -1; static int cpu = -1;
extern int cpe_vector; extern int cpe_vector;
cpumask_t domain = irq_to_domain(irq);
/* /*
* In case of vector shared by multiple RTEs, all RTEs that * In case of vector shared by multiple RTEs, all RTEs that
* share the vector need to use the same destination CPU. * share the vector need to use the same destination CPU.
*/ */
if (!list_empty(&iosapic_intr_info[vector].rtes)) if (!list_empty(&iosapic_intr_info[irq].rtes))
return iosapic_intr_info[vector].dest; return iosapic_intr_info[irq].dest;
/* /*
* If the platform supports redirection via XTP, let it * If the platform supports redirection via XTP, let it
...@@ -723,7 +695,7 @@ get_target_cpu (unsigned int gsi, int vector) ...@@ -723,7 +695,7 @@ get_target_cpu (unsigned int gsi, int vector)
return cpu_physical_id(smp_processor_id()); return cpu_physical_id(smp_processor_id());
#ifdef CONFIG_ACPI #ifdef CONFIG_ACPI
if (cpe_vector > 0 && vector == IA64_CPEP_VECTOR) if (cpe_vector > 0 && irq_to_vector(irq) == IA64_CPEP_VECTOR)
return get_cpei_target_cpu(); return get_cpei_target_cpu();
#endif #endif
...@@ -738,7 +710,7 @@ get_target_cpu (unsigned int gsi, int vector) ...@@ -738,7 +710,7 @@ get_target_cpu (unsigned int gsi, int vector)
goto skip_numa_setup; goto skip_numa_setup;
cpu_mask = node_to_cpumask(iosapic_lists[iosapic_index].node); cpu_mask = node_to_cpumask(iosapic_lists[iosapic_index].node);
cpus_and(cpu_mask, cpu_mask, domain);
for_each_cpu_mask(numa_cpu, cpu_mask) { for_each_cpu_mask(numa_cpu, cpu_mask) {
if (!cpu_online(numa_cpu)) if (!cpu_online(numa_cpu))
cpu_clear(numa_cpu, cpu_mask); cpu_clear(numa_cpu, cpu_mask);
...@@ -749,8 +721,8 @@ get_target_cpu (unsigned int gsi, int vector) ...@@ -749,8 +721,8 @@ get_target_cpu (unsigned int gsi, int vector)
if (!num_cpus) if (!num_cpus)
goto skip_numa_setup; goto skip_numa_setup;
/* Use vector assignment to distribute across cpus in node */ /* Use irq assignment to distribute across cpus in node */
cpu_index = vector % num_cpus; cpu_index = irq % num_cpus;
for (numa_cpu = first_cpu(cpu_mask) ; i < cpu_index ; i++) for (numa_cpu = first_cpu(cpu_mask) ; i < cpu_index ; i++)
numa_cpu = next_cpu(numa_cpu, cpu_mask); numa_cpu = next_cpu(numa_cpu, cpu_mask);
...@@ -768,7 +740,7 @@ get_target_cpu (unsigned int gsi, int vector) ...@@ -768,7 +740,7 @@ get_target_cpu (unsigned int gsi, int vector)
do { do {
if (++cpu >= NR_CPUS) if (++cpu >= NR_CPUS)
cpu = 0; cpu = 0;
} while (!cpu_online(cpu)); } while (!cpu_online(cpu) || !cpu_isset(cpu, domain));
return cpu_physical_id(cpu); return cpu_physical_id(cpu);
#else /* CONFIG_SMP */ #else /* CONFIG_SMP */
...@@ -785,84 +757,72 @@ int ...@@ -785,84 +757,72 @@ int
iosapic_register_intr (unsigned int gsi, iosapic_register_intr (unsigned int gsi,
unsigned long polarity, unsigned long trigger) unsigned long polarity, unsigned long trigger)
{ {
int vector, mask = 1, err; int irq, mask = 1, err;
unsigned int dest; unsigned int dest;
unsigned long flags; unsigned long flags;
struct iosapic_rte_info *rte; struct iosapic_rte_info *rte;
u32 low32; u32 low32;
again:
/* /*
* If this GSI has already been registered (i.e., it's a * If this GSI has already been registered (i.e., it's a
* shared interrupt, or we lost a race to register it), * shared interrupt, or we lost a race to register it),
* don't touch the RTE. * don't touch the RTE.
*/ */
spin_lock_irqsave(&iosapic_lock, flags); spin_lock_irqsave(&iosapic_lock, flags);
{ irq = __gsi_to_irq(gsi);
vector = gsi_to_vector(gsi); if (irq > 0) {
if (vector > 0) { rte = find_rte(irq, gsi);
rte = gsi_vector_to_rte(gsi, vector); if(iosapic_intr_info[irq].count == 0) {
assign_irq_vector(irq);
dynamic_irq_init(irq);
} else if (rte->refcnt != NO_REF_RTE) {
rte->refcnt++; rte->refcnt++;
spin_unlock_irqrestore(&iosapic_lock, flags); goto unlock_iosapic_lock;
return vector;
} }
} } else
spin_unlock_irqrestore(&iosapic_lock, flags); irq = create_irq();
/* If vector is running out, we try to find a sharable vector */ /* If vector is running out, we try to find a sharable vector */
vector = assign_irq_vector(AUTO_ASSIGN); if (irq < 0) {
if (vector < 0) { irq = iosapic_find_sharable_irq(trigger, polarity);
vector = iosapic_find_sharable_vector(trigger, polarity); if (irq < 0)
if (vector < 0) goto unlock_iosapic_lock;
return -ENOSPC;
} }
spin_lock_irqsave(&irq_desc[vector].lock, flags); spin_lock(&irq_desc[irq].lock);
spin_lock(&iosapic_lock); dest = get_target_cpu(gsi, irq);
{ err = register_intr(gsi, irq, IOSAPIC_LOWEST_PRIORITY,
if (gsi_to_vector(gsi) > 0) { polarity, trigger);
if (list_empty(&iosapic_intr_info[vector].rtes)) if (err < 0) {
free_irq_vector(vector); irq = err;
spin_unlock(&iosapic_lock); goto unlock_all;
spin_unlock_irqrestore(&irq_desc[vector].lock,
flags);
goto again;
}
dest = get_target_cpu(gsi, vector);
err = register_intr(gsi, vector, IOSAPIC_LOWEST_PRIORITY,
polarity, trigger);
if (err < 0) {
spin_unlock(&iosapic_lock);
spin_unlock_irqrestore(&irq_desc[vector].lock,
flags);
return err;
}
/*
* If the vector is shared and already unmasked for
* other interrupt sources, don't mask it.
*/
low32 = iosapic_intr_info[vector].low32;
if (vector_is_shared(vector) && !(low32 & IOSAPIC_MASK))
mask = 0;
set_rte(gsi, vector, dest, mask);
} }
spin_unlock(&iosapic_lock);
spin_unlock_irqrestore(&irq_desc[vector].lock, flags); /*
* If the vector is shared and already unmasked for other
* interrupt sources, don't mask it.
*/
low32 = iosapic_intr_info[irq].low32;
if (irq_is_shared(irq) && !(low32 & IOSAPIC_MASK))
mask = 0;
set_rte(gsi, irq, dest, mask);
printk(KERN_INFO "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d\n", printk(KERN_INFO "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d\n",
gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"), gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
(polarity == IOSAPIC_POL_HIGH ? "high" : "low"), (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
cpu_logical_id(dest), dest, vector); cpu_logical_id(dest), dest, irq_to_vector(irq));
unlock_all:
return vector; spin_unlock(&irq_desc[irq].lock);
unlock_iosapic_lock:
spin_unlock_irqrestore(&iosapic_lock, flags);
return irq;
} }
void void
iosapic_unregister_intr (unsigned int gsi) iosapic_unregister_intr (unsigned int gsi)
{ {
unsigned long flags; unsigned long flags;
int irq, vector, index; int irq, index;
irq_desc_t *idesc; irq_desc_t *idesc;
u32 low32; u32 low32;
unsigned long trigger, polarity; unsigned long trigger, polarity;
...@@ -881,78 +841,56 @@ iosapic_unregister_intr (unsigned int gsi) ...@@ -881,78 +841,56 @@ iosapic_unregister_intr (unsigned int gsi)
WARN_ON(1); WARN_ON(1);
return; return;
} }
vector = irq_to_vector(irq);
idesc = irq_desc + irq; spin_lock_irqsave(&iosapic_lock, flags);
spin_lock_irqsave(&idesc->lock, flags); if ((rte = find_rte(irq, gsi)) == NULL) {
spin_lock(&iosapic_lock); printk(KERN_ERR "iosapic_unregister_intr(%u) unbalanced\n",
{ gsi);
if ((rte = gsi_vector_to_rte(gsi, vector)) == NULL) { WARN_ON(1);
printk(KERN_ERR goto out;
"iosapic_unregister_intr(%u) unbalanced\n", }
gsi);
WARN_ON(1);
goto out;
}
if (--rte->refcnt > 0) if (--rte->refcnt > 0)
goto out; goto out;
/* Mask the interrupt */ idesc = irq_desc + irq;
low32 = iosapic_intr_info[vector].low32 | IOSAPIC_MASK; rte->refcnt = NO_REF_RTE;
iosapic_write(rte->addr, IOSAPIC_RTE_LOW(rte->rte_index),
low32);
/* Remove the rte entry from the list */ /* Mask the interrupt */
list_del(&rte->rte_list); low32 = iosapic_intr_info[irq].low32 | IOSAPIC_MASK;
iosapic_intr_info[vector].count--; iosapic_write(rte->iosapic, IOSAPIC_RTE_LOW(rte->rte_index), low32);
iosapic_free_rte(rte);
index = find_iosapic(gsi);
iosapic_lists[index].rtes_inuse--;
WARN_ON(iosapic_lists[index].rtes_inuse < 0);
trigger = iosapic_intr_info[vector].trigger;
polarity = iosapic_intr_info[vector].polarity;
dest = iosapic_intr_info[vector].dest;
printk(KERN_INFO
"GSI %u (%s, %s) -> CPU %d (0x%04x)"
" vector %d unregistered\n",
gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
(polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
cpu_logical_id(dest), dest, vector);
if (list_empty(&iosapic_intr_info[vector].rtes)) { iosapic_intr_info[irq].count--;
/* Sanity check */ index = find_iosapic(gsi);
BUG_ON(iosapic_intr_info[vector].count); iosapic_lists[index].rtes_inuse--;
WARN_ON(iosapic_lists[index].rtes_inuse < 0);
/* Clear the interrupt controller descriptor */ trigger = iosapic_intr_info[irq].trigger;
idesc->chip = &no_irq_type; polarity = iosapic_intr_info[irq].polarity;
dest = iosapic_intr_info[irq].dest;
printk(KERN_INFO
"GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d unregistered\n",
gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
(polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
cpu_logical_id(dest), dest, irq_to_vector(irq));
if (iosapic_intr_info[irq].count == 0) {
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
/* Clear affinity */ /* Clear affinity */
cpus_setall(idesc->affinity); cpus_setall(idesc->affinity);
#endif #endif
/* Clear the interrupt information */
/* Clear the interrupt information */ iosapic_intr_info[irq].dest = 0;
memset(&iosapic_intr_info[vector], 0, iosapic_intr_info[irq].dmode = 0;
sizeof(struct iosapic_intr_info)); iosapic_intr_info[irq].polarity = 0;
iosapic_intr_info[vector].low32 |= IOSAPIC_MASK; iosapic_intr_info[irq].trigger = 0;
INIT_LIST_HEAD(&iosapic_intr_info[vector].rtes); iosapic_intr_info[irq].low32 |= IOSAPIC_MASK;
if (idesc->action) { /* Destroy and reserve IRQ */
printk(KERN_ERR destroy_and_reserve_irq(irq);
"interrupt handlers still exist on"
"IRQ %u\n", irq);
WARN_ON(1);
}
/* Free the interrupt vector */
free_irq_vector(vector);
}
} }
out: out:
spin_unlock(&iosapic_lock); spin_unlock_irqrestore(&iosapic_lock, flags);
spin_unlock_irqrestore(&idesc->lock, flags);
} }
/* /*
...@@ -965,27 +903,30 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi, ...@@ -965,27 +903,30 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
{ {
static const char * const name[] = {"unknown", "PMI", "INIT", "CPEI"}; static const char * const name[] = {"unknown", "PMI", "INIT", "CPEI"};
unsigned char delivery; unsigned char delivery;
int vector, mask = 0; int irq, vector, mask = 0;
unsigned int dest = ((id << 8) | eid) & 0xffff; unsigned int dest = ((id << 8) | eid) & 0xffff;
switch (int_type) { switch (int_type) {
case ACPI_INTERRUPT_PMI: case ACPI_INTERRUPT_PMI:
vector = iosapic_vector; irq = vector = iosapic_vector;
bind_irq_vector(irq, vector, CPU_MASK_ALL);
/* /*
* since PMI vector is alloc'd by FW(ACPI) not by kernel, * since PMI vector is alloc'd by FW(ACPI) not by kernel,
* we need to make sure the vector is available * we need to make sure the vector is available
*/ */
iosapic_reassign_vector(vector); iosapic_reassign_vector(irq);
delivery = IOSAPIC_PMI; delivery = IOSAPIC_PMI;
break; break;
case ACPI_INTERRUPT_INIT: case ACPI_INTERRUPT_INIT:
vector = assign_irq_vector(AUTO_ASSIGN); irq = create_irq();
if (vector < 0) if (irq < 0)
panic("%s: out of interrupt vectors!\n", __FUNCTION__); panic("%s: out of interrupt vectors!\n", __FUNCTION__);
vector = irq_to_vector(irq);
delivery = IOSAPIC_INIT; delivery = IOSAPIC_INIT;
break; break;
case ACPI_INTERRUPT_CPEI: case ACPI_INTERRUPT_CPEI:
vector = IA64_CPE_VECTOR; irq = vector = IA64_CPE_VECTOR;
BUG_ON(bind_irq_vector(irq, vector, CPU_MASK_ALL));
delivery = IOSAPIC_LOWEST_PRIORITY; delivery = IOSAPIC_LOWEST_PRIORITY;
mask = 1; mask = 1;
break; break;
...@@ -995,7 +936,7 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi, ...@@ -995,7 +936,7 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
return -1; return -1;
} }
register_intr(gsi, vector, delivery, polarity, trigger); register_intr(gsi, irq, delivery, polarity, trigger);
printk(KERN_INFO printk(KERN_INFO
"PLATFORM int %s (0x%x): GSI %u (%s, %s) -> CPU %d (0x%04x)" "PLATFORM int %s (0x%x): GSI %u (%s, %s) -> CPU %d (0x%04x)"
...@@ -1005,7 +946,7 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi, ...@@ -1005,7 +946,7 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi,
(polarity == IOSAPIC_POL_HIGH ? "high" : "low"), (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
cpu_logical_id(dest), dest, vector); cpu_logical_id(dest), dest, vector);
set_rte(gsi, vector, dest, mask); set_rte(gsi, irq, dest, mask);
return vector; return vector;
} }
...@@ -1017,30 +958,32 @@ iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi, ...@@ -1017,30 +958,32 @@ iosapic_override_isa_irq (unsigned int isa_irq, unsigned int gsi,
unsigned long polarity, unsigned long polarity,
unsigned long trigger) unsigned long trigger)
{ {
int vector; int vector, irq;
unsigned int dest = cpu_physical_id(smp_processor_id()); unsigned int dest = cpu_physical_id(smp_processor_id());
vector = isa_irq_to_vector(isa_irq); irq = vector = isa_irq_to_vector(isa_irq);
BUG_ON(bind_irq_vector(irq, vector, CPU_MASK_ALL));
register_intr(gsi, vector, IOSAPIC_LOWEST_PRIORITY, polarity, trigger); register_intr(gsi, irq, IOSAPIC_LOWEST_PRIORITY, polarity, trigger);
DBG("ISA: IRQ %u -> GSI %u (%s,%s) -> CPU %d (0x%04x) vector %d\n", DBG("ISA: IRQ %u -> GSI %u (%s,%s) -> CPU %d (0x%04x) vector %d\n",
isa_irq, gsi, trigger == IOSAPIC_EDGE ? "edge" : "level", isa_irq, gsi, trigger == IOSAPIC_EDGE ? "edge" : "level",
polarity == IOSAPIC_POL_HIGH ? "high" : "low", polarity == IOSAPIC_POL_HIGH ? "high" : "low",
cpu_logical_id(dest), dest, vector); cpu_logical_id(dest), dest, vector);
set_rte(gsi, vector, dest, 1); set_rte(gsi, irq, dest, 1);
} }
void __init void __init
iosapic_system_init (int system_pcat_compat) iosapic_system_init (int system_pcat_compat)
{ {
int vector; int irq;
for (vector = 0; vector < IA64_NUM_VECTORS; ++vector) { for (irq = 0; irq < NR_IRQS; ++irq) {
iosapic_intr_info[vector].low32 = IOSAPIC_MASK; iosapic_intr_info[irq].low32 = IOSAPIC_MASK;
/* mark as unused */ /* mark as unused */
INIT_LIST_HEAD(&iosapic_intr_info[vector].rtes); INIT_LIST_HEAD(&iosapic_intr_info[irq].rtes);
iosapic_intr_info[irq].count = 0;
} }
pcat_compat = system_pcat_compat; pcat_compat = system_pcat_compat;
...@@ -1108,31 +1051,35 @@ iosapic_init (unsigned long phys_addr, unsigned int gsi_base) ...@@ -1108,31 +1051,35 @@ iosapic_init (unsigned long phys_addr, unsigned int gsi_base)
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&iosapic_lock, flags); spin_lock_irqsave(&iosapic_lock, flags);
{ index = find_iosapic(gsi_base);
addr = ioremap(phys_addr, 0); if (index >= 0) {
ver = iosapic_version(addr); spin_unlock_irqrestore(&iosapic_lock, flags);
return -EBUSY;
}
if ((err = iosapic_check_gsi_range(gsi_base, ver))) { addr = ioremap(phys_addr, 0);
iounmap(addr); ver = iosapic_version(addr);
spin_unlock_irqrestore(&iosapic_lock, flags); if ((err = iosapic_check_gsi_range(gsi_base, ver))) {
return err; iounmap(addr);
} spin_unlock_irqrestore(&iosapic_lock, flags);
return err;
}
/* /*
* The MAX_REDIR register holds the highest input pin * The MAX_REDIR register holds the highest input pin number
* number (starting from 0). * (starting from 0). We add 1 so that we can use it for
* We add 1 so that we can use it for number of pins (= RTEs) * number of pins (= RTEs)
*/ */
num_rte = ((ver >> 16) & 0xff) + 1; num_rte = ((ver >> 16) & 0xff) + 1;
index = iosapic_alloc(); index = iosapic_alloc();
iosapic_lists[index].addr = addr; iosapic_lists[index].addr = addr;
iosapic_lists[index].gsi_base = gsi_base; iosapic_lists[index].gsi_base = gsi_base;
iosapic_lists[index].num_rte = num_rte; iosapic_lists[index].num_rte = num_rte;
#ifdef CONFIG_NUMA #ifdef CONFIG_NUMA
iosapic_lists[index].node = MAX_NUMNODES; iosapic_lists[index].node = MAX_NUMNODES;
#endif #endif
} spin_lock_init(&iosapic_lists[index].lock);
spin_unlock_irqrestore(&iosapic_lock, flags); spin_unlock_irqrestore(&iosapic_lock, flags);
if ((gsi_base == 0) && pcat_compat) { if ((gsi_base == 0) && pcat_compat) {
...@@ -1157,25 +1104,22 @@ iosapic_remove (unsigned int gsi_base) ...@@ -1157,25 +1104,22 @@ iosapic_remove (unsigned int gsi_base)
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&iosapic_lock, flags); spin_lock_irqsave(&iosapic_lock, flags);
{ index = find_iosapic(gsi_base);
index = find_iosapic(gsi_base); if (index < 0) {
if (index < 0) { printk(KERN_WARNING "%s: No IOSAPIC for GSI base %u\n",
printk(KERN_WARNING "%s: No IOSAPIC for GSI base %u\n", __FUNCTION__, gsi_base);
__FUNCTION__, gsi_base); goto out;
goto out; }
}
if (iosapic_lists[index].rtes_inuse) {
err = -EBUSY;
printk(KERN_WARNING
"%s: IOSAPIC for GSI base %u is busy\n",
__FUNCTION__, gsi_base);
goto out;
}
iounmap(iosapic_lists[index].addr); if (iosapic_lists[index].rtes_inuse) {
iosapic_free(index); err = -EBUSY;
printk(KERN_WARNING "%s: IOSAPIC for GSI base %u is busy\n",
__FUNCTION__, gsi_base);
goto out;
} }
iounmap(iosapic_lists[index].addr);
iosapic_free(index);
out: out:
spin_unlock_irqrestore(&iosapic_lock, flags); spin_unlock_irqrestore(&iosapic_lock, flags);
return err; return err;
......
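Aside (not part of the commit): a recurring change in the iosapic.c hunks above is that each RTE now carries a back-pointer to its parent IOSAPIC instead of caching addr/gsi_base per RTE, so find_rte() derives the GSI from the controller. The stand-alone C sketch below models that layout; the model_* names are illustrative only.

#include <stdio.h>

/* Minimal model of the new layout: the RTE points back at its controller. */
struct model_iosapic {
        unsigned int gsi_base;          /* first GSI handled by this controller */
};

struct model_rte {
        struct model_iosapic *iosapic;  /* back-pointer, replaces per-RTE addr/gsi_base */
        char rte_index;                 /* pin number within the controller */
};

/* The GSI of an RTE is derived from its controller, so there is one place to update. */
static unsigned int rte_to_gsi(const struct model_rte *rte)
{
        return rte->iosapic->gsi_base + rte->rte_index;
}

int main(void)
{
        struct model_iosapic ios = { .gsi_base = 48 };
        struct model_rte rte = { .iosapic = &ios, .rte_index = 3 };

        printf("RTE maps to GSI %u\n", rte_to_gsi(&rte));  /* prints 51 */
        return 0;
}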
@@ -35,7 +35,7 @@ void ack_bad_irq(unsigned int irq)
 #ifdef CONFIG_IA64_GENERIC
 unsigned int __ia64_local_vector_to_irq (ia64_vector vec)
 {
-	return (unsigned int) vec;
+	return __get_cpu_var(vector_irq)[vec];
 }
 #endif
......
@@ -46,6 +46,12 @@
 #define IRQ_DEBUG	0
 
+#define IRQ_VECTOR_UNASSIGNED	(0)
+
+#define IRQ_UNUSED		(0)
+#define IRQ_USED		(1)
+#define IRQ_RSVD		(2)
+
 /* These can be overridden in platform_irq_init */
 int ia64_first_device_vector = IA64_DEF_FIRST_DEVICE_VECTOR;
 int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR;
@@ -54,6 +60,8 @@ int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR;
 void __iomem *ipi_base_addr = ((void __iomem *)
 			       (__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR));
 
+static cpumask_t vector_allocation_domain(int cpu);
+
 /*
  * Legacy IRQ to IA-64 vector translation table.
  */
@@ -64,46 +72,269 @@ __u8 isa_irq_to_vector_map[16] = {
 };
 EXPORT_SYMBOL(isa_irq_to_vector_map);
 
-static unsigned long ia64_vector_mask[BITS_TO_LONGS(IA64_MAX_DEVICE_VECTORS)];
+DEFINE_SPINLOCK(vector_lock);
+
struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = {
[0 ... NR_IRQS - 1] = {
.vector = IRQ_VECTOR_UNASSIGNED,
.domain = CPU_MASK_NONE
}
};
DEFINE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq) = {
[0 ... IA64_NUM_VECTORS - 1] = IA64_SPURIOUS_INT_VECTOR
};
static cpumask_t vector_table[IA64_MAX_DEVICE_VECTORS] = {
[0 ... IA64_MAX_DEVICE_VECTORS - 1] = CPU_MASK_NONE
};
static int irq_status[NR_IRQS] = {
[0 ... NR_IRQS -1] = IRQ_UNUSED
};
int check_irq_used(int irq)
{
if (irq_status[irq] == IRQ_USED)
return 1;
return -1;
}
static void reserve_irq(unsigned int irq)
{
unsigned long flags;
spin_lock_irqsave(&vector_lock, flags);
irq_status[irq] = IRQ_RSVD;
spin_unlock_irqrestore(&vector_lock, flags);
}
static inline int find_unassigned_irq(void)
{
int irq;
for (irq = IA64_FIRST_DEVICE_VECTOR; irq < NR_IRQS; irq++)
if (irq_status[irq] == IRQ_UNUSED)
return irq;
return -ENOSPC;
}
static inline int find_unassigned_vector(cpumask_t domain)
{
cpumask_t mask;
int pos;
cpus_and(mask, domain, cpu_online_map);
if (cpus_empty(mask))
return -EINVAL;
for (pos = 0; pos < IA64_NUM_DEVICE_VECTORS; pos++) {
cpus_and(mask, domain, vector_table[pos]);
if (!cpus_empty(mask))
continue;
return IA64_FIRST_DEVICE_VECTOR + pos;
}
return -ENOSPC;
}
static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
{
cpumask_t mask;
int cpu, pos;
struct irq_cfg *cfg = &irq_cfg[irq];
cpus_and(mask, domain, cpu_online_map);
if (cpus_empty(mask))
return -EINVAL;
if ((cfg->vector == vector) && cpus_equal(cfg->domain, domain))
return 0;
if (cfg->vector != IRQ_VECTOR_UNASSIGNED)
return -EBUSY;
for_each_cpu_mask(cpu, mask)
per_cpu(vector_irq, cpu)[vector] = irq;
cfg->vector = vector;
cfg->domain = domain;
irq_status[irq] = IRQ_USED;
pos = vector - IA64_FIRST_DEVICE_VECTOR;
cpus_or(vector_table[pos], vector_table[pos], domain);
return 0;
}
int bind_irq_vector(int irq, int vector, cpumask_t domain)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&vector_lock, flags);
ret = __bind_irq_vector(irq, vector, domain);
spin_unlock_irqrestore(&vector_lock, flags);
return ret;
}
static void __clear_irq_vector(int irq)
{
int vector, cpu, pos;
cpumask_t mask;
cpumask_t domain;
struct irq_cfg *cfg = &irq_cfg[irq];
BUG_ON((unsigned)irq >= NR_IRQS);
BUG_ON(cfg->vector == IRQ_VECTOR_UNASSIGNED);
vector = cfg->vector;
domain = cfg->domain;
cpus_and(mask, cfg->domain, cpu_online_map);
for_each_cpu_mask(cpu, mask)
per_cpu(vector_irq, cpu)[vector] = IA64_SPURIOUS_INT_VECTOR;
cfg->vector = IRQ_VECTOR_UNASSIGNED;
cfg->domain = CPU_MASK_NONE;
irq_status[irq] = IRQ_UNUSED;
pos = vector - IA64_FIRST_DEVICE_VECTOR;
cpus_andnot(vector_table[pos], vector_table[pos], domain);
}
static void clear_irq_vector(int irq)
{
unsigned long flags;
spin_lock_irqsave(&vector_lock, flags);
__clear_irq_vector(irq);
spin_unlock_irqrestore(&vector_lock, flags);
}
 int
 assign_irq_vector (int irq)
 {
-	int pos, vector;
- again:
-	pos = find_first_zero_bit(ia64_vector_mask, IA64_NUM_DEVICE_VECTORS);
-	vector = IA64_FIRST_DEVICE_VECTOR + pos;
-	if (vector > IA64_LAST_DEVICE_VECTOR)
-		return -ENOSPC;
-	if (test_and_set_bit(pos, ia64_vector_mask))
-		goto again;
+	unsigned long flags;
+	int vector, cpu;
+	cpumask_t domain;
+
+	vector = -ENOSPC;
+
+	spin_lock_irqsave(&vector_lock, flags);
+	if (irq < 0) {
+		goto out;
+	}
+	for_each_online_cpu(cpu) {
+		domain = vector_allocation_domain(cpu);
+		vector = find_unassigned_vector(domain);
+		if (vector >= 0)
+			break;
+	}
+	if (vector < 0)
+		goto out;
+	BUG_ON(__bind_irq_vector(irq, vector, domain));
+ out:
+	spin_unlock_irqrestore(&vector_lock, flags);
 	return vector;
 }
 
 void
 free_irq_vector (int vector)
 {
-	int pos;
-
-	if (vector < IA64_FIRST_DEVICE_VECTOR || vector > IA64_LAST_DEVICE_VECTOR)
+	if (vector < IA64_FIRST_DEVICE_VECTOR ||
+	    vector > IA64_LAST_DEVICE_VECTOR)
 		return;
-
-	pos = vector - IA64_FIRST_DEVICE_VECTOR;
-	if (!test_and_clear_bit(pos, ia64_vector_mask))
-		printk(KERN_WARNING "%s: double free!\n", __FUNCTION__);
+	clear_irq_vector(vector);
 }
 
 int
 reserve_irq_vector (int vector)
 {
-	int pos;
-
 	if (vector < IA64_FIRST_DEVICE_VECTOR ||
 	    vector > IA64_LAST_DEVICE_VECTOR)
 		return -EINVAL;
-
-	pos = vector - IA64_FIRST_DEVICE_VECTOR;
-	return test_and_set_bit(pos, ia64_vector_mask);
+	return !!bind_irq_vector(vector, vector, CPU_MASK_ALL);
+}
+
+/*
+ * Initialize vector_irq on a new cpu. This function must be called
+ * with vector_lock held.
+ */
void __setup_vector_irq(int cpu)
{
int irq, vector;
/* Clear vector_irq */
for (vector = 0; vector < IA64_NUM_VECTORS; ++vector)
per_cpu(vector_irq, cpu)[vector] = IA64_SPURIOUS_INT_VECTOR;
/* Mark the inuse vectors */
for (irq = 0; irq < NR_IRQS; ++irq) {
if (!cpu_isset(cpu, irq_cfg[irq].domain))
continue;
vector = irq_to_vector(irq);
per_cpu(vector_irq, cpu)[vector] = irq;
}
}
#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG))
static enum vector_domain_type {
VECTOR_DOMAIN_NONE,
VECTOR_DOMAIN_PERCPU
} vector_domain_type = VECTOR_DOMAIN_NONE;
static cpumask_t vector_allocation_domain(int cpu)
{
if (vector_domain_type == VECTOR_DOMAIN_PERCPU)
return cpumask_of_cpu(cpu);
return CPU_MASK_ALL;
}
static int __init parse_vector_domain(char *arg)
{
if (!arg)
return -EINVAL;
if (!strcmp(arg, "percpu")) {
vector_domain_type = VECTOR_DOMAIN_PERCPU;
no_int_routing = 1;
}
return 1;
}
early_param("vector", parse_vector_domain);
#else
static cpumask_t vector_allocation_domain(int cpu)
{
return CPU_MASK_ALL;
}
#endif
void destroy_and_reserve_irq(unsigned int irq)
{
dynamic_irq_cleanup(irq);
clear_irq_vector(irq);
reserve_irq(irq);
}
static int __reassign_irq_vector(int irq, int cpu)
{
struct irq_cfg *cfg = &irq_cfg[irq];
int vector;
cpumask_t domain;
if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
return -EINVAL;
if (cpu_isset(cpu, cfg->domain))
return 0;
domain = vector_allocation_domain(cpu);
vector = find_unassigned_vector(domain);
if (vector < 0)
return -ENOSPC;
__clear_irq_vector(irq);
BUG_ON(__bind_irq_vector(irq, vector, domain));
return 0;
}
int reassign_irq_vector(int irq, int cpu)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&vector_lock, flags);
ret = __reassign_irq_vector(irq, cpu);
spin_unlock_irqrestore(&vector_lock, flags);
return ret;
} }
 /*
@@ -111,18 +342,35 @@ reserve_irq_vector (int vector)
  */
 int create_irq(void)
 {
-	int vector = assign_irq_vector(AUTO_ASSIGN);
-
-	if (vector >= 0)
-		dynamic_irq_init(vector);
-
-	return vector;
+	unsigned long flags;
+	int irq, vector, cpu;
+	cpumask_t domain;
+
+	irq = vector = -ENOSPC;
+	spin_lock_irqsave(&vector_lock, flags);
+	for_each_online_cpu(cpu) {
+		domain = vector_allocation_domain(cpu);
+		vector = find_unassigned_vector(domain);
+		if (vector >= 0)
+			break;
+	}
+	if (vector < 0)
+		goto out;
+	irq = find_unassigned_irq();
+	if (irq < 0)
+		goto out;
+	BUG_ON(__bind_irq_vector(irq, vector, domain));
+ out:
+	spin_unlock_irqrestore(&vector_lock, flags);
+	if (irq >= 0)
+		dynamic_irq_init(irq);
+	return irq;
 }
 
 void destroy_irq(unsigned int irq)
 {
 	dynamic_irq_cleanup(irq);
-	free_irq_vector(irq);
+	clear_irq_vector(irq);
 }
 
 #ifdef CONFIG_SMP
@@ -301,14 +549,13 @@ register_percpu_irq (ia64_vector vec, struct irqaction *action)
 	irq_desc_t *desc;
 	unsigned int irq;
 
-	for (irq = 0; irq < NR_IRQS; ++irq)
-		if (irq_to_vector(irq) == vec) {
-			desc = irq_desc + irq;
-			desc->status |= IRQ_PER_CPU;
-			desc->chip = &irq_type_ia64_lsapic;
-			if (action)
-				setup_irq(irq, action);
-		}
+	irq = vec;
+	BUG_ON(bind_irq_vector(irq, vec, CPU_MASK_ALL));
+	desc = irq_desc + irq;
+	desc->status |= IRQ_PER_CPU;
+	desc->chip = &irq_type_ia64_lsapic;
+	if (action)
+		setup_irq(irq, action);
 }
 
 void __init
......
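Aside (not part of the commit): the heart of the new allocator above is that a vector is no longer a machine-wide resource. Each vector tracks the set of CPUs (its domain) on which it is bound, and vector=percpu shrinks that domain to a single CPU. The stand-alone sketch below models find_unassigned_vector()/__bind_irq_vector() with plain bitmasks; it assumes at most 64 CPUs, uses made-up vector numbers, and is illustrative rather than the kernel code.

#include <stdio.h>
#include <stdint.h>

#define FIRST_DEVICE_VECTOR 0x30
#define NUM_DEVICE_VECTORS  16          /* kept tiny for the example */

/* For every device vector, the mask of CPUs it is already bound on. */
static uint64_t vector_table[NUM_DEVICE_VECTORS];

/* Find a vector that is free on every CPU of the requested domain. */
static int find_unassigned_vector(uint64_t domain)
{
        for (int pos = 0; pos < NUM_DEVICE_VECTORS; pos++)
                if ((vector_table[pos] & domain) == 0)
                        return FIRST_DEVICE_VECTOR + pos;
        return -1;
}

/* Bind: the vector becomes busy on all CPUs of the domain. */
static void bind_vector(int vector, uint64_t domain)
{
        vector_table[vector - FIRST_DEVICE_VECTOR] |= domain;
}

int main(void)
{
        uint64_t all_cpus = 0xf;                         /* CPUs 0-3 online */
        uint64_t cpu0 = 1ull << 0, cpu1 = 1ull << 1;

        /* Global domain (default): one allocation consumes the vector everywhere. */
        int v1 = find_unassigned_vector(all_cpus);
        bind_vector(v1, all_cpus);

        /* Per-cpu domains (vector=percpu): the same vector number can be reused
         * on another CPU, multiplying the effective number of device vectors. */
        int v2 = find_unassigned_vector(cpu0);
        bind_vector(v2, cpu0);
        int v3 = find_unassigned_vector(cpu1);           /* gets the same number as v2 */
        bind_vector(v3, cpu1);

        printf("global: 0x%x  percpu: 0x%x and 0x%x\n", v1, v2, v3);
        return 0;
}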
@@ -13,6 +13,7 @@
 #define MSI_DATA_VECTOR_SHIFT		0
 #define	 MSI_DATA_VECTOR(v)		(((u8)v) << MSI_DATA_VECTOR_SHIFT)
+#define MSI_DATA_VECTOR_MASK		0xffffff00
 
 #define MSI_DATA_DELIVERY_SHIFT		8
 #define	 MSI_DATA_DELIVERY_FIXED	(0 << MSI_DATA_DELIVERY_SHIFT)
@@ -50,17 +51,29 @@ static struct irq_chip ia64_msi_chip;
 static void ia64_set_msi_irq_affinity(unsigned int irq, cpumask_t cpu_mask)
 {
 	struct msi_msg msg;
-	u32 addr;
+	u32 addr, data;
+	int cpu = first_cpu(cpu_mask);
+
+	if (!cpu_online(cpu))
+		return;
+
+	if (reassign_irq_vector(irq, cpu))
+		return;
 
 	read_msi_msg(irq, &msg);
 
 	addr = msg.address_lo;
 	addr &= MSI_ADDR_DESTID_MASK;
-	addr |= MSI_ADDR_DESTID_CPU(cpu_physical_id(first_cpu(cpu_mask)));
+	addr |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu));
 	msg.address_lo = addr;
 
+	data = msg.data;
+	data &= MSI_DATA_VECTOR_MASK;
+	data |= MSI_DATA_VECTOR(irq_to_vector(irq));
+	msg.data = data;
+
 	write_msi_msg(irq, &msg);
-	irq_desc[irq].affinity = cpu_mask;
+	irq_desc[irq].affinity = cpumask_of_cpu(cpu);
 }
 
 #endif /* CONFIG_SMP */
@@ -69,13 +82,15 @@ int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
 	struct msi_msg	msg;
 	unsigned long	dest_phys_id;
 	int	irq, vector;
+	cpumask_t mask;
 
 	irq = create_irq();
 	if (irq < 0)
 		return irq;
 
 	set_irq_msi(irq, desc);
-	dest_phys_id = cpu_physical_id(first_cpu(cpu_online_map));
+	cpus_and(mask, irq_to_domain(irq), cpu_online_map);
+	dest_phys_id = cpu_physical_id(first_cpu(mask));
 	vector = irq_to_vector(irq);
 
 	msg.address_hi = 0;
......
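Aside (not part of the commit): with per-cpu domains, migrating an MSI to another CPU can change its vector as well as its destination, which is why ia64_set_msi_irq_affinity() above now rewrites the data word too. A rough stand-alone model of that read-modify-write, reusing only the MSI_DATA_VECTOR* definitions visible in the hunk; the example value is made up.

#include <stdio.h>
#include <stdint.h>

#define MSI_DATA_VECTOR_SHIFT 0
#define MSI_DATA_VECTOR(v)    (((uint8_t)(v)) << MSI_DATA_VECTOR_SHIFT)
#define MSI_DATA_VECTOR_MASK  0xffffff00u

/* Keep everything except the vector field, then insert the new vector,
 * mirroring the data-word update added to ia64_set_msi_irq_affinity(). */
static uint32_t msi_data_set_vector(uint32_t data, uint8_t new_vector)
{
        data &= MSI_DATA_VECTOR_MASK;
        data |= MSI_DATA_VECTOR(new_vector);
        return data;
}

int main(void)
{
        uint32_t data = 0x00004530;      /* arbitrary example: old vector 0x30 */

        data = msi_data_set_vector(data, 0x52);
        printf("new MSI data word: 0x%08x\n", data);   /* 0x00004552 */
        return 0;
}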
@@ -395,9 +395,13 @@ smp_callin (void)
 	fix_b0_for_bsp();
 
 	lock_ipi_calllock();
+	spin_lock(&vector_lock);
+	/* Setup the per cpu irq handling data structures */
+	__setup_vector_irq(cpuid);
 	cpu_set(cpuid, cpu_online_map);
 	unlock_ipi_calllock();
 	per_cpu(cpu_state, cpuid) = CPU_ONLINE;
+	spin_unlock(&vector_lock);
 
 	smp_setup_percpu_timer();
......
@@ -90,13 +90,27 @@ enum {
 extern __u8 isa_irq_to_vector_map[16];
 #define isa_irq_to_vector(x)	isa_irq_to_vector_map[(x)]
 
+struct irq_cfg {
+	ia64_vector vector;
+	cpumask_t domain;
+};
+extern spinlock_t vector_lock;
+extern struct irq_cfg irq_cfg[NR_IRQS];
+#define irq_to_domain(x)	irq_cfg[(x)].domain
+DECLARE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq);
+
 extern struct hw_interrupt_type irq_type_ia64_lsapic;	/* CPU-internal interrupt controller */
 
+extern int bind_irq_vector(int irq, int vector, cpumask_t domain);
 extern int assign_irq_vector (int irq);	/* allocate a free vector */
 extern void free_irq_vector (int vector);
 extern int reserve_irq_vector (int vector);
+extern void __setup_vector_irq(int cpu);
+extern int reassign_irq_vector(int irq, int cpu);
 extern void ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect);
 extern void register_percpu_irq (ia64_vector vec, struct irqaction *action);
+extern int check_irq_used (int irq);
+extern void destroy_and_reserve_irq (unsigned int irq);
 
 static inline void ia64_resend_irq(unsigned int vector)
 {
@@ -113,7 +127,7 @@ extern irq_desc_t irq_desc[NR_IRQS];
 static inline unsigned int
 __ia64_local_vector_to_irq (ia64_vector vec)
 {
-	return (unsigned int) vec;
+	return __get_cpu_var(vector_irq)[vec];
 }
 #endif
@@ -131,7 +145,7 @@ __ia64_local_vector_to_irq (ia64_vector vec)
 static inline ia64_vector
 irq_to_vector (int irq)
 {
-	return (ia64_vector) irq;
+	return irq_cfg[irq].vector;
 }
 
 /*
......
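Aside (not part of the commit): the header above replaces the old identity mapping (irq == vector) with two tables, a global forward map irq_cfg[irq].vector and a per-CPU reverse map vector_irq[vector]. They must agree, which is what __setup_vector_irq() re-establishes when a CPU comes online. A compact user-space model with illustrative names and sizes:

#include <stdio.h>

#define MODEL_CPUS    2
#define MODEL_IRQS    8
#define MODEL_VECTORS 16
#define SPURIOUS      15                /* stand-in for IA64_SPURIOUS_INT_VECTOR */

static int irq_vector[MODEL_IRQS];                  /* forward: irq -> vector         */
static int vector_irq[MODEL_CPUS][MODEL_VECTORS];   /* reverse: per-cpu vector -> irq */
static unsigned int irq_domain[MODEL_IRQS];         /* bitmask of CPUs the irq targets */

/* Rebuild a CPU's reverse map from the forward map, like __setup_vector_irq(). */
static void setup_vector_irq(int cpu)
{
        for (int v = 0; v < MODEL_VECTORS; v++)
                vector_irq[cpu][v] = SPURIOUS;
        for (int irq = 0; irq < MODEL_IRQS; irq++)
                if (irq_domain[irq] & (1u << cpu))
                        vector_irq[cpu][irq_vector[irq]] = irq;
}

int main(void)
{
        irq_vector[3] = 5;  irq_domain[3] = 1u << 1;   /* irq 3 targets CPU 1 via vector 5 */

        setup_vector_irq(0);
        setup_vector_irq(1);

        printf("CPU1: vector 5 -> irq %d\n", vector_irq[1][5]);   /* prints 3             */
        printf("CPU0: vector 5 -> irq %d\n", vector_irq[0][5]);   /* prints 15 (spurious) */
        return 0;
}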
@@ -47,19 +47,21 @@
 #define	IOSAPIC_MASK_SHIFT		16
 #define	IOSAPIC_MASK			(1<<IOSAPIC_MASK_SHIFT)
 
+#define IOSAPIC_VECTOR_MASK		0xffffff00
+
 #ifndef __ASSEMBLY__
 
 #ifdef	CONFIG_IOSAPIC
 
 #define NR_IOSAPICS			256
 
-static inline unsigned int iosapic_read(char __iomem *iosapic, unsigned int reg)
+static inline unsigned int __iosapic_read(char __iomem *iosapic, unsigned int reg)
 {
 	writel(reg, iosapic + IOSAPIC_REG_SELECT);
 	return readl(iosapic + IOSAPIC_WINDOW);
 }
 
-static inline void iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
+static inline void __iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
 {
 	writel(reg, iosapic + IOSAPIC_REG_SELECT);
 	writel(val, iosapic + IOSAPIC_WINDOW);
......
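Aside (not part of the commit): the rename to __iosapic_read/__iosapic_write above marks the raw select/window accessors as lock-free; iosapic.c layers iosapic_write() on top of them and takes the new per-IOSAPIC spinlock, because one register access is really two MMIO writes that must not interleave. A user-space model with a mutex and a fake register file, purely illustrative:

#include <pthread.h>
#include <stdio.h>
#include <stdint.h>

/* Fake IOSAPIC: one select register, one window, and backing storage. */
struct model_iosapic {
        pthread_mutex_t lock;       /* models the new per-IOSAPIC spinlock */
        uint32_t selected;          /* models IOSAPIC_REG_SELECT           */
        uint32_t regs[64];          /* models what the window exposes      */
};

/* Raw, lock-free access: select the register, then touch the window. */
static void __model_write(struct model_iosapic *io, uint32_t reg, uint32_t val)
{
        io->selected = reg;               /* like writel(reg, addr + IOSAPIC_REG_SELECT) */
        io->regs[io->selected] = val;     /* like writel(val, addr + IOSAPIC_WINDOW)     */
}

/* Locked wrapper, like the new iosapic_write(): without the lock, two threads
 * could interleave their select/window pairs and clobber each other. */
static void model_write(struct model_iosapic *io, uint32_t reg, uint32_t val)
{
        pthread_mutex_lock(&io->lock);
        __model_write(io, reg, val);
        pthread_mutex_unlock(&io->lock);
}

int main(void)
{
        static struct model_iosapic io = { .lock = PTHREAD_MUTEX_INITIALIZER };

        model_write(&io, 0x11, 0x10030);  /* e.g. an RTE low word for some pin */
        printf("reg 0x11 = 0x%x\n", io.regs[0x11]);
        return 0;
}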
@@ -14,8 +14,13 @@
 #include <linux/types.h>
 #include <linux/cpumask.h>
 
-#define NR_IRQS		256
-#define NR_IRQ_VECTORS	NR_IRQS
+#define NR_VECTORS	256
+
+#if (NR_VECTORS + 32 * NR_CPUS) < 1024
+#define NR_IRQS		(NR_VECTORS + 32 * NR_CPUS)
+#else
+#define NR_IRQS		1024
+#endif
 
 static __inline__ int
 irq_canonicalize (int irq)
......
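Aside (not part of the commit): the new NR_IRQS bound above scales with the CPU count instead of being pinned to the 256 hardware vectors, since with per-cpu domains more than 256 interrupt sources can be live at once. A couple of worked values of the same formula, for assumed NR_CPUS configurations:

#include <stdio.h>

#define NR_VECTORS 256

/* Same formula as the header above, evaluated for a given NR_CPUS. */
static int nr_irqs(int nr_cpus)
{
        int n = NR_VECTORS + 32 * nr_cpus;
        return (n < 1024) ? n : 1024;
}

int main(void)
{
        printf("NR_CPUS=4  -> NR_IRQS=%d\n", nr_irqs(4));    /* 256 + 128 = 384        */
        printf("NR_CPUS=64 -> NR_IRQS=%d\n", nr_irqs(64));   /* 2304, capped to 1024   */
        return 0;
}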