Commit fc67b16e authored by Linus Torvalds
parents e8108c98 2d29306b
@@ -329,7 +329,7 @@ menu "Power management and ACPI"

 config PM
 	bool "Power Management support"
-	depends on IA64_GENERIC || IA64_DIG || IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB
+	depends on !IA64_HP_SIM
 	default y
 	help
 	  "Power Management" means that parts of your computer are shut
......
@@ -574,6 +574,8 @@ CONFIG_SERIAL_NONSTANDARD=y
 # CONFIG_N_HDLC is not set
 # CONFIG_STALDRV is not set
 CONFIG_SGI_SNSC=y
+CONFIG_SGI_TIOCX=y
+CONFIG_SGI_MBCS=m

 #
 # Serial drivers
......
 /*
 ** IA64 System Bus Adapter (SBA) I/O MMU manager
 **
-**	(c) Copyright 2002-2004 Alex Williamson
+**	(c) Copyright 2002-2005 Alex Williamson
 **	(c) Copyright 2002-2003 Grant Grundler
-**	(c) Copyright 2002-2004 Hewlett-Packard Company
+**	(c) Copyright 2002-2005 Hewlett-Packard Company
 **
 **	Portions (c) 2000 Grant Grundler (from parisc I/O MMU code)
 **	Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code)
@@ -459,21 +459,32 @@ get_iovp_order (unsigned long size)
 * sba_search_bitmap - find free space in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @bits_wanted: number of entries we need.
+* @use_hint: use res_hint to indicate where to start looking
 *
 * Find consecutive free bits in resource bitmap.
 * Each bit represents one entry in the IO Pdir.
 * Cool perf optimization: search for log2(size) bits at a time.
 */
 static SBA_INLINE unsigned long
-sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted)
+sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint)
 {
-	unsigned long *res_ptr = ioc->res_hint;
+	unsigned long *res_ptr;
 	unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
-	unsigned long pide = ~0UL;
+	unsigned long flags, pide = ~0UL;

 	ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0);
 	ASSERT(res_ptr < res_end);

+	spin_lock_irqsave(&ioc->res_lock, flags);
+
+	/* Allow caller to force a search through the entire resource space */
+	if (likely(use_hint)) {
+		res_ptr = ioc->res_hint;
+	} else {
+		res_ptr = (ulong *)ioc->res_map;
+		ioc->res_bitshift = 0;
+	}
+
 	/*
 	 * N.B.  REO/Grande defect AR2305 can cause TLB fetch timeouts
 	 * if a TLB entry is purged while in use.  sba_mark_invalid()
@@ -570,10 +581,12 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted)
 	prefetch(ioc->res_map);
 	ioc->res_hint = (unsigned long *) ioc->res_map;
 	ioc->res_bitshift = 0;
+	spin_unlock_irqrestore(&ioc->res_lock, flags);
 	return (pide);

 found_it:
 	ioc->res_hint = res_ptr;
+	spin_unlock_irqrestore(&ioc->res_lock, flags);
 	return (pide);
 }
@@ -594,36 +607,36 @@ sba_alloc_range(struct ioc *ioc, size_t size)
 	unsigned long itc_start;
 #endif
 	unsigned long pide;
-	unsigned long flags;

 	ASSERT(pages_needed);
 	ASSERT(0 == (size & ~iovp_mask));

-	spin_lock_irqsave(&ioc->res_lock, flags);
-
 #ifdef PDIR_SEARCH_TIMING
 	itc_start = ia64_get_itc();
 #endif
 	/*
 	** "seek and ye shall find"...praying never hurts either...
 	*/
-	pide = sba_search_bitmap(ioc, pages_needed);
+	pide = sba_search_bitmap(ioc, pages_needed, 1);
 	if (unlikely(pide >= (ioc->res_size << 3))) {
-		pide = sba_search_bitmap(ioc, pages_needed);
+		pide = sba_search_bitmap(ioc, pages_needed, 0);
 		if (unlikely(pide >= (ioc->res_size << 3))) {
 #if DELAYED_RESOURCE_CNT > 0
+			unsigned long flags;
+
 			/*
 			** With delayed resource freeing, we can give this one more shot.  We're
 			** getting close to being in trouble here, so do what we can to make this
 			** one count.
 			*/
-			spin_lock(&ioc->saved_lock);
+			spin_lock_irqsave(&ioc->saved_lock, flags);
 			if (ioc->saved_cnt > 0) {
 				struct sba_dma_pair *d;
 				int cnt = ioc->saved_cnt;

-				d = &(ioc->saved[ioc->saved_cnt]);
+				d = &(ioc->saved[ioc->saved_cnt - 1]);

+				spin_lock(&ioc->res_lock);
 				while (cnt--) {
 					sba_mark_invalid(ioc, d->iova, d->size);
 					sba_free_range(ioc, d->iova, d->size);
@@ -631,10 +644,11 @@ sba_alloc_range(struct ioc *ioc, size_t size)
 				}
 				ioc->saved_cnt = 0;
 				READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
+				spin_unlock(&ioc->res_lock);
 			}
-			spin_unlock(&ioc->saved_lock);
+			spin_unlock_irqrestore(&ioc->saved_lock, flags);

-			pide = sba_search_bitmap(ioc, pages_needed);
+			pide = sba_search_bitmap(ioc, pages_needed, 0);
 			if (unlikely(pide >= (ioc->res_size << 3)))
 				panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
 				      ioc->ioc_hpa);
@@ -664,8 +678,6 @@ sba_alloc_range(struct ioc *ioc, size_t size)
 		(uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
 		ioc->res_bitshift );

-	spin_unlock_irqrestore(&ioc->res_lock, flags);
-
 	return (pide);
 }
@@ -950,6 +962,30 @@ sba_map_single(struct device *dev, void *addr, size_t size, int dir)
 	return SBA_IOVA(ioc, iovp, offset);
 }

+#ifdef ENABLE_MARK_CLEAN
+static SBA_INLINE void
+sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
+{
+	u32 iovp = (u32) SBA_IOVP(ioc,iova);
+	int off = PDIR_INDEX(iovp);
+	void *addr;
+
+	if (size <= iovp_size) {
+		addr = phys_to_virt(ioc->pdir_base[off] &
+				    ~0xE000000000000FFFULL);
+		mark_clean(addr, size);
+	} else {
+		do {
+			addr = phys_to_virt(ioc->pdir_base[off] &
+					    ~0xE000000000000FFFULL);
+			mark_clean(addr, min(size, iovp_size));
+			off++;
+			size -= iovp_size;
+		} while (size > 0);
+	}
+}
+#endif
+
 /**
  * sba_unmap_single - unmap one IOVA and free resources
  * @dev: instance of PCI owned by the driver that's asking.
@@ -995,6 +1031,10 @@ void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
 	size += offset;
 	size = ROUNDUP(size, iovp_size);

+#ifdef ENABLE_MARK_CLEAN
+	if (dir == DMA_FROM_DEVICE)
+		sba_mark_clean(ioc, iova, size);
+#endif
+
 #if DELAYED_RESOURCE_CNT > 0
 	spin_lock_irqsave(&ioc->saved_lock, flags);
@@ -1021,30 +1061,6 @@ void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
 	READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
 	spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif /* DELAYED_RESOURCE_CNT == 0 */
-#ifdef ENABLE_MARK_CLEAN
-	if (dir == DMA_FROM_DEVICE) {
-		u32 iovp = (u32) SBA_IOVP(ioc,iova);
-		int off = PDIR_INDEX(iovp);
-		void *addr;
-
-		if (size <= iovp_size) {
-			addr = phys_to_virt(ioc->pdir_base[off] &
-					    ~0xE000000000000FFFULL);
-			mark_clean(addr, size);
-		} else {
-			size_t byte_cnt = size;
-
-			do {
-				addr = phys_to_virt(ioc->pdir_base[off] &
-						    ~0xE000000000000FFFULL);
-				mark_clean(addr, min(byte_cnt, iovp_size));
-				off++;
-				byte_cnt -= iovp_size;
-			} while (byte_cnt > 0);
-		}
-	}
-#endif
 }
......
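The sba_iommu.c hunks above push the res_lock acquire/release into sba_search_bitmap() itself and thread a use_hint flag through it: the normal call starts scanning at the cached res_hint, and sba_alloc_range() retries with use_hint cleared to force a full sweep from bit 0 (and one more after draining the delayed-free list) before it panics. A standalone toy sketch of that hint-then-full-rescan policy follows (toy_* names are illustrative, not the kernel's; locking and the log2 search optimization are omitted):

#include <stddef.h>
#include <stdio.h>

/* 0 = free, 1 = busy; the hint remembers where the last search ended. */
struct toy_ioc {
	unsigned char map[64];
	size_t hint;
};

/* Find 'want' (> 0) consecutive free slots, starting at the hint or at 0. */
static long toy_search(struct toy_ioc *ioc, size_t want, int use_hint)
{
	size_t run = 0;

	for (size_t i = use_hint ? ioc->hint : 0; i < sizeof(ioc->map); i++) {
		run = ioc->map[i] ? 0 : run + 1;
		if (run == want) {
			ioc->hint = i + 1;	/* next search resumes here */
			return (long)(i + 1 - want);
		}
	}
	return -1;
}

static long toy_alloc(struct toy_ioc *ioc, size_t want)
{
	long pide = toy_search(ioc, want, 1);	/* fast path: from the hint */

	if (pide < 0)
		pide = toy_search(ioc, want, 0);	/* slow path: full rescan */
	return pide;
}

int main(void)
{
	struct toy_ioc ioc = { .hint = 60 };	/* hint near the end... */

	printf("%ld\n", toy_alloc(&ioc, 8));	/* ...so the rescan finds slot 0 */
	return 0;
}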
@@ -728,12 +728,8 @@ ENTRY(ia64_leave_syscall)
 	mov f8=f0		// clear f8
 	;;
 	ld8 r30=[r2],16		// M0|1 load cr.ifs
-	mov.m ar.ssd=r0		// M2 clear ar.ssd
-	cmp.eq p9,p0=r0,r0	// set p9 to indicate that we should restore cr.ifs
-	;;
 	ld8 r25=[r3],16		// M0|1 load ar.unat
-	mov.m ar.csd=r0		// M2 clear ar.csd
-	mov r22=r0		// clear r22
+	cmp.eq p9,p0=r0,r0	// set p9 to indicate that we should restore cr.ifs
 	;;
 	ld8 r26=[r2],PT(B0)-PT(AR_PFS)	// M0|1 load ar.pfs
 (pKStk)	mov r22=psr		// M2 read PSR now that interrupts are disabled

@@ -756,11 +752,15 @@ ENTRY(ia64_leave_syscall)
 	mov f7=f0		// clear f7
 	;;
 	ld8.fill r12=[r2]	// restore r12 (sp)
+	mov.m ar.ssd=r0		// M2 clear ar.ssd
+	mov r22=r0		// clear r22
+
 	ld8.fill r15=[r3]	// restore r15
+(pUStk) st1 [r14]=r17
 	addl r3=THIS_CPU(ia64_phys_stacked_size_p8),r0
 	;;
-(pUStk)	ld4 r3=[r3]		// r3 = cpu_data->phys_stacked_size_p8
-(pUStk) st1 [r14]=r17
+(pUStk)	ld4 r17=[r3]		// r17 = cpu_data->phys_stacked_size_p8
+	mov.m ar.csd=r0		// M2 clear ar.csd
 	mov b6=r18		// I0 restore b6
 	;;
 	mov r14=r0		// clear r14
......
@@ -63,20 +63,30 @@ EXPORT_SYMBOL(isa_irq_to_vector_map);
 static unsigned long ia64_vector_mask[BITS_TO_LONGS(IA64_NUM_DEVICE_VECTORS)];

 int
-assign_irq_vector (int irq)
+assign_irq_vector_nopanic (int irq)
 {
 	int pos, vector;
 again:
 	pos = find_first_zero_bit(ia64_vector_mask, IA64_NUM_DEVICE_VECTORS);
 	vector = IA64_FIRST_DEVICE_VECTOR + pos;
 	if (vector > IA64_LAST_DEVICE_VECTOR)
-		/* XXX could look for sharable vectors instead of panic'ing... */
-		panic("assign_irq_vector: out of interrupt vectors!");
+		return -1;
 	if (test_and_set_bit(pos, ia64_vector_mask))
 		goto again;
 	return vector;
 }

+int
+assign_irq_vector (int irq)
+{
+	int vector = assign_irq_vector_nopanic(irq);
+
+	if (vector < 0)
+		panic("assign_irq_vector: out of interrupt vectors!");
+
+	return vector;
+}
+
 void
 free_irq_vector (int vector)
 {
......
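The irq_ia64.c hunk splits vector allocation into a fallible core and a panicking wrapper, so a new caller can probe for a free vector and handle exhaustion itself while existing callers keep the old never-fails contract. The same shape in miniature (illustrative names; exit() standing in for panic()):

#include <stdio.h>
#include <stdlib.h>

static unsigned char slot_busy[8];

static int grab_slot_nopanic(void)
{
	for (int i = 0; i < 8; i++)
		if (!slot_busy[i]) {
			slot_busy[i] = 1;
			return i;
		}
	return -1;			/* out of slots: caller decides */
}

static int grab_slot(void)
{
	int slot = grab_slot_nopanic();

	if (slot < 0) {			/* legacy behaviour: hard failure */
		fprintf(stderr, "grab_slot: out of slots!\n");
		exit(1);
	}
	return slot;
}

int main(void)
{
	printf("got slot %d\n", grab_slot());
	return 0;
}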
@@ -479,14 +479,6 @@ typedef struct {

 #define PFM_CMD_ARG_MANY	-1 /* cannot be zero */

-typedef struct {
-	int	debug;		/* turn on/off debugging via syslog */
-	int	debug_ovfl;	/* turn on/off debug printk in overflow handler */
-	int	fastctxsw;	/* turn on/off fast (unsecure) ctxsw */
-	int	expert_mode;	/* turn on/off value checking */
-	int	debug_pfm_read;
-} pfm_sysctl_t;
-
 typedef struct {
 	unsigned long pfm_spurious_ovfl_intr_count;	/* keep track of spurious ovfl interrupts */
 	unsigned long pfm_replay_ovfl_intr_count;	/* keep track of replayed ovfl interrupts */
@@ -514,8 +506,8 @@ static LIST_HEAD(pfm_buffer_fmt_list);
 static pmu_config_t		*pmu_conf;

 /* sysctl() controls */
-static pfm_sysctl_t pfm_sysctl;
-int pfm_debug_var;
+pfm_sysctl_t pfm_sysctl;
+EXPORT_SYMBOL(pfm_sysctl);

 static ctl_table pfm_ctl_table[]={
 	{1, "debug", &pfm_sysctl.debug, sizeof(int), 0666, NULL, &proc_dointvec, NULL,},
@@ -1576,7 +1568,7 @@ pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
 		goto abort_locked;
 	}

-	DPRINT(("[%d] fd=%d type=%d\n", current->pid, msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type));
+	DPRINT(("fd=%d type=%d\n", msg->pfm_gen_msg.msg_ctx_fd, msg->pfm_gen_msg.msg_type));

 	ret = -EFAULT;
 	if(copy_to_user(buf, msg, sizeof(pfm_msg_t)) == 0) ret = sizeof(pfm_msg_t);
@@ -3695,8 +3687,6 @@ pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)

 	pfm_sysctl.debug = m == 0 ? 0 : 1;

-	pfm_debug_var = pfm_sysctl.debug;
-
 	printk(KERN_INFO "perfmon debugging %s (timing reset)\n", pfm_sysctl.debug ? "on" : "off");

 	if (m == 0) {
@@ -4996,13 +4986,21 @@ pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
 }

 static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds);

+/*
+ * pfm_handle_work() can be called with interrupts enabled
+ * (TIF_NEED_RESCHED) or disabled. The down_interruptible
+ * call may sleep, therefore we must re-enable interrupts
+ * to avoid deadlocks. It is safe to do so because this function
+ * is called ONLY when returning to user level (PUStk=1), in which case
+ * there is no risk of kernel stack overflow due to deep
+ * interrupt nesting.
+ */
 void
 pfm_handle_work(void)
 {
 	pfm_context_t *ctx;
 	struct pt_regs *regs;
-	unsigned long flags;
+	unsigned long flags, dummy_flags;
 	unsigned long ovfl_regs;
 	unsigned int reason;
 	int ret;
@@ -5039,18 +5037,15 @@ pfm_handle_work(void)

 	//if (CTX_OVFL_NOBLOCK(ctx)) goto skip_blocking;
 	if (reason == PFM_TRAP_REASON_RESET) goto skip_blocking;

+	/*
+	 * restore interrupt mask to what it was on entry.
+	 * Could be enabled/diasbled.
+	 */
 	UNPROTECT_CTX(ctx, flags);

-	/*
-	 * pfm_handle_work() is currently called with interrupts disabled.
-	 * The down_interruptible call may sleep, therefore we
-	 * must re-enable interrupts to avoid deadlocks. It is
-	 * safe to do so because this function is called ONLY
-	 * when returning to user level (PUStk=1), in which case
-	 * there is no risk of kernel stack overflow due to deep
-	 * interrupt nesting.
-	 */
-	BUG_ON(flags & IA64_PSR_I);
+	/*
+	 * force interrupt enable because of down_interruptible()
+	 */
 	local_irq_enable();

 	DPRINT(("before block sleeping\n"));
@@ -5064,12 +5059,12 @@ pfm_handle_work(void)

 	DPRINT(("after block sleeping ret=%d\n", ret));

 	/*
-	 * disable interrupts to restore state we had upon entering
-	 * this function
+	 * lock context and mask interrupts again
+	 * We save flags into a dummy because we may have
+	 * altered interrupts mask compared to entry in this
+	 * function.
 	 */
-	local_irq_disable();
-	PROTECT_CTX(ctx, flags);
+	PROTECT_CTX(ctx, dummy_flags);

 	/*
 	 * we need to read the ovfl_regs only after wake-up
ctx->ctx_ovfl_regs[0] = 0UL; ctx->ctx_ovfl_regs[0] = 0UL;
nothing_to_do: nothing_to_do:
/*
* restore flags as they were upon entry
*/
UNPROTECT_CTX(ctx, flags); UNPROTECT_CTX(ctx, flags);
} }
......
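The pfm_handle_work() rework above is subtle: the function may now be entered with interrupts either on or off, so the flags captured by the first PROTECT_CTX() are the only state ever restored; after sleeping, the context is re-locked into a throwaway dummy_flags that is deliberately discarded. A compilable toy model of that discipline (a single int stands in for the interrupt-enable bit; lock_irqsave/unlock_irqrestore are illustrative stand-ins for PROTECT_CTX/UNPROTECT_CTX):

#include <stdio.h>

static int irqs_enabled;

static void lock_irqsave(unsigned long *flags)
{
	*flags = (unsigned long)irqs_enabled;	/* remember caller's state */
	irqs_enabled = 0;			/* "mask interrupts" */
}

static void unlock_irqrestore(unsigned long flags)
{
	irqs_enabled = (int)flags;
}

static void handle_work(void)
{
	unsigned long flags, dummy_flags;

	lock_irqsave(&flags);		/* entry state captured once */
	unlock_irqrestore(flags);	/* drop the lock */
	irqs_enabled = 1;		/* force-enable: we are about to sleep */
	/* ... down_interruptible() would block here ... */
	lock_irqsave(&dummy_flags);	/* state now differs from entry... */
	unlock_irqrestore(flags);	/* ...so restore the *saved* state */
}

int main(void)
{
	irqs_enabled = 0;		/* pretend we were entered masked */
	handle_work();
	printf("irqs_enabled on exit: %d (matches entry)\n", irqs_enabled);
	return 0;
}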
@@ -20,24 +20,17 @@ MODULE_AUTHOR("Stephane Eranian <eranian@hpl.hp.com>");
 MODULE_DESCRIPTION("perfmon default sampling format");
 MODULE_LICENSE("GPL");

-MODULE_PARM(debug, "i");
-MODULE_PARM_DESC(debug, "debug");
-
-MODULE_PARM(debug_ovfl, "i");
-MODULE_PARM_DESC(debug_ovfl, "debug ovfl");
-
 #define DEFAULT_DEBUG 1

 #ifdef DEFAULT_DEBUG
 #define DPRINT(a) \
 	do { \
-		if (unlikely(debug >0)) { printk("%s.%d: CPU%d ", __FUNCTION__, __LINE__, smp_processor_id()); printk a; } \
+		if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d ", __FUNCTION__, __LINE__, smp_processor_id()); printk a; } \
 	} while (0)

 #define DPRINT_ovfl(a) \
 	do { \
-		if (unlikely(debug_ovfl >0)) { printk("%s.%d: CPU%d ", __FUNCTION__, __LINE__, smp_processor_id()); printk a; } \
+		if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d ", __FUNCTION__, __LINE__, smp_processor_id()); printk a; } \
 	} while (0)

 #else

@@ -45,8 +38,6 @@ MODULE_PARM_DESC(debug_ovfl, "debug ovfl");
 #define DPRINT_ovfl(a)
 #endif

-static int debug, debug_ovfl;
-
 static int
 default_validate(struct task_struct *task, unsigned int flags, int cpu, void *data)
 {
......
@@ -4,10 +4,15 @@
 * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
-* Copyright (C) 2000, Rohit Seth <rohit.seth@intel.com>
+* Copyright (C) 2000, 2004 Intel Corp
+*	Rohit Seth <rohit.seth@intel.com>
+*	Suresh Siddha <suresh.b.siddha@intel.com>
+*	Gordon Jin <gordon.jin@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
+* 12/26/04 S.Siddha, G.Jin, R.Seth
+*			Add multi-threading and multi-core detection
 * 11/12/01 D.Mosberger Convert get_cpuinfo() to seq_file based show_cpuinfo().
 * 04/04/00 D.Mosberger renamed cpu_initialized to cpu_online_map
 * 03/31/00 R.Seth	cpu_initialized and current->processor fixes
@@ -296,6 +301,34 @@ mark_bsp_online (void)
 #endif
 }

+#ifdef CONFIG_SMP
+static void
+check_for_logical_procs (void)
+{
+	pal_logical_to_physical_t info;
+	s64 status;
+
+	status = ia64_pal_logical_to_phys(0, &info);
+	if (status == -1) {
+		printk(KERN_INFO "No logical to physical processor mapping "
+		       "available\n");
+		return;
+	}
+	if (status) {
+		printk(KERN_ERR "ia64_pal_logical_to_phys failed with %ld\n",
+		       status);
+		return;
+	}
+
+	/*
+	 * Total number of siblings that BSP has.  Though not all of them
+	 * may have booted successfully. The correct number of siblings
+	 * booted is in info.overview_num_log.
+	 */
+	smp_num_siblings = info.overview_tpc;
+	smp_num_cpucores = info.overview_cpp;
+}
+#endif
+
 void __init
 setup_arch (char **cmdline_p)
 {
@@ -356,6 +389,19 @@ setup_arch (char **cmdline_p)

 #ifdef CONFIG_SMP
 	cpu_physical_id(0) = hard_smp_processor_id();
+
+	cpu_set(0, cpu_sibling_map[0]);
+	cpu_set(0, cpu_core_map[0]);
+
+	check_for_logical_procs();
+	if (smp_num_cpucores > 1)
+		printk(KERN_INFO
+		       "cpu package is Multi-Core capable: number of cores=%d\n",
+		       smp_num_cpucores);
+	if (smp_num_siblings > 1)
+		printk(KERN_INFO
+		       "cpu package is Multi-Threading capable: number of siblings=%d\n",
+		       smp_num_siblings);
 #endif

 	cpu_init();	/* initialize the bootstrap CPU */
@@ -459,12 +505,23 @@ show_cpuinfo (struct seq_file *m, void *v)
 		   "cpu regs   : %u\n"
 		   "cpu MHz    : %lu.%06lu\n"
 		   "itc MHz    : %lu.%06lu\n"
-		   "BogoMIPS   : %lu.%02lu\n\n",
+		   "BogoMIPS   : %lu.%02lu\n",
 		   cpunum, c->vendor, family, c->model, c->revision, c->archrev,
 		   features, c->ppn, c->number,
 		   c->proc_freq / 1000000, c->proc_freq % 1000000,
 		   c->itc_freq / 1000000, c->itc_freq % 1000000,
 		   lpj*HZ/500000, (lpj*HZ/5000) % 100);
+#ifdef CONFIG_SMP
+	seq_printf(m, "siblings   : %u\n", c->num_log);
+	if (c->threads_per_core > 1 || c->cores_per_socket > 1)
+		seq_printf(m,
+			   "physical id: %u\n"
+			   "core id    : %u\n"
+			   "thread id  : %u\n",
+			   c->socket_id, c->core_id, c->thread_id);
+#endif
+	seq_printf(m,"\n");
+
 	return 0;
 }
@@ -533,6 +590,14 @@ identify_cpu (struct cpuinfo_ia64 *c)
 	memcpy(c->vendor, cpuid.field.vendor, 16);
 #ifdef CONFIG_SMP
 	c->cpu = smp_processor_id();
+
+	/* below default values will be overwritten by identify_siblings()
+	 * for Multi-Threading/Multi-Core capable cpu's
+	 */
+	c->threads_per_core = c->cores_per_socket = c->num_log = 1;
+	c->socket_id = -1;
+
+	identify_siblings(c);
 #endif
 	c->ppn = cpuid.field.ppn;
 	c->number = cpuid.field.number;
......
 /*
 * SMP boot-related support
 *
-* Copyright (C) 1998-2003 Hewlett-Packard Co
+* Copyright (C) 1998-2003, 2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
+* Copyright (C) 2001, 2004-2005 Intel Corp
+*	Rohit Seth <rohit.seth@intel.com>
+*	Suresh Siddha <suresh.b.siddha@intel.com>
+*	Gordon Jin <gordon.jin@intel.com>
+*	Ashok Raj <ashok.raj@intel.com>
 *
 * 01/05/16 Rohit Seth <rohit.seth@intel.com>	Moved SMP booting functions from smp.c to here.
 * 01/04/27 David Mosberger <davidm@hpl.hp.com>	Added ITC synching code.

@@ -10,6 +15,11 @@
 *						smp_boot_cpus()/smp_commence() is replaced by
 *						smp_prepare_cpus()/__cpu_up()/smp_cpus_done().
 * 04/06/21 Ashok Raj		<ashok.raj@intel.com> Added CPU Hotplug Support
+* 04/12/26 Jin Gordon <gordon.jin@intel.com>
+* 04/12/26 Rohit Seth <rohit.seth@intel.com>
+*						Add multi-threading and multi-core detection
+* 05/01/30 Suresh Siddha <suresh.b.siddha@intel.com>
+*						Setup cpu_sibling_map and cpu_core_map
 */

 #include <linux/config.h>
@@ -122,6 +132,11 @@ EXPORT_SYMBOL(cpu_online_map);
 cpumask_t cpu_possible_map;
 EXPORT_SYMBOL(cpu_possible_map);

+cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
+cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
+int smp_num_siblings = 1;
+int smp_num_cpucores = 1;
+
 /* which logical CPU number maps to which CPU (physical APIC ID) */
 volatile int ia64_cpu_to_sapicid[NR_CPUS];
 EXPORT_SYMBOL(ia64_cpu_to_sapicid);
@@ -156,7 +171,8 @@ sync_master (void *arg)
 	local_irq_save(flags);
 	{
 		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; ++i) {
-			while (!go[MASTER]);
+			while (!go[MASTER])
+				cpu_relax();
 			go[MASTER] = 0;
 			go[SLAVE] = ia64_get_itc();
 		}

@@ -179,7 +195,8 @@ get_delta (long *rt, long *master)
 	for (i = 0; i < NUM_ITERS; ++i) {
 		t0 = ia64_get_itc();
 		go[MASTER] = 1;
-		while (!(tm = go[SLAVE]));
+		while (!(tm = go[SLAVE]))
+			cpu_relax();
 		go[SLAVE] = 0;
 		t1 = ia64_get_itc();

@@ -258,7 +275,8 @@ ia64_sync_itc (unsigned int master)
 		return;
 	}

-	while (go[MASTER]);	/* wait for master to be ready */
+	while (go[MASTER])
+		cpu_relax();	/* wait for master to be ready */

 	spin_lock_irqsave(&itc_sync_lock, flags);
 	{
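All three hunks above are ITC-synchronization busy-waits, and the fix is the same in each: a bare `while (!go);` spin monopolizes execution resources that an SMT sibling sharing the core could use, so the loop body gains cpu_relax(). A userspace analogue follows (x86 `_mm_pause()` standing in for the kernel's cpu_relax(); the atomic load plays the role the kernel gets from go[] being volatile):

#include <stdatomic.h>
#include <immintrin.h>	/* x86-only, for _mm_pause() */

static _Atomic int go;

static void wait_for_go(void)
{
	while (!atomic_load_explicit(&go, memory_order_acquire))
		_mm_pause();	/* be polite to an SMT sibling while spinning */
}

int main(void)
{
	atomic_store_explicit(&go, 1, memory_order_release);
	wait_for_go();		/* returns immediately in this toy */
	return 0;
}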
@@ -595,7 +613,68 @@ void __devinit smp_prepare_boot_cpu(void)
 	cpu_set(smp_processor_id(), cpu_callin_map);
 }

+/*
+ * mt_info[] is a temporary store for all info returned by
+ * PAL_LOGICAL_TO_PHYSICAL, to be copied into cpuinfo_ia64 when the
+ * specific cpu comes.
+ */
+static struct {
+	__u32	socket_id;
+	__u16	core_id;
+	__u16	thread_id;
+	__u16	proc_fixed_addr;
+	__u8	valid;
+} mt_info[NR_CPUS] __devinit;
+
 #ifdef CONFIG_HOTPLUG_CPU
+static inline void
+remove_from_mtinfo(int cpu)
+{
+	int i;
+
+	for_each_cpu(i)
+		if (mt_info[i].valid && mt_info[i].socket_id ==
+		    cpu_data(cpu)->socket_id)
+			mt_info[i].valid = 0;
+}
+
+static inline void
+clear_cpu_sibling_map(int cpu)
+{
+	int i;
+
+	for_each_cpu_mask(i, cpu_sibling_map[cpu])
+		cpu_clear(cpu, cpu_sibling_map[i]);
+	for_each_cpu_mask(i, cpu_core_map[cpu])
+		cpu_clear(cpu, cpu_core_map[i]);
+
+	cpu_sibling_map[cpu] = cpu_core_map[cpu] = CPU_MASK_NONE;
+}
+
+static void
+remove_siblinginfo(int cpu)
+{
+	int last = 0;
+
+	if (cpu_data(cpu)->threads_per_core == 1 &&
+	    cpu_data(cpu)->cores_per_socket == 1) {
+		cpu_clear(cpu, cpu_core_map[cpu]);
+		cpu_clear(cpu, cpu_sibling_map[cpu]);
+		return;
+	}
+
+	last = (cpus_weight(cpu_core_map[cpu]) == 1 ? 1 : 0);
+
+	/* remove it from all sibling map's */
+	clear_cpu_sibling_map(cpu);
+
+	/* if this cpu is the last in the core group, remove all its info
+	 * from mt_info structure
+	 */
+	if (last)
+		remove_from_mtinfo(cpu);
+}
+
 extern void fixup_irqs(void);
 /* must be called with cpucontrol mutex held */
 int __cpu_disable(void)
@@ -608,6 +687,7 @@ int __cpu_disable(void)
 	if (cpu == 0)
 		return -EBUSY;

+	remove_siblinginfo(cpu);
 	fixup_irqs();
 	local_flush_tlb_all();
 	cpu_clear(cpu, cpu_callin_map);
@@ -660,6 +740,23 @@ smp_cpus_done (unsigned int dummy)
 	       (int)num_online_cpus(), bogosum/(500000/HZ), (bogosum/(5000/HZ))%100);
 }

+static inline void __devinit
+set_cpu_sibling_map(int cpu)
+{
+	int i;
+
+	for_each_online_cpu(i) {
+		if ((cpu_data(cpu)->socket_id == cpu_data(i)->socket_id)) {
+			cpu_set(i, cpu_core_map[cpu]);
+			cpu_set(cpu, cpu_core_map[i]);
+			if (cpu_data(cpu)->core_id == cpu_data(i)->core_id) {
+				cpu_set(i, cpu_sibling_map[cpu]);
+				cpu_set(cpu, cpu_sibling_map[i]);
+			}
+		}
+	}
+}
+
 int __devinit
 __cpu_up (unsigned int cpu)
 {

@@ -682,6 +779,15 @@ __cpu_up (unsigned int cpu)
 	if (ret < 0)
 		return ret;

+	if (cpu_data(cpu)->threads_per_core == 1 &&
+	    cpu_data(cpu)->cores_per_socket == 1) {
+		cpu_set(cpu, cpu_sibling_map[cpu]);
+		cpu_set(cpu, cpu_core_map[cpu]);
+		return 0;
+	}
+
+	set_cpu_sibling_map(cpu);
+
 	return 0;
 }
@@ -709,3 +815,106 @@ init_smp_config(void)
 		       ia64_sal_strerror(sal_ret));
 }

+static inline int __devinit
+check_for_mtinfo_index(void)
+{
+	int i;
+
+	for_each_cpu(i)
+		if (!mt_info[i].valid)
+			return i;
+
+	return -1;
+}
+
+/*
+ * Search the mt_info to find out if this socket's cid/tid information is
+ * cached or not. If the socket exists, fill in the core_id and thread_id
+ * in cpuinfo
+ */
+static int __devinit
+check_for_new_socket(__u16 logical_address, struct cpuinfo_ia64 *c)
+{
+	int i;
+	__u32 sid = c->socket_id;
+
+	for_each_cpu(i) {
+		if (mt_info[i].valid && mt_info[i].proc_fixed_addr == logical_address
+		    && mt_info[i].socket_id == sid) {
+			c->core_id = mt_info[i].core_id;
+			c->thread_id = mt_info[i].thread_id;
+			return 1; /* not a new socket */
+		}
+	}
+	return 0;
+}
+
+/*
+ * identify_siblings(cpu) gets called from identify_cpu. This populates the
+ * information related to logical execution units in per_cpu_data structure.
+ */
+void __devinit
+identify_siblings(struct cpuinfo_ia64 *c)
+{
+	s64 status;
+	u16 pltid;
+	u64 proc_fixed_addr;
+	int count, i;
+	pal_logical_to_physical_t info;
+
+	if (smp_num_cpucores == 1 && smp_num_siblings == 1)
+		return;
+
+	if ((status = ia64_pal_logical_to_phys(0, &info)) != PAL_STATUS_SUCCESS) {
+		printk(KERN_ERR "ia64_pal_logical_to_phys failed with %ld\n",
+		       status);
+		return;
+	}
+	if ((status = ia64_sal_physical_id_info(&pltid)) != PAL_STATUS_SUCCESS) {
+		printk(KERN_ERR "ia64_sal_pltid failed with %ld\n", status);
+		return;
+	}
+	if ((status = ia64_pal_fixed_addr(&proc_fixed_addr)) != PAL_STATUS_SUCCESS) {
+		printk(KERN_ERR "ia64_pal_fixed_addr failed with %ld\n", status);
+		return;
+	}
+
+	c->socket_id = (pltid << 8) | info.overview_ppid;
+	c->cores_per_socket = info.overview_cpp;
+	c->threads_per_core = info.overview_tpc;
+	count = c->num_log = info.overview_num_log;
+
+	/* If the thread and core id information is already cached, then
+	 * we will simply update cpu_info and return. Otherwise, we will
+	 * do the PAL calls and cache core and thread id's of all the siblings.
+	 */
+	if (check_for_new_socket(proc_fixed_addr, c))
+		return;
+
+	for (i = 0; i < count; i++) {
+		int index;
+
+		if (i && (status = ia64_pal_logical_to_phys(i, &info))
+			  != PAL_STATUS_SUCCESS) {
+			printk(KERN_ERR "ia64_pal_logical_to_phys failed"
+			       " with %ld\n", status);
+			return;
+		}
+		if (info.log2_la == proc_fixed_addr) {
+			c->core_id = info.log1_cid;
+			c->thread_id = info.log1_tid;
+		}
+
+		index = check_for_mtinfo_index();
+		/* We will not do the mt_info caching optimization in this case.
+		 */
+		if (index < 0)
+			continue;
+
+		mt_info[index].valid = 1;
+		mt_info[index].socket_id = c->socket_id;
+		mt_info[index].core_id = info.log1_cid;
+		mt_info[index].thread_id = info.log1_tid;
+		mt_info[index].proc_fixed_addr = info.log2_la;
+	}
+}
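The smpboot.c additions keep cpu_core_map[] and cpu_sibling_map[] symmetric: bringing a CPU up sets the bit in both directions for every online CPU sharing its socket (and core), and tearing it down clears both directions. A toy model of that invariant (a bool matrix instead of cpumask_t; socket layout is made up):

#include <assert.h>
#include <stdbool.h>

#define NCPUS 8

static bool core_map[NCPUS][NCPUS];
static bool online[NCPUS];
static const int socket_of[NCPUS] = { 0, 0, 0, 0, 1, 1, 1, 1 };

static void map_online(int cpu)
{
	online[cpu] = true;
	for (int i = 0; i < NCPUS; i++)
		if (online[i] && socket_of[i] == socket_of[cpu])
			core_map[cpu][i] = core_map[i][cpu] = true;	/* both directions */
}

static void map_offline(int cpu)
{
	online[cpu] = false;
	for (int i = 0; i < NCPUS; i++)
		core_map[cpu][i] = core_map[i][cpu] = false;	/* both directions */
}

int main(void)
{
	map_online(0); map_online(1); map_online(4);
	assert(core_map[0][1] && core_map[1][0]);	/* same socket */
	assert(!core_map[0][4]);			/* different socket */
	map_offline(1);
	assert(!core_map[0][1] && !core_map[1][0]);	/* symmetric removal */
	return 0;
}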
@@ -1943,23 +1943,30 @@ EXPORT_SYMBOL(unw_unwind);
 int
 unw_unwind_to_user (struct unw_frame_info *info)
 {
-	unsigned long ip, sp;
+	unsigned long ip, sp, pr = 0;

 	while (unw_unwind(info) >= 0) {
-		if (unw_get_rp(info, &ip) < 0) {
-			unw_get_ip(info, &ip);
-			UNW_DPRINT(0, "unwind.%s: failed to read return pointer (ip=0x%lx)\n",
-				   __FUNCTION__, ip);
-			return -1;
-		}
 		unw_get_sp(info, &sp);
-		if (sp >= (unsigned long)info->task + IA64_STK_OFFSET)
+		if ((long)((unsigned long)info->task + IA64_STK_OFFSET - sp)
+		    < IA64_PT_REGS_SIZE) {
+			UNW_DPRINT(0, "unwind.%s: ran off the top of the kernel stack\n",
+				   __FUNCTION__);
 			break;
-		if (ip < FIXADDR_USER_END)
+		}
+		if (unw_is_intr_frame(info) &&
+		    (pr & (1UL << PRED_USER_STACK)))
 			return 0;
+		if (unw_get_pr (info, &pr) < 0) {
+			unw_get_rp(info, &ip);
+			UNW_DPRINT(0, "unwind.%s: failed to read "
+				   "predicate register (ip=0x%lx)\n",
+				   __FUNCTION__, ip);
+			return -1;
+		}
 	}
 	unw_get_ip(info, &ip);
-	UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n", __FUNCTION__, ip);
+	UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n",
+		   __FUNCTION__, ip);
 	return -1;
 }
 EXPORT_SYMBOL(unw_unwind_to_user);
......
@@ -300,7 +300,7 @@ EK(.ex_handler,  (p[D])	st8 [dst1] = t15, 4*8)
 	add	src_pre_mem=0,src0	// prefetch src pointer
 	add	dst_pre_mem=0,dst0	// prefetch dest pointer
 	and	src0=-8,src0		// 1st src pointer
-(p7)	mov	ar.lc = r21
+(p7)	mov	ar.lc = cnt
 (p8)	mov	ar.lc = r0
 	;;
 	TEXT_ALIGN(32)
......
@@ -61,7 +61,8 @@ show_mem (void)
 	printk("%d reserved pages\n", reserved);
 	printk("%d pages shared\n", shared);
 	printk("%d pages swap cached\n", cached);
-	printk("%ld pages in page table cache\n", pgtable_cache_size);
+	printk("%ld pages in page table cache\n",
+	       pgtable_quicklist_total_size());
 }

 /* physical address where the bootmem map is located */
......
@@ -582,7 +582,8 @@ void show_mem(void)
 	printk("%d reserved pages\n", total_reserved);
 	printk("%d pages shared\n", total_shared);
 	printk("%d pages swap cached\n", total_cached);
-	printk("Total of %ld pages in page table cache\n", pgtable_cache_size);
+	printk("Total of %ld pages in page table cache\n",
+	       pgtable_quicklist_total_size());
 	printk("%d free buffer pages\n", nr_free_buffer_pages());
 }
......
@@ -209,10 +209,13 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	}

   no_context:
-	if (isr & IA64_ISR_SP) {
+	if ((isr & IA64_ISR_SP)
+	    || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH))
+	{
 		/*
-		 * This fault was due to a speculative load set the "ed" bit in the psr to
-		 * ensure forward progress (target register will get a NaT).
+		 * This fault was due to a speculative load or lfetch.fault, set the "ed"
+		 * bit in the psr to ensure forward progress. (Target register will get a
+		 * NaT for ld.s, lfetch will be canceled.)
 		 */
 		ia64_psr(regs)->ed = 1;
 		return;
......
@@ -39,6 +39,9 @@

 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

+DEFINE_PER_CPU(unsigned long *, __pgtable_quicklist);
+DEFINE_PER_CPU(long, __pgtable_quicklist_size);
+
 extern void ia64_tlb_init (void);

 unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;
@@ -50,27 +53,53 @@ struct page *vmem_map;
 EXPORT_SYMBOL(vmem_map);
 #endif

-static int pgt_cache_water[2] = { 25, 50 };
-
-struct page *zero_page_memmap_ptr;	/* map entry for zero page */
+struct page *zero_page_memmap_ptr;	/* map entry for zero page */
 EXPORT_SYMBOL(zero_page_memmap_ptr);

+#define MIN_PGT_PAGES			25UL
+#define MAX_PGT_FREES_PER_PASS		16L
+#define PGT_FRACTION_OF_NODE_MEM	16
+
+static inline long
+max_pgt_pages(void)
+{
+	u64 node_free_pages, max_pgt_pages;
+
+#ifndef CONFIG_NUMA
+	node_free_pages = nr_free_pages();
+#else
+	node_free_pages = nr_free_pages_pgdat(NODE_DATA(numa_node_id()));
+#endif
+	max_pgt_pages = node_free_pages / PGT_FRACTION_OF_NODE_MEM;
+	max_pgt_pages = max(max_pgt_pages, MIN_PGT_PAGES);
+	return max_pgt_pages;
+}
+
+static inline long
+min_pages_to_free(void)
+{
+	long pages_to_free;
+
+	pages_to_free = pgtable_quicklist_size - max_pgt_pages();
+	pages_to_free = min(pages_to_free, MAX_PGT_FREES_PER_PASS);
+	return pages_to_free;
+}
+
 void
-check_pgt_cache (void)
+check_pgt_cache(void)
 {
-	int low, high;
+	long pages_to_free;

-	low = pgt_cache_water[0];
-	high = pgt_cache_water[1];
+	if (unlikely(pgtable_quicklist_size <= MIN_PGT_PAGES))
+		return;

 	preempt_disable();
-	if (pgtable_cache_size > (u64) high) {
-		do {
-			if (pgd_quicklist)
-				free_page((unsigned long)pgd_alloc_one_fast(NULL));
-			if (pmd_quicklist)
-				free_page((unsigned long)pmd_alloc_one_fast(NULL, 0));
-		} while (pgtable_cache_size > (u64) low);
+	while (unlikely((pages_to_free = min_pages_to_free()) > 0)) {
+		while (pages_to_free--) {
+			free_page((unsigned long)pgtable_quicklist_alloc());
+		}
+		preempt_enable();
+		preempt_disable();
 	}
 	preempt_enable();
 }
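check_pgt_cache() now trims the per-CPU quicklist against a dynamic ceiling, a fraction (1/PGT_FRACTION_OF_NODE_MEM) of the node's free pages but never below MIN_PGT_PAGES, freeing at most MAX_PGT_FREES_PER_PASS pages per pass and re-enabling preemption between passes so the loop stays latency-friendly. The arithmetic in isolation (constants mirror the hunk; the starting numbers are made up):

#include <stdio.h>

#define MIN_PGT_PAGES		25L
#define MAX_FREES_PER_PASS	16L
#define FRACTION_OF_FREE_MEM	16L

static long cached_pages = 200;	/* made-up starting state */
static long free_pages = 1600;

static long pages_to_free(void)
{
	long cap = free_pages / FRACTION_OF_FREE_MEM;	/* dynamic ceiling */
	long excess;

	if (cap < MIN_PGT_PAGES)
		cap = MIN_PGT_PAGES;			/* floor */
	excess = cached_pages - cap;
	return excess < MAX_FREES_PER_PASS ? excess : MAX_FREES_PER_PASS;
}

int main(void)
{
	long n;

	while ((n = pages_to_free()) > 0) {
		cached_pages -= n;	/* one bounded batch per pass */
		printf("freed %ld, %ld still cached\n", n, cached_pages);
	}
	return 0;
}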
@@ -523,11 +552,14 @@ void
 mem_init (void)
 {
 	long reserved_pages, codesize, datasize, initsize;
-	unsigned long num_pgt_pages;
 	pg_data_t *pgdat;
 	int i;
 	static struct kcore_list kcore_mem, kcore_vmem, kcore_kernel;

+	BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
+	BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
+	BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);
+
 #ifdef CONFIG_PCI
 	/*
 	 * This needs to be called _after_ the command line has been parsed but _before_
@@ -564,18 +596,6 @@ mem_init (void)
 	       num_physpages << (PAGE_SHIFT - 10), codesize >> 10,
 	       reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10);

-	/*
-	 * Allow for enough (cached) page table pages so that we can map the entire memory
-	 * at least once.  Each task also needs a couple of page tables pages, so add in a
-	 * fudge factor for that (don't use "threads-max" here; that would be wrong!).
-	 * Don't allow the cache to be more than 10% of total memory, though.
-	 */
-#	define NUM_TASKS	500	/* typical number of tasks */
-	num_pgt_pages = nr_free_pages() / PTRS_PER_PGD + NUM_TASKS;
-	if (num_pgt_pages > nr_free_pages() / 10)
-		num_pgt_pages = nr_free_pages() / 10;
-	if (num_pgt_pages > (u64) pgt_cache_water[1])
-		pgt_cache_water[1] = num_pgt_pages;
-
 	/*
 	 * For fsyscall entrpoints with no light-weight handler, use the ordinary
@@ -123,9 +123,11 @@ pcibr_lock(struct pcibus_info *pcibus_info)
 }
 #define pcibr_unlock(pcibus_info, flag)  spin_unlock_irqrestore(&pcibus_info->pbi_lock, flag)

+extern int pcibr_init_provider(void);
 extern void *pcibr_bus_fixup(struct pcibus_bussoft *);
-extern uint64_t pcibr_dma_map(struct pcidev_info *, unsigned long, size_t, unsigned int);
-extern void pcibr_dma_unmap(struct pcidev_info *, dma_addr_t, int);
+extern dma_addr_t pcibr_dma_map(struct pci_dev *, unsigned long, size_t);
+extern dma_addr_t pcibr_dma_map_consistent(struct pci_dev *, unsigned long, size_t);
+extern void pcibr_dma_unmap(struct pci_dev *, dma_addr_t, int);

 /*
  * prototypes for the bridge asic register access routines in pcibr_reg.c
......
@@ -10,3 +10,4 @@
 obj-y				+= setup.o bte.o bte_error.o irq.o mca.o idle.o \
 				   huberror.o io_init.o iomv.o klconflib.o sn2/
 obj-$(CONFIG_IA64_GENERIC)	+= machvec.o
+obj-$(CONFIG_SGI_TIOCX)		+= tiocx.o
@@ -3,7 +3,7 @@
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
-* Copyright (c) 2000-2003 Silicon Graphics, Inc.  All Rights Reserved.
+* Copyright (c) 2000-2005 Silicon Graphics, Inc.  All Rights Reserved.
 */

 #include <linux/config.h>

@@ -170,10 +170,6 @@ bte_result_t bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification)
 	/* Initialize the notification to a known value. */
 	*bte->most_rcnt_na = BTE_WORD_BUSY;

-	/* Set the status reg busy bit and transfer length */
-	BTE_PRINTKV(("IBLS = 0x%lx\n", IBLS_BUSY | transfer_size));
-	BTE_LNSTAT_STORE(bte, IBLS_BUSY | transfer_size);
-
 	/* Set the source and destination registers */
 	BTE_PRINTKV(("IBSA = 0x%lx)\n", (TO_PHYS(src))));
 	BTE_SRC_STORE(bte, TO_PHYS(src));

@@ -188,7 +184,7 @@ bte_result_t bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification)

 	/* Initiate the transfer */
 	BTE_PRINTK(("IBCT = 0x%lx)\n", BTE_VALID_MODE(mode)));
-	BTE_CTRL_STORE(bte, BTE_VALID_MODE(mode));
+	BTE_START_TRANSFER(bte, transfer_size, BTE_VALID_MODE(mode));

 	itc_end = ia64_get_itc() + (40000000 * local_cpu_data->cyc_per_usec);

@@ -429,10 +425,16 @@ void bte_init_node(nodepda_t * mynodepda, cnodeid_t cnode)
 	mynodepda->bte_recovery_timer.data = (unsigned long)mynodepda;

 	for (i = 0; i < BTES_PER_NODE; i++) {
-		/* Which link status register should we use? */
-		unsigned long link_status = (i == 0 ? IIO_IBLS0 : IIO_IBLS1);
-		mynodepda->bte_if[i].bte_base_addr = (u64 *)
-		    REMOTE_HUB_ADDR(cnodeid_to_nasid(cnode), link_status);
+		u64 *base_addr;
+
+		/* Which link status register should we use? */
+		base_addr = (u64 *)
+		    REMOTE_HUB_ADDR(cnodeid_to_nasid(cnode), BTE_BASE_ADDR(i));
+		mynodepda->bte_if[i].bte_base_addr = base_addr;
+		mynodepda->bte_if[i].bte_source_addr = BTE_SOURCE_ADDR(base_addr);
+		mynodepda->bte_if[i].bte_destination_addr = BTE_DEST_ADDR(base_addr);
+		mynodepda->bte_if[i].bte_control_addr = BTE_CTRL_ADDR(base_addr);
+		mynodepda->bte_if[i].bte_notify_addr = BTE_NOTIF_ADDR(base_addr);

 		/*
 		 * Initialize the notification and spinlock
@@ -3,7 +3,7 @@
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
-* Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
+* Copyright (c) 2000-2005 Silicon Graphics, Inc.  All Rights Reserved.
 */

 #include <linux/types.h>

@@ -33,48 +33,28 @@ void bte_error_handler(unsigned long);
 * Wait until all BTE related CRBs are completed
 * and then reset the interfaces.
 */
-void bte_error_handler(unsigned long _nodepda)
+void shub1_bte_error_handler(unsigned long _nodepda)
 {
 	struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda;
-	spinlock_t *recovery_lock = &err_nodepda->bte_recovery_lock;
 	struct timer_list *recovery_timer = &err_nodepda->bte_recovery_timer;
 	nasid_t nasid;
 	int i;
 	int valid_crbs;
-	unsigned long irq_flags;
-	volatile u64 *notify;
-	bte_result_t bh_error;
 	ii_imem_u_t imem;	/* II IMEM Register */
 	ii_icrb0_d_u_t icrbd;	/* II CRB Register D */
 	ii_ibcr_u_t ibcr;
 	ii_icmr_u_t icmr;
 	ii_ieclr_u_t ieclr;

-	BTE_PRINTK(("bte_error_handler(%p) - %d\n", err_nodepda,
+	BTE_PRINTK(("shub1_bte_error_handler(%p) - %d\n", err_nodepda,
 		    smp_processor_id()));

-	spin_lock_irqsave(recovery_lock, irq_flags);
-
 	if ((err_nodepda->bte_if[0].bh_error == BTE_SUCCESS) &&
 	    (err_nodepda->bte_if[1].bh_error == BTE_SUCCESS)) {
 		BTE_PRINTK(("eh:%p:%d Nothing to do.\n", err_nodepda,
 			    smp_processor_id()));
-		spin_unlock_irqrestore(recovery_lock, irq_flags);
 		return;
 	}

-	/*
-	 * Lock all interfaces on this node to prevent new transfers
-	 * from being queued.
-	 */
-	for (i = 0; i < BTES_PER_NODE; i++) {
-		if (err_nodepda->bte_if[i].cleanup_active) {
-			continue;
-		}
-		spin_lock(&err_nodepda->bte_if[i].spinlock);
-		BTE_PRINTK(("eh:%p:%d locked %d\n", err_nodepda,
-			    smp_processor_id(), i));
-		err_nodepda->bte_if[i].cleanup_active = 1;
-	}
-
 	/* Determine information about our hub */
 	nasid = cnodeid_to_nasid(err_nodepda->bte_if[0].bte_cnode);

@@ -101,7 +81,6 @@ void bte_error_handler(unsigned long _nodepda)
 		mod_timer(recovery_timer, HZ * 5);
 		BTE_PRINTK(("eh:%p:%d Marked Giving up\n", err_nodepda,
 			    smp_processor_id()));
-		spin_unlock_irqrestore(recovery_lock, irq_flags);
 		return;
 	}
 	if (icmr.ii_icmr_fld_s.i_crb_vld != 0) {

@@ -120,8 +99,6 @@ void bte_error_handler(unsigned long _nodepda)
 				BTE_PRINTK(("eh:%p:%d Valid %d, Giving up\n",
 					    err_nodepda, smp_processor_id(),
 					    i));
-				spin_unlock_irqrestore(recovery_lock,
-						       irq_flags);
 				return;
 			}
 		}

@@ -146,6 +123,51 @@ void bte_error_handler(unsigned long _nodepda)
 	ibcr.ii_ibcr_fld_s.i_soft_reset = 1;
 	REMOTE_HUB_S(nasid, IIO_IBCR, ibcr.ii_ibcr_regval);

+	del_timer(recovery_timer);
+}
+
+/*
+ * Wait until all BTE related CRBs are completed
+ * and then reset the interfaces.
+ */
+void bte_error_handler(unsigned long _nodepda)
+{
+	struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda;
+	spinlock_t *recovery_lock = &err_nodepda->bte_recovery_lock;
+	int i;
+	nasid_t nasid;
+	unsigned long irq_flags;
+	volatile u64 *notify;
+	bte_result_t bh_error;
+
+	BTE_PRINTK(("bte_error_handler(%p) - %d\n", err_nodepda,
+		    smp_processor_id()));
+
+	spin_lock_irqsave(recovery_lock, irq_flags);
+
+	/*
+	 * Lock all interfaces on this node to prevent new transfers
+	 * from being queued.
+	 */
+	for (i = 0; i < BTES_PER_NODE; i++) {
+		if (err_nodepda->bte_if[i].cleanup_active) {
+			continue;
+		}
+		spin_lock(&err_nodepda->bte_if[i].spinlock);
+		BTE_PRINTK(("eh:%p:%d locked %d\n", err_nodepda,
+			    smp_processor_id(), i));
+		err_nodepda->bte_if[i].cleanup_active = 1;
+	}
+
+	if (is_shub1()) {
+		shub1_bte_error_handler(_nodepda);
+	} else {
+		nasid = cnodeid_to_nasid(err_nodepda->bte_if[0].bte_cnode);
+
+		if (ia64_sn_bte_recovery(nasid))
+			panic("bte_error_handler(): Fatal BTE Error");
+	}
+
 	for (i = 0; i < BTES_PER_NODE; i++) {
 		bh_error = err_nodepda->bte_if[i].bh_error;
 		if (bh_error != BTE_SUCCESS) {

@@ -165,8 +187,6 @@ void bte_error_handler(unsigned long _nodepda)
 		spin_unlock(&err_nodepda->bte_if[i].spinlock);
 	}

-	del_timer(recovery_timer);
-
 	spin_unlock_irqrestore(recovery_lock, irq_flags);
 }
......
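The bte_error.c split follows a common refactoring pattern: the generic bte_error_handler() now owns the recovery lock, interface quiescing, and retry of failed transfers, and dispatches only the hardware-specific reset, with shub1 keeping the old CRB drain and shub2 deferring to firmware via ia64_sn_bte_recovery(). The dispatch shape in miniature (illustrative names, not the SN2 code):

#include <stdio.h>

struct node { int is_shub1; };

static void shub1_reset(struct node *n)   { (void)n; puts("drain CRBs, soft-reset BTE"); }
static void shub2_recover(struct node *n) { (void)n; puts("ask firmware to recover"); }

static void error_handler(struct node *n)
{
	/* common part: take recovery lock, mark interfaces cleanup_active */
	if (n->is_shub1)
		shub1_reset(n);		/* hardware-specific core */
	else
		shub2_recover(n);
	/* common part: restart failed transfers, drop locks */
}

int main(void)
{
	struct node a = { 1 }, b = { 0 };

	error_handler(&a);
	error_handler(&b);
	return 0;
}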
@@ -3,7 +3,7 @@
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
-* Copyright (C) 1992 - 1997, 2000,2002-2004 Silicon Graphics, Inc. All rights reserved.
+* Copyright (C) 1992 - 1997, 2000,2002-2005 Silicon Graphics, Inc. All rights reserved.
 */

 #include <linux/types.h>

@@ -38,8 +38,11 @@ static irqreturn_t hub_eint_handler(int irq, void *arg, struct pt_regs *ep)
 	if ((int)ret_stuff.v0)
 		panic("hubii_eint_handler(): Fatal TIO Error");

-	if (!(nasid & 1)) /* Not a TIO, handle CRB errors */
-		(void)hubiio_crb_error_handler(hubdev_info);
+	if (is_shub1()) {
+		if (!(nasid & 1)) /* Not a TIO, handle CRB errors */
+			(void)hubiio_crb_error_handler(hubdev_info);
+	} else
+		bte_error_handler((unsigned long)NODEPDA(nasid_to_cnodeid(nasid)));

 	return IRQ_HANDLED;
 }
......
@@ -11,14 +11,15 @@
 #include <asm/sn/types.h>
 #include <asm/sn/sn_sal.h>
 #include <asm/sn/addrs.h>
-#include "pci/pcibus_provider_defs.h"
-#include "pci/pcidev.h"
+#include <asm/sn/pcibus_provider_defs.h>
+#include <asm/sn/pcidev.h>
 #include "pci/pcibr_provider.h"
 #include "xtalk/xwidgetdev.h"
 #include <asm/sn/geo.h>
 #include "xtalk/hubdev.h"
 #include <asm/sn/io.h>
 #include <asm/sn/simulator.h>
+#include <asm/sn/tioca_provider.h>

 char master_baseio_wid;
 nasid_t master_nasid = INVALID_NASID;	/* Partition Master */
@@ -34,6 +35,37 @@ struct brick {

 int sn_ioif_inited = 0;		/* SN I/O infrastructure initialized? */

+struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES];	/* indexed by asic type */
+
+/*
+ * Hooks and struct for unsupported pci providers
+ */
+
+static dma_addr_t
+sn_default_pci_map(struct pci_dev *pdev, unsigned long paddr, size_t size)
+{
+	return 0;
+}
+
+static void
+sn_default_pci_unmap(struct pci_dev *pdev, dma_addr_t addr, int direction)
+{
+	return;
+}
+
+static void *
+sn_default_pci_bus_fixup(struct pcibus_bussoft *soft)
+{
+	return NULL;
+}
+
+static struct sn_pcibus_provider sn_pci_default_provider = {
+	.dma_map = sn_default_pci_map,
+	.dma_map_consistent = sn_default_pci_map,
+	.dma_unmap = sn_default_pci_unmap,
+	.bus_fixup = sn_default_pci_bus_fixup,
+};
+
 /*
  * Retrieve the DMA Flush List given nasid.  This list is needed
  * to implement the WAR - Flush DMA data on PIO Reads.
@@ -201,6 +233,7 @@ static void sn_pci_fixup_slot(struct pci_dev *dev)
 	struct sn_irq_info *sn_irq_info;
 	struct pci_dev *host_pci_dev;
 	int status = 0;
+	struct pcibus_bussoft *bs;

 	dev->sysdata = kmalloc(sizeof(struct pcidev_info), GFP_KERNEL);
 	if (SN_PCIDEV_INFO(dev) <= 0)
} }
/* set up host bus linkages */ /* set up host bus linkages */
bs = SN_PCIBUS_BUSSOFT(dev->bus);
host_pci_dev = host_pci_dev =
pci_find_slot(SN_PCIDEV_INFO(dev)->pdi_slot_host_handle >> 32, pci_find_slot(SN_PCIDEV_INFO(dev)->pdi_slot_host_handle >> 32,
SN_PCIDEV_INFO(dev)-> SN_PCIDEV_INFO(dev)->
@@ -248,10 +282,16 @@ static void sn_pci_fixup_slot(struct pci_dev *dev)
 	SN_PCIDEV_INFO(dev)->pdi_host_pcidev_info =
 	    SN_PCIDEV_INFO(host_pci_dev);
 	SN_PCIDEV_INFO(dev)->pdi_linux_pcidev = dev;
-	SN_PCIDEV_INFO(dev)->pdi_pcibus_info = SN_PCIBUS_BUSSOFT(dev->bus);
+	SN_PCIDEV_INFO(dev)->pdi_pcibus_info = bs;
+
+	if (bs && bs->bs_asic_type < PCIIO_ASIC_MAX_TYPES) {
+		SN_PCIDEV_BUSPROVIDER(dev) = sn_pci_provider[bs->bs_asic_type];
+	} else {
+		SN_PCIDEV_BUSPROVIDER(dev) = &sn_pci_default_provider;
+	}

 	/* Only set up IRQ stuff if this device has a host bus context */
-	if (SN_PCIDEV_BUSSOFT(dev) && sn_irq_info->irq_irq) {
+	if (bs && sn_irq_info->irq_irq) {
 		SN_PCIDEV_INFO(dev)->pdi_sn_irq_info = sn_irq_info;
 		dev->irq = SN_PCIDEV_INFO(dev)->pdi_sn_irq_info->irq_irq;
 		sn_irq_fixup(dev, sn_irq_info);
@@ -271,6 +311,7 @@ static void sn_pci_controller_fixup(int segment, int busnum)
 	struct pcibus_bussoft *prom_bussoft_ptr;
 	struct hubdev_info *hubdev_info;
 	void *provider_soft;
+	struct sn_pcibus_provider *provider;

 	status =
 	    sal_get_pcibus_info((u64) segment, (u64) busnum,
/*
* Per-provider fixup. Copies the contents from prom to local
* area and links SN_PCIBUS_BUSSOFT().
*
* Note: Provider is responsible for ensuring that prom_bussoft_ptr
* represents an asic-type that it can handle.
*/
-if (prom_bussoft_ptr->bs_asic_type == PCIIO_ASIC_TYPE_PPB) {
-return; /* no further fixup necessary */
if (prom_bussoft_ptr->bs_asic_type >= PCIIO_ASIC_MAX_TYPES) {
return; /* unsupported asic type */
}
provider = sn_pci_provider[prom_bussoft_ptr->bs_asic_type];
if (provider == NULL) {
return; /* no provider registered for this asic */
}
provider_soft = NULL;
if (provider->bus_fixup) {
provider_soft = (*provider->bus_fixup) (prom_bussoft_ptr);
}
-provider_soft = pcibr_bus_fixup(prom_bussoft_ptr);
if (provider_soft == NULL) {
return; /* fixup failed or not applicable */
}
@@ -338,6 +385,17 @@ static int __init sn_pci_init(void)
if (!ia64_platform_is("sn2") || IS_RUNNING_ON_SIMULATOR())
return 0;
/*
* prime sn_pci_provider[]. Individual provider init routines will
* override their respective default entries.
*/
for (i = 0; i < PCIIO_ASIC_MAX_TYPES; i++)
sn_pci_provider[i] = &sn_pci_default_provider;
pcibr_init_provider();
tioca_init_provider();
/*
* This is needed to avoid bounce limit checks in the blk layer
*/
......
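The init order above matters: every slot is primed with the default first, then each real provider claims only the asic types it can drive. A small userspace model of that ordering, with made-up names, might read:

/*
 * Userspace model (hypothetical names) of the priming-then-override
 * sequence above: defaults first, then each provider claims its types.
 */
#include <stdio.h>

#define MAX_TYPES 4
static const char *provider_for[MAX_TYPES];

static void init_defaults(void)
{
	for (int i = 0; i < MAX_TYPES; i++)
		provider_for[i] = "default";
}

static void pcibr_like_init(void)	/* claims two types, like PIC/TIOCP */
{
	provider_for[1] = "pcibr";
	provider_for[2] = "pcibr";
}

static void tioca_like_init(void)	/* claims one type */
{
	provider_for[3] = "tioca";
}

int main(void)
{
	init_defaults();
	pcibr_like_init();
	tioca_like_init();
	for (int i = 0; i < MAX_TYPES; i++)
		printf("asic type %d -> %s\n", i, provider_for[i]);
	return 0;
}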
@@ -13,8 +13,8 @@
#include <asm/sn/addrs.h>
#include <asm/sn/arch.h>
#include "xtalk/xwidgetdev.h"
-#include "pci/pcibus_provider_defs.h"
#include <asm/sn/pcibus_provider_defs.h>
-#include "pci/pcidev.h"
#include <asm/sn/pcidev.h>
#include "pci/pcibr_provider.h"
#include <asm/sn/shub_mmr.h>
#include <asm/sn/sn_sal.h>
@@ -82,20 +82,9 @@ static void sn_ack_irq(unsigned int irq)
nasid = get_nasid();
event_occurred =
HUB_L((uint64_t *) GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED));
-if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
-mask |= (1 << SH_EVENT_OCCURRED_UART_INT_SHFT);
-}
-if (event_occurred & SH_EVENT_OCCURRED_IPI_INT_MASK) {
-mask |= (1 << SH_EVENT_OCCURRED_IPI_INT_SHFT);
-}
-if (event_occurred & SH_EVENT_OCCURRED_II_INT0_MASK) {
-mask |= (1 << SH_EVENT_OCCURRED_II_INT0_SHFT);
-}
-if (event_occurred & SH_EVENT_OCCURRED_II_INT1_MASK) {
-mask |= (1 << SH_EVENT_OCCURRED_II_INT1_SHFT);
-}
mask = event_occurred & SH_ALL_INT_MASK;
HUB_S((uint64_t *) GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED_ALIAS),
mask);
__set_bit(irq, (volatile void *)pda->sn_in_service_ivecs);
move_irq(irq);
......
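The sn_ack_irq hunk collapses four test-and-set blocks into a single AND. Assuming SH_ALL_INT_MASK is the OR of the four per-source masks, both forms acknowledge exactly the same pending bits; a standalone check of that equivalence, with made-up bit positions, is:

/*
 * Standalone check (made-up bit positions) that one AND against a
 * combined mask yields the same ack bits as four separate tests,
 * assuming the combined mask is the OR of the individual masks.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define UART_MASK (1ULL << 3)
#define IPI_MASK  (1ULL << 7)
#define II0_MASK  (1ULL << 12)
#define II1_MASK  (1ULL << 13)
#define ALL_MASK  (UART_MASK | IPI_MASK | II0_MASK | II1_MASK)

int main(void)
{
	uint64_t event_occurred = IPI_MASK | II1_MASK | (1ULL << 40);
	uint64_t mask_old = 0, mask_new;

	if (event_occurred & UART_MASK) mask_old |= UART_MASK;
	if (event_occurred & IPI_MASK)  mask_old |= IPI_MASK;
	if (event_occurred & II0_MASK)  mask_old |= II0_MASK;
	if (event_occurred & II1_MASK)  mask_old |= II1_MASK;

	mask_new = event_occurred & ALL_MASK;	/* the one-line form */
	assert(mask_old == mask_new);
	printf("ack mask 0x%llx\n", (unsigned long long)mask_new);
	return 0;
}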
@@ -29,6 +29,7 @@
#include <linux/sched.h>
#include <linux/root_dev.h>
#include <linux/nodemask.h>
#include <linux/pm.h>
#include <asm/io.h>
#include <asm/sal.h>
@@ -353,6 +354,14 @@ void __init sn_setup(char **cmdline_p)
screen_info = sn_screen_info;
sn_timer_init();
/*
* set pm_power_off to a SAL call to allow
* sn machines to power off. The SAL call can be replaced
* by an ACPI interface call when ACPI is fully implemented
* for sn.
*/
pm_power_off = ia64_sn_power_down;
}
/**
......
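The sn_setup change installs a platform handler through a global function pointer that the generic power-off path consults. A minimal userspace model of that hook pattern — illustrative names only, not the kernel's shutdown code — is:

/*
 * Minimal model of the pm_power_off hook: a global function pointer
 * that a generic shutdown path calls when a platform has installed a
 * handler. All names are illustrative.
 */
#include <stdio.h>

static void (*power_off_hook)(void);

static void sal_power_down(void)	/* stands in for ia64_sn_power_down */
{
	printf("platform power-off path invoked\n");
}

static void machine_power_off_like(void)
{
	if (power_off_hook)
		power_off_hook();	/* platform handler runs first */
	printf("fallback: halt\n");	/* real code would not return */
}

int main(void)
{
	power_off_hook = sal_power_down;	/* what sn_setup() does */
	machine_power_off_like();
	return 0;
}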
@@ -28,6 +28,7 @@
#include <linux/vmalloc.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/utsname.h>
#include <linux/cpumask.h>
#include <linux/smp_lock.h>
#include <linux/nodemask.h>
@@ -43,6 +44,7 @@
#include <asm/sn/module.h>
#include <asm/sn/geo.h>
#include <asm/sn/sn2/sn_hwperf.h>
#include <asm/sn/addrs.h>
static void *sn_hwperf_salheap = NULL;
static int sn_hwperf_obj_cnt = 0;
@@ -81,26 +83,45 @@ static int sn_hwperf_enum_objects(int *nobj, struct sn_hwperf_object_info **ret)
return e;
}
static int sn_hwperf_location_to_bpos(char *location,
int *rack, int *bay, int *slot, int *slab)
{
char type;
/* first scan for an old style geoid string */
if (sscanf(location, "%03d%c%02d#%d",
rack, &type, bay, slab) == 4)
*slot = 0;
else /* scan for a new bladed geoid string */
if (sscanf(location, "%03d%c%02d^%02d#%d",
rack, &type, bay, slot, slab) != 5)
return -1;
/* success */
return 0;
}
static int sn_hwperf_geoid_to_cnode(char *location)
{
int cnode;
geoid_t geoid;
moduleid_t module_id;
-char type;
-int rack, slot, slab;
-int this_rack, this_slot, this_slab;
int rack, bay, slot, slab;
int this_rack, this_bay, this_slot, this_slab;
-if (sscanf(location, "%03d%c%02d#%d", &rack, &type, &slot, &slab) != 4)
if (sn_hwperf_location_to_bpos(location, &rack, &bay, &slot, &slab))
return -1;
for (cnode = 0; cnode < numionodes; cnode++) {
geoid = cnodeid_get_geoid(cnode);
module_id = geo_module(geoid);
this_rack = MODULE_GET_RACK(module_id);
-this_slot = MODULE_GET_BPOS(module_id);
this_bay = MODULE_GET_BPOS(module_id);
this_slot = geo_slot(geoid);
this_slab = geo_slab(geoid);
-if (rack == this_rack && slot == this_slot && slab == this_slab)
if (rack == this_rack && bay == this_bay &&
slot == this_slot && slab == this_slab) {
break;
}
}
return cnode < numionodes ? cnode : -1;
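The helper accepts two geoid spellings: an old style "rack/bay#slab" string and a bladed "rack/bay^slot#slab" string. A self-contained demo of the same two sscanf passes — the sample location strings are illustrative, not taken from real hardware — compiles with any C compiler:

/* Standalone demo of the two geoid formats parsed above. */
#include <stdio.h>

static int location_to_bpos(const char *location,
			    int *rack, int *bay, int *slot, int *slab)
{
	char type;

	if (sscanf(location, "%03d%c%02d#%d", rack, &type, bay, slab) == 4)
		*slot = 0;		/* old style has no slot field */
	else if (sscanf(location, "%03d%c%02d^%02d#%d",
			rack, &type, bay, slot, slab) != 5)
		return -1;
	return 0;
}

int main(void)
{
	const char *samples[] = { "001c07#0", "001c07^02#1", "bogus" };
	int rack, bay, slot, slab;

	for (int i = 0; i < 3; i++) {
		if (location_to_bpos(samples[i], &rack, &bay, &slot, &slab))
			printf("%s: unparsable\n", samples[i]);
		else
			printf("%s: rack %d bay %d slot %d slab %d\n",
			       samples[i], rack, bay, slot, slab);
	}
	return 0;
}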
@@ -153,11 +174,36 @@ static const char *sn_hwperf_get_slabname(struct sn_hwperf_object_info *obj,
return slabname;
}
static void print_pci_topology(struct seq_file *s,
struct sn_hwperf_object_info *obj, int *ordinal,
u64 rack, u64 bay, u64 slot, u64 slab)
{
char *p1;
char *p2;
char *pg;
if (!(pg = (char *)get_zeroed_page(GFP_KERNEL)))
return; /* ignore */
if (ia64_sn_ioif_get_pci_topology(rack, bay, slot, slab,
__pa(pg), PAGE_SIZE) == SN_HWPERF_OP_OK) {
for (p1=pg; *p1 && p1 < pg + PAGE_SIZE;) {
if (!(p2 = strchr(p1, '\n')))
break;
*p2 = '\0';
seq_printf(s, "pcibus %d %s-%s\n",
*ordinal, obj->location, p1);
(*ordinal)++;
p1 = p2 + 1;
}
}
free_page((unsigned long)pg);
}
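print_pci_topology() walks a page of newline-terminated records with strchr, terminating each record in place before printing it with a running ordinal. The same walk, extracted into a runnable sketch over a made-up buffer:

/* Standalone sketch of the '\n'-record walk used above. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[] = "busA\nbusB\nbusC\n";	/* made-up records */
	int ordinal = 0;
	char *p1 = buf, *p2;

	while (*p1 && (p2 = strchr(p1, '\n')) != NULL) {
		*p2 = '\0';			/* terminate this record */
		printf("pcibus %d %s\n", ordinal++, p1);
		p1 = p2 + 1;			/* advance past the newline */
	}
	return 0;
}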
static int sn_topology_show(struct seq_file *s, void *d)
{
int sz;
int pt;
-int e;
int e = 0;
int i;
int j;
const char *slabname;
@@ -169,11 +215,44 @@ static int sn_topology_show(struct seq_file *s, void *d)
struct sn_hwperf_object_info *p;
struct sn_hwperf_object_info *obj = d; /* this object */
struct sn_hwperf_object_info *objs = s->private; /* all objects */
int rack, bay, slot, slab;
u8 shubtype;
u8 system_size;
u8 sharing_size;
u8 partid;
u8 coher;
u8 nasid_shift;
u8 region_size;
u16 nasid_mask;
int nasid_msb;
int pci_bus_ordinal = 0;
if (obj == objs) {
-seq_printf(s, "# sn_topology version 1\n");
seq_printf(s, "# sn_topology version 2\n");
seq_printf(s, "# objtype ordinal location partition"
" [attribute value [, ...]]\n");
if (ia64_sn_get_sn_info(0,
&shubtype, &nasid_mask, &nasid_shift, &system_size,
&sharing_size, &partid, &coher, &region_size))
BUG();
for (nasid_msb=63; nasid_msb > 0; nasid_msb--) {
if (((u64)nasid_mask << nasid_shift) & (1ULL << nasid_msb))
break;
}
seq_printf(s, "partition %u %s local "
"shubtype %s, "
"nasid_mask 0x%016lx, "
"nasid_bits %d:%d, "
"system_size %d, "
"sharing_size %d, "
"coherency_domain %d, "
"region_size %d\n",
partid, system_utsname.nodename,
shubtype ? "shub2" : "shub1",
(u64)nasid_mask << nasid_shift, nasid_msb, nasid_shift,
system_size, sharing_size, coher, region_size);
}
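The nasid_msb loop above finds the most significant set bit of (nasid_mask << nasid_shift) by walking from bit 63 down. A quick standalone check of that scan, with sample mask and shift values that are made up for illustration:

/* Standalone check of the MSB scan above (sample values made up). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t nasid_mask = 0x7ff;	/* illustrative 11-bit mask */
	int nasid_shift = 38;		/* illustrative shift */
	int nasid_msb;

	for (nasid_msb = 63; nasid_msb > 0; nasid_msb--)
		if ((nasid_mask << nasid_shift) & (1ULL << nasid_msb))
			break;

	/* 0x7ff << 38 has its top bit at position 10 + 38 = 48 */
	printf("nasid_bits %d:%d\n", nasid_msb, nasid_shift);
	return 0;
}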
if (SN_HWPERF_FOREIGN(obj)) {
@@ -181,7 +260,7 @@ static int sn_topology_show(struct seq_file *s, void *d)
return 0;
}
-for (i = 0; obj->name[i]; i++) {
for (i = 0; i < SN_HWPERF_MAXSTRING && obj->name[i]; i++) {
if (obj->name[i] == ' ')
obj->name[i] = '_';
}
@@ -221,6 +300,17 @@ static int sn_topology_show(struct seq_file *s, void *d)
seq_putc(s, '\n');
}
}
/*
* PCI busses attached to this node, if any
*/
if (sn_hwperf_location_to_bpos(obj->location,
&rack, &bay, &slot, &slab)) {
/* export pci bus info */
print_pci_topology(s, obj, &pci_bus_ordinal,
rack, bay, slot, slab);
}
}
if (obj->ports) {
@@ -397,6 +487,9 @@ static int sn_hwperf_map_err(int hwperf_err)
break;
case SN_HWPERF_OP_BUSY:
e = -EBUSY;
break;
case SN_HWPERF_OP_RECONFIGURE:
e = -EAGAIN;
break;
@@ -549,6 +642,7 @@ sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, u64 arg)
r = sn_hwperf_op_cpu(&op_info);
if (r) {
r = sn_hwperf_map_err(r);
a.v0 = v0;
goto error;
}
break;
......
@@ -7,4 +7,4 @@
#
# Makefile for the sn pci general routines.
-obj-y := pci_dma.o pcibr/
obj-y := pci_dma.o tioca_provider.o pcibr/
@@ -12,9 +12,8 @@
#include <linux/module.h>
#include <asm/dma.h>
#include <asm/sn/sn_sal.h>
-#include "pci/pcibus_provider_defs.h"
#include <asm/sn/pcibus_provider_defs.h>
-#include "pci/pcidev.h"
#include <asm/sn/pcidev.h>
-#include "pci/pcibr_provider.h"
#define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset)
#define SG_ENT_PHYS_ADDRESS(SG) virt_to_phys(SG_ENT_VIRT_ADDRESS(SG))
@@ -79,7 +78,8 @@ void *sn_dma_alloc_coherent(struct device *dev, size_t size,
{
void *cpuaddr;
unsigned long phys_addr;
-struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));
struct pci_dev *pdev = to_pci_dev(dev);
struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
BUG_ON(dev->bus != &pci_bus_type);
@@ -102,8 +102,7 @@ void *sn_dma_alloc_coherent(struct device *dev, size_t size,
* resources.
*/
-*dma_handle = pcibr_dma_map(pcidev_info, phys_addr, size,
-SN_PCIDMA_CONSISTENT);
*dma_handle = provider->dma_map_consistent(pdev, phys_addr, size);
if (!*dma_handle) {
printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
free_pages((unsigned long)cpuaddr, get_order(size));
@@ -127,11 +126,12 @@ EXPORT_SYMBOL(sn_dma_alloc_coherent);
void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_handle)
{
-struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));
struct pci_dev *pdev = to_pci_dev(dev);
struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
BUG_ON(dev->bus != &pci_bus_type);
-pcibr_dma_unmap(pcidev_info, dma_handle, 0);
provider->dma_unmap(pdev, dma_handle, 0);
free_pages((unsigned long)cpu_addr, get_order(size));
}
EXPORT_SYMBOL(sn_dma_free_coherent);
@@ -159,12 +159,13 @@ dma_addr_t sn_dma_map_single(struct device *dev, void *cpu_addr, size_t size,
{
dma_addr_t dma_addr;
unsigned long phys_addr;
-struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));
struct pci_dev *pdev = to_pci_dev(dev);
struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
BUG_ON(dev->bus != &pci_bus_type);
phys_addr = __pa(cpu_addr);
-dma_addr = pcibr_dma_map(pcidev_info, phys_addr, size, 0);
dma_addr = provider->dma_map(pdev, phys_addr, size);
if (!dma_addr) {
printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
return 0;
@@ -187,10 +188,12 @@ EXPORT_SYMBOL(sn_dma_map_single);
void sn_dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
int direction)
{
-struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));
struct pci_dev *pdev = to_pci_dev(dev);
struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
BUG_ON(dev->bus != &pci_bus_type);
-pcibr_dma_unmap(pcidev_info, dma_addr, direction);
provider->dma_unmap(pdev, dma_addr, direction);
}
EXPORT_SYMBOL(sn_dma_unmap_single);
@@ -207,12 +210,13 @@ void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
int nhwentries, int direction)
{
int i;
-struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));
struct pci_dev *pdev = to_pci_dev(dev);
struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
BUG_ON(dev->bus != &pci_bus_type);
for (i = 0; i < nhwentries; i++, sg++) {
-pcibr_dma_unmap(pcidev_info, sg->dma_address, direction);
provider->dma_unmap(pdev, sg->dma_address, direction);
sg->dma_address = (dma_addr_t) NULL;
sg->dma_length = 0;
}
@@ -233,7 +237,8 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
{
unsigned long phys_addr;
struct scatterlist *saved_sg = sg;
-struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(to_pci_dev(dev));
struct pci_dev *pdev = to_pci_dev(dev);
struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
int i;
BUG_ON(dev->bus != &pci_bus_type);
@@ -243,8 +248,8 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
*/
for (i = 0; i < nhwentries; i++, sg++) {
phys_addr = SG_ENT_PHYS_ADDRESS(sg);
-sg->dma_address = pcibr_dma_map(pcidev_info, phys_addr,
-sg->length, 0);
sg->dma_address = provider->dma_map(pdev,
phys_addr, sg->length);
if (!sg->dma_address) {
printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
......
@@ -8,8 +8,8 @@
#include <linux/types.h>
#include <asm/sn/sn_sal.h>
-#include "pci/pcibus_provider_defs.h"
#include <asm/sn/pcibus_provider_defs.h>
-#include "pci/pcidev.h"
#include <asm/sn/pcidev.h>
#include "pci/pcibr_provider.h"
int pcibr_invalidate_ate = 0; /* by default don't invalidate ATE on free */
......
@@ -12,8 +12,8 @@
#include <asm/sn/geo.h>
#include "xtalk/xwidgetdev.h"
#include "xtalk/hubdev.h"
-#include "pci/pcibus_provider_defs.h"
#include <asm/sn/pcibus_provider_defs.h>
-#include "pci/pcidev.h"
#include <asm/sn/pcidev.h>
#include "pci/tiocp.h"
#include "pci/pic.h"
#include "pci/pcibr_provider.h"
@@ -40,7 +40,7 @@ extern int sn_ioif_inited;
* we do not have to allocate entries in the PMU.
*/
-static uint64_t
static dma_addr_t
pcibr_dmamap_ate32(struct pcidev_info *info,
uint64_t paddr, size_t req_size, uint64_t flags)
{
@@ -109,7 +109,7 @@ pcibr_dmamap_ate32(struct pcidev_info *info,
return pci_addr;
}
-static uint64_t
static dma_addr_t
pcibr_dmatrans_direct64(struct pcidev_info * info, uint64_t paddr,
uint64_t dma_attributes)
{
@@ -141,7 +141,7 @@ pcibr_dmatrans_direct64(struct pcidev_info * info, uint64_t paddr,
}
-static uint64_t
static dma_addr_t
pcibr_dmatrans_direct32(struct pcidev_info * info,
uint64_t paddr, size_t req_size, uint64_t flags)
{
@@ -180,11 +180,11 @@ pcibr_dmatrans_direct32(struct pcidev_info * info,
* DMA mappings for Direct 64 and 32 do not have any DMA maps.
*/
void
-pcibr_dma_unmap(struct pcidev_info *pcidev_info, dma_addr_t dma_handle,
-int direction)
pcibr_dma_unmap(struct pci_dev *hwdev, dma_addr_t dma_handle, int direction)
{
-struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info->
-pdi_pcibus_info;
struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
struct pcibus_info *pcibus_info =
(struct pcibus_info *)pcidev_info->pdi_pcibus_info;
if (IS_PCI32_MAPPED(dma_handle)) {
int ate_index;
@@ -316,64 +316,63 @@ void sn_dma_flush(uint64_t addr)
}
/*
-* Wrapper DMA interface. Called from pci_dma.c routines.
* DMA interfaces. Called from pci_dma.c routines.
*/
-uint64_t
-pcibr_dma_map(struct pcidev_info * pcidev_info, unsigned long phys_addr,
-size_t size, unsigned int flags)
dma_addr_t
pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size)
{
dma_addr_t dma_handle;
-struct pci_dev *pcidev = pcidev_info->pdi_linux_pcidev;
struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
-if (flags & SN_PCIDMA_CONSISTENT) {
-/* sn_pci_alloc_consistent interfaces */
-if (pcidev->dev.coherent_dma_mask == ~0UL) {
-dma_handle =
-pcibr_dmatrans_direct64(pcidev_info, phys_addr,
-PCI64_ATTR_BAR);
-} else {
-dma_handle =
-(dma_addr_t) pcibr_dmamap_ate32(pcidev_info,
-phys_addr, size,
-PCI32_ATE_BAR);
-}
-} else {
-/* map_sg/map_single interfaces */
/* SN cannot support DMA addresses smaller than 32 bits. */
-if (pcidev->dma_mask < 0x7fffffff) {
if (hwdev->dma_mask < 0x7fffffff) {
return 0;
}
-if (pcidev->dma_mask == ~0UL) {
if (hwdev->dma_mask == ~0UL) {
/*
* Handle the most common case: 64 bit cards. This
* call should always succeed.
*/
-dma_handle =
-pcibr_dmatrans_direct64(pcidev_info, phys_addr,
-PCI64_ATTR_PREF);
dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr,
PCI64_ATTR_PREF);
} else {
/* Handle 32-63 bit cards via direct mapping */
-dma_handle =
-pcibr_dmatrans_direct32(pcidev_info, phys_addr,
-size, 0);
dma_handle = pcibr_dmatrans_direct32(pcidev_info, phys_addr,
size, 0);
if (!dma_handle) {
/*
* It is a 32 bit card and we cannot do direct mapping,
* so we use an ATE.
*/
-dma_handle =
-pcibr_dmamap_ate32(pcidev_info, phys_addr,
-size, PCI32_ATE_PREF);
dma_handle = pcibr_dmamap_ate32(pcidev_info, phys_addr,
size, PCI32_ATE_PREF);
}
}
-}
return dma_handle;
}
dma_addr_t
pcibr_dma_map_consistent(struct pci_dev * hwdev, unsigned long phys_addr,
size_t size)
{
dma_addr_t dma_handle;
struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
if (hwdev->dev.coherent_dma_mask == ~0UL) {
dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr,
PCI64_ATTR_BAR);
} else {
dma_handle = (dma_addr_t) pcibr_dmamap_ate32(pcidev_info,
phys_addr, size,
PCI32_ATE_BAR);
}
return dma_handle;
}
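The split above makes the mapping policy easy to read: the streaming path picks direct 64-bit, direct 32-bit, or an ATE fallback from the device's dma_mask, while the consistent path keys off coherent_dma_mask. A userspace decision-table model of the streaming choice — thresholds mirror the code above, everything else illustrative — is:

/* Userspace model of the mapping-path choice in pcibr_dma_map(). */
#include <stdio.h>

static const char *pick_path(unsigned long dma_mask, int direct32_ok)
{
	if (dma_mask < 0x7fffffff)
		return "unsupported";		/* SN needs >= 32 bits */
	if (dma_mask == ~0UL)
		return "direct64";		/* common 64-bit card */
	return direct32_ok ? "direct32" : "ate32";
}

int main(void)
{
	printf("%s\n", pick_path(0xffff, 1));	  /* unsupported */
	printf("%s\n", pick_path(~0UL, 1));	  /* direct64 */
	printf("%s\n", pick_path(0xffffffff, 1)); /* direct32 */
	printf("%s\n", pick_path(0xffffffff, 0)); /* ate32 fallback */
	return 0;
}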
EXPORT_SYMBOL(sn_dma_flush);
@@ -13,8 +13,8 @@
#include "xtalk/xwidgetdev.h"
#include <asm/sn/geo.h>
#include "xtalk/hubdev.h"
-#include "pci/pcibus_provider_defs.h"
#include <asm/sn/pcibus_provider_defs.h>
-#include "pci/pcidev.h"
#include <asm/sn/pcidev.h>
#include "pci/pcibr_provider.h"
#include <asm/sn/addrs.h>
@@ -168,3 +168,23 @@ void pcibr_change_devices_irq(struct sn_irq_info *sn_irq_info)
pcibr_force_interrupt(sn_irq_info);
}
}
/*
* Provider entries for PIC/CP
*/
struct sn_pcibus_provider pcibr_provider = {
.dma_map = pcibr_dma_map,
.dma_map_consistent = pcibr_dma_map_consistent,
.dma_unmap = pcibr_dma_unmap,
.bus_fixup = pcibr_bus_fixup,
};
int
pcibr_init_provider(void)
{
sn_pci_provider[PCIIO_ASIC_TYPE_PIC] = &pcibr_provider;
sn_pci_provider[PCIIO_ASIC_TYPE_TIOCP] = &pcibr_provider;
return 0;
}
@@ -8,8 +8,8 @@
#include <linux/types.h>
#include <linux/interrupt.h>
-#include "pci/pcibus_provider_defs.h"
#include <asm/sn/pcibus_provider_defs.h>
-#include "pci/pcidev.h"
#include <asm/sn/pcidev.h>
#include "pci/tiocp.h"
#include "pci/pic.h"
#include "pci/pcibr_provider.h"
......
@@ -399,6 +399,20 @@ config SGI_SNSC
controller communication from user space (you want this!),
say Y. Otherwise, say N.
config SGI_TIOCX
bool "SGI TIO CX driver support"
depends on (IA64_SGI_SN2 || IA64_GENERIC)
help
If you have an SGI Altix and you have FPGA devices attached
to your TIO, say Y here, otherwise say N.
config SGI_MBCS
tristate "SGI FPGA Core Services driver support"
depends on (IA64_SGI_SN2 || IA64_GENERIC)
help
If you have an SGI Altix with an attached SABrick
say Y or M here, otherwise say N.
source "drivers/serial/Kconfig" source "drivers/serial/Kconfig"
config UNIX98_PTYS config UNIX98_PTYS
......
@@ -42,11 +42,12 @@ obj-$(CONFIG_SX) += sx.o generic_serial.o
obj-$(CONFIG_RIO) += rio/ generic_serial.o
obj-$(CONFIG_HVC_CONSOLE) += hvc_console.o hvsi.o
obj-$(CONFIG_RAW_DRIVER) += raw.o
-obj-$(CONFIG_SGI_SNSC) += snsc.o
obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o
obj-$(CONFIG_MMTIMER) += mmtimer.o
obj-$(CONFIG_VIOCONS) += viocons.o
obj-$(CONFIG_VIOTAPE) += viotape.o
obj-$(CONFIG_HVCS) += hvcs.o
obj-$(CONFIG_SGI_MBCS) += mbcs.o
obj-$(CONFIG_PRINTER) += lp.o
obj-$(CONFIG_TIPAR) += tipar.o
......
@@ -374,6 +374,7 @@ scdrv_init(void)
void *salbuf;
struct class_simple *snsc_class;
dev_t first_dev, dev;
nasid_t event_nasid = ia64_sn_get_console_nasid();
if (alloc_chrdev_region(&first_dev, 0, numionodes,
SYSCTL_BASENAME) < 0) {
@@ -441,6 +442,13 @@ scdrv_init(void)
ia64_sn_irtr_intr_enable(scd->scd_nasid,
0 /*ignored */ ,
SAL_IROUTER_INTR_RECV);
/* on the console nasid, prepare to receive
* system controller environmental events
*/
if (scd->scd_nasid == event_nasid) {
scdrv_event_init(scd);
}
}
return 0;
}
......
@@ -47,4 +47,44 @@ struct sysctl_data_s {
nasid_t scd_nasid; /* Node on which subchannels are opened. */
};
/* argument types */
#define IR_ARG_INT 0x00 /* 4-byte integer (big-endian) */
#define IR_ARG_ASCII 0x01 /* null-terminated ASCII string */
#define IR_ARG_UNKNOWN 0x80 /* unknown data type. The low
* 7 bits will contain the data
* length. */
#define IR_ARG_UNKNOWN_LENGTH_MASK 0x7f
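The argument-type encoding above packs a length into the unknown case: the 0x80 bit flags unknown data and the low 7 bits carry its byte count. A standalone decoder for that byte, usable with any C compiler:

/* Standalone demo of decoding the argument-type byte defined above. */
#include <stdio.h>

#define IR_ARG_INT                 0x00
#define IR_ARG_ASCII               0x01
#define IR_ARG_UNKNOWN             0x80
#define IR_ARG_UNKNOWN_LENGTH_MASK 0x7f

static void describe(unsigned char t)
{
	if (t & IR_ARG_UNKNOWN)
		printf("unknown blob, %d bytes\n",
		       t & IR_ARG_UNKNOWN_LENGTH_MASK);
	else if (t == IR_ARG_INT)
		printf("4-byte big-endian int\n");
	else if (t == IR_ARG_ASCII)
		printf("NUL-terminated ASCII string\n");
	else
		printf("unrecognized type 0x%02x\n", t);
}

int main(void)
{
	describe(IR_ARG_INT);
	describe(IR_ARG_ASCII);
	describe(0x8c);		/* unknown, 12 bytes */
	return 0;
}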
/* system controller event codes */
#define EV_CLASS_MASK 0xf000ul
#define EV_SEVERITY_MASK 0x0f00ul
#define EV_COMPONENT_MASK 0x00fful
#define EV_CLASS_POWER 0x1000ul
#define EV_CLASS_FAN 0x2000ul
#define EV_CLASS_TEMP 0x3000ul
#define EV_CLASS_ENV 0x4000ul
#define EV_CLASS_TEST_FAULT 0x5000ul
#define EV_CLASS_TEST_WARNING 0x6000ul
#define EV_CLASS_PWRD_NOTIFY 0x8000ul
#define EV_SEVERITY_POWER_STABLE 0x0000ul
#define EV_SEVERITY_POWER_LOW_WARNING 0x0100ul
#define EV_SEVERITY_POWER_HIGH_WARNING 0x0200ul
#define EV_SEVERITY_POWER_HIGH_FAULT 0x0300ul
#define EV_SEVERITY_POWER_LOW_FAULT 0x0400ul
#define EV_SEVERITY_FAN_STABLE 0x0000ul
#define EV_SEVERITY_FAN_WARNING 0x0100ul
#define EV_SEVERITY_FAN_FAULT 0x0200ul
#define EV_SEVERITY_TEMP_STABLE 0x0000ul
#define EV_SEVERITY_TEMP_ADVISORY 0x0100ul
#define EV_SEVERITY_TEMP_CRITICAL 0x0200ul
#define EV_SEVERITY_TEMP_FAULT 0x0300ul
void scdrv_event_init(struct sysctl_data_s *);
#endif /* _SN_SYSCTL_H_ */
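The event code is a bitfield: class in the top nibble, severity in the next, component in the low byte. Splitting a sample code with the masks above (the sample value is made up):

/* Standalone demo of splitting an event code with the masks above. */
#include <stdio.h>

#define EV_CLASS_MASK     0xf000ul
#define EV_SEVERITY_MASK  0x0f00ul
#define EV_COMPONENT_MASK 0x00fful

int main(void)
{
	unsigned long code = 0x2103;	/* fan class, warning, component 3 */

	printf("class 0x%lx severity 0x%lx component 0x%lx\n",
	       code & EV_CLASS_MASK,
	       code & EV_SEVERITY_MASK,
	       code & EV_COMPONENT_MASK);
	return 0;
}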
/*
* SN Platform system controller communication support
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2004 Silicon Graphics, Inc. All rights reserved.
*/
/*
* System controller event handler
*
* These routines deal with environmental events arriving from the
* system controllers.
*/
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/byteorder/generic.h>
#include <asm/sn/sn_sal.h>
#include "snsc.h"
static struct subch_data_s *event_sd;
void scdrv_event(unsigned long);
DECLARE_TASKLET(sn_sysctl_event, scdrv_event, 0);
/*
* scdrv_event_interrupt
*
* Pull incoming environmental events off the physical link to the
* system controller and put them in a temporary holding area in SAL.
* Schedule scdrv_event() to move them along to their ultimate
* destination.
*/
static irqreturn_t
scdrv_event_interrupt(int irq, void *subch_data, struct pt_regs *regs)
{
struct subch_data_s *sd = subch_data;
unsigned long flags;
int status;
spin_lock_irqsave(&sd->sd_rlock, flags);
status = ia64_sn_irtr_intr(sd->sd_nasid, sd->sd_subch);
if ((status > 0) && (status & SAL_IROUTER_INTR_RECV)) {
tasklet_schedule(&sn_sysctl_event);
}
spin_unlock_irqrestore(&sd->sd_rlock, flags);
return IRQ_HANDLED;
}
/*
* scdrv_parse_event
*
* Break an event (as read from SAL) into useful pieces so we can decide
* what to do with it.
*/
static int
scdrv_parse_event(char *event, int *src, int *code, int *esp_code, char *desc)
{
char *desc_end;
/* record event source address */
*src = be32_to_cpup((__be32 *)event);
event += 4; /* move on to event code */
/* record the system controller's event code */
*code = be32_to_cpup((__be32 *)event);
event += 4; /* move on to event arguments */
/* how many arguments are in the packet? */
if (*event++ != 2) {
/* if not 2, give up */
return -1;
}
/* parse out the ESP code */
if (*event++ != IR_ARG_INT) {
/* not an integer argument, so give up */
return -1;
}
*esp_code = be32_to_cpup((__be32 *)event);
event += 4;
/* parse out the event description */
if (*event++ != IR_ARG_ASCII) {
/* not an ASCII string, so give up */
return -1;
}
event[CHUNKSIZE-1] = '\0'; /* ensure this string ends! */
event += 2; /* skip leading CR/LF */
desc_end = desc + sprintf(desc, "%s", event);
/* strip trailing CR/LF (if any) */
for (desc_end--;
(desc_end != desc) && ((*desc_end == 0xd) || (*desc_end == 0xa));
desc_end--) {
*desc_end = '\0';
}
return 0;
}
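scdrv_parse_event() expects a fixed wire layout: a 4-byte big-endian source, a 4-byte big-endian code, an argument count that must be 2, an integer argument carrying the ESP code, and an ASCII argument carrying the description. A self-contained walk over a hand-built packet in that layout — all packet bytes are made up — shows the same steps:

/* Standalone walk over a synthetic event packet in the layout
 * scdrv_parse_event() expects. */
#include <stdio.h>

static unsigned int get_be32(const unsigned char *p)
{
	return ((unsigned int)p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
}

int main(void)
{
	unsigned char pkt[] = {
		0x00, 0x00, 0x00, 0x05,		/* source address */
		0x00, 0x00, 0x21, 0x03,		/* event code */
		2,				/* argument count */
		0x00,				/* IR_ARG_INT */
		0x00, 0x00, 0x00, 0x2a,		/* ESP code 42 */
		0x01,				/* IR_ARG_ASCII */
		'\r', '\n', 'f', 'a', 'n', ' ', 's', 'l', 'o', 'w', 0
	};
	const unsigned char *p = pkt;
	unsigned int src, code, esp;

	src = get_be32(p); p += 4;
	code = get_be32(p); p += 4;
	if (*p++ != 2) return 1;		/* want exactly 2 args */
	if (*p++ != 0x00) return 1;		/* first arg: int */
	esp = get_be32(p); p += 4;
	if (*p++ != 0x01) return 1;		/* second arg: string */
	p += 2;					/* skip leading CR/LF */
	printf("src %u code 0x%x esp 0x%x desc \"%s\"\n",
	       src, code, esp, (const char *)p);
	return 0;
}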
/*
* scdrv_event_severity
*
* Figure out how urgent a message we should write to the console/syslog
* via printk.
*/
static char *
scdrv_event_severity(int code)
{
int ev_class = (code & EV_CLASS_MASK);
int ev_severity = (code & EV_SEVERITY_MASK);
char *pk_severity = KERN_NOTICE;
switch (ev_class) {
case EV_CLASS_POWER:
switch (ev_severity) {
case EV_SEVERITY_POWER_LOW_WARNING:
case EV_SEVERITY_POWER_HIGH_WARNING:
pk_severity = KERN_WARNING;
break;
case EV_SEVERITY_POWER_HIGH_FAULT:
case EV_SEVERITY_POWER_LOW_FAULT:
pk_severity = KERN_ALERT;
break;
}
break;
case EV_CLASS_FAN:
switch (ev_severity) {
case EV_SEVERITY_FAN_WARNING:
pk_severity = KERN_WARNING;
break;
case EV_SEVERITY_FAN_FAULT:
pk_severity = KERN_CRIT;
break;
}
break;
case EV_CLASS_TEMP:
switch (ev_severity) {
case EV_SEVERITY_TEMP_ADVISORY:
pk_severity = KERN_WARNING;
break;
case EV_SEVERITY_TEMP_CRITICAL:
pk_severity = KERN_CRIT;
break;
case EV_SEVERITY_TEMP_FAULT:
pk_severity = KERN_ALERT;
break;
}
break;
case EV_CLASS_ENV:
pk_severity = KERN_ALERT;
break;
case EV_CLASS_TEST_FAULT:
pk_severity = KERN_ALERT;
break;
case EV_CLASS_TEST_WARNING:
pk_severity = KERN_WARNING;
break;
case EV_CLASS_PWRD_NOTIFY:
pk_severity = KERN_ALERT;
break;
}
return pk_severity;
}
/*
* scdrv_dispatch_event
*
* Do the right thing with an incoming event. That's often nothing
* more than printing it to the system log. For power-down notifications
* we start a graceful shutdown.
*/
static void
scdrv_dispatch_event(char *event, int len)
{
int code, esp_code, src;
char desc[CHUNKSIZE];
char *severity;
if (scdrv_parse_event(event, &src, &code, &esp_code, desc) < 0) {
/* ignore uninterpretable event */
return;
}
/* how urgent is the message? */
severity = scdrv_event_severity(code);
if ((code & EV_CLASS_MASK) == EV_CLASS_PWRD_NOTIFY) {
struct task_struct *p;
/* give a SIGPWR signal to init proc */
/* first find init's task */
read_lock(&tasklist_lock);
for_each_process(p) {
if (p->pid == 1)
break;
}
if (p) { /* we found init's task */
printk(KERN_EMERG "Power off indication received. Initiating power fail sequence...\n");
force_sig(SIGPWR, p);
} else { /* failed to find init's task - just give message(s) */
printk(KERN_WARNING "Failed to find init proc to handle power off!\n");
printk("%s|$(0x%x)%s\n", severity, esp_code, desc);
}
read_unlock(&tasklist_lock);
} else {
/* print to system log */
printk("%s|$(0x%x)%s\n", severity, esp_code, desc);
}
}
/*
* scdrv_event
*
* Called as a tasklet when an event arrives from the L1. Read the event
* from where it's temporarily stored in SAL and call scdrv_dispatch_event()
* to send it on its way. Keep trying to read events until SAL indicates
* that there are no more immediately available.
*/
void
scdrv_event(unsigned long dummy)
{
int status;
int len;
unsigned long flags;
struct subch_data_s *sd = event_sd;
/* anything to read? */
len = CHUNKSIZE;
spin_lock_irqsave(&sd->sd_rlock, flags);
status = ia64_sn_irtr_recv(sd->sd_nasid, sd->sd_subch,
sd->sd_rb, &len);
while (!(status < 0)) {
spin_unlock_irqrestore(&sd->sd_rlock, flags);
scdrv_dispatch_event(sd->sd_rb, len);
len = CHUNKSIZE;
spin_lock_irqsave(&sd->sd_rlock, flags);
status = ia64_sn_irtr_recv(sd->sd_nasid, sd->sd_subch,
sd->sd_rb, &len);
}
spin_unlock_irqrestore(&sd->sd_rlock, flags);
}
/*
* scdrv_event_init
*
* Sets up a system controller subchannel to begin receiving event
* messages. This is sort of a specialized version of scdrv_open()
* in drivers/char/sn_sysctl.c.
*/
void
scdrv_event_init(struct sysctl_data_s *scd)
{
int rv;
event_sd = kmalloc(sizeof (struct subch_data_s), GFP_KERNEL);
if (event_sd == NULL) {
printk(KERN_WARNING "%s: couldn't allocate subchannel info"
" for event monitoring\n", __FUNCTION__);
return;
}
/* initialize subch_data_s fields */
memset(event_sd, 0, sizeof (struct subch_data_s));
event_sd->sd_nasid = scd->scd_nasid;
spin_lock_init(&event_sd->sd_rlock);
/* ask the system controllers to send events to this node */
event_sd->sd_subch = ia64_sn_sysctl_event_init(scd->scd_nasid);
if (event_sd->sd_subch < 0) {
kfree(event_sd);
printk(KERN_WARNING "%s: couldn't open event subchannel\n",
__FUNCTION__);
return;
}
/* hook event subchannel up to the system controller interrupt */
rv = request_irq(SGI_UART_VECTOR, scdrv_event_interrupt,
SA_SHIRQ | SA_INTERRUPT,
"system controller events", event_sd);
if (rv) {
printk(KERN_WARNING "%s: irq request failed (%d)\n",
__FUNCTION__, rv);
ia64_sn_irtr_close(event_sd->sd_nasid, event_sd->sd_subch);
kfree(event_sd);
return;
}
}
@@ -81,6 +81,7 @@ extern __u8 isa_irq_to_vector_map[16];
extern struct hw_interrupt_type irq_type_ia64_lsapic; /* CPU-internal interrupt controller */
extern int assign_irq_vector_nopanic (int irq); /* allocate a free vector without panic */
extern int assign_irq_vector (int irq); /* allocate a free vector */
extern void free_irq_vector (int vector);
extern void ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect);
......
@@ -254,6 +254,18 @@ extern int pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int
#define PFM_CPUINFO_DCR_PP 0x2 /* if set the system wide session has started */
#define PFM_CPUINFO_EXCL_IDLE 0x4 /* the system wide session excludes the idle task */
/*
* sysctl control structure. visible to sampling formats
*/
typedef struct {
int debug; /* turn on/off debugging via syslog */
int debug_ovfl; /* turn on/off debug printk in overflow handler */
int fastctxsw; /* turn on/off fast (insecure) ctxsw */
int expert_mode; /* turn on/off value checking */
} pfm_sysctl_t;
extern pfm_sysctl_t pfm_sysctl;
#endif /* __KERNEL__ */
#endif /* _ASM_IA64_PERFMON_H */