Commit eccaedd5 authored by Pat Gefre, committed by Tony Luck

[IA64-SGI] Add in Altix I/O code

Signed-off-by: Patrick Gefre <pfg@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
parent e4870e45
......@@ -4,9 +4,11 @@
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 2003 Silicon Graphics, Inc. All Rights Reserved.
# Copyright (C) 2004 Silicon Graphics, Inc. All Rights Reserved.
#
# Makefile for the sn ia64 subplatform
#
obj-y += kernel/
CPPFLAGS += -I$(srctree)/arch/ia64/sn/include
obj-y += kernel/ pci/
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_IOERROR_H
#define _ASM_IA64_SN_IOERROR_H
/*
* IO error structure.
*
* This structure would expand to hold the information retrieved from
* all IO related error registers.
*
* This structure is defined to hold all system specific
* information related to a single error.
*
* This serves a couple of purposes.
* - Error handling often involves translating one form of address to another
* form. So, instead of having different data structures at each level,
* we have a single structure, and the appropriate fields get filled in
* at each layer.
* - This provides a way to dump all error related information in any layer
* of error handling (debugging aid).
*
* A second possibility is to allow each layer to define its own error
* data structure, and fill in the proper fields. This has the advantage
* of isolating the layers.
* A big concern is the potential stack usage (and overflow) if each layer
* defines these structures on the stack (assuming we don't want to kmalloc).
*
* Any layer wishing to pass extra information to a layer next to it in
* error handling hierarchy, can do so as a separate parameter.
*/
typedef struct io_error_s {
/* Bit fields indicating which structure fields are valid */
union {
struct {
unsigned ievb_errortype:1;
unsigned ievb_widgetnum:1;
unsigned ievb_widgetdev:1;
unsigned ievb_srccpu:1;
unsigned ievb_srcnode:1;
unsigned ievb_errnode:1;
unsigned ievb_sysioaddr:1;
unsigned ievb_xtalkaddr:1;
unsigned ievb_busspace:1;
unsigned ievb_busaddr:1;
unsigned ievb_vaddr:1;
unsigned ievb_memaddr:1;
unsigned ievb_epc:1;
unsigned ievb_ef:1;
unsigned ievb_tnum:1;
} iev_b;
unsigned iev_a;
} ie_v;
short ie_errortype; /* error type: extra info about error */
short ie_widgetnum; /* Widget number that's in error */
short ie_widgetdev; /* Device within widget in error */
cpuid_t ie_srccpu; /* CPU on srcnode generating error */
cnodeid_t ie_srcnode; /* Node which caused the error */
cnodeid_t ie_errnode; /* Node where error was noticed */
iopaddr_t ie_sysioaddr; /* Sys specific IO address */
iopaddr_t ie_xtalkaddr; /* Xtalk (48bit) addr of Error */
iopaddr_t ie_busspace; /* Bus specific address space */
iopaddr_t ie_busaddr; /* Bus specific address */
caddr_t ie_vaddr; /* Virtual address of error */
iopaddr_t ie_memaddr; /* Physical memory address */
caddr_t ie_epc; /* pc when error reported */
caddr_t ie_ef; /* eframe when error reported */
short ie_tnum; /* Xtalk TNUM field */
} ioerror_t;
#define IOERROR_INIT(e) do { (e)->ie_v.iev_a = 0; } while (0)
#define IOERROR_SETVALUE(e,f,v) do { (e)->ie_ ## f = (v); (e)->ie_v.iev_b.ievb_ ## f = 1; } while (0)
#endif /* _ASM_IA64_SN_IOERROR_H */
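A hypothetical sketch of how one error-handling layer might use the two macros above; the function and the chosen fields are illustrative only, not part of this patch.

/* Hypothetical illustration of the ioerror_t fill-in scheme described
 * above: each layer sets only the fields it knows, and the ie_v bits
 * record which ones are valid for the layers above it.
 */
static void example_record_widget_error(ioerror_t *ioe, short widget, short device)
{
	IOERROR_INIT(ioe);			  /* clear all validity bits   */
	IOERROR_SETVALUE(ioe, widgetnum, widget); /* sets ie_widgetnum and the */
	IOERROR_SETVALUE(ioe, widgetdev, device); /* matching ievb_* valid bit */

	/* A higher layer checks ioe->ie_v.iev_b.ievb_widgetnum before it
	 * trusts ioe->ie_widgetnum.
	 */
}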
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992-1997,2000-2004 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H
#define _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H
/* Workarounds */
#define PV907516 (1 << 1) /* TIOCP: Don't write the write buffer flush reg */
#define BUSTYPE_MASK 0x1
/* Macros given a pcibus structure */
#define IS_PCIX(ps) ((ps)->pbi_bridge_mode & BUSTYPE_MASK)
#define IS_PCI_BRIDGE_ASIC(asic) (asic == PCIIO_ASIC_TYPE_PIC || \
asic == PCIIO_ASIC_TYPE_TIOCP)
#define IS_PIC_SOFT(ps) (ps->pbi_bridge_type == PCIBR_BRIDGETYPE_PIC)
/*
* The different PCI Bridge types supported on the SGI Altix platforms
*/
#define PCIBR_BRIDGETYPE_UNKNOWN -1
#define PCIBR_BRIDGETYPE_PIC 2
#define PCIBR_BRIDGETYPE_TIOCP 3
/*
* Bridge 64bit Direct Map Attributes
*/
#define PCI64_ATTR_PREF (1ull << 59)
#define PCI64_ATTR_PREC (1ull << 58)
#define PCI64_ATTR_VIRTUAL (1ull << 57)
#define PCI64_ATTR_BAR (1ull << 56)
#define PCI64_ATTR_SWAP (1ull << 55)
#define PCI64_ATTR_VIRTUAL1 (1ull << 54)
#define PCI32_LOCAL_BASE 0
#define PCI32_MAPPED_BASE 0x40000000
#define PCI32_DIRECT_BASE 0x80000000
#define IS_PCI32_MAPPED(x) ((uint64_t)(x) < PCI32_DIRECT_BASE && \
(uint64_t)(x) >= PCI32_MAPPED_BASE)
#define IS_PCI32_DIRECT(x) ((uint64_t)(x) >= PCI32_MAPPED_BASE)
/*
* Bridge PMU Address Translation Entry Attributes
*/
#define PCI32_ATE_V (0x1 << 0)
#define PCI32_ATE_CO (0x1 << 1)
#define PCI32_ATE_PREC (0x1 << 2)
#define PCI32_ATE_PREF (0x1 << 3)
#define PCI32_ATE_BAR (0x1 << 4)
#define PCI32_ATE_ADDR_SHFT 12
#define MINIMAL_ATES_REQUIRED(addr, size) \
(IOPG(IOPGOFF(addr) + (size) - 1) == IOPG((size) - 1))
#define MINIMAL_ATE_FLAG(addr, size) \
(MINIMAL_ATES_REQUIRED((uint64_t)addr, size) ? 1 : 0)
/* bit 29 of the pci address is the SWAP bit */
#define ATE_SWAPSHIFT 29
#define ATE_SWAP_ON(x) ((x) |= (1 << ATE_SWAPSHIFT))
#define ATE_SWAP_OFF(x) ((x) &= ~(1 << ATE_SWAPSHIFT))
/*
* I/O page size
*/
#if PAGE_SIZE < 16384
#define IOPFNSHIFT 12 /* 4K per mapped page */
#else
#define IOPFNSHIFT 14 /* 16K per mapped page */
#endif
#define IOPGSIZE (1 << IOPFNSHIFT)
#define IOPG(x) ((x) >> IOPFNSHIFT)
#define IOPGOFF(x) ((x) & (IOPGSIZE-1))
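MINIMAL_ATES_REQUIRED() (defined earlier in this header) uses the IOPG/IOPGOFF macros above to test whether a buffer's offset within its first I/O page forces an extra ATE beyond what the size alone needs. A worked sketch, assuming the 16K page case (IOPFNSHIFT == 14) and not part of this patch:

/* Worked illustration only (assumes IOPFNSHIFT == 14, i.e. 16K I/O pages,
 * and that the macros above are in scope).
 */
static void example_ate_span_check(void)
{
	/* 0x1000 + 0x2000 - 1 = 0x2fff -> IOPG() == 0, same as IOPG(0x1fff),
	 * so the mapping needs no extra ATE and the flag is 1.
	 */
	int minimal = MINIMAL_ATE_FLAG(0x1000, 0x2000);

	/* 0x3800 + 0x2000 - 1 = 0x57ff -> IOPG() == 1, but IOPG(0x1fff) == 0,
	 * so one extra ATE is required and the flag is 0.
	 */
	int spills = MINIMAL_ATE_FLAG(0x3800, 0x2000);

	(void)minimal;
	(void)spills;
}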
#define PCIBR_DEV_SWAP_DIR (1ull << 19)
#define PCIBR_CTRL_PAGE_SIZE (0x1 << 21)
/*
* PMU resources.
*/
struct ate_resource{
uint64_t *ate;
uint64_t num_ate;
uint64_t lowest_free_index;
};
struct pcibus_info {
struct pcibus_bussoft pbi_buscommon; /* common header */
uint32_t pbi_moduleid;
short pbi_bridge_type;
short pbi_bridge_mode;
struct ate_resource pbi_int_ate_resource;
uint64_t pbi_int_ate_size;
uint64_t pbi_dir_xbase;
char pbi_hub_xid;
uint64_t pbi_devreg[8];
spinlock_t pbi_lock;
uint32_t pbi_valid_devices;
uint32_t pbi_enabled_devices;
};
/*
* pcibus_info structure locking macros
*/
inline static unsigned long
pcibr_lock(struct pcibus_info *pcibus_info)
{
unsigned long flag;
spin_lock_irqsave(&pcibus_info->pbi_lock, flag);
return(flag);
}
#define pcibr_unlock(pcibus_info, flag) spin_unlock_irqrestore(&pcibus_info->pbi_lock, flag)
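The flags value returned by pcibr_lock() must be handed back to pcibr_unlock() so interrupt state is restored correctly. A hypothetical caller (not part of this patch) follows this pattern:

/* Hypothetical caller sketch for the locking pair above. */
static void example_update_bridge_state(struct pcibus_info *pcibus_info)
{
	unsigned long flags;

	flags = pcibr_lock(pcibus_info);
	/* ... touch pcibus_info->pbi_* state or bridge registers here ... */
	pcibr_unlock(pcibus_info, flags);
}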
extern void *pcibr_bus_fixup(struct pcibus_bussoft *);
extern uint64_t pcibr_dma_map(struct pcidev_info *, unsigned long, size_t, unsigned int);
extern void pcibr_dma_unmap(struct pcidev_info *, dma_addr_t, int);
/*
* prototypes for the bridge asic register access routines in pcibr_reg.c
*/
extern void pcireg_control_bit_clr(struct pcibus_info *, uint64_t);
extern void pcireg_control_bit_set(struct pcibus_info *, uint64_t);
extern uint64_t pcireg_tflush_get(struct pcibus_info *);
extern uint64_t pcireg_intr_status_get(struct pcibus_info *);
extern void pcireg_intr_enable_bit_clr(struct pcibus_info *, uint64_t);
extern void pcireg_intr_enable_bit_set(struct pcibus_info *, uint64_t);
extern void pcireg_intr_addr_addr_set(struct pcibus_info *, int, uint64_t);
extern void pcireg_force_intr_set(struct pcibus_info *, int);
extern uint64_t pcireg_wrb_flush_get(struct pcibus_info *, int);
extern void pcireg_int_ate_set(struct pcibus_info *, int, uint64_t);
extern uint64_t * pcireg_int_ate_addr(struct pcibus_info *, int);
extern void pcibr_force_interrupt(struct sn_irq_info *sn_irq_info);
extern void pcibr_change_devices_irq(struct sn_irq_info *sn_irq_info);
extern int pcibr_ate_alloc(struct pcibus_info *, int);
extern void pcibr_ate_free(struct pcibus_info *, int);
extern void ate_write(struct pcibus_info *, int, int, uint64_t);
#endif
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H
#define _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H
/*
* SN pci asic types. Do not ever renumber these or reuse values. The
* values must agree with what prom thinks they are.
*/
#define PCIIO_ASIC_TYPE_UNKNOWN 0
#define PCIIO_ASIC_TYPE_PPB 1
#define PCIIO_ASIC_TYPE_PIC 2
#define PCIIO_ASIC_TYPE_TIOCP 3
/*
* Common pciio bus provider data. There should be one of these as the
* first field in any pciio based provider soft structure (e.g. pcibr_soft
* tioca_soft, etc).
*/
struct pcibus_bussoft {
uint32_t bs_asic_type; /* chipset type */
uint32_t bs_xid; /* xwidget id */
uint64_t bs_persist_busnum; /* Persistent Bus Number */
uint64_t bs_legacy_io; /* legacy io pio addr */
uint64_t bs_legacy_mem; /* legacy mem pio addr */
uint64_t bs_base; /* widget base */
struct xwidget_info *bs_xwidget_info;
};
/*
* DMA mapping flags
*/
#define SN_PCIDMA_CONSISTENT 0x0001
#endif /* _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H */
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_PCI_PCIDEV_H
#define _ASM_IA64_SN_PCI_PCIDEV_H
#include <linux/pci.h>
extern struct sn_irq_info **sn_irq;
#define SN_PCIDEV_INFO(pci_dev) \
((struct pcidev_info *)((pci_dev)->sysdata))
/*
* Given a pci_bus, return the sn pcibus_bussoft struct. Note that
* this only works for root busses, not for busses represented by PPB's.
*/
#define SN_PCIBUS_BUSSOFT(pci_bus) \
((struct pcibus_bussoft *)(PCI_CONTROLLER((pci_bus))->platform_data))
/*
* Given a struct pci_dev, return the sn pcibus_bussoft struct. Note
* that this is not equivalent to SN_PCIBUS_BUSSOFT(pci_dev->bus) due
* to possible PPB's in the path.
*/
#define SN_PCIDEV_BUSSOFT(pci_dev) \
(SN_PCIDEV_INFO(pci_dev)->pdi_host_pcidev_info->pdi_pcibus_info)
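A minimal sketch of the lookup these macros provide, in a source file that includes this header; the helper name is hypothetical.

/* Hypothetical helper: per the comments above, SN_PCIDEV_BUSSOFT() is the
 * safe way to reach the bus soft state for a device that may sit behind
 * a PPB, while SN_PCIBUS_BUSSOFT() only works on a root bus.
 */
static struct pcibus_bussoft *example_bussoft_for_device(struct pci_dev *dev)
{
	return SN_PCIDEV_BUSSOFT(dev);
}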
#define PCIIO_BUS_NONE 255 /* bus 255 reserved */
#define PCIIO_SLOT_NONE 255
#define PCIIO_FUNC_NONE 255
#define PCIIO_VENDOR_ID_NONE (-1)
struct pcidev_info {
uint64_t pdi_pio_mapped_addr[7]; /* 6 BARs PLUS 1 ROM */
uint64_t pdi_slot_host_handle; /* Bus and devfn Host pci_dev */
struct pcibus_bussoft *pdi_pcibus_info; /* Kernel common bus soft */
struct pcidev_info *pdi_host_pcidev_info; /* Kernel Host pci_dev */
struct pci_dev *pdi_linux_pcidev; /* Kernel pci_dev */
struct sn_irq_info *pdi_sn_irq_info;
};
#endif /* _ASM_IA64_SN_PCI_PCIDEV_H */
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_PCI_PIC_H
#define _ASM_IA64_SN_PCI_PIC_H
/*
* PIC AS DEVICE ZERO
* ------------------
*
* PIC handles PCI/X busses. PCI/X requires that the 'bridge' (i.e. PIC)
* be designated as 'device 0'. That is a departure from earlier SGI
* PCI bridges. Because of that we use config space 1 to access the
* config space of the first actual PCI device on the bus.
* Here's what the PIC manual says:
*
* The current PCI-X bus specification now defines that the parent
* host's bus bridge (PIC for example) must be device 0 on bus 0. PIC
* reduced the total number of devices from 8 to 4 and removed the
* device registers and windows, now only supporting devices 0, 1, 2, and
* 3. PIC did leave all 8 configuration space windows. The reason was
* there was nothing to gain by removing them. Herein lies the problem.
* The device numbering we do using 0 through 3 is unrelated to the device
* numbering which PCI-X requires in configuration space. In the past we
* correlated config space and our device space 0 <-> 0, 1 <-> 1, etc.
* PCI-X requires we start at 1, not 0, and currently the PX brick
* does associate our:
*
* device 0 with configuration space window 1,
* device 1 with configuration space window 2,
* device 2 with configuration space window 3,
* device 3 with configuration space window 4.
*
* The net effect is that all config space accesses are off-by-one with
* relation to other per-slot accesses on the PIC.
* Here is a table that shows some of that:
*
* Internal Slot#
* |
* | 0 1 2 3
* ----------|---------------------------------------
* config | 0x21000 0x22000 0x23000 0x24000
* |
* even rrb | 0[0] n/a 1[0] n/a [] == implied even/odd
* |
* odd rrb | n/a 0[1] n/a 1[1]
* |
* int dev | 00 01 10 11
* |
* ext slot# | 1 2 3 4
* ----------|---------------------------------------
*/
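Per the table above, internal device n is reached through configuration window n + 1; a hedged helper sketch (not part of this patch) makes the off-by-one explicit.

/* Illustrative only: internal PIC device 0-3 maps to type-0 config space
 * window 1-4, i.e. bridge offsets 0x21000, 0x22000, 0x23000, 0x24000.
 */
static unsigned long example_pic_cfg_offset(int internal_dev)
{
	return 0x21000UL + (unsigned long)internal_dev * 0x1000UL;
}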
#define PIC_ATE_TARGETID_SHFT 8
#define PIC_HOST_INTR_ADDR 0x0000FFFFFFFFFFFF
#define PIC_PCI64_ATTR_TARG_SHFT 60
/*****************************************************************************
*********************** PIC MMR structure mapping ***************************
*****************************************************************************/
/* NOTE: PIC WAR. PV#854697. PIC does not allow writes just to [31:0]
* of a 64-bit register. When writing PIC registers, always write the
* entire 64 bits.
*/
struct pic {
/* 0x000000-0x00FFFF -- Local Registers */
/* 0x000000-0x000057 -- Standard Widget Configuration */
uint64_t p_wid_id; /* 0x000000 */
uint64_t p_wid_stat; /* 0x000008 */
uint64_t p_wid_err_upper; /* 0x000010 */
uint64_t p_wid_err_lower; /* 0x000018 */
#define p_wid_err p_wid_err_lower
uint64_t p_wid_control; /* 0x000020 */
uint64_t p_wid_req_timeout; /* 0x000028 */
uint64_t p_wid_int_upper; /* 0x000030 */
uint64_t p_wid_int_lower; /* 0x000038 */
#define p_wid_int p_wid_int_lower
uint64_t p_wid_err_cmdword; /* 0x000040 */
uint64_t p_wid_llp; /* 0x000048 */
uint64_t p_wid_tflush; /* 0x000050 */
/* 0x000058-0x00007F -- Bridge-specific Widget Configuration */
uint64_t p_wid_aux_err; /* 0x000058 */
uint64_t p_wid_resp_upper; /* 0x000060 */
uint64_t p_wid_resp_lower; /* 0x000068 */
#define p_wid_resp p_wid_resp_lower
uint64_t p_wid_tst_pin_ctrl; /* 0x000070 */
uint64_t p_wid_addr_lkerr; /* 0x000078 */
/* 0x000080-0x00008F -- PMU & MAP */
uint64_t p_dir_map; /* 0x000080 */
uint64_t _pad_000088; /* 0x000088 */
/* 0x000090-0x00009F -- SSRAM */
uint64_t p_map_fault; /* 0x000090 */
uint64_t _pad_000098; /* 0x000098 */
/* 0x0000A0-0x0000AF -- Arbitration */
uint64_t p_arb; /* 0x0000A0 */
uint64_t _pad_0000A8; /* 0x0000A8 */
/* 0x0000B0-0x0000BF -- Number In A Can or ATE Parity Error */
uint64_t p_ate_parity_err; /* 0x0000B0 */
uint64_t _pad_0000B8; /* 0x0000B8 */
/* 0x0000C0-0x0000FF -- PCI/GIO */
uint64_t p_bus_timeout; /* 0x0000C0 */
uint64_t p_pci_cfg; /* 0x0000C8 */
uint64_t p_pci_err_upper; /* 0x0000D0 */
uint64_t p_pci_err_lower; /* 0x0000D8 */
#define p_pci_err p_pci_err_lower
uint64_t _pad_0000E0[4]; /* 0x0000{E0..F8} */
/* 0x000100-0x0001FF -- Interrupt */
uint64_t p_int_status; /* 0x000100 */
uint64_t p_int_enable; /* 0x000108 */
uint64_t p_int_rst_stat; /* 0x000110 */
uint64_t p_int_mode; /* 0x000118 */
uint64_t p_int_device; /* 0x000120 */
uint64_t p_int_host_err; /* 0x000128 */
uint64_t p_int_addr[8]; /* 0x0001{30,,,68} */
uint64_t p_err_int_view; /* 0x000170 */
uint64_t p_mult_int; /* 0x000178 */
uint64_t p_force_always[8]; /* 0x0001{80,,,B8} */
uint64_t p_force_pin[8]; /* 0x0001{C0,,,F8} */
/* 0x000200-0x000298 -- Device */
uint64_t p_device[4]; /* 0x0002{00,,,18} */
uint64_t _pad_000220[4]; /* 0x0002{20,,,38} */
uint64_t p_wr_req_buf[4]; /* 0x0002{40,,,58} */
uint64_t _pad_000260[4]; /* 0x0002{60,,,78} */
uint64_t p_rrb_map[2]; /* 0x0002{80,,,88} */
#define p_even_resp p_rrb_map[0] /* 0x000280 */
#define p_odd_resp p_rrb_map[1] /* 0x000288 */
uint64_t p_resp_status; /* 0x000290 */
uint64_t p_resp_clear; /* 0x000298 */
uint64_t _pad_0002A0[12]; /* 0x0002{A0..F8} */
/* 0x000300-0x0003F8 -- Buffer Address Match Registers */
struct {
uint64_t upper; /* 0x0003{00,,,F0} */
uint64_t lower; /* 0x0003{08,,,F8} */
} p_buf_addr_match[16];
/* 0x000400-0x0005FF -- Performance Monitor Registers (even only) */
struct {
uint64_t flush_w_touch; /* 0x000{400,,,5C0} */
uint64_t flush_wo_touch; /* 0x000{408,,,5C8} */
uint64_t inflight; /* 0x000{410,,,5D0} */
uint64_t prefetch; /* 0x000{418,,,5D8} */
uint64_t total_pci_retry; /* 0x000{420,,,5E0} */
uint64_t max_pci_retry; /* 0x000{428,,,5E8} */
uint64_t max_latency; /* 0x000{430,,,5F0} */
uint64_t clear_all; /* 0x000{438,,,5F8} */
} p_buf_count[8];
/* 0x000600-0x0009FF -- PCI/X registers */
uint64_t p_pcix_bus_err_addr; /* 0x000600 */
uint64_t p_pcix_bus_err_attr; /* 0x000608 */
uint64_t p_pcix_bus_err_data; /* 0x000610 */
uint64_t p_pcix_pio_split_addr; /* 0x000618 */
uint64_t p_pcix_pio_split_attr; /* 0x000620 */
uint64_t p_pcix_dma_req_err_attr; /* 0x000628 */
uint64_t p_pcix_dma_req_err_addr; /* 0x000630 */
uint64_t p_pcix_timeout; /* 0x000638 */
uint64_t _pad_000640[120]; /* 0x000{640,,,9F8} */
/* 0x000A00-0x000BFF -- PCI/X Read&Write Buffer */
struct {
uint64_t p_buf_addr; /* 0x000{A00,,,AF0} */
uint64_t p_buf_attr; /* 0X000{A08,,,AF8} */
} p_pcix_read_buf_64[16];
struct {
uint64_t p_buf_addr; /* 0x000{B00,,,BE0} */
uint64_t p_buf_attr; /* 0x000{B08,,,BE8} */
uint64_t p_buf_valid; /* 0x000{B10,,,BF0} */
uint64_t __pad1; /* 0x000{B18,,,BF8} */
} p_pcix_write_buf_64[8];
/* End of Local Registers -- Start of Address Map space */
char _pad_000c00[0x010000 - 0x000c00];
/* 0x010000-0x011fff -- Internal ATE RAM (Auto Parity Generation) */
uint64_t p_int_ate_ram[1024]; /* 0x010000-0x011fff */
/* 0x012000-0x013fff -- Internal ATE RAM (Manual Parity Generation) */
uint64_t p_int_ate_ram_mp[1024]; /* 0x012000-0x013fff */
char _pad_014000[0x18000 - 0x014000];
/* 0x18000-0x197F8 -- PIC Write Request Ram */
uint64_t p_wr_req_lower[256]; /* 0x18000 - 0x187F8 */
uint64_t p_wr_req_upper[256]; /* 0x18800 - 0x18FF8 */
uint64_t p_wr_req_parity[256]; /* 0x19000 - 0x197F8 */
char _pad_019800[0x20000 - 0x019800];
/* 0x020000-0x027FFF -- PCI Device Configuration Spaces */
union {
uint8_t c[0x1000 / 1]; /* 0x02{0000,,,7FFF} */
uint16_t s[0x1000 / 2]; /* 0x02{0000,,,7FFF} */
uint32_t l[0x1000 / 4]; /* 0x02{0000,,,7FFF} */
uint64_t d[0x1000 / 8]; /* 0x02{0000,,,7FFF} */
union {
uint8_t c[0x100 / 1];
uint16_t s[0x100 / 2];
uint32_t l[0x100 / 4];
uint64_t d[0x100 / 8];
} f[8];
} p_type0_cfg_dev[8]; /* 0x02{0000,,,7FFF} */
/* 0x028000-0x028FFF -- PCI Type 1 Configuration Space */
union {
uint8_t c[0x1000 / 1]; /* 0x028000-0x029000 */
uint16_t s[0x1000 / 2]; /* 0x028000-0x029000 */
uint32_t l[0x1000 / 4]; /* 0x028000-0x029000 */
uint64_t d[0x1000 / 8]; /* 0x028000-0x029000 */
union {
uint8_t c[0x100 / 1];
uint16_t s[0x100 / 2];
uint32_t l[0x100 / 4];
uint64_t d[0x100 / 8];
} f[8];
} p_type1_cfg; /* 0x028000-0x029000 */
char _pad_029000[0x030000-0x029000];
/* 0x030000-0x030007 -- PCI Interrupt Acknowledge Cycle */
union {
uint8_t c[8 / 1];
uint16_t s[8 / 2];
uint32_t l[8 / 4];
uint64_t d[8 / 8];
} p_pci_iack; /* 0x030000-0x030007 */
char _pad_030007[0x040000-0x030008];
/* 0x040000-0x040007 -- PCIX Special Cycle */
union {
uint8_t c[8 / 1];
uint16_t s[8 / 2];
uint32_t l[8 / 4];
uint64_t d[8 / 8];
} p_pcix_cycle; /* 0x040000-0x040007 */
};
#endif /* _ASM_IA64_SN_PCI_PIC_H */
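Tying back to the PV#854697 note above struct pic: register updates should read-modify-write the whole 64-bit value, never just the low 32 bits. A sketch of that pattern; the helper and the direct pointer access are illustrative only.

/* Illustrative sketch of the full-64-bit write pattern required by the
 * PV#854697 workaround; 'pic' is assumed to point at the chip's MMR space.
 */
static void example_pic_control_set(volatile struct pic *pic, uint64_t bits)
{
	uint64_t val;

	val = pic->p_wid_control;	/* read the entire 64-bit register */
	val |= bits;
	pic->p_wid_control = val;	/* write the entire 64 bits back   */
}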
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2003-2004 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_PCI_TIOCP_H
#define _ASM_IA64_SN_PCI_TIOCP_H
#define TIOCP_HOST_INTR_ADDR 0x003FFFFFFFFFFFFF
#define TIOCP_PCI64_CMDTYPE_MEM (0x1ull << 60)
/*****************************************************************************
*********************** TIOCP MMR structure mapping ***************************
*****************************************************************************/
struct tiocp{
/* 0x000000-0x00FFFF -- Local Registers */
/* 0x000000-0x000057 -- (Legacy Widget Space) Configuration */
uint64_t cp_id; /* 0x000000 */
uint64_t cp_stat; /* 0x000008 */
uint64_t cp_err_upper; /* 0x000010 */
uint64_t cp_err_lower; /* 0x000018 */
#define cp_err cp_err_lower
uint64_t cp_control; /* 0x000020 */
uint64_t cp_req_timeout; /* 0x000028 */
uint64_t cp_intr_upper; /* 0x000030 */
uint64_t cp_intr_lower; /* 0x000038 */
#define cp_intr cp_intr_lower
uint64_t cp_err_cmdword; /* 0x000040 */
uint64_t _pad_000048; /* 0x000048 */
uint64_t cp_tflush; /* 0x000050 */
/* 0x000058-0x00007F -- Bridge-specific Configuration */
uint64_t cp_aux_err; /* 0x000058 */
uint64_t cp_resp_upper; /* 0x000060 */
uint64_t cp_resp_lower; /* 0x000068 */
#define cp_resp cp_resp_lower
uint64_t cp_tst_pin_ctrl; /* 0x000070 */
uint64_t cp_addr_lkerr; /* 0x000078 */
/* 0x000080-0x00008F -- PMU & MAP */
uint64_t cp_dir_map; /* 0x000080 */
uint64_t _pad_000088; /* 0x000088 */
/* 0x000090-0x00009F -- SSRAM */
uint64_t cp_map_fault; /* 0x000090 */
uint64_t _pad_000098; /* 0x000098 */
/* 0x0000A0-0x0000AF -- Arbitration */
uint64_t cp_arb; /* 0x0000A0 */
uint64_t _pad_0000A8; /* 0x0000A8 */
/* 0x0000B0-0x0000BF -- Number In A Can or ATE Parity Error */
uint64_t cp_ate_parity_err; /* 0x0000B0 */
uint64_t _pad_0000B8; /* 0x0000B8 */
/* 0x0000C0-0x0000FF -- PCI/GIO */
uint64_t cp_bus_timeout; /* 0x0000C0 */
uint64_t cp_pci_cfg; /* 0x0000C8 */
uint64_t cp_pci_err_upper; /* 0x0000D0 */
uint64_t cp_pci_err_lower; /* 0x0000D8 */
#define cp_pci_err cp_pci_err_lower
uint64_t _pad_0000E0[4]; /* 0x0000{E0..F8} */
/* 0x000100-0x0001FF -- Interrupt */
uint64_t cp_int_status; /* 0x000100 */
uint64_t cp_int_enable; /* 0x000108 */
uint64_t cp_int_rst_stat; /* 0x000110 */
uint64_t cp_int_mode; /* 0x000118 */
uint64_t cp_int_device; /* 0x000120 */
uint64_t cp_int_host_err; /* 0x000128 */
uint64_t cp_int_addr[8]; /* 0x0001{30,,,68} */
uint64_t cp_err_int_view; /* 0x000170 */
uint64_t cp_mult_int; /* 0x000178 */
uint64_t cp_force_always[8]; /* 0x0001{80,,,B8} */
uint64_t cp_force_pin[8]; /* 0x0001{C0,,,F8} */
/* 0x000200-0x000298 -- Device */
uint64_t cp_device[4]; /* 0x0002{00,,,18} */
uint64_t _pad_000220[4]; /* 0x0002{20,,,38} */
uint64_t cp_wr_req_buf[4]; /* 0x0002{40,,,58} */
uint64_t _pad_000260[4]; /* 0x0002{60,,,78} */
uint64_t cp_rrb_map[2]; /* 0x0002{80,,,88} */
#define cp_even_resp cp_rrb_map[0] /* 0x000280 */
#define cp_odd_resp cp_rrb_map[1] /* 0x000288 */
uint64_t cp_resp_status; /* 0x000290 */
uint64_t cp_resp_clear; /* 0x000298 */
uint64_t _pad_0002A0[12]; /* 0x0002{A0..F8} */
/* 0x000300-0x0003F8 -- Buffer Address Match Registers */
struct {
uint64_t upper; /* 0x0003{00,,,F0} */
uint64_t lower; /* 0x0003{08,,,F8} */
} cp_buf_addr_match[16];
/* 0x000400-0x0005FF -- Performance Monitor Registers (even only) */
struct {
uint64_t flush_w_touch; /* 0x000{400,,,5C0} */
uint64_t flush_wo_touch; /* 0x000{408,,,5C8} */
uint64_t inflight; /* 0x000{410,,,5D0} */
uint64_t prefetch; /* 0x000{418,,,5D8} */
uint64_t total_pci_retry; /* 0x000{420,,,5E0} */
uint64_t max_pci_retry; /* 0x000{428,,,5E8} */
uint64_t max_latency; /* 0x000{430,,,5F0} */
uint64_t clear_all; /* 0x000{438,,,5F8} */
} cp_buf_count[8];
/* 0x000600-0x0009FF -- PCI/X registers */
uint64_t cp_pcix_bus_err_addr; /* 0x000600 */
uint64_t cp_pcix_bus_err_attr; /* 0x000608 */
uint64_t cp_pcix_bus_err_data; /* 0x000610 */
uint64_t cp_pcix_pio_split_addr; /* 0x000618 */
uint64_t cp_pcix_pio_split_attr; /* 0x000620 */
uint64_t cp_pcix_dma_req_err_attr; /* 0x000628 */
uint64_t cp_pcix_dma_req_err_addr; /* 0x000630 */
uint64_t cp_pcix_timeout; /* 0x000638 */
uint64_t _pad_000640[24]; /* 0x000{640,,,6F8} */
/* 0x000700-0x000737 -- Debug Registers */
uint64_t cp_ct_debug_ctl; /* 0x000700 */
uint64_t cp_br_debug_ctl; /* 0x000708 */
uint64_t cp_mux3_debug_ctl; /* 0x000710 */
uint64_t cp_mux4_debug_ctl; /* 0x000718 */
uint64_t cp_mux5_debug_ctl; /* 0x000720 */
uint64_t cp_mux6_debug_ctl; /* 0x000728 */
uint64_t cp_mux7_debug_ctl; /* 0x000730 */
uint64_t _pad_000738[89]; /* 0x000{738,,,9F8} */
/* 0x000A00-0x000BFF -- PCI/X Read&Write Buffer */
struct {
uint64_t cp_buf_addr; /* 0x000{A00,,,AF0} */
uint64_t cp_buf_attr; /* 0X000{A08,,,AF8} */
} cp_pcix_read_buf_64[16];
struct {
uint64_t cp_buf_addr; /* 0x000{B00,,,BE0} */
uint64_t cp_buf_attr; /* 0x000{B08,,,BE8} */
uint64_t cp_buf_valid; /* 0x000{B10,,,BF0} */
uint64_t __pad1; /* 0x000{B18,,,BF8} */
} cp_pcix_write_buf_64[8];
/* End of Local Registers -- Start of Address Map space */
char _pad_000c00[0x010000 - 0x000c00];
/* 0x010000-0x011FF8 -- Internal ATE RAM (Auto Parity Generation) */
uint64_t cp_int_ate_ram[1024]; /* 0x010000-0x011FF8 */
char _pad_012000[0x14000 - 0x012000];
/* 0x014000-0x015FF8 -- Internal ATE RAM (Manual Parity Generation) */
uint64_t cp_int_ate_ram_mp[1024]; /* 0x014000-0x015FF8 */
char _pad_016000[0x18000 - 0x016000];
/* 0x18000-0x197F8 -- TIOCP Write Request Ram */
uint64_t cp_wr_req_lower[256]; /* 0x18000 - 0x187F8 */
uint64_t cp_wr_req_upper[256]; /* 0x18800 - 0x18FF8 */
uint64_t cp_wr_req_parity[256]; /* 0x19000 - 0x197F8 */
char _pad_019800[0x1C000 - 0x019800];
/* 0x1C000-0x1EFF8 -- TIOCP Read Response Ram */
uint64_t cp_rd_resp_lower[512]; /* 0x1C000 - 0x1CFF8 */
uint64_t cp_rd_resp_upper[512]; /* 0x1D000 - 0x1DFF8 */
uint64_t cp_rd_resp_parity[512]; /* 0x1E000 - 0x1EFF8 */
char _pad_01F000[0x20000 - 0x01F000];
/* 0x020000-0x021FFF -- Host Device (CP) Configuration Space (not used) */
char _pad_020000[0x021000 - 0x20000];
/* 0x021000-0x027FFF -- PCI Device Configuration Spaces */
union {
uint8_t c[0x1000 / 1]; /* 0x02{0000,,,7FFF} */
uint16_t s[0x1000 / 2]; /* 0x02{0000,,,7FFF} */
uint32_t l[0x1000 / 4]; /* 0x02{0000,,,7FFF} */
uint64_t d[0x1000 / 8]; /* 0x02{0000,,,7FFF} */
union {
uint8_t c[0x100 / 1];
uint16_t s[0x100 / 2];
uint32_t l[0x100 / 4];
uint64_t d[0x100 / 8];
} f[8];
} cp_type0_cfg_dev[7]; /* 0x02{1000,,,7FFF} */
/* 0x028000-0x028FFF -- PCI Type 1 Configuration Space */
union {
uint8_t c[0x1000 / 1]; /* 0x028000-0x029000 */
uint16_t s[0x1000 / 2]; /* 0x028000-0x029000 */
uint32_t l[0x1000 / 4]; /* 0x028000-0x029000 */
uint64_t d[0x1000 / 8]; /* 0x028000-0x029000 */
union {
uint8_t c[0x100 / 1];
uint16_t s[0x100 / 2];
uint32_t l[0x100 / 4];
uint64_t d[0x100 / 8];
} f[8];
} cp_type1_cfg; /* 0x028000-0x029000 */
char _pad_029000[0x030000-0x029000];
/* 0x030000-0x030007 -- PCI Interrupt Acknowledge Cycle */
union {
uint8_t c[8 / 1];
uint16_t s[8 / 2];
uint32_t l[8 / 4];
uint64_t d[8 / 8];
} cp_pci_iack; /* 0x030000-0x030007 */
char _pad_030007[0x040000-0x030008];
/* 0x040000-0x040007 -- PCIX Special Cycle */
union {
uint8_t c[8 / 1];
uint16_t s[8 / 2];
uint32_t l[8 / 4];
uint64_t d[8 / 8];
} cp_pcix_cycle; /* 0x040000-0x040007 */
char _pad_040007[0x200000-0x040008];
/* 0x200000-0x7FFFFF -- PCI/GIO Device Spaces */
union {
uint8_t c[0x100000 / 1];
uint16_t s[0x100000 / 2];
uint32_t l[0x100000 / 4];
uint64_t d[0x100000 / 8];
} cp_devio_raw[6]; /* 0x200000-0x7FFFFF */
#define cp_devio(n) cp_devio_raw[((n)<2)?(n*2):(n+2)]
char _pad_800000[0xA00000-0x800000];
/* 0xA00000-0xBFFFFF -- PCI/GIO Device Spaces w/flush */
union {
uint8_t c[0x100000 / 1];
uint16_t s[0x100000 / 2];
uint32_t l[0x100000 / 4];
uint64_t d[0x100000 / 8];
} cp_devio_raw_flush[6]; /* 0xA00000-0xBFFFFF */
#define cp_devio_flush(n) cp_devio_raw_flush[((n)<2)?(n*2):(n+2)]
};
#endif /* _ASM_IA64_SN_PCI_TIOCP_H */
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2001-2004 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_SHUB_H
#define _ASM_IA64_SN_SHUB_H
#define MD_MEM_BANKS 4
/*
* Junk Bus Address Space
* The junk bus is used to access the PROM, LEDs, and UART. It's
* accessed through the local block MMR space. The data path is
* 16 bits wide. This space requires address bits 31-27 to be set, and
* is further divided by address bits 26:15.
* The LED addresses are write-only. To read the LEDs, you need to use
* SH_JUNK_BUS_LED0-3, defined in shub_mmr.h
*
*/
#define SH_REAL_JUNK_BUS_LED0 0x7fed00000
#define SH_REAL_JUNK_BUS_LED1 0x7fed10000
#define SH_REAL_JUNK_BUS_LED2 0x7fed20000
#define SH_REAL_JUNK_BUS_LED3 0x7fed30000
#define SH_JUNK_BUS_UART0 0x7fed40000
#define SH_JUNK_BUS_UART1 0x7fed40008
#define SH_JUNK_BUS_UART2 0x7fed40010
#define SH_JUNK_BUS_UART3 0x7fed40018
#define SH_JUNK_BUS_UART4 0x7fed40020
#define SH_JUNK_BUS_UART5 0x7fed40028
#define SH_JUNK_BUS_UART6 0x7fed40030
#define SH_JUNK_BUS_UART7 0x7fed40038
#endif /* _ASM_IA64_SN_SHUB_H */
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_TIO_H
#define _ASM_IA64_SN_TIO_H
#define TIO_MMR_ADDR_MOD
#define TIO_NODE_ID TIO_MMR_ADDR_MOD(0x0000000090060e80)
#define TIO_ITTE_BASE 0xb0008800 /* base of translation table entries */
#define TIO_ITTE(bigwin) (TIO_ITTE_BASE + 8*(bigwin))
#define TIO_ITTE_OFFSET_BITS 8 /* size of offset field */
#define TIO_ITTE_OFFSET_MASK ((1<<TIO_ITTE_OFFSET_BITS)-1)
#define TIO_ITTE_OFFSET_SHIFT 0
#define TIO_ITTE_WIDGET_BITS 2 /* size of widget field */
#define TIO_ITTE_WIDGET_MASK ((1<<TIO_ITTE_WIDGET_BITS)-1)
#define TIO_ITTE_WIDGET_SHIFT 12
#define TIO_ITTE_VALID_MASK 0x1
#define TIO_ITTE_VALID_SHIFT 16
#define TIO_ITTE_PUT(nasid, bigwin, widget, addr, valid) \
REMOTE_HUB_S((nasid), TIO_ITTE(bigwin), \
(((((addr) >> TIO_BWIN_SIZE_BITS) & \
TIO_ITTE_OFFSET_MASK) << TIO_ITTE_OFFSET_SHIFT) | \
(((widget) & TIO_ITTE_WIDGET_MASK) << TIO_ITTE_WIDGET_SHIFT)) | \
(( (valid) & TIO_ITTE_VALID_MASK) << TIO_ITTE_VALID_SHIFT))
#endif /* _ASM_IA64_SN_TIO_H */
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_XTALK_HUBDEV_H
#define _ASM_IA64_SN_XTALK_HUBDEV_H
#define HUB_WIDGET_ID_MAX 0xf
#define DEV_PER_WIDGET (2*2*8)
#define IIO_ITTE_WIDGET_BITS 4 /* size of widget field */
#define IIO_ITTE_WIDGET_MASK ((1<<IIO_ITTE_WIDGET_BITS)-1)
#define IIO_ITTE_WIDGET_SHIFT 8
/*
* Use the top big window as a surrogate for the first small window
*/
#define SWIN0_BIGWIN HUB_NUM_BIG_WINDOW
#define IIO_NUM_ITTES 7
#define HUB_NUM_BIG_WINDOW (IIO_NUM_ITTES - 1)
struct sn_flush_device_list {
int sfdl_bus;
int sfdl_slot;
int sfdl_pin;
struct bar_list {
unsigned long start;
unsigned long end;
} sfdl_bar_list[6];
unsigned long sfdl_force_int_addr;
unsigned long sfdl_flush_value;
volatile unsigned long *sfdl_flush_addr;
uint64_t sfdl_persistent_busnum;
struct pcibus_info *sfdl_pcibus_info;
spinlock_t sfdl_flush_lock;
};
/*
* **widget_p - Used as an array[wid_num][device] of sn_flush_device_list.
*/
struct sn_flush_nasid_entry {
struct sn_flush_device_list **widget_p; /* Used as an array of wid_num */
uint64_t iio_itte[8];
};
struct hubdev_info {
geoid_t hdi_geoid;
short hdi_nasid;
short hdi_peer_nasid; /* Dual Porting Peer */
struct sn_flush_nasid_entry hdi_flush_nasid_list;
struct xwidget_info hdi_xwidget_info[HUB_WIDGET_ID_MAX + 1];
void *hdi_nodepda;
void *hdi_node_vertex;
void *hdi_xtalk_vertex;
};
extern void hubdev_init_node(nodepda_t *, cnodeid_t);
extern void hub_error_init(struct hubdev_info *);
extern void ice_error_init(struct hubdev_info *);
#endif /* _ASM_IA64_SN_XTALK_HUBDEV_H */
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992-1997,2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*/
#ifndef _ASM_IA64_SN_XTALK_XWIDGET_H
#define _ASM_IA64_SN_XTALK_XWIDGET_H
/* WIDGET_ID */
#define WIDGET_REV_NUM 0xf0000000
#define WIDGET_PART_NUM 0x0ffff000
#define WIDGET_MFG_NUM 0x00000ffe
#define WIDGET_REV_NUM_SHFT 28
#define WIDGET_PART_NUM_SHFT 12
#define WIDGET_MFG_NUM_SHFT 1
#define XWIDGET_PART_NUM(widgetid) (((widgetid) & WIDGET_PART_NUM) >> WIDGET_PART_NUM_SHFT)
#define XWIDGET_REV_NUM(widgetid) (((widgetid) & WIDGET_REV_NUM) >> WIDGET_REV_NUM_SHFT)
#define XWIDGET_MFG_NUM(widgetid) (((widgetid) & WIDGET_MFG_NUM) >> WIDGET_MFG_NUM_SHFT)
#define XWIDGET_PART_REV_NUM(widgetid) ((XWIDGET_PART_NUM(widgetid) << 4) | \
XWIDGET_REV_NUM(widgetid))
#define XWIDGET_PART_REV_NUM_REV(partrev) (partrev & 0xf)
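The field macros above decode the 32-bit WIDGET_ID register; a worked example on an arbitrary value (0x2d102048 is made up purely for illustration, not part of this patch):

/* Worked decode of a made-up widget ID value, exercising the macros above. */
static void example_decode_widget_id(void)
{
	uint32_t widgetid = 0x2d102048;

	int part = XWIDGET_PART_NUM(widgetid);	/* (0x0d102000 >> 12) == 0xd102 */
	int rev  = XWIDGET_REV_NUM(widgetid);	/* (0x20000000 >> 28) == 0x2    */
	int mfg  = XWIDGET_MFG_NUM(widgetid);	/* (0x00000048 >> 1)  == 0x24   */

	(void)part; (void)rev; (void)mfg;
}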
/* widget configuration registers */
struct widget_cfg{
uint32_t w_id; /* 0x04 */
uint32_t w_pad_0; /* 0x00 */
uint32_t w_status; /* 0x0c */
uint32_t w_pad_1; /* 0x08 */
uint32_t w_err_upper_addr; /* 0x14 */
uint32_t w_pad_2; /* 0x10 */
uint32_t w_err_lower_addr; /* 0x1c */
uint32_t w_pad_3; /* 0x18 */
uint32_t w_control; /* 0x24 */
uint32_t w_pad_4; /* 0x20 */
uint32_t w_req_timeout; /* 0x2c */
uint32_t w_pad_5; /* 0x28 */
uint32_t w_intdest_upper_addr; /* 0x34 */
uint32_t w_pad_6; /* 0x30 */
uint32_t w_intdest_lower_addr; /* 0x3c */
uint32_t w_pad_7; /* 0x38 */
uint32_t w_err_cmd_word; /* 0x44 */
uint32_t w_pad_8; /* 0x40 */
uint32_t w_llp_cfg; /* 0x4c */
uint32_t w_pad_9; /* 0x48 */
uint32_t w_tflush; /* 0x54 */
uint32_t w_pad_10; /* 0x50 */
};
/*
* Crosstalk Widget Hardware Identification, as defined in the Crosstalk spec.
*/
struct xwidget_hwid{
int mfg_num;
int rev_num;
int part_num;
};
struct xwidget_info{
struct xwidget_hwid xwi_hwid; /* Widget Identification */
char xwi_masterxid; /* Hub's Widget Port Number */
void *xwi_hubinfo; /* Hub's provider private info */
uint64_t *xwi_hub_provider; /* prom provider functions */
void *xwi_vertex;
};
#endif /* _ASM_IA64_SN_XTALK_XWIDGET_H */
......@@ -7,5 +7,6 @@
# Copyright (C) 1999,2001-2003 Silicon Graphics, Inc. All Rights Reserved.
#
obj-y += probe.o setup.o bte.o irq.o mca.o idle.o sn2/
obj-y += setup.o bte.o bte_error.o irq.o mca.o idle.o \
huberror.o io_init.o iomv.o klconflib.o sn2/
obj-$(CONFIG_IA64_GENERIC) += machvec.o
......@@ -8,13 +8,12 @@
#include <linux/config.h>
#include <linux/module.h>
#include <asm/sn/sgi.h>
#include <asm/sn/nodepda.h>
#include <asm/sn/addrs.h>
#include <asm/sn/arch.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/pda.h>
#include <asm/sn/sn2/shubio.h>
#include "shubio.h"
#include <asm/nodedata.h>
#include <linux/bootmem.h>
......@@ -30,8 +29,7 @@
/* two interfaces on two btes */
#define MAX_INTERFACES_TO_TRY 4
static struct bteinfo_s *
bte_if_on_node(nasid_t nasid, int interface)
static struct bteinfo_s *bte_if_on_node(nasid_t nasid, int interface)
{
nodepda_t *tmp_nodepda;
......@@ -40,13 +38,11 @@ bte_if_on_node(nasid_t nasid, int interface)
}
/************************************************************************
* Block Transfer Engine copy related functions.
*
***********************************************************************/
/*
* bte_copy(src, dest, len, mode, notification)
*
......@@ -66,8 +62,7 @@ bte_if_on_node(nasid_t nasid, int interface)
* NOTE: This function requires src, dest, and len to
* be cacheline aligned.
*/
bte_result_t
bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification)
bte_result_t bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification)
{
u64 transfer_size;
u64 transfer_stat;
......@@ -77,7 +72,6 @@ bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification)
struct bteinfo_s *btes_to_try[MAX_INTERFACES_TO_TRY];
int bte_if_index;
BTE_PRINTK(("bte_copy(0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%p)\n",
src, dest, len, mode, notification));
......@@ -85,9 +79,9 @@ bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification)
return BTE_SUCCESS;
}
ASSERT(!((len & L1_CACHE_MASK) ||
BUG_ON(!((len & L1_CACHE_MASK) ||
(src & L1_CACHE_MASK) || (dest & L1_CACHE_MASK)));
ASSERT(len < ((BTE_LEN_MASK + 1) << L1_CACHE_SHIFT));
BUG_ON(len < ((BTE_LEN_MASK + 1) << L1_CACHE_SHIFT));
if (mode & BTE_USE_DEST) {
/* try remote then local */
......@@ -150,7 +144,6 @@ bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification)
}
} while (1);
if (notification == NULL) {
/* User does not want to be notified. */
bte->most_rcnt_na = &bte->notify;
......@@ -177,8 +170,8 @@ bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification)
/* Set the notification register */
BTE_PRINTKV(("IBNA = 0x%lx)\n",
TO_PHYS(ia64_tpa((unsigned long)bte->most_rcnt_na))));
BTE_NOTIF_STORE(bte, TO_PHYS(ia64_tpa((unsigned long)bte->most_rcnt_na)));
BTE_NOTIF_STORE(bte,
TO_PHYS(ia64_tpa((unsigned long)bte->most_rcnt_na)));
/* Initiate the transfer */
BTE_PRINTK(("IBCT = 0x%lx)\n", BTE_VALID_MODE(mode)));
......@@ -186,7 +179,6 @@ bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification)
spin_unlock_irqrestore(&bte->spinlock, irq_flags);
if (notification != NULL) {
return BTE_SUCCESS;
}
......@@ -194,7 +186,6 @@ bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification)
while ((transfer_stat = *bte->most_rcnt_na) == -1UL) {
}
BTE_PRINTKV((" Delay Done. IBLS = 0x%lx, most_rcnt_na = 0x%lx\n",
BTE_LNSTAT_LOAD(bte), *bte->most_rcnt_na));
......@@ -209,8 +200,8 @@ bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification)
return bte_status;
}
EXPORT_SYMBOL(bte_copy);
EXPORT_SYMBOL(bte_copy);
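A hypothetical blocking caller of bte_copy(), per the interface comment above: the addresses and length must be cacheline aligned, and a NULL notification pointer makes the call spin until the transfer completes. The BTE_NOTIFY mode flag is assumed to come from <asm/sn/bte.h>; this sketch is not part of the patch.

/* Hypothetical caller sketch.  src/dest are physical addresses; all three
 * arguments must be L1-cacheline aligned per the NOTE above.
 */
static bte_result_t example_blocking_bte_copy(u64 src, u64 dest, u64 len)
{
	/* NULL notification => bte_copy() waits for completion itself. */
	return bte_copy(src, dest, len, BTE_NOTIFY, NULL);
}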
/*
* bte_unaligned_copy(src, dest, len, mode)
......@@ -228,8 +219,7 @@ EXPORT_SYMBOL(bte_copy);
* NOTE: If the source, dest, and len are all cache line aligned,
* then it would be _FAR_ preferable to use bte_copy instead.
*/
bte_result_t
bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
bte_result_t bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
{
int destFirstCacheOffset;
u64 headBteSource;
......@@ -254,7 +244,7 @@ bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
if (bteBlock_unaligned == NULL) {
return BTEFAIL_NOTAVAIL;
}
bteBlock = (char *) L1_CACHE_ALIGN((u64) bteBlock_unaligned);
bteBlock = (char *)L1_CACHE_ALIGN((u64) bteBlock_unaligned);
headBcopySrcOffset = src & L1_CACHE_MASK;
destFirstCacheOffset = dest & L1_CACHE_MASK;
......@@ -302,15 +292,13 @@ bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
}
if (len > headBcopyLen) {
footBcopyLen =
(len - headBcopyLen) & L1_CACHE_MASK;
footBcopyLen = (len - headBcopyLen) & L1_CACHE_MASK;
footBteLen = L1_CACHE_BYTES;
footBteSource = src + len - footBcopyLen;
footBcopyDest = dest + len - footBcopyLen;
if (footBcopyDest ==
(headBcopyDest + headBcopyLen)) {
if (footBcopyDest == (headBcopyDest + headBcopyLen)) {
/*
* We have two contiguous bcopy
* blocks. Merge them.
......@@ -326,9 +314,8 @@ bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
return rv;
}
memcpy(__va(footBcopyDest),
(char *) bteBlock, footBcopyLen);
(char *)bteBlock, footBcopyLen);
}
} else {
footBcopyLen = 0;
......@@ -350,7 +337,6 @@ bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
}
} else {
/*
* The transfer is not symmetric, so we will
* allocate a buffer large enough for all the
......@@ -361,8 +347,7 @@ bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
/* Add the leader from source */
headBteLen = len + (src & L1_CACHE_MASK);
/* Add the trailing bytes from footer. */
headBteLen +=
L1_CACHE_BYTES - (headBteLen & L1_CACHE_MASK);
headBteLen += L1_CACHE_BYTES - (headBteLen & L1_CACHE_MASK);
headBteSource = src & ~L1_CACHE_MASK;
headBcopySrcOffset = src & L1_CACHE_MASK;
headBcopyDest = dest;
......@@ -371,40 +356,37 @@ bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
if (headBcopyLen > 0) {
rv = bte_copy(headBteSource,
ia64_tpa((unsigned long)bteBlock), headBteLen, mode, NULL);
ia64_tpa((unsigned long)bteBlock), headBteLen,
mode, NULL);
if (rv != BTE_SUCCESS) {
kfree(bteBlock_unaligned);
return rv;
}
memcpy(__va(headBcopyDest), ((char *) bteBlock +
headBcopySrcOffset),
headBcopyLen);
memcpy(__va(headBcopyDest), ((char *)bteBlock +
headBcopySrcOffset), headBcopyLen);
}
kfree(bteBlock_unaligned);
return BTE_SUCCESS;
}
EXPORT_SYMBOL(bte_unaligned_copy);
EXPORT_SYMBOL(bte_unaligned_copy);
/************************************************************************
* Block Transfer Engine initialization functions.
*
***********************************************************************/
/*
* bte_init_node(nodepda, cnode)
*
* Initialize the nodepda structure with BTE base addresses and
* spinlocks.
*/
void
bte_init_node(nodepda_t * mynodepda, cnodeid_t cnode)
void bte_init_node(nodepda_t * mynodepda, cnodeid_t cnode)
{
int i;
/*
* Indicate that all the block transfer engines on this node
* are available.
......@@ -418,7 +400,7 @@ bte_init_node(nodepda_t * mynodepda, cnodeid_t cnode)
spin_lock_init(&mynodepda->bte_recovery_lock);
init_timer(&mynodepda->bte_recovery_timer);
mynodepda->bte_recovery_timer.function = bte_error_handler;
mynodepda->bte_recovery_timer.data = (unsigned long) mynodepda;
mynodepda->bte_recovery_timer.data = (unsigned long)mynodepda;
for (i = 0; i < BTES_PER_NODE; i++) {
/* Which link status register should we use? */
......
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
*/
#include <linux/types.h>
#include <asm/sn/sn_sal.h>
#include "ioerror.h"
#include <asm/sn/addrs.h>
#include "shubio.h"
#include <asm/sn/geo.h>
#include "xtalk/xwidgetdev.h"
#include "xtalk/hubdev.h"
#include <asm/sn/bte.h>
/*
* Bte error handling is done in two parts. The first captures
* any crb related errors. Since there can be multiple crbs per
* interface and multiple interfaces active, we need to wait until
* all active crbs are completed. This is the first job of the
* second part error handler. When all bte related CRBs are cleanly
* completed, it resets the interfaces and gets them ready for new
* transfers to be queued.
*/
void bte_error_handler(unsigned long);
/*
* Wait until all BTE related CRBs are completed
* and then reset the interfaces.
*/
void bte_error_handler(unsigned long _nodepda)
{
struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda;
spinlock_t *recovery_lock = &err_nodepda->bte_recovery_lock;
struct timer_list *recovery_timer = &err_nodepda->bte_recovery_timer;
nasid_t nasid;
int i;
int valid_crbs;
unsigned long irq_flags;
volatile u64 *notify;
bte_result_t bh_error;
ii_imem_u_t imem; /* II IMEM Register */
ii_icrb0_d_u_t icrbd; /* II CRB Register D */
ii_ibcr_u_t ibcr;
ii_icmr_u_t icmr;
BTE_PRINTK(("bte_error_handler(%p) - %d\n", err_nodepda,
smp_processor_id()));
spin_lock_irqsave(recovery_lock, irq_flags);
if ((err_nodepda->bte_if[0].bh_error == BTE_SUCCESS) &&
(err_nodepda->bte_if[1].bh_error == BTE_SUCCESS)) {
BTE_PRINTK(("eh:%p:%d Nothing to do.\n", err_nodepda,
smp_processor_id()));
spin_unlock_irqrestore(recovery_lock, irq_flags);
return;
}
/*
* Lock all interfaces on this node to prevent new transfers
* from being queued.
*/
for (i = 0; i < BTES_PER_NODE; i++) {
if (err_nodepda->bte_if[i].cleanup_active) {
continue;
}
spin_lock(&err_nodepda->bte_if[i].spinlock);
BTE_PRINTK(("eh:%p:%d locked %d\n", err_nodepda,
smp_processor_id(), i));
err_nodepda->bte_if[i].cleanup_active = 1;
}
/* Determine information about our hub */
nasid = cnodeid_to_nasid(err_nodepda->bte_if[0].bte_cnode);
/*
* A BTE transfer can use multiple CRBs. We need to make sure
* that all the BTE CRBs are complete (or timed out) before
* attempting to clean up the error. Resetting the BTE while
* there are still BTE CRBs active will hang the BTE.
* We should look at all the CRBs to see if they are allocated
* to the BTE and see if they are still active. When none
* are active, we can continue with the cleanup.
*
* We also want to make sure that the local NI port is up.
* When a router resets, the NI port can go down while it
* goes through the LLP handshake, but then comes back up.
*/
icmr.ii_icmr_regval = REMOTE_HUB_L(nasid, IIO_ICMR);
if (icmr.ii_icmr_fld_s.i_crb_mark != 0) {
/*
* There are errors which still need to be cleaned up by
* hubiio_crb_error_handler
*/
mod_timer(recovery_timer, HZ * 5);
BTE_PRINTK(("eh:%p:%d Marked Giving up\n", err_nodepda,
smp_processor_id()));
spin_unlock_irqrestore(recovery_lock, irq_flags);
return;
}
if (icmr.ii_icmr_fld_s.i_crb_vld != 0) {
valid_crbs = icmr.ii_icmr_fld_s.i_crb_vld;
for (i = 0; i < IIO_NUM_CRBS; i++) {
if (!((1 << i) & valid_crbs)) {
/* This crb was not marked as valid, ignore */
continue;
}
icrbd.ii_icrb0_d_regval =
REMOTE_HUB_L(nasid, IIO_ICRB_D(i));
if (icrbd.d_bteop) {
mod_timer(recovery_timer, HZ * 5);
BTE_PRINTK(("eh:%p:%d Valid %d, Giving up\n",
err_nodepda, smp_processor_id(),
i));
spin_unlock_irqrestore(recovery_lock,
irq_flags);
return;
}
}
}
BTE_PRINTK(("eh:%p:%d Cleaning up\n", err_nodepda, smp_processor_id()));
/* Reenable both bte interfaces */
imem.ii_imem_regval = REMOTE_HUB_L(nasid, IIO_IMEM);
imem.ii_imem_fld_s.i_b0_esd = imem.ii_imem_fld_s.i_b1_esd = 1;
REMOTE_HUB_S(nasid, IIO_IMEM, imem.ii_imem_regval);
/* Reinitialize both BTE state machines. */
ibcr.ii_ibcr_regval = REMOTE_HUB_L(nasid, IIO_IBCR);
ibcr.ii_ibcr_fld_s.i_soft_reset = 1;
REMOTE_HUB_S(nasid, IIO_IBCR, ibcr.ii_ibcr_regval);
for (i = 0; i < BTES_PER_NODE; i++) {
bh_error = err_nodepda->bte_if[i].bh_error;
if (bh_error != BTE_SUCCESS) {
/* There is an error which needs to be notified */
notify = err_nodepda->bte_if[i].most_rcnt_na;
BTE_PRINTK(("cnode %d bte %d error=0x%lx\n",
err_nodepda->bte_if[i].bte_cnode,
err_nodepda->bte_if[i].bte_num,
IBLS_ERROR | (u64) bh_error));
*notify = IBLS_ERROR | bh_error;
err_nodepda->bte_if[i].bh_error = BTE_SUCCESS;
}
err_nodepda->bte_if[i].cleanup_active = 0;
BTE_PRINTK(("eh:%p:%d Unlocked %d\n", err_nodepda,
smp_processor_id(), i));
spin_unlock(&err_nodepda->bte_if[i].spinlock);
}
del_timer(recovery_timer);
spin_unlock_irqrestore(recovery_lock, irq_flags);
}
/*
* First part error handler. This is called whenever any error CRB interrupt
* is generated by the II.
*/
void
bte_crb_error_handler(cnodeid_t cnode, int btenum,
int crbnum, ioerror_t * ioe, int bteop)
{
struct bteinfo_s *bte;
bte = &(NODEPDA(cnode)->bte_if[btenum]);
/*
* The caller has already figured out the error type, we save that
* in the bte handle structure for the thread exercising the
* interface to consume.
*/
bte->bh_error = ioe->ie_errortype + BTEFAIL_OFFSET;
bte->bte_error_count++;
BTE_PRINTK(("Got an error on cnode %d bte %d: HW error type 0x%x\n",
bte->bte_cnode, bte->bte_num, ioe->ie_errortype));
bte_error_handler((unsigned long) NODEPDA(cnode));
}
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000,2002-2004 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <asm/delay.h>
#include <asm/sn/sn_sal.h>
#include "ioerror.h"
#include <asm/sn/addrs.h>
#include "shubio.h"
#include <asm/sn/geo.h>
#include "xtalk/xwidgetdev.h"
#include "xtalk/hubdev.h"
#include <asm/sn/bte.h>
void hubiio_crb_error_handler(struct hubdev_info *hubdev_info);
extern void bte_crb_error_handler(struct hubdev_info *, int, int, ioerror_t *,
int);
static irqreturn_t hub_eint_handler(int irq, void *arg, struct pt_regs *ep)
{
struct hubdev_info *hubdev_info;
struct ia64_sal_retval ret_stuff;
nasid_t nasid;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
hubdev_info = (struct hubdev_info *)arg;
nasid = hubdev_info->hdi_nasid;
SAL_CALL_NOLOCK(ret_stuff, SN_SAL_HUB_ERROR_INTERRUPT,
(u64) nasid, 0, 0, 0, 0, 0, 0);
if ((int)ret_stuff.v0)
panic("hubii_eint_handler(): Fatal TIO Error");
if (!(nasid & 1)) /* Not a TIO, handle CRB errors */
(void)hubiio_crb_error_handler(hubdev_info);
return IRQ_HANDLED;
}
/*
* Free the hub CRB "crbnum" which encountered an error.
* The assumption is that error handling was done successfully, and we
* now want to return the CRB back to the Hub for normal usage.
*
* In order to free the CRB, all that's needed is to de-allocate it.
*
* Assumption:
* No other processor is mucking around with the hub control register.
* So, upper layer has to single thread this.
*/
void hubiio_crb_free(struct hubdev_info *hubdev_info, int crbnum)
{
ii_icrb0_b_u_t icrbb;
/*
* The hardware does NOT clear the mark bit, so it must get cleared
* here to be sure the error is not processed twice.
*/
icrbb.ii_icrb0_b_regval = REMOTE_HUB_L(hubdev_info->hdi_nasid,
IIO_ICRB_B(crbnum));
icrbb.b_mark = 0;
REMOTE_HUB_S(hubdev_info->hdi_nasid, IIO_ICRB_B(crbnum),
icrbb.ii_icrb0_b_regval);
/*
* Deallocate the register and wait till the hub indicates it's done.
*/
REMOTE_HUB_S(hubdev_info->hdi_nasid, IIO_ICDR, (IIO_ICDR_PND | crbnum));
while (REMOTE_HUB_L(hubdev_info->hdi_nasid, IIO_ICDR) & IIO_ICDR_PND)
udelay(1);
}
/*
* hubiio_crb_error_handler
*
* This routine gets invoked when a hub gets an error
* interrupt. So, the routine is running in interrupt context
* at error interrupt level.
* Action:
* It's responsible for identifying ALL the CRBs that are marked
* with an error, and processing them.
*
* For each CRB that is marked with an error, map it to the
* reason it caused the error, and invoke the appropriate error handler.
*
* XXX Be aware of the information in the context register.
*
* NOTE:
* Use the REMOTE_HUB_* macros instead of LOCAL_HUB_* so that the interrupt
* handler can be run on any node (not necessarily the node
* corresponding to the hub that encountered the error).
*/
void hubiio_crb_error_handler(struct hubdev_info *hubdev_info)
{
nasid_t nasid;
ii_icrb0_a_u_t icrba; /* II CRB Register A */
ii_icrb0_b_u_t icrbb; /* II CRB Register B */
ii_icrb0_c_u_t icrbc; /* II CRB Register C */
ii_icrb0_d_u_t icrbd; /* II CRB Register D */
ii_icrb0_e_u_t icrbe; /* II CRB Register E */
int i;
int num_errors = 0; /* Num of errors handled */
ioerror_t ioerror;
nasid = hubdev_info->hdi_nasid;
/*
* XXX - Add locking for any recovery actions
*/
/*
* Scan through all CRBs in the Hub, and handle the errors
* in any of the CRBs marked.
*/
for (i = 0; i < IIO_NUM_CRBS; i++) {
/* Check this crb entry to see if it is in error. */
icrbb.ii_icrb0_b_regval = REMOTE_HUB_L(nasid, IIO_ICRB_B(i));
if (icrbb.b_mark == 0) {
continue;
}
icrba.ii_icrb0_a_regval = REMOTE_HUB_L(nasid, IIO_ICRB_A(i));
IOERROR_INIT(&ioerror);
/* read other CRB error registers. */
icrbc.ii_icrb0_c_regval = REMOTE_HUB_L(nasid, IIO_ICRB_C(i));
icrbd.ii_icrb0_d_regval = REMOTE_HUB_L(nasid, IIO_ICRB_D(i));
icrbe.ii_icrb0_e_regval = REMOTE_HUB_L(nasid, IIO_ICRB_E(i));
IOERROR_SETVALUE(&ioerror, errortype, icrbb.b_ecode);
/* Check if this error is due to BTE operation,
* and handle it separately.
*/
if (icrbd.d_bteop ||
((icrbb.b_initiator == IIO_ICRB_INIT_BTE0 ||
icrbb.b_initiator == IIO_ICRB_INIT_BTE1) &&
(icrbb.b_imsgtype == IIO_ICRB_IMSGT_BTE ||
icrbb.b_imsgtype == IIO_ICRB_IMSGT_SN1NET))) {
int bte_num;
if (icrbd.d_bteop)
bte_num = icrbc.c_btenum;
else /* b_initiator bit 2 gives BTE number */
bte_num = (icrbb.b_initiator & 0x4) >> 2;
hubiio_crb_free(hubdev_info, i);
bte_crb_error_handler(hubdev_info, bte_num,
i, &ioerror, icrbd.d_bteop);
num_errors++;
continue;
}
}
}
/*
* Function : hub_error_init
* Purpose : initialize the error handling requirements for a given hub.
* Parameters : hubdev_info, the hub device for this node.
* Assumptions : Called only once per hub, either by a local cpu or by a
* remote cpu when this hub is headless (cpuless).
* Returns : None
*/
void hub_error_init(struct hubdev_info *hubdev_info)
{
if (request_irq(SGI_II_ERROR, (void *)hub_eint_handler, SA_SHIRQ,
"SN_hub_error", (void *)hubdev_info))
printk("hub_error_init: Failed to request_irq for 0x%p\n",
hubdev_info);
return;
}
/*
* Function : ice_error_init
* Purpose : initialize the error handling requirements for a given tio.
* Parameters : hubdev_info, the hub device for this TIO.
* Assumptions : Called only once per tio.
* Returns : None
*/
void ice_error_init(struct hubdev_info *hubdev_info)
{
if (request_irq
(SGI_TIO_ERROR, (void *)hub_eint_handler, SA_SHIRQ, "SN_TIO_error",
(void *)hubdev_info))
printk("ice_error_init: request_irq() error hubdev_info 0x%p\n",
hubdev_info);
return;
}
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/module.h>
#include <asm/io.h>
#include <asm/delay.h>
#include <asm/sn/simulator.h>
#include <asm/sn/pda.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/shub_mmr.h>
/**
* sn_io_addr - convert an in/out port to an i/o address
* @port: port to convert
*
* Legacy in/out instructions are converted to ld/st instructions
* on IA64. This routine will convert a port number into a valid
* SN i/o address. Used by sn_in*() and sn_out*().
*/
void *sn_io_addr(unsigned long port)
{
if (!IS_RUNNING_ON_SIMULATOR()) {
/* On sn2, legacy I/O ports don't point at anything */
if (port < (64 * 1024))
return 0;
return ((void *)(port | __IA64_UNCACHED_OFFSET));
} else {
/* but the simulator uses them... */
unsigned long io_base;
unsigned long addr;
/*
* word align port, but need more than 10 bits
* for accessing registers in bedrock local block
* (so we don't do port&0xfff)
*/
if ((port >= 0x1f0 && port <= 0x1f7) ||
port == 0x3f6 || port == 0x3f7) {
io_base =
(0xc000000fcc000000 |
((unsigned long)get_nasid() << 38));
addr = io_base | ((port >> 2) << 12) | (port & 0xfff);
} else {
addr = __ia64_get_io_port_base() | ((port >> 2) << 2);
}
return (void *)addr;
}
}
EXPORT_SYMBOL(sn_io_addr);
/**
* sn_mmiob - I/O space memory barrier
*
* Acts as a memory mapped I/O barrier for platforms that queue writes to
* I/O space. This ensures that subsequent writes to I/O space arrive after
* all previous writes. For most ia64 platforms, this is a simple
* 'mf.a' instruction. For other platforms, mmiob() may have to read
* a chipset register to ensure ordering.
*
* On SN2, we wait for the PIO_WRITE_STATUS SHub register to clear.
* See PV 871084 for details about the WAR about zero value.
*
*/
void sn_mmiob(void)
{
while ((((volatile unsigned long)(*pda->
pio_write_status_addr)) &
SH_PIO_WRITE_STATUS_0_PENDING_WRITE_COUNT_MASK) !=
SH_PIO_WRITE_STATUS_0_PENDING_WRITE_COUNT_MASK)
udelay(1);
}
EXPORT_SYMBOL(sn_mmiob);
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <asm/sn/types.h>
#include <asm/sn/module.h>
#include <asm/sn/l1.h>
char brick_types[MAX_BRICK_TYPES + 1] = "cri.xdpn%#=vo^kjbf890123456789...";
/*
* Format a module id for printing.
*
* There are three possible formats:
*
* MODULE_FORMAT_BRIEF is the brief 6-character format, including
* the actual brick-type as recorded in the
* moduleid_t, eg. 002c15 for a C-brick, or
* 101#17 for a PX-brick.
*
* MODULE_FORMAT_LONG is the hwgraph format, eg. rack/002/bay/15
* or rack/101/bay/17 (note that the brick
* type does not appear in this format).
*
* MODULE_FORMAT_LCD is like MODULE_FORMAT_BRIEF, except that it
* ensures that the module id provided appears
* exactly as it would on the LCD display of
* the corresponding brick, eg. still 002c15
* for a C-brick, but 101p17 for a PX-brick.
*
* maule (9/13/04): Removed top-level check for (fmt == MODULE_FORMAT_LCD)
* making MODULE_FORMAT_LCD equivalent to MODULE_FORMAT_BRIEF. It was
* decided that all callers should assume the returned string is what
* is displayed on the brick L1 LCD.
*/
void
format_module_id(char *buffer, moduleid_t m, int fmt)
{
int rack, position;
unsigned char brickchar;
rack = MODULE_GET_RACK(m);
brickchar = MODULE_GET_BTCHAR(m);
/* Be sure we use the same brick type character as displayed
* on the brick's LCD
*/
switch (brickchar)
{
case L1_BRICKTYPE_GA:
case L1_BRICKTYPE_OPUS_TIO:
brickchar = L1_BRICKTYPE_C;
break;
case L1_BRICKTYPE_PX:
case L1_BRICKTYPE_PE:
case L1_BRICKTYPE_PA:
case L1_BRICKTYPE_SA: /* we can move this to the "I's" later
* if that makes more sense
*/
brickchar = L1_BRICKTYPE_P;
break;
case L1_BRICKTYPE_IX:
case L1_BRICKTYPE_IA:
brickchar = L1_BRICKTYPE_I;
break;
}
position = MODULE_GET_BPOS(m);
if ((fmt == MODULE_FORMAT_BRIEF) || (fmt == MODULE_FORMAT_LCD)) {
/* Brief module number format, eg. 002c15 */
/* Decompress the rack number */
*buffer++ = '0' + RACK_GET_CLASS(rack);
*buffer++ = '0' + RACK_GET_GROUP(rack);
*buffer++ = '0' + RACK_GET_NUM(rack);
/* Add the brick type */
*buffer++ = brickchar;
}
else if (fmt == MODULE_FORMAT_LONG) {
/* Fuller hwgraph format, eg. rack/002/bay/15 */
strcpy(buffer, "rack" "/"); buffer += strlen(buffer);
*buffer++ = '0' + RACK_GET_CLASS(rack);
*buffer++ = '0' + RACK_GET_GROUP(rack);
*buffer++ = '0' + RACK_GET_NUM(rack);
strcpy(buffer, "/" "bay" "/"); buffer += strlen(buffer);
}
/* Add the bay position, using at least two digits */
if (position < 10)
*buffer++ = '0';
sprintf(buffer, "%d", position);
}
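A usage sketch of the routine above (the moduleid value and the buffer size are assumptions; the example outputs come from the format descriptions in the comment):
	/* Illustrative only -- 'm' would be a moduleid_t obtained elsewhere,
	 * and 32 bytes is an assumed safe buffer size. */
	char loc[32];
	format_module_id(loc, m, MODULE_FORMAT_BRIEF);	/* e.g. "002c15" */
	format_module_id(loc, m, MODULE_FORMAT_LONG);	/* e.g. "rack/002/bay/15" */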
......@@ -10,13 +10,10 @@
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <asm/sn/sgi.h>
#include <asm/mca.h>
#include <asm/sal.h>
#include <asm/sn/sn_sal.h>
/*
* Interval for calling SAL to poll for errors that do NOT cause error
* interrupts. SAL will raise a CPEI if any errors are present that
......@@ -24,7 +21,6 @@
*/
#define CPEI_INTERVAL (5*HZ)
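In other words, SAL is polled for these non-interrupting errors once every 5 seconds (5*HZ jiffies).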
struct timer_list sn_cpei_timer;
void sn_init_cpei_timer(void);
......@@ -42,8 +38,7 @@ static u64 *sn_oemdata_size, sn_oemdata_bufsize;
* info for platform errors. buf is appended to sn_oemdata, resizing as
* required.
*/
static int
print_hook(const char *fmt, ...)
static int print_hook(const char *fmt, ...)
{
char buf[400];
int len;
......@@ -55,7 +50,8 @@ print_hook(const char *fmt, ...)
while (*sn_oemdata_size + len + 1 > sn_oemdata_bufsize) {
u8 *newbuf = vmalloc(sn_oemdata_bufsize += 1000);
if (!newbuf) {
printk(KERN_ERR "%s: unable to extend sn_oemdata\n", __FUNCTION__);
printk(KERN_ERR "%s: unable to extend sn_oemdata\n",
__FUNCTION__);
return 0;
}
memcpy(newbuf, *sn_oemdata, *sn_oemdata_size);
......@@ -67,9 +63,7 @@ print_hook(const char *fmt, ...)
return 0;
}
static void
sn_cpei_handler(int irq, void *devid, struct pt_regs *regs)
static void sn_cpei_handler(int irq, void *devid, struct pt_regs *regs)
{
/*
* this function's sole purpose is to call SAL when we receive
......@@ -82,16 +76,13 @@ sn_cpei_handler(int irq, void *devid, struct pt_regs *regs)
ia64_sn_plat_cpei_handler();
}
static void
sn_cpei_timer_handler(unsigned long dummy)
static void sn_cpei_timer_handler(unsigned long dummy)
{
sn_cpei_handler(-1, NULL, NULL);
mod_timer(&sn_cpei_timer, jiffies + CPEI_INTERVAL);
}
void
sn_init_cpei_timer(void)
void sn_init_cpei_timer(void)
{
init_timer(&sn_cpei_timer);
sn_cpei_timer.expires = jiffies + CPEI_INTERVAL;
......@@ -100,9 +91,11 @@ sn_init_cpei_timer(void)
}
static int
sn_platform_plat_specific_err_print(const u8 *sect_header, u8 **oemdata, u64 *oemdata_size)
sn_platform_plat_specific_err_print(const u8 * sect_header, u8 ** oemdata,
u64 * oemdata_size)
{
sal_log_plat_specific_err_info_t *psei = (sal_log_plat_specific_err_info_t *)sect_header;
sal_log_plat_specific_err_info_t *psei =
(sal_log_plat_specific_err_info_t *) sect_header;
if (!psei->valid.oem_data)
return 0;
down(&sn_oemdata_mutex);
......@@ -117,15 +110,17 @@ sn_platform_plat_specific_err_print(const u8 *sect_header, u8 **oemdata, u64 *oe
/* Callback when userspace salinfo wants to decode oem data via the platform
* kernel and/or prom.
*/
int sn_salinfo_platform_oemdata(const u8 *sect_header, u8 **oemdata, u64 *oemdata_size)
int sn_salinfo_platform_oemdata(const u8 * sect_header, u8 ** oemdata,
u64 * oemdata_size)
{
efi_guid_t guid = *(efi_guid_t *)sect_header;
efi_guid_t guid = *(efi_guid_t *) sect_header;
*oemdata_size = 0;
vfree(*oemdata);
*oemdata = NULL;
if (efi_guidcmp(guid, SAL_PLAT_SPECIFIC_ERR_SECT_GUID) == 0 ||
efi_guidcmp(guid, SAL_PLAT_MEM_DEV_ERR_SECT_GUID) == 0)
return sn_platform_plat_specific_err_print(sect_header, oemdata, oemdata_size);
return sn_platform_plat_specific_err_print(sect_header, oemdata,
oemdata_size);
return 0;
}
......
......@@ -9,7 +9,7 @@
* we wrap the inlines from asm/ia64/sn/sn2/io.h here.
*/
#include <asm/sn/sn2/io.h>
#include <asm/sn/io.h>
#ifdef CONFIG_IA64_GENERIC
......@@ -28,88 +28,74 @@
#undef __sn_readl_relaxed
#undef __sn_readq_relaxed
unsigned int
__sn_inb (unsigned long port)
unsigned int __sn_inb(unsigned long port)
{
return ___sn_inb(port);
}
unsigned int
__sn_inw (unsigned long port)
unsigned int __sn_inw(unsigned long port)
{
return ___sn_inw(port);
}
unsigned int
__sn_inl (unsigned long port)
unsigned int __sn_inl(unsigned long port)
{
return ___sn_inl(port);
}
void
__sn_outb (unsigned char val, unsigned long port)
void __sn_outb(unsigned char val, unsigned long port)
{
___sn_outb(val, port);
}
void
__sn_outw (unsigned short val, unsigned long port)
void __sn_outw(unsigned short val, unsigned long port)
{
___sn_outw(val, port);
}
void
__sn_outl (unsigned int val, unsigned long port)
void __sn_outl(unsigned int val, unsigned long port)
{
___sn_outl(val, port);
}
unsigned char
__sn_readb (void *addr)
unsigned char __sn_readb(void *addr)
{
return ___sn_readb (addr);
return ___sn_readb(addr);
}
unsigned short
__sn_readw (void *addr)
unsigned short __sn_readw(void *addr)
{
return ___sn_readw (addr);
return ___sn_readw(addr);
}
unsigned int
__sn_readl (void *addr)
unsigned int __sn_readl(void *addr)
{
return ___sn_readl (addr);
return ___sn_readl(addr);
}
unsigned long
__sn_readq (void *addr)
unsigned long __sn_readq(void *addr)
{
return ___sn_readq (addr);
return ___sn_readq(addr);
}
unsigned char
__sn_readb_relaxed (void *addr)
unsigned char __sn_readb_relaxed(void *addr)
{
return ___sn_readb_relaxed (addr);
return ___sn_readb_relaxed(addr);
}
unsigned short
__sn_readw_relaxed (void *addr)
unsigned short __sn_readw_relaxed(void *addr)
{
return ___sn_readw_relaxed (addr);
return ___sn_readw_relaxed(addr);
}
unsigned int
__sn_readl_relaxed (void *addr)
unsigned int __sn_readl_relaxed(void *addr)
{
return ___sn_readl_relaxed (addr);
return ___sn_readl_relaxed(addr);
}
unsigned long
__sn_readq_relaxed (void *addr)
unsigned long __sn_readq_relaxed(void *addr)
{
return ___sn_readq_relaxed (addr);
return ___sn_readq_relaxed(addr);
}
#endif
......@@ -16,7 +16,7 @@
#include <asm/io.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/sn2/addrs.h>
#include <asm/sn/addrs.h>
MODULE_DESCRIPTION("PROM version reporting for /proc");
MODULE_AUTHOR("Chad Talbott");
......@@ -60,25 +60,24 @@ struct fit_type_map_t {
};
static const struct fit_type_map_t fit_entry_types[] = {
{ FIT_ENTRY_FIT_HEADER, "FIT Header" },
{ FIT_ENTRY_PAL_A_GEN, "Generic PAL_A" },
{ FIT_ENTRY_PAL_A_PROC, "Processor-specific PAL_A" },
{ FIT_ENTRY_PAL_A, "PAL_A" },
{ FIT_ENTRY_PAL_B, "PAL_B" },
{ FIT_ENTRY_SAL_A, "SAL_A" },
{ FIT_ENTRY_SAL_B, "SAL_B" },
{ FIT_ENTRY_SALRUNTIME, "SAL runtime" },
{ FIT_ENTRY_EFI, "EFI" },
{ FIT_ENTRY_VMLINUX, "Embedded Linux" },
{ FIT_ENTRY_FPSWA, "Embedded FPSWA" },
{ FIT_ENTRY_UNUSED, "Unused" },
{ 0xff, "Error" },
{FIT_ENTRY_FIT_HEADER, "FIT Header"},
{FIT_ENTRY_PAL_A_GEN, "Generic PAL_A"},
{FIT_ENTRY_PAL_A_PROC, "Processor-specific PAL_A"},
{FIT_ENTRY_PAL_A, "PAL_A"},
{FIT_ENTRY_PAL_B, "PAL_B"},
{FIT_ENTRY_SAL_A, "SAL_A"},
{FIT_ENTRY_SAL_B, "SAL_B"},
{FIT_ENTRY_SALRUNTIME, "SAL runtime"},
{FIT_ENTRY_EFI, "EFI"},
{FIT_ENTRY_VMLINUX, "Embedded Linux"},
{FIT_ENTRY_FPSWA, "Embedded FPSWA"},
{FIT_ENTRY_UNUSED, "Unused"},
{0xff, "Error"},
};
static const char *
fit_type_name(unsigned char type)
static const char *fit_type_name(unsigned char type)
{
struct fit_type_map_t const*mapp;
struct fit_type_map_t const *mapp;
for (mapp = fit_entry_types; mapp->type != 0xff; mapp++)
if (type == mapp->type)
......@@ -125,11 +124,10 @@ fit_type_name(unsigned char type)
#define FW_BASE 0x00000000FF000000
#define FW_TOP 0x0000000100000000
static unsigned long
convert_fw_addr(nasid_t nasid, unsigned long addr)
static unsigned long convert_fw_addr(nasid_t nasid, unsigned long addr)
{
/* snag just the node-relative offset */
addr &= ~0ul >> (63-35);
addr &= ~0ul >> (63 - 35);
/* the pointer to SAL A is relative to IA-64 compatibility
* space. However, the PROM is mapped at a different offset
* in MMR space (both local and global)
......@@ -138,8 +136,7 @@ convert_fw_addr(nasid_t nasid, unsigned long addr)
return GLOBAL_MMR_ADDR(nasid, addr);
}
static int
valid_fw_addr(unsigned long addr)
static int valid_fw_addr(unsigned long addr)
{
addr &= ~(1ul << 63); /* Clear cached/uncached bit */
return (addr >= FW_BASE && addr < FW_TOP);
......@@ -216,8 +213,7 @@ get_fit_entry(unsigned long nasid, int index, unsigned long *fentry,
/*
* These two routines display the FIT table for each node.
*/
static int
dump_fit_entry(char *page, unsigned long *fentry)
static int dump_fit_entry(char *page, unsigned long *fentry)
{
unsigned type;
......@@ -289,11 +285,14 @@ static int
proc_calc_metrics(char *page, char **start, off_t off, int count, int *eof,
int len)
{
if (len <= off+count) *eof = 1;
if (len <= off + count)
*eof = 1;
*start = page + off;
len -= off;
if (len>count) len = count;
if (len<0) len = 0;
if (len > count)
len = count;
if (len < 0)
len = 0;
return len;
}
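For example (illustrative numbers): with off = 0, count = 1024 and len = 300, len <= off + count, so *eof is set, *start points at the beginning of the page and 300 bytes are returned; a follow-up read at off = 300 then returns 0.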
......@@ -334,8 +333,7 @@ static struct proc_dir_entry *sgi_prominfo_entry;
#define NODE_NAME_LEN 11
int __init
prominfo_init(void)
int __init prominfo_init(void)
{
struct proc_dir_entry **entp;
struct proc_dir_entry *p;
......@@ -372,16 +370,14 @@ prominfo_init(void)
return 0;
}
void __exit
prominfo_exit(void)
void __exit prominfo_exit(void)
{
struct proc_dir_entry **entp;
unsigned cnodeid;
char name[NODE_NAME_LEN];
for (cnodeid = 0, entp = proc_entries;
cnodeid < numnodes;
cnodeid++, entp++) {
cnodeid < numnodes; cnodeid++, entp++) {
remove_proc_entry("fit", *entp);
remove_proc_entry("version", *entp);
sprintf(name, "node%d", cnodeid);
......
......@@ -3,10 +3,10 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
* Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
*/
#include <asm/sn/sn2/shub_mmr.h>
#include <asm/sn/shub_mmr.h>
#define ZEROVAL 0x3f // "zero" value for outstanding PIO requests
#define DEADLOCKBIT SH_PIO_WRITE_STATUS_0_WRITE_DEADLOCK_SHFT
......
......@@ -21,7 +21,6 @@
#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/sn/sgi.h>
#include <asm/sal.h>
#include <asm/system.h>
#include <asm/delay.h>
......@@ -34,20 +33,17 @@
#include <asm/current.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/addrs.h>
#include <asm/sn/sn2/shub_mmr.h>
#include <asm/sn/shub_mmr.h>
#include <asm/sn/nodepda.h>
#include <asm/sn/rw_mmr.h>
void sn2_ptc_deadlock_recovery(unsigned long data0, unsigned long data1);
static spinlock_t sn2_global_ptc_lock __cacheline_aligned = SPIN_LOCK_UNLOCKED;
static unsigned long sn2_ptc_deadlock_count;
static inline unsigned long
wait_piowc(void)
static inline unsigned long wait_piowc(void)
{
volatile unsigned long *piows;
unsigned long ws;
......@@ -55,20 +51,18 @@ wait_piowc(void)
piows = pda->pio_write_status_addr;
do {
ia64_mfa();
} while (((ws = *piows) & SH_PIO_WRITE_STATUS_0_PENDING_WRITE_COUNT_MASK) !=
} while (((ws =
*piows) & SH_PIO_WRITE_STATUS_0_PENDING_WRITE_COUNT_MASK) !=
SH_PIO_WRITE_STATUS_0_PENDING_WRITE_COUNT_MASK);
return ws;
}
void
sn_tlb_migrate_finish(struct mm_struct *mm)
void sn_tlb_migrate_finish(struct mm_struct *mm)
{
if (mm == current->mm)
flush_tlb_mm(mm);
}
/**
* sn2_global_tlb_purge - globally purge translation cache of virtual address range
* @start: start of virtual address range
......@@ -90,12 +84,13 @@ sn_tlb_migrate_finish(struct mm_struct *mm)
*/
void
sn2_global_tlb_purge (unsigned long start, unsigned long end, unsigned long nbits)
sn2_global_tlb_purge(unsigned long start, unsigned long end,
unsigned long nbits)
{
int i, cnode, mynasid, cpu, lcpu=0, nasid, flushed=0;
int i, cnode, mynasid, cpu, lcpu = 0, nasid, flushed = 0;
volatile unsigned long *ptc0, *ptc1;
unsigned long flags=0, data0, data1;
struct mm_struct *mm=current->active_mm;
unsigned long flags = 0, data0, data1;
struct mm_struct *mm = current->active_mm;
short nasids[NR_NODES], nix;
DECLARE_BITMAP(nodes_flushed, NR_NODES);
......@@ -114,7 +109,7 @@ sn2_global_tlb_purge (unsigned long start, unsigned long end, unsigned long nbit
if (likely(i == 1 && lcpu == smp_processor_id())) {
do {
ia64_ptcl(start, nbits<<2);
ia64_ptcl(start, nbits << 2);
start += (1UL << nbits);
} while (start < end);
ia64_srlz_i();
......@@ -128,42 +123,42 @@ sn2_global_tlb_purge (unsigned long start, unsigned long end, unsigned long nbit
return;
}
nix = 0;
for (cnode=find_first_bit(&nodes_flushed, NR_NODES); cnode < NR_NODES;
cnode=find_next_bit(&nodes_flushed, NR_NODES, ++cnode))
for (cnode = find_first_bit(&nodes_flushed, NR_NODES); cnode < NR_NODES;
cnode = find_next_bit(&nodes_flushed, NR_NODES, ++cnode))
nasids[nix++] = cnodeid_to_nasid(cnode);
data0 = (1UL << SH_PTC_0_A_SHFT) |
(nbits << SH_PTC_0_PS_SHFT) |
((ia64_get_rr(start) >> 8) << SH_PTC_0_RID_SHFT) |
(1UL << SH_PTC_0_START_SHFT);
data0 = (1UL<<SH_PTC_0_A_SHFT) |
(nbits<<SH_PTC_0_PS_SHFT) |
((ia64_get_rr(start)>>8)<<SH_PTC_0_RID_SHFT) |
(1UL<<SH_PTC_0_START_SHFT);
ptc0 = (long*)GLOBAL_MMR_PHYS_ADDR(0, SH_PTC_0);
ptc1 = (long*)GLOBAL_MMR_PHYS_ADDR(0, SH_PTC_1);
ptc0 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH_PTC_0);
ptc1 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH_PTC_1);
mynasid = smp_physical_node_id();
spin_lock_irqsave(&sn2_global_ptc_lock, flags);
do {
data1 = start | (1UL<<SH_PTC_1_START_SHFT);
for (i=0; i<nix; i++) {
data1 = start | (1UL << SH_PTC_1_START_SHFT);
for (i = 0; i < nix; i++) {
nasid = nasids[i];
if (likely(nasid == mynasid)) {
ia64_ptcga(start, nbits<<2);
ia64_ptcga(start, nbits << 2);
ia64_srlz_i();
} else {
ptc0 = CHANGE_NASID(nasid, ptc0);
ptc1 = CHANGE_NASID(nasid, ptc1);
pio_atomic_phys_write_mmrs(ptc0, data0, ptc1, data1);
pio_atomic_phys_write_mmrs(ptc0, data0, ptc1,
data1);
flushed = 1;
}
}
if (flushed && (wait_piowc() & SH_PIO_WRITE_STATUS_0_WRITE_DEADLOCK_MASK)) {
if (flushed
&& (wait_piowc() &
SH_PIO_WRITE_STATUS_0_WRITE_DEADLOCK_MASK)) {
sn2_ptc_deadlock_recovery(data0, data1);
}
......@@ -183,18 +178,18 @@ sn2_global_tlb_purge (unsigned long start, unsigned long end, unsigned long nbit
* TLB flush transaction. The recovery sequence is somewhat tricky & is
* coded in assembly language.
*/
void
sn2_ptc_deadlock_recovery(unsigned long data0, unsigned long data1)
void sn2_ptc_deadlock_recovery(unsigned long data0, unsigned long data1)
{
extern void sn2_ptc_deadlock_recovery_core(long*, long, long*, long, long*);
extern void sn2_ptc_deadlock_recovery_core(long *, long, long *, long,
long *);
int cnode, mycnode, nasid;
long *ptc0, *ptc1, *piows;
sn2_ptc_deadlock_count++;
ptc0 = (long*)GLOBAL_MMR_PHYS_ADDR(0, SH_PTC_0);
ptc1 = (long*)GLOBAL_MMR_PHYS_ADDR(0, SH_PTC_1);
piows = (long*)pda->pio_write_status_addr;
ptc0 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH_PTC_0);
ptc1 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH_PTC_1);
piows = (long *)pda->pio_write_status_addr;
mycnode = numa_node_id();
......@@ -224,34 +219,34 @@ sn2_ptc_deadlock_recovery(unsigned long data0, unsigned long data1)
* %IA64_IPI_DM_NMI - pend an NMI
* %IA64_IPI_DM_INIT - pend an INIT interrupt
*/
void
sn_send_IPI_phys(long physid, int vector, int delivery_mode)
void sn_send_IPI_phys(long physid, int vector, int delivery_mode)
{
long nasid, slice, val;
unsigned long flags=0;
unsigned long flags = 0;
volatile long *p;
nasid = cpu_physical_id_to_nasid(physid);
slice = cpu_physical_id_to_slice(physid);
p = (long*)GLOBAL_MMR_PHYS_ADDR(nasid, SH_IPI_INT);
val = (1UL<<SH_IPI_INT_SEND_SHFT) |
(physid<<SH_IPI_INT_PID_SHFT) |
((long)delivery_mode<<SH_IPI_INT_TYPE_SHFT) |
((long)vector<<SH_IPI_INT_IDX_SHFT) |
(0x000feeUL<<SH_IPI_INT_BASE_SHFT);
p = (long *)GLOBAL_MMR_PHYS_ADDR(nasid, SH_IPI_INT);
val = (1UL << SH_IPI_INT_SEND_SHFT) |
(physid << SH_IPI_INT_PID_SHFT) |
((long)delivery_mode << SH_IPI_INT_TYPE_SHFT) |
((long)vector << SH_IPI_INT_IDX_SHFT) |
(0x000feeUL << SH_IPI_INT_BASE_SHFT);
mb();
if (enable_shub_wars_1_1() ) {
if (enable_shub_wars_1_1()) {
spin_lock_irqsave(&sn2_global_ptc_lock, flags);
}
pio_phys_write_mmr(p, val);
if (enable_shub_wars_1_1() ) {
if (enable_shub_wars_1_1()) {
wait_piowc();
spin_unlock_irqrestore(&sn2_global_ptc_lock, flags);
}
}
EXPORT_SYMBOL(sn_send_IPI_phys);
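A minimal usage sketch (the SAPIC physical id and vector value are made up; IA64_IPI_DM_INT is the ordinary "pend an interrupt" delivery mode, a sibling of the %IA64_IPI_DM_NMI / %IA64_IPI_DM_INIT modes listed above):
	/* Illustrative only: deliver vector 0xd0 as a normal interrupt */
	sn_send_IPI_phys(physid, 0xd0, IA64_IPI_DM_INT);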
/**
......@@ -270,8 +265,7 @@ EXPORT_SYMBOL(sn_send_IPI_phys);
* %IA64_IPI_DM_NMI - pend an NMI
* %IA64_IPI_DM_INIT - pend an INIT interrupt
*/
void
sn2_send_IPI(int cpuid, int vector, int delivery_mode, int redirect)
void sn2_send_IPI(int cpuid, int vector, int delivery_mode, int redirect)
{
long physid;
......
......@@ -38,6 +38,8 @@
#include <asm/uaccess.h>
#include <asm-ia64/sal.h>
#include <asm-ia64/sn/sn_sal.h>
#include <asm/sn/module.h>
#include <asm/sn/geo.h>
#include <asm-ia64/sn/sn2/sn_hwperf.h>
static void *sn_hwperf_salheap = NULL;
......@@ -45,6 +47,7 @@ static int sn_hwperf_obj_cnt = 0;
static nasid_t sn_hwperf_master_nasid = INVALID_NASID;
static int sn_hwperf_init(void);
static DECLARE_MUTEX(sn_hwperf_init_mutex);
extern int numionodes;
static int sn_hwperf_enum_objects(int *nobj, struct sn_hwperf_object_info **ret)
{
......@@ -80,21 +83,26 @@ static int sn_hwperf_enum_objects(int *nobj, struct sn_hwperf_object_info **ret)
static int sn_hwperf_geoid_to_cnode(char *location)
{
int cnode;
int mod, slot, slab;
int cmod, cslot, cslab;
geoid_t geoid;
moduleid_t module_id;
char type;
int rack, slot, slab;
int this_rack, this_slot, this_slab;
if (sscanf(location, "%03dc%02d#%d", &mod, &slot, &slab) != 3)
if (sscanf(location, "%03d%c%02d#%d", &rack, &type, &slot, &slab) != 4)
return -1;
for (cnode = 0; cnode < numnodes; cnode++) {
/* XXX: need a better way than this ... */
if (sscanf(NODEPDA(cnode)->hwg_node_name,
"hw/module/%03dc%02d/slab/%d", &cmod, &cslot, &cslab) == 3) {
if (mod == cmod && slot == cslot && slab == cslab)
for (cnode = 0; cnode < numionodes; cnode++) {
geoid = cnodeid_get_geoid(cnode);
module_id = geo_module(geoid);
this_rack = MODULE_GET_RACK(module_id);
this_slot = MODULE_GET_BPOS(module_id);
this_slab = geo_slab(geoid);
if (rack == this_rack && slot == this_slot && slab == this_slab)
break;
}
}
return cnode < numnodes ? cnode : -1;
return cnode < numionodes ? cnode : -1;
}
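As an example of the new matching (values are illustrative): a location string of "001c14#0" parses as rack 1, brick type 'c', slot 14, slab 0, which is then compared against the rack, bay position and slab decoded from each node's geoid.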
static int sn_hwperf_obj_to_cnode(struct sn_hwperf_object_info * obj)
......@@ -202,7 +210,7 @@ static int sn_topology_show(struct seq_file *s, void *d)
seq_putc(s, '\n');
else {
seq_printf(s, ", nasid 0x%x", cnodeid_to_nasid(ordinal));
for (i=0; i < numnodes; i++) {
for (i=0; i < numionodes; i++) {
seq_printf(s, i ? ":%d" : ", dist %d",
node_distance(ordinal, i));
}
......@@ -473,7 +481,7 @@ sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, u64 arg)
case SN_HWPERF_GET_NODE_NASID:
if (a.sz != sizeof(u64) ||
(node = a.arg) < 0 || node >= numnodes) {
(node = a.arg) < 0 || node >= numionodes) {
r = -EINVAL;
goto error;
}
......
......@@ -11,7 +11,6 @@
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <asm/sn/sgi.h>
#include <asm/sn/sn_sal.h>
static int partition_id_show(struct seq_file *s, void *p)
......
......@@ -15,16 +15,16 @@
#include <asm/hw_irq.h>
#include <asm/system.h>
#include "shub.h"
#include <asm/sn/leds.h>
#include <asm/sn/shub_mmr.h>
#include <asm/sn/clksupport.h>
extern unsigned long sn_rtc_cycles_per_second;
static struct time_interpolator sn2_interpolator;
void __init
sn_timer_init(void)
void __init sn_timer_init(void)
{
sn2_interpolator.frequency = sn_rtc_cycles_per_second;
sn2_interpolator.drift = -1; /* unknown */
......
......@@ -34,20 +34,21 @@
#include <linux/interrupt.h>
#include <asm/sn/pda.h>
#include "shub.h"
#include <asm/sn/leds.h>
extern void sn_lb_int_war_check(void);
extern irqreturn_t timer_interrupt (int irq, void *dev_id, struct pt_regs *regs);
extern irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs);
#define SN_LB_INT_WAR_INTERVAL 100
void
sn_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
void sn_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
/* LED blinking */
if (!pda->hb_count--) {
pda->hb_count = HZ/2;
set_led_bits(pda->hb_state ^= LED_CPU_HEARTBEAT, LED_CPU_HEARTBEAT);
pda->hb_count = HZ / 2;
set_led_bits(pda->hb_state ^=
LED_CPU_HEARTBEAT, LED_CPU_HEARTBEAT);
}
if (enable_shub_wars_1_1()) {
......
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
#
# Makefile for the sn pci general routines.
obj-y := pci_dma.o pcibr/
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 2002-2004 Silicon Graphics, Inc. All Rights Reserved.
#
# Makefile for the sn2 io routines.
obj-y += pcibr_dma.o pcibr_reg.o \
pcibr_ate.o pcibr_provider.o
......@@ -28,6 +28,7 @@
#include <asm/uaccess.h>
#include <asm/sn/addrs.h>
#include <asm/sn/clksupport.h>
#include <asm/sn/shub_mmr.h>
MODULE_AUTHOR("Jesse Barnes <jbarnes@sgi.com>");
MODULE_DESCRIPTION("Multimedia timer support");
......
......@@ -22,6 +22,8 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/module.h>
#include <asm/sn/geo.h>
#include <asm/sn/nodepda.h>
#include "snsc.h"
......@@ -364,17 +366,15 @@ int __init
scdrv_init(void)
{
geoid_t geoid;
cmoduleid_t cmod;
int i;
cnodeid_t cnode;
char devname[32];
char *devnamep;
module_t *m;
struct sysctl_data_s *scd;
void *salbuf;
struct class_simple *snsc_class;
dev_t first_dev, dev;
if (alloc_chrdev_region(&first_dev, 0, (MAX_SLABS*nummodules),
if (alloc_chrdev_region(&first_dev, 0, numionodes,
SYSCTL_BASENAME) < 0) {
printk("%s: failed to register SN system controller device\n",
__FUNCTION__);
......@@ -382,16 +382,8 @@ scdrv_init(void)
}
snsc_class = class_simple_create(THIS_MODULE, SYSCTL_BASENAME);
for (cmod = 0; cmod < nummodules; cmod++) {
m = sn_modules[cmod];
for (i = 0; i <= MAX_SLABS; i++) {
if (m->nodes[i] == -1) {
/* node is not alive in module */
continue;
}
geoid = m->geoid[i];
for (cnode = 0; cnode < numionodes; cnode++) {
geoid = cnodeid_get_geoid(cnode);
devnamep = devname;
format_module_id(devnamep, geo_module(geoid),
MODULE_FORMAT_BRIEF);
......@@ -410,7 +402,7 @@ scdrv_init(void)
memset(scd, 0, sizeof (struct sysctl_data_s));
/* initialize sysctl device data fields */
scd->scd_nasid = cnodeid_to_nasid(m->nodes[i]);
scd->scd_nasid = cnodeid_to_nasid(cnode);
if (!(salbuf = kmalloc(SCDRV_BUFSZ, GFP_KERNEL))) {
printk("%s: failed to allocate driver buffer"
"(%s%s)\n", __FUNCTION__,
......@@ -431,7 +423,7 @@ scdrv_init(void)
continue;
}
dev = first_dev + m->nodes[i];
dev = first_dev + cnode;
cdev_init(&scd->scd_cdev, &scdrv_fops);
if (cdev_add(&scd->scd_cdev, dev, 1)) {
printk("%s: failed to register system"
......@@ -449,7 +441,6 @@ scdrv_init(void)
0 /*ignored */ ,
SAL_IROUTER_INTR_RECV);
}
}
return 0;
}
......
......@@ -117,6 +117,6 @@ extern ia64_mv_dma_supported sn_dma_supported;
#define platform_dma_mapping_error sn_dma_mapping_error
#define platform_dma_supported sn_dma_supported
#include <asm/sn/sn2/io.h>
#include <asm/sn/io.h>
#endif /* _ASM_IA64_MACHVEC_SN2_H */
......@@ -4,7 +4,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2001-2003 Silicon Graphics, Inc. All rights reserved.
* Copyright (c) 2001-2004 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_FETCHOP_H
......