Commit eccaedd5 authored by Pat Gefre, committed by Tony Luck

[IA64-SGI] Add in Altix I/O code

Signed-off-by: Patrick Gefre <pfg@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
parent e4870e45
@@ -4,9 +4,11 @@
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 2003 Silicon Graphics, Inc. All Rights Reserved.
# Copyright (C) 2004 Silicon Graphics, Inc. All Rights Reserved.
#
# Makefile for the sn ia64 subplatform
#
obj-y += kernel/
CPPFLAGS += -I$(srctree)/arch/ia64/sn/include
obj-y += kernel/ pci/
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_IOERROR_H
#define _ASM_IA64_SN_IOERROR_H
/*
* IO error structure.
*
* This structure would expand to hold the information retrieved from
* all IO related error registers.
*
* This structure is defined to hold all system specific
* information related to a single error.
*
* This serves a couple of purposes.
* - Error handling often involves translating one form of address to
* another. So, instead of having different data structures at each level,
* we have a single structure, and the appropriate fields get filled in
* at each layer.
* - This provides a way to dump all error related information in any layer
* of error handling (debugging aid).
*
* A second possibility is to allow each layer to define its own error
* data structure, and fill in the proper fields. This has the advantage
* of isolating the layers.
* A big concern is the potential stack usage (and overflow), if each layer
* defines these structures on the stack (assuming we don't want to do
* kmalloc).
*
* Any layer wishing to pass extra information to a layer next to it in
* error handling hierarchy, can do so as a separate parameter.
*/
typedef struct io_error_s {
/* Bit fields indicating which structure fields are valid */
union {
struct {
unsigned ievb_errortype:1;
unsigned ievb_widgetnum:1;
unsigned ievb_widgetdev:1;
unsigned ievb_srccpu:1;
unsigned ievb_srcnode:1;
unsigned ievb_errnode:1;
unsigned ievb_sysioaddr:1;
unsigned ievb_xtalkaddr:1;
unsigned ievb_busspace:1;
unsigned ievb_busaddr:1;
unsigned ievb_vaddr:1;
unsigned ievb_memaddr:1;
unsigned ievb_epc:1;
unsigned ievb_ef:1;
unsigned ievb_tnum:1;
} iev_b;
unsigned iev_a;
} ie_v;
short ie_errortype; /* error type: extra info about error */
short ie_widgetnum; /* Widget number that's in error */
short ie_widgetdev; /* Device within widget in error */
cpuid_t ie_srccpu; /* CPU on srcnode generating error */
cnodeid_t ie_srcnode; /* Node which caused the error */
cnodeid_t ie_errnode; /* Node where error was noticed */
iopaddr_t ie_sysioaddr; /* Sys specific IO address */
iopaddr_t ie_xtalkaddr; /* Xtalk (48bit) addr of Error */
iopaddr_t ie_busspace; /* Bus specific address space */
iopaddr_t ie_busaddr; /* Bus specific address */
caddr_t ie_vaddr; /* Virtual address of error */
iopaddr_t ie_memaddr; /* Physical memory address */
caddr_t ie_epc; /* pc when error reported */
caddr_t ie_ef; /* eframe when error reported */
short ie_tnum; /* Xtalk TNUM field */
} ioerror_t;
#define IOERROR_INIT(e) do { (e)->ie_v.iev_a = 0; } while (0)
#define IOERROR_SETVALUE(e,f,v) do { (e)->ie_ ## f = (v); (e)->ie_v.iev_b.ievb_ ## f = 1; } while (0)
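/*
 * Editor's usage sketch (not part of the original commit): a layer that
 * discovers the error type fills in its field and marks it valid; a later
 * layer tests the validity bit before consuming the value. The error type
 * value below is purely illustrative.
 */
static inline void ioerror_usage_sketch(ioerror_t *ioe)
{
	IOERROR_INIT(ioe);			/* clear every validity bit */
	IOERROR_SETVALUE(ioe, errortype, 1);	/* sets ie_errortype and ievb_errortype */

	if (ioe->ie_v.iev_b.ievb_errortype) {
		/* ie_errortype is valid here and may be dumped or translated */
	}
}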
#endif /* _ASM_IA64_SN_IOERROR_H */
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992-1997,2000-2004 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H
#define _ASM_IA64_SN_PCI_PCIBR_PROVIDER_H
/* Workarounds */
#define PV907516 (1 << 1) /* TIOCP: Don't write the write buffer flush reg */
#define BUSTYPE_MASK 0x1
/* Macros given a pcibus structure */
#define IS_PCIX(ps) ((ps)->pbi_bridge_mode & BUSTYPE_MASK)
#define IS_PCI_BRIDGE_ASIC(asic) ((asic) == PCIIO_ASIC_TYPE_PIC || \
(asic) == PCIIO_ASIC_TYPE_TIOCP)
#define IS_PIC_SOFT(ps) ((ps)->pbi_bridge_type == PCIBR_BRIDGETYPE_PIC)
/*
* The different PCI Bridge types supported on the SGI Altix platforms
*/
#define PCIBR_BRIDGETYPE_UNKNOWN -1
#define PCIBR_BRIDGETYPE_PIC 2
#define PCIBR_BRIDGETYPE_TIOCP 3
/*
* Bridge 64bit Direct Map Attributes
*/
#define PCI64_ATTR_PREF (1ull << 59)
#define PCI64_ATTR_PREC (1ull << 58)
#define PCI64_ATTR_VIRTUAL (1ull << 57)
#define PCI64_ATTR_BAR (1ull << 56)
#define PCI64_ATTR_SWAP (1ull << 55)
#define PCI64_ATTR_VIRTUAL1 (1ull << 54)
#define PCI32_LOCAL_BASE 0
#define PCI32_MAPPED_BASE 0x40000000
#define PCI32_DIRECT_BASE 0x80000000
#define IS_PCI32_MAPPED(x) ((uint64_t)(x) < PCI32_DIRECT_BASE && \
(uint64_t)(x) >= PCI32_MAPPED_BASE)
#define IS_PCI32_DIRECT(x) ((uint64_t)(x) >= PCI32_MAPPED_BASE)
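/*
 * Editor's note (illustrative, not part of the commit): 32-bit PCI bus
 * space is split into a local region below PCI32_MAPPED_BASE, an
 * ATE-mapped region from 0x40000000 up and a direct-mapped region from
 * 0x80000000 up. As defined above, IS_PCI32_DIRECT() is true for anything
 * at or above PCI32_MAPPED_BASE, so test the mapped range first:
 */
static inline int pci32_is_direct_sketch(uint64_t busaddr)
{
	if (IS_PCI32_MAPPED(busaddr))	/* 0x40000000 <= busaddr < 0x80000000 */
		return 0;
	return IS_PCI32_DIRECT(busaddr);
}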
/*
* Bridge PMU Address Translation Entry Attributes
*/
#define PCI32_ATE_V (0x1 << 0)
#define PCI32_ATE_CO (0x1 << 1)
#define PCI32_ATE_PREC (0x1 << 2)
#define PCI32_ATE_PREF (0x1 << 3)
#define PCI32_ATE_BAR (0x1 << 4)
#define PCI32_ATE_ADDR_SHFT 12
#define MINIMAL_ATES_REQUIRED(addr, size) \
(IOPG(IOPGOFF(addr) + (size) - 1) == IOPG((size) - 1))
#define MINIMAL_ATE_FLAG(addr, size) \
(MINIMAL_ATES_REQUIRED((uint64_t)addr, size) ? 1 : 0)
/* bit 29 of the pci address is the SWAP bit */
#define ATE_SWAPSHIFT 29
#define ATE_SWAP_ON(x) ((x) |= (1 << ATE_SWAPSHIFT))
#define ATE_SWAP_OFF(x) ((x) &= ~(1 << ATE_SWAPSHIFT))
/*
* I/O page size
*/
#if PAGE_SIZE < 16384
#define IOPFNSHIFT 12 /* 4K per mapped page */
#else
#define IOPFNSHIFT 14 /* 16K per mapped page */
#endif
#define IOPGSIZE (1 << IOPFNSHIFT)
#define IOPG(x) ((x) >> IOPFNSHIFT)
#define IOPGOFF(x) ((x) & (IOPGSIZE-1))
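/*
 * Editor's worked example (assumes 16KB I/O pages, IOPFNSHIFT == 14):
 * a 0x5000-byte transfer starting at page offset 0 ends in
 * IOPG(0x4FFF) == page 1, the same as IOPG(size - 1), so
 * MINIMAL_ATES_REQUIRED() is true; start it at offset 0x3800 instead and
 * the last byte lands in IOPG(0x3800 + 0x5000 - 1) == page 2, so one
 * extra ATE is needed. ate_count_sketch() is a hypothetical helper:
 */
static inline int ate_count_sketch(uint64_t addr, uint64_t size)
{
	/* one ATE per I/O page touched by [addr, addr + size) */
	return IOPG(IOPGOFF(addr) + size - 1) + 1;
}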
#define PCIBR_DEV_SWAP_DIR (1ull << 19)
#define PCIBR_CTRL_PAGE_SIZE (0x1 << 21)
/*
* PMU resources.
*/
struct ate_resource{
uint64_t *ate;
uint64_t num_ate;
uint64_t lowest_free_index;
};
struct pcibus_info {
struct pcibus_bussoft pbi_buscommon; /* common header */
uint32_t pbi_moduleid;
short pbi_bridge_type;
short pbi_bridge_mode;
struct ate_resource pbi_int_ate_resource;
uint64_t pbi_int_ate_size;
uint64_t pbi_dir_xbase;
char pbi_hub_xid;
uint64_t pbi_devreg[8];
spinlock_t pbi_lock;
uint32_t pbi_valid_devices;
uint32_t pbi_enabled_devices;
};
/*
* pcibus_info structure locking macros
*/
static inline unsigned long
pcibr_lock(struct pcibus_info *pcibus_info)
{
unsigned long flag;
spin_lock_irqsave(&pcibus_info->pbi_lock, flag);
return flag;
}
#define pcibr_unlock(pcibus_info, flag) spin_unlock_irqrestore(&pcibus_info->pbi_lock, flag)
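/*
 * Editor's usage sketch: pcibr_lock()/pcibr_unlock() bracket updates to
 * the per-bus soft state; the returned flag carries the saved interrupt
 * state. The device-enable update below is purely illustrative.
 */
static inline void pcibus_info_update_sketch(struct pcibus_info *pcibus_info,
					     int device)
{
	unsigned long flag;

	flag = pcibr_lock(pcibus_info);
	pcibus_info->pbi_enabled_devices |= (1 << device);
	pcibr_unlock(pcibus_info, flag);
}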
extern void *pcibr_bus_fixup(struct pcibus_bussoft *);
extern uint64_t pcibr_dma_map(struct pcidev_info *, unsigned long, size_t, unsigned int);
extern void pcibr_dma_unmap(struct pcidev_info *, dma_addr_t, int);
/*
* prototypes for the bridge asic register access routines in pcibr_reg.c
*/
extern void pcireg_control_bit_clr(struct pcibus_info *, uint64_t);
extern void pcireg_control_bit_set(struct pcibus_info *, uint64_t);
extern uint64_t pcireg_tflush_get(struct pcibus_info *);
extern uint64_t pcireg_intr_status_get(struct pcibus_info *);
extern void pcireg_intr_enable_bit_clr(struct pcibus_info *, uint64_t);
extern void pcireg_intr_enable_bit_set(struct pcibus_info *, uint64_t);
extern void pcireg_intr_addr_addr_set(struct pcibus_info *, int, uint64_t);
extern void pcireg_force_intr_set(struct pcibus_info *, int);
extern uint64_t pcireg_wrb_flush_get(struct pcibus_info *, int);
extern void pcireg_int_ate_set(struct pcibus_info *, int, uint64_t);
extern uint64_t * pcireg_int_ate_addr(struct pcibus_info *, int);
extern void pcibr_force_interrupt(struct sn_irq_info *sn_irq_info);
extern void pcibr_change_devices_irq(struct sn_irq_info *sn_irq_info);
extern int pcibr_ate_alloc(struct pcibus_info *, int);
extern void pcibr_ate_free(struct pcibus_info *, int);
extern void ate_write(struct pcibus_info *, int, int, uint64_t);
#endif
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H
#define _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H
/*
* SN pci asic types. Do not ever renumber these or reuse values. The
* values must agree with what prom thinks they are.
*/
#define PCIIO_ASIC_TYPE_UNKNOWN 0
#define PCIIO_ASIC_TYPE_PPB 1
#define PCIIO_ASIC_TYPE_PIC 2
#define PCIIO_ASIC_TYPE_TIOCP 3
/*
* Common pciio bus provider data. There should be one of these as the
* first field in any pciio based provider soft structure (e.g. pcibr_soft
* tioca_soft, etc).
*/
struct pcibus_bussoft {
uint32_t bs_asic_type; /* chipset type */
uint32_t bs_xid; /* xwidget id */
uint64_t bs_persist_busnum; /* Persistent Bus Number */
uint64_t bs_legacy_io; /* legacy io pio addr */
uint64_t bs_legacy_mem; /* legacy mem pio addr */
uint64_t bs_base; /* widget base */
struct xwidget_info *bs_xwidget_info;
};
/*
* DMA mapping flags
*/
#define SN_PCIDMA_CONSISTENT 0x0001
#endif /* _ASM_IA64_SN_PCI_PCIBUS_PROVIDER_H */
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_PCI_PCIDEV_H
#define _ASM_IA64_SN_PCI_PCIDEV_H
#include <linux/pci.h>
extern struct sn_irq_info **sn_irq;
#define SN_PCIDEV_INFO(pci_dev) \
((struct pcidev_info *)((pci_dev)->sysdata))
/*
* Given a pci_bus, return the sn pcibus_bussoft struct. Note that
* this only works for root busses, not for busses represented by PPB's.
*/
#define SN_PCIBUS_BUSSOFT(pci_bus) \
((struct pcibus_bussoft *)(PCI_CONTROLLER((pci_bus))->platform_data))
/*
* Given a struct pci_dev, return the sn pcibus_bussoft struct. Note
* that this is not equivalent to SN_PCIBUS_BUSSOFT(pci_dev->bus) due
* to possible PPB's in the path.
*/
#define SN_PCIDEV_BUSSOFT(pci_dev) \
(SN_PCIDEV_INFO(pci_dev)->pdi_host_pcidev_info->pdi_pcibus_info)
#define PCIIO_BUS_NONE 255 /* bus 255 reserved */
#define PCIIO_SLOT_NONE 255
#define PCIIO_FUNC_NONE 255
#define PCIIO_VENDOR_ID_NONE (-1)
struct pcidev_info {
uint64_t pdi_pio_mapped_addr[7]; /* 6 BARs PLUS 1 ROM */
uint64_t pdi_slot_host_handle; /* Bus and devfn Host pci_dev */
struct pcibus_bussoft *pdi_pcibus_info; /* Kernel common bus soft */
struct pcidev_info *pdi_host_pcidev_info; /* Kernel Host pci_dev */
struct pci_dev *pdi_linux_pcidev; /* Kernel pci_dev */
struct sn_irq_info *pdi_sn_irq_info;
};
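/*
 * Editor's sketch (not part of the commit): walking from a struct pci_dev
 * to its SN bus soft structure with the macros above. SN_PCIDEV_BUSSOFT()
 * goes through pdi_host_pcidev_info, so it remains correct for devices
 * behind a PPB, where SN_PCIBUS_BUSSOFT(pci_dev->bus) would not be.
 */
static inline struct pcibus_bussoft *sn_bussoft_sketch(struct pci_dev *dev)
{
	if (!SN_PCIDEV_INFO(dev))	/* sysdata not initialized yet */
		return NULL;
	return SN_PCIDEV_BUSSOFT(dev);
}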
#endif /* _ASM_IA64_SN_PCI_PCIDEV_H */
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_PCI_PIC_H
#define _ASM_IA64_SN_PCI_PIC_H
/*
* PIC AS DEVICE ZERO
* ------------------
*
* PIC handles PCI/X busses. PCI/X requires that the 'bridge' (i.e. PIC)
* be designated as 'device 0'. That is a departure from earlier SGI
* PCI bridges. Because of that we use config space 1 to access the
* config space of the first actual PCI device on the bus.
* Here's what the PIC manual says:
*
* The current PCI-X bus specification now defines that the parent
* host's bus bridge (PIC for example) must be device 0 on bus 0. PIC
* reduced the total number of devices from 8 to 4 and removed the
* device registers and windows, now only supporting devices 0,1,2, and
* 3. PIC did leave all 8 configuration space windows. The reason was
* there was nothing to gain by removing them. Herein lies the problem.
* The device numbering we do using 0 through 3 is unrelated to the device
* numbering which PCI-X requires in configuration space. In the past we
* correlated config space and our device space 0 <-> 0, 1 <-> 1, etc.
* PCI-X requires we start at 1, not 0, and currently the PX brick
* does associate our:
*
* device 0 with configuration space window 1,
* device 1 with configuration space window 2,
* device 2 with configuration space window 3,
* device 3 with configuration space window 4.
*
* The net effect is that all config space accesses are off-by-one
* relative to other per-slot accesses on the PIC.
* Here is a table that shows some of that:
*
* Internal Slot#
* |
* | 0 1 2 3
* ----------|---------------------------------------
* config | 0x21000 0x22000 0x23000 0x24000
* |
* even rrb | 0[0] n/a 1[0] n/a [] == implied even/odd
* |
* odd rrb | n/a 0[1] n/a 1[1]
* |
* int dev | 00 01 10 11
* |
* ext slot# | 1 2 3 4
* ----------|---------------------------------------
*/
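/*
 * Editor's sketch of the off-by-one mapping described above: internal
 * device n (0..3) is reached through configuration space window n + 1,
 * i.e. at MMR offset 0x21000 + n * 0x1000, matching the table.
 * pic_cfg_window_offset() is a hypothetical helper name.
 */
static inline unsigned long pic_cfg_window_offset(int device)
{
	return 0x21000UL + (unsigned long)device * 0x1000UL;
}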
#define PIC_ATE_TARGETID_SHFT 8
#define PIC_HOST_INTR_ADDR 0x0000FFFFFFFFFFFF
#define PIC_PCI64_ATTR_TARG_SHFT 60
/*****************************************************************************
*********************** PIC MMR structure mapping ***************************
*****************************************************************************/
/* NOTE: PIC WAR. PV#854697. PIC does not allow writes just to [31:0]
* of a 64-bit register. When writing PIC registers, always write the
* entire 64 bits.
*/
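/*
 * Editor's sketch of honoring PV#854697: never issue a 32-bit store to a
 * PIC register; read-modify-write the full 64 bits instead.
 * pic_set_low_bits() is a hypothetical helper.
 */
static inline void pic_set_low_bits(volatile uint64_t *reg, uint32_t bits)
{
	uint64_t val = *reg;	/* full 64-bit read */

	val |= bits;		/* modify only [31:0] */
	*reg = val;		/* but write back all 64 bits, per the WAR */
}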
struct pic {
/* 0x000000-0x00FFFF -- Local Registers */
/* 0x000000-0x000057 -- Standard Widget Configuration */
uint64_t p_wid_id; /* 0x000000 */
uint64_t p_wid_stat; /* 0x000008 */
uint64_t p_wid_err_upper; /* 0x000010 */
uint64_t p_wid_err_lower; /* 0x000018 */
#define p_wid_err p_wid_err_lower
uint64_t p_wid_control; /* 0x000020 */
uint64_t p_wid_req_timeout; /* 0x000028 */
uint64_t p_wid_int_upper; /* 0x000030 */
uint64_t p_wid_int_lower; /* 0x000038 */
#define p_wid_int p_wid_int_lower
uint64_t p_wid_err_cmdword; /* 0x000040 */
uint64_t p_wid_llp; /* 0x000048 */
uint64_t p_wid_tflush; /* 0x000050 */
/* 0x000058-0x00007F -- Bridge-specific Widget Configuration */
uint64_t p_wid_aux_err; /* 0x000058 */
uint64_t p_wid_resp_upper; /* 0x000060 */
uint64_t p_wid_resp_lower; /* 0x000068 */
#define p_wid_resp p_wid_resp_lower
uint64_t p_wid_tst_pin_ctrl; /* 0x000070 */
uint64_t p_wid_addr_lkerr; /* 0x000078 */
/* 0x000080-0x00008F -- PMU & MAP */
uint64_t p_dir_map; /* 0x000080 */
uint64_t _pad_000088; /* 0x000088 */
/* 0x000090-0x00009F -- SSRAM */
uint64_t p_map_fault; /* 0x000090 */
uint64_t _pad_000098; /* 0x000098 */
/* 0x0000A0-0x0000AF -- Arbitration */
uint64_t p_arb; /* 0x0000A0 */
uint64_t _pad_0000A8; /* 0x0000A8 */
/* 0x0000B0-0x0000BF -- Number In A Can or ATE Parity Error */
uint64_t p_ate_parity_err; /* 0x0000B0 */
uint64_t _pad_0000B8; /* 0x0000B8 */
/* 0x0000C0-0x0000FF -- PCI/GIO */
uint64_t p_bus_timeout; /* 0x0000C0 */
uint64_t p_pci_cfg; /* 0x0000C8 */
uint64_t p_pci_err_upper; /* 0x0000D0 */
uint64_t p_pci_err_lower; /* 0x0000D8 */
#define p_pci_err p_pci_err_lower
uint64_t _pad_0000E0[4]; /* 0x0000{E0..F8} */
/* 0x000100-0x0001FF -- Interrupt */
uint64_t p_int_status; /* 0x000100 */
uint64_t p_int_enable; /* 0x000108 */
uint64_t p_int_rst_stat; /* 0x000110 */
uint64_t p_int_mode; /* 0x000118 */
uint64_t p_int_device; /* 0x000120 */
uint64_t p_int_host_err; /* 0x000128 */
uint64_t p_int_addr[8]; /* 0x0001{30,,,68} */
uint64_t p_err_int_view; /* 0x000170 */
uint64_t p_mult_int; /* 0x000178 */
uint64_t p_force_always[8]; /* 0x0001{80,,,B8} */
uint64_t p_force_pin[8]; /* 0x0001{C0,,,F8} */
/* 0x000200-0x000298 -- Device */
uint64_t p_device[4]; /* 0x0002{00,,,18} */
uint64_t _pad_000220[4]; /* 0x0002{20,,,38} */
uint64_t p_wr_req_buf[4]; /* 0x0002{40,,,58} */
uint64_t _pad_000260[4]; /* 0x0002{60,,,78} */
uint64_t p_rrb_map[2]; /* 0x0002{80,,,88} */
#define p_even_resp p_rrb_map[0] /* 0x000280 */
#define p_odd_resp p_rrb_map[1] /* 0x000288 */
uint64_t p_resp_status; /* 0x000290 */
uint64_t p_resp_clear; /* 0x000298 */
uint64_t _pad_0002A0[12]; /* 0x0002{A0..F8} */
/* 0x000300-0x0003F8 -- Buffer Address Match Registers */
struct {
uint64_t upper; /* 0x0003{00,,,F0} */
uint64_t lower; /* 0x0003{08,,,F8} */
} p_buf_addr_match[16];
/* 0x000400-0x0005FF -- Performance Monitor Registers (even only) */
struct {
uint64_t flush_w_touch; /* 0x000{400,,,5C0} */
uint64_t flush_wo_touch; /* 0x000{408,,,5C8} */
uint64_t inflight; /* 0x000{410,,,5D0} */
uint64_t prefetch; /* 0x000{418,,,5D8} */
uint64_t total_pci_retry; /* 0x000{420,,,5E0} */
uint64_t max_pci_retry; /* 0x000{428,,,5E8} */
uint64_t max_latency; /* 0x000{430,,,5F0} */
uint64_t clear_all; /* 0x000{438,,,5F8} */
} p_buf_count[8];
/* 0x000600-0x0009FF -- PCI/X registers */
uint64_t p_pcix_bus_err_addr; /* 0x000600 */
uint64_t p_pcix_bus_err_attr; /* 0x000608 */
uint64_t p_pcix_bus_err_data; /* 0x000610 */
uint64_t p_pcix_pio_split_addr; /* 0x000618 */
uint64_t p_pcix_pio_split_attr; /* 0x000620 */
uint64_t p_pcix_dma_req_err_attr; /* 0x000628 */
uint64_t p_pcix_dma_req_err_addr; /* 0x000630 */
uint64_t p_pcix_timeout; /* 0x000638 */
uint64_t _pad_000640[120]; /* 0x000{640,,,9F8} */
/* 0x000A00-0x000BFF -- PCI/X Read&Write Buffer */
struct {
uint64_t p_buf_addr; /* 0x000{A00,,,AF0} */
uint64_t p_buf_attr; /* 0X000{A08,,,AF8} */
} p_pcix_read_buf_64[16];
struct {
uint64_t p_buf_addr; /* 0x000{B00,,,BE0} */
uint64_t p_buf_attr; /* 0x000{B08,,,BE8} */
uint64_t p_buf_valid; /* 0x000{B10,,,BF0} */
uint64_t __pad1; /* 0x000{B18,,,BF8} */
} p_pcix_write_buf_64[8];
/* End of Local Registers -- Start of Address Map space */
char _pad_000c00[0x010000 - 0x000c00];
/* 0x010000-0x011fff -- Internal ATE RAM (Auto Parity Generation) */
uint64_t p_int_ate_ram[1024]; /* 0x010000-0x011fff */
/* 0x012000-0x013fff -- Internal ATE RAM (Manual Parity Generation) */
uint64_t p_int_ate_ram_mp[1024]; /* 0x012000-0x013fff */
char _pad_014000[0x18000 - 0x014000];
/* 0x18000-0x197F8 -- PIC Write Request Ram */
uint64_t p_wr_req_lower[256]; /* 0x18000 - 0x187F8 */
uint64_t p_wr_req_upper[256]; /* 0x18800 - 0x18FF8 */
uint64_t p_wr_req_parity[256]; /* 0x19000 - 0x197F8 */
char _pad_019800[0x20000 - 0x019800];
/* 0x020000-0x027FFF -- PCI Device Configuration Spaces */
union {
uint8_t c[0x1000 / 1]; /* 0x02{0000,,,7FFF} */
uint16_t s[0x1000 / 2]; /* 0x02{0000,,,7FFF} */
uint32_t l[0x1000 / 4]; /* 0x02{0000,,,7FFF} */
uint64_t d[0x1000 / 8]; /* 0x02{0000,,,7FFF} */
union {
uint8_t c[0x100 / 1];
uint16_t s[0x100 / 2];
uint32_t l[0x100 / 4];
uint64_t d[0x100 / 8];
} f[8];
} p_type0_cfg_dev[8]; /* 0x02{0000,,,7FFF} */
/* 0x028000-0x028FFF -- PCI Type 1 Configuration Space */
union {
uint8_t c[0x1000 / 1]; /* 0x028000-0x029000 */
uint16_t s[0x1000 / 2]; /* 0x028000-0x029000 */
uint32_t l[0x1000 / 4]; /* 0x028000-0x029000 */
uint64_t d[0x1000 / 8]; /* 0x028000-0x029000 */
union {
uint8_t c[0x100 / 1];
uint16_t s[0x100 / 2];
uint32_t l[0x100 / 4];
uint64_t d[0x100 / 8];
} f[8];
} p_type1_cfg; /* 0x028000-0x029000 */
char _pad_029000[0x030000-0x029000];
/* 0x030000-0x030007 -- PCI Interrupt Acknowledge Cycle */
union {
uint8_t c[8 / 1];
uint16_t s[8 / 2];
uint32_t l[8 / 4];
uint64_t d[8 / 8];
} p_pci_iack; /* 0x030000-0x030007 */
char _pad_030007[0x040000-0x030008];
/* 0x040000-0x040007 -- PCIX Special Cycle */
union {
uint8_t c[8 / 1];
uint16_t s[8 / 2];
uint32_t l[8 / 4];
uint64_t d[8 / 8];
} p_pcix_cycle; /* 0x040000-0x040007 */
};
#endif /* _ASM_IA64_SN_PCI_PIC_H */
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2003-2004 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_PCI_TIOCP_H
#define _ASM_IA64_SN_PCI_TIOCP_H
#define TIOCP_HOST_INTR_ADDR 0x003FFFFFFFFFFFFF
#define TIOCP_PCI64_CMDTYPE_MEM (0x1ull << 60)
/*****************************************************************************
*********************** TIOCP MMR structure mapping ***************************
*****************************************************************************/
struct tiocp{
/* 0x000000-0x00FFFF -- Local Registers */
/* 0x000000-0x000057 -- (Legacy Widget Space) Configuration */
uint64_t cp_id; /* 0x000000 */
uint64_t cp_stat; /* 0x000008 */
uint64_t cp_err_upper; /* 0x000010 */
uint64_t cp_err_lower; /* 0x000018 */
#define cp_err cp_err_lower
uint64_t cp_control; /* 0x000020 */
uint64_t cp_req_timeout; /* 0x000028 */
uint64_t cp_intr_upper; /* 0x000030 */
uint64_t cp_intr_lower; /* 0x000038 */
#define cp_intr cp_intr_lower
uint64_t cp_err_cmdword; /* 0x000040 */
uint64_t _pad_000048; /* 0x000048 */
uint64_t cp_tflush; /* 0x000050 */
/* 0x000058-0x00007F -- Bridge-specific Configuration */
uint64_t cp_aux_err; /* 0x000058 */
uint64_t cp_resp_upper; /* 0x000060 */
uint64_t cp_resp_lower; /* 0x000068 */
#define cp_resp cp_resp_lower
uint64_t cp_tst_pin_ctrl; /* 0x000070 */
uint64_t cp_addr_lkerr; /* 0x000078 */
/* 0x000080-0x00008F -- PMU & MAP */
uint64_t cp_dir_map; /* 0x000080 */
uint64_t _pad_000088; /* 0x000088 */
/* 0x000090-0x00009F -- SSRAM */
uint64_t cp_map_fault; /* 0x000090 */
uint64_t _pad_000098; /* 0x000098 */
/* 0x0000A0-0x0000AF -- Arbitration */
uint64_t cp_arb; /* 0x0000A0 */
uint64_t _pad_0000A8; /* 0x0000A8 */
/* 0x0000B0-0x0000BF -- Number In A Can or ATE Parity Error */
uint64_t cp_ate_parity_err; /* 0x0000B0 */
uint64_t _pad_0000B8; /* 0x0000B8 */
/* 0x0000C0-0x0000FF -- PCI/GIO */
uint64_t cp_bus_timeout; /* 0x0000C0 */
uint64_t cp_pci_cfg; /* 0x0000C8 */
uint64_t cp_pci_err_upper; /* 0x0000D0 */
uint64_t cp_pci_err_lower; /* 0x0000D8 */
#define cp_pci_err cp_pci_err_lower
uint64_t _pad_0000E0[4]; /* 0x0000{E0..F8} */
/* 0x000100-0x0001FF -- Interrupt */
uint64_t cp_int_status; /* 0x000100 */
uint64_t cp_int_enable; /* 0x000108 */
uint64_t cp_int_rst_stat; /* 0x000110 */
uint64_t cp_int_mode; /* 0x000118 */
uint64_t cp_int_device; /* 0x000120 */
uint64_t cp_int_host_err; /* 0x000128 */
uint64_t cp_int_addr[8]; /* 0x0001{30,,,68} */
uint64_t cp_err_int_view; /* 0x000170 */
uint64_t cp_mult_int; /* 0x000178 */
uint64_t cp_force_always[8]; /* 0x0001{80,,,B8} */
uint64_t cp_force_pin[8]; /* 0x0001{C0,,,F8} */
/* 0x000200-0x000298 -- Device */
uint64_t cp_device[4]; /* 0x0002{00,,,18} */
uint64_t _pad_000220[4]; /* 0x0002{20,,,38} */
uint64_t cp_wr_req_buf[4]; /* 0x0002{40,,,58} */
uint64_t _pad_000260[4]; /* 0x0002{60,,,78} */
uint64_t cp_rrb_map[2]; /* 0x0002{80,,,88} */
#define cp_even_resp cp_rrb_map[0] /* 0x000280 */
#define cp_odd_resp cp_rrb_map[1] /* 0x000288 */
uint64_t cp_resp_status; /* 0x000290 */
uint64_t cp_resp_clear; /* 0x000298 */
uint64_t _pad_0002A0[12]; /* 0x0002{A0..F8} */
/* 0x000300-0x0003F8 -- Buffer Address Match Registers */
struct {
uint64_t upper; /* 0x0003{00,,,F0} */
uint64_t lower; /* 0x0003{08,,,F8} */
} cp_buf_addr_match[16];
/* 0x000400-0x0005FF -- Performance Monitor Registers (even only) */
struct {
uint64_t flush_w_touch; /* 0x000{400,,,5C0} */
uint64_t flush_wo_touch; /* 0x000{408,,,5C8} */
uint64_t inflight; /* 0x000{410,,,5D0} */
uint64_t prefetch; /* 0x000{418,,,5D8} */
uint64_t total_pci_retry; /* 0x000{420,,,5E0} */
uint64_t max_pci_retry; /* 0x000{428,,,5E8} */
uint64_t max_latency; /* 0x000{430,,,5F0} */
uint64_t clear_all; /* 0x000{438,,,5F8} */
} cp_buf_count[8];
/* 0x000600-0x0009FF -- PCI/X registers */
uint64_t cp_pcix_bus_err_addr; /* 0x000600 */
uint64_t cp_pcix_bus_err_attr; /* 0x000608 */
uint64_t cp_pcix_bus_err_data; /* 0x000610 */
uint64_t cp_pcix_pio_split_addr; /* 0x000618 */
uint64_t cp_pcix_pio_split_attr; /* 0x000620 */
uint64_t cp_pcix_dma_req_err_attr; /* 0x000628 */
uint64_t cp_pcix_dma_req_err_addr; /* 0x000630 */
uint64_t cp_pcix_timeout; /* 0x000638 */
uint64_t _pad_000640[24]; /* 0x000{640,,,6F8} */
/* 0x000700-0x000737 -- Debug Registers */
uint64_t cp_ct_debug_ctl; /* 0x000700 */
uint64_t cp_br_debug_ctl; /* 0x000708 */
uint64_t cp_mux3_debug_ctl; /* 0x000710 */
uint64_t cp_mux4_debug_ctl; /* 0x000718 */
uint64_t cp_mux5_debug_ctl; /* 0x000720 */
uint64_t cp_mux6_debug_ctl; /* 0x000728 */
uint64_t cp_mux7_debug_ctl; /* 0x000730 */
uint64_t _pad_000738[89]; /* 0x000{738,,,9F8} */
/* 0x000A00-0x000BFF -- PCI/X Read&Write Buffer */
struct {
uint64_t cp_buf_addr; /* 0x000{A00,,,AF0} */
uint64_t cp_buf_attr; /* 0X000{A08,,,AF8} */
} cp_pcix_read_buf_64[16];
struct {
uint64_t cp_buf_addr; /* 0x000{B00,,,BE0} */
uint64_t cp_buf_attr; /* 0x000{B08,,,BE8} */
uint64_t cp_buf_valid; /* 0x000{B10,,,BF0} */
uint64_t __pad1; /* 0x000{B18,,,BF8} */
} cp_pcix_write_buf_64[8];
/* End of Local Registers -- Start of Address Map space */
char _pad_000c00[0x010000 - 0x000c00];
/* 0x010000-0x011FF8 -- Internal ATE RAM (Auto Parity Generation) */
uint64_t cp_int_ate_ram[1024]; /* 0x010000-0x011FF8 */
char _pad_012000[0x14000 - 0x012000];
/* 0x014000-0x015FF8 -- Internal ATE RAM (Manual Parity Generation) */
uint64_t cp_int_ate_ram_mp[1024]; /* 0x014000-0x015FF8 */
char _pad_016000[0x18000 - 0x016000];
/* 0x18000-0x197F8 -- TIOCP Write Request Ram */
uint64_t cp_wr_req_lower[256]; /* 0x18000 - 0x187F8 */
uint64_t cp_wr_req_upper[256]; /* 0x18800 - 0x18FF8 */
uint64_t cp_wr_req_parity[256]; /* 0x19000 - 0x197F8 */
char _pad_019800[0x1C000 - 0x019800];
/* 0x1C000-0x1EFF8 -- TIOCP Read Response Ram */
uint64_t cp_rd_resp_lower[512]; /* 0x1C000 - 0x1CFF8 */
uint64_t cp_rd_resp_upper[512]; /* 0x1D000 - 0x1DFF8 */
uint64_t cp_rd_resp_parity[512]; /* 0x1E000 - 0x1EFF8 */
char _pad_01F000[0x20000 - 0x01F000];
/* 0x020000-0x021FFF -- Host Device (CP) Configuration Space (not used) */
char _pad_020000[0x021000 - 0x20000];
/* 0x021000-0x027FFF -- PCI Device Configuration Spaces */
union {
uint8_t c[0x1000 / 1]; /* 0x02{0000,,,7FFF} */
uint16_t s[0x1000 / 2]; /* 0x02{0000,,,7FFF} */
uint32_t l[0x1000 / 4]; /* 0x02{0000,,,7FFF} */
uint64_t d[0x1000 / 8]; /* 0x02{0000,,,7FFF} */
union {
uint8_t c[0x100 / 1];
uint16_t s[0x100 / 2];
uint32_t l[0x100 / 4];
uint64_t d[0x100 / 8];
} f[8];
} cp_type0_cfg_dev[7]; /* 0x02{1000,,,7FFF} */
/* 0x028000-0x028FFF -- PCI Type 1 Configuration Space */
union {
uint8_t c[0x1000 / 1]; /* 0x028000-0x029000 */
uint16_t s[0x1000 / 2]; /* 0x028000-0x029000 */
uint32_t l[0x1000 / 4]; /* 0x028000-0x029000 */
uint64_t d[0x1000 / 8]; /* 0x028000-0x029000 */
union {
uint8_t c[0x100 / 1];
uint16_t s[0x100 / 2];
uint32_t l[0x100 / 4];
uint64_t d[0x100 / 8];
} f[8];
} cp_type1_cfg; /* 0x028000-0x029000 */
char _pad_029000[0x030000-0x029000];
/* 0x030000-0x030007 -- PCI Interrupt Acknowledge Cycle */
union {
uint8_t c[8 / 1];
uint16_t s[8 / 2];
uint32_t l[8 / 4];
uint64_t d[8 / 8];
} cp_pci_iack; /* 0x030000-0x030007 */
char _pad_030007[0x040000-0x030008];
/* 0x040000-0x040007 -- PCIX Special Cycle */
union {
uint8_t c[8 / 1];
uint16_t s[8 / 2];
uint32_t l[8 / 4];
uint64_t d[8 / 8];
} cp_pcix_cycle; /* 0x040000-0x040007 */
char _pad_040007[0x200000-0x040008];
/* 0x200000-0x7FFFFF -- PCI/GIO Device Spaces */
union {
uint8_t c[0x100000 / 1];
uint16_t s[0x100000 / 2];
uint32_t l[0x100000 / 4];
uint64_t d[0x100000 / 8];
} cp_devio_raw[6]; /* 0x200000-0x7FFFFF */
#define cp_devio(n) cp_devio_raw[((n) < 2) ? ((n) * 2) : ((n) + 2)]
char _pad_800000[0xA00000-0x800000];
/* 0xA00000-0xBFFFFF -- PCI/GIO Device Spaces w/flush */
union {
uint8_t c[0x100000 / 1];
uint16_t s[0x100000 / 2];
uint32_t l[0x100000 / 4];
uint64_t d[0x100000 / 8];
} cp_devio_raw_flush[6]; /* 0xA00000-0xBFFFFF */
#define cp_devio_flush(n) cp_devio_raw_flush[((n) < 2) ? ((n) * 2) : ((n) + 2)]
};
#endif /* _ASM_IA64_SN_PCI_TIOCP_H */
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2001-2004 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_SHUB_H
#define _ASM_IA64_SN_SHUB_H
#define MD_MEM_BANKS 4
/*
* Junk Bus Address Space
* The junk bus is used to access the PROM, LED's, and UART. It's
* accessed through the local block MMR space. The data path is
* 16 bits wide. This space requires address bits 31-27 to be set, and
* is further divided by address bits 26:15.
* The LED addresses are write-only. To read the LEDs, you need to use
* SH_JUNK_BUS_LED0-3, defined in shub_mmr.h
*
*/
#define SH_REAL_JUNK_BUS_LED0 0x7fed00000
#define SH_REAL_JUNK_BUS_LED1 0x7fed10000
#define SH_REAL_JUNK_BUS_LED2 0x7fed20000
#define SH_REAL_JUNK_BUS_LED3 0x7fed30000
#define SH_JUNK_BUS_UART0 0x7fed40000
#define SH_JUNK_BUS_UART1 0x7fed40008
#define SH_JUNK_BUS_UART2 0x7fed40010
#define SH_JUNK_BUS_UART3 0x7fed40018
#define SH_JUNK_BUS_UART4 0x7fed40020
#define SH_JUNK_BUS_UART5 0x7fed40028
#define SH_JUNK_BUS_UART6 0x7fed40030
#define SH_JUNK_BUS_UART7 0x7fed40038
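/*
 * Editor's note: on the 16-bit junk bus each UART register occupies an
 * 8-byte stride, so register n sits at SH_JUNK_BUS_UART0 + 8 * n,
 * consistent with the defines above. sh_uart_reg_sketch() is a
 * hypothetical helper.
 */
static inline unsigned long sh_uart_reg_sketch(int n)
{
	return SH_JUNK_BUS_UART0 + 8UL * n;	/* n = 0..7 */
}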
#endif /* _ASM_IA64_SN_SHUB_H */
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_TIO_H
#define _ASM_IA64_SN_TIO_H
#define TIO_MMR_ADDR_MOD
#define TIO_NODE_ID TIO_MMR_ADDR_MOD(0x0000000090060e80)
#define TIO_ITTE_BASE 0xb0008800 /* base of translation table entries */
#define TIO_ITTE(bigwin) (TIO_ITTE_BASE + 8*(bigwin))
#define TIO_ITTE_OFFSET_BITS 8 /* size of offset field */
#define TIO_ITTE_OFFSET_MASK ((1<<TIO_ITTE_OFFSET_BITS)-1)
#define TIO_ITTE_OFFSET_SHIFT 0
#define TIO_ITTE_WIDGET_BITS 2 /* size of widget field */
#define TIO_ITTE_WIDGET_MASK ((1<<TIO_ITTE_WIDGET_BITS)-1)
#define TIO_ITTE_WIDGET_SHIFT 12
#define TIO_ITTE_VALID_MASK 0x1
#define TIO_ITTE_VALID_SHIFT 16
#define TIO_ITTE_PUT(nasid, bigwin, widget, addr, valid) \
REMOTE_HUB_S((nasid), TIO_ITTE(bigwin), \
(((((addr) >> TIO_BWIN_SIZE_BITS) & \
TIO_ITTE_OFFSET_MASK) << TIO_ITTE_OFFSET_SHIFT) | \
(((widget) & TIO_ITTE_WIDGET_MASK) << TIO_ITTE_WIDGET_SHIFT)) | \
(( (valid) & TIO_ITTE_VALID_MASK) << TIO_ITTE_VALID_SHIFT))
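/*
 * Editor's sketch: the value TIO_ITTE_PUT() stores packs the big window
 * offset into bits 7:0, the 2-bit widget number into bits 13:12 and the
 * valid bit into bit 16. tio_itte_encode() is a hypothetical helper that
 * only builds the value (TIO_BWIN_SIZE_BITS is defined elsewhere in the
 * tree):
 */
static inline uint64_t tio_itte_encode(uint64_t addr, int widget, int valid)
{
	return ((((addr >> TIO_BWIN_SIZE_BITS) & TIO_ITTE_OFFSET_MASK)
		 << TIO_ITTE_OFFSET_SHIFT) |
		(((uint64_t)widget & TIO_ITTE_WIDGET_MASK)
		 << TIO_ITTE_WIDGET_SHIFT) |
		(((uint64_t)valid & TIO_ITTE_VALID_MASK)
		 << TIO_ITTE_VALID_SHIFT));
}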
#endif /* _ASM_IA64_SN_TIO_H */
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_XTALK_HUBDEV_H
#define _ASM_IA64_SN_XTALK_HUBDEV_H
#define HUB_WIDGET_ID_MAX 0xf
#define DEV_PER_WIDGET (2*2*8)
#define IIO_ITTE_WIDGET_BITS 4 /* size of widget field */
#define IIO_ITTE_WIDGET_MASK ((1<<IIO_ITTE_WIDGET_BITS)-1)
#define IIO_ITTE_WIDGET_SHIFT 8
/*
* Use the top big window as a surrogate for the first small window
*/
#define SWIN0_BIGWIN HUB_NUM_BIG_WINDOW
#define IIO_NUM_ITTES 7
#define HUB_NUM_BIG_WINDOW (IIO_NUM_ITTES - 1)
struct sn_flush_device_list {
int sfdl_bus;
int sfdl_slot;
int sfdl_pin;
struct bar_list {
unsigned long start;
unsigned long end;
} sfdl_bar_list[6];
unsigned long sfdl_force_int_addr;
unsigned long sfdl_flush_value;
volatile unsigned long *sfdl_flush_addr;
uint64_t sfdl_persistent_busnum;
struct pcibus_info *sfdl_pcibus_info;
spinlock_t sfdl_flush_lock;
};
/*
* **widget_p - Used as an array[wid_num][device] of sn_flush_device_list.
*/
struct sn_flush_nasid_entry {
struct sn_flush_device_list **widget_p; /* Used as an array indexed by wid_num */
uint64_t iio_itte[8];
};
struct hubdev_info {
geoid_t hdi_geoid;
short hdi_nasid;
short hdi_peer_nasid; /* Dual Porting Peer */
struct sn_flush_nasid_entry hdi_flush_nasid_list;
struct xwidget_info hdi_xwidget_info[HUB_WIDGET_ID_MAX + 1];
void *hdi_nodepda;
void *hdi_node_vertex;
void *hdi_xtalk_vertex;
};
extern void hubdev_init_node(nodepda_t *, cnodeid_t);
extern void hub_error_init(struct hubdev_info *);
extern void ice_error_init(struct hubdev_info *);
#endif /* _ASM_IA64_SN_XTALK_HUBDEV_H */
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992-1997,2000-2004 Silicon Graphics, Inc. All Rights Reserved.
*/
#ifndef _ASM_IA64_SN_XTALK_XBOW_H
#define _ASM_IA64_SN_XTALK_XBOW_H
#define XBOW_PORT_8 0x8
#define XBOW_PORT_C 0xc
#define XBOW_PORT_F 0xf
#define MAX_XBOW_PORTS 8 /* number of ports on xbow chip */
#define BASE_XBOW_PORT XBOW_PORT_8 /* Lowest external port */
#define XBOW_CREDIT 4
#define MAX_XBOW_NAME 16
/* Register set for each xbow link */
typedef volatile struct xb_linkregs_s {
/*
* we access these through synergy unswizzled space, so the address
* gets twiddled (i.e. references to 0x4 actually go to 0x0 and vv.)
* That's why we put the register first and filler second.
*/
uint32_t link_ibf;
uint32_t filler0; /* filler for proper alignment */
uint32_t link_control;
uint32_t filler1;
uint32_t link_status;
uint32_t filler2;
uint32_t link_arb_upper;
uint32_t filler3;
uint32_t link_arb_lower;
uint32_t filler4;
uint32_t link_status_clr;
uint32_t filler5;
uint32_t link_reset;
uint32_t filler6;
uint32_t link_aux_status;
uint32_t filler7;
} xb_linkregs_t;
typedef volatile struct xbow_s {
/* standard widget configuration 0x000000-0x000057 */
struct widget_cfg xb_widget; /* 0x000000 */
/* helper fieldnames for accessing bridge widget */
#define xb_wid_id xb_widget.w_id
#define xb_wid_stat xb_widget.w_status
#define xb_wid_err_upper xb_widget.w_err_upper_addr
#define xb_wid_err_lower xb_widget.w_err_lower_addr
#define xb_wid_control xb_widget.w_control
#define xb_wid_req_timeout xb_widget.w_req_timeout
#define xb_wid_int_upper xb_widget.w_intdest_upper_addr
#define xb_wid_int_lower xb_widget.w_intdest_lower_addr
#define xb_wid_err_cmdword xb_widget.w_err_cmd_word
#define xb_wid_llp xb_widget.w_llp_cfg
#define xb_wid_stat_clr xb_widget.w_tflush
/*
* we access these through synergy unswizzled space, so the address
* gets twiddled (i.e. references to 0x4 actually go to 0x0 and vv.)
* That's why we put the register first and filler second.
*/
/* xbow-specific widget configuration 0x000058-0x0000FF */
uint32_t xb_wid_arb_reload; /* 0x00005C */
uint32_t _pad_000058;
uint32_t xb_perf_ctr_a; /* 0x000064 */
uint32_t _pad_000060;
uint32_t xb_perf_ctr_b; /* 0x00006c */
uint32_t _pad_000068;
uint32_t xb_nic; /* 0x000074 */
uint32_t _pad_000070;
/* Xbridge only */
uint32_t xb_w0_rst_fnc; /* 0x00007C */
uint32_t _pad_000078;
uint32_t xb_l8_rst_fnc; /* 0x000084 */
uint32_t _pad_000080;
uint32_t xb_l9_rst_fnc; /* 0x00008c */
uint32_t _pad_000088;
uint32_t xb_la_rst_fnc; /* 0x000094 */
uint32_t _pad_000090;
uint32_t xb_lb_rst_fnc; /* 0x00009c */
uint32_t _pad_000098;
uint32_t xb_lc_rst_fnc; /* 0x0000a4 */
uint32_t _pad_0000a0;
uint32_t xb_ld_rst_fnc; /* 0x0000ac */
uint32_t _pad_0000a8;
uint32_t xb_le_rst_fnc; /* 0x0000b4 */
uint32_t _pad_0000b0;
uint32_t xb_lf_rst_fnc; /* 0x0000bc */
uint32_t _pad_0000b8;
uint32_t xb_lock; /* 0x0000c4 */
uint32_t _pad_0000c0;
uint32_t xb_lock_clr; /* 0x0000cc */
uint32_t _pad_0000c8;
/* end of Xbridge only */
uint32_t _pad_0000d0[12];
/* Link Specific Registers, port 8..15 0x000100-0x000300 */
xb_linkregs_t xb_link_raw[MAX_XBOW_PORTS];
#define xb_link(p) xb_link_raw[(p) & (MAX_XBOW_PORTS - 1)]
} xbow_t;
#define XB_FLAGS_EXISTS 0x1 /* device exists */
#define XB_FLAGS_MASTER 0x2
#define XB_FLAGS_SLAVE 0x0
#define XB_FLAGS_GBR 0x4
#define XB_FLAGS_16BIT 0x8
#define XB_FLAGS_8BIT 0x0
/* is widget port number valid? (based on version 7.0 of xbow spec) */
#define XBOW_WIDGET_IS_VALID(wid) ((wid) >= XBOW_PORT_8 && (wid) <= XBOW_PORT_F)
/* whether to use upper or lower arbitration register, given source widget id */
#define XBOW_ARB_IS_UPPER(wid) ((wid) >= XBOW_PORT_8 && (wid) <= XBOW_PORT_B)
#define XBOW_ARB_IS_LOWER(wid) ((wid) >= XBOW_PORT_C && (wid) <= XBOW_PORT_F)
/* offset of arbitration register, given source widget id */
#define XBOW_ARB_OFF(wid) (XBOW_ARB_IS_UPPER(wid) ? 0x1c : 0x24)
#define XBOW_WID_ID WIDGET_ID
#define XBOW_WID_STAT WIDGET_STATUS
#define XBOW_WID_ERR_UPPER WIDGET_ERR_UPPER_ADDR
#define XBOW_WID_ERR_LOWER WIDGET_ERR_LOWER_ADDR
#define XBOW_WID_CONTROL WIDGET_CONTROL
#define XBOW_WID_REQ_TO WIDGET_REQ_TIMEOUT
#define XBOW_WID_INT_UPPER WIDGET_INTDEST_UPPER_ADDR
#define XBOW_WID_INT_LOWER WIDGET_INTDEST_LOWER_ADDR
#define XBOW_WID_ERR_CMDWORD WIDGET_ERR_CMD_WORD
#define XBOW_WID_LLP WIDGET_LLP_CFG
#define XBOW_WID_STAT_CLR WIDGET_TFLUSH
#define XBOW_WID_ARB_RELOAD 0x5c
#define XBOW_WID_PERF_CTR_A 0x64
#define XBOW_WID_PERF_CTR_B 0x6c
#define XBOW_WID_NIC 0x74
/* Xbridge only */
#define XBOW_W0_RST_FNC 0x00007C
#define XBOW_L8_RST_FNC 0x000084
#define XBOW_L9_RST_FNC 0x00008c
#define XBOW_LA_RST_FNC 0x000094
#define XBOW_LB_RST_FNC 0x00009c
#define XBOW_LC_RST_FNC 0x0000a4
#define XBOW_LD_RST_FNC 0x0000ac
#define XBOW_LE_RST_FNC 0x0000b4
#define XBOW_LF_RST_FNC 0x0000bc
#define XBOW_RESET_FENCE(x) (((x) > 7 && (x) < 16) ? \
(XBOW_W0_RST_FNC + ((x) - 7) * 8) : \
(((x) == 0) ? XBOW_W0_RST_FNC : 0))
#define XBOW_LOCK 0x0000c4
#define XBOW_LOCK_CLR 0x0000cc
/* End of Xbridge only */
/* used only in ide, but defined here within the reserved portion */
/* of the widget0 address space (before 0xf4) */
#define XBOW_WID_UNDEF 0xe4
/* xbow link register set base, legal value for x is 0x8..0xf */
#define XB_LINK_BASE 0x100
#define XB_LINK_OFFSET 0x40
#define XB_LINK_REG_BASE(x) (XB_LINK_BASE + ((x) & (MAX_XBOW_PORTS - 1)) * XB_LINK_OFFSET)
#define XB_LINK_IBUF_FLUSH(x) (XB_LINK_REG_BASE(x) + 0x4)
#define XB_LINK_CTRL(x) (XB_LINK_REG_BASE(x) + 0xc)
#define XB_LINK_STATUS(x) (XB_LINK_REG_BASE(x) + 0x14)
#define XB_LINK_ARB_UPPER(x) (XB_LINK_REG_BASE(x) + 0x1c)
#define XB_LINK_ARB_LOWER(x) (XB_LINK_REG_BASE(x) + 0x24)
#define XB_LINK_STATUS_CLR(x) (XB_LINK_REG_BASE(x) + 0x2c)
#define XB_LINK_RESET(x) (XB_LINK_REG_BASE(x) + 0x34)
#define XB_LINK_AUX_STATUS(x) (XB_LINK_REG_BASE(x) + 0x3c)
/* link_control(x) */
#define XB_CTRL_LINKALIVE_IE 0x80000000 /* link comes alive */
/* reserved: 0x40000000 */
#define XB_CTRL_PERF_CTR_MODE_MSK 0x30000000 /* perf counter mode */
#define XB_CTRL_IBUF_LEVEL_MSK 0x0e000000 /* input packet buffer level */
#define XB_CTRL_8BIT_MODE 0x01000000 /* force link into 8 bit mode */
#define XB_CTRL_BAD_LLP_PKT 0x00800000 /* force bad LLP packet */
#define XB_CTRL_WIDGET_CR_MSK 0x007c0000 /* LLP widget credit mask */
#define XB_CTRL_WIDGET_CR_SHFT 18 /* LLP widget credit shift */
#define XB_CTRL_ILLEGAL_DST_IE 0x00020000 /* illegal destination */
#define XB_CTRL_OALLOC_IBUF_IE 0x00010000 /* overallocated input buffer */
/* reserved: 0x0000fe00 */
#define XB_CTRL_BNDWDTH_ALLOC_IE 0x00000100 /* bandwidth alloc */
#define XB_CTRL_RCV_CNT_OFLOW_IE 0x00000080 /* rcv retry overflow */
#define XB_CTRL_XMT_CNT_OFLOW_IE 0x00000040 /* xmt retry overflow */
#define XB_CTRL_XMT_MAX_RTRY_IE 0x00000020 /* max transmit retry */
#define XB_CTRL_RCV_IE 0x00000010 /* receive */
#define XB_CTRL_XMT_RTRY_IE 0x00000008 /* transmit retry */
/* reserved: 0x00000004 */
#define XB_CTRL_MAXREQ_TOUT_IE 0x00000002 /* maximum request timeout */
#define XB_CTRL_SRC_TOUT_IE 0x00000001 /* source timeout */
/* link_status(x) */
#define XB_STAT_LINKALIVE XB_CTRL_LINKALIVE_IE
/* reserved: 0x7ff80000 */
#define XB_STAT_MULTI_ERR 0x00040000 /* multi error */
#define XB_STAT_ILLEGAL_DST_ERR XB_CTRL_ILLEGAL_DST_IE
#define XB_STAT_OALLOC_IBUF_ERR XB_CTRL_OALLOC_IBUF_IE
#define XB_STAT_BNDWDTH_ALLOC_ID_MSK 0x0000ff00 /* port bitmask */
#define XB_STAT_RCV_CNT_OFLOW_ERR XB_CTRL_RCV_CNT_OFLOW_IE
#define XB_STAT_XMT_CNT_OFLOW_ERR XB_CTRL_XMT_CNT_OFLOW_IE
#define XB_STAT_XMT_MAX_RTRY_ERR XB_CTRL_XMT_MAX_RTRY_IE
#define XB_STAT_RCV_ERR XB_CTRL_RCV_IE
#define XB_STAT_XMT_RTRY_ERR XB_CTRL_XMT_RTRY_IE
/* reserved: 0x00000004 */
#define XB_STAT_MAXREQ_TOUT_ERR XB_CTRL_MAXREQ_TOUT_IE
#define XB_STAT_SRC_TOUT_ERR XB_CTRL_SRC_TOUT_IE
/* link_aux_status(x) */
#define XB_AUX_STAT_RCV_CNT 0xff000000
#define XB_AUX_STAT_XMT_CNT 0x00ff0000
#define XB_AUX_STAT_TOUT_DST 0x0000ff00
#define XB_AUX_LINKFAIL_RST_BAD 0x00000040
#define XB_AUX_STAT_PRESENT 0x00000020
#define XB_AUX_STAT_PORT_WIDTH 0x00000010
/* reserved: 0x0000000f */
/*
* link_arb_upper/link_arb_lower(x), (reg) should be the link_arb_upper
* register if (x) is 0x8..0xb, link_arb_lower if (x) is 0xc..0xf
*/
#define XB_ARB_GBR_MSK 0x1f
#define XB_ARB_RR_MSK 0x7
#define XB_ARB_GBR_SHFT(x) (((x) & 0x3) * 8)
#define XB_ARB_RR_SHFT(x) (((x) & 0x3) * 8 + 5)
#define XB_ARB_GBR_CNT(reg,x) ((reg) >> XB_ARB_GBR_SHFT(x) & XB_ARB_GBR_MSK)
#define XB_ARB_RR_CNT(reg,x) ((reg) >> XB_ARB_RR_SHFT(x) & XB_ARB_RR_MSK)
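/*
 * Editor's worked example: source widget 0xa uses the upper arbitration
 * register (0x8..0xb -> upper) and byte lane (0xa & 0x3) == 2, so
 * XB_ARB_GBR_SHFT(0xa) == 16 and its RR field sits five bits above, at
 * shift 21:
 */
static inline int xb_arb_gbr_sketch(uint32_t arb_upper)
{
	return XB_ARB_GBR_CNT(arb_upper, 0xa);	/* == (arb_upper >> 16) & 0x1f */
}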
/* XBOW_WID_STAT */
#define XB_WID_STAT_LINK_INTR_SHFT (24)
#define XB_WID_STAT_LINK_INTR_MASK (0xFF << XB_WID_STAT_LINK_INTR_SHFT)
#define XB_WID_STAT_LINK_INTR(x) (0x1 << (((x)&7) + XB_WID_STAT_LINK_INTR_SHFT))
#define XB_WID_STAT_WIDGET0_INTR 0x00800000
#define XB_WID_STAT_SRCID_MASK 0x000003c0 /* Xbridge only */
#define XB_WID_STAT_REG_ACC_ERR 0x00000020
#define XB_WID_STAT_RECV_TOUT 0x00000010 /* Xbridge only */
#define XB_WID_STAT_ARB_TOUT 0x00000008 /* Xbridge only */
#define XB_WID_STAT_XTALK_ERR 0x00000004
#define XB_WID_STAT_DST_TOUT 0x00000002 /* Xbridge only */
#define XB_WID_STAT_MULTI_ERR 0x00000001
#define XB_WID_STAT_SRCID_SHFT 6
/* XBOW_WID_CONTROL */
#define XB_WID_CTRL_REG_ACC_IE XB_WID_STAT_REG_ACC_ERR
#define XB_WID_CTRL_RECV_TOUT XB_WID_STAT_RECV_TOUT
#define XB_WID_CTRL_ARB_TOUT XB_WID_STAT_ARB_TOUT
#define XB_WID_CTRL_XTALK_IE XB_WID_STAT_XTALK_ERR
/* XBOW_WID_INT_UPPER */
/* defined in xwidget.h for WIDGET_INTDEST_UPPER_ADDR */
/* XBOW WIDGET part number, in the ID register */
#define XBOW_WIDGET_PART_NUM 0x0 /* crossbow */
#define XXBOW_WIDGET_PART_NUM 0xd000 /* Xbridge */
#define XBOW_WIDGET_MFGR_NUM 0x0
#define XXBOW_WIDGET_MFGR_NUM 0x0
#define PXBOW_WIDGET_PART_NUM 0xd100 /* PIC */
#define XBOW_REV_1_0 0x1 /* xbow rev 1.0 is "1" */
#define XBOW_REV_1_1 0x2 /* xbow rev 1.1 is "2" */
#define XBOW_REV_1_2 0x3 /* xbow rev 1.2 is "3" */
#define XBOW_REV_1_3 0x4 /* xbow rev 1.3 is "4" */
#define XBOW_REV_2_0 0x5 /* xbow rev 2.0 is "5" */
#define XXBOW_PART_REV_1_0 (XXBOW_WIDGET_PART_NUM << 4 | 0x1 )
#define XXBOW_PART_REV_2_0 (XXBOW_WIDGET_PART_NUM << 4 | 0x2 )
/* XBOW_WID_ARB_RELOAD */
#define XBOW_WID_ARB_RELOAD_INT 0x3f /* GBR reload interval */
#define IS_XBRIDGE_XBOW(wid) \
(XWIDGET_PART_NUM(wid) == XXBOW_WIDGET_PART_NUM && \
XWIDGET_MFG_NUM(wid) == XXBOW_WIDGET_MFGR_NUM)
#define IS_PIC_XBOW(wid) \
(XWIDGET_PART_NUM(wid) == PXBOW_WIDGET_PART_NUM && \
XWIDGET_MFG_NUM(wid) == XXBOW_WIDGET_MFGR_NUM)
#define XBOW_WAR_ENABLED(pv, widid) ((1 << XWIDGET_REV_NUM(widid)) & (pv))
#endif /* _ASM_IA64_SN_XTALK_XBOW_H */
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992-1997,2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*/
#ifndef _ASM_IA64_SN_XTALK_XWIDGET_H
#define _ASM_IA64_SN_XTALK_XWIDGET_H
/* WIDGET_ID */
#define WIDGET_REV_NUM 0xf0000000
#define WIDGET_PART_NUM 0x0ffff000
#define WIDGET_MFG_NUM 0x00000ffe
#define WIDGET_REV_NUM_SHFT 28
#define WIDGET_PART_NUM_SHFT 12
#define WIDGET_MFG_NUM_SHFT 1
#define XWIDGET_PART_NUM(widgetid) (((widgetid) & WIDGET_PART_NUM) >> WIDGET_PART_NUM_SHFT)
#define XWIDGET_REV_NUM(widgetid) (((widgetid) & WIDGET_REV_NUM) >> WIDGET_REV_NUM_SHFT)
#define XWIDGET_MFG_NUM(widgetid) (((widgetid) & WIDGET_MFG_NUM) >> WIDGET_MFG_NUM_SHFT)
#define XWIDGET_PART_REV_NUM(widgetid) ((XWIDGET_PART_NUM(widgetid) << 4) | \
XWIDGET_REV_NUM(widgetid))
#define XWIDGET_PART_REV_NUM_REV(partrev) ((partrev) & 0xf)
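/*
 * Editor's worked example: a widget ID of 0x2d000e03 decodes with the
 * macros above to rev 0x2 (bits 31:28), part 0xd000 (bits 27:12) and
 * mfg 0x701 (bits 11:1). A sketch testing for a given part/mfg pair
 * (the sample values are illustrative):
 */
static inline int xwidget_match_sketch(uint32_t wid_id, int part, int mfg)
{
	return XWIDGET_PART_NUM(wid_id) == part &&
	       XWIDGET_MFG_NUM(wid_id) == mfg;
}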
/* widget configuration registers */
struct widget_cfg{
uint32_t w_id; /* 0x04 */
uint32_t w_pad_0; /* 0x00 */
uint32_t w_status; /* 0x0c */
uint32_t w_pad_1; /* 0x08 */
uint32_t w_err_upper_addr; /* 0x14 */
uint32_t w_pad_2; /* 0x10 */
uint32_t w_err_lower_addr; /* 0x1c */
uint32_t w_pad_3; /* 0x18 */
uint32_t w_control; /* 0x24 */
uint32_t w_pad_4; /* 0x20 */
uint32_t w_req_timeout; /* 0x2c */
uint32_t w_pad_5; /* 0x28 */
uint32_t w_intdest_upper_addr; /* 0x34 */
uint32_t w_pad_6; /* 0x30 */
uint32_t w_intdest_lower_addr; /* 0x3c */
uint32_t w_pad_7; /* 0x38 */
uint32_t w_err_cmd_word; /* 0x44 */
uint32_t w_pad_8; /* 0x40 */
uint32_t w_llp_cfg; /* 0x4c */
uint32_t w_pad_9; /* 0x48 */
uint32_t w_tflush; /* 0x54 */
uint32_t w_pad_10; /* 0x50 */
};
/*
* Crosstalk Widget Hardware Identification, as defined in the Crosstalk spec.
*/
struct xwidget_hwid{
int mfg_num;
int rev_num;
int part_num;
};
struct xwidget_info{
struct xwidget_hwid xwi_hwid; /* Widget Identification */
char xwi_masterxid; /* Hub's Widget Port Number */
void *xwi_hubinfo; /* Hub's provider private info */
uint64_t *xwi_hub_provider; /* prom provider functions */
void *xwi_vertex;
};
#endif /* _ASM_IA64_SN_XTALK_XWIDGET_H */
@@ -7,5 +7,6 @@
# Copyright (C) 1999,2001-2003 Silicon Graphics, Inc. All Rights Reserved.
#
obj-y += probe.o setup.o bte.o irq.o mca.o idle.o sn2/
obj-y += setup.o bte.o bte_error.o irq.o mca.o idle.o \
huberror.o io_init.o iomv.o klconflib.o sn2/
obj-$(CONFIG_IA64_GENERIC) += machvec.o
@@ -8,13 +8,12 @@
#include <linux/config.h>
#include <linux/module.h>
#include <asm/sn/sgi.h>
#include <asm/sn/nodepda.h>
#include <asm/sn/addrs.h>
#include <asm/sn/arch.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/pda.h>
#include <asm/sn/sn2/shubio.h>
#include "shubio.h"
#include <asm/nodedata.h>
#include <linux/bootmem.h>
@@ -30,8 +29,7 @@
/* two interfaces on two btes */
#define MAX_INTERFACES_TO_TRY 4
static struct bteinfo_s *
bte_if_on_node(nasid_t nasid, int interface)
static struct bteinfo_s *bte_if_on_node(nasid_t nasid, int interface)
{
nodepda_t *tmp_nodepda;
@@ -40,13 +38,11 @@ bte_if_on_node(nasid_t nasid, int interface)
}
/************************************************************************
* Block Transfer Engine copy related functions.
*
***********************************************************************/
/*
* bte_copy(src, dest, len, mode, notification)
*
@@ -66,8 +62,7 @@ bte_if_on_node(nasid_t nasid, int interface)
* NOTE: This function requires src, dest, and len to
* be cacheline aligned.
*/
bte_result_t
bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification)
bte_result_t bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification)
{
u64 transfer_size;
u64 transfer_stat;
@@ -77,7 +72,6 @@ bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification)
struct bteinfo_s *btes_to_try[MAX_INTERFACES_TO_TRY];
int bte_if_index;
BTE_PRINTK(("bte_copy(0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%p)\n",
src, dest, len, mode, notification));
@@ -85,9 +79,9 @@ bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification)
return BTE_SUCCESS;
}
ASSERT(!((len & L1_CACHE_MASK) ||
(src & L1_CACHE_MASK) || (dest & L1_CACHE_MASK)));
BUG_ON((len & L1_CACHE_MASK) ||
(src & L1_CACHE_MASK) || (dest & L1_CACHE_MASK));
ASSERT(len < ((BTE_LEN_MASK + 1) << L1_CACHE_SHIFT));
BUG_ON(!(len < ((BTE_LEN_MASK + 1) << L1_CACHE_SHIFT)));
if (mode & BTE_USE_DEST) {
/* try remote then local */
@@ -150,7 +144,6 @@ bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification)
}
} while (1);
if (notification == NULL) {
/* User does not want to be notified. */
bte->most_rcnt_na = &bte->notify;
@@ -175,10 +168,10 @@ bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification)
BTE_DEST_STORE(bte, TO_PHYS(dest));
/* Set the notification register */
BTE_PRINTKV(("IBNA = 0x%lx)\n",
BTE_PRINTKV(("IBNA = 0x%lx)\n",
TO_PHYS(ia64_tpa((unsigned long)bte->most_rcnt_na))));
BTE_NOTIF_STORE(bte, TO_PHYS(ia64_tpa((unsigned long)bte->most_rcnt_na)));
BTE_NOTIF_STORE(bte,
TO_PHYS(ia64_tpa((unsigned long)bte->most_rcnt_na)));
/* Initiate the transfer */
BTE_PRINTK(("IBCT = 0x%lx)\n", BTE_VALID_MODE(mode)));
@@ -186,7 +179,6 @@ bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification)
spin_unlock_irqrestore(&bte->spinlock, irq_flags);
if (notification != NULL) {
return BTE_SUCCESS;
}
@@ -194,9 +186,8 @@ bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification)
while ((transfer_stat = *bte->most_rcnt_na) == -1UL) {
}
BTE_PRINTKV((" Delay Done. IBLS = 0x%lx, most_rcnt_na = 0x%lx\n",
BTE_LNSTAT_LOAD(bte), *bte->most_rcnt_na));
BTE_LNSTAT_LOAD(bte), *bte->most_rcnt_na));
if (transfer_stat & IBLS_ERROR) {
bte_status = transfer_stat & ~IBLS_ERROR;
@@ -205,12 +196,12 @@ bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification)
bte_status = BTE_SUCCESS;
}
BTE_PRINTK(("Returning status is 0x%lx and most_rcnt_na is 0x%lx\n",
BTE_LNSTAT_LOAD(bte), *bte->most_rcnt_na));
BTE_LNSTAT_LOAD(bte), *bte->most_rcnt_na));
return bte_status;
}
EXPORT_SYMBOL(bte_copy);
EXPORT_SYMBOL(bte_copy);
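/*
 * Editor's usage sketch (not part of the commit): a synchronous transfer.
 * With a NULL notification bte_copy() spins until completion; src, dest
 * and len must all be cacheline aligned, per the NOTE above, and mode may
 * include BTE_USE_DEST to try the destination node's interfaces first.
 */
static bte_result_t bte_copy_sync_sketch(u64 src_phys, u64 dest_phys,
					 u64 nbytes, u64 mode)
{
	return bte_copy(src_phys, dest_phys, nbytes, mode, NULL);
}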
/*
* bte_unaligned_copy(src, dest, len, mode)
@@ -228,8 +219,7 @@ EXPORT_SYMBOL(bte_copy);
* NOTE: If the source, dest, and len are all cache line aligned,
* then it would be _FAR_ preferable to use bte_copy instead.
*/
bte_result_t
bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
bte_result_t bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
{
int destFirstCacheOffset;
u64 headBteSource;
@@ -254,7 +244,7 @@ bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
if (bteBlock_unaligned == NULL) {
return BTEFAIL_NOTAVAIL;
}
bteBlock = (char *) L1_CACHE_ALIGN((u64) bteBlock_unaligned);
bteBlock = (char *)L1_CACHE_ALIGN((u64) bteBlock_unaligned);
headBcopySrcOffset = src & L1_CACHE_MASK;
destFirstCacheOffset = dest & L1_CACHE_MASK;
@@ -302,15 +292,13 @@ bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
}
if (len > headBcopyLen) {
footBcopyLen =
(len - headBcopyLen) & L1_CACHE_MASK;
footBcopyLen = (len - headBcopyLen) & L1_CACHE_MASK;
footBteLen = L1_CACHE_BYTES;
footBteSource = src + len - footBcopyLen;
footBcopyDest = dest + len - footBcopyLen;
if (footBcopyDest ==
(headBcopyDest + headBcopyLen)) {
if (footBcopyDest == (headBcopyDest + headBcopyLen)) {
/*
* We have two contiguous bcopy
* blocks. Merge them.
@@ -326,9 +314,8 @@ bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
return rv;
}
memcpy(__va(footBcopyDest),
(char *) bteBlock, footBcopyLen);
(char *)bteBlock, footBcopyLen);
}
} else {
footBcopyLen = 0;
@@ -350,7 +337,6 @@ bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
}
} else {
/*
* The transfer is not symmetric; we will
* allocate a buffer large enough for all the
@@ -361,8 +347,7 @@ bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
/* Add the leader from source */
headBteLen = len + (src & L1_CACHE_MASK);
/* Add the trailing bytes from footer. */
headBteLen +=
L1_CACHE_BYTES - (headBteLen & L1_CACHE_MASK);
headBteLen += L1_CACHE_BYTES - (headBteLen & L1_CACHE_MASK);
headBteSource = src & ~L1_CACHE_MASK;
headBcopySrcOffset = src & L1_CACHE_MASK;
headBcopyDest = dest;
@@ -371,40 +356,37 @@ bte_unaligned_copy(u64 src, u64 dest, u64 len, u64 mode)
if (headBcopyLen > 0) {
rv = bte_copy(headBteSource,
ia64_tpa((unsigned long)bteBlock), headBteLen, mode, NULL);
ia64_tpa((unsigned long)bteBlock), headBteLen,
mode, NULL);
if (rv != BTE_SUCCESS) {
kfree(bteBlock_unaligned);
return rv;
}
memcpy(__va(headBcopyDest), ((char *) bteBlock +
headBcopySrcOffset),
headBcopyLen);
memcpy(__va(headBcopyDest), ((char *)bteBlock +
headBcopySrcOffset), headBcopyLen);
}
kfree(bteBlock_unaligned);
return BTE_SUCCESS;
}
EXPORT_SYMBOL(bte_unaligned_copy);
EXPORT_SYMBOL(bte_unaligned_copy);
/************************************************************************
* Block Transfer Engine initialization functions.
*
***********************************************************************/
/*
* bte_init_node(nodepda, cnode)
*
* Initialize the nodepda structure with BTE base addresses and
* spinlocks.
*/
void
bte_init_node(nodepda_t * mynodepda, cnodeid_t cnode)
void bte_init_node(nodepda_t * mynodepda, cnodeid_t cnode)
{
int i;
/*
* Indicate that all the block transfer engines on this node
* are available.
@@ -418,13 +400,13 @@ bte_init_node(nodepda_t * mynodepda, cnodeid_t cnode)
spin_lock_init(&mynodepda->bte_recovery_lock);
init_timer(&mynodepda->bte_recovery_timer);
mynodepda->bte_recovery_timer.function = bte_error_handler;
mynodepda->bte_recovery_timer.data = (unsigned long) mynodepda;
mynodepda->bte_recovery_timer.data = (unsigned long)mynodepda;
for (i = 0; i < BTES_PER_NODE; i++) {
/* Which link status register should we use? */
unsigned long link_status = (i == 0 ? IIO_IBLS0 : IIO_IBLS1);
mynodepda->bte_if[i].bte_base_addr = (u64 *)
REMOTE_HUB_ADDR(cnodeid_to_nasid(cnode), link_status);
REMOTE_HUB_ADDR(cnodeid_to_nasid(cnode), link_status);
/*
* Initialize the notification and spinlock
......
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
*/
#include <linux/types.h>
#include <asm/sn/sn_sal.h>
#include "ioerror.h"
#include <asm/sn/addrs.h>
#include "shubio.h"
#include <asm/sn/geo.h>
#include "xtalk/xwidgetdev.h"
#include "xtalk/hubdev.h"
#include <asm/sn/bte.h>
/*
* Bte error handling is done in two parts. The first captures
* any crb related errors. Since there can be multiple crbs per
* interface and multiple interfaces active, we need to wait until
* all active crbs are completed. This is the first job of the
* second part error handler. When all bte related CRBs are cleanly
* completed, it resets the interfaces and gets them ready for new
* transfers to be queued.
*/
void bte_error_handler(unsigned long);
/*
* Wait until all BTE related CRBs are completed
* and then reset the interfaces.
*/
void bte_error_handler(unsigned long _nodepda)
{
struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda;
spinlock_t *recovery_lock = &err_nodepda->bte_recovery_lock;
struct timer_list *recovery_timer = &err_nodepda->bte_recovery_timer;
nasid_t nasid;
int i;
int valid_crbs;
unsigned long irq_flags;
volatile u64 *notify;
bte_result_t bh_error;
ii_imem_u_t imem; /* II IMEM Register */
ii_icrb0_d_u_t icrbd; /* II CRB Register D */
ii_ibcr_u_t ibcr;
ii_icmr_u_t icmr;
BTE_PRINTK(("bte_error_handler(%p) - %d\n", err_nodepda,
smp_processor_id()));
spin_lock_irqsave(recovery_lock, irq_flags);
if ((err_nodepda->bte_if[0].bh_error == BTE_SUCCESS) &&
(err_nodepda->bte_if[1].bh_error == BTE_SUCCESS)) {
BTE_PRINTK(("eh:%p:%d Nothing to do.\n", err_nodepda,
smp_processor_id()));
spin_unlock_irqrestore(recovery_lock, irq_flags);
return;
}
/*
* Lock all interfaces on this node to prevent new transfers
* from being queued.
*/
for (i = 0; i < BTES_PER_NODE; i++) {
if (err_nodepda->bte_if[i].cleanup_active) {
continue;
}
spin_lock(&err_nodepda->bte_if[i].spinlock);
BTE_PRINTK(("eh:%p:%d locked %d\n", err_nodepda,
smp_processor_id(), i));
err_nodepda->bte_if[i].cleanup_active = 1;
}
/* Determine information about our hub */
nasid = cnodeid_to_nasid(err_nodepda->bte_if[0].bte_cnode);
/*
* A BTE transfer can use multiple CRBs. We need to make sure
* that all the BTE CRBs are complete (or timed out) before
* attempting to clean up the error. Resetting the BTE while
* there are still BTE CRBs active will hang the BTE.
* We should look at all the CRBs to see if they are allocated
* to the BTE and see if they are still active. When none
* are active, we can continue with the cleanup.
*
* We also want to make sure that the local NI port is up.
* When a router resets, the NI port can go down while it
* goes through the LLP handshake, but then comes back up.
*/
icmr.ii_icmr_regval = REMOTE_HUB_L(nasid, IIO_ICMR);
if (icmr.ii_icmr_fld_s.i_crb_mark != 0) {
/*
* There are errors which still need to be cleaned up by
* hubiio_crb_error_handler
*/
mod_timer(recovery_timer, jiffies + (HZ * 5));
BTE_PRINTK(("eh:%p:%d Marked Giving up\n", err_nodepda,
smp_processor_id()));
spin_unlock_irqrestore(recovery_lock, irq_flags);
return;
}
if (icmr.ii_icmr_fld_s.i_crb_vld != 0) {
valid_crbs = icmr.ii_icmr_fld_s.i_crb_vld;
for (i = 0; i < IIO_NUM_CRBS; i++) {
if (!((1 << i) & valid_crbs)) {
/* This crb was not marked as valid, ignore */
continue;
}
icrbd.ii_icrb0_d_regval =
REMOTE_HUB_L(nasid, IIO_ICRB_D(i));
if (icrbd.d_bteop) {
mod_timer(recovery_timer, jiffies + (HZ * 5));
BTE_PRINTK(("eh:%p:%d Valid %d, Giving up\n",
err_nodepda, smp_processor_id(),
i));
spin_unlock_irqrestore(recovery_lock,
irq_flags);
return;
}
}
}
BTE_PRINTK(("eh:%p:%d Cleaning up\n", err_nodepda, smp_processor_id()));
/* Reenable both bte interfaces */
imem.ii_imem_regval = REMOTE_HUB_L(nasid, IIO_IMEM);
imem.ii_imem_fld_s.i_b0_esd = imem.ii_imem_fld_s.i_b1_esd = 1;
REMOTE_HUB_S(nasid, IIO_IMEM, imem.ii_imem_regval);
/* Reinitialize both BTE state machines. */
ibcr.ii_ibcr_regval = REMOTE_HUB_L(nasid, IIO_IBCR);
ibcr.ii_ibcr_fld_s.i_soft_reset = 1;
REMOTE_HUB_S(nasid, IIO_IBCR, ibcr.ii_ibcr_regval);
for (i = 0; i < BTES_PER_NODE; i++) {
bh_error = err_nodepda->bte_if[i].bh_error;
if (bh_error != BTE_SUCCESS) {
/* There is an error which needs to be notified */
notify = err_nodepda->bte_if[i].most_rcnt_na;
BTE_PRINTK(("cnode %d bte %d error=0x%lx\n",
err_nodepda->bte_if[i].bte_cnode,
err_nodepda->bte_if[i].bte_num,
IBLS_ERROR | (u64) bh_error));
*notify = IBLS_ERROR | bh_error;
err_nodepda->bte_if[i].bh_error = BTE_SUCCESS;
}
err_nodepda->bte_if[i].cleanup_active = 0;
BTE_PRINTK(("eh:%p:%d Unlocked %d\n", err_nodepda,
smp_processor_id(), i));
		spin_unlock(&err_nodepda->bte_if[i].spinlock);
}
del_timer(recovery_timer);
spin_unlock_irqrestore(recovery_lock, irq_flags);
}
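/*
 * Sketch of how the deferred retry is assumed to be wired up (the
 * timer itself is initialized elsewhere): the nodepda recovery timer
 * re-armed via mod_timer() above would point back at this handler so
 * the node is re-examined once the CRBs have had a chance to drain:
 *
 *	recovery_timer->function = bte_error_handler;
 *	recovery_timer->data = (unsigned long) err_nodepda;
 */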
/*
* First part error handler. This is called whenever any error CRB interrupt
* is generated by the II.
*/
void
bte_crb_error_handler(cnodeid_t cnode, int btenum,
int crbnum, ioerror_t * ioe, int bteop)
{
struct bteinfo_s *bte;
bte = &(NODEPDA(cnode)->bte_if[btenum]);
	/*
	 * The caller has already figured out the error type; we save it
	 * in the bte handle structure for the thread exercising the
	 * interface to consume.
	 */
bte->bh_error = ioe->ie_errortype + BTEFAIL_OFFSET;
bte->bte_error_count++;
BTE_PRINTK(("Got an error on cnode %d bte %d: HW error type 0x%x\n",
bte->bte_cnode, bte->bte_num, ioe->ie_errortype));
bte_error_handler((unsigned long) NODEPDA(cnode));
}
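/*
 * Note: bh_error is simply the raw II error code biased into the
 * bte_result_t range, e.g. (illustrative value only) ie_errortype 0x1
 * becomes BTEFAIL_OFFSET + 0x1. The thread polling most_rcnt_na sees
 * it once bte_error_handler() stores IBLS_ERROR | bh_error into the
 * notify word.
 */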
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000,2002-2004 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <asm/delay.h>
#include <asm/sn/sn_sal.h>
#include "ioerror.h"
#include <asm/sn/addrs.h>
#include "shubio.h"
#include <asm/sn/geo.h>
#include "xtalk/xwidgetdev.h"
#include "xtalk/hubdev.h"
#include <asm/sn/bte.h>
void hubiio_crb_error_handler(struct hubdev_info *hubdev_info);
extern void bte_crb_error_handler(cnodeid_t cnode, int btenum,
				  int crbnum, ioerror_t * ioe, int bteop);
static irqreturn_t hub_eint_handler(int irq, void *arg, struct pt_regs *ep)
{
struct hubdev_info *hubdev_info;
struct ia64_sal_retval ret_stuff;
nasid_t nasid;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
hubdev_info = (struct hubdev_info *)arg;
nasid = hubdev_info->hdi_nasid;
SAL_CALL_NOLOCK(ret_stuff, SN_SAL_HUB_ERROR_INTERRUPT,
(u64) nasid, 0, 0, 0, 0, 0, 0);
if ((int)ret_stuff.v0)
panic("hubii_eint_handler(): Fatal TIO Error");
	if (!(nasid & 1))	/* Not a TIO, handle CRB errors */
		hubiio_crb_error_handler(hubdev_info);
return IRQ_HANDLED;
}
/*
* Free the hub CRB "crbnum" which encountered an error.
 * The assumption is that error handling was successfully done,
 * and we now want to return the CRB back to the hub for normal usage.
 *
 * In order to free the CRB, all that's needed is to de-allocate it.
*
* Assumption:
* No other processor is mucking around with the hub control register.
* So, upper layer has to single thread this.
*/
void hubiio_crb_free(struct hubdev_info *hubdev_info, int crbnum)
{
ii_icrb0_b_u_t icrbb;
/*
* The hardware does NOT clear the mark bit, so it must get cleared
* here to be sure the error is not processed twice.
*/
icrbb.ii_icrb0_b_regval = REMOTE_HUB_L(hubdev_info->hdi_nasid,
IIO_ICRB_B(crbnum));
icrbb.b_mark = 0;
REMOTE_HUB_S(hubdev_info->hdi_nasid, IIO_ICRB_B(crbnum),
icrbb.ii_icrb0_b_regval);
	/*
	 * Deallocate the register and wait till the hub indicates it's done.
	 */
REMOTE_HUB_S(hubdev_info->hdi_nasid, IIO_ICDR, (IIO_ICDR_PND | crbnum));
while (REMOTE_HUB_L(hubdev_info->hdi_nasid, IIO_ICDR) & IIO_ICDR_PND)
udelay(1);
}
/*
* hubiio_crb_error_handler
*
* This routine gets invoked when a hub gets an error
* interrupt. So, the routine is running in interrupt context
* at error interrupt level.
* Action:
 *	It's responsible for identifying ALL the CRBs that are marked
 *	with an error, and for processing them.
 *
 *	If you find a CRB that's marked with an error, map it to the
 *	reason it caused the error, and invoke the appropriate error handler.
*
* XXX Be aware of the information in the context register.
*
* NOTE:
* Use REMOTE_HUB_* macro instead of LOCAL_HUB_* so that the interrupt
* handler can be run on any node. (not necessarily the node
* corresponding to the hub that encountered error).
*/
void hubiio_crb_error_handler(struct hubdev_info *hubdev_info)
{
nasid_t nasid;
ii_icrb0_a_u_t icrba; /* II CRB Register A */
ii_icrb0_b_u_t icrbb; /* II CRB Register B */
ii_icrb0_c_u_t icrbc; /* II CRB Register C */
ii_icrb0_d_u_t icrbd; /* II CRB Register D */
	ii_icrb0_e_u_t icrbe;	/* II CRB Register E */
int i;
int num_errors = 0; /* Num of errors handled */
ioerror_t ioerror;
nasid = hubdev_info->hdi_nasid;
/*
* XXX - Add locking for any recovery actions
*/
/*
* Scan through all CRBs in the Hub, and handle the errors
* in any of the CRBs marked.
*/
for (i = 0; i < IIO_NUM_CRBS; i++) {
/* Check this crb entry to see if it is in error. */
icrbb.ii_icrb0_b_regval = REMOTE_HUB_L(nasid, IIO_ICRB_B(i));
if (icrbb.b_mark == 0) {
continue;
}
icrba.ii_icrb0_a_regval = REMOTE_HUB_L(nasid, IIO_ICRB_A(i));
IOERROR_INIT(&ioerror);
/* read other CRB error registers. */
icrbc.ii_icrb0_c_regval = REMOTE_HUB_L(nasid, IIO_ICRB_C(i));
icrbd.ii_icrb0_d_regval = REMOTE_HUB_L(nasid, IIO_ICRB_D(i));
icrbe.ii_icrb0_e_regval = REMOTE_HUB_L(nasid, IIO_ICRB_E(i));
IOERROR_SETVALUE(&ioerror, errortype, icrbb.b_ecode);
/* Check if this error is due to BTE operation,
* and handle it separately.
*/
if (icrbd.d_bteop ||
((icrbb.b_initiator == IIO_ICRB_INIT_BTE0 ||
icrbb.b_initiator == IIO_ICRB_INIT_BTE1) &&
(icrbb.b_imsgtype == IIO_ICRB_IMSGT_BTE ||
icrbb.b_imsgtype == IIO_ICRB_IMSGT_SN1NET))) {
int bte_num;
if (icrbd.d_bteop)
bte_num = icrbc.c_btenum;
else /* b_initiator bit 2 gives BTE number */
bte_num = (icrbb.b_initiator & 0x4) >> 2;
hubiio_crb_free(hubdev_info, i);
			bte_crb_error_handler(nasid_to_cnodeid(nasid), bte_num,
					      i, &ioerror, icrbd.d_bteop);
num_errors++;
continue;
}
}
}
/*
* Function : hub_error_init
* Purpose : initialize the error handling requirements for a given hub.
* Parameters : cnode, the compact nodeid.
 * Assumptions	: Called only once per hub, either by a local cpu or by a
 *		  remote cpu when this hub is headless (cpuless).
* Returns : None
*/
void hub_error_init(struct hubdev_info *hubdev_info)
{
if (request_irq(SGI_II_ERROR, (void *)hub_eint_handler, SA_SHIRQ,
"SN_hub_error", (void *)hubdev_info))
printk("hub_error_init: Failed to request_irq for 0x%p\n",
hubdev_info);
return;
}
/*
* Function : ice_error_init
* Purpose : initialize the error handling requirements for a given tio.
* Parameters : cnode, the compact nodeid.
* Assumptions : Called only once per tio.
* Returns : None
*/
void ice_error_init(struct hubdev_info *hubdev_info)
{
if (request_irq
(SGI_TIO_ERROR, (void *)hub_eint_handler, SA_SHIRQ, "SN_TIO_error",
(void *)hubdev_info))
printk("ice_error_init: request_irq() error hubdev_info 0x%p\n",
hubdev_info);
return;
}
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/bootmem.h>
#include <asm/sn/types.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/addrs.h>
#include "pci/pcibus_provider_defs.h"
#include "pci/pcidev.h"
#include "pci/pcibr_provider.h"
#include "xtalk/xwidgetdev.h"
#include <asm/sn/geo.h>
#include "xtalk/hubdev.h"
#include <asm/sn/io.h>
#include <asm/sn/simulator.h>
char master_baseio_wid;
nasid_t master_nasid = INVALID_NASID; /* Partition Master */
struct slab_info {
struct hubdev_info hubdev;
};
struct brick {
moduleid_t id; /* Module ID of this module */
struct slab_info slab_info[MAX_SLABS + 1];
};
int sn_ioif_inited = 0; /* SN I/O infrastructure initialized? */
static int
sn_pci_read(struct pci_bus *bus, unsigned int devfn, int where, int size,
u32 * value)
{
return raw_pci_ops->read(pci_domain_nr(bus), bus->number,
devfn, where, size, value);
}
static int
sn_pci_write(struct pci_bus *bus, unsigned int devfn, int where, int size,
u32 value)
{
return raw_pci_ops->write(pci_domain_nr(bus), bus->number,
devfn, where, size, value);
}
struct pci_ops sn_pci_root_ops = {
.read = sn_pci_read,
.write = sn_pci_write,
};
/*
 * Retrieve the DMA Flush List for the given nasid. This list is needed
 * to implement the WAR - Flush DMA data on PIO Reads.
*/
static inline uint64_t
sal_get_widget_dmaflush_list(u64 nasid, u64 widget_num, u64 address)
{
struct ia64_sal_retval ret_stuff;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
SAL_CALL_NOLOCK(ret_stuff,
(u64) SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST,
(u64) nasid, (u64) widget_num, (u64) address, 0, 0, 0,
0);
return ret_stuff.v0;
}
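/*
 * The sal_get_*() wrappers here all follow the same pattern: clear
 * status/v0 in the return struct, issue SAL_CALL_NOLOCK with an
 * SN_SAL_* function code plus a physical address that the PROM fills
 * in, and hand v0 (the PROM's status word) back to the caller.
 */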
/*
* Retrieve the hub device info structure for the given nasid.
*/
static inline uint64_t sal_get_hubdev_info(u64 handle, u64 address)
{
struct ia64_sal_retval ret_stuff;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
SAL_CALL_NOLOCK(ret_stuff,
(u64) SN_SAL_IOIF_GET_HUBDEV_INFO,
(u64) handle, (u64) address, 0, 0, 0, 0, 0);
return ret_stuff.v0;
}
/*
* Retrieve the pci bus information given the bus number.
*/
static inline uint64_t sal_get_pcibus_info(u64 segment, u64 busnum, u64 address)
{
struct ia64_sal_retval ret_stuff;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
SAL_CALL_NOLOCK(ret_stuff,
(u64) SN_SAL_IOIF_GET_PCIBUS_INFO,
(u64) segment, (u64) busnum, (u64) address, 0, 0, 0, 0);
return ret_stuff.v0;
}
/*
* Retrieve the pci device information given the bus and device|function number.
*/
static inline uint64_t
sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev,
u64 sn_irq_info)
{
struct ia64_sal_retval ret_stuff;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
SAL_CALL_NOLOCK(ret_stuff,
(u64) SN_SAL_IOIF_GET_PCIDEV_INFO,
(u64) segment, (u64) bus_number, (u64) devfn,
(u64) pci_dev,
sn_irq_info, 0, 0);
return ret_stuff.v0;
}
/*
 * sn_alloc_pci_sysdata() - This routine allocates a pci_controller
 * structure, which the Linux PCI infrastructure expects as the sysdata
 * for each pci_dev and pci_bus.
*/
static inline struct pci_controller *sn_alloc_pci_sysdata(void)
{
struct pci_controller *pci_sysdata;
pci_sysdata = kmalloc(sizeof(*pci_sysdata), GFP_KERNEL);
if (!pci_sysdata)
BUG();
memset(pci_sysdata, 0, sizeof(*pci_sysdata));
return pci_sysdata;
}
/*
 * sn_fixup_ionodes() - This routine initializes the HUB data structure for
* each node in the system.
*/
static void sn_fixup_ionodes(void)
{
struct sn_flush_device_list *sn_flush_device_list;
struct hubdev_info *hubdev;
uint64_t status;
uint64_t nasid;
int i, widget;
for (i = 0; i < numionodes; i++) {
hubdev = (struct hubdev_info *)(NODEPDA(i)->pdinfo);
nasid = COMPACT_TO_NASID_NODEID(i);
status = sal_get_hubdev_info(nasid, (uint64_t) __pa(hubdev));
if (status)
continue;
for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++)
hubdev->hdi_xwidget_info[widget].xwi_hubinfo = hubdev;
if (!hubdev->hdi_flush_nasid_list.widget_p)
continue;
hubdev->hdi_flush_nasid_list.widget_p =
kmalloc((HUB_WIDGET_ID_MAX + 1) *
sizeof(struct sn_flush_device_list *), GFP_KERNEL);
memset(hubdev->hdi_flush_nasid_list.widget_p, 0x0,
(HUB_WIDGET_ID_MAX + 1) *
sizeof(struct sn_flush_device_list *));
for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) {
sn_flush_device_list = kmalloc(DEV_PER_WIDGET *
sizeof(struct
sn_flush_device_list),
GFP_KERNEL);
memset(sn_flush_device_list, 0x0,
DEV_PER_WIDGET *
sizeof(struct sn_flush_device_list));
status =
sal_get_widget_dmaflush_list(nasid, widget,
(uint64_t)
__pa
(sn_flush_device_list));
if (status) {
kfree(sn_flush_device_list);
continue;
}
hubdev->hdi_flush_nasid_list.widget_p[widget] =
sn_flush_device_list;
}
if (!(i & 1))
hub_error_init(hubdev);
else
ice_error_init(hubdev);
}
}
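/*
 * The parity test above mirrors the convention used elsewhere in the
 * error code: even node numbers are SHub nodes and take
 * hub_error_init(), odd ones are TIO (ICE) nodes and take
 * ice_error_init().
 */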
/*
* sn_pci_fixup_slot() - This routine sets up a slot's resources
* consistent with the Linux PCI abstraction layer. Resources acquired
* from our PCI provider include PIO maps to BAR space and interrupt
* objects.
*/
static void sn_pci_fixup_slot(struct pci_dev *dev)
{
int idx;
int segment = 0;
uint64_t size;
struct sn_irq_info *sn_irq_info;
struct pci_dev *host_pci_dev;
int status = 0;
extern void sn_irq_fixup(struct pci_dev *pci_dev,
struct sn_irq_info *sn_irq_info);
SN_PCIDEV_INFO(dev) = kmalloc(sizeof(struct pcidev_info), GFP_KERNEL);
	if (!SN_PCIDEV_INFO(dev))
BUG(); /* Cannot afford to run out of memory */
sn_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
	if (!sn_irq_info)
BUG(); /* Cannot afford to run out of memory */
/* Call to retrieve pci device information needed by kernel. */
status = sal_get_pcidev_info((u64) segment, (u64) dev->bus->number,
dev->devfn,
(u64) __pa(SN_PCIDEV_INFO(dev)),
(u64) __pa(sn_irq_info));
if (status)
		BUG();	/* Cannot get platform pci device information */
/* Copy over PIO Mapped Addresses */
for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
unsigned long start, end, addr;
if (!SN_PCIDEV_INFO(dev)->pdi_pio_mapped_addr[idx])
continue;
start = dev->resource[idx].start;
end = dev->resource[idx].end;
size = end - start;
addr = SN_PCIDEV_INFO(dev)->pdi_pio_mapped_addr[idx];
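		/*
		 * The PROM returns the PIO address with attribute bits
		 * in the top nibble; the shift pair below strips them
		 * and __IA64_UNCACHED_OFFSET turns the result into an
		 * uncached kernel virtual address for the BAR.
		 */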
addr = ((addr << 4) >> 4) | __IA64_UNCACHED_OFFSET;
dev->resource[idx].start = addr;
dev->resource[idx].end = addr + size;
if (dev->resource[idx].flags & IORESOURCE_IO)
dev->resource[idx].parent = &ioport_resource;
else
dev->resource[idx].parent = &iomem_resource;
}
/* set up host bus linkages */
host_pci_dev =
pci_find_slot(SN_PCIDEV_INFO(dev)->pdi_slot_host_handle >> 32,
SN_PCIDEV_INFO(dev)->
pdi_slot_host_handle & 0xffffffff);
SN_PCIDEV_INFO(dev)->pdi_host_pcidev_info =
SN_PCIDEV_INFO(host_pci_dev);
SN_PCIDEV_INFO(dev)->pdi_linux_pcidev = dev;
SN_PCIDEV_INFO(dev)->pdi_pcibus_info = SN_PCIBUS_BUSSOFT(dev->bus);
/* Only set up IRQ stuff if this device has a host bus context */
if (SN_PCIDEV_BUSSOFT(dev)) {
SN_PCIDEV_INFO(dev)->pdi_sn_irq_info = sn_irq_info;
dev->irq = SN_PCIDEV_INFO(dev)->pdi_sn_irq_info->irq_irq;
sn_irq_fixup(dev, sn_irq_info);
}
}
/*
* sn_pci_fixup_bus() - This routine sets up a bus's resources
* consistent with the Linux PCI abstraction layer.
*/
static void sn_pci_fixup_bus(int segment, int busnum)
{
int status = 0;
int nasid, cnode;
struct pci_bus *bus;
struct pci_controller *controller;
struct pcibus_bussoft *prom_bussoft_ptr;
struct hubdev_info *hubdev_info;
void *provider_soft;
status =
sal_get_pcibus_info((u64) segment, (u64) busnum,
(u64) ia64_tpa(&prom_bussoft_ptr));
if (status > 0) {
return; /* bus # does not exist */
}
prom_bussoft_ptr = __va(prom_bussoft_ptr);
controller = sn_alloc_pci_sysdata();
if (!controller) {
BUG();
}
bus = pci_scan_bus(busnum, &sn_pci_root_ops, controller);
if (bus == NULL) {
return; /* error, or bus already scanned */
}
/*
* Per-provider fixup. Copies the contents from prom to local
* area and links SN_PCIBUS_BUSSOFT().
*
* Note: Provider is responsible for ensuring that prom_bussoft_ptr
* represents an asic-type that it can handle.
*/
if (prom_bussoft_ptr->bs_asic_type == PCIIO_ASIC_TYPE_PPB) {
return; /* no further fixup necessary */
}
provider_soft = pcibr_bus_fixup(prom_bussoft_ptr);
if (provider_soft == NULL) {
return; /* fixup failed or not applicable */
}
/*
* Generic bus fixup goes here. Don't reference prom_bussoft_ptr
* after this point.
*/
PCI_CONTROLLER(bus) = controller;
SN_PCIBUS_BUSSOFT(bus) = provider_soft;
nasid = NASID_GET(SN_PCIBUS_BUSSOFT(bus)->bs_base);
cnode = NASID_TO_COMPACT_NODEID(nasid);
hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
SN_PCIBUS_BUSSOFT(bus)->bs_xwidget_info =
&(hubdev_info->hdi_xwidget_info[SN_PCIBUS_BUSSOFT(bus)->bs_xid]);
}
/*
* Ugly hack to get PCI setup until we have a proper ACPI namespace.
*/
#define PCI_BUSES_TO_SCAN 256
static int __init sn_pci_init(void)
{
int i = 0;
struct pci_dev *pci_dev = NULL;
extern void sn_init_cpei_timer(void);
#ifdef CONFIG_PROC_FS
extern void register_sn_procfs(void);
#endif
if (!ia64_platform_is("sn2") || IS_RUNNING_ON_SIMULATOR())
return 0;
/*
* This is needed to avoid bounce limit checks in the blk layer
*/
ia64_max_iommu_merge_mask = ~PAGE_MASK;
sn_fixup_ionodes();
sn_irq = kmalloc(sizeof(struct sn_irq_info *) * NR_IRQS, GFP_KERNEL);
	if (!sn_irq)
		BUG();	/* Cannot afford to run out of memory. */
memset(sn_irq, 0, sizeof(struct sn_irq_info *) * NR_IRQS);
sn_init_cpei_timer();
#ifdef CONFIG_PROC_FS
register_sn_procfs();
#endif
for (i = 0; i < PCI_BUSES_TO_SCAN; i++) {
sn_pci_fixup_bus(0, i);
}
/*
* Generic Linux PCI Layer has created the pci_bus and pci_dev
	 * structures - time for us to add our SN Platform specific
* information.
*/
while ((pci_dev =
pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pci_dev)) != NULL) {
sn_pci_fixup_slot(pci_dev);
}
sn_ioif_inited = 1; /* sn I/O infrastructure now initialized */
return 0;
}
/*
 * hubdev_init_node() - Creates the HUB data structure and links it to
 * the node's own node-specific data area.
*/
void hubdev_init_node(nodepda_t * npda, cnodeid_t node)
{
struct hubdev_info *hubdev_info;
if (node >= numnodes) /* Headless/memless IO nodes */
hubdev_info =
(struct hubdev_info *)alloc_bootmem_node(NODE_DATA(0),
sizeof(struct
hubdev_info));
else
hubdev_info =
(struct hubdev_info *)alloc_bootmem_node(NODE_DATA(node),
sizeof(struct
hubdev_info));
npda->pdinfo = (void *)hubdev_info;
}
geoid_t
cnodeid_get_geoid(cnodeid_t cnode)
{
struct hubdev_info *hubdev;
hubdev = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
return hubdev->hdi_geoid;
}
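/*
 * sn_pci_init() is registered as a subsys_initcall so that the fixups
 * above run after the core PCI layer is up but before ordinary device
 * driver initcalls start probing.
 */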
subsys_initcall(sn_pci_init);
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/module.h>
#include <asm/io.h>
#include <asm/delay.h>
#include <asm/sn/simulator.h>
#include <asm/sn/pda.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/shub_mmr.h>
/**
* sn_io_addr - convert an in/out port to an i/o address
* @port: port to convert
*
* Legacy in/out instructions are converted to ld/st instructions
* on IA64. This routine will convert a port number into a valid
* SN i/o address. Used by sn_in*() and sn_out*().
*/
void *sn_io_addr(unsigned long port)
{
if (!IS_RUNNING_ON_SIMULATOR()) {
/* On sn2, legacy I/O ports don't point at anything */
if (port < (64 * 1024))
return 0;
return ((void *)(port | __IA64_UNCACHED_OFFSET));
} else {
/* but the simulator uses them... */
unsigned long io_base;
unsigned long addr;
/*
* word align port, but need more than 10 bits
* for accessing registers in bedrock local block
* (so we don't do port&0xfff)
*/
if ((port >= 0x1f0 && port <= 0x1f7) ||
port == 0x3f6 || port == 0x3f7) {
io_base =
(0xc000000fcc000000 |
((unsigned long)get_nasid() << 38));
addr = io_base | ((port >> 2) << 12) | (port & 0xfff);
} else {
addr = __ia64_get_io_port_base() | ((port >> 2) << 2);
}
return (void *)addr;
}
}
EXPORT_SYMBOL(sn_io_addr);
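/*
 * Example (illustrative port numbers): on real hardware a legacy COM1
 * access such as sn_io_addr(0x3f8) falls below 64K and yields 0, while
 * on the simulator the IDE command block ports 0x1f0-0x1f7 are mapped
 * into the junk bus space derived from the local nasid.
 */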
/**
* sn_mmiob - I/O space memory barrier
*
* Acts as a memory mapped I/O barrier for platforms that queue writes to
* I/O space. This ensures that subsequent writes to I/O space arrive after
* all previous writes. For most ia64 platforms, this is a simple
* 'mf.a' instruction. For other platforms, mmiob() may have to read
* a chipset register to ensure ordering.
*
* On SN2, we wait for the PIO_WRITE_STATUS SHub register to clear.
 * See PV 871084 for details about the WAR for the zero value case.
*
*/
void sn_mmiob(void)
{
while ((((volatile unsigned long)(*pda->
pio_write_status_addr)) &
SH_PIO_WRITE_STATUS_0_PENDING_WRITE_COUNT_MASK) !=
SH_PIO_WRITE_STATUS_0_PENDING_WRITE_COUNT_MASK)
udelay(1);
}
EXPORT_SYMBOL(sn_mmiob);
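/*
 * Usage sketch (DEV_CTRL_REG is hypothetical): a driver that must know
 * a control write has left the hub before continuing would pair the
 * write with the barrier:
 *
 *	writel(cmd, mmio_base + DEV_CTRL_REG);
 *	sn_mmiob();
 */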
......@@ -5,97 +5,83 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
 * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
*/
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/cpumask.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/sn/sgi.h>
#include <asm/sn/hcl.h>
#include <asm/sn/types.h>
#include <asm/sn/pci/pciio.h>
#include <asm/sn/pci/pciio_private.h>
#include <asm/sn/pci/pcibr.h>
#include <asm/sn/pci/pcibr_private.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/io.h>
#include <asm/sn/intr.h>
#include <asm/sn/addrs.h>
#include <asm/sn/driver.h>
#include <asm/sn/arch.h>
#include <asm/sn/pda.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/sn/sn2/shub_mmr.h>
#include "xtalk/xwidgetdev.h"
#include "pci/pcibus_provider_defs.h"
#include "pci/pcidev.h"
#include "pci/pcibr_provider.h"
#include <asm/sn/shub_mmr.h>
#include <asm/sn/sn_sal.h>
static void force_interrupt(int irq);
extern void pcibr_force_interrupt(struct sn_irq_info *sn_irq_info);
extern int sn_force_interrupt_flag;
struct irq_desc * sn_irq_desc(unsigned int irq);
extern cpumask_t __cacheline_aligned pending_irq_cpumask[NR_IRQS];
static void register_intr_pda(struct sn_irq_info *sn_irq_info);
static void unregister_intr_pda(struct sn_irq_info *sn_irq_info);
extern int sn_ioif_inited;
struct sn_irq_info **sn_irq;
static inline uint64_t sn_intr_alloc(nasid_t local_nasid, int local_widget,
u64 sn_irq_info,
int req_irq, nasid_t req_nasid,
int req_slice)
{
struct ia64_sal_retval ret_stuff;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
(u64) SAL_INTR_ALLOC, (u64) local_nasid,
(u64) local_widget, (u64) sn_irq_info, (u64) req_irq,
(u64) req_nasid, (u64) req_slice);
return ret_stuff.status;
}
static inline void sn_intr_free(nasid_t local_nasid, int local_widget,
				struct sn_irq_info *sn_irq_info)
{
	struct ia64_sal_retval ret_stuff;
	ret_stuff.status = 0;
	ret_stuff.v0 = 0;
	SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
			(u64) SAL_INTR_FREE, (u64) local_nasid,
			(u64) local_widget, (u64) sn_irq_info->irq_irq,
			(u64) sn_irq_info->irq_cookie, 0, 0);
}
static unsigned int sn_startup_irq(unsigned int irq)
{
	return 0;
}
static void sn_shutdown_irq(unsigned int irq)
{
}
static void sn_disable_irq(unsigned int irq)
{
}
static void sn_enable_irq(unsigned int irq)
{
}
static void sn_ack_irq(unsigned int irq)
{
	uint64_t event_occurred, mask = 0;
	int nasid;
	irq = irq & 0xff;
	nasid = smp_physical_node_id();
	event_occurred =
	    HUB_L((uint64_t *) GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED));
if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
mask |= (1 << SH_EVENT_OCCURRED_UART_INT_SHFT);
}
......@@ -108,63 +94,102 @@ sn_ack_irq(unsigned int irq)
if (event_occurred & SH_EVENT_OCCURRED_II_INT1_MASK) {
mask |= (1 << SH_EVENT_OCCURRED_II_INT1_SHFT);
}
	HUB_S((uint64_t *) GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED_ALIAS),
	      mask);
__set_bit(irq, (volatile void *)pda->sn_in_service_ivecs);
	move_irq(irq);
}
static void sn_end_irq(unsigned int irq)
{
	int nasid;
	int ivec;
	uint64_t event_occurred;
ivec = irq & 0xff;
if (ivec == SGI_UART_VECTOR) {
nasid = smp_physical_node_id();
		event_occurred = HUB_L((uint64_t *) GLOBAL_MMR_ADDR
				       (nasid, SH_EVENT_OCCURRED));
/* If the UART bit is set here, we may have received an
* interrupt from the UART that the driver missed. To
* make sure, we IPI ourselves to force us to look again.
*/
if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
			platform_send_ipi(smp_processor_id(), SGI_UART_VECTOR,
					  IA64_IPI_DM_INT, 0);
}
}
__clear_bit(ivec, (volatile void *)pda->sn_in_service_ivecs);
	if (sn_force_interrupt_flag)
		force_interrupt(irq);
}
static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
{
	struct sn_irq_info *sn_irq_info = sn_irq[irq];
int cpuid, cpuphys;
nasid_t t_nasid; /* nasid to target */
int t_slice; /* slice to target */
int status;
cpuid = first_cpu(mask);
cpuphys = cpu_physical_id(cpuid);
t_nasid = cpu_physical_id_to_nasid(cpuphys);
t_slice = cpu_physical_id_to_slice(cpuphys);
while (sn_irq_info) {
int local_widget;
struct sn_irq_info *new_sn_irq_info;
uint64_t bridge = (uint64_t) sn_irq_info->irq_bridge;
nasid_t local_nasid = NASID_GET(bridge);
if (!bridge)
break; /* irq is not a bridge interrupt */
new_sn_irq_info = kmalloc(sizeof(*new_sn_irq_info), GFP_KERNEL);
if (!new_sn_irq_info)
break;
if (local_nasid & 1)
local_widget = TIO_SWIN_WIDGETNUM(bridge);
else
local_widget = SWIN_WIDGETNUM(bridge);
/* Free the old PROM sn_irq_info structure */
sn_intr_free(local_nasid, local_widget, sn_irq_info);
/* allocate a new PROM sn_irq_info struct */
status = sn_intr_alloc(local_nasid, local_widget,
__pa(new_sn_irq_info), irq, t_nasid,
t_slice);
if (status == 0) {
/* Update kernels sn_irq_info with new target info */
unregister_intr_pda(sn_irq_info);
sn_irq_info->irq_cpuid = cpuid;
sn_irq_info->irq_nasid = t_nasid;
sn_irq_info->irq_slice = t_slice;
sn_irq_info->irq_xtalkaddr =
new_sn_irq_info->irq_xtalkaddr;
sn_irq_info->irq_cookie = new_sn_irq_info->irq_cookie;
register_intr_pda(sn_irq_info);
if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type)) {
pcibr_change_devices_irq(sn_irq_info);
}
sn_irq_info = sn_irq_info->irq_next;
set_irq_affinity_info((irq & 0xff), cpuphys, 0);
} else {
			break;	/* sn_intr_alloc failed */
}
kfree(new_sn_irq_info);
}
}
struct hw_interrupt_type irq_type_sn = {
"SN hub",
......@@ -172,208 +197,243 @@ struct hw_interrupt_type irq_type_sn = {
sn_shutdown_irq,
sn_enable_irq,
sn_disable_irq,
	sn_ack_irq,
sn_end_irq,
sn_set_affinity_irq
};
struct irq_desc *sn_irq_desc(unsigned int irq)
{
	irq = SN_IVEC_FROM_IRQ(irq);
	return (_irq_desc + irq);
}
u8 sn_irq_to_vector(unsigned int irq)
{
return irq;
}
unsigned int sn_local_vector_to_irq(u8 vector)
{
return (CPU_VECTOR_TO_IRQ(smp_processor_id(), vector));
}
void sn_irq_init(void)
{
int i;
irq_desc_t *base_desc = _irq_desc;
	for (i = 0; i < NR_IRQS; i++) {
if (base_desc[i].handler == &no_irq_type) {
base_desc[i].handler = &irq_type_sn;
}
}
}
static void register_intr_pda(struct sn_irq_info *sn_irq_info)
{
	int irq = sn_irq_info->irq_irq;
	int cpu = sn_irq_info->irq_cpuid;
	if (pdacpu(cpu)->sn_last_irq < irq) {
		pdacpu(cpu)->sn_last_irq = irq;
	}
	if (pdacpu(cpu)->sn_first_irq == 0 || pdacpu(cpu)->sn_first_irq > irq) {
		pdacpu(cpu)->sn_first_irq = irq;
	}
}
static void unregister_intr_pda(struct sn_irq_info *sn_irq_info)
{
	int irq = sn_irq_info->irq_irq;
	int cpu = sn_irq_info->irq_cpuid;
	struct sn_irq_info *tmp_irq_info;
	int i, foundmatch;
	if (pdacpu(cpu)->sn_last_irq == irq) {
		foundmatch = 0;
		for (i = pdacpu(cpu)->sn_last_irq - 1; i; i--) {
			tmp_irq_info = sn_irq[i];
			while (tmp_irq_info) {
				if (tmp_irq_info->irq_cpuid == cpu) {
					foundmatch++;
					break;
				}
				tmp_irq_info = tmp_irq_info->irq_next;
			}
			if (foundmatch) {
				break;
			}
		}
		pdacpu(cpu)->sn_last_irq = i;
	}
	if (pdacpu(cpu)->sn_first_irq == irq) {
		foundmatch = 0;
		for (i = pdacpu(cpu)->sn_first_irq + 1; i < NR_IRQS; i++) {
			tmp_irq_info = sn_irq[i];
			while (tmp_irq_info) {
				if (tmp_irq_info->irq_cpuid == cpu) {
					foundmatch++;
					break;
				}
				tmp_irq_info = tmp_irq_info->irq_next;
			}
			if (foundmatch) {
				break;
			}
		}
		pdacpu(cpu)->sn_first_irq = ((i == NR_IRQS) ? 0 : i);
	}
}
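/*
 * register_intr_pda()/unregister_intr_pda() maintain sn_first_irq and
 * sn_last_irq in the per-cpu pda as a window over the sn_irq[] table,
 * so that sn_lb_int_war_check() below only scans vectors that can
 * actually target this cpu.
 */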
struct sn_irq_info *sn_irq_alloc(nasid_t local_nasid, int local_widget, int irq,
nasid_t nasid, int slice)
{
struct sn_irq_info *sn_irq_info;
int status;
sn_irq_info = kmalloc(sizeof(*sn_irq_info), GFP_KERNEL);
if (sn_irq_info == NULL)
return NULL;
memset(sn_irq_info, 0x0, sizeof(*sn_irq_info));
status =
sn_intr_alloc(local_nasid, local_widget, __pa(sn_irq_info), irq,
nasid, slice);
if (status) {
kfree(sn_irq_info);
return NULL;
} else {
return sn_irq_info;
}
}
void sn_irq_free(struct sn_irq_info *sn_irq_info)
{
uint64_t bridge = (uint64_t) sn_irq_info->irq_bridge;
nasid_t local_nasid = NASID_GET(bridge);
int local_widget;
if (local_nasid & 1) /* tio check */
local_widget = TIO_SWIN_WIDGETNUM(bridge);
else
local_widget = SWIN_WIDGETNUM(bridge);
sn_intr_free(local_nasid, local_widget, sn_irq_info);
kfree(sn_irq_info);
}
void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
{
	nasid_t nasid = sn_irq_info->irq_nasid;
	int slice = sn_irq_info->irq_slice;
	int cpu = nasid_slice_to_cpuid(nasid, slice);
sn_irq_info->irq_cpuid = cpu;
sn_irq_info->irq_pciioinfo = SN_PCIDEV_INFO(pci_dev);
/* link it into the sn_irq[irq] list */
sn_irq_info->irq_next = sn_irq[sn_irq_info->irq_irq];
sn_irq[sn_irq_info->irq_irq] = sn_irq_info;
(void)register_intr_pda(sn_irq_info);
}
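/*
 * sn_irq_fixup() is invoked from sn_pci_fixup_slot() once the PROM has
 * filled in the sn_irq_info; new entries are pushed onto the head of
 * the per-vector sn_irq[] list.
 */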
static void force_interrupt(int irq)
{
	struct sn_irq_info *sn_irq_info;
	if (!sn_ioif_inited)
		return;
	sn_irq_info = sn_irq[irq];
	while (sn_irq_info) {
		if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type) &&
		    (sn_irq_info->irq_bridge != NULL)) {
			pcibr_force_interrupt(sn_irq_info);
		}
		sn_irq_info = sn_irq_info->irq_next;
}
}
/*
 * Check for lost interrupts. If the PIC int_status reg. says that
 * an interrupt has been sent, but not handled, and the interrupt
 * is not pending in either the cpu irr regs or in the soft irr regs,
 * and the interrupt is not in service, then the interrupt may have
 * been lost. Force an interrupt on that pin. It is possible that
 * the interrupt is in flight, so we may generate a spurious interrupt,
 * but we should never miss a real lost interrupt.
 */
static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info)
{
	uint64_t regval;
	int irr_reg_num;
	int irr_bit;
	uint64_t irr_reg;
struct pcidev_info *pcidev_info;
struct pcibus_info *pcibus_info;
pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
if (!pcidev_info)
return;
pcibus_info =
(struct pcibus_info *)pcidev_info->pdi_host_pcidev_info->
pdi_pcibus_info;
	regval = pcireg_intr_status_get(pcibus_info);
irr_reg_num = irq_to_vector(irq) / 64;
irr_bit = irq_to_vector(irq) % 64;
switch (irr_reg_num) {
	case 0:
		irr_reg = ia64_getreg(_IA64_REG_CR_IRR0);
		break;
	case 1:
		irr_reg = ia64_getreg(_IA64_REG_CR_IRR1);
		break;
	case 2:
		irr_reg = ia64_getreg(_IA64_REG_CR_IRR2);
		break;
	case 3:
		irr_reg = ia64_getreg(_IA64_REG_CR_IRR3);
		break;
}
	if (!test_bit(irr_bit, &irr_reg)) {
		if (!test_bit(irq, pda->sn_soft_irr)) {
			if (!test_bit(irq, pda->sn_in_service_ivecs)) {
regval &= 0xff;
				if (sn_irq_info->irq_int_bit & regval &
				    sn_irq_info->irq_last_intr) {
					regval &=
					    ~(sn_irq_info->
					      irq_int_bit & regval);
					pcibr_force_interrupt(sn_irq_info);
}
}
}
}
	sn_irq_info->irq_last_intr = regval;
}
void sn_lb_int_war_check(void)
{
	int i;
	if (!sn_ioif_inited || pda->sn_first_irq == 0)
		return;
	for (i = pda->sn_first_irq; i <= pda->sn_last_irq; i++) {
		struct sn_irq_info *sn_irq_info = sn_irq[i];
while (sn_irq_info) {
/* Only call for PCI bridges that are fully initialized. */
if (IS_PCI_BRIDGE_ASIC(sn_irq_info->irq_bridge_type) &&
(sn_irq_info->irq_bridge != NULL)) {
sn_check_intr(i, sn_irq_info);
}
sn_irq_info = sn_irq_info->irq_next;
}
}
}
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <asm/sn/types.h>
#include <asm/sn/module.h>
#include <asm/sn/l1.h>
char brick_types[MAX_BRICK_TYPES + 1] = "cri.xdpn%#=vo^kjbf890123456789...";
/*
* Format a module id for printing.
*
* There are three possible formats:
*
* MODULE_FORMAT_BRIEF is the brief 6-character format, including
* the actual brick-type as recorded in the
* moduleid_t, eg. 002c15 for a C-brick, or
* 101#17 for a PX-brick.
*
 *	  MODULE_FORMAT_LONG	is the hwgraph format, eg. rack/002/bay/15
 *				or rack/101/bay/17 (note that the brick
 *				type does not appear in this format).
*
* MODULE_FORMAT_LCD is like MODULE_FORMAT_BRIEF, except that it
* ensures that the module id provided appears
* exactly as it would on the LCD display of
* the corresponding brick, eg. still 002c15
* for a C-brick, but 101p17 for a PX-brick.
*
* maule (9/13/04): Removed top-level check for (fmt == MODULE_FORMAT_LCD)
* making MODULE_FORMAT_LCD equivalent to MODULE_FORMAT_BRIEF. It was
* decided that all callers should assume the returned string should be what
* is displayed on the brick L1 LCD.
*/
void
format_module_id(char *buffer, moduleid_t m, int fmt)
{
int rack, position;
unsigned char brickchar;
rack = MODULE_GET_RACK(m);
brickchar = MODULE_GET_BTCHAR(m);
/* Be sure we use the same brick type character as displayed
* on the brick's LCD
*/
switch (brickchar)
{
case L1_BRICKTYPE_GA:
case L1_BRICKTYPE_OPUS_TIO:
brickchar = L1_BRICKTYPE_C;
break;
case L1_BRICKTYPE_PX:
case L1_BRICKTYPE_PE:
case L1_BRICKTYPE_PA:
case L1_BRICKTYPE_SA: /* we can move this to the "I's" later
* if that makes more sense
*/
brickchar = L1_BRICKTYPE_P;
break;
case L1_BRICKTYPE_IX:
case L1_BRICKTYPE_IA:
brickchar = L1_BRICKTYPE_I;
break;
}
position = MODULE_GET_BPOS(m);
if ((fmt == MODULE_FORMAT_BRIEF) || (fmt == MODULE_FORMAT_LCD)) {
/* Brief module number format, eg. 002c15 */
/* Decompress the rack number */
*buffer++ = '0' + RACK_GET_CLASS(rack);
*buffer++ = '0' + RACK_GET_GROUP(rack);
*buffer++ = '0' + RACK_GET_NUM(rack);
/* Add the brick type */
*buffer++ = brickchar;
}
else if (fmt == MODULE_FORMAT_LONG) {
/* Fuller hwgraph format, eg. rack/002/bay/15 */
strcpy(buffer, "rack" "/"); buffer += strlen(buffer);
*buffer++ = '0' + RACK_GET_CLASS(rack);
*buffer++ = '0' + RACK_GET_GROUP(rack);
*buffer++ = '0' + RACK_GET_NUM(rack);
strcpy(buffer, "/" "bay" "/"); buffer += strlen(buffer);
}
/* Add the bay position, using at least two digits */
if (position < 10)
*buffer++ = '0';
sprintf(buffer, "%d", position);
}
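/*
 * Usage sketch (hypothetical moduleid_t value m encoding rack 002,
 * brick type 'c', bay 15):
 *
 *	char buf[16];
 *	format_module_id(buf, m, MODULE_FORMAT_BRIEF);	-> "002c15"
 *	format_module_id(buf, m, MODULE_FORMAT_LONG);	-> "rack/002/bay/15"
 */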
......@@ -10,13 +10,10 @@
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <asm/sn/sgi.h>
#include <asm/mca.h>
#include <asm/sal.h>
#include <asm/sn/sn_sal.h>
/*
* Interval for calling SAL to poll for errors that do NOT cause error
* interrupts. SAL will raise a CPEI if any errors are present that
......@@ -24,7 +21,6 @@
*/
#define CPEI_INTERVAL (5*HZ)
struct timer_list sn_cpei_timer;
void sn_init_cpei_timer(void);
......@@ -42,8 +38,7 @@ static u64 *sn_oemdata_size, sn_oemdata_bufsize;
* info for platform errors. buf is appended to sn_oemdata, resizing as
* required.
*/
static int print_hook(const char *fmt, ...)
{
char buf[400];
int len;
......@@ -55,7 +50,8 @@ print_hook(const char *fmt, ...)
while (*sn_oemdata_size + len + 1 > sn_oemdata_bufsize) {
u8 *newbuf = vmalloc(sn_oemdata_bufsize += 1000);
if (!newbuf) {
printk(KERN_ERR "%s: unable to extend sn_oemdata\n", __FUNCTION__);
printk(KERN_ERR "%s: unable to extend sn_oemdata\n",
__FUNCTION__);
return 0;
}
memcpy(newbuf, *sn_oemdata, *sn_oemdata_size);
......@@ -67,9 +63,7 @@ print_hook(const char *fmt, ...)
return 0;
}
static void sn_cpei_handler(int irq, void *devid, struct pt_regs *regs)
{
/*
* this function's sole purpose is to call SAL when we receive
......@@ -82,16 +76,13 @@ sn_cpei_handler(int irq, void *devid, struct pt_regs *regs)
ia64_sn_plat_cpei_handler();
}
static void sn_cpei_timer_handler(unsigned long dummy)
{
sn_cpei_handler(-1, NULL, NULL);
mod_timer(&sn_cpei_timer, jiffies + CPEI_INTERVAL);
}
void sn_init_cpei_timer(void)
{
init_timer(&sn_cpei_timer);
sn_cpei_timer.expires = jiffies + CPEI_INTERVAL;
......@@ -100,9 +91,11 @@ sn_init_cpei_timer(void)
}
static int
sn_platform_plat_specific_err_print(const u8 * sect_header, u8 ** oemdata,
				    u64 * oemdata_size)
{
	sal_log_plat_specific_err_info_t *psei =
	    (sal_log_plat_specific_err_info_t *) sect_header;
if (!psei->valid.oem_data)
return 0;
down(&sn_oemdata_mutex);
......@@ -117,15 +110,17 @@ sn_platform_plat_specific_err_print(const u8 *sect_header, u8 **oemdata, u64 *oe
/* Callback when userspace salinfo wants to decode oem data via the platform
* kernel and/or prom.
*/
int sn_salinfo_platform_oemdata(const u8 * sect_header, u8 ** oemdata,
				u64 * oemdata_size)
{
efi_guid_t guid = *(efi_guid_t *)sect_header;
efi_guid_t guid = *(efi_guid_t *) sect_header;
*oemdata_size = 0;
vfree(*oemdata);
*oemdata = NULL;
if (efi_guidcmp(guid, SAL_PLAT_SPECIFIC_ERR_SECT_GUID) == 0 ||
efi_guidcmp(guid, SAL_PLAT_MEM_DEV_ERR_SECT_GUID) == 0)
		return sn_platform_plat_specific_err_print(sect_header, oemdata,
							   oemdata_size);
return 0;
}
......
......@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
 * Copyright (C) 1999,2001-2004 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/config.h>
......@@ -34,47 +34,54 @@
#include <asm/machvec.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/sn/sgi.h>
#include <asm/sn/io.h>
#include <asm/sn/arch.h>
#include <asm/sn/addrs.h>
#include <asm/sn/pda.h>
#include <asm/sn/nodepda.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/sn_private.h>
#include <asm/sn/simulator.h>
#include "shub.h"
#include <asm/sn/leds.h>
#include <asm/sn/bte.h>
#include <asm/sn/shub_mmr.h>
#include <asm/sn/clksupport.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/sn2/shub.h>
#include <asm/sn/geo.h>
#include "xtalk/xwidgetdev.h"
#include "xtalk/hubdev.h"
#include <asm/sn/klconfig.h>
DEFINE_PER_CPU(struct pda_s, pda_percpu);
#define MAX_PHYS_MEMORY	(1UL << 49)	/* 512 TB */
lboard_t *root_lboard[MAX_COMPACT_NODES];
extern void bte_init_node(nodepda_t *, cnodeid_t);
extern void sn_timer_init(void);
extern unsigned long last_time_offset;
extern void init_platform_hubinfo(nodepda_t **nodepdaindr);
extern void (*ia64_mark_idle) (int);
extern void snidle(int);
extern unsigned char acpi_kbd_controller_present;
unsigned long sn_rtc_cycles_per_second;
EXPORT_SYMBOL(sn_rtc_cycles_per_second);
partid_t sn_partid = -1;
EXPORT_SYMBOL(sn_partid);
char sn_system_serial_number_string[128];
EXPORT_SYMBOL(sn_system_serial_number_string);
u64 sn_partition_serial_number;
EXPORT_SYMBOL(sn_partition_serial_number);
short physical_node_map[MAX_PHYSNODE_ID];
EXPORT_SYMBOL(physical_node_map);
int numionodes;
/*
* This is the address of the RRegs in the HSpace of the global
* master. It is used by a hack in serial.c (serial_[in|out],
......@@ -88,11 +95,7 @@ u64 master_node_bedrock_address;
static void sn_init_pdas(char **);
static void scan_for_ionodes(void);
static nodepda_t *nodepdaindr[MAX_COMPACT_NODES];
/*
* The format of "screen_info" is strange, and due to early i386-setup
......@@ -100,14 +103,14 @@ irqpda_t *irqpdaindr;
* VGA color display.
*/
struct screen_info sn_screen_info = {
	.orig_x = 0,
	.orig_y = 0,
	.orig_video_mode = 3,
	.orig_video_cols = 80,
	.orig_video_ega_bx = 3,
	.orig_video_lines = 25,
	.orig_video_isVGA = 1,
	.orig_video_points = 16
};
/*
......@@ -119,9 +122,9 @@ struct screen_info sn_screen_info = {
* is sufficient (the IDE driver will autodetect the drive geometry).
*/
#ifdef CONFIG_IA64_GENERIC
extern char drive_info[4 * 16];
#else
char drive_info[4 * 16];
#endif
/*
......@@ -133,8 +136,7 @@ char drive_info[4*16];
* may not be initialized yet.
*/
static int __init pxm_to_nasid(int pxm)
{
int i;
int nid;
......@@ -147,6 +149,7 @@ pxm_to_nasid(int pxm)
}
return -1;
}
/**
* early_sn_setup - early setup routine for SN platforms
*
......@@ -154,16 +157,15 @@ pxm_to_nasid(int pxm)
* for bringup. See start_kernel() in init/main.c.
*/
void __init early_sn_setup(void)
{
void ia64_sal_handler_init(void *entry_point, void *gpval);
efi_system_table_t *efi_systab;
efi_config_table_t *config_tables;
struct ia64_sal_systab *sal_systab;
struct ia64_sal_desc_entry_point *ep;
char *p;
int i;
/*
	 * Parse enough of the SAL tables to locate the SAL entry point. Since console
......@@ -172,16 +174,20 @@ early_sn_setup(void)
* This code duplicates some of the ACPI table parsing that is in efi.c & sal.c.
	 * Any changes to those files may have to be made here as well.
*/
	efi_systab = (efi_system_table_t *) __va(ia64_boot_param->efi_systab);
config_tables = __va(efi_systab->tables);
for (i = 0; i < efi_systab->nr_tables; i++) {
		if (efi_guidcmp(config_tables[i].guid, SAL_SYSTEM_TABLE_GUID) ==
		    0) {
sal_systab = __va(config_tables[i].table);
			p = (char *)(sal_systab + 1);
for (i = 0; i < sal_systab->entry_count; i++) {
if (*p == SAL_DESC_ENTRY_POINT) {
					ep = (struct ia64_sal_desc_entry_point
					      *)p;
					ia64_sal_handler_init(__va
							      (ep->sal_proc),
							      __va(ep->gp));
break;
}
p += SAL_DESC_SIZE(*p);
......@@ -189,9 +195,12 @@ early_sn_setup(void)
}
}
	if (IS_RUNNING_ON_SIMULATOR()) {
master_node_bedrock_address =
(u64) REMOTE_HUB(get_nasid(), SH_JUNK_BUS_UART0);
printk(KERN_DEBUG
"early_sn_setup: setting master_node_bedrock_address to 0x%lx\n",
master_node_bedrock_address);
}
}
......@@ -199,54 +208,31 @@ extern int platform_intr_list[];
extern nasid_t master_nasid;
static int shub_1_1_found __initdata;
/*
* sn_check_for_wars
*
* Set flag for enabling shub specific wars
*/
static inline int __init is_shub_1_1(int nasid)
{
unsigned long id;
	int rev;
id = REMOTE_HUB_L(nasid, SH_SHUB_ID);
	rev = (id & SH_SHUB_ID_REVISION_MASK) >> SH_SHUB_ID_REVISION_SHFT;
return rev <= 2;
}
static void __init sn_check_for_wars(void)
{
	int cnode;
	for (cnode = 0; cnode < numnodes; cnode++)
if (is_shub_1_1(cnodeid_to_nasid(cnode)))
shub_1_1_found = 1;
}
/**
* sn_set_error_handling_features - Tell the SN prom how to handle certain
* error types.
*/
static void __init
sn_set_error_handling_features(void)
{
u64 ret;
u64 sn_ehf_bits[7]; /* see ia64_sn_set_error_handling_features */
memset(sn_ehf_bits, 0, sizeof(sn_ehf_bits));
#define EHF(x) __set_bit(SN_SAL_EHF_ ## x, sn_ehf_bits)
EHF(MCA_SLV_TO_OS_INIT_SLV);
EHF(NO_RZ_TLBC);
	/* Uncomment once Jesse's code goes in - EHF(NO_RZ_IO_READ); */
#undef EHF
ret = ia64_sn_set_error_handling_features(sn_ehf_bits);
if (ret)
printk(KERN_ERR "%s: failed, return code %ld\n", __FUNCTION__, ret);
}
/**
* sn_setup - SN platform setup routine
* @cmdline_p: kernel command line
......@@ -255,15 +241,12 @@ sn_set_error_handling_features(void)
* the RTC frequency (via a SAL call), initializing secondary CPUs, and
* setting up per-node data areas. The console is also initialized here.
*/
void __init sn_setup(char **cmdline_p)
{
long status, ticks_per_sec, drift;
int pxm;
int major = sn_sal_rev_major(), minor = sn_sal_rev_minor();
	extern void sn_cpu_init(void);
/*
* If the generic code has enabled vga console support - lets
......@@ -278,17 +261,17 @@ sn_setup(char **cmdline_p)
conswitchp = &dummy_con;
#else
conswitchp = NULL;
#endif				/* CONFIG_DUMMY_CONSOLE */
}
#endif				/* def(CONFIG_VT) && def(CONFIG_VGA_CONSOLE) */
MAX_DMA_ADDRESS = PAGE_OFFSET + MAX_PHYS_MEMORY;
memset(physical_node_map, -1, sizeof(physical_node_map));
	for (pxm = 0; pxm < MAX_PXM_DOMAINS; pxm++)
		if (pxm_to_nid_map[pxm] != -1)
			physical_node_map[pxm_to_nasid(pxm)] =
			    pxm_to_nid_map[pxm];
/*
* Old PROMs do not provide an ACPI FADT. Disable legacy keyboard
......@@ -315,31 +298,28 @@ sn_setup(char **cmdline_p)
}
master_nasid = get_nasid();
	status =
	    ia64_sal_freq_base(SAL_FREQ_BASE_REALTIME_CLOCK, &ticks_per_sec,
			       &drift);
if (status != 0 || ticks_per_sec < 100000) {
printk(KERN_WARNING "unable to determine platform RTC clock frequency, guessing.\n");
printk(KERN_WARNING
"unable to determine platform RTC clock frequency, guessing.\n");
/* PROM gives wrong value for clock freq. so guess */
		sn_rtc_cycles_per_second = 1000000000000UL / 30000UL;
	} else
sn_rtc_cycles_per_second = ticks_per_sec;
platform_intr_list[ACPI_INTERRUPT_CPEI] = IA64_CPE_VECTOR;
	if (IS_RUNNING_ON_SIMULATOR()) {
master_node_bedrock_address =
(u64) REMOTE_HUB(get_nasid(), SH_JUNK_BUS_UART0);
printk(KERN_DEBUG
"sn_setup: setting master_node_bedrock_address to 0x%lx\n",
master_node_bedrock_address);
}
/* Tell the prom how to handle certain error types */
sn_set_error_handling_features();
/*
* we set the default root device to /dev/hda
* to make simulation easy
......@@ -359,12 +339,6 @@ sn_setup(char **cmdline_p)
*/
sn_cpu_init();
/*
* Setup hubinfo stuff. Has to happen AFTER sn_cpu_init(),
* because it uses the cnode to nasid tables.
*/
init_platform_hubinfo(nodepdaindr);
#ifdef CONFIG_SMP
init_smp_config();
#endif
......@@ -378,32 +352,43 @@ sn_setup(char **cmdline_p)
*
* One time setup for Node Data Area. Called by sn_setup().
*/
void __init sn_init_pdas(char **cmdline_p)
{
	cnodeid_t cnode;
	memset(pda->cnodeid_to_nasid_table, -1,
	       sizeof(pda->cnodeid_to_nasid_table));
	for (cnode = 0; cnode < numnodes; cnode++)
		pda->cnodeid_to_nasid_table[cnode] =
		    pxm_to_nasid(nid_to_pxm_map[cnode]);
numionodes = numnodes;
scan_for_ionodes();
	/*
	 * Allocate & initialize the nodepda for each node.
	 */
	for (cnode = 0; cnode < numnodes; cnode++) {
		nodepdaindr[cnode] =
		    alloc_bootmem_node(NODE_DATA(cnode), sizeof(nodepda_t));
		memset(nodepdaindr[cnode], 0, sizeof(nodepda_t));
	}
	/*
	 * Allocate & initialize nodepda for TIOs. For now, put them on node 0.
	 */
	for (cnode = numnodes; cnode < numionodes; cnode++) {
		nodepdaindr[cnode] =
		    alloc_bootmem_node(NODE_DATA(0), sizeof(nodepda_t));
		memset(nodepdaindr[cnode], 0, sizeof(nodepda_t));
	}
	/*
	 * Now copy the array of nodepda pointers to each nodepda.
	 */
	for (cnode = 0; cnode < numionodes; cnode++)
		memcpy(nodepdaindr[cnode]->pernode_pdaindr, nodepdaindr,
		       sizeof(nodepdaindr));
/*
* Set up IO related platform-dependent nodepda fields.
......@@ -411,8 +396,15 @@ sn_init_pdas(char **cmdline_p)
* in nodepda.
*/
for (cnode = 0; cnode < numnodes; cnode++) {
init_platform_nodepda(nodepdaindr[cnode], cnode);
		bte_init_node(nodepdaindr[cnode], cnode);
}
/*
* Initialize the per node hubdev. This includes IO Nodes and
* headless/memless nodes.
*/
for (cnode = 0; cnode < numionodes; cnode++) {
hubdev_init_node(nodepdaindr[cnode], cnode);
}
}
......@@ -425,15 +417,14 @@ sn_init_pdas(char **cmdline_p)
* Also sets up a few fields in the nodepda. Also known as
* platform_cpu_init() by the ia64 machvec code.
*/
void __init sn_cpu_init(void)
{
int cpuid;
int cpuphyid;
int nasid;
int slice;
int cnode;
static int wars_have_been_checked;
/*
* The boot cpu makes this call again after platform initialization is
......@@ -450,15 +441,17 @@ sn_cpu_init(void)
memset(pda, 0, sizeof(pda));
pda->p_nodepda = nodepdaindr[cnode];
	pda->led_address =
	    (typeof(pda->led_address)) (LED0 + (slice << LED_CPU_SHIFT));
pda->led_state = LED_ALWAYS_SET;
	pda->hb_count = HZ / 2;
pda->hb_state = 0;
pda->idle_flag = 0;
	if (cpuid != 0) {
		memcpy(pda->cnodeid_to_nasid_table,
		       pdacpu(0)->cnodeid_to_nasid_table,
		       sizeof(pda->cnodeid_to_nasid_table));
}
/*
......@@ -472,30 +465,27 @@ sn_cpu_init(void)
wars_have_been_checked = 1;
}
pda->shub_1_1_found = shub_1_1_found;
pda->pio_write_status_addr = (volatile unsigned long *)
	    LOCAL_MMR_ADDR((slice <
			    2 ? SH_PIO_WRITE_STATUS_0 : SH_PIO_WRITE_STATUS_1));
pda->mem_write_status_addr = (volatile u64 *)
	    LOCAL_MMR_ADDR((slice <
			    2 ? SH_MEMORY_WRITE_STATUS_0 :
			    SH_MEMORY_WRITE_STATUS_1));
if (local_node_data->active_cpu_count++ == 0) {
		int buddy_nasid;
		buddy_nasid =
		    cnodeid_to_nasid(numa_node_id() ==
				     numnodes - 1 ? 0 : numa_node_id() + 1);
		pda->pio_shub_war_cam_addr =
		    (volatile unsigned long *)GLOBAL_MMR_ADDR(nasid,
							       SH_PI_CAM_CONTROL);
}
}
......@@ -504,48 +494,53 @@ sn_cpu_init(void)
* physical_node_map and the pda and increment numionodes.
*/
static void __init scan_for_ionodes(void)
{
int nasid = 0;
lboard_t *brd;
/* Setup ionodes with memory */
	for (nasid = 0; nasid < MAX_PHYSNODE_ID; nasid += 2) {
u64 klgraph_header;
cnodeid_t cnodeid;
if (physical_node_map[nasid] == -1)
continue;
klgraph_header = cnodeid = -1;
klgraph_header = ia64_sn_get_klconfig_addr(nasid);
if (klgraph_header <= 0) {
if (IS_RUNNING_ON_SIMULATOR())
continue;
BUG(); /* All nodes must have klconfig tables! */
}
cnodeid = nasid_to_cnodeid(nasid);
root_lboard[cnodeid] = (lboard_t *)
NODE_OFFSET_TO_LBOARD(nasid, ((kl_config_hdr_t *)klgraph_header)->ch_board_info);
}
/* Scan headless/memless IO Nodes. */
for (nasid = 0; nasid < MAX_PHYSNODE_ID; nasid += 2) {
/* if there's no nasid, don't try to read the klconfig on the node */
if (physical_node_map[nasid] == -1)
	continue;
brd = find_lboard_any((lboard_t *)root_lboard[nasid_to_cnodeid(nasid)], KLTYPE_SNIA);
if (brd) {
brd = KLCF_NEXT_ANY(brd); /* Skip this node's lboard */
if (!brd)
continue;
}
brd = find_lboard_any(brd, KLTYPE_SNIA);
while (brd) {
pda->cnodeid_to_nasid_table[numionodes] = brd->brd_nasid;
physical_node_map[brd->brd_nasid] = numionodes;
root_lboard[numionodes] = brd;
numionodes++;
......@@ -556,4 +551,27 @@ scan_for_ionodes(void)
brd = find_lboard_any(brd, KLTYPE_SNIA);
}
}
/* Scan for TIO nodes. */
for (nasid = 0; nasid < MAX_PHYSNODE_ID; nasid += 2) {
/* if there's no nasid, don't try to read the klconfig on the node */
if (physical_node_map[nasid] == -1)
continue;
brd = find_lboard_any((lboard_t *)
root_lboard[nasid_to_cnodeid(nasid)],
KLTYPE_TIO);
while (brd) {
pda->cnodeid_to_nasid_table[numionodes] =
brd->brd_nasid;
physical_node_map[brd->brd_nasid] = numionodes;
root_lboard[numionodes] = brd;
numionodes++;
brd = KLCF_NEXT_ANY(brd);
if (!brd)
break;
brd = find_lboard_any(brd, KLTYPE_TIO);
}
}
}
......@@ -9,7 +9,7 @@
* we wrap the inlines from asm/ia64/sn/sn2/io.h here.
*/
#include <asm/sn/io.h>
#ifdef CONFIG_IA64_GENERIC
......@@ -28,88 +28,74 @@
#undef __sn_readl_relaxed
#undef __sn_readq_relaxed
unsigned int __sn_inb(unsigned long port)
{
	return ___sn_inb(port);
}

unsigned int __sn_inw(unsigned long port)
{
	return ___sn_inw(port);
}

unsigned int __sn_inl(unsigned long port)
{
	return ___sn_inl(port);
}

void __sn_outb(unsigned char val, unsigned long port)
{
	___sn_outb(val, port);
}

void __sn_outw(unsigned short val, unsigned long port)
{
	___sn_outw(val, port);
}

void __sn_outl(unsigned int val, unsigned long port)
{
	___sn_outl(val, port);
}

unsigned char __sn_readb(void *addr)
{
	return ___sn_readb(addr);
}

unsigned short __sn_readw(void *addr)
{
	return ___sn_readw(addr);
}

unsigned int __sn_readl(void *addr)
{
	return ___sn_readl(addr);
}

unsigned long __sn_readq(void *addr)
{
	return ___sn_readq(addr);
}

unsigned char __sn_readb_relaxed(void *addr)
{
	return ___sn_readb_relaxed(addr);
}

unsigned short __sn_readw_relaxed(void *addr)
{
	return ___sn_readw_relaxed(addr);
}

unsigned int __sn_readl_relaxed(void *addr)
{
	return ___sn_readl_relaxed(addr);
}

unsigned long __sn_readq_relaxed(void *addr)
{
	return ___sn_readq_relaxed(addr);
}
#endif
......@@ -16,7 +16,7 @@
#include <asm/io.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/addrs.h>
MODULE_DESCRIPTION("PROM version reporting for /proc");
MODULE_AUTHOR("Chad Talbott");
......@@ -55,30 +55,29 @@ MODULE_LICENSE("GPL");
((unsigned) ((q) >> FIT_TYPE_SHIFT) & FIT_TYPE_MASK)
struct fit_type_map_t {
unsigned char type;
const char *name;
};
static const struct fit_type_map_t fit_entry_types[] = {
{FIT_ENTRY_FIT_HEADER, "FIT Header"},
{FIT_ENTRY_PAL_A_GEN, "Generic PAL_A"},
{FIT_ENTRY_PAL_A_PROC, "Processor-specific PAL_A"},
{FIT_ENTRY_PAL_A, "PAL_A"},
{FIT_ENTRY_PAL_B, "PAL_B"},
{FIT_ENTRY_SAL_A, "SAL_A"},
{FIT_ENTRY_SAL_B, "SAL_B"},
{FIT_ENTRY_SALRUNTIME, "SAL runtime"},
{FIT_ENTRY_EFI, "EFI"},
{FIT_ENTRY_VMLINUX, "Embedded Linux"},
{FIT_ENTRY_FPSWA, "Embedded FPSWA"},
{FIT_ENTRY_UNUSED, "Unused"},
{0xff, "Error"},
};
static const char *fit_type_name(unsigned char type)
{
struct fit_type_map_t const *mapp;
for (mapp = fit_entry_types; mapp->type != 0xff; mapp++)
if (type == mapp->type)
......@@ -115,7 +114,7 @@ fit_type_name(unsigned char type)
#define FIT_SIGNATURE 0x2020205f5449465ful
/* Sub-regions determined by bits in Node Offset */
#define LB_PROM_SPACE 0x0000000700000000ul /* Local LB PROM */
/* Offset of PROM banner pointers in SAL A and SAL B */
#define SAL_A_BANNER_OFFSET (1 * 16)
......@@ -125,11 +124,10 @@ fit_type_name(unsigned char type)
#define FW_BASE 0x00000000FF000000
#define FW_TOP 0x0000000100000000
static unsigned long convert_fw_addr(nasid_t nasid, unsigned long addr)
{
/* snag just the node-relative offset */
addr &= ~0ul >> (63 - 35);
/* the pointer to SAL A is relative to IA-64 compatibility
* space. However, the PROM is mapped at a different offset
* in MMR space (both local and global)
......@@ -138,10 +136,9 @@ convert_fw_addr(nasid_t nasid, unsigned long addr)
return GLOBAL_MMR_ADDR(nasid, addr);
}
static int valid_fw_addr(unsigned long addr)
{
addr &= ~(1ul << 63); /* Clear cached/uncached bit */
return (addr >= FW_BASE && addr < FW_TOP);
}
......@@ -216,8 +213,7 @@ get_fit_entry(unsigned long nasid, int index, unsigned long *fentry,
/*
* These two routines display the FIT table for each node.
*/
static int dump_fit_entry(char *page, unsigned long *fentry)
{
unsigned type;
......@@ -289,11 +285,14 @@ static int
proc_calc_metrics(char *page, char **start, off_t off, int count, int *eof,
int len)
{
if (len <= off + count)
*eof = 1;
*start = page + off;
len -= off;
if (len > count)
len = count;
if (len < 0)
len = 0;
return len;
}
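
/*
 * Illustrative sketch (not part of this change): a typical 2.6-era
 * read_proc callback formats its output into @page, then lets
 * proc_calc_metrics() clamp the (offset, count) window for the VFS.
 * The entry name and contents below are hypothetical.
 */
static int example_read_proc(char *page, char **start, off_t off,
			     int count, int *eof, void *data)
{
	int len = sprintf(page, "example value\n");

	return proc_calc_metrics(page, start, off, count, eof, len);
}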
......@@ -334,8 +333,7 @@ static struct proc_dir_entry *sgi_prominfo_entry;
#define NODE_NAME_LEN 11
int __init prominfo_init(void)
{
struct proc_dir_entry **entp;
struct proc_dir_entry *p;
......@@ -372,16 +370,14 @@ prominfo_init(void)
return 0;
}
void __exit prominfo_exit(void)
{
struct proc_dir_entry **entp;
unsigned cnodeid;
char name[NODE_NAME_LEN];
for (cnodeid = 0, entp = proc_entries;
cnodeid < numnodes; cnodeid++, entp++) {
remove_proc_entry("fit", *entp);
remove_proc_entry("version", *entp);
sprintf(name, "node%d", cnodeid);
......
......@@ -3,10 +3,10 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
*/
#include <asm/sn/shub_mmr.h>
#define ZEROVAL 0x3f // "zero" value for outstanding PIO requests
#define DEADLOCKBIT SH_PIO_WRITE_STATUS_0_WRITE_DEADLOCK_SHFT
......
......@@ -21,7 +21,6 @@
#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/sn/sgi.h>
#include <asm/sal.h>
#include <asm/system.h>
#include <asm/delay.h>
......@@ -34,41 +33,36 @@
#include <asm/current.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/addrs.h>
#include <asm/sn/shub_mmr.h>
#include <asm/sn/nodepda.h>
#include <asm/sn/rw_mmr.h>
void sn2_ptc_deadlock_recovery(unsigned long data0, unsigned long data1);
static spinlock_t sn2_global_ptc_lock __cacheline_aligned = SPIN_LOCK_UNLOCKED;
static unsigned long sn2_ptc_deadlock_count;
static inline unsigned long wait_piowc(void)
{
volatile unsigned long *piows;
unsigned long ws;
piows = pda->pio_write_status_addr;
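/*
 * Spin, fencing on each pass, until the pending-write-count field of
 * the PIO write status MMR reads back as all ones; callers treat that
 * value as "no PIO writes from this cpu still outstanding".
 */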
do {
ia64_mfa();
} while (((ws = *piows) & SH_PIO_WRITE_STATUS_0_PENDING_WRITE_COUNT_MASK) !=
	 SH_PIO_WRITE_STATUS_0_PENDING_WRITE_COUNT_MASK);
return ws;
}
void sn_tlb_migrate_finish(struct mm_struct *mm)
{
if (mm == current->mm)
flush_tlb_mm(mm);
}
/**
* sn2_global_tlb_purge - globally purge translation cache of virtual address range
* @start: start of virtual address range
......@@ -90,13 +84,14 @@ sn_tlb_migrate_finish(struct mm_struct *mm)
*/
void
sn2_global_tlb_purge(unsigned long start, unsigned long end,
unsigned long nbits)
{
int i, cnode, mynasid, cpu, lcpu = 0, nasid, flushed = 0;
volatile unsigned long *ptc0, *ptc1;
unsigned long flags = 0, data0, data1;
struct mm_struct *mm = current->active_mm;
short nasids[NR_NODES], nix;
DECLARE_BITMAP(nodes_flushed, NR_NODES);
bitmap_zero(nodes_flushed, NR_NODES);
......@@ -114,7 +109,7 @@ sn2_global_tlb_purge (unsigned long start, unsigned long end, unsigned long nbit
if (likely(i == 1 && lcpu == smp_processor_id())) {
do {
ia64_ptcl(start, nbits << 2);
start += (1UL << nbits);
} while (start < end);
ia64_srlz_i();
......@@ -128,42 +123,42 @@ sn2_global_tlb_purge (unsigned long start, unsigned long end, unsigned long nbit
return;
}
nix = 0;
for (cnode = find_first_bit(&nodes_flushed, NR_NODES); cnode < NR_NODES;
cnode = find_next_bit(&nodes_flushed, NR_NODES, ++cnode))
nasids[nix++] = cnodeid_to_nasid(cnode);
data0 = (1UL << SH_PTC_0_A_SHFT) |
(nbits << SH_PTC_0_PS_SHFT) |
((ia64_get_rr(start) >> 8) << SH_PTC_0_RID_SHFT) |
(1UL << SH_PTC_0_START_SHFT);
ptc0 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH_PTC_0);
ptc1 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH_PTC_1);
mynasid = smp_physical_node_id();
spin_lock_irqsave(&sn2_global_ptc_lock, flags);
do {
data1 = start | (1UL << SH_PTC_1_START_SHFT);
for (i = 0; i < nix; i++) {
nasid = nasids[i];
if (likely(nasid == mynasid)) {
ia64_ptcga(start, nbits << 2);
ia64_srlz_i();
} else {
ptc0 = CHANGE_NASID(nasid, ptc0);
ptc1 = CHANGE_NASID(nasid, ptc1);
pio_atomic_phys_write_mmrs(ptc0, data0, ptc1, data1);
flushed = 1;
}
}
if (flushed && (wait_piowc() & SH_PIO_WRITE_STATUS_0_WRITE_DEADLOCK_MASK)) {
sn2_ptc_deadlock_recovery(data0, data1);
}
......@@ -183,18 +178,18 @@ sn2_global_tlb_purge (unsigned long start, unsigned long end, unsigned long nbit
* TLB flush transaction. The recovery sequence is somewhat tricky & is
* coded in assembly language.
*/
void sn2_ptc_deadlock_recovery(unsigned long data0, unsigned long data1)
{
extern void sn2_ptc_deadlock_recovery_core(long *, long, long *, long,
long *);
int cnode, mycnode, nasid;
long *ptc0, *ptc1, *piows;
sn2_ptc_deadlock_count++;
ptc0 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH_PTC_0);
ptc1 = (long *)GLOBAL_MMR_PHYS_ADDR(0, SH_PTC_1);
piows = (long *)pda->pio_write_status_addr;
mycnode = numa_node_id();
......@@ -224,34 +219,34 @@ sn2_ptc_deadlock_recovery(unsigned long data0, unsigned long data1)
* %IA64_IPI_DM_NMI - pend an NMI
* %IA64_IPI_DM_INIT - pend an INIT interrupt
*/
void sn_send_IPI_phys(long physid, int vector, int delivery_mode)
{
long nasid, slice, val;
unsigned long flags = 0;
volatile long *p;
nasid = cpu_physical_id_to_nasid(physid);
slice = cpu_physical_id_to_slice(physid);
p = (long *)GLOBAL_MMR_PHYS_ADDR(nasid, SH_IPI_INT);
val = (1UL << SH_IPI_INT_SEND_SHFT) |
(physid << SH_IPI_INT_PID_SHFT) |
((long)delivery_mode << SH_IPI_INT_TYPE_SHFT) |
((long)vector << SH_IPI_INT_IDX_SHFT) |
(0x000feeUL << SH_IPI_INT_BASE_SHFT);
mb();
if (enable_shub_wars_1_1()) {
spin_lock_irqsave(&sn2_global_ptc_lock, flags);
}
pio_phys_write_mmr(p, val);
if (enable_shub_wars_1_1()) {
wait_piowc();
spin_unlock_irqrestore(&sn2_global_ptc_lock, flags);
}
}
EXPORT_SYMBOL(sn_send_IPI_phys);
/**
......@@ -270,10 +265,9 @@ EXPORT_SYMBOL(sn_send_IPI_phys);
* %IA64_IPI_DM_NMI - pend an NMI
* %IA64_IPI_DM_INIT - pend an INIT interrupt
*/
void sn2_send_IPI(int cpuid, int vector, int delivery_mode, int redirect)
{
long physid;
physid = cpu_physical_id(cpuid);
......
......@@ -38,6 +38,8 @@
#include <asm/uaccess.h>
#include <asm-ia64/sal.h>
#include <asm-ia64/sn/sn_sal.h>
#include <asm/sn/module.h>
#include <asm/sn/geo.h>
#include <asm-ia64/sn/sn2/sn_hwperf.h>
static void *sn_hwperf_salheap = NULL;
......@@ -45,6 +47,7 @@ static int sn_hwperf_obj_cnt = 0;
static nasid_t sn_hwperf_master_nasid = INVALID_NASID;
static int sn_hwperf_init(void);
static DECLARE_MUTEX(sn_hwperf_init_mutex);
extern int numionodes;
static int sn_hwperf_enum_objects(int *nobj, struct sn_hwperf_object_info **ret)
{
......@@ -80,21 +83,26 @@ static int sn_hwperf_enum_objects(int *nobj, struct sn_hwperf_object_info **ret)
static int sn_hwperf_geoid_to_cnode(char *location)
{
int cnode;
geoid_t geoid;
moduleid_t module_id;
char type;
int rack, slot, slab;
int this_rack, this_slot, this_slab;
if (sscanf(location, "%03dc%02d#%d", &mod, &slot, &slab) != 3)
if (sscanf(location, "%03d%c%02d#%d", &rack, &type, &slot, &slab) != 4)
return -1;
for (cnode = 0; cnode < numionodes; cnode++) {
geoid = cnodeid_get_geoid(cnode);
module_id = geo_module(geoid);
this_rack = MODULE_GET_RACK(module_id);
this_slot = MODULE_GET_BPOS(module_id);
this_slab = geo_slab(geoid);
if (rack == this_rack && slot == this_slot && slab == this_slab)
break;
}
return cnode < numionodes ? cnode : -1;
}
static int sn_hwperf_obj_to_cnode(struct sn_hwperf_object_info * obj)
......@@ -202,7 +210,7 @@ static int sn_topology_show(struct seq_file *s, void *d)
seq_putc(s, '\n');
else {
seq_printf(s, ", nasid 0x%x", cnodeid_to_nasid(ordinal));
for (i=0; i < numionodes; i++) {
seq_printf(s, i ? ":%d" : ", dist %d",
node_distance(ordinal, i));
}
......@@ -473,7 +481,7 @@ sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, u64 arg)
case SN_HWPERF_GET_NODE_NASID:
if (a.sz != sizeof(u64) ||
(node = a.arg) < 0 || node >= numionodes) {
r = -EINVAL;
goto error;
}
......
......@@ -11,7 +11,6 @@
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <asm/sn/sgi.h>
#include <asm/sn/sn_sal.h>
static int partition_id_show(struct seq_file *s, void *p)
......
......@@ -15,16 +15,16 @@
#include <asm/hw_irq.h>
#include <asm/system.h>
#include "shub.h"
#include <asm/sn/leds.h>
#include <asm/sn/shub_mmr.h>
#include <asm/sn/clksupport.h>
extern unsigned long sn_rtc_cycles_per_second;
static struct time_interpolator sn2_interpolator;
void __init sn_timer_init(void)
{
sn2_interpolator.frequency = sn_rtc_cycles_per_second;
sn2_interpolator.drift = -1; /* unknown */
......
......@@ -34,26 +34,27 @@
#include <linux/interrupt.h>
#include <asm/sn/pda.h>
#include "shub.h"
#include <asm/sn/leds.h>
extern void sn_lb_int_war_check(void);
extern irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs);
#define SN_LB_INT_WAR_INTERVAL 100
void sn_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
/* LED blinking */
if (!pda->hb_count--) {
pda->hb_count = HZ / 2;
set_led_bits(pda->hb_state ^= LED_CPU_HEARTBEAT, LED_CPU_HEARTBEAT);
}
if (enable_shub_wars_1_1()) {
/* Bugfix code for SHUB 1.1 */
if (pda->pio_shub_war_cam_addr)
*pda->pio_shub_war_cam_addr = 0x8000000000000010UL;
}
if (pda->sn_lb_int_war_ticks == 0)
sn_lb_int_war_check();
......
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
#
# Makefile for the sn pci general routines.
obj-y := pci_dma.o pcibr/
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000,2002-2004 Silicon Graphics, Inc. All rights reserved.
*
* Routines for PCI DMA mapping. See Documentation/DMA-mapping.txt for
* a description of how these routines should be used.
*/
#include <linux/module.h>
#include <asm/sn/sn_sal.h>
#include "pci/pcibus_provider_defs.h"
#include "pci/pcidev.h"
#include "pci/pcibr_provider.h"
void sn_pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
int direction);
/**
* sn_pci_alloc_consistent - allocate memory for coherent DMA
* @hwdev: device to allocate for
* @size: size of the region
* @dma_handle: DMA (bus) address
*
* pci_alloc_consistent() returns a pointer to a memory region suitable for
* coherent DMA traffic to/from a PCI device. On SN platforms, this means
* that @dma_handle will have the %PCIIO_DMA_CMD flag set.
*
* This interface is usually used for "command" streams (e.g. the command
* queue for a SCSI controller). See Documentation/DMA-mapping.txt for
* more information.
*
* Also known as platform_pci_alloc_consistent() by the IA64 machvec code.
*/
void *sn_pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
dma_addr_t * dma_handle)
{
void *cpuaddr;
unsigned long phys_addr;
struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
struct pcibus_bussoft *bussoft = SN_PCIDEV_BUSSOFT(hwdev);
if (bussoft == NULL) {
return NULL;
}
if (!IS_PCI_BRIDGE_ASIC(bussoft->bs_asic_type)) {
return NULL; /* unsupported asic type */
}
/*
* Allocate the memory.
* FIXME: We should be doing alloc_pages_node for the node closest
* to the PCI device.
*/
if (!(cpuaddr = (void *)__get_free_pages(GFP_ATOMIC, get_order(size))))
return NULL;
memset(cpuaddr, 0x0, size);
/* physical addr. of the memory we just got */
phys_addr = __pa(cpuaddr);
/*
* 64 bit address translations should never fail.
* 32 bit translations can fail if there are insufficient mapping
* resources.
*/
*dma_handle = pcibr_dma_map(pcidev_info, phys_addr, size, SN_PCIDMA_CONSISTENT);
if (!*dma_handle) {
printk(KERN_ERR
"sn_pci_alloc_consistent(): failed *dma_handle = 0x%lx hwdev->dev.coherent_dma_mask = 0x%lx \n",
*dma_handle, hwdev->dev.coherent_dma_mask);
free_pages((unsigned long)cpuaddr, get_order(size));
return NULL;
}
return cpuaddr;
}
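
/*
 * Hedged usage sketch (not part of this file): how a hypothetical SN2
 * driver might obtain a coherent command ring with the interface above.
 * The function name, size, and device usage are illustrative only.
 */
static int example_alloc_cmd_ring(struct pci_dev *pdev)
{
	dma_addr_t ring_dma;
	void *ring;

	ring = sn_pci_alloc_consistent(pdev, 4096, &ring_dma);
	if (!ring)
		return -ENOMEM;	/* no memory or no translation resources */
	/* ... program ring_dma into the device's base register ... */
	sn_pci_free_consistent(pdev, 4096, ring, ring_dma);
	return 0;
}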
/**
* sn_pci_free_consistent - free memory associated with coherent DMAable region
* @hwdev: device to free for
* @size: size to free
* @vaddr: kernel virtual address to free
* @dma_handle: DMA address associated with this region
*
* Frees the memory allocated by pci_alloc_consistent(). Also known
* as platform_pci_free_consistent() by the IA64 machvec code.
*/
void
sn_pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr,
dma_addr_t dma_handle)
{
struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
struct pcibus_bussoft *bussoft = SN_PCIDEV_BUSSOFT(hwdev);
if (!bussoft) {
return;
}
pcibr_dma_unmap(pcidev_info, dma_handle, 0);
free_pages((unsigned long)vaddr, get_order(size));
}
/**
* sn_pci_map_sg - map a scatter-gather list for DMA
* @hwdev: device to map for
* @sg: scatterlist to map
* @nents: number of entries
* @direction: direction of the DMA transaction
*
* Maps each entry of @sg for DMA. Also known as platform_pci_map_sg() by the
* IA64 machvec code.
*/
int
sn_pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
int direction)
{
int i;
unsigned long phys_addr;
struct scatterlist *saved_sg = sg;
struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
struct pcibus_bussoft *bussoft = SN_PCIDEV_BUSSOFT(hwdev);
/* can't go anywhere w/o a direction in life */
if (direction == PCI_DMA_NONE)
BUG();
if (!bussoft) {
return 0;
}
/* SN cannot support DMA addresses smaller than 32 bits. */
if (hwdev->dma_mask < 0x7fffffff)
return 0;
/*
* Setup a DMA address for each entry in the
* scatterlist.
*/
for (i = 0; i < nents; i++, sg++) {
phys_addr =
__pa((unsigned long)page_address(sg->page) + sg->offset);
sg->dma_address = pcibr_dma_map(pcidev_info, phys_addr, sg->length, 0);
if (!sg->dma_address) {
printk(KERN_ERR "sn_pci_map_sg: Unable to allocate "
"anymore page map entries.\n");
/*
* We will need to free all previously allocated entries.
*/
if (i > 0) {
sn_pci_unmap_sg(hwdev, saved_sg, i, direction);
}
return (0);
}
sg->dma_length = sg->length;
}
return nents;
}
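
/*
 * Hedged usage sketch: mapping a scatterlist, then undoing it. Names
 * are illustrative; in real 2.6-era drivers the list usually comes from
 * the block or network layer rather than being built by hand.
 */
static int example_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int n)
{
	int mapped = sn_pci_map_sg(pdev, sg, n, PCI_DMA_TODEVICE);

	if (mapped == 0)
		return -EIO;	/* ran out of page map (ATE) entries */
	/* ... hand sg[i].dma_address / sg[i].dma_length to the device ... */
	sn_pci_unmap_sg(pdev, sg, mapped, PCI_DMA_TODEVICE);
	return 0;
}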
/**
* sn_pci_unmap_sg - unmap a scatter-gather list
* @hwdev: device to unmap
* @sg: scatterlist to unmap
* @nents: number of scatterlist entries
* @direction: DMA direction
*
* Unmap a set of streaming mode DMA translations. Again, cpu read rules
* concerning calls here are the same as for pci_unmap_single() below. Also
* known as platform_pci_unmap_sg() by the IA64 machvec code.
*/
void
sn_pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
int direction)
{
int i;
struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
struct pcibus_bussoft *bussoft = SN_PCIDEV_BUSSOFT(hwdev);
/* can't go anywhere w/o a direction in life */
if (direction == PCI_DMA_NONE)
BUG();
if (!bussoft) {
return;
}
for (i = 0; i < nents; i++, sg++) {
pcibr_dma_unmap(pcidev_info, sg->dma_address, direction);
sg->dma_address = (dma_addr_t) NULL;
sg->dma_length = 0;
}
}
/**
* sn_pci_map_single - map a single region for DMA
* @hwdev: device to map for
* @ptr: kernel virtual address of the region to map
* @size: size of the region
* @direction: DMA direction
*
* Map the region pointed to by @ptr for DMA and return the
* DMA address. Also known as platform_pci_map_single() by
* the IA64 machvec code.
*
* We map this to the one step pcibr_dmamap_trans interface rather than
* the two step pcibr_dmamap_alloc/pcibr_dmamap_addr because we have
* no way of saving the dmamap handle from the alloc to later free
* (which is pretty much unacceptable).
*
* TODO: simplify our interface;
* get rid of dev_desc and vhdl (seems redundant given a pci_dev);
* figure out how to save dmamap handle so can use two step.
*/
dma_addr_t
sn_pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
{
dma_addr_t dma_addr;
unsigned long phys_addr;
struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
struct pcibus_bussoft *bussoft = SN_PCIDEV_BUSSOFT(hwdev);
if (direction == PCI_DMA_NONE)
BUG();
if (bussoft == NULL) {
return 0;
}
if (!IS_PCI_BRIDGE_ASIC(bussoft->bs_asic_type)) {
return 0; /* unsupported asic type */
}
/* SN cannot support DMA addresses smaller than 32 bits. */
if (hwdev->dma_mask < 0x7fffffff)
return 0;
/*
* Call our dmamap interface
*/
phys_addr = __pa(ptr);
dma_addr = pcibr_dma_map(pcidev_info, phys_addr, size, 0);
if (!dma_addr) {
printk(KERN_ERR "pci_map_single: Unable to allocate anymore "
"page map entries.\n");
return 0;
}
return dma_addr;
}
/**
* sn_pci_unmap_single - unmap a single region used for DMA
* @hwdev: device to unmap
* @dma_addr: DMA address to unmap
* @size: size of region
* @direction: DMA direction
*
* Unmaps the region previously mapped with sn_pci_map_single(). Also
* known as platform_pci_unmap_single() by the IA64 machvec code.
*/
void
sn_pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size,
int direction)
{
struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
struct pcibus_bussoft *bussoft = SN_PCIDEV_BUSSOFT(hwdev);
if (direction == PCI_DMA_NONE)
BUG();
if (bussoft == NULL) {
return;
}
if (!IS_PCI_BRIDGE_ASIC(bussoft->bs_asic_type)) {
return; /* unsupported asic type */
}
pcibr_dma_unmap(pcidev_info, dma_addr, direction);
}
/**
* sn_pci_dma_supported - test a DMA mask
* @hwdev: device to test
* @mask: DMA mask to test
*
* Return whether the given PCI device DMA address mask can be supported
* properly. For example, if your device can only drive the low 24-bits
* during PCI bus mastering, then you would pass 0x00ffffff as the mask to
* this function. Of course, SN only supports devices that have 32 or more
* address bits when using the PMU. We could theoretically support <32 bit
* cards using direct mapping, but we'll worry about that later--on the off
* chance that someone actually wants to use such a card.
*/
int sn_pci_dma_supported(struct pci_dev *hwdev, u64 mask)
{
if (mask < 0x7fffffff)
return 0;
return 1;
}
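
/*
 * Illustrative values (assuming the semantics above): a 24-bit
 * ISA-style mask is rejected, a full 32-bit mask is accepted.
 *
 *   sn_pci_dma_supported(pdev, 0x00ffffff) == 0
 *   sn_pci_dma_supported(pdev, 0xffffffff) == 1
 */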
/*
* New generic DMA routines just wrap sn2 PCI routines until we
* support other bus types (if ever).
*/
int sn_dma_supported(struct device *dev, u64 mask)
{
BUG_ON(dev->bus != &pci_bus_type);
return sn_pci_dma_supported(to_pci_dev(dev), mask);
}
EXPORT_SYMBOL(sn_dma_supported);
int sn_dma_set_mask(struct device *dev, u64 dma_mask)
{
BUG_ON(dev->bus != &pci_bus_type);
if (!sn_dma_supported(dev, dma_mask))
return 0;
*dev->dma_mask = dma_mask;
return 1;
}
EXPORT_SYMBOL(sn_dma_set_mask);
void *sn_dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t * dma_handle, int flag)
{
BUG_ON(dev->bus != &pci_bus_type);
return sn_pci_alloc_consistent(to_pci_dev(dev), size, dma_handle);
}
EXPORT_SYMBOL(sn_dma_alloc_coherent);
void
sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_handle)
{
BUG_ON(dev->bus != &pci_bus_type);
sn_pci_free_consistent(to_pci_dev(dev), size, cpu_addr, dma_handle);
}
EXPORT_SYMBOL(sn_dma_free_coherent);
dma_addr_t
sn_dma_map_single(struct device *dev, void *cpu_addr, size_t size,
int direction)
{
BUG_ON(dev->bus != &pci_bus_type);
return sn_pci_map_single(to_pci_dev(dev), cpu_addr, size,
(int)direction);
}
EXPORT_SYMBOL(sn_dma_map_single);
void
sn_dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
int direction)
{
BUG_ON(dev->bus != &pci_bus_type);
sn_pci_unmap_single(to_pci_dev(dev), dma_addr, size, (int)direction);
}
EXPORT_SYMBOL(sn_dma_unmap_single);
dma_addr_t
sn_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, int direction)
{
BUG_ON(dev->bus != &pci_bus_type);
return pci_map_page(to_pci_dev(dev), page, offset, size,
(int)direction);
}
EXPORT_SYMBOL(sn_dma_map_page);
void
sn_dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
int direction)
{
BUG_ON(dev->bus != &pci_bus_type);
pci_unmap_page(to_pci_dev(dev), dma_address, size, (int)direction);
}
EXPORT_SYMBOL(sn_dma_unmap_page);
int
sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
int direction)
{
BUG_ON(dev->bus != &pci_bus_type);
return sn_pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction);
}
EXPORT_SYMBOL(sn_dma_map_sg);
void
sn_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
int direction)
{
BUG_ON(dev->bus != &pci_bus_type);
sn_pci_unmap_sg(to_pci_dev(dev), sg, nhwentries, (int)direction);
}
EXPORT_SYMBOL(sn_dma_unmap_sg);
void
sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
size_t size, int direction)
{
BUG_ON(dev->bus != &pci_bus_type);
}
EXPORT_SYMBOL(sn_dma_sync_single_for_cpu);
void
sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
size_t size, int direction)
{
BUG_ON(dev->bus != &pci_bus_type);
}
EXPORT_SYMBOL(sn_dma_sync_single_for_device);
void
sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
int direction)
{
BUG_ON(dev->bus != &pci_bus_type);
}
EXPORT_SYMBOL(sn_dma_sync_sg_for_cpu);
void
sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
int nelems, int direction)
{
BUG_ON(dev->bus != &pci_bus_type);
}
int sn_dma_mapping_error(dma_addr_t dma_addr)
{
return 0;
}
EXPORT_SYMBOL(sn_dma_sync_sg_for_device);
EXPORT_SYMBOL(sn_pci_unmap_single);
EXPORT_SYMBOL(sn_pci_map_single);
EXPORT_SYMBOL(sn_pci_map_sg);
EXPORT_SYMBOL(sn_pci_unmap_sg);
EXPORT_SYMBOL(sn_pci_alloc_consistent);
EXPORT_SYMBOL(sn_pci_free_consistent);
EXPORT_SYMBOL(sn_pci_dma_supported);
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 2002-2004 Silicon Graphics, Inc. All Rights Reserved.
#
# Makefile for the sn2 io routines.
obj-y += pcibr_dma.o pcibr_reg.o \
pcibr_ate.o pcibr_provider.o
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001-2004 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <asm/sn/sn_sal.h>
#include "pci/pcibus_provider_defs.h"
#include "pci/pcidev.h"
#include "pci/pcibr_provider.h"
int pcibr_invalidate_ate = 0; /* by default don't invalidate ATE on free */
/*
* mark_ate: Mark the ate as either free or inuse.
*/
static void mark_ate(struct ate_resource *ate_resource, int start, int number,
uint64_t value)
{
uint64_t *ate = ate_resource->ate;
int index;
int length = 0;
for (index = start; length < number; index++, length++)
ate[index] = value;
}
/*
* find_free_ate: Find the first free ate index starting from the given
* index for the desired consecutive count.
*/
static int find_free_ate(struct ate_resource *ate_resource, int start,
int count)
{
uint64_t *ate = ate_resource->ate;
int index;
int start_free;
for (index = start; index < ate_resource->num_ate;) {
if (!ate[index]) {
int i;
int free;
free = 0;
start_free = index; /* Found start free ate */
for (i = start_free; i < ate_resource->num_ate; i++) {
if (!ate[i]) { /* This is free */
if (++free == count)
return start_free;
} else {
index = i + 1;
break;
}
}
if (i >= ate_resource->num_ate)
return -1; /* remaining entries free, but fewer than count */
} else
index++; /* Try next ate */
}
return -1;
}
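
/*
 * Standalone illustration (user-space, not kernel code): the same
 * first-fit idea over a toy ATE array, showing how a request for N
 * consecutive free (zero) slots is satisfied.
 */
#include <stdio.h>

int main(void)
{
	unsigned long ate[8] = { 1, 0, 0, 1, 0, 0, 0, 1 };	/* 1 = in use */
	int count = 3, free = 0, start = -1, i;

	for (i = 0; i < 8; i++) {
		if (!ate[i]) {
			if (free++ == 0)
				start = i;
			if (free == count)
				break;
		} else
			free = 0;
	}
	if (free == count)
		printf("%d free slots at index %d\n", count, start);	/* index 4 */
	return 0;
}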
/*
* free_ate_resource: Free the requested number of ATEs.
*/
static inline void free_ate_resource(struct ate_resource *ate_resource,
int start)
{
mark_ate(ate_resource, start, ate_resource->ate[start], 0);
if ((ate_resource->lowest_free_index > start) ||
(ate_resource->lowest_free_index < 0))
ate_resource->lowest_free_index = start;
}
/*
* alloc_ate_resource: Allocate the requested number of ATEs.
*/
static inline int alloc_ate_resource(struct ate_resource *ate_resource,
int ate_needed)
{
int start_index;
/*
* Check for ate exhaustion.
*/
if (ate_resource->lowest_free_index < 0)
return -1;
/*
* Find the required number of free consecutive ates.
*/
start_index =
find_free_ate(ate_resource, ate_resource->lowest_free_index,
ate_needed);
if (start_index >= 0)
mark_ate(ate_resource, start_index, ate_needed, ate_needed);
ate_resource->lowest_free_index =
find_free_ate(ate_resource, ate_resource->lowest_free_index, 1);
return start_index;
}
/*
* Allocate "count" contiguous Bridge Address Translation Entries
* on the specified bridge to be used for PCI to XTALK mappings.
* Indices in rm map range from 1..num_entries. Indices returned
* to caller range from 0..num_entries-1.
*
* Return the start index on success, -1 on failure.
*/
int pcibr_ate_alloc(struct pcibus_info *pcibus_info, int count)
{
int status = 0;
uint64_t flag;
flag = pcibr_lock(pcibus_info);
status = alloc_ate_resource(&pcibus_info->pbi_int_ate_resource, count);
if (status < 0) {
/* Failed to allocate */
pcibr_unlock(pcibus_info, flag);
return -1;
}
pcibr_unlock(pcibus_info, flag);
return status;
}
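
/*
 * Hedged usage sketch: reserve two ATEs on a bus, program them with
 * ate_write() (defined below), then release them. The pcibus_info
 * pointer and ATE value are assumed to come from the callers in
 * pcibr_dma.c; error handling is intentionally minimal.
 */
static void example_ate_usage(struct pcibus_info *pcibus_info, uint64_t ate)
{
	int index = pcibr_ate_alloc(pcibus_info, 2);

	if (index < 0)
		return;		/* PMU map exhausted */
	ate_write(pcibus_info, index, 2, ate);
	/* ... hand the mapped window to the device, do the DMA ... */
	pcibr_ate_free(pcibus_info, index);
}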
/*
* Setup an Address Translation Entry as specified. Use either the Bridge
* internal maps or the external map RAM, as appropriate.
*/
static inline uint64_t *pcibr_ate_addr(struct pcibus_info *pcibus_info,
int ate_index)
{
if (ate_index < pcibus_info->pbi_int_ate_size) {
return pcireg_int_ate_addr(pcibus_info, ate_index);
}
panic("pcibr_ate_addr: invalid ate_index 0x%x", ate_index);
}
/*
* Update the ate.
*/
inline void
ate_write(struct pcibus_info *pcibus_info, int ate_index, int count,
volatile uint64_t ate)
{
while (count-- > 0) {
if (ate_index < pcibus_info->pbi_int_ate_size) {
pcireg_int_ate_set(pcibus_info, ate_index, ate);
} else {
panic("ate_write: invalid ate_index 0x%x", ate_index);
}
ate_index++;
ate += IOPGSIZE;
}
pcireg_tflush_get(pcibus_info); /* wait until Bridge PIO complete */
}
void pcibr_ate_free(struct pcibus_info *pcibus_info, int index)
{
volatile uint64_t ate;
int count;
uint64_t flags;
if (pcibr_invalidate_ate) {
/* For debugging purposes, clear the valid bit in the ATE */
ate = *pcibr_ate_addr(pcibus_info, index);
count = pcibus_info->pbi_int_ate_resource.ate[index];
ate_write(pcibus_info, index, count, (ate & ~PCI32_ATE_V));
}
flags = pcibr_lock(pcibus_info);
free_ate_resource(&pcibus_info->pbi_int_ate_resource, index);
pcibr_unlock(pcibus_info, flags);
}
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001-2004 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <linux/pci.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/geo.h>
#include "xtalk/xwidgetdev.h"
#include "xtalk/hubdev.h"
#include "pci/pcibus_provider_defs.h"
#include "pci/pcidev.h"
#include "pci/tiocp.h"
#include "pci/pic.h"
#include "pci/pcibr_provider.h"
#include "pci/tiocp.h"
#include "tio.h"
#include <asm/sn/addrs.h>
extern int sn_ioif_inited;
/* =====================================================================
* DMA MANAGEMENT
*
* The Bridge ASIC provides three methods of doing DMA: via a "direct map"
* register available in 32-bit PCI space (which selects a contiguous 2G
* address space on some other widget), via "direct" addressing via 64-bit
* PCI space (all destination information comes from the PCI address,
* including transfer attributes), and via a "mapped" region that allows
* a bunch of different small mappings to be established with the PMU.
*
* For efficiency, we prefer to use the 32-bit direct mapping facility,
* since it requires no resource allocations. The advantage of using the
* PMU over the 64-bit direct is that single-cycle PCI addressing can be
* used; the advantage of using 64-bit direct over PMU addressing is that
* we do not have to allocate entries in the PMU.
*/
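
/*
 * A paraphrase of the policy implemented in pcibr_dma_map() below (not
 * new behavior): a device advertising a full 64-bit dma_mask gets the
 * direct64 path; a 32-63 bit mask tries the direct32 window first and
 * falls back to PMU ATEs; masks narrower than 32 bits are refused.
 */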
static uint64_t
pcibr_dmamap_ate32(struct pcidev_info *info,
uint64_t paddr, size_t req_size, uint64_t flags)
{
struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info;
struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info->
pdi_pcibus_info;
uint8_t internal_device = (PCI_SLOT(pcidev_info->pdi_host_pcidev_info->
pdi_linux_pcidev->devfn)) - 1;
int ate_count;
int ate_index;
uint64_t ate_flags = flags | PCI32_ATE_V;
uint64_t ate;
uint64_t pci_addr;
uint64_t xio_addr;
uint64_t offset;
/* PIC in PCI-X mode does not support 32-bit PageMap mode */
if (IS_PIC_SOFT(pcibus_info) && IS_PCIX(pcibus_info)) {
return 0;
}
/* Calculate the number of ATEs needed. */
if (!(MINIMAL_ATE_FLAG(paddr, req_size))) {
ate_count = IOPG((IOPGSIZE - 1) /* worst case start offset */
+ req_size /* max mapping bytes */
- 1) + 1; /* round UP */
} else { /* assume requested target is page aligned */
ate_count = IOPG(req_size /* max mapping bytes */
- 1) + 1; /* round UP */
}
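/*
 * Worked example (assuming IOPGSIZE is a 16 KB I/O page and
 * IOPG(x) == x / IOPGSIZE): a 20 KB request at a worst-case start
 * offset needs IOPG(16383 + 20480 - 1) + 1 == IOPG(36862) + 1 == 3 ATEs.
 */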
/* Allocate the ATEs required. */
ate_index = pcibr_ate_alloc(pcibus_info, ate_count);
if (ate_index < 0)
return 0;
/* In PCI-X mode, Prefetch not supported */
if (IS_PCIX(pcibus_info))
ate_flags &= ~(PCI32_ATE_PREF);
xio_addr =
IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
PHYS_TO_TIODMA(paddr);
offset = IOPGOFF(xio_addr);
ate = ate_flags | (xio_addr - offset);
/* If PIC, put the targetid in the ATE */
if (IS_PIC_SOFT(pcibus_info)) {
ate |= (pcibus_info->pbi_hub_xid << PIC_ATE_TARGETID_SHFT);
}
ate_write(pcibus_info, ate_index, ate_count, ate);
/*
* Set up the DMA mapped Address.
*/
pci_addr = PCI32_MAPPED_BASE + offset + IOPGSIZE * ate_index;
/*
* If swap was set in device in pcibr_endian_set()
* we need to turn swapping on.
*/
if (pcibus_info->pbi_devreg[internal_device] & PCIBR_DEV_SWAP_DIR)
ATE_SWAP_ON(pci_addr);
return pci_addr;
}
static uint64_t
pcibr_dmatrans_direct64(struct pcidev_info * info, uint64_t paddr,
uint64_t dma_attributes)
{
struct pcibus_info *pcibus_info = (struct pcibus_info *)
((info->pdi_host_pcidev_info)->pdi_pcibus_info);
uint64_t pci_addr;
/* Translate to Crosstalk View of Physical Address */
pci_addr = (IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
PHYS_TO_TIODMA(paddr)) | dma_attributes;
/* Handle Bus mode */
if (IS_PCIX(pcibus_info))
pci_addr &= ~PCI64_ATTR_PREF;
/* Handle Bridge Chipset differences */
if (IS_PIC_SOFT(pcibus_info)) {
pci_addr |=
((uint64_t) pcibus_info->
pbi_hub_xid << PIC_PCI64_ATTR_TARG_SHFT);
} else
pci_addr |= TIOCP_PCI64_CMDTYPE_MEM;
return pci_addr;
}
static uint64_t
pcibr_dmatrans_direct32(struct pcidev_info * info,
uint64_t paddr, size_t req_size, uint64_t flags)
{
struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info;
struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info->
pdi_pcibus_info;
uint64_t xio_addr;
uint64_t xio_base;
uint64_t offset;
uint64_t endoff;
if (IS_PCIX(pcibus_info)) {
return 0;
}
xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
PHYS_TO_TIODMA(paddr);
xio_base = pcibus_info->pbi_dir_xbase;
offset = xio_addr - xio_base;
endoff = req_size + offset;
if ((req_size > (1ULL << 31)) || /* Too Big */
(xio_addr < xio_base) || /* Out of range for mappings */
(endoff > (1ULL << 31))) { /* Too Big */
return 0;
}
return PCI32_DIRECT_BASE | offset;
}
/*
* Wrapper routine for freeing DMA maps.
* DMA mappings for Direct 64 and 32 do not have any DMA maps.
*/
void
pcibr_dma_unmap(struct pcidev_info *pcidev_info, dma_addr_t dma_handle,
int direction)
{
struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info->
pdi_pcibus_info;
if (IS_PCI32_MAPPED(dma_handle)) {
int ate_index;
ate_index =
IOPG((ATE_SWAP_OFF(dma_handle) - PCI32_MAPPED_BASE));
pcibr_ate_free(pcibus_info, ate_index);
}
}
/*
* On SN systems there is a race condition between a PIO read response and
* DMAs. In rare cases, the read response may beat the DMA, causing the
* driver to think that data in memory is complete and meaningful. This code
* eliminates that race. This routine is called by the PIO read routines
* after doing the read. For PIC this routine then forces a fake interrupt
* on another line, which is logically associated with the slot that the PIO
* is addressed to. It then spins while watching the memory location that
* the interrupt is targeted to. When the interrupt response arrives, we
* are sure that the DMA has landed in memory and it is safe for the driver
* to proceed. For TIOCP use the Device(x) Write Request Buffer Flush
* Bridge register since it ensures the data has entered the coherence domain,
* unlike the PIC Device(x) Write Request Buffer Flush register.
*/
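
/*
 * Hedged usage sketch: a PIO read wrapper of the kind described above.
 * The helper name is hypothetical; the real callers are the platform
 * readX()/inX() routines.
 */
static inline unsigned char example_readb_flushed(void *addr)
{
	unsigned char val = *(volatile unsigned char *)addr;

	sn_dma_flush((uint64_t) addr);	/* wait for in-flight DMA to land */
	return val;
}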
void sn_dma_flush(uint64_t addr)
{
nasid_t nasid;
int is_tio;
int wid_num;
int i, j;
int bwin;
uint64_t flags;
struct hubdev_info *hubinfo;
volatile struct sn_flush_device_list *p;
struct sn_flush_nasid_entry *flush_nasid_list;
if (!sn_ioif_inited)
return;
nasid = NASID_GET(addr);
if (-1 == NASID_TO_COMPACT_NODEID(nasid))
return;
hubinfo = (NODEPDA(NASID_TO_COMPACT_NODEID(nasid)))->pdinfo;
if (!hubinfo) {
BUG();
}
is_tio = (nasid & 1);
if (is_tio) {
wid_num = TIO_SWIN_WIDGETNUM(addr);
bwin = TIO_BWIN_WINDOWNUM(addr);
} else {
wid_num = SWIN_WIDGETNUM(addr);
bwin = BWIN_WINDOWNUM(addr);
}
flush_nasid_list = &hubinfo->hdi_flush_nasid_list;
if (flush_nasid_list->widget_p == NULL)
return;
if (bwin > 0) {
uint64_t itte = flush_nasid_list->iio_itte[bwin];
if (is_tio) {
wid_num = (itte >> TIO_ITTE_WIDGET_SHIFT) &
TIO_ITTE_WIDGET_MASK;
} else {
wid_num = (itte >> IIO_ITTE_WIDGET_SHIFT) &
IIO_ITTE_WIDGET_MASK;
}
}
if (flush_nasid_list->widget_p[wid_num] == NULL)
return;
p = &flush_nasid_list->widget_p[wid_num][0];
/* find a matching BAR */
for (i = 0; i < DEV_PER_WIDGET; i++) {
for (j = 0; j < PCI_ROM_RESOURCE; j++) {
if (p->sfdl_bar_list[j].start == 0)
break;
if (addr >= p->sfdl_bar_list[j].start
&& addr <= p->sfdl_bar_list[j].end)
break;
}
if (j < PCI_ROM_RESOURCE && p->sfdl_bar_list[j].start != 0)
break;
p++;
}
/* if no matching BAR, return without doing anything. */
if (i == DEV_PER_WIDGET)
return;
/*
* For TIOCP use the Device(x) Write Request Buffer Flush Bridge
* register since it ensures the data has entered the coherence
* domain, unlike PIC
*/
if (is_tio) {
uint32_t tio_id = REMOTE_HUB_L(nasid, TIO_NODE_ID);
uint32_t revnum = XWIDGET_PART_REV_NUM(tio_id);
/* TIOCP BRINGUP WAR (PV907516): Don't write buffer flush reg */
if ((1 << XWIDGET_PART_REV_NUM_REV(revnum)) & PV907516) {
return;
} else {
pcireg_wrb_flush_get(p->sfdl_pcibus_info,
(p->sfdl_slot - 1));
}
} else {
spin_lock_irqsave(&((struct sn_flush_device_list *)p)->
sfdl_flush_lock, flags);
p->sfdl_flush_value = 0;
/* force an interrupt. */
*(volatile uint32_t *)(p->sfdl_force_int_addr) = 1;
/* wait for the interrupt to come back. */
while (*(p->sfdl_flush_addr) != 0x10f) ;
/* okay, everything is synched up. */
spin_unlock_irqrestore((spinlock_t *)&p->sfdl_flush_lock, flags);
}
return;
}
/*
* Wrapper DMA interface. Called from pci_dma.c routines.
*/
uint64_t
pcibr_dma_map(struct pcidev_info * pcidev_info, unsigned long phys_addr,
size_t size, unsigned int flags)
{
dma_addr_t dma_handle;
struct pci_dev *pcidev = pcidev_info->pdi_linux_pcidev;
if (flags & SN_PCIDMA_CONSISTENT) {
/* sn_pci_alloc_consistent interfaces */
if (pcidev->dev.coherent_dma_mask == ~0UL) {
dma_handle =
pcibr_dmatrans_direct64(pcidev_info, phys_addr,
PCI64_ATTR_BAR);
} else {
dma_handle =
(dma_addr_t) pcibr_dmamap_ate32(pcidev_info,
phys_addr, size,
PCI32_ATE_BAR);
}
} else {
/* map_sg/map_single interfaces */
/* SN cannot support DMA addresses smaller than 32 bits. */
if (pcidev->dma_mask < 0x7fffffff) {
return 0;
}
if (pcidev->dma_mask == ~0UL) {
/*
* Handle the most common case: 64 bit cards. This
* call should always succeed.
*/
dma_handle =
pcibr_dmatrans_direct64(pcidev_info, phys_addr,
PCI64_ATTR_PREF);
} else {
/* Handle 32-63 bit cards via direct mapping */
dma_handle =
pcibr_dmatrans_direct32(pcidev_info, phys_addr,
size, 0);
if (!dma_handle) {
/*
* It is a 32 bit card and we cannot do direct mapping,
* so we use an ATE.
*/
dma_handle =
pcibr_dmamap_ate32(pcidev_info, phys_addr,
size, PCI32_ATE_PREF);
}
}
}
return dma_handle;
}
EXPORT_SYMBOL(sn_dma_flush);
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001-2004 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <asm/sn/sn_sal.h>
#include "xtalk/xwidgetdev.h"
#include <asm/sn/geo.h>
#include "xtalk/hubdev.h"
#include "pci/pcibus_provider_defs.h"
#include "pci/pcidev.h"
#include "pci/pcibr_provider.h"
#include <asm/sn/addrs.h>
static int sal_pcibr_error_interrupt(struct pcibus_info *soft)
{
struct ia64_sal_retval ret_stuff;
uint64_t busnum;
int segment;
ret_stuff.status = 0;
ret_stuff.v0 = 0;
segment = 0;
busnum = soft->pbi_buscommon.bs_persist_busnum;
SAL_CALL_NOLOCK(ret_stuff,
(u64) SN_SAL_IOIF_ERROR_INTERRUPT,
(u64) segment, (u64) busnum, 0, 0, 0, 0, 0);
return (int)ret_stuff.v0;
}
/*
* PCI Bridge Error interrupt handler. Gets invoked whenever a PCI
* bridge sends an error interrupt.
*/
static irqreturn_t
pcibr_error_intr_handler(int irq, void *arg, struct pt_regs *regs)
{
struct pcibus_info *soft = (struct pcibus_info *)arg;
if (sal_pcibr_error_interrupt(soft) < 0) {
panic("pcibr_error_intr_handler(): Fatal Bridge Error");
}
return IRQ_HANDLED;
}
void *
pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft)
{
int nasid, cnode, j;
struct hubdev_info *hubdev_info;
struct pcibus_info *soft;
struct sn_flush_device_list *sn_flush_device_list;
if (!IS_PCI_BRIDGE_ASIC(prom_bussoft->bs_asic_type)) {
return NULL;
}
/*
* Allocate kernel bus soft and copy from prom.
*/
soft = kmalloc(sizeof(struct pcibus_info), GFP_KERNEL);
if (!soft) {
return NULL;
}
memcpy(soft, prom_bussoft, sizeof(struct pcibus_info));
soft->pbi_buscommon.bs_base =
(((u64) soft->pbi_buscommon.
bs_base << 4) >> 4) | __IA64_UNCACHED_OFFSET;
spin_lock_init(&soft->pbi_lock);
/*
* register the bridge's error interrupt handler
*/
if (request_irq(SGI_PCIBR_ERROR, (void *)pcibr_error_intr_handler,
SA_SHIRQ, "PCIBR error", (void *)(soft))) {
printk(KERN_WARNING
"pcibr cannot allocate interrupt for error handler\n");
}
/*
* Update the Bridge with the "kernel" pagesize
*/
if (PAGE_SIZE < 16384) {
pcireg_control_bit_clr(soft, PCIBR_CTRL_PAGE_SIZE);
} else {
pcireg_control_bit_set(soft, PCIBR_CTRL_PAGE_SIZE);
}
nasid = NASID_GET(soft->pbi_buscommon.bs_base);
cnode = NASID_TO_COMPACT_NODEID(nasid);
hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
if (hubdev_info->hdi_flush_nasid_list.widget_p) {
sn_flush_device_list = hubdev_info->hdi_flush_nasid_list.
widget_p[(int)soft->pbi_buscommon.bs_xid];
if (sn_flush_device_list) {
for (j = 0; j < DEV_PER_WIDGET;
j++, sn_flush_device_list++) {
if (sn_flush_device_list->sfdl_slot == -1)
continue;
if (sn_flush_device_list->
sfdl_persistent_busnum ==
soft->pbi_buscommon.bs_persist_busnum)
sn_flush_device_list->sfdl_pcibus_info =
soft;
}
}
}
/* Setup the PMU ATE map */
soft->pbi_int_ate_resource.lowest_free_index = 0;
soft->pbi_int_ate_resource.ate =
kmalloc(soft->pbi_int_ate_size * sizeof(uint64_t), GFP_KERNEL);
if (!soft->pbi_int_ate_resource.ate) {
kfree(soft);
return NULL;
}
memset(soft->pbi_int_ate_resource.ate, 0,
soft->pbi_int_ate_size * sizeof(uint64_t));
return soft;
}
void pcibr_force_interrupt(struct sn_irq_info *sn_irq_info)
{
struct pcidev_info *pcidev_info;
struct pcibus_info *pcibus_info;
int bit = sn_irq_info->irq_int_bit;
pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
if (pcidev_info) {
pcibus_info =
(struct pcibus_info *)pcidev_info->pdi_host_pcidev_info->
pdi_pcibus_info;
pcireg_force_intr_set(pcibus_info, bit);
}
}
void pcibr_change_devices_irq(struct sn_irq_info *sn_irq_info)
{
struct pcidev_info *pcidev_info;
struct pcibus_info *pcibus_info;
int bit = sn_irq_info->irq_int_bit;
uint64_t xtalk_addr = sn_irq_info->irq_xtalkaddr;
pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
if (pcidev_info) {
pcibus_info =
(struct pcibus_info *)pcidev_info->pdi_host_pcidev_info->
pdi_pcibus_info;
/* Disable the device's IRQ */
pcireg_intr_enable_bit_clr(pcibus_info, bit);
/* Change the device's IRQ */
pcireg_intr_addr_addr_set(pcibus_info, bit, xtalk_addr);
/* Re-enable the device's IRQ */
pcireg_intr_enable_bit_set(pcibus_info, bit);
pcibr_force_interrupt(sn_irq_info);
}
}
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2004 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <linux/interrupt.h>
#include "pci/pcibus_provider_defs.h"
#include "pci/pcidev.h"
#include "pci/tiocp.h"
#include "pci/pic.h"
#include "pci/pcibr_provider.h"
union br_ptr {
struct tiocp tio;
struct pic pic;
};
/*
* Control Register Access -- Read/Write 0000_0020
*/
void pcireg_control_bit_clr(struct pcibus_info *pcibus_info, uint64_t bits)
{
union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
ptr->tio.cp_control &= ~bits;
break;
case PCIBR_BRIDGETYPE_PIC:
ptr->pic.p_wid_control &= ~bits;
break;
default:
panic
("pcireg_control_bit_clr: unknown bridgetype bridge 0x%p",
(void *)ptr);
}
}
}
void pcireg_control_bit_set(struct pcibus_info *pcibus_info, uint64_t bits)
{
union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
ptr->tio.cp_control |= bits;
break;
case PCIBR_BRIDGETYPE_PIC:
ptr->pic.p_wid_control |= bits;
break;
default:
panic
("pcireg_control_bit_set: unknown bridgetype bridge 0x%p",
(void *)ptr);
}
}
}
/*
* PCI/PCIX Target Flush Register Access -- Read Only 0000_0050
*/
uint64_t pcireg_tflush_get(struct pcibus_info *pcibus_info)
{
union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
uint64_t ret = 0;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
ret = ptr->tio.cp_tflush;
break;
case PCIBR_BRIDGETYPE_PIC:
ret = ptr->pic.p_wid_tflush;
break;
default:
panic
("pcireg_tflush_get: unknown bridgetype bridge 0x%p",
(void *)ptr);
}
}
/* Read of the Target Flush should always return zero */
if (ret != 0)
panic("pcireg_tflush_get:Target Flush failed\n");
return ret;
}
/*
* Interrupt Status Register Access -- Read Only 0000_0100
*/
uint64_t pcireg_intr_status_get(struct pcibus_info * pcibus_info)
{
union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
uint64_t ret = 0;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
ret = ptr->tio.cp_int_status;
break;
case PCIBR_BRIDGETYPE_PIC:
ret = ptr->pic.p_int_status;
break;
default:
panic
("pcireg_intr_status_get: unknown bridgetype bridge 0x%p",
(void *)ptr);
}
}
return ret;
}
/*
* Interrupt Enable Register Access -- Read/Write 0000_0108
*/
void pcireg_intr_enable_bit_clr(struct pcibus_info *pcibus_info, uint64_t bits)
{
union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
ptr->tio.cp_int_enable &= ~bits;
break;
case PCIBR_BRIDGETYPE_PIC:
ptr->pic.p_int_enable &= ~bits;
break;
default:
panic
("pcireg_intr_enable_bit_clr: unknown bridgetype bridge 0x%p",
(void *)ptr);
}
}
}
void pcireg_intr_enable_bit_set(struct pcibus_info *pcibus_info, uint64_t bits)
{
union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
ptr->tio.cp_int_enable |= bits;
break;
case PCIBR_BRIDGETYPE_PIC:
ptr->pic.p_int_enable |= bits;
break;
default:
panic
("pcireg_intr_enable_bit_set: unknown bridgetype bridge 0x%p",
(void *)ptr);
}
}
}
/*
* Intr Host Address Register (int_addr) -- Read/Write 0000_0130 - 0000_0168
*/
void pcireg_intr_addr_addr_set(struct pcibus_info *pcibus_info, int int_n,
uint64_t addr)
{
union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
ptr->tio.cp_int_addr[int_n] &= ~TIOCP_HOST_INTR_ADDR;
ptr->tio.cp_int_addr[int_n] |=
(addr & TIOCP_HOST_INTR_ADDR);
break;
case PCIBR_BRIDGETYPE_PIC:
ptr->pic.p_int_addr[int_n] &= ~PIC_HOST_INTR_ADDR;
ptr->pic.p_int_addr[int_n] |=
(addr & PIC_HOST_INTR_ADDR);
break;
default:
panic
("pcireg_intr_addr_addr_get: unknown bridgetype bridge 0x%p",
(void *)ptr);
}
}
}
/*
* Force Interrupt Register Access -- Write Only 0000_01C0 - 0000_01F8
*/
void pcireg_force_intr_set(struct pcibus_info *pcibus_info, int int_n)
{
union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
ptr->tio.cp_force_pin[int_n] = 1;
break;
case PCIBR_BRIDGETYPE_PIC:
ptr->pic.p_force_pin[int_n] = 1;
break;
default:
panic
("pcireg_force_intr_set: unknown bridgetype bridge 0x%p",
(void *)ptr);
}
}
}
/*
* Device(x) Write Buffer Flush Reg Access -- Read Only 0000_0240 - 0000_0258
*/
uint64_t pcireg_wrb_flush_get(struct pcibus_info *pcibus_info, int device)
{
union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
uint64_t ret = 0;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
ret = ptr->tio.cp_wr_req_buf[device];
break;
case PCIBR_BRIDGETYPE_PIC:
ret = ptr->pic.p_wr_req_buf[device];
break;
default:
panic("pcireg_wrb_flush_get: unknown bridgetype bridge 0x%p", (void *)ptr);
}
}
/* Read of the Write Buffer Flush should always return zero */
return ret;
}
void pcireg_int_ate_set(struct pcibus_info *pcibus_info, int ate_index,
uint64_t val)
{
union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
ptr->tio.cp_int_ate_ram[ate_index] = (uint64_t) val;
break;
case PCIBR_BRIDGETYPE_PIC:
ptr->pic.p_int_ate_ram[ate_index] = (uint64_t) val;
break;
default:
panic
("pcireg_int_ate_set: unknown bridgetype bridge 0x%p",
(void *)ptr);
}
}
}
uint64_t *pcireg_int_ate_addr(struct pcibus_info *pcibus_info, int ate_index)
{
union br_ptr *ptr = (union br_ptr *)pcibus_info->pbi_buscommon.bs_base;
uint64_t *ret = NULL;
if (pcibus_info) {
switch (pcibus_info->pbi_bridge_type) {
case PCIBR_BRIDGETYPE_TIOCP:
ret = (uint64_t *)&ptr->tio.cp_int_ate_ram[ate_index];
break;
case PCIBR_BRIDGETYPE_PIC:
ret = (uint64_t *)&ptr->pic.p_int_ate_ram[ate_index];
break;
default:
panic
("pcireg_int_ate_addr: unknown bridgetype bridge 0x%p",
(void *)ptr);
}
}
return ret;
}
......@@ -28,6 +28,7 @@
#include <asm/uaccess.h>
#include <asm/sn/addrs.h>
#include <asm/sn/clksupport.h>
#include <asm/sn/shub_mmr.h>
MODULE_AUTHOR("Jesse Barnes <jbarnes@sgi.com>");
MODULE_DESCRIPTION("Multimedia timer support");
......
......@@ -22,6 +22,8 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/module.h>
#include <asm/sn/geo.h>
#include <asm/sn/nodepda.h>
#include "snsc.h"
......@@ -364,17 +366,15 @@ int __init
scdrv_init(void)
{
geoid_t geoid;
cmoduleid_t cmod;
int i;
cnodeid_t cnode;
char devname[32];
char *devnamep;
module_t *m;
struct sysctl_data_s *scd;
void *salbuf;
struct class_simple *snsc_class;
dev_t first_dev, dev;
if (alloc_chrdev_region(&first_dev, 0, (MAX_SLABS*nummodules),
if (alloc_chrdev_region(&first_dev, 0, numionodes,
SYSCTL_BASENAME) < 0) {
printk("%s: failed to register SN system controller device\n",
__FUNCTION__);
......@@ -382,16 +382,8 @@ scdrv_init(void)
}
snsc_class = class_simple_create(THIS_MODULE, SYSCTL_BASENAME);
for (cmod = 0; cmod < nummodules; cmod++) {
m = sn_modules[cmod];
for (i = 0; i <= MAX_SLABS; i++) {
if (m->nodes[i] == -1) {
/* node is not alive in module */
continue;
}
geoid = m->geoid[i];
for (cnode = 0; cnode < numionodes; cnode++) {
geoid = cnodeid_get_geoid(cnode);
devnamep = devname;
format_module_id(devnamep, geo_module(geoid),
MODULE_FORMAT_BRIEF);
......@@ -410,7 +402,7 @@ scdrv_init(void)
memset(scd, 0, sizeof (struct sysctl_data_s));
/* initialize sysctl device data fields */
scd->scd_nasid = cnodeid_to_nasid(m->nodes[i]);
scd->scd_nasid = cnodeid_to_nasid(cnode);
if (!(salbuf = kmalloc(SCDRV_BUFSZ, GFP_KERNEL))) {
printk("%s: failed to allocate driver buffer"
"(%s%s)\n", __FUNCTION__,
......@@ -431,7 +423,7 @@ scdrv_init(void)
continue;
}
dev = first_dev + m->nodes[i];
dev = first_dev + cnode;
cdev_init(&scd->scd_cdev, &scdrv_fops);
if (cdev_add(&scd->scd_cdev, dev, 1)) {
printk("%s: failed to register system"
......@@ -448,7 +440,6 @@ scdrv_init(void)
ia64_sn_irtr_intr_enable(scd->scd_nasid,
0 /*ignored */ ,
SAL_IROUTER_INTR_RECV);
}
}
return 0;
}
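Pieced together from the hunks above, the reworked scdrv_init() iterates over compact node IDs rather than module/slab pairs. A simplified sketch of the new loop shape, as a fragment using the function's own local variables (error handling and device-name formatting elided):

for (cnode = 0; cnode < numionodes; cnode++) {
	geoid = cnodeid_get_geoid(cnode);
	/* format module/slab name from geoid into devname ... */
	scd->scd_nasid = cnodeid_to_nasid(cnode);
	dev = first_dev + cnode;	/* one minor per compact node */
	cdev_init(&scd->scd_cdev, &scdrv_fops);
	cdev_add(&scd->scd_cdev, dev, 1);
	/* ... then enable L1 receive interrupts via ia64_sn_irtr_intr_enable() */
}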
......
......@@ -708,22 +708,10 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev, ide_pci_device_t * d)
return 0;
}
/* This ensures that we can build this for generic kernels without
* having all the SN2 code sync'd and merged.
*/
typedef enum pciio_endian_e {
PCIDMA_ENDIAN_BIG,
PCIDMA_ENDIAN_LITTLE
} pciio_endian_t;
pciio_endian_t snia_pciio_endian_set(struct pci_dev
*pci_dev, pciio_endian_t device_end,
pciio_endian_t desired_end);
static unsigned int __init
pci_init_sgiioc4(struct pci_dev *dev, ide_pci_device_t * d)
{
unsigned int class_rev;
pciio_endian_t endian_status;
if (pci_enable_device(dev)) {
printk(KERN_ERR
......@@ -743,17 +731,6 @@ pci_init_sgiioc4(struct pci_dev *dev, ide_pci_device_t * d)
"46 or higher\n", d->name, dev->slot_name);
return -ENODEV;
}
/* Enable Byte Swapping in the PIC... */
endian_status = snia_pciio_endian_set(dev, PCIDMA_ENDIAN_LITTLE,
PCIDMA_ENDIAN_BIG);
if (endian_status != PCIDMA_ENDIAN_BIG) {
printk(KERN_ERR
"Failed to set endianness for device %s at slot %s\n",
d->name, dev->slot_name);
return -ENODEV;
}
return sgiioc4_ide_setup_pci_device(dev, d);
}
......
......@@ -52,7 +52,6 @@
#include <asm/io.h>
#include <asm/sn/simulator.h>
#include <asm/sn/sn2/sn_private.h>
#include <asm/sn/sn_sal.h>
/* number of characters we can transmit to the SAL console at a time */
......@@ -71,12 +70,12 @@
/* To use dynamic numbers only and not use the assigned major and minor,
* define the following.. */
/* #define USE_DYNAMIC_MINOR 1 */ /* use dynamic minor number */
#define USE_DYNAMIC_MINOR 0 /* Don't rely on misc_register dynamic minor */
/* #define USE_DYNAMIC_MINOR 1 *//* use dynamic minor number */
#define USE_DYNAMIC_MINOR 0 /* Don't rely on misc_register dynamic minor */
/* Device name we're using */
#define DEVICE_NAME "ttySG"
#define DEVICE_NAME_DYNAMIC "ttySG0" /* need full name for misc_register */
#define DEVICE_NAME_DYNAMIC "ttySG0" /* need full name for misc_register */
/* The major/minor we are using, ignored for USE_DYNAMIC_MINOR */
#define DEVICE_MAJOR 204
#define DEVICE_MINOR 40
......@@ -107,7 +106,7 @@ struct sn_cons_port {
static struct sn_cons_port sal_console_port;
/* Only used if USE_DYNAMIC_MINOR is set to 1 */
static struct miscdevice misc; /* used with misc_register for dynamic */
static struct miscdevice misc; /* used with misc_register for dynamic */
extern u64 master_node_bedrock_address;
extern void early_sn_setup(void);
......@@ -169,15 +168,13 @@ static struct sn_sal_ops intr_ops = {
* output is buffered and sent to the SAL asynchronously (either by
* timer callback or by UART interrupt) */
/* routines for running the console in polling mode */
/**
* snt_poll_getc - Get a character from the console in polling mode
*
*/
static int
snt_poll_getc(void)
static int snt_poll_getc(void)
{
int ch;
......@@ -189,8 +186,7 @@ snt_poll_getc(void)
* snt_poll_input_pending - Check if any input is waiting - polling mode.
*
*/
static int
snt_poll_input_pending(void)
static int snt_poll_input_pending(void)
{
int status, input;
......@@ -206,8 +202,7 @@ snt_poll_input_pending(void)
* @count: length of string
*
*/
static int
snt_sim_puts(const char *str, int count)
static int snt_sim_puts(const char *str, int count)
{
int counter = count;
......@@ -231,8 +226,7 @@ snt_sim_puts(const char *str, int count)
* snt_sim_getc - Get character from console in simulator mode
*
*/
static int
snt_sim_getc(void)
static int snt_sim_getc(void)
{
return readb(master_node_bedrock_address + (UART_RX << 3));
}
......@@ -241,8 +235,7 @@ snt_sim_getc(void)
* snt_sim_input_pending - Check if there is input pending in simulator mode
*
*/
static int
snt_sim_input_pending(void)
static int snt_sim_input_pending(void)
{
return readb(master_node_bedrock_address +
(UART_LSR << 3)) & UART_LSR_DR;
......@@ -254,8 +247,7 @@ snt_sim_input_pending(void)
* snt_intr_getc - Get a character from the console, interrupt mode
*
*/
static int
snt_intr_getc(void)
static int snt_intr_getc(void)
{
return ia64_sn_console_readc();
}
......@@ -264,8 +256,7 @@ snt_intr_getc(void)
* snt_intr_input_pending - Check if input is pending, interrupt mode
*
*/
static int
snt_intr_input_pending(void)
static int snt_intr_input_pending(void)
{
return ia64_sn_console_intr_status() & SAL_CONSOLE_INTR_RECV;
}
......@@ -278,8 +269,7 @@ snt_intr_input_pending(void)
* @len: Length
*
*/
static int
snt_hw_puts_raw(const char *s, int len)
static int snt_hw_puts_raw(const char *s, int len)
{
/* this will call the PROM and not return until this is done */
return ia64_sn_console_putb(s, len);
......@@ -291,8 +281,7 @@ snt_hw_puts_raw(const char *s, int len)
* @len: Length
*
*/
static int
snt_hw_puts_buffered(const char *s, int len)
static int snt_hw_puts_buffered(const char *s, int len)
{
/* queue data to the PROM */
return ia64_sn_console_xmit_chars((char *)s, len);
......@@ -310,8 +299,7 @@ snt_hw_puts_buffered(const char *s, int len)
* @port: Port to operate with (we ignore since we only have one port)
*
*/
static const char *
snp_type(struct uart_port *port)
static const char *snp_type(struct uart_port *port)
{
return ("SGI SN L1");
}
......@@ -321,8 +309,7 @@ snp_type(struct uart_port *port)
* @port: Port to operate on (we ignore since we only have one port)
*
*/
static unsigned int
snp_tx_empty(struct uart_port *port)
static unsigned int snp_tx_empty(struct uart_port *port)
{
return 1;
}
......@@ -333,8 +320,7 @@ snp_tx_empty(struct uart_port *port)
* @tty_stop: Set to 1 if called via uart_stop
*
*/
static void
snp_stop_tx(struct uart_port *port, unsigned int tty_stop)
static void snp_stop_tx(struct uart_port *port, unsigned int tty_stop)
{
}
......@@ -343,8 +329,7 @@ snp_stop_tx(struct uart_port *port, unsigned int tty_stop)
* @port: Port to operate on - we ignore - no-op function
*
*/
static void
snp_release_port(struct uart_port *port)
static void snp_release_port(struct uart_port *port)
{
}
......@@ -353,8 +338,7 @@ snp_release_port(struct uart_port *port)
* @port: Port to operate on - we ignore - no-op function
*
*/
static void
snp_enable_ms(struct uart_port *port)
static void snp_enable_ms(struct uart_port *port)
{
}
......@@ -363,8 +347,7 @@ snp_enable_ms(struct uart_port *port)
* @port: Port to shut down - we ignore
*
*/
static void
snp_shutdown(struct uart_port *port)
static void snp_shutdown(struct uart_port *port)
{
}
......@@ -374,8 +357,7 @@ snp_shutdown(struct uart_port *port)
* @mctrl: Lines to set/unset - we ignore
*
*/
static void
snp_set_mctrl(struct uart_port *port, unsigned int mctrl)
static void snp_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
}
......@@ -384,8 +366,7 @@ snp_set_mctrl(struct uart_port *port, unsigned int mctrl)
* @port: port to operate on - we only have one port so we ignore this
*
*/
static unsigned int
snp_get_mctrl(struct uart_port *port)
static unsigned int snp_get_mctrl(struct uart_port *port)
{
return TIOCM_CAR | TIOCM_RNG | TIOCM_DSR | TIOCM_CTS;
}
......@@ -395,8 +376,7 @@ snp_get_mctrl(struct uart_port *port)
* @port: Port to operate on - we ignore
*
*/
static void
snp_stop_rx(struct uart_port *port)
static void snp_stop_rx(struct uart_port *port)
{
}
......@@ -406,11 +386,11 @@ snp_stop_rx(struct uart_port *port)
* @tty_stop: Set to 1 if called via uart_start
*
*/
static void
snp_start_tx(struct uart_port *port, unsigned int tty_stop)
static void snp_start_tx(struct uart_port *port, unsigned int tty_stop)
{
if (sal_console_port.sc_ops->sal_wakeup_transmit)
sal_console_port.sc_ops->sal_wakeup_transmit(&sal_console_port, TRANSMIT_BUFFERED);
sal_console_port.sc_ops->sal_wakeup_transmit(&sal_console_port,
TRANSMIT_BUFFERED);
}
......@@ -420,8 +400,7 @@ snp_start_tx(struct uart_port *port, unsigned int tty_stop)
* @break_state: Break state
*
*/
static void
snp_break_ctl(struct uart_port *port, int break_state)
static void snp_break_ctl(struct uart_port *port, int break_state)
{
}
......@@ -430,8 +409,7 @@ snp_break_ctl(struct uart_port *port, int break_state)
* @port: Port to operate on
*
*/
static int
snp_startup(struct uart_port *port)
static int snp_startup(struct uart_port *port)
{
return 0;
}
......@@ -454,8 +432,7 @@ snp_set_termios(struct uart_port *port, struct termios *termios,
* @port: port to operate on
*
*/
static int
snp_request_port(struct uart_port *port)
static int snp_request_port(struct uart_port *port)
{
return 0;
}
......@@ -466,8 +443,7 @@ snp_request_port(struct uart_port *port)
* @flags: flags used for port setup
*
*/
static void
snp_config_port(struct uart_port *port, int flags)
static void snp_config_port(struct uart_port *port, int flags)
{
}
......@@ -505,15 +481,14 @@ static struct uart_ops sn_console_ops = {
* itself may be broken.
*
*/
static int
sn_debug_printf(const char *fmt, ...)
static int sn_debug_printf(const char *fmt, ...)
{
static char printk_buf[1024];
int printed_len;
va_list args;
va_start(args, fmt);
printed_len = vsnprintf(printk_buf, sizeof (printk_buf), fmt, args);
printed_len = vsnprintf(printk_buf, sizeof(printk_buf), fmt, args);
if (!sal_console_port.sc_ops) {
if (IS_RUNNING_ON_SIMULATOR())
......@@ -528,13 +503,12 @@ sn_debug_printf(const char *fmt, ...)
va_end(args);
return printed_len;
}
#endif /* DEBUG */
#endif /* DEBUG */
/*
* Interrupt handling routines.
*/
/**
* sn_receive_chars - Grab characters, pass them to tty layer
* @port: Port to operate on
......@@ -635,8 +609,7 @@ sn_receive_chars(struct sn_cons_port *port, struct pt_regs *regs,
* ignore them until we register with the serial core stuffs.
*
*/
static void
sn_transmit_chars(struct sn_cons_port *port, int raw)
static void sn_transmit_chars(struct sn_cons_port *port, int raw)
{
int xmit_count, tail, head, loops, ii;
int result;
......@@ -651,8 +624,7 @@ sn_transmit_chars(struct sn_cons_port *port, int raw)
if (port->sc_port.info) {
/* We're initialized, using serial core infrastructure */
xmit = &port->sc_port.info->xmit;
}
else {
} else {
/* Probably sn_sal_switch_to_asynch has been run but serial core isn't
* initialized yet. Just return. Writes are going through
* sn_sal_console_write (due to register_console) at this time.
......@@ -704,7 +676,7 @@ sn_transmit_chars(struct sn_cons_port *port, int raw)
uart_write_wakeup(&port->sc_port);
if (uart_circ_empty(xmit))
snp_stop_tx(&port->sc_port, 0); /* no-op for us */
snp_stop_tx(&port->sc_port, 0); /* no-op for us */
}
/**
......@@ -714,10 +686,9 @@ sn_transmit_chars(struct sn_cons_port *port, int raw)
* @regs: Saved registers, used by sn_receive_chars for uart_handle_sysrq_char
*
*/
static irqreturn_t
sn_sal_interrupt(int irq, void *dev_id, struct pt_regs *regs)
static irqreturn_t sn_sal_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
struct sn_cons_port *port = (struct sn_cons_port *) dev_id;
struct sn_cons_port *port = (struct sn_cons_port *)dev_id;
unsigned long flags;
int status = ia64_sn_console_intr_status();
......@@ -742,8 +713,7 @@ sn_sal_interrupt(int irq, void *dev_id, struct pt_regs *regs)
* returns the console irq if interrupt is successfully registered, else 0
*
*/
static int
sn_sal_connect_interrupt(struct sn_cons_port *port)
static int sn_sal_connect_interrupt(struct sn_cons_port *port)
{
if (request_irq(SGI_UART_VECTOR, sn_sal_interrupt,
SA_INTERRUPT | SA_SHIRQ,
......@@ -764,10 +734,9 @@ sn_sal_connect_interrupt(struct sn_cons_port *port)
* Obviously not used in interrupt mode
*
*/
static void
sn_sal_timer_poll(unsigned long data)
static void sn_sal_timer_poll(unsigned long data)
{
struct sn_cons_port *port = (struct sn_cons_port *) data;
struct sn_cons_port *port = (struct sn_cons_port *)data;
unsigned long flags;
if (!port)
......@@ -797,8 +766,7 @@ sn_sal_timer_poll(unsigned long data)
* if we didn't already come through here via sn_sal_serial_console_init.
*
*/
static void __init
sn_sal_switch_to_asynch(struct sn_cons_port *port)
static void __init sn_sal_switch_to_asynch(struct sn_cons_port *port)
{
unsigned long flags;
......@@ -827,7 +795,7 @@ sn_sal_switch_to_asynch(struct sn_cons_port *port)
*/
init_timer(&port->sc_timer);
port->sc_timer.function = sn_sal_timer_poll;
port->sc_timer.data = (unsigned long) port;
port->sc_timer.data = (unsigned long)port;
if (IS_RUNNING_ON_SIMULATOR())
port->sc_interrupt_timeout = 6;
......@@ -854,8 +822,7 @@ sn_sal_switch_to_asynch(struct sn_cons_port *port)
* We attempt to switch to interrupt mode here by calling
* sn_sal_connect_interrupt. If that works out, we enable receive interrupts.
*/
static void __init
sn_sal_switch_to_interrupts(struct sn_cons_port *port)
static void __init sn_sal_switch_to_interrupts(struct sn_cons_port *port)
{
int irq;
unsigned long flags;
......@@ -893,7 +860,7 @@ static struct console sal_console = {
.write = sn_sal_console_write,
.device = uart_console_device,
.setup = sn_sal_console_setup,
.index = -1, /* unspecified */
.index = -1, /* unspecified */
.data = &sal_console_uart,
};
......@@ -903,9 +870,9 @@ static struct uart_driver sal_console_uart = {
.owner = THIS_MODULE,
.driver_name = "sn_console",
.dev_name = DEVICE_NAME,
.major = 0, /* major/minor set at registration time per USE_DYNAMIC_MINOR */
.major = 0, /* major/minor set at registration time per USE_DYNAMIC_MINOR */
.minor = 0,
.nr = 1, /* one port */
.nr = 1, /* one port */
.cons = SAL_CONSOLE,
};
......@@ -918,8 +885,7 @@ static struct uart_driver sal_console_uart = {
* core and try to enable interrupt driven mode.
*
*/
static int __init
sn_sal_module_init(void)
static int __init sn_sal_module_init(void)
{
int retval;
......@@ -933,23 +899,24 @@ sn_sal_module_init(void)
misc.name = DEVICE_NAME_DYNAMIC;
retval = misc_register(&misc);
if (retval != 0) {
printk("Failed to register console device using misc_register.\n");
printk
("Failed to register console device using misc_register.\n");
return -ENODEV;
}
sal_console_uart.major = MISC_MAJOR;
sal_console_uart.minor = misc.minor;
}
else {
} else {
sal_console_uart.major = DEVICE_MAJOR;
sal_console_uart.minor = DEVICE_MINOR;
}
/* We register the driver and the port before switching to interrupts
* or async above so the proper uart structures are populated */
* or async above so the proper uart structures are populated */
if (uart_register_driver(&sal_console_uart) < 0) {
printk("ERROR sn_sal_module_init failed uart_register_driver, line %d\n",
__LINE__);
printk
("ERROR sn_sal_module_init failed uart_register_driver, line %d\n",
__LINE__);
return -ENODEV;
}
......@@ -985,8 +952,7 @@ sn_sal_module_init(void)
* sn_sal_module_exit - When we're unloaded, remove the driver/port
*
*/
static void __exit
sn_sal_module_exit(void)
static void __exit sn_sal_module_exit(void)
{
del_timer_sync(&sal_console_port.sc_timer);
uart_remove_one_port(&sal_console_uart, &sal_console_port.sc_port);
......@@ -1008,7 +974,8 @@ module_exit(sn_sal_module_exit);
*
*/
static void puts_raw_fixed(int (*puts_raw) (const char *s, int len), const char *s, int count)
static void puts_raw_fixed(int (*puts_raw) (const char *s, int len),
const char *s, int count)
{
const char *s1;
......@@ -1056,7 +1023,7 @@ sn_sal_console_write(struct console *co, const char *s, unsigned count)
if (port->sc_port.info) {
/* somebody really wants this output, might be an
* oops, kdb, panic, etc. make sure they get it. */
* oops, kdb, panic, etc. make sure they get it. */
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
if (spin_is_locked(&port->sc_port.lock)) {
int lhead = port->sc_port.info->xmit.head;
......@@ -1064,29 +1031,39 @@ sn_sal_console_write(struct console *co, const char *s, unsigned count)
int counter, got_lock = 0;
/*
* We attempt to determine if someone has died with the
* lock. We wait ~20 secs after the head and tail ptrs
* stop moving and assume the lock holder is not functional
* and plow ahead. If the lock is freed within the time out
* period we re-get the lock and go ahead normally. We also
* remember if we have plowed ahead so that we don't have
* We attempt to determine if someone has died with the
* lock. We wait ~20 secs after the head and tail ptrs
* stop moving and assume the lock holder is not functional
* and plow ahead. If the lock is freed within the time out
* period we re-get the lock and go ahead normally. We also
* remember if we have plowed ahead so that we don't have
* to wait out the timeout period again - the assumption
* is that we will time out again.
*/
* is that we will time out again.
*/
for (counter = 0; counter < 150; mdelay(125), counter++) {
if (!spin_is_locked(&port->sc_port.lock) || stole_lock) {
if (!spin_is_locked(&port->sc_port.lock)
|| stole_lock) {
if (!stole_lock) {
spin_lock_irqsave(&port->sc_port.lock, flags);
spin_lock_irqsave(&port->
sc_port.lock,
flags);
got_lock = 1;
}
break;
}
else {
} else {
/* still locked */
if ((lhead != port->sc_port.info->xmit.head) || (ltail != port->sc_port.info->xmit.tail)) {
lhead = port->sc_port.info->xmit.head;
ltail = port->sc_port.info->xmit.tail;
if ((lhead !=
port->sc_port.info->xmit.head)
|| (ltail !=
port->sc_port.info->xmit.
tail)) {
lhead =
port->sc_port.info->xmit.
head;
ltail =
port->sc_port.info->xmit.
tail;
counter = 0;
}
}
......@@ -1094,16 +1071,15 @@ sn_sal_console_write(struct console *co, const char *s, unsigned count)
/* flush anything in the serial core xmit buffer, raw */
sn_transmit_chars(port, 1);
if (got_lock) {
spin_unlock_irqrestore(&port->sc_port.lock, flags);
spin_unlock_irqrestore(&port->sc_port.lock,
flags);
stole_lock = 0;
}
else {
} else {
/* fell thru */
stole_lock = 1;
}
puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count);
}
else {
} else {
stole_lock = 0;
#endif
spin_lock_irqsave(&port->sc_port.lock, flags);
......@@ -1134,8 +1110,7 @@ sn_sal_console_write(struct console *co, const char *s, unsigned count)
* here so providing it is easier.
*
*/
static int __init
sn_sal_console_setup(struct console *co, char *options)
static int __init sn_sal_console_setup(struct console *co, char *options)
{
return 0;
}
......@@ -1163,7 +1138,7 @@ static struct console sal_console_early __initdata = {
.name = "sn_sal",
.write = sn_sal_console_write_early,
.flags = CON_PRINTBUFFER,
.index = -1,
.index = -1,
};
/**
......@@ -1175,8 +1150,7 @@ static struct console sal_console_early __initdata = {
* sn_sal_serial_console_init is called, this console is unregistered
* and a new one registered.
*/
int __init
sn_serial_console_early_setup(void)
int __init sn_serial_console_early_setup(void)
{
if (!ia64_platform_is("sn2"))
return -1;
......@@ -1186,13 +1160,12 @@ sn_serial_console_early_setup(void)
else
sal_console_port.sc_ops = &poll_ops;
early_sn_setup(); /* Find SAL entry points */
early_sn_setup(); /* Find SAL entry points */
register_console(&sal_console_early);
return 0;
}
/**
* sn_sal_serial_console_init - Early console output - set up for register
*
......@@ -1205,12 +1178,11 @@ sn_serial_console_early_setup(void)
* it here doesn't hurt anything.
*
*/
static int __init
sn_sal_serial_console_init(void)
static int __init sn_sal_serial_console_init(void)
{
if (ia64_platform_is("sn2")) {
sn_sal_switch_to_asynch(&sal_console_port);
DPRINTF ("sn_sal_serial_console_init : register console\n");
DPRINTF("sn_sal_serial_console_init : register console\n");
register_console(&sal_console);
unregister_console(&sal_console_early);
}
......
......@@ -117,6 +117,6 @@ extern ia64_mv_dma_supported sn_dma_supported;
#define platform_dma_mapping_error sn_dma_mapping_error
#define platform_dma_supported sn_dma_supported
#include <asm/sn/sn2/io.h>
#include <asm/sn/io.h>
#endif /* _ASM_IA64_MACHVEC_SN2_H */
......@@ -3,20 +3,192 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 1992-1999,2001-2003 Silicon Graphics, Inc. All rights reserved.
* Copyright (c) 1992-1999,2001-2004 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_ADDRS_H
#define _ASM_IA64_SN_ADDRS_H
#include <asm/sn/sn2/addrs.h>
/* McKinley Address Format:
*
* 4 4 3 3 3 3
* 9 8 8 7 6 5 0
* +-+---------+----+--------------+
* |0| Node ID | AS | Node Offset |
* +-+---------+----+--------------+
*
* Node ID: If bit 38 = 1, is ICE, else is SHUB
* AS: Address Space Identifier. Used only if bit 38 = 0.
* b'00: Local Resources and MMR space
* bit 35
* 0: Local resources space
* node id:
* 0: IA64/NT compatibility space
* 2: Local MMR Space
* 4: Local memory, regardless of local node id
* 1: Global MMR space
* b'01: GET space.
* b'10: AMO space.
* b'11: Cacheable memory space.
*
* NodeOffset: byte offset
*/
/* TIO address format:
* 4 4 3 3 3 3 3 0
* 9 8 8 7 6 5 4
* +-+----------+-+---+--------------+
* |0| Node ID |0|CID| Node offset |
* +-+----------+-+---+--------------+
*
* Node ID: if bit 38 == 1, is ICE.
* Bit 37: Must be zero.
* CID: Chiplet ID:
* b'01: TIO LB (Indicates TIO MMR access.)
* b'11: TIO ICE (indicates coretalk space access.)
* Node offset: byte offset.
*/
/*
* Note that in both of the above address formats, bit
* 35 set indicates that the reference is to the
* shub or tio MMRs.
*/
#ifndef __ASSEMBLY__
#include <asm/sn/types.h>
#endif
typedef union ia64_sn2_pa {
struct {
unsigned long off : 36;
unsigned long as : 2;
unsigned long nasid: 11;
unsigned long fill : 15;
} f;
unsigned long l;
void *p;
} ia64_sn2_pa_t;
#endif
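As an illustration of the bit layout described above, the union can be used to crack an address into its fields. A hedged sketch (the helper name is made up):

/* Hypothetical sketch: extract the nasid field from an SN2 address. */
static inline int example_pa_to_nasid(unsigned long pa)
{
	ia64_sn2_pa_t a;

	a.l = pa;
	return a.f.nasid;	/* bits <48:38>: node ID per the format above */
}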
#define TO_PHYS_MASK 0x0001ffcfffffffff /* Note - clear AS bits */
/* Regions determined by AS */
#define LOCAL_MMR_SPACE 0xc000008000000000 /* Local MMR space */
#define LOCAL_PHYS_MMR_SPACE 0x8000008000000000 /* Local PhysicalMMR space */
#define LOCAL_MEM_SPACE 0xc000010000000000 /* Local Memory space */
/* It so happens that setting bit 35 indicates a reference to the SHUB or TIO
* MMR space.
*/
#define GLOBAL_MMR_SPACE 0xc000000800000000 /* Global MMR space */
#define TIO_MMR_SPACE 0xc000000800000000 /* TIO MMR space */
#define ICE_MMR_SPACE 0xc000000000000000 /* ICE MMR space */
#define GLOBAL_PHYS_MMR_SPACE 0x0000000800000000 /* Global Physical MMR space */
#define GET_SPACE 0xe000001000000000 /* GET space */
#define AMO_SPACE 0xc000002000000000 /* AMO space */
#define CACHEABLE_MEM_SPACE 0xe000003000000000 /* Cacheable memory space */
#define UNCACHED 0xc000000000000000 /* UnCacheable memory space */
#define UNCACHED_PHYS 0x8000000000000000 /* UnCacheable physical memory space */
#define PHYS_MEM_SPACE 0x0000003000000000 /* physical memory space */
/* SN2 address macros */
/* NID_SHFT has the right value for both SHUB and TIO addresses.*/
#define NID_SHFT 38
#define LOCAL_MMR_ADDR(a) (UNCACHED | LOCAL_MMR_SPACE | (a))
#define LOCAL_MMR_PHYS_ADDR(a) (UNCACHED_PHYS | LOCAL_PHYS_MMR_SPACE | (a))
#define LOCAL_MEM_ADDR(a) (LOCAL_MEM_SPACE | (a))
#define REMOTE_ADDR(n,a) ((((unsigned long)(n))<<NID_SHFT) | (a))
#define GLOBAL_MMR_ADDR(n,a) (UNCACHED | GLOBAL_MMR_SPACE | REMOTE_ADDR(n,a))
#define GLOBAL_MMR_PHYS_ADDR(n,a) (UNCACHED_PHYS | GLOBAL_PHYS_MMR_SPACE | REMOTE_ADDR(n,a))
#define GET_ADDR(n,a) (GET_SPACE | REMOTE_ADDR(n,a))
#define AMO_ADDR(n,a) (UNCACHED | AMO_SPACE | REMOTE_ADDR(n,a))
#define GLOBAL_MEM_ADDR(n,a) (CACHEABLE_MEM_SPACE | REMOTE_ADDR(n,a))
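For illustration, expanding one of these macros by hand shows how the pieces combine (the nasid and register offset are arbitrary example values):

/*
 * Example expansion (illustrative):
 *   GLOBAL_MMR_ADDR(4, 0x100)
 *     = UNCACHED | GLOBAL_MMR_SPACE | (4UL << 38) | 0x100
 *     = 0xc000000800000000 | 0x0000010000000000 | 0x100
 *     = 0xc000010800000100
 */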
/* non-II mmr's start at top of big window space (4G) */
#define BWIN_TOP 0x0000000100000000
/*
* general address defines - for code common to SN0/SN1/SN2
*/
#define CAC_BASE CACHEABLE_MEM_SPACE /* cacheable memory space */
#define IO_BASE (UNCACHED | GLOBAL_MMR_SPACE) /* lower 4G maps II's XIO space */
#define TIO_BASE (UNCACHED | ICE_MMR_SPACE) /* lower 4G maps TIO space */
#define AMO_BASE (UNCACHED | AMO_SPACE) /* fetch & op space */
#define MSPEC_BASE AMO_BASE /* fetch & op space */
#define UNCAC_BASE (UNCACHED | CACHEABLE_MEM_SPACE) /* uncached global memory */
#define GET_BASE GET_SPACE /* momentarily coherent remote mem. */
#define CALIAS_BASE LOCAL_CACHEABLE_BASE /* cached node-local memory */
#define UALIAS_BASE (UNCACHED | LOCAL_CACHEABLE_BASE) /* uncached node-local memory */
#define TO_PHYS(x) ( ((x) & TO_PHYS_MASK))
#define TO_CAC(x) (CAC_BASE | ((x) & TO_PHYS_MASK))
#define TO_UNCAC(x) (UNCAC_BASE | ((x) & TO_PHYS_MASK))
#define TO_MSPEC(x) (MSPEC_BASE | ((x) & TO_PHYS_MASK))
#define TO_GET(x) (GET_BASE | ((x) & TO_PHYS_MASK))
#define TO_CALIAS(x) (CALIAS_BASE | TO_NODE_ADDRSPACE(x))
#define TO_UALIAS(x) (UALIAS_BASE | TO_NODE_ADDRSPACE(x))
#define NODE_SIZE_BITS 36 /* node offset : bits <35:0> */
#define BWIN_SIZE_BITS 29 /* big window size: 512M */
#define TIO_BWIN_SIZE_BITS 30 /* big window size: 1G */
#define NASID_BITS 11 /* bits <48:38> */
#define NASID_BITMASK (0x7ffULL)
#define NASID_SHFT NID_SHFT
#define NASID_META_BITS 0 /* ???? */
#define NASID_LOCAL_BITS 7 /* same router as SN1 */
#define NODE_ADDRSPACE_SIZE (1UL << NODE_SIZE_BITS)
#define NASID_MASK ((uint64_t) NASID_BITMASK << NASID_SHFT)
#define NASID_GET(_pa) (int) (((uint64_t) (_pa) >> \
NASID_SHFT) & NASID_BITMASK)
#define PHYS_TO_DMA(x) ( ((x & NASID_MASK) >> 2) | \
(x & (NODE_ADDRSPACE_SIZE - 1)) )
/*
* This address requires a chiplet id in bits 38-39. For DMA to memory,
* the chiplet id is zero. If we implement TIO-TIO dma, we might need
* to insert a chiplet id into this macro. However, it is our belief
* right now that this chiplet id will be ICE, which is also zero.
*/
#define PHYS_TO_TIODMA(x) ( ((x & NASID_MASK) << 2) | \
(x & (NODE_ADDRSPACE_SIZE - 1)) )
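A worked example may help here (values are arbitrary): PHYS_TO_DMA() moves the nasid field down from bit 38 to bit 36, PHYS_TO_TIODMA() moves it up to bit 40, and the node offset passes through unchanged in both.

/*
 * Worked example (illustrative), for a physical address on nasid 1:
 *   x                 = (1UL << 38) | 0x1000 = 0x0000004000001000
 *   PHYS_TO_DMA(x)    = (1UL << 36) | 0x1000 = 0x0000001000001000
 *   PHYS_TO_TIODMA(x) = (1UL << 40) | 0x1000 = 0x0000010000001000
 */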
#define CHANGE_NASID(n,x) ({ia64_sn2_pa_t _v; _v.l = (long) (x); _v.f.nasid = n; _v.p;})
#define HUBREG_CAST (volatile mmr_t *)
#ifndef __ASSEMBLY__
#define NODE_SWIN_BASE(nasid, widget) \
((widget == 0) ? NODE_BWIN_BASE((nasid), SWIN0_BIGWIN) \
: RAW_NODE_SWIN_BASE(nasid, widget))
#else
#define NODE_SWIN_BASE(nasid, widget) \
(NODE_IO_BASE(nasid) + ((uint64_t) (widget) << SWIN_SIZE_BITS))
#define LOCAL_SWIN_BASE(widget) \
(UNCACHED | LOCAL_MMR_SPACE | (((uint64_t) (widget) << SWIN_SIZE_BITS)))
#endif /* __ASSEMBLY__ */
/*
* The following definitions pertain to the IO special address
* space. They define the location of the big and little windows
* of any given node.
*/
#define BWIN_SIZE (1UL << BWIN_SIZE_BITS)
#define BWIN_SIZEMASK (BWIN_SIZE - 1)
#define BWIN_WIDGET_MASK 0x7
#define NODE_BWIN_BASE0(nasid) (NODE_IO_BASE(nasid) + BWIN_SIZE)
#define NODE_BWIN_BASE(nasid, bigwin) (NODE_BWIN_BASE0(nasid) + \
((uint64_t) (bigwin) << BWIN_SIZE_BITS))
#define BWIN_WIDGETADDR(addr) ((addr) & BWIN_SIZEMASK)
#define BWIN_WINDOWNUM(addr) (((addr) >> BWIN_SIZE_BITS) & BWIN_WIDGET_MASK)
#define TIO_BWIN_WINDOW_SELECT_MASK 0x7
#define TIO_BWIN_WINDOWNUM(addr) (((addr) >> TIO_BWIN_SIZE_BITS) & TIO_BWIN_WINDOW_SELECT_MASK)
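A short sketch of how these selectors decompose a big window reference (the helper function is hypothetical):

/* Hypothetical sketch: split a big window address into its parts. */
static inline void example_bwin_decode(unsigned long addr,
				       int *window, unsigned long *offset)
{
	*window = BWIN_WINDOWNUM(addr);		/* which 512M window */
	*offset = BWIN_WIDGETADDR(addr);	/* offset within that window */
}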
#ifndef __ASSEMBLY__
#include <asm/sn/types.h>
#endif
/*
* The following macros are used to index to the beginning of a specific
......@@ -31,17 +203,11 @@
#define NODE_MSPEC_BASE(_n) (MSPEC_BASE + NODE_OFFSET(_n))
#define NODE_UNCAC_BASE(_n) (UNCAC_BASE + NODE_OFFSET(_n))
#define TO_NODE(_n, _x) (NODE_OFFSET(_n) | ((_x) ))
#define TO_NODE_CAC(_n, _x) (NODE_CAC_BASE(_n) | ((_x) & TO_PHYS_MASK))
#define TO_NODE_UNCAC(_n, _x) (NODE_UNCAC_BASE(_n) | ((_x) & TO_PHYS_MASK))
#define TO_NODE_MSPEC(_n, _x) (NODE_MSPEC_BASE(_n) | ((_x) & TO_PHYS_MASK))
#define TO_NODE_HSPEC(_n, _x) (NODE_HSPEC_BASE(_n) | ((_x) & TO_PHYS_MASK))
#define RAW_NODE_SWIN_BASE(nasid, widget) \
(NODE_IO_BASE(nasid) + ((uint64_t) (widget) << SWIN_SIZE_BITS))
#define WIDGETID_GET(addr) ((unsigned char)((addr >> SWIN_SIZE_BITS) & 0xff))
/*
* The following definitions pertain to the IO special address
......@@ -54,43 +220,20 @@
#define SWIN_SIZEMASK (SWIN_SIZE - 1)
#define SWIN_WIDGET_MASK 0xF
#define TIO_SWIN_SIZE_BITS 28
#define TIO_SWIN_SIZE (1UL << 28)
#define TIO_SWIN_SIZEMASK (SWIN_SIZE - 1)
#define TIO_SWIN_WIDGET_MASK 0x3
/*
* Convert a small window address to an xtalk address.
*
* 'addr' can be a physical or virtual address, but will be converted
* to an Xtalk address in the range 0 -> SWIN_SIZEMASK
*/
#define SWIN_WIDGETADDR(addr) ((addr) & SWIN_SIZEMASK)
#define SWIN_WIDGETNUM(addr) (((addr) >> SWIN_SIZE_BITS) & SWIN_WIDGET_MASK)
/*
* Verify if addr belongs to small window address on node with "nasid"
*
*
* NOTE: "addr" is expected to be XKPHYS address, and NOT physical
* address
*
*
*/
#define NODE_SWIN_ADDR(nasid, addr) \
(((addr) >= NODE_SWIN_BASE(nasid, 0)) && \
((addr) < (NODE_SWIN_BASE(nasid, HUB_NUM_WIDGET) + SWIN_SIZE)\
))
/*
* The following define the major position-independent aliases used
* in SN.
* LBOOT -- 256MB in size, reads in the LBOOT area result in
* uncached references to the local hub's boot prom and
* other directory-bus connected devices.
* IALIAS -- 8MB in size, reads in the IALIAS result in uncached
* references to the local hub's registers.
*/
#define HUB_REGISTER_WIDGET 1
#define IALIAS_BASE LOCAL_SWIN_BASE(HUB_REGISTER_WIDGET)
#define IALIAS_SIZE 0x800000 /* 8 Megabytes */
#define IS_IALIAS(_a) (((_a) >= IALIAS_BASE) && \
((_a) < (IALIAS_BASE + IALIAS_SIZE)))
#define TIO_SWIN_WIDGETNUM(addr) (((addr) >> TIO_SWIN_SIZE_BITS) & TIO_SWIN_WIDGET_MASK)
/*
* The following macros produce the correct base virtual address for
......@@ -107,15 +250,13 @@
* As all other non-II mmr's located at the top of big window
* space.
*/
#define LOCAL_HUB_BASE(_x) (LOCAL_MMR_ADDR(_x) | (((~(_x)) & BWIN_TOP)>>8))
#define REMOTE_HUB_BASE(_x) \
(UNCACHED | GLOBAL_MMR_SPACE | \
(((~(_x)) & BWIN_TOP)>>8) | \
(((~(_x)) & BWIN_TOP)>>9) | (_x))
#define LOCAL_HUB(_x) (HUBREG_CAST LOCAL_HUB_BASE(_x))
#define REMOTE_HUB(_n, _x) \
(HUBREG_CAST (REMOTE_HUB_BASE(_x) | ((((long)(_n))<<NASID_SHFT))))
((volatile uint64_t *)(REMOTE_HUB_BASE(_x) | ((((long)(_n))<<NASID_SHFT))))
/*
......@@ -126,12 +267,20 @@
* Otherwise, the recommended approach is to use *_HUB_L() and *_HUB_S().
* They're always safe.
*/
/*
* LOCAL_HUB_ADDR doesn't need to be changed for TIO, since, by definition,
* there are no "local" TIOs.
*/
#define LOCAL_HUB_ADDR(_x) \
(((_x) & BWIN_TOP) ? (HUBREG_CAST (LOCAL_MMR_ADDR(_x))) \
: (HUBREG_CAST (IALIAS_BASE + (_x))))
(((_x) & BWIN_TOP) ? ((volatile uint64_t *)(LOCAL_MMR_ADDR(_x))) \
: ((volatile uint64_t *)(IALIAS_BASE + (_x))))
#define REMOTE_HUB_ADDR(_n, _x) \
(((_x) & BWIN_TOP) ? (HUBREG_CAST (GLOBAL_MMR_ADDR(_n, _x))) \
: (HUBREG_CAST (NODE_SWIN_BASE(_n, 1) + 0x800000 + (_x))))
((_n & 1) ? \
/* TIO: */ \
((volatile uint64_t *)(GLOBAL_MMR_ADDR(_n, _x))) \
: /* SHUB: */ \
(((_x) & BWIN_TOP) ? ((volatile uint64_t *)(GLOBAL_MMR_ADDR(_n, _x))) \
: ((volatile uint64_t *)(NODE_SWIN_BASE(_n, 1) + 0x800000 + (_x)))))
#ifndef __ASSEMBLY__
......@@ -152,7 +301,7 @@
* the base of the register space.
*/
#define HUB_REG_PTR(_base, _off) \
(HUBREG_CAST ((unsigned long)(_base) + (__psunsigned_t)(_off)))
(volatile uint64_t *)((unsigned long)(_base) + (__psunsigned_t)(_off)))
#define HUB_REG_PTR_L(_base, _off) \
HUB_L(HUB_REG_PTR((_base), (_off)))
......@@ -160,70 +309,4 @@
#define HUB_REG_PTR_S(_base, _off, _data) \
HUB_S(HUB_REG_PTR((_base), (_off)), (_data))
/*
* Software structure locations -- permanently fixed
* See diagram in kldir.h
*/
#define PHYS_RAMBASE 0x0
#define K0_RAMBASE PHYS_TO_K0(PHYS_RAMBASE)
#define ARCS_SPB_OFFSET 0x1000
#define ARCS_SPB_ADDR(nasid) \
PHYS_TO_K0(NODE_OFFSET(nasid) | ARCS_SPB_OFFSET)
#define ARCS_SPB_SIZE 0x0400
#define KLDIR_OFFSET 0x2000
#define KLDIR_ADDR(nasid) \
TO_NODE_CAC((nasid), KLDIR_OFFSET)
#define KLDIR_SIZE 0x0400
/*
* Software structure locations -- indirected through KLDIR
* See diagram in kldir.h
*
* Important: All low memory structures must only be accessed
* uncached, except for the symmon stacks.
*/
#define KLI_LAUNCH 0 /* Dir. entries */
#define KLI_KLCONFIG 1
#define KLI_NMI 2
#define KLI_GDA 3
#define KLI_FREEMEM 4
#define KLI_SYMMON_STK 5
#define KLI_PI_ERROR 6
#define KLI_KERN_VARS 7
#define KLI_KERN_XP 8
#define KLI_KERN_PARTID 9
#ifndef __ASSEMBLY__
#define KLD_BASE(nasid) ((kldir_ent_t *) KLDIR_ADDR(nasid))
#define KLD_LAUNCH(nasid) (KLD_BASE(nasid) + KLI_LAUNCH)
#define KLD_NMI(nasid) (KLD_BASE(nasid) + KLI_NMI)
#define KLD_KLCONFIG(nasid) (KLD_BASE(nasid) + KLI_KLCONFIG)
#define KLD_PI_ERROR(nasid) (KLD_BASE(nasid) + KLI_PI_ERROR)
#define KLD_GDA(nasid) (KLD_BASE(nasid) + KLI_GDA)
#define KLD_SYMMON_STK(nasid) (KLD_BASE(nasid) + KLI_SYMMON_STK)
#define KLD_FREEMEM(nasid) (KLD_BASE(nasid) + KLI_FREEMEM)
#define KLD_KERN_VARS(nasid) (KLD_BASE(nasid) + KLI_KERN_VARS)
#define KLD_KERN_XP(nasid) (KLD_BASE(nasid) + KLI_KERN_XP)
#define KLD_KERN_PARTID(nasid) (KLD_BASE(nasid) + KLI_KERN_PARTID)
#define KLCONFIG_OFFSET(nasid) ia64_sn_get_klconfig_addr(nasid)
#define KLCONFIG_ADDR(nasid) \
TO_NODE_CAC((nasid), KLCONFIG_OFFSET(nasid))
#define KLCONFIG_SIZE(nasid) KLD_KLCONFIG(nasid)->size
#define GDA_ADDR(nasid) KLD_GDA(nasid)->pointer
#define GDA_SIZE(nasid) KLD_GDA(nasid)->size
#define NODE_OFFSET_TO_K0(_nasid, _off) \
(CACHEABLE_MEM_SPACE | NODE_OFFSET(_nasid) | (_off))
#endif /* __ASSEMBLY__ */
#endif /* _ASM_IA64_SN_ADDRS_H */
......@@ -5,7 +5,7 @@
*
* SGI specific setup.
*
* Copyright (C) 1995-1997,1999,2001-2003 Silicon Graphics, Inc. All rights reserved.
* Copyright (C) 1995-1997,1999,2001-2004 Silicon Graphics, Inc. All rights reserved.
* Copyright (C) 1999 Ralf Baechle (ralf@gnu.org)
*/
#ifndef _ASM_IA64_SN_ARCH_H
......@@ -15,20 +15,24 @@
#include <asm/sn/types.h>
#include <asm/sn/sn_cpuid.h>
typedef u64 shubreg_t;
typedef u64 hubreg_t;
typedef u64 mmr_t;
/*
* This is the maximum number of nodes that can be part of a kernel.
* Effectively, it's the maximum number of compact node ids (cnodeid_t).
* This is not necessarily the same as MAX_NASIDS.
*/
#define MAX_COMPACT_NODES 2048
typedef u64 nic_t;
#define NASID_TO_COMPACT_NODEID(nasid) (nasid_to_cnodeid(nasid))
#define COMPACT_TO_NASID_NODEID(cnode) (cnodeid_to_nasid(cnode))
#define INVALID_NASID ((nasid_t)-1)
#define INVALID_CNODEID ((cnodeid_t)-1)
#define INVALID_PNODEID ((pnodeid_t)-1)
#define INVALID_SLAB (slabid_t)-1
#define INVALID_MODULE ((moduleid_t)-1)
#define INVALID_PARTID ((partid_t)-1)
extern cpuid_t cnodetocpu(cnodeid_t);
extern void sn_flush_all_caches(long addr, long bytes);
extern int is_fine_dirmode(void);
#endif /* _ASM_IA64_SN_ARCH_H */
......@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
* Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
*/
/*
......@@ -14,39 +14,15 @@
*
* RTC_COUNTER_ADDR - contains the address of the counter
*
* GET_RTC_COUNTER() - macro to read the value of the clock
*
* RTC_CYCLES_PER_SEC - clock frequency in ticks per second
*
*/
#ifndef _ASM_IA64_SN_CLKSUPPORT_H
#define _ASM_IA64_SN_CLKSUPPORT_H
#include <asm/sn/arch.h>
#include <asm/sn/addrs.h>
#include <asm/sn/sn2/addrs.h>
#include <asm/sn/sn2/shubio.h>
#include <asm/sn/sn2/shub_mmr.h>
typedef long clkreg_t;
extern unsigned long sn_rtc_cycles_per_second;
extern unsigned long sn_rtc_per_itc;
#define RTC_MASK SH_RTC_MASK
#define RTC_COUNTER_ADDR ((clkreg_t*)LOCAL_MMR_ADDR(SH_RTC))
#define RTC_COMPARE_A_ADDR ((clkreg_t*)LOCAL_MMR_ADDR(SH_RTC))
#define RTC_COMPARE_B_ADDR ((clkreg_t*)LOCAL_MMR_ADDR(SH_RTC))
#define RTC_INT_PENDING_A_ADDR ((clkreg_t*)LOCAL_MMR_ADDR(SH_RTC))
#define RTC_INT_PENDING_B_ADDR ((clkreg_t*)LOCAL_MMR_ADDR(SH_RTC))
#define RTC_INT_ENABLED_A_ADDR ((clkreg_t*)LOCAL_MMR_ADDR(SH_RTC))
#define RTC_INT_ENABLED_B_ADDR ((clkreg_t*)LOCAL_MMR_ADDR(SH_RTC))
#define SN_RTC_PER_ITC_SHIFT 34
#define GET_RTC_COUNTER() (*RTC_COUNTER_ADDR)
#define rtc_time() GET_RTC_COUNTER()
#define RTC_COUNTER_ADDR ((long *)LOCAL_MMR_ADDR(SH_RTC))
#define RTC_CYCLES_PER_SEC sn_rtc_cycles_per_second
#define rtc_time() (*RTC_COUNTER_ADDR)
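A hedged sketch of the intended use (the helper name is made up): read the free-running counter twice and scale the difference by the tick rate.

/* Hypothetical sketch: elapsed whole seconds since an earlier RTC read. */
static inline long example_rtc_elapsed_sec(long start)
{
	return (rtc_time() - start) / RTC_CYCLES_PER_SEC;
}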
#endif /* _ASM_IA64_SN_CLKSUPPORT_H */
......@@ -4,7 +4,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2001-2003 Silicon Graphics, Inc. All rights reserved.
* Copyright (c) 2001-2004 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_FETCHOP_H
......
......@@ -3,43 +3,122 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
* Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_GEO_H
#define _ASM_IA64_SN_GEO_H
/* Include a platform-specific geo.h. It must define at least:
* geoid_t: Geographic identifier data type
* geo_type_t: Data type for the kind of geoid this is
* GEO_TYPE_xxx: Values for geo_type_t vars, eg. GEO_TYPE_NODE
* GEO_MAX_LEN: The maximum length of a geoid, formatted for printing
*/
/* The geoid_t implementation below is based loosely on the pcfg_t
implementation in sys/SN/promcfg.h. */
/* Type declarations */
/* Size of a geoid_t structure (must be before decl. of geoid_u) */
#define GEOID_SIZE 8 /* Would 16 be better? The size can
be different on different platforms. */
#define MAX_SLABS 0xe /* slabs per module */
typedef unsigned char geo_type_t;
/* Fields common to all substructures */
typedef struct geo_any_s {
moduleid_t module; /* The module (box) this h/w lives in */
geo_type_t type; /* What type of h/w is named by this geoid_t */
slabid_t slab; /* The logical assembly within the module */
} geo_any_t;
/* Additional fields for particular types of hardware */
typedef struct geo_node_s {
geo_any_t any; /* No additional fields needed */
} geo_node_t;
typedef struct geo_rtr_s {
geo_any_t any; /* No additional fields needed */
} geo_rtr_t;
typedef struct geo_iocntl_s {
geo_any_t any; /* No additional fields needed */
} geo_iocntl_t;
typedef struct geo_pcicard_s {
geo_iocntl_t any;
char bus; /* Bus/widget number */
char slot; /* PCI slot number */
} geo_pcicard_t;
/* Subcomponents of a node */
typedef struct geo_cpu_s {
geo_node_t node;
char slice; /* Which CPU on the node */
} geo_cpu_t;
typedef struct geo_mem_s {
geo_node_t node;
char membus; /* The memory bus on the node */
char memslot; /* The memory slot on the bus */
} geo_mem_t;
typedef union geoid_u {
geo_any_t any;
geo_node_t node;
geo_iocntl_t iocntl;
geo_pcicard_t pcicard;
geo_rtr_t rtr;
geo_cpu_t cpu;
geo_mem_t mem;
char padsize[GEOID_SIZE];
} geoid_t;
/* Preprocessor macros */
#define GEO_MAX_LEN 48 /* max. formatted length, plus some pad:
module/001c07/slab/5/node/memory/2/slot/4 */
#include <asm/sn/sn2/geo.h>
/* Values for geo_type_t */
#define GEO_TYPE_INVALID 0
#define GEO_TYPE_MODULE 1
#define GEO_TYPE_NODE 2
#define GEO_TYPE_RTR 3
#define GEO_TYPE_IOCNTL 4
#define GEO_TYPE_IOCARD 5
#define GEO_TYPE_CPU 6
#define GEO_TYPE_MEM 7
#define GEO_TYPE_MAX (GEO_TYPE_MEM+1)
/* Declarations applicable to all platforms */
/* Parameter for hwcfg_format_geoid_compt() */
#define GEO_COMPT_MODULE 1
#define GEO_COMPT_SLAB 2
#define GEO_COMPT_IOBUS 3
#define GEO_COMPT_IOSLOT 4
#define GEO_COMPT_CPU 5
#define GEO_COMPT_MEMBUS 6
#define GEO_COMPT_MEMSLOT 7
/* parameter for hwcfg_format_geoid() */
#define GEO_FORMAT_HWGRAPH 1
#define GEO_FORMAT_BRIEF 2
#define GEO_INVALID_STR "<invalid>"
/* (the parameter for hwcfg_format_geoid_compt() is defined in the
* platform-specific geo.h file) */
#define INVALID_NASID ((nasid_t)-1)
#define INVALID_CNODEID ((cnodeid_t)-1)
#define INVALID_PNODEID ((pnodeid_t)-1)
#define INVALID_SLAB (slabid_t)-1
#define INVALID_MODULE ((moduleid_t)-1)
#define INVALID_PARTID ((partid_t)-1)
/* Routines for manipulating geoid_t values */
static inline slabid_t geo_slab(geoid_t g)
{
return (g.any.type == GEO_TYPE_INVALID) ?
INVALID_SLAB : g.any.slab;
}
extern moduleid_t geo_module(geoid_t g);
extern slabid_t geo_slab(geoid_t g);
extern geo_type_t geo_type(geoid_t g);
extern int geo_valid(geoid_t g);
extern int geo_cmp(geoid_t g0, geoid_t g1);
extern geoid_t geo_new(geo_type_t type, ...);
static inline moduleid_t geo_module(geoid_t g)
{
return (g.any.type == GEO_TYPE_INVALID) ?
INVALID_MODULE : g.any.module;
}
extern geoid_t hwcfg_parse_geoid(char *buffer);
extern void hwcfg_format_geoid(char *buffer, geoid_t m, int fmt);
extern void hwcfg_format_geoid_compt(char *buffer, geoid_t m, int compt);
extern geoid_t hwcfg_geo_get_self(geo_type_t type);
extern geoid_t hwcfg_geo_get_by_nasid(geo_type_t type, nasid_t nasid);
extern geoid_t cnodeid_get_geoid(cnodeid_t cnode);
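As a usage sketch combining the accessors above (the printing helper itself is hypothetical):

/* Hypothetical sketch: report the physical location of a compact node. */
static inline void example_print_node_location(cnodeid_t cnode)
{
	geoid_t g = cnodeid_get_geoid(cnode);

	printk("cnode %d: module %d, slab %d\n",
	       cnode, (int)geo_module(g), (int)geo_slab(g));
}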
#endif /* _ASM_IA64_SN_GEO_H */
......@@ -3,19 +3,48 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
* Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_INTR_H
#define _ASM_IA64_SN_INTR_H
#include <asm/sn/types.h>
#include <asm/sn/sn2/intr.h>
#define SGI_UART_VECTOR (0xe9)
#define SGI_PCIBR_ERROR (0x33)
// These two IRQs are used by partitioning.
#define SGI_XPC_ACTIVATE (0x30)
#define SGI_II_ERROR (0x31)
#define SGI_XBOW_ERROR (0x32)
#define SGI_PCIBR_ERROR (0x33)
#define SGI_ACPI_SCI_INT (0x34)
#define SGI_TIO_ERROR (0x36)
#define SGI_XPC_NOTIFY (0xe7)
#define SN2_IRQ_RESERVED (0x1)
#define SN2_IRQ_CONNECTED (0x2)
#define SN2_IRQ_SHARED (0x4)
// The SN PROM irq struct
struct sn_irq_info {
struct sn_irq_info *irq_next; /* sharing irq list */
short irq_nasid; /* Nasid IRQ is assigned to */
int irq_slice; /* slice IRQ is assigned to */
int irq_cpuid; /* kernel logical cpuid */
int irq_irq; /* the IRQ number */
int irq_int_bit; /* Bridge interrupt pin */
uint64_t irq_xtalkaddr; /* xtalkaddr IRQ is sent to */
int irq_bridge_type;/* pciio asic type (pciio.h) */
void *irq_bridge; /* bridge generating irq */
void *irq_pciioinfo; /* associated pciio_info_t */
int irq_last_intr; /* For Shub lb lost intr WAR */
int irq_cookie; /* unique cookie */
int irq_flags; /* flags */
int irq_share_cnt; /* num devices sharing IRQ */
};
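Since shared interrupts are chained through irq_next, a hedged traversal sketch (the function name is made up):

/* Hypothetical sketch: count devices sharing an SN IRQ via irq_next. */
static inline int example_count_irq_sharers(struct sn_irq_info *info)
{
	int n = 0;

	while (info) {
		n++;
		info = info->irq_next;
	}
	return n;
}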
extern void sn_send_IPI_phys(long, int, int);
extern void intr_init_vecblk(cnodeid_t node);
#define CPU_VECTOR_TO_IRQ(cpuid,vector) (vector)
#define SN_CPU_FROM_IRQ(irq) (0)
#define SN_IVEC_FROM_IRQ(irq) (irq)
#endif /* _ASM_IA64_SN_INTR_H */
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_SN_IO_H
#define _ASM_SN_IO_H
#include <linux/compiler.h>
#include <asm/intrinsics.h>
extern void * sn_io_addr(unsigned long port) __attribute_const__; /* Forward declaration */
extern void sn_mmiob(void); /* Forward declaration */
extern int numionodes;
#define __sn_mf_a() ia64_mfa()
extern void sn_dma_flush(unsigned long);
#define __sn_inb ___sn_inb
#define __sn_inw ___sn_inw
#define __sn_inl ___sn_inl
#define __sn_outb ___sn_outb
#define __sn_outw ___sn_outw
#define __sn_outl ___sn_outl
#define __sn_readb ___sn_readb
#define __sn_readw ___sn_readw
#define __sn_readl ___sn_readl
#define __sn_readq ___sn_readq
#define __sn_readb_relaxed ___sn_readb_relaxed
#define __sn_readw_relaxed ___sn_readw_relaxed
#define __sn_readl_relaxed ___sn_readl_relaxed
#define __sn_readq_relaxed ___sn_readq_relaxed
/*
* The following routines are SN platform specific, called when
* a reference is made to the inX/outX set of macros. The SN
* platform's inX macros ensure that posted DMA writes on the
* Bridge are flushed.
*
* The routines should be self-explanatory.
*/
static inline unsigned int
___sn_inb (unsigned long port)
{
volatile unsigned char *addr;
unsigned char ret = -1;
if ((addr = sn_io_addr(port))) {
ret = *addr;
__sn_mf_a();
sn_dma_flush((unsigned long)addr);
}
return ret;
}
static inline unsigned int
___sn_inw (unsigned long port)
{
volatile unsigned short *addr;
unsigned short ret = -1;
if ((addr = sn_io_addr(port))) {
ret = *addr;
__sn_mf_a();
sn_dma_flush((unsigned long)addr);
}
return ret;
}
static inline unsigned int
___sn_inl (unsigned long port)
{
volatile unsigned int *addr;
unsigned int ret = -1;
if ((addr = sn_io_addr(port))) {
ret = *addr;
__sn_mf_a();
sn_dma_flush((unsigned long)addr);
}
return ret;
}
static inline void
___sn_outb (unsigned char val, unsigned long port)
{
volatile unsigned char *addr;
if ((addr = sn_io_addr(port))) {
*addr = val;
sn_mmiob();
}
}
static inline void
___sn_outw (unsigned short val, unsigned long port)
{
volatile unsigned short *addr;
if ((addr = sn_io_addr(port))) {
*addr = val;
sn_mmiob();
}
}
static inline void
___sn_outl (unsigned int val, unsigned long port)
{
volatile unsigned int *addr;
if ((addr = sn_io_addr(port))) {
*addr = val;
sn_mmiob();
}
}
/*
* The following routines are SN platform specific, called when
* a reference is made to the readX/writeX set of macros. The SN
* platform's readX macros ensure that posted DMA writes on the
* Bridge are flushed.
*
* The routines should be self-explanatory.
*/
static inline unsigned char
___sn_readb (void *addr)
{
unsigned char val;
val = *(volatile unsigned char *)addr;
__sn_mf_a();
sn_dma_flush((unsigned long)addr);
return val;
}
static inline unsigned short
___sn_readw (void *addr)
{
unsigned short val;
val = *(volatile unsigned short *)addr;
__sn_mf_a();
sn_dma_flush((unsigned long)addr);
return val;
}
static inline unsigned int
___sn_readl (void *addr)
{
unsigned int val;
val = *(volatile unsigned int *) addr;
__sn_mf_a();
sn_dma_flush((unsigned long)addr);
return val;
}
static inline unsigned long
___sn_readq (void *addr)
{
unsigned long val;
val = *(volatile unsigned long *) addr;
__sn_mf_a();
sn_dma_flush((unsigned long)addr);
return val;
}
/*
* For generic and SN2 kernels, we have a set of fast-access
* PIO macros. These are provided on the SN platform
* because the normal inX and readX macros perform the
* additional task of flushing posted DMA requests on the Bridge.
*
* These routines should be self-explanatory.
*/
static inline unsigned int
sn_inb_fast (unsigned long port)
{
volatile unsigned char *addr = (unsigned char *)port;
unsigned char ret;
ret = *addr;
__sn_mf_a();
return ret;
}
static inline unsigned int
sn_inw_fast (unsigned long port)
{
volatile unsigned short *addr = (unsigned short *)port;
unsigned short ret;
ret = *addr;
__sn_mf_a();
return ret;
}
static inline unsigned int
sn_inl_fast (unsigned long port)
{
volatile unsigned int *addr = (unsigned int *)port;
unsigned int ret;
ret = *addr;
__sn_mf_a();
return ret;
}
static inline unsigned char
___sn_readb_relaxed (void *addr)
{
return *(volatile unsigned char *)addr;
}
static inline unsigned short
___sn_readw_relaxed (void *addr)
{
return *(volatile unsigned short *)addr;
}
static inline unsigned int
___sn_readl_relaxed (void *addr)
{
return *(volatile unsigned int *) addr;
}
static inline unsigned long
___sn_readq_relaxed (void *addr)
{
return *(volatile unsigned long *) addr;
}
struct pci_dev;
static inline int
sn_pci_set_vchan(struct pci_dev *pci_dev, unsigned long *addr, int vchan)
{
if (vchan > 1) {
return -1;
}
if (!(*addr >> 32)) /* Using a mask here would be cleaner */
return 0; /* but this generates better code */
if (vchan == 1) {
/* Set Bit 57 */
*addr |= (1UL << 57);
} else {
/* Clear Bit 57 */
*addr &= ~(1UL << 57);
}
return 0;
}
#endif /* _ASM_SN_IO_H */
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Derived from IRIX <sys/SN/klconfig.h>.
*
* Copyright (C) 1992-1997,1999,2001-2004 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (C) 1999 by Ralf Baechle
*/
#ifndef _ASM_IA64_SN_KLCONFIG_H
#define _ASM_IA64_SN_KLCONFIG_H
/*
* The KLCONFIG structures store info about the various BOARDs found
* during Hardware Discovery. In addition, it stores info about the
* components found on the BOARDs.
*/
typedef s32 klconf_off_t;
/* Functions/macros needed to use this structure */
typedef struct kl_config_hdr {
char pad[20];
klconf_off_t ch_board_info; /* the link list of boards */
char pad0[88];
} kl_config_hdr_t;
#define NODE_OFFSET_TO_LBOARD(nasid,off) (lboard_t*)(NODE_CAC_BASE(nasid) + (off))
/*
* The KLCONFIG area is organized as a LINKED LIST of BOARDs. A BOARD
* can be either 'LOCAL' or 'REMOTE'. LOCAL means it is attached to
* the LOCAL/current NODE. REMOTE means it is attached to a different
* node.(TBD - Need a way to treat ROUTER boards.)
*
* There are 2 different structures to represent these boards -
* lboard - Local board, rboard - remote board. These 2 structures
* can be arbitrarily mixed in the LINKED LIST of BOARDs. (Refer
* Figure below). The first byte of the rboard or lboard structure
* is used to find out its type - no unions are used.
* If it is an lboard, then the config info of this board will be found
* on the local node. (LOCAL NODE BASE + offset value gives a pointer to
* the structure.)
* If it is an rboard, the local structure contains the node number
* and the offset of the beginning of the LINKED LIST on the remote node.
* The details of the hardware on a remote node can be built locally,
* if required, by reading the LINKED LIST on the remote node and
* ignoring all the rboards on that node.
*
* The local node uses the REMOTE NODE NUMBER + OFFSET to point to the
* First board info on the remote node. The remote node list is
* traversed as the local list, using the REMOTE BASE ADDRESS and not
* the local base address and ignoring all rboard values.
*
*
KLCONFIG
+------------+ +------------+ +------------+ +------------+
| lboard | +-->| lboard | +-->| rboard | +-->| lboard |
+------------+ | +------------+ | +------------+ | +------------+
| board info | | | board info | | |errinfo,bptr| | | board info |
+------------+ | +------------+ | +------------+ | +------------+
| offset |--+ | offset |--+ | offset |--+ |offset=NULL |
+------------+ +------------+ +------------+ +------------+
+------------+
| board info |
+------------+ +--------------------------------+
| compt 1 |------>| type, rev, diaginfo, size ... | (CPU)
+------------+ +--------------------------------+
| compt 2 |--+
+------------+ | +--------------------------------+
| ... | +--->| type, rev, diaginfo, size ... | (MEM_BANK)
+------------+ +--------------------------------+
| errinfo |--+
+------------+ | +--------------------------------+
+--->|r/l brd errinfo,compt err flags |
+--------------------------------+
*
* Each BOARD consists of COMPONENTs and the BOARD structure has
* pointers (offsets) to its COMPONENT structure.
* The COMPONENT structure has version info, size and speed info, revision,
* error info and the NIC info. This structure can accommodate any
* BOARD with arbitrary COMPONENT composition.
*
* The ERRORINFO part of each BOARD has error information
* that describes errors about the BOARD itself. It also has flags to
* indicate the COMPONENT(s) on the board that have errors. The error
* information specific to the COMPONENT is present in the respective
* COMPONENT structure.
*
* The ERRORINFO structure is also treated like a COMPONENT, ie. the
* BOARD has pointers(offset) to the ERRORINFO structure. The rboard
* structure also has a pointer to the ERRORINFO structure. This is
* the place to store ERRORINFO about a REMOTE NODE, if the HUB on
* that NODE is not working or if the REMOTE MEMORY is BAD. In cases where
* only the CPU of the REMOTE NODE is disabled, the ERRORINFO pointer can
* be a NODE NUMBER, REMOTE OFFSET combination, pointing to error info
* which is present on the REMOTE NODE.(TBD)
* REMOTE ERRINFO can be stored on any of the nearest nodes
* or on all the nearest nodes.(TBD)
* Like BOARD structures, REMOTE ERRINFO structures can be built locally
* using the rboard errinfo pointer.
*
* In order to get useful information from this data organization, a set of
* interface routines is provided (TBD). The important thing to remember while
* manipulating the structures, is that, the NODE number information should
* be used. If the NODE is non-zero (remote) then each offset should
* be added to the REMOTE BASE ADDR else it should be added to the LOCAL BASE ADDR.
* This includes offsets for BOARDS, COMPONENTS and ERRORINFO.
*
* Note that these structures do not provide much info about connectivity.
* That info will be part of HWGRAPH, which is an extension of the cfg_t
* data structure. (ref IP27prom/cfg.h) It has to be extended to include
* the IO part of the Network(TBD).
*
* The data structures below define the above concepts.
*/
/*
* BOARD classes
*/
#define KLCLASS_MASK 0xf0
#define KLCLASS_NONE 0x00
#define KLCLASS_NODE 0x10 /* CPU, Memory and HUB board */
#define KLCLASS_CPU KLCLASS_NODE
#define KLCLASS_IO 0x20 /* BaseIO, 4 ch SCSI, ethernet, FDDI
and the non-graphics widget boards */
#define KLCLASS_ROUTER 0x30 /* Router board */
#define KLCLASS_MIDPLANE 0x40 /* We need to treat this as a board
so that we can record error info */
#define KLCLASS_IOBRICK 0x70 /* IP35 iobrick */
#define KLCLASS_MAX 8 /* Bump this if a new CLASS is added */
#define KLCLASS(_x) ((_x) & KLCLASS_MASK)
/*
* board types
*/
#define KLTYPE_MASK 0x0f
#define KLTYPE(_x) ((_x) & KLTYPE_MASK)
#define KLTYPE_SNIA (KLCLASS_CPU | 0x1)
#define KLTYPE_TIO (KLCLASS_CPU | 0x2)
#define KLTYPE_ROUTER (KLCLASS_ROUTER | 0x1)
#define KLTYPE_META_ROUTER (KLCLASS_ROUTER | 0x3)
#define KLTYPE_REPEATER_ROUTER (KLCLASS_ROUTER | 0x4)
#define KLTYPE_IOBRICK_XBOW (KLCLASS_MIDPLANE | 0x2)
#define KLTYPE_IOBRICK (KLCLASS_IOBRICK | 0x0)
#define KLTYPE_NBRICK (KLCLASS_IOBRICK | 0x4)
#define KLTYPE_PXBRICK (KLCLASS_IOBRICK | 0x6)
#define KLTYPE_IXBRICK (KLCLASS_IOBRICK | 0x7)
#define KLTYPE_CGBRICK (KLCLASS_IOBRICK | 0x8)
#define KLTYPE_OPUSBRICK (KLCLASS_IOBRICK | 0x9)
#define KLTYPE_SABRICK (KLCLASS_IOBRICK | 0xa)
#define KLTYPE_IABRICK (KLCLASS_IOBRICK | 0xb)
#define KLTYPE_PABRICK (KLCLASS_IOBRICK | 0xc)
#define KLTYPE_GABRICK (KLCLASS_IOBRICK | 0xd)
/*
* board structures
*/
#define MAX_COMPTS_PER_BRD 24
typedef struct lboard_s {
klconf_off_t brd_next_any; /* Next BOARD */
unsigned char struct_type; /* type of structure, local or remote */
unsigned char brd_type; /* type+class */
unsigned char brd_sversion; /* version of this structure */
unsigned char brd_brevision; /* board revision */
unsigned char brd_promver; /* board prom version, if any */
unsigned char brd_flags; /* Enabled, Disabled etc */
unsigned char brd_slot; /* slot number */
unsigned short brd_debugsw; /* Debug switches */
geoid_t brd_geoid; /* geo id */
partid_t brd_partition; /* Partition number */
unsigned short brd_diagval; /* diagnostic value */
unsigned short brd_diagparm; /* diagnostic parameter */
unsigned char brd_inventory; /* inventory history */
unsigned char brd_numcompts; /* Number of components */
nic_t brd_nic; /* Number in CAN */
nasid_t brd_nasid; /* passed parameter */
klconf_off_t brd_compts[MAX_COMPTS_PER_BRD]; /* pointers to COMPONENTS */
klconf_off_t brd_errinfo; /* Board's error information */
struct lboard_s *brd_parent; /* Logical parent for this brd */
char pad0[4];
unsigned char brd_confidence; /* confidence that the board is bad */
nasid_t brd_owner; /* who owns this board */
unsigned char brd_nic_flags; /* To handle 8 more NICs */
char pad1[24]; /* future expansion */
char brd_name[32];
nasid_t brd_next_same_host; /* host of next brd w/same nasid */
klconf_off_t brd_next_same; /* Next BOARD with same nasid */
} lboard_t;
#define KLCF_NUM_COMPS(_brd) ((_brd)->brd_numcompts)
#define NODE_OFFSET_TO_KLINFO(n,off) ((klinfo_t*) TO_NODE_CAC(n,off))
#define KLCF_NEXT(_brd) \
((_brd)->brd_next_same ? \
(NODE_OFFSET_TO_LBOARD((_brd)->brd_next_same_host, (_brd)->brd_next_same)): NULL)
#define KLCF_NEXT_ANY(_brd) \
((_brd)->brd_next_any ? \
(NODE_OFFSET_TO_LBOARD(NASID_GET(_brd), (_brd)->brd_next_any)): NULL)
#define KLCF_COMP(_brd, _ndx) \
((((_brd)->brd_compts[(_ndx)]) == 0) ? 0 : \
(NODE_OFFSET_TO_KLINFO(NASID_GET(_brd), (_brd)->brd_compts[(_ndx)])))
/*
* Generic info structure. This stores common info about a
* component.
*/
typedef struct klinfo_s { /* Generic info */
unsigned char struct_type; /* type of this structure */
unsigned char struct_version; /* version of this structure */
unsigned char flags; /* Enabled, disabled etc */
unsigned char revision; /* component revision */
unsigned short diagval; /* result of diagnostics */
unsigned short diagparm; /* diagnostic parameter */
unsigned char inventory; /* previous inventory status */
unsigned short partid; /* widget part number */
nic_t nic; /* Must be aligned properly */
unsigned char physid; /* physical id of component */
unsigned int virtid; /* virtual id as seen by system */
unsigned char widid; /* Widget id - if applicable */
nasid_t nasid; /* node number - from parent */
char pad1; /* pad out structure. */
char pad2; /* pad out structure. */
void *data;
klconf_off_t errinfo; /* component specific errors */
unsigned short pad3; /* pci fields have moved over to */
unsigned short pad4; /* klbri_t */
} klinfo_t;
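/*
 * Usage sketch (illustrative addition, not part of the original
 * source): walk every component recorded on a board. KLCF_COMP()
 * yields 0 for an empty slot, so each entry is checked before use.
 */
static inline void
klcf_walk_components_example(lboard_t *brd)
{
	int i;

	for (i = 0; i < KLCF_NUM_COMPS(brd); i++) {
		klinfo_t *kli = KLCF_COMP(brd, i);

		if (!kli)
			continue;
		/* kli->struct_type identifies the kind of component */
	}
}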
static inline lboard_t *find_lboard_any(lboard_t * start, unsigned char brd_type)
{
/* Search all boards stored on this node. */
while (start) {
if (start->brd_type == brd_type)
return start;
start = KLCF_NEXT_ANY(start);
}
/* Didn't find it. */
return (lboard_t *) NULL;
}
/* external declarations of Linux kernel functions. */
extern lboard_t *root_lboard[];
extern klinfo_t *find_component(lboard_t *brd, klinfo_t *kli, unsigned char type);
extern klinfo_t *find_first_component(lboard_t *brd, unsigned char type);
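/*
 * Combined usage sketch (illustrative only): locate the first SN node
 * board reachable from 'start', then its first component of the given
 * type.  'comp_type' would be one of the KLSTRUCT_* component codes
 * defined in the full header.
 */
static inline klinfo_t *
find_first_component_example(lboard_t *start, unsigned char comp_type)
{
	lboard_t *brd = find_lboard_any(start, KLTYPE_SNIA);

	return brd ? find_first_component(brd, comp_type) : NULL;
}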
#endif /* _ASM_IA64_SN_KLCONFIG_H */
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992-1997,2000-2004 Silicon Graphics, Inc. All Rights Reserved.
*/
#ifndef _ASM_IA64_SN_L1_H
#define _ASM_IA64_SN_L1_H
/* brick type response codes */
#define L1_BRICKTYPE_PX 0x23 /* # */
#define L1_BRICKTYPE_PE 0x25 /* % */
#define L1_BRICKTYPE_N_p0 0x26 /* & */
#define L1_BRICKTYPE_IP45 0x34 /* 4 */
#define L1_BRICKTYPE_IP41 0x35 /* 5 */
#define L1_BRICKTYPE_TWISTER 0x36 /* 6 */ /* IP53 & ROUTER */
#define L1_BRICKTYPE_IX 0x3d /* = */
#define L1_BRICKTYPE_IP34 0x61 /* a */
#define L1_BRICKTYPE_GA 0x62 /* b */
#define L1_BRICKTYPE_C 0x63 /* c */
#define L1_BRICKTYPE_OPUS_TIO 0x66 /* f */
#define L1_BRICKTYPE_I 0x69 /* i */
#define L1_BRICKTYPE_N 0x6e /* n */
#define L1_BRICKTYPE_OPUS 0x6f /* o */
#define L1_BRICKTYPE_P 0x70 /* p */
#define L1_BRICKTYPE_R 0x72 /* r */
#define L1_BRICKTYPE_CHI_CG 0x76 /* v */
#define L1_BRICKTYPE_X 0x78 /* x */
#define L1_BRICKTYPE_X2 0x79 /* y */
#define L1_BRICKTYPE_SA 0x5e /* ^ */ /* TIO bringup brick */
#define L1_BRICKTYPE_PA 0x6a /* j */
#define L1_BRICKTYPE_IA 0x6b /* k */
#endif /* _ASM_IA64_SN_L1_H */
#ifndef _ASM_IA64_SN_LEDS_H
#define _ASM_IA64_SN_LEDS_H
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
* Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
* Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
*/
#include <asm/sn/addrs.h>
#include <asm/sn/pda.h>
#include <asm/sn/sn2/shub.h>
#define LED0 (LOCAL_MMR_ADDR(SH_REAL_JUNK_BUS_LED0))
#define LED_CPU_SHIFT 16
......
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_MODULE_H
#define _ASM_IA64_SN_MODULE_H
/* parameter for format_module_id() */
#define MODULE_FORMAT_BRIEF 1
#define MODULE_FORMAT_LONG 2
#define MODULE_FORMAT_LCD 3
/*
* Module id format
*
* 31-16 Rack ID (encoded class, group, number - 16-bit unsigned int)
* 15-8 Brick type (8-bit ascii character)
* 7-0 Bay (brick position in rack (0-63) - 8-bit unsigned int)
*
*/
/*
* Macros for getting the brick type
*/
#define MODULE_BTYPE_MASK 0xff00
#define MODULE_BTYPE_SHFT 8
#define MODULE_GET_BTYPE(_m) (((_m) & MODULE_BTYPE_MASK) >> MODULE_BTYPE_SHFT)
#define MODULE_BT_TO_CHAR(_b) ((char)(_b))
#define MODULE_GET_BTCHAR(_m) (MODULE_BT_TO_CHAR(MODULE_GET_BTYPE(_m)))
/*
* Macros for getting the rack ID.
*/
#define MODULE_RACK_MASK 0xffff0000
#define MODULE_RACK_SHFT 16
#define MODULE_GET_RACK(_m) (((_m) & MODULE_RACK_MASK) >> MODULE_RACK_SHFT)
/*
* Macros for getting the brick position
*/
#define MODULE_BPOS_MASK 0x00ff
#define MODULE_BPOS_SHFT 0
#define MODULE_GET_BPOS(_m) (((_m) & MODULE_BPOS_MASK) >> MODULE_BPOS_SHFT)
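/*
 * Worked example (added for illustration): packing and unpacking a
 * module id by hand.  0x007b7005 encodes rack 123 (0x7b), brick type
 * 'p' (0x70) and bay 5, and the accessors above recover each field:
 *
 *	MODULE_GET_RACK(0x007b7005)   == 123
 *	MODULE_GET_BTCHAR(0x007b7005) == 'p'
 *	MODULE_GET_BPOS(0x007b7005)   == 5
 */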
/*
* Macros for encoding and decoding rack IDs
* A rack number consists of three parts:
* class (0==CPU/mixed, 1==I/O), group, number
*
* Rack number is stored just as it is displayed on the screen:
* a 3-decimal-digit number.
*/
#define RACK_CLASS_DVDR 100
#define RACK_GROUP_DVDR 10
#define RACK_NUM_DVDR 1
#define RACK_CREATE_RACKID(_c, _g, _n) ((_c) * RACK_CLASS_DVDR + \
(_g) * RACK_GROUP_DVDR + (_n) * RACK_NUM_DVDR)
#define RACK_GET_CLASS(_r) ((_r) / RACK_CLASS_DVDR)
#define RACK_GET_GROUP(_r) (((_r) - RACK_GET_CLASS(_r) * \
RACK_CLASS_DVDR) / RACK_GROUP_DVDR)
#define RACK_GET_NUM(_r) (((_r) - RACK_GET_CLASS(_r) * \
RACK_CLASS_DVDR - RACK_GET_GROUP(_r) * \
RACK_GROUP_DVDR) / RACK_NUM_DVDR)
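/*
 * Worked example (illustrative addition): with the decimal encoding,
 * an I/O-class rack in group 2, number 3 is simply rack "123":
 *
 *	RACK_CREATE_RACKID(1, 2, 3) == 123
 *	RACK_GET_CLASS(123) == 1
 *	RACK_GET_GROUP(123) == 2
 *	RACK_GET_NUM(123)   == 3
 */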
/*
* Macros for encoding and decoding rack IDs
* A rack number consists of three parts:
* class 1 bit, 0==CPU/mixed, 1==I/O
* group 2 bits for CPU/mixed, 3 bits for I/O
* number 3 bits for CPU/mixed, 2 bits for I/O (1 based)
*/
#define RACK_GROUP_BITS(_r) (RACK_GET_CLASS(_r) ? 3 : 2)
#define RACK_NUM_BITS(_r) (RACK_GET_CLASS(_r) ? 2 : 3)
#define RACK_CLASS_MASK(_r) 0x20
#define RACK_CLASS_SHFT(_r) 5
#define RACK_ADD_CLASS(_r, _c) \
((_r) |= (_c) << RACK_CLASS_SHFT(_r) & RACK_CLASS_MASK(_r))
#define RACK_GROUP_SHFT(_r) RACK_NUM_BITS(_r)
#define RACK_GROUP_MASK(_r) \
( (((unsigned)1<<RACK_GROUP_BITS(_r)) - 1) << RACK_GROUP_SHFT(_r) )
#define RACK_ADD_GROUP(_r, _g) \
((_r) |= (_g) << RACK_GROUP_SHFT(_r) & RACK_GROUP_MASK(_r))
#define RACK_NUM_SHFT(_r) 0
#define RACK_NUM_MASK(_r) \
( (((unsigned)1<<RACK_NUM_BITS(_r)) - 1) << RACK_NUM_SHFT(_r) )
#define RACK_ADD_NUM(_r, _n) \
((_r) |= ((_n) - 1) << RACK_NUM_SHFT(_r) & RACK_NUM_MASK(_r))
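/*
 * Layout sketch (added for illustration, derived from the bit widths
 * documented above):
 *
 *	CPU/mixed (class 0): bit 5 = 0, bits 4:3 = group, bits 2:0 = number - 1
 *	I/O       (class 1): bit 5 = 1, bits 4:2 = group, bits 1:0 = number - 1
 */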
/*
* Brick type definitions
*/
#define MAX_BRICK_TYPES 256 /* brick type is stored as uchar */
extern char brick_types[];
#define MODULE_CBRICK 0
#define MODULE_RBRICK 1
#define MODULE_IBRICK 2
#define MODULE_KBRICK 3
#define MODULE_XBRICK 4
#define MODULE_DBRICK 5
#define MODULE_PBRICK 6
#define MODULE_NBRICK 7
#define MODULE_PEBRICK 8
#define MODULE_PXBRICK 9
#define MODULE_IXBRICK 10
#define MODULE_CGBRICK 11
#define MODULE_OPUSBRICK 12
#define MODULE_SABRICK 13 /* TIO BringUp Brick */
#define MODULE_IABRICK 14
#define MODULE_PABRICK 15
#define MODULE_GABRICK 16
#define MODULE_OPUS_TIO 17 /* OPUS TIO Riser */
extern void format_module_id(char *, moduleid_t, int);
#endif /* _ASM_IA64_SN_MODULE_H */
......@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
* Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_NODEPDA_H
#define _ASM_IA64_SN_NODEPDA_H
......@@ -14,9 +14,7 @@
#include <asm/sn/intr.h>
#include <asm/sn/router.h>
#include <asm/sn/pda.h>
#include <asm/sn/module.h>
#include <asm/sn/bte.h>
#include <asm/sn/sn2/arch.h>
/*
* NUMA Node-Specific Data structures are defined in this file.
......@@ -32,32 +30,8 @@
* This structure provides a convenient way of keeping together
* all per-node data structures.
*/
struct nodepda_s {
vertex_hdl_t xbow_vhdl;
nasid_t xbow_peer; /* NASID of our peer hub on xbow */
struct semaphore xbow_sema; /* Sema for xbow synchronization */
slotid_t slotdesc;
geoid_t geoid;
module_t *module; /* Pointer to containing module */
xwidgetnum_t basew_id;
vertex_hdl_t basew_xc;
int hubticks;
int num_routers; /* XXX not setup! Total routers in the system */
char *hwg_node_name; /* hwgraph node name */
vertex_hdl_t node_vertex; /* Hwgraph vertex for this node */
void *pdinfo; /* Platform-dependent per-node info */
nodepda_router_info_t *npda_rip_first;
nodepda_router_info_t **npda_rip_last;
spinlock_t bist_lock;
/*
......@@ -76,17 +50,6 @@ struct nodepda_s {
typedef struct nodepda_s nodepda_t;
struct irqpda_s {
int num_irq_used;
char irq_flags[NR_IRQS];
struct pci_dev *device_dev[NR_IRQS];
char share_count[NR_IRQS];
struct pci_dev *curr;
};
typedef struct irqpda_s irqpda_t;
/*
* Access Functions for node PDA.
* Since there is one nodepda for each node, we need a convenient mechanism
......@@ -104,32 +67,10 @@ typedef struct irqpda_s irqpda_t;
#define nodepda pda->p_nodepda /* Ptr to this node's PDA */
#define NODEPDA(cnode) (nodepda->pernode_pdaindr[cnode])
/*
* Macros to access data structures inside nodepda
*/
#define NODE_MODULEID(cnode) geo_module((NODEPDA(cnode)->geoid))
#define NODE_SLOTID(cnode) (NODEPDA(cnode)->slotdesc)
/*
* Quickly convert a compact node ID into a hwgraph vertex
*/
#define cnodeid_to_vertex(cnodeid) (NODEPDA(cnodeid)->node_vertex)
/*
* Check whether, given a compact node id, the corresponding node has
* all of its cpus disabled.
*/
#define is_headless_node(cnode) (nr_cpus_node(cnode) == 0)
/*
* Check whether, given a node vertex handle, the corresponding node has
* all of its cpus disabled.
*/
#define is_headless_node_vertex(_nodevhdl) \
is_headless_node(nodevertex_to_cnodeid(_nodevhdl))
#endif /* _ASM_IA64_SN_NODEPDA_H */
......@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
* Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_PDA_H
#define _ASM_IA64_SN_PDA_H
......@@ -49,13 +49,14 @@ typedef struct pda_s {
volatile unsigned long *pio_shub_war_cam_addr;
volatile unsigned long *mem_write_status_addr;
struct bteinfo_s *cpu_bte_if[BTES_PER_NODE]; /* cpu interface order */
unsigned long sn_soft_irr[4];
unsigned long sn_in_service_ivecs[4];
short cnodeid_to_nasid_table[MAX_NUMNODES];
int sn_lb_int_war_ticks;
int sn_last_irq;
int sn_first_irq;
int sn_num_irqs; /* number of irqs targeted for this cpu */
} pda_t;
......
......@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992 - 1997, 2000-2003 Silicon Graphics, Inc. All rights reserved.
* Copyright (C) 1992 - 1997, 2000-2004 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_ROUTER_H
......@@ -17,10 +17,7 @@
#ifndef __ASSEMBLY__
#include <asm/sn/vector.h>
#include <asm/sn/slotnum.h>
#include <asm/sn/arch.h>
#include <asm/sn/sgi.h>
typedef uint64_t router_reg_t;
......@@ -411,7 +408,7 @@ typedef signed char port_no_t; /* Type for router port number */
typedef struct router_map_ent_s {
uint64_t nic;
moduleid_t module;
slotid_t slot;
char slot;
} router_map_ent_t;
struct rr_status_error_fmt {
......@@ -479,16 +476,16 @@ typedef struct router_info_s {
char ri_leds; /* Current LED bitmap */
char ri_portmask; /* Active port bitmap */
router_reg_t ri_stat_rev_id; /* Status rev ID value */
net_vec_t ri_vector; /* vector from guardian to router */
uint64_t ri_vector; /* vector from guardian to router */
int ri_writeid; /* router's vector write ID */
int64_t ri_timebase; /* Time of first sample */
int64_t ri_timestamp; /* Time of last sample */
router_port_info_t ri_port[MAX_ROUTER_PORTS]; /* per port info */
moduleid_t ri_module; /* Which module are we in? */
slotid_t ri_slotnum; /* Which slot are we in? */
char ri_slotnum; /* Which slot are we in? */
router_reg_t ri_glbl_parms[GLBL_PARMS_REGS];
/* Global parms0&1 register contents*/
vertex_hdl_t ri_vertex; /* hardware graph vertex */
void * ri_vertex; /* hardware graph vertex */
router_reg_t ri_prot_conf; /* protection config. register */
int64_t ri_per_minute; /* Ticks per minute */
......@@ -500,7 +497,7 @@ typedef struct router_info_s {
* the bottom of the structure, below the user stuff.
*/
char ri_hist_type; /* histogram type */
vertex_hdl_t ri_guardian; /* guardian node for the router */
void * ri_guardian; /* guardian node for the router */
int64_t ri_last_print; /* When did we last print */
char ri_print; /* Should we print */
char ri_just_blink; /* Should we blink the LEDs */
......@@ -509,7 +506,7 @@ typedef struct router_info_s {
int64_t ri_deltatime; /* Time it took to sample */
#endif
spinlock_t ri_lock; /* Lock for access to router info */
net_vec_t *ri_vecarray; /* Pointer to array of vectors */
uint64_t *ri_vecarray; /* Pointer to array of vectors */
struct lboard_s *ri_brd; /* Pointer to board structure */
char * ri_name; /* This board's hwg path */
unsigned char ri_port_maint[MAX_ROUTER_PORTS]; /* should we send a
......@@ -526,13 +523,13 @@ typedef struct router_info_s {
* Router info hanging in the nodepda
*/
typedef struct nodepda_router_info_s {
vertex_hdl_t router_vhdl; /* vertex handle of the router */
void * router_vhdl; /* vertex handle of the router */
short router_port; /* port thru which we entered */
short router_portmask;
moduleid_t router_module; /* module in which router is there */
slotid_t router_slot; /* router slot */
char router_slot; /* router slot */
unsigned char router_type; /* kind of router */
net_vec_t router_vector; /* vector from the guardian node */
uint64_t router_vector; /* vector from the guardian node */
router_info_t *router_infop; /* info hanging off the hwg vertex */
struct nodepda_router_info_s *router_next;
......@@ -560,7 +557,7 @@ typedef struct router_elt_s {
/* vector route from the master hub to
* this router.
*/
net_vec_t vec;
uint64_t vec;
/* port status */
uint64_t status;
char port_status[MAX_ROUTER_PORTS + 1];
......@@ -570,11 +567,11 @@ typedef struct router_elt_s {
*/
struct {
/* vertex handle for the router */
vertex_hdl_t vhdl;
void * vhdl;
/* guardian for this router */
vertex_hdl_t guard;
void * guard;
/* vector router from the guardian to the router */
net_vec_t vec;
uint64_t vec;
} k_elt;
} u;
/* easy to use port status interpretation */
......@@ -618,24 +615,4 @@ typedef struct router_queue_s {
#define RTABLE_SHFT(_L) (4 * ((_L) - 1))
#define RTABLE_MASK(_L) (0x7UL << RTABLE_SHFT(_L))
#define ROUTERINFO_STKSZ 4096
#ifndef __ASSEMBLY__
int router_reg_read(router_info_t *rip, int regno, router_reg_t *val);
int router_reg_write(router_info_t *rip, int regno, router_reg_t val);
int router_get_info(vertex_hdl_t routerv, router_info_t *, int);
int router_set_leds(router_info_t *rip);
void router_print_state(router_info_t *rip, int level,
void (*pf)(int, char *, ...), int print_where);
void capture_router_stats(router_info_t *rip);
int probe_routers(void);
void get_routername(unsigned char brd_type, char *rtrname);
void router_guardians_set(vertex_hdl_t hwgraph_root);
int router_hist_reselect(router_info_t *, int64_t);
#endif /* __ASSEMBLY__ */
#endif /* _ASM_IA64_SN_ROUTER_H */
......@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2002-2003 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (C) 2002-2004 Silicon Graphics, Inc. All Rights Reserved.
*/
#ifndef _ASM_IA64_SN_RW_MMR_H
#define _ASM_IA64_SN_RW_MMR_H
......
#ifndef _ASM_IA64_SN_SIMULATOR_H
#define _ASM_IA64_SN_SIMULATOR_H
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
* Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
* Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/config.h>
#ifdef CONFIG_IA64_SGI_SN_SIM
......
......@@ -7,7 +7,6 @@
* Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_SN_CPUID_H
#define _ASM_IA64_SN_SN_CPUID_H
......@@ -84,6 +83,7 @@
*/
#ifndef CONFIG_SMP
#define cpu_logical_id(cpu) 0
#define cpu_physical_id(cpuid) ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff)
#endif
......@@ -93,7 +93,7 @@
*/
#define cpu_physical_id_to_nasid(cpi) ((cpi) & 0xfff)
#define cpu_physical_id_to_slice(cpi) (((cpi) >> 12) & 3)
#define cpu_physical_id_to_coherence_id(cpi) (cpu_physical_id_to_nasid(cpi) >> 9)
#define cpu_physical_id_to_coherence_id(cpi) (((cpi) & 0x600) >> 9)
#define get_nasid() ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xfff)
#define get_slice() ((ia64_getreg(_IA64_REG_CR_LID) >> 28) & 0xf)
#define get_node_number(addr) (((unsigned long)(addr)>>38) & 0x7ff)
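/*
 * Worked example (added for illustration): a physical id of 0x3005
 * decomposes with the macros above as
 *
 *	cpu_physical_id_to_nasid(0x3005)        == 0x005
 *	cpu_physical_id_to_slice(0x3005)        == 3
 *	cpu_physical_id_to_coherence_id(0x3005) == 0
 */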
......@@ -177,7 +177,8 @@ extern short physical_node_map[]; /* indexed by nasid to get cnode */
* cpuid_to_coherence_id - convert a cpuid to the coherence domain id it
* resides on
*/
#define cpuid_to_coherence_id(cpuid) cpu_physical_id_to_coherence_id(cpu_physical_id(cpuid))
#endif /* _ASM_IA64_SN_SN_CPUID_H */
......@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992-1997,1999-2003 Silicon Graphics, Inc. All rights reserved.
* Copyright (C) 1992-1997,1999-2004 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_SN_FRU_H
#define _ASM_IA64_SN_SN_FRU_H
......
......@@ -17,8 +17,6 @@
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/arch.h>
#include <asm/sn/nodepda.h>
#include <asm/sn/klconfig.h>
// SGI Specific Calls
#define SN_SAL_POD_MODE 0x02000001
......@@ -62,7 +60,19 @@
#define SN_SAL_SYSCTL_IOBRICK_PCI_OP 0x02000042 // reentrant
#define SN_SAL_IROUTER_OP 0x02000043
#define SN_SAL_IOIF_INTERRUPT 0x0200004a
#define SN_SAL_HWPERF_OP 0x02000050 // lock
#define SN_SAL_IOIF_ERROR_INTERRUPT 0x02000051
#define SN_SAL_IOIF_SLOT_ENABLE 0x02000053
#define SN_SAL_IOIF_SLOT_DISABLE 0x02000054
#define SN_SAL_IOIF_GET_HUBDEV_INFO 0x02000055
#define SN_SAL_IOIF_GET_PCIBUS_INFO 0x02000056
#define SN_SAL_IOIF_GET_PCIDEV_INFO 0x02000057
#define SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST 0x02000058
#define SN_SAL_HUB_ERROR_INTERRUPT 0x02000060
/*
* Service-specific constants
......@@ -77,16 +87,9 @@
#define SAL_CONSOLE_INTR_XMIT 1 /* output interrupt */
#define SAL_CONSOLE_INTR_RECV 2 /* input interrupt */
#ifdef CONFIG_HOTPLUG_PCI_SGI
/* power up / power down / reset a PCI slot or bus */
#define SAL_SYSCTL_PCI_POWER_UP 0
#define SAL_SYSCTL_PCI_POWER_DOWN 1
#define SAL_SYSCTL_PCI_RESET 2
/* what type of I/O brick? */
#define SAL_SYSCTL_IO_XTALK 0 /* connected via a compute node */
#endif /* CONFIG_HOTPLUG_PCI_SGI */
/* interrupt handling */
#define SAL_INTR_ALLOC 1
#define SAL_INTR_FREE 2
/*
* IRouter (i.e. generalized system controller) operations
......@@ -116,19 +119,6 @@
#define SALRET_INVALID_ARG (-2)
#define SALRET_ERROR (-3)
/*
* SN_SAL_SET_ERROR_HANDLING_FEATURES bit settings
*/
enum
{
/* if "rz always" is set, have the mca slaves call os_init_slave */
SN_SAL_EHF_MCA_SLV_TO_OS_INIT_SLV=0,
/* do not rz on tlb checks, even if "rz always" is set */
SN_SAL_EHF_NO_RZ_TLBC,
/* do not rz on PIO reads to I/O space, even if "rz always" is set */
SN_SAL_EHF_NO_RZ_IO_READ,
};
/**
* sn_sal_rev_major - get the major SGI SAL revision number
......@@ -164,10 +154,8 @@ sn_sal_rev_minor(void)
* Specify the minimum PROM revision required for this kernel.
* Note that they're stored in hex format...
*/
#define SN_SAL_MIN_MAJOR 0x3 /* SN2 kernels need at least PROM 3.40 */
#define SN_SAL_MIN_MINOR 0x40
u64 ia64_sn_probe_io_slot(long paddr, long size, void *data_ptr);
#define SN_SAL_MIN_MAJOR 0x4 /* SN2 kernels need at least PROM 4.0 */
#define SN_SAL_MIN_MINOR 0x0
/*
* Returns the master console nasid; if the call fails, returns an illegal
......@@ -325,7 +313,7 @@ ia64_sn_plat_specific_err_print(int (*hook)(const char*, ...), char *rec)
ret_stuff.v0 = 0;
ret_stuff.v1 = 0;
ret_stuff.v2 = 0;
SAL_CALL_REENTRANT(ret_stuff, SN_SAL_PRINT_ERROR, (uint64_t)hook, (uint64_t)rec, 0, 0, 0, 0, 0);
SAL_CALL_NOLOCK(ret_stuff, SN_SAL_PRINT_ERROR, (uint64_t)hook, (uint64_t)rec, 0, 0, 0, 0, 0);
return ret_stuff.status;
}
......@@ -646,12 +634,12 @@ sn_change_memprotect(u64 paddr, u64 len, u64 perms, u64 *nasid_array)
unsigned long irq_flags;
cnodeid = nasid_to_cnodeid(get_node_number(paddr));
spin_lock(&NODEPDA(cnodeid)->bist_lock);
// spin_lock(&NODEPDA(cnodeid)->bist_lock);
local_irq_save(irq_flags);
SAL_CALL_NOLOCK(ret_stuff, SN_SAL_MEMPROTECT, paddr, len, nasid_array,
perms, 0, 0, 0);
local_irq_restore(irq_flags);
spin_unlock(&NODEPDA(cnodeid)->bist_lock);
// spin_unlock(&NODEPDA(cnodeid)->bist_lock);
return ret_stuff.status;
}
#define SN_MEMPROT_ACCESS_CLASS_0 0x14a080
......@@ -695,7 +683,7 @@ ia64_sn_fru_capture(void)
*/
static inline u64
ia64_sn_sysctl_iobrick_pci_op(nasid_t n, u64 connection_type,
u64 bus, slotid_t slot,
u64 bus, char slot,
u64 action)
{
struct ia64_sal_retval rv = {0, 0, 0, 0};
......@@ -707,26 +695,6 @@ ia64_sn_sysctl_iobrick_pci_op(nasid_t n, u64 connection_type,
return 0;
}
/*
* Tell the prom how the OS wants to handle specific error features.
* It takes an array of 7 u64.
*/
static inline u64
ia64_sn_set_error_handling_features(const u64 *feature_bits)
{
struct ia64_sal_retval rv = {0, 0, 0, 0};
SAL_CALL_REENTRANT(rv, SN_SAL_SET_ERROR_HANDLING_FEATURES,
feature_bits[0],
feature_bits[1],
feature_bits[2],
feature_bits[3],
feature_bits[4],
feature_bits[5],
feature_bits[6]);
return rv.status;
}
/*
* Open a subchannel for sending arbitrary data to the system
......
......@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2002-2003 Silicon Graphics, Inc. All Rights Reserved.
* Copyright (c) 2002-2004 Silicon Graphics, Inc. All Rights Reserved.
*/
#ifndef _ASM_IA64_SN_SNDRV_H
......@@ -35,18 +35,6 @@
#define SNDRV_SYNERGY_ENABLE 34
#define SNDRV_SYNERGY_FREQ 35
/* see shubstats_ioctl() */
#define SNDRV_SHUB_INFOSIZE 40
#define SNDRV_SHUB_CONFIGURE 41
#define SNDRV_SHUB_RESETSTATS 42
#define SNDRV_SHUB_GETSTATS 43
#define SNDRV_SHUB_GETNASID 44
#define SNDRV_SHUB_GETMMR32 45
#define SNDRV_SHUB_GETMMR64 46
#define SNDRV_SHUB_GETMMR64_IO 47
#define SNDRV_SHUB_PUTMMR64 48
#define SNDRV_SHUB_PUTMMR64_IO 49
/* Devices */
#define SNDRV_UKNOWN_DEVICE -1
#define SNDRV_ROUTER_DEVICE 1
......