Commit d2d20c26 authored by Linus Torvalds's avatar Linus Torvalds

Merge http://lia64.bkbits.net/to-linus-2.5

into home.osdl.org:/home/torvalds/v2.5/linux
parents e176cdfe 6853823b
......@@ -1849,18 +1849,19 @@ static struct file_operations ioc_map_fops = {
/*
 * ioc_proc_init - create /proc/bus/mckinley entries for the IOC.
 *
 * The scraped text interleaved the pre- and post-merge versions of this
 * function, creating the directory and both proc entries twice and
 * dereferencing a possibly-NULL dir; this is the deduplicated form.
 * NOTE(review): assumes the caller only invokes this when ioc_list is
 * non-NULL (sba_init bails out early when it is) — confirm at call site.
 */
static void __init
ioc_proc_init(void)
{
	struct proc_dir_entry *dir, *entry;

	dir = proc_mkdir("bus/mckinley", 0);
	if (!dir)
		return;

	/* per-IOC statistics, named after the first IOC in the list */
	entry = create_proc_entry(ioc_list->name, 0, dir);
	if (entry)
		entry->proc_fops = &ioc_fops;

	/* the IOVA bitmap dump */
	entry = create_proc_entry("bitmap", 0, dir);
	if (entry)
		entry->proc_fops = &ioc_map_fops;
}
#endif
......@@ -1946,6 +1947,8 @@ static int __init
sba_init(void)
{
acpi_bus_register_driver(&acpi_sba_ioc_driver);
if (!ioc_list)
return 0;
#ifdef CONFIG_PCI
{
......
......@@ -36,6 +36,7 @@
#include <asm/ia32.h>
#include <asm/machvec.h>
#include <asm/mca.h>
#include <asm/meminit.h>
#include <asm/page.h>
#include <asm/patch.h>
#include <asm/pgtable.h>
......@@ -83,91 +84,12 @@ unsigned long ia64_max_iommu_merge_mask = ~0UL;
char saved_command_line[COMMAND_LINE_SIZE]; /* used in proc filesystem */
/*
* Entries defined so far:
* - boot param structure itself
* - memory map
* - initrd (optional)
* - command line string
* - kernel code & data
*
* More could be added if necessary
*/
#define IA64_MAX_RSVD_REGIONS 5
struct rsvd_region {
unsigned long start; /* virtual address of beginning of element */
unsigned long end; /* virtual address of end of element + 1 */
};
/*
* We use a special marker for the end of memory and it uses the extra (+1) slot
*/
static struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1];
static int num_rsvd_regions;
#define IGNORE_PFN0 1 /* XXX fix me: ignore pfn 0 until TLB miss handler is updated... */
#ifndef CONFIG_DISCONTIGMEM
static unsigned long bootmap_start; /* physical address where the bootmem map is located */
/*
 * efi_memmap_walk() callback: raise *arg (the running maximum PFN) to
 * cover the page frame holding the last byte of [start, end).
 */
static int
find_max_pfn (unsigned long start, unsigned long end, void *arg)
{
	unsigned long *highest = arg;
	unsigned long last_pfn;

	last_pfn = (PAGE_ALIGN(end - 1) - PAGE_OFFSET) >> PAGE_SHIFT;
	if (*highest < last_pfn)
		*highest = last_pfn;
	return 0;
}
struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1];
int num_rsvd_regions;
#else /* CONFIG_DISCONTIGMEM */
/*
* efi_memmap_walk() knows nothing about layout of memory across nodes. Find
* out to which node a block of memory belongs. Ignore memory that we cannot
* identify, and split blocks that run across multiple nodes.
*
* Take this opportunity to round the start address up and the end address
* down to page boundaries.
*/
/*
 * call_pernode_memory - split a memory range along node memory blocks.
 *
 * efi_memmap_walk() knows nothing about node layout, so clip [start, end)
 * against each node_memblk entry and invoke @arg — a callback of the form
 * func(start, len, nid, bank) — once per intersecting block.  Without
 * SRAT data (num_memblks == 0) everything is handed to node 0, bank 0.
 */
void
call_pernode_memory (unsigned long start, unsigned long end, void *arg)
{
	unsigned long rs, re;
	void (*func)(unsigned long, unsigned long, int, int);
	int i;

	/* round start up and end down to page boundaries */
	start = PAGE_ALIGN(start);
	end &= PAGE_MASK;
	if (start >= end)
		return;

	func = arg;

	if (!num_memblks) {
		/*
		 * This machine doesn't have SRAT, so call func with
		 * nid=0, bank=0.
		 */
		if (start < end)
			(*func)(start, end - start, 0, 0);
		return;
	}

	for (i = 0; i < num_memblks; i++) {
		/* intersect the range with this block; skip empty overlaps */
		rs = max(start, node_memblk[i].start_paddr);
		re = min(end, node_memblk[i].start_paddr+node_memblk[i].size);
		if (rs < re)
			(*func)(rs, re-rs, node_memblk[i].nid,
				node_memblk[i].bank);
	}
}
#endif /* CONFIG_DISCONTIGMEM */
/*
* Filter incoming memory segments based on the primitive map created from the boot
......@@ -215,48 +137,6 @@ filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
return 0;
}
#ifndef CONFIG_DISCONTIGMEM
/*
* Find a place to put the bootmap and return its starting address in bootmap_start.
* This address must be page-aligned.
*/
/*
 * find_bootmap_location - efi_memmap_walk() callback that looks for a
 * hole large enough to hold the bootmem map.
 *
 * @start/@end delimit one free memory range; @arg points to the number of
 * bytes needed.  Scans the gaps between the (sorted) reserved regions,
 * stores the winning physical address in the file-scope bootmap_start,
 * and returns 1 to stop the walk; returns 0 to keep walking.
 */
static int
find_bootmap_location (unsigned long start, unsigned long end, void *arg)
{
	unsigned long needed = *(unsigned long *)arg;
	unsigned long range_start, range_end, free_start;
	int i;

#if IGNORE_PFN0
	/* pfn 0 is unusable for now — see the IGNORE_PFN0 note above */
	if (start == PAGE_OFFSET) {
		start += PAGE_SIZE;
		if (start >= end) return 0;
	}
#endif

	free_start = PAGE_OFFSET;

	for (i = 0; i < num_rsvd_regions; i++) {
		/* candidate gap: [free_start, rsvd_region[i].start) clipped to [start, end) */
		range_start = max(start, free_start);
		range_end = min(end, rsvd_region[i].start & PAGE_MASK);

		if (range_end <= range_start) continue; /* skip over empty range */

		if (range_end - range_start >= needed) {
			bootmap_start = __pa(range_start);
			return 1;	/* done */
		}

		/* nothing more available in this segment */
		if (range_end == end) return 0;

		free_start = PAGE_ALIGN(rsvd_region[i].end);
	}
	return 0;
}
#endif /* !CONFIG_DISCONTIGMEM */
static void
sort_regions (struct rsvd_region *rsvd_region, int max)
{
......@@ -275,10 +155,16 @@ sort_regions (struct rsvd_region *rsvd_region, int max)
}
}
static void
find_memory (void)
/**
* reserve_memory - setup reserved memory areas
*
* Setup the reserved memory areas set aside for the boot parameters,
* initrd, etc. There are currently %IA64_MAX_RSVD_REGIONS defined,
* see include/asm-ia64/meminit.h if you need to define more.
*/
void
reserve_memory (void)
{
unsigned long bootmap_size;
int n = 0;
/*
......@@ -317,36 +203,17 @@ find_memory (void)
num_rsvd_regions = n;
sort_regions(rsvd_region, num_rsvd_regions);
}
#ifdef CONFIG_DISCONTIGMEM
{
extern void discontig_mem_init (void);
bootmap_size = max_pfn = 0; /* stop gcc warnings */
discontig_mem_init();
}
#else /* !CONFIG_DISCONTIGMEM */
/* first find highest page frame number */
max_pfn = 0;
efi_memmap_walk(find_max_pfn, &max_pfn);
/* how many bytes to cover all the pages */
bootmap_size = bootmem_bootmap_pages(max_pfn) << PAGE_SHIFT;
/* look for a location to hold the bootmap */
bootmap_start = ~0UL;
efi_memmap_walk(find_bootmap_location, &bootmap_size);
if (bootmap_start == ~0UL)
panic("Cannot find %ld bytes for bootmap\n", bootmap_size);
bootmap_size = init_bootmem(bootmap_start >> PAGE_SHIFT, max_pfn);
/* Free all available memory, then mark bootmem-map as being in use. */
efi_memmap_walk(filter_rsvd_memory, free_bootmem);
reserve_bootmem(bootmap_start, bootmap_size);
#endif /* !CONFIG_DISCONTIGMEM */
/**
* find_initrd - get initrd parameters from the boot parameter structure
*
* Grab the initrd start and end from the boot parameter struct given us by
* the boot loader.
*/
void
find_initrd (void)
{
#ifdef CONFIG_BLK_DEV_INITRD
if (ia64_boot_param->initrd_start) {
initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start);
......
......@@ -335,7 +335,8 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
if ((fpu_swa_count < 4) && !(current->thread.flags & IA64_THREAD_FPEMU_NOPRINT)) {
last_time = jiffies;
++fpu_swa_count;
printk(KERN_WARNING "%s(%d): floating-point assist fault at ip %016lx, isr %016lx\n",
printk(KERN_WARNING
"%s(%d): floating-point assist fault at ip %016lx, isr %016lx\n",
current->comm, current->pid, regs->cr_iip + ia64_psr(regs)->ri, isr);
}
......
......@@ -7,3 +7,6 @@ obj-y := init.o fault.o tlb.o extable.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_NUMA) += numa.o
obj-$(CONFIG_DISCONTIGMEM) += discontig.o
ifndef CONFIG_DISCONTIGMEM
obj-y += contig.o
endif
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1998-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Stephane Eranian <eranian@hpl.hp.com>
* Copyright (C) 2000, Rohit Seth <rohit.seth@intel.com>
* Copyright (C) 1999 VA Linux Systems
* Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
* Copyright (C) 2003 Silicon Graphics, Inc. All rights reserved.
*
* Routines used by ia64 machines with contiguous (or virtually contiguous)
* memory.
*/
#include <linux/config.h>
#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <asm/meminit.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
/**
* show_mem - display a memory statistics summary
*
* Just walks the pages in the system and describes where they're allocated.
*/
/*
 * show_mem - display a memory statistics summary.
 *
 * Walks every entry in mem_map and counts reserved, swap-cached and
 * shared pages, then prints the totals along with the free-swap and
 * page-table-cache figures.
 */
void
show_mem (void)
{
	int n_total = 0, n_reserved = 0;
	int n_shared = 0, n_cached = 0;
	int idx;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6dkB\n", nr_swap_pages<<(PAGE_SHIFT-10));

	for (idx = max_mapnr - 1; idx >= 0; idx--) {
		n_total++;
		if (PageReserved(mem_map + idx))
			n_reserved++;
		else if (PageSwapCache(mem_map + idx))
			n_cached++;
		else if (page_count(mem_map + idx))
			n_shared += page_count(mem_map + idx) - 1;
	}

	printk("%d pages of RAM\n", n_total);
	printk("%d reserved pages\n", n_reserved);
	printk("%d pages shared\n", n_shared);
	printk("%d pages swap cached\n", n_cached);
	printk("%ld pages in page table cache\n", pgtable_cache_size);
}
/* physical address where the bootmem map is located */
unsigned long bootmap_start;
/**
* find_max_pfn - adjust the maximum page number callback
* @start: start of range
* @end: end of range
* @arg: address of pointer to global max_pfn variable
*
* Passed as a callback function to efi_memmap_walk() to determine the highest
* available page frame number in the system.
*/
int
find_max_pfn (unsigned long start, unsigned long end, void *arg)
{
	unsigned long *max_pfnp = arg, pfn;

	/* PFN of the page frame holding the last byte of [start, end) */
	pfn = (PAGE_ALIGN(end - 1) - PAGE_OFFSET) >> PAGE_SHIFT;
	if (pfn > *max_pfnp)
		*max_pfnp = pfn;
	return 0;
}
/**
* find_bootmap_location - callback to find a memory area for the bootmap
* @start: start of region
* @end: end of region
* @arg: unused callback data
*
* Find a place to put the bootmap and return its starting address in
* bootmap_start. This address must be page-aligned.
*/
int
find_bootmap_location (unsigned long start, unsigned long end, void *arg)
{
	unsigned long needed = *(unsigned long *)arg;
	unsigned long range_start, range_end, free_start;
	int i;

#if IGNORE_PFN0
	/* pfn 0 is not usable yet; skip past it (see IGNORE_PFN0) */
	if (start == PAGE_OFFSET) {
		start += PAGE_SIZE;
		if (start >= end)
			return 0;
	}
#endif

	free_start = PAGE_OFFSET;

	for (i = 0; i < num_rsvd_regions; i++) {
		/*
		 * Candidate gap between the previous reserved region and this
		 * one, clipped to the [start, end) range we were handed.
		 */
		range_start = max(start, free_start);
		range_end = min(end, rsvd_region[i].start & PAGE_MASK);

		if (range_end <= range_start)
			continue; /* skip over empty range */

		if (range_end - range_start >= needed) {
			/* found it: record the physical address and stop the walk */
			bootmap_start = __pa(range_start);
			return 1;	/* done */
		}

		/* nothing more available in this segment */
		if (range_end == end)
			return 0;

		free_start = PAGE_ALIGN(rsvd_region[i].end);
	}
	return 0;
}
/**
* find_memory - setup memory map
*
* Walk the EFI memory map and find usable memory for the system, taking
* into account reserved areas.
*/
void
find_memory (void)
{
	unsigned long bootmap_size;

	/* record boot params, initrd, kernel image, etc. as reserved */
	reserve_memory();

	/* first find highest page frame number */
	max_pfn = 0;
	efi_memmap_walk(find_max_pfn, &max_pfn);

	/* how many bytes to cover all the pages */
	bootmap_size = bootmem_bootmap_pages(max_pfn) << PAGE_SHIFT;

	/* look for a location to hold the bootmap */
	bootmap_start = ~0UL;	/* sentinel: find_bootmap_location overwrites on success */
	efi_memmap_walk(find_bootmap_location, &bootmap_size);
	if (bootmap_start == ~0UL)
		panic("Cannot find %ld bytes for bootmap\n", bootmap_size);

	bootmap_size = init_bootmem(bootmap_start >> PAGE_SHIFT, max_pfn);

	/* Free all available memory, then mark bootmem-map as being in use. */
	efi_memmap_walk(filter_rsvd_memory, free_bootmem);
	reserve_bootmem(bootmap_start, bootmap_size);

	find_initrd();
}
/*
* Copyright (c) 2000 Silicon Graphics, Inc. All rights reserved.
* Copyright (c) 2000, 2003 Silicon Graphics, Inc. All rights reserved.
* Copyright (c) 2001 Intel Corp.
* Copyright (c) 2001 Tony Luck <tony.luck@intel.com>
* Copyright (c) 2002 NEC Corp.
......@@ -12,10 +12,13 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <asm/pgalloc.h>
#include <asm/meminit.h>
/*
......@@ -27,9 +30,6 @@ static struct ia64_node_data *node_data[NR_NODES];
static long boot_pg_data[8*NR_NODES+sizeof(pg_data_t)] __initdata;
static pg_data_t *pg_data_ptr[NR_NODES] __initdata;
static bootmem_data_t bdata[NR_NODES][NR_BANKS_PER_NODE+1] __initdata;
extern int filter_rsvd_memory (unsigned long start, unsigned long end, void *arg);
/*
* Return the compact node number of this cpu. Used prior to
* setting up the cpu_data area.
......@@ -198,7 +198,7 @@ allocate_pernode_structures(void)
pgdat->pgdat_next = new_pgdat_list;
new_pgdat_list = pgdat;
}
memcpy(node_data[mynode]->pg_data_ptrs, pg_data_ptr, sizeof(pg_data_ptr));
memcpy(node_data[mynode]->node_data_ptrs, node_data, sizeof(node_data));
......@@ -209,11 +209,12 @@ allocate_pernode_structures(void)
* Called early in boot to setup the boot memory allocator, and to
* allocate the node-local pg_data & node-directory data structures..
*/
void __init
discontig_mem_init(void)
void __init find_memory(void)
{
int node;
reserve_memory();
if (numnodes == 0) {
printk(KERN_ERR "node info missing!\n");
numnodes = 1;
......@@ -232,6 +233,8 @@ discontig_mem_init(void)
efi_memmap_walk(filter_rsvd_memory, discontig_free_bootmem_node);
discontig_reserve_bootmem();
allocate_pernode_structures();
find_initrd();
}
/*
......@@ -242,8 +245,8 @@ discontig_mem_init(void)
* the per-bank mem_map entries.
* - fix the page struct "virtual" pointers. These are bank specific
* values that the paging system doesn't understand.
* - replicate the nodedir structure to other nodes
*/
* - replicate the nodedir structure to other nodes
*/
void __init
discontig_paging_init(void)
......@@ -305,3 +308,71 @@ discontig_paging_init(void)
}
}
/*
 * show_mem - display memory statistics, broken down per node.
 *
 * Fix: reserved/shared/cached are printed with per-node labels, but the
 * original never reset them between nodes, so every node after the first
 * reported cumulative counts.  Reset the counters at the top of each
 * pgdat iteration.
 */
void show_mem(void)
{
	int i, reserved, shared, cached;
	pg_data_t *pgdat;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6dkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	for_each_pgdat(pgdat) {
		reserved = shared = cached = 0;	/* per-node counts */
		printk("Node ID: %d\n", pgdat->node_id);
		for(i = 0; i < pgdat->node_spanned_pages; i++) {
			if (PageReserved(pgdat->node_mem_map+i))
				reserved++;
			else if (PageSwapCache(pgdat->node_mem_map+i))
				cached++;
			else if (page_count(pgdat->node_mem_map+i))
				shared += page_count(pgdat->node_mem_map+i)-1;
		}
		printk("\t%ld pages of RAM\n", pgdat->node_present_pages);
		printk("\t%d reserved pages\n", reserved);
		printk("\t%d pages shared\n", shared);
		printk("\t%d pages swap cached\n", cached);
	}
	printk("Total of %ld pages in page table cache\n", pgtable_cache_size);
	printk("%d free buffer pages\n", nr_free_buffer_pages());
}
/*
* efi_memmap_walk() knows nothing about layout of memory across nodes. Find
* out to which node a block of memory belongs. Ignore memory that we cannot
* identify, and split blocks that run across multiple nodes.
*
* Take this opportunity to round the start address up and the end address
* down to page boundaries.
*/
/*
 * call_pernode_memory - split a memory range along node memory blocks.
 *
 * efi_memmap_walk() knows nothing about node layout, so clip the
 * page-rounded [start, end) range against each node_memblk entry and hand
 * every intersecting piece to the callback in @arg, which has the shape
 * func(start, len, nid, bank).  Without SRAT data (num_memblks == 0) the
 * whole range is attributed to node 0, bank 0.
 */
void call_pernode_memory(unsigned long start, unsigned long end, void *arg)
{
	void (*func)(unsigned long, unsigned long, int, int) = arg;
	unsigned long seg_start, seg_end;
	int blk;

	/* round start up and end down to page boundaries */
	start = PAGE_ALIGN(start);
	end &= PAGE_MASK;
	if (start >= end)
		return;

	if (!num_memblks) {
		/* no SRAT on this machine: everything is node 0, bank 0 */
		(*func)(start, end - start, 0, 0);
		return;
	}

	for (blk = 0; blk < num_memblks; blk++) {
		seg_start = max(start, node_memblk[blk].start_paddr);
		seg_end = min(end, node_memblk[blk].start_paddr + node_memblk[blk].size);
		if (seg_start >= seg_end)
			continue;	/* no overlap with this block */
		(*func)(seg_start, seg_end - seg_start,
			node_memblk[blk].nid, node_memblk[blk].bank);
	}
}
......@@ -214,58 +214,6 @@ free_initrd_mem (unsigned long start, unsigned long end)
}
}
void
show_mem(void)
{
int i, total = 0, reserved = 0;
int shared = 0, cached = 0;
printk("Mem-info:\n");
show_free_areas();
#ifdef CONFIG_DISCONTIGMEM
{
pg_data_t *pgdat;
printk("Free swap: %6dkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
for_each_pgdat(pgdat) {
printk("Node ID: %d\n", pgdat->node_id);
for(i = 0; i < pgdat->node_spanned_pages; i++) {
if (PageReserved(pgdat->node_mem_map+i))
reserved++;
else if (PageSwapCache(pgdat->node_mem_map+i))
cached++;
else if (page_count(pgdat->node_mem_map + i))
shared += page_count(pgdat->node_mem_map + i) - 1;
}
printk("\t%d pages of RAM\n", pgdat->node_spanned_pages);
printk("\t%d reserved pages\n", reserved);
printk("\t%d pages shared\n", shared);
printk("\t%d pages swap cached\n", cached);
}
printk("Total of %ld pages in page table cache\n", pgtable_cache_size);
printk("%d free buffer pages\n", nr_free_buffer_pages());
}
#else /* !CONFIG_DISCONTIGMEM */
printk("Free swap: %6dkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
i = max_mapnr;
while (i-- > 0) {
total++;
if (PageReserved(mem_map+i))
reserved++;
else if (PageSwapCache(mem_map+i))
cached++;
else if (page_count(mem_map + i))
shared += page_count(mem_map + i) - 1;
}
printk("%d pages of RAM\n", total);
printk("%d reserved pages\n", reserved);
printk("%d pages shared\n", shared);
printk("%d pages swap cached\n", cached);
printk("%ld pages in page table cache\n", pgtable_cache_size);
#endif /* !CONFIG_DISCONTIGMEM */
}
/*
* This is like put_dirty_page() but installs a clean page in the kernel's page table.
*/
......
......@@ -17,8 +17,6 @@
#include <linux/pci.h>
#include <asm/sn/sgi.h>
#include <linux/devfs_fs.h>
#include <linux/devfs_fs_kernel.h>
#include <asm/io.h>
#include <asm/sn/iograph.h>
#include <asm/sn/invent.h>
......@@ -354,7 +352,7 @@ struct file_operations ioconfig_bus_fops = {
/*
* init_ifconfig_bus() - Boot time initialization. Ensure that it is called
* after devfs has been initialized.
* after hwgfs has been initialized.
*
*/
int init_ioconfig_bus(void)
......
......@@ -9,5 +9,4 @@
EXTRA_CFLAGS := -DLITTLE_ENDIAN
obj-y += hcl.o labelcl.o hcl_util.o invent_stub.o \
ramfs.o interface.o
obj-y += hcl.o labelcl.o hcl_util.o ramfs.o interface.o
......@@ -53,73 +53,12 @@ static unsigned int hcl_debug = HCL_DEBUG_NONE;
static unsigned int boot_options = OPTION_NONE;
#endif
/*
* Some Global definitions.
*/
vertex_hdl_t hcl_handle;
invplace_t invplace_none = {
GRAPH_VERTEX_NONE,
GRAPH_VERTEX_PLACE_NONE,
NULL
};
/*
* HCL device driver.
* The purpose of this device driver is to provide a facility
* for User Level Apps e.g. hinv, ioconfig etc. an ioctl path
* to manipulate label entries without having to implement
* system call interfaces. This methodology will enable us to
* make this feature module loadable.
*/
/* Open hook for the HCL control device: optionally log, always succeed. */
static int hcl_open(struct inode * inode, struct file * filp)
{
	if (hcl_debug)
		printk("HCL: hcl_open called.\n");
	return 0;
}
/* Release hook for the HCL control device: optionally log, always succeed. */
static int hcl_close(struct inode * inode, struct file * filp)
{
	if (hcl_debug)
		printk("HCL: hcl_close called.\n");
	return 0;
}
/*
 * ioctl hook for the HCL control device.  No commands are implemented;
 * every cmd is logged (when hcl_debug is set) and reported as success.
 * The original routed all commands through a switch whose only arm was
 * `default:`, so a plain debug branch is behaviorally identical.
 */
static int hcl_ioctl(struct inode * inode, struct file * file,
		unsigned int cmd, unsigned long arg)
{
	if (hcl_debug) {
		printk("HCL: hcl_ioctl called.\n");
		printk("HCL: hcl_ioctl cmd = 0x%x\n", cmd);
	}
	return 0;
}
/* File operations for the ".hcl" control device registered in init_hcl(). */
struct file_operations hcl_fops = {
	.owner = (struct module *)0,	/* not a module: no owner refcounting */
	.ioctl = hcl_ioctl,
	.open = hcl_open,
	.release = hcl_close,
};
/*
* init_hcl() - Boot time initialization.
*
......@@ -146,21 +85,6 @@ int __init init_hcl(void)
if (rv)
printk ("WARNING: init_hcl: Failed to create hwgraph_root. Error = %d.\n", rv);
/*
* Create the hcl driver to support inventory entry manipulations.
*
*/
hcl_handle = hwgraph_register(hwgraph_root, ".hcl",
0, 0,
0, 0,
S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 0, 0,
&hcl_fops, NULL);
if (hcl_handle == NULL) {
panic("HCL: Unable to create HCL Driver in init_hcl().\n");
return(0);
}
/*
* Initialize the HCL string table.
*/
......
/* $Id$
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992-1997,2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
/*
* Hardware Inventory
*
* See sys/sn/invent.h for an explanation of the hardware inventory contents.
*
*/
#include <linux/types.h>
#include <asm/sn/sgi.h>
#include <asm/sn/hwgfs.h>
#include <asm/sn/invent.h>
#include <asm/sn/hcl.h>
#include <asm/sn/labelcl.h>
#include <asm/sn/invent.h>
/* Stub: no inventory initialization is performed (empty body). */
void
inventinit(void)
{
}
/*
* For initializing/updating an inventory entry.
*/
/* Stub: inventory entries are never updated; all arguments are ignored. */
void
replace_in_inventory(
	inventory_t *pinv, int class, int type,
	int controller, int unit, int state)
{
}
/*
* Inventory addition
*
* XXX NOTE: Currently must be called after dynamic memory allocator is
* initialized.
*
*/
/* Stub: nothing is ever added to the inventory; arguments are ignored. */
void
add_to_inventory(int class, int type, int controller, int unit, int state)
{
}
/*
* Inventory retrieval
*
* These two routines are intended to prevent the caller from having to know
* the internal structure of the inventory table.
*
* The caller of get_next_inventory is supposed to call start_scan_invent
* before the first call to get_next_inventory, and the caller is required
* to call end_scan_invent after the last call to get_next_inventory.
*/
/* Stub: the inventory is empty, so iteration always yields NULL. */
inventory_t *
get_next_inventory(invplace_t *place)
{
	return((inventory_t *) NULL);
}
/* ARGSUSED */
/* Returns sizeof(inventory_t) regardless of the requested ABI. */
int
get_sizeof_inventory(int abi)
{
	return sizeof(inventory_t);
}
/* Must be called prior to first call to get_next_inventory */
/* Stub: no scan state to set up (see the scan-protocol comment above). */
void
start_scan_inventory(invplace_t *iplace)
{
}
/* Must be called after last call to get_next_inventory */
/* Stub: no scan state to tear down. */
void
end_scan_inventory(invplace_t *iplace)
{
}
/*
* Hardware inventory scanner.
*
* Calls fun() for every entry in inventory list unless fun() returns something
* other than 0.
*/
/* Stub: the inventory is empty, so fun() is never invoked; returns 0. */
int
scaninvent(int (*fun)(inventory_t *, void *), void *arg)
{
	return 0;
}
/*
* Find a particular inventory object
*
* pinv can be a pointer to an inventory entry and the search will begin from
* there, or it can be 0 in which case the search starts at the beginning.
* A -1 for any of the other arguments is a wildcard (i.e. it always matches).
*/
/* Stub: no inventory exists, so every lookup fails with NULL. */
inventory_t *
find_inventory(inventory_t *pinv, int class, int type, int controller,
	int unit, int state)
{
	return((inventory_t *) NULL);
}
/*
** Retrieve inventory data associated with a device.
*/
/* Stub: devices carry no inventory data; always returns NULL. */
inventory_t *
device_inventory_get_next(	vertex_hdl_t device,
			invplace_t *invplace)
{
	return((inventory_t *) NULL);
}
/*
** Associate canonical inventory information with a device (and
** add it to the general inventory).
*/
/* Stub: inventory information is not associated with devices; ignored. */
void
device_inventory_add(	vertex_hdl_t device,
			int class,
			int type,
			major_t controller,
			minor_t unit,
			int state)
{
}
/* Stub: controller numbers are not tracked; always reports 0. */
int
device_controller_num_get(vertex_hdl_t device)
{
	return (0);
}
/* Stub: controller numbers are not tracked; the value is discarded. */
void
device_controller_num_set(vertex_hdl_t device, int contr_num)
{
}
......@@ -35,8 +35,6 @@
* can probably be removed with a little more cleanup now that the SAL routines
* work on sn2.
*/
#ifdef CONFIG_PCI
extern vertex_hdl_t devfn_to_vertex(unsigned char bus, unsigned char devfn);
int sn_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val)
......@@ -71,9 +69,3 @@ struct pci_ops sn_pci_ops = {
.read = sn_read_config,
.write = sn_write_config,
};
#else
struct list_head pci_root_buses;
struct list_head pci_root_buses;
struct list_head pci_devices;
#endif /* CONFIG_PCI */
......@@ -426,7 +426,7 @@ sn_pci_fixup(int arg)
struct pci_dev *device_dev = NULL;
struct sn_widget_sysdata *widget_sysdata;
struct sn_device_sysdata *device_sysdata;
pciio_intr_t intr_handle;
pcibr_intr_t intr_handle;
int cpuid;
vertex_hdl_t device_vertex;
pciio_intr_line_t lines;
......@@ -438,14 +438,18 @@ sn_pci_fixup(int arg)
extern void register_sn_procfs(void);
#endif
extern void irix_io_init(void);
extern void sn_init_cpei_timer(void);
init_hcl();
irix_io_init();
for (cnode = 0; cnode < numnodes; cnode++) {
extern void intr_init_vecblk(nodepda_t *npda, cnodeid_t, int);
intr_init_vecblk(NODEPDA(cnode), cnode, 0);
extern void intr_init_vecblk(cnodeid_t);
intr_init_vecblk(cnode);
}
sn_init_cpei_timer();
#ifdef CONFIG_PROC_FS
register_sn_procfs();
#endif
......@@ -545,17 +549,17 @@ sn_pci_fixup(int arg)
(unsigned char *)&lines);
irqpdaindr->curr = device_dev;
intr_handle = pciio_intr_alloc(device_vertex, NULL, lines, device_vertex);
intr_handle = pcibr_intr_alloc(device_vertex, NULL, lines, device_vertex);
irq = intr_handle->pi_irq;
irq = intr_handle->bi_irq;
irqpdaindr->device_dev[irq] = device_dev;
cpuid = intr_handle->pi_cpu;
pciio_intr_connect(intr_handle, (intr_func_t)0, (intr_arg_t)0);
cpuid = intr_handle->bi_cpu;
pcibr_intr_connect(intr_handle, (intr_func_t)0, (intr_arg_t)0);
device_dev->irq = irq;
register_pcibr_intr(irq, (pcibr_intr_t)intr_handle);
register_pcibr_intr(irq, intr_handle);
for (idx = 0; idx < PCI_ROM_RESOURCE; idx++) {
int ibits = ((pcibr_intr_t)intr_handle)->bi_ibits;
int ibits = intr_handle->bi_ibits;
int i;
size = device_dev->resource[idx].end -
......@@ -700,12 +704,12 @@ pci_bus_map_create(vertex_hdl_t xtalk, char * io_moduleid)
* Pre assign DMA maps needed for 32 Bits Page Map DMA.
*/
busnum_to_atedmamaps[num_bridges - 1] = (void *) kmalloc(
sizeof(struct sn_dma_maps_s) * MAX_ATE_MAPS, GFP_KERNEL);
sizeof(struct pcibr_dmamap_s) * MAX_ATE_MAPS, GFP_KERNEL);
if (!busnum_to_atedmamaps[num_bridges - 1])
printk("WARNING: pci_bus_map_create: Unable to precreate ATE DMA Maps for busnum %d vertex 0x%p\n", num_bridges - 1, (void *)xwidget);
memset(busnum_to_atedmamaps[num_bridges - 1], 0x0,
sizeof(struct sn_dma_maps_s) * MAX_ATE_MAPS);
sizeof(struct pcibr_dmamap_s) * MAX_ATE_MAPS);
}
......@@ -764,12 +768,12 @@ pci_bus_map_create(vertex_hdl_t xtalk, char * io_moduleid)
* Pre assign DMA maps needed for 32 Bits Page Map DMA.
*/
busnum_to_atedmamaps[bus_number] = (void *) kmalloc(
sizeof(struct sn_dma_maps_s) * MAX_ATE_MAPS, GFP_KERNEL);
sizeof(struct pcibr_dmamap_s) * MAX_ATE_MAPS, GFP_KERNEL);
if (!busnum_to_atedmamaps[bus_number])
printk("WARNING: pci_bus_map_create: Unable to precreate ATE DMA Maps for busnum %d vertex 0x%p\n", num_bridges - 1, (void *)xwidget);
memset(busnum_to_atedmamaps[bus_number], 0x0,
sizeof(struct sn_dma_maps_s) * MAX_ATE_MAPS);
sizeof(struct pcibr_dmamap_s) * MAX_ATE_MAPS);
}
}
......
......@@ -15,7 +15,6 @@
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/module.h>
#include <asm/delay.h>
......@@ -37,7 +36,7 @@
*/
pciio_dmamap_t get_free_pciio_dmamap(vertex_hdl_t);
void free_pciio_dmamap(pcibr_dmamap_t);
static struct sn_dma_maps_s *find_sn_dma_map(dma_addr_t, unsigned char);
static struct pcibr_dmamap_s *find_sn_dma_map(dma_addr_t, unsigned char);
void sn_pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction);
/*
......@@ -58,7 +57,7 @@ pciio_dmamap_t
get_free_pciio_dmamap(vertex_hdl_t pci_bus)
{
int i;
struct sn_dma_maps_s *sn_dma_map = NULL;
struct pcibr_dmamap_s *sn_dma_map = NULL;
/*
* Darn, we need to get the maps allocated for this bus.
......@@ -73,8 +72,8 @@ get_free_pciio_dmamap(vertex_hdl_t pci_bus)
* Now get a free dmamap entry from this list.
*/
for (i = 0; i < MAX_ATE_MAPS; i++, sn_dma_map++) {
if (!sn_dma_map->dma_addr) {
sn_dma_map->dma_addr = -1;
if (!sn_dma_map->bd_dma_addr) {
sn_dma_map->bd_dma_addr = -1;
return( (pciio_dmamap_t) sn_dma_map );
}
}
......@@ -91,10 +90,7 @@ get_free_pciio_dmamap(vertex_hdl_t pci_bus)
void
free_pciio_dmamap(pcibr_dmamap_t dma_map)
{
struct sn_dma_maps_s *sn_dma_map;
sn_dma_map = (struct sn_dma_maps_s *) dma_map;
sn_dma_map->dma_addr = 0;
dma_map->bd_dma_addr = 0;
}
/**
......@@ -104,17 +100,17 @@ free_pciio_dmamap(pcibr_dmamap_t dma_map)
*
* Finds the ATE associated with @dma_addr and @busnum.
*/
static struct sn_dma_maps_s *
static struct pcibr_dmamap_s *
find_sn_dma_map(dma_addr_t dma_addr, unsigned char busnum)
{
struct sn_dma_maps_s *sn_dma_map = NULL;
struct pcibr_dmamap_s *sn_dma_map = NULL;
int i;
sn_dma_map = busnum_to_atedmamaps[busnum];
for (i = 0; i < MAX_ATE_MAPS; i++, sn_dma_map++) {
if (sn_dma_map->dma_addr == dma_addr) {
if (sn_dma_map->bd_dma_addr == dma_addr) {
return sn_dma_map;
}
}
......@@ -148,8 +144,7 @@ sn_pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_hand
vertex_hdl_t vhdl;
struct sn_device_sysdata *device_sysdata;
unsigned long phys_addr;
pciio_dmamap_t dma_map = 0;
struct sn_dma_maps_s *sn_dma_map;
pcibr_dmamap_t dma_map = 0;
*dma_handle = 0;
......@@ -181,7 +176,7 @@ sn_pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_hand
* device on the same bus is already mapped with different
* attributes or to a different memory region.
*/
*dma_handle = pciio_dmatrans_addr(vhdl, NULL, phys_addr, size,
*dma_handle = pcibr_dmatrans_addr(vhdl, NULL, phys_addr, size,
((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
PCIIO_DMA_CMD);
......@@ -200,7 +195,7 @@ sn_pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_hand
* so we try to use an ATE.
*/
if (!(*dma_handle)) {
dma_map = pciio_dmamap_alloc(vhdl, NULL, size,
dma_map = pcibr_dmamap_alloc(vhdl, NULL, size,
((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
PCIIO_DMA_CMD);
if (!dma_map) {
......@@ -208,10 +203,9 @@ sn_pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_hand
"allocate anymore 32 bit page map entries.\n");
return 0;
}
*dma_handle = (dma_addr_t) pciio_dmamap_addr(dma_map,phys_addr,
*dma_handle = (dma_addr_t) pcibr_dmamap_addr(dma_map,phys_addr,
size);
sn_dma_map = (struct sn_dma_maps_s *)dma_map;
sn_dma_map->dma_addr = *dma_handle;
dma_map->bd_dma_addr = *dma_handle;
}
return cpuaddr;
......@@ -230,21 +224,21 @@ sn_pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_hand
void
sn_pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
{
struct sn_dma_maps_s *sn_dma_map = NULL;
struct pcibr_dmamap_s *dma_map = NULL;
/*
* Get the sn_dma_map entry.
*/
if (IS_PCI32_MAPPED(dma_handle))
sn_dma_map = find_sn_dma_map(dma_handle, hwdev->bus->number);
dma_map = find_sn_dma_map(dma_handle, hwdev->bus->number);
/*
* and free it if necessary...
*/
if (sn_dma_map) {
pciio_dmamap_done((pciio_dmamap_t)sn_dma_map);
pciio_dmamap_free((pciio_dmamap_t)sn_dma_map);
sn_dma_map->dma_addr = (dma_addr_t)NULL;
if (dma_map) {
pcibr_dmamap_done(dma_map);
pcibr_dmamap_free(dma_map);
dma_map->bd_dma_addr = 0;
}
free_pages((unsigned long) vaddr, get_order(size));
}
......@@ -267,8 +261,7 @@ sn_pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int dire
vertex_hdl_t vhdl;
unsigned long phys_addr;
struct sn_device_sysdata *device_sysdata;
pciio_dmamap_t dma_map;
struct sn_dma_maps_s *sn_dma_map;
pcibr_dmamap_t dma_map;
struct scatterlist *saved_sg = sg;
/* can't go anywhere w/o a direction in life */
......@@ -294,7 +287,7 @@ sn_pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int dire
* call should always succeed.
*/
if (IS_PCIA64(hwdev)) {
sg->dma_address = pciio_dmatrans_addr(vhdl, NULL, phys_addr,
sg->dma_address = pcibr_dmatrans_addr(vhdl, NULL, phys_addr,
sg->length,
((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
PCIIO_DMA_DATA |
......@@ -307,7 +300,7 @@ sn_pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int dire
* Handle 32-63 bit cards via direct mapping
*/
if (IS_PCI32G(hwdev)) {
sg->dma_address = pciio_dmatrans_addr(vhdl, NULL, phys_addr,
sg->dma_address = pcibr_dmatrans_addr(vhdl, NULL, phys_addr,
sg->length,
((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
PCIIO_DMA_DATA);
......@@ -325,7 +318,7 @@ sn_pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int dire
* It is a 32 bit card and we cannot do direct mapping,
* so we use an ATE.
*/
dma_map = pciio_dmamap_alloc(vhdl, NULL, sg->length,
dma_map = pcibr_dmamap_alloc(vhdl, NULL, sg->length,
((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
PCIIO_DMA_DATA);
if (!dma_map) {
......@@ -340,10 +333,9 @@ sn_pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int dire
return (0);
}
sg->dma_address = pciio_dmamap_addr(dma_map, phys_addr, sg->length);
sg->dma_address = pcibr_dmamap_addr(dma_map, phys_addr, sg->length);
sg->dma_length = sg->length;
sn_dma_map = (struct sn_dma_maps_s *)dma_map;
sn_dma_map->dma_addr = sg->dma_address;
dma_map->bd_dma_addr = sg->dma_address;
}
return nents;
......@@ -365,7 +357,7 @@ void
sn_pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
{
int i;
struct sn_dma_maps_s *sn_dma_map;
struct pcibr_dmamap_s *dma_map;
/* can't go anywhere w/o a direction in life */
if (direction == PCI_DMA_NONE)
......@@ -374,12 +366,11 @@ sn_pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int di
for (i = 0; i < nents; i++, sg++){
if (IS_PCI32_MAPPED(sg->dma_address)) {
sn_dma_map = NULL;
sn_dma_map = find_sn_dma_map(sg->dma_address, hwdev->bus->number);
if (sn_dma_map) {
pciio_dmamap_done((pciio_dmamap_t)sn_dma_map);
pciio_dmamap_free((pciio_dmamap_t)sn_dma_map);
sn_dma_map->dma_addr = (dma_addr_t)NULL;
dma_map = find_sn_dma_map(sg->dma_address, hwdev->bus->number);
if (dma_map) {
pcibr_dmamap_done(dma_map);
pcibr_dmamap_free(dma_map);
dma_map->bd_dma_addr = 0;
}
}
......@@ -399,7 +390,7 @@ sn_pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int di
* DMA address. Also known as platform_pci_map_single() by
* the IA64 machvec code.
*
* We map this to the one step pciio_dmamap_trans interface rather than
* We map this to the one step pcibr_dmamap_trans interface rather than
* the two step pciio_dmamap_alloc/pciio_dmamap_addr because we have
* no way of saving the dmamap handle from the alloc to later free
* (which is pretty much unacceptable).
......@@ -415,8 +406,7 @@ sn_pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
dma_addr_t dma_addr;
unsigned long phys_addr;
struct sn_device_sysdata *device_sysdata;
pciio_dmamap_t dma_map = NULL;
struct sn_dma_maps_s *sn_dma_map;
pcibr_dmamap_t dma_map = NULL;
if (direction == PCI_DMA_NONE)
BUG();
......@@ -439,7 +429,7 @@ sn_pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
if (IS_PCIA64(hwdev)) {
/* This device supports 64 bit DMA addresses. */
dma_addr = pciio_dmatrans_addr(vhdl, NULL, phys_addr, size,
dma_addr = pcibr_dmatrans_addr(vhdl, NULL, phys_addr, size,
((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
PCIIO_DMA_DATA |
PCIIO_DMA_A64);
......@@ -453,7 +443,7 @@ sn_pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
* First try to get a 32 bit direct map register.
*/
if (IS_PCI32G(hwdev)) {
dma_addr = pciio_dmatrans_addr(vhdl, NULL, phys_addr, size,
dma_addr = pcibr_dmatrans_addr(vhdl, NULL, phys_addr, size,
((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
PCIIO_DMA_DATA);
if (dma_addr)
......@@ -465,7 +455,7 @@ sn_pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
* let's use the PMU instead.
*/
dma_map = NULL;
dma_map = pciio_dmamap_alloc(vhdl, NULL, size,
dma_map = pcibr_dmamap_alloc(vhdl, NULL, size,
((IS_PIC_DEVICE(hwdev)) ? 0 : PCIIO_BYTE_STREAM) |
PCIIO_DMA_DATA);
......@@ -475,9 +465,8 @@ sn_pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
return 0;
}
dma_addr = (dma_addr_t) pciio_dmamap_addr(dma_map, phys_addr, size);
sn_dma_map = (struct sn_dma_maps_s *)dma_map;
sn_dma_map->dma_addr = dma_addr;
dma_addr = (dma_addr_t) pcibr_dmamap_addr(dma_map, phys_addr, size);
dma_map->bd_dma_addr = dma_addr;
return ((dma_addr_t)dma_addr);
}
......@@ -495,7 +484,7 @@ sn_pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
void
sn_pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction)
{
struct sn_dma_maps_s *sn_dma_map = NULL;
struct pcibr_dmamap_s *dma_map = NULL;
if (direction == PCI_DMA_NONE)
BUG();
......@@ -504,15 +493,15 @@ sn_pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int
* Get the sn_dma_map entry.
*/
if (IS_PCI32_MAPPED(dma_addr))
sn_dma_map = find_sn_dma_map(dma_addr, hwdev->bus->number);
dma_map = find_sn_dma_map(dma_addr, hwdev->bus->number);
/*
* and free it if necessary...
*/
if (sn_dma_map) {
pciio_dmamap_done((pciio_dmamap_t)sn_dma_map);
pciio_dmamap_free((pciio_dmamap_t)sn_dma_map);
sn_dma_map->dma_addr = (dma_addr_t)NULL;
if (dma_map) {
pcibr_dmamap_done(dma_map);
pcibr_dmamap_free(dma_map);
dma_map->bd_dma_addr = 0;
}
}
......@@ -573,8 +562,6 @@ sn_pci_dma_supported(struct pci_dev *hwdev, u64 mask)
return 1;
}
#ifdef CONFIG_PCI
/*
* New generic DMA routines just wrap sn2 PCI routines until we
* support other bus types (if ever).
......@@ -703,8 +690,6 @@ sn_dma_sync_sg(struct device *dev, struct scatterlist *sg, int nelems,
}
EXPORT_SYMBOL(sn_dma_sync_sg);
#endif /* CONFIG_PCI */
EXPORT_SYMBOL(sn_pci_unmap_single);
EXPORT_SYMBOL(sn_pci_map_single);
EXPORT_SYMBOL(sn_pci_dma_sync_single);
......
......@@ -14,6 +14,3 @@ EXTRA_CFLAGS := -DLITTLE_ENDIAN
obj-y += pcibr/ ml_SN_intr.o shub_intr.o shuberror.o shub.o bte_error.o \
pic.o geo_op.o l1_command.o klconflib.o klgraph.o ml_SN_init.o \
ml_iograph.o module.o pciio.o xbow.o xtalk.o shubio.o
obj-$(CONFIG_KDB) += kdba_io.o
obj-$(CONFIG_SHUB_1_0_SPECIFIC) += efi-rtc.o
/*
* Kernel Debugger Architecture Dependent POD functions.
*
* Copyright (C) 1999-2003 Silicon Graphics, Inc. All Rights Reserved
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
* Further, this software is distributed without any warranty that it is
* free of the rightful claim of any third person regarding infringement
* or the like. Any license provided herein, whether implied or
* otherwise, applies only to this software file. Patent licenses, if
* any, provided herein do not apply to combinations of this program with
* other software, or any other product whatsoever.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
* Mountain View, CA 94043, or:
*
* http://www.sgi.com
*
* For further information regarding this notice, see:
*
* http://oss.sgi.com/projects/GenInfo/NoticeExplan
*/
#include <linux/types.h>
#include <linux/kdb.h>
//#include <linux/kdbprivate.h>
/**
* kdba_io - enter POD mode from kdb
* @argc: arg count
* @argv: arg values
* @envp: kdb env. vars
* @regs: current register state
*
* Enter POD mode from kdb using SGI SN specific SAL function call.
*/
/*
 * kdb 'io' command handler.  Currently a stub: it only reports the
 * register-context pointer it was handed; the POD entry via the SGI SN
 * SAL call (see the kernel-doc above) is not yet implemented.
 * Always returns 0 (KDB success).
 */
static int kdba_io(int argc, const char **argv, const char **envp, struct pt_regs *regs)
{
	kdb_printf("kdba_io entered with addr 0x%p\n", (void *) regs);
	return 0;
}
/**
* kdba_io_init - register 'io' command with kdb
*
* Register the 'io' command with kdb at load time.
*/
/*
 * Register the 'io' command with kdb at load time so it shows up in the
 * debugger's command table ("<vaddr>" argument, "Display IO Contents").
 */
void kdba_io_init(void)
{
	kdb_register("io", kdba_io, "<vaddr>", "Display IO Contents", 0);
}
/**
 * kdba_exit - unregister the 'io' command
 *
 * Tell kdb that the 'io' command is no longer available.
 */
/*
 * Module unload hook: withdraw the 'io' command from kdb's command
 * table.  Marked __exit so it is discarded when the code is built in.
 */
static void __exit kdba_exit(void)
{
	kdb_unregister("io");
}
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
......@@ -7,14 +6,6 @@
* Copyright (C) 1992-1997, 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
*/
/*
* intr.c-
* This file contains all of the routines necessary to set up and
* handle interrupts on an IPXX board.
*/
#ident "$Revision: 1.1 $"
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
......@@ -45,75 +36,63 @@ extern irqpda_t *irqpdaindr;
extern cnodeid_t master_node_get(vertex_hdl_t vhdl);
extern nasid_t master_nasid;
// Initialize some shub registers for interrupts, both IO and error.
//
void
intr_init_vecblk( nodepda_t *npda,
cnodeid_t node,
int sn)
/* Initialize some shub registers for interrupts, both IO and error. */
void intr_init_vecblk(cnodeid_t node)
{
int nasid = cnodeid_to_nasid(node);
sh_ii_int0_config_u_t ii_int_config;
cpuid_t cpu;
cpuid_t cpu0, cpu1;
nodepda_t *lnodepda;
sh_ii_int0_enable_u_t ii_int_enable;
int nasid = cnodeid_to_nasid(node);
sh_ii_int0_config_u_t ii_int_config;
cpuid_t cpu;
cpuid_t cpu0, cpu1;
nodepda_t *lnodepda;
sh_ii_int0_enable_u_t ii_int_enable;
sh_int_node_id_config_u_t node_id_config;
sh_local_int5_config_u_t local5_config;
sh_local_int5_enable_u_t local5_enable;
extern void sn_init_cpei_timer(void);
static int timer_added = 0;
if (is_headless_node(node) ) {
int cnode;
struct ia64_sal_retval ret_stuff;
int cnode;
// retarget all interrupts on this node to the master node.
/* retarget all interrupts on this node to the master node. */
node_id_config.sh_int_node_id_config_regval = 0;
node_id_config.sh_int_node_id_config_s.node_id = master_nasid;
node_id_config.sh_int_node_id_config_s.id_sel = 1;
HUB_S( (unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_INT_NODE_ID_CONFIG),
HUB_S((unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_INT_NODE_ID_CONFIG),
node_id_config.sh_int_node_id_config_regval);
cnode = nasid_to_cnodeid(master_nasid);
lnodepda = NODEPDA(cnode);
cpu = lnodepda->node_first_cpu;
cpu = cpu_physical_id(cpu);
SAL_CALL(ret_stuff, SN_SAL_REGISTER_CE, nasid, cpu, master_nasid,0,0,0,0);
if (ret_stuff.status < 0) {
if (ret_stuff.status < 0)
printk("%s: SN_SAL_REGISTER_CE SAL_CALL failed\n",__FUNCTION__);
}
} else {
lnodepda = NODEPDA(node);
cpu = lnodepda->node_first_cpu;
cpu = cpu_physical_id(cpu);
}
// Get the physical id's of the cpu's on this node.
/* Get the physical id's of the cpu's on this node. */
cpu0 = nasid_slice_to_cpu_physical_id(nasid, 0);
cpu1 = nasid_slice_to_cpu_physical_id(nasid, 2);
HUB_S( (unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_PI_ERROR_MASK), 0);
HUB_S( (unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_PI_CRBP_ERROR_MASK), 0);
// Config and enable UART interrupt, all nodes.
/* Config and enable UART interrupt, all nodes. */
local5_config.sh_local_int5_config_regval = 0;
local5_config.sh_local_int5_config_s.idx = SGI_UART_VECTOR;
local5_config.sh_local_int5_config_s.pid = cpu0;
HUB_S( (unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_LOCAL_INT5_CONFIG),
HUB_S((unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_LOCAL_INT5_CONFIG),
local5_config.sh_local_int5_config_regval);
local5_enable.sh_local_int5_enable_regval = 0;
local5_enable.sh_local_int5_enable_s.uart_int = 1;
HUB_S( (unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_LOCAL_INT5_ENABLE),
HUB_S((unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_LOCAL_INT5_ENABLE),
local5_enable.sh_local_int5_enable_regval);
// The II_INT_CONFIG register for cpu 0.
/* The II_INT_CONFIG register for cpu 0. */
ii_int_config.sh_ii_int0_config_regval = 0;
ii_int_config.sh_ii_int0_config_s.type = 0;
ii_int_config.sh_ii_int0_config_s.agt = 0;
......@@ -124,7 +103,7 @@ intr_init_vecblk( nodepda_t *npda,
ii_int_config.sh_ii_int0_config_regval);
// The II_INT_CONFIG register for cpu 1.
/* The II_INT_CONFIG register for cpu 1. */
ii_int_config.sh_ii_int0_config_regval = 0;
ii_int_config.sh_ii_int0_config_s.type = 0;
ii_int_config.sh_ii_int0_config_s.agt = 0;
......@@ -135,7 +114,7 @@ intr_init_vecblk( nodepda_t *npda,
ii_int_config.sh_ii_int0_config_regval);
// Enable interrupts for II_INT0 and 1.
/* Enable interrupts for II_INT0 and 1. */
ii_int_enable.sh_ii_int0_enable_regval = 0;
ii_int_enable.sh_ii_int0_enable_s.ii_enable = 1;
......@@ -143,148 +122,99 @@ intr_init_vecblk( nodepda_t *npda,
ii_int_enable.sh_ii_int0_enable_regval);
HUB_S((unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_II_INT1_ENABLE),
ii_int_enable.sh_ii_int0_enable_regval);
if (!timer_added) { // can only init the timer once.
timer_added = 1;
sn_init_cpei_timer();
}
}
// (Un)Reserve an irq on this cpu.
static int
do_intr_reserve_level(cpuid_t cpu,
int bit,
int reserve)
static int intr_reserve_level(cpuid_t cpu, int bit)
{
int i;
irqpda_t *irqs = irqpdaindr;
int min_shared;
int i;
if (bit < 0) {
for (i = IA64_SN2_FIRST_DEVICE_VECTOR; i <= IA64_SN2_LAST_DEVICE_VECTOR; i++) {
if (irqs->irq_flags[i] == 0) {
bit = i;
break;
}
}
}
if (reserve) {
if (bit < 0) {
for (i = IA64_SN2_FIRST_DEVICE_VECTOR; i <= IA64_SN2_LAST_DEVICE_VECTOR; i++) {
if (irqs->irq_flags[i] == 0) {
if (bit < 0) { /* ran out of irqs. Have to share. This will be rare. */
min_shared = 256;
for (i=IA64_SN2_FIRST_DEVICE_VECTOR; i < IA64_SN2_LAST_DEVICE_VECTOR; i++) {
/* Share with the same device class */
/* XXX: gross layering violation.. */
if (irqpdaindr->curr->vendor == irqpdaindr->device_dev[i]->vendor &&
irqpdaindr->curr->device == irqpdaindr->device_dev[i]->device &&
irqpdaindr->share_count[i] < min_shared) {
min_shared = irqpdaindr->share_count[i];
bit = i;
break;
}
}
}
if (bit < 0) { /* ran out of irqs. Have to share. This will be rare. */
min_shared = 256;
min_shared = 256;
if (bit < 0) { /* didn't find a matching device, just pick one. This will be */
/* exceptionally rare. */
for (i=IA64_SN2_FIRST_DEVICE_VECTOR; i < IA64_SN2_LAST_DEVICE_VECTOR; i++) {
/* Share with the same device class */
if (irqpdaindr->curr->vendor == irqpdaindr->device_dev[i]->vendor &&
irqpdaindr->curr->device == irqpdaindr->device_dev[i]->device &&
irqpdaindr->share_count[i] < min_shared) {
min_shared = irqpdaindr->share_count[i];
bit = i;
}
}
min_shared = 256;
if (bit < 0) { /* didn't find a matching device, just pick one. This will be */
/* exceptionally rare. */
for (i=IA64_SN2_FIRST_DEVICE_VECTOR; i < IA64_SN2_LAST_DEVICE_VECTOR; i++) {
if (irqpdaindr->share_count[i] < min_shared) {
min_shared = irqpdaindr->share_count[i];
bit = i;
}
if (irqpdaindr->share_count[i] < min_shared) {
min_shared = irqpdaindr->share_count[i];
bit = i;
}
}
irqpdaindr->share_count[bit]++;
}
if (irqs->irq_flags[bit] & SN2_IRQ_SHARED) {
irqs->irq_flags[bit] |= SN2_IRQ_RESERVED;
return bit;
}
if (irqs->irq_flags[bit] & SN2_IRQ_RESERVED) {
return -1;
} else {
irqs->num_irq_used++;
irqs->irq_flags[bit] |= SN2_IRQ_RESERVED;
return bit;
}
} else {
if (irqs->irq_flags[bit] & SN2_IRQ_RESERVED) {
irqs->num_irq_used--;
irqs->irq_flags[bit] &= ~SN2_IRQ_RESERVED;
return bit;
} else {
irqpdaindr->share_count[bit]++;
}
if (!(irqs->irq_flags[bit] & SN2_IRQ_SHARED)) {
if (irqs->irq_flags[bit] & SN2_IRQ_RESERVED)
return -1;
}
irqs->num_irq_used++;
}
}
int
intr_reserve_level(cpuid_t cpu,
int bit,
int resflags,
vertex_hdl_t owner_dev,
char *name)
{
return(do_intr_reserve_level(cpu, bit, 1));
irqs->irq_flags[bit] |= SN2_IRQ_RESERVED;
return bit;
}
void
intr_unreserve_level(cpuid_t cpu,
void intr_unreserve_level(cpuid_t cpu,
int bit)
{
(void)do_intr_reserve_level(cpu, bit, 0);
}
// Mark an irq on this cpu as (dis)connected.
static int
do_intr_connect_level(cpuid_t cpu,
int bit,
int connect)
{
irqpda_t *irqs = irqpdaindr;
if (connect) {
if (irqs->irq_flags[bit] & SN2_IRQ_SHARED) {
irqs->irq_flags[bit] |= SN2_IRQ_CONNECTED;
return bit;
}
if (irqs->irq_flags[bit] & SN2_IRQ_CONNECTED) {
return -1;
} else {
irqs->irq_flags[bit] |= SN2_IRQ_CONNECTED;
return bit;
}
} else {
if (irqs->irq_flags[bit] & SN2_IRQ_CONNECTED) {
irqs->irq_flags[bit] &= ~SN2_IRQ_CONNECTED;
return bit;
} else {
return -1;
}
if (irqs->irq_flags[bit] & SN2_IRQ_RESERVED) {
irqs->num_irq_used--;
irqs->irq_flags[bit] &= ~SN2_IRQ_RESERVED;
}
return(bit);
}
int
intr_connect_level(cpuid_t cpu,
int bit,
ilvl_t is,
intr_func_t intr_prefunc)
int intr_connect_level(cpuid_t cpu, int bit)
{
return(do_intr_connect_level(cpu, bit, 1));
irqpda_t *irqs = irqpdaindr;
if (!(irqs->irq_flags[bit] & SN2_IRQ_SHARED) &&
(irqs->irq_flags[bit] & SN2_IRQ_CONNECTED))
return -1;
irqs->irq_flags[bit] |= SN2_IRQ_CONNECTED;
return bit;
}
int
intr_disconnect_level(cpuid_t cpu,
int bit)
int intr_disconnect_level(cpuid_t cpu, int bit)
{
return(do_intr_connect_level(cpu, bit, 0));
}
irqpda_t *irqs = irqpdaindr;
// Choose a cpu on this node.
// We choose the one with the least number of int's assigned to it.
if (!(irqs->irq_flags[bit] & SN2_IRQ_CONNECTED))
return -1;
irqs->irq_flags[bit] &= ~SN2_IRQ_CONNECTED;
return bit;
}
static cpuid_t
do_intr_cpu_choose(cnodeid_t cnode) {
/*
* Choose a cpu on this node.
*
* We choose the one with the least number of int's assigned to it.
*/
static cpuid_t intr_cpu_choose_from_node(cnodeid_t cnode)
{
cpuid_t cpu, best_cpu = CPU_NONE;
int slice, min_count = 1000;
irqpda_t *irqs;
......@@ -293,13 +223,10 @@ do_intr_cpu_choose(cnodeid_t cnode) {
int intrs;
cpu = cnode_slice_to_cpuid(cnode, slice);
if (cpu == num_online_cpus()) {
if (cpu == num_online_cpus())
continue;
}
if (!cpu_online(cpu)) {
if (!cpu_online(cpu))
continue;
}
irqs = irqpdaindr;
intrs = irqs->num_irq_used;
......@@ -307,9 +234,12 @@ do_intr_cpu_choose(cnodeid_t cnode) {
if (min_count > intrs) {
min_count = intrs;
best_cpu = cpu;
if ( enable_shub_wars_1_1() ) {
/* Rather than finding the best cpu, always return the first cpu*/
/* This forces all interrupts to the same cpu */
if (enable_shub_wars_1_1()) {
/*
* Rather than finding the best cpu, always
* return the first cpu. This forces all
* interrupts to the same cpu
*/
break;
}
}
......@@ -317,130 +247,76 @@ do_intr_cpu_choose(cnodeid_t cnode) {
return best_cpu;
}
static cpuid_t
intr_cpu_choose_from_node(cnodeid_t cnode)
{
return(do_intr_cpu_choose(cnode));
}
// See if we can use this cpu/vect.
static cpuid_t
intr_bit_reserve_test(cpuid_t cpu,
int favor_subnode,
cnodeid_t cnode,
int req_bit,
int resflags,
vertex_hdl_t owner_dev,
char *name,
int *resp_bit)
/*
* We couldn't put it on the closest node. Try to find another one.
* Do a stupid round-robin assignment of the node.
*/
static cpuid_t intr_cpu_choose_node(void)
{
ASSERT( (cpu == CPU_NONE) || (cnode == CNODEID_NONE) );
if (cnode != CNODEID_NONE) {
cpu = intr_cpu_choose_from_node(cnode);
static cnodeid_t last_node = -1; /* XXX: racy */
cnodeid_t candidate_node;
cpuid_t cpuid;
if (last_node >= numnodes)
last_node = 0;
for (candidate_node = last_node + 1; candidate_node != last_node;
candidate_node++) {
if (candidate_node == numnodes)
candidate_node = 0;
cpuid = intr_cpu_choose_from_node(candidate_node);
if (cpuid != CPU_NONE)
return cpuid;
}
if (cpu != CPU_NONE) {
*resp_bit = do_intr_reserve_level(cpu, req_bit, 1);
if (*resp_bit >= 0) {
return(cpu);
}
}
return CPU_NONE;
}
// Find the node to assign for this interrupt.
cpuid_t
intr_heuristic(vertex_hdl_t dev,
device_desc_t dev_desc,
int req_bit,
int resflags,
vertex_hdl_t owner_dev,
char *name,
int *resp_bit)
/*
* Find the node to assign for this interrupt.
*
* SN2 + pcibr addressing limitation:
* Due to this limitation, all interrupts from a given bridge must
* go to the name node. The interrupt must also be targetted for
* the same processor. This limitation does not exist on PIC.
* But, the processor limitation will stay. The limitation will be
* similar to the bedrock/xbridge limit regarding PI's
*/
cpuid_t intr_heuristic(vertex_hdl_t dev, int req_bit, int *resp_bit)
{
cpuid_t cpuid;
cpuid_t candidate = CPU_NONE;
cnodeid_t candidate_node;
vertex_hdl_t pconn_vhdl;
pcibr_soft_t pcibr_soft;
int bit;
/* SN2 + pcibr addressing limitation */
/* Due to this limitation, all interrupts from a given bridge must go to the name node.*/
/* The interrupt must also be targetted for the same processor. */
/* This limitation does not exist on PIC. */
/* But, the processor limitation will stay. The limitation will be similar to */
/* the bedrock/xbridge limit regarding PI's */
if ( (hwgraph_edge_get(dev, EDGE_LBL_PCI, &pconn_vhdl) == GRAPH_SUCCESS) &&
( (pcibr_soft = pcibr_soft_get(pconn_vhdl) ) != NULL) ) {
if (pcibr_soft->bsi_err_intr) {
candidate = ((hub_intr_t)pcibr_soft->bsi_err_intr)->i_cpuid;
}
/* XXX: gross layering violation.. */
if (hwgraph_edge_get(dev, EDGE_LBL_PCI, &pconn_vhdl) == GRAPH_SUCCESS) {
pcibr_soft = pcibr_soft_get(pconn_vhdl);
if (pcibr_soft && pcibr_soft->bsi_err_intr)
candidate = ((hub_intr_t)pcibr_soft->bsi_err_intr)->i_cpuid;
}
if (candidate != CPU_NONE) {
// The cpu was chosen already when we assigned the error interrupt.
bit = intr_reserve_level(candidate,
req_bit,
resflags,
owner_dev,
name);
if (bit < 0) {
cpuid = CPU_NONE;
} else {
cpuid = candidate;
/*
* The cpu was chosen already when we assigned
* the error interrupt.
*/
bit = intr_reserve_level(candidate, req_bit);
if (bit >= 0) {
*resp_bit = bit;
return candidate;
}
} else {
// Need to choose one. Try the controlling c-brick first.
cpuid = intr_bit_reserve_test(CPU_NONE,
0,
master_node_get(dev),
req_bit,
0,
owner_dev,
name,
resp_bit);
}
if (cpuid != CPU_NONE) {
return cpuid;
}
if (candidate != CPU_NONE) {
printk("Cannot target interrupt to target node (%ld).\n",candidate);
return CPU_NONE; } else {
/* printk("Cannot target interrupt to closest node (%d) 0x%p\n",
master_node_get(dev), (void *)owner_dev); */
}
// We couldn't put it on the closest node. Try to find another one.
// Do a stupid round-robin assignment of the node.
{
static cnodeid_t last_node = -1;
if (last_node >= numnodes) last_node = 0;
for (candidate_node = last_node + 1; candidate_node != last_node; candidate_node++) {
if (candidate_node == numnodes) candidate_node = 0;
cpuid = intr_bit_reserve_test(CPU_NONE,
0,
candidate_node,
req_bit,
0,
owner_dev,
name,
resp_bit);
if (cpuid != CPU_NONE) {
return cpuid;
}
}
return CPU_NONE;
}
printk("cannot target interrupt: 0x%p\n",(void *)owner_dev);
return CPU_NONE;
/*
* Need to choose one. Try the controlling c-brick first.
*/
cpuid = intr_cpu_choose_from_node(master_node_get(dev));
if (cpuid != CPU_NONE)
return cpuid;
return intr_cpu_choose_node();
}
......@@ -268,17 +268,6 @@ iograph_early_init(void)
}
}
/*
* Let boot processor know that we're done initializing our node's IO
* and then exit.
*/
/* ARGSUSED */
/*
 * io_init_done - completion marker for a node's I/O initialization.
 * @cnodeid: compact node id whose I/O init just finished (unused)
 * @c: cpu cookie from the setnoderun done at thread start (unused)
 *
 * Intentionally empty on this platform: it exists only as the point at
 * which the boot processor is (notionally) told the node is done.
 */
static void
io_init_done(cnodeid_t cnodeid,cpu_cookie_t c)
{
/* Let boot processor know that we're done. */
}
/*
* Probe to see if this hub's xtalk link is active. If so,
* return the Crosstalk Identification of the widget that we talk to.
......@@ -329,28 +318,6 @@ early_probe_for_widget(vertex_hdl_t hubv, xwidget_hwid_t hwid)
}
}
/* Add inventory information to the widget vertex
* Right now (module,slot,revision) is being
* added as inventory information.
*/
/*
 * xwidget_inventory_add - record (module, slot, revision) inventory
 * information for a crosstalk widget in the hardware graph.
 * @widgetv: hardware-graph vertex of the widget
 * @board: PROM board descriptor; if NULL there is nothing to record
 * @hwid: widget hardware id (only rev_num is used here)
 */
static void
xwidget_inventory_add(vertex_hdl_t widgetv,
lboard_t *board,
struct xwidget_hwid_s hwid)
{
if (!board)
return;
/* Do not add inventory information for the baseio
 * on a speedo with an xbox.  It has already been
 * taken care of in SN00_vmc.
 * Speedo with xbox's baseio comes in at slot io1 (widget 9).
 */
device_inventory_add(widgetv,INV_IOBD,board->brd_type,
geo_module(board->brd_geoid),
SLOTNUM_GETSLOT(board->brd_slot),
hwid.rev_num);
}
/*
* io_xswitch_widget_init
*
......@@ -494,10 +461,6 @@ io_xswitch_widget_init(vertex_hdl_t xswitchv,
hwid.part_num = XWIDGET_PART_NUM(widget_id);
hwid.rev_num = XWIDGET_REV_NUM(widget_id);
hwid.mfg_num = XWIDGET_MFG_NUM(widget_id);
/* Store some inventory information about
* the xwidget in the hardware graph.
*/
xwidget_inventory_add(widgetv,board,hwid);
(void)xwidget_register(&hwid, widgetv, widgetnum,
hubv, hub_widgetid);
......@@ -687,11 +650,6 @@ io_init_node(cnodeid_t cnodeid)
DBG("Interfering with device probing!!!\n");
}
#endif
/* io_init_done takes cpu cookie as 2nd argument
* to do a restorenoderun for the setnoderun done
* at the start of this thread
*/
DBG("**** io_init_node: Node's 0x%p hub widget has XWIDGET_PART_NUM_NONE ****\n", hubv);
return;
/* NOTREACHED */
......@@ -795,15 +753,6 @@ io_init_node(cnodeid_t cnodeid)
(void)xwidget_register(&hwid, widgetv, npdap->basew_id, hubv, hubinfo->h_widgetid);
if (!is_xswitch) {
/* io_init_done takes cpu cookie as 2nd argument
* to do a restorenoderun for the setnoderun done
* at the start of this thread
*/
io_init_done(cnodeid,c);
/* NOTREACHED */
}
/*
* Special handling for Crosstalk Switches (e.g. xbow).
* We need to do things in roughly the following order:
......@@ -848,35 +797,9 @@ io_init_node(cnodeid_t cnodeid)
io_init_xswitch_widgets(switchv, cnodeid);
io_link_xswitch_widgets(switchv, cnodeid);
/* io_init_done takes cpu cookie as 2nd argument
* to do a restorenoderun for the setnoderun done
* at the start of this thread
*/
io_init_done(cnodeid,c);
DBG("\nio_init_node: DONE INITIALIZED ALL I/O FOR CNODEID %d\n\n", cnodeid);
}
#define IOINIT_STKSZ (16 * 1024)
#define __DEVSTR1 "/../.master/"
#define __DEVSTR2 "/target/"
#define __DEVSTR3 "/lun/0/disk/partition/"
#define __DEVSTR4 "/../ef"
/*
* ioconfig starts numbering SCSI's at NUM_BASE_IO_SCSI_CTLR.
*/
#define NUM_BASE_IO_SCSI_CTLR 6
/*
* This tells ioconfig where it can start numbering scsi controllers.
* Below this base number, platform-specific handles the numbering.
* XXX Irix legacy..controller numbering should be part of devfsd's job
*/
int num_base_io_scsi_ctlr = 2; /* used by syssgi */
vertex_hdl_t base_io_scsi_ctlr_vhdl[NUM_BASE_IO_SCSI_CTLR];
#include <asm/sn/ioerror_handling.h>
/* #endif */
......@@ -914,78 +837,6 @@ init_all_devices(void)
#define toint(x) ((int)(x) - (int)('0'))
/*
 * devnamefromarcs - translate an ARCS-style disk name ("dksXdXsX") into
 * the full hardware-graph path of the corresponding block device.
 * @devnm: in/out name buffer; rewritten in place on success.  Assumed
 *         large enough for the expanded path — TODO(review): confirm
 *         callers pass MAXDEVNAME-sized buffers (strcpy is unbounded).
 *
 * Names not matching "dks<digits>d..." are left untouched.  An
 * out-of-range controller number is fatal (panic) because the PROM
 * "root" variable cannot be honoured.
 */
void
devnamefromarcs(char *devnm)
{
int val;
char tmpnm[MAXDEVNAME];
char *tmp1, *tmp2;
/* Only "dks..." names are ARCS disk names; anything else passes through. */
val = strncmp(devnm, "dks", 3);
if (val != 0)
return;
tmp1 = devnm + 3;
if (!isdigit(*tmp1))
return;
/* Parse the decimal controller number that follows "dks". */
val = 0;
while (isdigit(*tmp1)) {
val = 10*val+toint(*tmp1);
tmp1++;
}
/* The controller number must be followed by 'd' (disk number next). */
if(*tmp1 != 'd')
return;
else
tmp1++;
/* Range-check the controller number against the base I/O controllers. */
if ((val < 0) || (val >= num_base_io_scsi_ctlr)) {
int i;
int viable_found = 0;
DBG("Only controller numbers 0..%d are supported for\n", NUM_BASE_IO_SCSI_CTLR-1);
DBG("prom \"root\" variables of the form dksXdXsX.\n");
DBG("To use another disk you must use the full hardware graph path\n\n");
DBG("Possible controller numbers for use in 'dksXdXsX' on this system: ");
for (i=0; i<NUM_BASE_IO_SCSI_CTLR; i++) {
if (base_io_scsi_ctlr_vhdl[i] != GRAPH_VERTEX_NONE) {
DBG("%d ", i);
viable_found=1;
}
}
if (viable_found)
DBG("\n");
else
DBG("none found!\n");
DELAY(15000000);
//prom_reboot();
panic("FIXME: devnamefromarcs: should call prom_reboot here.\n");
/* NOTREACHED */
}
ASSERT(base_io_scsi_ctlr_vhdl[val] != GRAPH_VERTEX_NONE);
/* Start the new name from the hwgraph path of the chosen controller. */
vertex_to_name(base_io_scsi_ctlr_vhdl[val],
tmpnm,
MAXDEVNAME);
tmp2 = tmpnm + strlen(tmpnm);
strcpy(tmp2, __DEVSTR2);
tmp2 += strlen(__DEVSTR2);
/* Copy the target (disk) number up to the 's' (slice) separator;
 * bail out if the string ends before an 's' is found. */
while (*tmp1 != 's') {
if((*tmp2++ = *tmp1++) == '\0')
return;
}
tmp1++;
strcpy(tmp2, __DEVSTR3);
tmp2 += strlen(__DEVSTR3);
/* Copy the partition number, including the terminating NUL. */
while ( (*tmp2++ = *tmp1++) )
;
/* Back up over the NUL and append "/<block-edge-label>". */
tmp2--;
*tmp2++ = '/';
strcpy(tmp2, EDGE_LBL_BLOCK);
strcpy(devnm,tmpnm);
}
static
struct io_brick_map_s io_brick_tab[] = {
......
......@@ -14,6 +14,7 @@
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <asm/sn/sgi.h>
#include <asm/sn/xtalk/xbow.h> /* Must be before iograph.h to get MAX_PORT_NUM */
#include <asm/sn/iograph.h>
......@@ -947,15 +948,6 @@ pciio_info_get(vertex_hdl_t pciio)
pciio_info = (pciio_info_t) hwgraph_fastinfo_get(pciio);
#ifdef DEBUG_PCIIO
{
int pos;
char dname[256];
pos = devfs_generate_path(pciio, dname, 256);
printk("%s : path= %s\n", __FUNCTION__, &dname[pos]);
}
#endif /* DEBUG_PCIIO */
if ((pciio_info != NULL) &&
(pciio_info->c_fingerprint != pciio_info_fingerprint)
&& (pciio_info->c_fingerprint != NULL)) {
......@@ -1212,14 +1204,6 @@ pciio_device_info_register(
pciio_info->c_vertex = pconn;
pciio_info_set(pconn, pciio_info);
#ifdef DEBUG_PCIIO
{
int pos;
char dname[256];
pos = devfs_generate_path(pconn, dname, 256);
printk("%s : pconn path= %s \n", __FUNCTION__, &dname[pos]);
}
#endif /* DEBUG_PCIIO */
/*
* create link to our pci provider
......@@ -1254,24 +1238,6 @@ pciio_device_info_unregister(vertex_hdl_t connectpt,
hwgraph_vertex_destroy(pconn);
}
/* Add the pci card inventory information to the hwgraph
*/
/*
 * pciio_device_inventory_add - attach the PCI card's inventory record
 * (vendor id, device id, slot) to its hardware-graph vertex.
 * @pconn_vhdl: PCI connection-point vertex of the card.
 */
static void
pciio_device_inventory_add(vertex_hdl_t pconn_vhdl)
{
pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
ASSERT(pciio_info);
ASSERT(pciio_info->c_vertex == pconn_vhdl);
/* Do not add inventory for non-existent devices. */
if ((pciio_info->c_vendor == PCIIO_VENDOR_ID_NONE) ||
(pciio_info->c_device == PCIIO_DEVICE_ID_NONE))
return;
device_inventory_add(pconn_vhdl,INV_IOBD,INV_PCIADAP,
pciio_info->c_vendor,pciio_info->c_device,
pciio_info->c_slot);
}
/*ARGSUSED */
int
......@@ -1283,7 +1249,6 @@ pciio_device_attach(vertex_hdl_t pconn,
pciio_device_id_t device_id;
pciio_device_inventory_add(pconn);
pciio_info = pciio_info_get(pconn);
vendor_id = pciio_info->c_vendor;
......@@ -1435,27 +1400,10 @@ pciio_info_type1_get(pciio_info_t pci_info)
return(0);
}
/*
* These are complementary Linux interfaces that takes in a pci_dev * as the
* first arguement instead of vertex_hdl_t.
* XXX: should probably be called __sn2_pci_rrb_alloc
*/
iopaddr_t snia_pciio_dmatrans_addr(struct pci_dev *, device_desc_t, paddr_t, size_t, unsigned);
pciio_dmamap_t snia_pciio_dmamap_alloc(struct pci_dev *, device_desc_t, size_t, unsigned);
void snia_pciio_dmamap_free(pciio_dmamap_t);
iopaddr_t snia_pciio_dmamap_addr(pciio_dmamap_t, paddr_t, size_t);
void snia_pciio_dmamap_done(pciio_dmamap_t);
pciio_endian_t snia_pciio_endian_set(struct pci_dev *pci_dev, pciio_endian_t device_end,
pciio_endian_t desired_end);
#include <linux/module.h>
EXPORT_SYMBOL(snia_pciio_dmatrans_addr);
EXPORT_SYMBOL(snia_pciio_dmamap_alloc);
EXPORT_SYMBOL(snia_pciio_dmamap_free);
EXPORT_SYMBOL(snia_pciio_dmamap_addr);
EXPORT_SYMBOL(snia_pciio_dmamap_done);
EXPORT_SYMBOL(snia_pciio_endian_set);
/* used by qla1280 */
int
snia_pcibr_rrb_alloc(struct pci_dev *pci_dev,
int *count_vchan0,
......@@ -1467,6 +1415,13 @@ snia_pcibr_rrb_alloc(struct pci_dev *pci_dev,
}
EXPORT_SYMBOL(snia_pcibr_rrb_alloc);
/*
* XXX: interface should be more like
*
* int __sn2_pci_enable_bwswap(struct pci_dev *dev);
* void __sn2_pci_disable_bswap(struct pci_dev *dev);
*/
/* used by ioc4 ide */
pciio_endian_t
snia_pciio_endian_set(struct pci_dev *pci_dev,
pciio_endian_t device_end,
......@@ -1477,62 +1432,4 @@ snia_pciio_endian_set(struct pci_dev *pci_dev,
return DEV_FUNC(dev, endian_set)
(dev, device_end, desired_end);
}
/*
 * snia_pciio_dmatrans_addr - Linux-flavoured wrapper (takes a
 * struct pci_dev * instead of a vertex handle) around the provider's
 * one-step DMA address translation.
 *
 * Forces PCIIO_BYTE_STREAM on for non-PIC devices and off for PIC
 * devices; see the comment in the body.  Return value comes straight
 * from the provider's dmatrans_addr hook — NOTE(review): presumably 0
 * on failure; confirm against the provider implementation.
 */
iopaddr_t
snia_pciio_dmatrans_addr(struct pci_dev *pci_dev, /* translate for this device */
device_desc_t dev_desc, /* device descriptor */
paddr_t paddr, /* system physical address */
size_t byte_count, /* length */
unsigned flags)
{ /* defined in dma.h */
vertex_hdl_t dev = PCIDEV_VERTEX(pci_dev);
/*
 * If the device is not a PIC, we always want the PCIIO_BYTE_STREAM to be
 * set. Otherwise, it must not be set. This applies to SN1 and SN2.
 */
return DEV_FUNC(dev, dmatrans_addr)
(dev, dev_desc, paddr, byte_count, (IS_PIC_DEVICE(pci_dev)) ? (flags & ~PCIIO_BYTE_STREAM) : flags | PCIIO_BYTE_STREAM);
}
/*
 * snia_pciio_dmamap_alloc - Linux-flavoured wrapper (takes a
 * struct pci_dev *) around the provider's DMA map allocation.
 *
 * Applies the same PIC/non-PIC PCIIO_BYTE_STREAM adjustment as
 * snia_pciio_dmatrans_addr.  Returns the provider's map handle cast to
 * pciio_dmamap_t.
 */
pciio_dmamap_t
snia_pciio_dmamap_alloc(struct pci_dev *pci_dev, /* set up mappings for this device */
device_desc_t dev_desc, /* device descriptor */
size_t byte_count_max, /* max size of a mapping */
unsigned flags)
{ /* defined in dma.h */
vertex_hdl_t dev = PCIDEV_VERTEX(pci_dev);
/*
 * If the device is not a PIC, we always want the PCIIO_BYTE_STREAM to be
 * set. Otherwise, it must not be set. This applies to SN1 and SN2.
 */
return (pciio_dmamap_t) DEV_FUNC(dev, dmamap_alloc)
(dev, dev_desc, byte_count_max, (IS_PIC_DEVICE(pci_dev)) ? (flags & ~PCIIO_BYTE_STREAM) : flags | PCIIO_BYTE_STREAM);
}
/*
 * Release the DMA mapping resources previously obtained from
 * snia_pciio_dmamap_alloc().  DMAMAP_FUNC presumably resolves the
 * provider-specific dmamap_free routine — confirm in the macro's definition.
 */
void
snia_pciio_dmamap_free(pciio_dmamap_t pciio_dmamap)
{
	DMAMAP_FUNC(pciio_dmamap, dmamap_free)
	    (CAST_DMAMAP(pciio_dmamap));
}
/*
 * Establish a mapping for a physical address range using previously
 * allocated mapping resources, returning the corresponding I/O address.
 * Dispatches to the provider's dmamap_addr method.
 */
iopaddr_t
snia_pciio_dmamap_addr(pciio_dmamap_t pciio_dmamap,	/* use these mapping resources */
		       paddr_t paddr,	/* map for this address */
		       size_t byte_count)
{				/* map this many bytes */
	return DMAMAP_FUNC(pciio_dmamap, dmamap_addr)
	    (CAST_DMAMAP(pciio_dmamap), paddr, byte_count);
}
/*
 * Signal that DMA through this mapping is complete, so the mapping
 * resources may be reused.  Dispatches to the provider's dmamap_done
 * method; does not free the mapping itself (see snia_pciio_dmamap_free).
 */
void
snia_pciio_dmamap_done(pciio_dmamap_t pciio_dmamap)
{
	DMAMAP_FUNC(pciio_dmamap, dmamap_done)
	    (CAST_DMAMAP(pciio_dmamap));
}
EXPORT_SYMBOL(snia_pciio_endian_set);
/* $Id: shub_intr.c,v 1.1 2002/02/28 17:31:25 marcelo Exp $
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
......@@ -61,8 +60,7 @@ do_hub_intr_alloc(vertex_hdl_t dev,
xwidget_info_t xwidget_info;
ilvl_t intr_swlevel = 0;
cpu = intr_heuristic(dev, dev_desc, -1, 0, owner_dev, NULL, &vector);
cpu = intr_heuristic(dev, -1, &vector);
if (cpu == CPU_NONE) {
printk("Unable to allocate interrupt for 0x%p\n", (void *)owner_dev);
return(0);
......@@ -150,10 +148,9 @@ hub_intr_connect(hub_intr_t intr_hdl,
ASSERT(intr_hdl->i_flags & HUB_INTR_IS_ALLOCED);
rv = intr_connect_level(cpu, vector, intr_hdl->i_swlevel, NULL);
if (rv < 0) {
rv = intr_connect_level(cpu, vector);
if (rv < 0)
return rv;
}
intr_hdl->i_xtalk_info.xi_setfunc = setfunc;
intr_hdl->i_xtalk_info.xi_sfarg = setfunc_arg;
......
......@@ -145,14 +145,13 @@ hubii_eint_init(cnodeid_t cnode)
/* Select a possible interrupt target where there is a free interrupt
* bit and also reserve the interrupt bit for this IO error interrupt
*/
intr_cpu = intr_heuristic(hub_v,0,SGI_II_ERROR,0,hub_v,
"HUB IO error interrupt",&bit);
intr_cpu = intr_heuristic(hub_v, SGI_II_ERROR, &bit);
if (intr_cpu == CPU_NONE) {
printk("hubii_eint_init: intr_reserve_level failed, cnode %d", cnode);
printk("hubii_eint_init: intr_heuristic failed, cnode %d", cnode);
return;
}
rv = intr_connect_level(intr_cpu, SGI_II_ERROR, 0, NULL);
rv = intr_connect_level(intr_cpu, SGI_II_ERROR);
request_irq(SGI_II_ERROR, hubii_eint_handler, SA_SHIRQ, "SN_hub_error", (void *)hub_v);
irq_descp(bit)->status |= SN2_IRQ_PER_HUB;
ASSERT_ALWAYS(rv >= 0);
......
......@@ -257,7 +257,7 @@ xbow_attach(vertex_hdl_t conn)
*/
/*
* Register a xbow driver with devfs.
* Register a xbow driver with hwgraph.
* file ops.
*/
vhdl = NULL;
......
......@@ -11,4 +11,3 @@ EXTRA_CFLAGS := -DLITTLE_ENDIAN
obj-y += probe.o setup.o bte.o irq.o mca.o idle.o sn2/
obj-$(CONFIG_IA64_GENERIC) += machvec.o
obj-$(CONFIG_MODULES) += sn_ksyms.o
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
/*
* Architecture-specific kernel symbols
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/machvec.h>
#include <asm/sn/intr.h>
#include <asm/sn/sgi.h>
#include <asm/sn/types.h>
#include <asm/sn/arch.h>
#include <asm/sn/bte.h>
#include <asm/sal.h>
#include <asm/sn/sn_sal.h>
#ifdef CONFIG_IA64_SGI_SN_DEBUG
EXPORT_SYMBOL(__pa_debug);
EXPORT_SYMBOL(__va_debug);
#endif
EXPORT_SYMBOL(bte_copy);
EXPORT_SYMBOL(bte_unaligned_copy);
EXPORT_SYMBOL(ia64_sal);
EXPORT_SYMBOL(sal_lock);
EXPORT_SYMBOL(sn_local_partid);
#ifndef meminit_h
#define meminit_h
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
/*
* Entries defined so far:
* - boot param structure itself
* - memory map
* - initrd (optional)
* - command line string
* - kernel code & data
*
* More could be added if necessary
*/
#define IA64_MAX_RSVD_REGIONS 5
/* A single reserved memory region, expressed as a half-open virtual range. */
struct rsvd_region {
	unsigned long start;	/* virtual address of beginning of element */
	unsigned long end;	/* virtual address of end of element + 1 */
};
extern struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1];
extern int num_rsvd_regions;
extern void find_memory (void);
extern void reserve_memory (void);
extern void find_initrd (void);
extern int filter_rsvd_memory (unsigned long start, unsigned long end, void *arg);
#ifdef CONFIG_DISCONTIGMEM
extern void call_pernode_memory (unsigned long start, unsigned long end, void *arg);
#endif
#define IGNORE_PFN0 1 /* XXX fix me: ignore pfn 0 until TLB miss handler is updated... */
#endif /* meminit_h */
......@@ -11,6 +11,9 @@
#ifndef _ASM_IA64_NUMA_H
#define _ASM_IA64_NUMA_H
#include <linux/config.h>
#include <linux/cpumask.h>
#ifdef CONFIG_NUMA
#ifdef CONFIG_DISCONTIGMEM
......
......@@ -10,8 +10,6 @@
#ifndef _ASM_IA64_SN_HCL_UTIL_H
#define _ASM_IA64_SN_HCL_UTIL_H
#include <linux/devfs_fs_kernel.h>
extern char * dev_to_name(vertex_hdl_t, char *, uint);
extern int device_master_set(vertex_hdl_t, vertex_hdl_t);
extern vertex_hdl_t device_master_get(vertex_hdl_t);
......
......@@ -720,28 +720,14 @@ typedef struct invplace_s {
extern invplace_t invplace_none;
#define INVPLACE_NONE invplace_none
extern void add_to_inventory(int, int, int, int, int);
extern void replace_in_inventory(inventory_t *, int, int, int, int, int);
extern void start_scan_inventory(invplace_t *);
extern inventory_t *get_next_inventory(invplace_t *);
extern void end_scan_inventory(invplace_t *);
extern inventory_t *find_inventory(inventory_t *, int, int, int, int, int);
extern int scaninvent(int (*)(inventory_t *, void *), void *);
extern int get_sizeof_inventory(int);
extern void device_inventory_add( vertex_hdl_t device,
static inline void device_inventory_add(vertex_hdl_t device,
int class,
int type,
major_t ctlr,
minor_t unit,
int state);
int state)
{
}
extern inventory_t *device_inventory_get_next( vertex_hdl_t device,
invplace_t *);
extern void device_controller_num_set( vertex_hdl_t,
int);
extern int device_controller_num_get( vertex_hdl_t);
#endif /* __KERNEL__ */
#endif /* _ASM_IA64_SN_INVENT_H */
......@@ -10,7 +10,6 @@
#include <linux/config.h>
#include <linux/types.h>
#include <linux/devfs_fs_kernel.h>
#include <asm/sn/sgi.h>
#if __KERNEL__
......
......@@ -40,7 +40,7 @@ typedef struct label_info_s {
/*
* Definition of the data structure that provides the link to
* the hwgraph fastinfo and the label entries associated with a
* particular devfs entry.
* particular hwgraph entry.
*/
typedef struct labelcl_info_s {
unsigned long hwcl_magic;
......@@ -87,6 +87,5 @@ extern int labelcl_info_replace_IDX(vertex_hdl_t, int, arbitrary_info_t,
arbitrary_info_t *);
extern int labelcl_info_connectpt_set(vertex_hdl_t, vertex_hdl_t);
extern int labelcl_info_get_IDX(vertex_hdl_t, int, arbitrary_info_t *);
extern struct devfs_handle_t device_info_connectpt_get(vertex_hdl_t);
#endif /* _ASM_IA64_SN_LABELCL_H */
/*
* Intel Multimedia Timer device interface
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2001-2003 Silicon Graphics, Inc. All rights reserved.
*
* Helper file for the SN implementation of mmtimers
*
* 11/01/01 - jbarnes - initial revision
*/
#ifndef _SN_MMTIMER_PRIVATE_H
/* Fixed: the #define was missing, so the include guard never took effect. */
#define _SN_MMTIMER_PRIVATE_H

#define RTC_BITS 55 /* 55 bits for this implementation */
#define NUM_COMPARATORS 2 /* two comparison registers in SN1 */
/*
* Check for an interrupt and clear the pending bit if
* one is waiting.
*/
#define MMTIMER_INT_PENDING(x) (x ? *(RTC_INT_PENDING_B_ADDR) : *(RTC_INT_PENDING_A_ADDR))
/*
* Set interrupts on RTC 'x' to 'v' (true or false)
*/
#define MMTIMER_SET_INT(x,v) (x ? (*(RTC_INT_ENABLED_B_ADDR) = (unsigned long)(v)) : (*(RTC_INT_ENABLED_A_ADDR) = (unsigned long)(v)))
#define MMTIMER_ENABLE_INT(x) MMTIMER_SET_INT(x, 1)
#define MMTIMER_DISABLE_INT(x) MMTIMER_SET_INT(x, 0)
/* Per-timer state for the SN multimedia-timer (mmtimer) driver. */
typedef struct mmtimer {
	spinlock_t timer_lock;		/* protects this structure — TODO confirm scope */
	unsigned long periodic;		/* NOTE(review): presumably nonzero for periodic mode — verify */
	int signo;			/* signal number, presumably delivered on expiry — verify */
	volatile unsigned long *compare;	/* RTC comparison register — presumably one of NUM_COMPARATORS */
	struct task_struct *process;	/* task associated with this timer */
} mmtimer_t;
#endif /* _SN_MMTIMER_PRIVATE_H */
......@@ -60,11 +60,6 @@ struct sn_device_sysdata {
volatile unsigned int *xbow_buf_sync;
};
struct sn_dma_maps_s{
struct pcibr_dmamap_s dma_map;
dma_addr_t dma_addr;
};
struct ioports_to_tlbs_s {
unsigned long p:1,
rv_1:1,
......
......@@ -139,6 +139,7 @@ struct pcibr_dmamap_s {
bridge_ate_p bd_ate_ptr; /* where to write first ATE */
bridge_ate_t bd_ate_proto; /* prototype ATE (for xioaddr=0) */
bridge_ate_t bd_ate_prime; /* value of 1st ATE written */
dma_addr_t bd_dma_addr; /* Linux dma handle */
};
#define IBUFSIZE 5 /* size of circular buffer (holds 4) */
......
......@@ -16,7 +16,6 @@
#include <asm/sn/types.h>
#include <asm/uaccess.h> /* for copy_??_user */
#include <linux/mm.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/fs.h>
#include <asm/sn/hwgfs.h>
......
......@@ -49,14 +49,10 @@ extern void get_dir_ent(paddr_t paddr, int *state,
#endif
/* intr.c */
extern int intr_reserve_level(cpuid_t cpu, int level, int err, vertex_hdl_t owner_dev, char *name);
extern void intr_unreserve_level(cpuid_t cpu, int level);
extern int intr_connect_level(cpuid_t cpu, int bit, ilvl_t mask_no,
intr_func_t intr_prefunc);
extern int intr_connect_level(cpuid_t cpu, int bit);
extern int intr_disconnect_level(cpuid_t cpu, int bit);
extern cpuid_t intr_heuristic(vertex_hdl_t dev, device_desc_t dev_desc,
int req_bit,int intr_resflags,vertex_hdl_t owner_dev,
char *intr_name,int *resp_bit);
extern cpuid_t intr_heuristic(vertex_hdl_t dev, int req_bit, int *resp_bit);
extern void intr_block_bit(cpuid_t cpu, int bit);
extern void intr_unblock_bit(cpuid_t cpu, int bit);
extern void setrtvector(intr_func_t);
......
/*
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1992-1997,2000-2003 Silicon Graphics, Inc. All rights reserved.
*/
#ifndef _ASM_IA64_SN_UART16550_H
#define _ASM_IA64_SN_UART16550_H
/*
* Definitions for 16550 chip
*/
/* defined as offsets from the data register */
#define REG_DAT 0 /* receive/transmit data */
#define REG_ICR 1 /* interrupt control register */
#define REG_ISR 2 /* interrupt status register */
#define REG_FCR 2 /* fifo control register */
#define REG_LCR 3 /* line control register */
#define REG_MCR 4 /* modem control register */
#define REG_LSR 5 /* line status register */
#define REG_MSR 6 /* modem status register */
#define REG_SCR 7 /* Scratch register */
#define REG_DLL 0 /* divisor latch (lsb) */
#define REG_DLH 1 /* divisor latch (msb) */
#define REG_EFR 2 /* 16650 enhanced feature register */
/*
* 16450/16550 Registers Structure.
*/
/* Line Control Register */
#define LCR_WLS0 0x01 /* word length select bit 0 */
#define LCR_WLS1 0x02 /* word length select bit 1 */
#define LCR_STB 0x04 /* number of stop bits */
#define LCR_PEN 0x08 /* parity enable */
#define LCR_EPS 0x10 /* even parity select */
#define LCR_SETBREAK 0x40 /* break key */
#define LCR_DLAB 0x80 /* divisor latch access bit */
#define LCR_RXLEN 0x03 /* # of data bits per received/xmitted char */
#define LCR_STOP1 0x00
#define LCR_STOP2 0x04
#define LCR_PAREN 0x08
#define LCR_PAREVN 0x10
#define LCR_PARMARK 0x20
#define LCR_SNDBRK 0x40
/* NOTE(review): duplicate of LCR_DLAB above (same value, benign) */
#define LCR_DLAB 0x80
#define LCR_BITS5 0x00 /* 5 bits per char */
#define LCR_BITS6 0x01 /* 6 bits per char */
#define LCR_BITS7 0x02 /* 7 bits per char */
#define LCR_BITS8 0x03 /* 8 bits per char */
#define LCR_MASK_BITS_CHAR 0x03
#define LCR_MASK_STOP_BITS 0x04
#define LCR_MASK_PARITY_BITS 0x18
/* Line Status Register */
#define LSR_RCA 0x01 /* data ready */
#define LSR_OVRRUN 0x02 /* overrun error */
#define LSR_PARERR 0x04 /* parity error */
#define LSR_FRMERR 0x08 /* framing error */
#define LSR_BRKDET 0x10 /* a break has arrived */
#define LSR_XHRE 0x20 /* tx hold reg is now empty */
#define LSR_XSRE 0x40 /* tx shift reg is now empty */
#define LSR_RFBE 0x80 /* rx FIFO Buffer error */
/* Interrupt Status Regisger */
#define ISR_MSTATUS 0x00
#define ISR_TxRDY 0x02
#define ISR_RxRDY 0x04
#define ISR_ERROR_INTR 0x08
#define ISR_FFTMOUT 0x0c /* FIFO Timeout */
#define ISR_RSTATUS 0x06 /* Receiver Line status */
/* Interrupt Enable Register */
#define ICR_RIEN 0x01 /* Received Data Ready */
#define ICR_TIEN 0x02 /* Tx Hold Register Empty */
#define ICR_SIEN 0x04 /* Receiver Line Status */
#define ICR_MIEN 0x08 /* Modem Status */
/* Modem Control Register */
#define MCR_DTR 0x01 /* Data Terminal Ready */
#define MCR_RTS 0x02 /* Request To Send */
#define MCR_OUT1 0x04 /* Aux output - not used */
#define MCR_OUT2 0x08 /* turns intr to 386 on/off */
#define MCR_LOOP 0x10 /* loopback for diagnostics */
#define MCR_AFE 0x20 /* Auto flow control enable */
/* Modem Status Register */
#define MSR_DCTS 0x01 /* Delta Clear To Send */
#define MSR_DDSR 0x02 /* Delta Data Set Ready */
#define MSR_DRI 0x04 /* Trail Edge Ring Indicator */
#define MSR_DDCD 0x08 /* Delta Data Carrier Detect */
#define MSR_CTS 0x10 /* Clear To Send */
#define MSR_DSR 0x20 /* Data Set Ready */
#define MSR_RI 0x40 /* Ring Indicator */
#define MSR_DCD 0x80 /* Data Carrier Detect */

/* Mask out the delta bits (changed-since-last-read indicators). */
#define DELTAS(x) ((x)&(MSR_DCTS|MSR_DDSR|MSR_DRI|MSR_DDCD))
/* Mask out the current line-state bits.
 * Fixed: the '&' operator was missing, which made the expansion a bogus
 * function-call expression ((x)(...)) that would not compile if used. */
#define STATES(x) ((x)&(MSR_CTS|MSR_DSR|MSR_RI|MSR_DCD))
#define FCR_FIFOEN 0x01 /* enable receive/transmit fifo */
#define FCR_RxFIFO 0x02 /* enable receive fifo */
#define FCR_TxFIFO 0x04 /* enable transmit fifo */
#define FCR_MODE1 0x08 /* change to mode 1 */
#define RxLVL0 0x00 /* Rx fifo level at 1 */
#define RxLVL1 0x40 /* Rx fifo level at 4 */
#define RxLVL2 0x80 /* Rx fifo level at 8 */
#define RxLVL3 0xc0 /* Rx fifo level at 14 */
#define FIFOEN (FCR_FIFOEN | FCR_RxFIFO | FCR_TxFIFO | RxLVL3 | FCR_MODE1)
#define FCT_TxMASK 0x30 /* mask for Tx trigger */
#define FCT_RxMASK 0xc0 /* mask for Rx trigger */
/* enhanced festures register */
#define EFR_SFLOW 0x0f /* various S/w Flow Controls */
#define EFR_EIC 0x10 /* Enhanced Interrupt Control bit */
#define EFR_SCD 0x20 /* Special Character Detect */
#define EFR_RTS 0x40 /* RTS flow control */
#define EFR_CTS 0x80 /* CTS flow control */
/* Rx Tx software flow controls in 16650 enhanced mode */
#define SFLOW_Tx0 0x00 /* no Xmit flow control */
#define SFLOW_Tx1 0x08 /* Transmit Xon1, Xoff1 */
#define SFLOW_Tx2 0x04 /* Transmit Xon2, Xoff2 */
#define SFLOW_Tx3 0x0c /* Transmit Xon1,Xon2, Xoff1,Xoff2 */
#define SFLOW_Rx0 0x00 /* no Rcv flow control */
#define SFLOW_Rx1 0x02 /* Receiver compares Xon1, Xoff1 */
#define SFLOW_Rx2 0x01 /* Receiver compares Xon2, Xoff2 */
#define ASSERT_DTR(x) (x |= MCR_DTR)
#define ASSERT_RTS(x) (x |= MCR_RTS)
#define DU_RTS_ASSERTED(x) (((x) & MCR_RTS) != 0)
#define DU_RTS_ASSERT(x) ((x) |= MCR_RTS)
#define DU_RTS_DEASSERT(x) ((x) &= ~MCR_RTS)
/*
* ioctl(fd, I_STR, arg)
* use the SIOC_RS422 and SIOC_EXTCLK combination to support MIDI
*/
#define SIOC ('z' << 8) /* z for z85130 */
#define SIOC_EXTCLK (SIOC | 1) /* select/de-select external clock */
#define SIOC_RS422 (SIOC | 2) /* select/de-select RS422 protocol */
#define SIOC_ITIMER (SIOC | 3) /* upstream timer adjustment */
#define SIOC_LOOPBACK (SIOC | 4) /* diagnostic loopback test mode */
/* channel control register */
#define DMA_INT_MASK 0xe0 /* ring intr mask */
#define DMA_INT_TH25 0x20 /* 25% threshold */
#define DMA_INT_TH50 0x40 /* 50% threshold */
#define DMA_INT_TH75 0x60 /* 75% threshold */
#define DMA_INT_EMPTY 0x80 /* ring buffer empty */
#define DMA_INT_NEMPTY 0xa0 /* ring buffer not empty */
#define DMA_INT_FULL 0xc0 /* ring buffer full */
#define DMA_INT_NFULL 0xe0 /* ring buffer not full */
#define DMA_CHANNEL_RESET 0x400 /* reset dma channel */
#define DMA_ENABLE 0x200 /* enable DMA */
/* peripheral controller intr status bits applicable to serial ports */
#define ISA_SERIAL0_MASK 0x03f00000 /* mask for port #1 intrs */
#define ISA_SERIAL0_DIR 0x00100000 /* device intr request */
#define ISA_SERIAL0_Tx_THIR 0x00200000 /* Transmit DMA threshold */
#define ISA_SERIAL0_Tx_PREQ 0x00400000 /* Transmit DMA pair req */
#define ISA_SERIAL0_Tx_MEMERR 0x00800000 /* Transmit DMA memory err */
#define ISA_SERIAL0_Rx_THIR 0x01000000 /* Receive DMA threshold */
#define ISA_SERIAL0_Rx_OVERRUN 0x02000000 /* Receive DMA over-run */
#define ISA_SERIAL1_MASK 0xfc000000 /* mask for port #1 intrs */
#define ISA_SERIAL1_DIR 0x04000000 /* device intr request */
#define ISA_SERIAL1_Tx_THIR 0x08000000 /* Transmit DMA threshold */
#define ISA_SERIAL1_Tx_PREQ 0x10000000 /* Transmit DMA pair req */
#define ISA_SERIAL1_Tx_MEMERR 0x20000000 /* Transmit DMA memory err */
#define ISA_SERIAL1_Rx_THIR 0x40000000 /* Receive DMA threshold */
#define ISA_SERIAL1_Rx_OVERRUN 0x80000000 /* Receive DMA over-run */
#define MAX_RING_BLOCKS 128 /* 4096/32 */
#define MAX_RING_SIZE 4096
/* DMA Input Control Byte */
#define DMA_IC_OVRRUN 0x01 /* overrun error */
#define DMA_IC_PARERR 0x02 /* parity error */
#define DMA_IC_FRMERR 0x04 /* framing error */
#define DMA_IC_BRKDET 0x08 /* a break has arrived */
#define DMA_IC_VALID 0x80 /* pair is valid */
/* DMA Output Control Byte */
#define DMA_OC_TxINTR 0x20 /* set Tx intr after processing byte */
#define DMA_OC_INVALID 0x00 /* invalid pair */
#define DMA_OC_WTHR 0x40 /* Write byte to THR */
#define DMA_OC_WMCR 0x80 /* Write byte to MCR */
#define DMA_OC_DELAY 0xc0 /* time delay before next xmit */
/* ring id's */
#define RID_SERIAL0_TX 0x4 /* serial port 0, transmit ring buffer */
#define RID_SERIAL0_RX 0x5 /* serial port 0, receive ring buffer */
#define RID_SERIAL1_TX 0x6 /* serial port 1, transmit ring buffer */
#define RID_SERIAL1_RX 0x7 /* serial port 1, receive ring buffer */
#define CLOCK_XIN 22
#define PRESCALER_DIVISOR 3
#define CLOCK_ACE 7333333
/*
* increment the ring offset. One way to do this would be to add b'100000.
* this would let the offset value roll over automatically when it reaches
* its maximum value (127). However when we use the offset, we must use
* the appropriate bits only by masking with 0xfe0.
* The other option is to shift the offset right by 5 bits and look at its
* value. Then increment if required and shift back
* note: 127 * 2^5 = 4064
*/
#define INC_RING_POINTER(x) \
( ((x & 0xffe0) < 4064) ? (x += 32) : 0 )
#endif /* _ASM_IA64_SN_UART16550_H */
......@@ -10,7 +10,6 @@
#define _ASM_SN_XTALK_XBOW_INFO_H
#include <linux/types.h>
#include <linux/devfs_fs_kernel.h>
#define XBOW_PERF_MODES 0x03
#define XBOW_PERF_COUNTERS 0x02
......
......@@ -17,7 +17,6 @@
#ifndef __ASSEMBLY__
#include <linux/devfs_fs_kernel.h>
#include <asm/sn/xtalk/xtalk.h>
typedef struct xswitch_info_s *xswitch_info_t;
......
......@@ -10,7 +10,6 @@
#define _ASM_SN_XTALK_XTALK_PRIVATE_H
#include <asm/sn/ioerror.h> /* for error function and arg types */
#include <linux/devfs_fs_kernel.h>
#include <asm/sn/xtalk/xwidget.h>
#include <asm/sn/xtalk/xtalk.h>
......
......@@ -30,6 +30,7 @@
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/sched.h>
......@@ -57,9 +58,10 @@
* address TASK_SIZE is never valid. We also need to make sure that the address doesn't
* point inside the virtually mapped linear page table.
*/
#define __access_ok(addr,size,segment) (((unsigned long) (addr)) <= (segment).seg \
&& ((segment).seg == KERNEL_DS.seg \
|| REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))
#define __access_ok(addr,size,segment) \
likely(((unsigned long) (addr)) <= (segment).seg \
&& ((segment).seg == KERNEL_DS.seg \
|| REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))
#define access_ok(type,addr,size) __access_ok((addr),(size),get_fs())
static inline int
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment