Commit 56cb0890 authored by David Mosberger

Merge tiger.hpl.hp.com:/data1/bk/vanilla/linux-2.5

into tiger.hpl.hp.com:/data1/bk/lia64/to-linus-2.5
parents 196c4ebd eab10312
@@ -1732,7 +1732,6 @@ ioc_init(u64 hpa, void *handle)
 	if ((long) ~iovp_mask > (long) ia64_max_iommu_merge_mask)
 		ia64_max_iommu_merge_mask = ~iovp_mask;
-	MAX_DMA_ADDRESS = ~0UL;
 
 	printk(KERN_INFO PFX
 		"%s %d.%d HPA 0x%lx IOVA space %dMb at 0x%lx\n",
@@ -1966,6 +1965,18 @@ sba_init(void)
 
 subsys_initcall(sba_init); /* must be initialized after ACPI etc., but before any drivers... */
 
+extern void dig_setup(char**);
+
+/*
+ * MAX_DMA_ADDRESS needs to be setup prior to paging_init to do any good,
+ * so we use the platform_setup hook to fix it up.
+ */
+void __init
+sba_setup(char **cmdline_p)
+{
+	MAX_DMA_ADDRESS = ~0UL;
+	dig_setup(cmdline_p);
+}
+
 static int __init
 nosbagart(char *str)
 {
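A minimal sketch of the ordering this relies on (the caller name below is illustrative, not the kernel's actual call site): the generic ia64 setup path runs the machvec platform_setup hook before paging_init(), so the value stored by sba_setup() is already in place when the DMA zone boundary derived from MAX_DMA_ADDRESS is computed.

	/* sketch only; the real call site is the ia64 setup_arch() path */
	void __init
	setup_arch_sketch(char **cmdline_p)
	{
		platform_setup(cmdline_p);	/* hpzx1: sba_setup() widens MAX_DMA_ADDRESS,
						 * then chains to dig_setup() */
		paging_init();			/* reads MAX_DMA_ADDRESS when sizing ZONE_DMA */
	}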
...
@@ -455,6 +455,7 @@ acpi_numa_arch_fixup (void)
 	for (i = 0; i < MAX_PXM_DOMAINS; i++) {
 		if (pxm_bit_test(i)) {
 			pxm_to_nid_map[i] = numnodes;
+			node_set_online(numnodes);
 			nid_to_pxm_map[numnodes++] = i;
 		}
 	}
...
@@ -201,10 +201,16 @@ do_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, un
 	 * A zero mmap always succeeds in Linux, independent of whether or not the
 	 * remaining arguments are valid.
 	 */
-	len = PAGE_ALIGN(len);
 	if (len == 0)
 		goto out;
 
+	/* Careful about overflows.. */
+	len = PAGE_ALIGN(len);
+	if (!len || len > TASK_SIZE) {
+		addr = -EINVAL;
+		goto out;
+	}
+
 	/*
 	 * Don't permit mappings into unmapped space, the virtual page table of a region,
 	 * or across a region boundary.  Note: RGN_MAP_LIMIT is equal to 2^n-PAGE_SIZE
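The overflow the new check guards against, as a standalone illustration (assuming the usual PAGE_ALIGN definition, ((x) + PAGE_SIZE - 1) & PAGE_MASK, and 16KB pages, one common ia64 configuration):

	#include <stdio.h>

	#define PAGE_SIZE	16384UL
	#define PAGE_MASK	(~(PAGE_SIZE - 1))
	#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

	int main(void)
	{
		unsigned long len = ~0UL;		/* pathological mmap length */
		printf("%#lx\n", PAGE_ALIGN(len));	/* prints 0: the addition wrapped */
		return 0;
	}

With the old ordering, such a request wrapped to zero before the len == 0 test and took the always-succeeds path for zero-length mmaps; the new ordering tests for a genuine zero length first, then rejects a post-alignment zero (or a len beyond TASK_SIZE) with -EINVAL.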
...
@@ -9,6 +9,7 @@
  */
 
 #include <linux/config.h>
+#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/fs.h>
 #include <linux/mm.h>
...
@@ -36,7 +36,7 @@ walk_parents_mkdir(
 	memcpy(buf, *path, len);
 	buf[len] = '\0';
 
-	error = link_path_walk(buf, nd);
+	error = path_walk(buf, nd);
 	if (unlikely(error))
 		return error;
@@ -83,7 +83,7 @@ hwgfs_decode(
 	if (unlikely(error))
 		return error;
 
-	error = link_path_walk(name, &nd);
+	error = path_walk(name, &nd);
 	if (unlikely(error))
 		return error;
@@ -274,7 +274,7 @@ hwgfs_find_handle(
 	nd.dentry = dget(base ? base : hwgfs_vfsmount->mnt_sb->s_root);
 	nd.flags = (traverse_symlinks ? LOOKUP_FOLLOW : 0);
 
-	error = link_path_walk(name, &nd);
+	error = path_walk(name, &nd);
 	if (likely(!error)) {
 		dentry = nd.dentry;
 		path_release(&nd);	/* stale data from here! */
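For context on the replacement (a hedged paraphrase of the 2.5-era fs/namei.c helper, which is not part of this diff): path_walk() is a thin wrapper that resets the per-task symlink-nesting counter before running the same lookup core, bookkeeping that callers of bare link_path_walk() would otherwise have to do themselves to avoid spurious ELOOP failures.

	/* approximate shape of the helper in fs/namei.c of this era */
	int path_walk(const char *name, struct nameidata *nd)
	{
		current->total_link_count = 0;	/* reset ELOOP accounting */
		return link_path_walk(name, nd);
	}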
...
@@ -811,7 +811,6 @@ sn_pci_init (void)
 	/*
 	 * set pci_raw_ops, etc.
 	 */
 	sgi_master_io_infr_init();
-
 	for (cnode = 0; cnode < numnodes; cnode++) {
@@ -826,16 +825,16 @@ sn_pci_init (void)
 #endif
 	controller = kmalloc(sizeof(struct pci_controller), GFP_KERNEL);
-	if (controller) {
-		memset(controller, 0, sizeof(struct pci_controller));
-		/* just allocate some devices and fill in the pci_dev structs */
-		for (i = 0; i < PCI_BUSES_TO_SCAN; i++)
-			pci_scan_bus(i, &sn_pci_ops, controller);
+	if (!controller) {
+		printk(KERN_WARNING "cannot allocate PCI controller\n");
+		return 0;
 	}
-	/*
-	 * actually find devices and fill in hwgraph structs
-	 */
+	memset(controller, 0, sizeof(struct pci_controller));
+
+	for (i = 0; i < PCI_BUSES_TO_SCAN; i++)
+		if (pci_bus_to_vertex(i))
+			pci_scan_bus(i, &sn_pci_ops, controller);
 
 	done_probing = 1;
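The reshuffle above also converts nested success handling into an early-return error path, the prevailing kernel idiom (sketched below; kzalloc only appeared in later kernels, so the diff's kmalloc + memset pairing is period-correct):

	controller = kmalloc(sizeof(*controller), GFP_KERNEL);
	if (!controller)
		return 0;			/* fail fast, no extra nesting */
	memset(controller, 0, sizeof(*controller));
	/* the common case continues at the outermost indentation level */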
@@ -857,13 +856,8 @@ sn_pci_init (void)
 	 * set the root start and end so that drivers calling check_region()
 	 * won't see a conflict
 	 */
-
-#ifdef CONFIG_IA64_SGI_SN_SIM
-	if (! IS_RUNNING_ON_SIMULATOR()) {
-		ioport_resource.start = 0xc000000000000000;
-		ioport_resource.end = 0xcfffffffffffffff;
-	}
-#endif
+	ioport_resource.start = 0xc000000000000000;
+	ioport_resource.end = 0xcfffffffffffffff;
 
 	/*
 	 * Set the root start and end for Mem Resource.
...
@@ -391,11 +391,9 @@ sn_pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
 	dma_map = pcibr_dmamap_alloc(vhdl, NULL, size, PCIIO_DMA_DATA |
 				     MINIMAL_ATE_FLAG(phys_addr, size));
 
-	if (!dma_map) { /* PMU out of entries */
-		printk(KERN_ERR "pci_map_single: Unable to allocate anymore "
-		       "32 bit page map entries.\n");
+	if (!dma_map)
 		return 0;
-	}
 
 	dma_addr = (dma_addr_t) pcibr_dmamap_addr(dma_map, phys_addr, size);
 	dma_map->bd_dma_addr = dma_addr;
@@ -655,6 +653,12 @@ EXPORT_SYMBOL(sn_dma_sync_sg_for_device);
 int
 sn_dma_mapping_error(dma_addr_t dma_addr)
 {
+	/*
+	 * We can only run out of page mapping entries, so if there's
+	 * an error, tell the caller to try again later.
+	 */
+	if (!dma_addr)
+		return -EAGAIN;
+
 	return 0;
 }
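A usage sketch of the convention codified here (an illustrative driver fragment, not code from this commit): on sn2 the map routines return a zero dma_addr_t only when the IOMMU page-map entries are exhausted, so the error hook reports the transient -EAGAIN rather than a hard failure.

	dma_addr_t handle = pci_map_single(hwdev, ptr, size, direction);
	if (sn_dma_mapping_error(handle)) {
		/* mapping tables were full; back off and retry the I/O later */
		return -EAGAIN;
	}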
...
@@ -47,6 +47,7 @@ void pcibr_bus_addr_free(pciio_win_info_t);
 cfg_p		pcibr_find_capability(cfg_p, unsigned);
 extern uint64_t	do_pcibr_config_get(cfg_p, unsigned, unsigned);
 void		do_pcibr_config_set(cfg_p, unsigned, unsigned, uint64_t);
+int		pcibr_slot_pwr(vertex_hdl_t pcibr_vhdl, pciio_slot_t slot, int up, char *err_msg);
 
 /*
@@ -351,7 +352,7 @@ pcibr_slot_enable(vertex_hdl_t pcibr_vhdl, struct pcibr_slot_enable_req_s *req_p
 		goto enable_unlock;
 	}
 
-	error = pcibr_slot_attach(pcibr_vhdl, slot, NULL,
+	error = pcibr_slot_attach(pcibr_vhdl, slot, 0,
 				  req_p->req_resp.resp_l1_msg,
 				  &req_p->req_resp.resp_sub_errno);
...
@@ -82,10 +82,10 @@ static DECLARE_TASKLET(sn_sal_tasklet, sn_sal_tasklet_action, 0);
 static unsigned long sn_interrupt_timeout;
 
 extern u64 master_node_bedrock_address;
-static int sn_debug_printf(const char *fmt, ...);
 
 #undef DEBUG
 #ifdef DEBUG
+static int sn_debug_printf(const char *fmt, ...);
 #define DPRINTF(x...) sn_debug_printf(x)
 #else
 #define DPRINTF(x...) do { } while (0)
@@ -247,6 +247,7 @@ early_printk_sn_sal(const char *s, unsigned count)
 	sn_func->sal_puts(s, count);
 }
 
+#ifdef DEBUG
 /* this is as "close to the metal" as we can get, used when the driver
  * itself may be broken */
 static int
@@ -262,6 +263,7 @@ sn_debug_printf(const char *fmt, ...)
 	va_end(args);
 	return printed_len;
 }
+#endif /* DEBUG */
 
 /*
  * Interrupt handling routines.
...
@@ -2,6 +2,7 @@
 #define _ASM_IA64_MACHVEC_HPZX1_h
 
 extern ia64_mv_setup_t dig_setup;
+extern ia64_mv_setup_t sba_setup;
 extern ia64_mv_dma_alloc_coherent sba_alloc_coherent;
 extern ia64_mv_dma_free_coherent sba_free_coherent;
 extern ia64_mv_dma_map_single sba_map_single;
@@ -19,7 +20,7 @@ extern ia64_mv_dma_mapping_error sba_dma_mapping_error;
  * the macros are used directly.
  */
 #define platform_name			"hpzx1"
-#define platform_setup			dig_setup
+#define platform_setup			sba_setup
 #define platform_dma_init		machvec_noop
 #define platform_dma_alloc_coherent	sba_alloc_coherent
 #define platform_dma_free_coherent	sba_free_coherent
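Effect of the one-line retarget, sketched (on a non-GENERIC build the machvec macros expand directly, as the context comment above notes; the typedef is the setup-hook signature from machvec.h):

	typedef void ia64_mv_setup_t (char **);

	platform_setup(cmdline_p);	/* previously compiled to dig_setup(cmdline_p);
					 * now reaches sba_setup(cmdline_p), which sets
					 * MAX_DMA_ADDRESS and then chains to dig_setup() */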
...
@@ -137,14 +137,6 @@ struct ia64_psr {
  * state comes earlier:
  */
 struct cpuinfo_ia64 {
-	/* irq_stat must be 64-bit aligned */
-	union {
-		struct {
-			__u32 irq_count;
-			__u32 bh_count;
-		} f;
-		__u64 irq_and_bh_counts;
-	} irq_stat;
 	__u32 softirq_pending;
 	__u64 itm_delta;	/* # of clock cycles between clock ticks */
 	__u64 itm_next;		/* interval timer mask value to use for next clock tick */
...