Commit 40b283ee authored by Greg Kroah-Hartman

Merge kroah.com:/home/linux/linux/BK/bleed-2.5

into kroah.com:/home/linux/linux/BK/gregkh-2.5
parents 5a8a7173 8e368e79
......@@ -59,6 +59,7 @@ choice
Jensen DECpc 150, DEC 2000 model 300,
DEC 2000 model 500
LX164 AlphaPC164-LX
Lynx AS 2100A
Miata Personal Workstation 433a, 433au, 500a,
500au, 600a, or 600au
Marvel AlphaServer ES47 / ES80 / GS1280
......@@ -169,6 +170,11 @@ config ALPHA_LX164
A technical overview of this board is available at
<http://www.unix-ag.org/Linux-Alpha/Architectures/LX164.html>.
config ALPHA_LYNX
bool "Lynx"
help
AlphaServer 2100A-based systems.
config ALPHA_MARVEL
bool "Marvel"
help
......@@ -263,22 +269,6 @@ config ISA
(MCA) or VESA. ISA is an older system, now being displaced by PCI;
newer boards don't support it. If you have ISA, say Y, otherwise N.
config EISA
bool
default y
---help---
The Extended Industry Standard Architecture (EISA) bus was
developed as an open alternative to the IBM MicroChannel bus.
The EISA bus provided some of the features of the IBM MicroChannel
bus while maintaining backward compatibility with cards made for
the older ISA bus. The EISA bus saw limited use between 1988 and
1995 when it was made obsolete by the PCI bus.
Say Y here if you are building a kernel for an EISA-based machine.
Otherwise, say N.
config SBUS
bool
......@@ -325,8 +315,8 @@ config ALPHA_NONAME
config ALPHA_EV4
bool
depends on ALPHA_JENSEN || ALPHA_SABLE && !ALPHA_GAMMA || ALPHA_NORITAKE && !ALPHA_PRIMO || ALPHA_MIKASA && !ALPHA_PRIMO || ALPHA_CABRIOLET || ALPHA_AVANTI_CH || ALPHA_EB64P_CH || ALPHA_XL || ALPHA_NONAME || ALPHA_EB66 || ALPHA_EB66P || ALPHA_P2K
default y
depends on ALPHA_JENSEN || (ALPHA_SABLE && !ALPHA_GAMMA) || ALPHA_LYNX || ALPHA_NORITAKE && !ALPHA_PRIMO || ALPHA_MIKASA && !ALPHA_PRIMO || ALPHA_CABRIOLET || ALPHA_AVANTI_CH || ALPHA_EB64P_CH || ALPHA_XL || ALPHA_NONAME || ALPHA_EB66 || ALPHA_EB66P || ALPHA_P2K
default y if !ALPHA_LYNX
config ALPHA_LCA
bool
......@@ -351,9 +341,12 @@ config ALPHA_EB64P
Runs from standard PC power supply.
config ALPHA_EV5
bool "EV5 CPU(s) (model 5/xxx)?" if ALPHA_LYNX
default y if ALPHA_RX164 || ALPHA_RAWHIDE || ALPHA_MIATA || ALPHA_LX164 || ALPHA_SX164 || ALPHA_RUFFIAN || ALPHA_SABLE && ALPHA_GAMMA || ALPHA_NORITAKE && ALPHA_PRIMO || ALPHA_MIKASA && ALPHA_PRIMO || ALPHA_PC164 || ALPHA_TAKARA || ALPHA_EB164 || ALPHA_ALCOR
config ALPHA_EV4
bool
depends on ALPHA_RX164 || ALPHA_RAWHIDE || ALPHA_MIATA || ALPHA_LX164 || ALPHA_SX164 || ALPHA_RUFFIAN || ALPHA_SABLE && ALPHA_GAMMA || ALPHA_NORITAKE && ALPHA_PRIMO || ALPHA_MIKASA && ALPHA_PRIMO || ALPHA_PC164 || ALPHA_TAKARA || ALPHA_EB164 || ALPHA_ALCOR
default y
default y if ALPHA_LYNX && !ALPHA_EV5
config ALPHA_CIA
bool
......@@ -384,9 +377,14 @@ config ALPHA_GAMMA
help
Say Y if you have an AS 2000 5/xxx or an AS 2100 5/xxx.
config ALPHA_GAMMA
bool
depends on ALPHA_LYNX
default y
config ALPHA_T2
bool
depends on ALPHA_SABLE
depends on ALPHA_SABLE || ALPHA_LYNX
default y
config ALPHA_PYXIS
......@@ -431,9 +429,23 @@ config ALPHA_IRONGATE
depends on ALPHA_NAUTILUS
default y
config ALPHA_AVANTI
bool
depends on ALPHA_XL || ALPHA_AVANTI_CH
default y
help
Avanti AS 200, AS 205, AS 250, AS 255, AS 300, and AS 400-based
Alphas. Info at
<http://www.unix-ag.org/Linux-Alpha/Architectures/Avanti.html>.
config ALPHA_BROKEN_IRQ_MASK
bool
depends on ALPHA_GENERIC || ALPHA_PC164
default y
config ALPHA_SRM
bool "Use SRM as bootloader" if ALPHA_CABRIOLET || ALPHA_AVANTI_CH || ALPHA_EB64P || ALPHA_PC164 || ALPHA_TAKARA || ALPHA_EB164 || ALPHA_ALCOR || ALPHA_MIATA || ALPHA_LX164 || ALPHA_SX164 || ALPHA_NAUTILUS || ALPHA_NONAME
default y if ALPHA_JENSEN || ALPHA_MIKASA || ALPHA_SABLE || ALPHA_NORITAKE || ALPHA_DP264 || ALPHA_RAWHIDE || ALPHA_EIGER || ALPHA_WILDFIRE || ALPHA_TITAN || ALPHA_SHARK || ALPHA_MARVEL
default y if ALPHA_JENSEN || ALPHA_MIKASA || ALPHA_SABLE || ALPHA_LYNX || ALPHA_NORITAKE || ALPHA_DP264 || ALPHA_RAWHIDE || ALPHA_EIGER || ALPHA_WILDFIRE || ALPHA_TITAN || ALPHA_SHARK || ALPHA_MARVEL
---help---
There are two different types of booting firmware on Alphas: SRM,
which is command line driven, and ARC, which uses menus and arrow
......@@ -459,28 +471,26 @@ config EARLY_PRINTK
depends on ALPHA_GENERIC || ALPHA_SRM
default y
config ALPHA_EISA
config EISA
bool
depends on ALPHA_ALCOR || ALPHA_MIKASA || ALPHA_SABLE || ALPHA_NORITAKE || ALPHA_RAWHIDE
depends on ALPHA_GENERIC || ALPHA_JENSEN || ALPHA_ALCOR || ALPHA_MIKASA || ALPHA_SABLE || ALPHA_LYNX || ALPHA_NORITAKE || ALPHA_RAWHIDE
default y
---help---
The Extended Industry Standard Architecture (EISA) bus was
developed as an open alternative to the IBM MicroChannel bus.
config ALPHA_AVANTI
bool
depends on ALPHA_XL || ALPHA_AVANTI_CH
default y
help
Avanti AS 200, AS 205, AS 250, AS 255, AS 300, and AS 400-based
Alphas. Info at
<http://www.unix-ag.org/Linux-Alpha/Architectures/Avanti.html>.
The EISA bus provided some of the features of the IBM MicroChannel
bus while maintaining backward compatibility with cards made for
the older ISA bus. The EISA bus saw limited use between 1988 and
1995 when it was made obsolete by the PCI bus.
config ALPHA_BROKEN_IRQ_MASK
bool
depends on ALPHA_GENERIC || ALPHA_PC164
default y
Say Y here if you are building a kernel for an EISA-based machine.
Otherwise, say N.
config SMP
bool "Symmetric multi-processing support"
depends on ALPHA_SABLE || ALPHA_RAWHIDE || ALPHA_DP264 || ALPHA_WILDFIRE || ALPHA_TITAN || ALPHA_GENERIC || ALPHA_SHARK || ALPHA_MARVEL
depends on ALPHA_SABLE || ALPHA_LYNX || ALPHA_RAWHIDE || ALPHA_DP264 || ALPHA_WILDFIRE || ALPHA_TITAN || ALPHA_GENERIC || ALPHA_SHARK || ALPHA_MARVEL
---help---
This enables support for systems with more than one CPU. If you have
a system with only one CPU, like most personal computers, say N. If
......
......@@ -83,6 +83,7 @@ obj-$(CONFIG_ALPHA_RAWHIDE) += sys_rawhide.o irq_i8259.o
obj-$(CONFIG_ALPHA_RUFFIAN) += sys_ruffian.o irq_pyxis.o irq_i8259.o
obj-$(CONFIG_ALPHA_RX164) += sys_rx164.o irq_i8259.o
obj-$(CONFIG_ALPHA_SABLE) += sys_sable.o
obj-$(CONFIG_ALPHA_LYNX) += sys_sable.o
obj-$(CONFIG_ALPHA_BOOK1) += sys_sio.o irq_i8259.o irq_srm.o ns87312.o
obj-$(CONFIG_ALPHA_AVANTI) += sys_sio.o irq_i8259.o irq_srm.o ns87312.o
obj-$(CONFIG_ALPHA_NONAME) += sys_sio.o irq_i8259.o irq_srm.o ns87312.o
......
......@@ -17,6 +17,7 @@
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/delay.h>
#define __EXTERN_INLINE
#include <asm/io.h>
......@@ -26,6 +27,12 @@
#include "proto.h"
#include "pci_impl.h"
/* For dumping initial DMA window settings. */
#define DEBUG_PRINT_INITIAL_SETTINGS 0
/* For dumping final DMA window settings. */
#define DEBUG_PRINT_FINAL_SETTINGS 0
/*
* By default, we direct-map starting at 2GB, in order to allow the
* maximum size direct-map window (2GB) to match the maximum amount of
......@@ -34,11 +41,23 @@
* ISA DMA, since the maximum ISA DMA address is 2GB-1.
*
* For now, this seems a reasonable trade-off: even though most SABLEs
* have far less than even 1GB of memory, floppy usage/performance will
* not really be affected by forcing it to go via scatter/gather...
* have less than 1GB of memory, floppy usage/performance will not
* really be affected by forcing it to go via scatter/gather...
*/
#define T2_DIRECTMAP_2G 1
#if T2_DIRECTMAP_2G
# define T2_DIRECTMAP_START 0x80000000UL
# define T2_DIRECTMAP_LENGTH 0x80000000UL
#else
# define T2_DIRECTMAP_START 0x40000000UL
# define T2_DIRECTMAP_LENGTH 0x40000000UL
#endif
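/* For illustration: with TBASE1 = 0 (set below) and the 2GB default,
   the direct-map window translates bus address 0x80000000 + P to
   physical address P, so the first 2GB of memory is reachable by PCI
   masters without scatter/gather, while bus addresses below 2GB stay
   outside the window (in particular the 8MB ISA SG region defined
   next). */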
/* The ISA scatter/gather window settings. */
#define T2_ISA_SG_START 0x00800000UL
#define T2_ISA_SG_LENGTH 0x00800000UL
/*
* NOTE: Herein lie back-to-back mb instructions. They are magic.
* One plausible explanation is that the i/o controller does not properly
......@@ -57,6 +76,24 @@
# define DBG(args)
#endif
static volatile unsigned int t2_mcheck_any_expected;
static volatile unsigned int t2_mcheck_last_taken;
/* Place to save the DMA Window registers as set up by SRM
for restoration during shutdown. */
static struct
{
struct {
unsigned long wbase;
unsigned long wmask;
unsigned long tbase;
} window[2];
unsigned long hae_1;
unsigned long hae_2;
unsigned long hae_3;
unsigned long hae_4;
unsigned long hbase;
} t2_saved_config __attribute((common));
/*
* Given a bus, device, and function number, compute resulting
......@@ -134,42 +171,34 @@ mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
return 0;
}
/*
* NOTE: both conf_read() and conf_write() may set HAE_3 when needing
* to do type1 access. This is protected by the use of spinlock IRQ
* primitives in the wrapper functions pci_{read,write}_config_*()
* defined in drivers/pci/pci.c.
*/
static unsigned int
conf_read(unsigned long addr, unsigned char type1)
{
unsigned long flags;
unsigned int value, cpu;
unsigned int value, cpu, taken;
unsigned long t2_cfg = 0;
cpu = smp_processor_id();
local_irq_save(flags); /* avoid getting hit by machine check */
DBG(("conf_read(addr=0x%lx, type1=%d)\n", addr, type1));
#if 0
{
unsigned long stat0;
/* Reset status register to avoid losing errors. */
stat0 = *(vulp)T2_IOCSR;
*(vulp)T2_IOCSR = stat0;
mb();
DBG(("conf_read: T2 IOCSR was 0x%x\n", stat0));
}
#endif
/* If Type1 access, must set T2 CFG. */
if (type1) {
t2_cfg = *(vulp)T2_HAE_3 & ~0xc0000000UL;
*(vulp)T2_HAE_3 = 0x40000000UL | t2_cfg;
mb();
DBG(("conf_read: TYPE1 access\n"));
}
mb();
draina();
mcheck_expected(cpu) = 1;
mcheck_taken(cpu) = 0;
t2_mcheck_any_expected |= (1 << cpu);
mb();
/* Access configuration space. */
......@@ -177,12 +206,20 @@ conf_read(unsigned long addr, unsigned char type1)
mb();
mb(); /* magic */
if (mcheck_taken(cpu)) {
/* Wait for possible mcheck. Also, this lets other CPUs clear
their mchecks as well, as they can reliably tell when
another CPU is in the midst of handling a real mcheck via
the "taken" function. */
udelay(100);
if ((taken = mcheck_taken(cpu))) {
mcheck_taken(cpu) = 0;
t2_mcheck_last_taken |= (1 << cpu);
value = 0xffffffffU;
mb();
}
mcheck_expected(cpu) = 0;
t2_mcheck_any_expected = 0;
mb();
/* If Type1 access, must reset T2 CFG so normal IO space ops work. */
......@@ -190,45 +227,30 @@ conf_read(unsigned long addr, unsigned char type1)
*(vulp)T2_HAE_3 = t2_cfg;
mb();
}
DBG(("conf_read(): finished\n"));
local_irq_restore(flags);
return value;
}
static void
conf_write(unsigned long addr, unsigned int value, unsigned char type1)
{
unsigned long flags;
unsigned int cpu;
unsigned int cpu, taken;
unsigned long t2_cfg = 0;
cpu = smp_processor_id();
local_irq_save(flags); /* avoid getting hit by machine check */
#if 0
{
unsigned long stat0;
/* Reset status register to avoid losing errors. */
stat0 = *(vulp)T2_IOCSR;
*(vulp)T2_IOCSR = stat0;
mb();
DBG(("conf_write: T2 ERR was 0x%x\n", stat0));
}
#endif
/* If Type1 access, must set T2 CFG. */
if (type1) {
t2_cfg = *(vulp)T2_HAE_3 & ~0xc0000000UL;
*(vulp)T2_HAE_3 = t2_cfg | 0x40000000UL;
mb();
DBG(("conf_write: TYPE1 access\n"));
}
mb();
draina();
mcheck_expected(cpu) = 1;
mcheck_taken(cpu) = 0;
t2_mcheck_any_expected |= (1 << cpu);
mb();
/* Access configuration space. */
......@@ -236,7 +258,19 @@ conf_write(unsigned long addr, unsigned int value, unsigned char type1)
mb();
mb(); /* magic */
/* Wait for possible mcheck. Also, this lets other CPUs clear
their mchecks as well, as they can reliably tell when
this CPU is in the midst of handling a real mcheck via
the "taken" function. */
udelay(100);
if ((taken = mcheck_taken(cpu))) {
mcheck_taken(cpu) = 0;
t2_mcheck_last_taken |= (1 << cpu);
mb();
}
mcheck_expected(cpu) = 0;
t2_mcheck_any_expected = 0;
mb();
/* If Type1 access, must reset T2 CFG so normal IO space ops work. */
......@@ -244,8 +278,6 @@ conf_write(unsigned long addr, unsigned int value, unsigned char type1)
*(vulp)T2_HAE_3 = t2_cfg;
mb();
}
DBG(("conf_write(): finished\n"));
local_irq_restore(flags);
}
static int
......@@ -290,48 +322,121 @@ struct pci_ops t2_pci_ops =
.write = t2_write_config,
};
static void __init
t2_direct_map_window1(unsigned long base, unsigned long length)
{
unsigned long temp;
__direct_map_base = base;
__direct_map_size = length;
temp = (base & 0xfff00000UL) | ((base + length - 1) >> 20);
*(vulp)T2_WBASE1 = temp | 0x80000UL; /* OR in ENABLE bit */
temp = (length - 1) & 0xfff00000UL;
*(vulp)T2_WMASK1 = temp;
*(vulp)T2_TBASE1 = 0;
#if DEBUG_PRINT_FINAL_SETTINGS
printk("%s: setting WBASE1=0x%lx WMASK1=0x%lx TBASE1=0x%lx\n",
__FUNCTION__,
*(vulp)T2_WBASE1,
*(vulp)T2_WMASK1,
*(vulp)T2_TBASE1);
#endif
}
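/* For illustration, with the default 2GB window above (base = length =
   0x80000000) this computes WBASE1 = 0x80000000 | (0xffffffff >> 20)
   | 0x80000 = 0x80080fff and WMASK1 = 0x7fffffff & 0xfff00000 =
   0x7ff00000, matching the constants that were previously hard-coded
   in t2_init_arch(). */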
static void __init
t2_sg_map_window2(struct pci_controller *hose,
unsigned long base,
unsigned long length)
{
unsigned long temp;
/* Note we can only do 1 SG window, as the other is for direct, so
do an ISA SG area, especially for the floppy. */
hose->sg_isa = iommu_arena_new(hose, base, length, 0);
hose->sg_pci = NULL;
temp = (base & 0xfff00000UL) | ((base + length - 1) >> 20);
*(vulp)T2_WBASE2 = temp | 0xc0000UL; /* OR in ENABLE/SG bits */
temp = (length - 1) & 0xfff00000UL;
*(vulp)T2_WMASK2 = temp;
*(vulp)T2_TBASE2 = virt_to_phys(hose->sg_isa->ptes) >> 1;
mb();
t2_pci_tbi(hose, 0, -1); /* flush TLB all */
#if DEBUG_PRINT_FINAL_SETTINGS
printk("%s: setting WBASE2=0x%lx WMASK2=0x%lx TBASE2=0x%lx\n",
__FUNCTION__,
*(vulp)T2_WBASE2,
*(vulp)T2_WMASK2,
*(vulp)T2_TBASE2);
#endif
}
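/* Likewise for the 8MB ISA window used below (base = length =
   0x00800000): WBASE2 = 0x00800000 | (0x00ffffff >> 20) | 0xc0000 =
   0x008c000f and WMASK2 = 0x007fffff & 0xfff00000 = 0x00700000, again
   matching the values formerly written directly in t2_init_arch(). */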
static void __init
t2_save_configuration(void)
{
#if DEBUG_PRINT_INITIAL_SETTINGS
printk("%s: HAE_1 was 0x%lx\n", __FUNCTION__, srm_hae); /* HW is 0 */
printk("%s: HAE_2 was 0x%lx\n", __FUNCTION__, *(vulp)T2_HAE_2);
printk("%s: HAE_3 was 0x%lx\n", __FUNCTION__, *(vulp)T2_HAE_3);
printk("%s: HAE_4 was 0x%lx\n", __FUNCTION__, *(vulp)T2_HAE_4);
printk("%s: HBASE was 0x%lx\n", __FUNCTION__, *(vulp)T2_HBASE);
printk("%s: WBASE1=0x%lx WMASK1=0x%lx TBASE1=0x%lx\n", __FUNCTION__,
*(vulp)T2_WBASE1, *(vulp)T2_WMASK1, *(vulp)T2_TBASE1);
printk("%s: WBASE2=0x%lx WMASK2=0x%lx TBASE2=0x%lx\n", __FUNCTION__,
*(vulp)T2_WBASE2, *(vulp)T2_WMASK2, *(vulp)T2_TBASE2);
#endif
/*
* Save the DMA Window registers.
*/
t2_saved_config.window[0].wbase = *(vulp)T2_WBASE1;
t2_saved_config.window[0].wmask = *(vulp)T2_WMASK1;
t2_saved_config.window[0].tbase = *(vulp)T2_TBASE1;
t2_saved_config.window[1].wbase = *(vulp)T2_WBASE2;
t2_saved_config.window[1].wmask = *(vulp)T2_WMASK2;
t2_saved_config.window[1].tbase = *(vulp)T2_TBASE2;
t2_saved_config.hae_1 = srm_hae; /* HW is already set to 0 */
t2_saved_config.hae_2 = *(vulp)T2_HAE_2;
t2_saved_config.hae_3 = *(vulp)T2_HAE_3;
t2_saved_config.hae_4 = *(vulp)T2_HAE_4;
t2_saved_config.hbase = *(vulp)T2_HBASE;
}
void __init
t2_init_arch(void)
{
struct pci_controller *hose;
unsigned long t2_iocsr;
unsigned long temp;
unsigned int i;
for (i = 0; i < NR_CPUS; i++) {
mcheck_expected(i) = 0;
mcheck_taken(i) = 0;
}
#if 0
/* Set up error reporting. */
t2_iocsr = *(vulp)T2_IOCSR;
*(vulp)T2_IOCSR = t2_iocsr | (0x1UL << 7); /* TLB error check */
mb();
*(vulp)T2_IOCSR; /* read it back to make sure */
#endif
t2_mcheck_any_expected = 0;
t2_mcheck_last_taken = 0;
/* Enable scatter/gather TLB use. */
t2_iocsr = *(vulp)T2_IOCSR;
if (!(t2_iocsr & (0x1UL << 26))) {
temp = *(vulp)T2_IOCSR;
if (!(temp & (0x1UL << 26))) {
printk("t2_init_arch: enabling SG TLB, IOCSR was 0x%lx\n",
t2_iocsr);
*(vulp)T2_IOCSR = t2_iocsr | (0x1UL << 26);
temp);
*(vulp)T2_IOCSR = temp | (0x1UL << 26);
mb();
*(vulp)T2_IOCSR; /* read it back to make sure */
}
#if 0
printk("t2_init_arch: HBASE was 0x%lx\n", *(vulp)T2_HBASE);
printk("t2_init_arch: WBASE1=0x%lx WMASK1=0x%lx TBASE1=0x%lx\n",
*(vulp)T2_WBASE1, *(vulp)T2_WMASK1, *(vulp)T2_TBASE1);
printk("t2_init_arch: WBASE2=0x%lx WMASK2=0x%lx TBASE2=0x%lx\n",
*(vulp)T2_WBASE2, *(vulp)T2_WMASK2, *(vulp)T2_TBASE2);
#endif
t2_save_configuration();
/*
* Create our single hose.
*/
pci_isa_hose = hose = alloc_pci_controller();
hose->io_space = &ioport_resource;
hose->mem_space = &iomem_resource;
......@@ -342,52 +447,51 @@ t2_init_arch(void)
hose->sparse_io_base = T2_IO - IDENT_ADDR;
hose->dense_io_base = 0;
/* Note we can only do 1 SG window, as the other is for direct, so
do an ISA SG area, especially for the floppy. */
hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
hose->sg_pci = NULL;
/*
* Set up the PCI->physical memory translation windows.
*
* Window 1 goes at ? GB and is ?GB large, direct mapped.
* Window 2 goes at 8 MB and is 8MB large, scatter/gather (for ISA).
* Window 1 is direct mapped.
* Window 2 is scatter/gather (for ISA).
*/
#if T2_DIRECTMAP_2G
__direct_map_base = 0x80000000UL;
__direct_map_size = 0x80000000UL;
t2_direct_map_window1(T2_DIRECTMAP_START, T2_DIRECTMAP_LENGTH);
/* WARNING!! must correspond to the direct map window params!!! */
*(vulp)T2_WBASE1 = 0x80080fffU;
*(vulp)T2_WMASK1 = 0x7ff00000U;
*(vulp)T2_TBASE1 = 0;
#else /* T2_DIRECTMAP_2G */
__direct_map_base = 0x40000000UL;
__direct_map_size = 0x40000000UL;
/* Always make an ISA DMA window. */
t2_sg_map_window2(hose, T2_ISA_SG_START, T2_ISA_SG_LENGTH);
/* WARNING!! must correspond to the direct map window params!!! */
*(vulp)T2_WBASE1 = 0x400807ffU;
*(vulp)T2_WMASK1 = 0x3ff00000U;
*(vulp)T2_TBASE1 = 0;
#endif /* T2_DIRECTMAP_2G */
/* WARNING!! must correspond to the SG arena/window params!!! */
*(vulp)T2_WBASE2 = 0x008c000fU;
*(vulp)T2_WMASK2 = 0x00700000U;
*(vulp)T2_TBASE2 = virt_to_phys(hose->sg_isa->ptes) >> 1;
*(vulp)T2_HBASE = 0x0;
*(vulp)T2_HBASE = 0x0; /* Disable HOLES. */
/* Zero HAE. */
*(vulp)T2_HAE_1 = 0; mb();
*(vulp)T2_HAE_2 = 0; mb();
*(vulp)T2_HAE_3 = 0; mb();
*(vulp)T2_HAE_1 = 0; mb(); /* Sparse MEM HAE */
*(vulp)T2_HAE_2 = 0; mb(); /* Sparse I/O HAE */
*(vulp)T2_HAE_3 = 0; mb(); /* Config Space HAE */
#if 0
*(vulp)T2_HAE_4 = 0; mb(); /* DO NOT TOUCH THIS!!! */
/* !!! DO NOT EVER TOUCH THIS !!! */
*(vulp)T2_HAE_4 = 0; mb(); /* Dense MEM HAE */
#endif
}
t2_pci_tbi(hose, 0, -1); /* flush TLB all */
void
t2_kill_arch(int mode)
{
/*
* Restore the DMA Window registers.
*/
*(vulp)T2_WBASE1 = t2_saved_config.window[0].wbase;
*(vulp)T2_WMASK1 = t2_saved_config.window[0].wmask;
*(vulp)T2_TBASE1 = t2_saved_config.window[0].tbase;
*(vulp)T2_WBASE2 = t2_saved_config.window[1].wbase;
*(vulp)T2_WMASK2 = t2_saved_config.window[1].wmask;
*(vulp)T2_TBASE2 = t2_saved_config.window[1].tbase;
mb();
*(vulp)T2_HAE_1 = srm_hae;
*(vulp)T2_HAE_2 = t2_saved_config.hae_2;
*(vulp)T2_HAE_3 = t2_saved_config.hae_3;
*(vulp)T2_HAE_4 = t2_saved_config.hae_4;
*(vulp)T2_HBASE = t2_saved_config.hbase;
mb();
*(vulp)T2_HBASE; /* READ it back to ensure WRITE occurred. */
}
void
......@@ -415,13 +519,7 @@ t2_clear_errors(int cpu)
{
struct sable_cpu_csr *cpu_regs;
cpu_regs = (struct sable_cpu_csr *)T2_CPU0_BASE;
if (cpu == 1)
cpu_regs = (struct sable_cpu_csr *)T2_CPU1_BASE;
if (cpu == 2)
cpu_regs = (struct sable_cpu_csr *)T2_CPU2_BASE;
if (cpu == 3)
cpu_regs = (struct sable_cpu_csr *)T2_CPU3_BASE;
cpu_regs = (struct sable_cpu_csr *)T2_CPUn_BASE(cpu);
cpu_regs->sic &= ~SIC_SEIC;
......@@ -438,19 +536,76 @@ t2_clear_errors(int cpu)
mb(); /* magic */
}
/*
* SABLE seems to have a "broadcast" style machine check, in that all
* CPUs receive it. And, the issuing CPU, in the case of PCI Config
* space read/write faults, will also receive a second mcheck, upon
* lowering IPL during completion processing in pci_read_config_byte()
* et al.
*
* Hence all the taken/expected/any_expected/last_taken stuff...
*/
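/*
 * Concretely (see conf_read/conf_write above): before touching config
 * space a CPU sets mcheck_expected(cpu) and its bit in
 * t2_mcheck_any_expected, performs the access, then waits ~100us so
 * any mcheck can arrive; if mcheck_taken(cpu) fired it records the CPU
 * in t2_mcheck_last_taken and (for reads) returns 0xffffffff. The
 * handler below uses those flags to dismiss the broadcast copies seen
 * by CPUs that were not doing a config access, to swallow the one
 * trailing mcheck flagged by last_taken, and to report everything else
 * via process_mcheck_info().
 */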
void
t2_machine_check(unsigned long vector, unsigned long la_ptr,
struct pt_regs * regs)
{
int cpu = smp_processor_id();
#if DEBUG_MCHECK > 0
struct el_common *mchk_header = (struct el_common *)la_ptr;
#endif /* DEBUG_MCHECK */
/* Clear the error before any reporting. */
mb();
mb(); /* magic */
draina();
t2_clear_errors(cpu);
wrmces(rdmces()|1); /* ??? */
/* This should not actually be done until the logout frame is
examined, but, since we don't do that, go on and do this... */
wrmces(0x7);
mb();
/* Now, do testing for the anomalous conditions. */
if (!mcheck_expected(cpu) && t2_mcheck_any_expected) {
/*
* FUNKY: Received mcheck on a CPU and not
* expecting it, but another CPU is expecting one.
*
* Just dismiss it for now on this CPU...
*/
#if DEBUG_MCHECK > 0
printk("t2_machine_check(cpu%d): any_expected 0x%x -"
" (assumed) spurious -"
" code 0x%x\n", cpu, t2_mcheck_any_expected,
(unsigned int)mchk_header->code);
#endif /* DEBUG_MCHECK */
return;
}
if (!mcheck_expected(cpu) && !t2_mcheck_any_expected) {
if (t2_mcheck_last_taken & (1 << cpu)) {
#if DEBUG_MCHECK > 0
printk("t2_machine_check(cpu%d): last_taken 0x%x - "
"unexpected mcheck - code 0x%x\n",
cpu, t2_mcheck_last_taken,
(unsigned int)mchk_header->code);
#endif /* DEBUG_MCHECK */
t2_mcheck_last_taken = 0;
mb();
return;
} else {
t2_mcheck_last_taken = 0;
mb();
}
}
#if DEBUG_MCHECK > 0
printk("%s t2_mcheck(cpu%d): last_taken 0x%x - "
"any_expected 0x%x - code 0x%x\n",
(mcheck_expected(cpu) ? "EX" : "UN"), cpu,
t2_mcheck_last_taken, t2_mcheck_any_expected,
(unsigned int)mchk_header->code);
#endif /* DEBUG_MCHECK */
process_mcheck_info(vector, la_ptr, regs, "T2", mcheck_expected(cpu));
}
......@@ -19,6 +19,7 @@
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <asm/machvec.h>
......
......@@ -76,6 +76,7 @@ extern void polaris_machine_check(u64, u64, struct pt_regs *);
/* core_t2.c */
extern struct pci_ops t2_pci_ops;
extern void t2_init_arch(void);
extern void t2_kill_arch(int);
extern void t2_machine_check(u64, u64, struct pt_regs *);
extern void t2_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
......
......@@ -162,6 +162,7 @@ WEAK(eb66p_mv);
WEAK(eiger_mv);
WEAK(jensen_mv);
WEAK(lx164_mv);
WEAK(lynx_mv);
WEAK(marvel_ev7_mv);
WEAK(miata_mv);
WEAK(mikasa_mv);
......@@ -268,7 +269,7 @@ move_initrd(unsigned long mem_limit)
unsigned long size;
size = initrd_end - initrd_start;
start = __alloc_bootmem(size, PAGE_SIZE, 0);
start = __alloc_bootmem(PAGE_ALIGN(size), PAGE_SIZE, 0);
if (!start || __pa(start) + size > mem_limit) {
initrd_start = initrd_end = 0;
return NULL;
......@@ -739,7 +740,7 @@ get_sysvec(unsigned long type, unsigned long variation, unsigned long cpu)
NULL, /* Turbolaser */
&avanti_mv,
NULL, /* Mustang */
&alcor_mv, /* Alcor, Bret, Maverick. */
NULL, /* Alcor, Bret, Maverick. HWRPB inaccurate? */
NULL, /* Tradewind */
NULL, /* Mikasa -- see below. */
NULL, /* EB64 */
......@@ -748,7 +749,7 @@ get_sysvec(unsigned long type, unsigned long variation, unsigned long cpu)
&alphabook1_mv,
&rawhide_mv,
NULL, /* K2 */
NULL, /* Lynx */
&lynx_mv, /* Lynx */
&xl_mv,
NULL, /* EB164 -- see variation. */
NULL, /* Noritake -- see below. */
......@@ -930,6 +931,7 @@ get_sysvec_byname(const char *name)
&eiger_mv,
&jensen_mv,
&lx164_mv,
&lynx_mv,
&miata_mv,
&mikasa_mv,
&mikasa_primo_mv,
......
......@@ -132,11 +132,13 @@ common_init_irq(void (*srm_dev_int)(unsigned long v, struct pt_regs *r))
setup_irq(16+4, &isa_cascade_irqaction);
}
#ifndef CONFIG_ALPHA_PC164
static void __init
cabriolet_init_irq(void)
{
common_init_irq(srm_device_interrupt);
}
#endif
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_PC164)
/* In theory, the PC164 has the same interrupt hardware as the other
......
......@@ -5,7 +5,7 @@
* Copyright (C) 1996 Jay A Estabrook
* Copyright (C) 1998, 1999 Richard Henderson
*
* Code supporting the Sable and Sable-Gamma systems.
* Code supporting the Sable, Sable-Gamma, and Lynx systems.
*/
#include <linux/config.h>
......@@ -31,8 +31,28 @@
#include "pci_impl.h"
#include "machvec_impl.h"
spinlock_t sable_irq_lock = SPIN_LOCK_UNLOCKED;
spinlock_t sable_lynx_irq_lock = SPIN_LOCK_UNLOCKED;
typedef struct irq_swizzle_struct
{
char irq_to_mask[64];
char mask_to_irq[64];
/* Note mask bit is true for DISABLED irqs. */
unsigned long shadow_mask;
void (*update_irq_hw)(unsigned long bit, unsigned long mask);
void (*ack_irq_hw)(unsigned long bit);
} irq_swizzle_t;
static irq_swizzle_t *sable_lynx_irq_swizzle;
static void sable_lynx_init_irq(int nr_irqs);
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SABLE)
/***********************************************************************/
/*
* For SABLE, which is really baroque, we manage 40 IRQs, but the
* hardware really only supports 24, not via normal ISA PIC,
......@@ -71,30 +91,7 @@ spinlock_t sable_irq_lock = SPIN_LOCK_UNLOCKED;
*23 IIC -
*/
static struct
{
char irq_to_mask[40];
char mask_to_irq[40];
/* Note mask bit is true for DISABLED irqs. */
unsigned long shadow_mask;
} sable_irq_swizzle = {
{
-1, 6, -1, 8, 15, 12, 7, 9, /* pseudo PIC 0-7 */
-1, 16, 17, 18, 3, -1, 21, 22, /* pseudo PIC 8-15 */
-1, -1, -1, -1, -1, -1, -1, -1, /* pseudo EISA 0-7 */
-1, -1, -1, -1, -1, -1, -1, -1, /* pseudo EISA 8-15 */
2, 1, 0, 4, 5, -1, -1, -1, /* pseudo PCI */
},
{
34, 33, 32, 12, 35, 36, 1, 6, /* mask 0-7 */
3, 7, -1, -1, 5, -1, -1, 4, /* mask 8-15 */
9, 10, 11, -1, -1, 14, 15, -1, /* mask 16-23 */
},
-1
};
static inline void
static void
sable_update_irq_hw(unsigned long bit, unsigned long mask)
{
int port = 0x537;
......@@ -110,7 +107,7 @@ sable_update_irq_hw(unsigned long bit, unsigned long mask)
outb(mask, port);
}
static inline void
static void
sable_ack_irq_hw(unsigned long bit)
{
int port, val1, val2;
......@@ -133,102 +130,46 @@ sable_ack_irq_hw(unsigned long bit)
outb(val2, 0x534); /* ack the master */
}
static inline void
sable_enable_irq(unsigned int irq)
{
unsigned long bit, mask;
bit = sable_irq_swizzle.irq_to_mask[irq];
spin_lock(&sable_irq_lock);
mask = sable_irq_swizzle.shadow_mask &= ~(1UL << bit);
sable_update_irq_hw(bit, mask);
spin_unlock(&sable_irq_lock);
}
static void
sable_disable_irq(unsigned int irq)
{
unsigned long bit, mask;
bit = sable_irq_swizzle.irq_to_mask[irq];
spin_lock(&sable_irq_lock);
mask = sable_irq_swizzle.shadow_mask |= 1UL << bit;
sable_update_irq_hw(bit, mask);
spin_unlock(&sable_irq_lock);
}
static unsigned int
sable_startup_irq(unsigned int irq)
{
sable_enable_irq(irq);
return 0;
}
static void
sable_end_irq(unsigned int irq)
{
if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
sable_enable_irq(irq);
}
static void
sable_mask_and_ack_irq(unsigned int irq)
{
unsigned long bit, mask;
bit = sable_irq_swizzle.irq_to_mask[irq];
spin_lock(&sable_irq_lock);
mask = sable_irq_swizzle.shadow_mask |= 1UL << bit;
sable_update_irq_hw(bit, mask);
sable_ack_irq_hw(bit);
spin_unlock(&sable_irq_lock);
}
static struct hw_interrupt_type sable_irq_type = {
.typename = "SABLE",
.startup = sable_startup_irq,
.shutdown = sable_disable_irq,
.enable = sable_enable_irq,
.disable = sable_disable_irq,
.ack = sable_mask_and_ack_irq,
.end = sable_end_irq,
static irq_swizzle_t sable_irq_swizzle = {
{
-1, 6, -1, 8, 15, 12, 7, 9, /* pseudo PIC 0-7 */
-1, 16, 17, 18, 3, -1, 21, 22, /* pseudo PIC 8-15 */
-1, -1, -1, -1, -1, -1, -1, -1, /* pseudo EISA 0-7 */
-1, -1, -1, -1, -1, -1, -1, -1, /* pseudo EISA 8-15 */
2, 1, 0, 4, 5, -1, -1, -1, /* pseudo PCI */
-1, -1, -1, -1, -1, -1, -1, -1, /* */
-1, -1, -1, -1, -1, -1, -1, -1, /* */
-1, -1, -1, -1, -1, -1, -1, -1 /* */
},
{
34, 33, 32, 12, 35, 36, 1, 6, /* mask 0-7 */
3, 7, -1, -1, 5, -1, -1, 4, /* mask 8-15 */
9, 10, 11, -1, -1, 14, 15, -1, /* mask 16-23 */
-1, -1, -1, -1, -1, -1, -1, -1, /* */
-1, -1, -1, -1, -1, -1, -1, -1, /* */
-1, -1, -1, -1, -1, -1, -1, -1, /* */
-1, -1, -1, -1, -1, -1, -1, -1, /* */
-1, -1, -1, -1, -1, -1, -1, -1 /* */
},
-1,
sable_update_irq_hw,
sable_ack_irq_hw
};
static void
sable_srm_device_interrupt(unsigned long vector, struct pt_regs * regs)
{
/* Note that the vector reported by the SRM PALcode corresponds
to the interrupt mask bits, but we have to manage via more
normal IRQs. */
int bit, irq;
bit = (vector - 0x800) >> 4;
irq = sable_irq_swizzle.mask_to_irq[bit];
handle_irq(irq, regs);
}
static void __init
sable_init_irq(void)
{
long i;
outb(-1, 0x537); /* slave 0 */
outb(-1, 0x53b); /* slave 1 */
outb(-1, 0x53d); /* slave 2 */
outb(0x44, 0x535); /* enable cascades in master */
for (i = 0; i < 40; ++i) {
irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
irq_desc[i].handler = &sable_irq_type;
}
common_init_isa_dma();
sable_lynx_irq_swizzle = &sable_irq_swizzle;
sable_lynx_init_irq(40);
}
/*
* PCI Fixup configuration for ALPHA SABLE (2100) - 2100A is different ??
* PCI Fixup configuration for ALPHA SABLE (2100).
*
* The device to slot mapping looks like:
*
......@@ -256,7 +197,7 @@ sable_init_irq(void)
static int __init
sable_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[9][5] __initdata = {
static char irq_tab[9][5] __initdata = {
/*INT INTA INTB INTC INTD */
{ 32+0, 32+0, 32+0, 32+0, 32+0}, /* IdSel 0, TULIP */
{ 32+1, 32+1, 32+1, 32+1, 32+1}, /* IdSel 1, SCSI */
......@@ -266,13 +207,349 @@ sable_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{ -1, -1, -1, -1, -1}, /* IdSel 5, none */
{ 32+2, 32+2, 32+2, 32+2, 32+2}, /* IdSel 6, slot 0 */
{ 32+3, 32+3, 32+3, 32+3, 32+3}, /* IdSel 7, slot 1 */
{ 32+4, 32+4, 32+4, 32+4, 32+4}, /* IdSel 8, slot 2 */
};
{ 32+4, 32+4, 32+4, 32+4, 32+4} /* IdSel 8, slot 2 */
};
long min_idsel = 0, max_idsel = 8, irqs_per_slot = 5;
return COMMON_TABLE_LOOKUP;
}
#endif /* defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SABLE) */
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_LYNX)
/***********************************************************************/
/* LYNX hardware specifics
*/
/*
* For LYNX, which is also baroque, we manage 64 IRQs via a custom IC.
*
* Bit Meaning Kernel IRQ
*------------------------------------------
* 0
* 1
* 2
* 3 mouse 12
* 4
* 5
* 6 keyboard 1
* 7 floppy 6
* 8 COM2 3
* 9 parallel port 7
*10 EISA irq 3 -
*11 EISA irq 4 -
*12 EISA irq 5 5
*13 EISA irq 6 -
*14 EISA irq 7 -
*15 COM1 4
*16 EISA irq 9 9
*17 EISA irq 10 10
*18 EISA irq 11 11
*19 EISA irq 12 -
*20
*21 EISA irq 14 14
*22 EISA irq 15 15
*23 IIC -
*24 VGA (builtin) -
*25
*26
*27
*28 NCR810 (builtin) 28
*29
*30
*31
*32 PCI 0 slot 4 A primary bus 32
*33 PCI 0 slot 4 B primary bus 33
*34 PCI 0 slot 4 C primary bus 34
*35 PCI 0 slot 4 D primary bus
*36 PCI 0 slot 5 A primary bus
*37 PCI 0 slot 5 B primary bus
*38 PCI 0 slot 5 C primary bus
*39 PCI 0 slot 5 D primary bus
*40 PCI 0 slot 6 A primary bus
*41 PCI 0 slot 6 B primary bus
*42 PCI 0 slot 6 C primary bus
*43 PCI 0 slot 6 D primary bus
*44 PCI 0 slot 7 A primary bus
*45 PCI 0 slot 7 B primary bus
*46 PCI 0 slot 7 C primary bus
*47 PCI 0 slot 7 D primary bus
*48 PCI 0 slot 0 A secondary bus
*49 PCI 0 slot 0 B secondary bus
*50 PCI 0 slot 0 C secondary bus
*51 PCI 0 slot 0 D secondary bus
*52 PCI 0 slot 1 A secondary bus
*53 PCI 0 slot 1 B secondary bus
*54 PCI 0 slot 1 C secondary bus
*55 PCI 0 slot 1 D secondary bus
*56 PCI 0 slot 2 A secondary bus
*57 PCI 0 slot 2 B secondary bus
*58 PCI 0 slot 2 C secondary bus
*59 PCI 0 slot 2 D secondary bus
*60 PCI 0 slot 3 A secondary bus
*61 PCI 0 slot 3 B secondary bus
*62 PCI 0 slot 3 C secondary bus
*63 PCI 0 slot 3 D secondary bus
*/
static void
lynx_update_irq_hw(unsigned long bit, unsigned long mask)
{
/*
* Write the AIR register on the T3/T4 with the
* address of the IC mask register (offset 0x40)
*/
*(vulp)T2_AIR = 0x40;
mb();
*(vulp)T2_AIR; /* re-read to force write */
mb();
*(vulp)T2_DIR = mask;
mb();
mb();
}
static void
lynx_ack_irq_hw(unsigned long bit)
{
*(vulp)T2_VAR = (u_long) bit;
mb();
mb();
}
static irq_swizzle_t lynx_irq_swizzle = {
{ /* irq_to_mask */
-1, 6, -1, 8, 15, 12, 7, 9, /* pseudo PIC 0-7 */
-1, 16, 17, 18, 3, -1, 21, 22, /* pseudo PIC 8-15 */
-1, -1, -1, -1, -1, -1, -1, -1, /* pseudo */
-1, -1, -1, -1, 28, -1, -1, -1, /* pseudo */
32, 33, 34, 35, 36, 37, 38, 39, /* mask 32-39 */
40, 41, 42, 43, 44, 45, 46, 47, /* mask 40-47 */
48, 49, 50, 51, 52, 53, 54, 55, /* mask 48-55 */
56, 57, 58, 59, 60, 61, 62, 63 /* mask 56-63 */
},
{ /* mask_to_irq */
-1, -1, -1, 12, -1, -1, 1, 6, /* mask 0-7 */
3, 7, -1, -1, 5, -1, -1, 4, /* mask 8-15 */
9, 10, 11, -1, -1, 14, 15, -1, /* mask 16-23 */
-1, -1, -1, -1, 28, -1, -1, -1, /* mask 24-31 */
32, 33, 34, 35, 36, 37, 38, 39, /* mask 32-39 */
40, 41, 42, 43, 44, 45, 46, 47, /* mask 40-47 */
48, 49, 50, 51, 52, 53, 54, 55, /* mask 48-55 */
56, 57, 58, 59, 60, 61, 62, 63 /* mask 56-63 */
},
-1,
lynx_update_irq_hw,
lynx_ack_irq_hw
};
static void __init
lynx_init_irq(void)
{
sable_lynx_irq_swizzle = &lynx_irq_swizzle;
sable_lynx_init_irq(64);
}
/*
* PCI Fixup configuration for ALPHA LYNX (2100A)
*
* The device to slot mapping looks like:
*
* Slot Device
* 0 none
* 1 none
* 2 PCI-EISA bridge
* 3 PCI-PCI bridge
* 4 NCR 810 (Demi-Lynx only)
* 5 none
* 6 PCI on board slot 4
* 7 PCI on board slot 5
* 8 PCI on board slot 6
* 9 PCI on board slot 7
*
* And behind the PPB we have:
*
* 11 PCI on board slot 0
* 12 PCI on board slot 1
* 13 PCI on board slot 2
* 14 PCI on board slot 3
*/
/*
* NOTE: the IRQ assignments below are arbitrary, but need to be consistent
* with the values in the irq swizzling tables above.
*/
static int __init
lynx_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[19][5] __initdata = {
/*INT INTA INTB INTC INTD */
{ -1, -1, -1, -1, -1}, /* IdSel 13, PCEB */
{ -1, -1, -1, -1, -1}, /* IdSel 14, PPB */
{ 28, 28, 28, 28, 28}, /* IdSel 15, NCR demi */
{ -1, -1, -1, -1, -1}, /* IdSel 16, none */
{ 32, 32, 33, 34, 35}, /* IdSel 17, slot 4 */
{ 36, 36, 37, 38, 39}, /* IdSel 18, slot 5 */
{ 40, 40, 41, 42, 43}, /* IdSel 19, slot 6 */
{ 44, 44, 45, 46, 47}, /* IdSel 20, slot 7 */
{ -1, -1, -1, -1, -1}, /* IdSel 22, none */
/* The following are actually behind the PPB. */
{ -1, -1, -1, -1, -1}, /* IdSel 16 none */
{ 28, 28, 28, 28, 28}, /* IdSel 17 NCR lynx */
{ -1, -1, -1, -1, -1}, /* IdSel 18 none */
{ -1, -1, -1, -1, -1}, /* IdSel 19 none */
{ -1, -1, -1, -1, -1}, /* IdSel 20 none */
{ -1, -1, -1, -1, -1}, /* IdSel 21 none */
{ 48, 48, 49, 50, 51}, /* IdSel 22 slot 0 */
{ 52, 52, 53, 54, 55}, /* IdSel 23 slot 1 */
{ 56, 56, 57, 58, 59}, /* IdSel 24 slot 2 */
{ 60, 60, 61, 62, 63} /* IdSel 25 slot 3 */
};
const long min_idsel = 2, max_idsel = 20, irqs_per_slot = 5;
return COMMON_TABLE_LOOKUP;
}
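/*
 * For illustration (assuming the usual COMMON_TABLE_LOOKUP indexing of
 * irq_tab[slot - min_idsel][pin]): a card in on-board slot 4 sits at
 * device 6 / IdSel 17, so the { 32, 32, 33, 34, 35} row applies and
 * its INTB pin resolves to IRQ 33, which the lynx swizzle table above
 * lists as "PCI 0 slot 4 B primary bus", exactly the consistency the
 * NOTE above requires.
 */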
static u8 __init
lynx_swizzle(struct pci_dev *dev, u8 *pinp)
{
int slot, pin = *pinp;
if (dev->bus->number == 0) {
slot = PCI_SLOT(dev->devfn);
}
/* Check for the built-in bridge */
else if (PCI_SLOT(dev->bus->self->devfn) == 3) {
slot = PCI_SLOT(dev->devfn) + 11;
}
else
{
/* Must be a card-based bridge. */
do {
if (PCI_SLOT(dev->bus->self->devfn) == 3) {
slot = PCI_SLOT(dev->devfn) + 11;
break;
}
pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn));
/* Move up the chain of bridges. */
dev = dev->bus->self;
/* Slot of the next bridge. */
slot = PCI_SLOT(dev->devfn);
} while (dev->bus->self);
}
*pinp = pin;
return slot;
}
#endif /* defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_LYNX) */
/***********************************************************************/
/* GENERIC irq routines */
static inline void
sable_lynx_enable_irq(unsigned int irq)
{
unsigned long bit, mask;
bit = sable_lynx_irq_swizzle->irq_to_mask[irq];
spin_lock(&sable_lynx_irq_lock);
mask = sable_lynx_irq_swizzle->shadow_mask &= ~(1UL << bit);
sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
spin_unlock(&sable_lynx_irq_lock);
#if 0
printk("%s: mask 0x%lx bit 0x%x irq 0x%x\n",
__FUNCTION__, mask, bit, irq);
#endif
}
static void
sable_lynx_disable_irq(unsigned int irq)
{
unsigned long bit, mask;
bit = sable_lynx_irq_swizzle->irq_to_mask[irq];
spin_lock(&sable_lynx_irq_lock);
mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit;
sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
spin_unlock(&sable_lynx_irq_lock);
#if 0
printk("%s: mask 0x%lx bit 0x%x irq 0x%x\n",
__FUNCTION__, mask, bit, irq);
#endif
}
static unsigned int
sable_lynx_startup_irq(unsigned int irq)
{
sable_lynx_enable_irq(irq);
return 0;
}
static void
sable_lynx_end_irq(unsigned int irq)
{
if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
sable_lynx_enable_irq(irq);
}
static void
sable_lynx_mask_and_ack_irq(unsigned int irq)
{
unsigned long bit, mask;
bit = sable_lynx_irq_swizzle->irq_to_mask[irq];
spin_lock(&sable_lynx_irq_lock);
mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit;
sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
sable_lynx_irq_swizzle->ack_irq_hw(bit);
spin_unlock(&sable_lynx_irq_lock);
}
static struct hw_interrupt_type sable_lynx_irq_type = {
.typename = "SABLE/LYNX",
.startup = sable_lynx_startup_irq,
.shutdown = sable_lynx_disable_irq,
.enable = sable_lynx_enable_irq,
.disable = sable_lynx_disable_irq,
.ack = sable_lynx_mask_and_ack_irq,
.end = sable_lynx_end_irq,
};
static void
sable_lynx_srm_device_interrupt(unsigned long vector, struct pt_regs * regs)
{
/* Note that the vector reported by the SRM PALcode corresponds
to the interrupt mask bits, but we have to manage via the
so-called legacy IRQs for many common devices. */
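/* For example, SRM vector 0x900 yields bit (0x900 - 0x800) >> 4 = 16,
   and both swizzle tables map mask bit 16 to kernel IRQ 9 (EISA irq 9). */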
int bit, irq;
bit = (vector - 0x800) >> 4;
irq = sable_lynx_irq_swizzle->mask_to_irq[bit];
#if 0
printk("%s: vector 0x%lx bit 0x%x irq 0x%x\n",
__FUNCTION__, vector, bit, irq);
#endif
handle_irq(irq, regs);
}
static void __init
sable_lynx_init_irq(int nr_irqs)
{
long i;
for (i = 0; i < nr_irqs; ++i) {
irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
irq_desc[i].handler = &sable_lynx_irq_type;
}
common_init_isa_dma();
}
static void __init
sable_lynx_init_pci(void)
{
common_init_pci();
}
/*****************************************************************/
/*
* The System Vectors
*
......@@ -280,7 +557,8 @@ sable_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
* these games with GAMMA_BIAS.
*/
#if defined(CONFIG_ALPHA_GENERIC) || !defined(CONFIG_ALPHA_GAMMA)
#if defined(CONFIG_ALPHA_GENERIC) || \
(defined(CONFIG_ALPHA_SABLE) && !defined(CONFIG_ALPHA_GAMMA))
#undef GAMMA_BIAS
#define GAMMA_BIAS 0
struct alpha_machine_vector sable_mv __initmv = {
......@@ -295,13 +573,13 @@ struct alpha_machine_vector sable_mv __initmv = {
.min_mem_address = T2_DEFAULT_MEM_BASE,
.nr_irqs = 40,
.device_interrupt = sable_srm_device_interrupt,
.device_interrupt = sable_lynx_srm_device_interrupt,
.init_arch = t2_init_arch,
.init_irq = sable_init_irq,
.init_rtc = common_init_rtc,
.init_pci = common_init_pci,
.kill_arch = NULL,
.init_pci = sable_lynx_init_pci,
.kill_arch = t2_kill_arch,
.pci_map_irq = sable_map_irq,
.pci_swizzle = common_swizzle,
......@@ -310,9 +588,10 @@ struct alpha_machine_vector sable_mv __initmv = {
} }
};
ALIAS_MV(sable)
#endif
#endif /* GENERIC || (SABLE && !GAMMA) */
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_GAMMA)
#if defined(CONFIG_ALPHA_GENERIC) || \
(defined(CONFIG_ALPHA_SABLE) && defined(CONFIG_ALPHA_GAMMA))
#undef GAMMA_BIAS
#define GAMMA_BIAS _GAMMA_BIAS
struct alpha_machine_vector sable_gamma_mv __initmv = {
......@@ -327,12 +606,13 @@ struct alpha_machine_vector sable_gamma_mv __initmv = {
.min_mem_address = T2_DEFAULT_MEM_BASE,
.nr_irqs = 40,
.device_interrupt = sable_srm_device_interrupt,
.device_interrupt = sable_lynx_srm_device_interrupt,
.init_arch = t2_init_arch,
.init_irq = sable_init_irq,
.init_rtc = common_init_rtc,
.init_pci = common_init_pci,
.init_pci = sable_lynx_init_pci,
.kill_arch = t2_kill_arch,
.pci_map_irq = sable_map_irq,
.pci_swizzle = common_swizzle,
......@@ -341,4 +621,36 @@ struct alpha_machine_vector sable_gamma_mv __initmv = {
} }
};
ALIAS_MV(sable_gamma)
#endif
#endif /* GENERIC || (SABLE && GAMMA) */
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_LYNX)
#undef GAMMA_BIAS
#define GAMMA_BIAS _GAMMA_BIAS
struct alpha_machine_vector lynx_mv __initmv = {
.vector_name = "Lynx",
DO_EV4_MMU,
DO_DEFAULT_RTC,
DO_T2_IO,
DO_T2_BUS,
.machine_check = t2_machine_check,
.max_isa_dma_address = ALPHA_SABLE_MAX_ISA_DMA_ADDRESS,
.min_io_address = EISA_DEFAULT_IO_BASE,
.min_mem_address = T2_DEFAULT_MEM_BASE,
.nr_irqs = 64,
.device_interrupt = sable_lynx_srm_device_interrupt,
.init_arch = t2_init_arch,
.init_irq = lynx_init_irq,
.init_rtc = common_init_rtc,
.init_pci = sable_lynx_init_pci,
.kill_arch = t2_kill_arch,
.pci_map_irq = lynx_map_irq,
.pci_swizzle = lynx_swizzle,
.sys = { .t2 = {
.gamma_bias = _GAMMA_BIAS
} }
};
ALIAS_MV(lynx)
#endif /* GENERIC || LYNX */
......@@ -324,7 +324,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
int idx;
err = -EFAULT;
if (copy_from_user(&info, (void *)childregs->esi, sizeof(info)))
if (copy_from_user(&info, (void __user *)childregs->esi, sizeof(info)))
goto out;
err = -EINVAL;
if (LDT_empty(&info))
......@@ -567,11 +567,14 @@ asmlinkage int sys_execve(struct pt_regs regs)
int error;
char * filename;
filename = getname((char *) regs.ebx);
filename = getname((char __user *) regs.ebx);
error = PTR_ERR(filename);
if (IS_ERR(filename))
goto out;
error = do_execve(filename, (char **) regs.ecx, (char **) regs.edx, &regs);
error = do_execve(filename,
(char __user * __user *) regs.ecx,
(char __user * __user *) regs.edx,
&regs);
if (error == 0) {
current->ptrace &= ~PT_DTRACE;
/* Make sure we don't return using sysenter.. */
......@@ -633,7 +636,7 @@ static int get_free_idx(void)
/*
* Set a given TLS descriptor:
*/
asmlinkage int sys_set_thread_area(struct user_desc *u_info)
asmlinkage int sys_set_thread_area(struct user_desc __user *u_info)
{
struct thread_struct *t = &current->thread;
struct user_desc info;
......@@ -700,7 +703,7 @@ asmlinkage int sys_set_thread_area(struct user_desc *u_info)
#define GET_PRESENT(desc) (((desc)->b >> 15) & 1)
#define GET_USEABLE(desc) (((desc)->b >> 20) & 1)
asmlinkage int sys_get_thread_area(struct user_desc *u_info)
asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
{
struct user_desc info;
struct desc_struct *desc;
......
......@@ -12,6 +12,7 @@
#include <linux/sched.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/elf.h>
#include <asm/cpufeature.h>
#include <asm/msr.h>
......@@ -52,11 +53,47 @@ void enable_sep_cpu(void *info)
static int __init sysenter_setup(void)
{
static const char int80[] = {
static const char __initdata int80[] = {
0xcd, 0x80, /* int $0x80 */
0xc3 /* ret */
};
static const char sysent[] = {
/* Unwind information for the int80 code. Keep track of
where the return address is stored. */
static const char __initdata int80_eh_frame[] = {
/* First the Common Information Entry (CIE): */
0x14, 0x00, 0x00, 0x00, /* Length of the CIE */
0x00, 0x00, 0x00, 0x00, /* CIE Identifier Tag */
0x01, /* CIE Version */
'z', 'R', 0x00, /* CIE Augmentation */
0x01, /* CIE Code Alignment Factor */
0x7c, /* CIE Data Alignment Factor */
0x08, /* CIE RA Column */
0x01, /* Augmentation size */
0x1b, /* FDE Encoding (pcrel sdata4) */
0x0c, /* DW_CFA_def_cfa */
0x04,
0x04,
0x88, /* DW_CFA_offset, column 0x8 */
0x01,
0x00, /* padding */
0x00,
/* Now the FDE which contains the instructions for the frame. */
0x0a, 0x00, 0x00, 0x00, /* FDE Length */
0x1c, 0x00, 0x00, 0x00, /* FDE CIE offset */
/* The PC-relative offset to the beginning of the code this
FDE covers. The computation below assumes that the offset
can be represented in one byte. Change if this is not true
anymore. The offset from the beginning of the .eh_frame
is represented by EH_FRAME_OFFSET. The word with the offset
starts at byte 0x20 of the .eh_frame. */
0x100 - (EH_FRAME_OFFSET + 0x20),
0xff, 0xff, 0xff, /* FDE initial location */
3, /* FDE address range */
0x00 /* Augmentation size */
/* The code does not change the stack pointer. We need not
record any operations. */
};
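/* For illustration: the FDE initial location is a pc-relative sdata4
   value measured from the field itself, which sits at page offset
   EH_FRAME_OFFSET + 0x20 (0x18 bytes of CIE plus the FDE length and
   CIE-offset words), while the int80 code sits at offset 0 of the
   page. The encoded value is therefore -(EH_FRAME_OFFSET + 0x20); for
   a hypothetical EH_FRAME_OFFSET of 0x60 that is -0x80, stored
   little-endian as 0x80 0xff 0xff 0xff, i.e. exactly
   "0x100 - (EH_FRAME_OFFSET + 0x20), 0xff, 0xff, 0xff" as long as the
   offset fits in one byte. The sysenter frame below uses the same
   trick. */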
static const char __initdata sysent[] = {
0x51, /* push %ecx */
0x52, /* push %edx */
0x55, /* push %ebp */
......@@ -76,13 +113,71 @@ static int __init sysenter_setup(void)
0x59, /* pop %ecx */
0xc3 /* ret */
};
static const char sigreturn[] = {
/* Unwind information for the sysenter code. Keep track of
where the return address is stored. */
static const char __initdata sysent_eh_frame[] = {
/* First the Common Information Entry (CIE): */
0x14, 0x00, 0x00, 0x00, /* Length of the CIE */
0x00, 0x00, 0x00, 0x00, /* CIE Identifier Tag */
0x01, /* CIE Version */
'z', 'R', 0x00, /* CIE Augmentation */
0x01, /* CIE Code Alignment Factor */
0x7c, /* CIE Data Alignment Factor */
0x08, /* CIE RA Column */
0x01, /* Augmentation size */
0x1b, /* FDE Encoding (pcrel sdata4) */
0x0c, /* DW_CFA_def_cfa */
0x04,
0x04,
0x88, /* DW_CFA_offset, column 0x8 */
0x01,
0x00, /* padding */
0x00,
/* Now the FDE which contains the instructions for the frame. */
0x22, 0x00, 0x00, 0x00, /* FDE Length */
0x1c, 0x00, 0x00, 0x00, /* FDE CIE offset */
/* The PC-relative offset to the beginning of the code this
FDE covers. The computation below assumes that the offset
can be represented in one byte. Change if this is not true
anymore. The offset from the beginning of the .eh_frame
is represented by EH_FRAME_OFFSET. The word with the offset
starts at byte 0x20 of the .eh_frame. */
0x100 - (EH_FRAME_OFFSET + 0x20),
0xff, 0xff, 0xff, /* FDE initial location */
0x14, 0x00, 0x00, 0x00, /* FDE address range */
0x00, /* Augmentation size */
/* What follows are the instructions for the table generation.
We have to record all changes of the stack pointer and
callee-saved registers. */
0x41, /* DW_CFA_advance_loc+1, push %ecx */
0x0e, /* DW_CFA_def_cfa_offset */
0x08, /* RA at offset 8 now */
0x41, /* DW_CFA_advance_loc+1, push %edx */
0x0e, /* DW_CFA_def_cfa_offset */
0x0c, /* RA at offset 12 now */
0x41, /* DW_CFA_advance_loc+1, push %ebp */
0x0e, /* DW_CFA_def_cfa_offset */
0x10, /* RA at offset 16 now */
0x85, 0x04, /* DW_CFA_offset %ebp -16 */
/* Finally the epilogue. */
0x4e, /* DW_CFA_advance_loc+14, pop %ebx */
0x0e, /* DW_CFA_def_cfa_offset */
0x12, /* RA at offset 12 now */
0xc5, /* DW_CFA_restore %ebp */
0x41, /* DW_CFA_advance_loc+1, pop %edx */
0x0e, /* DW_CFA_def_cfa_offset */
0x08, /* RA at offset 8 now */
0x41, /* DW_CFA_advance_loc+1, pop %ecx */
0x0e, /* DW_CFA_def_cfa_offset */
0x04 /* RA at offset 4 now */
};
static const char __initdata sigreturn[] = {
/* 32: sigreturn point */
0x58, /* popl %eax */
0xb8, __NR_sigreturn, 0, 0, 0, /* movl $__NR_sigreturn, %eax */
0xcd, 0x80, /* int $0x80 */
};
static const char rt_sigreturn[] = {
static const char __initdata rt_sigreturn[] = {
/* 64: rt_sigreturn point */
0xb8, __NR_rt_sigreturn, 0, 0, 0, /* movl $__NR_rt_sigreturn, %eax */
0xcd, 0x80, /* int $0x80 */
......@@ -93,10 +188,14 @@ static int __init sysenter_setup(void)
memcpy((void *) page, int80, sizeof(int80));
memcpy((void *)(page + 32), sigreturn, sizeof(sigreturn));
memcpy((void *)(page + 64), rt_sigreturn, sizeof(rt_sigreturn));
memcpy((void *)(page + EH_FRAME_OFFSET), int80_eh_frame,
sizeof(int80_eh_frame));
if (!boot_cpu_has(X86_FEATURE_SEP))
return 0;
memcpy((void *) page, sysent, sizeof(sysent));
memcpy((void *)(page + EH_FRAME_OFFSET), sysent_eh_frame,
sizeof(sysent_eh_frame));
on_each_cpu(enable_sep_cpu, NULL, 1, 1);
return 0;
}
......
......@@ -439,8 +439,8 @@ ia32_syscall_table:
data8 sys_ni_syscall
data8 sys_ni_syscall
data8 compat_sys_futex /* 240 */
data8 sys_ni_syscall
data8 sys_ni_syscall
data8 compat_sys_setaffinity
data8 compat_sys_getaffinity
data8 sys_ni_syscall
data8 sys_ni_syscall
data8 sys_ni_syscall /* 245 */
......
......@@ -598,8 +598,8 @@ sys_call_table:
ENTRY_SAME(sendfile64)
ENTRY_COMP(futex) /* 210 */
ENTRY_SAME(sched_setaffinity)
ENTRY_SAME(sched_getaffinity)
ENTRY_COMP(sched_setaffinity)
ENTRY_COMP(sched_getaffinity)
ENTRY_SAME(ni_syscall)
ENTRY_SAME(ni_syscall)
ENTRY_SAME(io_setup) /* 215 */
......
......@@ -724,8 +724,8 @@ _GLOBAL(sys_call_table32)
.llong .sys_lremovexattr
.llong .sys_fremovexattr /* 220 */
.llong .compat_sys_futex
.llong .sys32_sched_setaffinity
.llong .sys32_sched_getaffinity
.llong .compat_sys_sched_setaffinity
.llong .compat_sys_sched_getaffinity
.llong .sys_ni_syscall
.llong .sys_ni_syscall /* 225 - reserved for tux */
.llong .sys32_sendfile64
......
......@@ -2780,56 +2780,6 @@ asmlinkage long sys32_time(compat_time_t* tloc)
return secs;
}
extern asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
unsigned long *user_mask_ptr);
asmlinkage long sys32_sched_setaffinity(compat_pid_t pid, unsigned int len,
u32 *user_mask_ptr)
{
unsigned long kernel_mask;
mm_segment_t old_fs;
int ret;
if (get_user(kernel_mask, user_mask_ptr))
return -EFAULT;
old_fs = get_fs();
set_fs(KERNEL_DS);
ret = sys_sched_setaffinity(pid,
/* XXX Nice api... */
sizeof(kernel_mask),
&kernel_mask);
set_fs(old_fs);
return ret;
}
extern asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
unsigned long *user_mask_ptr);
asmlinkage long sys32_sched_getaffinity(compat_pid_t pid, unsigned int len,
u32 *user_mask_ptr)
{
unsigned long kernel_mask;
mm_segment_t old_fs;
int ret;
old_fs = get_fs();
set_fs(KERNEL_DS);
ret = sys_sched_getaffinity(pid,
/* XXX Nice api... */
sizeof(kernel_mask),
&kernel_mask);
set_fs(old_fs);
if (ret > 0) {
if (put_user(kernel_mask, user_mask_ptr))
ret = -EFAULT;
}
return ret;
}
int sys32_olduname(struct oldold_utsname * name)
{
int error;
......
......@@ -2831,56 +2831,6 @@ asmlinkage long sys32_sysctl(struct __sysctl_args32 *args)
return error;
}
extern asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
unsigned long *user_mask_ptr);
asmlinkage long sys32_sched_setaffinity(compat_pid_t pid, unsigned int len,
u32 *user_mask_ptr)
{
unsigned long kernel_mask;
mm_segment_t old_fs;
int ret;
if (get_user(kernel_mask, user_mask_ptr))
return -EFAULT;
old_fs = get_fs();
set_fs(KERNEL_DS);
ret = sys_sched_setaffinity(pid,
/* XXX Nice api... */
sizeof(kernel_mask),
&kernel_mask);
set_fs(old_fs);
return ret;
}
extern asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
unsigned long *user_mask_ptr);
asmlinkage long sys32_sched_getaffinity(compat_pid_t pid, unsigned int len,
u32 *user_mask_ptr)
{
unsigned long kernel_mask;
mm_segment_t old_fs;
int ret;
old_fs = get_fs();
set_fs(KERNEL_DS);
ret = sys_sched_getaffinity(pid,
/* XXX Nice api... */
sizeof(kernel_mask),
&kernel_mask);
set_fs(old_fs);
if (ret > 0) {
if (put_user(kernel_mask, user_mask_ptr))
ret = -EFAULT;
}
return ret;
}
extern long sys_lookup_dcookie(u64 cookie64, char *buf, size_t len);
long sys32_lookup_dcookie(u32 cookie_high, u32 cookie_low, char *buf, size_t len)
......
......@@ -51,7 +51,7 @@ sys_call_table32:
.word sys32_setrlimit, sys_pivot_root, sys32_prctl, sys32_pciconfig_read, sys32_pciconfig_write
/*150*/ .word sys_nis_syscall, sys_nis_syscall, sys_nis_syscall, sys_poll, sys_getdents64
.word compat_sys_fcntl64, sys_ni_syscall, compat_sys_statfs, compat_sys_fstatfs, sys_oldumount
/*160*/ .word sys32_sched_setaffinity, sys32_sched_getaffinity, sys_getdomainname, sys_setdomainname, sys_nis_syscall
/*160*/ .word compat_sys_sched_setaffinity, compat_sys_sched_getaffinity, sys_getdomainname, sys_setdomainname, sys_nis_syscall
.word sys_quotactl, sys_set_tid_address, sys32_mount, sys_ustat, sys_setxattr
/*170*/ .word sys_lsetxattr, sys_fsetxattr, sys_getxattr, sys_lgetxattr, sys32_getdents
.word sys_setsid, sys_fchdir, sys_fgetxattr, sys_listxattr, sys_llistxattr
......
......@@ -445,8 +445,8 @@ ia32_sys_call_table:
.quad sys_tkill /* 238 */
.quad sys_sendfile64
.quad compat_sys_futex /* 240 */
.quad sys32_sched_setaffinity
.quad sys32_sched_getaffinity
.quad compat_sys_sched_setaffinity
.quad compat_sys_sched_getaffinity
.quad sys32_set_thread_area
.quad sys32_get_thread_area
.quad sys32_io_setup
......
......@@ -2038,41 +2038,6 @@ long sys32_module_warning(void)
return -ENOSYS ;
}
long sys_sched_getaffinity(pid_t pid, unsigned int len, unsigned long *new_mask_ptr);
long sys_sched_setaffinity(pid_t pid, unsigned int len, unsigned long *new_mask_ptr);
/* only works on LE */
long sys32_sched_setaffinity(pid_t pid, unsigned int len,
unsigned int *new_mask_ptr)
{
mm_segment_t oldfs = get_fs();
unsigned long mask;
int err;
if (get_user(mask, new_mask_ptr))
return -EFAULT;
set_fs(KERNEL_DS);
err = sys_sched_setaffinity(pid,sizeof(mask),&mask);
set_fs(oldfs);
return err;
}
/* only works on LE */
long sys32_sched_getaffinity(pid_t pid, unsigned int len,
unsigned int *new_mask_ptr)
{
mm_segment_t oldfs = get_fs();
unsigned long mask;
int err;
mask = 0;
set_fs(KERNEL_DS);
err = sys_sched_getaffinity(pid,sizeof(mask),&mask);
set_fs(oldfs);
if (err > 0)
err = put_user((u32)mask, new_mask_ptr);
return err;
}
extern long sys_io_setup(unsigned nr_reqs, aio_context_t *ctx);
long sys32_io_setup(unsigned nr_reqs, u32 *ctx32p)
......
......@@ -9,7 +9,7 @@ config LCS
or zSeries. This device driver supports Token Ring (IEEE 802.5),
FDDI (IEEE 802.7) and Ethernet.
This option is also available as a module which will be
called lcs.o . If you do not know what it is, it's safe to say "Y".
called lcs.ko. If you do not know what it is, it's safe to say "Y".
config CTC
tristate "CTC device support"
......@@ -20,7 +20,7 @@ config CTC
coupling using ESCON. It also supports virtual CTCs when running
under VM. It will use the channel device configuration if this is
available. This option is also available as a module which will be
called ctc.o. If you do not know what it is, it's safe to say "Y".
called ctc.ko. If you do not know what it is, it's safe to say "Y".
config IUCV
tristate "IUCV device support (VM only)"
......@@ -28,7 +28,7 @@ config IUCV
help
Select this option if you want to use inter-user communication
vehicle networking under VM or VIF. This option is also available
as a module which will be called iucv.o. If unsure, say "Y".
as a module which will be called iucv.ko. If unsure, say "Y".
config CCWGROUP
tristate
......
/*
* $Id: ctcmain.c,v 1.36 2003/02/18 09:15:14 mschwide Exp $
* $Id: ctcmain.c,v 1.40 2003/04/08 16:00:17 mschwide Exp $
*
* CTC / ESCON network driver
*
......@@ -36,13 +36,12 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* RELEASE-TAG: CTC/ESCON network driver $Revision: 1.36 $
* RELEASE-TAG: CTC/ESCON network driver $Revision: 1.40 $
*
*/
#undef DEBUG
#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
......@@ -273,7 +272,7 @@ static void
print_banner(void)
{
static int printed = 0;
char vbuf[] = "$Revision: 1.36 $";
char vbuf[] = "$Revision: 1.40 $";
char *version = vbuf;
if (printed)
......@@ -1962,22 +1961,17 @@ ctc_irq_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
}
priv = cdev->dev.driver_data;
ch = (struct channel *) intparm;
if ((ch != priv->channel[READ]) && (ch != priv->channel[WRITE]))
ch = NULL;
if (!ch) {
/* Try to extract channel from driver data. */
if (priv->channel[READ]->cdev == cdev)
ch = priv->channel[READ];
else if (priv->channel[WRITE]->cdev == cdev)
ch = priv->channel[READ];
else {
printk(KERN_ERR
"ctc: Can't determine channel for interrupt, "
"device %s\n", cdev->dev.bus_id);
return;
}
/* Try to extract channel from driver data. */
if (priv->channel[READ]->cdev == cdev)
ch = priv->channel[READ];
else if (priv->channel[WRITE]->cdev == cdev)
ch = priv->channel[WRITE];
else {
printk(KERN_ERR
"ctc: Can't determine channel for interrupt, "
"device %s\n", cdev->dev.bus_id);
return;
}
dev = (struct net_device *) (ch->netdev);
......@@ -2392,7 +2386,6 @@ transmit_skb(struct channel *ch, struct sk_buff *skb)
static int
ctc_open(struct net_device * dev)
{
MOD_INC_USE_COUNT;
fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_START, dev);
return 0;
}
......@@ -2409,7 +2402,6 @@ static int
ctc_close(struct net_device * dev)
{
fsm_event(((struct ctc_priv *) dev->priv)->fsm, DEV_EVENT_STOP, dev);
MOD_DEC_USE_COUNT;
return 0;
}
......@@ -2761,6 +2753,7 @@ ctc_init_netdevice(struct net_device * dev, int alloc_device,
dev->addr_len = 0;
dev->type = ARPHRD_SLIP;
dev->tx_queue_len = 100;
dev->owner = THIS_MODULE;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
return dev;
}
......
/*
* $Id: ctctty.c,v 1.9 2002/12/02 15:25:13 aberg Exp $
* $Id: ctctty.c,v 1.10 2003/03/21 18:47:31 aberg Exp $
*
* CTC / ESCON network driver, tty interface.
*
......@@ -22,7 +22,6 @@
*
*/
#define __NO_VERSION__
#include <linux/config.h>
#include <linux/module.h>
#include <linux/tty.h>
......
/**
* $Id: fsm.c,v 1.3 2002/10/08 16:53:45 mschwide Exp $
* $Id: fsm.c,v 1.4 2003/03/28 08:54:40 mschwide Exp $
*
* A generic FSM based on fsm used in isdn4linux
*
*/
#include "fsm.h"
#include <linux/version.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/timer.h>
......
/*
* $Id: iucv.c,v 1.9 2002/11/06 13:37:25 cohuck Exp $
* $Id: iucv.c,v 1.10 2003/03/28 08:54:40 mschwide Exp $
*
* IUCV network driver
*
......@@ -29,14 +29,13 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* RELEASE-TAG: IUCV lowlevel driver $Revision: 1.9 $
* RELEASE-TAG: IUCV lowlevel driver $Revision: 1.10 $
*
*/
#include <linux/module.h>
#include <linux/config.h>
#include <linux/version.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/slab.h>
......@@ -333,7 +332,7 @@ do { \
static void
iucv_banner(void)
{
char vbuf[] = "$Revision: 1.9 $";
char vbuf[] = "$Revision: 1.10 $";
char *version = vbuf;
if ((version = strchr(version, ':'))) {
......
......@@ -11,7 +11,7 @@
* Frank Pavlic (pavlic@de.ibm.com) and
* Martin Schwidefsky <schwidefsky@de.ibm.com>
*
* $Revision: 1.44 $ $Date: 2003/02/18 19:49:02 $
* $Revision: 1.51 $ $Date: 2003/03/28 08:54:40 $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
......@@ -28,7 +28,6 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/version.h>
#include <linux/module.h>
#include <linux/if.h>
#include <linux/netdevice.h>
......@@ -59,7 +58,7 @@
/**
* initialization string for output
*/
#define VERSION_LCS_C "$Revision: 1.44 $"
#define VERSION_LCS_C "$Revision: 1.51 $"
static char version[] __initdata = "LCS driver ("VERSION_LCS_C "/" VERSION_LCS_H ")";
......@@ -335,7 +334,9 @@ lcs_setup_card(struct lcs_card *card)
(void *)lcs_start_kernel_thread,card);
card->thread_mask = 0;
spin_lock_init(&card->lock);
#ifdef CONFIG_IP_MULTICAST
INIT_LIST_HEAD(&card->ipm_list);
#endif
INIT_LIST_HEAD(&card->lancmd_waiters);
return 0;
}
......@@ -358,6 +359,7 @@ lcs_cleanup_card(struct lcs_card *card)
kfree(ipm_list);
}
#endif
kfree(card->dev);
/* Cleanup channels. */
lcs_cleanup_channel(&card->write);
lcs_cleanup_channel(&card->read);
......@@ -556,13 +558,12 @@ lcs_ready_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
static int
__lcs_processed_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
{
int index, prevprev, prev, next;
int index, prev, next;
if (buffer->state != BUF_STATE_READY)
BUG();
buffer->state = BUF_STATE_PROCESSED;
index = buffer - channel->iob;
prevprev = (index - 1) & (LCS_NUM_BUFFS - 1);
prev = (index - 1) & (LCS_NUM_BUFFS - 1);
next = (index + 1) & (LCS_NUM_BUFFS - 1);
/* Set the suspend bit and clear the PCI bit of this buffer. */
......@@ -1082,7 +1083,7 @@ lcs_tasklet(unsigned long data)
unsigned long flags;
struct lcs_channel *channel;
struct lcs_buffer *iob;
int buf_idx, io_idx;
int buf_idx;
int rc;
channel = (struct lcs_channel *) data;
......@@ -1092,9 +1093,7 @@ lcs_tasklet(unsigned long data)
/* Check for processed buffers. */
iob = channel->iob;
buf_idx = channel->buf_idx;
io_idx = channel->io_idx;
while (buf_idx != io_idx &&
iob[buf_idx].state == BUF_STATE_PROCESSED) {
while (iob[buf_idx].state == BUF_STATE_PROCESSED) {
/* Do the callback thing. */
if (iob[buf_idx].callback != NULL)
iob[buf_idx].callback(channel, iob + buf_idx);
......@@ -1434,6 +1433,7 @@ static int
lcs_lgw_stoplan_thread(void *data)
{
struct lcs_card *card;
int rc;
card = (struct lcs_card *) data;
daemonize("lgwstop");
......@@ -1446,7 +1446,11 @@ lcs_lgw_stoplan_thread(void *data)
else
PRINT_ERR("Stoplan %s initiated by LGW failed!\n",
card->dev->name);
return 0;
/*Try to reset the card, stop it on failure */
rc = lcs_resetcard(card);
if (rc != 0)
rc = lcs_stopcard(card);
return rc;
}
/**
......@@ -1462,8 +1466,10 @@ lcs_start_kernel_thread(struct lcs_card *card)
kernel_thread(lcs_lgw_startlan_thread, (void *) card, SIGCHLD);
if (test_and_clear_bit(2, &card->thread_mask))
kernel_thread(lcs_lgw_stoplan_thread, (void *) card, SIGCHLD);
#ifdef CONFIG_IP_MULTICAST
if (test_and_clear_bit(3, &card->thread_mask))
kernel_thread(lcs_fix_multicast_list, (void *) card, SIGCHLD);
#endif
}
/**
......@@ -1599,12 +1605,9 @@ lcs_stop_device(struct net_device *dev)
LCS_DBF_TEXT(2, trace, "stopdev");
card = (struct lcs_card *) dev->priv;
netif_stop_queue(dev);
// FIXME: really free the net_device here ?!?
kfree(card->dev);
rc = lcs_stopcard(card);
if (rc)
PRINT_ERR("Try it again!\n ");
MOD_DEC_USE_COUNT;
return rc;
}
......@@ -1626,7 +1629,6 @@ lcs_open_device(struct net_device *dev)
PRINT_ERR("LCS:Error in opening device!\n");
} else {
MOD_INC_USE_COUNT;
netif_wake_queue(dev);
card->state = DEV_STATE_UP;
}
......@@ -1784,6 +1786,7 @@ lcs_new_device(struct ccwgroup_device *ccwgdev)
dev->set_multicast_list = lcs_set_multicast_list;
#endif
dev->get_stats = lcs_getstats;
dev->owner = THIS_MODULE;
netif_stop_queue(dev);
lcs_stopcard(card);
return 0;
......
/*
* $Id: netiucv.c,v 1.16 2003/02/18 09:15:14 mschwide Exp $
* $Id: netiucv.c,v 1.19 2003/04/08 16:00:17 mschwide Exp $
*
* IUCV network driver
*
......@@ -30,7 +30,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* RELEASE-TAG: IUCV network driver $Revision: 1.16 $
* RELEASE-TAG: IUCV network driver $Revision: 1.19 $
*
*/
......@@ -1140,7 +1140,6 @@ netiucv_transmit_skb(struct iucv_connection *conn, struct sk_buff *skb) {
*/
static int
netiucv_open(struct net_device *dev) {
MOD_INC_USE_COUNT;
SET_DEVICE_START(dev, 1);
fsm_event(((struct netiucv_priv *)dev->priv)->fsm, DEV_EVENT_START, dev);
return 0;
......@@ -1158,7 +1157,6 @@ static int
netiucv_close(struct net_device *dev) {
SET_DEVICE_START(dev, 0);
fsm_event(((struct netiucv_priv *)dev->priv)->fsm, DEV_EVENT_STOP, dev);
MOD_DEC_USE_COUNT;
return 0;
}
......@@ -1517,12 +1515,14 @@ netiucv_new_connection(struct net_device *dev, char *username)
conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
conn->netdev = dev;
conn->rx_buff = alloc_skb(NETIUCV_BUFSIZE_DEFAULT, GFP_DMA);
conn->rx_buff = alloc_skb(NETIUCV_BUFSIZE_DEFAULT,
GFP_KERNEL | GFP_DMA);
if (!conn->rx_buff) {
kfree(conn);
return NULL;
}
conn->tx_buff = alloc_skb(NETIUCV_BUFSIZE_DEFAULT, GFP_DMA);
conn->tx_buff = alloc_skb(NETIUCV_BUFSIZE_DEFAULT,
GFP_KERNEL | GFP_DMA);
if (!conn->tx_buff) {
kfree_skb(conn->rx_buff);
kfree(conn);
......@@ -1630,6 +1630,7 @@ netiucv_init_netdevice(int ifno, char *username)
dev->addr_len = 0;
dev->type = ARPHRD_SLIP;
dev->tx_queue_len = NETIUCV_QUEUELEN_DEFAULT;
dev->owner = THIS_MODULE;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
return dev;
}
......@@ -1716,7 +1717,7 @@ static struct device_driver netiucv_driver = {
static void
netiucv_banner(void)
{
char vbuf[] = "$Revision: 1.16 $";
char vbuf[] = "$Revision: 1.19 $";
char *version = vbuf;
if ((version = strchr(version, ':'))) {
......
......@@ -371,7 +371,7 @@ static int init_coda_psdev(void)
CODA_PSDEV_MAJOR);
return -EIO;
}
devfs_mk_dir (NULL, "coda", NULL);
devfs_mk_dir ("coda");
for (i = 0; i < MAX_CODADEVS; i++) {
char name[16];
sprintf(name, "coda/%d", i);
......
......@@ -491,7 +491,8 @@ int kernel_read(struct file *file, unsigned long offset,
old_fs = get_fs();
set_fs(get_ds());
result = vfs_read(file, addr, count, &pos);
/* The cast to a user pointer is valid due to the set_fs() */
result = vfs_read(file, (void __user *)addr, count, &pos);
set_fs(old_fs);
return result;
}
......
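The kernel_read() hunk above keeps the kernel-buffer read working by widening the uaccess limit with set_fs(get_ds()) before calling vfs_read(); the __user cast is legitimate only inside that window. A minimal sketch of the pattern, assuming the 2.5-era get_fs()/set_fs() API and the vfs_read() prototype that appears later in this diff; the wrapper name is hypothetical.

#include <linux/fs.h>
#include <asm/uaccess.h>

/* Sketch only: read a file into a kernel buffer by temporarily letting
 * the uaccess checks accept kernel addresses. */
static ssize_t read_into_kernel_buffer(struct file *file, char *buf,
				       size_t count, loff_t pos)
{
	mm_segment_t old_fs = get_fs();		/* remember the caller's limit */
	ssize_t result;

	set_fs(get_ds());			/* accept kernel addresses */
	/* the __user cast is valid only because of set_fs() above */
	result = vfs_read(file, (char __user *)buf, count, &pos);
	set_fs(old_fs);				/* restore the original limit */
	return result;
}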
......@@ -847,6 +847,7 @@ int path_lookup(const char *name, unsigned int flags, struct nameidata *nd)
read_unlock(&current->fs->lock);
if (__emul_lookup_dentry(name,nd))
return 0;
read_lock(&current->fs->lock);
}
nd->mnt = mntget(current->fs->rootmnt);
nd->dentry = dget(current->fs->root);
......
......@@ -367,7 +367,7 @@ static int do_umount(struct vfsmount *mnt, int flags)
* unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
*/
asmlinkage long sys_umount(char * name, int flags)
asmlinkage long sys_umount(char __user * name, int flags)
{
struct nameidata nd;
int retval;
......@@ -396,7 +396,7 @@ asmlinkage long sys_umount(char * name, int flags)
* The 2.0 compatible umount. No flags.
*/
asmlinkage long sys_oldumount(char * name)
asmlinkage long sys_oldumount(char __user * name)
{
return sys_umount(name,0);
}
......@@ -664,7 +664,7 @@ static int do_add_mount(struct nameidata *nd, char *type, int flags,
return err;
}
static int copy_mount_options (const void *data, unsigned long *where)
static int copy_mount_options (const void __user *data, unsigned long *where)
{
int i;
unsigned long page;
......@@ -842,8 +842,9 @@ int copy_namespace(int flags, struct task_struct *tsk)
return -ENOMEM;
}
asmlinkage long sys_mount(char * dev_name, char * dir_name, char * type,
unsigned long flags, void * data)
asmlinkage long sys_mount(char __user * dev_name, char __user * dir_name,
char __user * type, unsigned long flags,
void __user * data)
{
int retval;
unsigned long data_page;
......@@ -963,7 +964,7 @@ static void chroot_fs_refs(struct nameidata *old_nd, struct nameidata *new_nd)
* first.
*/
asmlinkage long sys_pivot_root(const char *new_root, const char *put_old)
asmlinkage long sys_pivot_root(const char __user *new_root, const char __user *put_old)
{
struct vfsmount *tmp;
struct nameidata new_nd, old_nd, parent_nd, root_parent, user_nd;
......
......@@ -87,10 +87,10 @@ static struct {
};
long
asmlinkage sys_nfsservctl(int cmd, struct nfsctl_arg *arg, void *res)
asmlinkage sys_nfsservctl(int cmd, struct nfsctl_arg __user *arg, void __user *res)
{
struct file *file;
void *p = &arg->u;
void __user *p = &arg->u;
int version;
int err;
......
......@@ -335,7 +335,7 @@ static ssize_t do_readv_writev(int type, struct file *file,
const struct iovec __user * uvector,
unsigned long nr_segs, loff_t *pos)
{
typedef ssize_t (*io_fn_t)(struct file *, char *, size_t, loff_t *);
typedef ssize_t (*io_fn_t)(struct file *, char __user *, size_t, loff_t *);
typedef ssize_t (*iov_fn_t)(struct file *, const struct iovec *, unsigned long, loff_t *);
size_t tot_len;
......@@ -423,7 +423,7 @@ static ssize_t do_readv_writev(int type, struct file *file,
ret = 0;
vector = iov;
while (nr_segs > 0) {
void * base;
void __user * base;
size_t len;
ssize_t nr;
......
......@@ -3,8 +3,9 @@
#include <linux/config.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <asm/compiler.h>
#include <asm/system.h>
/*
* T2 is the internal name for the core logic chipset which provides
......@@ -22,6 +23,7 @@
#define T2_MEM_R1_MASK 0x07ffffff /* Mem sparse region 1 mask is 26 bits */
/* GAMMA-SABLE is a SABLE with EV5-based CPUs */
/* All LYNX machines, EV4 or EV5, use the GAMMA bias also */
#define _GAMMA_BIAS 0x8000000000UL
#if defined(CONFIG_ALPHA_GENERIC)
......@@ -57,10 +59,33 @@
#define T2_WMASK2 (IDENT_ADDR + GAMMA_BIAS + 0x38e0001c0UL)
#define T2_TBASE2 (IDENT_ADDR + GAMMA_BIAS + 0x38e0001e0UL)
#define T2_TLBBR (IDENT_ADDR + GAMMA_BIAS + 0x38e000200UL)
#define T2_IVR (IDENT_ADDR + GAMMA_BIAS + 0x38e000220UL)
#define T2_HAE_3 (IDENT_ADDR + GAMMA_BIAS + 0x38e000240UL)
#define T2_HAE_4 (IDENT_ADDR + GAMMA_BIAS + 0x38e000260UL)
/* The CSRs below are T3/T4 only */
#define T2_WBASE3 (IDENT_ADDR + GAMMA_BIAS + 0x38e000280UL)
#define T2_WMASK3 (IDENT_ADDR + GAMMA_BIAS + 0x38e0002a0UL)
#define T2_TBASE3 (IDENT_ADDR + GAMMA_BIAS + 0x38e0002c0UL)
#define T2_TDR0 (IDENT_ADDR + GAMMA_BIAS + 0x38e000300UL)
#define T2_TDR1 (IDENT_ADDR + GAMMA_BIAS + 0x38e000320UL)
#define T2_TDR2 (IDENT_ADDR + GAMMA_BIAS + 0x38e000340UL)
#define T2_TDR3 (IDENT_ADDR + GAMMA_BIAS + 0x38e000360UL)
#define T2_TDR4 (IDENT_ADDR + GAMMA_BIAS + 0x38e000380UL)
#define T2_TDR5 (IDENT_ADDR + GAMMA_BIAS + 0x38e0003a0UL)
#define T2_TDR6 (IDENT_ADDR + GAMMA_BIAS + 0x38e0003c0UL)
#define T2_TDR7 (IDENT_ADDR + GAMMA_BIAS + 0x38e0003e0UL)
#define T2_WBASE4 (IDENT_ADDR + GAMMA_BIAS + 0x38e000400UL)
#define T2_WMASK4 (IDENT_ADDR + GAMMA_BIAS + 0x38e000420UL)
#define T2_TBASE4 (IDENT_ADDR + GAMMA_BIAS + 0x38e000440UL)
#define T2_AIR (IDENT_ADDR + GAMMA_BIAS + 0x38e000460UL)
#define T2_VAR (IDENT_ADDR + GAMMA_BIAS + 0x38e000480UL)
#define T2_DIR (IDENT_ADDR + GAMMA_BIAS + 0x38e0004a0UL)
#define T2_ICE (IDENT_ADDR + GAMMA_BIAS + 0x38e0004c0UL)
#define T2_HAE_ADDRESS T2_HAE_1
/* T2 CSRs are in the non-cachable primary IO space from 3.8000.0000 to
......@@ -100,6 +125,9 @@
#define T2_CPU1_BASE (IDENT_ADDR + GAMMA_BIAS + 0x381000000L)
#define T2_CPU2_BASE (IDENT_ADDR + GAMMA_BIAS + 0x382000000L)
#define T2_CPU3_BASE (IDENT_ADDR + GAMMA_BIAS + 0x383000000L)
#define T2_CPUn_BASE(n) (T2_CPU0_BASE + (((n)&3) * 0x001000000L))
#define T2_MEM0_BASE (IDENT_ADDR + GAMMA_BIAS + 0x388000000L)
#define T2_MEM1_BASE (IDENT_ADDR + GAMMA_BIAS + 0x389000000L)
#define T2_MEM2_BASE (IDENT_ADDR + GAMMA_BIAS + 0x38a000000L)
......@@ -408,87 +436,120 @@ __EXTERN_INLINE void t2_outl(u32 b, unsigned long addr)
set_hae(msb); \
}
static spinlock_t t2_hae_lock = SPIN_LOCK_UNLOCKED;
__EXTERN_INLINE u8 t2_readb(unsigned long addr)
{
unsigned long result, msb;
unsigned long flags;
spin_lock_irqsave(&t2_hae_lock, flags);
t2_set_hae;
result = *(vip) ((addr << 5) + T2_SPARSE_MEM + 0x00);
spin_unlock_irqrestore(&t2_hae_lock, flags);
return __kernel_extbl(result, addr & 3);
}
__EXTERN_INLINE u16 t2_readw(unsigned long addr)
{
unsigned long result, msb;
unsigned long flags;
spin_lock_irqsave(&t2_hae_lock, flags);
t2_set_hae;
result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08);
spin_unlock_irqrestore(&t2_hae_lock, flags);
return __kernel_extwl(result, addr & 3);
}
/* On SABLE with T2, we must use SPARSE memory even for 32-bit access. */
/*
* On SABLE with T2, we must use SPARSE memory even for 32-bit access,
* because we cannot access all of DENSE without changing its HAE.
*/
__EXTERN_INLINE u32 t2_readl(unsigned long addr)
{
unsigned long msb;
unsigned long result, msb;
unsigned long flags;
spin_lock_irqsave(&t2_hae_lock, flags);
t2_set_hae;
return *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18);
result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18);
spin_unlock_irqrestore(&t2_hae_lock, flags);
return result & 0xffffffffUL;
}
__EXTERN_INLINE u64 t2_readq(unsigned long addr)
{
unsigned long r0, r1, work, msb;
unsigned long flags;
spin_lock_irqsave(&t2_hae_lock, flags);
t2_set_hae;
work = (addr << 5) + T2_SPARSE_MEM + 0x18;
r0 = *(vuip)(work);
r1 = *(vuip)(work + (4 << 5));
spin_unlock_irqrestore(&t2_hae_lock, flags);
return r1 << 32 | r0;
}
__EXTERN_INLINE void t2_writeb(u8 b, unsigned long addr)
{
unsigned long msb, w;
unsigned long flags;
spin_lock_irqsave(&t2_hae_lock, flags);
t2_set_hae;
w = __kernel_insbl(b, addr & 3);
*(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x00) = w;
spin_unlock_irqrestore(&t2_hae_lock, flags);
}
__EXTERN_INLINE void t2_writew(u16 b, unsigned long addr)
{
unsigned long msb, w;
unsigned long flags;
spin_lock_irqsave(&t2_hae_lock, flags);
t2_set_hae;
w = __kernel_inswl(b, addr & 3);
*(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08) = w;
spin_unlock_irqrestore(&t2_hae_lock, flags);
}
/* On SABLE with T2, we must use SPARSE memory even for 32-bit access. */
/*
* On SABLE with T2, we must use SPARSE memory even for 32-bit access,
* because we cannot access all of DENSE without changing its HAE.
*/
__EXTERN_INLINE void t2_writel(u32 b, unsigned long addr)
{
unsigned long msb;
unsigned long flags;
spin_lock_irqsave(&t2_hae_lock, flags);
t2_set_hae;
*(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18) = b;
spin_unlock_irqrestore(&t2_hae_lock, flags);
}
__EXTERN_INLINE void t2_writeq(u64 b, unsigned long addr)
{
unsigned long msb, work;
unsigned long flags;
spin_lock_irqsave(&t2_hae_lock, flags);
t2_set_hae;
work = (addr << 5) + T2_SPARSE_MEM + 0x18;
*(vuip)work = b;
*(vuip)(work + (4 << 5)) = b >> 32;
spin_unlock_irqrestore(&t2_hae_lock, flags);
}
__EXTERN_INLINE unsigned long t2_ioremap(unsigned long addr,
......
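The new t2_readq()/t2_writeq() routines and the reworked 32-bit paths above all build the same kind of sparse-space address: the bus offset is strided by 32 and a low offset selects the transfer width (0x00 byte, 0x08 word, 0x18 longword), with t2_hae_lock serializing any HAE window change. A small standalone sketch of just that arithmetic; the helper name and the example offset are illustrative, not kernel code.

/* Mirrors the (addr << 5) + base + width_offset arithmetic used by the
 * t2_read*() / t2_write*() routines in the hunk above. */
static unsigned long t2_sparse_address(unsigned long sparse_base,
				       unsigned long bus_addr,
				       unsigned long width_offset)
{
	return sparse_base + (bus_addr << 5) + width_offset;
}

/* Example: a byte read at bus offset 0x103 fetches the longword at
 * t2_sparse_address(T2_SPARSE_MEM, 0x103, 0x00), i.e. T2_SPARSE_MEM +
 * 0x2060, and then extracts byte lane 0x103 & 3 with __kernel_extbl(),
 * exactly as t2_readb() does. */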
......@@ -42,6 +42,7 @@
# define NR_IRQS 40
#elif defined(CONFIG_ALPHA_DP264) || \
defined(CONFIG_ALPHA_LYNX) || \
defined(CONFIG_ALPHA_SHARK) || \
defined(CONFIG_ALPHA_EIGER)
# define NR_IRQS 64
......
......@@ -232,8 +232,9 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
for (i = 0; i < NR_CPUS; i++)
if (cpu_online(i))
mm->context[i] = 0;
tsk->thread_info->pcb.ptbr
= ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
if (tsk != current)
tsk->thread_info->pcb.ptbr
= ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
return 0;
}
......
......@@ -610,8 +610,12 @@ static inline pid_t waitpid(int pid, int * wait_stat, int flags)
* "Conditional" syscalls
*
* What we want is __attribute__((weak,alias("sys_ni_syscall"))),
* but it doesn't work on all toolchains, so we just do it by hand
* but it doesn't work on all toolchains, so we just do it by hand.
*
* Note that we do *not* provide a parameter list to avoid
* conflicting with one of the syscall declarations in some
* of the relevant header files (including this one).
*/
#define cond_syscall(x) asmlinkage long x(void) __attribute__((weak,alias("sys_ni_syscall")));
#define cond_syscall(x) asmlinkage long x() __attribute__((weak,alias("sys_ni_syscall")));
#endif /* _ALPHA_UNISTD_H */
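As a concrete illustration of the reworded comment above, cond_syscall(sys_quotactl) now expands to roughly the following in the file that also defines sys_ni_syscall(): a weak, unprototyped alias that loses to any real definition and otherwise returns -ENOSYS. sys_quotactl is only an example name here.

/* Expansion of cond_syscall(sys_quotactl) with the new macro: no
 * parameter list, so it cannot clash with the prototypes in the
 * syscall headers. */
asmlinkage long sys_quotactl()
	__attribute__((weak, alias("sys_ni_syscall")));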
......@@ -100,7 +100,8 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
* Architecture-neutral AT_ values in 0-17, leave some room
* for more of them, start the x86-specific ones at 32.
*/
#define AT_SYSINFO 32
#define AT_SYSINFO 32
#define AT_SYSINFO_EH_FRAME 33
#ifdef __KERNEL__
#define SET_PERSONALITY(ex, ibcs2) set_personality((ibcs2)?PER_SVR4:PER_LINUX)
......@@ -118,9 +119,15 @@ extern void dump_smp_unlazy_fpu(void);
#define ELF_CORE_SYNC dump_smp_unlazy_fpu
#endif
#define ARCH_DLINFO \
do { \
NEW_AUX_ENT(AT_SYSINFO, 0xffffe000); \
/* Offset from the beginning of the page where the .eh_frame information
for the code in the vsyscall page starts. */
#define EH_FRAME_OFFSET 96
#define ARCH_DLINFO \
do { \
NEW_AUX_ENT(AT_SYSINFO, 0xffffe000); \
NEW_AUX_ENT(AT_SYSINFO_EH_FRAME, \
0xffffe000 + EH_FRAME_OFFSET); \
} while (0)
#endif
......
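From user space, the two auxiliary-vector entries added above can be read back at run time. A minimal sketch, assuming a libc that provides getauxval() (a later glibc addition, used here purely for illustration) and using the numeric values 32 and 33 from the hunk rather than any header constants.

#include <sys/auxv.h>
#include <stdio.h>

int main(void)
{
	/* 32 == AT_SYSINFO, 33 == AT_SYSINFO_EH_FRAME per the hunk above */
	unsigned long entry = getauxval(32);
	unsigned long eh_frame = getauxval(33);

	printf("vsyscall entry point: %#lx\n", entry);
	printf(".eh_frame for it:     %#lx\n", eh_frame);
	return 0;
}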
......@@ -56,7 +56,7 @@ extern void remove_arg_zero(struct linux_binprm *);
extern int search_binary_handler(struct linux_binprm *,struct pt_regs *);
extern int flush_old_exec(struct linux_binprm * bprm);
extern int setup_arg_pages(struct linux_binprm * bprm);
extern int copy_strings(int argc,char ** argv,struct linux_binprm *bprm);
extern int copy_strings(int argc,char __user * __user * argv,struct linux_binprm *bprm);
extern int copy_strings_kernel(int argc,char ** argv,struct linux_binprm *bprm);
extern void compute_creds(struct linux_binprm *binprm);
extern int do_coredump(long signr, int exit_code, struct pt_regs * regs);
......
......@@ -752,11 +752,11 @@ struct inode_operations {
struct seq_file;
extern ssize_t vfs_read(struct file *, char *, size_t, loff_t *);
extern ssize_t vfs_write(struct file *, const char *, size_t, loff_t *);
extern ssize_t vfs_readv(struct file *, const struct iovec *,
extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
extern ssize_t vfs_readv(struct file *, const struct iovec __user *,
unsigned long, loff_t *);
extern ssize_t vfs_writev(struct file *, const struct iovec *,
extern ssize_t vfs_writev(struct file *, const struct iovec __user *,
unsigned long, loff_t *);
/*
......@@ -1202,16 +1202,16 @@ extern int generic_file_mmap(struct file *, struct vm_area_struct *);
extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *);
extern int file_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size);
extern int file_send_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size);
extern ssize_t generic_file_read(struct file *, char *, size_t, loff_t *);
extern ssize_t generic_file_read(struct file *, char __user *, size_t, loff_t *);
int generic_write_checks(struct inode *inode, struct file *file,
loff_t *pos, size_t *count, int isblk);
extern ssize_t generic_file_write(struct file *, const char *, size_t, loff_t *);
extern ssize_t generic_file_write(struct file *, const char __user *, size_t, loff_t *);
extern ssize_t generic_file_aio_read(struct kiocb *, char *, size_t, loff_t);
extern ssize_t generic_file_aio_write(struct kiocb *, const char *, size_t, loff_t);
extern ssize_t generic_file_aio_write_nolock(struct kiocb *, const struct iovec *,
unsigned long, loff_t *);
extern ssize_t do_sync_read(struct file *filp, char *buf, size_t len, loff_t *ppos);
extern ssize_t do_sync_write(struct file *filp, const char *buf, size_t len, loff_t *ppos);
extern ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos);
extern ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos);
ssize_t generic_file_write_nolock(struct file *file, const struct iovec *iov,
unsigned long nr_segs, loff_t *ppos);
extern ssize_t generic_file_sendfile(struct file *, loff_t *, size_t, read_actor_t, void *);
......@@ -1247,9 +1247,9 @@ static inline void do_generic_file_read(struct file * filp, loff_t *ppos,
extern struct file_operations generic_ro_fops;
extern int vfs_readlink(struct dentry *, char *, int, const char *);
extern int vfs_readlink(struct dentry *, char __user *, int, const char *);
extern int vfs_follow_link(struct nameidata *, const char *);
extern int page_readlink(struct dentry *, char *, int);
extern int page_readlink(struct dentry *, char __user *, int);
extern int page_follow_link(struct dentry *, struct nameidata *);
extern int page_symlink(struct inode *inode, const char *symname, int len);
extern struct inode_operations page_symlink_inode_operations;
......
......@@ -39,7 +39,7 @@
#define MAY_LOCK 32
#define MAY_OWNER_OVERRIDE 64
#define MAY_LOCAL_ACCESS 128 /* IRIX doing local access check on device special file*/
#if (MAY_SATTR | MAY_TRUNC | MAY_LOCK | MAX_OWNER_OVERRIDE | MAY_LOCAL_ACCESS) & (MAY_READ | MAY_WRITE | MAY_EXEC | MAY_OWNER_OVERRIDE)
#if (MAY_SATTR | MAY_TRUNC | MAY_LOCK | MAY_OWNER_OVERRIDE | MAY_LOCAL_ACCESS) & (MAY_READ | MAY_WRITE | MAY_EXEC)
# error "please use a different value for MAY_SATTR or MAY_TRUNC or MAY_LOCK or MAY_OWNER_OVERRIDE."
#endif
#define MAY_CREATE (MAY_EXEC|MAY_WRITE)
......
......@@ -116,7 +116,7 @@ union nfsctl_res {
/*
* Kernel syscall implementation.
*/
extern asmlinkage long sys_nfsservctl(int, struct nfsctl_arg *, void *);
extern asmlinkage long sys_nfsservctl(int, struct nfsctl_arg __user *, void __user *);
extern int exp_addclient(struct nfsctl_client *ncp);
extern int exp_delclient(struct nfsctl_client *ncp);
extern int exp_export(struct nfsctl_export *nxp);
......
......@@ -629,7 +629,7 @@ extern void daemonize(const char *, ...);
extern int allow_signal(int);
extern task_t *child_reaper;
extern int do_execve(char *, char **, char **, struct pt_regs *);
extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *);
extern struct task_struct *do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int *, int *);
#ifdef CONFIG_SMP
......
#ifndef __LINUX_UIO_H
#define __LINUX_UIO_H
#include <linux/compiler.h>
#include <linux/types.h>
/*
......@@ -18,7 +19,7 @@
struct iovec
{
void *iov_base; /* BSD uses caddr_t (1003.1g requires void *) */
void __user *iov_base; /* BSD uses caddr_t (1003.1g requires void *) */
__kernel_size_t iov_len; /* Must be size_t (1003.1g) */
};
......
......@@ -226,3 +226,54 @@ asmlinkage long compat_sys_futex(u32 *uaddr, int op, int val,
}
return do_futex((unsigned long)uaddr, op, val, timeout);
}
extern asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
unsigned long *user_mask_ptr);
asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid,
unsigned int len,
compat_ulong_t *user_mask_ptr)
{
unsigned long kernel_mask;
mm_segment_t old_fs;
int ret;
if (get_user(kernel_mask, user_mask_ptr))
return -EFAULT;
old_fs = get_fs();
set_fs(KERNEL_DS);
ret = sys_sched_setaffinity(pid,
sizeof(kernel_mask),
&kernel_mask);
set_fs(old_fs);
return ret;
}
extern asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
unsigned long *user_mask_ptr);
asmlinkage int compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len,
compat_ulong_t *user_mask_ptr)
{
unsigned long kernel_mask;
mm_segment_t old_fs;
int ret;
old_fs = get_fs();
set_fs(KERNEL_DS);
ret = sys_sched_getaffinity(pid,
sizeof(kernel_mask),
&kernel_mask);
set_fs(old_fs);
if (ret > 0) {
if (put_user(kernel_mask, user_mask_ptr))
ret = -EFAULT;
else
ret = sizeof(compat_ulong_t);
}
return ret;
}
......@@ -165,7 +165,7 @@ static unsigned long __find_symbol(const char *name,
if (gplok) {
for (i = 0; i < mod->num_gpl_syms; i++) {
if (strcmp(mod->gpl_syms[i].name, name) == 0) {
*crc = symversion(mod->crcs, i);
*crc = symversion(mod->gpl_crcs, i);
return mod->gpl_syms[i].value;
}
}
......
......@@ -87,6 +87,8 @@ static spinlock_t idr_lock = SPIN_LOCK_UNLOCKED;
SIGEV_SIGNAL & \
SIGEV_THREAD & \
SIGEV_THREAD_ID)
#define REQUEUE_PENDING 1
/*
* The timer ID is turned into a timer address by idr_find().
* Verifying a valid ID consists of:
......@@ -245,7 +247,7 @@ static void schedule_next_timer(struct k_itimer *timr)
timr->it_overrun_last = timr->it_overrun;
timr->it_overrun = -1;
timr->it_requeue_pending = 0;
++timr->it_requeue_pending;
add_timer(&timr->it_timer);
}
......@@ -286,16 +288,16 @@ void do_schedule_next_timer(struct siginfo *info)
* without an info block. In this case, we will not get a call back to
* do_schedule_next_timer() so we do it here. This should be rare...
* An interesting problem can occure if, while a signal, and thus a call
* An interesting problem can occur if, while a signal, and thus a call
* back is pending, the timer is rearmed, i.e. stopped and restarted.
* We then need to sort out the call back and do the right thing. What
* we do is to put a counter in the info block and match it with the
* timers copy on the call back. If they don't match, we just ignore
* the call back. Note that we do allow the timer to be deleted while
* a signal is pending. The standard says we can allow that signal to
* be delivered, and we do.
* the call back. The counter is local to the timer and we use odd to
* indicate a call back is pending. Note that we do allow the timer to
* be deleted while a signal is pending. The standard says we can
* allow that signal to be delivered, and we do.
*/
static int pendcount = 1;
static void timer_notify_task(struct k_itimer *timr)
{
......@@ -310,17 +312,12 @@ static void timer_notify_task(struct k_itimer *timr)
info.si_code = SI_TIMER;
info.si_tid = timr->it_id;
info.si_value = timr->it_sigev_value;
if (timr->it_incr){
/*
* Don't allow a call back counter of zero...
* and avoid the test by using 2.
*/
pendcount += 2;
timr->it_requeue_pending = info.si_sys_private = pendcount;
}
if( timr->it_sigev_notify & SIGEV_THREAD_ID & MIPS_SIGEV){
if (timr->it_incr)
info.si_sys_private = ++timr->it_requeue_pending;
if (timr->it_sigev_notify & SIGEV_THREAD_ID & MIPS_SIGEV)
ret = send_sig_info(info.si_signo, &info, timr->it_process);
}else
else
ret = send_group_sig_info(info.si_signo, &info,
timr->it_process);
switch (ret) {
......@@ -617,7 +614,7 @@ do_timer_gettime(struct k_itimer *timr, struct itimerspec *cur_setting)
posix_time_before(&timr->it_timer, &now))
timr->it_timer.expires = expires = 0;
if (expires) {
if (timr->it_requeue_pending ||
if (timr->it_requeue_pending & REQUEUE_PENDING ||
(timr->it_sigev_notify & SIGEV_NONE))
while (posix_time_before(&timr->it_timer, &now))
posix_bump_timer(timr);
......@@ -779,7 +776,8 @@ do_timer_settime(struct k_itimer *timr, int flags,
#else
del_timer(&timr->it_timer);
#endif
timr->it_requeue_pending = 0;
timr->it_requeue_pending = (timr->it_requeue_pending + 2) &
~REQUEUE_PENDING;
timr->it_overrun_last = 0;
timr->it_overrun = -1;
/*
......
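The rewritten comment in the posix-timers hunk describes the new per-timer counter: it is made odd while a callback is outstanding, that value rides along in si_sys_private, and rearming the timer steps the counter past anything a stale callback could carry. A toy sketch of just that scheme, not the kernel code itself (which also deals with SIGEV_NONE, overrun accounting and locking).

#define REQUEUE_PENDING 1

struct toy_timer {
	int requeue_pending;		/* odd while a callback is outstanding */
};

/* Timer fires: mark a callback pending and return the cookie that will
 * travel with the signal (si_sys_private in the real code). */
static int toy_timer_fire(struct toy_timer *t)
{
	return ++t->requeue_pending;	/* counter becomes odd */
}

/* Timer is rearmed (settime): step past any outstanding cookie and
 * clear the pending bit, so an old callback can no longer match. */
static void toy_timer_rearm(struct toy_timer *t)
{
	t->requeue_pending = (t->requeue_pending + 2) & ~REQUEUE_PENDING;
}

/* Callback arrives carrying the cookie it was given when the timer fired. */
static void toy_timer_callback(struct toy_timer *t, int cookie)
{
	if (cookie != t->requeue_pending)
		return;			/* rearmed meanwhile: ignore stale callback */
	/* ... requeue the real timer ... */
	t->requeue_pending++;		/* counter becomes even again */
}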