Commit 655d0d45 authored by David Mosberger's avatar David Mosberger

ia64: Sync up with 2.5.29+.

parent be8a58ff
......@@ -15,6 +15,8 @@ OBJECTS = bootloader.o
targets-$(CONFIG_IA64_HP_SIM) += bootloader
targets-$(CONFIG_IA64_GENERIC) += bootloader
CFLAGS := $(CFLAGS) $(CFLAGS_KERNEL)
all: $(targets-y)
bootloader: $(OBJECTS)
......
......@@ -102,7 +102,6 @@ tristate 'Kernel support for MISC binaries' CONFIG_BINFMT_MISC
if [ "$CONFIG_IA64_HP_SIM" = "n" ]; then
source drivers/acpi/Config.in
bool 'PCI support' CONFIG_PCI
source drivers/pci/Config.in
......@@ -124,18 +123,26 @@ if [ "$CONFIG_NET" = "y" ]; then
fi
if [ "$CONFIG_IA64_HP_SIM" = "n" ]; then
source drivers/mtd/Config.in
source drivers/pnp/Config.in
source drivers/block/Config.in
source drivers/ieee1394/Config.in
source drivers/message/i2o/Config.in
source drivers/md/Config.in
source drivers/message/fusion/Config.in
mainmenu_option next_comment
comment 'ATA/ATAPI/MFM/RLL support'
tristate 'ATA/ATAPI/MFM/RLL support' CONFIG_IDE
if [ "$CONFIG_IDE" != "n" ]; then
source drivers/ide/Config.in
else
define_bool CONFIG_BLK_DEV_HD n
fi
endmenu
fi
mainmenu_option next_comment
comment 'SCSI support'
......@@ -157,32 +164,46 @@ if [ "$CONFIG_IA64_HP_SIM" = "n" ]; then
source drivers/net/Config.in
fi
endmenu
fi
source net/ax25/Config.in
source drivers/isdn/Config.in
mainmenu_option next_comment
comment 'CD-ROM drivers (not for SCSI or IDE/ATAPI drives)'
bool 'Support non-SCSI/IDE/ATAPI CDROM drives' CONFIG_CD_NO_IDESCSI
if [ "$CONFIG_CD_NO_IDESCSI" != "n" ]; then
source drivers/cdrom/Config.in
fi
endmenu
#
# input before char - char/joystick depends on it. As does USB.
#
source drivers/input/Config.in
source drivers/char/Config.in
#source drivers/misc/Config.in
source drivers/media/Config.in
else # HP_SIM
mainmenu_option next_comment
comment 'Block devices'
tristate 'Loopback device support' CONFIG_BLK_DEV_LOOP
dep_tristate 'Network block device support' CONFIG_BLK_DEV_NBD $CONFIG_NET
tristate 'RAM disk support' CONFIG_BLK_DEV_RAM
if [ "$CONFIG_BLK_DEV_RAM" = "y" -o "$CONFIG_BLK_DEV_RAM" = "m" ]; then
int ' Default RAM disk size' CONFIG_BLK_DEV_RAM_SIZE 4096
fi
bool 'Support non-SCSI/IDE/ATAPI CDROM drives' CONFIG_CD_NO_IDESCSI
fi # !HP_SIM
#
# input before char - char/joystick depends on it. As does USB.
#
source drivers/input/Config.in
source drivers/char/Config.in
#source drivers/misc/Config.in
source drivers/media/Config.in
endmenu
fi # HP_SIM
source fs/Config.in
if [ "$CONFIG_VT" = "y" ]; then
if [ "$CONFIG_IA64_HP_SIM" = "n" ]; then
if [ "$CONFIG_VT" = "y" ]; then
mainmenu_option next_comment
comment 'Console drivers'
bool 'VGA text console' CONFIG_VGA_CONSOLE
......@@ -191,9 +212,7 @@ if [ "$CONFIG_VT" = "y" ]; then
define_bool CONFIG_PCI_CONSOLE y
fi
endmenu
fi
if [ "$CONFIG_IA64_HP_SIM" = "n" ]; then
fi
mainmenu_option next_comment
comment 'Sound'
......@@ -210,10 +229,9 @@ if [ "$CONFIG_IA64_HP_SIM" = "n" ]; then
fi # !HP_SIM
if [ "$CONFIG_IA64_HP_SIM" != "n" -o "$CONFIG_IA64_GENERIC" != "n" ]; then
source arch/ia64/hp/Config.in
source arch/ia64/hp/sim/Config.in
fi
mainmenu_option next_comment
comment 'Kernel hacking'
......
/*
* Platform dependent support for HP simulator.
*
* Copyright (C) 1998, 1999 Hewlett-Packard Co
* Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 1998, 1999, 2002 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 1999 Vijay Chander <vijay@engr.sgi.com>
*/
#include <linux/console.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/param.h>
#include <linux/root_dev.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/kdev_t.h>
#include <linux/console.h>
#include <linux/root_dev.h>
#include <asm/delay.h>
#include <asm/irq.h>
......@@ -55,5 +56,5 @@ hpsim_setup (char **cmdline_p)
{
ROOT_DEV = Root_SDA1; /* default to first SCSI drive */
register_console (&hpsim_cons);
register_console(&hpsim_cons);
}
......@@ -62,7 +62,9 @@ struct disk_stat {
extern long ia64_ssc (long arg0, long arg1, long arg2, long arg3, int nr);
static int desc[8] = { -1, -1, -1, -1, -1, -1, -1, -1 };
static int desc[16] = {
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
};
static struct queue_entry {
Scsi_Cmnd *sc;
......@@ -148,9 +150,9 @@ simscsi_biosparam (Disk *disk, struct block_device *n, int ip[])
{
int size = disk->capacity;
ip[0] = 64;
ip[1] = 32;
ip[2] = size >> 11;
ip[0] = 64; /* heads */
ip[1] = 32; /* sectors */
ip[2] = size >> 11; /* cylinders */
return 0;
}
......@@ -229,6 +231,29 @@ simscsi_readwrite6 (Scsi_Cmnd *sc, int mode)
simscsi_readwrite(sc, mode, offset, sc->cmnd[4]*512);
}
static size_t
simscsi_get_disk_size (int fd)
{
struct disk_stat stat;
size_t bit, sectors = 0;
struct disk_req req;
char buf[512];
/*
* This is a bit kludgey: the simulator doesn't provide a direct way of determining
* the disk size, so we do a binary search, assuming a maximum disk size of 4GB.
*/
for (bit = (4UL << 30)/512; bit != 0; bit >>= 1) {
req.addr = __pa(&buf);
req.len = sizeof(buf);
ia64_ssc(fd, 1, __pa(&req), ((sectors | bit) - 1)*512, SSC_READ);
stat.fd = fd;
ia64_ssc(__pa(&stat), 0, 0, 0, SSC_WAIT_COMPLETION);
if (stat.count == sizeof(buf))
sectors |= bit;
}
return sectors - 1; /* return last valid sector number */
}
static void
simscsi_readwrite10 (Scsi_Cmnd *sc, int mode)
......@@ -247,6 +272,7 @@ int
simscsi_queuecommand (Scsi_Cmnd *sc, void (*done)(Scsi_Cmnd *))
{
char fname[MAX_ROOT_LEN+16];
size_t disk_size;
char *buf;
#if DEBUG_SIMSCSI
register long sp asm ("sp");
......@@ -258,14 +284,14 @@ simscsi_queuecommand (Scsi_Cmnd *sc, void (*done)(Scsi_Cmnd *))
sc->result = DID_BAD_TARGET << 16;
sc->scsi_done = done;
if (sc->target <= 7 && sc->lun == 0) {
if (sc->target <= 15 && sc->lun == 0) {
switch (sc->cmnd[0]) {
case INQUIRY:
if (sc->request_bufflen < 35) {
break;
}
sprintf (fname, "%s%c", simscsi_root, 'a' + sc->target);
desc[sc->target] = ia64_ssc (__pa(fname), SSC_READ_ACCESS|SSC_WRITE_ACCESS,
desc[sc->target] = ia64_ssc(__pa(fname), SSC_READ_ACCESS|SSC_WRITE_ACCESS,
0, 0, SSC_OPEN);
if (desc[sc->target] < 0) {
/* disk doesn't exist... */
......@@ -319,11 +345,13 @@ simscsi_queuecommand (Scsi_Cmnd *sc, void (*done)(Scsi_Cmnd *))
}
buf = sc->request_buffer;
disk_size = simscsi_get_disk_size(desc[sc->target]);
/* pretend to be a 1GB disk (partition table contains real stuff): */
buf[0] = 0x00;
buf[1] = 0x1f;
buf[2] = 0xff;
buf[3] = 0xff;
buf[0] = (disk_size >> 24) & 0xff;
buf[1] = (disk_size >> 16) & 0xff;
buf[2] = (disk_size >> 8) & 0xff;
buf[3] = (disk_size >> 0) & 0xff;
/* set block size of 512 bytes: */
buf[4] = 0;
buf[5] = 0;
......
......@@ -13,6 +13,7 @@
*
* 02/04/00 D. Mosberger Merged in serial.c bug fixes in rs_close().
* 02/25/00 D. Mosberger Synced up with 2.3.99pre-5 version of serial.c.
* 07/30/02 D. Mosberger Replace sti()/cli() with explicit spinlocks & local irq masking
*/
#include <linux/config.h>
......@@ -62,6 +63,7 @@ extern void ia64_ssc_connect_irq (long intr, long irq);
static char *serial_name = "SimSerial driver";
static char *serial_version = "0.6";
static spinlock_t serial_lock = SPIN_LOCK_UNLOCKED;
/*
* This has been extracted from asm/serial.h. We need one eventually but
......@@ -233,14 +235,14 @@ static void rs_put_char(struct tty_struct *tty, unsigned char ch)
if (!tty || !info->xmit.buf) return;
save_flags(flags); cli();
spin_lock_irqsave(&serial_lock, flags);
if (CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE) == 0) {
restore_flags(flags);
spin_unlock_irqrestore(&serial_lock, flags);
return;
}
info->xmit.buf[info->xmit.head] = ch;
info->xmit.head = (info->xmit.head + 1) & (SERIAL_XMIT_SIZE-1);
restore_flags(flags);
spin_unlock_irqrestore(&serial_lock, flags);
}
static _INLINE_ void transmit_chars(struct async_struct *info, int *intr_done)
......@@ -248,7 +250,7 @@ static _INLINE_ void transmit_chars(struct async_struct *info, int *intr_done)
int count;
unsigned long flags;
save_flags(flags); cli();
spin_lock_irqsave(&serial_lock, flags);
if (info->x_char) {
char c = info->x_char;
......@@ -291,7 +293,7 @@ static _INLINE_ void transmit_chars(struct async_struct *info, int *intr_done)
info->xmit.tail += count;
}
out:
restore_flags(flags);
spin_unlock_irqrestore(&serial_lock, flags);
}
static void rs_flush_chars(struct tty_struct *tty)
......@@ -315,7 +317,6 @@ static int rs_write(struct tty_struct * tty, int from_user,
if (!tty || !info->xmit.buf || !tmp_buf) return 0;
save_flags(flags);
if (from_user) {
down(&tmp_buf_sem);
while (1) {
......@@ -332,21 +333,26 @@ static int rs_write(struct tty_struct * tty, int from_user,
ret = -EFAULT;
break;
}
cli();
c1 = CIRC_SPACE_TO_END(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
spin_lock_irqsave(&serial_lock, flags);
{
c1 = CIRC_SPACE_TO_END(info->xmit.head, info->xmit.tail,
SERIAL_XMIT_SIZE);
if (c1 < c)
c = c1;
memcpy(info->xmit.buf + info->xmit.head, tmp_buf, c);
info->xmit.head = ((info->xmit.head + c) &
(SERIAL_XMIT_SIZE-1));
restore_flags(flags);
}
spin_unlock_irqrestore(&serial_lock, flags);
buf += c;
count -= c;
ret += c;
}
up(&tmp_buf_sem);
} else {
cli();
spin_lock_irqsave(&serial_lock, flags);
while (1) {
c = CIRC_SPACE_TO_END(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
if (count < c)
......@@ -361,7 +367,7 @@ static int rs_write(struct tty_struct * tty, int from_user,
count -= c;
ret += c;
}
restore_flags(flags);
spin_unlock_irqrestore(&serial_lock, flags);
}
/*
* Hey, we transmit directly from here in our case
......@@ -392,9 +398,9 @@ static void rs_flush_buffer(struct tty_struct *tty)
struct async_struct *info = (struct async_struct *)tty->driver_data;
unsigned long flags;
save_flags(flags); cli();
spin_lock_irqsave(&serial_lock, flags);
info->xmit.head = info->xmit.tail = 0;
restore_flags(flags);
spin_unlock_irqrestore(&serial_lock, flags);
wake_up_interruptible(&tty->write_wait);
......@@ -567,8 +573,8 @@ static void shutdown(struct async_struct * info)
state->irq);
#endif
save_flags(flags); cli(); /* Disable interrupts */
spin_lock_irqsave(&serial_lock, flags);
{
/*
* First unlink the serial port from the IRQ chain...
*/
......@@ -604,7 +610,8 @@ static void shutdown(struct async_struct * info)
if (info->tty) set_bit(TTY_IO_ERROR, &info->tty->flags);
info->flags &= ~ASYNC_INITIALIZED;
restore_flags(flags);
}
spin_unlock_irqrestore(&serial_lock, flags);
}
/*
......@@ -627,14 +634,13 @@ static void rs_close(struct tty_struct *tty, struct file * filp)
state = info->state;
save_flags(flags); cli();
spin_lock_irqsave(&serial_lock, flags);
if (tty_hung_up_p(filp)) {
#ifdef SIMSERIAL_DEBUG
printk("rs_close: hung_up\n");
#endif
MOD_DEC_USE_COUNT;
restore_flags(flags);
spin_unlock_irqrestore(&serial_lock, flags);
return;
}
#ifdef SIMSERIAL_DEBUG
......@@ -659,11 +665,11 @@ static void rs_close(struct tty_struct *tty, struct file * filp)
}
if (state->count) {
MOD_DEC_USE_COUNT;
restore_flags(flags);
spin_unlock_irqrestore(&serial_lock, flags);
return;
}
info->flags |= ASYNC_CLOSING;
restore_flags(flags);
spin_unlock_irqrestore(&serial_lock, flags);
/*
* Now we wait for the transmit buffer to clear; and we notify
......@@ -771,7 +777,7 @@ startup(struct async_struct *info)
if (!page)
return -ENOMEM;
save_flags(flags); cli();
spin_lock_irqsave(&serial_lock, flags);
if (info->flags & ASYNC_INITIALIZED) {
free_page(page);
......@@ -852,11 +858,11 @@ startup(struct async_struct *info)
}
info->flags |= ASYNC_INITIALIZED;
restore_flags(flags);
spin_unlock_irqrestore(&serial_lock, flags);
return 0;
errout:
restore_flags(flags);
spin_unlock_irqrestore(&serial_lock, flags);
return retval;
}
......
......@@ -27,6 +27,7 @@
#include "../drivers/acpi/include/acstruct.h"
#include "../drivers/acpi/include/acnamesp.h"
#include "../drivers/acpi/include/acutils.h"
#include "../drivers/acpi/acpi_bus.h"
#define PFX "hpzx1: "
......@@ -109,7 +110,7 @@ static int hp_cfg_write##sz (struct pci_dev *dev, int where, u##bits value) \
\
switch (where) { \
case PCI_BASE_ADDRESS_0: \
if (value == ~0) \
if (value == (u##bits) ~0) \
fake_dev->sizing = 1; \
break; \
default: \
......@@ -177,7 +178,7 @@ hpzx1_fake_pci_dev(unsigned long addr, unsigned int bus, unsigned int size)
* Drivers should ioremap what they need, but we have to do
* it here, too, so PCI config accesses work.
*/
dev->mapped_csrs = ioremap(dev->csr_base, dev->csr_size);
dev->mapped_csrs = (unsigned long) ioremap(dev->csr_base, dev->csr_size);
return dev;
}
......@@ -303,7 +304,7 @@ hpzx1_lba_probe(acpi_handle obj, u32 depth, void *context, void **ret)
if ((dev = hpzx1_fake_pci_dev(csr_base, busnum, csr_length)))
printk(KERN_INFO PFX "%s LBA at 0x%lx, _BBN 0x%02x; "
"pci dev %02x:%02x.%d\n",
name, csr_base, busnum, dev->bus,
name, csr_base, (unsigned int) busnum, dev->bus,
PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
return AE_OK;
......
......@@ -30,6 +30,8 @@
#include <asm/ia32.h>
#include <../drivers/char/drm/drm.h>
#include <../drivers/char/drm/mga_drm.h>
#include <../drivers/char/drm/i810_drm.h>
#define IOCTL_NR(a) ((a) & ~(_IOC_SIZEMASK << _IOC_SIZESHIFT))
......
......@@ -62,12 +62,13 @@ const char *
acpi_get_sysname (void)
{
#ifdef CONFIG_IA64_GENERIC
unsigned long rsdp_phys = 0;
unsigned long rsdp_phys;
struct acpi20_table_rsdp *rsdp;
struct acpi_table_xsdt *xsdt;
struct acpi_table_header *hdr;
if ((0 != acpi_find_rsdp(&rsdp_phys)) || !rsdp_phys) {
rsdp_phys = acpi_find_rsdp();
if (!rsdp_phys) {
printk("ACPI 2.0 RSDP not found, default to \"dig\"\n");
return "dig";
}
......@@ -101,6 +102,8 @@ acpi_get_sysname (void)
return "sn2";
# elif defined (CONFIG_IA64_DIG)
return "dig";
# elif defined (CONFIG_IA64_HP_ZX1)
return "hpzx1";
# else
# error Unknown platform. Fix acpi.c.
# endif
......@@ -132,9 +135,7 @@ acpi_get_crs (acpi_handle obj, acpi_buffer *buf)
if (!buf->pointer)
return -ENOMEM;
result = acpi_get_current_resources(obj, buf);
return result;
return acpi_get_current_resources(obj, buf);
}
acpi_resource *
......@@ -177,6 +178,8 @@ acpi_dispose_crs (acpi_buffer *buf)
/* Array to record platform interrupt vectors for generic interrupt routing. */
int platform_irq_list[ACPI_MAX_PLATFORM_IRQS] = { [0 ... ACPI_MAX_PLATFORM_IRQS - 1] = -1 };
enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_IOSAPIC;
/*
* Interrupt routing API for device drivers. Provides interrupt vector for
* a generic platform event. Currently only CPEI is implemented.
......@@ -191,10 +194,14 @@ acpi_request_vector (u32 int_type)
vector = platform_irq_list[int_type];
} else
printk("acpi_request_vector(): invalid interrupt type\n");
return vector;
}
char *
__acpi_map_table (unsigned long phys_addr, unsigned long size)
{
return __va(phys_addr);
}
/* --------------------------------------------------------------------------
Boot-time Table Parsing
......@@ -220,7 +227,6 @@ acpi_parse_lapic_addr_ovr (acpi_table_entry_header *header)
iounmap((void *) ipi_base_addr);
ipi_base_addr = (unsigned long) ioremap(lapic->address, 0);
}
return 0;
}
......@@ -273,7 +279,6 @@ acpi_parse_lapic_nmi (acpi_table_entry_header *header)
acpi_table_print_madt_entry(header);
/* TBD: Support lapic_nmi entries */
return 0;
}
......@@ -282,10 +287,10 @@ static int __init
acpi_find_iosapic (int global_vector, u32 *irq_base, char **iosapic_address)
{
struct acpi_table_iosapic *iosapic;
int ver = 0;
int max_pin = 0;
char *p = 0;
char *end = 0;
int ver;
int max_pin;
char *p;
char *end;
if (!irq_base || !iosapic_address)
return -ENODEV;
......@@ -341,9 +346,9 @@ static int __init
acpi_parse_plat_int_src (acpi_table_entry_header *header)
{
struct acpi_table_plat_int_src *plintsrc;
int vector = 0;
u32 irq_base = 0;
char *iosapic_address = NULL;
int vector;
u32 irq_base;
char *iosapic_address;
plintsrc = (struct acpi_table_plat_int_src *) header;
if (!plintsrc)
......@@ -356,7 +361,7 @@ acpi_parse_plat_int_src (acpi_table_entry_header *header)
return -ENODEV;
}
if (0 != acpi_find_iosapic(plintsrc->global_irq, &irq_base, &iosapic_address)) {
if (acpi_find_iosapic(plintsrc->global_irq, &irq_base, &iosapic_address)) {
printk(KERN_WARNING PREFIX "IOSAPIC not found\n");
return -ENODEV;
}
......@@ -365,7 +370,7 @@ acpi_parse_plat_int_src (acpi_table_entry_header *header)
* Get vector assignment for this IRQ, set attributes, and program the
* IOSAPIC routing table.
*/
vector = iosapic_register_platform_irq (plintsrc->type,
vector = iosapic_register_platform_irq(plintsrc->type,
plintsrc->global_irq,
plintsrc->iosapic_vector,
plintsrc->eid,
......@@ -398,7 +403,6 @@ acpi_parse_int_src_ovr (acpi_table_entry_header *header)
iosapic_register_legacy_irq(p->bus_irq, p->global_irq,
(p->flags.polarity == 1) ? 1 : 0,
(p->flags.trigger == 1) ? 1 : 0);
return 0;
}
......@@ -415,7 +419,6 @@ acpi_parse_nmi_src (acpi_table_entry_header *header)
acpi_table_print_madt_entry(header);
/* TBD: Support nimsrc entries */
return 0;
}
......@@ -431,15 +434,12 @@ acpi_parse_madt (unsigned long phys_addr, unsigned long size)
/* Get base address of IPI Message Block */
if (acpi_madt->lapic_address)
ipi_base_addr = (unsigned long)
ioremap(acpi_madt->lapic_address, 0);
ipi_base_addr = (unsigned long) ioremap(acpi_madt->lapic_address, 0);
printk(KERN_INFO PREFIX "Local APIC address 0x%lx\n", ipi_base_addr);
return 0;
}
static int __init
acpi_parse_fadt (unsigned long phys_addr, unsigned long size)
{
......@@ -461,22 +461,16 @@ acpi_parse_fadt (unsigned long phys_addr, unsigned long size)
return 0;
}
int __init
acpi_find_rsdp (unsigned long *rsdp_phys)
unsigned long __init
acpi_find_rsdp (void)
{
if (!rsdp_phys)
return -EINVAL;
unsigned long rsdp_phys = 0;
if (efi.acpi20) {
(*rsdp_phys) = __pa(efi.acpi20);
return 0;
}
else if (efi.acpi) {
if (efi.acpi20)
rsdp_phys = __pa(efi.acpi20);
else if (efi.acpi)
printk(KERN_WARNING PREFIX "v1.0/r0.71 tables no longer supported\n");
}
return -ENODEV;
return rsdp_phys;
}
......@@ -515,28 +509,27 @@ acpi_parse_spcr (unsigned long phys_addr, unsigned long size)
if ((spcr->base_addr.space_id != ACPI_SERIAL_PCICONF_SPACE) &&
(spcr->int_type == ACPI_SERIAL_INT_SAPIC))
{
u32 irq_base = 0;
char *iosapic_address = NULL;
int vector = 0;
u32 irq_base;
char *iosapic_address;
int vector;
/* We have a UART in memory space with an SAPIC interrupt */
global_int = ( (spcr->global_int[3] << 24) |
global_int = ((spcr->global_int[3] << 24) |
(spcr->global_int[2] << 16) |
(spcr->global_int[1] << 8) |
(spcr->global_int[0]) );
/* Which iosapic does this IRQ belong to? */
if (0 == acpi_find_iosapic(global_int, &irq_base, &iosapic_address)) {
vector = iosapic_register_irq (global_int, 1, 1,
if (!acpi_find_iosapic(global_int, &irq_base, &iosapic_address))
vector = iosapic_register_irq(global_int, 1, 1,
irq_base, iosapic_address);
}
}
return 0;
}
#endif /*CONFIG_SERIAL_ACPI*/
#endif /* CONFIG_SERIAL_ACPI */
int __init
......@@ -564,38 +557,31 @@ acpi_boot_init (char *cmdline)
/* Local APIC */
if (acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR,
acpi_parse_lapic_addr_ovr) < 0)
if (acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR, acpi_parse_lapic_addr_ovr) < 0)
printk(KERN_ERR PREFIX "Error parsing LAPIC address override entry\n");
if (acpi_table_parse_madt(ACPI_MADT_LSAPIC,
acpi_parse_lsapic) < 1)
if (acpi_table_parse_madt(ACPI_MADT_LSAPIC, acpi_parse_lsapic) < 1)
printk(KERN_ERR PREFIX "Error parsing MADT - no LAPIC entries\n");
if (acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI,
acpi_parse_lapic_nmi) < 0)
if (acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi) < 0)
printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
/* I/O APIC */
if (acpi_table_parse_madt(ACPI_MADT_IOSAPIC,
acpi_parse_iosapic) < 1)
printk(KERN_ERR PREFIX "Error parsing MADT - no IOAPIC entries\n");
if (acpi_table_parse_madt(ACPI_MADT_IOSAPIC, acpi_parse_iosapic) < 1)
printk(KERN_ERR PREFIX "Error parsing MADT - no IOSAPIC entries\n");
/* System-Level Interrupt Routing */
if (acpi_table_parse_madt(ACPI_MADT_PLAT_INT_SRC,
acpi_parse_plat_int_src) < 0)
if (acpi_table_parse_madt(ACPI_MADT_PLAT_INT_SRC, acpi_parse_plat_int_src) < 0)
printk(KERN_ERR PREFIX "Error parsing platform interrupt source entry\n");
if (acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR,
acpi_parse_int_src_ovr) < 0)
if (acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr) < 0)
printk(KERN_ERR PREFIX "Error parsing interrupt source overrides entry\n");
if (acpi_table_parse_madt(ACPI_MADT_NMI_SRC,
acpi_parse_nmi_src) < 0)
if (acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src) < 0)
printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
skip_madt:
skip_madt:
/* FADT says whether a legacy keyboard controller is present. */
if (acpi_table_parse(ACPI_FACP, acpi_parse_fadt) < 1)
......@@ -620,7 +606,6 @@ acpi_boot_init (char *cmdline)
#endif
/* Make boot-up look pretty */
printk("%d CPUs available, %d CPUs total\n", available_cpus, total_cpus);
return 0;
}
......@@ -643,14 +628,14 @@ acpi_get_prt (struct pci_vector_struct **vectors, int *count)
*vectors = NULL;
*count = 0;
if (acpi_prts.count <= 0) {
if (acpi_prt.count < 0) {
printk(KERN_ERR PREFIX "No PCI IRQ routing entries\n");
return -ENODEV;
}
/* Allocate vectors */
*vectors = kmalloc(sizeof(struct pci_vector_struct) * acpi_prts.count, GFP_KERNEL);
*vectors = kmalloc(sizeof(struct pci_vector_struct) * acpi_prt.count, GFP_KERNEL);
if (!(*vectors))
return -ENOMEM;
......@@ -658,15 +643,15 @@ acpi_get_prt (struct pci_vector_struct **vectors, int *count)
vector = *vectors;
list_for_each(node, &acpi_prts.entries) {
list_for_each(node, &acpi_prt.entries) {
entry = (struct acpi_prt_entry *)node;
vector[i].bus = entry->id.bus;
vector[i].pci_id = ((u32) entry->id.dev << 16) | 0xffff;
vector[i].pin = entry->id.pin;
vector[i].irq = entry->source.index;
vector[i].pci_id = ((u32) entry->id.device << 16) | 0xffff;
vector[i].pin = entry->pin;
vector[i].irq = entry->link.index;
i++;
}
*count = acpi_prts.count;
*count = acpi_prt.count;
return 0;
}
......@@ -678,8 +663,7 @@ acpi_get_interrupt_model (int *type)
if (!type)
return -EINVAL;
*type = ACPI_INT_MODEL_IOSAPIC;
*type = ACPI_IRQ_MODEL_IOSAPIC;
return 0;
}
......
......@@ -175,6 +175,7 @@ GLOBAL_ENTRY(ia64_switch_to)
(p6) srlz.d
ld8 sp=[r21] // load kernel stack pointer of new task
mov IA64_KR(CURRENT)=r20 // update "current" application register
mov r8=r13 // return pointer to previously running task
mov r13=in0 // set "current" pointer
;;
DO_LOAD_SWITCH_STACK
......
......@@ -88,12 +88,6 @@ EXPORT_SYMBOL(ia64_cpu_to_sapicid);
#include <asm/smplock.h>
EXPORT_SYMBOL(kernel_flag);
/* #include <asm/system.h> */
EXPORT_SYMBOL(__global_sti);
EXPORT_SYMBOL(__global_cli);
EXPORT_SYMBOL(__global_save_flags);
EXPORT_SYMBOL(__global_restore_flags);
#else /* !CONFIG_SMP */
EXPORT_SYMBOL(__flush_tlb_all);
......
......@@ -422,6 +422,7 @@ register_irq (u32 global_vector, int vector, int pin, unsigned char delivery,
irq_desc_t *idesc;
struct hw_interrupt_type *irq_type;
gsi_to_vector(global_vector) = vector;
iosapic_irq[vector].pin = pin;
iosapic_irq[vector].polarity = polarity ? IOSAPIC_POL_HIGH : IOSAPIC_POL_LOW;
iosapic_irq[vector].dmode = delivery;
......@@ -640,7 +641,7 @@ iosapic_init_pci_irq (void)
unsigned int irq;
char *addr;
if (0 != acpi_get_prt(&pci_irq.route, &pci_irq.num_routes))
if (acpi_get_prt(&pci_irq.route, &pci_irq.num_routes))
return;
for (i = 0; i < pci_irq.num_routes; i++) {
......
......@@ -200,277 +200,12 @@ int show_interrupts(struct seq_file *p, void *v)
return 0;
}
/*
* Global interrupt locks for SMP. Allow interrupts to come in on any
* CPU, yet make cli/sti act globally to protect critical regions..
*/
#ifdef CONFIG_SMP
unsigned int global_irq_holder = NO_PROC_ID;
unsigned volatile long global_irq_lock; /* pedantic: long for set_bit --RR */
extern void show_stack(unsigned long* esp);
static void show(char * str)
{
int i;
int cpu = smp_processor_id();
printk("\n%s, CPU %d:\n", str, cpu);
printk("irq: %d [",irqs_running());
for(i=0;i < NR_CPUS;i++)
printk(" %d",irq_count(i));
printk(" ]\nbh: %d [",spin_is_locked(&global_bh_lock) ? 1 : 0);
for(i=0;i < NR_CPUS;i++)
printk(" %d",bh_count(i));
printk(" ]\nStack dumps:");
#if defined(CONFIG_IA64)
/*
* We can't unwind the stack of another CPU without access to
* the registers of that CPU. And sending an IPI when we're
* in a potentially wedged state doesn't sound like a smart
* idea.
*/
#elif defined(CONFIG_X86)
for(i=0;i< NR_CPUS;i++) {
unsigned long esp;
if(i==cpu)
continue;
printk("\nCPU %d:",i);
esp = init_tss[i].esp0;
if(esp==NULL) {
/* tss->esp0 is set to NULL in cpu_init(),
* it's initialized when the cpu returns to user
* space. -- manfreds
*/
printk(" <unknown> ");
continue;
}
esp &= ~(THREAD_SIZE-1);
esp += sizeof(struct task_struct);
show_stack((void*)esp);
}
#else
You lose...
#endif
printk("\nCPU %d:",cpu);
show_stack(NULL);
printk("\n");
}
#define MAXCOUNT 100000000
/*
* I had a lockup scenario where a tight loop doing
* spin_unlock()/spin_lock() on CPU#1 was racing with
* spin_lock() on CPU#0. CPU#0 should have noticed spin_unlock(), but
* apparently the spin_unlock() information did not make it
* through to CPU#0 ... nasty, is this by design, do we have to limit
* 'memory update oscillation frequency' artificially like here?
*
* Such 'high frequency update' races can be avoided by careful design, but
* some of our major constructs like spinlocks use similar techniques,
* it would be nice to clarify this issue. Set this define to 0 if you
* want to check whether your system freezes. I suspect the delay done
* by SYNC_OTHER_CORES() is in correlation with 'snooping latency', but
* i thought that such things are guaranteed by design, since we use
* the 'LOCK' prefix.
*/
#define SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND 0
#if SUSPECTED_CPU_OR_CHIPSET_BUG_WORKAROUND
# define SYNC_OTHER_CORES(x) udelay(x+1)
#else
/*
* We have to allow irqs to arrive between local_irq_enable and local_irq_disable
*/
# ifdef CONFIG_IA64
# define SYNC_OTHER_CORES(x) __asm__ __volatile__ ("nop 0")
# else
# define SYNC_OTHER_CORES(x) __asm__ __volatile__ ("nop")
# endif
#endif
static inline void wait_on_irq(void)
{
int count = MAXCOUNT;
for (;;) {
/*
* Wait until all interrupts are gone. Wait
* for bottom half handlers unless we're
* already executing in one..
*/
if (!irqs_running())
if (really_local_bh_count() || !spin_is_locked(&global_bh_lock))
break;
/* Duh, we have to loop. Release the lock to avoid deadlocks */
smp_mb__before_clear_bit(); /* need barrier before releasing lock... */
clear_bit(0,&global_irq_lock);
for (;;) {
if (!--count) {
show("wait_on_irq");
count = ~0;
}
local_irq_enable();
SYNC_OTHER_CORES(smp_processor_id());
local_irq_disable();
if (irqs_running())
continue;
if (global_irq_lock)
continue;
if (!really_local_bh_count() && spin_is_locked(&global_bh_lock))
continue;
if (!test_and_set_bit(0,&global_irq_lock))
break;
}
}
}
/*
* This is called when we want to synchronize with
* interrupts. We may for example tell a device to
* stop sending interrupts: but to make sure there
* are no interrupts that are executing on another
* CPU we need to call this function.
*/
void synchronize_irq(void)
{
if (irqs_running()) {
/* Stupid approach */
cli();
sti();
}
}
static inline void get_irqlock(void)
{
if (test_and_set_bit(0,&global_irq_lock)) {
/* do we already hold the lock? */
if (smp_processor_id() == global_irq_holder)
return;
/* Uhhuh.. Somebody else got it. Wait.. */
do {
do {
#ifdef CONFIG_X86
rep_nop();
#endif
} while (test_bit(0,&global_irq_lock));
} while (test_and_set_bit(0,&global_irq_lock));
}
/*
* We also to make sure that nobody else is running
* in an interrupt context.
*/
wait_on_irq();
/*
* Ok, finally..
*/
global_irq_holder = smp_processor_id();
}
#define EFLAGS_IF_SHIFT 9
/*
* A global "cli()" while in an interrupt context
* turns into just a local cli(). Interrupts
* should use spinlocks for the (very unlikely)
* case that they ever want to protect against
* each other.
*
* If we already have local interrupts disabled,
* this will not turn a local disable into a
* global one (problems with spinlocks: this makes
* save_flags+cli+sti usable inside a spinlock).
*/
void __global_cli(void)
{
unsigned int flags;
#ifdef CONFIG_IA64
local_save_flags(flags);
if (flags & IA64_PSR_I) {
local_irq_disable();
if (!really_local_irq_count())
get_irqlock();
}
#else
local_save_flags(flags);
if (flags & (1 << EFLAGS_IF_SHIFT)) {
local_irq_disable();
if (!really_local_irq_count())
get_irqlock();
}
#endif
}
void __global_sti(void)
{
if (!really_local_irq_count())
release_irqlock(smp_processor_id());
local_irq_enable();
}
/*
* SMP flags value to restore to:
* 0 - global cli
* 1 - global sti
* 2 - local cli
* 3 - local sti
*/
unsigned long __global_save_flags(void)
{
int retval;
int local_enabled;
unsigned long flags;
int cpu = smp_processor_id();
local_save_flags(flags);
#ifdef CONFIG_IA64
local_enabled = (flags & IA64_PSR_I) != 0;
#else
local_enabled = (flags >> EFLAGS_IF_SHIFT) & 1;
#endif
/* default to local */
retval = 2 + local_enabled;
/* check for global flags if we're not in an interrupt */
if (!really_local_irq_count()) {
if (local_enabled)
retval = 1;
if (global_irq_holder == cpu)
retval = 0;
}
return retval;
}
void __global_restore_flags(unsigned long flags)
#if CONFIG_SMP
inline void synchronize_irq(unsigned int irq)
{
switch (flags) {
case 0:
__global_cli();
break;
case 1:
__global_sti();
break;
case 2:
local_irq_disable();
break;
case 3:
local_irq_enable();
break;
default:
printk("global_restore_flags: %08lx (%08lx)\n",
flags, (&flags)[-1]);
}
while (irq_desc(irq)->status & IRQ_INPROGRESS)
cpu_relax();
}
#endif
/*
......@@ -482,11 +217,7 @@ void __global_restore_flags(unsigned long flags)
*/
int handle_IRQ_event(unsigned int irq, struct pt_regs * regs, struct irqaction * action)
{
int status;
local_irq_enter(irq);
status = 1; /* Force the "do bottom halves" bit */
int status = 1; /* Force the "do bottom halves" bit */
if (!(action->flags & SA_INTERRUPT))
local_irq_enable();
......@@ -500,11 +231,16 @@ int handle_IRQ_event(unsigned int irq, struct pt_regs * regs, struct irqaction *
add_interrupt_randomness(irq);
local_irq_disable();
local_irq_exit(irq);
return status;
}
/*
* Generic enable/disable code: this just calls
* down into the PIC-specific version for the actual
* hardware disable after having gotten the irq
* controller lock.
*/
/**
* disable_irq_nosync - disable an irq without waiting
* @irq: Interrupt to disable
......@@ -546,14 +282,7 @@ inline void disable_irq_nosync(unsigned int irq)
void disable_irq(unsigned int irq)
{
disable_irq_nosync(irq);
#ifdef CONFIG_SMP
if (!really_local_irq_count()) {
do {
barrier();
} while (irq_desc(irq)->status & IRQ_INPROGRESS);
}
#endif
synchronize_irq(irq);
}
/**
......@@ -616,6 +345,7 @@ unsigned int do_IRQ(unsigned long irq, struct pt_regs *regs)
struct irqaction * action;
unsigned int status;
irq_enter();
kstat.irqs[cpu][irq]++;
if (desc->status & IRQ_PER_CPU) {
......@@ -682,6 +412,7 @@ unsigned int do_IRQ(unsigned long irq, struct pt_regs *regs)
desc->handler->end(irq);
spin_unlock(&desc->lock);
}
irq_exit();
return 1;
}
......@@ -811,7 +542,7 @@ void free_irq(unsigned int irq, void *dev_id)
#ifdef CONFIG_SMP
/* Wait to make sure it's not being used on another CPU */
while (desc->status & IRQ_INPROGRESS)
barrier();
synchronize_irq(irq);
#endif
kfree(action);
return;
......@@ -864,7 +595,7 @@ unsigned long probe_irq_on(void)
/* Wait for longstanding interrupts to trigger. */
for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
/* about 20ms delay */ synchronize_irq();
/* about 20ms delay */ barrier();
/*
* enable any unassigned irqs
......@@ -887,7 +618,7 @@ unsigned long probe_irq_on(void)
* Wait for spurious interrupts to trigger
*/
for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
/* about 100ms delay */ synchronize_irq();
/* about 100ms delay */ barrier();
/*
* Now filter out any obviously spurious interrupts
......
......@@ -54,6 +54,11 @@ __u8 isa_irq_to_vector_map[16] = {
0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21
};
/*
* GSI to IA-64 vector translation table.
*/
__u8 gsi_to_vector_map[255];
int
ia64_alloc_irq (void)
{
......
......@@ -626,9 +626,12 @@ ia64_mca_wakeup_all(void)
void
ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *ptregs)
{
int flags, cpu = 0;
unsigned long flags;
int cpu = 0;
/* Mask all interrupts */
save_and_cli(flags);
#warning XXX fix me: this used to be: save_and_cli(flags);
local_irq_save(flags);
#ifdef CONFIG_SMP
cpu = cpu_logical_id(hard_smp_processor_id());
......@@ -646,7 +649,7 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *ptregs)
ia64_mca_wakeup_ipi_wait();
/* Enable all interrupts */
restore_flags(flags);
local_irq_restore(flags);
}
......
......@@ -165,7 +165,7 @@ struct pci_ops pci_sal_ops = {
*/
struct pci_bus *
pcibios_scan_root(int seg, int bus)
pcibios_scan_root(int bus)
{
struct list_head *list = NULL;
struct pci_bus *pci_bus = NULL;
......@@ -174,12 +174,12 @@ pcibios_scan_root(int seg, int bus)
pci_bus = pci_bus_b(list);
if (pci_bus->number == bus) {
/* Already scanned */
printk("PCI: Bus (%02x:%02x) already probed\n", seg, bus);
printk("PCI: Bus (%02x) already probed\n", bus);
return pci_bus;
}
}
printk("PCI: Probing PCI hardware on bus (%02x:%02x)\n", seg, bus);
printk("PCI: Probing PCI hardware on bus (%02x)\n", bus);
return pci_scan_bus(bus, pci_root_ops, NULL);
}
......
......@@ -616,7 +616,7 @@ pfm_remove_smpl_mapping(struct task_struct *task)
down_write(&task->mm->mmap_sem);
r = do_munmap(task->mm, ctx->ctx_smpl_vaddr, psb->psb_size);
r = do_munmap(task->mm, ctx->ctx_smpl_vaddr, psb->psb_size, 0);
up_write(&task->mm->mmap_sem);
if (r !=0) {
......
/*
* SMP boot-related support
*
* Copyright (C) 2001 David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 1998-2002 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*
* 01/05/16 Rohit Seth <rohit.seth@intel.com> Moved SMP booting functions from smp.c to here.
* 01/04/27 David Mosberger <davidm@hpl.hp.com> Added ITC synching code.
* 02/07/31 David Mosberger <davidm@hpl.hp.com> Switch over to hotplug-CPU boot-sequence.
* smp_boot_cpus()/smp_commence() is replaced by
* smp_prepare_cpus()/__cpu_up()/smp_cpus_done().
*/
......@@ -66,18 +70,16 @@ static volatile unsigned long go[SLAVE + 1];
#define DEBUG_ITC_SYNC 0
extern void __init calibrate_delay(void);
extern void start_ap(void);
extern void __init calibrate_delay (void);
extern void start_ap (void);
extern unsigned long ia64_iobase;
int cpucount;
task_t *task_for_booting_cpu;
/* Setup configured maximum number of CPUs to activate */
static int max_cpus = -1;
/* Bitmask of currently online CPUs */
volatile unsigned long cpu_online_map;
unsigned long phys_cpu_present_map;
/* which logical CPU number maps to which CPU (physical APIC ID) */
volatile int ia64_cpu_to_sapicid[NR_CPUS];
......@@ -86,44 +88,12 @@ static volatile unsigned long cpu_callin_map;
struct smp_boot_data smp_boot_data __initdata;
/* Set when the idlers are all forked */
volatile int smp_threads_ready;
unsigned long ap_wakeup_vector = -1; /* External Int use to wakeup APs */
char __initdata no_int_routing;
unsigned char smp_int_redirect; /* are INT and IPI redirectable by the chipset? */
/*
* Setup routine for controlling SMP activation
*
* Command-line option of "nosmp" or "maxcpus=0" will disable SMP
* activation entirely (the MPS table probe still happens, though).
*
* Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
* greater than 0, limits the maximum number of CPUs activated in
* SMP mode to <NUM>.
*/
static int __init
nosmp (char *str)
{
max_cpus = 0;
return 1;
}
__setup("nosmp", nosmp);
static int __init
maxcpus (char *str)
{
get_option(&str, &max_cpus);
return 1;
}
__setup("maxcpus=", maxcpus);
static int __init
nointroute (char *str)
{
......@@ -299,7 +269,7 @@ smp_setup_percpu_timer (void)
static volatile atomic_t smp_commenced = ATOMIC_INIT(0);
void __init
static void __init
smp_commence (void)
{
/*
......@@ -308,7 +278,7 @@ smp_commence (void)
Dprintk("Setting commenced=1, go go go\n");
wmb();
atomic_set(&smp_commenced,1);
atomic_set(&smp_commenced, 1);
}
......@@ -405,6 +375,9 @@ do_boot_cpu (int sapicid)
int timeout, cpu;
cpu = ++cpucount;
set_bit(cpu, &phys_cpu_present_map);
/*
* We can't use kernel_thread since we must avoid to
* reschedule the child.
......@@ -466,8 +439,8 @@ smp_tune_scheduling (void)
/*
* Cycle through the APs sending Wakeup IPIs to boot each.
*/
void __init
smp_boot_cpus (void)
static void __init
smp_boot_cpus (unsigned int max_cpus)
{
int sapicid, cpu;
int boot_cpu_id = hard_smp_processor_id();
......@@ -486,13 +459,13 @@ smp_boot_cpus (void)
*/
set_bit(0, &cpu_online_map);
set_bit(0, &cpu_callin_map);
set_bit(0, &phys_cpu_present_map);
local_cpu_data->loops_per_jiffy = loops_per_jiffy;
ia64_cpu_to_sapicid[0] = boot_cpu_id;
printk("Boot processor id 0x%x/0x%x\n", 0, boot_cpu_id);
global_irq_holder = NO_PROC_ID;
current_thread_info()->cpu = 0;
smp_tune_scheduling();
......@@ -552,6 +525,29 @@ smp_boot_cpus (void)
;
}
void __init
smp_prepare_cpus (unsigned int max_cpus)
{
smp_boot_cpus(max_cpus);
}
int __devinit
__cpu_up (unsigned int cpu)
{
/*
* Yeah, that's cheesy, but it will do until there is real hotplug support and in
* the meantime, this gives time for the interface changes to settle down...
*/
smp_commence();
return 0;
}
void __init
smp_cpus_done (unsigned int max_cpus)
{
/* nuthing... */
}
/*
* Assume that CPU's have been discovered by some platform-dependant interface. For
* SoftSDV/Lion, that would be ACPI.
......@@ -571,9 +567,6 @@ init_smp_config(void)
ap_startup = (struct fptr *) start_ap;
sal_ret = ia64_sal_set_vectors(SAL_VECTOR_OS_BOOT_RENDEZ,
__pa(ap_startup->fp), __pa(ap_startup->gp), 0, 0, 0, 0);
if (sal_ret < 0) {
printk("SMP: Can't set SAL AP Boot Rendezvous: %s\n Forcing UP mode\n",
ia64_sal_strerror(sal_ret));
max_cpus = 0;
}
if (sal_ret < 0)
printk("SMP: Can't set SAL AP Boot Rendezvous: %s\n", ia64_sal_strerror(sal_ret));
}
......@@ -82,7 +82,6 @@ asmlinkage unsigned long
ia64_shmat (int shmid, void *shmaddr, int shmflg, long arg3, long arg4, long arg5, long arg6,
long arg7, long stack)
{
extern int sys_shmat (int shmid, char *shmaddr, int shmflg, ulong *raddr);
struct pt_regs *regs = (struct pt_regs *) &stack;
unsigned long raddr;
int retval;
......@@ -120,7 +119,7 @@ ia64_brk (unsigned long brk, long arg1, long arg2, long arg3,
/* Always allow shrinking brk. */
if (brk <= mm->brk) {
if (!do_munmap(mm, newbrk, oldbrk-newbrk))
if (!do_munmap(mm, newbrk, oldbrk-newbrk, 1))
goto set_brk;
goto out;
}
......@@ -138,10 +137,6 @@ ia64_brk (unsigned long brk, long arg1, long arg2, long arg3,
if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
goto out;
/* Check if we have enough memory.. */
if (!vm_enough_memory((newbrk-oldbrk) >> PAGE_SHIFT))
goto out;
/* Ok, looks good - let it rip. */
if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
goto out;
......
......@@ -62,14 +62,14 @@ trap_init (void)
void
bust_spinlocks (int yes)
{
int loglevel_save = console_loglevel;
spin_lock_init(&timerlist_lock);
if (yes) {
oops_in_progress = 1;
#ifdef CONFIG_SMP
global_irq_lock = 0; /* Many serial drivers do __global_cli() */
#endif
} else {
int loglevel_save = console_loglevel;
return;
}
#ifdef CONFIG_VT
unblank_screen();
#endif
......@@ -82,7 +82,6 @@ bust_spinlocks (int yes)
console_loglevel = 15; /* NMI oopser may have shut the console up */
printk(" ");
console_loglevel = loglevel_save;
}
}
void
......
......@@ -19,30 +19,38 @@ obj-$(CONFIG_MCKINLEY) += copy_page_mck.o memcpy_mck.o
IGNORE_FLAGS_OBJS = __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \
__divdi3.o __udivdi3.o __moddi3.o __umoddi3.o
$(L_TARGET): $(obj-y) $(export-objs)
include $(TOPDIR)/Rules.make
AFLAGS___divdi3.o =
AFLAGS___udivdi3.o = -DUNSIGNED
AFLAGS___moddi3.o = -DMODULO
AFLAGS___umoddi3.o = -DUNSIGNED -DMODULO
AFLAGS___divsi3.o =
AFLAGS___udivsi3.o = -DUNSIGNED
AFLAGS___modsi3.o = -DMODULO
AFLAGS___umodsi3.o = -DUNSIGNED -DMODULO
__divdi3.o: idiv64.S
$(CC) $(AFLAGS) -c -o $@ $<
$(cmd_as_o_S)
__udivdi3.o: idiv64.S
$(CC) $(AFLAGS) -c -DUNSIGNED -c -o $@ $<
$(cmd_as_o_S)
__moddi3.o: idiv64.S
$(CC) $(AFLAGS) -c -DMODULO -c -o $@ $<
$(cmd_as_o_S)
__umoddi3.o: idiv64.S
$(CC) $(AFLAGS) -c -DMODULO -DUNSIGNED -c -o $@ $<
$(cmd_as_o_S)
__divsi3.o: idiv32.S
$(CC) $(AFLAGS) -c -o $@ $<
$(cmd_as_o_S)
__udivsi3.o: idiv32.S
$(CC) $(AFLAGS) -c -DUNSIGNED -c -o $@ $<
$(cmd_as_o_S)
__modsi3.o: idiv32.S
$(CC) $(AFLAGS) -c -DMODULO -c -o $@ $<
$(cmd_as_o_S)
__umodsi3.o: idiv32.S
$(CC) $(AFLAGS) -c -DMODULO -DUNSIGNED -c -o $@ $<
include $(TOPDIR)/Rules.make
$(cmd_as_o_S)
......@@ -425,7 +425,8 @@ swiotlb_map_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int d
addr = SG_ENT_VIRT_ADDRESS(sg);
pci_addr = virt_to_phys(addr);
if ((pci_addr & ~hwdev->dma_mask) != 0)
sg->dma_address = map_single(hwdev, addr, sg->length, direction);
sg->dma_address = (dma_addr_t)
map_single(hwdev, addr, sg->length, direction);
else
sg->dma_address = pci_addr;
sg->dma_length = sg->length;
......@@ -447,7 +448,7 @@ swiotlb_unmap_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int
for (i = 0; i < nelems; i++, sg++)
if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
unmap_single(hwdev, sg->dma_address, sg->dma_length, direction);
unmap_single(hwdev, (void *) sg->dma_address, sg->dma_length, direction);
else if (direction == PCI_DMA_FROMDEVICE)
mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
}
......@@ -469,7 +470,7 @@ swiotlb_sync_sg (struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int
for (i = 0; i < nelems; i++, sg++)
if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
sync_single(hwdev, sg->dma_address, sg->dma_length, direction);
sync_single(hwdev, (void *) sg->dma_address, sg->dma_length, direction);
}
unsigned long
......
......@@ -41,6 +41,8 @@ struct ia64_ctx ia64_ctx = {
.max_ctx = ~0U
};
u8 ia64_need_tlb_flush __per_cpu_data;
/*
* Acquire the ia64_ctx.lock before calling this function!
*/
......@@ -79,7 +81,7 @@ wrap_mmu_context (struct mm_struct *mm)
}
read_unlock(&tasklist_lock);
/* can't call flush_tlb_all() here because of race condition with O(1) scheduler [EF] */
for (i = 0; i < smp_num_cpus; ++i)
for (i = 0; i < NR_CPUS; ++i)
if (i != smp_processor_id())
per_cpu(ia64_need_tlb_flush, i) = 1;
__flush_tlb_all();
......
......@@ -4,7 +4,9 @@ TARGET = $(TOPDIR)/include/asm-ia64/offsets.h
all:
mrproper:
fastdep:
mrproper: clean
clean:
rm -f print_offsets.s print_offsets offsets.h
......
......@@ -41,9 +41,6 @@ SECTIONS
/* Read-only data */
. = ALIGN(16);
__gp = . + 0x200000; /* gp must be 16-byte aligned for exc. table */
/* Global data */
_data = .;
......@@ -146,6 +143,9 @@ SECTIONS
.data : AT(ADDR(.data) - PAGE_OFFSET)
{ *(.data) *(.gnu.linkonce.d*) CONSTRUCTORS }
. = ALIGN(16);
__gp = . + 0x200000; /* gp must be 16-byte aligned for exc. table */
.got : AT(ADDR(.got) - PAGE_OFFSET)
{ *(.got.plt) *(.got) }
/* We want the small data sections together, so single-instruction offsets
......
......@@ -30,11 +30,74 @@
#ifdef __KERNEL__
#define __acpi_map_table(phys_addr, size) __va(phys_addr)
#define COMPILER_DEPENDENT_INT64 long
#define COMPILER_DEPENDENT_UINT64 unsigned long
/*
* Calling conventions:
*
* ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads)
* ACPI_EXTERNAL_XFACE - External ACPI interfaces
* ACPI_INTERNAL_XFACE - Internal ACPI interfaces
* ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces
*/
#define ACPI_SYSTEM_XFACE
#define ACPI_EXTERNAL_XFACE
#define ACPI_INTERNAL_XFACE
#define ACPI_INTERNAL_VAR_XFACE
/* Asm macros */
#define ACPI_ASM_MACROS
#define BREAKPOINT3
#define ACPI_DISABLE_IRQS() local_irq_disable()
#define ACPI_ENABLE_IRQS() local_irq_enable()
#define ACPI_FLUSH_CPU_CACHE()
#define ACPI_ACQUIRE_GLOBAL_LOCK(GLptr, Acq) \
do { \
__asm__ volatile ("1: ld4 r29=%1\n" \
";;\n" \
"mov ar.ccv=r29\n" \
"mov r2=r29\n" \
"shr.u r30=r29,1\n" \
"and r29=-4,r29\n" \
";;\n" \
"add r29=2,r29\n" \
"and r30=1,r30\n" \
";;\n" \
"add r29=r29,r30\n" \
";;\n" \
"cmpxchg4.acq r30=%1,r29,ar.ccv\n" \
";;\n" \
"cmp.eq p6,p7=r2,r30\n" \
"(p7) br.dpnt.few 1b\n" \
"cmp.gt p8,p9=3,r29\n" \
";;\n" \
"(p8) mov %0=-1\n" \
"(p9) mov %0=r0\n" \
:"=r"(Acq):"m"(GLptr):"r2","r29","r30","memory"); \
} while (0)
#define ACPI_RELEASE_GLOBAL_LOCK(GLptr, Acq) \
do { \
__asm__ volatile ("1: ld4 r29=%1\n" \
";;\n" \
"mov ar.ccv=r29\n" \
"mov r2=r29\n" \
"and r29=-4,r29\n" \
";;\n" \
"cmpxchg4.acq r30=%1,r29,ar.ccv\n" \
";;\n" \
"cmp.eq p6,p7=r2,r30\n" \
"(p7) br.dpnt.few 1b\n" \
"and %0=1,r2\n" \
";;\n" \
:"=r"(Acq):"m"(GLptr):"r2","r29","r30","memory"); \
} while (0)
const char *acpi_get_sysname (void);
int acpi_boot_init (char *cdline);
int acpi_find_rsdp (unsigned long *phys_addr);
int acpi_request_vector (u32 int_type);
int acpi_get_prt (struct pci_vector_struct **vectors, int *count);
int acpi_get_interrupt_model(int *type);
......
......@@ -6,6 +6,8 @@
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <linux/page-flags.h>
#include <asm/bitops.h>
#include <asm/page.h>
......@@ -23,7 +25,7 @@
#define flush_dcache_page(page) \
do { \
clear_bit(PG_arch_1, &page->flags); \
clear_bit(PG_arch_1, &(page)->flags); \
} while (0)
extern void flush_icache_range (unsigned long start, unsigned long end);
......
......@@ -2,10 +2,11 @@
#define _ASM_IA64_HW_IRQ_H
/*
* Copyright (C) 2001 Hewlett-Packard Co
* Copyright (C) 2001 David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 2001-2002 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/types.h>
......@@ -67,6 +68,8 @@ enum {
extern __u8 isa_irq_to_vector_map[16];
#define isa_irq_to_vector(x) isa_irq_to_vector_map[(x)]
extern __u8 gsi_to_vector_map[255];
#define gsi_to_vector(x) gsi_to_vector_map[(x)]
extern unsigned long ipi_base_addr;
......
......@@ -64,7 +64,7 @@
#define IA64_PSR_RI_BIT 41
#define IA64_PSR_ED_BIT 43
#define IA64_PSR_BN_BIT 44
#define IA64_PSR_IA (__IA64_UL(1) << IA64_PSR_IA_BIT)
#define IA64_PSR_IA_BIT 45
/* A mask of PSR bits that we generally don't want to inherit across a clone2() or an
execve(). Only list flags here that need to be cleared/set for BOTH clone2() and
......@@ -94,6 +94,7 @@
#define IA64_PSR_TB (__IA64_UL(1) << IA64_PSR_TB_BIT)
#define IA64_PSR_RT (__IA64_UL(1) << IA64_PSR_RT_BIT)
/* The following are not affected by save_flags()/restore_flags(): */
#define IA64_PSR_CPL (__IA64_UL(3) << IA64_PSR_CPL0_BIT)
#define IA64_PSR_IS (__IA64_UL(1) << IA64_PSR_IS_BIT)
#define IA64_PSR_MC (__IA64_UL(1) << IA64_PSR_MC_BIT)
#define IA64_PSR_IT (__IA64_UL(1) << IA64_PSR_IT_BIT)
......@@ -104,6 +105,7 @@
#define IA64_PSR_RI (__IA64_UL(3) << IA64_PSR_RI_BIT)
#define IA64_PSR_ED (__IA64_UL(1) << IA64_PSR_ED_BIT)
#define IA64_PSR_BN (__IA64_UL(1) << IA64_PSR_BN_BIT)
#define IA64_PSR_IA (__IA64_UL(1) << IA64_PSR_IA_BIT)
/* User mask bits: */
#define IA64_PSR_UM (IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL | IA64_PSR_MFH)
......
......@@ -4,8 +4,8 @@
/*
* Fundamental kernel parameters.
*
* Copyright (C) 1998, 1999 Hewlett-Packard Co
* Copyright (C) 1998, 1999 David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 1998, 1999, 2002 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <linux/config.h>
......@@ -33,6 +33,7 @@
#define MAXHOSTNAMELEN 64 /* max length of hostname */
#ifdef __KERNEL__
# define USER_HZ HZ
# define CLOCKS_PER_SEC HZ /* frequency at which times() counts */
#endif
......
......@@ -21,7 +21,7 @@
#define PCIBIOS_MIN_MEM 0x10000000
void pcibios_config_init(void);
struct pci_bus * pcibios_scan_root(int seg, int bus);
struct pci_bus * pcibios_scan_root(int bus);
extern int (*pci_config_read)(int seg, int bus, int dev, int fn, int reg, int len, u32 *value);
extern int (*pci_config_write)(int seg, int bus, int dev, int fn, int reg, int len, u32 value);
......
......@@ -15,9 +15,10 @@
#include <linux/config.h>
#include <linux/compiler.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/threads.h>
#include <linux/compiler.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
......
#ifndef _IA64_RMAP_H
#define _IA64_RMAP_H
#ifndef _ASM_IA64_RMAP_H
#define _ASM_IA64_RMAP_H
/* nothing to see, move along */
#include <asm-generic/rmap.h>
#endif
#endif /* _ASM_IA64_RMAP_H */
......@@ -17,6 +17,7 @@
#include <linux/threads.h>
#include <linux/kernel.h>
#include <asm/bitops.h>
#include <asm/io.h>
#include <asm/param.h>
#include <asm/processor.h>
......@@ -36,6 +37,7 @@ extern struct smp_boot_data {
extern char no_int_routing __initdata;
extern unsigned long phys_cpu_present_map;
extern volatile unsigned long cpu_online_map;
extern unsigned long ipi_base_addr;
extern unsigned char smp_int_redirect;
......@@ -45,23 +47,26 @@ extern volatile int ia64_cpu_to_sapicid[];
extern unsigned long ap_wakeup_vector;
#define cpu_online(cpu) (cpu_online_map & (1<<(cpu)))
extern inline unsigned int num_online_cpus(void)
#define cpu_possible(cpu) (phys_cpu_present_map & (1UL << (cpu)))
#define cpu_online(cpu) (cpu_online_map & (1UL << (cpu)))
static inline unsigned int
num_online_cpus (void)
{
return hweight64(cpu_online_map);
}
extern inline int any_online_cpu(unsigned int mask)
static inline int
any_online_cpu (unsigned int mask)
{
if (mask & cpu_online_map)
return __ffs(mask & cpu_online_map);
return -1;
}
/*
* Function to map hard smp processor id to logical id. Slow, so
* don't use this in performance-critical code.
* Function to map hard smp processor id to logical id. Slow, so don't use this in
* performance-critical code.
*/
static inline int
cpu_logical_id (int cpuid)
......@@ -120,11 +125,9 @@ hard_smp_processor_id (void)
}
/* Upping and downing of CPUs */
extern int __cpu_disable(void);
extern void __cpu_die(unsigned int cpu);
extern int __cpu_up(unsigned int cpu);
#define NO_PROC_ID 0xffffffff /* no processor magic marker */
extern int __cpu_disable (void);
extern void __cpu_die (unsigned int cpu);
extern int __cpu_up (unsigned int cpu);
extern void __init init_smp_config (void);
extern void smp_do_timer (struct pt_regs *regs);
......
......@@ -14,11 +14,6 @@ extern spinlock_t kernel_flag;
#ifdef CONFIG_SMP
# define kernel_locked() spin_is_locked(&kernel_flag)
# define check_irq_holder(cpu) \
do { \
if (global_irq_holder == (cpu)) \
BUG(); \
} while (0)
#else
# define kernel_locked() (1)
#endif
......@@ -26,12 +21,10 @@ do { \
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task, cpu) \
#define release_kernel_lock(task) \
do { \
if (unlikely(task->lock_depth >= 0)) { \
if (unlikely(task->lock_depth >= 0)) \
spin_unlock(&kernel_flag); \
check_irq_holder(cpu); \
} \
} while (0)
/*
......
......@@ -4,23 +4,23 @@
#include <linux/compiler.h>
/*
* Copyright (C) 1998-2001 Hewlett-Packard Co
* Copyright (C) 1998-2002 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <asm/hardirq.h>
#include <linux/compiler.h>
#include <linux/preempt.h>
#include <asm/hardirq.h>
#define __local_bh_enable() do { barrier(); really_local_bh_count()--; } while (0)
#define __local_bh_enable() do { barrier(); preempt_count() -= SOFTIRQ_OFFSET; } while (0)
#define local_bh_disable() do { really_local_bh_count()++; barrier(); } while (0)
#define local_bh_disable() do { preempt_count() += SOFTIRQ_OFFSET; barrier(); } while (0)
#define local_bh_enable() \
do { \
__local_bh_enable(); \
if (unlikely(local_softirq_pending()) && really_local_bh_count() == 0) \
if (unlikely(!in_interrupt() && local_softirq_pending())) \
do_softirq(); \
preempt_check_resched(); \
} while (0)
#define in_softirq() (really_local_bh_count() != 0)
#endif /* _ASM_IA64_SOFTIRQ_H */
......@@ -17,6 +17,7 @@
#include <asm/kregs.h>
#include <asm/page.h>
#include <asm/pal.h>
#define KERNEL_START (PAGE_OFFSET + 68*1024*1024)
......@@ -103,6 +104,8 @@ ia64_insn_group_barrier (void)
#define set_mb(var, value) do { (var) = (value); mb(); } while (0)
#define set_wmb(var, value) do { (var) = (value); mb(); } while (0)
#define safe_halt() ia64_pal_halt(1) /* PAL_HALT */
/*
* The group barrier in front of the rsm & ssm are necessary to ensure
* that none of the previous instructions in the same group are
......@@ -169,27 +172,7 @@ do { \
#endif /* !CONFIG_IA64_DEBUG_IRQ */
#define local_irq_enable() __asm__ __volatile__ (";; ssm psr.i;; srlz.d" ::: "memory")
#define local_irq_disable() local_irq_disable ()
#define local_save_flags(flags) __asm__ __volatile__ ("mov %0=psr" : "=r" (flags) :: "memory")
#define local_irq_save(flags) local_irq_save(flags)
#define save_and_cli(flags) local_irq_save(flags)
#ifdef CONFIG_SMP
extern void __global_cli (void);
extern void __global_sti (void);
extern unsigned long __global_save_flags (void);
extern void __global_restore_flags (unsigned long);
# define cli() __global_cli()
# define sti() __global_sti()
# define save_flags(flags) ((flags) = __global_save_flags())
# define restore_flags(flags) __global_restore_flags(flags)
#else /* !CONFIG_SMP */
# define cli() local_irq_disable()
# define sti() local_irq_enable()
# define save_flags(flags) local_save_flags(flags)
# define restore_flags(flags) local_irq_restore(flags)
#endif /* !CONFIG_SMP */
/*
* Force an unresolved reference if someone tries to use
......@@ -377,7 +360,7 @@ static inline void ia32_load_state(struct task_struct *t __attribute__((unused))
* newly created thread returns directly to
* ia64_ret_from_syscall_clear_r8.
*/
extern void ia64_switch_to (void *next_task);
extern struct task_struct *ia64_switch_to (void *next_task);
struct task_struct;
......@@ -391,14 +374,14 @@ extern void ia64_load_extra (struct task_struct *task);
# define PERFMON_IS_SYSWIDE() (0)
#endif
#define __switch_to(prev,next) do { \
#define __switch_to(prev,next,last) do { \
if (((prev)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID)) \
|| IS_IA32_PROCESS(ia64_task_regs(prev)) || PERFMON_IS_SYSWIDE()) \
ia64_save_extra(prev); \
if (((next)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID)) \
|| IS_IA32_PROCESS(ia64_task_regs(next)) || PERFMON_IS_SYSWIDE()) \
ia64_load_extra(next); \
ia64_switch_to((next)); \
(last) = ia64_switch_to((next)); \
} while (0)
#ifdef CONFIG_SMP
......@@ -413,19 +396,19 @@ extern void ia64_load_extra (struct task_struct *task);
* task->thread.fph, avoiding the complication of having to fetch
* the latest fph state from another CPU.
*/
# define switch_to(prev,next) do { \
# define switch_to(prev,next,last) do { \
if (ia64_psr(ia64_task_regs(prev))->mfh) { \
ia64_psr(ia64_task_regs(prev))->mfh = 0; \
(prev)->thread.flags |= IA64_THREAD_FPH_VALID; \
__ia64_save_fpu((prev)->thread.fph); \
} \
ia64_psr(ia64_task_regs(prev))->dfh = 1; \
__switch_to(prev,next); \
__switch_to(prev,next,last); \
} while (0)
#else
# define switch_to(prev,next) do { \
# define switch_to(prev,next,last) do { \
ia64_psr(ia64_task_regs(next))->dfh = (ia64_get_fpu_owner() != (next)); \
__switch_to(prev,next); \
__switch_to(prev,next,last); \
} while (0)
#endif
......
......@@ -22,7 +22,7 @@
* unmapping a portion of the virtual address space, these hooks are called according to
* the following template:
*
* tlb <- tlb_gather_mmu(mm); // start unmap for address space MM
* tlb <- tlb_gather_mmu(mm, full_mm_flush); // start unmap for address space MM
* {
* for each vma that needs a shootdown do {
* tlb_start_vma(tlb, vma);
......@@ -45,7 +45,7 @@
#ifdef CONFIG_SMP
# define FREE_PTE_NR 2048
# define tlb_fast_mode(tlb) ((tlb)->nr == ~0UL)
# define tlb_fast_mode(tlb) ((tlb)->nr == ~0U)
#else
# define FREE_PTE_NR 0
# define tlb_fast_mode(tlb) (1)
......@@ -53,7 +53,8 @@
typedef struct {
struct mm_struct *mm;
unsigned long nr; /* == ~0UL => fast mode */
unsigned int nr; /* == ~0U => fast mode */
unsigned int fullmm; /* non-zero means full mm flush */
unsigned long freed; /* number of pages freed */
unsigned long start_addr;
unsigned long end_addr;
......@@ -70,10 +71,17 @@ extern mmu_gather_t mmu_gathers[NR_CPUS];
static inline void
ia64_tlb_flush_mmu (mmu_gather_t *tlb, unsigned long start, unsigned long end)
{
unsigned long nr;
unsigned int nr;
if (unlikely (end - start >= 1024*1024*1024*1024UL
|| rgn_index(start) != rgn_index(end - 1)))
if (tlb->fullmm) {
/*
* Tearing down the entire address space. This happens both as a result
* of exit() and execve(). The latter case necessitates the call to
* flush_tlb_mm() here.
*/
flush_tlb_mm(tlb->mm);
} else if (unlikely (end - start >= 1024*1024*1024*1024UL
|| REGION_NUMBER(start) != REGION_NUMBER(end - 1)))
{
/*
* If we flush more than a tera-byte or across regions, we're probably
......@@ -110,16 +118,21 @@ ia64_tlb_flush_mmu (mmu_gather_t *tlb, unsigned long start, unsigned long end)
* Return a pointer to an initialized mmu_gather_t.
*/
static inline mmu_gather_t *
tlb_gather_mmu (struct mm_struct *mm)
tlb_gather_mmu (struct mm_struct *mm, unsigned int full_mm_flush)
{
mmu_gather_t *tlb = &mmu_gathers[smp_processor_id()];
tlb->mm = mm;
tlb->nr = 0;
if (full_mm_flush || num_online_cpus() == 1)
/*
* Use fast mode if only 1 CPU is online or if we're tearing down the
* entire address space.
*/
tlb->nr = ~0U;
tlb->fullmm = full_mm_flush;
tlb->freed = 0;
tlb->start_addr = ~0UL;
/* Use fast mode if only one CPU is online */
tlb->nr = smp_num_cpus > 1 ? 0UL : ~0UL;
return tlb;
}
......@@ -152,7 +165,7 @@ tlb_finish_mmu (mmu_gather_t *tlb, unsigned long start, unsigned long end)
* PTE, not just those pointing to (normal) physical memory.
*/
static inline void
tlb_remove_tlb_entry (mmu_gather_t *tlb, pte_t pte, unsigned long address)
tlb_remove_tlb_entry (mmu_gather_t *tlb, pte_t *ptep, unsigned long address)
{
if (tlb->start_addr == ~0UL)
tlb->start_addr = address;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment