Commit f31f42ad authored by Linus Torvalds

Merge http://lia64.bkbits.net/to-linus-2.5

into penguin.transmeta.com:/home/penguin/torvalds/repositories/kernel/linux
parents f37f8826 ea5083d6
......@@ -157,6 +157,11 @@ CONFIG_PCMCIA
and ds.o. If you want to compile it as a module, say M here and
read <file:Documentation/modules.txt>.
CONFIG_KALLSYMS
Say Y here to let the kernel print out symbolic crash information and
symbolic stack backtraces. This increases the size of the kernel
somewhat, as all symbols have to be loaded into the kernel image.
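For illustration (an editor's sketch, not part of the original help
text), kallsyms consumers resolve raw instruction pointers to symbol
names via print_symbol(), as the IA-64 stack dumper later in this
commit does:

  #include <linux/kallsyms.h>

  /* prints e.g. "ip is at sys_ioctl+0x40/0x2a0" once symbols are loaded */
  static void show_ip (unsigned long ip)
  {
          print_symbol("ip is at %s\n", ip);
  }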
CONFIG_KCORE_ELF
If you enabled support for the /proc file system, then the file
/proc/kcore will contain the kernel core image. This can be used
......
......@@ -33,44 +33,48 @@ ifeq ($(CONFIG_ITANIUM_BSTEP_SPECIFIC),y)
CFLAGS += -mb-step
endif
HEAD := arch/$(ARCH)/kernel/head.o arch/ia64/kernel/init_task.o
HEAD := arch/$(ARCH)/kernel/head.o arch/$(ARCH)/kernel/init_task.o
core-$(CONFIG_IA64_GENERIC) += arch/$(ARCH)/hp/ arch/$(ARCH)/dig/
core-$(CONFIG_IA64_HP_SIM) += arch/$(ARCH)/hp/
core-$(CONFIG_IA64_HP_ZX1) += arch/$(ARCH)/hp/ arch/$(ARCH)/dig/
core-$(CONFIG_IA64_SGI_SN) += arch/$(ARCH)/sn/kernel arch/$(ARCH)/sn/io
libs-y += arch/$(ARCH)/lib/
core-y += arch/$(ARCH)/kernel/ arch/$(ARCH)/mm/
core-$(CONFIG_IA32_SUPPORT) += arch/$(ARCH)/ia32/
core-$(CONFIG_IA64_DIG) += arch/$(ARCH)/dig/
core-$(CONFIG_IA64_GENERIC) += arch/$(ARCH)/dig/ arch/$(ARCH)/hp/common/ arch/$(ARCH)/hp/zx1/ \
arch/$(ARCH)/hp/sim/
core-$(CONFIG_IA64_HP_ZX1) += arch/$(ARCH)/dig/
core-$(CONFIG_IA64_SGI_SN) += arch/$(ARCH)/sn/kernel arch/$(ARCH)/sn/io \
arch/$(ARCH)/sn/fakeprom
drivers-$(CONFIG_PCI) += arch/$(ARCH)/pci/
drivers-$(CONFIG_IA64_HP_SIM) += arch/$(ARCH)/hp/sim/
drivers-$(CONFIG_IA64_HP_ZX1) += arch/$(ARCH)/hp/common/ arch/$(ARCH)/hp/zx1/
ifdef CONFIG_IA64_SGI_SN
CFLAGS += -DBRINGUP
SUBDIRS += arch/$(ARCH)/sn/fakeprom
endif
core-$(CONFIG_IA32_SUPPORT) += arch/$(ARCH)/ia32/
makeboot = $(call descend,arch/ia64/boot,$(1))
maketool = $(call descend,arch/ia64/tools,$(1))
libs-y += arch/$(ARCH)/lib/
core-y += arch/$(ARCH)/kernel/ arch/$(ARCH)/mm/
drivers-$(CONFIG_PCI) += arch/$(ARCH)/pci/
drivers-$(CONFIG_IA64_HP_ZX1) += arch/$(ARCH)/hp/common/ arch/$(ARCH)/hp/zx1/
.PHONY: compressed archclean archmrproper $(TOPDIR)/include/asm-ia64/offsets.h
all: compressed boot
MAKEBOOT = $(MAKE) -C arch/$(ARCH)/boot
boot: vmlinux
+@$(call makeboot,all)
compressed: vmlinux
$(OBJCOPY) $(OBJCOPYFLAGS) vmlinux vmlinux-tmp
gzip vmlinux-tmp
mv vmlinux-tmp.gz vmlinux.gz
rawboot:
@$(MAKEBOOT) rawboot
archclean:
@$(MAKEBOOT) clean
$(MAKE) -rR -f scripts/Makefile.clean obj=arch/$(ARCH)/boot
archmrproper:
@$(MAKE) -C arch/$(ARCH)/tools mrproper
prepare: $(TOPDIR)/include/asm-ia64/offsets.h
$(TOPDIR)/include/asm-ia64/offsets.h: include/asm include/linux/version.h \
include/config/MARKER
@$(MAKE) -C arch/$(ARCH)/tools $@
+@$(call maketool,$@)
......@@ -8,9 +8,9 @@
# Copyright (C) 1998 by David Mosberger-Tang <davidm@hpl.hp.com>
#
LINKFLAGS = -static -T bootloader.lds
LINKFLAGS = -static -T $(src)/bootloader.lds
OBJECTS = bootloader.o
OBJS = $(obj)/bootloader.o
targets-$(CONFIG_IA64_HP_SIM) += bootloader
targets-$(CONFIG_IA64_GENERIC) += bootloader
......@@ -19,8 +19,8 @@ CFLAGS := $(CFLAGS) $(CFLAGS_KERNEL)
all: $(targets-y)
bootloader: $(OBJECTS)
$(LD) $(LINKFLAGS) $(OBJECTS) $(TOPDIR)/lib/lib.a $(TOPDIR)/arch/$(ARCH)/lib/lib.a \
bootloader: $(OBJS)
$(LD) $(LINKFLAGS) $(OBJS) $(TOPDIR)/lib/lib.a $(TOPDIR)/arch/$(ARCH)/lib/lib.a \
-o bootloader
clean:
......
......@@ -66,6 +66,10 @@ fi
if [ "$CONFIG_IA64_GENERIC" = "y" -o "$CONFIG_IA64_DIG" = "y" -o "$CONFIG_IA64_HP_ZX1" = "y" ];
then
bool ' Enable NUMA support' CONFIG_NUMA
if [ "$CONFIG_NUMA" = "y" ]; then
define_bool CONFIG_DISCONTIGMEM y
fi
bool ' Enable IA-64 Machine Check Abort' CONFIG_IA64_MCA
define_bool CONFIG_PM y
define_bool CONFIG_IOSAPIC y
......@@ -267,6 +271,7 @@ choice 'Physical memory granularity' \
bool 'Kernel debugging' CONFIG_DEBUG_KERNEL
if [ "$CONFIG_DEBUG_KERNEL" != "n" ]; then
bool ' Load all symbols for debugging/kksymoops' CONFIG_KALLSYMS
bool ' Print possible IA64 hazards to console' CONFIG_IA64_PRINT_HAZARDS
bool ' Disable VHPT' CONFIG_DISABLE_VHPT
bool ' Magic SysRq key' CONFIG_MAGIC_SYSRQ
......
# arch/ia64/hp/Makefile
# Copyright (c) 2002 Matthew Wilcox for Hewlett Packard
obj-$(CONFIG_IA64_GENERIC) += sim/ zx1/ common/
obj-$(CONFIG_IA64_HP_SIM) += sim/
include $(TOPDIR)/Rules.make
......@@ -361,7 +361,9 @@ simscsi_queuecommand (Scsi_Cmnd *sc, void (*done)(Scsi_Cmnd *))
break;
case MODE_SENSE:
printk("MODE_SENSE\n");
/* sd.c uses this to determine whether the disk does write-caching. */
memset(sc->request_buffer, 0, 128);
sc->result = GOOD;
break;
case START_STOP:
......@@ -391,6 +393,4 @@ simscsi_queuecommand (Scsi_Cmnd *sc, void (*done)(Scsi_Cmnd *))
static Scsi_Host_Template driver_template = SIMSCSI;
#define __initcall(fn) late_initcall(fn)
#include "../drivers/scsi/scsi_module.c"
......@@ -752,8 +752,7 @@ static int get_async_struct(int line, struct async_struct **ret_info)
info->flags = sstate->flags;
info->xmit_fifo_size = sstate->xmit_fifo_size;
info->line = line;
info->tqueue.routine = do_softint;
info->tqueue.data = info;
INIT_WORK(&info->work, do_softint, info);
info->state = sstate;
if (sstate->info) {
kfree(info);
......
......@@ -26,6 +26,12 @@
#include <linux/if_ppp.h>
#include <linux/ixjuser.h>
#include <linux/i2o-dev.h>
#include <scsi/scsi.h>
/* Ugly hack. */
#undef __KERNEL__
#include <scsi/scsi_ioctl.h>
#define __KERNEL__
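/* Editor's note (assumption): with __KERNEL__ undefined, <scsi/scsi_ioctl.h>
   exposes its user-space declarations, which is what this emulation layer
   needs in order to match the IA-32 ABI. */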
#include <scsi/sg.h>
#include <asm/ia32.h>
......@@ -60,6 +66,235 @@ put_dirent32 (struct dirent *d, struct linux32_dirent *d32)
|| put_user(d->d_reclen, &d32->d_reclen)
|| copy_to_user(d32->d_name, d->d_name, namelen + 1));
}
/*
* The transform code for the SG_IO ioctl was brazenly lifted from
* the Sparc64 port in the file `arch/sparc64/kernel/ioctl32.c'.
* Thanks to Jakub Jelinek & Eddie C. Dost.
*/
typedef struct sg_io_hdr32 {
int interface_id; /* [i] 'S' for SCSI generic (required) */
int dxfer_direction; /* [i] data transfer direction */
char cmd_len; /* [i] SCSI command length ( <= 16 bytes) */
char mx_sb_len; /* [i] max length to write to sbp */
short iovec_count; /* [i] 0 implies no scatter gather */
int dxfer_len; /* [i] byte count of data transfer */
int dxferp; /* [i], [*io] points to data transfer memory
or scatter gather list */
int cmdp; /* [i], [*i] points to command to perform */
int sbp; /* [i], [*o] points to sense_buffer memory */
int timeout; /* [i] MAX_UINT->no timeout (unit: millisec) */
int flags; /* [i] 0 -> default, see SG_FLAG... */
int pack_id; /* [i->o] unused internally (normally) */
int usr_ptr; /* [i->o] unused internally */
char status; /* [o] scsi status */
char masked_status; /* [o] shifted, masked scsi status */
char msg_status; /* [o] messaging level data (optional) */
char sb_len_wr; /* [o] byte count actually written to sbp */
short host_status; /* [o] errors from host adapter */
short driver_status; /* [o] errors from software driver */
int resid; /* [o] dxfer_len - actual_transferred */
int duration; /* [o] time taken by cmd (unit: millisec) */
int info; /* [o] auxiliary information */
} sg_io_hdr32_t; /* 64 bytes long (on IA32) */
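/*
 * Editor's sketch (not part of the patch): the 64-byte claim above can be
 * pinned down at compile time with the negative-array-size trick, assuming
 * 4-byte ints and 2-byte shorts as on IA-32. The typedef fails to compile
 * if the layout ever drifts from 64 bytes:
 */
typedef char sg_io_hdr32_size_check[(sizeof(sg_io_hdr32_t) == 64) ? 1 : -1];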
struct iovec32 { unsigned int iov_base; int iov_len; };
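/* Editor's note (assumption): P() is the pre-existing helper, defined
   elsewhere in this file, that widens a 32-bit IA-32 user address into a
   64-bit pointer; it is used verbatim below. */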
static int alloc_sg_iovec(sg_io_hdr_t *sgp, int uptr32)
{
struct iovec32 *uiov = (struct iovec32 *) P(uptr32);
sg_iovec_t *kiov;
int i;
sgp->dxferp = kmalloc(sgp->iovec_count *
sizeof(sg_iovec_t), GFP_KERNEL);
if (!sgp->dxferp)
return -ENOMEM;
memset(sgp->dxferp, 0,
sgp->iovec_count * sizeof(sg_iovec_t));
kiov = (sg_iovec_t *) sgp->dxferp;
for (i = 0; i < sgp->iovec_count; i++) {
int iov_base32;
if (__get_user(iov_base32, &uiov->iov_base) ||
__get_user(kiov->iov_len, &uiov->iov_len))
return -EFAULT;
kiov->iov_base = kmalloc(kiov->iov_len, GFP_KERNEL);
if (!kiov->iov_base)
return -ENOMEM;
if (copy_from_user(kiov->iov_base,
(void *) P(iov_base32),
kiov->iov_len))
return -EFAULT;
uiov++;
kiov++;
}
return 0;
}
static int copy_back_sg_iovec(sg_io_hdr_t *sgp, int uptr32)
{
struct iovec32 *uiov = (struct iovec32 *) P(uptr32);
sg_iovec_t *kiov = (sg_iovec_t *) sgp->dxferp;
int i;
for (i = 0; i < sgp->iovec_count; i++) {
int iov_base32;
if (__get_user(iov_base32, &uiov->iov_base))
return -EFAULT;
if (copy_to_user((void *) P(iov_base32),
kiov->iov_base,
kiov->iov_len))
return -EFAULT;
uiov++;
kiov++;
}
return 0;
}
static void free_sg_iovec(sg_io_hdr_t *sgp)
{
sg_iovec_t *kiov = (sg_iovec_t *) sgp->dxferp;
int i;
for (i = 0; i < sgp->iovec_count; i++) {
if (kiov->iov_base) {
kfree(kiov->iov_base);
kiov->iov_base = NULL;
}
kiov++;
}
kfree(sgp->dxferp);
sgp->dxferp = NULL;
}
static int sg_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
{
sg_io_hdr32_t *sg_io32;
sg_io_hdr_t sg_io64;
int dxferp32, cmdp32, sbp32;
mm_segment_t old_fs;
int err = 0;
sg_io32 = (sg_io_hdr32_t *)arg;
err = __get_user(sg_io64.interface_id, &sg_io32->interface_id);
err |= __get_user(sg_io64.dxfer_direction, &sg_io32->dxfer_direction);
err |= __get_user(sg_io64.cmd_len, &sg_io32->cmd_len);
err |= __get_user(sg_io64.mx_sb_len, &sg_io32->mx_sb_len);
err |= __get_user(sg_io64.iovec_count, &sg_io32->iovec_count);
err |= __get_user(sg_io64.dxfer_len, &sg_io32->dxfer_len);
err |= __get_user(sg_io64.timeout, &sg_io32->timeout);
err |= __get_user(sg_io64.flags, &sg_io32->flags);
err |= __get_user(sg_io64.pack_id, &sg_io32->pack_id);
sg_io64.dxferp = NULL;
sg_io64.cmdp = NULL;
sg_io64.sbp = NULL;
err |= __get_user(cmdp32, &sg_io32->cmdp);
sg_io64.cmdp = kmalloc(sg_io64.cmd_len, GFP_KERNEL);
if (!sg_io64.cmdp) {
err = -ENOMEM;
goto out;
}
if (copy_from_user(sg_io64.cmdp,
(void *) P(cmdp32),
sg_io64.cmd_len)) {
err = -EFAULT;
goto out;
}
err |= __get_user(sbp32, &sg_io32->sbp);
sg_io64.sbp = kmalloc(sg_io64.mx_sb_len, GFP_KERNEL);
if (!sg_io64.sbp) {
err = -ENOMEM;
goto out;
}
if (copy_from_user(sg_io64.sbp,
(void *) P(sbp32),
sg_io64.mx_sb_len)) {
err = -EFAULT;
goto out;
}
err |= __get_user(dxferp32, &sg_io32->dxferp);
if (sg_io64.iovec_count) {
int ret;
if ((ret = alloc_sg_iovec(&sg_io64, dxferp32))) {
err = ret;
goto out;
}
} else {
sg_io64.dxferp = kmalloc(sg_io64.dxfer_len, GFP_KERNEL);
if (!sg_io64.dxferp) {
err = -ENOMEM;
goto out;
}
if (copy_from_user(sg_io64.dxferp,
(void *) P(dxferp32),
sg_io64.dxfer_len)) {
err = -EFAULT;
goto out;
}
}
/* Unused internally, do not even bother to copy it over. */
sg_io64.usr_ptr = NULL;
if (err) {
	err = -EFAULT;
	goto out;
}
old_fs = get_fs();
set_fs (KERNEL_DS);
err = sys_ioctl (fd, cmd, (unsigned long) &sg_io64);
set_fs (old_fs);
if (err < 0)
goto out;
err = __put_user(sg_io64.pack_id, &sg_io32->pack_id);
err |= __put_user(sg_io64.status, &sg_io32->status);
err |= __put_user(sg_io64.masked_status, &sg_io32->masked_status);
err |= __put_user(sg_io64.msg_status, &sg_io32->msg_status);
err |= __put_user(sg_io64.sb_len_wr, &sg_io32->sb_len_wr);
err |= __put_user(sg_io64.host_status, &sg_io32->host_status);
err |= __put_user(sg_io64.driver_status, &sg_io32->driver_status);
err |= __put_user(sg_io64.resid, &sg_io32->resid);
err |= __put_user(sg_io64.duration, &sg_io32->duration);
err |= __put_user(sg_io64.info, &sg_io32->info);
err |= copy_to_user((void *)P(sbp32), sg_io64.sbp, sg_io64.mx_sb_len);
if (sg_io64.dxferp) {
if (sg_io64.iovec_count)
err |= copy_back_sg_iovec(&sg_io64, dxferp32);
else
err |= copy_to_user((void *)P(dxferp32),
sg_io64.dxferp,
sg_io64.dxfer_len);
}
if (err)
err = -EFAULT;
out:
if (sg_io64.cmdp)
kfree(sg_io64.cmdp);
if (sg_io64.sbp)
kfree(sg_io64.sbp);
if (sg_io64.dxferp) {
if (sg_io64.iovec_count) {
free_sg_iovec(&sg_io64);
} else {
kfree(sg_io64.dxferp);
}
}
return err;
}
asmlinkage long
sys32_ioctl (unsigned int fd, unsigned int cmd, unsigned int arg)
......@@ -271,6 +506,9 @@ sys32_ioctl (unsigned int fd, unsigned int cmd, unsigned int arg)
default:
return sys_ioctl(fd, cmd, (unsigned long)arg);
case IOCTL_NR(SG_IO):
return(sg_ioctl_trans(fd, cmd, arg));
}
printk("%x:unimplemented IA32 ioctl system call\n", cmd);
return -EINVAL;
......
......@@ -2842,20 +2842,6 @@ putreg (struct task_struct *child, int regno, unsigned int value)
}
}
static inline void
ia32f2ia64f (void *dst, void *src)
{
asm volatile ("ldfe f6=[%1];; stf.spill [%0]=f6" :: "r"(dst), "r"(src) : "memory");
return;
}
static inline void
ia64f2ia32f (void *dst, void *src)
{
asm volatile ("ldf.fill f6=[%1];; stfe [%0]=f6" :: "r"(dst), "r"(src) : "memory");
return;
}
static void
put_fpreg (int regno, struct _fpreg_ia32 *reg, struct pt_regs *ptp, struct switch_stack *swp,
int tos)
......
......@@ -8,6 +8,9 @@
* Copyright (C) 2000 Intel Corp.
* Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com>
* Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
* Copyright (C) 2001 Jenna Hall <jenna.s.hall@intel.com>
* Copyright (C) 2001 Takayoshi Kochi <t-kouchi@cq.jp.nec.com>
* Copyright (C) 2002 Erich Focht <efocht@ess.nec.de>
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
......@@ -43,6 +46,7 @@
#include <asm/machvec.h>
#include <asm/page.h>
#include <asm/system.h>
#include <asm/numa.h>
#define PREFIX "ACPI: "
......@@ -447,6 +451,189 @@ acpi_parse_madt (unsigned long phys_addr, unsigned long size)
}
#ifdef CONFIG_ACPI_NUMA
#define SLIT_DEBUG
#define PXM_FLAG_LEN ((MAX_PXM_DOMAINS + 1)/32)
static int __initdata srat_num_cpus; /* number of cpus */
static u32 __initdata pxm_flag[PXM_FLAG_LEN];
#define pxm_bit_set(bit) (set_bit(bit,(void *)pxm_flag))
#define pxm_bit_test(bit) (test_bit(bit,(void *)pxm_flag))
/* maps to convert between proximity domain and logical node ID */
int __initdata pxm_to_nid_map[MAX_PXM_DOMAINS];
int __initdata nid_to_pxm_map[NR_NODES];
static struct acpi_table_slit __initdata *slit_table;
/*
* ACPI 2.0 SLIT (System Locality Information Table)
* http://devresource.hp.com/devresource/Docs/TechPapers/IA64/slit.pdf
*/
void __init
acpi_numa_slit_init (struct acpi_table_slit *slit)
{
u32 len;
len = sizeof(struct acpi_table_header) + 8
+ slit->localities * slit->localities;
if (slit->header.length != len) {
printk("ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n",
len, slit->header.length);
memset(numa_slit, 10, sizeof(numa_slit));
return;
}
slit_table = slit;
}
void __init
acpi_numa_processor_affinity_init (struct acpi_table_processor_affinity *pa)
{
/* record this node in proximity bitmap */
pxm_bit_set(pa->proximity_domain);
node_cpuid[srat_num_cpus].phys_id = (pa->apic_id << 8) | (pa->lsapic_eid);
/* nid should be overridden as logical node id later */
node_cpuid[srat_num_cpus].nid = pa->proximity_domain;
srat_num_cpus++;
}
void __init
acpi_numa_memory_affinity_init (struct acpi_table_memory_affinity *ma)
{
unsigned long paddr, size, hole_size, min_hole_size;
u8 pxm;
struct node_memblk_s *p, *q, *pend;
pxm = ma->proximity_domain;
/* fill node memory chunk structure */
paddr = ma->base_addr_hi;
paddr = (paddr << 32) | ma->base_addr_lo;
size = ma->length_hi;
size = (size << 32) | ma->length_lo;
if (num_memblks >= NR_MEMBLKS) {
printk("Too many mem chunks in SRAT. Ignoring %ld MBytes at %lx\n",
size/(1024*1024), paddr);
return;
}
/* Ignore disabled entries */
if (!ma->flags.enabled)
return;
/*
* When the chunk is not the first one in the node, check the distance
* from the other chunks. When the hole is too large, ignore the chunk.
* This restriction should be removed when multiple chunks per node
* are supported.
*/
pend = &node_memblk[num_memblks];
min_hole_size = 0;
for (p = &node_memblk[0]; p < pend; p++) {
if (p->nid != pxm)
continue;
if (p->start_paddr < paddr)
hole_size = paddr - (p->start_paddr + p->size);
else
hole_size = p->start_paddr - (paddr + size);
if (!min_hole_size || hole_size < min_hole_size)
min_hole_size = hole_size;
}
if (min_hole_size) {
if (min_hole_size > size) {
printk("Too huge memory hole. Ignoring %ld MBytes at %lx\n",
size/(1024*1024), paddr);
return;
}
}
/* record this node in proximity bitmap */
pxm_bit_set(pxm);
/* Insertion sort based on base address */
pend = &node_memblk[num_memblks];
for (p = &node_memblk[0]; p < pend; p++) {
if (paddr < p->start_paddr)
break;
}
if (p < pend) {
for (q = pend; q >= p; q--)
*(q + 1) = *q;
}
p->start_paddr = paddr;
p->size = size;
p->nid = pxm;
num_memblks++;
}
void __init
acpi_numa_arch_fixup(void)
{
int i, j, node_from, node_to;
/* calculate total number of nodes in system from PXM bitmap */
numnodes = 0; /* init total nodes in system */
memset(pxm_to_nid_map, -1, sizeof(pxm_to_nid_map));
memset(nid_to_pxm_map, -1, sizeof(nid_to_pxm_map));
for (i = 0; i < MAX_PXM_DOMAINS; i++) {
if (pxm_bit_test(i)) {
pxm_to_nid_map[i] = numnodes;
nid_to_pxm_map[numnodes++] = i;
}
}
/* set logical node id in memory chunk structure */
for (i = 0; i < num_memblks; i++)
node_memblk[i].nid = pxm_to_nid_map[node_memblk[i].nid];
/* assign memory bank numbers for each chunk on each node */
for (i = 0; i < numnodes; i++) {
int bank;
bank = 0;
for (j = 0; j < num_memblks; j++)
if (node_memblk[j].nid == i)
node_memblk[j].bank = bank++;
}
/* set logical node id in cpu structure */
for (i = 0; i < srat_num_cpus; i++)
node_cpuid[i].nid = pxm_to_nid_map[node_cpuid[i].nid];
printk("Number of logical nodes in system = %d\n", numnodes);
printk("Number of memory chunks in system = %d\n", num_memblks);
if (!slit_table) return;
memset(numa_slit, -1, sizeof(numa_slit));
for (i=0; i<slit_table->localities; i++) {
if (!pxm_bit_test(i))
continue;
node_from = pxm_to_nid_map[i];
for (j=0; j<slit_table->localities; j++) {
if (!pxm_bit_test(j))
continue;
node_to = pxm_to_nid_map[j];
node_distance(node_from, node_to) =
slit_table->entry[i*slit_table->localities + j];
}
}
#ifdef SLIT_DEBUG
printk("ACPI 2.0 SLIT locality table:\n");
for (i = 0; i < numnodes; i++) {
for (j = 0; j < numnodes; j++)
printk("%03d ", node_distance(i,j));
printk("\n");
}
#endif
}
#endif /* CONFIG_ACPI_NUMA */
static int __init
acpi_parse_fadt (unsigned long phys_addr, unsigned long size)
{
......@@ -556,12 +743,6 @@ acpi_parse_spcr (unsigned long phys_addr, unsigned long size)
int __init
acpi_boot_init (char *cmdline)
{
int result;
/* Initialize the ACPI boot-time table parser */
result = acpi_table_init(cmdline);
if (result)
return result;
/*
* MADT
......@@ -631,6 +812,9 @@ acpi_boot_init (char *cmdline)
smp_boot_data.cpu_count = total_cpus;
smp_build_cpu_map();
#ifdef CONFIG_NUMA
build_cpu_to_node_map();
#endif
#endif
/* Make boot-up look pretty */
printk("%d CPUs available, %d CPUs total\n", available_cpus, total_cpus);
......
......@@ -60,67 +60,157 @@ struct proc_dir_entry *efi_dir = NULL;
static unsigned long mem_limit = ~0UL;
static efi_status_t
phys_get_time (efi_time_t *tm, efi_time_cap_t *tc)
{
return efi_call_phys(__va(runtime->get_time), __pa(tm), __pa(tc));
#define efi_call_virt(f, args...) (*(f))(args)
#define STUB_GET_TIME(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_time (efi_time_t *tm, efi_time_cap_t *tc) \
{ \
struct ia64_fpreg fr[6]; \
efi_status_t ret; \
\
ia64_save_scratch_fpregs(fr); \
ret = efi_call_##prefix((efi_get_time_t *) __va(runtime->get_time), adjust_arg(tm), \
adjust_arg(tc)); \
ia64_load_scratch_fpregs(fr); \
return ret; \
}
static efi_status_t
phys_set_time (efi_time_t *tm)
{
return efi_call_phys(__va(runtime->set_time), __pa(tm));
#define STUB_SET_TIME(prefix, adjust_arg) \
static efi_status_t \
prefix##_set_time (efi_time_t *tm) \
{ \
struct ia64_fpreg fr[6]; \
efi_status_t ret; \
\
ia64_save_scratch_fpregs(fr); \
ret = efi_call_##prefix((efi_set_time_t *) __va(runtime->set_time), adjust_arg(tm)); \
ia64_load_scratch_fpregs(fr); \
return ret; \
}
static efi_status_t
phys_get_wakeup_time (efi_bool_t *enabled, efi_bool_t *pending, efi_time_t *tm)
{
return efi_call_phys(__va(runtime->get_wakeup_time), __pa(enabled), __pa(pending),
__pa(tm));
#define STUB_GET_WAKEUP_TIME(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_wakeup_time (efi_bool_t *enabled, efi_bool_t *pending, efi_time_t *tm) \
{ \
struct ia64_fpreg fr[6]; \
efi_status_t ret; \
\
ia64_save_scratch_fpregs(fr); \
ret = efi_call_##prefix((efi_get_wakeup_time_t *) __va(runtime->get_wakeup_time), \
adjust_arg(enabled), adjust_arg(pending), adjust_arg(tm)); \
ia64_load_scratch_fpregs(fr); \
return ret; \
}
static efi_status_t
phys_set_wakeup_time (efi_bool_t enabled, efi_time_t *tm)
{
return efi_call_phys(__va(runtime->set_wakeup_time), enabled, __pa(tm));
#define STUB_SET_WAKEUP_TIME(prefix, adjust_arg) \
static efi_status_t \
prefix##_set_wakeup_time (efi_bool_t enabled, efi_time_t *tm) \
{ \
struct ia64_fpreg fr[6]; \
efi_status_t ret; \
\
ia64_save_scratch_fpregs(fr); \
ret = efi_call_##prefix((efi_set_wakeup_time_t *) __va(runtime->set_wakeup_time), \
enabled, adjust_arg(tm)); \
ia64_load_scratch_fpregs(fr); \
return ret; \
}
static efi_status_t
phys_get_variable (efi_char16_t *name, efi_guid_t *vendor, u32 *attr,
unsigned long *data_size, void *data)
{
return efi_call_phys(__va(runtime->get_variable), __pa(name), __pa(vendor), __pa(attr),
__pa(data_size), __pa(data));
#define STUB_GET_VARIABLE(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_variable (efi_char16_t *name, efi_guid_t *vendor, u32 *attr, \
unsigned long *data_size, void *data) \
{ \
struct ia64_fpreg fr[6]; \
efi_status_t ret; \
\
ia64_save_scratch_fpregs(fr); \
ret = efi_call_##prefix((efi_get_variable_t *) __va(runtime->get_variable), \
adjust_arg(name), adjust_arg(vendor), adjust_arg(attr), \
adjust_arg(data_size), adjust_arg(data)); \
ia64_load_scratch_fpregs(fr); \
return ret; \
}
static efi_status_t
phys_get_next_variable (unsigned long *name_size, efi_char16_t *name, efi_guid_t *vendor)
{
return efi_call_phys(__va(runtime->get_next_variable), __pa(name_size), __pa(name),
__pa(vendor));
#define STUB_GET_NEXT_VARIABLE(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_next_variable (unsigned long *name_size, efi_char16_t *name, efi_guid_t *vendor) \
{ \
struct ia64_fpreg fr[6]; \
efi_status_t ret; \
\
ia64_save_scratch_fpregs(fr); \
ret = efi_call_##prefix((efi_get_next_variable_t *) __va(runtime->get_next_variable), \
adjust_arg(name_size), adjust_arg(name), adjust_arg(vendor)); \
ia64_load_scratch_fpregs(fr); \
return ret; \
}
static efi_status_t
phys_set_variable (efi_char16_t *name, efi_guid_t *vendor, u32 attr,
unsigned long data_size, void *data)
{
return efi_call_phys(__va(runtime->set_variable), __pa(name), __pa(vendor), attr,
data_size, __pa(data));
#define STUB_SET_VARIABLE(prefix, adjust_arg) \
static efi_status_t \
prefix##_set_variable (efi_char16_t *name, efi_guid_t *vendor, u32 attr, \
unsigned long data_size, void *data) \
{ \
struct ia64_fpreg fr[6]; \
efi_status_t ret; \
\
ia64_save_scratch_fpregs(fr); \
ret = efi_call_##prefix((efi_set_variable_t *) __va(runtime->set_variable), \
adjust_arg(name), adjust_arg(vendor), attr, data_size, \
adjust_arg(data)); \
ia64_load_scratch_fpregs(fr); \
return ret; \
}
static efi_status_t
phys_get_next_high_mono_count (u64 *count)
{
return efi_call_phys(__va(runtime->get_next_high_mono_count), __pa(count));
#define STUB_GET_NEXT_HIGH_MONO_COUNT(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_next_high_mono_count (u64 *count) \
{ \
struct ia64_fpreg fr[6]; \
efi_status_t ret; \
\
ia64_save_scratch_fpregs(fr); \
ret = efi_call_##prefix((efi_get_next_high_mono_count_t *) \
__va(runtime->get_next_high_mono_count), adjust_arg(count)); \
ia64_load_scratch_fpregs(fr); \
return ret; \
}
static void
phys_reset_system (int reset_type, efi_status_t status,
unsigned long data_size, efi_char16_t *data)
{
efi_call_phys(__va(runtime->reset_system), status, data_size, __pa(data));
#define STUB_RESET_SYSTEM(prefix, adjust_arg) \
static void \
prefix##_reset_system (int reset_type, efi_status_t status, \
unsigned long data_size, efi_char16_t *data) \
{ \
struct ia64_fpreg fr[6]; \
\
ia64_save_scratch_fpregs(fr); \
efi_call_##prefix((efi_reset_system_t *) __va(runtime->reset_system), \
reset_type, status, data_size, adjust_arg(data)); \
/* should not return, but just in case... */ \
ia64_load_scratch_fpregs(fr); \
}
STUB_GET_TIME(phys, __pa)
STUB_SET_TIME(phys, __pa)
STUB_GET_WAKEUP_TIME(phys, __pa)
STUB_SET_WAKEUP_TIME(phys, __pa)
STUB_GET_VARIABLE(phys, __pa)
STUB_GET_NEXT_VARIABLE(phys, __pa)
STUB_SET_VARIABLE(phys, __pa)
STUB_GET_NEXT_HIGH_MONO_COUNT(phys, __pa)
STUB_RESET_SYSTEM(phys, __pa)
STUB_GET_TIME(virt, )
STUB_SET_TIME(virt, )
STUB_GET_WAKEUP_TIME(virt, )
STUB_SET_WAKEUP_TIME(virt, )
STUB_GET_VARIABLE(virt, )
STUB_GET_NEXT_VARIABLE(virt, )
STUB_SET_VARIABLE(virt, )
STUB_GET_NEXT_HIGH_MONO_COUNT(virt, )
STUB_RESET_SYSTEM(virt, )
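/*
 * Editor's illustration (reconstructed from the macro body, not part of the
 * patch): STUB_GET_TIME(phys, __pa) above expands to roughly
 *
 *	static efi_status_t
 *	phys_get_time (efi_time_t *tm, efi_time_cap_t *tc)
 *	{
 *		struct ia64_fpreg fr[6];
 *		efi_status_t ret;
 *
 *		ia64_save_scratch_fpregs(fr);
 *		ret = efi_call_phys((efi_get_time_t *) __va(runtime->get_time),
 *				    __pa(tm), __pa(tc));
 *		ia64_load_scratch_fpregs(fr);
 *		return ret;
 *	}
 *
 * while the virt variants pass the pointers through unmodified, since by
 * then the runtime services have been remapped into the kernel's virtual
 * address space.
 */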
void
efi_gettimeofday (struct timeval *tv)
{
......@@ -574,18 +664,17 @@ efi_enter_virtual_mode (void)
}
/*
* Now that EFI is in virtual mode, we arrange for EFI functions to be
* called directly:
* Now that EFI is in virtual mode, we call the EFI functions more efficiently:
*/
efi.get_time = __va(runtime->get_time);
efi.set_time = __va(runtime->set_time);
efi.get_wakeup_time = __va(runtime->get_wakeup_time);
efi.set_wakeup_time = __va(runtime->set_wakeup_time);
efi.get_variable = __va(runtime->get_variable);
efi.get_next_variable = __va(runtime->get_next_variable);
efi.set_variable = __va(runtime->set_variable);
efi.get_next_high_mono_count = __va(runtime->get_next_high_mono_count);
efi.reset_system = __va(runtime->reset_system);
efi.get_time = virt_get_time;
efi.set_time = virt_set_time;
efi.get_wakeup_time = virt_get_wakeup_time;
efi.set_wakeup_time = virt_set_wakeup_time;
efi.get_variable = virt_get_variable;
efi.get_next_variable = virt_get_next_variable;
efi.set_variable = virt_set_variable;
efi.get_next_high_mono_count = virt_get_next_high_mono_count;
efi.reset_system = virt_reset_system;
}
/*
......
......@@ -2,7 +2,7 @@
* PAL & SAL emulation.
*
* Copyright (C) 1998-2001 Hewlett-Packard Co
* Copyright (C) 1998-2001 David Mosberger-Tang <davidm@hpl.hp.com>
* David Mosberger-Tang <davidm@hpl.hp.com>
*
* For the HP simulator, this file gets included in boot/bootloader.c.
* For SoftSDV, this file gets included in sys_softsdv.c.
......
......@@ -133,12 +133,8 @@ find_iosapic (unsigned int gsi)
return -1;
}
/*
* Translate GSI number to the corresponding IA-64 interrupt vector. If no
* entry exists, return -1.
*/
int
gsi_to_vector (unsigned int gsi)
static inline int
_gsi_to_vector (unsigned int gsi)
{
struct iosapic_intr_info *info;
......@@ -148,6 +144,26 @@ gsi_to_vector (unsigned int gsi)
return -1;
}
/*
* Translate GSI number to the corresponding IA-64 interrupt vector. If no
* entry exists, return -1.
*/
inline int
gsi_to_vector (unsigned int gsi)
{
return _gsi_to_vector(gsi);
}
int
gsi_to_irq (unsigned int gsi)
{
/*
* XXX fix me: this assumes an identity mapping between IA-64 vector and Linux irq
* numbers...
*/
return _gsi_to_vector(gsi);
}
static void
set_rte (unsigned int vector, unsigned int dest)
{
......@@ -157,7 +173,7 @@ set_rte (unsigned int vector, unsigned int dest)
int rte_index;
char redir;
DBG(KERN_DEBUG"IOSAPIC: routing vector %d to %x\n", vector, dest);
DBG(KERN_DEBUG"IOSAPIC: routing vector %d to 0x%x\n", vector, dest);
rte_index = iosapic_intr_info[vector].rte_index;
if (rte_index < 0)
......
......@@ -116,8 +116,8 @@ ENTRY(vhpt_miss)
;;
(p8) dep r25=r18,r25,2,6
(p8) shr r22=r22,HPAGE_SHIFT-PAGE_SHIFT
;;
#endif
;;
cmp.eq p6,p7=5,r17 // is IFA pointing into region 5?
shr.u r18=r22,PGDIR_SHIFT // get bits 33-63 of the faulting address
;;
......
......@@ -1012,28 +1012,13 @@ ia64_log_prt_oem_data (int header_len, int sect_len, u8 *p_data, prfunc_t prfunc
void
ia64_log_rec_header_print (sal_log_record_header_t *lh, prfunc_t prfunc)
{
char str_buf[32];
sprintf(str_buf, "%2d.%02d",
(lh->revision.major >> 4) * 10 + (lh->revision.major & 0xf),
(lh->revision.minor >> 4) * 10 + (lh->revision.minor & 0xf));
prfunc("+Err Record ID: %d SAL Rev: %s\n", lh->id, str_buf);
sprintf(str_buf, "%02d/%02d/%04d/ %02d:%02d:%02d",
(lh->timestamp.slh_month >> 4) * 10 +
(lh->timestamp.slh_month & 0xf),
(lh->timestamp.slh_day >> 4) * 10 +
(lh->timestamp.slh_day & 0xf),
(lh->timestamp.slh_century >> 4) * 1000 +
(lh->timestamp.slh_century & 0xf) * 100 +
(lh->timestamp.slh_year >> 4) * 10 +
(lh->timestamp.slh_year & 0xf),
(lh->timestamp.slh_hour >> 4) * 10 +
(lh->timestamp.slh_hour & 0xf),
(lh->timestamp.slh_minute >> 4) * 10 +
(lh->timestamp.slh_minute & 0xf),
(lh->timestamp.slh_second >> 4) * 10 +
(lh->timestamp.slh_second & 0xf));
prfunc("+Time: %s Severity %d\n", str_buf, lh->severity);
prfunc("+Err Record ID: %d SAL Rev: %2x.%02x\n", lh->id,
lh->revision.major, lh->revision.minor);
prfunc("+Time: %02x/%02x/%02x%02x %02x:%02x:%02x Severity %d\n",
lh->timestamp.slh_month, lh->timestamp.slh_day,
lh->timestamp.slh_century, lh->timestamp.slh_year,
lh->timestamp.slh_hour, lh->timestamp.slh_minute,
lh->timestamp.slh_second, lh->severity);
}
/*
......
......@@ -63,6 +63,26 @@
* Misc macros and definitions
*/
#define PMU_FIRST_COUNTER 4
#define PMU_MAX_PMCS 256
#define PMU_MAX_PMDS 256
/*
* type of a PMU register (bitmask).
* bitmask structure:
* bit0 : register implemented
* bit1 : end marker
* bit2-3 : reserved
* bit4-7 : register type
* bit8-31: reserved
*/
#define PFM_REG_IMPL 0x1 /* register implemented */
#define PFM_REG_END 0x2 /* end marker */
#define PFM_REG_MONITOR (0x1<<4|PFM_REG_IMPL) /* a PMC with a pmc.pm field only */
#define PFM_REG_COUNTING (0x2<<4|PFM_REG_IMPL) /* a PMC with a pmc.pm AND pmc.oi, a PMD used as a counter */
#define PFM_REG_CONTROL (0x3<<4|PFM_REG_IMPL) /* PMU control register */
#define PFM_REG_CONFIG (0x4<<4|PFM_REG_IMPL) /* refine configuration */
#define PFM_REG_BUFFER (0x5<<4|PFM_REG_IMPL) /* PMD used as buffer */
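/*
 * Editor's illustration of the bitmask layout described above:
 * PFM_REG_COUNTING evaluates to (0x2<<4)|0x1 = 0x21, so the implemented
 * bit can be tested independently of the register type:
 *
 *	unsigned int type = PFM_REG_COUNTING;		// 0x21
 *	int implemented = (type & PFM_REG_IMPL) != 0;	// bit 0
 *	int is_counter  = (type == PFM_REG_COUNTING);	// exact-type test
 */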
#define PFM_IS_DISABLED() pmu_conf.pfm_is_disabled
......@@ -70,13 +90,18 @@
#define PFM_FL_INHERIT_MASK (PFM_FL_INHERIT_NONE|PFM_FL_INHERIT_ONCE|PFM_FL_INHERIT_ALL)
/* i is assumed unsigned */
#define PMC_IS_IMPL(i) (i<pmu_conf.num_pmcs && pmu_conf.impl_regs[i>>6] & (1UL<< (i) %64))
#define PMD_IS_IMPL(i) (i<pmu_conf.num_pmds && pmu_conf.impl_regs[4+(i>>6)] & (1UL<<(i) % 64))
#define PMC_IS_IMPL(i) (i< PMU_MAX_PMCS && (pmu_conf.pmc_desc[i].type & PFM_REG_IMPL))
#define PMD_IS_IMPL(i) (i< PMU_MAX_PMDS && (pmu_conf.pmd_desc[i].type & PFM_REG_IMPL))
/* XXX: these three assume that register i is implemented */
#define PMD_IS_COUNTING(i) (pmu_conf.pmd_desc[i].type == PFM_REG_COUNTING)
#define PMC_IS_COUNTING(i) (pmu_conf.pmc_desc[i].type == PFM_REG_COUNTING)
#define PMC_IS_MONITOR(i) (pmu_conf.pmc_desc[i].type == PFM_REG_MONITOR)
#define PMC_DFL_VAL(i) pmu_conf.pmc_desc[i].default_value
#define PMC_RSVD_MASK(i) pmu_conf.pmc_desc[i].reserved_mask
#define PMD_PMD_DEP(i) pmu_conf.pmd_desc[i].dep_pmd[0]
#define PMC_PMD_DEP(i) pmu_conf.pmc_desc[i].dep_pmd[0]
/* k is assumed unsigned */
#define IBR_IS_IMPL(k) (k<pmu_conf.num_ibrs)
......@@ -175,19 +200,6 @@ typedef struct _pfm_smpl_buffer_desc {
#define LOCK_PSB(p) spin_lock(&(p)->psb_lock)
#define UNLOCK_PSB(p) spin_unlock(&(p)->psb_lock)
/*
* The possible type of a PMU register
*/
typedef enum {
PFM_REG_NOTIMPL, /* not implemented */
PFM_REG_NONE, /* end marker */
PFM_REG_MONITOR, /* a PMC with a pmc.pm field only */
PFM_REG_COUNTING,/* a PMC with a pmc.pm AND pmc.oi, a PMD used as a counter */
PFM_REG_CONTROL, /* PMU control register */
PFM_REG_CONFIG, /* refine configuration */
PFM_REG_BUFFER /* PMD used as buffer */
} pfm_pmu_reg_type_t;
/*
* 64-bit software counter structure
*/
......@@ -283,13 +295,16 @@ typedef struct {
* dep_pmc[]: a bitmask of dependent PMC registers
*/
typedef struct {
pfm_pmu_reg_type_t type;
unsigned int type;
int pm_pos;
unsigned long default_value; /* power-on default value */
unsigned long reserved_mask; /* bitmask of reserved bits */
int (*read_check)(struct task_struct *task, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
int (*write_check)(struct task_struct *task, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
unsigned long dep_pmd[4];
unsigned long dep_pmc[4];
} pfm_reg_desc_t;
/* assume cnum is a valid monitor */
#define PMC_PM(cnum, val) (((val) >> (pmu_conf.pmc_desc[cnum].pm_pos)) & 0x1)
#define PMC_WR_FUNC(cnum) (pmu_conf.pmc_desc[cnum].write_check)
......@@ -401,8 +416,6 @@ static ctl_table pfm_sysctl_root[] = {
};
static struct ctl_table_header *pfm_sysctl_header;
static unsigned long reset_pmcs[IA64_NUM_PMC_REGS]; /* contains PAL reset values for PMCS */
static void pfm_vm_close(struct vm_area_struct * area);
static struct vm_operations_struct pfm_vm_ops={
......@@ -422,7 +435,7 @@ static struct {
/*
* forward declarations
*/
static void ia64_reset_pmu(struct task_struct *);
static void pfm_reset_pmu(struct task_struct *);
#ifdef CONFIG_SMP
static void pfm_fetch_regs(int cpu, struct task_struct *task, pfm_context_t *ctx);
#endif
......@@ -2244,7 +2257,7 @@ pfm_enable(struct task_struct *task, pfm_context_t *ctx, void *arg, int count,
pfm_lazy_save_regs(PMU_OWNER());
/* reset all registers to stable quiet state */
ia64_reset_pmu(task);
pfm_reset_pmu(task);
/* make sure nothing starts */
if (ctx->ctx_fl_system) {
......@@ -2307,7 +2320,7 @@ pfm_get_pmc_reset(struct task_struct *task, pfm_context_t *ctx, void *arg, int c
if (!PMC_IS_IMPL(cnum)) goto abort_mission;
tmp.reg_value = reset_pmcs[cnum];
tmp.reg_value = PMC_DFL_VAL(cnum);
PFM_REG_RETFLAG_SET(tmp.reg_flags, 0);
......@@ -2998,6 +3011,8 @@ perfmon_proc_info(char *page)
p += sprintf(p, "CPU%-2d recorded samples : %lu\n", i, pfm_stats[i].pfm_recorded_samples_count);
p += sprintf(p, "CPU%-2d smpl buffer full : %lu\n", i, pfm_stats[i].pfm_full_smpl_buffer_count);
p += sprintf(p, "CPU%-2d owner : %d\n", i, pmu_owners[i].owner ? pmu_owners[i].owner->pid: -1);
p += sprintf(p, "CPU%-2d syst_wide : %d\n", i, per_cpu(pfm_syst_wide, i));
p += sprintf(p, "CPU%-2d dcr_pp : %d\n", i, per_cpu(pfm_dcr_pp, i));
}
LOCK_PFS();
......@@ -3398,11 +3413,10 @@ pfm_load_regs (struct task_struct *task)
* XXX: make this routine able to work with non current context
*/
static void
ia64_reset_pmu(struct task_struct *task)
pfm_reset_pmu(struct task_struct *task)
{
struct thread_struct *t = &task->thread;
pfm_context_t *ctx = t->pfm_context;
unsigned long mask;
int i;
if (task != current) {
......@@ -3415,30 +3429,27 @@ ia64_reset_pmu(struct task_struct *task)
/*
* install reset values for PMC. We skip PMC0 (done above)
* XXX: good up to 64 PMCs
*/
mask = pmu_conf.impl_regs[0] >> 1;
for(i=1; mask; mask>>=1, i++) {
if (mask & 0x1) {
ia64_set_pmc(i, reset_pmcs[i]);
for (i=1; (pmu_conf.pmc_desc[i].type & PFM_REG_END) == 0; i++) {
if ((pmu_conf.pmc_desc[i].type & PFM_REG_IMPL) == 0) continue;
ia64_set_pmc(i, PMC_DFL_VAL(i));
/*
* When restoring context, we must restore ALL pmcs, even the ones
* that the task does not use to avoid leaks and possibly corruption
* of the session because of configuration conflicts. So here, we
* initialize the entire set used in the context switch restore routine.
*/
t->pmc[i] = reset_pmcs[i];
DBprintk((" pmc[%d]=0x%lx\n", i, reset_pmcs[i]));
}
t->pmc[i] = PMC_DFL_VAL(i);
DBprintk(("pmc[%d]=0x%lx\n", i, t->pmc[i]));
}
/*
* clear reset values for PMD.
* XXX: good up to 64 PMDs. Assumes that zero is a valid value.
*/
mask = pmu_conf.impl_regs[4];
for(i=0; mask; mask>>=1, i++) {
if (mask & 0x1) ia64_set_pmd(i, 0UL);
for (i=0; (pmu_conf.pmd_desc[i].type & PFM_REG_END) == 0; i++) {
if ((pmu_conf.pmd_desc[i].type & PFM_REG_IMPL) == 0) continue;
ia64_set_pmd(i, 0UL);
t->pmd[i] = 0UL;
}
......@@ -4119,23 +4130,6 @@ static struct irqaction perfmon_irqaction = {
};
static void
pfm_pmu_snapshot(void)
{
int i;
for (i=0; i < IA64_NUM_PMC_REGS; i++) {
if (i >= pmu_conf.num_pmcs) break;
if (PMC_IS_IMPL(i)) reset_pmcs[i] = ia64_get_pmc(i);
}
#ifdef CONFIG_MCKINLEY
/*
* set the 'stupid' enable bit to power the PMU!
*/
reset_pmcs[4] |= 1UL << 23;
#endif
}
/*
* perfmon initialization routine, called from the initcall() table
*/
......@@ -4160,6 +4154,9 @@ perfmon_init (void)
}
pmu_conf.perf_ovfl_val = (1UL << pm_info.pal_perf_mon_info_s.width) - 1;
/*
* XXX: use the pfm_*_desc tables instead and simply verify with PAL
*/
pmu_conf.max_counters = pm_info.pal_perf_mon_info_s.generic;
pmu_conf.num_pmcs = find_num_pm_regs(pmu_conf.impl_regs);
pmu_conf.num_pmds = find_num_pm_regs(&pmu_conf.impl_regs[4]);
......@@ -4183,24 +4180,11 @@ perfmon_init (void)
pmu_conf.num_ibrs <<=1;
pmu_conf.num_dbrs <<=1;
/*
* take a snapshot of all PMU registers. PAL is supposed
* to configure them with stable/safe values, i.e., not
* capturing anything.
* We take a snapshot now, before we make any modifications. This
* will become our master copy. Then we will reuse the snapshot
* to reset the PMU in pfm_enable(). Using this technique, perfmon
* does NOT have to know about the specific values to program for
* the PMC/PMD. The safe values may be different from one CPU model to
* the other.
*/
pfm_pmu_snapshot();
/*
* setup the register configuration descriptions for the CPU
*/
pmu_conf.pmc_desc = pmc_desc;
pmu_conf.pmd_desc = pmd_desc;
pmu_conf.pmc_desc = pfm_pmc_desc;
pmu_conf.pmd_desc = pfm_pmd_desc;
/* we are all set */
pmu_conf.pfm_is_disabled = 0;
......@@ -4222,11 +4206,34 @@ __initcall(perfmon_init);
void
perfmon_init_percpu (void)
{
int i;
if (smp_processor_id() == 0)
register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction);
ia64_set_pmv(IA64_PERFMON_VECTOR);
ia64_srlz_d();
/*
* We first initialize the PMU to a stable state.
* The values may have been changed from their power-up
* values by software executed before the kernel took over.
*
* At this point, pmu_conf has not yet been initialized
*
* On McKinley, this code is ineffective until PMC4 is initialized.
*/
for (i=1; (pfm_pmc_desc[i].type & PFM_REG_END) == 0; i++) {
if ((pfm_pmc_desc[i].type & PFM_REG_IMPL) == 0) continue;
ia64_set_pmc(i, pfm_pmc_desc[i].default_value);
}
for (i=0; (pfm_pmd_desc[i].type & PFM_REG_END) == 0; i++) {
if ((pfm_pmd_desc[i].type & PFM_REG_IMPL) == 0) continue;
ia64_set_pmd(i, 0UL);
}
ia64_set_pmc(0,1UL);
ia64_srlz_d();
}
#else /* !CONFIG_PERFMON */
......
#define RDEP(x) (1UL<<(x))
#if defined(CONFIG_ITANIUM) || defined(CONFIG_MCKINLEY)
#error "This file should only be used when CONFIG_ITANIUM and CONFIG_MCKINLEY are not defined"
#endif
static pfm_reg_desc_t pmc_desc[PMU_MAX_PMCS]={
/* pmc0 */ { PFM_REG_CONTROL , 0, 0x1UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc1 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc2 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc3 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc4 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(4),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc5 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(5),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc6 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(6),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc7 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(7),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
{ PFM_REG_END , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}}, /* end marker */
};
static pfm_reg_desc_t pmd_desc[PMU_MAX_PMDS]={
/* pmd0 */ { PFM_REG_NOTIMPL , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}},
/* pmd1 */ { PFM_REG_NOTIMPL , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}},
/* pmd2 */ { PFM_REG_NOTIMPL , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}},
/* pmd3 */ { PFM_REG_NOTIMPL , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}},
/* pmd4 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(4),0UL, 0UL, 0UL}},
/* pmd5 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(5),0UL, 0UL, 0UL}},
/* pmd6 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(6),0UL, 0UL, 0UL}},
/* pmd7 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(7),0UL, 0UL, 0UL}},
{ PFM_REG_END , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}}, /* end marker */
};
......@@ -15,44 +15,44 @@
static int pfm_ita_pmc_check(struct task_struct *task, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
static int pfm_write_ibr_dbr(int mode, struct task_struct *task, void *arg, int count, struct pt_regs *regs);
static pfm_reg_desc_t pmc_desc[256]={
/* pmc0 */ { PFM_REG_CONTROL, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc1 */ { PFM_REG_CONTROL, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc2 */ { PFM_REG_CONTROL, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc3 */ { PFM_REG_CONTROL, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc4 */ { PFM_REG_COUNTING, 6, NULL, NULL, {RDEP(4),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc5 */ { PFM_REG_COUNTING, 6, NULL, NULL, {RDEP(5),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc6 */ { PFM_REG_COUNTING, 6, NULL, NULL, {RDEP(6),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc7 */ { PFM_REG_COUNTING, 6, NULL, NULL, {RDEP(7),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc8 */ { PFM_REG_CONFIG, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc9 */ { PFM_REG_CONFIG, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc10 */ { PFM_REG_MONITOR, 6, NULL, NULL, {RDEP(0)|RDEP(1),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc11 */ { PFM_REG_MONITOR, 6, NULL, pfm_ita_pmc_check, {RDEP(2)|RDEP(3)|RDEP(17),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc12 */ { PFM_REG_MONITOR, 6, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc13 */ { PFM_REG_CONFIG, 0, NULL, pfm_ita_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
{ PFM_REG_NONE, 0, NULL, NULL, {0,}, {0,}}, /* end marker */
static pfm_reg_desc_t pfm_pmc_desc[PMU_MAX_PMCS]={
/* pmc0 */ { PFM_REG_CONTROL , 0, 0x1UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc1 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc2 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc3 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc4 */ { PFM_REG_COUNTING, 6, 0x0UL, -1UL, NULL, NULL, {RDEP(4),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc5 */ { PFM_REG_COUNTING, 6, 0x0UL, -1UL, NULL, NULL, {RDEP(5),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc6 */ { PFM_REG_COUNTING, 6, 0x0UL, -1UL, NULL, NULL, {RDEP(6),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc7 */ { PFM_REG_COUNTING, 6, 0x0UL, -1UL, NULL, NULL, {RDEP(7),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc8 */ { PFM_REG_CONFIG , 0, 0xf00000003ffffff8UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc9 */ { PFM_REG_CONFIG , 0, 0xf00000003ffffff8UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc10 */ { PFM_REG_MONITOR , 6, 0x0UL, -1UL, NULL, NULL, {RDEP(0)|RDEP(1),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc11 */ { PFM_REG_MONITOR , 6, 0x0000000010000000UL, -1UL, NULL, pfm_ita_pmc_check, {RDEP(2)|RDEP(3)|RDEP(17),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc12 */ { PFM_REG_MONITOR , 6, 0x0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc13 */ { PFM_REG_CONFIG , 0, 0x0003ffff00000001UL, -1UL, NULL, pfm_ita_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
{ PFM_REG_END , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}}, /* end marker */
};
static pfm_reg_desc_t pmd_desc[256]={
/* pmd0 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(1),0UL, 0UL, 0UL}, {RDEP(10),0UL, 0UL, 0UL}},
/* pmd1 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(0),0UL, 0UL, 0UL}, {RDEP(10),0UL, 0UL, 0UL}},
/* pmd2 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(3)|RDEP(17),0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
/* pmd3 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(2)|RDEP(17),0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
/* pmd4 */ { PFM_REG_COUNTING, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(4),0UL, 0UL, 0UL}},
/* pmd5 */ { PFM_REG_COUNTING, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(5),0UL, 0UL, 0UL}},
/* pmd6 */ { PFM_REG_COUNTING, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(6),0UL, 0UL, 0UL}},
/* pmd7 */ { PFM_REG_COUNTING, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(7),0UL, 0UL, 0UL}},
/* pmd8 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd9 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(8)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd10 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd11 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd12 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd13 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd14 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd15 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd16 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd17 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(2)|RDEP(3),0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
{ PFM_REG_NONE, 0, NULL, NULL, {0,}, {0,}}, /* end marker */
static pfm_reg_desc_t pfm_pmd_desc[PMU_MAX_PMDS]={
/* pmd0 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(1),0UL, 0UL, 0UL}, {RDEP(10),0UL, 0UL, 0UL}},
/* pmd1 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(0),0UL, 0UL, 0UL}, {RDEP(10),0UL, 0UL, 0UL}},
/* pmd2 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(3)|RDEP(17),0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
/* pmd3 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(2)|RDEP(17),0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
/* pmd4 */ { PFM_REG_COUNTING, 0, 0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(4),0UL, 0UL, 0UL}},
/* pmd5 */ { PFM_REG_COUNTING, 0, 0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(5),0UL, 0UL, 0UL}},
/* pmd6 */ { PFM_REG_COUNTING, 0, 0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(6),0UL, 0UL, 0UL}},
/* pmd7 */ { PFM_REG_COUNTING, 0, 0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(7),0UL, 0UL, 0UL}},
/* pmd8 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd9 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd10 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd11 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd12 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd13 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd14 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd15 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd16 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd17 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(2)|RDEP(3),0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
{ PFM_REG_END , 0, 0UL, -1UL, NULL, NULL, {0,}, {0,}}, /* end marker */
};
static int
......
......@@ -12,6 +12,7 @@
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/sched.h>
#include <linux/slab.h>
......@@ -45,8 +46,9 @@ static void
do_show_stack (struct unw_frame_info *info, void *arg)
{
unsigned long ip, sp, bsp;
char buf[80]; /* don't make it so big that it overflows the stack! */
printk("\nCall Trace: ");
printk("\nCall Trace:\n");
do {
unw_get_ip(info, &ip);
if (ip == 0)
......@@ -54,7 +56,9 @@ do_show_stack (struct unw_frame_info *info, void *arg)
unw_get_sp(info, &sp);
unw_get_bsp(info, &bsp);
printk("[<%016lx>] sp=0x%016lx bsp=0x%016lx\n", ip, sp, bsp);
snprintf(buf, sizeof(buf), " [<%016lx>] %%s sp=0x%016lx bsp=0x%016lx\n",
ip, sp, bsp);
print_symbol(buf, ip);
} while (unw_unwind(info) >= 0);
}
......@@ -94,6 +98,7 @@ show_regs (struct pt_regs *regs)
printk("\nPid: %d, comm: %20s\n", current->pid, current->comm);
printk("psr : %016lx ifs : %016lx ip : [<%016lx>] %s\n",
regs->cr_ipsr, regs->cr_ifs, ip, print_tainted());
print_symbol("ip is at %s\n", ip);
printk("unat: %016lx pfs : %016lx rsc : %016lx\n",
regs->ar_unat, regs->ar_pfs, regs->ar_rsc);
printk("rnat: %016lx bsps: %016lx pr : %016lx\n",
......@@ -199,10 +204,8 @@ ia64_save_extra (struct task_struct *task)
if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0)
pfm_save_regs(task);
# ifdef CONFIG_SMP
if (__get_cpu_var(pfm_syst_wide))
pfm_syst_wide_update_task(task, 0);
# endif
#endif
#ifdef CONFIG_IA32_SUPPORT
......@@ -221,9 +224,8 @@ ia64_load_extra (struct task_struct *task)
if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0)
pfm_load_regs(task);
# ifdef CONFIG_SMP
if (__get_cpu_var(pfm_syst_wide)) pfm_syst_wide_update_task(task, 1);
# endif
if (__get_cpu_var(pfm_syst_wide))
pfm_syst_wide_update_task(task, 1);
#endif
#ifdef CONFIG_IA32_SUPPORT
......
......@@ -34,6 +34,7 @@
#include <asm/ia32.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/machvec.h>
#include <asm/processor.h>
#include <asm/sal.h>
......@@ -49,9 +50,6 @@
# error "struct cpuinfo_ia64 too big!"
#endif
#define MIN(a,b) ((a) < (b) ? (a) : (b))
#define MAX(a,b) ((a) > (b) ? (a) : (b))
extern char _end;
#ifdef CONFIG_SMP
......@@ -95,6 +93,10 @@ struct rsvd_region {
static struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1];
static int num_rsvd_regions;
#define IGNORE_PFN0 1 /* XXX fix me: ignore pfn 0 until TLB miss handler is updated... */
#ifndef CONFIG_DISCONTIGMEM
static unsigned long bootmap_start; /* physical address where the bootmem map is located */
static int
......@@ -108,17 +110,63 @@ find_max_pfn (unsigned long start, unsigned long end, void *arg)
return 0;
}
#define IGNORE_PFN0 1 /* XXX fix me: ignore pfn 0 until TLB miss handler is updated... */
#else /* CONFIG_DISCONTIGMEM */
/*
* Free available memory based on the primitive map created from
* the boot parameters. This routine does not assume the incoming
* segments are sorted.
* efi_memmap_walk() knows nothing about layout of memory across nodes. Find
* out to which node a block of memory belongs. Ignore memory that we cannot
* identify, and split blocks that run across multiple nodes.
*
* Take this opportunity to round the start address up and the end address
* down to page boundaries.
*/
static int
free_available_memory (unsigned long start, unsigned long end, void *arg)
void
call_pernode_memory (unsigned long start, unsigned long end, void *arg)
{
unsigned long rs, re;
void (*func)(unsigned long, unsigned long, int, int);
int i;
start = PAGE_ALIGN(start);
end &= PAGE_MASK;
if (start >= end)
return;
func = arg;
if (!num_memblks) {
/*
* This machine doesn't have SRAT, so call func with
* nid=0, bank=0.
*/
if (start < end)
(*func)(start, end - start, 0, 0);
return;
}
for (i = 0; i < num_memblks; i++) {
rs = max(start, node_memblk[i].start_paddr);
re = min(end, node_memblk[i].start_paddr+node_memblk[i].size);
if (rs < re)
(*func)(rs, re-rs, node_memblk[i].nid,
node_memblk[i].bank);
}
}
#endif /* CONFIG_DISCONTIGMEM */
/*
* Filter incoming memory segments based on the primitive map created from the boot
* parameters. Segments contained in the map are removed from the memory ranges. A
* caller-specified function is called with the memory ranges that remain after filtering.
* This routine does not assume the incoming segments are sorted.
*/
int
filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
{
unsigned long range_start, range_end, prev_start;
void (*func)(unsigned long, unsigned long);
int i;
#if IGNORE_PFN0
......@@ -132,13 +180,18 @@ free_available_memory (unsigned long start, unsigned long end, void *arg)
* lowest possible address(walker uses virtual)
*/
prev_start = PAGE_OFFSET;
func = arg;
for (i = 0; i < num_rsvd_regions; ++i) {
range_start = MAX(start, prev_start);
range_end = MIN(end, rsvd_region[i].start);
range_start = max(start, prev_start);
range_end = min(end, rsvd_region[i].start);
if (range_start < range_end)
free_bootmem(__pa(range_start), range_end - range_start);
#ifdef CONFIG_DISCONTIGMEM
call_pernode_memory(__pa(range_start), __pa(range_end), func);
#else
(*func)(__pa(range_start), range_end - range_start);
#endif
/* nothing more available in this segment */
if (range_end == end) return 0;
......@@ -150,6 +203,7 @@ free_available_memory (unsigned long start, unsigned long end, void *arg)
}
#ifndef CONFIG_DISCONTIGMEM
/*
* Find a place to put the bootmap and return its starting address in bootmap_start.
* This address must be page-aligned.
......@@ -171,8 +225,8 @@ find_bootmap_location (unsigned long start, unsigned long end, void *arg)
free_start = PAGE_OFFSET;
for (i = 0; i < num_rsvd_regions; i++) {
range_start = MAX(start, free_start);
range_end = MIN(end, rsvd_region[i].start & PAGE_MASK);
range_start = max(start, free_start);
range_end = min(end, rsvd_region[i].start & PAGE_MASK);
if (range_end <= range_start) continue; /* skip over empty range */
......@@ -188,6 +242,7 @@ find_bootmap_location (unsigned long start, unsigned long end, void *arg)
}
return 0;
}
#endif /* !CONFIG_DISCONTIGMEM */
static void
sort_regions (struct rsvd_region *rsvd_region, int max)
......@@ -252,6 +307,15 @@ find_memory (void)
sort_regions(rsvd_region, num_rsvd_regions);
#ifdef CONFIG_DISCONTIGMEM
{
extern void discontig_mem_init (void);
bootmap_size = max_pfn = 0; /* stop gcc warnings */
discontig_mem_init();
}
#else /* !CONFIG_DISCONTIGMEM */
/* first find highest page frame number */
max_pfn = 0;
efi_memmap_walk(find_max_pfn, &max_pfn);
......@@ -268,8 +332,9 @@ find_memory (void)
bootmap_size = init_bootmem(bootmap_start >> PAGE_SHIFT, max_pfn);
/* Free all available memory, then mark bootmem-map as being in use. */
efi_memmap_walk(free_available_memory, 0);
efi_memmap_walk(filter_rsvd_memory, free_bootmem);
reserve_bootmem(bootmap_start, bootmap_size);
#endif /* !CONFIG_DISCONTIGMEM */
#ifdef CONFIG_BLK_DEV_INITRD
if (ia64_boot_param->initrd_start) {
......@@ -296,6 +361,16 @@ setup_arch (char **cmdline_p)
efi_init();
#ifdef CONFIG_ACPI_BOOT
/* Initialize the ACPI boot-time table parser */
acpi_table_init(*cmdline_p);
#ifdef CONFIG_ACPI_NUMA
acpi_numa_init();
#endif
#endif /* CONFIG_ACPI_BOOT */
find_memory();
#if 0
......@@ -530,6 +605,7 @@ setup_per_cpu_areas (void)
/* start_kernel() requires this... */
}
/*
* cpu_init() initializes state that is per-CPU. This function acts
* as a 'CPU state barrier', nothing should get across.
......@@ -542,32 +618,40 @@ cpu_init (void)
unsigned long num_phys_stacked;
pal_vm_info_2_u_t vmi;
unsigned int max_ctx;
struct cpuinfo_ia64 *my_cpu_info;
void *my_cpu_data;
struct cpuinfo_ia64 *cpu_info;
void *cpu_data;
#ifdef CONFIG_SMP
extern char __per_cpu_end[];
int cpu = smp_processor_id();
int cpu;
if (__per_cpu_end - __per_cpu_start > PAGE_SIZE)
panic("Per-cpu data area too big! (%Zu > %Zu)",
__per_cpu_end - __per_cpu_start, PAGE_SIZE);
/*
* On the BSP, the page allocator isn't initialized by the time we get here. On
* the APs, the bootmem allocator is no longer available...
* get_free_pages() cannot be used before cpu_init() is done. The BSP
* allocates NR_CPUS pages up front so that the APs need not call get_zeroed_page().
*/
if (cpu == 0)
my_cpu_data = alloc_bootmem_pages(__per_cpu_end - __per_cpu_start);
else
my_cpu_data = (void *) get_zeroed_page(GFP_KERNEL);
memcpy(my_cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
__per_cpu_offset[cpu] = (char *) my_cpu_data - __per_cpu_start;
my_cpu_info = my_cpu_data + ((char *) &__get_cpu_var(cpu_info) - __per_cpu_start);
#else
my_cpu_data = __phys_per_cpu_start;
if (smp_processor_id() == 0) {
cpu_data = (unsigned long)alloc_bootmem_pages(PAGE_SIZE * NR_CPUS);
for (cpu = 0; cpu < NR_CPUS; cpu++) {
memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
__per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
cpu_data += PAGE_SIZE;
}
}
cpu_data = __per_cpu_start + __per_cpu_offset[smp_processor_id()];
#else /* !CONFIG_SMP */
cpu_data = __phys_per_cpu_start;
#endif /* !CONFIG_SMP */
cpu_info = cpu_data + ((char *) &__get_cpu_var(cpu_info) - __per_cpu_start);
#ifdef CONFIG_NUMA
cpu_info->node_data = get_node_data_ptr();
cpu_info->nodeid = boot_get_local_nodeid();
#endif
my_cpu_info = my_cpu_data + ((char *) &__get_cpu_var(cpu_info) - __per_cpu_start);
/*
* We can't pass "local_cpu_data" to identify_cpu() because we haven't called
......@@ -575,14 +659,14 @@ cpu_init (void)
* depends on the data returned by identify_cpu(). We break the dependency by
* accessing cpu_data() the old way, through identity mapped space.
*/
identify_cpu(my_cpu_info);
identify_cpu(cpu_info);
#ifdef CONFIG_MCKINLEY
{
#define FEATURE_SET 16
# define FEATURE_SET 16
struct ia64_pal_retval iprv;
if (my_cpu_info->family == 0x1f) {
if (cpu_info->family == 0x1f) {
PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0);
if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80))
PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES,
......@@ -613,7 +697,7 @@ cpu_init (void)
if (current->mm)
BUG();
ia64_mmu_init(my_cpu_data);
ia64_mmu_init(cpu_data);
#ifdef CONFIG_IA32_SUPPORT
/* initialize global ia32 state - CR0 and CR4 */
......
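The cpu_init() change above records, for every CPU, the displacement of that CPU's private copy of the per-CPU area in __per_cpu_offset[]; a per-CPU variable is then reached by adding the displacement to the variable's link-time address. A minimal sketch of that addressing scheme, with illustrative names only (it relies on the GCC typeof extension, as kernel code does, and on the kernel's pragmatic pointer-difference trick):

#include <stdio.h>
#include <string.h>
#include <stdlib.h>

#define NCPUS 4

/* prototype per-CPU area: the master copy of all per-CPU variables */
static char proto_area[64];
static long per_cpu_offset[NCPUS];

/* a "per-CPU variable": its address inside the prototype area */
static int *proto_counter = (int *) proto_area;

#define per_cpu_ptr(ptr, cpu) \
	((typeof(ptr)) ((char *) (ptr) + per_cpu_offset[cpu]))

int main (void)
{
	int cpu;

	*proto_counter = 42;			/* initial value in the prototype */
	for (cpu = 0; cpu < NCPUS; cpu++) {
		char *copy = malloc(sizeof(proto_area));

		memcpy(copy, proto_area, sizeof(proto_area));
		/* displacement of this CPU's copy, as in cpu_init() above
		   (cross-object pointer subtraction: a kernel idiom, not strict ISO C) */
		per_cpu_offset[cpu] = copy - proto_area;
	}
	*per_cpu_ptr(proto_counter, 2) = 7;	/* touch CPU 2's private copy */
	printf("cpu0=%d cpu2=%d\n",
	       *per_cpu_ptr(proto_counter, 0), *per_cpu_ptr(proto_counter, 2));
	return 0;
}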
......@@ -41,6 +41,16 @@
# define GET_SIGSET(k,u) __get_user((k)->sig[0], &(u)->sig[0])
#endif
register double f16 asm ("f16"); register double f17 asm ("f17");
register double f18 asm ("f18"); register double f19 asm ("f19");
register double f20 asm ("f20"); register double f21 asm ("f21");
register double f22 asm ("f22"); register double f23 asm ("f23");
register double f24 asm ("f24"); register double f25 asm ("f25");
register double f26 asm ("f26"); register double f27 asm ("f27");
register double f28 asm ("f28"); register double f29 asm ("f29");
register double f30 asm ("f30"); register double f31 asm ("f31");
long
ia64_rt_sigsuspend (sigset_t *uset, size_t sigsetsize, struct sigscratch *scr)
{
......@@ -58,13 +68,13 @@ ia64_rt_sigsuspend (sigset_t *uset, size_t sigsetsize, struct sigscratch *scr)
sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
{
oldset = current->blocked;
current->blocked = set;
recalc_sigpending();
}
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
/*
* The return below usually returns to the signal handler. We need to
......@@ -264,12 +274,12 @@ ia64_rt_sigreturn (struct sigscratch *scr)
sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
{
current->blocked = set;
recalc_sigpending();
}
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
if (restore_sigcontext(sc, scr))
goto give_sigsegv;
......@@ -458,13 +468,13 @@ handle_signal (unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigse
ka->sa.sa_handler = SIG_DFL;
if (!(ka->sa.sa_flags & SA_NODEFER)) {
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
{
sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
sigaddset(&current->blocked, sig);
recalc_sigpending();
}
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
}
return 1;
}
......
......@@ -16,6 +16,7 @@
#include <linux/config.h>
#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/delay.h>
#include <linux/init.h>
......@@ -427,6 +428,37 @@ smp_build_cpu_map (void)
}
}
#ifdef CONFIG_NUMA
char cpu_to_node_map[NR_CPUS] __cacheline_aligned;
/*
* Build cpu to node mapping.
*/
void __init
build_cpu_to_node_map (void)
{
int cpu, i;
for(cpu = 0; cpu < NR_CPUS; ++cpu) {
/*
* All Itanium NUMA platforms I know use ACPI, so maybe we
* can drop this ifdef completely. [EF]
*/
#ifdef CONFIG_ACPI_NUMA
for (i = 0; i < NR_CPUS; ++i)
if (cpu_physical_id(cpu) == node_cpuid[i].phys_id) {
cpu_to_node_map[cpu] = node_cpuid[i].nid;
break;
}
#else
# error Fixme: Dunno how to build CPU-to-node map.
#endif
}
}
#endif /* CONFIG_NUMA */
/*
* Cycle through the APs sending Wakeup IPIs to boot each.
*/
......
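build_cpu_to_node_map() above is a quadratic match of each CPU's physical id against the ACPI-provided node_cpuid[] table, which is acceptable for a one-time boot pass. A standalone sketch with a made-up four-CPU, two-node table:

#include <stdio.h>

#define NCPUS 4

/* hypothetical firmware-provided table, as ACPI SRAT parsing would fill it */
static struct { int phys_id; int nid; } node_cpuid[NCPUS] =
	{ { 0x10, 0 }, { 0x11, 0 }, { 0x20, 1 }, { 0x21, 1 } };

static int cpu_phys_id[NCPUS] = { 0x10, 0x11, 0x20, 0x21 };
static char cpu_to_node[NCPUS];

int main (void)
{
	int cpu, i;

	/* same quadratic matching as build_cpu_to_node_map() */
	for (cpu = 0; cpu < NCPUS; cpu++)
		for (i = 0; i < NCPUS; i++)
			if (cpu_phys_id[cpu] == node_cpuid[i].phys_id) {
				cpu_to_node[cpu] = node_cpuid[i].nid;
				break;
			}
	printf("cpu 2 is on node %d\n", cpu_to_node[2]);
	return 0;
}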
......@@ -282,18 +282,20 @@ sys_free_hugepages (unsigned long addr)
extern int free_hugepages(struct vm_area_struct *);
int retval;
down_write(&mm->mmap_sem);
{
vma = find_vma(mm, addr);
if (!vma || !is_vm_hugetlb_page(vma) || (vma->vm_start != addr))
return -EINVAL;
retval = -EINVAL;
goto out;
down_write(&mm->mmap_sem);
{
spin_lock(&mm->page_table_lock);
{
retval = free_hugepages(vma);
}
spin_unlock(&mm->page_table_lock);
}
out:
up_write(&mm->mmap_sem);
return retval;
}
......
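The sys_free_hugepages() hunk fixes a lock leak: the old code returned -EINVAL while still holding mmap_sem. The replacement records the error and leaves through a single unlock path. The same pattern, sketched with a pthread rwlock standing in for mmap_sem:

#include <pthread.h>
#include <errno.h>

static pthread_rwlock_t mmap_sem = PTHREAD_RWLOCK_INITIALIZER;

static int looks_valid (unsigned long addr) { return addr != 0; }

int free_region (unsigned long addr)
{
	int retval = 0;

	pthread_rwlock_wrlock(&mmap_sem);
	if (!looks_valid(addr)) {
		retval = -EINVAL;	/* record the error... */
		goto out;		/* ...but leave through the unlock path */
	}
	/* ... do the real work under the lock ... */
out:
	pthread_rwlock_unlock(&mmap_sem);
	return retval;
}

int main (void) { return free_region(0) == -EINVAL ? 0 : 1; }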
/*
* linux/arch/ia64/kernel/time.c
*
* Copyright (C) 1998-2001 Hewlett-Packard Co
* Copyright (C) 1998-2000 Stephane Eranian <eranian@hpl.hp.com>
* Copyright (C) 1999-2001 David Mosberger <davidm@hpl.hp.com>
* Copyright (C) 1998-2002 Hewlett-Packard Co
* Stephane Eranian <eranian@hpl.hp.com>
* David Mosberger <davidm@hpl.hp.com>
* Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
* Copyright (C) 1999-2000 VA Linux Systems
* Copyright (C) 1999-2000 Walt Drummond <drummond@valinux.com>
......@@ -12,6 +12,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/interrupt.h>
......
......@@ -54,8 +54,8 @@ trap_init (void)
if (ia64_boot_param->fpswa) {
/* FPSWA fixup: make the interface pointer a kernel virtual address: */
fpswa_interface = __va(ia64_boot_param->fpswa);
major = fpswa_interface->revision & 0xffff;
minor = fpswa_interface->revision >> 16;
major = fpswa_interface->revision >> 16;
minor = fpswa_interface->revision & 0xffff;
}
printk("fpswa interface at %lx (rev %d.%d)\n", ia64_boot_param->fpswa, major, minor);
}
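The trap_init() fix swaps the two halves of the FPSWA revision word: the major number lives in the upper 16 bits, the minor in the lower 16. A quick check of the corrected decode (the revision value is made up):

#include <stdio.h>

int main (void)
{
	unsigned long revision = 0x00010002;		/* hypothetical: rev 1.2 */
	unsigned int major = revision >> 16;
	unsigned int minor = revision & 0xffff;

	printf("fpswa rev %u.%u\n", major, minor);	/* prints 1.2 */
	return 0;
}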
......@@ -69,7 +69,6 @@ bust_spinlocks (int yes)
{
int loglevel_save = console_loglevel;
spin_lock_init(&timerlist_lock);
if (yes) {
oops_in_progress = 1;
return;
......
......@@ -32,26 +32,26 @@ AFLAGS___udivsi3.o = -DUNSIGNED
AFLAGS___modsi3.o = -DMODULO
AFLAGS___umodsi3.o = -DUNSIGNED -DMODULO
__divdi3.o: idiv64.S
$(obj)/__divdi3.o: $(src)/idiv64.S
$(cmd_as_o_S)
__udivdi3.o: idiv64.S
$(obj)/__udivdi3.o: $(src)/idiv64.S
$(cmd_as_o_S)
__moddi3.o: idiv64.S
$(obj)/__moddi3.o: $(src)/idiv64.S
$(cmd_as_o_S)
__umoddi3.o: idiv64.S
$(obj)/__umoddi3.o: $(src)/idiv64.S
$(cmd_as_o_S)
__divsi3.o: idiv32.S
$(obj)/__divsi3.o: $(src)/idiv32.S
$(cmd_as_o_S)
__udivsi3.o: idiv32.S
$(obj)/__udivsi3.o: $(src)/idiv32.S
$(cmd_as_o_S)
__modsi3.o: idiv32.S
$(obj)/__modsi3.o: $(src)/idiv32.S
$(cmd_as_o_S)
__umodsi3.o: idiv32.S
$(obj)/__umodsi3.o: $(src)/idiv32.S
$(cmd_as_o_S)
......@@ -9,5 +9,7 @@
obj-y := init.o fault.o tlb.o extable.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_NUMA) += numa.o
obj-$(CONFIG_DISCONTIGMEM) += discontig.o
include $(TOPDIR)/Rules.make
/*
* Copyright (c) 2000 Silicon Graphics, Inc. All rights reserved.
* Copyright (c) 2001 Intel Corp.
* Copyright (c) 2001 Tony Luck <tony.luck@intel.com>
* Copyright (c) 2002 NEC Corp.
* Copyright (c) 2002 Kimio Suganuma <k-suganuma@da.jp.nec.com>
*/
/*
* Platform initialization for Discontig Memory
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/acpi.h>
#include <linux/efi.h>
/*
* Round an address upward to the next multiple of GRANULE size.
*/
#define GRANULEROUNDUP(n) (((n)+IA64_GRANULE_SIZE-1) & ~(IA64_GRANULE_SIZE-1))
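GRANULEROUNDUP is the usual power-of-two round-up: add size-1, then mask off the low bits. A quick check, assuming a 16 MB granule:

#include <stdio.h>

#define GRANULE (1UL << 24)		/* assume 16 MB for the demo */
#define GRANULEROUNDUP(n) (((n) + GRANULE - 1) & ~(GRANULE - 1))

int main (void)
{
	/* 0x1000001 -> 0x2000000: anything past a boundary rounds to the next one */
	printf("%#lx -> %#lx\n", 0x1000001UL, GRANULEROUNDUP(0x1000001UL));
	return 0;
}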
static struct ia64_node_data *node_data[NR_NODES];
static long boot_pg_data[8*NR_NODES+sizeof(pg_data_t)] __initdata;
static pg_data_t *pg_data_ptr[NR_NODES] __initdata;
static bootmem_data_t bdata[NR_NODES][NR_BANKS_PER_NODE+1] __initdata;
extern int filter_rsvd_memory (unsigned long start, unsigned long end, void *arg);
/*
* Return the compact node number of this cpu. Used prior to
* setting up the cpu_data area.
* Note - not fast, intended for boot use only!!
*/
int
boot_get_local_nodeid(void)
{
int i;
for (i = 0; i < NR_CPUS; i++)
if (node_cpuid[i].phys_id == hard_smp_processor_id())
return node_cpuid[i].nid;
/* node info missing, so nid should be 0.. */
return 0;
}
/*
* Return a pointer to the pg_data structure for a node.
* This function is used ONLY in early boot before the cpu_data
* structure is available.
*/
pg_data_t* __init
boot_get_pg_data_ptr(long node)
{
return pg_data_ptr[node];
}
/*
* Return a pointer to the node data for the current node.
* (boottime initialization only)
*/
struct ia64_node_data *
get_node_data_ptr(void)
{
return node_data[boot_get_local_nodeid()];
}
/*
* We allocate one of the bootmem_data_t structs for each piece of memory
* that we wish to treat as a contiguous block. Each such block must start
* on a BANKSIZE boundary. Multiple banks per node are not supported.
*/
static int __init
build_maps(unsigned long pstart, unsigned long length, int node)
{
bootmem_data_t *bdp;
unsigned long cstart, epfn;
bdp = pg_data_ptr[node]->bdata;
epfn = GRANULEROUNDUP(pstart + length) >> PAGE_SHIFT;
cstart = pstart & ~(BANKSIZE - 1);
if (!bdp->node_low_pfn) {
bdp->node_boot_start = cstart;
bdp->node_low_pfn = epfn;
} else {
bdp->node_boot_start = min(cstart, bdp->node_boot_start);
bdp->node_low_pfn = max(epfn, bdp->node_low_pfn);
}
min_low_pfn = min(min_low_pfn, bdp->node_boot_start>>PAGE_SHIFT);
max_low_pfn = max(max_low_pfn, bdp->node_low_pfn);
return 0;
}
/*
* Find space on each node for the bootmem map.
*
* Called by efi_memmap_walk to find boot memory on each node. Note that
* only blocks that are free are passed to this routine (currently filtered by
* free_available_memory).
*/
static int __init
find_bootmap_space(unsigned long pstart, unsigned long length, int node)
{
unsigned long mapsize, pages, epfn;
bootmem_data_t *bdp;
epfn = (pstart + length) >> PAGE_SHIFT;
bdp = &pg_data_ptr[node]->bdata[0];
if (pstart < bdp->node_boot_start || epfn > bdp->node_low_pfn)
return 0;
if (!bdp->node_bootmem_map) {
pages = bdp->node_low_pfn - (bdp->node_boot_start>>PAGE_SHIFT);
mapsize = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
if (length > mapsize) {
init_bootmem_node(
BOOT_NODE_DATA(node),
pstart>>PAGE_SHIFT,
bdp->node_boot_start>>PAGE_SHIFT,
bdp->node_low_pfn);
}
}
return 0;
}
/*
* Free available memory to the bootmem allocator.
*
* Note that only blocks that are free are passed to this routine (currently
* filtered by free_available_memory).
*
*/
static int __init
discontig_free_bootmem_node(unsigned long pstart, unsigned long length, int node)
{
free_bootmem_node(BOOT_NODE_DATA(node), pstart, length);
return 0;
}
/*
* Reserve the space used by the bootmem maps.
*/
static void __init
discontig_reserve_bootmem(void)
{
int node;
unsigned long mapbase, mapsize, pages;
bootmem_data_t *bdp;
for (node = 0; node < numnodes; node++) {
bdp = BOOT_NODE_DATA(node)->bdata;
pages = bdp->node_low_pfn - (bdp->node_boot_start>>PAGE_SHIFT);
mapsize = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
mapbase = __pa(bdp->node_bootmem_map);
reserve_bootmem_node(BOOT_NODE_DATA(node), mapbase, mapsize);
}
}
/*
* Allocate per node tables.
* - the pg_data structure is allocated on each node. This minimizes offnode
* memory references
* - the node data is allocated & initialized. Portions of this structure are read-only (after
* boot) and contain node-local pointers to useful data structures located on
* other nodes.
*
* We also switch to using the "real" pg_data structures at this point. Earlier in boot, we
* use a different structure. The only use for pg_data prior to that point in boot is to get
* the pointer to the bdata for the node.
*/
static void __init
allocate_pernode_structures(void)
{
pg_data_t *pgdat=0, *new_pgdat_list=0;
int node, mynode;
mynode = boot_get_local_nodeid();
for (node = numnodes - 1; node >= 0 ; node--) {
node_data[node] = alloc_bootmem_node(BOOT_NODE_DATA(node), sizeof (struct ia64_node_data));
pgdat = __alloc_bootmem_node(BOOT_NODE_DATA(node), sizeof(pg_data_t), SMP_CACHE_BYTES, 0);
pgdat->bdata = &(bdata[node][0]);
pg_data_ptr[node] = pgdat;
pgdat->pgdat_next = new_pgdat_list;
new_pgdat_list = pgdat;
}
memcpy(node_data[mynode]->pg_data_ptrs, pg_data_ptr, sizeof(pg_data_ptr));
memcpy(node_data[mynode]->node_data_ptrs, node_data, sizeof(node_data));
pgdat_list = new_pgdat_list;
}
/*
* Called early in boot to set up the boot memory allocator and to
* allocate the node-local pg_data & node-directory data structures.
*/
void __init
discontig_mem_init(void)
{
int node;
if (numnodes == 0) {
printk("node info missing!\n");
numnodes = 1;
}
for (node = 0; node < numnodes; node++) {
pg_data_ptr[node] = (pg_data_t*) &boot_pg_data[node];
pg_data_ptr[node]->bdata = &bdata[node][0];
}
min_low_pfn = -1;
max_low_pfn = 0;
efi_memmap_walk(filter_rsvd_memory, build_maps);
efi_memmap_walk(filter_rsvd_memory, find_bootmap_space);
efi_memmap_walk(filter_rsvd_memory, discontig_free_bootmem_node);
discontig_reserve_bootmem();
allocate_pernode_structures();
}
/*
* Initialize the paging system.
* - determine sizes of each node
* - initialize the paging system for the node
* - build the nodedir for the node. This contains pointers to
* the per-bank mem_map entries.
* - fix the page struct "virtual" pointers. These are bank specific
* values that the paging system doesn't understand.
* - replicate the nodedir structure to other nodes
*/
void __init
discontig_paging_init(void)
{
int node, mynode;
unsigned long max_dma, zones_size[MAX_NR_ZONES];
unsigned long kaddr, ekaddr, bid;
struct page *page;
bootmem_data_t *bdp;
max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
mynode = boot_get_local_nodeid();
for (node = 0; node < numnodes; node++) {
long pfn, startpfn;
memset(zones_size, 0, sizeof(zones_size));
startpfn = -1;
bdp = BOOT_NODE_DATA(node)->bdata;
pfn = bdp->node_boot_start >> PAGE_SHIFT;
if (startpfn == -1)
startpfn = pfn;
if (pfn > max_dma)
zones_size[ZONE_NORMAL] += (bdp->node_low_pfn - pfn);
else if (bdp->node_low_pfn < max_dma)
zones_size[ZONE_DMA] += (bdp->node_low_pfn - pfn);
else {
zones_size[ZONE_DMA] += (max_dma - pfn);
zones_size[ZONE_NORMAL] += (bdp->node_low_pfn - max_dma);
}
free_area_init_node(node, NODE_DATA(node), NULL, zones_size, startpfn, 0);
page = NODE_DATA(node)->node_mem_map;
bdp = BOOT_NODE_DATA(node)->bdata;
kaddr = (unsigned long)__va(bdp->node_boot_start);
ekaddr = (unsigned long)__va(bdp->node_low_pfn << PAGE_SHIFT);
while (kaddr < ekaddr) {
bid = BANK_MEM_MAP_INDEX(kaddr);
node_data[mynode]->node_id_map[bid] = node;
node_data[mynode]->bank_mem_map_base[bid] = page;
kaddr += BANKSIZE;
page += BANKSIZE/PAGE_SIZE;
}
}
/*
* Finish setting up the node data for this node, then copy it to the other nodes.
*/
for (node=0; node < numnodes; node++)
if (mynode != node) {
memcpy(node_data[node], node_data[mynode], sizeof(struct ia64_node_data));
node_data[node]->node = node;
}
}
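For concreteness, discontig_paging_init() splits each node's page range at max_dma, the DMA-addressability boundary. Assuming 4 KB pages for simplicity (ia64 commonly uses larger pages), max_dma would be pfn 0x100000 at the 4 GB boundary; a node spanning pfns 0x80000-0x180000 would then contribute 0x80000 pages to ZONE_DMA and 0x80000 pages to ZONE_NORMAL, exactly the else branch above.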
......@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/efi.h>
#include <linux/mmzone.h>
#include <asm/a.out.h>
#include <asm/bitops.h>
......@@ -78,7 +79,7 @@ ia64_init_addr_space (void)
vma->vm_mm = current->mm;
vma->vm_start = IA64_RBS_BOT;
vma->vm_end = vma->vm_start + PAGE_SIZE;
vma->vm_page_prot = protection_map[VM_READ | VM_WRITE];
vma->vm_page_prot = protection_map[VM_DATA_DEFAULT_FLAGS & 0x7];
vma->vm_flags = VM_READ|VM_WRITE|VM_MAYREAD|VM_MAYWRITE|VM_GROWSUP;
vma->vm_ops = NULL;
vma->vm_pgoff = 0;
......@@ -347,6 +348,15 @@ extern long htlbzone_pages;
extern struct list_head htlbpage_freelist;
#endif
#ifdef CONFIG_DISCONTIGMEM
void
paging_init (void)
{
extern void discontig_paging_init(void);
discontig_paging_init();
}
#else /* !CONFIG_DISCONTIGMEM */
void
paging_init (void)
{
......@@ -365,6 +375,7 @@ paging_init (void)
}
free_area_init(zones_size);
}
#endif /* !CONFIG_DISCONTIGMEM */
static int
count_pages (u64 start, u64 end, void *arg)
......@@ -380,10 +391,9 @@ count_reserved_pages (u64 start, u64 end, void *arg)
{
unsigned long num_reserved = 0;
unsigned long *count = arg;
struct page *pg;
for (pg = virt_to_page(start); pg < virt_to_page(end); ++pg)
if (PageReserved(pg))
for (; start < end; start += PAGE_SIZE)
if (PageReserved(virt_to_page(start)))
++num_reserved;
*count += num_reserved;
return 0;
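The loop rewrite in count_reserved_pages() is what makes it safe under DISCONTIGMEM: struct page entries are no longer one contiguous array, so incrementing a struct page pointer across bank boundaries walks off the mem_map. Stepping the virtual address by PAGE_SIZE and calling virt_to_page() on each step stays correct in both memory models.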
......@@ -395,6 +405,7 @@ mem_init (void)
extern char __start_gate_section[];
long reserved_pages, codesize, datasize, initsize;
unsigned long num_pgt_pages;
pg_data_t *pgdat;
#ifdef CONFIG_PCI
/*
......@@ -405,16 +416,19 @@ mem_init (void)
platform_pci_dma_init();
#endif
#ifndef CONFIG_DISCONTIGMEM
if (!mem_map)
BUG();
max_mapnr = max_low_pfn;
#endif
num_physpages = 0;
efi_memmap_walk(count_pages, &num_physpages);
max_mapnr = max_low_pfn;
high_memory = __va(max_low_pfn * PAGE_SIZE);
totalram_pages += free_all_bootmem();
for_each_pgdat(pgdat)
totalram_pages += free_all_bootmem_node(pgdat);
reserved_pages = 0;
efi_memmap_walk(count_reserved_pages, &reserved_pages);
......@@ -425,7 +439,7 @@ mem_init (void)
printk("Memory: %luk/%luk available (%luk code, %luk reserved, %luk data, %luk init)\n",
(unsigned long) nr_free_pages() << (PAGE_SHIFT - 10),
max_mapnr << (PAGE_SHIFT - 10), codesize >> 10, reserved_pages << (PAGE_SHIFT - 10),
num_physpages << (PAGE_SHIFT - 10), codesize >> 10, reserved_pages << (PAGE_SHIFT - 10),
datasize >> 10, initsize >> 10);
/*
......@@ -441,6 +455,8 @@ mem_init (void)
if (num_pgt_pages > pgt_cache_water[1])
pgt_cache_water[1] = num_pgt_pages;
show_mem();
/* install the gate page in the global page table: */
put_gate_page(virt_to_page(__start_gate_section), GATE_ADDR);
......
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* This file contains NUMA specific variables and functions which can
* be split away from DISCONTIGMEM and are used on NUMA machines with
* contiguous memory.
*
* 2002/08/07 Erich Focht <efocht@ess.nec.de>
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <asm/numa.h>
/*
* The following structures are usually initialized by ACPI or
* similar mechanisms and describe the NUMA characteristics of the machine.
*/
int num_memblks = 0;
struct node_memblk_s node_memblk[NR_MEMBLKS];
struct node_cpuid_s node_cpuid[NR_CPUS];
/*
* This is a matrix with "distances" between nodes; they should be
* proportional to the memory access latency ratios.
*/
u8 numa_slit[NR_NODES * NR_NODES];
/* Identify which cnode a physical address resides on */
int
paddr_to_nid(unsigned long paddr)
{
int i;
for (i = 0; i < num_memblks; i++)
if (paddr >= node_memblk[i].start_paddr &&
paddr < node_memblk[i].start_paddr + node_memblk[i].size)
break;
return (i < num_memblks) ? node_memblk[i].nid : -1;
}
......@@ -141,13 +141,13 @@ pcibios_scan_root (int bus)
/*
* Called after each bus is probed, but before its children are examined.
*/
void __init
void __devinit
pcibios_fixup_bus (struct pci_bus *b)
{
return;
}
void __init
void __devinit
pcibios_update_resource (struct pci_dev *dev, struct resource *root,
struct resource *res, int resource)
{
......@@ -163,7 +163,7 @@ pcibios_update_resource (struct pci_dev *dev, struct resource *root,
/* ??? FIXME -- record old value for shutdown. */
}
void __init
void __devinit
pcibios_update_irq (struct pci_dev *dev, int irq)
{
pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
......@@ -171,7 +171,7 @@ pcibios_update_irq (struct pci_dev *dev, int irq)
/* ??? FIXME -- record old value for shutdown. */
}
void __init
void __devinit
pcibios_fixup_pbus_ranges (struct pci_bus * bus, struct pbus_set_ranges_data * ranges)
{
}
......
......@@ -2,6 +2,8 @@ CFLAGS = -g -O2 -Wall $(CPPFLAGS)
TARGET = $(TOPDIR)/include/asm-ia64/offsets.h
src = $(obj)
all:
fastdep:
......@@ -9,12 +11,12 @@ fastdep:
mrproper: clean
clean:
rm -f print_offsets.s print_offsets offsets.h
rm -f $(obj)/print_offsets.s $(obj)/print_offsets $(obj)/offsets.h
$(TARGET): offsets.h
@if ! cmp -s offsets.h ${TARGET}; then \
$(TARGET): $(obj)/offsets.h
@if ! cmp -s $(obj)/offsets.h ${TARGET}; then \
echo -e "*** Updating ${TARGET}..."; \
cp offsets.h ${TARGET}; \
cp $(obj)/offsets.h ${TARGET}; \
else \
echo "*** ${TARGET} is up to date"; \
fi
......@@ -30,25 +32,26 @@ $(TARGET): offsets.h
ifeq ($(CROSS_COMPILE),)
offsets.h: print_offsets
./print_offsets > offsets.h
$(obj)/offsets.h: $(obj)/print_offsets
$(obj)/print_offsets > $(obj)/offsets.h
comma := ,
print_offsets: print_offsets.c FORCE
$(obj)/print_offsets: $(src)/print_offsets.c FORCE
[ -r $(TARGET) ] || echo "#define IA64_TASK_SIZE 0" > $(TARGET)
$(CC) $(CFLAGS) -DKBUILD_BASENAME=$(subst $(comma),_,$(subst -,_,$(*F))) \
print_offsets.c -o $@
$(src)/print_offsets.c -o $@
FORCE:
else
offsets.h: print_offsets.s
$(AWK) -f print_offsets.awk $^ > $@
$(obj)/offsets.h: $(obj)/print_offsets.s
$(AWK) -f $(src)/print_offsets.awk $^ > $@
print_offsets.s: print_offsets.c
$(CC) $(CFLAGS) -DKBUILD_BASENAME=$(subst $(comma),_,$(subst -,_,$(*F))) -S \
print_offsets.c -o $@
$(obj)/print_offsets.s: $(src)/print_offsets.c
[ -r $(TARGET) ] || echo "#define IA64_TASK_SIZE 0" > $(TARGET)
$(CC) $(CFLAGS) -DKBUILD_BASENAME=$(subst $(comma),_,$(subst -,_,$(*F))) -S $^ -o $@
endif
......
......@@ -12,11 +12,11 @@ SECTIONS
{
/* Sections to be discarded */
/DISCARD/ : {
*(.text.exit)
*(.data.exit)
*(.exit.text)
*(.exit.data)
*(.exitcall.exit)
*(.IA_64.unwind.text.exit)
*(.IA_64.unwind_info.text.exit)
*(.IA_64.unwind.exit.text)
*(.IA_64.unwind_info.exit.text)
}
v = PAGE_OFFSET; /* this symbol is here to make debugging easier... */
......@@ -65,6 +65,13 @@ SECTIONS
{ *(__ksymtab) }
__stop___ksymtab = .;
__kallsyms : AT(ADDR(__kallsyms) - PAGE_OFFSET)
{
__start___kallsyms = .; /* All kernel symbols */
*(__kallsyms)
__stop___kallsyms = .;
}
/* Unwind info & table: */
. = ALIGN(8);
.IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - PAGE_OFFSET)
......@@ -85,15 +92,15 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
__init_begin = .;
.text.init : AT(ADDR(.text.init) - PAGE_OFFSET)
{ *(.text.init) }
.init.text : AT(ADDR(.init.text) - PAGE_OFFSET)
{ *(.init.text) }
.data.init : AT(ADDR(.data.init) - PAGE_OFFSET)
{ *(.data.init) }
.init.data : AT(ADDR(.init.data) - PAGE_OFFSET)
{ *(.init.data) }
. = ALIGN(16);
__setup_start = .;
.setup.init : AT(ADDR(.setup.init) - PAGE_OFFSET)
{ *(.setup.init) }
.init.setup : AT(ADDR(.init.setup) - PAGE_OFFSET)
{ *(.init.setup) }
__setup_end = .;
__initcall_start = .;
.initcall.init : AT(ADDR(.initcall.init) - PAGE_OFFSET)
......
......@@ -99,6 +99,12 @@ static ssize_t read_kcore(struct file *file, char *buf, size_t count, loff_t *pp
}
#else /* CONFIG_KCORE_AOUT */
#if VMALLOC_START < PAGE_OFFSET
#define KCORE_BASE VMALLOC_START
#else
#define KCORE_BASE PAGE_OFFSET
#endif
#define roundup(x, y) ((((x)+((y)-1))/(y))*(y))
/* An ELF note in memory */
......@@ -118,7 +124,7 @@ static size_t get_kcore_size(int *num_vma, size_t *elf_buflen)
struct vm_struct *m;
*num_vma = 0;
size = ((size_t)high_memory - PAGE_OFFSET + PAGE_SIZE);
size = ((size_t)high_memory - KCORE_BASE + PAGE_SIZE);
if (!vmlist) {
*elf_buflen = PAGE_SIZE;
return (size);
......@@ -126,15 +132,15 @@ static size_t get_kcore_size(int *num_vma, size_t *elf_buflen)
for (m=vmlist; m; m=m->next) {
try = (size_t)m->addr + m->size;
if (try > size)
size = try;
if (try > KCORE_BASE + size)
size = try - KCORE_BASE;
*num_vma = *num_vma + 1;
}
*elf_buflen = sizeof(struct elfhdr) +
(*num_vma + 2)*sizeof(struct elf_phdr) +
3 * sizeof(struct memelfnote);
*elf_buflen = PAGE_ALIGN(*elf_buflen);
return (size - PAGE_OFFSET + *elf_buflen);
return size + *elf_buflen;
}
......@@ -237,7 +243,7 @@ static void elf_kcore_store_hdr(char *bufp, int num_vma, int dataoff)
offset += sizeof(struct elf_phdr);
phdr->p_type = PT_LOAD;
phdr->p_flags = PF_R|PF_W|PF_X;
phdr->p_offset = dataoff;
phdr->p_offset = PAGE_OFFSET - KCORE_BASE + dataoff;
phdr->p_vaddr = PAGE_OFFSET;
phdr->p_paddr = __pa(PAGE_OFFSET);
phdr->p_filesz = phdr->p_memsz = ((unsigned long)high_memory - PAGE_OFFSET);
......@@ -254,7 +260,7 @@ static void elf_kcore_store_hdr(char *bufp, int num_vma, int dataoff)
phdr->p_type = PT_LOAD;
phdr->p_flags = PF_R|PF_W|PF_X;
phdr->p_offset = (size_t)m->addr - PAGE_OFFSET + dataoff;
phdr->p_offset = (size_t)m->addr - KCORE_BASE + dataoff;
phdr->p_vaddr = (size_t)m->addr;
phdr->p_paddr = __pa(m->addr);
phdr->p_filesz = phdr->p_memsz = m->size;
......@@ -385,9 +391,9 @@ static ssize_t read_kcore(struct file *file, char *buffer, size_t buflen, loff_t
/*
* Fill the remainder of the buffer from kernel VM space.
* We said in the ELF header that the data which starts
* at 'elf_buflen' is virtual address PAGE_OFFSET. --rmk
* at 'elf_buflen' is virtual address KCORE_BASE. --rmk
*/
start = PAGE_OFFSET + (*fpos - elf_buflen);
start = KCORE_BASE + (*fpos - elf_buflen);
if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
tsz = buflen;
......
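The kcore hunks rebase the ELF file layout from PAGE_OFFSET to KCORE_BASE, so kernel virtual space that begins below PAGE_OFFSET (ia64 keeps vmalloc space in a lower region) becomes representable. File offsets past the ELF header then map linearly to virtual addresses. A sketch of that round trip, with an assumed KCORE_BASE and header size:

#include <stdio.h>

#define KCORE_BASE 0xa000000000000000UL	/* assumed: min(VMALLOC_START, PAGE_OFFSET) */

/* file offset -> kernel virtual address, as in read_kcore() above */
static unsigned long
fpos_to_vaddr (unsigned long fpos, unsigned long elf_buflen)
{
	return KCORE_BASE + (fpos - elf_buflen);
}

/* kernel virtual address -> file offset, as in elf_kcore_store_hdr() above */
static unsigned long
vaddr_to_fpos (unsigned long vaddr, unsigned long elf_buflen)
{
	return vaddr - KCORE_BASE + elf_buflen;
}

int main (void)
{
	unsigned long elf_buflen = 0x2000;	/* hypothetical header size */
	unsigned long va = KCORE_BASE + 0x10000;

	printf("round trip ok: %d\n",
	       fpos_to_vaddr(vaddr_to_fpos(va, elf_buflen), elf_buflen) == va);
	return 0;
}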
......@@ -97,17 +97,18 @@
} while (0)
const char *acpi_get_sysname (void);
int acpi_boot_init (char *cmdline);
int acpi_request_vector (u32 int_type);
int acpi_get_prt (struct pci_vector_struct **vectors, int *count);
int acpi_get_interrupt_model (int *type);
int acpi_irq_to_vector (u32 irq);
#ifdef CONFIG_DISCONTIGMEM
#define NODE_ARRAY_INDEX(x) ((x) / 8) /* 8 bits/char */
#define NODE_ARRAY_OFFSET(x) ((x) % 8) /* 8 bits/char */
#ifdef CONFIG_ACPI_NUMA
#include <asm/numa.h>
/* Proximity bitmap length; _PXM is at most 255 (8 bit)*/
#define MAX_PXM_DOMAINS (256)
#endif /* CONFIG_DISCONTIGMEM */
extern int __initdata pxm_to_nid_map[MAX_PXM_DOMAINS];
extern int __initdata nid_to_pxm_map[NR_NODES];
#endif
#endif /*__KERNEL__*/
......
......@@ -73,6 +73,17 @@ struct _fpreg_ia32 {
unsigned short exponent;
};
struct _fpxreg_ia32 {
unsigned short significand[4];
unsigned short exponent;
unsigned short padding[3];
};
struct _xmmreg_ia32 {
unsigned int element[4];
};
struct _fpstate_ia32 {
unsigned int cw,
sw,
......@@ -82,7 +93,16 @@ struct _fpstate_ia32 {
dataoff,
datasel;
struct _fpreg_ia32 _st[8];
unsigned int status;
unsigned short status;
unsigned short magic; /* 0xffff = regular FPU data only */
/* FXSR FPU environment */
unsigned int _fxsr_env[6]; /* FXSR FPU env is ignored */
unsigned int mxcsr;
unsigned int reserved;
struct _fpxreg_ia32 _fxsr_st[8]; /* FXSR FPU reg data is ignored */
struct _xmmreg_ia32 _xmm[8];
unsigned int padding[56];
};
struct sigcontext_ia32 {
......@@ -486,6 +506,18 @@ extern int ia32_intercept (struct pt_regs *regs, unsigned long isr);
extern unsigned long ia32_do_mmap (struct file *, unsigned long, unsigned long, int, int, loff_t);
extern void ia32_load_segment_descriptors (struct task_struct *task);
#define ia32f2ia64f(dst,src) \
do { \
register double f6 asm ("f6"); \
asm volatile ("ldfe f6=[%2];; stf.spill [%1]=f6" : "=f"(f6): "r"(dst), "r"(src) : "memory"); \
} while(0)
#define ia64f2ia32f(dst,src) \
do { \
register double f6 asm ("f6"); \
asm volatile ("ldf.fill f6=[%2];; stfe [%1]=f6" : "=f"(f6): "r"(dst), "r"(src) : "memory"); \
} while(0)
#endif /* !CONFIG_IA32_SUPPORT */
#endif /* _ASM_IA64_IA32_H */
......@@ -55,6 +55,8 @@ extern void __devinit iosapic_init (unsigned long address,
unsigned int gsi_base,
int pcat_compat);
extern int gsi_to_vector (unsigned int gsi);
extern int gsi_to_irq (unsigned int gsi);
extern void iosapic_parse_prt (void);
extern int iosapic_register_intr (unsigned int gsi, unsigned long polarity,
unsigned long edge_triggered,
u32 gsi_base, char *iosapic_address);
......
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2000 Silicon Graphics, Inc. All rights reserved.
* Copyright (c) 2002 NEC Corp.
* Copyright (c) 2002 Erich Focht <efocht@ess.nec.de>
* Copyright (c) 2002 Kimio Suganuma <k-suganuma@da.jp.nec.com>
*/
#ifndef _ASM_IA64_MMZONE_H
#define _ASM_IA64_MMZONE_H
#include <linux/config.h>
#include <linux/init.h>
/*
* Given a kaddr, find the base mem_map address for the start of the mem_map
* entries for the bank containing the kaddr.
*/
#define BANK_MEM_MAP_BASE(kaddr) local_node_data->bank_mem_map_base[BANK_MEM_MAP_INDEX(kaddr)]
/*
* Given a kaddr, this macro returns the relative map number
* within the bank.
*/
#define BANK_MAP_NR(kaddr) (BANK_OFFSET(kaddr) >> PAGE_SHIFT)
/*
* Given a pte, this macro returns a pointer to the page struct for the pte.
*/
#define pte_page(pte) virt_to_page(PAGE_OFFSET | (pte_val(pte)&_PFN_MASK))
/*
* Determine if a kaddr is a valid memory address of memory that
* actually exists.
*
* The check consists of 2 parts:
* - verify that the address is a region 7 address & does not
* contain any bits that preclude it from being a valid platform
* memory address
* - verify that the chunk actually exists.
*
* Note that IO addresses are NOT considered valid addresses.
*
* Note, many platforms can simply check if kaddr exceeds a specific size.
* (However, this won't work on SGI platforms since IO space is embedded
* within the range of valid memory addresses & nodes have holes in the
* address range between banks).
*/
#define kern_addr_valid(kaddr) ({long _kav=(long)(kaddr); \
VALID_MEM_KADDR(_kav);})
/*
* Given a kaddr, return a pointer to the page struct for the page.
* If the kaddr does not represent RAM memory that potentially exists, the
* macro returns NULL; IO addresses likewise yield NULL. Addresses in
* unpopulated RAM banks may
* return undefined results OR may panic the system.
*
*/
#define virt_to_page(kaddr) ({long _kvtp=(long)(kaddr); \
(VALID_MEM_KADDR(_kvtp)) \
? BANK_MEM_MAP_BASE(_kvtp) + BANK_MAP_NR(_kvtp) \
: NULL;})
/*
* Given a page struct entry, return the physical address that the page struct represents.
* Since IA64 has all memory in the DMA zone, the following works:
*/
#define page_to_phys(page) __pa(page_address(page))
#define node_mem_map(nid) (NODE_DATA(nid)->node_mem_map)
#define node_localnr(pfn, nid) ((pfn) - NODE_DATA(nid)->node_start_pfn)
#define pfn_to_page(pfn) (struct page *)(node_mem_map(pfn_to_nid(pfn)) + node_localnr(pfn, pfn_to_nid(pfn)))
#define pfn_to_nid(pfn) local_node_data->node_id_map[((pfn) << PAGE_SHIFT) >> DIG_BANKSHIFT]
#define page_to_pfn(page) (long)((page - page_zone(page)->zone_mem_map) + page_zone(page)->zone_start_pfn)
/*
* pfn_valid should be made as fast as possible, and the current definition
* is valid for machines that are NUMA, but still contiguous, which is what
* is currently supported. A more generalised, but slower definition would
* be something like this - mbligh:
* ( pfn_to_pgdat(pfn) && (pfn < node_end_pfn(pfn_to_nid(pfn))) )
*/
#define pfn_valid(pfn) (pfn < max_low_pfn)
extern unsigned long max_low_pfn;
#ifdef CONFIG_IA64_DIG
/*
* Platform definitions for DIG platform with contiguous memory.
*/
#define MAX_PHYSNODE_ID 8 /* Maximum node number +1 */
#define NR_NODES 8 /* Maximum number of nodes in SSI */
#define MAX_PHYS_MEMORY (1UL << 40) /* 1 TB */
/*
* Bank definitions.
* Current settings for DIG: 512MB/bank, 16GB/node.
*/
#define NR_BANKS_PER_NODE 32
#define BANK_OFFSET(addr) ((unsigned long)(addr) & (BANKSIZE-1))
#define DIG_BANKSHIFT 29
#define BANKSIZE (1UL << DIG_BANKSHIFT)
#define NR_BANKS (NR_BANKS_PER_NODE * NR_NODES)
/*
* VALID_MEM_KADDR returns a boolean to indicate if a kaddr is
* potentially a valid cacheable identity mapped RAM memory address.
* Note that the RAM may or may not actually be present!!
*/
#define VALID_MEM_KADDR(kaddr) 1
/*
* Given a nodeid & a bank number, find the address of the mem_map
* entry for the first page of the bank.
*/
#define BANK_MEM_MAP_INDEX(kaddr) \
(((unsigned long)(kaddr) & (MAX_PHYS_MEMORY-1)) >> DIG_BANKSHIFT)
#endif /* CONFIG_IA64_DIG */
#endif /* _ASM_IA64_MMZONE_H */
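A worked example of the bank arithmetic defined above: with DIG_BANKSHIFT = 29 the banks are 512 MB, and the bank index is simply the physical part of the kernel address shifted down (the region-7 address used here is illustrative):

#include <stdio.h>

#define DIG_BANKSHIFT	29			/* 512 MB banks, per the header */
#define MAX_PHYS_MEMORY	(1UL << 40)		/* 1 TB, per the header */
#define BANK_MEM_MAP_INDEX(kaddr) \
	(((unsigned long) (kaddr) & (MAX_PHYS_MEMORY - 1)) >> DIG_BANKSHIFT)

int main (void)
{
	/* high region bits masked off, low bits index the bank */
	unsigned long kaddr = 0xe000000080000000UL;	/* phys 0x80000000 = 2 GB */

	printf("bank %lu\n", BANK_MEM_MAP_INDEX(kaddr));	/* 2 GB / 512 MB = bank 4 */
	return 0;
}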
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (c) 2000 Silicon Graphics, Inc. All rights reserved.
* Copyright (c) 2002 NEC Corp.
* Copyright (c) 2002 Erich Focht <efocht@ess.nec.de>
* Copyright (c) 2002 Kimio Suganuma <k-suganuma@da.jp.nec.com>
*/
#ifndef _ASM_IA64_NODEDATA_H
#define _ASM_IA64_NODEDATA_H
#include <asm/mmzone.h>
/*
* Node Data. One of these structures is located on each node of a NUMA system.
*/
struct pglist_data;
struct ia64_node_data {
short node;
struct pglist_data *pg_data_ptrs[NR_NODES];
struct page *bank_mem_map_base[NR_BANKS];
struct ia64_node_data *node_data_ptrs[NR_NODES];
short node_id_map[NR_BANKS];
};
/*
* Return a pointer to the node_data structure for the executing cpu.
*/
#define local_node_data (local_cpu_data->node_data)
/*
* Return a pointer to the node_data structure for the specified node.
*/
#define node_data(node) (local_node_data->node_data_ptrs[node])
/*
* Get a pointer to the node_id/node_data for the current cpu.
* (boot time only)
*/
extern int boot_get_local_nodeid(void);
extern struct ia64_node_data *get_node_data_ptr(void);
/*
* Given a node id, return a pointer to the pg_data_t for the node.
* The following 2 macros are similar.
*
* NODE_DATA - should be used in all code not related to system
* initialization. It uses pernode data structures to minimize
* offnode memory references. However, these structure are not
* present during boot. This macro can be used once cpu_init
* completes.
*
* BOOT_NODE_DATA
* - should be used during system initialization
* prior to freeing __initdata. It does not depend on the percpu
* area being present.
*
* NOTE: The names of these macros are misleading but are difficult to change
* since they are used in generic linux & on other architectures.
*/
#define NODE_DATA(nid) (local_node_data->pg_data_ptrs[nid])
#define BOOT_NODE_DATA(nid) boot_get_pg_data_ptr((long)(nid))
struct pglist_data;
extern struct pglist_data * __init boot_get_pg_data_ptr(long);
#endif /* _ASM_IA64_NODEDATA_H */
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* This file contains NUMA specific prototypes and definitions.
*
* 2002/08/05 Erich Focht <efocht@ess.nec.de>
*
*/
#ifndef _ASM_IA64_NUMA_H
#define _ASM_IA64_NUMA_H
#ifdef CONFIG_NUMA
#ifdef CONFIG_DISCONTIGMEM
# include <asm/mmzone.h>
# define NR_MEMBLKS (NR_BANKS)
#else
# define NR_NODES (8)
# define NR_MEMBLKS (NR_NODES * 8)
#endif
extern char cpu_to_node_map[NR_CPUS] __cacheline_aligned;
/* Stuff below this line could be architecture independent */
extern int num_memblks; /* total number of memory chunks */
/*
* List of node memory chunks. Filled when parsing SRAT table to
* obtain information about memory nodes.
*/
struct node_memblk_s {
unsigned long start_paddr;
unsigned long size;
int nid; /* which logical node contains this chunk? */
int bank; /* which mem bank on this node */
};
struct node_cpuid_s {
u16 phys_id; /* id << 8 | eid */
int nid; /* logical node containing this CPU */
};
extern struct node_memblk_s node_memblk[NR_MEMBLKS];
extern struct node_cpuid_s node_cpuid[NR_CPUS];
/*
* ACPI 2.0 SLIT (System Locality Information Table)
* http://devresource.hp.com/devresource/Docs/TechPapers/IA64/slit.pdf
*
* This is a matrix with "distances" between nodes; they should be
* proportional to the memory access latency ratios.
*/
extern u8 numa_slit[NR_NODES * NR_NODES];
#define node_distance(from,to) (numa_slit[(from) * numnodes + (to)])
extern int paddr_to_nid(unsigned long paddr);
#endif /* CONFIG_NUMA */
#endif /* _ASM_IA64_NUMA_H */
#ifndef _ASM_MAX_NUMNODES_H
#define _ASM_MAX_NUMNODES_H
#include <asm/mmzone.h>
#define MAX_NUMNODES NR_NODES
#endif /* _ASM_MAX_NUMNODES_H */
......@@ -82,12 +82,15 @@ do { \
flush_dcache_page(page); \
} while (0)
#define pfn_valid(pfn) ((pfn) < max_mapnr)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#ifndef CONFIG_DISCONTIGMEM
#define pfn_valid(pfn) ((pfn) < max_mapnr)
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_pfn(page) ((unsigned long) (page - mem_map))
#define pfn_to_page(pfn) (mem_map + (pfn))
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
#endif
typedef union ia64_va {
struct {
......
......@@ -123,7 +123,7 @@ typedef struct {
* Define the version numbers for both perfmon as a whole and the sampling buffer format.
*/
#define PFM_VERSION_MAJ 1U
#define PFM_VERSION_MIN 0U
#define PFM_VERSION_MIN 1U
#define PFM_VERSION (((PFM_VERSION_MAJ&0xffff)<<16)|(PFM_VERSION_MIN & 0xffff))
#define PFM_SMPL_VERSION_MAJ 1U
......
......@@ -89,6 +89,9 @@
#include <asm/rse.h>
#include <asm/unwind.h>
#include <asm/atomic.h>
#ifdef CONFIG_NUMA
#include <asm/nodedata.h>
#endif
/* like above but expressed as bitfields for more efficient access: */
struct ia64_psr {
......@@ -174,6 +177,10 @@ struct cpuinfo_ia64 {
__u64 prof_counter;
__u64 prof_multiplier;
#endif
#ifdef CONFIG_NUMA
struct ia64_node_data *node_data;
int nodeid;
#endif
};
DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);
......@@ -185,6 +192,10 @@ DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);
#define local_cpu_data (&__get_cpu_var(cpu_info))
#define cpu_data(cpu) (&per_cpu(cpu_info, cpu))
#ifdef CONFIG_NUMA
#define numa_node_id() (local_cpu_data->nodeid)
#endif
extern void identify_cpu (struct cpuinfo_ia64 *);
extern void print_cpu_info (struct cpuinfo_ia64 *);
......
......@@ -148,8 +148,8 @@ copy_siginfo (siginfo_t *to, siginfo_t *from)
if (from->si_code < 0)
memcpy(to, from, sizeof(siginfo_t));
else
/* _sigchld is currently the largest know union member */
memcpy(to, from, 4*sizeof(int) + sizeof(from->_sifields._sigchld));
/* _sigprof is currently the largest known union member */
memcpy(to, from, 4*sizeof(int) + sizeof(from->_sifields._sigprof));
}
extern int copy_siginfo_from_user(siginfo_t *to, siginfo_t *from);
......
......@@ -250,7 +250,7 @@ bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification)
* status register into the notification area.
* This fakes the shub performing the copy.
*/
if (jiffies > bte->idealTransferTimeout) {
if (time_after(jiffies, bte->idealTransferTimeout)) {
bte->notify = HUB_L(bte->bte_base_addr);
bte->idealTransferTimeoutReached++;
bte->idealTransferTimeout = jiffies +
......
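The bte_copy() hunk replaces a raw jiffies comparison with time_after(), which stays correct across counter wraparound by comparing in signed arithmetic. The kernel macro reduces to the definition below; the demo shows the naive comparison going wrong at the wrap:

#include <stdio.h>

/* equivalent to the kernel's time_after(a, b): true if a is after b,
   even when the counter has wrapped */
#define time_after(a, b) ((long) (b) - (long) (a) < 0)

int main (void)
{
	unsigned long before_wrap = ~0UL - 5;	/* just before wraparound */
	unsigned long after_wrap  = 10;		/* just after */

	printf("naive: %d, time_after: %d\n",
	       after_wrap > before_wrap,		/* 0: wrong */
	       time_after(after_wrap, before_wrap));	/* 1: right */
	return 0;
}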
......@@ -37,11 +37,11 @@ struct thread_info {
#define INIT_THREAD_INFO(ti) \
{ \
exec_domain: &default_exec_domain, \
flags: 0, \
cpu: 0, \
addr_limit: KERNEL_DS, \
preempt_count: 0, \
.exec_domain = &default_exec_domain, \
.flags = 0, \
.cpu = 0, \
.addr_limit = KERNEL_DS, \
.preempt_count = 0, \
}
/* how to get the thread information struct from C */
......
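The INIT_THREAD_INFO hunk converts the old GCC-only `field:' initializer syntax to C99 designated initializers, which any conforming compiler accepts. Both forms on a toy struct:

struct point { int x, y; };

/* old GCC extension (what the diff removes):  { x: 1, y: 2 } */
/* C99 designated initializers (what the diff adds): */
static struct point origin = { .x = 1, .y = 2 };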
......@@ -54,7 +54,8 @@
typedef struct {
struct mm_struct *mm;
unsigned int nr; /* == ~0U => fast mode */
unsigned int fullmm; /* non-zero means full mm flush */
unsigned char fullmm; /* non-zero means full mm flush */
unsigned char need_flush; /* really unmapped some PTEs? */
unsigned long freed; /* number of pages freed */
unsigned long start_addr;
unsigned long end_addr;
......@@ -73,6 +74,10 @@ ia64_tlb_flush_mmu (mmu_gather_t *tlb, unsigned long start, unsigned long end)
{
unsigned int nr;
if (!tlb->need_flush)
return;
tlb->need_flush = 0;
if (tlb->fullmm) {
/*
* Tearing down the entire address space. This happens both as a result
......@@ -167,18 +172,6 @@ tlb_finish_mmu (mmu_gather_t *tlb, unsigned long start, unsigned long end)
check_pgt_cache();
}
/*
* Remove TLB entry for PTE mapped at virtual address ADDRESS. This is called for any
* PTE, not just those pointing to (normal) physical memory.
*/
static inline void
__tlb_remove_tlb_entry (mmu_gather_t *tlb, pte_t *ptep, unsigned long address)
{
if (tlb->start_addr == ~0UL)
tlb->start_addr = address;
tlb->end_addr = address + PAGE_SIZE;
}
/*
* Logically, this routine frees PAGE. On MP machines, the actual freeing of the page
* must be delayed until after the TLB has been flushed (see comments at the beginning of
......@@ -187,6 +180,8 @@ __tlb_remove_tlb_entry (mmu_gather_t *tlb, pte_t *ptep, unsigned long address)
static inline void
tlb_remove_page (mmu_gather_t *tlb, struct page *page)
{
tlb->need_flush = 1;
if (tlb_fast_mode(tlb)) {
free_page_and_swap_cache(page);
return;
......@@ -196,11 +191,37 @@ tlb_remove_page (mmu_gather_t *tlb, struct page *page)
ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
}
/*
* Remove TLB entry for PTE mapped at virtual address ADDRESS. This is called for any
* PTE, not just those pointing to (normal) physical memory.
*/
static inline void
__tlb_remove_tlb_entry (mmu_gather_t *tlb, pte_t *ptep, unsigned long address)
{
if (tlb->start_addr == ~0UL)
tlb->start_addr = address;
tlb->end_addr = address + PAGE_SIZE;
}
#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define tlb_remove_tlb_entry(tlb, ptep, addr) __tlb_remove_tlb_entry(tlb, ptep, addr)
#define pte_free_tlb(tlb, ptep) __pte_free_tlb(tlb, ptep)
#define pmd_free_tlb(tlb, ptep) __pmd_free_tlb(tlb, ptep)
#define tlb_remove_tlb_entry(tlb, ptep, addr) \
do { \
tlb->need_flush = 1; \
__tlb_remove_tlb_entry(tlb, ptep, addr); \
} while (0)
#define pte_free_tlb(tlb, ptep) \
do { \
tlb->need_flush = 1; \
__pte_free_tlb(tlb, ptep); \
} while (0)
#define pmd_free_tlb(tlb, ptep) \
do { \
tlb->need_flush = 1; \
__pmd_free_tlb(tlb, ptep); \
} while (0)
#endif /* _ASM_IA64_TLB_H */
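The tlb.h changes add a need_flush flag so ia64_tlb_flush_mmu() can bail out when the gather never unmapped a PTE; every operation that really removes a mapping sets the flag first. A minimal model of that bookkeeping, not the kernel interface:

#include <stdio.h>

struct gather {
	int need_flush;		/* set whenever a PTE was actually removed */
	int pending;		/* pages queued for freeing after the flush */
};

static void
flush_mmu (struct gather *g)
{
	if (!g->need_flush)
		return;		/* nothing unmapped: skip the expensive TLB flush */
	g->need_flush = 0;
	printf("flushing TLB, freeing %d pages\n", g->pending);
	g->pending = 0;
}

static void
remove_page (struct gather *g)
{
	g->need_flush = 1;	/* mirrors tlb_remove_page() above */
	g->pending++;
}

int main (void)
{
	struct gather g = { 0, 0 };

	flush_mmu(&g);		/* no-op: nothing was unmapped */
	remove_page(&g);
	flush_mmu(&g);		/* now it really flushes */
	return 0;
}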
/*
* linux/include/asm-ia64/topology.h
*
* Copyright (C) 2002, Erich Focht, NEC
*
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#ifndef _ASM_IA64_TOPOLOGY_H
#define _ASM_IA64_TOPOLOGY_H
#include <asm-generic/topology.h>
#include <asm/acpi.h>
#include <asm/numa.h>
/* Returns the number of the node containing CPU 'cpu' */
#ifdef CONFIG_NUMA
#define __cpu_to_node(cpu) cpu_to_node_map[cpu]
#else
#define __cpu_to_node(cpu) (0)
#endif
/*
* Returns the number of the node containing MemBlk 'memblk'
*/
#ifdef CONFIG_ACPI_NUMA
#define __memblk_to_node(memblk) (node_memblk[memblk].nid)
#else
#define __memblk_to_node(memblk) (memblk)
#endif
/*
* Returns the number of the node containing Node 'nid'.
* Not implemented here. Multi-level hierarchies detected with
* the help of node_distance().
*/
#define __parent_node(nid) (nid)
/*
* Returns the number of the first CPU on Node 'node'.
* Slow in the current implementation.
* Who needs this?
*/
/* #define __node_to_first_cpu(node) pool_cpus[pool_ptr[node]] */
static inline int __node_to_first_cpu(int node)
{
int i;
for (i=0; i<NR_CPUS; i++)
if (__cpu_to_node(i)==node)
return i;
BUG(); /* couldn't find a cpu on given node */
return -1;
}
/*
* Returns a bitmask of CPUs on Node 'node'.
*/
static inline unsigned long __node_to_cpu_mask(int node)
{
int cpu;
unsigned long mask = 0UL;
for(cpu=0; cpu<NR_CPUS; cpu++)
if (__cpu_to_node(cpu) == node)
mask |= 1UL << cpu;
return mask;
}
/*
* Returns the number of the first MemBlk on Node 'node'
* Should be fixed when IA64 discontigmem goes in.
*/
#define __node_to_memblk(node) (node)
#endif /* _ASM_IA64_TOPOLOGY_H */