Commit 907ec07c authored by James Simmons

Merge bk://linux.bkbits.net/linux-2.5

into maxwell.earthlink.net:/usr/src/linus-2.5
parents 8ead5a2f 191dd985
bk-kernel-howto.txt: Description of kernel workflow under BitKeeper
bk-make-sum: Create a summary of the changesets present in one repository
but not another, typically in preparation for sending to an upstream maintainer.
Typical usage:
cd my-updated-repo
bk-make-sum ~/repo/original-repo
mv /tmp/linus.txt ../original-repo.txt
bksend: Create readable text output containing a summary of the changes, a
GNU patch of the changes, and the BK metadata for the changes (as needed for
proper importing into BitKeeper by an upstream maintainer). This output is
suitable for emailing BitKeeper changes. The recipient of this output
may pipe it directly to 'bk receive'.
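Typical usage (a sketch; the '-r<rev>' option is an assumption about
bksend's interface, while the receive side is as documented above):
cd my-updated-repo
bksend -r<rev> > /tmp/changes.txt
# mail /tmp/changes.txt to the maintainer, who may then run:
cat changes.txt | bk receive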
bz64wrap: helper script. Uncompressed input is piped to this script, which
compresses it and then outputs a uu-/base64-encoded version of the
compressed data.
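Typical usage (as a filter, per the description above; the filenames are
illustrative only):
bz64wrap < changes.patch > changes.txt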
csets-to-patches: Produces a delta of two BK repositories, in the form
of individual files, each containing a single cset as a GNU patch.
Output is several files, each named "/tmp/rev-$REV.patch", one per cset.
Typical usage:
cd my-updated-repo
bk changes -L ~/repo/original-repo 2>&1 | \
perl csets-to-patches
cset-to-linus: Produces a delta of two BK repositories, in the form of
changeset descriptions, with 'diffstat' output created for each
individual changeset.
Typical usage:
cd my-updated-repo
bk changes -L ~/repo/original-repo 2>&1 | \
perl cset-to-linus > summary.txt
unbz64wrap: Reverse an encoded, compressed data stream created by bz64wrap,
recovering the uncompressed (typically text/plain) output.
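Typical usage (a round-trip sketch through both helpers; filenames are
illustrative only):
bz64wrap < changes.patch > encoded.txt
unbz64wrap < encoded.txt > changes.patch.out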
......@@ -32,7 +32,7 @@ land at the right destination... but I'm getting ahead of myself.
Let's start with this progression:
Each BitKeeper source tree on disk is a repository unto itself.
Each repository has a parent.
Each repository has a parent (except the root/original, of course).
Each repository contains a set of changesets ("csets").
Each cset is one or more changed files, bundled together.
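For example, cloning an existing tree creates a new repository whose parent
is the tree it was cloned from (a sketch; the destination directory name is
illustrative):
bk clone bk://linux.bkbits.net/linux-2.5 my-linux-2.5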
......
......@@ -157,6 +157,11 @@ CONFIG_PCMCIA
and ds.o. If you want to compile it as a module, say M here and
read <file:Documentation/modules.txt>.
CONFIG_KALLSYMS
Say Y here to let the kernel print out symbolic crash information and
symbolic stack backtraces. This increases the size of the kernel
somewhat, as all symbols have to be loaded into the kernel image.
CONFIG_KCORE_ELF
If you enabled support for /proc file system then the file
/proc/kcore will contain the kernel core image. This can be used
......
......@@ -33,44 +33,48 @@ ifeq ($(CONFIG_ITANIUM_BSTEP_SPECIFIC),y)
CFLAGS += -mb-step
endif
HEAD := arch/$(ARCH)/kernel/head.o arch/ia64/kernel/init_task.o
HEAD := arch/$(ARCH)/kernel/head.o arch/$(ARCH)/kernel/init_task.o
core-$(CONFIG_IA64_GENERIC) += arch/$(ARCH)/hp/ arch/$(ARCH)/dig/
core-$(CONFIG_IA64_HP_SIM) += arch/$(ARCH)/hp/
core-$(CONFIG_IA64_HP_ZX1) += arch/$(ARCH)/hp/ arch/$(ARCH)/dig/
core-$(CONFIG_IA64_SGI_SN) += arch/$(ARCH)/sn/kernel arch/$(ARCH)/sn/io
libs-y += arch/$(ARCH)/lib/
core-y += arch/$(ARCH)/kernel/ arch/$(ARCH)/mm/
core-$(CONFIG_IA32_SUPPORT) += arch/$(ARCH)/ia32/
core-$(CONFIG_IA64_DIG) += arch/$(ARCH)/dig/
core-$(CONFIG_IA64_GENERIC) += arch/$(ARCH)/dig/ arch/$(ARCH)/hp/common/ arch/$(ARCH)/hp/zx1/ \
arch/$(ARCH)/hp/sim/
core-$(CONFIG_IA64_HP_ZX1) += arch/$(ARCH)/dig/
core-$(CONFIG_IA64_SGI_SN) += arch/$(ARCH)/sn/kernel arch/$(ARCH)/sn/io \
arch/$(ARCH)/sn/fakeprom
drivers-$(CONFIG_PCI) += arch/$(ARCH)/pci/
drivers-$(CONFIG_IA64_HP_SIM) += arch/$(ARCH)/hp/sim/
drivers-$(CONFIG_IA64_HP_ZX1) += arch/$(ARCH)/hp/common/ arch/$(ARCH)/hp/zx1/
ifdef CONFIG_IA64_SGI_SN
CFLAGS += -DBRINGUP
SUBDIRS += arch/$(ARCH)/sn/fakeprom
endif
core-$(CONFIG_IA32_SUPPORT) += arch/$(ARCH)/ia32/
makeboot = $(call descend,arch/ia64/boot,$(1))
maketool = $(call descend,arch/ia64/tools,$(1))
libs-y += arch/$(ARCH)/lib/
core-y += arch/$(ARCH)/kernel/ arch/$(ARCH)/mm/
drivers-$(CONFIG_PCI) += arch/$(ARCH)/pci/
drivers-$(CONFIG_IA64_HP_ZX1) += arch/$(ARCH)/hp/common/ arch/$(ARCH)/hp/zx1/
.PHONY: compressed archclean archmrproper $(TOPDIR)/include/asm-ia64/offsets.h
all: compressed boot
MAKEBOOT = $(MAKE) -C arch/$(ARCH)/boot
boot: vmlinux
+@$(call makeboot,all)
compressed: vmlinux
$(OBJCOPY) $(OBJCOPYFLAGS) vmlinux vmlinux-tmp
gzip vmlinux-tmp
mv vmlinux-tmp.gz vmlinux.gz
rawboot:
@$(MAKEBOOT) rawboot
archclean:
@$(MAKEBOOT) clean
$(MAKE) -rR -f scripts/Makefile.clean obj=arch/$(ARCH)/boot
archmrproper:
@$(MAKE) -C arch/$(ARCH)/tools mrproper
prepare: $(TOPDIR)/include/asm-ia64/offsets.h
$(TOPDIR)/include/asm-ia64/offsets.h: include/asm include/linux/version.h \
include/config/MARKER
@$(MAKE) -C arch/$(ARCH)/tools $@
+@$(call maketool,$@)
......@@ -8,9 +8,9 @@
# Copyright (C) 1998 by David Mosberger-Tang <davidm@hpl.hp.com>
#
LINKFLAGS = -static -T bootloader.lds
LINKFLAGS = -static -T $(src)/bootloader.lds
OBJECTS = bootloader.o
OBJS = $(obj)/bootloader.o
targets-$(CONFIG_IA64_HP_SIM) += bootloader
targets-$(CONFIG_IA64_GENERIC) += bootloader
......@@ -19,8 +19,8 @@ CFLAGS := $(CFLAGS) $(CFLAGS_KERNEL)
all: $(targets-y)
bootloader: $(OBJECTS)
$(LD) $(LINKFLAGS) $(OBJECTS) $(TOPDIR)/lib/lib.a $(TOPDIR)/arch/$(ARCH)/lib/lib.a \
bootloader: $(OBJS)
$(LD) $(LINKFLAGS) $(OBJS) $(TOPDIR)/lib/lib.a $(TOPDIR)/arch/$(ARCH)/lib/lib.a \
-o bootloader
clean:
......
......@@ -66,6 +66,10 @@ fi
if [ "$CONFIG_IA64_GENERIC" = "y" -o "$CONFIG_IA64_DIG" = "y" -o "$CONFIG_IA64_HP_ZX1" = "y" ];
then
bool ' Enable NUMA support' CONFIG_NUMA
if [ "$CONFIG_NUMA" = "y" ]; then
define_bool CONFIG_DISCONTIGMEM y
fi
bool ' Enable IA-64 Machine Check Abort' CONFIG_IA64_MCA
define_bool CONFIG_PM y
define_bool CONFIG_IOSAPIC y
......@@ -267,6 +271,7 @@ choice 'Physical memory granularity' \
bool 'Kernel debugging' CONFIG_DEBUG_KERNEL
if [ "$CONFIG_DEBUG_KERNEL" != "n" ]; then
bool ' Load all symbols for debugging/kksymoops' CONFIG_KALLSYMS
bool ' Print possible IA64 hazards to console' CONFIG_IA64_PRINT_HAZARDS
bool ' Disable VHPT' CONFIG_DISABLE_VHPT
bool ' Magic SysRq key' CONFIG_MAGIC_SYSRQ
......
# arch/ia64/hp/Makefile
# Copyright (c) 2002 Matthew Wilcox for Hewlett Packard
obj-$(CONFIG_IA64_GENERIC) += sim/ zx1/ common/
obj-$(CONFIG_IA64_HP_SIM) += sim/
include $(TOPDIR)/Rules.make
......@@ -361,7 +361,9 @@ simscsi_queuecommand (Scsi_Cmnd *sc, void (*done)(Scsi_Cmnd *))
break;
case MODE_SENSE:
printk("MODE_SENSE\n");
/* sd.c uses this to determine whether disk does write-caching. */
memset(sc->request_buffer, 0, 128);
sc->result = GOOD;
break;
case START_STOP:
......@@ -391,6 +393,4 @@ simscsi_queuecommand (Scsi_Cmnd *sc, void (*done)(Scsi_Cmnd *))
static Scsi_Host_Template driver_template = SIMSCSI;
#define __initcall(fn) late_initcall(fn)
#include "../drivers/scsi/scsi_module.c"
......@@ -752,8 +752,7 @@ static int get_async_struct(int line, struct async_struct **ret_info)
info->flags = sstate->flags;
info->xmit_fifo_size = sstate->xmit_fifo_size;
info->line = line;
info->tqueue.routine = do_softint;
info->tqueue.data = info;
INIT_WORK(&info->work, do_softint, info);
info->state = sstate;
if (sstate->info) {
kfree(info);
......
......@@ -26,6 +26,12 @@
#include <linux/if_ppp.h>
#include <linux/ixjuser.h>
#include <linux/i2o-dev.h>
#include <scsi/scsi.h>
/* Ugly hack. */
#undef __KERNEL__
#include <scsi/scsi_ioctl.h>
#define __KERNEL__
#include <scsi/sg.h>
#include <asm/ia32.h>
......@@ -60,6 +66,235 @@ put_dirent32 (struct dirent *d, struct linux32_dirent *d32)
|| put_user(d->d_reclen, &d32->d_reclen)
|| copy_to_user(d32->d_name, d->d_name, namelen + 1));
}
/*
* The transform code for the SG_IO ioctl was brazenly lifted from
* the Sparc64 port in the file `arch/sparc64/kernel/ioctl32.c'.
* Thanks to Jakub Jelinek & Eddie C. Dost.
*/
typedef struct sg_io_hdr32 {
int interface_id; /* [i] 'S' for SCSI generic (required) */
int dxfer_direction; /* [i] data transfer direction */
char cmd_len; /* [i] SCSI command length ( <= 16 bytes) */
char mx_sb_len; /* [i] max length to write to sbp */
short iovec_count; /* [i] 0 implies no scatter gather */
int dxfer_len; /* [i] byte count of data transfer */
int dxferp; /* [i], [*io] points to data transfer memory
or scatter gather list */
int cmdp; /* [i], [*i] points to command to perform */
int sbp; /* [i], [*o] points to sense_buffer memory */
int timeout; /* [i] MAX_UINT->no timeout (unit: millisec) */
int flags; /* [i] 0 -> default, see SG_FLAG... */
int pack_id; /* [i->o] unused internally (normally) */
int usr_ptr; /* [i->o] unused internally */
char status; /* [o] scsi status */
char masked_status; /* [o] shifted, masked scsi status */
char msg_status; /* [o] messaging level data (optional) */
char sb_len_wr; /* [o] byte count actually written to sbp */
short host_status; /* [o] errors from host adapter */
short driver_status; /* [o] errors from software driver */
int resid; /* [o] dxfer_len - actual_transferred */
int duration; /* [o] time taken by cmd (unit: millisec) */
int info; /* [o] auxiliary information */
} sg_io_hdr32_t; /* 64 bytes long (on IA32) */
struct iovec32 { unsigned int iov_base; int iov_len; };
static int alloc_sg_iovec(sg_io_hdr_t *sgp, int uptr32)
{
struct iovec32 *uiov = (struct iovec32 *) P(uptr32);
sg_iovec_t *kiov;
int i;
sgp->dxferp = kmalloc(sgp->iovec_count *
sizeof(sg_iovec_t), GFP_KERNEL);
if (!sgp->dxferp)
return -ENOMEM;
memset(sgp->dxferp, 0,
sgp->iovec_count * sizeof(sg_iovec_t));
kiov = (sg_iovec_t *) sgp->dxferp;
for (i = 0; i < sgp->iovec_count; i++) {
int iov_base32;
if (__get_user(iov_base32, &uiov->iov_base) ||
__get_user(kiov->iov_len, &uiov->iov_len))
return -EFAULT;
kiov->iov_base = kmalloc(kiov->iov_len, GFP_KERNEL);
if (!kiov->iov_base)
return -ENOMEM;
if (copy_from_user(kiov->iov_base,
(void *) P(iov_base32),
kiov->iov_len))
return -EFAULT;
uiov++;
kiov++;
}
return 0;
}
static int copy_back_sg_iovec(sg_io_hdr_t *sgp, int uptr32)
{
struct iovec32 *uiov = (struct iovec32 *) P(uptr32);
sg_iovec_t *kiov = (sg_iovec_t *) sgp->dxferp;
int i;
for (i = 0; i < sgp->iovec_count; i++) {
int iov_base32;
if (__get_user(iov_base32, &uiov->iov_base))
return -EFAULT;
if (copy_to_user((void *) P(iov_base32),
kiov->iov_base,
kiov->iov_len))
return -EFAULT;
uiov++;
kiov++;
}
return 0;
}
static void free_sg_iovec(sg_io_hdr_t *sgp)
{
sg_iovec_t *kiov = (sg_iovec_t *) sgp->dxferp;
int i;
for (i = 0; i < sgp->iovec_count; i++) {
if (kiov->iov_base) {
kfree(kiov->iov_base);
kiov->iov_base = NULL;
}
kiov++;
}
kfree(sgp->dxferp);
sgp->dxferp = NULL;
}
static int sg_ioctl_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
{
sg_io_hdr32_t *sg_io32;
sg_io_hdr_t sg_io64;
int dxferp32, cmdp32, sbp32;
mm_segment_t old_fs;
int err = 0;
sg_io32 = (sg_io_hdr32_t *)arg;
err = __get_user(sg_io64.interface_id, &sg_io32->interface_id);
err |= __get_user(sg_io64.dxfer_direction, &sg_io32->dxfer_direction);
err |= __get_user(sg_io64.cmd_len, &sg_io32->cmd_len);
err |= __get_user(sg_io64.mx_sb_len, &sg_io32->mx_sb_len);
err |= __get_user(sg_io64.iovec_count, &sg_io32->iovec_count);
err |= __get_user(sg_io64.dxfer_len, &sg_io32->dxfer_len);
err |= __get_user(sg_io64.timeout, &sg_io32->timeout);
err |= __get_user(sg_io64.flags, &sg_io32->flags);
err |= __get_user(sg_io64.pack_id, &sg_io32->pack_id);
sg_io64.dxferp = NULL;
sg_io64.cmdp = NULL;
sg_io64.sbp = NULL;
err |= __get_user(cmdp32, &sg_io32->cmdp);
sg_io64.cmdp = kmalloc(sg_io64.cmd_len, GFP_KERNEL);
if (!sg_io64.cmdp) {
err = -ENOMEM;
goto out;
}
if (copy_from_user(sg_io64.cmdp,
(void *) P(cmdp32),
sg_io64.cmd_len)) {
err = -EFAULT;
goto out;
}
err |= __get_user(sbp32, &sg_io32->sbp);
sg_io64.sbp = kmalloc(sg_io64.mx_sb_len, GFP_KERNEL);
if (!sg_io64.sbp) {
err = -ENOMEM;
goto out;
}
if (copy_from_user(sg_io64.sbp,
(void *) P(sbp32),
sg_io64.mx_sb_len)) {
err = -EFAULT;
goto out;
}
err |= __get_user(dxferp32, &sg_io32->dxferp);
if (sg_io64.iovec_count) {
int ret;
if ((ret = alloc_sg_iovec(&sg_io64, dxferp32))) {
err = ret;
goto out;
}
} else {
sg_io64.dxferp = kmalloc(sg_io64.dxfer_len, GFP_KERNEL);
if (!sg_io64.dxferp) {
err = -ENOMEM;
goto out;
}
if (copy_from_user(sg_io64.dxferp,
(void *) P(dxferp32),
sg_io64.dxfer_len)) {
err = -EFAULT;
goto out;
}
}
/* Unused internally, do not even bother to copy it over. */
sg_io64.usr_ptr = NULL;
if (err)
return -EFAULT;
old_fs = get_fs();
set_fs (KERNEL_DS);
err = sys_ioctl (fd, cmd, (unsigned long) &sg_io64);
set_fs (old_fs);
if (err < 0)
goto out;
err = __put_user(sg_io64.pack_id, &sg_io32->pack_id);
err |= __put_user(sg_io64.status, &sg_io32->status);
err |= __put_user(sg_io64.masked_status, &sg_io32->masked_status);
err |= __put_user(sg_io64.msg_status, &sg_io32->msg_status);
err |= __put_user(sg_io64.sb_len_wr, &sg_io32->sb_len_wr);
err |= __put_user(sg_io64.host_status, &sg_io32->host_status);
err |= __put_user(sg_io64.driver_status, &sg_io32->driver_status);
err |= __put_user(sg_io64.resid, &sg_io32->resid);
err |= __put_user(sg_io64.duration, &sg_io32->duration);
err |= __put_user(sg_io64.info, &sg_io32->info);
err |= copy_to_user((void *)P(sbp32), sg_io64.sbp, sg_io64.mx_sb_len);
if (sg_io64.dxferp) {
if (sg_io64.iovec_count)
err |= copy_back_sg_iovec(&sg_io64, dxferp32);
else
err |= copy_to_user((void *)P(dxferp32),
sg_io64.dxferp,
sg_io64.dxfer_len);
}
if (err)
err = -EFAULT;
out:
if (sg_io64.cmdp)
kfree(sg_io64.cmdp);
if (sg_io64.sbp)
kfree(sg_io64.sbp);
if (sg_io64.dxferp) {
if (sg_io64.iovec_count) {
free_sg_iovec(&sg_io64);
} else {
kfree(sg_io64.dxferp);
}
}
return err;
}
asmlinkage long
sys32_ioctl (unsigned int fd, unsigned int cmd, unsigned int arg)
......@@ -271,6 +506,9 @@ sys32_ioctl (unsigned int fd, unsigned int cmd, unsigned int arg)
default:
return sys_ioctl(fd, cmd, (unsigned long)arg);
case IOCTL_NR(SG_IO):
return(sg_ioctl_trans(fd, cmd, arg));
}
printk("%x:unimplemented IA32 ioctl system call\n", cmd);
return -EINVAL;
......
......@@ -2842,20 +2842,6 @@ putreg (struct task_struct *child, int regno, unsigned int value)
}
}
static inline void
ia32f2ia64f (void *dst, void *src)
{
asm volatile ("ldfe f6=[%1];; stf.spill [%0]=f6" :: "r"(dst), "r"(src) : "memory");
return;
}
static inline void
ia64f2ia32f (void *dst, void *src)
{
asm volatile ("ldf.fill f6=[%1];; stfe [%0]=f6" :: "r"(dst), "r"(src) : "memory");
return;
}
static void
put_fpreg (int regno, struct _fpreg_ia32 *reg, struct pt_regs *ptp, struct switch_stack *swp,
int tos)
......
......@@ -8,6 +8,9 @@
* Copyright (C) 2000 Intel Corp.
* Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com>
* Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
* Copyright (C) 2001 Jenna Hall <jenna.s.hall@intel.com>
* Copyright (C) 2001 Takayoshi Kochi <t-kouchi@cq.jp.nec.com>
* Copyright (C) 2002 Erich Focht <efocht@ess.nec.de>
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
......@@ -43,6 +46,7 @@
#include <asm/machvec.h>
#include <asm/page.h>
#include <asm/system.h>
#include <asm/numa.h>
#define PREFIX "ACPI: "
......@@ -447,6 +451,189 @@ acpi_parse_madt (unsigned long phys_addr, unsigned long size)
}
#ifdef CONFIG_ACPI_NUMA
#define SLIT_DEBUG
#define PXM_FLAG_LEN ((MAX_PXM_DOMAINS + 1)/32)
static int __initdata srat_num_cpus; /* number of cpus */
static u32 __initdata pxm_flag[PXM_FLAG_LEN];
#define pxm_bit_set(bit) (set_bit(bit,(void *)pxm_flag))
#define pxm_bit_test(bit) (test_bit(bit,(void *)pxm_flag))
/* maps to convert between proximity domain and logical node ID */
int __initdata pxm_to_nid_map[MAX_PXM_DOMAINS];
int __initdata nid_to_pxm_map[NR_NODES];
static struct acpi_table_slit __initdata *slit_table;
/*
* ACPI 2.0 SLIT (System Locality Information Table)
* http://devresource.hp.com/devresource/Docs/TechPapers/IA64/slit.pdf
*/
void __init
acpi_numa_slit_init (struct acpi_table_slit *slit)
{
u32 len;
len = sizeof(struct acpi_table_header) + 8
+ slit->localities * slit->localities;
if (slit->header.length != len) {
printk("ACPI 2.0 SLIT: size mismatch: %d expected, %d actual\n",
len, slit->header.length);
memset(numa_slit, 10, sizeof(numa_slit));
return;
}
slit_table = slit;
}
void __init
acpi_numa_processor_affinity_init (struct acpi_table_processor_affinity *pa)
{
/* record this node in proximity bitmap */
pxm_bit_set(pa->proximity_domain);
node_cpuid[srat_num_cpus].phys_id = (pa->apic_id << 8) | (pa->lsapic_eid);
/* nid should be overridden as logical node id later */
node_cpuid[srat_num_cpus].nid = pa->proximity_domain;
srat_num_cpus++;
}
void __init
acpi_numa_memory_affinity_init (struct acpi_table_memory_affinity *ma)
{
unsigned long paddr, size, hole_size, min_hole_size;
u8 pxm;
struct node_memblk_s *p, *q, *pend;
pxm = ma->proximity_domain;
/* fill node memory chunk structure */
paddr = ma->base_addr_hi;
paddr = (paddr << 32) | ma->base_addr_lo;
size = ma->length_hi;
size = (size << 32) | ma->length_lo;
if (num_memblks >= NR_MEMBLKS) {
printk("Too many mem chunks in SRAT. Ignoring %ld MBytes at %lx\n",
size/(1024*1024), paddr);
return;
}
/* Ignore disabled entries */
if (!ma->flags.enabled)
return;
/*
* When the chunk is not the first one in the node, check distance
* from the other chunks. When the hole is too huge ignore the chunk.
* This restriction should be removed when multiple chunks per node
* is supported.
*/
pend = &node_memblk[num_memblks];
min_hole_size = 0;
for (p = &node_memblk[0]; p < pend; p++) {
if (p->nid != pxm)
continue;
if (p->start_paddr < paddr)
hole_size = paddr - (p->start_paddr + p->size);
else
hole_size = p->start_paddr - (paddr + size);
if (!min_hole_size || hole_size < min_hole_size)
min_hole_size = hole_size;
}
if (min_hole_size) {
if (min_hole_size > size) {
printk("Too huge memory hole. Ignoring %ld MBytes at %lx\n",
size/(1024*1024), paddr);
return;
}
}
/* record this node in proximity bitmap */
pxm_bit_set(pxm);
/* Insertion sort based on base address */
pend = &node_memblk[num_memblks];
for (p = &node_memblk[0]; p < pend; p++) {
if (paddr < p->start_paddr)
break;
}
if (p < pend) {
for (q = pend; q >= p; q--)
*(q + 1) = *q;
}
p->start_paddr = paddr;
p->size = size;
p->nid = pxm;
num_memblks++;
}
void __init
acpi_numa_arch_fixup(void)
{
int i, j, node_from, node_to;
/* calculate total number of nodes in system from PXM bitmap */
numnodes = 0; /* init total nodes in system */
memset(pxm_to_nid_map, -1, sizeof(pxm_to_nid_map));
memset(nid_to_pxm_map, -1, sizeof(nid_to_pxm_map));
for (i = 0; i < MAX_PXM_DOMAINS; i++) {
if (pxm_bit_test(i)) {
pxm_to_nid_map[i] = numnodes;
nid_to_pxm_map[numnodes++] = i;
}
}
/* set logical node id in memory chunk structure */
for (i = 0; i < num_memblks; i++)
node_memblk[i].nid = pxm_to_nid_map[node_memblk[i].nid];
/* assign memory bank numbers for each chunk on each node */
for (i = 0; i < numnodes; i++) {
int bank;
bank = 0;
for (j = 0; j < num_memblks; j++)
if (node_memblk[j].nid == i)
node_memblk[j].bank = bank++;
}
/* set logical node id in cpu structure */
for (i = 0; i < srat_num_cpus; i++)
node_cpuid[i].nid = pxm_to_nid_map[node_cpuid[i].nid];
printk("Number of logical nodes in system = %d\n", numnodes);
printk("Number of memory chunks in system = %d\n", num_memblks);
if (!slit_table) return;
memset(numa_slit, -1, sizeof(numa_slit));
for (i=0; i<slit_table->localities; i++) {
if (!pxm_bit_test(i))
continue;
node_from = pxm_to_nid_map[i];
for (j=0; j<slit_table->localities; j++) {
if (!pxm_bit_test(j))
continue;
node_to = pxm_to_nid_map[j];
node_distance(node_from, node_to) =
slit_table->entry[i*slit_table->localities + j];
}
}
#ifdef SLIT_DEBUG
printk("ACPI 2.0 SLIT locality table:\n");
for (i = 0; i < numnodes; i++) {
for (j = 0; j < numnodes; j++)
printk("%03d ", node_distance(i,j));
printk("\n");
}
#endif
}
#endif /* CONFIG_ACPI_NUMA */
static int __init
acpi_parse_fadt (unsigned long phys_addr, unsigned long size)
{
......@@ -556,12 +743,6 @@ acpi_parse_spcr (unsigned long phys_addr, unsigned long size)
int __init
acpi_boot_init (char *cmdline)
{
int result;
/* Initialize the ACPI boot-time table parser */
result = acpi_table_init(cmdline);
if (result)
return result;
/*
* MADT
......@@ -631,6 +812,9 @@ acpi_boot_init (char *cmdline)
smp_boot_data.cpu_count = total_cpus;
smp_build_cpu_map();
#ifdef CONFIG_NUMA
build_cpu_to_node_map();
#endif
#endif
/* Make boot-up look pretty */
printk("%d CPUs available, %d CPUs total\n", available_cpus, total_cpus);
......
......@@ -60,67 +60,157 @@ struct proc_dir_entry *efi_dir = NULL;
static unsigned long mem_limit = ~0UL;
static efi_status_t
phys_get_time (efi_time_t *tm, efi_time_cap_t *tc)
{
return efi_call_phys(__va(runtime->get_time), __pa(tm), __pa(tc));
#define efi_call_virt(f, args...) (*(f))(args)
#define STUB_GET_TIME(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_time (efi_time_t *tm, efi_time_cap_t *tc) \
{ \
struct ia64_fpreg fr[6]; \
efi_status_t ret; \
\
ia64_save_scratch_fpregs(fr); \
ret = efi_call_##prefix((efi_get_time_t *) __va(runtime->get_time), adjust_arg(tm), \
adjust_arg(tc)); \
ia64_load_scratch_fpregs(fr); \
return ret; \
}
static efi_status_t
phys_set_time (efi_time_t *tm)
{
return efi_call_phys(__va(runtime->set_time), __pa(tm));
#define STUB_SET_TIME(prefix, adjust_arg) \
static efi_status_t \
prefix##_set_time (efi_time_t *tm) \
{ \
struct ia64_fpreg fr[6]; \
efi_status_t ret; \
\
ia64_save_scratch_fpregs(fr); \
ret = efi_call_##prefix((efi_set_time_t *) __va(runtime->set_time), adjust_arg(tm)); \
ia64_load_scratch_fpregs(fr); \
return ret; \
}
static efi_status_t
phys_get_wakeup_time (efi_bool_t *enabled, efi_bool_t *pending, efi_time_t *tm)
{
return efi_call_phys(__va(runtime->get_wakeup_time), __pa(enabled), __pa(pending),
__pa(tm));
#define STUB_GET_WAKEUP_TIME(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_wakeup_time (efi_bool_t *enabled, efi_bool_t *pending, efi_time_t *tm) \
{ \
struct ia64_fpreg fr[6]; \
efi_status_t ret; \
\
ia64_save_scratch_fpregs(fr); \
ret = efi_call_##prefix((efi_get_wakeup_time_t *) __va(runtime->get_wakeup_time), \
adjust_arg(enabled), adjust_arg(pending), adjust_arg(tm)); \
ia64_load_scratch_fpregs(fr); \
return ret; \
}
static efi_status_t
phys_set_wakeup_time (efi_bool_t enabled, efi_time_t *tm)
{
return efi_call_phys(__va(runtime->set_wakeup_time), enabled, __pa(tm));
#define STUB_SET_WAKEUP_TIME(prefix, adjust_arg) \
static efi_status_t \
prefix##_set_wakeup_time (efi_bool_t enabled, efi_time_t *tm) \
{ \
struct ia64_fpreg fr[6]; \
efi_status_t ret; \
\
ia64_save_scratch_fpregs(fr); \
ret = efi_call_##prefix((efi_set_wakeup_time_t *) __va(runtime->set_wakeup_time), \
enabled, adjust_arg(tm)); \
ia64_load_scratch_fpregs(fr); \
return ret; \
}
static efi_status_t
phys_get_variable (efi_char16_t *name, efi_guid_t *vendor, u32 *attr,
unsigned long *data_size, void *data)
{
return efi_call_phys(__va(runtime->get_variable), __pa(name), __pa(vendor), __pa(attr),
__pa(data_size), __pa(data));
#define STUB_GET_VARIABLE(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_variable (efi_char16_t *name, efi_guid_t *vendor, u32 *attr, \
unsigned long *data_size, void *data) \
{ \
struct ia64_fpreg fr[6]; \
efi_status_t ret; \
\
ia64_save_scratch_fpregs(fr); \
ret = efi_call_##prefix((efi_get_variable_t *) __va(runtime->get_variable), \
adjust_arg(name), adjust_arg(vendor), adjust_arg(attr), \
adjust_arg(data_size), adjust_arg(data)); \
ia64_load_scratch_fpregs(fr); \
return ret; \
}
static efi_status_t
phys_get_next_variable (unsigned long *name_size, efi_char16_t *name, efi_guid_t *vendor)
{
return efi_call_phys(__va(runtime->get_next_variable), __pa(name_size), __pa(name),
__pa(vendor));
#define STUB_GET_NEXT_VARIABLE(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_next_variable (unsigned long *name_size, efi_char16_t *name, efi_guid_t *vendor) \
{ \
struct ia64_fpreg fr[6]; \
efi_status_t ret; \
\
ia64_save_scratch_fpregs(fr); \
ret = efi_call_##prefix((efi_get_next_variable_t *) __va(runtime->get_next_variable), \
adjust_arg(name_size), adjust_arg(name), adjust_arg(vendor)); \
ia64_load_scratch_fpregs(fr); \
return ret; \
}
static efi_status_t
phys_set_variable (efi_char16_t *name, efi_guid_t *vendor, u32 attr,
unsigned long data_size, void *data)
{
return efi_call_phys(__va(runtime->set_variable), __pa(name), __pa(vendor), attr,
data_size, __pa(data));
#define STUB_SET_VARIABLE(prefix, adjust_arg) \
static efi_status_t \
prefix##_set_variable (efi_char16_t *name, efi_guid_t *vendor, u32 attr, \
unsigned long data_size, void *data) \
{ \
struct ia64_fpreg fr[6]; \
efi_status_t ret; \
\
ia64_save_scratch_fpregs(fr); \
ret = efi_call_##prefix((efi_set_variable_t *) __va(runtime->set_variable), \
adjust_arg(name), adjust_arg(vendor), attr, data_size, \
adjust_arg(data)); \
ia64_load_scratch_fpregs(fr); \
return ret; \
}
static efi_status_t
phys_get_next_high_mono_count (u64 *count)
{
return efi_call_phys(__va(runtime->get_next_high_mono_count), __pa(count));
#define STUB_GET_NEXT_HIGH_MONO_COUNT(prefix, adjust_arg) \
static efi_status_t \
prefix##_get_next_high_mono_count (u64 *count) \
{ \
struct ia64_fpreg fr[6]; \
efi_status_t ret; \
\
ia64_save_scratch_fpregs(fr); \
ret = efi_call_##prefix((efi_get_next_high_mono_count_t *) \
__va(runtime->get_next_high_mono_count), adjust_arg(count)); \
ia64_load_scratch_fpregs(fr); \
return ret; \
}
static void
phys_reset_system (int reset_type, efi_status_t status,
unsigned long data_size, efi_char16_t *data)
{
efi_call_phys(__va(runtime->reset_system), status, data_size, __pa(data));
#define STUB_RESET_SYSTEM(prefix, adjust_arg) \
static void \
prefix##_reset_system (int reset_type, efi_status_t status, \
unsigned long data_size, efi_char16_t *data) \
{ \
struct ia64_fpreg fr[6]; \
\
ia64_save_scratch_fpregs(fr); \
efi_call_##prefix((efi_reset_system_t *) __va(runtime->reset_system), \
reset_type, status, data_size, adjust_arg(data)); \
/* should not return, but just in case... */ \
ia64_load_scratch_fpregs(fr); \
}
STUB_GET_TIME(phys, __pa)
STUB_SET_TIME(phys, __pa)
STUB_GET_WAKEUP_TIME(phys, __pa)
STUB_SET_WAKEUP_TIME(phys, __pa)
STUB_GET_VARIABLE(phys, __pa)
STUB_GET_NEXT_VARIABLE(phys, __pa)
STUB_SET_VARIABLE(phys, __pa)
STUB_GET_NEXT_HIGH_MONO_COUNT(phys, __pa)
STUB_RESET_SYSTEM(phys, __pa)
STUB_GET_TIME(virt, )
STUB_SET_TIME(virt, )
STUB_GET_WAKEUP_TIME(virt, )
STUB_SET_WAKEUP_TIME(virt, )
STUB_GET_VARIABLE(virt, )
STUB_GET_NEXT_VARIABLE(virt, )
STUB_SET_VARIABLE(virt, )
STUB_GET_NEXT_HIGH_MONO_COUNT(virt, )
STUB_RESET_SYSTEM(virt, )
void
efi_gettimeofday (struct timeval *tv)
{
......@@ -574,18 +664,17 @@ efi_enter_virtual_mode (void)
}
/*
* Now that EFI is in virtual mode, we arrange for EFI functions to be
* called directly:
* Now that EFI is in virtual mode, we call the EFI functions more efficiently:
*/
efi.get_time = __va(runtime->get_time);
efi.set_time = __va(runtime->set_time);
efi.get_wakeup_time = __va(runtime->get_wakeup_time);
efi.set_wakeup_time = __va(runtime->set_wakeup_time);
efi.get_variable = __va(runtime->get_variable);
efi.get_next_variable = __va(runtime->get_next_variable);
efi.set_variable = __va(runtime->set_variable);
efi.get_next_high_mono_count = __va(runtime->get_next_high_mono_count);
efi.reset_system = __va(runtime->reset_system);
efi.get_time = virt_get_time;
efi.set_time = virt_set_time;
efi.get_wakeup_time = virt_get_wakeup_time;
efi.set_wakeup_time = virt_set_wakeup_time;
efi.get_variable = virt_get_variable;
efi.get_next_variable = virt_get_next_variable;
efi.set_variable = virt_set_variable;
efi.get_next_high_mono_count = virt_get_next_high_mono_count;
efi.reset_system = virt_reset_system;
}
/*
......
......@@ -2,7 +2,7 @@
* PAL & SAL emulation.
*
* Copyright (C) 1998-2001 Hewlett-Packard Co
* Copyright (C) 1998-2001 David Mosberger-Tang <davidm@hpl.hp.com>
* David Mosberger-Tang <davidm@hpl.hp.com>
*
* For the HP simulator, this file gets included in boot/bootloader.c.
* For SoftSDV, this file gets included in sys_softsdv.c.
......
......@@ -133,12 +133,8 @@ find_iosapic (unsigned int gsi)
return -1;
}
/*
* Translate GSI number to the corresponding IA-64 interrupt vector. If no
* entry exists, return -1.
*/
int
gsi_to_vector (unsigned int gsi)
static inline int
_gsi_to_vector (unsigned int gsi)
{
struct iosapic_intr_info *info;
......@@ -148,6 +144,26 @@ gsi_to_vector (unsigned int gsi)
return -1;
}
/*
* Translate GSI number to the corresponding IA-64 interrupt vector. If no
* entry exists, return -1.
*/
inline int
gsi_to_vector (unsigned int gsi)
{
return _gsi_to_vector(gsi);
}
int
gsi_to_irq (unsigned int gsi)
{
/*
* XXX fix me: this assumes an identity mapping between IA-64 vector and Linux irq
* numbers...
*/
return _gsi_to_vector(gsi);
}
static void
set_rte (unsigned int vector, unsigned int dest)
{
......@@ -157,7 +173,7 @@ set_rte (unsigned int vector, unsigned int dest)
int rte_index;
char redir;
DBG(KERN_DEBUG"IOSAPIC: routing vector %d to %x\n", vector, dest);
DBG(KERN_DEBUG"IOSAPIC: routing vector %d to 0x%x\n", vector, dest);
rte_index = iosapic_intr_info[vector].rte_index;
if (rte_index < 0)
......
......@@ -116,8 +116,8 @@ ENTRY(vhpt_miss)
;;
(p8) dep r25=r18,r25,2,6
(p8) shr r22=r22,HPAGE_SHIFT-PAGE_SHIFT
;;
#endif
;;
cmp.eq p6,p7=5,r17 // is IFA pointing into to region 5?
shr.u r18=r22,PGDIR_SHIFT // get bits 33-63 of the faulting address
;;
......
......@@ -1012,28 +1012,13 @@ ia64_log_prt_oem_data (int header_len, int sect_len, u8 *p_data, prfunc_t prfunc
void
ia64_log_rec_header_print (sal_log_record_header_t *lh, prfunc_t prfunc)
{
char str_buf[32];
sprintf(str_buf, "%2d.%02d",
(lh->revision.major >> 4) * 10 + (lh->revision.major & 0xf),
(lh->revision.minor >> 4) * 10 + (lh->revision.minor & 0xf));
prfunc("+Err Record ID: %d SAL Rev: %s\n", lh->id, str_buf);
sprintf(str_buf, "%02d/%02d/%04d/ %02d:%02d:%02d",
(lh->timestamp.slh_month >> 4) * 10 +
(lh->timestamp.slh_month & 0xf),
(lh->timestamp.slh_day >> 4) * 10 +
(lh->timestamp.slh_day & 0xf),
(lh->timestamp.slh_century >> 4) * 1000 +
(lh->timestamp.slh_century & 0xf) * 100 +
(lh->timestamp.slh_year >> 4) * 10 +
(lh->timestamp.slh_year & 0xf),
(lh->timestamp.slh_hour >> 4) * 10 +
(lh->timestamp.slh_hour & 0xf),
(lh->timestamp.slh_minute >> 4) * 10 +
(lh->timestamp.slh_minute & 0xf),
(lh->timestamp.slh_second >> 4) * 10 +
(lh->timestamp.slh_second & 0xf));
prfunc("+Time: %s Severity %d\n", str_buf, lh->severity);
prfunc("+Err Record ID: %d SAL Rev: %2x.%02x\n", lh->id,
lh->revision.major, lh->revision.minor);
prfunc("+Time: %02x/%02x/%02x%02x %02x:%02x:%02x Severity %d\n",
lh->timestamp.slh_month, lh->timestamp.slh_day,
lh->timestamp.slh_century, lh->timestamp.slh_year,
lh->timestamp.slh_hour, lh->timestamp.slh_minute,
lh->timestamp.slh_second, lh->severity);
}
/*
......
......@@ -63,6 +63,26 @@
* Misc macros and definitions
*/
#define PMU_FIRST_COUNTER 4
#define PMU_MAX_PMCS 256
#define PMU_MAX_PMDS 256
/*
* type of a PMU register (bitmask).
* bitmask structure:
* bit0 : register implemented
* bit1 : end marker
* bit2-3 : reserved
* bit4-7 : register type
* bit8-31: reserved
*/
#define PFM_REG_IMPL 0x1 /* register implemented */
#define PFM_REG_END 0x2 /* end marker */
#define PFM_REG_MONITOR (0x1<<4|PFM_REG_IMPL) /* a PMC with a pmc.pm field only */
#define PFM_REG_COUNTING (0x2<<4|PFM_REG_IMPL) /* a PMC with a pmc.pm AND pmc.oi, a PMD used as a counter */
#define PFM_REG_CONTROL (0x3<<4|PFM_REG_IMPL) /* PMU control register */
#define PFM_REG_CONFIG (0x4<<4|PFM_REG_IMPL) /* refine configuration */
#define PFM_REG_BUFFER (0x5<<4|PFM_REG_IMPL) /* PMD used as buffer */
#define PFM_IS_DISABLED() pmu_conf.pfm_is_disabled
......@@ -70,13 +90,18 @@
#define PFM_FL_INHERIT_MASK (PFM_FL_INHERIT_NONE|PFM_FL_INHERIT_ONCE|PFM_FL_INHERIT_ALL)
/* i assume unsigned */
#define PMC_IS_IMPL(i) (i<pmu_conf.num_pmcs && pmu_conf.impl_regs[i>>6] & (1UL<< (i) %64))
#define PMD_IS_IMPL(i) (i<pmu_conf.num_pmds && pmu_conf.impl_regs[4+(i>>6)] & (1UL<<(i) % 64))
#define PMC_IS_IMPL(i) (i< PMU_MAX_PMCS && (pmu_conf.pmc_desc[i].type & PFM_REG_IMPL))
#define PMD_IS_IMPL(i) (i< PMU_MAX_PMDS && (pmu_conf.pmd_desc[i].type & PFM_REG_IMPL))
/* XXX: these three assume that register i is implemented */
#define PMD_IS_COUNTING(i) (pmu_conf.pmd_desc[i].type == PFM_REG_COUNTING)
#define PMC_IS_COUNTING(i) (pmu_conf.pmc_desc[i].type == PFM_REG_COUNTING)
#define PMC_IS_MONITOR(i) (pmu_conf.pmc_desc[i].type == PFM_REG_MONITOR)
#define PMC_DFL_VAL(i) pmu_conf.pmc_desc[i].default_value
#define PMC_RSVD_MASK(i) pmu_conf.pmc_desc[i].reserved_mask
#define PMD_PMD_DEP(i) pmu_conf.pmd_desc[i].dep_pmd[0]
#define PMC_PMD_DEP(i) pmu_conf.pmc_desc[i].dep_pmd[0]
/* k assume unsigned */
#define IBR_IS_IMPL(k) (k<pmu_conf.num_ibrs)
......@@ -175,19 +200,6 @@ typedef struct _pfm_smpl_buffer_desc {
#define LOCK_PSB(p) spin_lock(&(p)->psb_lock)
#define UNLOCK_PSB(p) spin_unlock(&(p)->psb_lock)
/*
* The possible type of a PMU register
*/
typedef enum {
PFM_REG_NOTIMPL, /* not implemented */
PFM_REG_NONE, /* end marker */
PFM_REG_MONITOR, /* a PMC with a pmc.pm field only */
PFM_REG_COUNTING,/* a PMC with a pmc.pm AND pmc.oi, a PMD used as a counter */
PFM_REG_CONTROL, /* PMU control register */
PFM_REG_CONFIG, /* refine configuration */
PFM_REG_BUFFER /* PMD used as buffer */
} pfm_pmu_reg_type_t;
/*
* 64-bit software counter structure
*/
......@@ -283,13 +295,16 @@ typedef struct {
* dep_pmc[]: a bitmask of dependent PMC registers
*/
typedef struct {
pfm_pmu_reg_type_t type;
unsigned int type;
int pm_pos;
unsigned long default_value; /* power-on default value */
unsigned long reserved_mask; /* bitmask of reserved bits */
int (*read_check)(struct task_struct *task, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
int (*write_check)(struct task_struct *task, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
unsigned long dep_pmd[4];
unsigned long dep_pmc[4];
} pfm_reg_desc_t;
/* assume cnum is a valid monitor */
#define PMC_PM(cnum, val) (((val) >> (pmu_conf.pmc_desc[cnum].pm_pos)) & 0x1)
#define PMC_WR_FUNC(cnum) (pmu_conf.pmc_desc[cnum].write_check)
......@@ -401,8 +416,6 @@ static ctl_table pfm_sysctl_root[] = {
};
static struct ctl_table_header *pfm_sysctl_header;
static unsigned long reset_pmcs[IA64_NUM_PMC_REGS]; /* contains PAL reset values for PMCS */
static void pfm_vm_close(struct vm_area_struct * area);
static struct vm_operations_struct pfm_vm_ops={
......@@ -422,7 +435,7 @@ static struct {
/*
* forward declarations
*/
static void ia64_reset_pmu(struct task_struct *);
static void pfm_reset_pmu(struct task_struct *);
#ifdef CONFIG_SMP
static void pfm_fetch_regs(int cpu, struct task_struct *task, pfm_context_t *ctx);
#endif
......@@ -2244,7 +2257,7 @@ pfm_enable(struct task_struct *task, pfm_context_t *ctx, void *arg, int count,
pfm_lazy_save_regs(PMU_OWNER());
/* reset all registers to stable quiet state */
ia64_reset_pmu(task);
pfm_reset_pmu(task);
/* make sure nothing starts */
if (ctx->ctx_fl_system) {
......@@ -2307,7 +2320,7 @@ pfm_get_pmc_reset(struct task_struct *task, pfm_context_t *ctx, void *arg, int c
if (!PMC_IS_IMPL(cnum)) goto abort_mission;
tmp.reg_value = reset_pmcs[cnum];
tmp.reg_value = PMC_DFL_VAL(cnum);
PFM_REG_RETFLAG_SET(tmp.reg_flags, 0);
......@@ -2998,6 +3011,8 @@ perfmon_proc_info(char *page)
p += sprintf(p, "CPU%-2d recorded samples : %lu\n", i, pfm_stats[i].pfm_recorded_samples_count);
p += sprintf(p, "CPU%-2d smpl buffer full : %lu\n", i, pfm_stats[i].pfm_full_smpl_buffer_count);
p += sprintf(p, "CPU%-2d owner : %d\n", i, pmu_owners[i].owner ? pmu_owners[i].owner->pid: -1);
p += sprintf(p, "CPU%-2d syst_wide : %d\n", i, per_cpu(pfm_syst_wide, i));
p += sprintf(p, "CPU%-2d dcr_pp : %d\n", i, per_cpu(pfm_dcr_pp, i));
}
LOCK_PFS();
......@@ -3398,11 +3413,10 @@ pfm_load_regs (struct task_struct *task)
* XXX: make this routine able to work with non current context
*/
static void
ia64_reset_pmu(struct task_struct *task)
pfm_reset_pmu(struct task_struct *task)
{
struct thread_struct *t = &task->thread;
pfm_context_t *ctx = t->pfm_context;
unsigned long mask;
int i;
if (task != current) {
......@@ -3415,30 +3429,27 @@ ia64_reset_pmu(struct task_struct *task)
/*
* install reset values for PMC. We skip PMC0 (done above)
* XX: good up to 64 PMCS
*/
mask = pmu_conf.impl_regs[0] >> 1;
for(i=1; mask; mask>>=1, i++) {
if (mask & 0x1) {
ia64_set_pmc(i, reset_pmcs[i]);
/*
* When restoring context, we must restore ALL pmcs, even the ones
* that the task does not use to avoid leaks and possibly corruption
* of the session because of configuration conflicts. So here, we
* initialize the entire set used in the context switch restore routine.
*/
t->pmc[i] = reset_pmcs[i];
DBprintk((" pmc[%d]=0x%lx\n", i, reset_pmcs[i]));
}
for (i=1; (pmu_conf.pmc_desc[i].type & PFM_REG_END) == 0; i++) {
if ((pmu_conf.pmc_desc[i].type & PFM_REG_IMPL) == 0) continue;
ia64_set_pmc(i, PMC_DFL_VAL(i));
/*
* When restoring context, we must restore ALL pmcs, even the ones
* that the task does not use to avoid leaks and possibly corruption
* of the session because of configuration conflicts. So here, we
* initialize the entire set used in the context switch restore routine.
*/
t->pmc[i] = PMC_DFL_VAL(i);
DBprintk(("pmc[%d]=0x%lx\n", i, t->pmc[i]));
}
/*
* clear reset values for PMD.
* XXX: good up to 64 PMDS. Suppose that zero is a valid value.
*/
mask = pmu_conf.impl_regs[4];
for(i=0; mask; mask>>=1, i++) {
if (mask & 0x1) ia64_set_pmd(i, 0UL);
for (i=0; (pmu_conf.pmd_desc[i].type & PFM_REG_END) == 0; i++) {
if ((pmu_conf.pmd_desc[i].type & PFM_REG_IMPL) == 0) continue;
ia64_set_pmd(i, 0UL);
t->pmd[i] = 0UL;
}
......@@ -4119,23 +4130,6 @@ static struct irqaction perfmon_irqaction = {
};
static void
pfm_pmu_snapshot(void)
{
int i;
for (i=0; i < IA64_NUM_PMC_REGS; i++) {
if (i >= pmu_conf.num_pmcs) break;
if (PMC_IS_IMPL(i)) reset_pmcs[i] = ia64_get_pmc(i);
}
#ifdef CONFIG_MCKINLEY
/*
* set the 'stupid' enable bit to power the PMU!
*/
reset_pmcs[4] |= 1UL << 23;
#endif
}
/*
* perfmon initialization routine, called from the initcall() table
*/
......@@ -4160,6 +4154,9 @@ perfmon_init (void)
}
pmu_conf.perf_ovfl_val = (1UL << pm_info.pal_perf_mon_info_s.width) - 1;
/*
* XXX: use the pfm_*_desc tables instead and simply verify with PAL
*/
pmu_conf.max_counters = pm_info.pal_perf_mon_info_s.generic;
pmu_conf.num_pmcs = find_num_pm_regs(pmu_conf.impl_regs);
pmu_conf.num_pmds = find_num_pm_regs(&pmu_conf.impl_regs[4]);
......@@ -4183,24 +4180,11 @@ perfmon_init (void)
pmu_conf.num_ibrs <<=1;
pmu_conf.num_dbrs <<=1;
/*
* take a snapshot of all PMU registers. PAL is supposed
* to configure them with stable/safe values, i.e., not
* capturing anything.
* We take a snapshot now, before we make any modifications. This
* will become our master copy. Then we will reuse the snapshot
* to reset the PMU in pfm_enable(). Using this technique, perfmon
* does NOT have to know about the specific values to program for
* the PMC/PMD. The safe values may be different from one CPU model to
* the other.
*/
pfm_pmu_snapshot();
/*
* setup the register configuration descriptions for the CPU
*/
pmu_conf.pmc_desc = pmc_desc;
pmu_conf.pmd_desc = pmd_desc;
pmu_conf.pmc_desc = pfm_pmc_desc;
pmu_conf.pmd_desc = pfm_pmd_desc;
/* we are all set */
pmu_conf.pfm_is_disabled = 0;
......@@ -4222,11 +4206,34 @@ __initcall(perfmon_init);
void
perfmon_init_percpu (void)
{
int i;
if (smp_processor_id() == 0)
register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction);
ia64_set_pmv(IA64_PERFMON_VECTOR);
ia64_srlz_d();
/*
* we first initialize the PMU to a stable state.
* the values may have been changed from their power-up
* values by software executed before the kernel took over.
*
* At this point, pmu_conf has not yet been initialized
*
* On McKinley, this code is ineffective until PMC4 is initialized.
*/
for (i=1; (pfm_pmc_desc[i].type & PFM_REG_END) == 0; i++) {
if ((pfm_pmc_desc[i].type & PFM_REG_IMPL) == 0) continue;
ia64_set_pmc(i, pfm_pmc_desc[i].default_value);
}
for (i=0; (pfm_pmd_desc[i].type & PFM_REG_END) == 0; i++) {
if ((pfm_pmd_desc[i].type & PFM_REG_IMPL) == 0) continue;
ia64_set_pmd(i, 0UL);
}
ia64_set_pmc(0,1UL);
ia64_srlz_d();
}
#else /* !CONFIG_PERFMON */
......
#define RDEP(x) (1UL<<(x))
#if defined(CONFIG_ITANIUM) || defined(CONFIG_MCKINLEY)
#error "This file should only be used when CONFIG_ITANIUM and CONFIG_MCKINLEY are not defined"
#endif
static pfm_reg_desc_t pmc_desc[PMU_MAX_PMCS]={
/* pmc0 */ { PFM_REG_CONTROL , 0, 0x1UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc1 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc2 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc3 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc4 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(4),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc5 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(5),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc6 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(6),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc7 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {RDEP(7),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
{ PFM_REG_END , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}}, /* end marker */
};
static pfm_reg_desc_t pmd_desc[PMU_MAX_PMDS]={
/* pmd0 */ { PFM_REG_NOTIMPL , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}},
/* pmd1 */ { PFM_REG_NOTIMPL , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}},
/* pmd2 */ { PFM_REG_NOTIMPL , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}},
/* pmd3 */ { PFM_REG_NOTIMPL , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}},
/* pmd4 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(4),0UL, 0UL, 0UL}},
/* pmd5 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(5),0UL, 0UL, 0UL}},
/* pmd6 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(6),0UL, 0UL, 0UL}},
/* pmd7 */ { PFM_REG_COUNTING, 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(7),0UL, 0UL, 0UL}},
{ PFM_REG_END , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}}, /* end marker */
};
......@@ -15,44 +15,44 @@
static int pfm_ita_pmc_check(struct task_struct *task, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
static int pfm_write_ibr_dbr(int mode, struct task_struct *task, void *arg, int count, struct pt_regs *regs);
static pfm_reg_desc_t pmc_desc[256]={
/* pmc0 */ { PFM_REG_CONTROL, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc1 */ { PFM_REG_CONTROL, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc2 */ { PFM_REG_CONTROL, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc3 */ { PFM_REG_CONTROL, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc4 */ { PFM_REG_COUNTING, 6, NULL, NULL, {RDEP(4),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc5 */ { PFM_REG_COUNTING, 6, NULL, NULL, {RDEP(5),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc6 */ { PFM_REG_COUNTING, 6, NULL, NULL, {RDEP(6),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc7 */ { PFM_REG_COUNTING, 6, NULL, NULL, {RDEP(7),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc8 */ { PFM_REG_CONFIG, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc9 */ { PFM_REG_CONFIG, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc10 */ { PFM_REG_MONITOR, 6, NULL, NULL, {RDEP(0)|RDEP(1),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc11 */ { PFM_REG_MONITOR, 6, NULL, pfm_ita_pmc_check, {RDEP(2)|RDEP(3)|RDEP(17),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc12 */ { PFM_REG_MONITOR, 6, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc13 */ { PFM_REG_CONFIG, 0, NULL, pfm_ita_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
{ PFM_REG_NONE, 0, NULL, NULL, {0,}, {0,}}, /* end marker */
static pfm_reg_desc_t pfm_pmc_desc[PMU_MAX_PMCS]={
/* pmc0 */ { PFM_REG_CONTROL , 0, 0x1UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc1 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc2 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc3 */ { PFM_REG_CONTROL , 0, 0x0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc4 */ { PFM_REG_COUNTING, 6, 0x0UL, -1UL, NULL, NULL, {RDEP(4),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc5 */ { PFM_REG_COUNTING, 6, 0x0UL, -1UL, NULL, NULL, {RDEP(5),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc6 */ { PFM_REG_COUNTING, 6, 0x0UL, -1UL, NULL, NULL, {RDEP(6),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc7 */ { PFM_REG_COUNTING, 6, 0x0UL, -1UL, NULL, NULL, {RDEP(7),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc8 */ { PFM_REG_CONFIG , 0, 0xf00000003ffffff8UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc9 */ { PFM_REG_CONFIG , 0, 0xf00000003ffffff8UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc10 */ { PFM_REG_MONITOR , 6, 0x0UL, -1UL, NULL, NULL, {RDEP(0)|RDEP(1),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc11 */ { PFM_REG_MONITOR , 6, 0x0000000010000000UL, -1UL, NULL, pfm_ita_pmc_check, {RDEP(2)|RDEP(3)|RDEP(17),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc12 */ { PFM_REG_MONITOR , 6, 0x0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
/* pmc13 */ { PFM_REG_CONFIG , 0, 0x0003ffff00000001UL, -1UL, NULL, pfm_ita_pmc_check, {0UL,0UL, 0UL, 0UL}, {0UL,0UL, 0UL, 0UL}},
{ PFM_REG_END , 0, 0x0UL, -1UL, NULL, NULL, {0,}, {0,}}, /* end marker */
};
static pfm_reg_desc_t pmd_desc[256]={
/* pmd0 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(1),0UL, 0UL, 0UL}, {RDEP(10),0UL, 0UL, 0UL}},
/* pmd1 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(0),0UL, 0UL, 0UL}, {RDEP(10),0UL, 0UL, 0UL}},
/* pmd2 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(3)|RDEP(17),0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
/* pmd3 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(2)|RDEP(17),0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
/* pmd4 */ { PFM_REG_COUNTING, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(4),0UL, 0UL, 0UL}},
/* pmd5 */ { PFM_REG_COUNTING, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(5),0UL, 0UL, 0UL}},
/* pmd6 */ { PFM_REG_COUNTING, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(6),0UL, 0UL, 0UL}},
/* pmd7 */ { PFM_REG_COUNTING, 0, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(7),0UL, 0UL, 0UL}},
/* pmd8 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd9 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(8)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd10 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd11 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd12 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd13 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd14 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd15 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd16 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd17 */ { PFM_REG_BUFFER, 0, NULL, NULL, {RDEP(2)|RDEP(3),0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
{ PFM_REG_NONE, 0, NULL, NULL, {0,}, {0,}}, /* end marker */
static pfm_reg_desc_t pfm_pmd_desc[PMU_MAX_PMDS]={
/* pmd0 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(1),0UL, 0UL, 0UL}, {RDEP(10),0UL, 0UL, 0UL}},
/* pmd1 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(0),0UL, 0UL, 0UL}, {RDEP(10),0UL, 0UL, 0UL}},
/* pmd2 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(3)|RDEP(17),0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
/* pmd3 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(2)|RDEP(17),0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
/* pmd4 */ { PFM_REG_COUNTING, 0, 0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(4),0UL, 0UL, 0UL}},
/* pmd5 */ { PFM_REG_COUNTING, 0, 0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(5),0UL, 0UL, 0UL}},
/* pmd6 */ { PFM_REG_COUNTING, 0, 0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(6),0UL, 0UL, 0UL}},
/* pmd7 */ { PFM_REG_COUNTING, 0, 0UL, -1UL, NULL, NULL, {0UL,0UL, 0UL, 0UL}, {RDEP(7),0UL, 0UL, 0UL}},
/* pmd8 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd9 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd10 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd11 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd12 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(13)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd13 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(14)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd14 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(15)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd15 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(16),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd16 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(8)|RDEP(9)|RDEP(10)|RDEP(11)|RDEP(12)|RDEP(13)|RDEP(14)|RDEP(15),0UL, 0UL, 0UL}, {RDEP(12),0UL, 0UL, 0UL}},
/* pmd17 */ { PFM_REG_BUFFER , 0, 0UL, -1UL, NULL, NULL, {RDEP(2)|RDEP(3),0UL, 0UL, 0UL}, {RDEP(11),0UL, 0UL, 0UL}},
{ PFM_REG_END , 0, 0UL, -1UL, NULL, NULL, {0,}, {0,}}, /* end marker */
};
static int
......
......@@ -12,6 +12,7 @@
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/sched.h>
#include <linux/slab.h>
......@@ -45,8 +46,9 @@ static void
do_show_stack (struct unw_frame_info *info, void *arg)
{
unsigned long ip, sp, bsp;
char buf[80]; /* don't make it so big that it overflows the stack! */
printk("\nCall Trace: ");
printk("\nCall Trace:\n");
do {
unw_get_ip(info, &ip);
if (ip == 0)
......@@ -54,7 +56,9 @@ do_show_stack (struct unw_frame_info *info, void *arg)
unw_get_sp(info, &sp);
unw_get_bsp(info, &bsp);
printk("[<%016lx>] sp=0x%016lx bsp=0x%016lx\n", ip, sp, bsp);
snprintf(buf, sizeof(buf), " [<%016lx>] %%s sp=0x%016lx bsp=0x%016lx\n",
ip, sp, bsp);
print_symbol(buf, ip);
} while (unw_unwind(info) >= 0);
}
......@@ -94,6 +98,7 @@ show_regs (struct pt_regs *regs)
printk("\nPid: %d, comm: %20s\n", current->pid, current->comm);
printk("psr : %016lx ifs : %016lx ip : [<%016lx>] %s\n",
regs->cr_ipsr, regs->cr_ifs, ip, print_tainted());
print_symbol("ip is at %s\n", ip);
printk("unat: %016lx pfs : %016lx rsc : %016lx\n",
regs->ar_unat, regs->ar_pfs, regs->ar_rsc);
printk("rnat: %016lx bsps: %016lx pr : %016lx\n",
......@@ -199,10 +204,8 @@ ia64_save_extra (struct task_struct *task)
if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0)
pfm_save_regs(task);
# ifdef CONFIG_SMP
if (__get_cpu_var(pfm_syst_wide))
pfm_syst_wide_update_task(task, 0);
# endif
#endif
#ifdef CONFIG_IA32_SUPPORT
......@@ -221,9 +224,8 @@ ia64_load_extra (struct task_struct *task)
if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0)
pfm_load_regs(task);
# ifdef CONFIG_SMP
if (__get_cpu_var(pfm_syst_wide)) pfm_syst_wide_update_task(task, 1);
# endif
if (__get_cpu_var(pfm_syst_wide))
pfm_syst_wide_update_task(task, 1);
#endif
#ifdef CONFIG_IA32_SUPPORT
......
......@@ -34,6 +34,7 @@
#include <asm/ia32.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/machvec.h>
#include <asm/processor.h>
#include <asm/sal.h>
......@@ -49,9 +50,6 @@
# error "struct cpuinfo_ia64 too big!"
#endif
#define MIN(a,b) ((a) < (b) ? (a) : (b))
#define MAX(a,b) ((a) > (b) ? (a) : (b))
extern char _end;
#ifdef CONFIG_SMP
......@@ -95,6 +93,10 @@ struct rsvd_region {
static struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1];
static int num_rsvd_regions;
#define IGNORE_PFN0 1 /* XXX fix me: ignore pfn 0 until TLB miss handler is updated... */
#ifndef CONFIG_DISCONTIGMEM
static unsigned long bootmap_start; /* physical address where the bootmem map is located */
static int
......@@ -108,17 +110,63 @@ find_max_pfn (unsigned long start, unsigned long end, void *arg)
return 0;
}
#define IGNORE_PFN0 1 /* XXX fix me: ignore pfn 0 until TLB miss handler is updated... */
#else /* CONFIG_DISCONTIGMEM */
/*
* Free available memory based on the primitive map created from
* the boot parameters. This routine does not assume the incoming
* segments are sorted.
* efi_memmap_walk() knows nothing about layout of memory across nodes. Find
* out to which node a block of memory belongs. Ignore memory that we cannot
* identify, and split blocks that run across multiple nodes.
*
* Take this opportunity to round the start address up and the end address
* down to page boundaries.
*/
static int
free_available_memory (unsigned long start, unsigned long end, void *arg)
void
call_pernode_memory (unsigned long start, unsigned long end, void *arg)
{
unsigned long rs, re;
void (*func)(unsigned long, unsigned long, int, int);
int i;
start = PAGE_ALIGN(start);
end &= PAGE_MASK;
if (start >= end)
return;
func = arg;
if (!num_memblks) {
/*
* This machine doesn't have SRAT, so call func with
* nid=0, bank=0.
*/
if (start < end)
(*func)(start, end - start, 0, 0);
return;
}
for (i = 0; i < num_memblks; i++) {
rs = max(start, node_memblk[i].start_paddr);
re = min(end, node_memblk[i].start_paddr+node_memblk[i].size);
if (rs < re)
(*func)(rs, re-rs, node_memblk[i].nid,
node_memblk[i].bank);
}
}
#endif /* CONFIG_DISCONTIGMEM */
/*
* Filter incoming memory segments based on the primitive map created from the boot
* parameters. Segments contained in the map are removed from the memory ranges. A
* caller-specified function is called with the memory ranges that remain after filtering.
* This routine does not assume the incoming segments are sorted.
*/
int
filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
{
unsigned long range_start, range_end, prev_start;
void (*func)(unsigned long, unsigned long);
int i;
#if IGNORE_PFN0
......@@ -132,13 +180,18 @@ free_available_memory (unsigned long start, unsigned long end, void *arg)
* lowest possible address(walker uses virtual)
*/
prev_start = PAGE_OFFSET;
func = arg;
for (i = 0; i < num_rsvd_regions; ++i) {
range_start = MAX(start, prev_start);
range_end = MIN(end, rsvd_region[i].start);
range_start = max(start, prev_start);
range_end = min(end, rsvd_region[i].start);
if (range_start < range_end)
free_bootmem(__pa(range_start), range_end - range_start);
#ifdef CONFIG_DISCONTIGMEM
call_pernode_memory(__pa(range_start), __pa(range_end), func);
#else
(*func)(__pa(range_start), range_end - range_start);
#endif
/* nothing more available in this segment */
if (range_end == end) return 0;
......@@ -150,6 +203,7 @@ free_available_memory (unsigned long start, unsigned long end, void *arg)
}
#ifndef CONFIG_DISCONTIGMEM
/*
* Find a place to put the bootmap and return its starting address in bootmap_start.
* This address must be page-aligned.
......@@ -171,8 +225,8 @@ find_bootmap_location (unsigned long start, unsigned long end, void *arg)
free_start = PAGE_OFFSET;
for (i = 0; i < num_rsvd_regions; i++) {
range_start = MAX(start, free_start);
range_end = MIN(end, rsvd_region[i].start & PAGE_MASK);
range_start = max(start, free_start);
range_end = min(end, rsvd_region[i].start & PAGE_MASK);
if (range_end <= range_start) continue; /* skip over empty range */
......@@ -188,6 +242,7 @@ find_bootmap_location (unsigned long start, unsigned long end, void *arg)
}
return 0;
}
#endif /* !CONFIG_DISCONTIGMEM */
static void
sort_regions (struct rsvd_region *rsvd_region, int max)
......@@ -252,6 +307,15 @@ find_memory (void)
sort_regions(rsvd_region, num_rsvd_regions);
#ifdef CONFIG_DISCONTIGMEM
{
extern void discontig_mem_init (void);
bootmap_size = max_pfn = 0; /* stop gcc warnings */
discontig_mem_init();
}
#else /* !CONFIG_DISCONTIGMEM */
/* first find highest page frame number */
max_pfn = 0;
efi_memmap_walk(find_max_pfn, &max_pfn);
......@@ -268,8 +332,9 @@ find_memory (void)
bootmap_size = init_bootmem(bootmap_start >> PAGE_SHIFT, max_pfn);
/* Free all available memory, then mark bootmem-map as being in use. */
efi_memmap_walk(free_available_memory, 0);
efi_memmap_walk(filter_rsvd_memory, free_bootmem);
reserve_bootmem(bootmap_start, bootmap_size);
#endif /* !CONFIG_DISCONTIGMEM */
#ifdef CONFIG_BLK_DEV_INITRD
if (ia64_boot_param->initrd_start) {
......@@ -296,6 +361,16 @@ setup_arch (char **cmdline_p)
efi_init();
#ifdef CONFIG_ACPI_BOOT
/* Initialize the ACPI boot-time table parser */
acpi_table_init(*cmdline_p);
#ifdef CONFIG_ACPI_NUMA
acpi_numa_init();
#endif
#endif /* CONFIG_ACPI_BOOT */
find_memory();
#if 0
......@@ -530,6 +605,7 @@ setup_per_cpu_areas (void)
/* start_kernel() requires this... */
}
/*
* cpu_init() initializes state that is per-CPU. This function acts
* as a 'CPU state barrier', nothing should get across.
......@@ -542,32 +618,40 @@ cpu_init (void)
unsigned long num_phys_stacked;
pal_vm_info_2_u_t vmi;
unsigned int max_ctx;
struct cpuinfo_ia64 *my_cpu_info;
void *my_cpu_data;
struct cpuinfo_ia64 *cpu_info;
void *cpu_data;
#ifdef CONFIG_SMP
extern char __per_cpu_end[];
int cpu = smp_processor_id();
int cpu;
if (__per_cpu_end - __per_cpu_start > PAGE_SIZE)
panic("Per-cpu data area too big! (%Zu > %Zu)",
__per_cpu_end - __per_cpu_start, PAGE_SIZE);
/*
 * On the BSP, the page allocator isn't initialized by the time we get here. On
 * the APs, the bootmem allocator is no longer available...
 */
/*
 * get_free_pages() cannot be used before cpu_init() is done. The BSP
 * allocates NR_CPUS pages up front so that the APs never need to call
 * get_zeroed_page().
 */
if (cpu == 0)
my_cpu_data = alloc_bootmem_pages(__per_cpu_end - __per_cpu_start);
else
my_cpu_data = (void *) get_zeroed_page(GFP_KERNEL);
memcpy(my_cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
__per_cpu_offset[cpu] = (char *) my_cpu_data - __per_cpu_start;
my_cpu_info = my_cpu_data + ((char *) &__get_cpu_var(cpu_info) - __per_cpu_start);
#else
my_cpu_data = __phys_per_cpu_start;
if (smp_processor_id() == 0) {
cpu_data = alloc_bootmem_pages(PAGE_SIZE * NR_CPUS);
for (cpu = 0; cpu < NR_CPUS; cpu++) {
memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
__per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
cpu_data += PAGE_SIZE;
}
}
cpu_data = __per_cpu_start + __per_cpu_offset[smp_processor_id()];
#else /* !CONFIG_SMP */
cpu_data = __phys_per_cpu_start;
#endif /* !CONFIG_SMP */
cpu_info = cpu_data + ((char *) &__get_cpu_var(cpu_info) - __per_cpu_start);
#ifdef CONFIG_NUMA
cpu_info->node_data = get_node_data_ptr();
cpu_info->nodeid = boot_get_local_nodeid();
#endif
my_cpu_info = my_cpu_data + ((char *) &__get_cpu_var(cpu_info) - __per_cpu_start);
/*
* We can't pass "local_cpu_data" to identify_cpu() because we haven't called
......@@ -575,14 +659,14 @@ cpu_init (void)
* depends on the data returned by identify_cpu(). We break the dependency by
* accessing cpu_data() the old way, through identity mapped space.
*/
identify_cpu(my_cpu_info);
identify_cpu(cpu_info);
#ifdef CONFIG_MCKINLEY
{
#define FEATURE_SET 16
# define FEATURE_SET 16
struct ia64_pal_retval iprv;
if (my_cpu_info->family == 0x1f) {
if (cpu_info->family == 0x1f) {
PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0);
if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80))
PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES,
......@@ -613,7 +697,7 @@ cpu_init (void)
if (current->mm)
BUG();
ia64_mmu_init(my_cpu_data);
ia64_mmu_init(cpu_data);
#ifdef CONFIG_IA32_SUPPORT
/* initialize global ia32 state - CR0 and CR4 */
......
......@@ -41,6 +41,16 @@
# define GET_SIGSET(k,u) __get_user((k)->sig[0], &(u)->sig[0])
#endif
register double f16 asm ("f16"); register double f17 asm ("f17");
register double f18 asm ("f18"); register double f19 asm ("f19");
register double f20 asm ("f20"); register double f21 asm ("f21");
register double f22 asm ("f22"); register double f23 asm ("f23");
register double f24 asm ("f24"); register double f25 asm ("f25");
register double f26 asm ("f26"); register double f27 asm ("f27");
register double f28 asm ("f28"); register double f29 asm ("f29");
register double f30 asm ("f30"); register double f31 asm ("f31");
long
ia64_rt_sigsuspend (sigset_t *uset, size_t sigsetsize, struct sigscratch *scr)
{
......@@ -58,13 +68,13 @@ ia64_rt_sigsuspend (sigset_t *uset, size_t sigsetsize, struct sigscratch *scr)
sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
{
oldset = current->blocked;
current->blocked = set;
recalc_sigpending();
}
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
/*
* The return below usually returns to the signal handler. We need to
......@@ -264,12 +274,12 @@ ia64_rt_sigreturn (struct sigscratch *scr)
sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
{
current->blocked = set;
recalc_sigpending();
}
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
if (restore_sigcontext(sc, scr))
goto give_sigsegv;
......@@ -458,13 +468,13 @@ handle_signal (unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigse
ka->sa.sa_handler = SIG_DFL;
if (!(ka->sa.sa_flags & SA_NODEFER)) {
spin_lock_irq(&current->sigmask_lock);
spin_lock_irq(&current->sig->siglock);
{
sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
sigaddset(&current->blocked, sig);
recalc_sigpending();
}
spin_unlock_irq(&current->sigmask_lock);
spin_unlock_irq(&current->sig->siglock);
}
return 1;
}
......
......@@ -16,6 +16,7 @@
#include <linux/config.h>
#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/delay.h>
#include <linux/init.h>
......@@ -427,6 +428,37 @@ smp_build_cpu_map (void)
}
}
#ifdef CONFIG_NUMA
char cpu_to_node_map[NR_CPUS] __cacheline_aligned;
/*
* Build cpu to node mapping.
*/
void __init
build_cpu_to_node_map (void)
{
int cpu, i;
for (cpu = 0; cpu < NR_CPUS; ++cpu) {
/*
* All Itanium NUMA platforms I know use ACPI, so maybe we
* can drop this ifdef completely. [EF]
*/
#ifdef CONFIG_ACPI_NUMA
for (i = 0; i < NR_CPUS; ++i)
if (cpu_physical_id(cpu) == node_cpuid[i].phys_id) {
cpu_to_node_map[cpu] = node_cpuid[i].nid;
break;
}
#else
# error Fixme: Dunno how to build CPU-to-node map.
#endif
}
}
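A minimal usage sketch (illustrative, not part of this patch): once the map is built, translating a logical cpu to its node is a plain array lookup.

/* hypothetical helper, for illustration only */
static inline int cpu_to_node(int cpu)
{
	return cpu_to_node_map[cpu];
}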
#endif /* CONFIG_NUMA */
/*
* Cycle through the APs sending Wakeup IPIs to boot each.
*/
......
......@@ -282,18 +282,20 @@ sys_free_hugepages (unsigned long addr)
extern int free_hugepages(struct vm_area_struct *);
int retval;
vma = find_vma(mm, addr);
if (!vma || !is_vm_hugetlb_page(vma) || (vma->vm_start != addr))
return -EINVAL;
down_write(&mm->mmap_sem);
{
vma = find_vma(mm, addr);
if (!vma || !is_vm_hugetlb_page(vma) || (vma->vm_start != addr)) {
retval = -EINVAL;
goto out;
}
spin_lock(&mm->page_table_lock);
{
retval = free_hugepages(vma);
}
spin_unlock(&mm->page_table_lock);
}
out:
up_write(&mm->mmap_sem);
return retval;
}
......
/*
* linux/arch/ia64/kernel/time.c
*
* Copyright (C) 1998-2001 Hewlett-Packard Co
* Copyright (C) 1998-2000 Stephane Eranian <eranian@hpl.hp.com>
* Copyright (C) 1999-2001 David Mosberger <davidm@hpl.hp.com>
* Copyright (C) 1998-2002 Hewlett-Packard Co
* Stephane Eranian <eranian@hpl.hp.com>
* David Mosberger <davidm@hpl.hp.com>
* Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
* Copyright (C) 1999-2000 VA Linux Systems
* Copyright (C) 1999-2000 Walt Drummond <drummond@valinux.com>
......@@ -12,6 +12,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/interrupt.h>
......
......@@ -54,8 +54,8 @@ trap_init (void)
if (ia64_boot_param->fpswa) {
/* FPSWA fixup: make the interface pointer a kernel virtual address: */
fpswa_interface = __va(ia64_boot_param->fpswa);
major = fpswa_interface->revision & 0xffff;
minor = fpswa_interface->revision >> 16;
major = fpswa_interface->revision >> 16;
minor = fpswa_interface->revision & 0xffff;
}
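For clarity, the corrected decoding above implies the following layout (example values derived from the code, not from separate documentation):

/* fpswa_interface->revision packs major in the upper 16 bits and minor
 * in the lower 16; e.g. revision == 0x00010000 now prints "rev 1.0",
 * where the old, swapped decoding would have printed "rev 0.1".
 */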
printk("fpswa interface at %lx (rev %d.%d)\n", ia64_boot_param->fpswa, major, minor);
}
......@@ -69,7 +69,6 @@ bust_spinlocks (int yes)
{
int loglevel_save = console_loglevel;
spin_lock_init(&timerlist_lock);
if (yes) {
oops_in_progress = 1;
return;
......
......@@ -32,26 +32,26 @@ AFLAGS___udivsi3.o = -DUNSIGNED
AFLAGS___modsi3.o = -DMODULO
AFLAGS___umodsi3.o = -DUNSIGNED -DMODULO
__divdi3.o: idiv64.S
$(obj)/__divdi3.o: $(src)/idiv64.S
$(cmd_as_o_S)
__udivdi3.o: idiv64.S
$(obj)/__udivdi3.o: $(src)/idiv64.S
$(cmd_as_o_S)
__moddi3.o: idiv64.S
$(obj)/__moddi3.o: $(src)/idiv64.S
$(cmd_as_o_S)
__umoddi3.o: idiv64.S
$(obj)/__umoddi3.o: $(src)/idiv64.S
$(cmd_as_o_S)
__divsi3.o: idiv32.S
$(obj)/__divsi3.o: $(src)/idiv32.S
$(cmd_as_o_S)
__udivsi3.o: idiv32.S
$(obj)/__udivsi3.o: $(src)/idiv32.S
$(cmd_as_o_S)
__modsi3.o: idiv32.S
$(obj)/__modsi3.o: $(src)/idiv32.S
$(cmd_as_o_S)
__umodsi3.o: idiv32.S
$(obj)/__umodsi3.o: $(src)/idiv32.S
$(cmd_as_o_S)
......@@ -9,5 +9,7 @@
obj-y := init.o fault.o tlb.o extable.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_NUMA) += numa.o
obj-$(CONFIG_DISCONTIGMEM) += discontig.o
include $(TOPDIR)/Rules.make
/*
* Copyright (c) 2000 Silicon Graphics, Inc. All rights reserved.
* Copyright (c) 2001 Intel Corp.
* Copyright (c) 2001 Tony Luck <tony.luck@intel.com>
* Copyright (c) 2002 NEC Corp.
* Copyright (c) 2002 Kimio Suganuma <k-suganuma@da.jp.nec.com>
*/
/*
* Platform initialization for Discontig Memory
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/acpi.h>
#include <linux/efi.h>
/*
* Round an address upward to the next multiple of GRANULE size.
*/
#define GRANULEROUNDUP(n) (((n)+IA64_GRANULE_SIZE-1) & ~(IA64_GRANULE_SIZE-1))
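A quick sanity check of the rounding arithmetic (illustrative; a 16MB granule is assumed here, the real IA64_GRANULE_SIZE is configuration-dependent):

/* with IA64_GRANULE_SIZE == 0x1000000 (16MB):
 *	GRANULEROUNDUP(0x0000001) == 0x1000000	(rounds up to next granule)
 *	GRANULEROUNDUP(0x1000000) == 0x1000000	(already aligned)
 *	GRANULEROUNDUP(0x1000001) == 0x2000000
 */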
static struct ia64_node_data *node_data[NR_NODES];
static long boot_pg_data[8*NR_NODES+sizeof(pg_data_t)] __initdata;
static pg_data_t *pg_data_ptr[NR_NODES] __initdata;
static bootmem_data_t bdata[NR_NODES][NR_BANKS_PER_NODE+1] __initdata;
extern int filter_rsvd_memory (unsigned long start, unsigned long end, void *arg);
/*
* Return the compact node number of this cpu. Used prior to
* setting up the cpu_data area.
* Note: not fast, intended for boot use only!
*/
int
boot_get_local_nodeid(void)
{
int i;
for (i = 0; i < NR_CPUS; i++)
if (node_cpuid[i].phys_id == hard_smp_processor_id())
return node_cpuid[i].nid;
/* node info missing, so nid should be 0. */
return 0;
}
/*
* Return a pointer to the pg_data structure for a node.
* This function is used ONLY in early boot before the cpu_data
* structure is available.
*/
pg_data_t* __init
boot_get_pg_data_ptr(long node)
{
return pg_data_ptr[node];
}
/*
* Return a pointer to the node data for the current node.
* (boottime initialization only)
*/
struct ia64_node_data *
get_node_data_ptr(void)
{
return node_data[boot_get_local_nodeid()];
}
/*
* We allocate one of the bootmem_data_t structs for each piece of memory
* that we wish to treat as a contiguous block. Each such block must start
* on a BANKSIZE boundary. Multiple banks per node are not supported.
*/
static int __init
build_maps(unsigned long pstart, unsigned long length, int node)
{
bootmem_data_t *bdp;
unsigned long cstart, epfn;
bdp = pg_data_ptr[node]->bdata;
epfn = GRANULEROUNDUP(pstart + length) >> PAGE_SHIFT;
cstart = pstart & ~(BANKSIZE - 1);
if (!bdp->node_low_pfn) {
bdp->node_boot_start = cstart;
bdp->node_low_pfn = epfn;
} else {
bdp->node_boot_start = min(cstart, bdp->node_boot_start);
bdp->node_low_pfn = max(epfn, bdp->node_low_pfn);
}
min_low_pfn = min(min_low_pfn, bdp->node_boot_start>>PAGE_SHIFT);
max_low_pfn = max(max_low_pfn, bdp->node_low_pfn);
return 0;
}
/*
* Find space on each node for the bootmem map.
*
* Called by efi_memmap_walk to find boot memory on each node. Note that
* only blocks that are free are passed to this routine (currently filtered by
* filter_rsvd_memory).
*/
static int __init
find_bootmap_space(unsigned long pstart, unsigned long length, int node)
{
unsigned long mapsize, pages, epfn;
bootmem_data_t *bdp;
epfn = (pstart + length) >> PAGE_SHIFT;
bdp = &pg_data_ptr[node]->bdata[0];
if (pstart < bdp->node_boot_start || epfn > bdp->node_low_pfn)
return 0;
if (!bdp->node_bootmem_map) {
pages = bdp->node_low_pfn - (bdp->node_boot_start>>PAGE_SHIFT);
mapsize = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
if (length > mapsize) {
init_bootmem_node(
BOOT_NODE_DATA(node),
pstart>>PAGE_SHIFT,
bdp->node_boot_start>>PAGE_SHIFT,
bdp->node_low_pfn);
}
}
return 0;
}
/*
* Free available memory to the bootmem allocator.
*
* Note that only blocks that are free are passed to this routine (currently
* filtered by filter_rsvd_memory).
*
*/
static int __init
discontig_free_bootmem_node(unsigned long pstart, unsigned long length, int node)
{
free_bootmem_node(BOOT_NODE_DATA(node), pstart, length);
return 0;
}
/*
* Reserve the space used by the bootmem maps.
*/
static void __init
discontig_reserve_bootmem(void)
{
int node;
unsigned long mapbase, mapsize, pages;
bootmem_data_t *bdp;
for (node = 0; node < numnodes; node++) {
bdp = BOOT_NODE_DATA(node)->bdata;
pages = bdp->node_low_pfn - (bdp->node_boot_start>>PAGE_SHIFT);
mapsize = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
mapbase = __pa(bdp->node_bootmem_map);
reserve_bootmem_node(BOOT_NODE_DATA(node), mapbase, mapsize);
}
}
/*
* Allocate per node tables.
* - the pg_data structure is allocated on each node. This minimizes offnode
*   memory references.
* - the node data is allocated & initialized. Portions of this structure are
*   read-only (after boot) and contain node-local pointers to useful data
*   structures located on other nodes.
*
* We also switch to using the "real" pg_data structures at this point. Earlier in boot, we
* use a different structure. The only use for pg_data prior to this point in boot is to get
* the pointer to the bdata for the node.
*/
static void __init
allocate_pernode_structures(void)
{
pg_data_t *pgdat=0, *new_pgdat_list=0;
int node, mynode;
mynode = boot_get_local_nodeid();
for (node = numnodes - 1; node >= 0 ; node--) {
node_data[node] = alloc_bootmem_node(BOOT_NODE_DATA(node), sizeof (struct ia64_node_data));
pgdat = __alloc_bootmem_node(BOOT_NODE_DATA(node), sizeof(pg_data_t), SMP_CACHE_BYTES, 0);
pgdat->bdata = &(bdata[node][0]);
pg_data_ptr[node] = pgdat;
pgdat->pgdat_next = new_pgdat_list;
new_pgdat_list = pgdat;
}
memcpy(node_data[mynode]->pg_data_ptrs, pg_data_ptr, sizeof(pg_data_ptr));
memcpy(node_data[mynode]->node_data_ptrs, node_data, sizeof(node_data));
pgdat_list = new_pgdat_list;
}
/*
* Called early in boot to set up the boot memory allocator, and to
* allocate the node-local pg_data & node-directory data structures.
*/
void __init
discontig_mem_init(void)
{
int node;
if (numnodes == 0) {
printk("node info missing!\n");
numnodes = 1;
}
for (node = 0; node < numnodes; node++) {
pg_data_ptr[node] = (pg_data_t*) &boot_pg_data[node];
pg_data_ptr[node]->bdata = &bdata[node][0];
}
min_low_pfn = -1;
max_low_pfn = 0;
efi_memmap_walk(filter_rsvd_memory, build_maps);
efi_memmap_walk(filter_rsvd_memory, find_bootmap_space);
efi_memmap_walk(filter_rsvd_memory, discontig_free_bootmem_node);
discontig_reserve_bootmem();
allocate_pernode_structures();
}
/*
* Initialize the paging system.
* - determine sizes of each node
* - initialize the paging system for the node
* - build the nodedir for the node. This contains pointers to
* the per-bank mem_map entries.
* - fix the page struct "virtual" pointers. These are bank-specific
*   values that the paging system doesn't understand.
* - replicate the nodedir structure to other nodes
*/
void __init
discontig_paging_init(void)
{
int node, mynode;
unsigned long max_dma, zones_size[MAX_NR_ZONES];
unsigned long kaddr, ekaddr, bid;
struct page *page;
bootmem_data_t *bdp;
max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
mynode = boot_get_local_nodeid();
for (node = 0; node < numnodes; node++) {
long pfn, startpfn;
memset(zones_size, 0, sizeof(zones_size));
startpfn = -1;
bdp = BOOT_NODE_DATA(node)->bdata;
pfn = bdp->node_boot_start >> PAGE_SHIFT;
if (startpfn == -1)
startpfn = pfn;
if (pfn > max_dma)
zones_size[ZONE_NORMAL] += (bdp->node_low_pfn - pfn);
else if (bdp->node_low_pfn < max_dma)
zones_size[ZONE_DMA] += (bdp->node_low_pfn - pfn);
else {
zones_size[ZONE_DMA] += (max_dma - pfn);
zones_size[ZONE_NORMAL] += (bdp->node_low_pfn - max_dma);
}
free_area_init_node(node, NODE_DATA(node), NULL, zones_size, startpfn, 0);
page = NODE_DATA(node)->node_mem_map;
bdp = BOOT_NODE_DATA(node)->bdata;
kaddr = (unsigned long)__va(bdp->node_boot_start);
ekaddr = (unsigned long)__va(bdp->node_low_pfn << PAGE_SHIFT);
while (kaddr < ekaddr) {
bid = BANK_MEM_MAP_INDEX(kaddr);
node_data[mynode]->node_id_map[bid] = node;
node_data[mynode]->bank_mem_map_base[bid] = page;
kaddr += BANKSIZE;
page += BANKSIZE/PAGE_SIZE;
}
}
/*
* Finish setting up the node data for this node, then copy it to the other nodes.
*/
for (node=0; node < numnodes; node++)
if (mynode != node) {
memcpy(node_data[node], node_data[mynode], sizeof(struct ia64_node_data));
node_data[node]->node = node;
}
}
......@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/efi.h>
#include <linux/mmzone.h>
#include <asm/a.out.h>
#include <asm/bitops.h>
......@@ -78,7 +79,7 @@ ia64_init_addr_space (void)
vma->vm_mm = current->mm;
vma->vm_start = IA64_RBS_BOT;
vma->vm_end = vma->vm_start + PAGE_SIZE;
vma->vm_page_prot = protection_map[VM_READ | VM_WRITE];
vma->vm_page_prot = protection_map[VM_DATA_DEFAULT_FLAGS & 0x7];
vma->vm_flags = VM_READ|VM_WRITE|VM_MAYREAD|VM_MAYWRITE|VM_GROWSUP;
vma->vm_ops = NULL;
vma->vm_pgoff = 0;
......@@ -347,6 +348,15 @@ extern long htlbzone_pages;
extern struct list_head htlbpage_freelist;
#endif
#ifdef CONFIG_DISCONTIGMEM
void
paging_init (void)
{
extern void discontig_paging_init(void);
discontig_paging_init();
}
#else /* !CONFIG_DISCONTIGMEM */
void
paging_init (void)
{
......@@ -365,6 +375,7 @@ paging_init (void)
}
free_area_init(zones_size);
}
#endif /* !CONFIG_DISCONTIGMEM */
static int
count_pages (u64 start, u64 end, void *arg)
......@@ -380,10 +391,9 @@ count_reserved_pages (u64 start, u64 end, void *arg)
{
unsigned long num_reserved = 0;
unsigned long *count = arg;
struct page *pg;
for (pg = virt_to_page(start); pg < virt_to_page(end); ++pg)
if (PageReserved(pg))
for (; start < end; start += PAGE_SIZE)
if (PageReserved(virt_to_page(start)))
++num_reserved;
*count += num_reserved;
return 0;
......@@ -395,6 +405,7 @@ mem_init (void)
extern char __start_gate_section[];
long reserved_pages, codesize, datasize, initsize;
unsigned long num_pgt_pages;
pg_data_t *pgdat;
#ifdef CONFIG_PCI
/*
......@@ -405,16 +416,19 @@ mem_init (void)
platform_pci_dma_init();
#endif
#ifndef CONFIG_DISCONTIGMEM
if (!mem_map)
BUG();
max_mapnr = max_low_pfn;
#endif
num_physpages = 0;
efi_memmap_walk(count_pages, &num_physpages);
max_mapnr = max_low_pfn;
high_memory = __va(max_low_pfn * PAGE_SIZE);
totalram_pages += free_all_bootmem();
for_each_pgdat(pgdat)
totalram_pages += free_all_bootmem_node(pgdat);
reserved_pages = 0;
efi_memmap_walk(count_reserved_pages, &reserved_pages);
......@@ -425,7 +439,7 @@ mem_init (void)
printk("Memory: %luk/%luk available (%luk code, %luk reserved, %luk data, %luk init)\n",
(unsigned long) nr_free_pages() << (PAGE_SHIFT - 10),
max_mapnr << (PAGE_SHIFT - 10), codesize >> 10, reserved_pages << (PAGE_SHIFT - 10),
num_physpages << (PAGE_SHIFT - 10), codesize >> 10, reserved_pages << (PAGE_SHIFT - 10),
datasize >> 10, initsize >> 10);
/*
......@@ -441,6 +455,8 @@ mem_init (void)
if (num_pgt_pages > pgt_cache_water[1])
pgt_cache_water[1] = num_pgt_pages;
show_mem();
/* install the gate page in the global page table: */
put_gate_page(virt_to_page(__start_gate_section), GATE_ADDR);
......
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* This file contains NUMA specific variables and functions which can
* be split away from DISCONTIGMEM and are used on NUMA machines with
* contiguous memory.
*
* 2002/08/07 Erich Focht <efocht@ess.nec.de>
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <asm/numa.h>
/*
* The following structures are usually initialized by ACPI or
* similar mechanisms and describe the NUMA characteristics of the machine.
*/
int num_memblks = 0;
struct node_memblk_s node_memblk[NR_MEMBLKS];
struct node_cpuid_s node_cpuid[NR_CPUS];
/*
* This is a matrix with "distances" between nodes; they should be
* proportional to the memory access latency ratios.
*/
u8 numa_slit[NR_NODES * NR_NODES];
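A sketch of how the flat matrix is indexed (the helper is illustrative and not part of this patch; the value 10 for the local distance is the usual ACPI SLIT convention, assumed here):

/* illustrative helper: distance from node 'from' to node 'to' */
static inline u8 node_distance_example(int from, int to)
{
	return numa_slit[from * NR_NODES + to];	/* 10 = local, 20 = 2x slower */
}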
/* Identify which cnode a physical address resides on */
int
paddr_to_nid(unsigned long paddr)
{
int i;
for (i = 0; i < num_memblks; i++)
if (paddr >= node_memblk[i].start_paddr &&
paddr < node_memblk[i].start_paddr + node_memblk[i].size)
break;
return (i < num_memblks) ? node_memblk[i].nid : -1;
}
......@@ -141,13 +141,13 @@ pcibios_scan_root (int bus)
/*
* Called after each bus is probed, but before its children are examined.
*/
void __init
void __devinit
pcibios_fixup_bus (struct pci_bus *b)
{
return;
}
void __init
void __devinit
pcibios_update_resource (struct pci_dev *dev, struct resource *root,
struct resource *res, int resource)
{
......@@ -163,7 +163,7 @@ pcibios_update_resource (struct pci_dev *dev, struct resource *root,
/* ??? FIXME -- record old value for shutdown. */
}
void __init
void __devinit
pcibios_update_irq (struct pci_dev *dev, int irq)
{
pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
......@@ -171,7 +171,7 @@ pcibios_update_irq (struct pci_dev *dev, int irq)
/* ??? FIXME -- record old value for shutdown. */
}
void __init
void __devinit
pcibios_fixup_pbus_ranges (struct pci_bus * bus, struct pbus_set_ranges_data * ranges)
{
}
......
......@@ -2,6 +2,8 @@ CFLAGS = -g -O2 -Wall $(CPPFLAGS)
TARGET = $(TOPDIR)/include/asm-ia64/offsets.h
src = $(obj)
all:
fastdep:
......@@ -9,13 +11,13 @@ fastdep:
mrproper: clean
clean:
rm -f print_offsets.s print_offsets offsets.h
rm -f $(obj)/print_offsets.s $(obj)/print_offsets $(obj)/offsets.h
$(TARGET): offsets.h
@if ! cmp -s offsets.h ${TARGET}; then \
$(TARGET): $(obj)/offsets.h
@if ! cmp -s $(obj)/offsets.h ${TARGET}; then \
echo -e "*** Updating ${TARGET}..."; \
cp offsets.h ${TARGET}; \
else \
cp $(obj)/offsets.h ${TARGET}; \
else \
echo "*** ${TARGET} is up to date"; \
fi
......@@ -30,25 +32,26 @@ $(TARGET): offsets.h
ifeq ($(CROSS_COMPILE),)
offsets.h: print_offsets
./print_offsets > offsets.h
$(obj)/offsets.h: $(obj)/print_offsets
$(obj)/print_offsets > $(obj)/offsets.h
comma := ,
print_offsets: print_offsets.c FORCE
$(obj)/print_offsets: $(src)/print_offsets.c FORCE
[ -r $(TARGET) ] || echo "#define IA64_TASK_SIZE 0" > $(TARGET)
$(CC) $(CFLAGS) -DKBUILD_BASENAME=$(subst $(comma),_,$(subst -,_,$(*F))) \
print_offsets.c -o $@
$(src)/print_offsets.c -o $@
FORCE:
else
offsets.h: print_offsets.s
$(AWK) -f print_offsets.awk $^ > $@
$(obj)/offsets.h: $(obj)/print_offsets.s
$(AWK) -f $(src)/print_offsets.awk $^ > $@
print_offsets.s: print_offsets.c
$(CC) $(CFLAGS) -DKBUILD_BASENAME=$(subst $(comma),_,$(subst -,_,$(*F))) -S \
print_offsets.c -o $@
$(obj)/print_offsets.s: $(src)/print_offsets.c
[ -r $(TARGET) ] || echo "#define IA64_TASK_SIZE 0" > $(TARGET)
$(CC) $(CFLAGS) -DKBUILD_BASENAME=$(subst $(comma),_,$(subst -,_,$(*F))) -S $^ -o $@
endif
......
......@@ -12,11 +12,11 @@ SECTIONS
{
/* Sections to be discarded */
/DISCARD/ : {
*(.text.exit)
*(.data.exit)
*(.exit.text)
*(.exit.data)
*(.exitcall.exit)
*(.IA_64.unwind.text.exit)
*(.IA_64.unwind_info.text.exit)
*(.IA_64.unwind.exit.text)
*(.IA_64.unwind_info.exit.text)
}
v = PAGE_OFFSET; /* this symbol is here to make debugging easier... */
......@@ -65,6 +65,13 @@ SECTIONS
{ *(__ksymtab) }
__stop___ksymtab = .;
__kallsyms : AT(ADDR(__kallsyms) - PAGE_OFFSET)
{
__start___kallsyms = .; /* All kernel symbols */
*(__kallsyms)
__stop___kallsyms = .;
}
/* Unwind info & table: */
. = ALIGN(8);
.IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - PAGE_OFFSET)
......@@ -85,15 +92,15 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
__init_begin = .;
.text.init : AT(ADDR(.text.init) - PAGE_OFFSET)
{ *(.text.init) }
.init.text : AT(ADDR(.init.text) - PAGE_OFFSET)
{ *(.init.text) }
.data.init : AT(ADDR(.data.init) - PAGE_OFFSET)
{ *(.data.init) }
.init.data : AT(ADDR(.init.data) - PAGE_OFFSET)
{ *(.init.data) }
. = ALIGN(16);
__setup_start = .;
.setup.init : AT(ADDR(.setup.init) - PAGE_OFFSET)
{ *(.setup.init) }
.init.setup : AT(ADDR(.init.setup) - PAGE_OFFSET)
{ *(.init.setup) }
__setup_end = .;
__initcall_start = .;
.initcall.init : AT(ADDR(.initcall.init) - PAGE_OFFSET)
......
......@@ -118,6 +118,8 @@ deadline_find_hash(struct deadline_data *dd, sector_t offset)
while ((entry = next) != hash_list) {
next = entry->next;
prefetch(next);
drq = list_entry_hash(entry);
BUG_ON(!drq->hash_valid_count);
......@@ -191,6 +193,8 @@ deadline_merge(request_queue_t *q, struct list_head **insert, struct bio *bio)
while ((entry = entry->prev) != sort_list) {
__rq = list_entry_rq(entry);
prefetch(entry->prev);
BUG_ON(__rq->flags & REQ_STARTED);
if (!(__rq->flags & REQ_CMD))
......@@ -298,6 +302,8 @@ static void deadline_move_requests(struct deadline_data *dd, struct request *rq)
struct list_head *nxt = rq->queuelist.next;
int this_rq_cost;
prefetch(nxt);
/*
* take it off the sort and fifo list, move
* to dispatch queue
......
......@@ -272,13 +272,27 @@ void elv_merge_requests(request_queue_t *q, struct request *rq,
e->elevator_merge_req_fn(q, rq, next);
}
/*
* add_request and next_request are required to be supported, naturally
*/
void __elv_add_request(request_queue_t *q, struct request *rq,
struct list_head *insert_here)
void __elv_add_request(request_queue_t *q, struct request *rq, int at_end,
int plug)
{
struct list_head *insert = &q->queue_head;
if (at_end)
insert = insert->prev;
if (plug)
blk_plug_device(q);
q->elevator.elevator_add_req_fn(q, rq, insert);
}
void elv_add_request(request_queue_t *q, struct request *rq, int at_end,
int plug)
{
q->elevator.elevator_add_req_fn(q, rq, insert_here);
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
__elv_add_request(q, rq, at_end, plug);
spin_unlock_irqrestore(q->queue_lock, flags);
}
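Sketch of the new calling convention (hypothetical caller): rather than computing an insertion point in q->queue_head itself, a driver now states where the request goes and whether to plug.

/*
 * old interface: __elv_add_request(q, rq, q->queue_head.prev), with any
 * plugging done separately via blk_plug_device(q).
 * new interface:
 *
 *	elv_add_request(q, rq, 1, 1);	// at_end=1: tail, plug=1: plug device
 */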
static inline struct request *__elv_next_request(request_queue_t *q)
......@@ -289,8 +303,14 @@ static inline struct request *__elv_next_request(request_queue_t *q)
struct request *elv_next_request(request_queue_t *q)
{
struct request *rq;
int ret;
while ((rq = __elv_next_request(q))) {
/*
* just mark it as started even if we don't start it: a request
* that has been delayed should not be passed by new incoming
* requests
*/
rq->flags |= REQ_STARTED;
if (&rq->queuelist == q->last_merge)
......@@ -299,20 +319,22 @@ struct request *elv_next_request(request_queue_t *q)
if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn)
break;
/*
* all ok, break and return it
*/
if (!q->prep_rq_fn(q, rq))
ret = q->prep_rq_fn(q, rq);
if (ret == BLKPREP_OK) {
break;
/*
* prep said no-go, kill it
*/
blkdev_dequeue_request(rq);
if (end_that_request_first(rq, 0, rq->nr_sectors))
BUG();
end_that_request_last(rq);
} else if (ret == BLKPREP_DEFER) {
rq = NULL;
break;
} else if (ret == BLKPREP_KILL) {
blkdev_dequeue_request(rq);
rq->flags |= REQ_QUIET;
while (end_that_request_first(rq, 0, rq->nr_sectors))
;
end_that_request_last(rq);
} else {
printk("%s: bad return=%d\n", __FUNCTION__, ret);
break;
}
}
return rq;
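To make the three-way BLKPREP protocol concrete, here is a minimal prep_rq_fn sketch (hypothetical; can_start_now() is an assumed placeholder for a driver resource check):

static int example_prep_rq(request_queue_t *q, struct request *rq)
{
	if (!(rq->flags & REQ_CMD))
		return BLKPREP_KILL;	/* dequeue and fail the request */
	if (!can_start_now(q))		/* assumed helper */
		return BLKPREP_DEFER;	/* leave queued, retry later */
	rq->flags |= REQ_DONTPREP;	/* skip re-preparation next time */
	return BLKPREP_OK;		/* hand the request to the driver */
}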
......@@ -322,6 +344,16 @@ void elv_remove_request(request_queue_t *q, struct request *rq)
{
elevator_t *e = &q->elevator;
/*
* the main clearing point for q->last_merge is on retrieval of a
* request by the driver (it calls elv_next_request()), but it _can_
* also happen here if a request is added to the queue but later
* deleted without ever being given to the driver (merged with another
* request).
*/
if (&rq->queuelist == q->last_merge)
q->last_merge = NULL;
if (e->elevator_remove_req_fn)
e->elevator_remove_req_fn(q, rq);
}
......@@ -357,6 +389,7 @@ module_init(elevator_global_init);
EXPORT_SYMBOL(elevator_noop);
EXPORT_SYMBOL(elv_add_request);
EXPORT_SYMBOL(__elv_add_request);
EXPORT_SYMBOL(elv_next_request);
EXPORT_SYMBOL(elv_remove_request);
......
......@@ -548,12 +548,7 @@ static void process_page(unsigned long data)
return_bio = bio->bi_next;
bio->bi_next = NULL;
/* we should use bio_endio(), but BIO_UPTODATE has already been
* cleared, so set bio->bi_size = 0 manually to indicate the bio
* is completely done
*/
bio->bi_size = 0;
bio->bi_end_io(bio, bytes, 0);
bio_endio(bio, bio->bi_size, 0);
}
}
......@@ -1041,7 +1036,7 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_i
spin_lock_init(&card->lock);
dev->driver_data = card;
pci_set_drvdata(dev, card);
if (pci_write_cmd != 0x0F) /* If not Memory Write & Invalidate */
pci_write_cmd = 0x07; /* then Memory Write command */
......@@ -1100,7 +1095,7 @@ static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_i
*/
static void mm_pci_remove(struct pci_dev *dev)
{
struct cardinfo *card = dev->driver_data;
struct cardinfo *card = pci_get_drvdata(dev);
tasklet_kill(&card->tasklet);
iounmap(card->csr_remap);
......
......@@ -1610,56 +1610,6 @@ static void idedisk_add_settings(ide_drive_t *drive)
#endif
}
static int idedisk_suspend(struct device *dev, u32 state, u32 level)
{
ide_drive_t *drive = dev->driver_data;
printk("Suspending device %p\n", dev->driver_data);
/* I hope that every freeze operation from the upper levels has
* already been done...
*/
if (level != SUSPEND_SAVE_STATE)
return 0;
BUG_ON(in_interrupt());
printk("Waiting for commands to finish\n");
/* wait until all commands are finished */
/* FIXME: waiting for spinlocks should be done instead. */
if (!(HWGROUP(drive)))
printk("No hwgroup?\n");
while (HWGROUP(drive)->handler)
yield();
/* set the drive to standby */
printk(KERN_INFO "suspending: %s ", drive->name);
if (drive->driver) {
if (drive->driver->standby)
drive->driver->standby(drive);
}
drive->blocked = 1;
while (HWGROUP(drive)->handler)
yield();
return 0;
}
static int idedisk_resume(struct device *dev, u32 level)
{
ide_drive_t *drive = dev->driver_data;
if (level != RESUME_RESTORE_STATE)
return 0;
if (!drive->blocked)
panic("ide: Resume but not suspended?\n");
drive->blocked = 0;
return 0;
}
/* This is just a hook for the overall driver tree.
*/
......
......@@ -1238,6 +1238,21 @@ static void idefloppy_create_rw_cmd (idefloppy_floppy_t *floppy, idefloppy_pc_t
set_bit(PC_DMA_RECOMMENDED, &pc->flags);
}
static int
idefloppy_blockpc_cmd(idefloppy_floppy_t *floppy, idefloppy_pc_t *pc, struct request *rq)
{
/*
* we just support eject for now; it would not be hard to make the
* REQ_BLOCK_PC support fully featured
*/
if (rq->cmd[0] != IDEFLOPPY_START_STOP_CMD)
return 1;
idefloppy_init_pc(pc);
memcpy(pc->c, rq->cmd, sizeof(pc->c));
return 0;
}
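For illustration, a hypothetical caller building an eject request for this path (field names follow the code above; the 0x1b opcode and the LoEj bit encoding are standard ATAPI START STOP UNIT details, stated here as an assumption):

/* hypothetical: issue an eject through the new REQ_BLOCK_PC path */
memset(rq->cmd, 0, sizeof(rq->cmd));
rq->cmd[0] = IDEFLOPPY_START_STOP_CMD;	/* ATAPI START STOP UNIT (0x1b) */
rq->cmd[4] = 0x02;			/* LoEj=1, Start=0: eject medium */
rq->flags = REQ_BLOCK_PC;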
/*
* idefloppy_do_request is our request handling function.
*/
......@@ -1280,6 +1295,12 @@ static ide_startstop_t idefloppy_do_request (ide_drive_t *drive, struct request
idefloppy_create_rw_cmd(floppy, pc, rq, block);
} else if (rq->flags & REQ_SPECIAL) {
pc = (idefloppy_pc_t *) rq->buffer;
} else if (rq->flags & REQ_BLOCK_PC) {
pc = idefloppy_next_pc_storage(drive);
if (idefloppy_blockpc_cmd(floppy, pc, rq)) {
idefloppy_do_end_request(drive, 0, 0);
return ide_stopped;
}
} else {
blk_dump_rq_flags(rq,
"ide-floppy: unsupported command in queue");
......
......@@ -878,13 +878,12 @@ ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
{
ide_startstop_t startstop;
unsigned long block;
ide_hwif_t *hwif = HWIF(drive);
BUG_ON(!(rq->flags & REQ_STARTED));
#ifdef DEBUG
printk("%s: start_request: current=0x%08lx\n",
hwif->name, (unsigned long) rq);
HWIF(drive)->name, (unsigned long) rq);
#endif
/* bail early if we've exceeded max_failures */
......@@ -910,7 +909,7 @@ ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
block = 1; /* redirect MBR access to EZ-Drive partn table */
#if (DISK_RECOVERY_TIME > 0)
while ((read_timer() - hwif->last_time) < DISK_RECOVERY_TIME);
while ((read_timer() - HWIF(drive)->last_time) < DISK_RECOVERY_TIME);
#endif
SELECT_DRIVE(drive);
......@@ -1128,9 +1127,15 @@ void ide_do_request (ide_hwgroup_t *hwgroup, int masked_irq)
break;
}
/*
* we know that the queue isn't empty, but elv_next_request() can
* still return NULL if the q->prep_rq_fn() decides to kill a request
*/
rq = elv_next_request(&drive->queue);
if (!rq)
if (!rq) {
hwgroup->busy = !!ata_pending_commands(drive);
break;
}
if (!rq->bio && ata_pending_commands(drive))
break;
......@@ -1515,10 +1520,8 @@ int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t actio
{
unsigned long flags;
ide_hwgroup_t *hwgroup = HWGROUP(drive);
unsigned int major = HWIF(drive)->major;
request_queue_t *q = &drive->queue;
struct list_head *queue_head = &q->queue_head;
DECLARE_COMPLETION(wait);
int insert_end = 1, err;
#ifdef CONFIG_BLK_DEV_PDC4030
if (HWIF(drive)->chipset == ide_pdc4030 && rq->buffer != NULL)
......@@ -1540,29 +1543,35 @@ int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t actio
}
rq->rq_disk = drive->disk;
if (action == ide_wait)
/*
* we need to hold an extra reference to the request for safe
* inspection after completion
*/
if (action == ide_wait) {
rq->ref_count++;
rq->waiting = &wait;
}
spin_lock_irqsave(&ide_lock, flags);
if (blk_queue_empty(q) || action == ide_preempt) {
if (action == ide_preempt)
hwgroup->rq = NULL;
} else {
if (action == ide_wait || action == ide_end) {
queue_head = queue_head->prev;
} else
queue_head = queue_head->next;
if (action == ide_preempt) {
hwgroup->rq = NULL;
insert_end = 0;
}
q->elevator.elevator_add_req_fn(q, rq, queue_head);
__elv_add_request(&drive->queue, rq, insert_end, 0);
ide_do_request(hwgroup, 0);
spin_unlock_irqrestore(&ide_lock, flags);
err = 0;
if (action == ide_wait) {
/* wait for it to be serviced */
wait_for_completion(&wait);
/* return -EIO if errors */
return rq->errors ? -EIO : 0;
if (rq->errors)
err = -EIO;
blk_put_request(rq);
}
return 0;
return err;
}
EXPORT_SYMBOL(ide_do_drive_cmd);
......@@ -3369,7 +3378,7 @@ int ide_register_driver(ide_driver_t *driver)
list_del_init(&drive->list);
ata_attach(drive);
}
driver->gen_driver.name = driver->name;
driver->gen_driver.name = (char *) driver->name;
driver->gen_driver.bus = &ide_bus_type;
driver->gen_driver.remove = ide_drive_remove;
return driver_register(&driver->gen_driver);
......
......@@ -168,8 +168,7 @@ static int create_strip_zones (mddev_t *mddev)
* @bio: the buffer head that's been built up so far
* @biovec: the request that could be merged to it.
*
* Return 1 if the merge is not permitted (because the
* result would cross a chunk boundary), 0 otherwise.
* Return the number of bytes we can accept at this offset.
*/
static int raid0_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec)
{
......@@ -182,7 +181,7 @@ static int raid0_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_
block = bio->bi_sector >> 1;
bio_sz = (bio->bi_size + biovec->bv_len) >> 10;
return chunk_size < ((block & (chunk_size - 1)) + bio_sz);
return (chunk_size - ((block & (chunk_size - 1)) + bio_sz)) << 10;
}
static int raid0_run (mddev_t *mddev)
......