Commit 2e926a15 authored by Linus Torvalds

Merge http://ppc.bkbits.net/for-linus-ppc64

into home.osdl.org:/home/torvalds/v2.5/linux
parents e5ac20cb 6e15340f
......@@ -72,6 +72,17 @@ config PPC64
bool
default y
config HUGETLB_PAGE
bool "Huge TLB Page Support"
help
This enables support for huge pages. User space applications
can make use of this support with the sys_alloc_hugepages and
sys_free_hugepages system calls. If your applications are
huge page aware and your processor supports this (only POWER4),
then say Y here.
Otherwise, say N.
config SMP
bool "Symmetric multi-processing support"
---help---
......
......@@ -18,7 +18,13 @@ KERNELLOAD := 0xc000000000000000
LDFLAGS := -m elf64ppc
LDFLAGS_vmlinux := -Bstatic -e $(KERNELLOAD) -Ttext $(KERNELLOAD)
CFLAGS += -msoft-float -pipe -Wno-uninitialized -mminimal-toc \
-mtraceback=full -mcpu=power4
-mcpu=power4
have_zero_bss := $(shell if $(CC) -fno-zero-initialized-in-bss -S -o /dev/null -xc /dev/null > /dev/null 2>&1; then echo y; else echo n; fi)
ifeq ($(have_zero_bss),y)
CFLAGS += -fno-zero-initialized-in-bss
endif
head-y := arch/ppc64/kernel/head.o
......@@ -39,6 +45,12 @@ $(boottarget-y): vmlinux
rm -f .config arch/ppc64/defconfig
cp -f arch/ppc64/configs/$(@:config=defconfig) arch/ppc64/defconfig
bootimage-$(CONFIG_PPC_PSERIES) := zImage
bootimage-$(CONFIG_PPC_ISERIES) := vmlinux.sm
BOOTIMAGE := $(bootimage-y)
install: vmlinux
$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) $@
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
......
......@@ -122,5 +122,7 @@ $(obj)/imagesize.c: vmlinux
awk '{printf "unsigned long vmlinux_memsize = 0x%s;\n", substr($$1,8)}' \
>> $(obj)/imagesize.c
install: $(CONFIGURE) $(obj)/$(BOOTIMAGE)
sh -x $(src)/install.sh "$(KERNELRELEASE)" "$(obj)/$(BOOTIMAGE)" "$(TOPDIR)/System.map" "$(INSTALL_PATH)"
clean-files := $(patsubst $(obj)/%,%, $(obj-boot))
#!/bin/sh
#
# arch/ppc64/boot/install.sh
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 1995 by Linus Torvalds
#
# Blatantly stolen from arch/i386/boot/install.sh by Dave Hansen
#
# "make install" script for ppc64 architecture
#
# Arguments:
# $1 - kernel version
# $2 - kernel image file
# $3 - kernel map file
# $4 - default install path (blank if root directory)
#
# User may have a custom install script
if [ -x ~/bin/installkernel ]; then exec ~/bin/installkernel "$@"; fi
if [ -x /sbin/installkernel ]; then exec /sbin/installkernel "$@"; fi
# Default install
# this should work for both the pSeries zImage and the iSeries vmlinux.sm
image_name=`basename $2`
if [ -f $4/$image_name ]; then
mv $4/$image_name $4/$image_name.old
fi
if [ -f $4/System.map ]; then
mv $4/System.map $4/System.old
fi
cat $2 > $4/$image_name
cp $3 $4/System.map
......@@ -8,11 +8,13 @@ CONFIG_HAVE_DEC_LOCK=y
CONFIG_EARLY_PRINTK=y
CONFIG_COMPAT=y
CONFIG_FRAME_POINTER=y
CONFIG_FORCE_MAX_ZONEORDER=13
#
# Code maturity level options
#
CONFIG_EXPERIMENTAL=y
# CONFIG_BROKEN is not set
#
# General setup
......@@ -21,11 +23,14 @@ CONFIG_SWAP=y
CONFIG_SYSVIPC=y
# CONFIG_BSD_PROCESS_ACCT is not set
CONFIG_SYSCTL=y
CONFIG_LOG_BUF_SHIFT=15
CONFIG_LOG_BUF_SHIFT=16
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
# CONFIG_EMBEDDED is not set
CONFIG_KALLSYMS=y
CONFIG_FUTEX=y
CONFIG_EPOLL=y
CONFIG_IOSCHED_NOOP=y
CONFIG_IOSCHED_AS=y
CONFIG_IOSCHED_DEADLINE=y
......@@ -72,7 +77,6 @@ CONFIG_PROC_DEVICETREE=y
#
# Generic Driver Options
#
# CONFIG_FW_LOADER is not set
#
# Memory Technology Devices (MTD)
......@@ -99,7 +103,7 @@ CONFIG_BLK_DEV_FD=y
# CONFIG_BLK_DEV_UMEM is not set
CONFIG_BLK_DEV_LOOP=y
# CONFIG_BLK_DEV_CRYPTOLOOP is not set
CONFIG_BLK_DEV_NBD=y
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=4096
CONFIG_BLK_DEV_INITRD=y
......@@ -127,9 +131,9 @@ CONFIG_CHR_DEV_SG=y
#
# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
#
# CONFIG_SCSI_MULTI_LUN is not set
# CONFIG_SCSI_REPORT_LUNS is not set
# CONFIG_SCSI_CONSTANTS is not set
CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_REPORT_LUNS=y
CONFIG_SCSI_CONSTANTS=y
# CONFIG_SCSI_LOGGING is not set
#
......@@ -178,7 +182,8 @@ CONFIG_MD_RAID0=y
CONFIG_MD_RAID1=y
CONFIG_MD_RAID5=y
# CONFIG_MD_MULTIPATH is not set
# CONFIG_BLK_DEV_DM is not set
CONFIG_BLK_DEV_DM=y
CONFIG_DM_IOCTL_V4=y
#
# Fusion MPT device support
......@@ -206,9 +211,8 @@ CONFIG_NET=y
CONFIG_PACKET=y
# CONFIG_PACKET_MMAP is not set
# CONFIG_NETLINK_DEV is not set
# CONFIG_NETFILTER is not set
CONFIG_UNIX=y
# CONFIG_NET_KEY is not set
CONFIG_NET_KEY=m
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
# CONFIG_IP_ADVANCED_ROUTER is not set
......@@ -217,13 +221,17 @@ CONFIG_NET_IPIP=y
# CONFIG_NET_IPGRE is not set
# CONFIG_IP_MROUTE is not set
# CONFIG_ARPD is not set
# CONFIG_INET_ECN is not set
CONFIG_INET_ECN=y
CONFIG_SYN_COOKIES=y
# CONFIG_INET_AH is not set
# CONFIG_INET_ESP is not set
# CONFIG_INET_IPCOMP is not set
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
# CONFIG_IPV6 is not set
# CONFIG_XFRM_USER is not set
# CONFIG_DECNET is not set
# CONFIG_BRIDGE is not set
# CONFIG_NETFILTER is not set
CONFIG_XFRM=y
CONFIG_XFRM_USER=m
#
# SCTP Configuration (EXPERIMENTAL)
......@@ -233,8 +241,6 @@ CONFIG_IPV6_SCTP__=y
# CONFIG_ATM is not set
# CONFIG_VLAN_8021Q is not set
# CONFIG_LLC is not set
# CONFIG_DECNET is not set
# CONFIG_BRIDGE is not set
# CONFIG_X25 is not set
# CONFIG_LAPB is not set
# CONFIG_NET_DIVERT is not set
......@@ -258,10 +264,10 @@ CONFIG_NETDEVICES=y
# ARCnet devices
#
# CONFIG_ARCNET is not set
# CONFIG_DUMMY is not set
# CONFIG_BONDING is not set
CONFIG_DUMMY=m
CONFIG_BONDING=m
# CONFIG_EQUALIZER is not set
# CONFIG_TUN is not set
CONFIG_TUN=m
# CONFIG_ETHERTAP is not set
#
......@@ -312,6 +318,7 @@ CONFIG_E1000=y
# CONFIG_HAMACHI is not set
# CONFIG_YELLOWFIN is not set
# CONFIG_R8169 is not set
# CONFIG_SIS190 is not set
# CONFIG_SK98LIN is not set
# CONFIG_TIGON3 is not set
......@@ -321,7 +328,14 @@ CONFIG_E1000=y
# CONFIG_IXGB is not set
# CONFIG_FDDI is not set
# CONFIG_HIPPI is not set
# CONFIG_PPP is not set
CONFIG_PPP=m
# CONFIG_PPP_MULTILINK is not set
# CONFIG_PPP_FILTER is not set
CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPPOE=m
# CONFIG_SLIP is not set
#
......@@ -489,26 +503,32 @@ CONFIG_RAW_DRIVER=y
# File systems
#
CONFIG_EXT2_FS=y
# CONFIG_EXT2_FS_XATTR is not set
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
# CONFIG_EXT2_FS_SECURITY is not set
CONFIG_EXT3_FS=y
# CONFIG_EXT3_FS_XATTR is not set
CONFIG_EXT3_FS_XATTR=y
CONFIG_EXT3_FS_POSIX_ACL=y
# CONFIG_EXT3_FS_SECURITY is not set
CONFIG_JBD=y
# CONFIG_JBD_DEBUG is not set
CONFIG_FS_MBCACHE=y
CONFIG_REISERFS_FS=y
# CONFIG_REISERFS_CHECK is not set
# CONFIG_REISERFS_PROC_INFO is not set
CONFIG_JFS_FS=y
# CONFIG_JFS_POSIX_ACL is not set
CONFIG_JFS_POSIX_ACL=y
# CONFIG_JFS_DEBUG is not set
# CONFIG_JFS_STATISTICS is not set
CONFIG_XFS_FS=y
CONFIG_FS_POSIX_ACL=y
CONFIG_XFS_FS=m
# CONFIG_XFS_RT is not set
# CONFIG_XFS_QUOTA is not set
# CONFIG_XFS_POSIX_ACL is not set
CONFIG_XFS_POSIX_ACL=y
# CONFIG_MINIX_FS is not set
# CONFIG_ROMFS_FS is not set
# CONFIG_QUOTA is not set
CONFIG_AUTOFS_FS=y
CONFIG_AUTOFS_FS=m
# CONFIG_AUTOFS4_FS is not set
#
......@@ -517,7 +537,7 @@ CONFIG_AUTOFS_FS=y
CONFIG_ISO9660_FS=y
# CONFIG_JOLIET is not set
# CONFIG_ZISOFS is not set
# CONFIG_UDF_FS is not set
CONFIG_UDF_FS=m
#
# DOS/FAT/NT Filesystems
......@@ -533,8 +553,9 @@ CONFIG_VFAT_FS=y
CONFIG_PROC_FS=y
# CONFIG_DEVFS_FS is not set
CONFIG_DEVPTS_FS=y
# CONFIG_DEVPTS_FS_XATTR is not set
# CONFIG_TMPFS is not set
CONFIG_DEVPTS_FS_XATTR=y
# CONFIG_DEVPTS_FS_SECURITY is not set
CONFIG_TMPFS=y
CONFIG_RAMFS=y
#
......@@ -546,7 +567,7 @@ CONFIG_RAMFS=y
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
# CONFIG_CRAMFS is not set
CONFIG_CRAMFS=y
# CONFIG_VXFS_FS is not set
# CONFIG_HPFS_FS is not set
# CONFIG_QNX4FS_FS is not set
......@@ -561,15 +582,16 @@ CONFIG_NFS_V3=y
CONFIG_NFS_V4=y
CONFIG_NFSD=y
CONFIG_NFSD_V3=y
# CONFIG_NFSD_V4 is not set
CONFIG_NFSD_V4=y
CONFIG_NFSD_TCP=y
CONFIG_LOCKD=y
CONFIG_LOCKD_V4=y
CONFIG_EXPORTFS=y
CONFIG_SUNRPC=y
# CONFIG_SUNRPC_GSS is not set
CONFIG_SUNRPC_GSS=m
CONFIG_RPCSEC_GSS_KRB5=m
# CONFIG_SMB_FS is not set
CONFIG_CIFS=y
CONFIG_CIFS=m
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
# CONFIG_INTERMEZZO_FS is not set
......@@ -705,6 +727,7 @@ CONFIG_MAGIC_SYSRQ=y
CONFIG_XMON=y
CONFIG_XMON_DEFAULT=y
# CONFIG_PPCDBG is not set
# CONFIG_DEBUG_INFO is not set
#
# Security options
......@@ -714,9 +737,27 @@ CONFIG_XMON_DEFAULT=y
#
# Cryptographic options
#
# CONFIG_CRYPTO is not set
CONFIG_CRYPTO=y
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MD5=m
CONFIG_CRYPTO_SHA1=m
CONFIG_CRYPTO_SHA256=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_DES=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_AES=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_DEFLATE=m
CONFIG_CRYPTO_TEST=m
#
# Library routines
#
CONFIG_CRC32=y
CONFIG_ZLIB_INFLATE=y
CONFIG_ZLIB_DEFLATE=m
......@@ -83,9 +83,7 @@ int main(void)
DEFINE(PACALPQUEUE, offsetof(struct paca_struct, lpQueuePtr));
DEFINE(PACATOC, offsetof(struct paca_struct, xTOC));
DEFINE(PACAEXCSP, offsetof(struct paca_struct, exception_sp));
DEFINE(PACAHRDWINTSTACK, offsetof(struct paca_struct, xHrdIntStack));
DEFINE(PACAPROCENABLED, offsetof(struct paca_struct, xProcEnabled));
DEFINE(PACAHRDWINTCOUNT, offsetof(struct paca_struct, xHrdIntCount));
DEFINE(PACADEFAULTDECR, offsetof(struct paca_struct, default_decr));
DEFINE(PACAPROFENABLED, offsetof(struct paca_struct, prof_enabled));
DEFINE(PACAPROFLEN, offsetof(struct paca_struct, prof_len));
......
......@@ -72,6 +72,7 @@ extern void openpic_init_IRQ(void);
extern void find_and_init_phbs(void);
extern void pSeries_get_boot_time(struct rtc_time *rtc_time);
extern void pSeries_get_rtc_time(struct rtc_time *rtc_time);
extern int pSeries_set_rtc_time(struct rtc_time *rtc_time);
void pSeries_calibrate_decr(void);
......@@ -256,7 +257,7 @@ chrp_init(unsigned long r3, unsigned long r4, unsigned long r5,
ppc_md.power_off = rtas_power_off;
ppc_md.halt = rtas_halt;
ppc_md.get_boot_time = pSeries_get_rtc_time;
ppc_md.get_boot_time = pSeries_get_boot_time;
ppc_md.get_rtc_time = pSeries_get_rtc_time;
ppc_md.set_rtc_time = pSeries_set_rtc_time;
ppc_md.calibrate_decr = pSeries_calibrate_decr;
......
......@@ -115,8 +115,17 @@ unsigned long eeh_check_failure(void *token, unsigned long val)
ret = rtas_call(ibm_read_slot_reset_state, 3, 3, rets,
dn->eeh_config_addr, BUID_HI(dn->phb->buid), BUID_LO(dn->phb->buid));
if (ret == 0 && rets[1] == 1 && rets[0] >= 2) {
panic("EEH: MMIO failure (%ld) on device:\n %s %s\n",
rets[0], pci_name(dev), dev->dev.name);
/*
* XXX We should create a separate sysctl for this.
*
* Since the panic_on_oops sysctl is used to halt
* the system in light of potential corruption, we
* can use it here.
*/
if (panic_on_oops)
panic("EEH: MMIO failure (%ld) on device:\n%s\n", rets[0], pci_name(dev));
else
printk("EEH: MMIO failure (%ld) on device:\n%s\n", rets[0], pci_name(dev));
}
}
eeh_false_positives++;
......
......@@ -40,6 +40,15 @@
#define DO_SOFT_DISABLE
#endif
/* copy the saved SOFTE bit or EE bit from the saved MSR, depending
* on whether we are doing soft-disable or not
*/
#ifdef DO_SOFT_DISABLE
#define DO_COPY_EE() ld r20,SOFTE(r1)
#else
#define DO_COPY_EE() rldicl r20,r23,49,63
#endif
/*
* hcall interface to pSeries LPAR
*/
......@@ -618,11 +627,7 @@ stab_bolted_user_return:
ld r4,_DAR(r1)
ld r5,_DSISR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
#ifdef DO_SOFT_DISABLE
ld r20,SOFTE(r1) /* Copy saved SOFTE bit */
#else
rldicl r20,r23,49,63 /* copy EE bit from saved MSR */
#endif
DO_COPY_EE()
li r6,0x300
bl .save_remaining_regs
bl .do_page_fault
......@@ -644,12 +649,9 @@ DataAccessSLB_common:
or. r3,r3,r3 /* Check return code */
beq fast_exception_return /* Return if we succeeded */
addi r3,r1,STACK_FRAME_OVERHEAD
#ifdef DO_SOFT_DISABLE
ld r20,SOFTE(r1)
#else
rldicl r20,r23,49,63 /* copy EE bit from saved MSR */
#endif
DO_COPY_EE()
li r6,0x380
li r5,0
bl .save_remaining_regs
bl .do_page_fault
b .ret_from_except
......@@ -670,13 +672,9 @@ InstructionAccess_common:
bl .do_hash_page_ISI /* Try to handle as hpte fault */
1:
mr r4,r22
mr r5,r23
rlwinm r5,r23,0,4,4 /* We only care about PR in error_code */
addi r3,r1,STACK_FRAME_OVERHEAD
#ifdef DO_SOFT_DISABLE
ld r20,SOFTE(r1)
#else
rldicl r20,r23,49,63 /* copy EE bit from saved MSR */
#endif
DO_COPY_EE()
li r6,0x400
bl .save_remaining_regs
bl .do_page_fault
......@@ -692,12 +690,9 @@ InstructionAccessSLB_common:
beq+ fast_exception_return /* Return if we succeeded */
addi r3,r1,STACK_FRAME_OVERHEAD
#ifdef DO_SOFT_DISABLE
ld r20,SOFTE(r1)
#else
rldicl r20,r23,49,63 /* copy EE bit from saved MSR */
#endif
DO_COPY_EE()
li r6,0x480
li r5,0
bl .save_remaining_regs
bl .do_page_fault
b .ret_from_except
......@@ -710,70 +705,14 @@ HardwareInterrupt_entry:
li r20,0
li r6,0x500
bl .save_remaining_regs
/* Determine if need to run do_irq on a hardware interrupt stack */
/* The first invocation of do_irq will occur on the kernel */
/* stack in the current stack */
/* All other invocations of do_irq will run on the hardware */
/* interrupt stack associated with the PACA of the current */
/* processor. */
/* */
/* The call to do_irq will preserve the value of r14 - r31 */
/* */
/*
* XXX turn off interrupt stacks until the thread_info stuff is fixed.
* Otherwise we end up setting need_resched etc bits in the interrupt
* stack and they never get seen when we return to the process stack - Anton
*/
#if 0
lbz r21,PACAHRDWINTCOUNT(r13) /* get hardware interrupt cnt */
cmpi 0,r21,1 /* */
addi r21,r21,1 /* incr hardware interrupt cnt*/
stb r21,PACAHRDWINTCOUNT(r13) /* */
bne 2f /* */
mr r14,r1 /* preserve current r1 */
ld r1,PACAHRDWINTSTACK(r13) /* */
std r14,0(r1) /* set the back chain */
bl .do_IRQ
lbz r22,PACAHRDWINTCOUNT(r13) /* get hardware interrupt cnt */
cmp 0,r22,r21 /* debug test */
bne 3f
subi r21,r21,1
stb r21,PACAHRDWINTCOUNT(r13) /* */
mr r1,r14 /* */
b .ret_from_except
#endif
2:
bl .do_IRQ
#if 0
lbz r22,PACAHRDWINTCOUNT(r13) /* get hardware interrupt cnt */
cmp 0,r22,r21 /* debug test */
bne 3f /* */
subi r21,r21,1 /* decr hardware interrupt cnt*/
stb r21,PACAHRDWINTCOUNT(r13) /* */
#endif
b .ret_from_except
3:
/* error - counts out of sync */
#ifdef CONFIG_XMON
bl .xmon
#endif
4: b 4b
.globl Alignment_common
Alignment_common:
EXCEPTION_PROLOG_COMMON
addi r3,r1,STACK_FRAME_OVERHEAD
#ifdef DO_SOFT_DISABLE
ld r20,SOFTE(r1)
#else
rldicl r20,r23,49,63 /* copy EE bit from saved MSR */
#endif
DO_COPY_EE()
li r6,0x600
bl .save_remaining_regs
bl .AlignmentException
......@@ -783,11 +722,7 @@ Alignment_common:
ProgramCheck_common:
EXCEPTION_PROLOG_COMMON
addi r3,r1,STACK_FRAME_OVERHEAD
#ifdef DO_SOFT_DISABLE
ld r20,SOFTE(r1)
#else
rldicl r20,r23,49,63 /* copy EE bit from saved MSR */
#endif
DO_COPY_EE()
li r6,0x700
bl .save_remaining_regs
bl .ProgramCheckException
......@@ -798,11 +733,7 @@ FPUnavailable_common:
EXCEPTION_PROLOG_COMMON
bne .load_up_fpu /* if from user, just load it up */
addi r3,r1,STACK_FRAME_OVERHEAD
#ifdef DO_SOFT_DISABLE
ld r20,SOFTE(r1)
#else
rldicl r20,r23,49,63 /* copy EE bit from saved MSR */
#endif
DO_COPY_EE()
li r6,0x800
bl .save_remaining_regs
bl .KernelFPUnavailableException
......@@ -818,11 +749,7 @@ SystemCall_common:
beq+ HardwareInterrupt_entry
1:
#endif
#ifdef DO_SOFT_DISABLE
ld r20,SOFTE(r1)
#else
rldicl r20,r23,49,63 /* copy EE bit from saved MSR */
#endif
DO_COPY_EE()
li r6,0xC00
bl .save_remaining_regs
bl .DoSyscall
......@@ -1866,18 +1793,6 @@ _STATIC(start_here_common)
li r5,0
std r0,PACAKSAVE(r13)
/* ptr to hardware interrupt stack for boot processor */
LOADADDR(r3, hardware_int_paca0)
li r5,PAGE_SIZE
sldi r5,r5,3
subi r5,r5,STACK_FRAME_OVERHEAD
add r3,r3,r5
std r3,PACAHRDWINTSTACK(r13)
li r3,0
stb r3,PACAHRDWINTCOUNT(r13)
/* Restore the parms passed in from the bootloader. */
mr r3,r31
mr r4,r30
......@@ -1999,10 +1914,6 @@ swapper_pg_dir:
ioremap_dir:
.space 4096
.globl hardware_int_paca0
hardware_int_paca0:
.space 8*PAGE_SIZE
/* 1 page segment table per cpu (max 48, cpu0 allocated at STAB0_PHYS_ADDR) */
.globl stab_array
stab_array:
......
......@@ -197,7 +197,7 @@ pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea)
if (!pgd_none(*pg)) {
pm = pmd_offset(pg, ea);
if (!pmd_none(*pm)) {
if (pmd_present(*pm)) {
pt = pte_offset_kernel(pm, ea);
pte = *pt;
if (!pte_present(pte))
......@@ -436,8 +436,12 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
if (user_region && cpus_equal(mm->cpu_vm_mask, tmp))
local = 1;
ret = hash_huge_page(mm, access, ea, vsid, local);
if (ret < 0) {
ptep = find_linux_pte(pgdir, ea);
ret = __hash_page(ea, access, vsid, ptep, trap, local);
}
spin_unlock(&mm->page_table_lock);
return ret;
......
......@@ -39,6 +39,7 @@
#include <linux/irq.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/kallsyms.h>
#include <asm/uaccess.h>
#include <asm/bitops.h>
......@@ -350,14 +351,11 @@ int show_interrupts(struct seq_file *p, void *v)
return 0;
}
extern char *ppc_find_proc_name(unsigned *p, char *buf, unsigned buflen);
static inline void handle_irq_event(int irq, struct pt_regs *regs,
static inline int handle_irq_event(int irq, struct pt_regs *regs,
struct irqaction *action)
{
int status = 0;
int retval = 0;
struct irqaction *first_action = action;
if (!(action->flags & SA_INTERRUPT))
local_irq_enable();
......@@ -370,28 +368,88 @@ static inline void handle_irq_event(int irq, struct pt_regs *regs,
if (status & SA_SAMPLE_RANDOM)
add_interrupt_randomness(irq);
local_irq_disable();
if (retval != 1) {
static int count = 100;
char name_buf[256];
if (count) {
count--;
if (retval) {
printk("irq event %d: bogus retval mask %x\n",
irq, retval);
return retval;
}
static void __report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
{
struct irqaction *action;
if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
printk(KERN_ERR "irq event %d: bogus return value %x\n",
irq, action_ret);
} else {
printk("irq %d: nobody cared!\n", irq);
printk(KERN_ERR "irq %d: nobody cared!\n", irq);
}
dump_stack();
printk("handlers:\n");
action = first_action;
printk(KERN_ERR "handlers:\n");
action = desc->action;
do {
printk("[<%p>]", action->handler);
printk(" (%s)\n",
ppc_find_proc_name((unsigned *)action->handler, name_buf, 256));
printk(KERN_ERR "[<%p>]", action->handler);
print_symbol(" (%s)",
(unsigned long)action->handler);
printk("\n");
action = action->next;
} while (action);
}
static void report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
{
static int count = 100;
if (count) {
count--;
__report_bad_irq(irq, desc, action_ret);
}
}
static int noirqdebug;
static int __init noirqdebug_setup(char *str)
{
noirqdebug = 1;
printk("IRQ lockup detection disabled\n");
return 1;
}
__setup("noirqdebug", noirqdebug_setup);
/*
* If 99,900 of the previous 100,000 interrupts have not been handled then
* assume that the IRQ is stuck in some manner. Drop a diagnostic and try to
* turn the IRQ off.
*
* (The other 100-of-100,000 interrupts may have been a correctly-functioning
* device sharing an IRQ with the failing one)
*
* Called under desc->lock
*/
static void note_interrupt(int irq, irq_desc_t *desc, irqreturn_t action_ret)
{
if (action_ret != IRQ_HANDLED) {
desc->irqs_unhandled++;
if (action_ret != IRQ_NONE)
report_bad_irq(irq, desc, action_ret);
}
desc->irq_count++;
if (desc->irq_count < 100000)
return;
desc->irq_count = 0;
if (desc->irqs_unhandled > 99900) {
/*
* The interrupt is stuck
*/
__report_bad_irq(irq, desc, action_ret);
/*
* Now kill the IRQ
*/
printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
desc->status |= IRQ_DISABLED;
desc->handler->disable(irq);
}
desc->irqs_unhandled = 0;
}
/*
......@@ -462,10 +520,13 @@ void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq)
* SMP environment.
*/
for (;;) {
irqreturn_t action_ret;
spin_unlock(&desc->lock);
handle_irq_event(irq, regs, action);
action_ret = handle_irq_event(irq, regs, action);
spin_lock(&desc->lock);
if (!noirqdebug)
note_interrupt(irq, desc, action_ret);
if (likely(!(desc->status & IRQ_PENDING)))
break;
desc->status &= ~IRQ_PENDING;
......
......@@ -36,18 +36,6 @@
#include <asm/tlb.h>
#include <asm/hvcall.h>
long plpar_pte_enter(unsigned long flags,
unsigned long ptex,
unsigned long new_pteh, unsigned long new_ptel,
unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
{
unsigned long dummy, ret;
ret = plpar_hcall(H_ENTER, flags, ptex, new_pteh, new_ptel,
old_pteh_ret, old_ptel_ret, &dummy);
return(ret);
}
long plpar_pte_remove(unsigned long flags,
unsigned long ptex,
unsigned long avpn,
......@@ -83,7 +71,6 @@ long plpar_tce_get(unsigned long liobn,
tce_ret, &dummy, &dummy);
}
long plpar_tce_put(unsigned long liobn,
unsigned long ioba,
unsigned long tceval)
......@@ -104,10 +91,9 @@ long plpar_put_term_char(unsigned long termno,
unsigned long len,
const char *buffer)
{
unsigned long dummy;
unsigned long *lbuf = (unsigned long *)buffer; /* ToDo: alignment? */
return plpar_hcall(H_PUT_TERM_CHAR, termno, len,
lbuf[0], lbuf[1], &dummy, &dummy, &dummy);
return plpar_hcall_norets(H_PUT_TERM_CHAR, termno, len, lbuf[0],
lbuf[1]);
}
static void tce_build_pSeriesLP(struct TceTable *tbl, long tcenum,
......@@ -287,12 +273,11 @@ int hvc_get_chars(int index, char *buf, int count)
int hvc_put_chars(int index, const char *buf, int count)
{
unsigned long dummy;
unsigned long *lbuf = (unsigned long *) buf;
long ret;
ret = plpar_hcall(H_PUT_TERM_CHAR, index, count, lbuf[0], lbuf[1],
&dummy, &dummy, &dummy);
ret = plpar_hcall_norets(H_PUT_TERM_CHAR, index, count, lbuf[0],
lbuf[1]);
if (ret == H_Success)
return count;
if (ret == H_Busy)
......@@ -318,7 +303,6 @@ int hvc_count(int *start_termno)
long pSeries_lpar_hpte_insert(unsigned long hpte_group,
unsigned long va, unsigned long prpn,
int secondary, unsigned long hpteflags,
......@@ -329,6 +313,7 @@ long pSeries_lpar_hpte_insert(unsigned long hpte_group,
unsigned long flags;
unsigned long slot;
HPTE lhpte;
unsigned long dummy0, dummy1;
/* Fill in the local HPTE with absolute rpn, avpn and flags */
lhpte.dw1.dword1 = 0;
......@@ -348,7 +333,6 @@ long pSeries_lpar_hpte_insert(unsigned long hpte_group,
/* Now fill in the actual HPTE */
/* Set CEC cookie to 0 */
/* Large page = 0 */
/* Zero page = 0 */
/* I-cache Invalidate = 0 */
/* I-cache synchronize = 0 */
......@@ -359,19 +343,8 @@ long pSeries_lpar_hpte_insert(unsigned long hpte_group,
if (hpteflags & (_PAGE_GUARDED|_PAGE_NO_CACHE))
lhpte.dw1.flags.flags &= ~_PAGE_COHERENT;
__asm__ __volatile__ (
H_ENTER_r3
"mr 4, %2\n"
"mr 5, %3\n"
"mr 6, %4\n"
"mr 7, %5\n"
HSC
"mr %0, 3\n"
"mr %1, 4\n"
: "=r" (lpar_rc), "=r" (slot)
: "r" (flags), "r" (hpte_group), "r" (lhpte.dw0.dword0),
"r" (lhpte.dw1.dword1)
: "r3", "r4", "r5", "r6", "r7", "cc");
lpar_rc = plpar_hcall(H_ENTER, flags, hpte_group, lhpte.dw0.dword0,
lhpte.dw1.dword1, &slot, &dummy0, &dummy1);
if (lpar_rc == H_PTEG_Full)
return -1;
......
......@@ -427,6 +427,7 @@ unsigned long __init find_and_init_phbs(void)
void pcibios_name_device(struct pci_dev *dev)
{
#if 0
struct device_node *dn;
/*
......@@ -446,6 +447,7 @@ void pcibios_name_device(struct pci_dev *dev)
}
}
}
#endif
}
void __init pcibios_fixup_device_resources(struct pci_dev *dev,
......
......@@ -49,8 +49,6 @@
extern int sys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg);
extern int do_signal(sigset_t *, struct pt_regs *);
extern int register_ioctl32_conversion(unsigned int cmd, int (*handler)(unsigned int, unsigned int, unsigned long, struct file *));
extern int unregister_ioctl32_conversion(unsigned int cmd);
int abs(int);
......@@ -66,9 +64,6 @@ EXPORT_SYMBOL(disable_irq_nosync);
EXPORT_SYMBOL(synchronize_irq);
#endif /* CONFIG_SMP */
EXPORT_SYMBOL(register_ioctl32_conversion);
EXPORT_SYMBOL(unregister_ioctl32_conversion);
EXPORT_SYMBOL(isa_io_base);
EXPORT_SYMBOL(pci_io_base);
......
......@@ -32,6 +32,7 @@
#include <linux/init_task.h>
#include <linux/prctl.h>
#include <linux/ptrace.h>
#include <linux/kallsyms.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
......@@ -130,12 +131,9 @@ struct task_struct *__switch_to(struct task_struct *prev,
return last;
}
char *ppc_find_proc_name(unsigned *p, char *buf, unsigned buflen);
void show_regs(struct pt_regs * regs)
{
int i;
char name_buf[256];
printk("NIP: %016lX XER: %016lX LR: %016lX\n",
regs->nip, regs->xer, regs->link);
......@@ -170,8 +168,7 @@ void show_regs(struct pt_regs * regs)
* above info out without failing
*/
printk("NIP [%016lx] ", regs->nip);
printk("%s\n", ppc_find_proc_name((unsigned *)regs->nip,
name_buf, 256));
print_symbol("%s\n", regs->nip);
show_stack(current, (unsigned long *)regs->gpr[1]);
}
......@@ -385,102 +382,6 @@ int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
return error;
}
void initialize_paca_hardware_interrupt_stack(void)
{
int i;
unsigned long stack;
unsigned long end_of_stack =0;
for (i=1; i < NR_CPUS; i++) {
if (!cpu_possible(i))
continue;
/* Carve out storage for the hardware interrupt stack */
stack = __get_free_pages(GFP_ATOMIC, get_order(8*PAGE_SIZE));
if ( !stack ) {
printk("ERROR, cannot find space for hardware stack.\n");
panic(" no hardware stack ");
}
/* Store the stack value in the PACA for the processor */
paca[i].xHrdIntStack = stack + (8*PAGE_SIZE) - STACK_FRAME_OVERHEAD;
paca[i].xHrdIntCount = 0;
}
/*
* __get_free_pages() might give us a page > KERNBASE+256M which
* is mapped with large ptes so we can't set up the guard page.
*/
if (cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE)
return;
for (i=0; i < NR_CPUS; i++) {
if (!cpu_possible(i))
continue;
/* set page at the top of stack to be protected - prevent overflow */
end_of_stack = paca[i].xHrdIntStack - (8*PAGE_SIZE - STACK_FRAME_OVERHEAD);
ppc_md.hpte_updateboltedpp(PP_RXRX,end_of_stack);
}
}
char *ppc_find_proc_name(unsigned *p, char *buf, unsigned buflen)
{
unsigned long tb_flags;
unsigned short name_len;
unsigned long tb_start, code_start, code_ptr, code_offset;
unsigned int code_len;
unsigned long end;
strcpy(buf, "Unknown");
code_ptr = (unsigned long)p;
code_offset = 0;
/* handle functions in text and init sections */
if (((unsigned long)p >= (unsigned long)_stext) &&
((unsigned long)p < (unsigned long)_etext))
end = (unsigned long)_etext;
else if (((unsigned long)p >= (unsigned long)__init_begin) &&
((unsigned long)p < (unsigned long)__init_end))
end = (unsigned long)__init_end;
else
return buf;
while ((unsigned long)p < end) {
if (*p == 0) {
tb_start = (unsigned long)p;
++p; /* Point to traceback flags */
tb_flags = *((unsigned long *)p);
p += 2; /* Skip over traceback flags */
if (tb_flags & TB_NAME_PRESENT) {
if (tb_flags & TB_PARMINFO)
++p; /* skip over parminfo data */
if (tb_flags & TB_HAS_TBOFF) {
code_len = *p; /* get code length */
code_start = tb_start - code_len;
code_offset = code_ptr - code_start + 1;
if (code_offset > 0x100000)
break;
++p; /* skip over code size */
}
name_len = *((unsigned short *)p);
if (name_len > (buflen-20))
name_len = buflen-20;
memcpy(buf, ((char *)p)+2, name_len);
buf[name_len] = 0;
if (code_offset)
sprintf(buf+name_len, "+0x%lx",
code_offset-1);
}
break;
}
++p;
}
return buf;
}
/*
* These bracket the sleeping functions..
*/
......@@ -520,7 +421,6 @@ void show_stack(struct task_struct *p, unsigned long *_sp)
unsigned long ip;
unsigned long stack_page = (unsigned long)p->thread_info;
int count = 0;
char name_buf[256];
unsigned long sp = (unsigned long)_sp;
if (!p)
......@@ -539,8 +439,7 @@ void show_stack(struct task_struct *p, unsigned long *_sp)
if (__get_user(ip, (unsigned long *)(sp + 16)))
break;
printk("[%016lx] ", ip);
printk("%s\n", ppc_find_proc_name((unsigned *)ip,
name_buf, 256));
print_symbol("%s\n", ip);
} while (count++ < 32);
}
......
......@@ -24,6 +24,7 @@
#include <asm/machdep.h>
#include <asm/paca.h>
#include <asm/page.h>
#include <asm/param.h>
#include <asm/system.h>
#include <asm/abs_addr.h>
#include <asm/udbg.h>
......@@ -178,6 +179,26 @@ rtas_call(int token, int nargs, int nret,
return (ulong)((nret > 0) ? rtas_args->rets[0] : 0);
}
/* Given an RTAS status code of 990n compute the hinted delay of 10^n
* (last digit) milliseconds. For now we bound at n=3 (1 sec).
*/
unsigned int
rtas_extended_busy_delay_time(int status)
{
int order = status - 9900;
unsigned int ms;
if (order < 0)
order = 0; /* RTC depends on this for -2 clock busy */
else if (order > 3)
order = 3; /* bound */
/* Use microseconds for reasonable accuracy */
for (ms = 1000; order > 0; order--)
ms = ms * 10;
return ms / (1000000/HZ); /* round down is fine */
}
#define FLASH_BLOCK_LIST_VERSION (1UL)
static void
rtas_flash_firmware(void)
......
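For reference, a minimal sketch of the caller-side retry pattern that the time.c changes below adopt together with the new rtas_extended_busy_delay_time() helper and the rtas_is_extended_busy() test added to include/asm-ppc64/rtas.h further down. demo_read_tod(), MAX_RTC_WAIT and the include list are assumptions for illustration, not part of this commit.

#include <linux/delay.h>
#include <asm/rtas.h>
#include <asm/time.h>

#define RTAS_CLOCK_BUSY	(-2)
#define MAX_RTC_WAIT	5000		/* 5 sec, as in the time.c hunk below */

/* Hypothetical helper: read the RTAS time-of-day, retrying while the
 * clock reports busy, bounded by MAX_RTC_WAIT worth of timebase ticks. */
static int demo_read_tod(unsigned long ret[8])
{
	int error, wait_time;
	unsigned long max_wait_tb;

	max_wait_tb = __get_tb() + tb_ticks_per_usec * 1000 * MAX_RTC_WAIT;
	do {
		error = rtas_call(rtas_token("get-time-of-day"), 0, 8,
				  (void *)ret);
		if (error == RTAS_CLOCK_BUSY || rtas_is_extended_busy(error)) {
			/* The helper returns a hinted delay in jiffies; a
			 * boot-time caller spins instead of sleeping. */
			wait_time = rtas_extended_busy_delay_time(error);
			udelay(wait_time * 1000);
			error = RTAS_CLOCK_BUSY;
		}
	} while (error == RTAS_CLOCK_BUSY && __get_tb() < max_wait_tb);

	return error;	/* 0 on success, RTAS status otherwise */
}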
......@@ -35,6 +35,7 @@
#include <linux/spinlock.h>
#include <linux/bcd.h>
#include <asm/hardirq.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/system.h>
......@@ -340,20 +341,63 @@ void iSeries_get_boot_time(struct rtc_time *tm)
#endif
#ifdef CONFIG_PPC_PSERIES
void pSeries_get_rtc_time(struct rtc_time *rtc_tm)
#define MAX_RTC_WAIT 5000 /* 5 sec */
#define RTAS_CLOCK_BUSY (-2)
void pSeries_get_boot_time(struct rtc_time *rtc_tm)
{
unsigned long ret[8];
int error;
int count;
int error, wait_time;
unsigned long max_wait_tb;
/*
* error -2 is clock busy, we keep retrying a few times to see
* if it will come good -- paulus
*/
max_wait_tb = __get_tb() + tb_ticks_per_usec * 1000 * MAX_RTC_WAIT;
do {
error = rtas_call(rtas_token("get-time-of-day"), 0, 8, (void *)&ret);
if (error == RTAS_CLOCK_BUSY || rtas_is_extended_busy(error)) {
wait_time = rtas_extended_busy_delay_time(error);
/* This is boot time so we spin. */
udelay(wait_time*1000);
error = RTAS_CLOCK_BUSY;
}
} while (error == RTAS_CLOCK_BUSY && (__get_tb() < max_wait_tb));
if (error != 0) {
printk(KERN_WARNING "error: reading the clock failed (%d)\n",
error);
return;
}
rtc_tm->tm_sec = ret[5];
rtc_tm->tm_min = ret[4];
rtc_tm->tm_hour = ret[3];
rtc_tm->tm_mday = ret[2];
rtc_tm->tm_mon = ret[1] - 1;
rtc_tm->tm_year = ret[0] - 1900;
}
/* NOTE: get_rtc_time will get an error if executed in interrupt context
* and if a delay is needed to read the clock. In this case we just
* silently return without updating rtc_tm.
*/
count = 0;
void pSeries_get_rtc_time(struct rtc_time *rtc_tm)
{
unsigned long ret[8];
int error, wait_time;
unsigned long max_wait_tb;
max_wait_tb = __get_tb() + tb_ticks_per_usec * 1000 * MAX_RTC_WAIT;
do {
error = rtas_call(rtas_token("get-time-of-day"), 0, 8, (void *)&ret);
} while (error == -2 && ++count < 1000);
if (error == RTAS_CLOCK_BUSY || rtas_is_extended_busy(error)) {
if (in_interrupt()) {
printk(KERN_WARNING "error: reading clock would delay interrupt\n");
return; /* delay not allowed */
}
wait_time = rtas_extended_busy_delay_time(error);
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(wait_time);
error = RTAS_CLOCK_BUSY;
}
} while (error == RTAS_CLOCK_BUSY && (__get_tb() < max_wait_tb));
if (error != 0) {
printk(KERN_WARNING "error: reading the clock failed (%d)\n",
......@@ -371,20 +415,24 @@ void pSeries_get_rtc_time(struct rtc_time *rtc_tm)
int pSeries_set_rtc_time(struct rtc_time *tm)
{
int error;
int count;
int error, wait_time;
unsigned long max_wait_tb;
/*
* error -2 is clock busy, we keep retrying a few times to see
* if it will come good -- paulus
*/
count = 0;
max_wait_tb = __get_tb() + tb_ticks_per_usec * 1000 * MAX_RTC_WAIT;
do {
error = rtas_call(rtas_token("set-time-of-day"), 7, 1, NULL,
tm->tm_year + 1900, tm->tm_mon + 1,
tm->tm_mday, tm->tm_hour, tm->tm_min,
tm->tm_sec, 0);
} while (error == -2 && ++count < 1000);
if (error == RTAS_CLOCK_BUSY || rtas_is_extended_busy(error)) {
if (in_interrupt())
return 1; /* probably decrementer */
wait_time = rtas_extended_busy_delay_time(error);
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(wait_time);
error = RTAS_CLOCK_BUSY;
}
} while (error == RTAS_CLOCK_BUSY && (__get_tb() < max_wait_tb));
if (error != 0)
printk(KERN_WARNING "error: setting the clock failed (%d)\n",
......
......@@ -75,9 +75,8 @@ void __down(struct semaphore *sem)
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
tsk->state = TASK_UNINTERRUPTIBLE;
__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
add_wait_queue_exclusive(&sem->wait, &wait);
smp_wmb();
/*
* Try to get the semaphore. If the count is > 0, then we've
......@@ -87,10 +86,10 @@ void __down(struct semaphore *sem)
*/
while (__sem_update_count(sem, -1) <= 0) {
schedule();
tsk->state = TASK_UNINTERRUPTIBLE;
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
}
remove_wait_queue(&sem->wait, &wait);
tsk->state = TASK_RUNNING;
__set_task_state(tsk, TASK_RUNNING);
/*
* If there are any more sleepers, wake one of them up so
......@@ -106,9 +105,8 @@ int __down_interruptible(struct semaphore * sem)
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
tsk->state = TASK_INTERRUPTIBLE;
__set_task_state(tsk, TASK_INTERRUPTIBLE);
add_wait_queue_exclusive(&sem->wait, &wait);
smp_wmb();
while (__sem_update_count(sem, -1) <= 0) {
if (signal_pending(current)) {
......@@ -122,10 +120,11 @@ int __down_interruptible(struct semaphore * sem)
break;
}
schedule();
tsk->state = TASK_INTERRUPTIBLE;
set_task_state(tsk, TASK_INTERRUPTIBLE);
}
tsk->state = TASK_RUNNING;
remove_wait_queue(&sem->wait, &wait);
__set_task_state(tsk, TASK_RUNNING);
wake_up(&sem->wait);
return retval;
}
......@@ -221,15 +221,18 @@ void make_slbe(unsigned long esid, unsigned long vsid, int large,
}
static inline void __ste_allocate(unsigned long esid, unsigned long vsid,
int kernel_segment)
int kernel_segment, mm_context_t context)
{
if (cur_cpu_spec->cpu_features & CPU_FTR_SLB) {
int large = 0;
#ifndef CONFIG_PPC_ISERIES
if (REGION_ID(esid << SID_SHIFT) == KERNEL_REGION_ID)
make_slbe(esid, vsid, 1, kernel_segment);
else
large = 1;
else if (REGION_ID(esid << SID_SHIFT) == USER_REGION_ID)
large = in_hugepage_area(context, esid << SID_SHIFT);
#endif
make_slbe(esid, vsid, 0, kernel_segment);
make_slbe(esid, vsid, large, kernel_segment);
} else {
unsigned char top_entry, stab_entry, *segments;
......@@ -255,6 +258,7 @@ int ste_allocate(unsigned long ea)
{
unsigned long vsid, esid;
int kernel_segment = 0;
mm_context_t context;
PMC_SW_PROCESSOR(stab_faults);
......@@ -266,16 +270,18 @@ int ste_allocate(unsigned long ea)
if (REGION_ID(ea) >= KERNEL_REGION_ID) {
kernel_segment = 1;
vsid = get_kernel_vsid(ea);
context = REGION_ID(ea);
} else {
struct mm_struct *mm = current->mm;
if (mm)
vsid = get_vsid(mm->context, ea);
else
if (! current->mm)
return 1;
context = current->mm->context;
vsid = get_vsid(context, ea);
}
esid = GET_ESID(ea);
__ste_allocate(esid, vsid, kernel_segment);
__ste_allocate(esid, vsid, kernel_segment, context);
if (!(cur_cpu_spec->cpu_features & CPU_FTR_SLB)) {
/* Order update */
asm volatile("sync":::"memory");
......@@ -302,7 +308,7 @@ static void preload_stab(struct task_struct *tsk, struct mm_struct *mm)
for (esid = 0; esid < 16; esid++) {
unsigned long ea = esid << SID_SHIFT;
vsid = get_vsid(mm->context, ea);
__ste_allocate(esid, vsid, 0);
__ste_allocate(esid, vsid, 0, mm->context);
}
} else {
unsigned long pc = KSTK_EIP(tsk);
......@@ -316,7 +322,7 @@ static void preload_stab(struct task_struct *tsk, struct mm_struct *mm)
(REGION_ID(pc) >= KERNEL_REGION_ID))
return;
vsid = get_vsid(mm->context, pc);
__ste_allocate(GET_ESID(pc), vsid, 0);
__ste_allocate(GET_ESID(pc), vsid, 0, mm->context);
}
if (stack && (pc_segment != stack_segment)) {
......@@ -324,7 +330,7 @@ static void preload_stab(struct task_struct *tsk, struct mm_struct *mm)
(REGION_ID(stack) >= KERNEL_REGION_ID))
return;
vsid = get_vsid(mm->context, stack);
__ste_allocate(GET_ESID(stack), vsid, 0);
__ste_allocate(GET_ESID(stack), vsid, 0, mm->context);
}
}
......
......@@ -1243,16 +1243,19 @@ asmlinkage long sys32_settimeofday(struct compat_timeval *tv, struct timezone *t
}
struct msgbuf32 { s32 mtype; char mtext[1]; };
struct msgbuf32 {
compat_long_t mtype;
char mtext[1];
};
struct semid_ds32 {
struct ipc_perm sem_perm;
compat_time_t sem_otime;
compat_time_t sem_ctime;
u32 sem_base;
u32 sem_pending;
u32 sem_pending_last;
u32 undo;
compat_uptr_t sem_base;
compat_uptr_t sem_pending;
compat_uptr_t sem_pending_last;
compat_uptr_t undo;
unsigned short sem_nsems;
};
......@@ -1262,21 +1265,20 @@ struct semid64_ds32 {
compat_time_t sem_otime;
unsigned int __unused2;
compat_time_t sem_ctime;
u32 sem_nsems;
u32 __unused3;
u32 __unused4;
compat_ulong_t sem_nsems;
compat_ulong_t __unused3;
compat_ulong_t __unused4;
};
struct msqid_ds32
{
struct msqid_ds32 {
struct ipc_perm msg_perm;
u32 msg_first;
u32 msg_last;
compat_uptr_t msg_first;
compat_uptr_t msg_last;
compat_time_t msg_stime;
compat_time_t msg_rtime;
compat_time_t msg_ctime;
u32 msg_lcbytes;
u32 msg_lqbytes;
compat_ulong_t msg_lcbytes;
compat_ulong_t msg_lqbytes;
unsigned short msg_cbytes;
unsigned short msg_qnum;
unsigned short msg_qbytes;
......@@ -1292,13 +1294,13 @@ struct msqid64_ds32 {
compat_time_t msg_rtime;
unsigned int __unused3;
compat_time_t msg_ctime;
unsigned int msg_cbytes;
unsigned int msg_qnum;
unsigned int msg_qbytes;
compat_ulong_t msg_cbytes;
compat_ulong_t msg_qnum;
compat_ulong_t msg_qbytes;
compat_pid_t msg_lspid;
compat_pid_t msg_lrpid;
unsigned int __unused4;
unsigned int __unused5;
compat_ulong_t __unused4;
compat_ulong_t __unused5;
};
struct shmid_ds32 {
......@@ -1311,8 +1313,8 @@ struct shmid_ds32 {
compat_ipc_pid_t shm_lpid;
unsigned short shm_nattch;
unsigned short __unused;
unsigned int __unused2;
unsigned int __unused3;
compat_uptr_t __unused2;
compat_uptr_t __unused3;
};
struct shmid64_ds32 {
......@@ -1327,9 +1329,9 @@ struct shmid64_ds32 {
compat_size_t shm_segsz;
compat_pid_t shm_cpid;
compat_pid_t shm_lpid;
unsigned int shm_nattch;
unsigned int __unused5;
unsigned int __unused6;
compat_ulong_t shm_nattch;
compat_ulong_t __unused5;
compat_ulong_t __unused6;
};
/*
......@@ -1350,7 +1352,7 @@ static long do_sys32_semctl(int first, int second, int third, void *uptr)
err = -EFAULT;
if (get_user(pad, (u32 *)uptr))
return err;
if (third == SETVAL)
if ((third & ~IPC_64) == SETVAL)
fourth.val = (int)pad;
else
fourth.__pad = (void *)A(pad);
......
/*
* linux/arch/ppc/kernel/sys_ppc.c
* linux/arch/ppc64/kernel/sys_ppc.c
*
* PowerPC version
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
......@@ -40,7 +40,6 @@
#include <asm/uaccess.h>
#include <asm/ipc.h>
#include <asm/semaphore.h>
#include <asm/ppcdebug.h>
#include <asm/time.h>
extern unsigned long wall_jiffies;
......@@ -79,6 +78,7 @@ sys_ipc (uint call, int first, int second, long third, void *ptr, long fifth)
case SEMCTL: {
union semun fourth;
ret = -EINVAL;
if (!ptr)
break;
if ((ret = get_user(fourth.__pad, (void **)ptr)))
......@@ -94,6 +94,7 @@ sys_ipc (uint call, int first, int second, long third, void *ptr, long fifth)
case 0: {
struct ipc_kludge tmp;
ret = -EINVAL;
if (!ptr)
break;
if ((ret = copy_from_user(&tmp,
......@@ -127,6 +128,7 @@ sys_ipc (uint call, int first, int second, long third, void *ptr, long fifth)
break;
}
case 1: /* iBCS2 emulator entry point */
ret = -EINVAL;
if (!segment_eq(get_fs(), get_ds()))
break;
ret = sys_shmat (first, (char *) ptr, second,
......
......@@ -137,4 +137,9 @@ SECTIONS
. = ALIGN(4096);
_end = . ;
PROVIDE (end = .);
/* Sections to be discarded. */
/DISCARD/ : {
*(.exitcall.exit)
}
}
......@@ -267,6 +267,15 @@ void xics_disable_irq(u_int virq)
irq, call_status);
return;
}
/* Have to set XIVE to 0xff to be able to remove a slot */
call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, default_server,
0xff);
if (call_status != 0) {
printk("xics_disable_irq: irq=%x: ibm_set_xive(0xff) returned %lx\n",
irq, call_status);
return;
}
}
void xics_end_irq(u_int irq)
......@@ -375,12 +384,12 @@ void xics_init_IRQ(void)
int i;
unsigned long intr_size = 0;
struct device_node *np;
uint *ireg, ilen, indx=0;
uint *ireg, ilen, indx = 0;
unsigned long intr_base = 0;
struct xics_interrupt_node {
unsigned long long addr;
unsigned long long size;
} inodes[NR_CPUS*2];
unsigned long addr;
unsigned long size;
} inodes[NR_CPUS];
ppc64_boot_msg(0x20, "XICS Init");
......
......@@ -6,3 +6,4 @@ EXTRA_CFLAGS += -mno-minimal-toc
obj-y := fault.o init.o extable.o imalloc.o
obj-$(CONFIG_DISCONTIGMEM) += numa.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
......@@ -46,8 +46,10 @@ int debugger_kernel_faults = 1;
void bad_page_fault(struct pt_regs *, unsigned long, int);
/*
* For 600- and 800-family processors, the error_code parameter is DSISR
* for a data fault, SRR1 for an instruction fault.
* The error_code parameter is
* - DSISR for a non-SLB data access fault,
* - SRR1 & 0x08000000 for a non-SLB instruction access fault
* - 0 for any SLB fault.
*/
void do_page_fault(struct pt_regs *regs, unsigned long address,
unsigned long error_code)
......@@ -58,17 +60,6 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
unsigned long code = SEGV_MAPERR;
unsigned long is_write = error_code & 0x02000000;
/*
* Fortunately the bit assignments in SRR1 for an instruction
* fault and DSISR for a data fault are mostly the same for the
* bits we are interested in. But there are some bits which
* indicate errors in DSISR but can validly be set in SRR1.
*/
if (regs->trap == 0x400)
error_code &= 0x48200000;
else if (regs->trap != 0x300) /* ensure error_code is 0 on SLB miss */
error_code = 0;
#ifdef CONFIG_DEBUG_KERNEL
if (debugger_fault_handler && (regs->trap == 0x300 ||
regs->trap == 0x380)) {
......
......@@ -290,7 +290,7 @@ flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
if (!pgd_none(*pgd)) {
pmd = pmd_offset(pgd, vmaddr);
if (!pmd_none(*pmd)) {
if (pmd_present(*pmd)) {
ptep = pte_offset_kernel(pmd, vmaddr);
/* Check if HPTE might exist and flush it if so */
pte = __pte(pte_update(ptep, _PAGE_HPTEFLAGS, 0));
......@@ -298,6 +298,7 @@ flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
flush_hash_page(context, vmaddr, pte, local);
}
}
WARN_ON(pmd_hugepage(*pmd));
}
}
......@@ -348,7 +349,7 @@ __flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
pmd_end = (start + PMD_SIZE) & PMD_MASK;
if (pmd_end > end)
pmd_end = end;
if (!pmd_none(*pmd)) {
if (pmd_present(*pmd)) {
ptep = pte_offset_kernel(pmd, start);
do {
if (pte_val(*ptep) & _PAGE_HASHPTE) {
......@@ -367,6 +368,7 @@ __flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
++ptep;
} while (start < pmd_end);
} else {
WARN_ON(pmd_hugepage(*pmd));
start = pmd_end;
}
++pmd;
......@@ -540,8 +542,6 @@ static int __init setup_kcore(void)
}
module_init(setup_kcore);
void initialize_paca_hardware_interrupt_stack(void);
void __init mem_init(void)
{
#ifndef CONFIG_DISCONTIGMEM
......@@ -608,9 +608,6 @@ void __init mem_init(void)
#endif
mem_init_done = 1;
/* set the last page of each hardware interrupt stack to be protected */
initialize_paca_hardware_interrupt_stack();
#ifdef CONFIG_PPC_ISERIES
create_virtual_bus_tce_table();
#endif
......
......@@ -24,18 +24,21 @@
int numa_cpu_lookup_table[NR_CPUS] = { [ 0 ... (NR_CPUS - 1)] = -1};
int numa_memory_lookup_table[MAX_MEMORY >> MEMORY_INCREMENT_SHIFT] =
{ [ 0 ... ((MAX_MEMORY >> MEMORY_INCREMENT_SHIFT) - 1)] = -1};
unsigned long numa_cpumask_lookup_table[MAX_NUMNODES];
cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
int nr_cpus_in_node[MAX_NUMNODES] = { [0 ... (MAX_NUMNODES -1)] = 0};
struct pglist_data node_data[MAX_NUMNODES];
bootmem_data_t plat_node_bdata[MAX_NUMNODES];
EXPORT_SYMBOL(node_data);
EXPORT_SYMBOL(numa_memory_lookup_table);
static inline void map_cpu_to_node(int cpu, int node)
{
dbg("cpu %d maps to domain %d\n", cpu, node);
numa_cpu_lookup_table[cpu] = node;
if (!(numa_cpumask_lookup_table[node] & 1UL << cpu)) {
numa_cpumask_lookup_table[node] |= 1UL << cpu;
if (!(cpu_isset(cpu, numa_cpumask_lookup_table[node]))) {
cpu_set(cpu, numa_cpumask_lookup_table[node]);
nr_cpus_in_node[node]++;
}
}
......
......@@ -59,9 +59,6 @@
#define H_XIRR 0x74
#define H_PERFMON 0x7c
#define HSC ".long 0x44000022\n"
#define H_ENTER_r3 "li 3, 0x08\n"
/* plpar_hcall() -- Generic call interface using above opcodes
*
* The actual call interface is a hypervisor call instruction with
......
......@@ -42,11 +42,21 @@
((nr) << _IOC_NRSHIFT) | \
((size) << _IOC_SIZESHIFT))
/* provoke compile error for invalid uses of size argument */
extern int __invalid_size_argument_for_IOC;
#define _IOC_TYPECHECK(t) \
((sizeof(t) == sizeof(t[1]) && \
sizeof(t) < (1 << _IOC_SIZEBITS)) ? \
sizeof(t) : __invalid_size_argument_for_IOC)
/* used to create numbers */
#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size))
#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size))
#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),(_IOC_TYPECHECK(size)))
#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size)))
#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size)))
#define _IOR_BAD(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size))
#define _IOW_BAD(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size))
#define _IOWR_BAD(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
/* used to decode them.. */
#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
......
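The _IOC_TYPECHECK() addition above turns a common misuse of the ioctl number macros into a build failure. A hedged example of what the check catches; struct demo_arg and the 'd' command numbers are invented for this illustration.

#include <linux/ioctl.h>

struct demo_arg {
	int value;
};

/* Correct usage: pass the type itself; sizeof(struct demo_arg) is
 * encoded into the ioctl number. */
#define DEMO_IOC_GET	_IOR('d', 0x01, struct demo_arg)

/* Classic mistake: passing sizeof(...) instead of the type. The old
 * macros silently encoded sizeof(sizeof(struct demo_arg)), i.e. the
 * size of a size_t. With _IOC_TYPECHECK() the inner sizeof(t[1]) is
 * ill-formed for a non-type argument, so the error surfaces at build
 * time; the never-defined __invalid_size_argument_for_IOC covers the
 * remaining bad cases, such as pointer types.
 *
 * #define DEMO_IOC_BAD	_IOR('d', 0x02, sizeof(struct demo_arg))
 */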
#include <asm-generic/local.h>
......@@ -18,6 +18,12 @@
/* Default "unsigned long" context */
typedef unsigned long mm_context_t;
#ifdef CONFIG_HUGETLB_PAGE
#define CONTEXT_LOW_HPAGES (1UL<<63)
#else
#define CONTEXT_LOW_HPAGES 0
#endif
/*
* Define the size of the cache used for segment table entries. The first
* entry is used as a cache pointer, therefore the actual number of entries
......
......@@ -127,7 +127,8 @@ destroy_context(struct mm_struct *mm)
#endif
mmu_context_queue.size++;
mmu_context_queue.elements[index] = mm->context;
mmu_context_queue.elements[index] =
mm->context & ~CONTEXT_LOW_HPAGES;
spin_unlock_irqrestore(&mmu_context_queue.lock, flags);
}
......@@ -189,6 +190,8 @@ get_vsid( unsigned long context, unsigned long ea )
{
unsigned long ordinal, vsid;
context &= ~CONTEXT_LOW_HPAGES;
ordinal = (((ea >> 28) & 0x1fffff) * LAST_USER_CONTEXT) | context;
vsid = (ordinal * VSID_RANDOMIZER) & VSID_MASK;
......
......@@ -20,7 +20,7 @@ extern struct pglist_data node_data[];
extern int numa_cpu_lookup_table[];
extern int numa_memory_lookup_table[];
extern unsigned long numa_cpumask_lookup_table[];
extern cpumask_t numa_cpumask_lookup_table[];
extern int nr_cpus_in_node[];
#define MAX_MEMORY (1UL << 41)
......
......@@ -63,7 +63,7 @@ struct paca_struct {
u16 xPacaIndex; /* Logical processor number 0x18 */
u16 active; /* Is this cpu active? 0x1a */
u32 default_decr; /* Default decrementer value 0x1c */
u64 xHrdIntStack; /* Stack for hardware interrupts 0x20 */
u64 unused1;
u64 xKsave; /* Saved Kernel stack addr or zero 0x28 */
u64 pvr; /* Processor version register 0x30 */
u8 *exception_sp; /* 0x38 */
......@@ -73,7 +73,7 @@ struct paca_struct {
STAB xStab_data; /* Segment table information 0x50,0x58,0x60 */
u8 xSegments[STAB_CACHE_SIZE]; /* Cache of used stab entries 0x68,0x70 */
u8 xProcEnabled; /* 1=soft enabled 0x78 */
u8 xHrdIntCount; /* Count of active hardware interrupts 0x79 */
u8 unused2;
u8 prof_enabled; /* 1=iSeries profiling enabled 0x7A */
u8 stab_cache_pointer;
u8 resv1[4]; /* 0x7B-0x7F */
......
......@@ -22,6 +22,39 @@
#define PAGE_MASK (~(PAGE_SIZE-1))
#define PAGE_OFFSET_MASK (PAGE_SIZE-1)
#ifdef CONFIG_HUGETLB_PAGE
#define HPAGE_SHIFT 24
#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
/* For 64-bit processes the hugepage range is 1T-1.5T */
#define TASK_HPAGE_BASE (0x0000010000000000UL)
#define TASK_HPAGE_END (0x0000018000000000UL)
/* For 32-bit processes the hugepage range is 2-3G */
#define TASK_HPAGE_BASE_32 (0x80000000UL)
#define TASK_HPAGE_END_32 (0xc0000000UL)
#define ARCH_HAS_HUGEPAGE_ONLY_RANGE
#define is_hugepage_only_range(addr, len) \
( ((addr > (TASK_HPAGE_BASE-len)) && (addr < TASK_HPAGE_END)) || \
((current->mm->context & CONTEXT_LOW_HPAGES) && \
(addr > (TASK_HPAGE_BASE_32-len)) && (addr < TASK_HPAGE_END_32)) )
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#define in_hugepage_area(context, addr) \
((cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE) && \
((((addr) >= TASK_HPAGE_BASE) && ((addr) < TASK_HPAGE_END)) || \
(((context) & CONTEXT_LOW_HPAGES) && \
(((addr) >= TASK_HPAGE_BASE_32) && ((addr) < TASK_HPAGE_END_32)))))
#else /* !CONFIG_HUGETLB_PAGE */
#define in_hugepage_area(mm, addr) 0
#endif /* !CONFIG_HUGETLB_PAGE */
#define SID_SHIFT 28
#define SID_MASK 0xfffffffff
#define GET_ESID(x) (((x) >> SID_SHIFT) & SID_MASK)
......
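The window constants above define two reserved ranges: 1T-1.5T for 64-bit processes, and 2G-3G for 32-bit processes whose context has CONTEXT_LOW_HPAGES set. A small standalone sketch restating in_hugepage_area() as a plain function; the demo_* names are invented, and the real macro additionally requires CPU_FTR_16M_PAGE.

#include <stdbool.h>

/* Constants copied from the definitions above. */
#define DEMO_HPAGE_BASE		0x0000010000000000UL	/* 1T   */
#define DEMO_HPAGE_END		0x0000018000000000UL	/* 1.5T */
#define DEMO_HPAGE_BASE_32	0x80000000UL		/* 2G   */
#define DEMO_HPAGE_END_32	0xc0000000UL		/* 3G   */

/* Invented helper, not kernel API: does addr fall inside a hugepage
 * window?  low_hpages mirrors the CONTEXT_LOW_HPAGES bit in the context. */
static bool demo_in_hugepage_area(unsigned long addr, bool low_hpages)
{
	if (addr >= DEMO_HPAGE_BASE && addr < DEMO_HPAGE_END)
		return true;
	if (low_hpages && addr >= DEMO_HPAGE_BASE_32 &&
	    addr < DEMO_HPAGE_END_32)
		return true;
	return false;
}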
......@@ -149,6 +149,25 @@ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
/* shift to put page number into pte */
#define PTE_SHIFT (16)
/* We allow 2^41 bytes of real memory, so we need 29 bits in the PMD
* to give the PTE page number. The bottom two bits are for flags. */
#define PMD_TO_PTEPAGE_SHIFT (2)
#ifdef CONFIG_HUGETLB_PAGE
#define _PMD_HUGEPAGE 0x00000001U
#define HUGEPTE_BATCH_SIZE (1<<(HPAGE_SHIFT-PMD_SHIFT))
int hash_huge_page(struct mm_struct *mm, unsigned long access,
unsigned long ea, unsigned long vsid, int local);
#define HAVE_ARCH_UNMAPPED_AREA
#else
#define hash_huge_page(mm,a,ea,vsid,local) -1
#define _PMD_HUGEPAGE 0
#endif
#ifndef __ASSEMBLY__
/*
......@@ -178,12 +197,16 @@ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
#define pte_pfn(x) ((unsigned long)((pte_val(x) >> PTE_SHIFT)))
#define pte_page(x) pfn_to_page(pte_pfn(x))
#define pmd_set(pmdp, ptep) (pmd_val(*(pmdp)) = (__ba_to_bpn(ptep)))
#define pmd_set(pmdp, ptep) \
(pmd_val(*(pmdp)) = (__ba_to_bpn(ptep) << PMD_TO_PTEPAGE_SHIFT))
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_bad(pmd) ((pmd_val(pmd)) == 0)
#define pmd_present(pmd) ((pmd_val(pmd)) != 0)
#define pmd_hugepage(pmd) (!!(pmd_val(pmd) & _PMD_HUGEPAGE))
#define pmd_bad(pmd) (((pmd_val(pmd)) == 0) || pmd_hugepage(pmd))
#define pmd_present(pmd) ((!pmd_hugepage(pmd)) \
&& (pmd_val(pmd) & ~_PMD_HUGEPAGE) != 0)
#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0)
#define pmd_page_kernel(pmd) (__bpn_to_ba(pmd_val(pmd)))
#define pmd_page_kernel(pmd) \
(__bpn_to_ba(pmd_val(pmd) >> PMD_TO_PTEPAGE_SHIFT))
#define pmd_page(pmd) virt_to_page(pmd_page_kernel(pmd))
#define pgd_set(pgdp, pmdp) (pgd_val(*(pgdp)) = (__ba_to_bpn(pmdp)))
#define pgd_none(pgd) (!pgd_val(pgd))
......
......@@ -183,8 +183,6 @@ extern struct device_node *find_type_devices(const char *type);
extern struct device_node *find_path_device(const char *path);
extern struct device_node *find_compatible_devices(const char *type,
const char *compat);
extern struct device_node *find_pci_device_OFnode(unsigned char bus,
unsigned char dev_fn);
extern struct device_node *find_all_nodes(void);
extern int device_is_compatible(struct device_node *device, const char *);
extern int machine_is_compatible(const char *compat);
......
......@@ -166,6 +166,13 @@ extern void rtas_restart(char *cmd);
extern void rtas_power_off(void);
extern void rtas_halt(void);
/* Given an RTAS status code of 9900..9905 compute the hinted delay */
unsigned int rtas_extended_busy_delay_time(int status);
static inline int rtas_is_extended_busy(int status)
{
return status >= 9900 && status <= 9909;
}
/* Some RTAS ops require a data buffer and that buffer must be < 4G.
* Rather than having a memory allocator, just use this buffer
* (get the lock first), make the RTAS call. Copy the data instead
......
/*
* include/asm-ppc/rwsem.h: R/W semaphores for PPC using the stuff
* include/asm-ppc64/rwsem.h: R/W semaphores for PPC using the stuff
* in lib/rwsem.c. Adapted largely from include/asm-i386/rwsem.h
* by Paul Mackerras <paulus@samba.org>.
*
......@@ -74,9 +74,7 @@ static inline void init_rwsem(struct rw_semaphore *sem)
*/
static inline void __down_read(struct rw_semaphore *sem)
{
if (atomic_inc_return((atomic_t *)(&sem->count)) > 0)
smp_wmb();
else
if (unlikely(atomic_inc_return((atomic_t *)(&sem->count)) <= 0))
rwsem_down_read_failed(sem);
}
......@@ -87,7 +85,6 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
while ((tmp = sem->count) >= 0) {
if (tmp == cmpxchg(&sem->count, tmp,
tmp + RWSEM_ACTIVE_READ_BIAS)) {
smp_wmb();
return 1;
}
}
......@@ -103,9 +100,7 @@ static inline void __down_write(struct rw_semaphore *sem)
tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
(atomic_t *)(&sem->count));
if (tmp == RWSEM_ACTIVE_WRITE_BIAS)
smp_wmb();
else
if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
rwsem_down_write_failed(sem);
}
......@@ -115,7 +110,6 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
RWSEM_ACTIVE_WRITE_BIAS);
smp_wmb();
return tmp == RWSEM_UNLOCKED_VALUE;
}
......@@ -126,9 +120,8 @@ static inline void __up_read(struct rw_semaphore *sem)
{
int tmp;
smp_wmb();
tmp = atomic_dec_return((atomic_t *)(&sem->count));
if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)
if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
rwsem_wake(sem);
}
......@@ -137,9 +130,8 @@ static inline void __up_read(struct rw_semaphore *sem)
*/
static inline void __up_write(struct rw_semaphore *sem)
{
smp_wmb();
if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
(atomic_t *)(&sem->count)) < 0)
if (unlikely(atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
(atomic_t *)(&sem->count)) < 0))
rwsem_wake(sem);
}
......@@ -158,7 +150,6 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
{
int tmp;
smp_wmb();
tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
if (tmp < 0)
rwsem_downgrade_wake(sem);
......@@ -169,7 +160,6 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
*/
static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
{
smp_mb();
return atomic_add_return(delta, (atomic_t *)(&sem->count));
}
......
......@@ -82,9 +82,8 @@ static inline void down(struct semaphore * sem)
/*
* Try to get the semaphore, take the slow path if we fail.
*/
if (atomic_dec_return(&sem->count) < 0)
if (unlikely(atomic_dec_return(&sem->count) < 0))
__down(sem);
smp_wmb();
}
static inline int down_interruptible(struct semaphore * sem)
......@@ -96,23 +95,18 @@ static inline int down_interruptible(struct semaphore * sem)
#endif
might_sleep();
if (atomic_dec_return(&sem->count) < 0)
if (unlikely(atomic_dec_return(&sem->count) < 0))
ret = __down_interruptible(sem);
smp_wmb();
return ret;
}
static inline int down_trylock(struct semaphore * sem)
{
int ret;
#ifdef WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
ret = atomic_dec_if_positive(&sem->count) < 0;
smp_wmb();
return ret;
return atomic_dec_if_positive(&sem->count) < 0;
}
static inline void up(struct semaphore * sem)
......@@ -121,8 +115,7 @@ static inline void up(struct semaphore * sem)
CHECK_MAGIC(sem->__magic);
#endif
smp_wmb();
if (atomic_inc_return(&sem->count) <= 0)
if (unlikely(atomic_inc_return(&sem->count) <= 0))
__up(sem);
}
......
......@@ -26,11 +26,9 @@
/* Standard COM flags (except for COM4, because of the 8514 problem) */
#ifdef CONFIG_SERIAL_DETECT_IRQ
#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ)
#define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ)
#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ)
#else
#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
#define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF
#define STD_COM_FLAGS ASYNC_BOOT_AUTOCONF
#endif
#ifdef CONFIG_SERIAL_MANY_PORTS
......@@ -60,8 +58,8 @@
/* UART CLK PORT IRQ FLAGS */ \
{ 0, BASE_BAUD, 0x3F8, 4, STD_COM_FLAGS }, /* ttyS0 */ \
{ 0, BASE_BAUD, 0x2F8, 3, STD_COM_FLAGS }, /* ttyS1 */ \
{ 0, BASE_BAUD, 0x3E8, 4, STD_COM_FLAGS }, /* ttyS2 */ \
{ 0, BASE_BAUD, 0x2E8, 3, STD_COM4_FLAGS }, /* ttyS3 */
{ 0, BASE_BAUD, 0x890, 0xf, STD_COM_FLAGS }, /* ttyS2 */ \
{ 0, BASE_BAUD, 0x898, 0xe, STD_COM_FLAGS }, /* ttyS3 */
#ifdef CONFIG_SERIAL_MANY_PORTS
......
......@@ -24,14 +24,16 @@ static inline int cpu_to_node(int cpu)
#define parent_node(node) (node)
static inline unsigned long node_to_cpumask(int node)
static inline cpumask_t node_to_cpumask(int node)
{
return numa_cpumask_lookup_table[node];
}
static inline int node_to_first_cpu(int node)
{
return __ffs(node_to_cpumask(node));
cpumask_t tmp;
tmp = node_to_cpumask(node);
return first_cpu(tmp);
}
#define node_to_memblk(node) (node)
......
......@@ -132,6 +132,7 @@ extern long __put_user_bad(void);
#define __put_user_size(x,ptr,size,retval,errret) \
do { \
might_sleep(); \
retval = 0; \
switch (size) { \
case 1: __put_user_asm(x,ptr,retval,"stb",errret); break; \
......@@ -185,6 +186,7 @@ extern long __get_user_bad(void);
#define __get_user_size(x,ptr,size,retval,errret) \
do { \
might_sleep(); \
retval = 0; \
switch (size) { \
case 1: __get_user_asm(x,ptr,retval,"lbz",errret); break; \
......@@ -220,6 +222,7 @@ extern unsigned long __copy_tofrom_user(void *to, const void *from,
static inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
might_sleep();
if (__builtin_constant_p(n)) {
unsigned long ret;
......@@ -244,6 +247,7 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
static inline unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_sleep();
if (__builtin_constant_p(n)) {
unsigned long ret;
......@@ -289,6 +293,7 @@ copy_to_user(void *to, const void *from, unsigned long n)
static inline unsigned long
copy_in_user(void *to, const void *from, unsigned long n)
{
might_sleep();
if (likely(access_ok(VERIFY_READ, from, n) &&
access_ok(VERIFY_WRITE, to, n)))
n =__copy_tofrom_user(to, from, n);
......@@ -300,6 +305,7 @@ extern unsigned long __clear_user(void *addr, unsigned long size);
static inline unsigned long
clear_user(void *addr, unsigned long size)
{
might_sleep();
if (likely(access_ok(VERIFY_WRITE, addr, size)))
size = __clear_user(addr, size);
return size;
......@@ -310,6 +316,7 @@ extern int __strncpy_from_user(char *dst, const char *src, long count);
static inline long
strncpy_from_user(char *dst, const char *src, long count)
{
might_sleep();
if (likely(access_ok(VERIFY_READ, src, 1)))
return __strncpy_from_user(dst, src, count);
return -EFAULT;
......@@ -329,6 +336,7 @@ extern int __strnlen_user(const char *str, long len);
*/
static inline int strnlen_user(const char *str, long len)
{
might_sleep();
if (likely(access_ok(VERIFY_READ, str, 1)))
return __strnlen_user(str, len);
return 0;
......